3 * Copyright (c) 2000-2011 The libav developers.
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
32 #include "libavformat/avformat.h"
33 #include "libavdevice/avdevice.h"
34 #include "libswscale/swscale.h"
35 #include "libavresample/avresample.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/channel_layout.h"
38 #include "libavutil/parseutils.h"
39 #include "libavutil/samplefmt.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavutil/time.h"
49 #include "libavformat/os_support.h"
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
55 #if HAVE_SYS_RESOURCE_H
57 #include <sys/types.h>
58 #include <sys/resource.h>
59 #elif HAVE_GETPROCESSTIMES
62 #if HAVE_GETPROCESSMEMORYINFO
68 #include <sys/select.h>
80 #include "libavutil/avassert.h"
/* program identity reported by the shared cmdutils help/version code */
82 const char program_name[] = "avconv";
83 const int program_birth_year = 2000;
/* lazily-opened log file for per-frame video statistics (-vstats) */
85 static FILE *vstats_file;
/* count of frames dropped by the video sync code; shown in print_report() */
87 static int nb_frames_drop = 0;
92 /* signal to input threads that they should exit; set by the main thread */
93 static int transcoding_finished;
96 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
/* global dynamic arrays of all input/output streams and files; filled by the
 * option-parsing code — presumably in a sibling file, verify against build */
98 InputStream **input_streams = NULL;
99 int nb_input_streams = 0;
100 InputFile **input_files = NULL;
101 int nb_input_files = 0;
103 OutputStream **output_streams = NULL;
104 int nb_output_streams = 0;
105 OutputFile **output_files = NULL;
106 int nb_output_files = 0;
/* all configured filtergraphs (its counter nb_filtergraphs is declared elsewhere) */
108 FilterGraph **filtergraphs;
/* Terminal/exit teardown: emits an empty message at AV_LOG_QUIET, which
 * serves to flush/finalize the logger on shutdown. */
111 static void term_exit(void)
113 av_log(NULL, AV_LOG_QUIET, "");
/* Signal bookkeeping: written from the async signal handler, read by the
 * main loop and the interrupt callback; volatile because of that cross-
 * context access. */
116 static volatile int received_sigterm = 0;
117 static volatile int received_nb_signals = 0;
/* Async-signal-safe handler: records which signal arrived and how many
 * signals have been received in total (return type is on a line not
 * visible in this view). */
120 sigterm_handler(int sig)
122 received_sigterm = sig;
123 received_nb_signals++;
/* Install sigterm_handler for the signals that should trigger a clean
 * shutdown of the transcoding loop. */
127 static void term_init(void)
129 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
130 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
132 signal(SIGXCPU, sigterm_handler);
/* AVIO interrupt callback: returns nonzero (abort blocking I/O) only after
 * a second signal arrives — the first signal requests a graceful stop, the
 * second forces termination. */
136 static int decode_interrupt_cb(void *ctx)
138 return received_nb_signals > 1;
/* interrupt callback handed to libavformat contexts */
141 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown called on exit: frees filtergraphs, output files/streams,
 * input files/streams and the global arrays themselves, then deinits the
 * network layer and logs the terminating signal if one was received.
 * ret is the process exit status (its use is on lines not visible here). */
143 static void avconv_cleanup(int ret)
/* free every filtergraph, including per-input/per-output name strings */
147 for (i = 0; i < nb_filtergraphs; i++) {
148 FilterGraph *fg = filtergraphs[i];
149 avfilter_graph_free(&fg->graph);
150 for (j = 0; j < fg->nb_inputs; j++) {
151 av_freep(&fg->inputs[j]->name);
152 av_freep(&fg->inputs[j]);
154 av_freep(&fg->inputs);
155 for (j = 0; j < fg->nb_outputs; j++) {
156 av_freep(&fg->outputs[j]->name);
157 av_freep(&fg->outputs[j]);
159 av_freep(&fg->outputs);
160 av_freep(&fg->graph_desc);
162 av_freep(&filtergraphs[i]);
164 av_freep(&filtergraphs);
/* close output files; muxer contexts with a real file (no AVFMT_NOFILE)
 * have their pb closed on a line elided from this view */
167 for (i = 0; i < nb_output_files; i++) {
168 OutputFile *of = output_files[i];
169 AVFormatContext *s = of->ctx;
170 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
172 avformat_free_context(s);
173 av_dict_free(&of->opts);
175 av_freep(&output_files[i]);
/* free per-output-stream state: bitstream filter chain, cached frames,
 * parser, option strings and the encoder context */
177 for (i = 0; i < nb_output_streams; i++) {
178 OutputStream *ost = output_streams[i];
179 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
181 AVBitStreamFilterContext *next = bsfc->next;
182 av_bitstream_filter_close(bsfc);
185 ost->bitstream_filters = NULL;
186 av_frame_free(&ost->filtered_frame);
188 av_parser_close(ost->parser);
190 av_freep(&ost->forced_keyframes);
191 av_freep(&ost->avfilter);
192 av_freep(&ost->logfile_prefix);
194 avcodec_free_context(&ost->enc_ctx);
196 av_freep(&output_streams[i]);
198 for (i = 0; i < nb_input_files; i++) {
199 avformat_close_input(&input_files[i]->ctx);
200 av_freep(&input_files[i]);
/* free per-input-stream state symmetric to the output-stream loop above */
202 for (i = 0; i < nb_input_streams; i++) {
203 InputStream *ist = input_streams[i];
205 av_frame_free(&ist->decoded_frame);
206 av_frame_free(&ist->filter_frame);
207 av_dict_free(&ist->decoder_opts);
208 av_freep(&ist->filters);
209 av_freep(&ist->hwaccel_device);
211 avcodec_free_context(&ist->dec_ctx);
213 av_freep(&input_streams[i]);
218 av_free(vstats_filename);
/* finally drop the global arrays themselves */
220 av_freep(&input_streams);
221 av_freep(&input_files);
222 av_freep(&output_streams);
223 av_freep(&output_files);
227 avformat_network_deinit();
229 if (received_sigterm) {
230 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
231 (int) received_sigterm);
/* Abort with a FATAL log if the dictionary still contains an entry after the
 * library consumed its options — a leftover entry means the user supplied an
 * option no component recognized. */
236 void assert_avoptions(AVDictionary *m)
238 AVDictionaryEntry *t;
/* an empty key with AV_DICT_IGNORE_SUFFIX matches any remaining entry */
239 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
240 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Report that codec c (encoder or decoder, per the flag) is experimental,
 * and if a non-experimental alternative for the same codec id exists,
 * suggest it. Called on the way to aborting the program. */
245 static void abort_codec_experimental(AVCodec *c, int encoder)
247 const char *codec_string = encoder ? "encoder" : "decoder";
249 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
250 "results.\nAdd '-strict experimental' if you want to use it.\n",
251 codec_string, c->name);
/* look up the default codec for this id; if that one is not experimental,
 * name it as the alternative */
252 codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id);
253 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
254 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
255 codec_string, codec->name);
260 * Update the requested input sample format based on the output sample format.
261 * This is currently only used to request float output from decoders which
262 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
263 * Ideally this will be removed in the future when decoders do not do format
264 * conversion and only output in their native format.
266 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
269 /* if sample formats match or a decoder sample format has already been
270 requested, just return */
271 if (enc->sample_fmt == dec->sample_fmt ||
272 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
275 /* if decoder supports more than one output format */
276 if (dec_codec && dec_codec->sample_fmts &&
277 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
278 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
279 const enum AVSampleFormat *p;
/* track the closest format below (dec) and above (inc) the encoder's */
280 int min_dec = INT_MAX, min_inc = INT_MAX;
281 enum AVSampleFormat dec_fmt = AV_SAMPLE_FMT_NONE;
282 enum AVSampleFormat inc_fmt = AV_SAMPLE_FMT_NONE;
284 /* find a matching sample format in the encoder */
285 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
286 if (*p == enc->sample_fmt) {
287 dec->request_sample_fmt = *p;
/* compare packed variants so planar/packed pairs rank as near-equal;
 * the planar mismatch penalty is applied on a line elided here */
290 enum AVSampleFormat dfmt = av_get_packed_sample_fmt(*p);
291 enum AVSampleFormat efmt = av_get_packed_sample_fmt(enc->sample_fmt);
292 int fmt_diff = 32 * abs(dfmt - efmt);
293 if (av_sample_fmt_is_planar(*p) !=
294 av_sample_fmt_is_planar(enc->sample_fmt))
299 } else if (dfmt > efmt) {
300 if (fmt_diff < min_inc) {
305 if (fmt_diff < min_dec) {
313 /* if none match, provide the one that matches quality closest */
314 dec->request_sample_fmt = min_inc != INT_MAX ? inc_fmt : dec_fmt;
/* Run the packet through ost's bitstream filter chain, enforce monotonically
 * increasing DTS, update per-stream statistics and hand the packet to the
 * muxer via av_interleaved_write_frame(). */
318 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
320 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
/* use the encoder context when encoding, the stream's codec context when
 * stream-copying */
321 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
325 * Audio encoders may split the packets -- #frames in != #packets out.
326 * But there is no reordering, so we can limit the number of output packets
327 * by simply dropping them here.
328 * Counting encoded video frames needs to be done separately because of
329 * reordering, see do_video_out()
331 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
332 if (ost->frame_number >= ost->max_frames) {
/* apply each bitstream filter; on success the filter may return a new
 * buffer which we wrap in an AVBufferRef so the packet owns it */
340 AVPacket new_pkt = *pkt;
341 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
342 &new_pkt.data, &new_pkt.size,
343 pkt->data, pkt->size,
344 pkt->flags & AV_PKT_FLAG_KEY);
347 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
348 av_buffer_default_free, NULL, 0);
352 av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
353 bsfc->filter->name, pkt->stream_index,
354 avctx->codec ? avctx->codec->name : "copy");
/* enforce monotonic DTS for muxers that need timestamps; non-strict
 * muxers (AVFMT_TS_NONSTRICT) tolerate equal consecutive DTS */
364 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
365 ost->last_mux_dts != AV_NOPTS_VALUE &&
366 pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
367 av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
368 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
369 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
371 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
/* otherwise clamp the DTS forward and keep PTS >= DTS */
374 av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
375 "in incorrect timestamps in the output file.\n",
376 ost->last_mux_dts + 1);
377 pkt->dts = ost->last_mux_dts + 1;
378 if (pkt->pts != AV_NOPTS_VALUE)
379 pkt->pts = FFMAX(pkt->pts, pkt->dts);
381 ost->last_mux_dts = pkt->dts;
/* per-stream accounting used by print_report()/print_final_stats() */
383 ost->data_size += pkt->size;
384 ost->packets_written++;
386 pkt->stream_index = ost->index;
387 ret = av_interleaved_write_frame(s, pkt);
389 print_error("av_interleaved_write_frame()", ret);
/* Return whether ost is still within its output file's -t recording time;
 * compares the stream's elapsed time (sync_opts - first_pts, in encoder
 * time base) against recording_time in AV_TIME_BASE units. */
394 static int check_recording_time(OutputStream *ost)
396 OutputFile *of = output_files[ost->file_index];
398 if (of->recording_time != INT64_MAX &&
399 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
400 AV_TIME_BASE_Q) >= 0) {
/* Encode one audio frame for ost and mux the resulting packet (if any)
 * via write_frame(). Advances ost->sync_opts by the frame's sample count. */
407 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
410 AVCodecContext *enc = ost->enc_ctx;
414 av_init_packet(&pkt);
/* without a usable PTS (or with negative audio sync), stamp the frame
 * with the running sample counter */
418 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
419 frame->pts = ost->sync_opts;
420 ost->sync_opts = frame->pts + frame->nb_samples;
422 ost->samples_encoded += frame->nb_samples;
423 ost->frames_encoded++;
425 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
426 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* convert from encoder time base to the muxer stream time base */
431 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
432 write_frame(s, &pkt, ost);
/* Encode one subtitle and mux it. DVB subtitles are emitted twice: one
 * packet to draw the subtitle and one to clear it (hence the nb loop). */
436 static void do_subtitle_out(AVFormatContext *s,
/* shared scratch encode buffer, allocated once on first use */
442 static uint8_t *subtitle_out = NULL;
443 int subtitle_out_max_size = 1024 * 1024;
444 int subtitle_out_size, nb, i;
448 if (pts == AV_NOPTS_VALUE) {
449 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
458 subtitle_out = av_malloc(subtitle_out_max_size);
461 /* Note: DVB subtitle need one packet to draw them and one other
462 packet to clear them */
463 /* XXX: signal it in the codec context ? */
464 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
469 for (i = 0; i < nb; i++) {
470 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
471 if (!check_recording_time(ost))
/* normalize display timing: fold start_display_time into pts so it is 0 */
474 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
475 // start_display_time is required to be 0
476 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
477 sub->end_display_time -= sub->start_display_time;
478 sub->start_display_time = 0;
480 ost->frames_encoded++;
482 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
483 subtitle_out_max_size, sub);
484 if (subtitle_out_size < 0) {
485 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
489 av_init_packet(&pkt);
490 pkt.data = subtitle_out;
491 pkt.size = subtitle_out_size;
492 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
493 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
494 /* XXX: the pts correction is handled here. Maybe handling
495 it in the codec would be better */
/* 90 = 90kHz ticks per millisecond of display time */
497 pkt.pts += 90 * sub->start_display_time;
499 pkt.pts += 90 * sub->end_display_time;
501 write_frame(s, &pkt, ost);
/* Encode one video frame for ost (or pass it through raw for RAWPICTURE
 * muxers), applying video sync (frame dropping), forced keyframes and
 * interlacing flags; muxes via write_frame() and reports the encoded
 * size through *frame_size for -vstats. */
505 static void do_video_out(AVFormatContext *s,
510 int ret, format_video_sync;
512 AVCodecContext *enc = ost->enc_ctx;
/* resolve VSYNC_AUTO from the muxer's timestamp capabilities */
516 format_video_sync = video_sync_method;
517 if (format_video_sync == VSYNC_AUTO)
518 format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
519 (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
/* drop frames whose timestamp is behind the stream clock */
520 if (format_video_sync != VSYNC_PASSTHROUGH &&
522 in_picture->pts != AV_NOPTS_VALUE &&
523 in_picture->pts < ost->sync_opts) {
525 av_log(NULL, AV_LOG_WARNING,
526 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
527 ost->frame_number, ost->st->index, in_picture->pts);
531 if (in_picture->pts == AV_NOPTS_VALUE)
532 in_picture->pts = ost->sync_opts;
533 ost->sync_opts = in_picture->pts;
536 if (!ost->frame_number)
537 ost->first_pts = in_picture->pts;
539 av_init_packet(&pkt);
543 if (ost->frame_number >= ost->max_frames)
/* raw path: hand the AVPicture itself to the muxer, no encode */
546 if (s->oformat->flags & AVFMT_RAWPICTURE &&
547 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
548 /* raw pictures are written as AVPicture structure to
549 avoid any copies. We support temporarily the older
551 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
552 enc->coded_frame->top_field_first = in_picture->top_field_first;
553 pkt.data = (uint8_t *)in_picture;
554 pkt.size = sizeof(AVPicture);
555 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
556 pkt.flags |= AV_PKT_FLAG_KEY;
558 write_frame(s, &pkt, ost);
/* honor user-forced field order when interlaced encoding is enabled */
562 if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
563 ost->top_field_first >= 0)
564 in_picture->top_field_first = !!ost->top_field_first;
566 in_picture->quality = enc->global_quality;
567 in_picture->pict_type = 0;
/* force an I-frame at each user-specified keyframe timestamp */
568 if (ost->forced_kf_index < ost->forced_kf_count &&
569 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
570 in_picture->pict_type = AV_PICTURE_TYPE_I;
571 ost->forced_kf_index++;
574 ost->frames_encoded++;
576 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
578 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
583 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
584 write_frame(s, &pkt, ost);
585 *frame_size = pkt.size;
587 /* if two pass, output log */
588 if (ost->logfile && enc->stats_out) {
589 fprintf(ost->logfile, "%s", enc->stats_out);
595 * For video, number of frames in == number of packets out.
596 * But there may be reordering, so we can't throw away frames on encoder
597 * flush, we need to limit them here, before they go into encoder.
/* Convert a normalized mean squared error d into PSNR in decibels:
 * -10 * log10(d). */
602 static double psnr(double d)
604 return -10.0 * log(d) / log(10.0);
/* Append one line of per-frame video statistics (quality, PSNR, frame size,
 * time, bitrates, picture type) to the -vstats file, opening it on the
 * first call. */
607 static void do_video_stats(OutputStream *ost, int frame_size)
611 double ti1, bitrate, avg_bitrate;
613 /* this is executed just the first time do_video_stats is called */
615 vstats_file = fopen(vstats_filename, "w");
623 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
624 frame_number = ost->frame_number;
/* coded_frame->quality is in lambda units; divide to get a QP-like value */
625 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
626 if (enc->flags&CODEC_FLAG_PSNR)
627 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
629 fprintf(vstats_file,"f_size= %6d ", frame_size);
630 /* compute pts value */
631 ti1 = ost->sync_opts * av_q2d(enc->time_base);
/* instantaneous bitrate from this frame, average from total bytes so far */
635 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
636 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
637 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
638 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
639 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
644 * Read one frame for lavfi output for ost and encode it.
646 static int poll_filter(OutputStream *ost)
648 OutputFile *of = output_files[ost->file_index];
649 AVFrame *filtered_frame = NULL;
/* lazily allocate the reusable frame the filter output is read into */
652 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
653 return AVERROR(ENOMEM);
655 filtered_frame = ost->filtered_frame;
/* fixed-frame-size audio encoders must be fed exactly frame_size samples */
657 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
658 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
659 ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame,
660 ost->enc_ctx->frame_size);
662 ret = av_buffersink_get_frame(ost->filter->filter, filtered_frame);
/* rebase the frame's pts from the filter time base to the encoder time
 * base, shifted by the output file's start time */
667 if (filtered_frame->pts != AV_NOPTS_VALUE) {
668 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
669 filtered_frame->pts = av_rescale_q(filtered_frame->pts,
670 ost->filter->filter->inputs[0]->time_base,
671 ost->enc_ctx->time_base) -
672 av_rescale_q(start_time,
674 ost->enc_ctx->time_base);
677 switch (ost->filter->filter->inputs[0]->type) {
678 case AVMEDIA_TYPE_VIDEO:
/* without a user-forced aspect ratio, propagate the frame's SAR */
679 if (!ost->frame_aspect_ratio)
680 ost->enc_ctx->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
682 do_video_out(of->ctx, ost, filtered_frame, &frame_size);
683 if (vstats_filename && frame_size)
684 do_video_stats(ost, frame_size);
686 case AVMEDIA_TYPE_AUDIO:
687 do_audio_out(of->ctx, ost, filtered_frame);
690 // TODO support subtitle filters
694 av_frame_unref(filtered_frame);
/* Mark every stream of ost's output file as finished so the main loop stops
 * pulling frames for them. */
699 static void finish_output_stream(OutputStream *ost)
701 OutputFile *of = output_files[ost->file_index];
707 for (i = 0; i < of->ctx->nb_streams; i++)
708 output_streams[of->ost_index + i]->finished = 1;
713 * Read as many frames from possible from lavfi and encode them.
715 * Always read from the active stream with the lowest timestamp. If no frames
716 * are available for it then return EAGAIN and wait for more input. This way we
717 * can use lavfi sources that generate unlimited amount of frames without memory
720 static int poll_filters(void)
724 while (ret >= 0 && !received_sigterm) {
725 OutputStream *ost = NULL;
726 int64_t min_pts = INT64_MAX;
728 /* choose output stream with the lowest timestamp */
729 for (i = 0; i < nb_output_streams; i++) {
730 int64_t pts = output_streams[i]->sync_opts;
/* skip streams with no filter attached or already finished */
732 if (!output_streams[i]->filter || output_streams[i]->finished)
735 pts = av_rescale_q(pts, output_streams[i]->enc_ctx->time_base,
739 ost = output_streams[i];
746 ret = poll_filter(ost);
748 if (ret == AVERROR_EOF) {
749 finish_output_stream(ost);
/* EAGAIN: the chosen stream has no frame yet; wait for more input */
751 } else if (ret == AVERROR(EAGAIN))
/* Log the end-of-run summary: per-type output byte totals with muxing
 * overhead percentage, then (at VERBOSE level) per-stream demux/decode and
 * encode/mux statistics for every input and output file. */
758 static void print_final_stats(int64_t total_size)
760 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
761 uint64_t data_size = 0;
/* -1 marks "overhead unknown" (e.g. unseekable output) */
762 float percent = -1.0;
765 for (i = 0; i < nb_output_streams; i++) {
766 OutputStream *ost = output_streams[i];
767 switch (ost->enc_ctx->codec_type) {
768 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
769 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
770 default: other_size += ost->data_size; break;
772 extra_size += ost->enc_ctx->extradata_size;
773 data_size += ost->data_size;
/* overhead = container bytes beyond the raw stream payload */
776 if (data_size && total_size >= data_size)
777 percent = 100.0 * (total_size - data_size) / data_size;
779 av_log(NULL, AV_LOG_INFO, "\n");
780 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
784 extra_size / 1024.0);
786 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
788 av_log(NULL, AV_LOG_INFO, "unknown");
789 av_log(NULL, AV_LOG_INFO, "\n");
791 /* print verbose per-stream stats */
792 for (i = 0; i < nb_input_files; i++) {
793 InputFile *f = input_files[i];
/* note: shadows the total_size parameter for this file's totals */
794 uint64_t total_packets = 0, total_size = 0;
796 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
797 i, f->ctx->filename);
799 for (j = 0; j < f->nb_streams; j++) {
800 InputStream *ist = input_streams[f->ist_index + j];
801 enum AVMediaType type = ist->dec_ctx->codec_type;
803 total_size += ist->data_size;
804 total_packets += ist->nb_packets;
806 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
807 i, j, media_type_string(type));
808 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
809 ist->nb_packets, ist->data_size);
811 if (ist->decoding_needed) {
812 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
813 ist->frames_decoded);
814 if (type == AVMEDIA_TYPE_AUDIO)
815 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
816 av_log(NULL, AV_LOG_VERBOSE, "; ");
819 av_log(NULL, AV_LOG_VERBOSE, "\n");
822 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
823 total_packets, total_size);
/* symmetric summary for each output file */
826 for (i = 0; i < nb_output_files; i++) {
827 OutputFile *of = output_files[i];
828 uint64_t total_packets = 0, total_size = 0;
830 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
831 i, of->ctx->filename);
833 for (j = 0; j < of->ctx->nb_streams; j++) {
834 OutputStream *ost = output_streams[of->ost_index + j];
835 enum AVMediaType type = ost->enc_ctx->codec_type;
837 total_size += ost->data_size;
838 total_packets += ost->packets_written;
840 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
841 i, j, media_type_string(type));
842 if (ost->encoding_needed) {
843 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
844 ost->frames_encoded);
845 if (type == AVMEDIA_TYPE_AUDIO)
846 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
847 av_log(NULL, AV_LOG_VERBOSE, "; ");
850 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
851 ost->packets_written, ost->data_size);
853 av_log(NULL, AV_LOG_VERBOSE, "\n");
856 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
857 total_packets, total_size);
/* Print the periodic progress line (frame count, fps, q, size, time,
 * bitrate, drops) to the log, throttled to one update per 0.5 s, and the
 * final statistics when is_last_report is set. Uses static state, so it is
 * not reentrant. */
861 static void print_report(int is_last_report, int64_t timer_start)
868 int frame_number, vid, i;
869 double bitrate, ti1, pts;
/* persistent across calls: last print time and the QP histogram for -qphist */
870 static int64_t last_time = -1;
871 static int qp_histogram[52];
873 if (!print_stats && !is_last_report)
876 if (!is_last_report) {
878 /* display the report every 0.5 seconds */
879 cur_time = av_gettime_relative();
880 if (last_time == -1) {
881 last_time = cur_time;
884 if ((cur_time - last_time) < 500000)
886 last_time = cur_time;
/* total output size so far; fall back to avio_tell for unseekable output */
890 oc = output_files[0]->ctx;
892 total_size = avio_size(oc->pb);
893 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
894 total_size = avio_tell(oc->pb);
895 if (total_size < 0) {
897 av_strerror(total_size, errbuf, sizeof(errbuf));
898 av_log(NULL, AV_LOG_VERBOSE, "Bitrate not available, "
899 "avio_tell() failed: %s\n", errbuf);
/* build the status line from every output stream */
906 for (i = 0; i < nb_output_streams; i++) {
908 ost = output_streams[i];
910 if (!ost->stream_copy && enc->coded_frame)
911 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
912 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
913 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
/* only the first video stream contributes frame/fps numbers */
915 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
916 float t = (av_gettime_relative() - timer_start) / 1000000.0;
918 frame_number = ost->frame_number;
919 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
920 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
922 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: log2-compressed histogram of quantizer values */
926 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
928 for (j = 0; j < 32; j++)
929 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
931 if (enc->flags&CODEC_FLAG_PSNR) {
933 double error, error_sum = 0;
934 double scale, scale_sum = 0;
935 char type[3] = { 'Y','U','V' };
936 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
937 for (j = 0; j < 3; j++) {
/* final report uses the accumulated error over all frames */
938 if (is_last_report) {
939 error = enc->error[j];
940 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
942 error = enc->coded_frame->error[j];
943 scale = enc->width * enc->height * 255.0 * 255.0;
949 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
951 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
955 /* compute min output value */
956 pts = (double)ost->last_mux_dts * av_q2d(ost->st->time_base);
957 if ((pts < ti1) && (pts > 0))
963 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
965 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
966 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
967 (double)total_size / 1024, ti1, bitrate);
970 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " drop=%d",
/* \r keeps the progress line updating in place on a terminal */
973 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
978 print_final_stats(total_size);
/* Drain every encoder at end of stream: repeatedly call the encode function
 * with a NULL frame until it stops producing packets, muxing each one.
 * Skipped for streamcopy, PCM-ish audio (frame_size <= 1) and raw video. */
982 static void flush_encoders(void)
986 for (i = 0; i < nb_output_streams; i++) {
987 OutputStream *ost = output_streams[i];
988 AVCodecContext *enc = ost->enc_ctx;
989 AVFormatContext *os = output_files[ost->file_index]->ctx;
990 int stop_encoding = 0;
992 if (!ost->encoding_needed)
995 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
997 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* pick the media-type-specific encode entry point */
1001 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1004 switch (enc->codec_type) {
1005 case AVMEDIA_TYPE_AUDIO:
1006 encode = avcodec_encode_audio2;
1009 case AVMEDIA_TYPE_VIDEO:
1010 encode = avcodec_encode_video2;
1020 av_init_packet(&pkt);
/* NULL frame signals flush; got_packet tells us when the encoder is drained */
1024 ret = encode(enc, &pkt, NULL, &got_packet);
1026 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1029 if (ost->logfile && enc->stats_out) {
1030 fprintf(ost->logfile, "%s", enc->stats_out);
1036 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1037 write_frame(os, &pkt, ost);
1047 * Check whether a packet from ist should be written into ost at this time
1049 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1051 OutputFile *of = output_files[ost->file_index];
/* ist's global index across all input files, for comparison with the
 * output stream's configured source */
1052 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1054 if (ost->source_index != ist_index)
/* drop packets before the output file's -ss start time */
1057 if (of->start_time != AV_NOPTS_VALUE && ist->last_dts < of->start_time)
/* Copy one packet from ist to ost without re-encoding: rescale timestamps
 * into the output stream time base (shifted by the output start time),
 * enforce -t limits on both sides, optionally run the parser to fix up
 * headers, then mux via write_frame(). */
1063 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1065 OutputFile *of = output_files[ost->file_index];
1066 InputFile *f = input_files [ist->file_index];
1067 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1068 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1071 av_init_packet(&opkt);
/* by default skip leading non-keyframes so the output starts decodable */
1073 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1074 !ost->copy_initial_nonkeyframes)
/* output-side recording time limit (-t on the output file) */
1077 if (of->recording_time != INT64_MAX &&
1078 ist->last_dts >= of->recording_time + start_time) {
/* input-side recording time limit (-t on the input file) */
1083 if (f->recording_time != INT64_MAX) {
1084 start_time = f->ctx->start_time;
1085 if (f->start_time != AV_NOPTS_VALUE)
1086 start_time += f->start_time;
1087 if (ist->last_dts >= f->recording_time + start_time) {
1093 /* force the input stream PTS */
1094 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1097 if (pkt->pts != AV_NOPTS_VALUE)
1098 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1100 opkt.pts = AV_NOPTS_VALUE;
/* missing DTS: synthesize one from the input's running last_dts */
1102 if (pkt->dts == AV_NOPTS_VALUE)
1103 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
1105 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1106 opkt.dts -= ost_tb_start_time;
1108 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1109 opkt.flags = pkt->flags;
1111 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1112 if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1113 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1114 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1115 && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
/* av_parser_change may allocate a new buffer; wrap it so opkt owns it */
1117 if (av_parser_change(ost->parser, ost->st->codec,
1118 &opkt.data, &opkt.size,
1119 pkt->data, pkt->size,
1120 pkt->flags & AV_PKT_FLAG_KEY)) {
1121 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1126 opkt.data = pkt->data;
1127 opkt.size = pkt->size;
1130 write_frame(of->ctx, &opkt, ost);
/* If ist's decoder has no channel layout, derive the default layout for its
 * channel count and log the guess. Return value semantics are on lines not
 * visible here — appears to report whether a layout is now set. */
1133 int guess_input_channel_layout(InputStream *ist)
1135 AVCodecContext *dec = ist->dec_ctx;
1137 if (!dec->channel_layout) {
1138 char layout_name[256];
1140 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1141 if (!dec->channel_layout)
1143 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1144 dec->channels, dec->channel_layout);
1145 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1146 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Decode one audio packet for ist, detect input format changes (triggering
 * filtergraph reconfiguration), rescale the frame pts to a 1/sample_rate
 * time base and feed the decoded frame into every filter the stream feeds.
 * *got_output is set when a frame was produced. */
1151 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1153 AVFrame *decoded_frame, *f;
1154 AVCodecContext *avctx = ist->dec_ctx;
1155 int i, ret, err = 0, resample_changed;
/* lazily allocate the reusable decode/filter frames */
1157 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1158 return AVERROR(ENOMEM);
1159 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1160 return AVERROR(ENOMEM);
1161 decoded_frame = ist->decoded_frame;
1163 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1164 if (!*got_output || ret < 0)
1167 ist->samples_decoded += decoded_frame->nb_samples;
1168 ist->frames_decoded++;
1170 /* if the decoder provides a pts, use it instead of the last packet pts.
1171 the decoder could be delaying output by a packet or more. */
1172 if (decoded_frame->pts != AV_NOPTS_VALUE)
1173 ist->next_dts = decoded_frame->pts;
1174 else if (pkt->pts != AV_NOPTS_VALUE)
1175 decoded_frame->pts = pkt->pts;
1176 pkt->pts = AV_NOPTS_VALUE;
/* any change in sample format/rate/channels/layout requires the
 * filtergraphs consuming this stream to be reconfigured */
1178 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1179 ist->resample_channels != avctx->channels ||
1180 ist->resample_channel_layout != decoded_frame->channel_layout ||
1181 ist->resample_sample_rate != decoded_frame->sample_rate;
1182 if (resample_changed) {
1183 char layout1[64], layout2[64];
1185 if (!guess_input_channel_layout(ist)) {
1186 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1187 "layout for Input Stream #%d.%d\n", ist->file_index,
1191 decoded_frame->channel_layout = avctx->channel_layout;
1193 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1194 ist->resample_channel_layout);
1195 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1196 decoded_frame->channel_layout);
1198 av_log(NULL, AV_LOG_INFO,
1199 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1200 ist->file_index, ist->st->index,
1201 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1202 ist->resample_channels, layout1,
1203 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1204 avctx->channels, layout2);
/* remember the new parameters for the next change detection */
1206 ist->resample_sample_fmt = decoded_frame->format;
1207 ist->resample_sample_rate = decoded_frame->sample_rate;
1208 ist->resample_channel_layout = decoded_frame->channel_layout;
1209 ist->resample_channels = avctx->channels;
1211 for (i = 0; i < nb_filtergraphs; i++)
1212 if (ist_in_filtergraph(filtergraphs[i], ist) &&
1213 configure_filtergraph(filtergraphs[i]) < 0) {
1214 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* rebase the pts to a 1/sample_rate time base for the filters */
1219 if (decoded_frame->pts != AV_NOPTS_VALUE)
1220 decoded_frame->pts = av_rescale_q(decoded_frame->pts,
1222 (AVRational){1, avctx->sample_rate});
/* feed every filter; all but the last get a ref so the frame survives */
1223 for (i = 0; i < ist->nb_filters; i++) {
1224 if (i < ist->nb_filters - 1) {
1225 f = ist->filter_frame;
1226 err = av_frame_ref(f, decoded_frame);
1232 err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
1237 av_frame_unref(ist->filter_frame);
1238 av_frame_unref(decoded_frame);
1239 return err < 0 ? err : ret;
/* Decode one video packet from 'ist' and push the decoded frame into every
 * filtergraph input attached to this stream.
 * Returns 0 on success or a negative AVERROR; *got_output is set by the
 * decoder when a frame was produced.
 * NOTE(review): this extract has interior lines elided (the embedded original
 * line numbers jump); the code is kept byte-identical below. */
1242 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1244     AVFrame *decoded_frame, *f;
1245     int i, ret = 0, err = 0, resample_changed;
/* Lazily allocate the persistent decode/filter frames owned by the stream. */
1247     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1248         return AVERROR(ENOMEM);
1249     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1250         return AVERROR(ENOMEM);
1251     decoded_frame = ist->decoded_frame;
1253     ret = avcodec_decode_video2(ist->dec_ctx,
1254                                 decoded_frame, got_output, pkt);
1255     if (!*got_output || ret < 0)
1258     ist->frames_decoded++;
/* Download the frame from hwaccel memory when the decoder produced a
 * hardware pixel format. */
1260     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
1261         err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
1265     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
1267     decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
1268                                            decoded_frame->pkt_dts);
/* Container-level SAR overrides the codec-level one. */
1271     if (ist->st->sample_aspect_ratio.num)
1272         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* Detect a mid-stream change of frame geometry/format; the filtergraphs
 * must then be drained and reconfigured. */
1274     resample_changed = ist->resample_width   != decoded_frame->width  ||
1275                        ist->resample_height  != decoded_frame->height ||
1276                        ist->resample_pix_fmt != decoded_frame->format;
1277     if (resample_changed) {
1278         av_log(NULL, AV_LOG_INFO,
1279                "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1280                ist->file_index, ist->st->index,
1281                ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
1282                decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
/* Flush frames buffered in the old filtergraph configuration first. */
1284         ret = poll_filters();
1285         if (ret < 0 && (ret != AVERROR_EOF && ret != AVERROR(EAGAIN))) {
1287             av_strerror(ret, errbuf, sizeof(errbuf));
1289             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
1292         ist->resample_width   = decoded_frame->width;
1293         ist->resample_height  = decoded_frame->height;
1294         ist->resample_pix_fmt = decoded_frame->format;
1296         for (i = 0; i < nb_filtergraphs; i++)
1297             if (ist_in_filtergraph(filtergraphs[i], ist) &&
1298                 configure_filtergraph(filtergraphs[i]) < 0) {
1299                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* Feed the frame to each attached filter input; all but the last get a
 * reference so the frame can be reused. */
1304     for (i = 0; i < ist->nb_filters; i++) {
1305         if (i < ist->nb_filters - 1) {
1306             f = ist->filter_frame;
1307             err = av_frame_ref(f, decoded_frame);
1313         err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
1319     av_frame_unref(ist->filter_frame);
1320     av_frame_unref(decoded_frame);
1321     return err < 0 ? err : ret;
/* Decode one subtitle packet and encode/mux it directly into every output
 * stream fed by this input (subtitles bypass lavfi).
 * NOTE(review): interior lines are elided in this extract; code kept
 * byte-identical. */
1324 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
1326     AVSubtitle subtitle;
1327     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
1328                                           &subtitle, got_output, pkt);
1334     ist->frames_decoded++;
1336     for (i = 0; i < nb_output_streams; i++) {
1337         OutputStream *ost = output_streams[i];
/* Skip outputs not connected to this input or doing streamcopy. */
1339         if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1342         do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
/* The decoded subtitle owns allocated rects; free them after muxing. */
1345     avsubtitle_free(&subtitle);
/* Signal EOF (NULL frame) to every buffersrc attached to this input stream
 * so the filtergraphs can flush. Presumably returns 0 or a negative
 * AVERROR — the tail of the function is elided in this extract. */
1349 static int send_filter_eof(InputStream *ist)
1352     for (i = 0; i < ist->nb_filters; i++) {
1353         ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
/* Dispatch one input packet: decode it (audio/video/subtitle) when the
 * stream needs decoding, otherwise advance timestamps and streamcopy.
 * NOTE(review): many interior lines (declarations, breaks, error paths)
 * are elided in this extract; code kept byte-identical. */
1360 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1361 static void process_input_packet(InputStream *ist, const AVPacket *pkt)
1367     if (ist->next_dts == AV_NOPTS_VALUE)
1368         ist->next_dts = ist->last_dts;
1372     av_init_packet(&avpkt);
/* Seed the DTS predictor from the packet when the container provides one. */
1380     if (pkt->dts != AV_NOPTS_VALUE)
1381         ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1383     // while we have more to decode or while the decoder did output something on EOF
1384     while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
1388         ist->last_dts = ist->next_dts;
/* Warn once when a single packet yields multiple frames for a codec that
 * is not declared subframe-capable. */
1390         if (avpkt.size && avpkt.size != pkt->size &&
1391             !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
1392             av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
1393                    "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1394             ist->showed_multi_packet_warning = 1;
1397         switch (ist->dec_ctx->codec_type) {
1398         case AVMEDIA_TYPE_AUDIO:
1399             ret = decode_audio    (ist, &avpkt, &got_output);
1401         case AVMEDIA_TYPE_VIDEO:
1402             ret = decode_video    (ist, &avpkt, &got_output);
/* Predict the next DTS from, in order of preference: packet duration,
 * stream average frame rate, codec frame rate. */
1404                 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
1405             else if (ist->st->avg_frame_rate.num)
1406                 ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
1408             else if (ist->dec_ctx->framerate.num != 0) {
1409                 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
1410                                               ist->dec_ctx->ticks_per_frame;
1411                 ist->next_dts += av_rescale_q(ticks, ist->dec_ctx->framerate, AV_TIME_BASE_Q);
1414         case AVMEDIA_TYPE_SUBTITLE:
1415             ret = transcode_subtitles(ist, &avpkt, &got_output);
1422             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
1423                    ist->file_index, ist->st->index);
1429         // touch data and size only if not EOF
1439     /* after flushing, send an EOF on all the filter inputs attached to the stream */
1440     if (!pkt && ist->decoding_needed) {
1441         int ret = send_filter_eof(ist);
1443             av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
1448     /* handle stream copy */
1449     if (!ist->decoding_needed) {
1450         ist->last_dts = ist->next_dts;
1451         switch (ist->dec_ctx->codec_type) {
1452         case AVMEDIA_TYPE_AUDIO:
/* Advance by one audio frame worth of samples, in AV_TIME_BASE units. */
1453             ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
1454                              ist->dec_ctx->sample_rate;
1456         case AVMEDIA_TYPE_VIDEO:
1457             if (ist->dec_ctx->framerate.num != 0) {
1458                 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
1459                 ist->next_dts += ((int64_t)AV_TIME_BASE *
1460                                   ist->dec_ctx->framerate.den * ticks) /
1461                                   ist->dec_ctx->framerate.num;
/* Streamcopy the packet into every eligible output (pkt is non-NULL here). */
1466     for (i = 0; pkt && i < nb_output_streams; i++) {
1467         OutputStream *ost = output_streams[i];
1469         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1472         do_streamcopy(ist, ost, pkt);
/* Generate and print an SDP description covering all output files (used
 * for RTP outputs). NOTE(review): the allocation null-check and the free
 * of 'avc' are elided in this extract; code kept byte-identical. */
1478 static void print_sdp(void)
1482     AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
1486     for (i = 0; i < nb_output_files; i++)
1487         avc[i] = output_files[i]->ctx;
1489     av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1490     printf("SDP:\n%s\n", sdp);
/* Look up the hwaccel entry matching the given pixel format in the global
 * 'hwaccels' table (terminated by a NULL name). The not-found return is
 * elided in this extract — presumably NULL; verify against the full file. */
1495 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
1498     for (i = 0; hwaccels[i].name; i++)
1499         if (hwaccels[i].pix_fmt == pix_fmt)
1500             return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats and pick the first hwaccel format whose hwaccel can be
 * initialized (honoring a user-requested hwaccel id).
 * NOTE(review): interior lines are elided in this extract; code kept
 * byte-identical. */
1504 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
1506     InputStream *ist = s->opaque;
1507     const enum AVPixelFormat *p;
1510     for (p = pix_fmts; *p != -1; p++) {
1511         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
1512         const HWAccel *hwaccel;
/* Software formats are not candidates here; the decoder falls back to
 * them via its default selection. */
1514         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1517         hwaccel = get_hwaccel(*p);
1519             (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
1520             (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
1523         ret = hwaccel->init(s);
/* Failing to init an explicitly requested hwaccel is fatal; with
 * HWACCEL_AUTO the loop just tries the next candidate. */
1525             if (ist->hwaccel_id == hwaccel->id) {
1526                 av_log(NULL, AV_LOG_FATAL,
1527                        "%s hwaccel requested for input stream #%d:%d, "
1528                        "but cannot be initialized.\n", hwaccel->name,
1529                        ist->file_index, ist->st->index);
1530                 return AV_PIX_FMT_NONE;
1534         ist->active_hwaccel_id = hwaccel->id;
1535         ist->hwaccel_pix_fmt   = *p;
1542 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
1544 InputStream *ist = s->opaque;
1546 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
1547 return ist->hwaccel_get_buffer(s, frame, flags);
1549 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream (when decoding is needed), install
 * the hwaccel callbacks, and initialize the stream's timestamp state.
 * On failure writes a message into 'error' and returns a negative AVERROR.
 * NOTE(review): interior lines are elided in this extract; code kept
 * byte-identical. */
1552 static int init_input_stream(int ist_index, char *error, int error_len)
1555     InputStream *ist = input_streams[ist_index];
1556     if (ist->decoding_needed) {
1557         AVCodec *codec = ist->dec;
1559             snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
1560                     ist->dec_ctx->codec_id, ist->file_index, ist->st->index);
1561             return AVERROR(EINVAL);
1564         /* update requested sample format for the decoder based on the
1565            corresponding encoder sample format */
1566         for (i = 0; i < nb_output_streams; i++) {
1567             OutputStream *ost = output_streams[i];
1568             if (ost->source_index == ist_index) {
1569                 update_sample_fmt(ist->dec_ctx, codec, ost->enc_ctx);
/* Hook hwaccel-aware format/buffer selection into the decoder context;
 * opaque carries the InputStream back into the callbacks. */
1574         ist->dec_ctx->opaque                = ist;
1575         ist->dec_ctx->get_format            = get_format;
1576         ist->dec_ctx->get_buffer2           = get_buffer;
1577         ist->dec_ctx->thread_safe_callbacks = 1;
1579         av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* Default to automatic threading unless the user set "threads". */
1581         if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
1582             av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
1583         if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
1585             if (ret == AVERROR_EXPERIMENTAL)
1586                 abort_codec_experimental(codec, 0);
1588             av_strerror(ret, errbuf, sizeof(errbuf));
1590             snprintf(error, error_len,
1591                      "Error while opening decoder for input stream "
1593                      ist->file_index, ist->st->index, errbuf);
1596         assert_avoptions(ist->decoder_opts);
/* Start last_dts shifted by the B-frame delay so that interleaving
 * estimates line up; next_dts is resolved from the first packet. */
1599     ist->last_dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1600     ist->next_dts = AV_NOPTS_VALUE;
1601     init_pts_correction(&ist->pts_ctx);
/* Find the input stream feeding an output stream: directly via
 * source_index, or — for filtergraph-fed outputs — the first graph input
 * of the same media type. NOTE(review): the filter branch's guard and the
 * final return are elided in this extract; code kept byte-identical. */
1606 static InputStream *get_input_stream(OutputStream *ost)
1608     if (ost->source_index >= 0)
1609         return input_streams[ost->source_index];
1612         FilterGraph *fg = ost->filter->graph;
1615         for (i = 0; i < fg->nb_inputs; i++)
1616             if (fg->inputs[i]->ist->dec_ctx->codec_type == ost->enc_ctx->codec_type)
1617                 return fg->inputs[i]->ist;
/* Parse the comma-separated -force_key_frames time list 'kf' into
 * ost->forced_kf_pts, rescaled to the encoder time base.
 * NOTE(review): the comma-counting body and loop tail are elided in this
 * extract; code kept byte-identical. */
1623 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1624                                     AVCodecContext *avctx)
1630     for (p = kf; *p; p++)
1633     ost->forced_kf_count = n;
1634     ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
1635     if (!ost->forced_kf_pts) {
1636         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
1641     for (i = 0; i < n; i++) {
1642         char *next = strchr(p, ',');
1647         t = parse_time_or_die("force_key_frames", p, 1);
1648         ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Set the "encoder" metadata tag on the output stream to the lavc ident
 * plus encoder name; the ident prefix is omitted in bitexact mode.
 * NOTE(review): interior lines are elided in this extract; code kept
 * byte-identical. */
1654 static void set_encoder_id(OutputFile *of, OutputStream *ost)
1656     AVDictionaryEntry *e;
1658     uint8_t *encoder_string;
1659     int encoder_string_len;
1660     int format_flags = 0;
/* Re-evaluate the muxer's fflags to learn whether bitexact is requested. */
1662     e = av_dict_get(of->opts, "fflags", NULL, 0);
1664         const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
1667         av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
1670     encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
1671     encoder_string     = av_mallocz(encoder_string_len);
1672     if (!encoder_string)
1675     if (!(format_flags & AVFMT_FLAG_BITEXACT))
1676         av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
1677     av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict. */
1678     av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
1679                 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* One-time setup for the whole transcode: derive encoder parameters for
 * every output stream (streamcopy or encode), open encoders and decoders,
 * write output headers, and dump the stream mapping.
 * Returns 0 on success, a negative AVERROR otherwise.
 * NOTE(review): this extract has many interior lines elided (the embedded
 * original line numbers jump); the code below is kept byte-identical. */
1682 static int transcode_init(void)
1684     int ret = 0, i, j, k;
1685     AVFormatContext *oc;
1691     /* init framerate emulation */
1692     for (i = 0; i < nb_input_files; i++) {
1693         InputFile *ifile = input_files[i];
1694         if (ifile->rate_emu)
1695             for (j = 0; j < ifile->nb_streams; j++)
1696                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
1699     /* output stream init */
1700     for (i = 0; i < nb_output_files; i++) {
1701         oc = output_files[i]->ctx;
1702         if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
1703             av_dump_format(oc, i, oc->filename, 1);
1704             av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
1705             return AVERROR(EINVAL);
1709     /* init complex filtergraphs */
1710     for (i = 0; i < nb_filtergraphs; i++)
1711         if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
1714     /* for each output stream, we compute the right encoding parameters */
1715     for (i = 0; i < nb_output_streams; i++) {
1716         AVCodecContext *enc_ctx;
1717         AVCodecContext *dec_ctx = NULL;
1718         ost = output_streams[i];
1719         oc  = output_files[ost->file_index]->ctx;
1720         ist = get_input_stream(ost);
1722         if (ost->attachment_filename)
1725         enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
1728             dec_ctx = ist->dec_ctx;
1730             ost->st->disposition          = ist->st->disposition;
1731             enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
1732             enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* --- streamcopy path: copy codec parameters verbatim from the decoder. */
1735         if (ost->stream_copy) {
1737             uint64_t extra_size;
1739             av_assert0(ist && !ost->filter);
1741             extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
1743             if (extra_size > INT_MAX) {
1744                 return AVERROR(EINVAL);
1747             /* if stream_copy is selected, no need to decode or encode */
1748             enc_ctx->codec_id   = dec_ctx->codec_id;
1749             enc_ctx->codec_type = dec_ctx->codec_type;
1751             if (!enc_ctx->codec_tag) {
1752                 if (!oc->oformat->codec_tag ||
1753                      av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
1754                      av_codec_get_tag(oc->oformat->codec_tag, dec_ctx->codec_id) <= 0)
1755                     enc_ctx->codec_tag = dec_ctx->codec_tag;
1758             enc_ctx->bit_rate       = dec_ctx->bit_rate;
1759             enc_ctx->rc_max_rate    = dec_ctx->rc_max_rate;
1760             enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
1761             enc_ctx->field_order    = dec_ctx->field_order;
1762             enc_ctx->extradata      = av_mallocz(extra_size);
1763             if (!enc_ctx->extradata) {
1764                 return AVERROR(ENOMEM);
1766             memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
1767             enc_ctx->extradata_size = dec_ctx->extradata_size;
1769             enc_ctx->time_base = dec_ctx->time_base;
1770             enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
1771             av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
1772                         enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
1774                 enc_ctx->time_base = ist->st->time_base;
/* Deep-copy the stream-level side data into the output stream. */
1776             if (ist->st->nb_side_data) {
1777                 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
1778                                                       sizeof(*ist->st->side_data));
1779                 if (!ost->st->side_data)
1780                     return AVERROR(ENOMEM);
1782                 for (j = 0; j < ist->st->nb_side_data; j++) {
1783                     const AVPacketSideData *sd_src = &ist->st->side_data[j];
1784                     AVPacketSideData *sd_dst = &ost->st->side_data[j];
1786                     sd_dst->data = av_malloc(sd_src->size);
1788                         return AVERROR(ENOMEM);
1789                     memcpy(sd_dst->data, sd_src->data, sd_src->size);
1790                     sd_dst->size = sd_src->size;
1791                     sd_dst->type = sd_src->type;
1792                     ost->st->nb_side_data++;
1796             ost->parser = av_parser_init(enc_ctx->codec_id);
1798             switch (enc_ctx->codec_type) {
1799             case AVMEDIA_TYPE_AUDIO:
1800                 if (audio_volume != 256) {
1801                     av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
1804                 enc_ctx->channel_layout     = dec_ctx->channel_layout;
1805                 enc_ctx->sample_rate        = dec_ctx->sample_rate;
1806                 enc_ctx->channels           = dec_ctx->channels;
1807                 enc_ctx->frame_size         = dec_ctx->frame_size;
1808                 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
1809                 enc_ctx->block_align        = dec_ctx->block_align;
1811             case AVMEDIA_TYPE_VIDEO:
1812                 enc_ctx->pix_fmt            = dec_ctx->pix_fmt;
1813                 enc_ctx->width              = dec_ctx->width;
1814                 enc_ctx->height             = dec_ctx->height;
1815                 enc_ctx->has_b_frames       = dec_ctx->has_b_frames;
1816                 if (ost->frame_aspect_ratio)
1817                     sar = av_d2q(ost->frame_aspect_ratio * enc_ctx->height / enc_ctx->width, 255);
1818                 else if (ist->st->sample_aspect_ratio.num)
1819                     sar = ist->st->sample_aspect_ratio;
1821                     sar = dec_ctx->sample_aspect_ratio;
1822                 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
1824             case AVMEDIA_TYPE_SUBTITLE:
1825                 enc_ctx->width  = dec_ctx->width;
1826                 enc_ctx->height = dec_ctx->height;
1828             case AVMEDIA_TYPE_DATA:
1829             case AVMEDIA_TYPE_ATTACHMENT:
/* --- encode path: pick an encoder and derive parameters from filters. */
1836                 /* should only happen when a default codec is not present. */
1837                 snprintf(error, sizeof(error), "Automatic encoder selection "
1838                          "failed for output stream #%d:%d. Default encoder for "
1839                          "format %s is probably disabled. Please choose an "
1840                          "encoder manually.\n", ost->file_index, ost->index,
1842                 ret = AVERROR(EINVAL);
1847                 ist->decoding_needed = 1;
1848             ost->encoding_needed = 1;
1850             set_encoder_id(output_files[ost->file_index], ost);
1853              * We want CFR output if and only if one of those is true:
1854              * 1) user specified output framerate with -r
1855              * 2) user specified -vsync cfr
1856              * 3) output format is CFR and the user didn't force vsync to
1857              *    something else than CFR
1859              * in such a case, set ost->frame_rate
1861             if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO &&
1862                 !ost->frame_rate.num && ist &&
1863                 (video_sync_method ==  VSYNC_CFR ||
1864                  (video_sync_method ==  VSYNC_AUTO &&
1865                   !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
1866                 if (ist->framerate.num)
1867                     ost->frame_rate = ist->framerate;
1868                 else if (ist->st->avg_frame_rate.num)
1869                     ost->frame_rate = ist->st->avg_frame_rate;
1871                     av_log(NULL, AV_LOG_WARNING, "Constant framerate requested "
1872                            "for the output stream #%d:%d, but no information "
1873                            "about the input framerate is available. Falling "
1874                            "back to a default value of 25fps. Use the -r option "
1875                            "if you want a different framerate.\n",
1876                            ost->file_index, ost->index);
1877                     ost->frame_rate = (AVRational){ 25, 1 };
1880                 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
1881                     int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
1882                     ost->frame_rate = ost->enc->supported_framerates[idx];
1887                 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
1888                  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
/* Audio/video encoders are always fed through a (possibly trivial)
 * filtergraph; build the simple graph here when none exists. */
1890                     fg = init_simple_filtergraph(ist, ost);
1891                     if (configure_filtergraph(fg)) {
1892                         av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
1897             switch (enc_ctx->codec_type) {
1898             case AVMEDIA_TYPE_AUDIO:
1899                 enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
1900                 enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
1901                 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
1902                 enc_ctx->channels       = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
1903                 enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
1905             case AVMEDIA_TYPE_VIDEO:
1906                 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
1908                 enc_ctx->width  = ost->filter->filter->inputs[0]->w;
1909                 enc_ctx->height = ost->filter->filter->inputs[0]->h;
1910                 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
1911                     ost->frame_aspect_ratio ? // overridden by the -aspect cli option
1912                     av_d2q(ost->frame_aspect_ratio * enc_ctx->height/enc_ctx->width, 255) :
1913                     ost->filter->filter->inputs[0]->sample_aspect_ratio;
1914                 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
1916                 ost->st->avg_frame_rate = ost->frame_rate;
/* A geometry/format change through filters invalidates the copied
 * bits_per_raw_sample. */
1919                     (enc_ctx->width   != dec_ctx->width  ||
1920                      enc_ctx->height  != dec_ctx->height ||
1921                      enc_ctx->pix_fmt != dec_ctx->pix_fmt)) {
1922                     enc_ctx->bits_per_raw_sample = 0;
1925                 if (ost->forced_keyframes)
1926                     parse_forced_key_frames(ost->forced_keyframes, ost,
1929             case AVMEDIA_TYPE_SUBTITLE:
1930                 enc_ctx->time_base = (AVRational){1, 1000};
/* Two-pass encoding: open/read the pass log file as appropriate. */
1937             if ((enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1938                 char logfilename[1024];
1941                 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1942                          ost->logfile_prefix ? ost->logfile_prefix :
1943                                                DEFAULT_PASS_LOGFILENAME_PREFIX,
1945                 if (!strcmp(ost->enc->name, "libx264")) {
1946                     av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
1948                     if (enc_ctx->flags & CODEC_FLAG_PASS1) {
1949                         f = fopen(logfilename, "wb");
1951                             av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
1952                                    logfilename, strerror(errno));
1958                         size_t logbuffer_size;
1959                         if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
1960                             av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
1964                         enc_ctx->stats_in = logbuffer;
1971     /* open each encoder */
1972     for (i = 0; i < nb_output_streams; i++) {
1973         ost = output_streams[i];
1974         if (ost->encoding_needed) {
1975             AVCodec      *codec = ost->enc;
1976             AVCodecContext *dec = NULL;
1978             if ((ist = get_input_stream(ost)))
1980             if (dec && dec->subtitle_header) {
1981                 ost->enc_ctx->subtitle_header = av_malloc(dec->subtitle_header_size);
1982                 if (!ost->enc_ctx->subtitle_header) {
1983                     ret = AVERROR(ENOMEM);
1986                 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
1987                 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
1989             if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
1990                 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
1991             av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
1993             if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
1994                 if (ret == AVERROR_EXPERIMENTAL)
1995                     abort_codec_experimental(codec, 1);
1996                 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
1997                         ost->file_index, ost->index);
2000             assert_avoptions(ost->encoder_opts);
2001             if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2002                 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2003                                              "It takes bits/s as argument, not kbits/s\n");
2005             ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2007                 av_log(NULL, AV_LOG_FATAL,
2008                        "Error initializing the output stream codec context.\n");
2012             ost->st->time_base = ost->enc_ctx->time_base;
2014             ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2017                 ost->st->time_base = ost->st->codec->time_base;
2021     /* init input streams */
2022     for (i = 0; i < nb_input_streams; i++)
2023         if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2026     /* discard unused programs */
2027     for (i = 0; i < nb_input_files; i++) {
2028         InputFile *ifile = input_files[i];
2029         for (j = 0; j < ifile->ctx->nb_programs; j++) {
2030             AVProgram *p = ifile->ctx->programs[j];
2031             int discard  = AVDISCARD_ALL;
2033             for (k = 0; k < p->nb_stream_indexes; k++)
2034                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2035                     discard = AVDISCARD_DEFAULT;
2038             p->discard = discard;
2042     /* open files and write file headers */
2043     for (i = 0; i < nb_output_files; i++) {
2044         oc = output_files[i]->ctx;
2045         oc->interrupt_callback = int_cb;
2046         if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
2048             av_strerror(ret, errbuf, sizeof(errbuf));
2049             snprintf(error, sizeof(error),
2050                      "Could not write header for output file #%d "
2051                      "(incorrect codec parameters ?): %s",
2053             ret = AVERROR(EINVAL);
2056         assert_avoptions(output_files[i]->opts);
2057         if (strcmp(oc->oformat->name, "rtp")) {
2063     /* dump the file output parameters - cannot be done before in case
2065     for (i = 0; i < nb_output_files; i++) {
2066         av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2069     /* dump the stream mapping */
2070     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2071     for (i = 0; i < nb_input_streams; i++) {
2072         ist = input_streams[i];
2074         for (j = 0; j < ist->nb_filters; j++) {
2075             if (ist->filters[j]->graph->graph_desc) {
2076                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
2077                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2078                        ist->filters[j]->name);
2079                 if (nb_filtergraphs > 1)
2080                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2081                 av_log(NULL, AV_LOG_INFO, "\n");
2086     for (i = 0; i < nb_output_streams; i++) {
2087         ost = output_streams[i];
2089         if (ost->attachment_filename) {
2090             /* an attached file */
2091             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
2092                    ost->attachment_filename, ost->file_index, ost->index);
2096         if (ost->filter && ost->filter->graph->graph_desc) {
2097             /* output from a complex graph */
2098             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
2099             if (nb_filtergraphs > 1)
2100                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2102             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2103                    ost->index, ost->enc ? ost->enc->name : "?");
2107         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
2108                input_streams[ost->source_index]->file_index,
2109                input_streams[ost->source_index]->st->index,
2112         if (ost->sync_ist != input_streams[ost->source_index])
2113             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2114                    ost->sync_ist->file_index,
2115                    ost->sync_ist->st->index);
2116         if (ost->stream_copy)
2117             av_log(NULL, AV_LOG_INFO, " (copy)");
2119             const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
2120             const AVCodec *out_codec   = ost->enc;
2121             const char *decoder_name   = "?";
2122             const char *in_codec_name  = "?";
2123             const char *encoder_name   = "?";
2124             const char *out_codec_name = "?";
2125             const AVCodecDescriptor *desc;
/* "native" marks the case where the decoder/encoder name matches the
 * codec descriptor name (i.e. the built-in implementation). */
2128                 decoder_name = in_codec->name;
2129                 desc = avcodec_descriptor_get(in_codec->id);
2131                     in_codec_name = desc->name;
2132                 if (!strcmp(decoder_name, in_codec_name))
2133                     decoder_name = "native";
2137                 encoder_name = out_codec->name;
2138                 desc = avcodec_descriptor_get(out_codec->id);
2140                     out_codec_name = desc->name;
2141                 if (!strcmp(encoder_name, out_codec_name))
2142                     encoder_name = "native";
2145             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
2146                    in_codec_name, decoder_name,
2147                    out_codec_name, encoder_name);
2149         av_log(NULL, AV_LOG_INFO, "\n");
2153         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* NOTE(review): interior lines (continue/return statements) are elided in
 * this extract; code kept byte-identical. */
2164 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
2165 static int need_output(void)
2169     for (i = 0; i < nb_output_streams; i++) {
2170         OutputStream *ost    = output_streams[i];
2171         OutputFile *of       = output_files[ost->file_index];
2172         AVFormatContext *os  = output_files[ost->file_index]->ctx;
/* A stream is done when it is finished or its file hit -fs limit. */
2174         if (ost->finished ||
2175             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Hitting -frames on one stream finishes every stream of that file. */
2177         if (ost->frame_number >= ost->max_frames) {
2179             for (j = 0; j < of->ctx->nb_streams; j++)
2180                 output_streams[of->ost_index + j]->finished = 1;
/* Pick the input file to read from next: the non-EOF, non-EAGAIN file
 * owning the stream with the smallest last_dts, so inputs are consumed
 * in rough timestamp order. NOTE(review): interior lines (continue,
 * ipts_min update, final return) are elided; code kept byte-identical. */
2190 static InputFile *select_input_file(void)
2192     InputFile *ifile = NULL;
2193     int64_t ipts_min = INT64_MAX;
2196     for (i = 0; i < nb_input_streams; i++) {
2197         InputStream *ist = input_streams[i];
2198         int64_t ipts     = ist->last_dts;
2200         if (ist->discard || input_files[ist->file_index]->eagain)
2202         if (!input_files[ist->file_index]->eof_reached) {
2203             if (ipts < ipts_min) {
2205                 ifile = input_files[ist->file_index];
/* Per-input-file reader thread: pull packets with av_read_frame and queue
 * them into the file's FIFO, blocking on fifo_cond while the FIFO is full.
 * 'arg' is the InputFile. NOTE(review): interior lines are elided in this
 * extract; code kept byte-identical. */
2214 static void *input_thread(void *arg)
2219     while (!transcoding_finished && ret >= 0) {
2221         ret = av_read_frame(f->ctx, &pkt);
2223         if (ret == AVERROR(EAGAIN)) {
2230         pthread_mutex_lock(&f->fifo_lock);
/* Wait until the consumer drained at least one packet's worth of space. */
2231         while (!av_fifo_space(f->fifo))
2232             pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
/* Own the packet data before it crosses the thread boundary. */
2234         av_dup_packet(&pkt);
2235         av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
2237         pthread_mutex_unlock(&f->fifo_lock);
/* Stop and join all input reader threads, draining and freeing each
 * file's packet FIFO. No-op in the single-input case (no threads).
 * NOTE(review): interior lines are elided; code kept byte-identical. */
2244 static void free_input_threads(void)
2248     if (nb_input_files == 1)
2251     transcoding_finished = 1;
2253     for (i = 0; i < nb_input_files; i++) {
2254         InputFile *f = input_files[i];
2257         if (!f->fifo || f->joined)
/* Drain the FIFO so a thread blocked on a full FIFO can wake up and
 * observe transcoding_finished. */
2260         pthread_mutex_lock(&f->fifo_lock);
2261         while (av_fifo_size(f->fifo)) {
2262             av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2263             av_free_packet(&pkt);
2265         pthread_cond_signal(&f->fifo_cond);
2266         pthread_mutex_unlock(&f->fifo_lock);
2268         pthread_join(f->thread, NULL);
/* Free packets the thread may have queued between drain and join. */
2271         while (av_fifo_size(f->fifo)) {
2272             av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2273             av_free_packet(&pkt);
2275         av_fifo_free(f->fifo);
/* Spawn one reader thread per input file (skipped when there is a single
 * input), each with an 8-packet FIFO plus mutex/cond pair.
 * Returns 0 or a negative AVERROR. */
2279 static int init_input_threads(void)
2283     if (nb_input_files == 1)
2286     for (i = 0; i < nb_input_files; i++) {
2287         InputFile *f = input_files[i];
2289         if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
2290             return AVERROR(ENOMEM);
2292         pthread_mutex_init(&f->fifo_lock, NULL);
2293         pthread_cond_init (&f->fifo_cond, NULL);
/* pthread_create returns a positive errno code on failure. */
2295         if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
2296             return AVERROR(ret);
/* Non-blocking pop of one packet from a threaded input file's FIFO;
 * returns AVERROR(EAGAIN) when the FIFO is empty (the elided branch
 * presumably handles the thread's terminal status — verify against the
 * full file). Signals fifo_cond so the reader thread can refill. */
2301 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2305     pthread_mutex_lock(&f->fifo_lock);
2307     if (av_fifo_size(f->fifo)) {
2308         av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
2309         pthread_cond_signal(&f->fifo_cond);
2314         ret = AVERROR(EAGAIN);
2317     pthread_mutex_unlock(&f->fifo_lock);
/* Fetch the next packet for an input file: honor -re rate emulation by
 * returning EAGAIN until wall-clock catches up with the stream DTS, then
 * read from the FIFO (multi-input) or directly from the demuxer.
 * NOTE(review): the rate_emu guard line is elided; code kept
 * byte-identical. */
2323 static int get_input_packet(InputFile *f, AVPacket *pkt)
2327         for (i = 0; i < f->nb_streams; i++) {
2328             InputStream *ist = input_streams[f->ist_index + i];
2329             int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2330             int64_t now = av_gettime_relative() - ist->start;
2332                 return AVERROR(EAGAIN);
2337     if (nb_input_files > 1)
2338         return get_input_packet_mt(f, pkt);
2340     return av_read_frame(f->ctx, pkt);
/* Report whether any input file returned EAGAIN in the last read round.
 * The return statements are elided in this extract — presumably 1 inside
 * the loop and 0 after it; verify against the full file. */
2343 static int got_eagain(void)
2346     for (i = 0; i < nb_input_files; i++)
2347         if (input_files[i]->eagain)
2352 static void reset_eagain(void)
2355 for (i = 0; i < nb_input_files; i++)
2356 input_files[i]->eagain = 0;
/* NOTE(review): interior lines (declarations, continue/return paths) are
 * elided in this extract; code kept byte-identical. */
2360  * Read one packet from an input file and send it for
2361  * - decoding -> lavfi (audio/video)
2362  * - decoding -> encoding -> muxing (subtitles)
2363  * - muxing (streamcopy)
2366  * - 0 -- one packet was read and processed
2367  * - AVERROR(EAGAIN) -- no packets were available for selected file,
2368  *   this function should be called again
2369  * - AVERROR_EOF -- this function should not be called again
2371 static int process_input(void)
2374     AVFormatContext *is;
2379     /* select the stream that we must read now */
2380     ifile = select_input_file();
2381     /* if none, if is finished */
2386             return AVERROR(EAGAIN);
2388         av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from.\n");
2393     ret = get_input_packet(ifile, &pkt);
2395     if (ret == AVERROR(EAGAIN)) {
/* Read error or EOF: flush decoders for this file and finish outputs
 * that do not go through lavfi. */
2400         if (ret != AVERROR_EOF) {
2401             print_error(is->filename, ret);
2405         ifile->eof_reached = 1;
2407         for (i = 0; i < ifile->nb_streams; i++) {
2408             ist = input_streams[ifile->ist_index + i];
2409             if (ist->decoding_needed)
2410                 process_input_packet(ist, NULL);
2412             /* mark all outputs that don't go through lavfi as finished */
2413             for (j = 0; j < nb_output_streams; j++) {
2414                 OutputStream *ost = output_streams[j];
2416                 if (ost->source_index == ifile->ist_index + i &&
2417                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
2418                     finish_output_stream(ost);
2422         return AVERROR(EAGAIN);
2428         av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2429                          is->streams[pkt.stream_index]);
2431     /* the following test is needed in case new streams appear
2432        dynamically in stream : we ignore them */
2433     if (pkt.stream_index >= ifile->nb_streams)
2434         goto discard_packet;
2436     ist = input_streams[ifile->ist_index + pkt.stream_index];
2438     ist->data_size += pkt.size;
2442         goto discard_packet;
2444     /* add the stream-global side data to the first packet */
2445     if (ist->nb_packets == 1)
2446         for (i = 0; i < ist->st->nb_side_data; i++) {
2447             AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Never duplicate side data the packet already carries. */
2450             if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
2452             if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
2455             dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
2459             memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file -itsoffset and per-stream ts_scale. */
2462     if (pkt.dts != AV_NOPTS_VALUE)
2463         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2464     if (pkt.pts != AV_NOPTS_VALUE)
2465         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2467     if (pkt.pts != AV_NOPTS_VALUE)
2468         pkt.pts *= ist->ts_scale;
2469     if (pkt.dts != AV_NOPTS_VALUE)
2470         pkt.dts *= ist->ts_scale;
/* Compensate timestamp discontinuities in formats flagged
 * AVFMT_TS_DISCONT (e.g. MPEG-TS) by shifting the file's ts_offset. */
2472     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2473          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
2474         pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
2475         (is->iformat->flags & AVFMT_TS_DISCONT)) {
2476         int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2477         int64_t delta   = pkt_dts - ist->next_dts;
2479         if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
2480             ifile->ts_offset -= delta;
2481             av_log(NULL, AV_LOG_DEBUG,
2482                    "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2483                    delta, ifile->ts_offset);
2484             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2485             if (pkt.pts != AV_NOPTS_VALUE)
2486                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2490     process_input_packet(ist, &pkt);
2493     av_free_packet(&pkt);
/* NOTE(review): interior lines are elided in this extract; code kept
 * byte-identical. */
2499  * The following code is the main loop of the file converter
2501 static int transcode(void)
2503     int ret, i, need_input = 1;
2504     AVFormatContext *os;
2507     int64_t timer_start;
2509     ret = transcode_init();
2513     av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2516     timer_start = av_gettime_relative();
2519     if ((ret = init_input_threads()) < 0)
/* Main loop: read/decode input, pull frames through filters and encode,
 * until no output stream wants more data or SIGTERM is received. */
2523     while (!received_sigterm) {
2524         /* check if there's any stream where output is still needed */
2525         if (!need_output()) {
2526             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
2530         /* read and process one input packet if needed */
2532             ret = process_input();
2533             if (ret == AVERROR_EOF)
2537         ret = poll_filters();
2539             if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
2543                 av_strerror(ret, errbuf, sizeof(errbuf));
2545                 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2550         /* dump report by using the output first video and audio streams */
2551         print_report(0, timer_start);
2554     free_input_threads();
2557     /* at the end of stream, we must flush the decoder buffers */
2558     for (i = 0; i < nb_input_streams; i++) {
2559         ist = input_streams[i];
2560         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
2561             process_input_packet(ist, NULL);
2569     /* write the trailer if needed and close file */
2570     for (i = 0; i < nb_output_files; i++) {
2571         os = output_files[i]->ctx;
2572         av_write_trailer(os);
2575     /* dump report by using the first video and audio streams */
2576     print_report(1, timer_start);
2578     /* close each encoder */
2579     for (i = 0; i < nb_output_streams; i++) {
2580         ost = output_streams[i];
2581         if (ost->encoding_needed) {
2582             av_freep(&ost->enc_ctx->stats_in);
2586     /* close each decoder */
2587     for (i = 0; i < nb_input_streams; i++) {
2588         ist = input_streams[i];
2589         if (ist->decoding_needed) {
2590             avcodec_close(ist->dec_ctx);
2591             if (ist->hwaccel_uninit)
2592                 ist->hwaccel_uninit(ist->dec_ctx);
/* Cleanup path (reached on both success and failure; label elided). */
2601     free_input_threads();
2604     if (output_streams) {
2605         for (i = 0; i < nb_output_streams; i++) {
2606             ost = output_streams[i];
2609                 fclose(ost->logfile);
2610                 ost->logfile = NULL;
2612             av_free(ost->forced_kf_pts);
2613             av_dict_free(&ost->encoder_opts);
2614             av_dict_free(&ost->resample_opts);
/*
 * Return the user CPU time consumed by this process, in microseconds.
 *
 * NOTE(review): excerpt is extraction-damaged — the opening brace, the
 * leading `#if HAVE_GETRUSAGE`, the `HANDLE proc;` declaration, the `#else`,
 * `#endif` and closing brace are missing, and each line carries a stray
 * leading number. Code left byte-identical; only comments added.
 */
2621 static int64_t getutime(void)
2624 struct rusage rusage;
2626 getrusage(RUSAGE_SELF, &rusage);
/* ru_utime is a {seconds, microseconds} pair; fold it into a single
 * microsecond count (1000000LL forces 64-bit arithmetic). */
2627 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
2628 #elif HAVE_GETPROCESSTIMES
2630 FILETIME c, e, k, u;
/* `proc` is used below but its declaration is missing from this excerpt. */
2631 proc = GetCurrentProcess();
2632 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100 ns ticks; dividing by 10 yields microseconds. */
2633 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
/* Fallback (the surrounding #else is missing): wall-clock time instead of
 * CPU time, so `-benchmark` output is only approximate on such platforms. */
2635 return av_gettime_relative();
/*
 * Return the peak memory usage of this process, in bytes, for the
 * `-benchmark` report.
 *
 * NOTE(review): excerpt is extraction-damaged — the opening brace, the
 * `HANDLE proc;` declaration, the trailing `#else`/`#endif`/closing brace
 * are missing, and each line carries a stray leading number. Code left
 * byte-identical; only comments added.
 */
2639 static int64_t getmaxrss(void)
2641 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
2642 struct rusage rusage;
2643 getrusage(RUSAGE_SELF, &rusage);
/* The `* 1024` scales ru_maxrss from kilobytes to bytes; this matches
 * Linux semantics (macOS reports ru_maxrss in bytes — NOTE(review): not
 * handled here, so the figure may be inflated there). */
2644 return (int64_t)rusage.ru_maxrss * 1024;
2645 #elif HAVE_GETPROCESSMEMORYINFO
2647 PROCESS_MEMORY_COUNTERS memcounters;
/* `proc` is used below but its declaration is missing from this excerpt. */
2648 proc = GetCurrentProcess();
/* cb must be set to the struct size before the call, per the Win32 API. */
2649 memcounters.cb = sizeof(memcounters);
2650 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* Peak commit charge in bytes (not peak working set). */
2651 return memcounters.PeakPagefileUsage;
2657 int main(int argc, char **argv)
2662 register_exit(avconv_cleanup);
2664 av_log_set_flags(AV_LOG_SKIP_REPEATED);
2665 parse_loglevel(argc, argv, options);
2667 avcodec_register_all();
2669 avdevice_register_all();
2671 avfilter_register_all();
2673 avformat_network_init();
2677 /* parse options and open all input/output files */
2678 ret = avconv_parse_options(argc, argv);
2682 if (nb_output_files <= 0 && nb_input_files == 0) {
2684 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2688 /* file converter / grab */
2689 if (nb_output_files <= 0) {
2690 fprintf(stderr, "At least one output file must be specified\n");
2695 if (transcode() < 0)
2697 ti = getutime() - ti;
2699 int maxrss = getmaxrss() / 1024;
2700 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);