3 * Copyright (c) 2000-2011 The libav developers.
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
32 #include "libavformat/avformat.h"
33 #include "libavdevice/avdevice.h"
34 #include "libswscale/swscale.h"
35 #include "libavresample/avresample.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/channel_layout.h"
38 #include "libavutil/parseutils.h"
39 #include "libavutil/samplefmt.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavutil/time.h"
49 #include "libavformat/os_support.h"
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
55 #if HAVE_SYS_RESOURCE_H
57 #include <sys/types.h>
58 #include <sys/resource.h>
59 #elif HAVE_GETPROCESSTIMES
62 #if HAVE_GETPROCESSMEMORYINFO
68 #include <sys/select.h>
80 #include "libavutil/avassert.h"
82 const char program_name[] = "avconv";
83 const int program_birth_year = 2000;
85 static FILE *vstats_file;
87 static int nb_frames_drop = 0;
92 /* signal to input threads that they should exit; set by the main thread */
93 static int transcoding_finished;
96 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
98 InputStream **input_streams = NULL;
99 int nb_input_streams = 0;
100 InputFile **input_files = NULL;
101 int nb_input_files = 0;
103 OutputStream **output_streams = NULL;
104 int nb_output_streams = 0;
105 OutputFile **output_files = NULL;
106 int nb_output_files = 0;
108 FilterGraph **filtergraphs;
111 static void term_exit(void)
113 av_log(NULL, AV_LOG_QUIET, "");
116 static volatile int received_sigterm = 0;
117 static volatile int received_nb_signals = 0;
120 sigterm_handler(int sig)
122 received_sigterm = sig;
123 received_nb_signals++;
127 static void term_init(void)
129 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
130 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
132 signal(SIGXCPU, sigterm_handler);
136 static int decode_interrupt_cb(void *ctx)
138 return received_nb_signals > 1;
141 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
143 static void avconv_cleanup(int ret)
147 for (i = 0; i < nb_filtergraphs; i++) {
148 FilterGraph *fg = filtergraphs[i];
149 avfilter_graph_free(&fg->graph);
150 for (j = 0; j < fg->nb_inputs; j++) {
151 av_freep(&fg->inputs[j]->name);
152 av_freep(&fg->inputs[j]);
154 av_freep(&fg->inputs);
155 for (j = 0; j < fg->nb_outputs; j++) {
156 av_freep(&fg->outputs[j]->name);
157 av_freep(&fg->outputs[j]);
159 av_freep(&fg->outputs);
160 av_freep(&fg->graph_desc);
162 av_freep(&filtergraphs[i]);
164 av_freep(&filtergraphs);
167 for (i = 0; i < nb_output_files; i++) {
168 OutputFile *of = output_files[i];
169 AVFormatContext *s = of->ctx;
170 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
172 avformat_free_context(s);
173 av_dict_free(&of->opts);
175 av_freep(&output_files[i]);
177 for (i = 0; i < nb_output_streams; i++) {
178 OutputStream *ost = output_streams[i];
179 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
181 AVBitStreamFilterContext *next = bsfc->next;
182 av_bitstream_filter_close(bsfc);
185 ost->bitstream_filters = NULL;
186 av_frame_free(&ost->filtered_frame);
188 av_parser_close(ost->parser);
190 av_freep(&ost->forced_keyframes);
191 av_freep(&ost->avfilter);
192 av_freep(&ost->logfile_prefix);
194 avcodec_free_context(&ost->enc_ctx);
196 av_freep(&output_streams[i]);
198 for (i = 0; i < nb_input_files; i++) {
199 avformat_close_input(&input_files[i]->ctx);
200 av_freep(&input_files[i]);
202 for (i = 0; i < nb_input_streams; i++) {
203 InputStream *ist = input_streams[i];
205 av_frame_free(&ist->decoded_frame);
206 av_frame_free(&ist->filter_frame);
207 av_dict_free(&ist->decoder_opts);
208 av_freep(&ist->filters);
209 av_freep(&ist->hwaccel_device);
211 avcodec_free_context(&ist->dec_ctx);
213 av_freep(&input_streams[i]);
218 av_free(vstats_filename);
220 av_freep(&input_streams);
221 av_freep(&input_files);
222 av_freep(&output_streams);
223 av_freep(&output_files);
227 avformat_network_deinit();
229 if (received_sigterm) {
230 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
231 (int) received_sigterm);
236 void assert_avoptions(AVDictionary *m)
238 AVDictionaryEntry *t;
239 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
240 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
245 static void abort_codec_experimental(AVCodec *c, int encoder)
247 const char *codec_string = encoder ? "encoder" : "decoder";
249 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
250 "results.\nAdd '-strict experimental' if you want to use it.\n",
251 codec_string, c->name);
252 codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id);
253 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
254 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
255 codec_string, codec->name);
260 * Update the requested input sample format based on the output sample format.
261 * This is currently only used to request float output from decoders which
262 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
263 * Ideally this will be removed in the future when decoders do not do format
264 * conversion and only output in their native format.
266 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
269 /* if sample formats match or a decoder sample format has already been
270 requested, just return */
271 if (enc->sample_fmt == dec->sample_fmt ||
272 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
275 /* if decoder supports more than one output format */
276 if (dec_codec && dec_codec->sample_fmts &&
277 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
278 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
279 const enum AVSampleFormat *p;
280 int min_dec = INT_MAX, min_inc = INT_MAX;
281 enum AVSampleFormat dec_fmt = AV_SAMPLE_FMT_NONE;
282 enum AVSampleFormat inc_fmt = AV_SAMPLE_FMT_NONE;
284 /* find a matching sample format in the encoder */
285 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
286 if (*p == enc->sample_fmt) {
287 dec->request_sample_fmt = *p;
290 enum AVSampleFormat dfmt = av_get_packed_sample_fmt(*p);
291 enum AVSampleFormat efmt = av_get_packed_sample_fmt(enc->sample_fmt);
292 int fmt_diff = 32 * abs(dfmt - efmt);
293 if (av_sample_fmt_is_planar(*p) !=
294 av_sample_fmt_is_planar(enc->sample_fmt))
299 } else if (dfmt > efmt) {
300 if (fmt_diff < min_inc) {
305 if (fmt_diff < min_dec) {
313 /* if none match, provide the one that matches quality closest */
314 dec->request_sample_fmt = min_inc != INT_MAX ? inc_fmt : dec_fmt;
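/*
 * Write one encoded packet to the muxer: apply the bitstream filters attached
 * to the stream, enforce ost->max_frames for streams whose packets are not
 * reordered, fix up non-monotonic DTS values and update the muxing statistics.
 */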
318 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
320 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
321 AVCodecContext *avctx = ost->enc_ctx;
325 * Audio encoders may split the packets -- #frames in != #packets out.
326 * But there is no reordering, so we can limit the number of output packets
327 * by simply dropping them here.
328 * Counting encoded video frames needs to be done separately because of
329 * reordering, see do_video_out()
331 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
332 if (ost->frame_number >= ost->max_frames) {
340 AVPacket new_pkt = *pkt;
341 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
342 &new_pkt.data, &new_pkt.size,
343 pkt->data, pkt->size,
344 pkt->flags & AV_PKT_FLAG_KEY);
347 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
348 av_buffer_default_free, NULL, 0);
352 av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
353 bsfc->filter->name, pkt->stream_index,
354 avctx->codec ? avctx->codec->name : "copy");
364 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
365 ost->last_mux_dts != AV_NOPTS_VALUE &&
366 pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
367 av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
368 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
369 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
371 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
374 av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
375 "in incorrect timestamps in the output file.\n",
376 ost->last_mux_dts + 1);
377 pkt->dts = ost->last_mux_dts + 1;
378 if (pkt->pts != AV_NOPTS_VALUE)
379 pkt->pts = FFMAX(pkt->pts, pkt->dts);
381 ost->last_mux_dts = pkt->dts;
383 ost->data_size += pkt->size;
384 ost->packets_written++;
386 pkt->stream_index = ost->index;
387 ret = av_interleaved_write_frame(s, pkt);
389 print_error("av_interleaved_write_frame()", ret);
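/*
 * Check whether ost has already reached the recording time (-t) of its output
 * file, so that encoding of further frames can be stopped.
 */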
394 static int check_recording_time(OutputStream *ost)
396 OutputFile *of = output_files[ost->file_index];
398 if (of->recording_time != INT64_MAX &&
399 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
400 AV_TIME_BASE_Q) >= 0) {
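/* Encode one audio frame and pass the resulting packet to write_frame(). */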
407 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
410 AVCodecContext *enc = ost->enc_ctx;
414 av_init_packet(&pkt);
418 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
419 frame->pts = ost->sync_opts;
420 ost->sync_opts = frame->pts + frame->nb_samples;
422 ost->samples_encoded += frame->nb_samples;
423 ost->frames_encoded++;
425 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
426 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
431 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
432 write_frame(s, &pkt, ost);
436 static void do_subtitle_out(AVFormatContext *s,
442 static uint8_t *subtitle_out = NULL;
443 int subtitle_out_max_size = 1024 * 1024;
444 int subtitle_out_size, nb, i;
448 if (pts == AV_NOPTS_VALUE) {
449 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
458 subtitle_out = av_malloc(subtitle_out_max_size);
461 /* Note: DVB subtitles need one packet to draw them and another
462 packet to clear them */
463 /* XXX: signal it in the codec context ? */
464 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
469 for (i = 0; i < nb; i++) {
470 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
471 if (!check_recording_time(ost))
474 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
475 // start_display_time is required to be 0
476 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
477 sub->end_display_time -= sub->start_display_time;
478 sub->start_display_time = 0;
480 ost->frames_encoded++;
482 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
483 subtitle_out_max_size, sub);
484 if (subtitle_out_size < 0) {
485 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
489 av_init_packet(&pkt);
490 pkt.data = subtitle_out;
491 pkt.size = subtitle_out_size;
492 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
493 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
494 /* XXX: the pts correction is handled here. Maybe handling
495 it in the codec would be better */
497 pkt.pts += 90 * sub->start_display_time;
499 pkt.pts += 90 * sub->end_display_time;
501 write_frame(s, &pkt, ost);
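/*
 * Encode one video frame: apply the selected vsync method (possibly dropping
 * the frame), handle AVFMT_RAWPICTURE passthrough, forced key frames and
 * interlacing flags, then pass the packet to write_frame().
 */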
505 static void do_video_out(AVFormatContext *s,
510 int ret, format_video_sync;
512 AVCodecContext *enc = ost->enc_ctx;
516 format_video_sync = video_sync_method;
517 if (format_video_sync == VSYNC_AUTO)
518 format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
519 (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
520 if (format_video_sync != VSYNC_PASSTHROUGH &&
522 in_picture->pts != AV_NOPTS_VALUE &&
523 in_picture->pts < ost->sync_opts) {
525 av_log(NULL, AV_LOG_WARNING,
526 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
527 ost->frame_number, ost->st->index, in_picture->pts);
531 if (in_picture->pts == AV_NOPTS_VALUE)
532 in_picture->pts = ost->sync_opts;
533 ost->sync_opts = in_picture->pts;
536 if (!ost->frame_number)
537 ost->first_pts = in_picture->pts;
539 av_init_packet(&pkt);
543 if (ost->frame_number >= ost->max_frames)
546 if (s->oformat->flags & AVFMT_RAWPICTURE &&
547 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
548 /* raw pictures are written as an AVPicture structure to
549 avoid any copies. We temporarily support the older method. */
551 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
552 enc->coded_frame->top_field_first = in_picture->top_field_first;
553 pkt.data = (uint8_t *)in_picture;
554 pkt.size = sizeof(AVPicture);
555 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
556 pkt.flags |= AV_PKT_FLAG_KEY;
558 write_frame(s, &pkt, ost);
562 if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
563 ost->top_field_first >= 0)
564 in_picture->top_field_first = !!ost->top_field_first;
566 in_picture->quality = enc->global_quality;
567 if (!enc->me_threshold)
568 in_picture->pict_type = 0;
569 if (ost->forced_kf_index < ost->forced_kf_count &&
570 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
571 in_picture->pict_type = AV_PICTURE_TYPE_I;
572 ost->forced_kf_index++;
575 ost->frames_encoded++;
577 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
579 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
584 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
585 write_frame(s, &pkt, ost);
586 *frame_size = pkt.size;
588 /* if two-pass encoding, output the stats log */
589 if (ost->logfile && enc->stats_out) {
590 fprintf(ost->logfile, "%s", enc->stats_out);
596 * For video, the number of frames in == the number of packets out.
597 * But there may be reordering, so we can't throw away frames on encoder
598 * flush; we need to limit them here, before they go into the encoder.
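/* Convert a normalized mean squared error into a PSNR value in dB. */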
603 static double psnr(double d)
605 return -10.0 * log(d) / log(10.0);
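/* Append one line of per-frame statistics (quality, PSNR, size, bitrate) to the -vstats file. */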
608 static void do_video_stats(OutputStream *ost, int frame_size)
612 double ti1, bitrate, avg_bitrate;
614 /* this is executed just the first time do_video_stats is called */
616 vstats_file = fopen(vstats_filename, "w");
624 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
625 frame_number = ost->frame_number;
626 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
627 if (enc->flags&CODEC_FLAG_PSNR)
628 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
630 fprintf(vstats_file,"f_size= %6d ", frame_size);
631 /* compute pts value */
632 ti1 = ost->sync_opts * av_q2d(enc->time_base);
636 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
637 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
638 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
639 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
640 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
645 * Read one frame of lavfi output for ost and encode it.
647 static int poll_filter(OutputStream *ost)
649 OutputFile *of = output_files[ost->file_index];
650 AVFrame *filtered_frame = NULL;
653 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
654 return AVERROR(ENOMEM);
656 filtered_frame = ost->filtered_frame;
658 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
659 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
660 ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame,
661 ost->enc_ctx->frame_size);
663 ret = av_buffersink_get_frame(ost->filter->filter, filtered_frame);
668 if (filtered_frame->pts != AV_NOPTS_VALUE) {
669 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
670 filtered_frame->pts = av_rescale_q(filtered_frame->pts,
671 ost->filter->filter->inputs[0]->time_base,
672 ost->enc_ctx->time_base) -
673 av_rescale_q(start_time,
675 ost->enc_ctx->time_base);
678 switch (ost->filter->filter->inputs[0]->type) {
679 case AVMEDIA_TYPE_VIDEO:
680 if (!ost->frame_aspect_ratio)
681 ost->enc_ctx->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
683 do_video_out(of->ctx, ost, filtered_frame, &frame_size);
684 if (vstats_filename && frame_size)
685 do_video_stats(ost, frame_size);
687 case AVMEDIA_TYPE_AUDIO:
688 do_audio_out(of->ctx, ost, filtered_frame);
691 // TODO support subtitle filters
695 av_frame_unref(filtered_frame);
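/* Mark all streams of the output file that ost belongs to as finished. */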
700 static void finish_output_stream(OutputStream *ost)
702 OutputFile *of = output_files[ost->file_index];
708 for (i = 0; i < of->ctx->nb_streams; i++)
709 output_streams[of->ost_index + i]->finished = 1;
714 * Read as many frames as possible from lavfi and encode them.
716 * Always read from the active stream with the lowest timestamp. If no frames
717 * are available for it, then return EAGAIN and wait for more input. This way we
718 * can use lavfi sources that generate an unlimited amount of frames without memory usage problems.
721 static int poll_filters(void)
725 while (ret >= 0 && !received_sigterm) {
726 OutputStream *ost = NULL;
727 int64_t min_pts = INT64_MAX;
729 /* choose output stream with the lowest timestamp */
730 for (i = 0; i < nb_output_streams; i++) {
731 int64_t pts = output_streams[i]->sync_opts;
733 if (!output_streams[i]->filter || output_streams[i]->finished)
736 pts = av_rescale_q(pts, output_streams[i]->enc_ctx->time_base,
740 ost = output_streams[i];
747 ret = poll_filter(ost);
749 if (ret == AVERROR_EOF) {
750 finish_output_stream(ost);
752 } else if (ret == AVERROR(EAGAIN))
759 static void print_final_stats(int64_t total_size)
761 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
762 uint64_t data_size = 0;
763 float percent = -1.0;
766 for (i = 0; i < nb_output_streams; i++) {
767 OutputStream *ost = output_streams[i];
768 switch (ost->enc_ctx->codec_type) {
769 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
770 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
771 default: other_size += ost->data_size; break;
773 extra_size += ost->enc_ctx->extradata_size;
774 data_size += ost->data_size;
777 if (data_size && total_size >= data_size)
778 percent = 100.0 * (total_size - data_size) / data_size;
780 av_log(NULL, AV_LOG_INFO, "\n");
781 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
785 extra_size / 1024.0);
787 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
789 av_log(NULL, AV_LOG_INFO, "unknown");
790 av_log(NULL, AV_LOG_INFO, "\n");
792 /* print verbose per-stream stats */
793 for (i = 0; i < nb_input_files; i++) {
794 InputFile *f = input_files[i];
795 uint64_t total_packets = 0, total_size = 0;
797 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
798 i, f->ctx->filename);
800 for (j = 0; j < f->nb_streams; j++) {
801 InputStream *ist = input_streams[f->ist_index + j];
802 enum AVMediaType type = ist->dec_ctx->codec_type;
804 total_size += ist->data_size;
805 total_packets += ist->nb_packets;
807 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
808 i, j, media_type_string(type));
809 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
810 ist->nb_packets, ist->data_size);
812 if (ist->decoding_needed) {
813 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
814 ist->frames_decoded);
815 if (type == AVMEDIA_TYPE_AUDIO)
816 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
817 av_log(NULL, AV_LOG_VERBOSE, "; ");
820 av_log(NULL, AV_LOG_VERBOSE, "\n");
823 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
824 total_packets, total_size);
827 for (i = 0; i < nb_output_files; i++) {
828 OutputFile *of = output_files[i];
829 uint64_t total_packets = 0, total_size = 0;
831 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
832 i, of->ctx->filename);
834 for (j = 0; j < of->ctx->nb_streams; j++) {
835 OutputStream *ost = output_streams[of->ost_index + j];
836 enum AVMediaType type = ost->enc_ctx->codec_type;
838 total_size += ost->data_size;
839 total_packets += ost->packets_written;
841 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
842 i, j, media_type_string(type));
843 if (ost->encoding_needed) {
844 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
845 ost->frames_encoded);
846 if (type == AVMEDIA_TYPE_AUDIO)
847 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
848 av_log(NULL, AV_LOG_VERBOSE, "; ");
851 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
852 ost->packets_written, ost->data_size);
854 av_log(NULL, AV_LOG_VERBOSE, "\n");
857 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
858 total_packets, total_size);
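/*
 * Print the periodic progress line (frame count, fps, quality, size, time,
 * bitrate) and, on the last report, the final per-stream statistics.
 */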
862 static void print_report(int is_last_report, int64_t timer_start)
869 int frame_number, vid, i;
870 double bitrate, ti1, pts;
871 static int64_t last_time = -1;
872 static int qp_histogram[52];
874 if (!print_stats && !is_last_report)
877 if (!is_last_report) {
879 /* display the report every 0.5 seconds */
880 cur_time = av_gettime();
881 if (last_time == -1) {
882 last_time = cur_time;
885 if ((cur_time - last_time) < 500000)
887 last_time = cur_time;
891 oc = output_files[0]->ctx;
893 total_size = avio_size(oc->pb);
894 if (total_size <= 0) // FIXME improve avio_size() so it works with non-seekable output too
895 total_size = avio_tell(oc->pb);
896 if (total_size < 0) {
898 av_strerror(total_size, errbuf, sizeof(errbuf));
899 av_log(NULL, AV_LOG_VERBOSE, "Bitrate not available, "
900 "avio_tell() failed: %s\n", errbuf);
907 for (i = 0; i < nb_output_streams; i++) {
909 ost = output_streams[i];
911 if (!ost->stream_copy && enc->coded_frame)
912 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
913 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
914 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
916 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
917 float t = (av_gettime() - timer_start) / 1000000.0;
919 frame_number = ost->frame_number;
920 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
921 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
923 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
927 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
929 for (j = 0; j < 32; j++)
930 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
932 if (enc->flags&CODEC_FLAG_PSNR) {
934 double error, error_sum = 0;
935 double scale, scale_sum = 0;
936 char type[3] = { 'Y','U','V' };
937 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
938 for (j = 0; j < 3; j++) {
939 if (is_last_report) {
940 error = enc->error[j];
941 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
943 error = enc->coded_frame->error[j];
944 scale = enc->width * enc->height * 255.0 * 255.0;
950 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
952 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
956 /* compute min output value */
957 pts = (double)ost->last_mux_dts * av_q2d(ost->st->time_base);
958 if ((pts < ti1) && (pts > 0))
964 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
966 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
967 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
968 (double)total_size / 1024, ti1, bitrate);
971 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " drop=%d",
974 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
979 print_final_stats(total_size);
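/* Drain delayed packets from every encoder by feeding it NULL frames at EOF. */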
983 static void flush_encoders(void)
987 for (i = 0; i < nb_output_streams; i++) {
988 OutputStream *ost = output_streams[i];
989 AVCodecContext *enc = ost->enc_ctx;
990 AVFormatContext *os = output_files[ost->file_index]->ctx;
991 int stop_encoding = 0;
993 if (!ost->encoding_needed)
996 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
998 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1002 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1005 switch (enc->codec_type) {
1006 case AVMEDIA_TYPE_AUDIO:
1007 encode = avcodec_encode_audio2;
1010 case AVMEDIA_TYPE_VIDEO:
1011 encode = avcodec_encode_video2;
1021 av_init_packet(&pkt);
1025 ret = encode(enc, &pkt, NULL, &got_packet);
1027 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1030 if (ost->logfile && enc->stats_out) {
1031 fprintf(ost->logfile, "%s", enc->stats_out);
1037 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1038 write_frame(os, &pkt, ost);
1048 * Check whether a packet from ist should be written into ost at this time
1050 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1052 OutputFile *of = output_files[ost->file_index];
1053 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1055 if (ost->source_index != ist_index)
1058 if (of->start_time != AV_NOPTS_VALUE && ist->last_dts < of->start_time)
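/*
 * Remux a packet without reencoding: rescale its timestamps to the output
 * timebase, honour the output start and recording time limits and hand the
 * result to write_frame().
 */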
1064 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1066 OutputFile *of = output_files[ost->file_index];
1067 InputFile *f = input_files [ist->file_index];
1068 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1069 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1072 av_init_packet(&opkt);
1074 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1075 !ost->copy_initial_nonkeyframes)
1078 if (of->recording_time != INT64_MAX &&
1079 ist->last_dts >= of->recording_time + start_time) {
1084 if (f->recording_time != INT64_MAX) {
1085 start_time = f->ctx->start_time;
1086 if (f->start_time != AV_NOPTS_VALUE)
1087 start_time += f->start_time;
1088 if (ist->last_dts >= f->recording_time + start_time) {
1094 /* force the input stream PTS */
1095 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1098 if (pkt->pts != AV_NOPTS_VALUE)
1099 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1101 opkt.pts = AV_NOPTS_VALUE;
1103 if (pkt->dts == AV_NOPTS_VALUE)
1104 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
1106 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1107 opkt.dts -= ost_tb_start_time;
1109 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1110 opkt.flags = pkt->flags;
1112 // FIXME remove the following 2 lines; they shall be replaced by the bitstream filters
1113 if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1114 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1115 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1116 && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
1118 if (av_parser_change(ost->parser, ost->st->codec,
1119 &opkt.data, &opkt.size,
1120 pkt->data, pkt->size,
1121 pkt->flags & AV_PKT_FLAG_KEY)) {
1122 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1127 opkt.data = pkt->data;
1128 opkt.size = pkt->size;
1131 write_frame(of->ctx, &opkt, ost);
1134 int guess_input_channel_layout(InputStream *ist)
1136 AVCodecContext *dec = ist->dec_ctx;
1138 if (!dec->channel_layout) {
1139 char layout_name[256];
1141 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1142 if (!dec->channel_layout)
1144 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1145 dec->channels, dec->channel_layout);
1146 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1147 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1152 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1154 AVFrame *decoded_frame, *f;
1155 AVCodecContext *avctx = ist->dec_ctx;
1156 int i, ret, err = 0, resample_changed;
1158 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1159 return AVERROR(ENOMEM);
1160 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1161 return AVERROR(ENOMEM);
1162 decoded_frame = ist->decoded_frame;
1164 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1165 if (!*got_output || ret < 0) {
1167 for (i = 0; i < ist->nb_filters; i++)
1168 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
1173 ist->samples_decoded += decoded_frame->nb_samples;
1174 ist->frames_decoded++;
1176 /* if the decoder provides a pts, use it instead of the last packet pts,
1177 as the decoder could be delaying output by a packet or more. */
1178 if (decoded_frame->pts != AV_NOPTS_VALUE)
1179 ist->next_dts = decoded_frame->pts;
1180 else if (pkt->pts != AV_NOPTS_VALUE)
1181 decoded_frame->pts = pkt->pts;
1182 pkt->pts = AV_NOPTS_VALUE;
1184 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1185 ist->resample_channels != avctx->channels ||
1186 ist->resample_channel_layout != decoded_frame->channel_layout ||
1187 ist->resample_sample_rate != decoded_frame->sample_rate;
1188 if (resample_changed) {
1189 char layout1[64], layout2[64];
1191 if (!guess_input_channel_layout(ist)) {
1192 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1193 "layout for Input Stream #%d.%d\n", ist->file_index,
1197 decoded_frame->channel_layout = avctx->channel_layout;
1199 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1200 ist->resample_channel_layout);
1201 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1202 decoded_frame->channel_layout);
1204 av_log(NULL, AV_LOG_INFO,
1205 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1206 ist->file_index, ist->st->index,
1207 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1208 ist->resample_channels, layout1,
1209 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1210 avctx->channels, layout2);
1212 ist->resample_sample_fmt = decoded_frame->format;
1213 ist->resample_sample_rate = decoded_frame->sample_rate;
1214 ist->resample_channel_layout = decoded_frame->channel_layout;
1215 ist->resample_channels = avctx->channels;
1217 for (i = 0; i < nb_filtergraphs; i++)
1218 if (ist_in_filtergraph(filtergraphs[i], ist) &&
1219 configure_filtergraph(filtergraphs[i]) < 0) {
1220 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1225 if (decoded_frame->pts != AV_NOPTS_VALUE)
1226 decoded_frame->pts = av_rescale_q(decoded_frame->pts,
1228 (AVRational){1, avctx->sample_rate});
1229 for (i = 0; i < ist->nb_filters; i++) {
1230 if (i < ist->nb_filters - 1) {
1231 f = ist->filter_frame;
1232 err = av_frame_ref(f, decoded_frame);
1238 err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
1243 av_frame_unref(ist->filter_frame);
1244 av_frame_unref(decoded_frame);
1245 return err < 0 ? err : ret;
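/*
 * Decode one video packet, retrieve hwaccel frames if needed, detect changes
 * of the input size or pixel format, and feed the frame to all attached filters.
 */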
1248 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1250 AVFrame *decoded_frame, *f;
1251 int i, ret = 0, err = 0, resample_changed;
1253 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1254 return AVERROR(ENOMEM);
1255 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1256 return AVERROR(ENOMEM);
1257 decoded_frame = ist->decoded_frame;
1259 ret = avcodec_decode_video2(ist->dec_ctx,
1260 decoded_frame, got_output, pkt);
1261 if (!*got_output || ret < 0) {
1263 for (i = 0; i < ist->nb_filters; i++)
1264 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
1269 ist->frames_decoded++;
1271 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
1272 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
1276 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
1278 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
1279 decoded_frame->pkt_dts);
1282 if (ist->st->sample_aspect_ratio.num)
1283 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
1285 resample_changed = ist->resample_width != decoded_frame->width ||
1286 ist->resample_height != decoded_frame->height ||
1287 ist->resample_pix_fmt != decoded_frame->format;
1288 if (resample_changed) {
1289 av_log(NULL, AV_LOG_INFO,
1290 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1291 ist->file_index, ist->st->index,
1292 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
1293 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
1295 ret = poll_filters();
1296 if (ret < 0 && (ret != AVERROR_EOF && ret != AVERROR(EAGAIN)))
1297 av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
1299 ist->resample_width = decoded_frame->width;
1300 ist->resample_height = decoded_frame->height;
1301 ist->resample_pix_fmt = decoded_frame->format;
1303 for (i = 0; i < nb_filtergraphs; i++)
1304 if (ist_in_filtergraph(filtergraphs[i], ist) &&
1305 configure_filtergraph(filtergraphs[i]) < 0) {
1306 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1311 for (i = 0; i < ist->nb_filters; i++) {
1312 if (i < ist->nb_filters - 1) {
1313 f = ist->filter_frame;
1314 err = av_frame_ref(f, decoded_frame);
1320 err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
1326 av_frame_unref(ist->filter_frame);
1327 av_frame_unref(decoded_frame);
1328 return err < 0 ? err : ret;
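/* Decode one subtitle packet and encode it directly into every matching output stream. */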
1331 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
1333 AVSubtitle subtitle;
1334 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
1335 &subtitle, got_output, pkt);
1341 ist->frames_decoded++;
1343 for (i = 0; i < nb_output_streams; i++) {
1344 OutputStream *ost = output_streams[i];
1346 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1349 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
1352 avsubtitle_free(&subtitle);
1356 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1357 static int output_packet(InputStream *ist, const AVPacket *pkt)
1363 if (ist->next_dts == AV_NOPTS_VALUE)
1364 ist->next_dts = ist->last_dts;
1368 av_init_packet(&avpkt);
1376 if (pkt->dts != AV_NOPTS_VALUE)
1377 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1379 // while we have more to decode or while the decoder did output something on EOF
1380 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
1384 ist->last_dts = ist->next_dts;
1386 if (avpkt.size && avpkt.size != pkt->size &&
1387 !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
1388 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
1389 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1390 ist->showed_multi_packet_warning = 1;
1393 switch (ist->dec_ctx->codec_type) {
1394 case AVMEDIA_TYPE_AUDIO:
1395 ret = decode_audio (ist, &avpkt, &got_output);
1397 case AVMEDIA_TYPE_VIDEO:
1398 ret = decode_video (ist, &avpkt, &got_output);
1400 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
1401 else if (ist->st->avg_frame_rate.num)
1402 ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
1404 else if (ist->dec_ctx->time_base.num != 0) {
1405 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
1406 ist->dec_ctx->ticks_per_frame;
1407 ist->next_dts += av_rescale_q(ticks, ist->dec_ctx->time_base, AV_TIME_BASE_Q);
1410 case AVMEDIA_TYPE_SUBTITLE:
1411 ret = transcode_subtitles(ist, &avpkt, &got_output);
1419 // touch data and size only if not EOF
1429 /* handle stream copy */
1430 if (!ist->decoding_needed) {
1431 ist->last_dts = ist->next_dts;
1432 switch (ist->dec_ctx->codec_type) {
1433 case AVMEDIA_TYPE_AUDIO:
1434 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
1435 ist->dec_ctx->sample_rate;
1437 case AVMEDIA_TYPE_VIDEO:
1438 if (ist->dec_ctx->time_base.num != 0) {
1439 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
1440 ist->next_dts += ((int64_t)AV_TIME_BASE *
1441 ist->dec_ctx->time_base.num * ticks) /
1442 ist->dec_ctx->time_base.den;
1447 for (i = 0; pkt && i < nb_output_streams; i++) {
1448 OutputStream *ost = output_streams[i];
1450 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1453 do_streamcopy(ist, ost, pkt);
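/* Print an SDP description covering all output files to stdout. */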
1459 static void print_sdp(void)
1463 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
1467 for (i = 0; i < nb_output_files; i++)
1468 avc[i] = output_files[i]->ctx;
1470 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1471 printf("SDP:\n%s\n", sdp);
1476 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
1479 for (i = 0; hwaccels[i].name; i++)
1480 if (hwaccels[i].pix_fmt == pix_fmt)
1481 return &hwaccels[i];
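/*
 * Pixel format negotiation callback: for hwaccel-capable formats offered by
 * the decoder, try to initialize the matching hardware acceleration context
 * (honouring the -hwaccel selection) and remember the chosen hwaccel format.
 */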
1485 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
1487 InputStream *ist = s->opaque;
1488 const enum AVPixelFormat *p;
1491 for (p = pix_fmts; *p != -1; p++) {
1492 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
1493 const HWAccel *hwaccel;
1495 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1498 hwaccel = get_hwaccel(*p);
1500 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
1501 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
1504 ret = hwaccel->init(s);
1506 if (ist->hwaccel_id == hwaccel->id) {
1507 av_log(NULL, AV_LOG_FATAL,
1508 "%s hwaccel requested for input stream #%d:%d, "
1509 "but cannot be initialized.\n", hwaccel->name,
1510 ist->file_index, ist->st->index);
1515 ist->active_hwaccel_id = hwaccel->id;
1516 ist->hwaccel_pix_fmt = *p;
1523 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
1525 InputStream *ist = s->opaque;
1527 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
1528 return ist->hwaccel_get_buffer(s, frame, flags);
1530 return avcodec_default_get_buffer2(s, frame, flags);
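/*
 * Open the decoder for an input stream that needs decoding and install the
 * hwaccel-aware get_format()/get_buffer2() callbacks.
 */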
1533 static int init_input_stream(int ist_index, char *error, int error_len)
1536 InputStream *ist = input_streams[ist_index];
1537 if (ist->decoding_needed) {
1538 AVCodec *codec = ist->dec;
1540 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
1541 ist->dec_ctx->codec_id, ist->file_index, ist->st->index);
1542 return AVERROR(EINVAL);
1545 /* update requested sample format for the decoder based on the
1546 corresponding encoder sample format */
1547 for (i = 0; i < nb_output_streams; i++) {
1548 OutputStream *ost = output_streams[i];
1549 if (ost->source_index == ist_index) {
1550 update_sample_fmt(ist->dec_ctx, codec, ost->enc_ctx);
1555 ist->dec_ctx->opaque = ist;
1556 ist->dec_ctx->get_format = get_format;
1557 ist->dec_ctx->get_buffer2 = get_buffer;
1558 ist->dec_ctx->thread_safe_callbacks = 1;
1560 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
1562 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
1563 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
1564 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
1566 if (ret == AVERROR_EXPERIMENTAL)
1567 abort_codec_experimental(codec, 0);
1569 av_strerror(ret, errbuf, sizeof(errbuf));
1571 snprintf(error, error_len,
1572 "Error while opening decoder for input stream "
1574 ist->file_index, ist->st->index, errbuf);
1577 assert_avoptions(ist->decoder_opts);
1580 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1581 ist->next_dts = AV_NOPTS_VALUE;
1582 init_pts_correction(&ist->pts_ctx);
1587 static InputStream *get_input_stream(OutputStream *ost)
1589 if (ost->source_index >= 0)
1590 return input_streams[ost->source_index];
1593 FilterGraph *fg = ost->filter->graph;
1596 for (i = 0; i < fg->nb_inputs; i++)
1597 if (fg->inputs[i]->ist->dec_ctx->codec_type == ost->enc_ctx->codec_type)
1598 return fg->inputs[i]->ist;
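/* Parse the comma-separated -force_key_frames times into an array of timestamps in the encoder timebase. */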
1604 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1605 AVCodecContext *avctx)
1611 for (p = kf; *p; p++)
1614 ost->forced_kf_count = n;
1615 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
1616 if (!ost->forced_kf_pts) {
1617 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
1622 for (i = 0; i < n; i++) {
1623 char *next = strchr(p, ',');
1628 t = parse_time_or_die("force_key_frames", p, 1);
1629 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
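/*
 * Set the "encoder" metadata tag on the output stream to the libavcodec
 * identifier plus the encoder name, unless bitexact muxing was requested.
 */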
1635 static void set_encoder_id(OutputFile *of, OutputStream *ost)
1637 AVDictionaryEntry *e;
1639 uint8_t *encoder_string;
1640 int encoder_string_len;
1641 int format_flags = 0;
1643 e = av_dict_get(of->opts, "fflags", NULL, 0);
1645 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
1648 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
1651 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
1652 encoder_string = av_mallocz(encoder_string_len);
1653 if (!encoder_string)
1656 if (!(format_flags & AVFMT_FLAG_BITEXACT))
1657 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
1658 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
1659 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
1660 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
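/*
 * Set up all streams for transcoding: copy or derive encoder parameters,
 * open decoders and encoders, write the output headers and print the
 * stream mapping.
 */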
1663 static int transcode_init(void)
1665 int ret = 0, i, j, k;
1666 AVFormatContext *oc;
1672 /* init framerate emulation */
1673 for (i = 0; i < nb_input_files; i++) {
1674 InputFile *ifile = input_files[i];
1675 if (ifile->rate_emu)
1676 for (j = 0; j < ifile->nb_streams; j++)
1677 input_streams[j + ifile->ist_index]->start = av_gettime();
1680 /* output stream init */
1681 for (i = 0; i < nb_output_files; i++) {
1682 oc = output_files[i]->ctx;
1683 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
1684 av_dump_format(oc, i, oc->filename, 1);
1685 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
1686 return AVERROR(EINVAL);
1690 /* init complex filtergraphs */
1691 for (i = 0; i < nb_filtergraphs; i++)
1692 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
1695 /* for each output stream, we compute the right encoding parameters */
1696 for (i = 0; i < nb_output_streams; i++) {
1697 AVCodecContext *enc_ctx;
1698 AVCodecContext *dec_ctx = NULL;
1699 ost = output_streams[i];
1700 oc = output_files[ost->file_index]->ctx;
1701 ist = get_input_stream(ost);
1703 if (ost->attachment_filename)
1706 enc_ctx = ost->enc_ctx;
1709 dec_ctx = ist->dec_ctx;
1711 ost->st->disposition = ist->st->disposition;
1712 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
1713 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
1716 if (ost->stream_copy) {
1718 uint64_t extra_size;
1720 av_assert0(ist && !ost->filter);
1722 extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
1724 if (extra_size > INT_MAX) {
1725 return AVERROR(EINVAL);
1728 /* if stream_copy is selected, no need to decode or encode */
1729 enc_ctx->codec_id = dec_ctx->codec_id;
1730 enc_ctx->codec_type = dec_ctx->codec_type;
1732 if (!enc_ctx->codec_tag) {
1733 if (!oc->oformat->codec_tag ||
1734 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
1735 av_codec_get_tag(oc->oformat->codec_tag, dec_ctx->codec_id) <= 0)
1736 enc_ctx->codec_tag = dec_ctx->codec_tag;
1739 enc_ctx->bit_rate = dec_ctx->bit_rate;
1740 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
1741 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
1742 enc_ctx->field_order = dec_ctx->field_order;
1743 enc_ctx->extradata = av_mallocz(extra_size);
1744 if (!enc_ctx->extradata) {
1745 return AVERROR(ENOMEM);
1747 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
1748 enc_ctx->extradata_size = dec_ctx->extradata_size;
1750 enc_ctx->time_base = dec_ctx->time_base;
1751 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
1752 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
1753 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
1755 enc_ctx->time_base = ist->st->time_base;
1757 ost->parser = av_parser_init(enc_ctx->codec_id);
1759 switch (enc_ctx->codec_type) {
1760 case AVMEDIA_TYPE_AUDIO:
1761 if (audio_volume != 256) {
1762 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
1765 enc_ctx->channel_layout = dec_ctx->channel_layout;
1766 enc_ctx->sample_rate = dec_ctx->sample_rate;
1767 enc_ctx->channels = dec_ctx->channels;
1768 enc_ctx->frame_size = dec_ctx->frame_size;
1769 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
1770 enc_ctx->block_align = dec_ctx->block_align;
1772 case AVMEDIA_TYPE_VIDEO:
1773 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
1774 enc_ctx->width = dec_ctx->width;
1775 enc_ctx->height = dec_ctx->height;
1776 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
1777 if (ost->frame_aspect_ratio)
1778 sar = av_d2q(ost->frame_aspect_ratio * enc_ctx->height / enc_ctx->width, 255);
1779 else if (ist->st->sample_aspect_ratio.num)
1780 sar = ist->st->sample_aspect_ratio;
1782 sar = dec_ctx->sample_aspect_ratio;
1783 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
1785 case AVMEDIA_TYPE_SUBTITLE:
1786 enc_ctx->width = dec_ctx->width;
1787 enc_ctx->height = dec_ctx->height;
1789 case AVMEDIA_TYPE_DATA:
1790 case AVMEDIA_TYPE_ATTACHMENT:
1797 /* should only happen when a default codec is not present. */
1798 snprintf(error, sizeof(error), "Automatic encoder selection "
1799 "failed for output stream #%d:%d. Default encoder for "
1800 "format %s is probably disabled. Please choose an "
1801 "encoder manually.\n", ost->file_index, ost->index,
1803 ret = AVERROR(EINVAL);
1808 ist->decoding_needed = 1;
1809 ost->encoding_needed = 1;
1811 set_encoder_id(output_files[ost->file_index], ost);
1814 * We want CFR output if and only if one of the following is true:
1815 * 1) user specified output framerate with -r
1816 * 2) user specified -vsync cfr
1817 * 3) output format is CFR and the user didn't force vsync to
1818 * something other than CFR
1820 * in such a case, set ost->frame_rate
1822 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO &&
1823 !ost->frame_rate.num && ist &&
1824 (video_sync_method == VSYNC_CFR ||
1825 (video_sync_method == VSYNC_AUTO &&
1826 !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
1827 if (ist->framerate.num)
1828 ost->frame_rate = ist->framerate;
1829 else if (ist->st->avg_frame_rate.num)
1830 ost->frame_rate = ist->st->avg_frame_rate;
1832 av_log(NULL, AV_LOG_WARNING, "Constant framerate requested "
1833 "for the output stream #%d:%d, but no information "
1834 "about the input framerate is available. Falling "
1835 "back to a default value of 25fps. Use the -r option "
1836 "if you want a different framerate.\n",
1837 ost->file_index, ost->index);
1838 ost->frame_rate = (AVRational){ 25, 1 };
1841 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
1842 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
1843 ost->frame_rate = ost->enc->supported_framerates[idx];
1848 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
1849 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
1851 fg = init_simple_filtergraph(ist, ost);
1852 if (configure_filtergraph(fg)) {
1853 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
1858 switch (enc_ctx->codec_type) {
1859 case AVMEDIA_TYPE_AUDIO:
1860 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
1861 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
1862 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
1863 enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
1864 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
1866 case AVMEDIA_TYPE_VIDEO:
1867 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
1869 enc_ctx->width = ost->filter->filter->inputs[0]->w;
1870 enc_ctx->height = ost->filter->filter->inputs[0]->h;
1871 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
1872 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
1873 av_d2q(ost->frame_aspect_ratio * enc_ctx->height/enc_ctx->width, 255) :
1874 ost->filter->filter->inputs[0]->sample_aspect_ratio;
1875 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
1877 ost->st->avg_frame_rate = ost->frame_rate;
1880 (enc_ctx->width != dec_ctx->width ||
1881 enc_ctx->height != dec_ctx->height ||
1882 enc_ctx->pix_fmt != dec_ctx->pix_fmt)) {
1883 enc_ctx->bits_per_raw_sample = 0;
1886 if (ost->forced_keyframes)
1887 parse_forced_key_frames(ost->forced_keyframes, ost,
1890 case AVMEDIA_TYPE_SUBTITLE:
1891 enc_ctx->time_base = (AVRational){1, 1000};
1898 if ((enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1899 char logfilename[1024];
1902 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1903 ost->logfile_prefix ? ost->logfile_prefix :
1904 DEFAULT_PASS_LOGFILENAME_PREFIX,
1906 if (!strcmp(ost->enc->name, "libx264")) {
1907 av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
1909 if (enc_ctx->flags & CODEC_FLAG_PASS1) {
1910 f = fopen(logfilename, "wb");
1912 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
1913 logfilename, strerror(errno));
1919 size_t logbuffer_size;
1920 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
1921 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
1925 enc_ctx->stats_in = logbuffer;
1932 /* open each encoder */
1933 for (i = 0; i < nb_output_streams; i++) {
1934 ost = output_streams[i];
1935 if (ost->encoding_needed) {
1936 AVCodec *codec = ost->enc;
1937 AVCodecContext *dec = NULL;
1939 if ((ist = get_input_stream(ost)))
1941 if (dec && dec->subtitle_header) {
1942 ost->enc_ctx->subtitle_header = av_malloc(dec->subtitle_header_size);
1943 if (!ost->enc_ctx->subtitle_header) {
1944 ret = AVERROR(ENOMEM);
1947 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
1948 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
1950 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
1951 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
1952 av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
1954 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
1955 if (ret == AVERROR_EXPERIMENTAL)
1956 abort_codec_experimental(codec, 1);
1957 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
1958 ost->file_index, ost->index);
1961 assert_avoptions(ost->encoder_opts);
1962 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
1963 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
1964 "It takes bits/s as argument, not kbits/s\n");
1966 av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
1969 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
1971 av_log(NULL, AV_LOG_FATAL,
1972 "Error initializing the output stream codec context.\n");
1976 ost->st->time_base = ost->enc_ctx->time_base;
1979 /* init input streams */
1980 for (i = 0; i < nb_input_streams; i++)
1981 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
1984 /* discard unused programs */
1985 for (i = 0; i < nb_input_files; i++) {
1986 InputFile *ifile = input_files[i];
1987 for (j = 0; j < ifile->ctx->nb_programs; j++) {
1988 AVProgram *p = ifile->ctx->programs[j];
1989 int discard = AVDISCARD_ALL;
1991 for (k = 0; k < p->nb_stream_indexes; k++)
1992 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
1993 discard = AVDISCARD_DEFAULT;
1996 p->discard = discard;
2000 /* open files and write file headers */
2001 for (i = 0; i < nb_output_files; i++) {
2002 oc = output_files[i]->ctx;
2003 oc->interrupt_callback = int_cb;
2004 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
2006 av_strerror(ret, errbuf, sizeof(errbuf));
2007 snprintf(error, sizeof(error),
2008 "Could not write header for output file #%d "
2009 "(incorrect codec parameters ?): %s",
2011 ret = AVERROR(EINVAL);
2014 assert_avoptions(output_files[i]->opts);
2015 if (strcmp(oc->oformat->name, "rtp")) {
2021 /* dump the file output parameters - cannot be done before in case of stream copy */
2023 for (i = 0; i < nb_output_files; i++) {
2024 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2027 /* dump the stream mapping */
2028 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2029 for (i = 0; i < nb_input_streams; i++) {
2030 ist = input_streams[i];
2032 for (j = 0; j < ist->nb_filters; j++) {
2033 if (ist->filters[j]->graph->graph_desc) {
2034 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2035 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2036 ist->filters[j]->name);
2037 if (nb_filtergraphs > 1)
2038 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2039 av_log(NULL, AV_LOG_INFO, "\n");
2044 for (i = 0; i < nb_output_streams; i++) {
2045 ost = output_streams[i];
2047 if (ost->attachment_filename) {
2048 /* an attached file */
2049 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2050 ost->attachment_filename, ost->file_index, ost->index);
2054 if (ost->filter && ost->filter->graph->graph_desc) {
2055 /* output from a complex graph */
2056 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
2057 if (nb_filtergraphs > 1)
2058 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2060 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2061 ost->index, ost->enc ? ost->enc->name : "?");
2065 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2066 input_streams[ost->source_index]->file_index,
2067 input_streams[ost->source_index]->st->index,
2070 if (ost->sync_ist != input_streams[ost->source_index])
2071 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2072 ost->sync_ist->file_index,
2073 ost->sync_ist->st->index);
2074 if (ost->stream_copy)
2075 av_log(NULL, AV_LOG_INFO, " (copy)");
2077 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
2078 const AVCodec *out_codec = ost->enc;
2079 const char *decoder_name = "?";
2080 const char *in_codec_name = "?";
2081 const char *encoder_name = "?";
2082 const char *out_codec_name = "?";
2085 decoder_name = in_codec->name;
2086 in_codec_name = avcodec_descriptor_get(in_codec->id)->name;
2087 if (!strcmp(decoder_name, in_codec_name))
2088 decoder_name = "native";
2092 encoder_name = out_codec->name;
2093 out_codec_name = avcodec_descriptor_get(out_codec->id)->name;
2094 if (!strcmp(encoder_name, out_codec_name))
2095 encoder_name = "native";
2098 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
2099 in_codec_name, decoder_name,
2100 out_codec_name, encoder_name);
2102 av_log(NULL, AV_LOG_INFO, "\n");
2106 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2117 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
2118 static int need_output(void)
2122 for (i = 0; i < nb_output_streams; i++) {
2123 OutputStream *ost = output_streams[i];
2124 OutputFile *of = output_files[ost->file_index];
2125 AVFormatContext *os = output_files[ost->file_index]->ctx;
2127 if (ost->finished ||
2128 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
2130 if (ost->frame_number >= ost->max_frames) {
2132 for (j = 0; j < of->ctx->nb_streams; j++)
2133 output_streams[of->ost_index + j]->finished = 1;
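/* Select the input file to read from next: the one whose active stream has the lowest last decoded timestamp. */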
2143 static InputFile *select_input_file(void)
2145 InputFile *ifile = NULL;
2146 int64_t ipts_min = INT64_MAX;
2149 for (i = 0; i < nb_input_streams; i++) {
2150 InputStream *ist = input_streams[i];
2151 int64_t ipts = ist->last_dts;
2153 if (ist->discard || input_files[ist->file_index]->eagain)
2155 if (!input_files[ist->file_index]->eof_reached) {
2156 if (ipts < ipts_min) {
2158 ifile = input_files[ist->file_index];
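/*
 * Per-input-file demuxer thread: read packets with av_read_frame() and push
 * them into the file's FIFO, blocking while the FIFO is full.
 */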
2167 static void *input_thread(void *arg)
2172 while (!transcoding_finished && ret >= 0) {
2174 ret = av_read_frame(f->ctx, &pkt);
2176 if (ret == AVERROR(EAGAIN)) {
2183 pthread_mutex_lock(&f->fifo_lock);
2184 while (!av_fifo_space(f->fifo))
2185 pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
2187 av_dup_packet(&pkt);
2188 av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
2190 pthread_mutex_unlock(&f->fifo_lock);
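/* Tell the demuxer threads to stop, drain and free each input FIFO, and join
 * the threads.  A single input file is read directly, so there is nothing to
 * do in that case. */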
2197 static void free_input_threads(void)
2201 if (nb_input_files == 1)
2204 transcoding_finished = 1;
2206 for (i = 0; i < nb_input_files; i++) {
2207 InputFile *f = input_files[i];
2210 if (!f->fifo || f->joined)
2213 pthread_mutex_lock(&f->fifo_lock);
2214 while (av_fifo_size(f->fifo)) {
2215 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2216 av_free_packet(&pkt);
2218 pthread_cond_signal(&f->fifo_cond);
2219 pthread_mutex_unlock(&f->fifo_lock);
2221 pthread_join(f->thread, NULL);
2224 while (av_fifo_size(f->fifo)) {
2225 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2226 av_free_packet(&pkt);
2228 av_fifo_free(f->fifo);
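/* Start one demuxer thread per input file, each feeding a FIFO with room for
 * eight queued packets, protected by a mutex/condition pair.  Skipped when
 * only one input file is open. */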
2232 static int init_input_threads(void)
2236 if (nb_input_files == 1)
2239 for (i = 0; i < nb_input_files; i++) {
2240 InputFile *f = input_files[i];
2242 if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
2243 return AVERROR(ENOMEM);
2245 pthread_mutex_init(&f->fifo_lock, NULL);
2246 pthread_cond_init (&f->fifo_cond, NULL);
2248 if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
2249 return AVERROR(ret);
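/* Take one packet out of the file's FIFO if one is queued, signalling the
 * demuxer thread that space was freed; otherwise ask the caller to retry. */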
2254 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2258 pthread_mutex_lock(&f->fifo_lock);
2260 if (av_fifo_size(f->fifo)) {
2261 av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
2262 pthread_cond_signal(&f->fifo_cond);
2267 ret = AVERROR(EAGAIN);
2270 pthread_mutex_unlock(&f->fifo_lock);
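/* Fetch the next packet for an input file: with rate emulation, return EAGAIN
 * while a stream's last decoded DTS is still ahead of the wall clock; with
 * several open inputs, read from the demuxer thread's FIFO; otherwise call
 * av_read_frame() directly. */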
2276 static int get_input_packet(InputFile *f, AVPacket *pkt)
2280 for (i = 0; i < f->nb_streams; i++) {
2281 InputStream *ist = input_streams[f->ist_index + i];
2282 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2283 int64_t now = av_gettime() - ist->start;
2285 return AVERROR(EAGAIN);
2290 if (nb_input_files > 1)
2291 return get_input_packet_mt(f, pkt);
2293 return av_read_frame(f->ctx, pkt);
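/* Return whether any input file hit EAGAIN on its last read attempt. */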
2296 static int got_eagain(void)
2299 for (i = 0; i < nb_input_files; i++)
2300 if (input_files[i]->eagain)
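/* Clear the per-file EAGAIN flags before the next round of reads. */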
2305 static void reset_eagain(void)
2308 for (i = 0; i < nb_input_files; i++)
2309 input_files[i]->eagain = 0;
2313 * Read one packet from an input file and send it for
2314 * - decoding -> lavfi (audio/video)
2315 * - decoding -> encoding -> muxing (subtitles)
2316 * - muxing (streamcopy)
2319 * - 0 -- one packet was read and processed
2320 * - AVERROR(EAGAIN) -- no packets were available for selected file,
2321 * this function should be called again
2322 * - AVERROR_EOF -- this function should not be called again
2324 static int process_input(void)
2327 AVFormatContext *is;
2332 /* select the input file that we must read from now */
2333 ifile = select_input_file();
2334 /* if none, input is finished */
2339 return AVERROR(EAGAIN);
2341 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from.\n");
2346 ret = get_input_packet(ifile, &pkt);
2348 if (ret == AVERROR(EAGAIN)) {
2353 if (ret != AVERROR_EOF) {
2354 print_error(is->filename, ret);
2358 ifile->eof_reached = 1;
2360 for (i = 0; i < ifile->nb_streams; i++) {
2361 ist = input_streams[ifile->ist_index + i];
2362 if (ist->decoding_needed)
2363 output_packet(ist, NULL);
2365 /* mark all outputs that don't go through lavfi as finished */
2366 for (j = 0; j < nb_output_streams; j++) {
2367 OutputStream *ost = output_streams[j];
2369 if (ost->source_index == ifile->ist_index + i &&
2370 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
2371 finish_output_stream(ost);
2375 return AVERROR(EAGAIN);
2381 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2382 is->streams[pkt.stream_index]);
2384 /* the following test is needed in case new streams appear
2385 dynamically in the stream: we ignore them */
2386 if (pkt.stream_index >= ifile->nb_streams)
2387 goto discard_packet;
2389 ist = input_streams[ifile->ist_index + pkt.stream_index];
2391 ist->data_size += pkt.size;
2395 goto discard_packet;
2397 /* add the stream-global side data to the first packet */
2398 if (ist->nb_packets == 1)
2399 for (i = 0; i < ist->st->nb_side_data; i++) {
2400 AVPacketSideData *src_sd = &ist->st->side_data[i];
2403 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
2406 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
2410 memcpy(dst_data, src_sd->data, src_sd->size);
2413 if (pkt.dts != AV_NOPTS_VALUE)
2414 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2415 if (pkt.pts != AV_NOPTS_VALUE)
2416 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2418 if (pkt.pts != AV_NOPTS_VALUE)
2419 pkt.pts *= ist->ts_scale;
2420 if (pkt.dts != AV_NOPTS_VALUE)
2421 pkt.dts *= ist->ts_scale;
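/* for formats that may have timestamp discontinuities, fold large jumps
 * between the packet DTS and the predicted next DTS into the file-wide
 * ts_offset so that downstream timestamps stay continuous */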
2423 if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
2424 (is->iformat->flags & AVFMT_TS_DISCONT)) {
2425 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2426 int64_t delta = pkt_dts - ist->next_dts;
2428 if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
2429 ifile->ts_offset -= delta;
2430 av_log(NULL, AV_LOG_DEBUG,
2431 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2432 delta, ifile->ts_offset);
2433 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2434 if (pkt.pts != AV_NOPTS_VALUE)
2435 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2439 ret = output_packet(ist, &pkt);
2441 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
2442 ist->file_index, ist->st->index);
2448 av_free_packet(&pkt);
2454 * The following code is the main loop of the file converter
2456 static int transcode(void)
2458 int ret, i, need_input = 1;
2459 AVFormatContext *os;
2462 int64_t timer_start;
2464 ret = transcode_init();
2468 av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2471 timer_start = av_gettime();
2474 if ((ret = init_input_threads()) < 0)
2478 while (!received_sigterm) {
2479 /* check if there's any stream where output is still needed */
2480 if (!need_output()) {
2481 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
2485 /* read and process one input packet if needed */
2487 ret = process_input();
2488 if (ret == AVERROR_EOF)
2492 ret = poll_filters();
2494 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
2497 av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
2501 /* dump report by using the first video and audio output streams */
2502 print_report(0, timer_start);
2505 free_input_threads();
2508 /* at the end of stream, we must flush the decoder buffers */
2509 for (i = 0; i < nb_input_streams; i++) {
2510 ist = input_streams[i];
2511 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
2512 output_packet(ist, NULL);
2520 /* write the trailer if needed and close the file */
2521 for (i = 0; i < nb_output_files; i++) {
2522 os = output_files[i]->ctx;
2523 av_write_trailer(os);
2526 /* dump report by using the first video and audio streams */
2527 print_report(1, timer_start);
2529 /* close each encoder */
2530 for (i = 0; i < nb_output_streams; i++) {
2531 ost = output_streams[i];
2532 if (ost->encoding_needed) {
2533 av_freep(&ost->enc_ctx->stats_in);
2537 /* close each decoder */
2538 for (i = 0; i < nb_input_streams; i++) {
2539 ist = input_streams[i];
2540 if (ist->decoding_needed) {
2541 avcodec_close(ist->dec_ctx);
2542 if (ist->hwaccel_uninit)
2543 ist->hwaccel_uninit(ist->dec_ctx);
2552 free_input_threads();
2555 if (output_streams) {
2556 for (i = 0; i < nb_output_streams; i++) {
2557 ost = output_streams[i];
2560 fclose(ost->logfile);
2561 ost->logfile = NULL;
2563 av_free(ost->forced_kf_pts);
2564 av_dict_free(&ost->encoder_opts);
2565 av_dict_free(&ost->resample_opts);
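/* User CPU time consumed by this process, in microseconds; falls back to
 * wall-clock time when neither getrusage() nor GetProcessTimes() is
 * available. */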
2572 static int64_t getutime(void)
2575 struct rusage rusage;
2577 getrusage(RUSAGE_SELF, &rusage);
2578 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
2579 #elif HAVE_GETPROCESSTIMES
2581 FILETIME c, e, k, u;
2582 proc = GetCurrentProcess();
2583 GetProcessTimes(proc, &c, &e, &k, &u);
2584 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
2586 return av_gettime();
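/* Peak memory usage of this process in bytes: ru_maxrss via getrusage()
 * where available, PeakPagefileUsage via GetProcessMemoryInfo() on Windows. */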
2590 static int64_t getmaxrss(void)
2592 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
2593 struct rusage rusage;
2594 getrusage(RUSAGE_SELF, &rusage);
2595 return (int64_t)rusage.ru_maxrss * 1024;
2596 #elif HAVE_GETPROCESSMEMORYINFO
2598 PROCESS_MEMORY_COUNTERS memcounters;
2599 proc = GetCurrentProcess();
2600 memcounters.cb = sizeof(memcounters);
2601 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
2602 return memcounters.PeakPagefileUsage;
2608 int main(int argc, char **argv)
2613 register_exit(avconv_cleanup);
2615 av_log_set_flags(AV_LOG_SKIP_REPEATED);
2616 parse_loglevel(argc, argv, options);
2618 avcodec_register_all();
2620 avdevice_register_all();
2622 avfilter_register_all();
2624 avformat_network_init();
2628 /* parse options and open all input/output files */
2629 ret = avconv_parse_options(argc, argv);
2633 if (nb_output_files <= 0 && nb_input_files == 0) {
2635 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2639 /* file converter / grab */
2640 if (nb_output_files <= 0) {
2641 fprintf(stderr, "At least one output file must be specified\n");
2646 if (transcode() < 0)
2648 ti = getutime() - ti;
2650 int maxrss = getmaxrss() / 1024;
2651 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);