3 * Copyright (c) 2000-2011 The libav developers.
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
32 #include "libavformat/avformat.h"
33 #include "libavdevice/avdevice.h"
34 #include "libswscale/swscale.h"
35 #include "libavresample/avresample.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/channel_layout.h"
38 #include "libavutil/parseutils.h"
39 #include "libavutil/samplefmt.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavutil/time.h"
49 #include "libavformat/os_support.h"
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
55 #if HAVE_SYS_RESOURCE_H
57 #include <sys/types.h>
58 #include <sys/resource.h>
59 #elif HAVE_GETPROCESSTIMES
62 #if HAVE_GETPROCESSMEMORYINFO
68 #include <sys/select.h>
80 #include "libavutil/avassert.h"
/* NOTE(review): this listing is elided — embedded source line numbers jump,
 * so statements are missing throughout. Code kept byte-identical. */
/* Program identity used by the shared cmdutils banner/help code. */
82 const char program_name[] = "avconv";
83 const int program_birth_year = 2000;
/* Output file for per-frame video statistics (-vstats); opened lazily
 * on first use in do_video_stats(). */
85 static FILE *vstats_file;
/* Frames dropped by video sync; reported in the status line (" drop=%d"). */
87 static int nb_frames_drop = 0;
92 /* signal to input threads that they should exit; set by the main thread */
93 static int transcoding_finished;
/* Default prefix for two-pass encoding log file names. */
96 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
/* Global tables of all input/output streams and files, with parallel
 * element counters; indexed via file_index / ist_index / ost_index. */
98 InputStream **input_streams = NULL;
99 int nb_input_streams = 0;
100 InputFile **input_files = NULL;
101 int nb_input_files = 0;
103 OutputStream **output_streams = NULL;
104 int nb_output_streams = 0;
105 OutputFile **output_files = NULL;
106 int nb_output_files = 0;
/* All configured filtergraphs; freed in avconv_cleanup(). */
108 FilterGraph **filtergraphs;
/* Terminal/exit hook. Visible body only emits an empty QUIET-level log
 * message (forces the logger to terminate its current line); any further
 * teardown is elided from this listing. */
111 static void term_exit(void)
113 av_log(NULL, AV_LOG_QUIET, "");
/* Signal state written from sigterm_handler(); volatile because it is
 * modified asynchronously and polled by the main loop. */
116 static volatile int received_sigterm = 0;
117 static volatile int received_nb_signals = 0;
/* Async-signal handler: records the last signal number and counts how
 * many termination signals arrived (the second one makes
 * decode_interrupt_cb() abort blocking I/O). Return type line elided. */
120 sigterm_handler(int sig)
122 received_sigterm = sig;
123 received_nb_signals++;
/* Install sigterm_handler for the signals that should stop transcoding.
 * NOTE(review): SIGXCPU registration is presumably guarded by an #ifdef
 * that is elided here — confirm against the full source. */
127 static void term_init(void)
129 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
130 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
132 signal(SIGXCPU, sigterm_handler);
/* AVIOInterruptCB callback: ask lavf to abort blocking operations once
 * more than one termination signal has been received (first signal lets
 * the normal shutdown path run; second forces an interrupt). */
136 static int decode_interrupt_cb(void *ctx)
138 return received_nb_signals > 1;
/* Interrupt callback handed to avformat I/O; no opaque context needed. */
141 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown run at program exit: frees every filtergraph, output
 * file/stream, input file/stream and the tables that hold them, then
 * deinits the network layer and reports a received signal, if any.
 * NOTE(review): listing is elided (closing braces / avio_close etc. are
 * missing between numbered lines); code kept byte-identical. */
143 static void avconv_cleanup(int ret)
/* Free each filtergraph: the graph itself, its named input/output pads,
 * the pad arrays, and the textual graph description. */
147 for (i = 0; i < nb_filtergraphs; i++) {
148 FilterGraph *fg = filtergraphs[i];
149 avfilter_graph_free(&fg->graph);
150 for (j = 0; j < fg->nb_inputs; j++) {
151 av_freep(&fg->inputs[j]->name);
152 av_freep(&fg->inputs[j]);
154 av_freep(&fg->inputs);
155 for (j = 0; j < fg->nb_outputs; j++) {
156 av_freep(&fg->outputs[j]->name);
157 av_freep(&fg->outputs[j]);
159 av_freep(&fg->outputs);
160 av_freep(&fg->graph_desc);
162 av_freep(&filtergraphs[i]);
164 av_freep(&filtergraphs);
/* Close output files: only close the AVIOContext for muxers that own a
 * file (not AVFMT_NOFILE); the close call itself is elided here. */
167 for (i = 0; i < nb_output_files; i++) {
168 OutputFile *of = output_files[i];
169 AVFormatContext *s = of->ctx;
170 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
172 avformat_free_context(s);
173 av_dict_free(&of->opts);
175 av_freep(&output_files[i]);
/* Free per-output-stream state: bitstream filter chain, cached frame,
 * parser, option strings and the encoder context. */
177 for (i = 0; i < nb_output_streams; i++) {
178 OutputStream *ost = output_streams[i];
179 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
181 AVBitStreamFilterContext *next = bsfc->next;
182 av_bitstream_filter_close(bsfc);
185 ost->bitstream_filters = NULL;
186 av_frame_free(&ost->filtered_frame);
188 av_parser_close(ost->parser);
190 av_freep(&ost->forced_keyframes);
191 av_freep(&ost->avfilter);
192 av_freep(&ost->logfile_prefix);
194 avcodec_free_context(&ost->enc_ctx);
196 av_freep(&output_streams[i]);
198 for (i = 0; i < nb_input_files; i++) {
199 avformat_close_input(&input_files[i]->ctx);
200 av_freep(&input_files[i]);
/* Free per-input-stream state mirroring the output-stream loop above. */
202 for (i = 0; i < nb_input_streams; i++) {
203 InputStream *ist = input_streams[i];
205 av_frame_free(&ist->decoded_frame);
206 av_frame_free(&ist->filter_frame);
207 av_dict_free(&ist->decoder_opts);
208 av_freep(&ist->filters);
209 av_freep(&ist->hwaccel_device);
211 avcodec_free_context(&ist->dec_ctx);
213 av_freep(&input_streams[i]);
218 av_free(vstats_filename);
/* Finally drop the stream/file tables themselves. */
220 av_freep(&input_streams);
221 av_freep(&input_files);
222 av_freep(&output_streams);
223 av_freep(&output_files);
227 avformat_network_deinit();
229 if (received_sigterm) {
230 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
231 (int) received_sigterm);
/* Abort with a fatal log if any option in the dictionary was not
 * consumed by the library call it was passed to (leftover entries mean
 * the user supplied an unknown option). Exit path elided. */
236 void assert_avoptions(AVDictionary *m)
238 AVDictionaryEntry *t;
239 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
240 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Report that the chosen codec is experimental and, if the default
 * encoder/decoder for the same codec ID is non-experimental, suggest it
 * as an alternative. Called before aborting (exit path elided). */
245 static void abort_codec_experimental(AVCodec *c, int encoder)
247 const char *codec_string = encoder ? "encoder" : "decoder";
249 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
250 "results.\nAdd '-strict experimental' if you want to use it.\n",
251 codec_string, c->name);
/* Look up the default implementation for this codec id to see whether a
 * stable alternative exists. */
252 codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id);
253 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
254 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
255 codec_string, codec->name);
260 * Update the requested input sample format based on the output sample format.
261 * This is currently only used to request float output from decoders which
262 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
263 * Ideally this will be removed in the future when decoders do not do format
264 * conversion and only output in their native format.
266 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
269 /* if sample formats match or a decoder sample format has already been
270 requested, just return */
271 if (enc->sample_fmt == dec->sample_fmt ||
272 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
275 /* if decoder supports more than one output format */
276 if (dec_codec && dec_codec->sample_fmts &&
277 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
278 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
279 const enum AVSampleFormat *p;
/* Track the closest format below (dec) and above (inc) the encoder's
 * format, by a weighted distance between packed-format indices. */
280 int min_dec = INT_MAX, min_inc = INT_MAX;
281 enum AVSampleFormat dec_fmt = AV_SAMPLE_FMT_NONE;
282 enum AVSampleFormat inc_fmt = AV_SAMPLE_FMT_NONE;
284 /* find a matching sample format in the encoder */
285 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
286 if (*p == enc->sample_fmt) {
287 dec->request_sample_fmt = *p;
290 enum AVSampleFormat dfmt = av_get_packed_sample_fmt(*p);
291 enum AVSampleFormat efmt = av_get_packed_sample_fmt(enc->sample_fmt);
/* Base distance; NOTE(review): a planar-mismatch penalty presumably
 * follows in the elided lines — confirm against full source. */
292 int fmt_diff = 32 * abs(dfmt - efmt);
293 if (av_sample_fmt_is_planar(*p) !=
294 av_sample_fmt_is_planar(enc->sample_fmt))
299 } else if (dfmt > efmt) {
300 if (fmt_diff < min_inc) {
305 if (fmt_diff < min_dec) {
313 /* if none match, provide the one that matches quality closest */
314 dec->request_sample_fmt = min_inc != INT_MAX ? inc_fmt : dec_fmt;
/* Send one encoded packet to the muxer for stream ost: enforce -frames
 * limits, run the bitstream filter chain, fix non-monotonous DTS, update
 * byte/packet counters and interleave-write into s.
 * NOTE(review): listing elided; loop/brace structure incomplete below. */
318 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
320 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
321 AVCodecContext *avctx = ost->enc_ctx;
325 * Audio encoders may split the packets -- #frames in != #packets out.
326 * But there is no reordering, so we can limit the number of output packets
327 * by simply dropping them here.
328 * Counting encoded video frames needs to be done separately because of
329 * reordering, see do_video_out()
331 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
332 if (ost->frame_number >= ost->max_frames) {
/* Apply each bitstream filter in the chain; the filter may allocate a
 * new data buffer, which we wrap in an AVBufferRef for ownership. */
340 AVPacket new_pkt = *pkt;
341 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
342 &new_pkt.data, &new_pkt.size,
343 pkt->data, pkt->size,
344 pkt->flags & AV_PKT_FLAG_KEY);
347 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
348 av_buffer_default_free, NULL, 0);
352 av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
353 bsfc->filter->name, pkt->stream_index,
354 avctx->codec ? avctx->codec->name : "copy");
/* DTS monotonicity check: for formats that need timestamps, a DTS that
 * does not advance past last_mux_dts (strictly, unless TS_NONSTRICT) is
 * either fatal or patched to last_mux_dts + 1 with a warning. */
364 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
365 ost->last_mux_dts != AV_NOPTS_VALUE &&
366 pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
367 av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
368 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
369 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
371 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
374 av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
375 "in incorrect timestamps in the output file.\n",
376 ost->last_mux_dts + 1);
377 pkt->dts = ost->last_mux_dts + 1;
/* Keep PTS >= DTS after patching the DTS. */
378 if (pkt->pts != AV_NOPTS_VALUE)
379 pkt->pts = FFMAX(pkt->pts, pkt->dts);
381 ost->last_mux_dts = pkt->dts;
/* Accounting for the final/periodic statistics reports. */
383 ost->data_size += pkt->size;
384 ost->packets_written++;
386 pkt->stream_index = ost->index;
387 ret = av_interleaved_write_frame(s, pkt);
389 print_error("av_interleaved_write_frame()", ret);
/* Return whether ost may still encode: compares the stream's elapsed
 * output time (sync_opts - first_pts, in encoder time base) against the
 * file's -t recording_time (in AV_TIME_BASE). The branch bodies and
 * return statements are elided from this listing. */
394 static int check_recording_time(OutputStream *ost)
396 OutputFile *of = output_files[ost->file_index];
398 if (of->recording_time != INT64_MAX &&
399 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
400 AV_TIME_BASE_Q) >= 0) {
/* Encode one audio frame and mux the resulting packet (if any).
 * Assigns sync_opts-based PTS when the frame has none, updates encoded
 * sample/frame counters, rescales packet timestamps from encoder to
 * stream time base. Listing elided around the got_packet handling. */
407 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
410 AVCodecContext *enc = ost->enc_ctx;
414 av_init_packet(&pkt);
/* With no PTS (or negative audio sync), stamp the frame from our
 * running sample counter; then advance it past this frame. */
418 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
419 frame->pts = ost->sync_opts;
420 ost->sync_opts = frame->pts + frame->nb_samples;
422 ost->samples_encoded += frame->nb_samples;
423 ost->frames_encoded++;
425 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
426 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
431 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
432 write_frame(s, &pkt, ost);
/* Encode one subtitle and mux it. DVB subtitles are emitted as two
 * packets (draw + clear), hence the loop over nb. Uses a static 1 MiB
 * scratch buffer for the encoded payload. Listing elided in places. */
436 static void do_subtitle_out(AVFormatContext *s,
442 static uint8_t *subtitle_out = NULL;
443 int subtitle_out_max_size = 1024 * 1024;
444 int subtitle_out_size, nb, i;
448 if (pts == AV_NOPTS_VALUE) {
449 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared output buffer on first call. */
458 subtitle_out = av_malloc(subtitle_out_max_size);
461 /* Note: DVB subtitle need one packet to draw them and one other
462 packet to clear them */
463 /* XXX: signal it in the codec context ? */
464 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
469 for (i = 0; i < nb; i++) {
470 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
471 if (!check_recording_time(ost))
/* Normalize display timing: fold start_display_time into pts so that
 * start_display_time itself can be zeroed (required by the encoder). */
474 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
475 // start_display_time is required to be 0
476 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
477 sub->end_display_time -= sub->start_display_time;
478 sub->start_display_time = 0;
480 ost->frames_encoded++;
482 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
483 subtitle_out_max_size, sub);
484 if (subtitle_out_size < 0) {
485 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
489 av_init_packet(&pkt);
490 pkt.data = subtitle_out;
491 pkt.size = subtitle_out_size;
492 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
493 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
494 /* XXX: the pts correction is handled here. Maybe handling
495 it in the codec would be better */
/* 90 kHz ticks per millisecond: shift the draw packet to the start
 * time and the clear packet to the end time. */
497 pkt.pts += 90 * sub->start_display_time;
499 pkt.pts += 90 * sub->end_display_time;
501 write_frame(s, &pkt, ost);
/* Encode one video frame (or pass it through raw for AVFMT_RAWPICTURE
 * muxers): resolves the video sync mode, drops late frames, applies
 * forced keyframes and interlacing flags, encodes, muxes and reports the
 * packet size via *frame_size. Two-pass stats are appended to the log
 * file. NOTE(review): listing elided throughout. */
505 static void do_video_out(AVFormatContext *s,
510 int ret, format_video_sync;
512 AVCodecContext *enc = ost->enc_ctx;
/* VSYNC_AUTO resolves to passthrough (no timestamps), VFR (variable
 * fps muxer) or CFR depending on the output format's capabilities. */
516 format_video_sync = video_sync_method;
517 if (format_video_sync == VSYNC_AUTO)
518 format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
519 (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
/* Drop frames whose PTS is behind the output clock (unless passthrough). */
520 if (format_video_sync != VSYNC_PASSTHROUGH &&
522 in_picture->pts != AV_NOPTS_VALUE &&
523 in_picture->pts < ost->sync_opts) {
525 av_log(NULL, AV_LOG_WARNING,
526 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
527 ost->frame_number, ost->st->index, in_picture->pts);
531 if (in_picture->pts == AV_NOPTS_VALUE)
532 in_picture->pts = ost->sync_opts;
533 ost->sync_opts = in_picture->pts;
536 if (!ost->frame_number)
537 ost->first_pts = in_picture->pts;
539 av_init_packet(&pkt);
543 if (ost->frame_number >= ost->max_frames)
/* Raw-picture shortcut: the AVPicture struct itself is muxed to avoid
 * a copy; only valid for rawvideo into AVFMT_RAWPICTURE muxers. */
546 if (s->oformat->flags & AVFMT_RAWPICTURE &&
547 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
548 /* raw pictures are written as AVPicture structure to
549 avoid any copies. We support temporarily the older
551 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
552 enc->coded_frame->top_field_first = in_picture->top_field_first;
553 pkt.data = (uint8_t *)in_picture;
554 pkt.size = sizeof(AVPicture);
555 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
556 pkt.flags |= AV_PKT_FLAG_KEY;
558 write_frame(s, &pkt, ost);
/* Honor an explicit -top setting when interlaced coding is enabled. */
562 if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
563 ost->top_field_first >= 0)
564 in_picture->top_field_first = !!ost->top_field_first;
566 in_picture->quality = enc->global_quality;
567 in_picture->pict_type = 0;
/* Force an I-frame at the next user-requested keyframe timestamp. */
568 if (ost->forced_kf_index < ost->forced_kf_count &&
569 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
570 in_picture->pict_type = AV_PICTURE_TYPE_I;
571 ost->forced_kf_index++;
574 ost->frames_encoded++;
576 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
578 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
583 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
584 write_frame(s, &pkt, ost);
585 *frame_size = pkt.size;
587 /* if two pass, output log */
588 if (ost->logfile && enc->stats_out) {
589 fprintf(ost->logfile, "%s", enc->stats_out);
595 * For video, number of frames in == number of packets out.
596 * But there may be reordering, so we can't throw away frames on encoder
597 * flush, we need to limit them here, before they go into encoder.
/* Convert a normalized mean squared error d into PSNR in dB:
 * -10 * log10(d). Braces elided in this listing. */
602 static double psnr(double d)
604 return -10.0 * log(d) / log(10.0);
/* Append one line of per-frame video statistics (-vstats) to vstats_file:
 * frame number, quantizer, optional PSNR, frame size, time, bitrates and
 * picture type. Opens the file on first call. Listing elided in places. */
607 static void do_video_stats(OutputStream *ost, int frame_size)
611 double ti1, bitrate, avg_bitrate;
613 /* this is executed just the first time do_video_stats is called */
615 vstats_file = fopen(vstats_filename, "w");
623 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
624 frame_number = ost->frame_number;
625 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
626 if (enc->flags&CODEC_FLAG_PSNR)
627 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
629 fprintf(vstats_file,"f_size= %6d ", frame_size);
630 /* compute pts value */
631 ti1 = ost->sync_opts * av_q2d(enc->time_base);
/* Instantaneous bitrate of this frame and running average over the
 * stream duration so far, both in kbit/s. */
635 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
636 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
637 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
638 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
639 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
644 * Read one frame for lavfi output for ost and encode it.
646 static int poll_filter(OutputStream *ost)
648 OutputFile *of = output_files[ost->file_index];
649 AVFrame *filtered_frame = NULL;
/* Lazily allocate the reusable per-stream frame holder. */
652 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
653 return AVERROR(ENOMEM);
655 filtered_frame = ost->filtered_frame;
/* Fixed-frame-size audio encoders must be fed exactly frame_size
 * samples, so pull a sized chunk; otherwise take whatever the sink has. */
657 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
658 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
659 ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame,
660 ost->enc_ctx->frame_size);
662 ret = av_buffersink_get_frame(ost->filter->filter, filtered_frame);
/* Rescale the frame PTS from the filter's time base to the encoder's,
 * shifted by the output file's start time. */
667 if (filtered_frame->pts != AV_NOPTS_VALUE) {
668 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
669 filtered_frame->pts = av_rescale_q(filtered_frame->pts,
670 ost->filter->filter->inputs[0]->time_base,
671 ost->enc_ctx->time_base) -
672 av_rescale_q(start_time,
674 ost->enc_ctx->time_base);
677 switch (ost->filter->filter->inputs[0]->type) {
678 case AVMEDIA_TYPE_VIDEO:
679 if (!ost->frame_aspect_ratio)
680 ost->enc_ctx->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
682 do_video_out(of->ctx, ost, filtered_frame, &frame_size);
683 if (vstats_filename && frame_size)
684 do_video_stats(ost, frame_size);
686 case AVMEDIA_TYPE_AUDIO:
687 do_audio_out(of->ctx, ost, filtered_frame);
690 // TODO support subtitle filters
694 av_frame_unref(filtered_frame);
/* Mark every stream of ost's output file as finished so the main loop
 * stops pulling frames for them. NOTE(review): elided lines may also
 * mark ost itself / related state — confirm against full source. */
699 static void finish_output_stream(OutputStream *ost)
701 OutputFile *of = output_files[ost->file_index];
707 for (i = 0; i < of->ctx->nb_streams; i++)
708 output_streams[of->ost_index + i]->finished = 1;
713 * Read as many frames from possible from lavfi and encode them.
715 * Always read from the active stream with the lowest timestamp. If no frames
716 * are available for it then return EAGAIN and wait for more input. This way we
717 * can use lavfi sources that generate unlimited amount of frames without memory
720 static int poll_filters(void)
724 while (ret >= 0 && !received_sigterm) {
725 OutputStream *ost = NULL;
726 int64_t min_pts = INT64_MAX;
728 /* choose output stream with the lowest timestamp */
729 for (i = 0; i < nb_output_streams; i++) {
730 int64_t pts = output_streams[i]->sync_opts;
/* Skip streams with no filter attached or already finished. */
732 if (!output_streams[i]->filter || output_streams[i]->finished)
/* Compare in a common time base (rescale target elided here). */
735 pts = av_rescale_q(pts, output_streams[i]->enc_ctx->time_base,
739 ost = output_streams[i];
746 ret = poll_filter(ost);
748 if (ret == AVERROR_EOF) {
749 finish_output_stream(ost);
751 } else if (ret == AVERROR(EAGAIN))
/* Print the end-of-run summary: per-type output sizes and muxing
 * overhead at INFO level, then verbose per-input/per-output stream
 * packet/frame/sample statistics. Listing elided in places. */
758 static void print_final_stats(int64_t total_size)
760 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
761 uint64_t data_size = 0;
762 float percent = -1.0;
/* Sum output payload bytes per media type plus global-header bytes. */
765 for (i = 0; i < nb_output_streams; i++) {
766 OutputStream *ost = output_streams[i];
767 switch (ost->enc_ctx->codec_type) {
768 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
769 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
770 default: other_size += ost->data_size; break;
772 extra_size += ost->enc_ctx->extradata_size;
773 data_size += ost->data_size;
/* Muxing overhead = container bytes beyond raw payload, as a percent. */
776 if (data_size && total_size >= data_size)
777 percent = 100.0 * (total_size - data_size) / data_size;
779 av_log(NULL, AV_LOG_INFO, "\n");
780 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
784 extra_size / 1024.0);
786 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
788 av_log(NULL, AV_LOG_INFO, "unknown");
789 av_log(NULL, AV_LOG_INFO, "\n");
791 /* print verbose per-stream stats */
792 for (i = 0; i < nb_input_files; i++) {
793 InputFile *f = input_files[i];
794 uint64_t total_packets = 0, total_size = 0;
796 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
797 i, f->ctx->filename);
799 for (j = 0; j < f->nb_streams; j++) {
800 InputStream *ist = input_streams[f->ist_index + j];
801 enum AVMediaType type = ist->dec_ctx->codec_type;
803 total_size += ist->data_size;
804 total_packets += ist->nb_packets;
806 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
807 i, j, media_type_string(type));
808 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
809 ist->nb_packets, ist->data_size);
811 if (ist->decoding_needed) {
812 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
813 ist->frames_decoded);
814 if (type == AVMEDIA_TYPE_AUDIO)
815 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
816 av_log(NULL, AV_LOG_VERBOSE, "; ");
819 av_log(NULL, AV_LOG_VERBOSE, "\n");
822 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
823 total_packets, total_size);
/* Symmetric report for the output side, counting muxed packets. */
826 for (i = 0; i < nb_output_files; i++) {
827 OutputFile *of = output_files[i];
828 uint64_t total_packets = 0, total_size = 0;
830 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
831 i, of->ctx->filename);
833 for (j = 0; j < of->ctx->nb_streams; j++) {
834 OutputStream *ost = output_streams[of->ost_index + j];
835 enum AVMediaType type = ost->enc_ctx->codec_type;
837 total_size += ost->data_size;
838 total_packets += ost->packets_written;
840 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
841 i, j, media_type_string(type));
842 if (ost->encoding_needed) {
843 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
844 ost->frames_encoded);
845 if (type == AVMEDIA_TYPE_AUDIO)
846 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
847 av_log(NULL, AV_LOG_VERBOSE, "; ");
850 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
851 ost->packets_written, ost->data_size);
853 av_log(NULL, AV_LOG_VERBOSE, "\n");
856 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
857 total_packets, total_size);
/* Periodic (every 0.5 s) and final status line: frame count, fps, q,
 * QP histogram, optional PSNR, size, time and bitrate; ends with
 * print_final_stats() on the last report. Uses static state for rate
 * limiting and the histogram. Listing elided in places. */
861 static void print_report(int is_last_report, int64_t timer_start)
868 int frame_number, vid, i;
869 double bitrate, ti1, pts;
870 static int64_t last_time = -1;
871 static int qp_histogram[52];
873 if (!print_stats && !is_last_report)
876 if (!is_last_report) {
878 /* display the report every 0.5 seconds */
879 cur_time = av_gettime_relative();
880 if (last_time == -1) {
881 last_time = cur_time;
884 if ((cur_time - last_time) < 500000)
886 last_time = cur_time;
890 oc = output_files[0]->ctx;
/* Prefer the real file size; fall back to the write position for
 * non-seekable outputs. */
892 total_size = avio_size(oc->pb);
893 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
894 total_size = avio_tell(oc->pb);
895 if (total_size < 0) {
897 av_strerror(total_size, errbuf, sizeof(errbuf));
898 av_log(NULL, AV_LOG_VERBOSE, "Bitrate not available, "
899 "avio_tell() failed: %s\n", errbuf);
906 for (i = 0; i < nb_output_streams; i++) {
908 ost = output_streams[i];
910 if (!ost->stream_copy && enc->coded_frame)
911 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
/* First video stream drives the frame/fps fields; later video
 * streams only append their quantizer. */
912 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
913 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
915 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
916 float t = (av_gettime_relative() - timer_start) / 1000000.0;
918 frame_number = ost->frame_number;
919 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
920 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
922 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* QP histogram: log2-compressed per-bucket counts as hex digits. */
926 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
928 for (j = 0; j < 32; j++)
929 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
931 if (enc->flags&CODEC_FLAG_PSNR) {
933 double error, error_sum = 0;
934 double scale, scale_sum = 0;
935 char type[3] = { 'Y','U','V' };
936 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
937 for (j = 0; j < 3; j++) {
/* Final report uses the accumulated error over all frames;
 * periodic reports use the last coded frame's error. */
938 if (is_last_report) {
939 error = enc->error[j];
940 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
942 error = enc->coded_frame->error[j];
943 scale = enc->width * enc->height * 255.0 * 255.0;
949 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
951 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
955 /* compute min output value */
956 pts = (double)ost->last_mux_dts * av_q2d(ost->st->time_base);
957 if ((pts < ti1) && (pts > 0))
963 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
965 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
966 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
967 (double)total_size / 1024, ti1, bitrate);
970 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " drop=%d",
973 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
978 print_final_stats(total_size);
/* Drain delayed packets from every encoder at end of stream by calling
 * the encode function with a NULL frame until it stops producing
 * packets. Skips streams not being encoded, raw PCM-ish audio
 * (frame_size <= 1) and rawvideo passthrough. Listing elided. */
982 static void flush_encoders(void)
986 for (i = 0; i < nb_output_streams; i++) {
987 OutputStream *ost = output_streams[i];
988 AVCodecContext *enc = ost->enc_ctx;
989 AVFormatContext *os = output_files[ost->file_index]->ctx;
990 int stop_encoding = 0;
992 if (!ost->encoding_needed)
995 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
997 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* Pick the matching encode entry point for this media type. */
1001 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1004 switch (enc->codec_type) {
1005 case AVMEDIA_TYPE_AUDIO:
1006 encode = avcodec_encode_audio2;
1009 case AVMEDIA_TYPE_VIDEO:
1010 encode = avcodec_encode_video2;
1020 av_init_packet(&pkt);
/* NULL frame signals flush mode to the encoder. */
1024 ret = encode(enc, &pkt, NULL, &got_packet);
1026 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1029 if (ost->logfile && enc->stats_out) {
1030 fprintf(ost->logfile, "%s", enc->stats_out);
1036 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1037 write_frame(os, &pkt, ost);
1047 * Check whether a packet from ist should be written into ost at this time
1049 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1051 OutputFile *of = output_files[ost->file_index];
/* Global index of ist within the flat input_streams[] table. */
1052 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* Reject packets from other streams or before the output start time;
 * return statements elided from this listing. */
1054 if (ost->source_index != ist_index)
1057 if (of->start_time != AV_NOPTS_VALUE && ist->last_dts < of->start_time)
/* Copy one input packet to the output without re-encoding: enforce
 * -t limits on both sides, rescale pts/dts/duration into the output
 * stream time base (shifted by the output start time), optionally run
 * av_parser_change for a few codecs, and mux. Listing elided. */
1063 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1065 OutputFile *of = output_files[ost->file_index];
1066 InputFile *f = input_files [ist->file_index];
1067 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1068 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1071 av_init_packet(&opkt);
/* Before the first keyframe, drop non-key packets unless the user
 * asked to copy initial non-keyframes. */
1073 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1074 !ost->copy_initial_nonkeyframes)
1077 if (of->recording_time != INT64_MAX &&
1078 ist->last_dts >= of->recording_time + start_time) {
/* Input-side -t limit, measured from the input file's start time. */
1083 if (f->recording_time != INT64_MAX) {
1084 start_time = f->ctx->start_time;
1085 if (f->start_time != AV_NOPTS_VALUE)
1086 start_time += f->start_time;
1087 if (ist->last_dts >= f->recording_time + start_time) {
1093 /* force the input stream PTS */
1094 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1097 if (pkt->pts != AV_NOPTS_VALUE)
1098 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1100 opkt.pts = AV_NOPTS_VALUE;
/* Missing DTS: synthesize it from the last known input DTS. */
1102 if (pkt->dts == AV_NOPTS_VALUE)
1103 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
1105 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1106 opkt.dts -= ost_tb_start_time;
1108 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1109 opkt.flags = pkt->flags;
1111 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1112 if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1113 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1114 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1115 && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
1117 if (av_parser_change(ost->parser, ost->st->codec,
1118 &opkt.data, &opkt.size,
1119 pkt->data, pkt->size,
1120 pkt->flags & AV_PKT_FLAG_KEY)) {
/* Parser allocated a new payload: wrap it so the muxer owns it. */
1121 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1126 opkt.data = pkt->data;
1127 opkt.size = pkt->size;
1130 write_frame(of->ctx, &opkt, ost);
/* If the decoder reported no channel layout, fall back to the default
 * layout for its channel count and warn the user. Return statements are
 * elided in this listing (visible code returns 0 on failure to guess). */
1133 int guess_input_channel_layout(InputStream *ist)
1135 AVCodecContext *dec = ist->dec_ctx;
1137 if (!dec->channel_layout) {
1138 char layout_name[256];
1140 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1141 if (!dec->channel_layout)
1143 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1144 dec->channels, dec->channel_layout);
1145 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1146 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Decode one audio packet into ist->decoded_frame and feed the result to
 * every filtergraph input attached to this stream. On EOF/no-output,
 * flushes the buffersrc inputs with a NULL frame. Detects mid-stream
 * format changes and reconfigures affected filtergraphs.
 * NOTE(review): listing elided throughout; kept byte-identical. */
1151 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1153 AVFrame *decoded_frame, *f;
1154 AVCodecContext *avctx = ist->dec_ctx;
1155 int i, ret, err = 0, resample_changed;
/* Lazily allocate the reusable decode/filter frame holders. */
1157 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1158 return AVERROR(ENOMEM);
1159 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1160 return AVERROR(ENOMEM);
1161 decoded_frame = ist->decoded_frame;
1163 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
/* No frame produced: pass a NULL frame downstream to flush filters
 * (flush condition partially elided). */
1164 if (!*got_output || ret < 0) {
1166 for (i = 0; i < ist->nb_filters; i++)
1167 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
1172 ist->samples_decoded += decoded_frame->nb_samples;
1173 ist->frames_decoded++;
1175 /* if the decoder provides a pts, use it instead of the last packet pts.
1176 the decoder could be delaying output by a packet or more. */
1177 if (decoded_frame->pts != AV_NOPTS_VALUE)
1178 ist->next_dts = decoded_frame->pts;
1179 else if (pkt->pts != AV_NOPTS_VALUE)
1180 decoded_frame->pts = pkt->pts;
1181 pkt->pts = AV_NOPTS_VALUE;
/* Any change in format/rate/channels/layout requires filtergraph
 * reconfiguration below. */
1183 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1184 ist->resample_channels != avctx->channels ||
1185 ist->resample_channel_layout != decoded_frame->channel_layout ||
1186 ist->resample_sample_rate != decoded_frame->sample_rate;
1187 if (resample_changed) {
1188 char layout1[64], layout2[64];
1190 if (!guess_input_channel_layout(ist)) {
1191 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1192 "layout for Input Stream #%d.%d\n", ist->file_index,
1196 decoded_frame->channel_layout = avctx->channel_layout;
1198 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1199 ist->resample_channel_layout);
1200 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1201 decoded_frame->channel_layout);
1203 av_log(NULL, AV_LOG_INFO,
1204 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1205 ist->file_index, ist->st->index,
1206 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1207 ist->resample_channels, layout1,
1208 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1209 avctx->channels, layout2);
/* Remember the new parameters and rebuild every graph this stream
 * feeds so its buffersrc matches the new format. */
1211 ist->resample_sample_fmt = decoded_frame->format;
1212 ist->resample_sample_rate = decoded_frame->sample_rate;
1213 ist->resample_channel_layout = decoded_frame->channel_layout;
1214 ist->resample_channels = avctx->channels;
1216 for (i = 0; i < nb_filtergraphs; i++)
1217 if (ist_in_filtergraph(filtergraphs[i], ist) &&
1218 configure_filtergraph(filtergraphs[i]) < 0) {
1219 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* Rescale PTS into a 1/sample_rate time base for the filters
 * (source time base elided). */
1224 if (decoded_frame->pts != AV_NOPTS_VALUE)
1225 decoded_frame->pts = av_rescale_q(decoded_frame->pts,
1227 (AVRational){1, avctx->sample_rate});
/* Feed all attached filtergraph inputs; all but the last get a ref so
 * each buffersrc owns its own reference. */
1228 for (i = 0; i < ist->nb_filters; i++) {
1229 if (i < ist->nb_filters - 1) {
1230 f = ist->filter_frame;
1231 err = av_frame_ref(f, decoded_frame);
1237 err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
1242 av_frame_unref(ist->filter_frame);
1243 av_frame_unref(decoded_frame);
1244 return err < 0 ? err : ret;
/*
 * Decode one video packet for an input stream and hand the decoded
 * frame to every filtergraph input fed by this stream.
 * NOTE(review): this chunk is an elided view of the original file —
 * intermediate lines are missing; comments describe only visible code.
 */
1247 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1249 AVFrame *decoded_frame, *f;
1250 int i, ret = 0, err = 0, resample_changed;
/* lazily allocate the persistent decode/filter frames on first use */
1252 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1253 return AVERROR(ENOMEM);
1254 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1255 return AVERROR(ENOMEM);
1256 decoded_frame = ist->decoded_frame;
1258 ret = avcodec_decode_video2(ist->dec_ctx,
1259 decoded_frame, got_output, pkt);
1260 if (!*got_output || ret < 0) {
/* no frame produced: flush each buffersrc with NULL (presumably on EOF) */
1262 for (i = 0; i < ist->nb_filters; i++)
1263 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
1268 ist->frames_decoded++;
/* transfer the frame out of hwaccel memory when one produced it */
1270 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
1271 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
1275 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
/* reconstruct a usable pts from pkt_pts/pkt_dts heuristics */
1277 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
1278 decoded_frame->pkt_dts);
/* container-level SAR overrides the frame's SAR when set */
1281 if (ist->st->sample_aspect_ratio.num)
1282 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* geometry/pixel-format change means filtergraphs must be rebuilt */
1284 resample_changed = ist->resample_width != decoded_frame->width ||
1285 ist->resample_height != decoded_frame->height ||
1286 ist->resample_pix_fmt != decoded_frame->format;
1287 if (resample_changed) {
1288 av_log(NULL, AV_LOG_INFO,
1289 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1290 ist->file_index, ist->st->index,
1291 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
1292 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
/* drain frames buffered in the old graph before reconfiguring it */
1294 ret = poll_filters();
1295 if (ret < 0 && (ret != AVERROR_EOF && ret != AVERROR(EAGAIN)))
1296 av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
1298 ist->resample_width = decoded_frame->width;
1299 ist->resample_height = decoded_frame->height;
1300 ist->resample_pix_fmt = decoded_frame->format;
1302 for (i = 0; i < nb_filtergraphs; i++)
1303 if (ist_in_filtergraph(filtergraphs[i], ist) &&
1304 configure_filtergraph(filtergraphs[i]) < 0) {
1305 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* feed every filter; take a ref for all consumers but the last */
1310 for (i = 0; i < ist->nb_filters; i++) {
1311 if (i < ist->nb_filters - 1) {
1312 f = ist->filter_frame;
1313 err = av_frame_ref(f, decoded_frame);
1319 err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
1325 av_frame_unref(ist->filter_frame);
1326 av_frame_unref(decoded_frame);
1327 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet and encode/mux it directly into every
 * output stream that takes this input (subtitles bypass lavfi).
 */
1330 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
1332 AVSubtitle subtitle;
1333 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
1334 &subtitle, got_output, pkt);
1340 ist->frames_decoded++;
1342 for (i = 0; i < nb_output_streams; i++) {
1343 OutputStream *ost = output_streams[i];
/* skip outputs that don't map to this input or are streamcopy */
1345 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1348 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
/* release the decoded subtitle after all outputs consumed it */
1351 avsubtitle_free(&subtitle);
1355 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Route one demuxed packet to the stream's decoder (audio/video/
 * subtitle), or handle it as streamcopy when no decoding is needed.
 * Also maintains the last_dts/next_dts estimates used elsewhere for
 * input-file selection.
 * NOTE(review): elided view — several lines are missing from this body.
 */
1356 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
1362 if (ist->next_dts == AV_NOPTS_VALUE)
1363 ist->next_dts = ist->last_dts;
1367 av_init_packet(&avpkt);
/* seed the dts estimate from the packet, rescaled to AV_TIME_BASE */
1375 if (pkt->dts != AV_NOPTS_VALUE)
1376 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1378 // while we have more to decode or while the decoder did output something on EOF
1379 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
1383 ist->last_dts = ist->next_dts;
/* decoder consumed only part of the packet: warn once per stream */
1385 if (avpkt.size && avpkt.size != pkt->size &&
1386 !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
1387 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
1388 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1389 ist->showed_multi_packet_warning = 1;
1392 switch (ist->dec_ctx->codec_type) {
1393 case AVMEDIA_TYPE_AUDIO:
1394 ret = decode_audio (ist, &avpkt, &got_output);
1396 case AVMEDIA_TYPE_VIDEO:
1397 ret = decode_video (ist, &avpkt, &got_output);
/* advance next_dts: packet duration, else avg_frame_rate, else codec framerate */
1399 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
1400 else if (ist->st->avg_frame_rate.num)
1401 ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
1403 else if (ist->dec_ctx->framerate.num != 0) {
1404 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
1405 ist->dec_ctx->ticks_per_frame;
1406 ist->next_dts += av_rescale_q(ticks, ist->dec_ctx->framerate, AV_TIME_BASE_Q);
1409 case AVMEDIA_TYPE_SUBTITLE:
1410 ret = transcode_subtitles(ist, &avpkt, &got_output);
1418 // touch data and size only if not EOF
1428 /* handle stream copy */
1429 if (!ist->decoding_needed) {
1430 ist->last_dts = ist->next_dts;
1431 switch (ist->dec_ctx->codec_type) {
1432 case AVMEDIA_TYPE_AUDIO:
/* audio: advance by one frame of samples at the stream sample rate */
1433 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
1434 ist->dec_ctx->sample_rate;
1436 case AVMEDIA_TYPE_VIDEO:
1437 if (ist->dec_ctx->framerate.num != 0) {
1438 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
1439 ist->next_dts += ((int64_t)AV_TIME_BASE *
1440 ist->dec_ctx->framerate.den * ticks) /
1441 ist->dec_ctx->framerate.num;
/* copy the packet to every matching streamcopy output */
1446 for (i = 0; pkt && i < nb_output_streams; i++) {
1447 OutputStream *ost = output_streams[i];
1449 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1452 do_streamcopy(ist, ost, pkt);
/* Print an SDP description covering all output files to stdout. */
1458 static void print_sdp(void)
1462 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
/* NOTE(review): av_malloc result is not checked on the visible lines */
1466 for (i = 0; i < nb_output_files; i++)
1467 avc[i] = output_files[i]->ctx;
1469 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1470 printf("SDP:\n%s\n", sdp);
/*
 * Find the entry in the hwaccels[] table matching the given pixel
 * format; the table is terminated by an entry with a NULL name.
 */
1475 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
1478 for (i = 0; hwaccels[i].name; i++)
1479 if (hwaccels[i].pix_fmt == pix_fmt)
1480 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's candidate
 * pixel formats and select the first hwaccel format that can be
 * initialized, subject to the user's -hwaccel choice; otherwise fall
 * through to a software format.
 */
1484 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
1486 InputStream *ist = s->opaque;
1487 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE) */
1490 for (p = pix_fmts; *p != -1; p++) {
1491 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
1492 const HWAccel *hwaccel;
/* only hardware-accelerated formats are candidates here */
1494 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1497 hwaccel = get_hwaccel(*p);
/* skip hwaccels conflicting with the active or user-requested one */
1499 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
1500 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
1503 ret = hwaccel->init(s);
/* init failure is fatal only when this hwaccel was explicitly asked for */
1505 if (ist->hwaccel_id == hwaccel->id) {
1506 av_log(NULL, AV_LOG_FATAL,
1507 "%s hwaccel requested for input stream #%d:%d, "
1508 "but cannot be initialized.\n", hwaccel->name,
1509 ist->file_index, ist->st->index);
1514 ist->active_hwaccel_id = hwaccel->id;
1515 ist->hwaccel_pix_fmt = *p;
/*
 * AVCodecContext.get_buffer2 callback: delegate to the hwaccel's
 * buffer allocator for hardware frames, default allocator otherwise.
 */
1522 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
1524 InputStream *ist = s->opaque;
1526 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
1527 return ist->hwaccel_get_buffer(s, frame, flags);
1529 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for one input stream (when decoding is needed) and
 * initialize its timestamp bookkeeping.  On failure an explanatory
 * message is written into 'error' and a negative AVERROR is returned.
 */
1532 static int init_input_stream(int ist_index, char *error, int error_len)
1535 InputStream *ist = input_streams[ist_index];
1536 if (ist->decoding_needed) {
1537 AVCodec *codec = ist->dec;
1539 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
1540 ist->dec_ctx->codec_id, ist->file_index, ist->st->index);
1541 return AVERROR(EINVAL);
1544 /* update requested sample format for the decoder based on the
1545 corresponding encoder sample format */
1546 for (i = 0; i < nb_output_streams; i++) {
1547 OutputStream *ost = output_streams[i];
1548 if (ost->source_index == ist_index) {
1549 update_sample_fmt(ist->dec_ctx, codec, ost->enc_ctx);
/* install our callbacks; opaque lets them recover the InputStream */
1554 ist->dec_ctx->opaque = ist;
1555 ist->dec_ctx->get_format = get_format;
1556 ist->dec_ctx->get_buffer2 = get_buffer;
1557 ist->dec_ctx->thread_safe_callbacks = 1;
1559 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* default to automatic thread count unless the user set one */
1561 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
1562 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
1563 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
1565 if (ret == AVERROR_EXPERIMENTAL)
1566 abort_codec_experimental(codec, 0);
1568 av_strerror(ret, errbuf, sizeof(errbuf));
1570 snprintf(error, error_len,
1571 "Error while opening decoder for input stream "
1573 ist->file_index, ist->st->index, errbuf);
1576 assert_avoptions(ist->decoder_opts);
/* start last_dts in the past to account for B-frame decoder delay */
1579 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1580 ist->next_dts = AV_NOPTS_VALUE;
1581 init_pts_correction(&ist->pts_ctx);
/*
 * Return the input stream feeding this output stream: the directly
 * mapped source when one exists, otherwise the first input of the
 * output's filtergraph with a matching media type.
 */
1586 static InputStream *get_input_stream(OutputStream *ost)
1588 if (ost->source_index >= 0)
1589 return input_streams[ost->source_index];
1592 FilterGraph *fg = ost->filter->graph;
1595 for (i = 0; i < fg->nb_inputs; i++)
1596 if (fg->inputs[i]->ist->dec_ctx->codec_type == ost->enc_ctx->codec_type)
1597 return fg->inputs[i]->ist;
/*
 * Parse the comma-separated forced-keyframe time list 'kf' into
 * ost->forced_kf_pts, rescaling each time to the encoder time base.
 * Allocation failure is fatal.
 */
1603 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1604 AVCodecContext *avctx)
/* first pass over the string (presumably counting entries into n) */
1610 for (p = kf; *p; p++)
1613 ost->forced_kf_count = n;
1614 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
1615 if (!ost->forced_kf_pts) {
1616 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* second pass: parse each time value and rescale it */
1621 for (i = 0; i < n; i++) {
1622 char *next = strchr(p, ',');
1627 t = parse_time_or_die("force_key_frames", p, 1);
1628 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/*
 * Set the "encoder" metadata tag on the output stream: the encoder
 * name, prefixed with the libavcodec ident unless bitexact output was
 * requested via -fflags.
 */
1634 static void set_encoder_id(OutputFile *of, OutputStream *ost)
1636 AVDictionaryEntry *e;
1638 uint8_t *encoder_string;
1639 int encoder_string_len;
1640 int format_flags = 0;
/* re-evaluate the user's fflags string to detect AVFMT_FLAG_BITEXACT */
1642 e = av_dict_get(of->opts, "fflags", NULL, 0);
1644 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
1647 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
1650 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
1651 encoder_string = av_mallocz(encoder_string_len);
1652 if (!encoder_string)
1655 if (!(format_flags & AVFMT_FLAG_BITEXACT))
1656 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
1657 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict */
1658 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
1659 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * One-time setup before the main transcode loop: compute encoding
 * parameters for every output stream (streamcopy vs. encode), open
 * all encoders and decoders, write output file headers, and log the
 * stream mapping.  Returns 0 on success, a negative AVERROR on error.
 * NOTE(review): heavily elided view — many lines of this function are
 * missing; comments below only describe the visible code.
 */
1662 static int transcode_init(void)
1664 int ret = 0, i, j, k;
1665 AVFormatContext *oc;
1671 /* init framerate emulation */
1672 for (i = 0; i < nb_input_files; i++) {
1673 InputFile *ifile = input_files[i];
1674 if (ifile->rate_emu)
1675 for (j = 0; j < ifile->nb_streams; j++)
1676 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
1679 /* output stream init */
1680 for (i = 0; i < nb_output_files; i++) {
1681 oc = output_files[i]->ctx;
1682 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
1683 av_dump_format(oc, i, oc->filename, 1);
1684 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
1685 return AVERROR(EINVAL);
1689 /* init complex filtergraphs */
1690 for (i = 0; i < nb_filtergraphs; i++)
1691 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
1694 /* for each output stream, we compute the right encoding parameters */
1695 for (i = 0; i < nb_output_streams; i++) {
1696 AVCodecContext *enc_ctx;
1697 AVCodecContext *dec_ctx = NULL;
1698 ost = output_streams[i];
1699 oc = output_files[ost->file_index]->ctx;
1700 ist = get_input_stream(ost);
1702 if (ost->attachment_filename)
1705 enc_ctx = ost->enc_ctx;
1708 dec_ctx = ist->dec_ctx;
1710 ost->st->disposition = ist->st->disposition;
1711 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
1712 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* ---- streamcopy branch: copy codec parameters from the input ---- */
1715 if (ost->stream_copy) {
1717 uint64_t extra_size;
1719 av_assert0(ist && !ost->filter);
1721 extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
1723 if (extra_size > INT_MAX) {
1724 return AVERROR(EINVAL);
1727 /* if stream_copy is selected, no need to decode or encode */
1728 enc_ctx->codec_id = dec_ctx->codec_id;
1729 enc_ctx->codec_type = dec_ctx->codec_type;
/* keep the input codec tag only if the output container accepts it */
1731 if (!enc_ctx->codec_tag) {
1732 if (!oc->oformat->codec_tag ||
1733 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
1734 av_codec_get_tag(oc->oformat->codec_tag, dec_ctx->codec_id) <= 0)
1735 enc_ctx->codec_tag = dec_ctx->codec_tag;
1738 enc_ctx->bit_rate = dec_ctx->bit_rate;
1739 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
1740 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
1741 enc_ctx->field_order = dec_ctx->field_order;
1742 enc_ctx->extradata = av_mallocz(extra_size);
1743 if (!enc_ctx->extradata) {
1744 return AVERROR(ENOMEM);
1746 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
1747 enc_ctx->extradata_size = dec_ctx->extradata_size;
1749 enc_ctx->time_base = dec_ctx->time_base;
1750 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
1751 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
1752 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
1754 enc_ctx->time_base = ist->st->time_base;
/* deep-copy stream-level side data to the output stream */
1756 if (ist->st->nb_side_data) {
1757 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
1758 sizeof(*ist->st->side_data));
1759 if (!ost->st->side_data)
1760 return AVERROR(ENOMEM);
1762 for (j = 0; j < ist->st->nb_side_data; j++) {
1763 const AVPacketSideData *sd_src = &ist->st->side_data[j];
1764 AVPacketSideData *sd_dst = &ost->st->side_data[j];
1766 sd_dst->data = av_malloc(sd_src->size);
1768 return AVERROR(ENOMEM);
1769 memcpy(sd_dst->data, sd_src->data, sd_src->size);
1770 sd_dst->size = sd_src->size;
1771 sd_dst->type = sd_src->type;
1772 ost->st->nb_side_data++;
1776 ost->parser = av_parser_init(enc_ctx->codec_id);
1778 switch (enc_ctx->codec_type) {
1779 case AVMEDIA_TYPE_AUDIO:
1780 if (audio_volume != 256) {
1781 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
1784 enc_ctx->channel_layout = dec_ctx->channel_layout;
1785 enc_ctx->sample_rate = dec_ctx->sample_rate;
1786 enc_ctx->channels = dec_ctx->channels;
1787 enc_ctx->frame_size = dec_ctx->frame_size;
1788 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
1789 enc_ctx->block_align = dec_ctx->block_align;
1791 case AVMEDIA_TYPE_VIDEO:
1792 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
1793 enc_ctx->width = dec_ctx->width;
1794 enc_ctx->height = dec_ctx->height;
1795 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
/* SAR priority: -aspect option, then container SAR, then codec SAR */
1796 if (ost->frame_aspect_ratio)
1797 sar = av_d2q(ost->frame_aspect_ratio * enc_ctx->height / enc_ctx->width, 255);
1798 else if (ist->st->sample_aspect_ratio.num)
1799 sar = ist->st->sample_aspect_ratio;
1801 sar = dec_ctx->sample_aspect_ratio;
1802 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
1804 case AVMEDIA_TYPE_SUBTITLE:
1805 enc_ctx->width = dec_ctx->width;
1806 enc_ctx->height = dec_ctx->height;
1808 case AVMEDIA_TYPE_DATA:
1809 case AVMEDIA_TYPE_ATTACHMENT:
/* ---- encode branch: derive parameters from the filtergraph ---- */
1816 /* should only happen when a default codec is not present. */
1817 snprintf(error, sizeof(error), "Automatic encoder selection "
1818 "failed for output stream #%d:%d. Default encoder for "
1819 "format %s is probably disabled. Please choose an "
1820 "encoder manually.\n", ost->file_index, ost->index,
1822 ret = AVERROR(EINVAL);
1827 ist->decoding_needed = 1;
1828 ost->encoding_needed = 1;
1830 set_encoder_id(output_files[ost->file_index], ost);
1833 * We want CFR output if and only if one of those is true:
1834 * 1) user specified output framerate with -r
1835 * 2) user specified -vsync cfr
1836 * 3) output format is CFR and the user didn't force vsync to
1837 * something else than CFR
1839 * in such a case, set ost->frame_rate
1841 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO &&
1842 !ost->frame_rate.num && ist &&
1843 (video_sync_method == VSYNC_CFR ||
1844 (video_sync_method == VSYNC_AUTO &&
1845 !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
1846 if (ist->framerate.num)
1847 ost->frame_rate = ist->framerate;
1848 else if (ist->st->avg_frame_rate.num)
1849 ost->frame_rate = ist->st->avg_frame_rate;
1851 av_log(NULL, AV_LOG_WARNING, "Constant framerate requested "
1852 "for the output stream #%d:%d, but no information "
1853 "about the input framerate is available. Falling "
1854 "back to a default value of 25fps. Use the -r option "
1855 "if you want a different framerate.\n",
1856 ost->file_index, ost->index);
1857 ost->frame_rate = (AVRational){ 25, 1 };
/* snap to the nearest framerate the encoder supports */
1860 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
1861 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
1862 ost->frame_rate = ost->enc->supported_framerates[idx];
1867 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
1868 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1870 fg = init_simple_filtergraph(ist, ost);
1871 if (configure_filtergraph(fg)) {
1872 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* take the encoder's input parameters from the buffersink's link */
1877 switch (enc_ctx->codec_type) {
1878 case AVMEDIA_TYPE_AUDIO:
1879 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
1880 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
1881 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
1882 enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
1883 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
1885 case AVMEDIA_TYPE_VIDEO:
1886 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
1888 enc_ctx->width = ost->filter->filter->inputs[0]->w;
1889 enc_ctx->height = ost->filter->filter->inputs[0]->h;
1890 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
1891 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
1892 av_d2q(ost->frame_aspect_ratio * enc_ctx->height/enc_ctx->width, 255) :
1893 ost->filter->filter->inputs[0]->sample_aspect_ratio;
1894 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
1896 ost->st->avg_frame_rate = ost->frame_rate;
/* geometry or format changed by filters: raw-sample count no longer applies */
1899 (enc_ctx->width != dec_ctx->width ||
1900 enc_ctx->height != dec_ctx->height ||
1901 enc_ctx->pix_fmt != dec_ctx->pix_fmt)) {
1902 enc_ctx->bits_per_raw_sample = 0;
1905 if (ost->forced_keyframes)
1906 parse_forced_key_frames(ost->forced_keyframes, ost,
1909 case AVMEDIA_TYPE_SUBTITLE:
1910 enc_ctx->time_base = (AVRational){1, 1000};
/* two-pass encoding: set up the pass log file */
1917 if ((enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1918 char logfilename[1024];
1921 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1922 ost->logfile_prefix ? ost->logfile_prefix :
1923 DEFAULT_PASS_LOGFILENAME_PREFIX,
/* libx264 manages its own stats file via a private option */
1925 if (!strcmp(ost->enc->name, "libx264")) {
1926 av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
1928 if (enc_ctx->flags & CODEC_FLAG_PASS1) {
1929 f = fopen(logfilename, "wb");
1931 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
1932 logfilename, strerror(errno));
1938 size_t logbuffer_size;
1939 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
1940 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
1944 enc_ctx->stats_in = logbuffer;
1951 /* open each encoder */
1952 for (i = 0; i < nb_output_streams; i++) {
1953 ost = output_streams[i];
1954 if (ost->encoding_needed) {
1955 AVCodec *codec = ost->enc;
1956 AVCodecContext *dec = NULL;
1958 if ((ist = get_input_stream(ost)))
/* propagate the decoder's subtitle header to the encoder */
1960 if (dec && dec->subtitle_header) {
1961 ost->enc_ctx->subtitle_header = av_malloc(dec->subtitle_header_size);
1962 if (!ost->enc_ctx->subtitle_header) {
1963 ret = AVERROR(ENOMEM);
1966 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
1967 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
1969 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
1970 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
1971 av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
1973 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
1974 if (ret == AVERROR_EXPERIMENTAL)
1975 abort_codec_experimental(codec, 1);
1976 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
1977 ost->file_index, ost->index);
1980 assert_avoptions(ost->encoder_opts);
1981 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
1982 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
1983 "It takes bits/s as argument, not kbits/s\n");
1985 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
/* mirror the final parameters into the stream's public codec context */
1990 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
1992 av_log(NULL, AV_LOG_FATAL,
1993 "Error initializing the output stream codec context.\n");
1997 ost->st->time_base = ost->enc_ctx->time_base;
2000 /* init input streams */
2001 for (i = 0; i < nb_input_streams; i++)
2002 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2005 /* discard unused programs */
2006 for (i = 0; i < nb_input_files; i++) {
2007 InputFile *ifile = input_files[i];
2008 for (j = 0; j < ifile->ctx->nb_programs; j++) {
2009 AVProgram *p = ifile->ctx->programs[j];
2010 int discard = AVDISCARD_ALL;
2012 for (k = 0; k < p->nb_stream_indexes; k++)
2013 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2014 discard = AVDISCARD_DEFAULT;
2017 p->discard = discard;
2021 /* open files and write file headers */
2022 for (i = 0; i < nb_output_files; i++) {
2023 oc = output_files[i]->ctx;
2024 oc->interrupt_callback = int_cb;
2025 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
2027 av_strerror(ret, errbuf, sizeof(errbuf));
2028 snprintf(error, sizeof(error),
2029 "Could not write header for output file #%d "
2030 "(incorrect codec parameters ?): %s",
2032 ret = AVERROR(EINVAL);
2035 assert_avoptions(output_files[i]->opts);
2036 if (strcmp(oc->oformat->name, "rtp")) {
2042 /* dump the file output parameters - cannot be done before in case
2044 for (i = 0; i < nb_output_files; i++) {
2045 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2048 /* dump the stream mapping */
2049 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2050 for (i = 0; i < nb_input_streams; i++) {
2051 ist = input_streams[i];
2053 for (j = 0; j < ist->nb_filters; j++) {
2054 if (ist->filters[j]->graph->graph_desc) {
2055 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2056 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2057 ist->filters[j]->name);
2058 if (nb_filtergraphs > 1)
2059 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2060 av_log(NULL, AV_LOG_INFO, "\n");
2065 for (i = 0; i < nb_output_streams; i++) {
2066 ost = output_streams[i];
2068 if (ost->attachment_filename) {
2069 /* an attached file */
2070 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2071 ost->attachment_filename, ost->file_index, ost->index);
2075 if (ost->filter && ost->filter->graph->graph_desc) {
2076 /* output from a complex graph */
2077 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
2078 if (nb_filtergraphs > 1)
2079 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2081 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2082 ost->index, ost->enc ? ost->enc->name : "?");
2086 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2087 input_streams[ost->source_index]->file_index,
2088 input_streams[ost->source_index]->st->index,
2091 if (ost->sync_ist != input_streams[ost->source_index])
2092 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2093 ost->sync_ist->file_index,
2094 ost->sync_ist->st->index);
2095 if (ost->stream_copy)
2096 av_log(NULL, AV_LOG_INFO, " (copy)");
/* show "native" when the decoder/encoder name equals the codec name */
2098 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
2099 const AVCodec *out_codec = ost->enc;
2100 const char *decoder_name = "?";
2101 const char *in_codec_name = "?";
2102 const char *encoder_name = "?";
2103 const char *out_codec_name = "?";
2106 decoder_name = in_codec->name;
2107 in_codec_name = avcodec_descriptor_get(in_codec->id)->name;
2108 if (!strcmp(decoder_name, in_codec_name))
2109 decoder_name = "native";
2113 encoder_name = out_codec->name;
2114 out_codec_name = avcodec_descriptor_get(out_codec->id)->name;
2115 if (!strcmp(encoder_name, out_codec_name))
2116 encoder_name = "native";
2119 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
2120 in_codec_name, decoder_name,
2121 out_codec_name, encoder_name);
2123 av_log(NULL, AV_LOG_INFO, "\n");
2127 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2138 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
2139 static int need_output(void)
2143 for (i = 0; i < nb_output_streams; i++) {
2144 OutputStream *ost = output_streams[i];
2145 OutputFile *of = output_files[ost->file_index];
2146 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* skip streams that are finished or whose file hit the -fs size limit */
2148 if (ost->finished ||
2149 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: mark every stream of this file finished */
2151 if (ost->frame_number >= ost->max_frames) {
2153 for (j = 0; j < of->ctx->nb_streams; j++)
2154 output_streams[of->ost_index + j]->finished = 1;
/*
 * Pick the input file to read next: the one containing the
 * non-discarded, non-EOF, non-EAGAIN stream with the lowest last_dts.
 * Returns NULL when no candidate is found (see caller).
 */
2164 static InputFile *select_input_file(void)
2166 InputFile *ifile = NULL;
2167 int64_t ipts_min = INT64_MAX;
2170 for (i = 0; i < nb_input_streams; i++) {
2171 InputStream *ist = input_streams[i];
2172 int64_t ipts = ist->last_dts;
2174 if (ist->discard || input_files[ist->file_index]->eagain)
2176 if (!input_files[ist->file_index]->eof_reached) {
2177 if (ipts < ipts_min) {
2179 ifile = input_files[ist->file_index];
/*
 * Per-input-file demuxer thread: read packets and push them into the
 * file's FIFO, blocking while the FIFO is full.  Stops when
 * transcoding_finished is set or a non-EAGAIN read error occurs.
 */
2188 static void *input_thread(void *arg)
2193 while (!transcoding_finished && ret >= 0) {
2195 ret = av_read_frame(f->ctx, &pkt);
2197 if (ret == AVERROR(EAGAIN)) {
/* wait until the consumer makes room in the FIFO */
2204 pthread_mutex_lock(&f->fifo_lock);
2205 while (!av_fifo_space(f->fifo))
2206 pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
/* duplicate the packet data so it outlives the next av_read_frame */
2208 av_dup_packet(&pkt);
2209 av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
2211 pthread_mutex_unlock(&f->fifo_lock);
/*
 * Signal all input threads to exit, drain and free their packet FIFOs,
 * and join the threads.  A single input runs unthreaded, so this is a
 * no-op in that case.
 */
2218 static void free_input_threads(void)
2222 if (nb_input_files == 1)
2225 transcoding_finished = 1;
2227 for (i = 0; i < nb_input_files; i++) {
2228 InputFile *f = input_files[i];
/* skip files whose thread never started or was already joined */
2231 if (!f->fifo || f->joined)
/* drain the FIFO and wake the producer so it can observe the flag */
2234 pthread_mutex_lock(&f->fifo_lock);
2235 while (av_fifo_size(f->fifo)) {
2236 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2237 av_free_packet(&pkt);
2239 pthread_cond_signal(&f->fifo_cond);
2240 pthread_mutex_unlock(&f->fifo_lock);
2242 pthread_join(f->thread, NULL);
/* free any packets written between the drain above and the join */
2245 while (av_fifo_size(f->fifo)) {
2246 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2247 av_free_packet(&pkt);
2249 av_fifo_free(f->fifo);
/*
 * Start one reader thread per input file (only when there is more than
 * one input).  Each file gets an 8-packet FIFO plus the mutex/condvar
 * pair that guards it.
 */
2253 static int init_input_threads(void)
2257 if (nb_input_files == 1)
2260 for (i = 0; i < nb_input_files; i++) {
2261 InputFile *f = input_files[i];
2263 if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
2264 return AVERROR(ENOMEM);
2266 pthread_mutex_init(&f->fifo_lock, NULL);
2267 pthread_cond_init (&f->fifo_cond, NULL);
/* pthread_create returns an errno value directly (not via errno) */
2269 if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
2270 return AVERROR(ret);
/*
 * Non-blocking read of one packet from an input file's FIFO (threaded
 * multi-input mode).  Returns AVERROR(EAGAIN) when the FIFO is empty.
 */
2275 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2279 pthread_mutex_lock(&f->fifo_lock);
2281 if (av_fifo_size(f->fifo)) {
2282 av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
/* wake the reader thread in case it was blocked on a full FIFO */
2283 pthread_cond_signal(&f->fifo_cond);
2288 ret = AVERROR(EAGAIN);
2291 pthread_mutex_unlock(&f->fifo_lock);
/*
 * Fetch the next packet from an input file, honouring -re rate
 * emulation (returns EAGAIN while the stream's dts is ahead of the
 * emulated wallclock).
 */
2297 static int get_input_packet(InputFile *f, AVPacket *pkt)
2301 for (i = 0; i < f->nb_streams; i++) {
2302 InputStream *ist = input_streams[f->ist_index + i];
2303 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2304 int64_t now = av_gettime_relative() - ist->start;
2306 return AVERROR(EAGAIN);
/* threaded FIFO path for multiple inputs, direct demux otherwise */
2311 if (nb_input_files > 1)
2312 return get_input_packet_mt(f, pkt);
2314 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any input file reported EAGAIN on its last read. */
2317 static int got_eagain(void)
2320 for (i = 0; i < nb_input_files; i++)
2321 if (input_files[i]->eagain)
/* Clear the per-file EAGAIN flags before the next read attempt. */
2326 static void reset_eagain(void)
2329 for (i = 0; i < nb_input_files; i++)
2330 input_files[i]->eagain = 0;
2334 * Read one packet from an input file and send it for
2335 * - decoding -> lavfi (audio/video)
2336 * - decoding -> encoding -> muxing (subtitles)
2337 * - muxing (streamcopy)
2340 * - 0 -- one packet was read and processed
2341 * - AVERROR(EAGAIN) -- no packets were available for selected file,
2342 * this function should be called again
2343 * - AVERROR_EOF -- this function should not be called again
/* NOTE(review): elided view — several lines of this body are missing. */
2345 static int process_input(void)
2348 AVFormatContext *is;
2353 /* select the stream that we must read now */
2354 ifile = select_input_file();
2355 /* if none, if is finished */
2360 return AVERROR(EAGAIN);
2362 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from.\n");
2367 ret = get_input_packet(ifile, &pkt);
2369 if (ret == AVERROR(EAGAIN)) {
2374 if (ret != AVERROR_EOF) {
2375 print_error(is->filename, ret);
/* EOF on this file: flush its decoders and finish dependent outputs */
2379 ifile->eof_reached = 1;
2381 for (i = 0; i < ifile->nb_streams; i++) {
2382 ist = input_streams[ifile->ist_index + i];
2383 if (ist->decoding_needed)
2384 process_input_packet(ist, NULL);
2386 /* mark all outputs that don't go through lavfi as finished */
2387 for (j = 0; j < nb_output_streams; j++) {
2388 OutputStream *ost = output_streams[j];
2390 if (ost->source_index == ifile->ist_index + i &&
2391 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
2392 finish_output_stream(ost);
2396 return AVERROR(EAGAIN);
2402 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2403 is->streams[pkt.stream_index]);
2405 /* the following test is needed in case new streams appear
2406 dynamically in stream : we ignore them */
2407 if (pkt.stream_index >= ifile->nb_streams)
2408 goto discard_packet;
2410 ist = input_streams[ifile->ist_index + pkt.stream_index];
2412 ist->data_size += pkt.size;
2416 goto discard_packet;
2418 /* add the stream-global side data to the first packet */
2419 if (ist->nb_packets == 1)
2420 for (i = 0; i < ist->st->nb_side_data; i++) {
2421 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* don't duplicate side data the packet already carries */
2424 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
2427 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
2431 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the file's accumulated timestamp offset, then -itsscale */
2434 if (pkt.dts != AV_NOPTS_VALUE)
2435 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2436 if (pkt.pts != AV_NOPTS_VALUE)
2437 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2439 if (pkt.pts != AV_NOPTS_VALUE)
2440 pkt.pts *= ist->ts_scale;
2441 if (pkt.dts != AV_NOPTS_VALUE)
2442 pkt.dts *= ist->ts_scale;
/* detect and compensate timestamp discontinuities (e.g. MPEG-TS wrap) */
2444 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2445 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
2446 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
2447 (is->iformat->flags & AVFMT_TS_DISCONT)) {
2448 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2449 int64_t delta = pkt_dts - ist->next_dts;
2451 if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
2452 ifile->ts_offset -= delta;
2453 av_log(NULL, AV_LOG_DEBUG,
2454 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2455 delta, ifile->ts_offset);
2456 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2457 if (pkt.pts != AV_NOPTS_VALUE)
2458 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2462 ret = process_input_packet(ist, &pkt);
2464 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
2465 ist->file_index, ist->st->index);
2471 av_free_packet(&pkt);
2477 * The following code is the main loop of the file converter
/*
 * Main transcode driver: initialize, loop reading/decoding/filtering
 * until no output is needed (or SIGTERM), then flush decoders, write
 * trailers, print the final report, and tear everything down.
 * NOTE(review): elided view — several lines of this body are missing.
 */
2479 static int transcode(void)
2481 int ret, i, need_input = 1;
2482 AVFormatContext *os;
2485 int64_t timer_start;
2487 ret = transcode_init();
2491 av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2494 timer_start = av_gettime_relative();
2497 if ((ret = init_input_threads()) < 0)
2501 while (!received_sigterm) {
2502 /* check if there's any stream where output is still needed */
2503 if (!need_output()) {
2504 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
2508 /* read and process one input packet if needed */
2510 ret = process_input();
2511 if (ret == AVERROR_EOF)
2515 ret = poll_filters();
2517 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
2520 av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
2524 /* dump report by using the output first video and audio streams */
2525 print_report(0, timer_start);
2528 free_input_threads();
2531 /* at the end of stream, we must flush the decoder buffers */
2532 for (i = 0; i < nb_input_streams; i++) {
2533 ist = input_streams[i];
2534 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
2535 process_input_packet(ist, NULL);
2543 /* write the trailer if needed and close file */
2544 for (i = 0; i < nb_output_files; i++) {
2545 os = output_files[i]->ctx;
2546 av_write_trailer(os);
2549 /* dump report by using the first video and audio streams */
2550 print_report(1, timer_start);
2552 /* close each encoder */
2553 for (i = 0; i < nb_output_streams; i++) {
2554 ost = output_streams[i];
2555 if (ost->encoding_needed) {
2556 av_freep(&ost->enc_ctx->stats_in);
2560 /* close each decoder */
2561 for (i = 0; i < nb_input_streams; i++) {
2562 ist = input_streams[i];
2563 if (ist->decoding_needed) {
2564 avcodec_close(ist->dec_ctx);
2565 if (ist->hwaccel_uninit)
2566 ist->hwaccel_uninit(ist->dec_ctx);
/* second call is safe: joined threads/FIFOs are skipped */
2575 free_input_threads();
/* per-output-stream cleanup: pass logs, forced kf list, option dicts */
2578 if (output_streams) {
2579 for (i = 0; i < nb_output_streams; i++) {
2580 ost = output_streams[i];
2583 fclose(ost->logfile);
2584 ost->logfile = NULL;
2586 av_free(ost->forced_kf_pts);
2587 av_dict_free(&ost->encoder_opts);
2588 av_dict_free(&ost->resample_opts);
/*
 * Return the user-mode CPU time consumed by this process, in microseconds.
 * Implemented with getrusage() where available and GetProcessTimes() on
 * Windows; otherwise falls back to av_gettime_relative() wall-clock time.
 * NOTE(review): the #if/#endif lines of this function are not visible in
 * this excerpt.
 */
2595 static int64_t getutime(void)
2598     struct rusage rusage;
2600     getrusage(RUSAGE_SELF, &rusage);
     /* ru_utime carries seconds + microseconds; fold into one microsecond count */
2601     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
2602 #elif HAVE_GETPROCESSTIMES
2604     FILETIME c, e, k, u;
2605     proc = GetCurrentProcess();
2606     GetProcessTimes(proc, &c, &e, &k, &u);
     /* user time (u) is a 64-bit count of 100ns intervals split across two
      * 32-bit words; reassemble and divide by 10 to get microseconds */
2607     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
2609     return av_gettime_relative();
/*
 * Return the peak memory usage of this process in bytes:
 * ru_maxrss (reported in kilobytes, hence the * 1024) on POSIX systems
 * that provide it, PeakPagefileUsage (peak committed memory, already in
 * bytes) on Windows. The fallback branch is not visible in this excerpt.
 */
2613 static int64_t getmaxrss(void)
2615 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
2616     struct rusage rusage;
2617     getrusage(RUSAGE_SELF, &rusage);
     /* widen before multiplying to avoid overflow on 32-bit ru_maxrss */
2618     return (int64_t)rusage.ru_maxrss * 1024;
2619 #elif HAVE_GETPROCESSMEMORYINFO
2621     PROCESS_MEMORY_COUNTERS memcounters;
2622     proc = GetCurrentProcess();
     /* cb must be set to the structure size before the call, per the API */
2623     memcounters.cb = sizeof(memcounters);
2624     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
2625     return memcounters.PeakPagefileUsage;
2631 int main(int argc, char **argv)
2636 register_exit(avconv_cleanup);
2638 av_log_set_flags(AV_LOG_SKIP_REPEATED);
2639 parse_loglevel(argc, argv, options);
2641 avcodec_register_all();
2643 avdevice_register_all();
2645 avfilter_register_all();
2647 avformat_network_init();
2651 /* parse options and open all input/output files */
2652 ret = avconv_parse_options(argc, argv);
2656 if (nb_output_files <= 0 && nb_input_files == 0) {
2658 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2662 /* file converter / grab */
2663 if (nb_output_files <= 0) {
2664 fprintf(stderr, "At least one output file must be specified\n");
2669 if (transcode() < 0)
2671 ti = getutime() - ti;
2673 int maxrss = getmaxrss() / 1024;
2674 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);