X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=avconv.c;h=59e12ff66f2f0e05bc3d2c101dafa8950e4f8504;hb=5c011706bc752d34bc6ada31d7df2ca0c9af7c6b;hp=d6860c64f3d5943219409f833ce4546a0e54a9f5;hpb=7b9373db89096dea65a206bb4637db61348776f2;p=ffmpeg diff --git a/avconv.c b/avconv.c index d6860c64f3d..59e12ff66f2 100644 --- a/avconv.c +++ b/avconv.c @@ -1,21 +1,21 @@ /* - * avconv main - * Copyright (c) 2000-2011 The libav developers. + * ffmpeg main + * Copyright (c) 2000-2003 Fabrice Bellard * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -47,9 +47,13 @@ #include "libavutil/imgutils.h" #include "libavformat/os_support.h" +#include "libavformat/ffm.h" // not public API + #if CONFIG_AVFILTER +# include "libavfilter/avcodec.h" # include "libavfilter/avfilter.h" # include "libavfilter/avfiltergraph.h" +# include "libavfilter/buffersink.h" # include "libavfilter/buffersrc.h" # include "libavfilter/vsrc_buffer.h" #endif @@ -70,6 +74,14 @@ #include #endif +#if HAVE_TERMIOS_H +#include +#include +#include +#include +#elif HAVE_KBHIT +#include +#endif #include #include "cmdutils.h" @@ -104,6 +116,9 @@ typedef struct MetadataMap { static const OptionDef options[]; +#define MAX_STREAMS 1024 /* arbitrary sanity check value */ + +static int frame_bits_per_raw_sample = 0; static int video_discard = 0; static int same_quant = 0; static int do_deinterlace = 0; @@ -111,11 +126,12 @@ static int intra_dc_precision = 8; static int qp_hist = 0; static int file_overwrite = 0; +static int no_file_overwrite = 0; static int do_benchmark = 0; static int do_hex_dump = 0; static int do_pkt_dump = 0; static int do_pass = 0; -static char *pass_logfilename_prefix = NULL; +static const char *pass_logfilename_prefix; static int video_sync_method = VSYNC_AUTO; static int audio_sync_method = 0; static float audio_drift_threshold = 0.1; @@ -129,6 +145,8 @@ static int audio_volume = 256; static int exit_on_error = 0; static int using_stdin = 0; +static int run_as_daemon = 0; +static int q_pressed = 0; static int64_t video_size = 0; static int64_t audio_size = 0; static int64_t extra_size = 0; @@ -141,8 +159,7 @@ static float dts_delta_threshold = 10; static int print_stats = 1; static uint8_t *audio_buf; -static uint8_t *audio_out; -static unsigned int allocated_audio_out_size, allocated_audio_buf_size; +static unsigned int allocated_audio_buf_size; #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass" @@ -172,7 +189,6 @@ typedef struct InputStream { int64_t next_pts; /* synthetic pts for cases where pkt.pts is not defined */ int64_t pts; /* current pts */ - PtsCorrectionContext pts_ctx; double ts_scale; int is_start; /* is 
1 at the start and after a discontinuity */ int showed_multi_packet_warning; @@ -208,10 +224,11 @@ typedef struct OutputStream { AVBitStreamFilterContext *bitstream_filters; AVCodec *enc; int64_t max_frames; + AVFrame *output_frame; /* video only */ int video_resample; - AVFrame pict_tmp; /* temporary image for resampling */ + AVFrame resample_frame; /* temporary frame for image resampling */ struct SwsContext *img_resample_ctx; /* for image resampling */ int resample_height; int resample_width; @@ -254,6 +271,11 @@ typedef struct OutputStream { int copy_initial_nonkeyframes; } OutputStream; +#if HAVE_TERMIOS_H + +/* init terminal so that we can grab keys */ +static struct termios oldtty; +#endif typedef struct OutputFile { AVFormatContext *ctx; @@ -550,7 +572,8 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost) /** filter graph containing all filters including input & output */ AVCodecContext *codec = ost->st->codec; AVCodecContext *icodec = ist->st->codec; - FFSinkContext ffsink_ctx = { .pix_fmt = codec->pix_fmt }; + enum PixelFormat pix_fmts[] = { codec->pix_fmt, PIX_FMT_NONE }; + AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc(); AVRational sample_aspect_ratio; char args[255]; int ret; @@ -570,8 +593,15 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost) "src", args, NULL, ost->graph); if (ret < 0) return ret; - ret = avfilter_graph_create_filter(&ost->output_video_filter, &ffsink, - "out", NULL, &ffsink_ctx, ost->graph); +#if FF_API_OLD_VSINK_API + ret = avfilter_graph_create_filter(&ost->output_video_filter, avfilter_get_by_name("buffersink"), + "out", NULL, pix_fmts, ost->graph); +#else + buffersink_params->pixel_fmts = pix_fmts; + ret = avfilter_graph_create_filter(&ost->output_video_filter, avfilter_get_by_name("buffersink"), + "out", NULL, buffersink_params, ost->graph); +#endif + av_freep(&buffersink_params); if (ret < 0) return ret; last_filter = ost->input_video_filter; @@ -593,8 +623,8 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost) ost->graph->scale_sws_opts = av_strdup(args); if (ost->avfilter) { - AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut)); - AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut)); + AVFilterInOut *outputs = avfilter_inout_alloc(); + AVFilterInOut *inputs = avfilter_inout_alloc(); outputs->name = av_strdup("in"); outputs->filter_ctx = last_filter; @@ -606,9 +636,8 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost) inputs->pad_idx = 0; inputs->next = NULL; - if ((ret = avfilter_graph_parse(ost->graph, ost->avfilter, inputs, outputs, NULL)) < 0) + if ((ret = avfilter_graph_parse(ost->graph, ost->avfilter, &inputs, &outputs, NULL)) < 0) return ret; - av_freep(&ost->avfilter); } else { if ((ret = avfilter_link(last_filter, 0, ost->output_video_filter, 0)) < 0) return ret; @@ -630,22 +659,47 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost) static void term_exit(void) { - av_log(NULL, AV_LOG_QUIET, ""); + av_log(NULL, AV_LOG_QUIET, "%s", ""); +#if HAVE_TERMIOS_H + if(!run_as_daemon) + tcsetattr (0, TCSANOW, &oldtty); +#endif } static volatile int received_sigterm = 0; -static volatile int received_nb_signals = 0; static void sigterm_handler(int sig) { received_sigterm = sig; - received_nb_signals++; + q_pressed++; term_exit(); } static void term_init(void) { +#if HAVE_TERMIOS_H + if(!run_as_daemon){ + struct termios tty; + + tcgetattr (0, &tty); + oldtty = tty; + atexit(term_exit); + + tty.c_iflag &= 
~(IGNBRK|BRKINT|PARMRK|ISTRIP + |INLCR|IGNCR|ICRNL|IXON); + tty.c_oflag |= OPOST; + tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN); + tty.c_cflag &= ~(CSIZE|PARENB); + tty.c_cflag |= CS8; + tty.c_cc[VMIN] = 1; + tty.c_cc[VTIME] = 0; + + tcsetattr (0, TCSANOW, &tty); + signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */ + } +#endif + signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */ signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */ #ifdef SIGXCPU @@ -653,9 +707,41 @@ static void term_init(void) #endif } +/* read a key without blocking */ +static int read_key(void) +{ +#if HAVE_TERMIOS_H + int n = 1; + unsigned char ch; + struct timeval tv; + fd_set rfds; + + if(run_as_daemon) + return -1; + + FD_ZERO(&rfds); + FD_SET(0, &rfds); + tv.tv_sec = 0; + tv.tv_usec = 0; + n = select(1, &rfds, NULL, NULL, &tv); + if (n > 0) { + n = read(0, &ch, 1); + if (n == 1) + return ch; + + return n; + } +#elif HAVE_KBHIT + if(kbhit()) + return(getch()); +#endif + return -1; +} + static int decode_interrupt_cb(void *ctx) { - return received_nb_signals > 1; + q_pressed += read_key() == 'q'; + return q_pressed > 1; } static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL }; @@ -680,6 +766,17 @@ void exit_program(int ret) bsfc = next; } output_streams[i].bitstream_filters = NULL; + + if (output_streams[i].output_frame) { + AVFrame *frame = output_streams[i].output_frame; + if (frame->extended_data != frame->data) + av_freep(&frame->extended_data); + av_freep(&frame); + } + +#if CONFIG_AVFILTER + av_freep(&output_streams[i].avfilter); +#endif } for (i = 0; i < nb_input_files; i++) { avformat_close_input(&input_files[i].ctx); @@ -702,8 +799,7 @@ void exit_program(int ret) uninit_opts(); av_free(audio_buf); - av_free(audio_out); - allocated_audio_buf_size = allocated_audio_out_size = 0; + allocated_audio_buf_size = 0; #if CONFIG_AVFILTER avfilter_uninit(); @@ -754,6 +850,9 @@ static void choose_sample_fmt(AVStream *st, AVCodec *codec) break; } if (*p == -1) { + if((codec->capabilities & CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0])) + av_log(NULL, AV_LOG_ERROR, "Convertion will not be lossless'\n"); + if(av_get_sample_fmt_name(st->codec->sample_fmt)) av_log(NULL, AV_LOG_WARNING, "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n", av_get_sample_fmt_name(st->codec->sample_fmt), @@ -764,46 +863,6 @@ static void choose_sample_fmt(AVStream *st, AVCodec *codec) } } -/** - * Update the requested input sample format based on the output sample format. - * This is currently only used to request float output from decoders which - * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT. - * Ideally this will be removed in the future when decoders do not do format - * conversion and only output in their native format. 
- */ -static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec, - AVCodecContext *enc) -{ - /* if sample formats match or a decoder sample format has already been - requested, just return */ - if (enc->sample_fmt == dec->sample_fmt || - dec->request_sample_fmt > AV_SAMPLE_FMT_NONE) - return; - - /* if decoder supports more than one output format */ - if (dec_codec && dec_codec->sample_fmts && - dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE && - dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) { - const enum AVSampleFormat *p; - int min_dec = -1, min_inc = -1; - - /* find a matching sample format in the encoder */ - for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) { - if (*p == enc->sample_fmt) { - dec->request_sample_fmt = *p; - return; - } else if (*p > enc->sample_fmt) { - min_inc = FFMIN(min_inc, *p - enc->sample_fmt); - } else - min_dec = FFMIN(min_dec, enc->sample_fmt - *p); - } - - /* if none match, provide the one that matches quality closest */ - dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc : - enc->sample_fmt - min_dec; - } -} - static void choose_sample_rate(AVStream *st, AVCodec *codec) { if (codec && codec->supported_samplerates) { @@ -866,6 +925,19 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost) AVCodecContext *avctx = ost->st->codec; int ret; + /* + * Audio encoders may split the packets -- #frames in != #packets out. + * But there is no reordering, so we can limit the number of output packets + * by simply dropping them here. + * Counting encoded video frames needs to be done separately because of + * reordering, see do_video_out() + */ + if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) { + if (ost->frame_number >= ost->max_frames) + return; + ost->frame_number++; + } + while (bsfc) { AVPacket new_pkt = *pkt; int a = av_bitstream_filter_filter(bsfc, avctx, NULL, @@ -893,7 +965,6 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost) print_error("av_interleaved_write_frame()", ret); exit_program(1); } - ost->frame_number++; } static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size) @@ -904,18 +975,75 @@ static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_ memset(buf, fill_char, size); } +static int encode_audio_frame(AVFormatContext *s, OutputStream *ost, + const uint8_t *buf, int buf_size) +{ + AVCodecContext *enc = ost->st->codec; + AVFrame *frame = NULL; + AVPacket pkt; + int ret, got_packet; + + av_init_packet(&pkt); + pkt.data = NULL; + pkt.size = 0; + + if (buf) { + if (!ost->output_frame) { + ost->output_frame = avcodec_alloc_frame(); + if (!ost->output_frame) { + av_log(NULL, AV_LOG_FATAL, "out-of-memory in encode_audio_frame()\n"); + exit_program(1); + } + } + frame = ost->output_frame; + if (frame->extended_data != frame->data) + av_freep(&frame->extended_data); + avcodec_get_frame_defaults(frame); + + frame->nb_samples = buf_size / + (enc->channels * av_get_bytes_per_sample(enc->sample_fmt)); + if ((ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt, + buf, buf_size, 1)) < 0) { + av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n"); + exit_program(1); + } + } + + got_packet = 0; + if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) { + av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n"); + exit_program(1); + } + + if (got_packet) { + pkt.stream_index = ost->index; + if (pkt.pts != AV_NOPTS_VALUE) + pkt.pts = av_rescale_q(pkt.pts, enc->time_base, 
ost->st->time_base); + if (pkt.duration > 0) + pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base); + + write_frame(s, &pkt, ost); + + audio_size += pkt.size; + } + + if (frame) + ost->sync_opts += frame->nb_samples; + + return pkt.size; +} + static void do_audio_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVFrame *decoded_frame) { uint8_t *buftmp; - int64_t audio_out_size, audio_buf_size; + int64_t audio_buf_size; - int size_out, frame_bytes, ret, resample_changed; + int size_out, frame_bytes, resample_changed; AVCodecContext *enc = ost->st->codec; AVCodecContext *dec = ist->st->codec; int osize = av_get_bytes_per_sample(enc->sample_fmt); int isize = av_get_bytes_per_sample(dec->sample_fmt); - const int coded_bps = av_get_bits_per_sample(enc->codec->id); uint8_t *buf = decoded_frame->data[0]; int size = decoded_frame->nb_samples * dec->channels * isize; int64_t allocated_for_size = size; @@ -927,24 +1055,18 @@ need_realloc: audio_buf_size = FFMAX(audio_buf_size, enc->frame_size); audio_buf_size *= osize * enc->channels; - audio_out_size = FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels); - if (coded_bps > 8 * osize) - audio_out_size = audio_out_size * coded_bps / (8*osize); - audio_out_size += FF_MIN_BUFFER_SIZE; - - if (audio_out_size > INT_MAX || audio_buf_size > INT_MAX) { + if (audio_buf_size > INT_MAX) { av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n"); exit_program(1); } av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size); - av_fast_malloc(&audio_out, &allocated_audio_out_size, audio_out_size); - if (!audio_buf || !audio_out) { + if (!audio_buf) { av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n"); exit_program(1); } - if (enc->channels != dec->channels || enc->sample_rate != dec->sample_rate) + if (enc->channels != dec->channels) ost->audio_resample = 1; resample_changed = ost->resample_sample_fmt != dec->sample_fmt || @@ -970,7 +1092,7 @@ need_realloc: ost->resample_sample_rate == enc->sample_rate) { ost->resample = NULL; ost->audio_resample = 0; - } else if (ost->audio_resample) { + } else { if (dec->sample_fmt != AV_SAMPLE_FMT_S16) av_log(NULL, AV_LOG_WARNING, "Using s16 intermediate sample format for resampling\n"); ost->resample = av_audio_resample_init(enc->channels, dec->channels, @@ -1077,7 +1199,7 @@ need_realloc: } /* now encode as many frames as possible */ - if (enc->frame_size > 1) { + if (!(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) { /* output resampled raw samples */ if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) { av_log(NULL, AV_LOG_FATAL, "av_fifo_realloc2() failed\n"); @@ -1088,62 +1210,11 @@ need_realloc: frame_bytes = enc->frame_size * osize * enc->channels; while (av_fifo_size(ost->fifo) >= frame_bytes) { - AVPacket pkt; - av_init_packet(&pkt); - av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL); - - // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio() - - ret = avcodec_encode_audio(enc, audio_out, audio_out_size, - (short *)audio_buf); - if (ret < 0) { - av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n"); - exit_program(1); - } - audio_size += ret; - pkt.stream_index = ost->index; - pkt.data = audio_out; - pkt.size = ret; - if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE) - pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); - pkt.flags |= AV_PKT_FLAG_KEY; - write_frame(s, &pkt, ost); - - ost->sync_opts += enc->frame_size; + encode_audio_frame(s, 
ost, audio_buf, frame_bytes); } } else { - AVPacket pkt; - av_init_packet(&pkt); - - ost->sync_opts += size_out / (osize * enc->channels); - - /* output a pcm frame */ - /* determine the size of the coded buffer */ - size_out /= osize; - if (coded_bps) - size_out = size_out * coded_bps / 8; - - if (size_out > audio_out_size) { - av_log(NULL, AV_LOG_FATAL, "Internal error, buffer size too small\n"); - exit_program(1); - } - - // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio() - ret = avcodec_encode_audio(enc, audio_out, size_out, - (short *)buftmp); - if (ret < 0) { - av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n"); - exit_program(1); - } - audio_size += ret; - pkt.stream_index = ost->index; - pkt.data = audio_out; - pkt.size = ret; - if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE) - pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); - pkt.flags |= AV_PKT_FLAG_KEY; - write_frame(s, &pkt, ost); + encode_audio_frame(s, ost, buftmp, size_out); } } @@ -1252,6 +1323,7 @@ static void do_subtitle_out(AVFormatContext *s, static int bit_buffer_size = 1024 * 256; static uint8_t *bit_buffer = NULL; +#if !CONFIG_AVFILTER static void do_video_resample(OutputStream *ost, InputStream *ist, AVFrame *in_picture, @@ -1259,36 +1331,47 @@ static void do_video_resample(OutputStream *ost, { int resample_changed = 0; AVCodecContext *dec = ist->st->codec; + AVCodecContext *enc = ost->st->codec; *out_picture = in_picture; - resample_changed = ost->resample_width != dec->width || - ost->resample_height != dec->height || - ost->resample_pix_fmt != dec->pix_fmt; + resample_changed = ost->resample_width != in_picture->width || + ost->resample_height != in_picture->height || + ost->resample_pix_fmt != in_picture->format; if (resample_changed) { av_log(NULL, AV_LOG_INFO, - "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n", + "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s / frm size:%dx%d fmt:%s\n", ist->file_index, ist->st->index, ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt), - dec->width , dec->height , av_get_pix_fmt_name(dec->pix_fmt)); - if (!ost->video_resample) - ost->video_resample = 1; + dec->width , dec->height , av_get_pix_fmt_name(dec->pix_fmt), + in_picture->width, in_picture->height, av_get_pix_fmt_name(in_picture->format)); + ost->resample_width = in_picture->width; + ost->resample_height = in_picture->height; + ost->resample_pix_fmt = in_picture->format; } -#if !CONFIG_AVFILTER + ost->video_resample = dec->width != enc->width || + dec->height != enc->height || + dec->pix_fmt != enc->pix_fmt; + + if (ost->video_resample) { - *out_picture = &ost->pict_tmp; - if (resample_changed) { + *out_picture = &ost->resample_frame; + if (!ost->img_resample_ctx || resample_changed) { + /* initialize the destination picture */ + if (!ost->resample_frame.data[0]) { + avcodec_get_frame_defaults(&ost->resample_frame); + if (avpicture_alloc((AVPicture *)&ost->resample_frame, enc->pix_fmt, + enc->width, enc->height)) { + fprintf(stderr, "Cannot allocate temp picture, check pix fmt\n"); + exit_program(1); + } + } /* initialize a new scaler context */ sws_freeContext(ost->img_resample_ctx); - ost->img_resample_ctx = sws_getContext( - ist->st->codec->width, - ist->st->codec->height, - ist->st->codec->pix_fmt, - ost->st->codec->width, - ost->st->codec->height, - ost->st->codec->pix_fmt, - ost->sws_flags, NULL, NULL, NULL); + ost->img_resample_ctx = 
sws_getContext(dec->width, dec->height, dec->pix_fmt, + enc->width, enc->height, enc->pix_fmt, + ost->sws_flags, NULL, NULL, NULL); if (ost->img_resample_ctx == NULL) { av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n"); exit_program(1); @@ -1297,21 +1380,13 @@ static void do_video_resample(OutputStream *ost, sws_scale(ost->img_resample_ctx, in_picture->data, in_picture->linesize, 0, ost->resample_height, (*out_picture)->data, (*out_picture)->linesize); } -#else - if (resample_changed) { - avfilter_graph_free(&ost->graph); - if (configure_video_filters(ist, ost)) { - av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n"); - exit_program(1); - } - } -#endif if (resample_changed) { - ost->resample_width = dec->width; - ost->resample_height = dec->height; - ost->resample_pix_fmt = dec->pix_fmt; + ost->resample_width = in_picture->width; + ost->resample_height = in_picture->height; + ost->resample_pix_fmt = in_picture->format; } } +#endif static void do_video_out(AVFormatContext *s, @@ -1366,7 +1441,11 @@ static void do_video_out(AVFormatContext *s, if (nb_frames <= 0) return; +#if !CONFIG_AVFILTER do_video_resample(ost, ist, in_picture, &final_picture); +#else + final_picture = in_picture; +#endif /* duplicates frame if needed */ for (i = 0; i < nb_frames; i++) { @@ -1446,6 +1525,12 @@ static void do_video_out(AVFormatContext *s, } } ost->sync_opts++; + /* + * For video, number of frames in == number of packets out. + * But there may be reordering, so we can't throw away frames on encoder + * flush, we need to limit them here, before they go into encoder. + */ + ost->frame_number++; } } @@ -1501,9 +1586,11 @@ static void print_report(OutputFile *output_files, int64_t total_size; AVCodecContext *enc; int frame_number, vid, i; - double bitrate, ti1, pts; + double bitrate; + int64_t pts = INT64_MAX; static int64_t last_time = -1; static int qp_histogram[52]; + int hours, mins, secs, us; if (!print_stats && !is_last_report) return; @@ -1529,7 +1616,6 @@ static void print_report(OutputFile *output_files, total_size = avio_tell(oc->pb); buf[0] = '\0'; - ti1 = 1e10; vid = 0; for (i = 0; i < nb_ostreams; i++) { float q = -1; @@ -1581,18 +1667,26 @@ static void print_report(OutputFile *output_files, vid = 1; } /* compute min output value */ - pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base); - if ((pts < ti1) && (pts > 0)) - ti1 = pts; + pts = FFMIN(pts, av_rescale_q(ost->st->pts.val, + ost->st->time_base, AV_TIME_BASE_Q)); } - if (ti1 < 0.01) - ti1 = 0.01; - bitrate = (double)(total_size * 8) / ti1 / 1000.0; + secs = pts / AV_TIME_BASE; + us = pts % AV_TIME_BASE; + mins = secs / 60; + secs %= 60; + hours = mins / 60; + mins %= 60; + + bitrate = pts ? 
total_size * 8 / (pts / 1000.0) : 0; snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), - "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s", - (double)total_size / 1024, ti1, bitrate); + "size=%8.0fkB time=", total_size / 1024.0); + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), + "%02d:%02d:%02d.%02d ", hours, mins, secs, + (100 * us) / AV_TIME_BASE); + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), + "bitrate=%6.1fkbits/s", bitrate); if (nb_frames_dup || nb_frames_drop) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d", @@ -1622,6 +1716,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams) OutputStream *ost = &ost_table[i]; AVCodecContext *enc = ost->st->codec; AVFormatContext *os = output_files[ost->file_index].ctx; + int stop_encoding = 0; if (!ost->encoding_needed) continue; @@ -1635,41 +1730,35 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams) AVPacket pkt; int fifo_bytes; av_init_packet(&pkt); - pkt.stream_index = ost->index; + pkt.data = NULL; + pkt.size = 0; switch (ost->st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: fifo_bytes = av_fifo_size(ost->fifo); - ret = 0; - /* encode any samples remaining in fifo */ if (fifo_bytes > 0) { - int osize = av_get_bytes_per_sample(enc->sample_fmt); - int fs_tmp = enc->frame_size; + /* encode any samples remaining in fifo */ + int frame_bytes = fifo_bytes; av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL); - if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) { - enc->frame_size = fifo_bytes / (osize * enc->channels); - } else { /* pad */ - int frame_bytes = enc->frame_size*osize*enc->channels; + + /* pad last frame with silence if needed */ + if (!(enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)) { + frame_bytes = enc->frame_size * enc->channels * + av_get_bytes_per_sample(enc->sample_fmt); if (allocated_audio_buf_size < frame_bytes) exit_program(1); generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes); } - - ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf); - pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den, - ost->st->time_base.num, enc->sample_rate); - enc->frame_size = fs_tmp; - } - if (ret <= 0) { - ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL); - } - if (ret < 0) { - av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n"); - exit_program(1); + encode_audio_frame(os, ost, audio_buf, frame_bytes); + } else { + /* flush encoder with NULL frames until it is done + returning packets */ + if (encode_audio_frame(os, ost, NULL, 0) == 0) { + stop_encoding = 1; + break; + } } - audio_size += ret; - pkt.flags |= AV_PKT_FLAG_KEY; break; case AVMEDIA_TYPE_VIDEO: ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL); @@ -1683,18 +1772,22 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams) if (ost->logfile && enc->stats_out) { fprintf(ost->logfile, "%s", enc->stats_out); } + if (ret <= 0) { + stop_encoding = 1; + break; + } + pkt.stream_index = ost->index; + pkt.data = bit_buffer; + pkt.size = ret; + if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE) + pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); + write_frame(os, &pkt, ost); break; default: - ret = -1; + stop_encoding = 1; } - - if (ret <= 0) + if (stop_encoding) break; - pkt.data = bit_buffer; - pkt.size = ret; - if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE) - pkt.pts = 
av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); - write_frame(os, &pkt, ost); } } } @@ -1727,6 +1820,7 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p { OutputFile *of = &output_files[ost->file_index]; int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base); + AVPicture pict; AVPacket opkt; av_init_packet(&opkt); @@ -1769,6 +1863,13 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p opkt.data = pkt->data; opkt.size = pkt->size; } + if (of->ctx->oformat->flags & AVFMT_RAWPICTURE) { + /* store AVPicture in AVPacket, as expected by the output format */ + avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height); + opkt.data = (uint8_t *)&pict; + opkt.size = sizeof(AVPicture); + opkt.flags |= AV_PKT_FLAG_KEY; + } write_frame(of->ctx, &opkt, ost); ost->st->codec->frame_number++; @@ -1918,8 +2019,7 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int /* no picture yet */ return ret; } - ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts, - decoded_frame->pkt_dts); + ist->next_pts = ist->pts = decoded_frame->best_effort_timestamp; if (pkt->duration) ist->next_pts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); else if (ist->st->codec->time_base.num != 0) { @@ -1936,14 +2036,36 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int for (i = 0; i < nb_output_streams; i++) { OutputStream *ost = &output_streams[i]; - int frame_size; + int frame_size, resample_changed; if (!check_output_constraints(ist, ost) || !ost->encoding_needed) continue; #if CONFIG_AVFILTER - if (ist->st->sample_aspect_ratio.num) + resample_changed = ost->resample_width != decoded_frame->width || + ost->resample_height != decoded_frame->height || + ost->resample_pix_fmt != decoded_frame->format; + if (resample_changed) { + av_log(NULL, AV_LOG_INFO, + "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n", + ist->file_index, ist->st->index, + ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt), + decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format)); + + avfilter_graph_free(&ost->graph); + if (configure_video_filters(ist, ost)) { + av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n"); + exit_program(1); + } + + ost->resample_width = decoded_frame->width; + ost->resample_height = decoded_frame->height; + ost->resample_pix_fmt = decoded_frame->format; + } + + if (!decoded_frame->sample_aspect_ratio.num) decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio; + decoded_frame->pts = ist->pts; if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) { FrameBuffer *buf = decoded_frame->opaque; AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays( @@ -1960,8 +2082,7 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int buf->refcount++; av_buffersrc_buffer(ost->input_video_filter, fb); } else - av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, - ist->pts, decoded_frame->sample_aspect_ratio); + av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE); if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) { av_free(buffer_to_free); @@ -1972,13 +2093,17 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int 
*got_output, int frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]); while (frame_available) { - AVRational ist_pts_tb; - if (ost->output_video_filter) - get_filtered_video_frame(ost->output_video_filter, filtered_frame, &ost->picref, &ist_pts_tb); - if (ost->picref) - ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q); + if (ost->output_video_filter) { + AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base; + if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0) + goto cont; + if (ost->picref) { + avfilter_fill_frame_from_video_buffer_ref(filtered_frame, ost->picref); + ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q); + } + } if (ost->picref->video && !ost->frame_aspect_ratio) - ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect; + ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio; #else filtered_frame = decoded_frame; #endif @@ -1988,6 +2113,7 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int if (vstats_filename && frame_size) do_video_stats(output_files[ost->file_index].ctx, ost, frame_size); #if CONFIG_AVFILTER + cont: frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]); if (ost->picref) avfilter_unref_buffer(ost->picref); @@ -2152,22 +2278,14 @@ static int init_input_stream(int ist_index, OutputStream *output_streams, int nb return AVERROR(EINVAL); } - /* update requested sample format for the decoder based on the - corresponding encoder sample format */ - for (i = 0; i < nb_output_streams; i++) { - OutputStream *ost = &output_streams[i]; - if (ost->source_index == ist_index) { - update_sample_fmt(ist->st->codec, codec, ost->st->codec); - break; - } - } - if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) { ist->st->codec->get_buffer = codec_get_buffer; ist->st->codec->release_buffer = codec_release_buffer; ist->st->codec->opaque = ist; } + if (!av_dict_get(ist->opts, "threads", NULL, 0)) + av_dict_set(&ist->opts, "threads", "auto", 0); if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) { snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d", ist->file_index, ist->st->index); @@ -2179,7 +2297,6 @@ static int init_input_stream(int ist_index, OutputStream *output_streams, int nb ist->pts = ist->st->avg_frame_rate.num ? 
- ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0; ist->next_pts = AV_NOPTS_VALUE; - init_pts_correction(&ist->pts_ctx); ist->is_start = 1; return 0; @@ -2259,6 +2376,7 @@ static int transcode_init(OutputFile *output_files, return AVERROR(ENOMEM); } memcpy(codec->extradata, icodec->extradata, icodec->extradata_size); + codec->extradata_size = icodec->extradata_size; if (!copy_tb) { codec->time_base = icodec->time_base; @@ -2328,9 +2446,10 @@ static int transcode_init(OutputFile *output_files, codec->sample_fmt = icodec->sample_fmt; choose_sample_fmt(ost->st, ost->enc); - if (!codec->channels) + if (!codec->channels) { codec->channels = icodec->channels; - codec->channel_layout = icodec->channel_layout; + codec->channel_layout = icodec->channel_layout; + } if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels) codec->channel_layout = 0; @@ -2359,27 +2478,7 @@ static int transcode_init(OutputFile *output_files, codec->height != icodec->height || codec->pix_fmt != icodec->pix_fmt; if (ost->video_resample) { -#if !CONFIG_AVFILTER - avcodec_get_frame_defaults(&ost->pict_tmp); - if (avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt, - codec->width, codec->height)) { - av_log(NULL, AV_LOG_FATAL, "Cannot allocate temp picture, check pix fmt\n"); - exit_program(1); - } - ost->img_resample_ctx = sws_getContext( - icodec->width, - icodec->height, - icodec->pix_fmt, - codec->width, - codec->height, - codec->pix_fmt, - ost->sws_flags, NULL, NULL, NULL); - if (ost->img_resample_ctx == NULL) { - av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n"); - exit_program(1); - } -#endif - codec->bits_per_raw_sample = 0; + codec->bits_per_raw_sample= frame_bits_per_raw_sample; } ost->resample_height = icodec->height; @@ -2393,6 +2492,11 @@ static int transcode_init(OutputFile *output_files, ost->frame_rate = ost->enc->supported_framerates[idx]; } codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num}; + if( av_q2d(codec->time_base) < 0.001 && video_sync_method + && (video_sync_method==1 || (video_sync_method<0 && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){ + av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not effciciently supporting it.\n" + "Please consider specifiying a lower framerate, a different muxer or -vsync 2\n"); + } #if CONFIG_AVFILTER if (configure_video_filters(ist, ost)) { @@ -2408,7 +2512,8 @@ static int transcode_init(OutputFile *output_files, break; } /* two pass mode */ - if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) { + if (codec->codec_id != CODEC_ID_H264 && + (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) { char logfilename[1024]; FILE *f; @@ -2436,8 +2541,9 @@ static int transcode_init(OutputFile *output_files, } } if (codec->codec_type == AVMEDIA_TYPE_VIDEO) { + /* maximum video buffer size is 6-bytes per pixel, plus DPX header size */ int size = codec->width * codec->height; - bit_buffer_size = FFMAX(bit_buffer_size, 6 * size + 200); + bit_buffer_size = FFMAX(bit_buffer_size, 6 * size + 1664); } } @@ -2470,6 +2576,8 @@ static int transcode_init(OutputFile *output_files, memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size); ost->st->codec->subtitle_header_size = dec->subtitle_header_size; } + if (!av_dict_get(ost->opts, "threads", NULL, 0)) + av_dict_set(&ost->opts, "threads", "auto", 0); if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) { snprintf(error, sizeof(error), "Error while opening encoder for output 
stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height", ost->file_index, ost->index); @@ -2518,7 +2626,7 @@ static int transcode_init(OutputFile *output_files, ret = AVERROR(EINVAL); goto dump_format; } - assert_avoptions(output_files[i].opts); +// assert_avoptions(output_files[i].opts); if (strcmp(oc->oformat->name, "rtp")) { want_sdp = 0; } @@ -2587,6 +2695,7 @@ static int transcode(OutputFile *output_files, uint8_t *no_packet; int no_packet_count = 0; int64_t timer_start; + int key; if (!(no_packet = av_mallocz(nb_input_files))) exit_program(1); @@ -2595,7 +2704,10 @@ static int transcode(OutputFile *output_files, if (ret < 0) goto fail; - av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n"); + if (!using_stdin) { + av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n"); + avio_set_interrupt_cb(decode_interrupt_cb); + } term_init(); timer_start = av_gettime(); @@ -2608,6 +2720,57 @@ static int transcode(OutputFile *output_files, ipts_min = INT64_MAX; opts_min = 1e100; + /* if 'q' pressed, exits */ + if (!using_stdin) { + if (q_pressed) + break; + /* read_key() returns 0 on EOF */ + key = read_key(); + if (key == 'q') + break; + if (key == '+') av_log_set_level(av_log_get_level()+10); + if (key == '-') av_log_set_level(av_log_get_level()-10); + if (key == 's') qp_hist ^= 1; + if (key == 'h'){ + if (do_hex_dump){ + do_hex_dump = do_pkt_dump = 0; + } else if(do_pkt_dump){ + do_hex_dump = 1; + } else + do_pkt_dump = 1; + av_log_set_level(AV_LOG_DEBUG); + } + if (key == 'd' || key == 'D'){ + int debug=0; + if(key == 'D') { + debug = input_streams[0].st->codec->debug<<1; + if(!debug) debug = 1; + while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash + debug += debug; + }else + scanf("%d", &debug); + for(i=0;icodec->debug = debug; + } + for(i=0;ist->codec->debug = debug; + } + if(debug) av_log_set_level(AV_LOG_DEBUG); + fprintf(stderr,"debug=%d\n", debug); + } + if (key == '?'){ + fprintf(stderr, "key function\n" + "? 
show this help\n" + "+ increase verbosity\n" + "- decrease verbosity\n" + "D cycle through available debug modes\n" + "h dump packets/hex press to cycle through the 3 states\n" + "q quit\n" + "s Show QP histogram\n" + ); + } + } /* select the stream that we must read now by looking at the smallest output pts */ @@ -2793,7 +2956,7 @@ static int transcode(OutputFile *output_files, av_fifo_free(ost->fifo); /* works even if fifo is not initialized but set to zero */ av_freep(&ost->st->codec->subtitle_header); - av_free(ost->pict_tmp.data[0]); + av_free(ost->resample_frame.data[0]); av_free(ost->forced_kf_pts); if (ost->video_resample) sws_freeContext(ost->img_resample_ctx); @@ -3099,7 +3262,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic) ist->st = st; ist->file_index = nb_input_files; ist->discard = 1; - ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st); + ist->opts = filter_codec_opts(codec_opts, choose_decoder(o, ic, st), ic, st); ist->ts_scale = 1.0; MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st); @@ -3108,14 +3271,16 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic) switch (dec->codec_type) { case AVMEDIA_TYPE_AUDIO: - if (o->audio_disable) + if(!ist->dec) + ist->dec = avcodec_find_decoder(dec->codec_id); + if(o->audio_disable) st->discard = AVDISCARD_ALL; break; case AVMEDIA_TYPE_VIDEO: + if(!ist->dec) + ist->dec = avcodec_find_decoder(dec->codec_id); if (dec->lowres) { dec->flags |= CODEC_FLAG_EMU_EDGE; - dec->height >>= dec->lowres; - dec->width >>= dec->lowres; } if (o->video_disable) @@ -3126,7 +3291,9 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic) case AVMEDIA_TYPE_DATA: break; case AVMEDIA_TYPE_SUBTITLE: - if (o->subtitle_disable) + if(!ist->dec) + ist->dec = avcodec_find_decoder(dec->codec_id); + if(o->subtitle_disable) st->discard = AVDISCARD_ALL; break; case AVMEDIA_TYPE_ATTACHMENT: @@ -3140,11 +3307,11 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic) static void assert_file_overwrite(const char *filename) { - if (!file_overwrite && + if ((!file_overwrite || no_file_overwrite) && (strchr(filename, ':') == NULL || filename[1] == ':' || av_strstart(filename, "file:", NULL))) { if (avio_check(filename, 0) == 0) { - if (!using_stdin) { + if (!using_stdin && (!no_file_overwrite || file_overwrite)) { fprintf(stderr,"File '%s' already exists. Overwrite ? 
[y/N] ", filename); fflush(stderr); if (!read_yesno()) { @@ -3241,7 +3408,7 @@ static int opt_input_file(OptionsContext *o, const char *opt, const char *filena ic->flags |= AVFMT_FLAG_NONBLOCK; ic->interrupt_callback = int_cb; - /* open the input file with generic libav function */ + /* open the input file with generic avformat function */ err = avformat_open_input(&ic, filename, file_iformat, &format_opts); if (err < 0) { print_error(filename, err); @@ -3425,7 +3592,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e st->codec->codec_type = type; choose_encoder(o, oc, ost); if (ost->enc) { - ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st); + ost->opts = filter_codec_opts(codec_opts, ost->enc, oc, st); } avcodec_get_context_defaults3(st->codec, ost->enc); @@ -3711,7 +3878,7 @@ static int opt_streamid(OptionsContext *o, const char *opt, const char *arg) exit_program(1); } *p++ = '\0'; - idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX); + idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, MAX_STREAMS-1); o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1); o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX); return 0; @@ -3769,30 +3936,14 @@ static void opt_output_file(void *optctx, const char *filename) if (!strcmp(filename, "-")) filename = "pipe:"; - oc = avformat_alloc_context(); + err = avformat_alloc_output_context2(&oc, NULL, o->format, filename); if (!oc) { - print_error(filename, AVERROR(ENOMEM)); + print_error(filename, err); exit_program(1); } - if (o->format) { - file_oformat = av_guess_format(o->format, NULL, NULL); - if (!file_oformat) { - av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format); - exit_program(1); - } - } else { - file_oformat = av_guess_format(NULL, filename, NULL); - if (!file_oformat) { - av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n", - filename); - exit_program(1); - } - } - - oc->oformat = file_oformat; + file_oformat= oc->oformat; oc->interrupt_callback = int_cb; - av_strlcpy(oc->filename, filename, sizeof(oc->filename)); if (!o->nb_stream_maps) { /* pick the "best" stream of each type */ @@ -3939,7 +4090,6 @@ static void opt_output_file(void *optctx, const char *filename) av_dict_set(&output_files[nb_output_files - 1].opts, "preload", buf, 0); } oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE); - oc->flags |= AVFMT_FLAG_NONBLOCK; /* copy metadata */ for (i = 0; i < o->nb_metadata_map; i++) { @@ -4086,12 +4236,12 @@ static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg) static void show_usage(void) { - printf("Hyper fast Audio and Video encoder\n"); - printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name); - printf("\n"); + av_log(NULL, AV_LOG_INFO, "Hyper fast Audio and Video encoder\n"); + av_log(NULL, AV_LOG_INFO, "usage: %s [options] [[infile options] -i infile]... 
{[outfile options] outfile}...\n", program_name); + av_log(NULL, AV_LOG_INFO, "\n"); } -static void show_help(void) +static int opt_help(const char *opt, const char *arg) { int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM; av_log_set_callback(log_callback_help); @@ -4123,6 +4273,8 @@ static void show_help(void) show_help_children(avcodec_get_class(), flags); show_help_children(avformat_get_class(), flags); show_help_children(sws_get_class(), flags); + + return 0; } static int opt_target(OptionsContext *o, const char *opt, const char *arg) @@ -4294,6 +4446,20 @@ static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg) return parse_option(o, "frames:d", arg, options); } +static void log_callback_null(void* ptr, int level, const char* fmt, va_list vl) +{ +} + +static int opt_passlogfile(const char *opt, const char *arg) +{ + pass_logfilename_prefix = arg; +#if CONFIG_LIBX264_ENCODER + return opt_default("passlogfile", arg); +#else + return 0; +#endif +} + static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg) { return parse_option(o, "tag:v", arg, options); @@ -4332,6 +4498,7 @@ static const OptionDef options[] = { { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" }, { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" }, { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" }, + { "n", OPT_BOOL, {(void*)&no_file_overwrite}, "do not overwrite output files" }, { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" }, { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" }, { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" }, @@ -4381,6 +4548,7 @@ static const OptionDef options[] = { { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" }, { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" }, { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" }, + { "bits_per_raw_sample", OPT_INT | HAS_ARG | OPT_VIDEO, {(void*)&frame_bits_per_raw_sample}, "set the number of bits per raw sample", "number" }, { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" }, { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" }, { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" }, @@ -4388,7 +4556,7 @@ static const OptionDef options[] = { { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant}, "use same quantizer as source (implies VBR)" }, { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" }, - { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" }, + { "passlogfile", HAS_ARG | OPT_VIDEO, {(void*)&opt_passlogfile}, "select two pass log file name prefix", "prefix" }, { "deinterlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_deinterlace}, "deinterlace pictures" }, { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" }, @@ -4448,6 
+4616,13 @@ int main(int argc, char **argv)
     av_log_set_flags(AV_LOG_SKIP_REPEATED);
     parse_loglevel(argc, argv, options);
 
+    if(argc>1 && !strcmp(argv[1], "-d")){
+        run_as_daemon=1;
+        av_log_set_callback(log_callback_null);
+        argc--;
+        argv++;
+    }
+
     avcodec_register_all();
 #if CONFIG_AVDEVICE
     avdevice_register_all();
@@ -4458,7 +4633,7 @@
     av_register_all();
     avformat_network_init();
 
-    show_banner();
+    show_banner(argc, argv, options);
 
     /* parse options */
     parse_options(&o, argc, argv, options, opt_output_file);
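
The audio path in this patch drops avcodec_encode_audio() in favour of an encode_audio_frame() helper built on avcodec_encode_audio2(), which consumes an AVFrame of raw samples and fills a caller-supplied AVPacket whose timestamps are then rescaled from the encoder time base to the stream time base. The stand-alone sketch below restates that call pattern; it is not part of the patch, the name encode_samples() is invented, and the real helper's cached-frame reuse and fatal-error handling are left out.

#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"

/* Encode one block of interleaved raw samples with the AVFrame/AVPacket API
 * and rescale the packet timestamps for the muxer (illustrative sketch). */
static int encode_samples(AVCodecContext *enc, AVStream *st,
                          const uint8_t *buf, int buf_size, AVPacket *pkt)
{
    AVFrame *frame = avcodec_alloc_frame();
    int got_packet = 0, ret;

    if (!frame)
        return AVERROR(ENOMEM);

    av_init_packet(pkt);
    pkt->data = NULL;                 /* let the encoder allocate the payload */
    pkt->size = 0;

    /* wrap the raw samples in an AVFrame */
    frame->nb_samples = buf_size /
        (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
    ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
                                   buf, buf_size, 1);
    if (ret >= 0)
        ret = avcodec_encode_audio2(enc, pkt, frame, &got_packet);

    /* free the plane-pointer table if fill_audio_frame() allocated one */
    if (frame->extended_data != frame->data)
        av_freep(&frame->extended_data);
    av_freep(&frame);

    if (ret < 0 || !got_packet)
        return ret;

    /* encoder timestamps are in enc->time_base; the muxer wants st->time_base */
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts = av_rescale_q(pkt->pts, enc->time_base, st->time_base);
    if (pkt->duration > 0)
        pkt->duration = av_rescale_q(pkt->duration, enc->time_base,
                                     st->time_base);
    return pkt->size;
}

The real helper additionally keeps the AVFrame cached on the OutputStream and advances ost->sync_opts by frame->nb_samples after each call.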
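
configure_video_filters() now terminates the filter graph with the public "buffersink" filter, constrained to the encoder's pixel format through AVBufferSinkParams, instead of the private FFSinkContext/ffsink pair; frames are later pulled with av_buffersink_get_buffer_ref(). A minimal sketch of the sink creation, assuming an already-allocated AVFilterGraph and ignoring the FF_API_OLD_VSINK_API fallback that the patch keeps for older libavfilter versions (make_buffersink() is an illustrative name, not from the patch):

#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavutil/mem.h"

/* Create the graph's output sink, restricted to a single pixel format.
 * 'graph' must already exist; 'pix_fmt' is the encoder's target format. */
static int make_buffersink(AVFilterGraph *graph, enum PixelFormat pix_fmt,
                           AVFilterContext **sink)
{
    enum PixelFormat pix_fmts[] = { pix_fmt, PIX_FMT_NONE };  /* NONE-terminated */
    AVBufferSinkParams *params = av_buffersink_params_alloc();
    int ret;

    if (!params)
        return AVERROR(ENOMEM);
    params->pixel_fmts = pix_fmts;
    ret = avfilter_graph_create_filter(sink, avfilter_get_by_name("buffersink"),
                                       "out", NULL, params, graph);
    av_freep(&params);   /* freed right after creation, as in the patch */
    return ret;
}

As in the patch, the params structure is only needed while the filter is created; afterwards the graph owns the sink context.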
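
print_report() now tracks progress as an integer pts in AV_TIME_BASE units (microseconds) rather than floating-point seconds, formats it as HH:MM:SS.cc and derives the bitrate from the same value. The arithmetic, pulled out into a stand-alone helper for clarity (the helper and its name are not in the patch):

#include <stdio.h>
#include <stdint.h>

#define AV_TIME_BASE 1000000   /* microseconds, as in libavutil */

/* Format "time=HH:MM:SS.cc bitrate=...kbits/s" from a pts in AV_TIME_BASE
 * units and a byte count, mirroring the arithmetic added to print_report(). */
static void format_progress(char *buf, size_t buf_size,
                            int64_t pts, int64_t total_size)
{
    int64_t secs = pts / AV_TIME_BASE;
    int64_t us   = pts % AV_TIME_BASE;
    int hours    = (int)(secs / 3600);
    int mins     = (int)((secs / 60) % 60);
    int s        = (int)(secs % 60);
    /* bits divided by milliseconds gives kbits per second */
    double bitrate = pts ? total_size * 8 / (pts / 1000.0) : 0;

    snprintf(buf, buf_size, "time=%02d:%02d:%02d.%02d bitrate=%6.1fkbits/s",
             hours, mins, s, (int)((100 * us) / AV_TIME_BASE), bitrate);
}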