int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
AVCodec *dec;
AVFrame *decoded_frame;
- AVFrame *filtered_frame;
int64_t start; /* time when read started */
/* predicted dts of the next packet read for this stream or (when there are
int showed_multi_packet_warning;
AVDictionary *opts;
+ int resample_height;
+ int resample_width;
+ int resample_pix_fmt;
+
/* a pool of free buffers for decoded data */
FrameBuffer *buffer_pool;
} InputStream;
AVCodec *enc;
int64_t max_frames;
AVFrame *output_frame;
+ AVFrame *filtered_frame;
/* video only */
- int video_resample;
- int resample_height;
- int resample_width;
- int resample_pix_fmt;
AVRational frame_rate;
int force_fps;
int top_field_first;
AVFilterContext *output_video_filter;
AVFilterContext *input_video_filter;
- AVFilterBufferRef *picref;
char *avfilter;
AVFilterGraph *graph;
int stream_copy;
const char *attachment_filename;
int copy_initial_nonkeyframes;
+
+ enum PixelFormat pix_fmts[2];
} OutputStream;
uint64_t limit_filesize;
} OutputFile;
-static InputStream *input_streams = NULL;
-static int nb_input_streams = 0;
-static InputFile *input_files = NULL;
-static int nb_input_files = 0;
+static InputStream **input_streams = NULL;
+static int nb_input_streams = 0;
+static InputFile **input_files = NULL;
+static int nb_input_files = 0;
-static OutputStream *output_streams = NULL;
-static int nb_output_streams = 0;
-static OutputFile *output_files = NULL;
-static int nb_output_files = 0;
+static OutputStream **output_streams = NULL;
+static int nb_output_streams = 0;
+static OutputFile **output_files = NULL;
+static int nb_output_files = 0;
typedef struct OptionsContext {
/* input/output options */
unref_buffer(buf->ist, buf);
}
+/* Select the candidate pixel-format list handed to the filter sink for
+ * this output stream: if the stream's codec context already has a pixel
+ * format (user- or stream-specified), return a one-element list holding
+ * just that format; otherwise fall back to the encoder's full supported
+ * list, or NULL when no encoder list is available.
+ * NOTE(review): the one-element case relies on ost->pix_fmts[1] having
+ * been initialized to PIX_FMT_NONE at stream creation to terminate the
+ * list — confirm against new_output_stream(). */
+static const enum PixelFormat *choose_pixel_fmts(OutputStream *ost)
+{
+ if (ost->st->codec->pix_fmt != PIX_FMT_NONE) {
+ ost->pix_fmts[0] = ost->st->codec->pix_fmt;
+ return ost->pix_fmts;
+ } else if (ost->enc->pix_fmts)
+ return ost->enc->pix_fmts;
+ else
+ return NULL;
+}
+
static int configure_video_filters(InputStream *ist, OutputStream *ost)
{
AVFilterContext *last_filter, *filter;
/** filter graph containing all filters including input & output */
AVCodecContext *codec = ost->st->codec;
- AVCodecContext *icodec = ist->st->codec;
- SinkContext sink_ctx = { .pix_fmt = codec->pix_fmt };
+ SinkContext sink_ctx = { .pix_fmts = choose_pixel_fmts(ost) };
AVRational sample_aspect_ratio;
char args[255];
int ret;
return ret;
last_filter = ost->input_video_filter;
- if (codec->width != icodec->width || codec->height != icodec->height) {
+ if (codec->width || codec->height) {
snprintf(args, 255, "%d:%d:flags=0x%X",
codec->width,
codec->height,
ost->graph->scale_sws_opts = av_strdup(args);
if (ost->avfilter) {
- AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
- AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
+ AVFilterInOut *outputs = avfilter_inout_alloc();
+ AVFilterInOut *inputs = avfilter_inout_alloc();
outputs->name = av_strdup("in");
outputs->filter_ctx = last_filter;
ost->frame_aspect_ratio ? // overridden by the -aspect cli option
av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
ost->output_video_filter->inputs[0]->sample_aspect_ratio;
+ codec->pix_fmt = ost->output_video_filter->inputs[0]->format;
return 0;
}
/* close files */
for (i = 0; i < nb_output_files; i++) {
- AVFormatContext *s = output_files[i].ctx;
+ AVFormatContext *s = output_files[i]->ctx;
if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
avio_close(s->pb);
avformat_free_context(s);
- av_dict_free(&output_files[i].opts);
+ av_dict_free(&output_files[i]->opts);
+ av_freep(&output_files[i]);
}
for (i = 0; i < nb_output_streams; i++) {
- AVBitStreamFilterContext *bsfc = output_streams[i].bitstream_filters;
+ AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
while (bsfc) {
AVBitStreamFilterContext *next = bsfc->next;
av_bitstream_filter_close(bsfc);
bsfc = next;
}
- output_streams[i].bitstream_filters = NULL;
+ output_streams[i]->bitstream_filters = NULL;
- if (output_streams[i].output_frame) {
- AVFrame *frame = output_streams[i].output_frame;
+ if (output_streams[i]->output_frame) {
+ AVFrame *frame = output_streams[i]->output_frame;
if (frame->extended_data != frame->data)
av_freep(&frame->extended_data);
av_freep(&frame);
}
- av_freep(&output_streams[i].avfilter);
+ av_freep(&output_streams[i]->avfilter);
+ av_freep(&output_streams[i]->filtered_frame);
+ av_freep(&output_streams[i]);
}
for (i = 0; i < nb_input_files; i++) {
- avformat_close_input(&input_files[i].ctx);
+ avformat_close_input(&input_files[i]->ctx);
+ av_freep(&input_files[i]);
}
for (i = 0; i < nb_input_streams; i++) {
- av_freep(&input_streams[i].decoded_frame);
- av_freep(&input_streams[i].filtered_frame);
- av_dict_free(&input_streams[i].opts);
- free_buffer_pool(&input_streams[i]);
+ av_freep(&input_streams[i]->decoded_frame);
+ av_dict_free(&input_streams[i]->opts);
+ free_buffer_pool(input_streams[i]);
+ av_freep(&input_streams[i]);
}
if (vstats_file)
}
}
-static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
-{
- if (codec && codec->pix_fmts) {
- const enum PixelFormat *p = codec->pix_fmts;
- if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
- if (st->codec->codec_id == CODEC_ID_MJPEG) {
- p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
- } else if (st->codec->codec_id == CODEC_ID_LJPEG) {
- p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
- PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
- }
- }
- for (; *p != PIX_FMT_NONE; p++) {
- if (*p == st->codec->pix_fmt)
- break;
- }
- if (*p == PIX_FMT_NONE) {
- if (st->codec->pix_fmt != PIX_FMT_NONE)
- av_log(NULL, AV_LOG_WARNING,
- "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
- av_pix_fmt_descriptors[st->codec->pix_fmt].name,
- codec->name,
- av_pix_fmt_descriptors[codec->pix_fmts[0]].name);
- st->codec->pix_fmt = codec->pix_fmts[0];
- }
- }
-}
-
+/* Return the sync timestamp of 'pts' in seconds, measured relative to
+ * the start time of the output file this stream belongs to. */
static double
get_sync_ipts(const OutputStream *ost, int64_t pts)
{
- OutputFile *of = &output_files[ost->file_index];
+ OutputFile *of = output_files[ost->file_index];
 return (double)(pts - of->start_time) / AV_TIME_BASE;
}
static int check_recording_time(OutputStream *ost)
{
- OutputFile *of = &output_files[ost->file_index];
+ OutputFile *of = output_files[ost->file_index];
if (of->recording_time != INT64_MAX &&
av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
audio_buf_size = av_samples_get_buffer_size(NULL, enc->channels,
audio_buf_samples,
- enc->sample_fmt, 32);
+ enc->sample_fmt, 0);
if (audio_buf_size < 0)
return audio_buf_size;
ost->reformat_pair = MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);
}
- if (audio_sync_method) {
+ if (audio_sync_method > 0) {
double delta = get_sync_ipts(ost, ist->last_dts) * enc->sample_rate - ost->sync_opts -
av_fifo_size(ost->fifo) / (enc->channels * osize);
int idelta = delta * dec->sample_rate / enc->sample_rate;
av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
}
}
- } else
+ } else if (audio_sync_method == 0)
ost->sync_opts = lrintf(get_sync_ipts(ost, ist->last_dts) * enc->sample_rate) -
av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong
}
}
-static void print_report(OutputFile *output_files,
- OutputStream *ost_table, int nb_ostreams,
- int is_last_report, int64_t timer_start)
+static void print_report(int is_last_report, int64_t timer_start)
{
char buf[1024];
OutputStream *ost;
}
- oc = output_files[0].ctx;
+ oc = output_files[0]->ctx;
total_size = avio_size(oc->pb);
if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
buf[0] = '\0';
ti1 = 1e10;
vid = 0;
- for (i = 0; i < nb_ostreams; i++) {
+ for (i = 0; i < nb_output_streams; i++) {
float q = -1;
- ost = &ost_table[i];
+ ost = output_streams[i];
enc = ost->st->codec;
if (!ost->stream_copy && enc->coded_frame)
q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
}
}
-static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
+static void flush_encoders(void)
{
int i, ret;
- for (i = 0; i < nb_ostreams; i++) {
- OutputStream *ost = &ost_table[i];
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = output_streams[i];
AVCodecContext *enc = ost->st->codec;
- AVFormatContext *os = output_files[ost->file_index].ctx;
+ AVFormatContext *os = output_files[ost->file_index]->ctx;
int stop_encoding = 0;
if (!ost->encoding_needed)
*/
static int check_output_constraints(InputStream *ist, OutputStream *ost)
{
- OutputFile *of = &output_files[ost->file_index];
- int ist_index = ist - input_streams;
+ OutputFile *of = output_files[ost->file_index];
+ int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
if (ost->source_index != ist_index)
return 0;
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
- OutputFile *of = &output_files[ost->file_index];
+ OutputFile *of = output_files[ost->file_index];
int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
AVPacket opkt;
static void rate_emu_sleep(InputStream *ist)
{
- if (input_files[ist->file_index].rate_emu) {
+ if (input_files[ist->file_index]->rate_emu) {
int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
int64_t now = av_gettime() - ist->start;
if (pts > now)
rate_emu_sleep(ist);
for (i = 0; i < nb_output_streams; i++) {
- OutputStream *ost = &output_streams[i];
+ OutputStream *ost = output_streams[i];
if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
continue;
- do_audio_out(output_files[ost->file_index].ctx, ost, ist, decoded_frame);
+ do_audio_out(output_files[ost->file_index]->ctx, ost, ist, decoded_frame);
}
return ret;
{
AVFrame *decoded_frame, *filtered_frame = NULL;
void *buffer_to_free = NULL;
- int i, ret = 0;
+ int i, ret = 0, resample_changed;
float quality;
int frame_available = 1;
if (ist->st->sample_aspect_ratio.num)
decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
+ resample_changed = ist->resample_width != decoded_frame->width ||
+ ist->resample_height != decoded_frame->height ||
+ ist->resample_pix_fmt != decoded_frame->format;
+ if (resample_changed) {
+ av_log(NULL, AV_LOG_INFO,
+ "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
+ ist->file_index, ist->st->index,
+ ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
+ decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
+
+ ist->resample_width = decoded_frame->width;
+ ist->resample_height = decoded_frame->height;
+ ist->resample_pix_fmt = decoded_frame->format;
+ }
+
for (i = 0; i < nb_output_streams; i++) {
- OutputStream *ost = &output_streams[i];
- int frame_size, resample_changed;
+ OutputStream *ost = output_streams[i];
+ int frame_size;
if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
continue;
- resample_changed = ost->resample_width != decoded_frame->width ||
- ost->resample_height != decoded_frame->height ||
- ost->resample_pix_fmt != decoded_frame->format;
if (resample_changed) {
- av_log(NULL, AV_LOG_INFO,
- "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
- ist->file_index, ist->st->index,
- ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
- decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
-
avfilter_graph_free(&ost->graph);
if (configure_video_filters(ist, ost)) {
av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
exit_program(1);
}
-
- ost->resample_width = decoded_frame->width;
- ost->resample_height = decoded_frame->height;
- ost->resample_pix_fmt = decoded_frame->format;
}
if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame,
decoded_frame->pts, decoded_frame->sample_aspect_ratio);
- if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
+ if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
ret = AVERROR(ENOMEM);
goto fail;
} else
- avcodec_get_frame_defaults(ist->filtered_frame);
- filtered_frame = ist->filtered_frame;
+ avcodec_get_frame_defaults(ost->filtered_frame);
+ filtered_frame = ost->filtered_frame;
frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
while (frame_available) {
+ AVFilterBufferRef *picref;
AVRational ist_pts_tb;
if ((ret = get_filtered_video_frame(ost->output_video_filter,
- filtered_frame, &ost->picref,
+ filtered_frame, &picref,
&ist_pts_tb)) < 0)
goto fail;
- filtered_frame->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
+ filtered_frame->pts = av_rescale_q(picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
if (!ost->frame_aspect_ratio)
- ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
+ ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
- do_video_out(output_files[ost->file_index].ctx, ost, filtered_frame, &frame_size,
+ do_video_out(output_files[ost->file_index]->ctx, ost, filtered_frame, &frame_size,
same_quant ? quality : ost->st->codec->global_quality);
if (vstats_filename && frame_size)
- do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
+ do_video_stats(output_files[ost->file_index]->ctx, ost, frame_size);
frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
- avfilter_unref_buffer(ost->picref);
+ avfilter_unref_buffer(picref);
}
}
rate_emu_sleep(ist);
for (i = 0; i < nb_output_streams; i++) {
- OutputStream *ost = &output_streams[i];
+ OutputStream *ost = output_streams[i];
if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
continue;
- do_subtitle_out(output_files[ost->file_index].ctx, ost, ist, &subtitle, pkt->pts);
+ do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
}
avsubtitle_free(&subtitle);
}
/* pkt = NULL means EOF (needed to flush decoder buffers) */
-static int output_packet(InputStream *ist,
- OutputStream *ost_table, int nb_ostreams,
- const AVPacket *pkt)
+static int output_packet(InputStream *ist, const AVPacket *pkt)
{
int i;
int got_output;
break;
}
}
- for (i = 0; pkt && i < nb_ostreams; i++) {
- OutputStream *ost = &ost_table[i];
+ for (i = 0; pkt && i < nb_output_streams; i++) {
+ OutputStream *ost = output_streams[i];
if (!check_output_constraints(ist, ost) || ost->encoding_needed)
continue;
return 0;
}
-static void print_sdp(OutputFile *output_files, int n)
+/* Print a single SDP session description covering all open output files
+ * to stdout (used when at least one output is RTP). Now reads the
+ * global output_files/nb_output_files instead of taking parameters.
+ * Exits the program if the temporary context array cannot be
+ * allocated. */
+static void print_sdp(void)
{
char sdp[2048];
int i;
- AVFormatContext **avc = av_malloc(sizeof(*avc) * n);
+ AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
if (!avc)
exit_program(1);
- for (i = 0; i < n; i++)
- avc[i] = output_files[i].ctx;
+ for (i = 0; i < nb_output_files; i++)
+ avc[i] = output_files[i]->ctx;
- av_sdp_create(avc, n, sdp, sizeof(sdp));
+ av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
printf("SDP:\n%s\n", sdp);
fflush(stdout);
av_freep(&avc);
}
-static int init_input_stream(int ist_index, OutputStream *output_streams, int nb_output_streams,
- char *error, int error_len)
+/* Fill in a missing channel layout on either side of an audio
+ * decode/encode pair. For each side lacking a layout: borrow the peer's
+ * layout when the channel counts match, otherwise fall back to
+ * av_get_default_channel_layout() for the channel count. A guessed
+ * layout is logged as a warning; if no default layout exists for the
+ * channel count, the program exits. */
+static void get_default_channel_layouts(OutputStream *ost, InputStream *ist)
+{
+ char layout_name[256];
+ AVCodecContext *enc = ost->st->codec;
+ AVCodecContext *dec = ist->st->codec;
+
+ if (!dec->channel_layout) {
+ if (enc->channel_layout && dec->channels == enc->channels) {
+ dec->channel_layout = enc->channel_layout;
+ } else {
+ dec->channel_layout = av_get_default_channel_layout(dec->channels);
+
+ if (!dec->channel_layout) {
+ av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
+ "layout for Input Stream #%d.%d\n", ist->file_index,
+ ist->st->index);
+ exit_program(1);
+ }
+ }
+ av_get_channel_layout_string(layout_name, sizeof(layout_name),
+ dec->channels, dec->channel_layout);
+ av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
+ "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
+ }
+ if (!enc->channel_layout) {
+ if (dec->channels == enc->channels) {
+ /* Exact copy of the (now known) decoder layout is not a guess,
+ * so return without logging a warning.
+ * NOTE(review): this also skips the !enc->channel_layout
+ * re-check below — fine only if dec->channel_layout is
+ * guaranteed nonzero at this point; confirm. */
+ enc->channel_layout = dec->channel_layout;
+ return;
+ } else {
+ enc->channel_layout = av_get_default_channel_layout(enc->channels);
+ }
+ if (!enc->channel_layout) {
+ av_log(NULL, AV_LOG_FATAL, "Unable to find default channel layout "
+ "for Output Stream #%d.%d\n", ost->file_index,
+ ost->st->index);
+ exit_program(1);
+ }
+ av_get_channel_layout_string(layout_name, sizeof(layout_name),
+ enc->channels, enc->channel_layout);
+ av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Output Stream "
+ "#%d.%d : %s\n", ost->file_index, ost->st->index, layout_name);
+ }
+}
+
+
+static int init_input_stream(int ist_index, char *error, int error_len)
{
int i;
- InputStream *ist = &input_streams[ist_index];
+ InputStream *ist = input_streams[ist_index];
if (ist->decoding_needed) {
AVCodec *codec = ist->dec;
if (!codec) {
/* update requested sample format for the decoder based on the
corresponding encoder sample format */
for (i = 0; i < nb_output_streams; i++) {
- OutputStream *ost = &output_streams[i];
+ OutputStream *ost = output_streams[i];
if (ost->source_index == ist_index) {
update_sample_fmt(ist->st->codec, codec, ost->st->codec);
break;
}
assert_codec_experimental(ist->st->codec, 0);
assert_avoptions(ist->opts);
+
+ if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = output_streams[i];
+ if (ost->source_index == ist_index) {
+ if (!ist->st->codec->channel_layout || !ost->st->codec->channel_layout)
+ get_default_channel_layouts(ost, ist);
+ break;
+ }
+ }
+ }
}
ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
return 0;
}
-static int transcode_init(OutputFile *output_files,
- int nb_output_files,
- InputFile *input_files,
- int nb_input_files)
+static int transcode_init(void)
{
int ret = 0, i, j, k;
AVFormatContext *oc;
/* init framerate emulation */
for (i = 0; i < nb_input_files; i++) {
- InputFile *ifile = &input_files[i];
+ InputFile *ifile = input_files[i];
if (ifile->rate_emu)
for (j = 0; j < ifile->nb_streams; j++)
- input_streams[j + ifile->ist_index].start = av_gettime();
+ input_streams[j + ifile->ist_index]->start = av_gettime();
}
/* output stream init */
for (i = 0; i < nb_output_files; i++) {
- oc = output_files[i].ctx;
+ oc = output_files[i]->ctx;
if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
av_dump_format(oc, i, oc->filename, 1);
av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
/* for each output stream, we compute the right encoding parameters */
for (i = 0; i < nb_output_streams; i++) {
- ost = &output_streams[i];
- oc = output_files[ost->file_index].ctx;
- ist = &input_streams[ost->source_index];
+ ost = output_streams[i];
+ oc = output_files[ost->file_index]->ctx;
+ ist = input_streams[ost->source_index];
if (ost->attachment_filename)
continue;
ost->resample_channels = icodec->channels;
break;
case AVMEDIA_TYPE_VIDEO:
- if (codec->pix_fmt == PIX_FMT_NONE)
- codec->pix_fmt = icodec->pix_fmt;
- choose_pixel_fmt(ost->st, ost->enc);
-
- if (ost->st->codec->pix_fmt == PIX_FMT_NONE) {
- av_log(NULL, AV_LOG_FATAL, "Video pixel format is unknown, stream cannot be encoded\n");
- exit_program(1);
- }
-
- if (!codec->width || !codec->height) {
- codec->width = icodec->width;
- codec->height = icodec->height;
- }
-
- ost->video_resample = codec->width != icodec->width ||
- codec->height != icodec->height ||
- codec->pix_fmt != icodec->pix_fmt;
- if (ost->video_resample) {
- codec->bits_per_raw_sample = 0;
- }
-
- ost->resample_height = icodec->height;
- ost->resample_width = icodec->width;
- ost->resample_pix_fmt = icodec->pix_fmt;
-
/*
* We want CFR output if and only if one of those is true:
* 1) user specified output framerate with -r
av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
exit(1);
}
+
+ if (codec->width != icodec->width ||
+ codec->height != icodec->height ||
+ codec->pix_fmt != icodec->pix_fmt) {
+ codec->bits_per_raw_sample = 0;
+ }
+
break;
case AVMEDIA_TYPE_SUBTITLE:
codec->time_base = (AVRational){1, 1000};
/* open each encoder */
for (i = 0; i < nb_output_streams; i++) {
- ost = &output_streams[i];
+ ost = output_streams[i];
if (ost->encoding_needed) {
AVCodec *codec = ost->enc;
- AVCodecContext *dec = input_streams[ost->source_index].st->codec;
+ AVCodecContext *dec = input_streams[ost->source_index]->st->codec;
if (!codec) {
snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d:%d",
ost->st->codec->codec_id, ost->file_index, ost->index);
extra_size += ost->st->codec->extradata_size;
if (ost->st->codec->me_threshold)
- input_streams[ost->source_index].st->codec->debug |= FF_DEBUG_MV;
+ input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
}
}
/* init input streams */
for (i = 0; i < nb_input_streams; i++)
- if ((ret = init_input_stream(i, output_streams, nb_output_streams, error, sizeof(error))) < 0)
+ if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
goto dump_format;
/* discard unused programs */
for (i = 0; i < nb_input_files; i++) {
- InputFile *ifile = &input_files[i];
+ InputFile *ifile = input_files[i];
for (j = 0; j < ifile->ctx->nb_programs; j++) {
AVProgram *p = ifile->ctx->programs[j];
int discard = AVDISCARD_ALL;
for (k = 0; k < p->nb_stream_indexes; k++)
- if (!input_streams[ifile->ist_index + p->stream_index[k]].discard) {
+ if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
discard = AVDISCARD_DEFAULT;
break;
}
/* open files and write file headers */
for (i = 0; i < nb_output_files; i++) {
- oc = output_files[i].ctx;
+ oc = output_files[i]->ctx;
oc->interrupt_callback = int_cb;
- if (avformat_write_header(oc, &output_files[i].opts) < 0) {
+ if (avformat_write_header(oc, &output_files[i]->opts) < 0) {
snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
ret = AVERROR(EINVAL);
goto dump_format;
}
- assert_avoptions(output_files[i].opts);
+ assert_avoptions(output_files[i]->opts);
if (strcmp(oc->oformat->name, "rtp")) {
want_sdp = 0;
}
/* dump the file output parameters - cannot be done before in case
of stream copy */
for (i = 0; i < nb_output_files; i++) {
- av_dump_format(output_files[i].ctx, i, output_files[i].ctx->filename, 1);
+ av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
}
/* dump the stream mapping */
av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
for (i = 0; i < nb_output_streams; i++) {
- ost = &output_streams[i];
+ ost = output_streams[i];
if (ost->attachment_filename) {
/* an attached file */
continue;
}
av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
- input_streams[ost->source_index].file_index,
- input_streams[ost->source_index].st->index,
+ input_streams[ost->source_index]->file_index,
+ input_streams[ost->source_index]->st->index,
ost->file_index,
ost->index);
- if (ost->sync_ist != &input_streams[ost->source_index])
+ if (ost->sync_ist != input_streams[ost->source_index])
av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
ost->sync_ist->file_index,
ost->sync_ist->st->index);
if (ost->stream_copy)
av_log(NULL, AV_LOG_INFO, " (copy)");
else
- av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index].dec ?
- input_streams[ost->source_index].dec->name : "?",
+ av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
+ input_streams[ost->source_index]->dec->name : "?",
ost->enc ? ost->enc->name : "?");
av_log(NULL, AV_LOG_INFO, "\n");
}
}
if (want_sdp) {
- print_sdp(output_files, nb_output_files);
+ print_sdp();
}
return 0;
/*
* The following code is the main loop of the file converter
*/
-static int transcode(OutputFile *output_files,
- int nb_output_files,
- InputFile *input_files,
- int nb_input_files)
+static int transcode(void)
{
int ret, i;
AVFormatContext *is, *os;
if (!(no_packet = av_mallocz(nb_input_files)))
exit_program(1);
- ret = transcode_init(output_files, nb_output_files, input_files, nb_input_files);
+ ret = transcode_init();
if (ret < 0)
goto fail;
OutputFile *of;
int64_t ipts;
double opts;
- ost = &output_streams[i];
- of = &output_files[ost->file_index];
- os = output_files[ost->file_index].ctx;
- ist = &input_streams[ost->source_index];
+ ost = output_streams[i];
+ of = output_files[ost->file_index];
+ os = output_files[ost->file_index]->ctx;
+ ist = input_streams[ost->source_index];
if (ost->is_past_recording_time || no_packet[ist->file_index] ||
(os->pb && avio_tell(os->pb) >= of->limit_filesize))
continue;
opts = ost->st->pts.val * av_q2d(ost->st->time_base);
ipts = ist->last_dts;
- if (!input_files[ist->file_index].eof_reached) {
+ if (!input_files[ist->file_index]->eof_reached) {
if (ipts < ipts_min) {
ipts_min = ipts;
if (input_sync)
if (ost->frame_number >= ost->max_frames) {
int j;
for (j = 0; j < of->ctx->nb_streams; j++)
- output_streams[of->ost_index + j].is_past_recording_time = 1;
+ output_streams[of->ost_index + j]->is_past_recording_time = 1;
continue;
}
}
}
/* read a frame from it and output it in the fifo */
- is = input_files[file_index].ctx;
+ is = input_files[file_index]->ctx;
ret = av_read_frame(is, &pkt);
if (ret == AVERROR(EAGAIN)) {
no_packet[file_index] = 1;
continue;
}
if (ret < 0) {
- input_files[file_index].eof_reached = 1;
+ input_files[file_index]->eof_reached = 1;
if (opt_shortest)
break;
else
}
/* the following test is needed in case new streams appear
dynamically in stream : we ignore them */
- if (pkt.stream_index >= input_files[file_index].nb_streams)
+ if (pkt.stream_index >= input_files[file_index]->nb_streams)
goto discard_packet;
- ist_index = input_files[file_index].ist_index + pkt.stream_index;
- ist = &input_streams[ist_index];
+ ist_index = input_files[file_index]->ist_index + pkt.stream_index;
+ ist = input_streams[ist_index];
if (ist->discard)
goto discard_packet;
if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
+ pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
+ pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts *= ist->ts_scale;
int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
int64_t delta = pkt_dts - ist->next_dts;
if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
- input_files[ist->file_index].ts_offset -= delta;
+ input_files[ist->file_index]->ts_offset -= delta;
av_log(NULL, AV_LOG_DEBUG,
"timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
- delta, input_files[ist->file_index].ts_offset);
+ delta, input_files[ist->file_index]->ts_offset);
pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
}
// fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
- if (output_packet(ist, output_streams, nb_output_streams, &pkt) < 0) {
+ if (output_packet(ist, &pkt) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
ist->file_index, ist->st->index);
av_free_packet(&pkt);
/* dump report by using the output first video and audio streams */
- print_report(output_files, output_streams, nb_output_streams, 0, timer_start);
+ print_report(0, timer_start);
}
/* at the end of stream, we must flush the decoder buffers */
for (i = 0; i < nb_input_streams; i++) {
- ist = &input_streams[i];
+ ist = input_streams[i];
if (ist->decoding_needed) {
- output_packet(ist, output_streams, nb_output_streams, NULL);
+ output_packet(ist, NULL);
}
}
- flush_encoders(output_streams, nb_output_streams);
+ flush_encoders();
term_exit();
/* write the trailer if needed and close file */
for (i = 0; i < nb_output_files; i++) {
- os = output_files[i].ctx;
+ os = output_files[i]->ctx;
av_write_trailer(os);
}
/* dump report by using the first video and audio streams */
- print_report(output_files, output_streams, nb_output_streams, 1, timer_start);
+ print_report(1, timer_start);
/* close each encoder */
for (i = 0; i < nb_output_streams; i++) {
- ost = &output_streams[i];
+ ost = output_streams[i];
if (ost->encoding_needed) {
av_freep(&ost->st->codec->stats_in);
avcodec_close(ost->st->codec);
/* close each decoder */
for (i = 0; i < nb_input_streams; i++) {
- ist = &input_streams[i];
+ ist = input_streams[i];
if (ist->decoding_needed) {
avcodec_close(ist->st->codec);
}
if (output_streams) {
for (i = 0; i < nb_output_streams; i++) {
- ost = &output_streams[i];
+ ost = output_streams[i];
if (ost) {
if (ost->stream_copy)
av_freep(&ost->st->codec->extradata);
}
if (*sync)
sync++;
- for (i = 0; i < input_files[sync_file_idx].nb_streams; i++)
- if (check_stream_specifier(input_files[sync_file_idx].ctx,
- input_files[sync_file_idx].ctx->streams[i], sync) == 1) {
+ for (i = 0; i < input_files[sync_file_idx]->nb_streams; i++)
+ if (check_stream_specifier(input_files[sync_file_idx]->ctx,
+ input_files[sync_file_idx]->ctx->streams[i], sync) == 1) {
sync_stream_idx = i;
break;
}
- if (i == input_files[sync_file_idx].nb_streams) {
+ if (i == input_files[sync_file_idx]->nb_streams) {
av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
"match any streams.\n", arg);
exit_program(1);
for (i = 0; i < o->nb_stream_maps; i++) {
m = &o->stream_maps[i];
if (file_idx == m->file_index &&
- check_stream_specifier(input_files[m->file_index].ctx,
- input_files[m->file_index].ctx->streams[m->stream_index],
+ check_stream_specifier(input_files[m->file_index]->ctx,
+ input_files[m->file_index]->ctx->streams[m->stream_index],
*p == ':' ? p + 1 : p) > 0)
m->disabled = 1;
}
else
- for (i = 0; i < input_files[file_idx].nb_streams; i++) {
- if (check_stream_specifier(input_files[file_idx].ctx, input_files[file_idx].ctx->streams[i],
+ for (i = 0; i < input_files[file_idx]->nb_streams; i++) {
+ if (check_stream_specifier(input_files[file_idx]->ctx, input_files[file_idx]->ctx->streams[i],
*p == ':' ? p + 1 : p) <= 0)
continue;
o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
for (i = 0; i < ic->nb_streams; i++) {
AVStream *st = ic->streams[i];
AVCodecContext *dec = st->codec;
- InputStream *ist;
+ InputStream *ist = av_mallocz(sizeof(*ist));
+
+ if (!ist)
+ exit_program(1);
input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
- ist = &input_streams[nb_input_streams - 1];
+ input_streams[nb_input_streams - 1] = ist;
+
ist->st = st;
ist->file_index = nb_input_files;
ist->discard = 1;
dec->width >>= dec->lowres;
}
+ ist->resample_height = dec->height;
+ ist->resample_width = dec->width;
+ ist->resample_pix_fmt = dec->pix_fmt;
+
break;
case AVMEDIA_TYPE_AUDIO:
case AVMEDIA_TYPE_DATA:
av_dump_format(ic, nb_input_files, filename, 0);
input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
- input_files[nb_input_files - 1].ctx = ic;
- input_files[nb_input_files - 1].ist_index = nb_input_streams - ic->nb_streams;
- input_files[nb_input_files - 1].ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
- input_files[nb_input_files - 1].nb_streams = ic->nb_streams;
- input_files[nb_input_files - 1].rate_emu = o->rate_emu;
+ if (!(input_files[nb_input_files - 1] = av_mallocz(sizeof(*input_files[0]))))
+ exit_program(1);
+
+ input_files[nb_input_files - 1]->ctx = ic;
+ input_files[nb_input_files - 1]->ist_index = nb_input_streams - ic->nb_streams;
+ input_files[nb_input_files - 1]->ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
+ input_files[nb_input_files - 1]->nb_streams = ic->nb_streams;
+ input_files[nb_input_files - 1]->rate_emu = o->rate_emu;
for (i = 0; i < o->nb_dump_attachment; i++) {
int j;
output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
nb_output_streams + 1);
- ost = &output_streams[nb_output_streams - 1];
+ if (!(ost = av_mallocz(sizeof(*ost))))
+ exit_program(1);
+ output_streams[nb_output_streams - 1] = ost;
+
ost->file_index = nb_output_files;
ost->index = idx;
ost->st = st;
st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
+
+ ost->pix_fmts[0] = ost->pix_fmts[1] = PIX_FMT_NONE;
+
return ost;
}
if (index >= 0) {\
ost = new_ ## type ## _stream(o, oc);\
ost->source_index = index;\
- ost->sync_ist = &input_streams[index];\
- input_streams[index].discard = 0;\
- input_streams[index].st->discard = AVDISCARD_NONE;\
+ ost->sync_ist = input_streams[index];\
+ input_streams[index]->discard = 0;\
+ input_streams[index]->st->discard = AVDISCARD_NONE;\
}
/* video: highest resolution */
if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
int area = 0, idx = -1;
for (i = 0; i < nb_input_streams; i++) {
- ist = &input_streams[i];
+ ist = input_streams[i];
if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
ist->st->codec->width * ist->st->codec->height > area) {
area = ist->st->codec->width * ist->st->codec->height;
if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
int channels = 0, idx = -1;
for (i = 0; i < nb_input_streams; i++) {
- ist = &input_streams[i];
+ ist = input_streams[i];
if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
ist->st->codec->channels > channels) {
channels = ist->st->codec->channels;
/* subtitles: pick first */
if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
for (i = 0; i < nb_input_streams; i++)
- if (input_streams[i].st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+ if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
NEW_STREAM(subtitle, i);
break;
}
if (map->disabled)
continue;
- ist = &input_streams[input_files[map->file_index].ist_index + map->stream_index];
+ ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
switch (ist->st->codec->codec_type) {
case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
exit_program(1);
}
- ost->source_index = input_files[map->file_index].ist_index + map->stream_index;
- ost->sync_ist = &input_streams[input_files[map->sync_file_index].ist_index +
+ ost->source_index = input_files[map->file_index]->ist_index + map->stream_index;
+ ost->sync_ist = input_streams[input_files[map->sync_file_index]->ist_index +
map->sync_stream_index];
ist->discard = 0;
ist->st->discard = AVDISCARD_NONE;
}
output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
- output_files[nb_output_files - 1].ctx = oc;
- output_files[nb_output_files - 1].ost_index = nb_output_streams - oc->nb_streams;
- output_files[nb_output_files - 1].recording_time = o->recording_time;
+ if (!(output_files[nb_output_files - 1] = av_mallocz(sizeof(*output_files[0]))))
+ exit_program(1);
+
+ output_files[nb_output_files - 1]->ctx = oc;
+ output_files[nb_output_files - 1]->ost_index = nb_output_streams - oc->nb_streams;
+ output_files[nb_output_files - 1]->recording_time = o->recording_time;
if (o->recording_time != INT64_MAX)
oc->duration = o->recording_time;
- output_files[nb_output_files - 1].start_time = o->start_time;
- output_files[nb_output_files - 1].limit_filesize = o->limit_filesize;
- av_dict_copy(&output_files[nb_output_files - 1].opts, format_opts, 0);
+ output_files[nb_output_files - 1]->start_time = o->start_time;
+ output_files[nb_output_files - 1]->limit_filesize = o->limit_filesize;
+ av_dict_copy(&output_files[nb_output_files - 1]->opts, format_opts, 0);
/* check filename in case of an image number is expected */
if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
/* open the file */
if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
&oc->interrupt_callback,
- &output_files[nb_output_files - 1].opts)) < 0) {
+ &output_files[nb_output_files - 1]->opts)) < 0) {
print_error(filename, err);
exit_program(1);
}
if (o->mux_preload) {
uint8_t buf[64];
snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
- av_dict_set(&output_files[nb_output_files - 1].opts, "preload", buf, 0);
+ av_dict_set(&output_files[nb_output_files - 1]->opts, "preload", buf, 0);
}
oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
oc->flags |= AVFMT_FLAG_NONBLOCK;
av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
exit_program(1);
}
- copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index].ctx, o);
+ copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index]->ctx, o);
}
/* copy chapters */
/* copy chapters from the first input file that has them*/
o->chapters_input_file = -1;
for (i = 0; i < nb_input_files; i++)
- if (input_files[i].ctx->nb_chapters) {
+ if (input_files[i]->ctx->nb_chapters) {
o->chapters_input_file = i;
break;
}
}
}
if (o->chapters_input_file >= 0)
- copy_chapters(&input_files[o->chapters_input_file], &output_files[nb_output_files - 1],
+ copy_chapters(input_files[o->chapters_input_file], output_files[nb_output_files - 1],
!o->metadata_chapters_manual);
/* copy global metadata by default */
if (!o->metadata_global_manual && nb_input_files)
- av_dict_copy(&oc->metadata, input_files[0].ctx->metadata,
+ av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
AV_DICT_DONT_OVERWRITE);
if (!o->metadata_streams_manual)
- for (i = output_files[nb_output_files - 1].ost_index; i < nb_output_streams; i++) {
+ for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
InputStream *ist;
- if (output_streams[i].source_index < 0) /* this is true e.g. for attached files */
+ if (output_streams[i]->source_index < 0) /* this is true e.g. for attached files */
continue;
- ist = &input_streams[output_streams[i].source_index];
- av_dict_copy(&output_streams[i].st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
+ ist = input_streams[output_streams[i]->source_index];
+ av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
}
/* process manually set metadata */
if (nb_input_files) {
int i, j, fr;
for (j = 0; j < nb_input_files; j++) {
- for (i = 0; i < input_files[j].nb_streams; i++) {
- AVCodecContext *c = input_files[j].ctx->streams[i]->codec;
+ for (i = 0; i < input_files[j]->nb_streams; i++) {
+ AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
if (c->codec_type != AVMEDIA_TYPE_VIDEO)
continue;
fr = c->time_base.den * 1000 / c->time_base.num;
opt_cpuflags("cpuflags", argv[idx + 1]);
}
+/* Handle the -channel_layout[:stream_spec] option.
+ * Parses the named layout with av_get_channel_layout(), stores its numeric
+ * value via opt_default() under the original option name, and then also
+ * synthesizes a matching "ac[:stream_spec]" option so the channel *count*
+ * stays consistent with the requested layout.
+ * Returns 0 on success, a negative AVERROR code on failure. */
+static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg)
+{
+    char layout_str[32];
+    char *stream_str;
+    char *ac_str;
+    int ret, channels, ac_str_size;
+    uint64_t layout;
+
+    layout = av_get_channel_layout(arg);
+    if (!layout) {
+        av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg);
+        return AVERROR(EINVAL);
+    }
+    /* layout_str is reused twice: first for the 64-bit layout mask... */
+    snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
+    ret = opt_default(opt, layout_str);
+    if (ret < 0)
+        return ret;
+
+    /* set 'ac' option based on channel layout */
+    channels = av_get_channel_layout_nb_channels(layout);
+    /* ...then for the derived channel count. */
+    snprintf(layout_str, sizeof(layout_str), "%d", channels);
+    /* Carry over any per-stream specifier (":a:0" etc.) from the original
+     * option name onto the synthesized "ac" option. */
+    stream_str = strchr(opt, ':');
+    /* 3 = strlen("ac") + terminating NUL; the specifier is appended after. */
+    ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0);
+    ac_str = av_mallocz(ac_str_size);
+    if (!ac_str)
+        return AVERROR(ENOMEM);
+    av_strlcpy(ac_str, "ac", 3);
+    if (stream_str)
+        av_strlcat(ac_str, stream_str, ac_str_size);
+    /* Re-enter the option parser as if "-ac[:spec] <channels>" were given. */
+    ret = parse_option(o, ac_str, layout_str, options);
+    av_free(ac_str);
+
+    return ret;
+}
+
#define OFFSET(x) offsetof(OptionsContext, x)
static const OptionDef options[] = {
/* main options */
{ "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
{ "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
{ "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
+ { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" },
/* subtitle options */
{ "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
}
ti = getutime();
- if (transcode(output_files, nb_output_files, input_files, nb_input_files) < 0)
+ if (transcode() < 0)
exit_program(1);
ti = getutime() - ti;
if (do_benchmark) {