#include "libavutil/libm.h"
#include "libavformat/os_support.h"
+#if CONFIG_AVFILTER
+# include "libavfilter/avfilter.h"
+# include "libavfilter/avfiltergraph.h"
+# include "libavfilter/graphparser.h"
+# include "libavfilter/vsrc_buffer.h"
+#endif
+
#if HAVE_SYS_RESOURCE_H
#include <sys/types.h>
#include <sys/time.h>
static float frame_aspect_ratio = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static enum SampleFormat audio_sample_fmt = SAMPLE_FMT_NONE;
-static int frame_padtop = 0;
-static int frame_padbottom = 0;
-static int frame_padleft = 0;
-static int frame_padright = 0;
-static int padcolor[3] = {16,128,128}; /* default to black */
static int frame_topBand = 0;
static int frame_bottomBand = 0;
static int frame_leftBand = 0;
static int loop_input = 0;
static int loop_output = AVFMT_NOOUTPUTLOOP;
static int qp_hist = 0;
+#if CONFIG_AVFILTER
+static char *vfilters = NULL;
+AVFilterGraph *graph = NULL;
+#endif
static int intra_only = 0;
static int audio_sample_rate = 44100;
int original_leftBand;
int original_rightBand;
- /* padding area sizes */
- int video_pad;
- int padtop;
- int padbottom;
- int padleft;
- int padright;
-
/* audio only */
int audio_resample;
ReSampleContext *resample; /* for audio resampling */
int is_start; /* is 1 at the start and after a discontinuity */
int showed_multi_packet_warning;
int is_past_recording_time;
+#if CONFIG_AVFILTER
+ AVFilterContext *out_video_filter;
+ AVFilterContext *input_video_filter;
+ AVFrame *filter_frame;
+ int has_filter_frame;
+ AVFilterPicRef *picref;
+#endif
} AVInputStream;
typedef struct AVInputFile {
static struct termios oldtty;
#endif
+#if CONFIG_AVFILTER
+typedef struct {
+ int pix_fmt;
+} FilterOutPriv;
+
+
+/* Init callback for the "ffmpeg_output" sink filter: stores the target
+ * pixel format (passed via 'opaque') in the filter's private context. */
+static int output_init(AVFilterContext *ctx, const char *args, void *opaque)
+{
+    FilterOutPriv *priv = ctx->priv;
+
+    if(!opaque) return -1;  /* a pixel format pointer must be supplied */
+
+    priv->pix_fmt = *((int *)opaque);
+
+    return 0;
+}
+
+/* end_frame callback: intentionally a no-op — frames are pulled from the
+ * sink explicitly through get_filtered_video_pic() instead. */
+static void output_end_frame(AVFilterLink *link)
+{
+}
+
+/* Restrict the sink's accepted input to the single pixel format the
+ * encoder expects, so the graph inserts conversion where needed. */
+static int output_query_formats(AVFilterContext *ctx)
+{
+    FilterOutPriv *priv = ctx->priv;
+    enum PixelFormat pix_fmts[] = { priv->pix_fmt, PIX_FMT_NONE };
+
+    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
+    return 0;
+}
+
+/* Pull one filtered frame from the sink's input link.
+ * On success: stores the picture reference in *picref (ownership passes
+ * to the caller, who must avfilter_unref_pic() it), mirrors its
+ * data/linesize pointers into pic2, copies the interlacing fields,
+ * sets *pts, and returns 1.  Returns -1 if no frame is available. */
+static int get_filtered_video_pic(AVFilterContext *ctx,
+                                  AVFilterPicRef **picref, AVFrame *pic2,
+                                  uint64_t *pts)
+{
+    AVFilterPicRef *pic;
+
+    if(avfilter_request_frame(ctx->inputs[0]))
+        return -1;
+    if(!(pic = ctx->inputs[0]->cur_pic))
+        return -1;
+    *picref = pic;
+    /* clear cur_pic so the link does not also release the reference */
+    ctx->inputs[0]->cur_pic = NULL;
+
+    *pts          = pic->pts;
+
+    /* pic2 only borrows the buffers; the picref owns them */
+    memcpy(pic2->data,     pic->data,     sizeof(pic->data));
+    memcpy(pic2->linesize, pic->linesize, sizeof(pic->linesize));
+    pic2->interlaced_frame = pic->interlaced;
+    pic2->top_field_first  = pic->top_field_first;
+
+    return 1;
+}
+
+/* Sink filter definition: terminates the graph so ffmpeg can pull
+ * frames out of it (one video input pad, no output pads). */
+static AVFilter output_filter =
+{
+    .name      = "ffmpeg_output",
+
+    .priv_size = sizeof(FilterOutPriv),
+    .init      = output_init,
+
+    .query_formats = output_query_formats,
+
+    .inputs    = (AVFilterPad[]) {{ .name          = "default",
+                                    .type          = AVMEDIA_TYPE_VIDEO,
+                                    .end_frame     = output_end_frame,
+                                    .min_perms     = AV_PERM_READ, },
+                                  { .name = NULL }},
+    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
+};
+
+/* Build the per-stream video filter graph:
+ *   buffer source -> [crop] -> [scale] -> [user -vf chain] -> sink
+ * crop/scale are inserted only when the output stream requires them.
+ * On success stores the source/sink contexts in the input stream and
+ * updates the encoder dimensions from the configured sink link.
+ * Returns 0 on success, -1 on any failure. */
+static int configure_filters(AVInputStream *ist, AVOutputStream *ost)
+{
+    AVFilterContext *last_filter, *filter;
+    /** filter graph containing all filters including input & output */
+    AVCodecContext *codec = ost->st->codec;
+    AVCodecContext *icodec = ist->st->codec;
+    char args[255];
+
+    /* NOTE(review): av_mallocz result is not checked here — a NULL graph
+     * would be dereferenced below; confirm whether OOM handling is wanted */
+    graph = av_mallocz(sizeof(AVFilterGraph));
+
+    if (!(ist->input_video_filter = avfilter_open(avfilter_get_by_name("buffer"), "src")))
+        return -1;
+    if (!(ist->out_video_filter = avfilter_open(&output_filter, "out")))
+        return -1;
+
+    /* source is initialized with the decoded stream geometry/format */
+    snprintf(args, 255, "%d:%d:%d", ist->st->codec->width,
+             ist->st->codec->height, ist->st->codec->pix_fmt);
+    if (avfilter_init_filter(ist->input_video_filter, args, NULL))
+        return -1;
+    /* sink receives the encoder's pixel format through its opaque arg */
+    if (avfilter_init_filter(ist->out_video_filter, NULL, &codec->pix_fmt))
+        return -1;
+
+    /* add input and output filters to the overall graph */
+    avfilter_graph_add_filter(graph, ist->input_video_filter);
+    avfilter_graph_add_filter(graph, ist->out_video_filter);
+
+    last_filter = ist->input_video_filter;
+
+    /* replicate the legacy -crop* options with a crop filter */
+    if (ost->video_crop) {
+        snprintf(args, 255, "%d:%d:%d:%d", ost->leftBand, ost->topBand,
+                 codec->width,
+                 codec->height);
+        filter = avfilter_open(avfilter_get_by_name("crop"), NULL);
+        if (!filter)
+            return -1;
+        if (avfilter_init_filter(filter, args, NULL))
+            return -1;
+        if (avfilter_link(last_filter, 0, filter, 0))
+            return -1;
+        last_filter = filter;
+        avfilter_graph_add_filter(graph, last_filter);
+    }
+
+    /* insert a scale filter when the (cropped) input size differs from
+     * the requested output size */
+    if((codec->width !=
+        icodec->width - (frame_leftBand + frame_rightBand)) ||
+       (codec->height != icodec->height - (frame_topBand + frame_bottomBand))) {
+        snprintf(args, 255, "%d:%d:flags=0x%X",
+                 codec->width,
+                 codec->height,
+                 (int)av_get_int(sws_opts, "sws_flags", NULL));
+        filter = avfilter_open(avfilter_get_by_name("scale"), NULL);
+        if (!filter)
+            return -1;
+        if (avfilter_init_filter(filter, args, NULL))
+            return -1;
+        if (avfilter_link(last_filter, 0, filter, 0))
+            return -1;
+        last_filter = filter;
+        avfilter_graph_add_filter(graph, last_filter);
+    }
+
+    if (vfilters) {
+        /* splice the user-supplied -vf chain between last_filter and the
+         * sink; the "in"/"out" labels name the open pads for the parser.
+         * NOTE(review): av_malloc results are not checked before use */
+        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
+        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
+
+        outputs->name    = av_strdup("in");
+        outputs->filter  = last_filter;
+        outputs->pad_idx = 0;
+        outputs->next    = NULL;
+
+        inputs->name    = av_strdup("out");
+        inputs->filter  = ist->out_video_filter;
+        inputs->pad_idx = 0;
+        inputs->next    = NULL;
+
+        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
+            return -1;
+        av_freep(&vfilters);
+    } else {
+        /* no user chain: connect directly to the sink */
+        if (avfilter_link(last_filter, 0, ist->out_video_filter, 0) < 0)
+            return -1;
+    }
+
+    /* propagate the swscale flags to any auto-inserted scale filters */
+    snprintf(args, sizeof(args), "flags=0x%X", (int)av_get_int(sws_opts, "sws_flags", NULL));
+    graph->scale_sws_opts = av_strdup(args);
+
+    /* configure all the filter links */
+    if (avfilter_graph_check_validity(graph, NULL))
+        return -1;
+    if (avfilter_graph_config_formats(graph, NULL))
+        return -1;
+    if (avfilter_graph_config_links(graph, NULL))
+        return -1;
+
+    /* the graph may have changed the geometry (crop/scale/user filters);
+     * make the encoder match what the sink will actually receive */
+    codec->width  = ist->out_video_filter->inputs[0]->w;
+    codec->height = ist->out_video_filter->inputs[0]->h;
+
+    return 0;
+}
+#endif /* CONFIG_AVFILTER */
+
static void term_exit(void)
{
#if HAVE_TERMIOS_H
allocated_audio_buf_size= allocated_audio_out_size= 0;
av_free(samples);
+#if CONFIG_AVFILTER
+ avfilter_uninit();
+#endif
+
if (received_sigterm) {
fprintf(stderr,
"Received signal %d: terminating.\n",
}
}
+/* Pick the entry of codec->supported_samplerates closest to the rate
+ * requested in st->codec->sample_rate, warning when it differs from the
+ * request.  Does nothing when the codec accepts arbitrary rates
+ * (supported_samplerates == NULL). */
+static void choose_sample_rate(AVStream *st, AVCodec *codec)
+{
+    if(codec && codec->supported_samplerates){
+        const int *p= codec->supported_samplerates;
+        int best= 0;  /* initialized: the list could start with its 0 terminator */
+        int best_dist=INT_MAX;
+        for(; *p; p++){
+            int dist= abs(st->codec->sample_rate - *p);
+            if(dist < best_dist){
+                best_dist= dist;
+                best= *p;
+            }
+        }
+        if(best_dist){
+            av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best);
+        }
+        st->codec->sample_rate= best;
+    }
+}
+
static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
{
if(codec && codec->pix_fmts){
int *frame_size)
{
int nb_frames, i, ret;
+#if !CONFIG_AVFILTER
int64_t topBand, bottomBand, leftBand, rightBand;
+#endif
AVFrame *final_picture, *formatted_picture, *resampling_dst, *padding_src;
AVFrame picture_crop_temp, picture_pad_temp;
AVCodecContext *enc, *dec;
if (nb_frames <= 0)
return;
+#if CONFIG_AVFILTER
+ formatted_picture = in_picture;
+#else
if (ost->video_crop) {
if (av_picture_crop((AVPicture *)&picture_crop_temp, (AVPicture *)in_picture, dec->pix_fmt, ost->topBand, ost->leftBand) < 0) {
fprintf(stderr, "error cropping picture\n");
} else {
formatted_picture = in_picture;
}
+#endif
final_picture = formatted_picture;
padding_src = formatted_picture;
resampling_dst = &ost->pict_tmp;
- if (ost->video_pad) {
- final_picture = &ost->pict_tmp;
- if (ost->video_resample) {
- if (av_picture_crop((AVPicture *)&picture_pad_temp, (AVPicture *)final_picture, enc->pix_fmt, ost->padtop, ost->padleft) < 0) {
- fprintf(stderr, "error padding picture\n");
- if (exit_on_error)
- av_exit(1);
- return;
- }
- resampling_dst = &picture_pad_temp;
- }
- }
if( (ost->resample_height != (ist->st->codec->height - (ost->topBand + ost->bottomBand)))
|| (ost->resample_width != (ist->st->codec->width - (ost->leftBand + ost->rightBand)))
av_exit(1);
}
+#if !CONFIG_AVFILTER
if (ost->video_resample) {
padding_src = NULL;
final_picture = &ost->pict_tmp;
ist->st->codec->width - (ost->leftBand + ost->rightBand),
ist->st->codec->height - (ost->topBand + ost->bottomBand),
ist->st->codec->pix_fmt,
- ost->st->codec->width - (ost->padleft + ost->padright),
- ost->st->codec->height - (ost->padtop + ost->padbottom),
+ ost->st->codec->width,
+ ost->st->codec->height,
ost->st->codec->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (ost->img_resample_ctx == NULL) {
sws_scale(ost->img_resample_ctx, formatted_picture->data, formatted_picture->linesize,
0, ost->resample_height, resampling_dst->data, resampling_dst->linesize);
}
-
- if (ost->video_pad) {
- av_picture_pad((AVPicture*)final_picture, (AVPicture *)padding_src,
- enc->height, enc->width, enc->pix_fmt,
- ost->padtop, ost->padbottom, ost->padleft, ost->padright, padcolor);
- }
+#endif
/* duplicates frame if needed */
for(i=0;i<nb_frames;i++) {
static unsigned int samples_size= 0;
AVSubtitle subtitle, *subtitle_to_free;
int got_subtitle;
+#if CONFIG_AVFILTER
+ int frame_available;
+#endif
+
AVPacket avpkt;
int bps = av_get_bits_per_sample_format(ist->st->codec->sample_fmt)>>3;
&buffer_to_free);
}
+#if CONFIG_AVFILTER
+ if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ist->input_video_filter) {
+ // add it to be filtered
+ av_vsrc_buffer_add_frame(ist->input_video_filter, &picture,
+ ist->pts,
+ ist->st->codec->sample_aspect_ratio);
+ }
+#endif
+
// preprocess audio (volume)
if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
if (audio_volume != 256) {
if (pts > now)
usleep(pts - now);
}
-
+#if CONFIG_AVFILTER
+ frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
+ !ist->out_video_filter || avfilter_poll_frame(ist->out_video_filter->inputs[0]);
+#endif
/* if output time reached then transcode raw format,
encode packets and output them */
if (start_time == 0 || ist->pts >= start_time)
+#if CONFIG_AVFILTER
+ while (frame_available) {
+ if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ist->out_video_filter)
+ get_filtered_video_pic(ist->out_video_filter, &ist->picref, &picture, &ist->pts);
+#endif
for(i=0;i<nb_ostreams;i++) {
int frame_size;
do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
break;
case AVMEDIA_TYPE_VIDEO:
+#if CONFIG_AVFILTER
+ ost->st->codec->sample_aspect_ratio = ist->picref->pixel_aspect;
+#endif
do_video_out(os, ost, ist, &picture, &frame_size);
if (vstats_filename && frame_size)
do_video_stats(os, ost, frame_size);
av_free_packet(&opkt);
}
}
+#if CONFIG_AVFILTER
+ frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
+ ist->out_video_filter && avfilter_poll_frame(ist->out_video_filter->inputs[0]);
+#endif
}
+
+#if CONFIG_AVFILTER
+ if(ist->picref)
+ avfilter_unref_pic(ist->picref);
+ }
+#endif
av_free(buffer_to_free);
/* XXX: allocate the subtitles in the codec ? */
if (subtitle_to_free) {
* The following code is the main loop of the file converter
*/
static int av_transcode(AVFormatContext **output_files,
- int nb_output_files,
- AVFormatContext **input_files,
- int nb_input_files,
- AVStreamMap *stream_maps, int nb_stream_maps)
+ int nb_output_files,
+ AVFormatContext **input_files,
+ int nb_input_files,
+ AVStreamMap *stream_maps, int nb_stream_maps)
{
int ret = 0, i, j, k, n, nb_istreams = 0, nb_ostreams = 0;
AVFormatContext *is, *os;
/* for each output stream, we compute the right encoding parameters */
for(i=0;i<nb_ostreams;i++) {
- AVMetadataTag *t = NULL, *lang = NULL;
+ AVMetadataTag *t = NULL;
ost = ost_table[i];
os = output_files[ost->file_index];
ist = ist_table[ost->source_index];
codec = ost->st->codec;
icodec = ist->st->codec;
- if (av_metadata_get(ist->st->metadata, "language", NULL, 0))
- lang = av_metadata_get(ost->st->metadata, "language", NULL, 0);
while ((t = av_metadata_get(ist->st->metadata, "", t, AV_METADATA_IGNORE_SUFFIX))) {
- if (lang && !strcmp(t->key, "language"))
- continue;
- av_metadata_set2(&ost->st->metadata, t->key, t->value, 0);
+ av_metadata_set2(&ost->st->metadata, t->key, t->value, AV_METADATA_DONT_OVERWRITE);
}
ost->st->disposition = ist->st->disposition;
av_exit(1);
}
ost->video_crop = ((frame_leftBand + frame_rightBand + frame_topBand + frame_bottomBand) != 0);
- ost->video_pad = ((frame_padleft + frame_padright + frame_padtop + frame_padbottom) != 0);
ost->video_resample = ((codec->width != icodec->width -
- (frame_leftBand + frame_rightBand) +
- (frame_padleft + frame_padright)) ||
+ (frame_leftBand + frame_rightBand)) ||
(codec->height != icodec->height -
- (frame_topBand + frame_bottomBand) +
- (frame_padtop + frame_padbottom)) ||
+ (frame_topBand + frame_bottomBand)) ||
(codec->pix_fmt != icodec->pix_fmt));
if (ost->video_crop) {
ost->topBand = ost->original_topBand = frame_topBand;
ost->leftBand = ost->original_leftBand = frame_leftBand;
ost->rightBand = ost->original_rightBand = frame_rightBand;
}
- if (ost->video_pad) {
- ost->padtop = frame_padtop;
- ost->padleft = frame_padleft;
- ost->padbottom = frame_padbottom;
- ost->padright = frame_padright;
- if (!ost->video_resample) {
- avcodec_get_frame_defaults(&ost->pict_tmp);
- if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
- codec->width, codec->height))
- goto fail;
- }
- }
if (ost->video_resample) {
avcodec_get_frame_defaults(&ost->pict_tmp);
if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
icodec->width - (frame_leftBand + frame_rightBand),
icodec->height - (frame_topBand + frame_bottomBand),
icodec->pix_fmt,
- codec->width - (frame_padleft + frame_padright),
- codec->height - (frame_padtop + frame_padbottom),
+ codec->width,
+ codec->height,
codec->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (ost->img_resample_ctx == NULL) {
av_exit(1);
}
+#if !CONFIG_AVFILTER
ost->original_height = icodec->height;
ost->original_width = icodec->width;
-
+#endif
codec->bits_per_raw_sample= 0;
}
ost->resample_height = icodec->height - (frame_topBand + frame_bottomBand);
ost->resample_pix_fmt= icodec->pix_fmt;
ost->encoding_needed = 1;
ist->decoding_needed = 1;
+
+#if CONFIG_AVFILTER
+ if (configure_filters(ist, ost)) {
+ fprintf(stderr, "Error opening filters!\n");
+ exit(1);
+ }
+#endif
break;
case AVMEDIA_TYPE_SUBTITLE:
ost->encoding_needed = 1;
mtag=NULL;
while((mtag=av_metadata_get(in_file->metadata, "", mtag, AV_METADATA_IGNORE_SUFFIX)))
- av_metadata_set(&out_file->metadata, mtag->key, mtag->value);
+ av_metadata_set2(&out_file->metadata, mtag->key, mtag->value, AV_METADATA_DONT_OVERWRITE);
av_metadata_conv(out_file, out_file->oformat->metadata_conv,
in_file->iformat->metadata_conv);
}
avcodec_close(ist->st->codec);
}
}
+#if CONFIG_AVFILTER
+ if (graph) {
+ avfilter_graph_destroy(graph);
+ av_freep(&graph);
+ }
+#endif
/* finished ! */
ret = 0;
}
}
-static void opt_pad_color(const char *arg) {
- /* Input is expected to be six hex digits similar to
- how colors are expressed in html tags (but without the #) */
- int rgb = strtol(arg, NULL, 16);
- int r,g,b;
-
- r = (rgb >> 16);
- g = ((rgb >> 8) & 255);
- b = (rgb & 255);
-
- padcolor[0] = RGB_TO_Y(r,g,b);
- padcolor[1] = RGB_TO_U(r,g,b,0);
- padcolor[2] = RGB_TO_V(r,g,b,0);
-}
-
-static void opt_frame_pad_top(const char *arg)
-{
- frame_padtop = atoi(arg);
- if (frame_padtop < 0) {
- fprintf(stderr, "Incorrect top pad size\n");
- av_exit(1);
- }
-}
-
-static void opt_frame_pad_bottom(const char *arg)
-{
- frame_padbottom = atoi(arg);
- if (frame_padbottom < 0) {
- fprintf(stderr, "Incorrect bottom pad size\n");
- av_exit(1);
- }
-}
-
-
-static void opt_frame_pad_left(const char *arg)
-{
- frame_padleft = atoi(arg);
- if (frame_padleft < 0) {
- fprintf(stderr, "Incorrect left pad size\n");
- av_exit(1);
- }
-}
-
-
-static void opt_frame_pad_right(const char *arg)
-{
- frame_padright = atoi(arg);
- if (frame_padright < 0) {
- fprintf(stderr, "Incorrect right pad size\n");
- av_exit(1);
- }
+/* Stub handler for the removed -pad*/-padcolor options: points the user
+ * at the pad video filter and exits. */
+static void opt_pad(const char *arg) {
+    fprintf(stderr, "Please use vf=pad\n");
+    av_exit(1);
+}
static void opt_frame_pix_fmt(const char *arg)
ap->channels = audio_channels;
ap->time_base.den = frame_rate.num;
ap->time_base.num = frame_rate.den;
- ap->width = frame_width + frame_padleft + frame_padright;
- ap->height = frame_height + frame_padtop + frame_padbottom;
+ ap->width = frame_width;
+ ap->height = frame_height;
ap->pix_fmt = frame_pix_fmt;
// ap->sample_fmt = audio_sample_fmt; //FIXME:not implemented in libavformat
ap->channel = video_channel;
if(me_threshold)
enc->debug |= FF_DEBUG_MV;
- if (enc->time_base.den != rfps || enc->time_base.num != rfps_base) {
+ if (enc->time_base.den != rfps*enc->ticks_per_frame || enc->time_base.num != rfps_base) {
if (verbose >= 0)
fprintf(stderr,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
video_enc->time_base.den = fps.num;
video_enc->time_base.num = fps.den;
- video_enc->width = frame_width + frame_padright + frame_padleft;
- video_enc->height = frame_height + frame_padtop + frame_padbottom;
+ video_enc->width = frame_width;
+ video_enc->height = frame_height;
video_enc->sample_aspect_ratio = av_d2q(frame_aspect_ratio*video_enc->height/video_enc->width, 255);
video_enc->pix_fmt = frame_pix_fmt;
st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
}
nb_ocodecs++;
if (video_language) {
- av_metadata_set(&st->metadata, "language", video_language);
+ av_metadata_set2(&st->metadata, "language", video_language, 0);
av_freep(&video_language);
}
if (audio_stream_copy) {
st->stream_copy = 1;
audio_enc->channels = audio_channels;
+ audio_enc->sample_rate = audio_sample_rate;
} else {
AVCodec *codec;
}
audio_enc->channels = audio_channels;
audio_enc->sample_fmt = audio_sample_fmt;
+ audio_enc->sample_rate = audio_sample_rate;
audio_enc->channel_layout = channel_layout;
if (avcodec_channel_layout_num_channels(channel_layout) != audio_channels)
audio_enc->channel_layout = 0;
choose_sample_fmt(st, codec);
+ choose_sample_rate(st, codec);
}
nb_ocodecs++;
- audio_enc->sample_rate = audio_sample_rate;
audio_enc->time_base= (AVRational){1, audio_sample_rate};
if (audio_language) {
- av_metadata_set(&st->metadata, "language", audio_language);
+ av_metadata_set2(&st->metadata, "language", audio_language, 0);
av_freep(&audio_language);
}
nb_ocodecs++;
if (subtitle_language) {
- av_metadata_set(&st->metadata, "language", subtitle_language);
+ av_metadata_set2(&st->metadata, "language", subtitle_language, 0);
av_freep(&subtitle_language);
}
static void opt_output_file(const char *filename)
{
AVFormatContext *oc;
- int use_video, use_audio, use_subtitle;
+ int err, use_video, use_audio, use_subtitle;
int input_has_video, input_has_audio, input_has_subtitle;
AVFormatParameters params, *ap = ¶ms;
AVOutputFormat *file_oformat;
oc->timestamp = rec_timestamp;
for(; metadata_count>0; metadata_count--){
- av_metadata_set(&oc->metadata, metadata[metadata_count-1].key,
- metadata[metadata_count-1].value);
+ av_metadata_set2(&oc->metadata, metadata[metadata_count-1].key,
+ metadata[metadata_count-1].value, 0);
}
av_metadata_conv(oc, oc->oformat->metadata_conv, NULL);
}
}
/* open the file */
- if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
- fprintf(stderr, "Could not open '%s'\n", filename);
+ if ((err = url_fopen(&oc->pb, filename, URL_WRONLY)) < 0) {
+ print_error(filename, err);
av_exit(1);
}
}
FILE *f=NULL;
char filename[1000], tmp[1000], tmp2[1000], line[1000];
int i;
- const char *base[2]= { getenv("HOME"),
+ const char *base[3]= { getenv("FFMPEG_DATADIR"),
+ getenv("HOME"),
FFMPEG_DATADIR,
};
if (*opt != 'f') {
- for(i=!base[0]; i<2 && !f; i++){
- snprintf(filename, sizeof(filename), "%s%s/%s.ffpreset", base[i], i ? "" : "/.ffmpeg", arg);
+ for(i=0; i<3 && !f; i++){
+ if(!base[i])
+ continue;
+ snprintf(filename, sizeof(filename), "%s%s/%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", arg);
f= fopen(filename, "r");
if(!f){
char *codec_name= *opt == 'v' ? video_codec_name :
*opt == 'a' ? audio_codec_name :
subtitle_codec_name;
- snprintf(filename, sizeof(filename), "%s%s/%s-%s.ffpreset", base[i], i ? "" : "/.ffmpeg", codec_name, arg);
+ snprintf(filename, sizeof(filename), "%s%s/%s-%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", codec_name, arg);
f= fopen(filename, "r");
}
}
{ "cropbottom", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop_bottom}, "set bottom crop band size (in pixels)", "size" },
{ "cropleft", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop_left}, "set left crop band size (in pixels)", "size" },
{ "cropright", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop_right}, "set right crop band size (in pixels)", "size" },
- { "padtop", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_pad_top}, "set top pad band size (in pixels)", "size" },
- { "padbottom", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_pad_bottom}, "set bottom pad band size (in pixels)", "size" },
- { "padleft", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_pad_left}, "set left pad band size (in pixels)", "size" },
- { "padright", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_pad_right}, "set right pad band size (in pixels)", "size" },
- { "padcolor", HAS_ARG | OPT_VIDEO, {(void*)opt_pad_color}, "set color of pad bands (Hex 000000 thru FFFFFF)", "color" },
+ { "padtop", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "set top pad band size (in pixels)", "size" },
+ { "padbottom", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "set bottom pad band size (in pixels)", "size" },
+ { "padleft", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "set left pad band size (in pixels)", "size" },
+ { "padright", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "set right pad band size (in pixels)", "size" },
+ { "padcolor", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "set color of pad bands (Hex 000000 thru FFFFFF)", "color" },
{ "intra", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_only}, "use only intra frames"},
{ "vn", OPT_BOOL | OPT_VIDEO, {(void*)&video_disable}, "disable video" },
{ "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
{ "psnr", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_psnr}, "calculate PSNR of compressed frames" },
{ "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
{ "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
+#if CONFIG_AVFILTER
+ { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
+#endif
{ "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_intra_matrix}, "specify intra matrix coeffs", "matrix" },
{ "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_inter_matrix}, "specify inter matrix coeffs", "matrix" },
{ "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_top_field_first}, "top=1/bottom=0/auto=-1 field first", "" },
int64_t ti;
avcodec_register_all();
+#if CONFIG_AVDEVICE
avdevice_register_all();
+#endif
+#if CONFIG_AVFILTER
+ avfilter_register_all();
+#endif
av_register_all();
#if HAVE_ISATTY
ti = getutime();
if (av_transcode(output_files, nb_output_files, input_files, nb_input_files,
- stream_maps, nb_stream_maps) < 0)
+ stream_maps, nb_stream_maps) < 0)
av_exit(1);
ti = getutime() - ti;
if (do_benchmark) {