avfilter: add scale2ref filter
author    Michael Niedermayer <michael@niedermayer.cc>
          Sat, 15 Aug 2015 16:38:06 +0000 (18:38 +0200)
committer Michael Niedermayer <michael@niedermayer.cc>
          Mon, 17 Aug 2015 15:18:01 +0000 (17:18 +0200)
This filter can be used to scale one stream to match, or be based on, another
stream; this is useful for scaling subtitles or other content to be overlaid.

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
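
A minimal usage sketch (not part of this commit; the input files main.mp4 and
logo.png are hypothetical): the first input of scale2ref is the stream to be
scaled, the second is the reference; the scaled stream comes out on the first
output and the reference passes through unchanged on the second, so it can be
reused directly as the overlay base.

    ffmpeg -i main.mp4 -i logo.png -filter_complex \
        "[1:v][0:v]scale2ref[logo][base];[base][logo]overlay" output.mp4
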
configure
doc/filters.texi
libavfilter/Makefile
libavfilter/allfilters.c
libavfilter/version.h
libavfilter/vf_scale.c

index 381b09e519c7a452230e1bdfb27703ce3ab9bea8..55cc7fb64b2d844e03ba6f875b587ea22edf476a 100755 (executable)
--- a/configure
+++ b/configure
@@ -2780,6 +2780,7 @@ repeatfields_filter_deps="gpl"
 resample_filter_deps="avresample"
 sab_filter_deps="gpl swscale"
 scale_filter_deps="swscale"
+scale2ref_filter_deps="swscale"
 select_filter_select="pixelutils"
 smartblur_filter_deps="gpl swscale"
 showcqt_filter_deps="avcodec"
@@ -5831,6 +5832,7 @@ enabled removelogo_filter   && prepend avfilter_deps "avformat avcodec swscale"
 enabled resample_filter && prepend avfilter_deps "avresample"
 enabled sab_filter          && prepend avfilter_deps "swscale"
 enabled scale_filter    && prepend avfilter_deps "swscale"
+enabled scale2ref_filter    && prepend avfilter_deps "swscale"
 enabled showspectrum_filter && prepend avfilter_deps "avcodec"
 enabled smartblur_filter    && prepend avfilter_deps "swscale"
 enabled subtitles_filter    && prepend avfilter_deps "avformat avcodec"
index 2dd7d22355ee370bf6dddb606c301d5eedc0995e..7b386eff0f3d8b58af04ce48b9c8986a5555d6b4 100644 (file)
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -9122,6 +9122,23 @@ If the specified expression is not valid, it is kept at its current
 value.
 @end table
 
+@section scale2ref
+
+Scale (resize) the input video, based on a reference video.
+
+See the scale filter for available options; scale2ref supports the same
+options, but uses the reference video instead of the main input as basis.
+
+@subsection Examples
+
+@itemize
+@item
+Scale a subtitle stream to match the main video in size before overlaying
+@example
+'scale2ref[b][a];[a][b]overlay'
+@end example
+@end itemize
+
 @section separatefields
 
 The @code{separatefields} takes a frame-based video input and splits
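
A supplementary sketch of the documented pattern (not part of the patch;
watermark.png and the 10-pixel margins are hypothetical): because config_props
below evaluates the size expressions against the reference link for scale2ref,
variables such as iw and ih describe the reference video rather than the
stream being scaled. Here the watermark is fitted into a quarter-of-reference
box and placed in the bottom-right corner:

    ffmpeg -i main.mp4 -i watermark.png -filter_complex \
        "[1:v][0:v]scale2ref=w=iw/4:h=ih/4[wm][base];[base][wm]overlay=x=W-w-10:y=H-h-10" \
        output.mp4
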
index d431f9925dfc1af2ebe914e71795546b75150434..75581f2cf7fb438ebd70d5f5d9cceb9c607c6cad 100644 (file)
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -197,6 +197,7 @@ OBJS-$(CONFIG_ROTATE_FILTER)                 += vf_rotate.o
 OBJS-$(CONFIG_SEPARATEFIELDS_FILTER)         += vf_separatefields.o
 OBJS-$(CONFIG_SAB_FILTER)                    += vf_sab.o
 OBJS-$(CONFIG_SCALE_FILTER)                  += vf_scale.o
+OBJS-$(CONFIG_SCALE2REF_FILTER)              += vf_scale.o
 OBJS-$(CONFIG_SELECT_FILTER)                 += f_select.o
 OBJS-$(CONFIG_SENDCMD_FILTER)                += f_sendcmd.o
 OBJS-$(CONFIG_SETDAR_FILTER)                 += vf_aspect.o
index 2900e88d3dcebe4a9d4bbd9cc6c994b885e6301e..ce5138220d580f974c3c1f20caf2e1d06909c7e5 100644 (file)
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -211,6 +211,7 @@ void avfilter_register_all(void)
     REGISTER_FILTER(ROTATE,         rotate,         vf);
     REGISTER_FILTER(SAB,            sab,            vf);
     REGISTER_FILTER(SCALE,          scale,          vf);
+    REGISTER_FILTER(SCALE2REF,      scale2ref,      vf);
     REGISTER_FILTER(SELECT,         select,         vf);
     REGISTER_FILTER(SENDCMD,        sendcmd,        vf);
     REGISTER_FILTER(SEPARATEFIELDS, separatefields, vf);
index 993d9ee1aebaa3dfa83cbf78701c357a9c4300e1..90a6dc05b1647fb0e653efa0cc5901e9967b254c 100644 (file)
--- a/libavfilter/version.h
+++ b/libavfilter/version.h
@@ -30,7 +30,7 @@
 #include "libavutil/version.h"
 
 #define LIBAVFILTER_VERSION_MAJOR  5
-#define LIBAVFILTER_VERSION_MINOR  33
+#define LIBAVFILTER_VERSION_MINOR  34
 #define LIBAVFILTER_VERSION_MICRO 100
 
 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
index ac2efe3a9ba5a8a487d4a86c4ea723902b1116f0..702adc41626777a272ef0225882600cef1a337a1 100644 (file)
--- a/libavfilter/vf_scale.c
+++ b/libavfilter/vf_scale.c
@@ -111,6 +111,8 @@ typedef struct ScaleContext {
     int force_original_aspect_ratio;
 } ScaleContext;
 
+AVFilter ff_vf_scale2ref;
+
 static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
 {
     ScaleContext *scale = ctx->priv;
@@ -234,7 +236,10 @@ static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
 static int config_props(AVFilterLink *outlink)
 {
     AVFilterContext *ctx = outlink->src;
-    AVFilterLink *inlink = outlink->src->inputs[0];
+    AVFilterLink *inlink0 = outlink->src->inputs[0];
+    AVFilterLink *inlink  = ctx->filter == &ff_vf_scale2ref ?
+                            outlink->src->inputs[1] :
+                            outlink->src->inputs[0];
     enum AVPixelFormat outfmt = outlink->format;
     ScaleContext *scale = ctx->priv;
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
@@ -343,8 +348,9 @@ static int config_props(AVFilterLink *outlink)
     if (scale->isws[1])
         sws_freeContext(scale->isws[1]);
     scale->isws[0] = scale->isws[1] = scale->sws = NULL;
-    if (inlink->w == outlink->w && inlink->h == outlink->h &&
-        inlink->format == outlink->format)
+    if (inlink0->w == outlink->w &&
+        inlink0->h == outlink->h &&
+        inlink0->format == outlink->format)
         ;
     else {
         struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
@@ -356,9 +362,9 @@ static int config_props(AVFilterLink *outlink)
             if (!*s)
                 return AVERROR(ENOMEM);
 
-            av_opt_set_int(*s, "srcw", inlink ->w, 0);
-            av_opt_set_int(*s, "srch", inlink ->h >> !!i, 0);
-            av_opt_set_int(*s, "src_format", inlink->format, 0);
+            av_opt_set_int(*s, "srcw", inlink0 ->w, 0);
+            av_opt_set_int(*s, "srch", inlink0 ->h >> !!i, 0);
+            av_opt_set_int(*s, "src_format", inlink0->format, 0);
             av_opt_set_int(*s, "dstw", outlink->w, 0);
             av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
             av_opt_set_int(*s, "dst_format", outfmt, 0);
@@ -374,7 +380,7 @@ static int config_props(AVFilterLink *outlink)
             /* Override YUV420P settings to have the correct (MPEG-2) chroma positions
              * MPEG-2 chroma positions are used by convention
              * XXX: support other 4:2:0 pixel formats */
-            if (inlink->format == AV_PIX_FMT_YUV420P) {
+            if (inlink0->format == AV_PIX_FMT_YUV420P) {
                 scale->in_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
             }
 
@@ -415,6 +421,17 @@ fail:
     return ret;
 }
 
+static int config_props_ref(AVFilterLink *outlink)
+{
+    AVFilterLink *inlink = outlink->src->inputs[1];
+
+    outlink->w = inlink->w;
+    outlink->h = inlink->h;
+    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+
+    return 0;
+}
+
 static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
 {
     ScaleContext *scale = link->dst->priv;
@@ -542,6 +559,13 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
     return ff_filter_frame(outlink, out);
 }
 
+static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
+{
+    AVFilterLink *outlink = link->dst->outputs[1];
+
+    return ff_filter_frame(outlink, in);
+}
+
 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                            char *res, int res_len, int flags)
 {
@@ -643,3 +667,53 @@ AVFilter ff_vf_scale = {
     .outputs         = avfilter_vf_scale_outputs,
     .process_command = process_command,
 };
+
+static const AVClass scale2ref_class = {
+    .class_name       = "scale2ref",
+    .item_name        = av_default_item_name,
+    .option           = scale_options,
+    .version          = LIBAVUTIL_VERSION_INT,
+    .category         = AV_CLASS_CATEGORY_FILTER,
+    .child_class_next = child_class_next,
+};
+
+static const AVFilterPad avfilter_vf_scale2ref_inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = filter_frame,
+    },
+    {
+        .name         = "ref",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = filter_frame_ref,
+    },
+    { NULL }
+};
+
+static const AVFilterPad avfilter_vf_scale2ref_outputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .config_props = config_props,
+    },
+    {
+        .name         = "ref",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .config_props = config_props_ref,
+    },
+    { NULL }
+};
+
+AVFilter ff_vf_scale2ref = {
+    .name            = "scale2ref",
+    .description     = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
+    .init_dict       = init_dict,
+    .uninit          = uninit,
+    .query_formats   = query_formats,
+    .priv_size       = sizeof(ScaleContext),
+    .priv_class      = &scale2ref_class,
+    .inputs          = avfilter_vf_scale2ref_inputs,
+    .outputs         = avfilter_vf_scale2ref_outputs,
+    .process_command = process_command,
+};