X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavfilter%2Fvf_scale.c;h=3add31bace47572856800c9e2be2e317e958eeeb;hb=d7e0d428faaa04e2fd850eca82f314ca2ad3dfe5;hp=41ddec766188486c17eaf668ff1321158a0ea582;hpb=b9d479bac45bb4ce42b3f685bce5cad8d89f8f71;p=ffmpeg diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c index 41ddec76618..3add31bace4 100644 --- a/libavfilter/vf_scale.c +++ b/libavfilter/vf_scale.c @@ -29,9 +29,10 @@ #include "avfilter.h" #include "formats.h" #include "internal.h" -#include "scale.h" +#include "scale_eval.h" #include "video.h" #include "libavutil/avstring.h" +#include "libavutil/eval.h" #include "libavutil/internal.h" #include "libavutil/mathematics.h" #include "libavutil/opt.h" @@ -41,6 +42,62 @@ #include "libavutil/avassert.h" #include "libswscale/swscale.h" +static const char *const var_names[] = { + "in_w", "iw", + "in_h", "ih", + "out_w", "ow", + "out_h", "oh", + "a", + "sar", + "dar", + "hsub", + "vsub", + "ohsub", + "ovsub", + "n", + "t", + "pos", + "main_w", + "main_h", + "main_a", + "main_sar", + "main_dar", "mdar", + "main_hsub", + "main_vsub", + "main_n", + "main_t", + "main_pos", + NULL +}; + +enum var_name { + VAR_IN_W, VAR_IW, + VAR_IN_H, VAR_IH, + VAR_OUT_W, VAR_OW, + VAR_OUT_H, VAR_OH, + VAR_A, + VAR_SAR, + VAR_DAR, + VAR_HSUB, + VAR_VSUB, + VAR_OHSUB, + VAR_OVSUB, + VAR_N, + VAR_T, + VAR_POS, + VAR_S2R_MAIN_W, + VAR_S2R_MAIN_H, + VAR_S2R_MAIN_A, + VAR_S2R_MAIN_SAR, + VAR_S2R_MAIN_DAR, VAR_S2R_MDAR, + VAR_S2R_MAIN_HSUB, + VAR_S2R_MAIN_VSUB, + VAR_S2R_MAIN_N, + VAR_S2R_MAIN_T, + VAR_S2R_MAIN_POS, + VARS_NB +}; + enum EvalMode { EVAL_MODE_INIT, EVAL_MODE_FRAME, @@ -72,6 +129,10 @@ typedef struct ScaleContext { char *w_expr; ///< width expression string char *h_expr; ///< height expression string + AVExpr *w_pexpr; + AVExpr *h_pexpr; + double var_values[VARS_NB]; + char *flags_str; char *in_color_matrix; @@ -96,6 +157,119 @@ typedef struct ScaleContext { AVFilter ff_vf_scale2ref; +static int config_props(AVFilterLink *outlink); + +static int check_exprs(AVFilterContext *ctx) +{ + ScaleContext *scale = ctx->priv; + unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 }; + + if (!scale->w_pexpr && !scale->h_pexpr) + return AVERROR(EINVAL); + + if (scale->w_pexpr) + av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB); + if (scale->h_pexpr) + av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB); + + if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) { + av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr); + return AVERROR(EINVAL); + } + + if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) { + av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr); + return AVERROR(EINVAL); + } + + if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) && + (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) { + av_log(ctx, AV_LOG_WARNING, "Circular references detected for width '%s' and height '%s' - possibly invalid.\n", scale->w_expr, scale->h_expr); + } + + if (ctx->filter != &ff_vf_scale2ref && + (vars_w[VAR_S2R_MAIN_W] || vars_h[VAR_S2R_MAIN_W] || + vars_w[VAR_S2R_MAIN_H] || vars_h[VAR_S2R_MAIN_H] || + vars_w[VAR_S2R_MAIN_A] || vars_h[VAR_S2R_MAIN_A] || + vars_w[VAR_S2R_MAIN_SAR] || vars_h[VAR_S2R_MAIN_SAR] || + vars_w[VAR_S2R_MAIN_DAR] || vars_h[VAR_S2R_MAIN_DAR] || + vars_w[VAR_S2R_MDAR] || vars_h[VAR_S2R_MDAR] || + vars_w[VAR_S2R_MAIN_HSUB] || vars_h[VAR_S2R_MAIN_HSUB] || + vars_w[VAR_S2R_MAIN_VSUB] || vars_h[VAR_S2R_MAIN_VSUB] || + vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] || + vars_w[VAR_S2R_MAIN_T] || 
vars_h[VAR_S2R_MAIN_T] || + vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) { + av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref variables are not valid in scale filter.\n"); + return AVERROR(EINVAL); + } + + if (scale->eval_mode == EVAL_MODE_INIT && + (vars_w[VAR_N] || vars_h[VAR_N] || + vars_w[VAR_T] || vars_h[VAR_T] || + vars_w[VAR_POS] || vars_h[VAR_POS] || + vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] || + vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] || + vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) { + av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', 'pos' are not valid in init eval_mode.\n"); + return AVERROR(EINVAL); + } + + return 0; +} + +static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args) +{ + ScaleContext *scale = ctx->priv; + int ret, is_inited = 0; + char *old_str_expr = NULL; + AVExpr *old_pexpr = NULL; + + if (str_expr) { + old_str_expr = av_strdup(str_expr); + if (!old_str_expr) + return AVERROR(ENOMEM); + av_opt_set(scale, var, args, 0); + } + + if (*pexpr_ptr) { + old_pexpr = *pexpr_ptr; + *pexpr_ptr = NULL; + is_inited = 1; + } + + ret = av_expr_parse(pexpr_ptr, args, var_names, + NULL, NULL, NULL, NULL, 0, ctx); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var, args); + goto revert; + } + + ret = check_exprs(ctx); + if (ret < 0) + goto revert; + + if (is_inited && (ret = config_props(ctx->outputs[0])) < 0) + goto revert; + + av_expr_free(old_pexpr); + old_pexpr = NULL; + av_freep(&old_str_expr); + + return 0; + +revert: + av_expr_free(*pexpr_ptr); + *pexpr_ptr = NULL; + if (old_str_expr) { + av_opt_set(scale, var, old_str_expr, 0); + av_free(old_str_expr); + } + if (old_pexpr) + *pexpr_ptr = old_pexpr; + + return ret; +} + static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts) { ScaleContext *scale = ctx->priv; @@ -127,6 +301,14 @@ static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts) if (!scale->h_expr) av_opt_set(scale, "h", "ih", 0); + ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr); + if (ret < 0) + return ret; + + ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr); + if (ret < 0) + return ret; + av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n", scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced); @@ -149,6 +331,9 @@ static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts) static av_cold void uninit(AVFilterContext *ctx) { ScaleContext *scale = ctx->priv; + av_expr_free(scale->w_pexpr); + av_expr_free(scale->h_pexpr); + scale->w_pexpr = scale->h_pexpr = NULL; sws_freeContext(scale->sws); sws_freeContext(scale->isws[0]); sws_freeContext(scale->isws[1]); @@ -173,7 +358,7 @@ static int query_formats(AVFilterContext *ctx) return ret; } } - if ((ret = ff_formats_ref(formats, &ctx->inputs[0]->out_formats)) < 0) + if ((ret = ff_formats_ref(formats, &ctx->inputs[0]->outcfg.formats)) < 0) return ret; } if (ctx->outputs[0]) { @@ -187,7 +372,7 @@ static int query_formats(AVFilterContext *ctx) return ret; } } - if ((ret = ff_formats_ref(formats, &ctx->outputs[0]->in_formats)) < 0) + if ((ret = ff_formats_ref(formats, &ctx->outputs[0]->incfg.formats)) < 0) return ret; } @@ -218,6 +403,81 @@ static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace) return sws_getCoefficients(colorspace); } +static int scale_eval_dimensions(AVFilterContext 
*ctx) +{ + ScaleContext *scale = ctx->priv; + const char scale2ref = ctx->filter == &ff_vf_scale2ref; + const AVFilterLink *inlink = scale2ref ? ctx->inputs[1] : ctx->inputs[0]; + const AVFilterLink *outlink = ctx->outputs[0]; + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); + const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format); + char *expr; + int eval_w, eval_h; + int ret; + double res; + const AVPixFmtDescriptor *main_desc; + const AVFilterLink *main_link; + + if (scale2ref) { + main_link = ctx->inputs[0]; + main_desc = av_pix_fmt_desc_get(main_link->format); + } + + scale->var_values[VAR_IN_W] = scale->var_values[VAR_IW] = inlink->w; + scale->var_values[VAR_IN_H] = scale->var_values[VAR_IH] = inlink->h; + scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN; + scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN; + scale->var_values[VAR_A] = (double) inlink->w / inlink->h; + scale->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? + (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1; + scale->var_values[VAR_DAR] = scale->var_values[VAR_A] * scale->var_values[VAR_SAR]; + scale->var_values[VAR_HSUB] = 1 << desc->log2_chroma_w; + scale->var_values[VAR_VSUB] = 1 << desc->log2_chroma_h; + scale->var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w; + scale->var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h; + + if (scale2ref) { + scale->var_values[VAR_S2R_MAIN_W] = main_link->w; + scale->var_values[VAR_S2R_MAIN_H] = main_link->h; + scale->var_values[VAR_S2R_MAIN_A] = (double) main_link->w / main_link->h; + scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ? + (double) main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1; + scale->var_values[VAR_S2R_MAIN_DAR] = scale->var_values[VAR_S2R_MDAR] = + scale->var_values[VAR_S2R_MAIN_A] * scale->var_values[VAR_S2R_MAIN_SAR]; + scale->var_values[VAR_S2R_MAIN_HSUB] = 1 << main_desc->log2_chroma_w; + scale->var_values[VAR_S2R_MAIN_VSUB] = 1 << main_desc->log2_chroma_h; + } + + res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL); + eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res; + + res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL); + if (isnan(res)) { + expr = scale->h_expr; + ret = AVERROR(EINVAL); + goto fail; + } + eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int) res == 0 ? inlink->h : (int) res; + + res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL); + if (isnan(res)) { + expr = scale->w_expr; + ret = AVERROR(EINVAL); + goto fail; + } + eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? 
inlink->w : (int) res; + + scale->w = eval_w; + scale->h = eval_h; + + return 0; + +fail: + av_log(ctx, AV_LOG_ERROR, + "Error when evaluating the expression '%s'.\n", expr); + return ret; +} + static int config_props(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; @@ -228,55 +488,29 @@ static int config_props(AVFilterLink *outlink) enum AVPixelFormat outfmt = outlink->format; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); ScaleContext *scale = ctx->priv; - int w, h; int ret; - if ((ret = ff_scale_eval_dimensions(ctx, - scale->w_expr, scale->h_expr, - inlink, outlink, - &w, &h)) < 0) + if ((ret = scale_eval_dimensions(ctx)) < 0) goto fail; - /* Note that force_original_aspect_ratio may overwrite the previous set - * dimensions so that it is not divisible by the set factors anymore - * unless force_divisible_by is defined as well */ - if (scale->force_original_aspect_ratio) { - int tmp_w = av_rescale(h, inlink->w, inlink->h); - int tmp_h = av_rescale(w, inlink->h, inlink->w); - - if (scale->force_original_aspect_ratio == 1) { - w = FFMIN(tmp_w, w); - h = FFMIN(tmp_h, h); - if (scale->force_divisible_by > 1) { - // round down - w = w / scale->force_divisible_by * scale->force_divisible_by; - h = h / scale->force_divisible_by * scale->force_divisible_by; - } - } else { - w = FFMAX(tmp_w, w); - h = FFMAX(tmp_h, h); - if (scale->force_divisible_by > 1) { - // round up - w = (w + scale->force_divisible_by - 1) / scale->force_divisible_by * scale->force_divisible_by; - h = (h + scale->force_divisible_by - 1) / scale->force_divisible_by * scale->force_divisible_by; - } - } - } + ff_scale_adjust_dimensions(inlink, &scale->w, &scale->h, + scale->force_original_aspect_ratio, + scale->force_divisible_by); - if (w > INT_MAX || h > INT_MAX || - (h * inlink->w) > INT_MAX || - (w * inlink->h) > INT_MAX) + if (scale->w > INT_MAX || + scale->h > INT_MAX || + (scale->h * inlink->w) > INT_MAX || + (scale->w * inlink->h) > INT_MAX) av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n"); - outlink->w = w; - outlink->h = h; + outlink->w = scale->w; + outlink->h = scale->h; /* TODO: make algorithm configurable */ scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL; if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8; - scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PAL || - av_pix_fmt_desc_get(outfmt)->flags & FF_PSEUDOPAL; + scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PAL; if (scale->sws) sws_freeContext(scale->sws); @@ -400,8 +634,8 @@ static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, s int vsub= ((i+1)&2) ? 
scale->vsub : 0; in_stride[i] = cur_pic->linesize[i] * mul; out_stride[i] = out_buf->linesize[i] * mul; - in[i] = cur_pic->data[i] + ((y>>vsub)+field) * cur_pic->linesize[i]; - out[i] = out_buf->data[i] + field * out_buf->linesize[i]; + in[i] = FF_PTR_ADD(cur_pic->data[i], ((y>>vsub)+field) * cur_pic->linesize[i]); + out[i] = FF_PTR_ADD(out_buf->data[i], field * out_buf->linesize[i]); } if (scale->input_is_pal) in[1] = cur_pic->data[1]; @@ -414,28 +648,63 @@ static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, s static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out) { - ScaleContext *scale = link->dst->priv; - AVFilterLink *outlink = link->dst->outputs[0]; + AVFilterContext *ctx = link->dst; + ScaleContext *scale = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; AVFrame *out; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format); char buf[32]; int in_range; + int frame_changed; *frame_out = NULL; if (in->colorspace == AVCOL_SPC_YCGCO) av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n"); - if ( in->width != link->w - || in->height != link->h - || in->format != link->format - || in->sample_aspect_ratio.den != link->sample_aspect_ratio.den || in->sample_aspect_ratio.num != link->sample_aspect_ratio.num) { + frame_changed = in->width != link->w || + in->height != link->h || + in->format != link->format || + in->sample_aspect_ratio.den != link->sample_aspect_ratio.den || + in->sample_aspect_ratio.num != link->sample_aspect_ratio.num; + + if (scale->eval_mode == EVAL_MODE_FRAME || frame_changed) { int ret; + unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 }; + + av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB); + av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB); + + if (scale->eval_mode == EVAL_MODE_FRAME && + !frame_changed && + ctx->filter != &ff_vf_scale2ref && + !(vars_w[VAR_N] || vars_w[VAR_T] || vars_w[VAR_POS]) && + !(vars_h[VAR_N] || vars_h[VAR_T] || vars_h[VAR_POS]) && + scale->w && scale->h) + goto scale; if (scale->eval_mode == EVAL_MODE_INIT) { snprintf(buf, sizeof(buf)-1, "%d", outlink->w); av_opt_set(scale, "w", buf, 0); snprintf(buf, sizeof(buf)-1, "%d", outlink->h); av_opt_set(scale, "h", buf, 0); + + ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr); + if (ret < 0) + return ret; + + ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr); + if (ret < 0) + return ret; + } + + if (ctx->filter == &ff_vf_scale2ref) { + scale->var_values[VAR_S2R_MAIN_N] = link->frame_count_out; + scale->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base); + scale->var_values[VAR_S2R_MAIN_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos; + } else { + scale->var_values[VAR_N] = link->frame_count_out; + scale->var_values[VAR_T] = TS2T(in->pts, link->time_base); + scale->var_values[VAR_POS] = in->pkt_pos == -1 ? 
NAN : in->pkt_pos; } link->dst->inputs[0]->format = in->format; @@ -449,6 +718,7 @@ static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out) return ret; } +scale: if (!scale->sws) { *frame_out = in; return 0; @@ -555,7 +825,31 @@ static int filter_frame(AVFilterLink *link, AVFrame *in) static int filter_frame_ref(AVFilterLink *link, AVFrame *in) { + ScaleContext *scale = link->dst->priv; AVFilterLink *outlink = link->dst->outputs[1]; + int frame_changed; + + frame_changed = in->width != link->w || + in->height != link->h || + in->format != link->format || + in->sample_aspect_ratio.den != link->sample_aspect_ratio.den || + in->sample_aspect_ratio.num != link->sample_aspect_ratio.num; + + if (frame_changed) { + link->format = in->format; + link->w = in->width; + link->h = in->height; + link->sample_aspect_ratio.num = in->sample_aspect_ratio.num; + link->sample_aspect_ratio.den = in->sample_aspect_ratio.den; + + config_props_ref(outlink); + } + + if (scale->eval_mode == EVAL_MODE_FRAME) { + scale->var_values[VAR_N] = link->frame_count_out; + scale->var_values[VAR_T] = TS2T(in->pts, link->time_base); + scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos; + } return ff_filter_frame(outlink, in); } @@ -564,39 +858,50 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar char *res, int res_len, int flags) { ScaleContext *scale = ctx->priv; - int ret; + char *str_expr; + AVExpr **pexpr_ptr; + int ret, w, h; - if ( !strcmp(cmd, "width") || !strcmp(cmd, "w") - || !strcmp(cmd, "height") || !strcmp(cmd, "h")) { + w = !strcmp(cmd, "width") || !strcmp(cmd, "w"); + h = !strcmp(cmd, "height") || !strcmp(cmd, "h"); - int old_w = scale->w; - int old_h = scale->h; - AVFilterLink *outlink = ctx->outputs[0]; + if (w || h) { + str_expr = w ? scale->w_expr : scale->h_expr; + pexpr_ptr = w ? &scale->w_pexpr : &scale->h_pexpr; - av_opt_set(scale, cmd, args, 0); - if ((ret = config_props(outlink)) < 0) { - scale->w = old_w; - scale->h = old_h; - } + ret = scale_parse_expr(ctx, str_expr, pexpr_ptr, cmd, args); } else ret = AVERROR(ENOSYS); + if (ret < 0) + av_log(ctx, AV_LOG_ERROR, "Failed to process command. Continuing with existing parameters.\n"); + return ret; } +#if FF_API_CHILD_CLASS_NEXT static const AVClass *child_class_next(const AVClass *prev) { return prev ? NULL : sws_get_class(); } +#endif + +static const AVClass *child_class_iterate(void **iter) +{ + const AVClass *c = *iter ? 
NULL : sws_get_class(); + *iter = (void*)(uintptr_t)c; + return c; +} #define OFFSET(x) offsetof(ScaleContext, x) #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +#define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption scale_options[] = { - { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS }, - { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS }, - { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS }, - { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS }, + { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS }, + { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS }, + { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS }, + { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS }, { "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS }, { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_BOOL, {.i64 = 0 }, -1, 1, FLAGS }, { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS }, @@ -645,7 +950,10 @@ static const AVClass scale_class = { .option = scale_options, .version = LIBAVUTIL_VERSION_INT, .category = AV_CLASS_CATEGORY_FILTER, +#if FF_API_CHILD_CLASS_NEXT .child_class_next = child_class_next, +#endif + .child_class_iterate = child_class_iterate, }; static const AVFilterPad avfilter_vf_scale_inputs[] = { @@ -685,7 +993,10 @@ static const AVClass scale2ref_class = { .option = scale_options, .version = LIBAVUTIL_VERSION_INT, .category = AV_CLASS_CATEGORY_FILTER, +#if FF_API_CHILD_CLASS_NEXT .child_class_next = child_class_next, +#endif + .child_class_iterate = child_class_iterate, }; static const AVFilterPad avfilter_vf_scale2ref_inputs[] = {
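
(Editor's note, not part of the patch.) The hunks above drop the call to the shared ff_scale_eval_dimensions() helper and evaluate the width/height expressions inside the filter itself, using libavutil's expression API: scale_parse_expr() runs av_expr_parse() against the var_names table, check_exprs() rejects self-referencing expressions and variables that are out of context (scale2ref-only names in plain scale, frame variables in init eval mode), and scale_eval_dimensions() fills var_values[] and evaluates width, height, then width again so that "ow"/"oh" can reference each other. A minimal standalone sketch of that parse/evaluate/free cycle, using only the av_expr_* calls that appear in the patch (the file name and the "half the input width" expression are illustrative assumptions, not taken from the patch):

/* expr_demo.c - hypothetical example; build and link against libavutil. */
#include <stdio.h>
#include <libavutil/eval.h>

int main(void)
{
    /* Constant names must be NULL-terminated; values follow the same order. */
    static const char *const names[] = { "in_w", "in_h", NULL };
    double values[] = { 1920.0, 1080.0 };
    AVExpr *expr = NULL;

    /* Parse once (as scale_parse_expr() does), then evaluate as needed. */
    int ret = av_expr_parse(&expr, "in_w/2", names,
                            NULL, NULL, NULL, NULL, 0, NULL);
    if (ret < 0) {
        fprintf(stderr, "failed to parse expression\n");
        return 1;
    }

    printf("evaluated width: %d\n", (int)av_expr_eval(expr, values, NULL));
    av_expr_free(expr);
    return 0;
}

The patch also marks the w/h options with AV_OPT_FLAG_RUNTIME_PARAM (TFLAGS) and routes the "width"/"w"/"height"/"h" commands through scale_parse_expr() in process_command(), so a failed runtime resize reverts to the previous expression instead of leaving the filter half-configured.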