typedef struct LUT2Context {
const AVClass *class;
+ FFFrameSync fs;
char *comp_expr_str[4];
int width[4], height[4];
int nb_planes;
int depth, depthx, depthy;
+ int tlut2;
+ AVFrame *prev_frame; /* only used with tlut2 */
void (*lut2)(struct LUT2Context *s, AVFrame *dst, AVFrame *srcx, AVFrame *srcy);
- FFFrameSync fs;
} LUT2Context;
#define OFFSET(x) offsetof(LUT2Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption lut2_options[] = {
+static const AVOption options[] = {
{ "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
{ "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
{ "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
LUT2Context *s = ctx->priv;
int i;
+ ff_framesync_uninit(&s->fs);
+ av_frame_free(&s->prev_frame);
+
for (i = 0; i < 4; i++) {
av_expr_free(s->comp_expr[i]);
s->comp_expr[i] = NULL;
AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
AV_PIX_FMT_GBRP12,
- AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12,
- AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12,
AV_PIX_FMT_NONE
};
s->depthx = desc->comp[0].depth;
s->var_values[VAR_BITDEPTHX] = s->depthx;
+ if (s->tlut2) {
+ s->depthy = desc->comp[0].depth;
+ s->var_values[VAR_BITDEPTHY] = s->depthy;
+ }
+
return 0;
}
AVFilterContext *ctx = fs->parent;
LUT2Context *s = fs->opaque;
AVFilterLink *outlink = ctx->outputs[0];
- AVFrame *out, *srcx, *srcy;
+ AVFrame *out, *srcx = NULL, *srcy = NULL;
int ret;
if ((ret = ff_framesync_get_frame(&s->fs, 0, &srcx, 0)) < 0 ||
(ret = ff_framesync_get_frame(&s->fs, 1, &srcy, 0)) < 0)
return ret;
- if (ctx->is_disabled) {
+ if (ctx->is_disabled || !srcy) {
out = av_frame_clone(srcx);
if (!out)
return AVERROR(ENOMEM);
{
AVFilterContext *ctx = outlink->src;
LUT2Context *s = ctx->priv;
- AVFilterLink *srcx = ctx->inputs[0];
- AVFilterLink *srcy = ctx->inputs[1];
- FFFrameSyncIn *in;
int p, ret;
s->depth = s->depthx + s->depthy;
- if (srcx->format != srcy->format) {
- av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
- return AVERROR(EINVAL);
- }
- if (srcx->w != srcy->w ||
- srcx->h != srcy->h ||
- srcx->sample_aspect_ratio.num != srcy->sample_aspect_ratio.num ||
- srcx->sample_aspect_ratio.den != srcy->sample_aspect_ratio.den) {
- av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
- "(size %dx%d, SAR %d:%d) do not match the corresponding "
- "second input link %s parameters (%dx%d, SAR %d:%d)\n",
- ctx->input_pads[0].name, srcx->w, srcx->h,
- srcx->sample_aspect_ratio.num,
- srcx->sample_aspect_ratio.den,
- ctx->input_pads[1].name,
- srcy->w, srcy->h,
- srcy->sample_aspect_ratio.num,
- srcy->sample_aspect_ratio.den);
- return AVERROR(EINVAL);
- }
-
- outlink->w = srcx->w;
- outlink->h = srcx->h;
- outlink->time_base = srcx->time_base;
- outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
- outlink->frame_rate = srcx->frame_rate;
-
- if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
- return ret;
-
- in = s->fs.in;
- in[0].time_base = srcx->time_base;
- in[1].time_base = srcy->time_base;
- in[0].sync = 1;
- in[0].before = EXT_STOP;
- in[0].after = EXT_INFINITY;
- in[1].sync = 1;
- in[1].before = EXT_STOP;
- in[1].after = EXT_INFINITY;
- s->fs.opaque = s;
- s->fs.on_event = process_frame;
-
s->lut2 = s->depth > 16 ? lut2_16bit : lut2_8bit;
for (p = 0; p < s->nb_planes; p++) {
}
}
- return ff_framesync_configure(&s->fs);
+ return 0;
}
-static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+static int lut2_config_output(AVFilterLink *outlink)
{
- LUT2Context *s = inlink->dst->priv;
- return ff_framesync_filter_frame(&s->fs, inlink, buf);
+ AVFilterContext *ctx = outlink->src;
+ LUT2Context *s = ctx->priv;
+ AVFilterLink *srcx = ctx->inputs[0];
+ AVFilterLink *srcy = ctx->inputs[1];
+ FFFrameSyncIn *in;
+ int ret;
+
+ if (srcx->format != srcy->format) {
+ av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
+ return AVERROR(EINVAL);
+ }
+ if (srcx->w != srcy->w ||
+ srcx->h != srcy->h ||
+ srcx->sample_aspect_ratio.num != srcy->sample_aspect_ratio.num ||
+ srcx->sample_aspect_ratio.den != srcy->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
+ "(size %dx%d, SAR %d:%d) do not match the corresponding "
+ "second input link %s parameters (%dx%d, SAR %d:%d)\n",
+ ctx->input_pads[0].name, srcx->w, srcx->h,
+ srcx->sample_aspect_ratio.num,
+ srcx->sample_aspect_ratio.den,
+ ctx->input_pads[1].name,
+ srcy->w, srcy->h,
+ srcy->sample_aspect_ratio.num,
+ srcy->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = srcx->w;
+ outlink->h = srcx->h;
+ outlink->time_base = srcx->time_base;
+ outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
+ outlink->frame_rate = srcx->frame_rate;
+
+ if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
+ return ret;
+
+ in = s->fs.in;
+ in[0].time_base = srcx->time_base;
+ in[1].time_base = srcy->time_base;
+ in[0].sync = 2;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_INFINITY;
+ in[1].sync = 1;
+ in[1].before = EXT_STOP;
+ in[1].after = EXT_INFINITY;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ if ((ret = config_output(outlink)) < 0)
+ return ret;
+
+ return ff_framesync_configure(&s->fs);
}
-static int request_frame(AVFilterLink *outlink)
+static int activate(AVFilterContext *ctx)
{
- LUT2Context *s = outlink->src->priv;
- return ff_framesync_request_frame(&s->fs, outlink);
+ LUT2Context *s = ctx->priv;
+ return ff_framesync_activate(&s->fs);
}
static const AVFilterPad inputs[] = {
{
.name = "srcx",
.type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame,
.config_props = config_inputx,
},
{
.name = "srcy",
.type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame,
.config_props = config_inputy,
},
{ NULL }
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_output,
- .request_frame = request_frame,
+ .config_props = lut2_config_output,
},
{ NULL }
};
#define lut2_options options

FRAMESYNC_DEFINE_CLASS(lut2, LUT2Context, fs);
AVFilter ff_vf_lut2 = {
.name = "lut2",
.description = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two video inputs."),
+ .preinit = lut2_framesync_preinit,
.priv_size = sizeof(LUT2Context),
.priv_class = &lut2_class,
.uninit = uninit,
.query_formats = query_formats,
+ .activate = activate,
.inputs = inputs,
.outputs = outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
+
+#if CONFIG_TLUT2_FILTER
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ LUT2Context *s = ctx->priv;
+
+ s->tlut2 = !strcmp(ctx->filter->name, "tlut2");
+
+ return 0;
+}
+
+static int tlut2_filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ LUT2Context *s = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+
+ if (s->prev_frame) {
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&s->prev_frame);
+ s->prev_frame = frame;
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, frame);
+ s->lut2(s, out, frame, s->prev_frame);
+ av_frame_free(&s->prev_frame);
+ s->prev_frame = frame;
+ return ff_filter_frame(outlink, out);
+ }
+ s->prev_frame = frame;
+ return 0;
+}
+
#define tlut2_options options

AVFILTER_DEFINE_CLASS(tlut2);
+
+static const AVFilterPad tlut2_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = tlut2_filter_frame,
+ .config_props = config_inputx,
+ },
+ { NULL }
+};
+
+static const AVFilterPad tlut2_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_tlut2 = {
+ .name = "tlut2",
+ .description = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two successive frames."),
+ .priv_size = sizeof(LUT2Context),
+ .priv_class = &tlut2_class,
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .inputs = tlut2_inputs,
+ .outputs = tlut2_outputs,
+};
+
+#endif