/*
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
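
/**
 * @file
 * Compute and apply a per-component lookup table built from user expressions:
 * the lut2 filter pairs pixels from two video inputs, while tlut2 (guarded by
 * CONFIG_TLUT2_FILTER below) pairs each frame with its predecessor.
 */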
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "framesync.h"
static const char *const var_names[] = {
    "w",        ///< width of the input video
    "h",        ///< height of the input video
    "x",        ///< input value for the pixel from input #1
    "y",        ///< input value for the pixel from input #2
    "bdx",      ///< input #1 video bitdepth
    "bdy",      ///< input #2 video bitdepth
    NULL
};

enum var_name { VAR_W, VAR_H, VAR_X, VAR_Y, VAR_BITDEPTHX, VAR_BITDEPTHY, VAR_VARS_NB };
typedef struct LUT2Context {
    const AVClass *class;
    FFFrameSync fs;
    char *comp_expr_str[4];
    AVExpr *comp_expr[4];
    double var_values[VAR_VARS_NB];
    uint16_t *lut[4];  ///< lookup table for each component
    int width[4], height[4];
    int nb_planes;
    int depth, depthx, depthy;
    int tlut2;
    AVFrame *prev_frame; /* only used with tlut2 */

    void (*lut2)(struct LUT2Context *s, AVFrame *dst, AVFrame *srcx, AVFrame *srcy);
} LUT2Context;
#define OFFSET(x) offsetof(LUT2Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { NULL }
};
static av_cold void uninit(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    int i;

    ff_framesync_uninit(&s->fs);
    av_frame_free(&s->prev_frame);

    for (i = 0; i < 4; i++) {
        av_expr_free(s->comp_expr[i]);
        s->comp_expr[i] = NULL;
        av_freep(&s->comp_expr_str[i]);
        av_freep(&s->lut[i]); /* free the LUTs allocated in config_output() as well */
    }
}
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}
static int config_inputx(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->height[0] = s->height[3] = inlink->h;
    s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->width[0]  = s->width[3]  = inlink->w;

    s->var_values[VAR_W] = inlink->w;
    s->var_values[VAR_H] = inlink->h;
    s->depthx = desc->comp[0].depth;
    s->var_values[VAR_BITDEPTHX] = s->depthx;

    if (s->tlut2) {
        /* tlut2 has a single input, so it supplies the y depth as well */
        s->depthy = desc->comp[0].depth;
        s->var_values[VAR_BITDEPTHY] = s->depthy;
    }

    return 0;
}
static int config_inputy(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    s->depthy = desc->comp[0].depth;
    s->var_values[VAR_BITDEPTHY] = s->depthy;

    return 0;
}
static void lut2_8bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
{
    int p, y, x;

    for (p = 0; p < s->nb_planes; p++) {
        const uint16_t *lut = s->lut[p];
        const uint8_t *srcxx, *srcyy;
        uint8_t *dst;

        dst   = out->data[p];
        srcxx = srcx->data[p];
        srcyy = srcy->data[p];
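
        /* Both source samples are packed into one table index: the value
         * from input #2 selects the high bits and the value from input #1
         * the low depthx bits, matching the (y << depthx) + x layout that
         * config_output() uses when filling the table. */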
        for (y = 0; y < s->height[p]; y++) {
            for (x = 0; x < s->width[p]; x++) {
                dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
            }

            dst   += out->linesize[p];
            srcxx += srcx->linesize[p];
            srcyy += srcy->linesize[p];
        }
    }
}
static void lut2_16bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
{
    int p, y, x;

    for (p = 0; p < s->nb_planes; p++) {
        const uint16_t *lut = s->lut[p];
        const uint16_t *srcxx, *srcyy;
        uint16_t *dst;
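
        /* Samples are read as native-endian uint16_t here; AVFrame linesize
         * is in bytes, so the row strides below are divided by 2 to advance
         * in uint16_t elements. */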
        dst   = (uint16_t *)out->data[p];
        srcxx = (const uint16_t *)srcx->data[p];
        srcyy = (const uint16_t *)srcy->data[p];

        for (y = 0; y < s->height[p]; y++) {
            for (x = 0; x < s->width[p]; x++) {
                dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
            }

            dst   += out->linesize[p] / 2;
            srcxx += srcx->linesize[p] / 2;
            srcyy += srcy->linesize[p] / 2;
        }
    }
}
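
/* Frame-sync callback: invoked once the framesync core has selected a
 * matching pair of frames, one per input, for the next output timestamp. */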
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    LUT2Context *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *srcx = NULL, *srcy = NULL;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &srcx, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &srcy, 0)) < 0)
        return ret;
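
    /* When the filter is disabled via the timeline 'enable' expression, or no
     * second frame is available, clone the first input and pass it through. */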
    if (ctx->is_disabled || !srcy) {
        out = av_frame_clone(srcx);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, srcx);

        s->lut2(s, out, srcx, srcy);
    }

    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LUT2Context *s = ctx->priv;
    int p, ret;

    s->depth = s->depthx + s->depthy;
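
    /* The table holds one entry per (x, y) value pair, i.e. 1 << (depthx +
     * depthy) entries; two 8-bit inputs fit the byte-oriented path, anything
     * deeper implies 16-bit samples and takes the uint16_t path. */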
    s->lut2 = s->depth > 16 ? lut2_16bit : lut2_8bit;

    for (p = 0; p < s->nb_planes; p++) {
        s->lut[p] = av_malloc_array(1 << s->depth, sizeof(uint16_t));
        if (!s->lut[p])
            return AVERROR(ENOMEM);
    }
    for (p = 0; p < s->nb_planes; p++) {
        double res;
        int x, y;

        /* create the parsed expression */
        av_expr_free(s->comp_expr[p]);
        s->comp_expr[p] = NULL;
        ret = av_expr_parse(&s->comp_expr[p], s->comp_expr_str[p],
                            var_names, NULL, NULL, NULL, NULL, 0, ctx);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Error when parsing the expression '%s' for the component %d.\n",
                   s->comp_expr_str[p], p);
            return AVERROR(EINVAL);
        }
        /* compute the lut; y spans input #2's value range, which equals
         * input #1's range since both inputs must share a pixel format */
        for (y = 0; y < (1 << s->depthy); y++) {
            s->var_values[VAR_Y] = y;
            for (x = 0; x < (1 << s->depthx); x++) {
                s->var_values[VAR_X] = x;
                res = av_expr_eval(s->comp_expr[p], s->var_values, s);
                if (isnan(res)) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Error when evaluating the expression '%s' for the values %d and %d for the component %d.\n",
                           s->comp_expr_str[p], x, y, p);
                    return AVERROR(EINVAL);
                }

                s->lut[p][(y << s->depthx) + x] = res;
            }
        }
    }

    return 0;
}
static int lut2_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LUT2Context *s = ctx->priv;
    AVFilterLink *srcx = ctx->inputs[0];
    AVFilterLink *srcy = ctx->inputs[1];
    FFFrameSyncIn *in;
    int ret;

    if (srcx->format != srcy->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (srcx->w != srcy->w || srcx->h != srcy->h) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d) do not match the corresponding "
               "second input link %s parameters (size %dx%d)\n",
               ctx->input_pads[0].name, srcx->w, srcx->h,
               ctx->input_pads[1].name,
               srcy->w, srcy->h);
        return AVERROR(EINVAL);
    }
    outlink->w = srcx->w;
    outlink->h = srcx->h;
    outlink->time_base = srcx->time_base;
    outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
    outlink->frame_rate = srcx->frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = srcx->time_base;
    in[1].time_base = srcy->time_base;
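
    /* Give the first input sync priority; stop everything before the first
     * frame of either input (EXT_STOP) and keep repeating the last frame of
     * an input that ends early (EXT_INFINITY). */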
    in[0].sync   = 2;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 1;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_INFINITY;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    if ((ret = config_output(outlink)) < 0)
        return ret;

    return ff_framesync_configure(&s->fs);
}
static int activate(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}
static const AVFilterPad inputs[] = {
    {
        .name         = "srcx",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_inputx,
    },
    {
        .name         = "srcy",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_inputy,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = lut2_config_output,
    },
    { NULL }
};
#define lut2_options options

FRAMESYNC_DEFINE_CLASS(lut2, LUT2Context, fs);
AVFilter ff_vf_lut2 = {
    .name          = "lut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two video inputs."),
    .preinit       = lut2_framesync_preinit,
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &lut2_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
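
/* Illustrative invocation (a.mp4/b.mp4 are hypothetical inputs), taking the
 * per-pixel maximum of two streams on the first three components:
 *
 *   ffmpeg -i a.mp4 -i b.mp4 \
 *          -filter_complex "lut2='max(x,y)':'max(x,y)':'max(x,y)'" out.mp4
 *
 * Each cN expression is evaluated once per (x, y) value pair at configure
 * time, so playback costs a single table lookup per sample. */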
#if CONFIG_TLUT2_FILTER
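
/* tlut2 reuses the lut2 expression and table machinery, but pairs every
 * incoming frame (x) with the previous frame of the same stream (y) instead
 * of a second input. */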
static av_cold int init(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;

    s->tlut2 = !strcmp(ctx->filter->name, "tlut2");

    return 0;
}
static int tlut2_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    if (s->prev_frame) {
        AVFrame *out;
        if (ctx->is_disabled) {
            out = av_frame_clone(frame);
        } else {
            out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!out) {
                av_frame_free(&s->prev_frame);
                s->prev_frame = frame;
                return AVERROR(ENOMEM);
            }

            av_frame_copy_props(out, frame);
            s->lut2(s, out, frame, s->prev_frame);
        }

        av_frame_free(&s->prev_frame);
        s->prev_frame = frame;
        return ff_filter_frame(outlink, out);
    }

    /* no previous frame yet: just stash this one, output starts at frame 2 */
    s->prev_frame = frame;
    return 0;
}
#define tlut2_options options

AVFILTER_DEFINE_CLASS(tlut2);

static const AVFilterPad tlut2_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = tlut2_filter_frame,
        .config_props = config_inputx,
    },
    { NULL }
};

static const AVFilterPad tlut2_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};
AVFilter ff_vf_tlut2 = {
    .name          = "tlut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two successive frames."),
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &tlut2_class,
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .inputs        = tlut2_inputs,
    .outputs       = tlut2_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};

#endif /* CONFIG_TLUT2_FILTER */