/*
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "framesync.h"

static const char *const var_names[] = {
    "w",        ///< width of the input video
    "h",        ///< height of the input video
    "x",        ///< input value for the pixel from input #1
    "y",        ///< input value for the pixel from input #2
    NULL
};

enum var_name { VAR_W, VAR_H, VAR_X, VAR_Y, VAR_VARS_NB };

typedef struct LUT2Context {
    const AVClass *class;
    FFFrameSync fs;

    char     *comp_expr_str[4];
    AVExpr   *comp_expr[4];
    double    var_values[VAR_VARS_NB];
    uint16_t *lut[4];  ///< lookup table for each component
    int       width[4], height[4];
    int       nb_planes;
    int       depth, depthx, depthy;

    void (*lut2)(struct LUT2Context *s, AVFrame *dst, AVFrame *srcx, AVFrame *srcy);
} LUT2Context;

#define OFFSET(x) offsetof(LUT2Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption lut2_options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { NULL }
};

static av_cold void uninit(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    int i;

    for (i = 0; i < 4; i++) {
        av_expr_free(s->comp_expr[i]);
        s->comp_expr[i] = NULL;
        av_freep(&s->comp_expr_str[i]);
        av_freep(&s->lut[i]);
    }
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}

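/* Input #1 drives the geometry: record per-plane dimensions, reducing the
 * chroma planes by the format's log2 subsampling factors. */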
static int config_inputx(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->height[0] = s->height[3] = inlink->h;
    s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->width[0]  = s->width[3]  = inlink->w;

    s->var_values[VAR_W] = inlink->w;
    s->var_values[VAR_H] = inlink->h;
    s->depthx = desc->comp[0].depth;

    return 0;
}

static int config_inputy(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    s->depthy = desc->comp[0].depth;

    return 0;
}

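/* Apply the precomputed LUT: each output sample is looked up with the
 * combined index (y << depthx) | x, where x comes from input #1 and
 * y from input #2. */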
static void lut2_8bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
{
    int p, y, x;

    for (p = 0; p < s->nb_planes; p++) {
        const uint16_t *lut = s->lut[p];
        const uint8_t *srcxx, *srcyy;
        uint8_t *dst = out->data[p];

        srcxx = srcx->data[p];
        srcyy = srcy->data[p];

        for (y = 0; y < s->height[p]; y++) {
            for (x = 0; x < s->width[p]; x++) {
                dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
            }
            dst   += out->linesize[p];
            srcxx += srcx->linesize[p];
            srcyy += srcy->linesize[p];
        }
    }
}

static void lut2_16bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
{
    int p, y, x;

    for (p = 0; p < s->nb_planes; p++) {
        const uint16_t *lut = s->lut[p];
        const uint16_t *srcxx, *srcyy;
        uint16_t *dst;

        dst   = (uint16_t *)out->data[p];
        srcxx = (uint16_t *)srcx->data[p];
        srcyy = (uint16_t *)srcy->data[p];

        for (y = 0; y < s->height[p]; y++) {
            for (x = 0; x < s->width[p]; x++) {
                dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
            }
            dst   += out->linesize[p]  / 2;
            srcxx += srcx->linesize[p] / 2;
            srcyy += srcy->linesize[p] / 2;
        }
    }
}

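/* Framesync callback: fetch one frame from each input, run the per-plane
 * LUT (or pass input #1 through when the filter is disabled) and forward
 * the result downstream. */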
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    LUT2Context *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *srcx, *srcy;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &srcx, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &srcy, 0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(srcx);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, srcx);

        s->lut2(s, out, srcx, srcy);
    }

    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}

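/* Validate that both inputs match, configure framesync, then parse each
 * component expression and evaluate it for every (x, y) pair to fill the
 * per-plane lookup tables. */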
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LUT2Context *s = ctx->priv;
    AVFilterLink *srcx = ctx->inputs[0];
    AVFilterLink *srcy = ctx->inputs[1];
    FFFrameSyncIn *in;
    int p, ret;

    s->depth = s->depthx + s->depthy;

    if (srcx->format != srcy->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (srcx->w != srcy->w ||
        srcx->h != srcy->h ||
        srcx->sample_aspect_ratio.num != srcy->sample_aspect_ratio.num ||
        srcx->sample_aspect_ratio.den != srcy->sample_aspect_ratio.den) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d, SAR %d:%d) do not match the corresponding "
               "second input link %s parameters (%dx%d, SAR %d:%d)\n",
               ctx->input_pads[0].name, srcx->w, srcx->h,
               srcx->sample_aspect_ratio.num,
               srcx->sample_aspect_ratio.den,
               ctx->input_pads[1].name,
               srcy->w, srcy->h,
               srcy->sample_aspect_ratio.num,
               srcy->sample_aspect_ratio.den);
        return AVERROR(EINVAL);
    }

    outlink->w = srcx->w;
    outlink->h = srcx->h;
    outlink->time_base = srcx->time_base;
    outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
    outlink->frame_rate = srcx->frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = srcx->time_base;
    in[1].time_base = srcy->time_base;
    in[0].sync   = 2;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 1;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    s->lut2 = s->depth > 16 ? lut2_16bit : lut2_8bit;

    for (p = 0; p < s->nb_planes; p++) {
        s->lut[p] = av_malloc_array(1 << s->depth, sizeof(uint16_t));
        if (!s->lut[p])
            return AVERROR(ENOMEM);
    }

    for (p = 0; p < s->nb_planes; p++) {
        double res;
        int x, y;

        /* create the parsed expression */
        av_expr_free(s->comp_expr[p]);
        s->comp_expr[p] = NULL;
        ret = av_expr_parse(&s->comp_expr[p], s->comp_expr_str[p],
                            var_names, NULL, NULL, NULL, NULL, 0, ctx);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Error when parsing the expression '%s' for the component %d.\n",
                   s->comp_expr_str[p], p);
            return AVERROR(EINVAL);
        }

        /* compute the lut */
        for (y = 0; y < (1 << s->depthy); y++) {
            s->var_values[VAR_Y] = y;
            for (x = 0; x < (1 << s->depthx); x++) {
                s->var_values[VAR_X] = x;
                res = av_expr_eval(s->comp_expr[p], s->var_values, s);
                if (isnan(res)) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Error when evaluating the expression '%s' for the values %d and %d for the component %d.\n",
                           s->comp_expr_str[p], x, y, p);
                    return AVERROR(EINVAL);
                }

                s->lut[p][(y << s->depthx) + x] = res;
            }
        }
    }

    return ff_framesync_configure(&s->fs);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    LUT2Context *s = inlink->dst->priv;
    return ff_framesync_filter_frame(&s->fs, inlink, buf);
}

static int request_frame(AVFilterLink *outlink)
{
    LUT2Context *s = outlink->src->priv;
    return ff_framesync_request_frame(&s->fs, outlink);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "srcx",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_inputx,
    },
    {
        .name         = "srcy",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_inputy,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFILTER_DEFINE_CLASS(lut2);

AVFilter ff_vf_lut2 = {
    .name          = "lut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two video inputs."),
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &lut2_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};