/*
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "framesync2.h"
#include "internal.h"
#include "video.h"
static const char *const var_names[] = {
    "w",        ///< width of the input video
    "h",        ///< height of the input video
    "x",        ///< input value for the pixel from input #1
    "y",        ///< input value for the pixel from input #2
    "bdx",      ///< input #1 video bitdepth
    "bdy",      ///< input #2 video bitdepth
    NULL
};
typedef struct LUT2Context {
    const AVClass *class;
    FFFrameSync fs;

    char   *comp_expr_str[4];
    AVExpr *comp_expr[4];
    double var_values[VAR_VARS_NB];
    uint16_t *lut[4];  ///< lookup table for each component
    int width[4], height[4];
    int nb_planes;
    int depth, depthx, depthy;
    int tlut2;
    AVFrame *prev_frame;        /* only used with tlut2 */

    void (*lut2)(struct LUT2Context *s, AVFrame *dst, AVFrame *srcx, AVFrame *srcy);
} LUT2Context;
#define OFFSET(x) offsetof(LUT2Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { NULL }
};
static av_cold void uninit(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    int i;

    ff_framesync2_uninit(&s->fs);
    av_frame_free(&s->prev_frame);

    for (i = 0; i < 4; i++) {
        av_expr_free(s->comp_expr[i]);
        s->comp_expr[i] = NULL;
        av_freep(&s->comp_expr_str[i]);
    }
}
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}
static int config_inputx(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->height[0] = s->height[3] = inlink->h;
    s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->width[0]  = s->width[3]  = inlink->w;

    s->var_values[VAR_W] = inlink->w;
    s->var_values[VAR_H] = inlink->h;
    s->depthx = desc->comp[0].depth;
    s->var_values[VAR_BITDEPTHX] = s->depthx;

    /* tlut2 has a single input, so the second-input depth comes from it too */
    if (s->tlut2) {
        s->depthy = desc->comp[0].depth;
        s->var_values[VAR_BITDEPTHY] = s->depthy;
    }

    return 0;
}
static int config_inputy(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    s->depthy = desc->comp[0].depth;
    s->var_values[VAR_BITDEPTHY] = s->depthy;

    return 0;
}
static void lut2_8bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
{
    int p, y, x;

    for (p = 0; p < s->nb_planes; p++) {
        const uint16_t *lut = s->lut[p];
        const uint8_t *srcxx, *srcyy;
        uint8_t *dst = out->data[p];

        srcxx = srcx->data[p];
        srcyy = srcy->data[p];

        for (y = 0; y < s->height[p]; y++) {
            for (x = 0; x < s->width[p]; x++) {
                /* input #2 sample selects the high bits, input #1 the low bits */
                dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
            }

            dst   += out->linesize[p];
            srcxx += srcx->linesize[p];
            srcyy += srcy->linesize[p];
        }
    }
}
static void lut2_16bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
{
    int p, y, x;

    for (p = 0; p < s->nb_planes; p++) {
        const uint16_t *lut = s->lut[p];
        const uint16_t *srcxx, *srcyy;
        uint16_t *dst;

        dst   = (uint16_t *)out->data[p];
        srcxx = (uint16_t *)srcx->data[p];
        srcyy = (uint16_t *)srcy->data[p];

        for (y = 0; y < s->height[p]; y++) {
            for (x = 0; x < s->width[p]; x++) {
                dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
            }

            /* linesize is in bytes; advance by samples for 16-bit data */
            dst   += out->linesize[p] / 2;
            srcxx += srcx->linesize[p] / 2;
            srcyy += srcy->linesize[p] / 2;
        }
    }
}
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    LUT2Context *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *srcx, *srcy;
    int ret;

    if ((ret = ff_framesync2_get_frame(&s->fs, 0, &srcx, 0)) < 0 ||
        (ret = ff_framesync2_get_frame(&s->fs, 1, &srcy, 0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(srcx);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, srcx);

        s->lut2(s, out, srcx, srcy);
    }

    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LUT2Context *s = ctx->priv;
    int p, ret;

    s->depth = s->depthx + s->depthy;

    s->lut2 = s->depth > 16 ? lut2_16bit : lut2_8bit;

    for (p = 0; p < s->nb_planes; p++) {
        s->lut[p] = av_malloc_array(1 << s->depth, sizeof(uint16_t));
        if (!s->lut[p])
            return AVERROR(ENOMEM);
    }

    for (p = 0; p < s->nb_planes; p++) {
        double res;
        int x, y;

        /* create the parsed expression */
        av_expr_free(s->comp_expr[p]);
        s->comp_expr[p] = NULL;
        ret = av_expr_parse(&s->comp_expr[p], s->comp_expr_str[p],
                            var_names, NULL, NULL, NULL, NULL, 0, ctx);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Error when parsing the expression '%s' for the component %d.\n",
                   s->comp_expr_str[p], p);
            return AVERROR(EINVAL);
        }

        /* compute the lut */
        for (y = 0; y < (1 << s->depthx); y++) {
            s->var_values[VAR_Y] = y;
            for (x = 0; x < (1 << s->depthx); x++) {
                s->var_values[VAR_X] = x;
                res = av_expr_eval(s->comp_expr[p], s->var_values, s);
                if (isnan(res)) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Error when evaluating the expression '%s' for the values %d and %d for the component %d.\n",
                           s->comp_expr_str[p], x, y, p);
                    return AVERROR(EINVAL);
                }

                s->lut[p][(y << s->depthx) + x] = res;
            }
        }
    }

    return 0;
}
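/*
 * Worked example (illustration only): for two 8-bit inputs, depthx and
 * depthy are both 8, so s->depth is 16 and each plane's LUT holds
 * 1 << 16 = 65536 entries.  The value written for a sample pair (x, y)
 * is looked up at index (y << 8) | x, matching the indexing used in
 * lut2_8bit() and lut2_16bit() above.
 */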
static int lut2_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LUT2Context *s = ctx->priv;
    AVFilterLink *srcx = ctx->inputs[0];
    AVFilterLink *srcy = ctx->inputs[1];
    FFFrameSyncIn *in;
    int ret;

    if (srcx->format != srcy->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (srcx->w                       != srcy->w ||
        srcx->h                       != srcy->h ||
        srcx->sample_aspect_ratio.num != srcy->sample_aspect_ratio.num ||
        srcx->sample_aspect_ratio.den != srcy->sample_aspect_ratio.den) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d, SAR %d:%d) do not match the corresponding "
               "second input link %s parameters (%dx%d, SAR %d:%d)\n",
               ctx->input_pads[0].name, srcx->w, srcx->h,
               srcx->sample_aspect_ratio.num,
               srcx->sample_aspect_ratio.den,
               ctx->input_pads[1].name,
               srcy->w, srcy->h,
               srcy->sample_aspect_ratio.num,
               srcy->sample_aspect_ratio.den);
        return AVERROR(EINVAL);
    }

    outlink->w = srcx->w;
    outlink->h = srcx->h;
    outlink->time_base = srcx->time_base;
    outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
    outlink->frame_rate = srcx->frame_rate;

    if ((ret = ff_framesync2_init(&s->fs, ctx, 2)) < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = srcx->time_base;
    in[1].time_base = srcy->time_base;
    in[0].sync   = 2;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 2;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    if ((ret = config_output(outlink)) < 0)
        return ret;

    return ff_framesync2_configure(&s->fs);
}
static int activate(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    return ff_framesync2_activate(&s->fs);
}
static const AVFilterPad inputs[] = {
    {
        .name         = "srcx",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_inputx,
    },
    {
        .name         = "srcy",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_inputy,
    },
    { NULL }
};
static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = lut2_config_output,
    },
    { NULL }
};
#define lut2_options options

AVFILTER_DEFINE_CLASS(lut2);

AVFilter ff_vf_lut2 = {
    .name          = "lut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two video inputs."),
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &lut2_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
#if CONFIG_TLUT2_FILTER

static av_cold int init(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;

    s->tlut2 = !strcmp(ctx->filter->name, "tlut2");
    return 0;
}
static int tlut2_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    LUT2Context *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];

    if (s->prev_frame) {
        AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&s->prev_frame);
            s->prev_frame = frame;
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, frame);
        s->lut2(s, out, frame, s->prev_frame);
        av_frame_free(&s->prev_frame);
        s->prev_frame = frame;
        return ff_filter_frame(outlink, out);
    }
    /* first frame: just remember it, nothing to compare against yet */
    s->prev_frame = frame;
    return 0;
}
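/*
 * Usage sketch (illustration only, not part of the original source): tlut2
 * evaluates the same per-component expressions between the current frame
 * (x) and the previous frame (y), e.g. visualising temporal differences:
 *
 *   ffmpeg -i in.mp4 -vf "tlut2='abs(x-y)'" out.mp4
 */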
#define tlut2_options options

AVFILTER_DEFINE_CLASS(tlut2);

static const AVFilterPad tlut2_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = tlut2_filter_frame,
        .config_props  = config_inputx,
    },
    { NULL }
};

static const AVFilterPad tlut2_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};
AVFilter ff_vf_tlut2 = {
    .name          = "tlut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two successive frames."),
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &tlut2_class,
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .inputs        = tlut2_inputs,
    .outputs       = tlut2_outputs,
};

#endif /* CONFIG_TLUT2_FILTER */