/*
 * Copyright (c) 2011 Pascal Getreuer
 * Copyright (c) 2016 Paul B Mahol
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <math.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
36 typedef struct GBlurContext {
57 #define OFFSET(x) offsetof(GBlurContext, x)
58 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
60 static const AVOption gblur_options[] = {
61 { "sigma", "set sigma", OFFSET(sigma), AV_OPT_TYPE_FLOAT, {.dbl=0.5}, 0.0, 1024, FLAGS },
62 { "steps", "set number of steps", OFFSET(steps), AV_OPT_TYPE_INT, {.i64=1}, 1, 6, FLAGS },
63 { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=0xF}, 0, 0xF, FLAGS },
64 { "sigmaV", "set vertical sigma", OFFSET(sigmaV), AV_OPT_TYPE_FLOAT, {.dbl=-1}, -1, 1024, FLAGS },
68 AVFILTER_DEFINE_CLASS(gblur);
70 typedef struct ThreadData {
75 static int filter_horizontally(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
77 GBlurContext *s = ctx->priv;
79 const int height = td->height;
80 const int width = td->width;
81 const int slice_start = (height * jobnr ) / nb_jobs;
82 const int slice_end = (height * (jobnr+1)) / nb_jobs;
83 const float boundaryscale = s->boundaryscale;
84 const int steps = s->steps;
85 const float nu = s->nu;
86 float *buffer = s->buffer;
90 /* Filter horizontally along each row */
91 for (y = slice_start; y < slice_end; y++) {
92 for (step = 0; step < steps; step++) {
93 ptr = buffer + width * y;
94 ptr[0] *= boundaryscale;
96 /* Filter rightwards */
97 for (x = 1; x < width; x++)
98 ptr[x] += nu * ptr[x - 1];
100 ptr[x = width - 1] *= boundaryscale;
102 /* Filter leftwards */
104 ptr[x - 1] += nu * ptr[x];
111 static int filter_vertically(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
113 GBlurContext *s = ctx->priv;
114 ThreadData *td = arg;
115 const int height = td->height;
116 const int width = td->width;
117 const int slice_start = (width * jobnr ) / nb_jobs;
118 const int slice_end = (width * (jobnr+1)) / nb_jobs;
119 const float boundaryscale = s->boundaryscaleV;
120 const int numpixels = width * height;
121 const int steps = s->steps;
122 const float nu = s->nuV;
123 float *buffer = s->buffer;
127 /* Filter vertically along each column */
128 for (x = slice_start; x < slice_end; x++) {
129 for (step = 0; step < steps; step++) {
131 ptr[0] *= boundaryscale;
133 /* Filter downwards */
134 for (i = width; i < numpixels; i += width)
135 ptr[i] += nu * ptr[i - width];
137 ptr[i = numpixels - width] *= boundaryscale;
140 for (; i > 0; i -= width)
141 ptr[i - width] += nu * ptr[i];
149 static int filter_postscale(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
151 GBlurContext *s = ctx->priv;
152 ThreadData *td = arg;
153 const int height = td->height;
154 const int width = td->width;
155 const int64_t numpixels = width * (int64_t)height;
156 const unsigned slice_start = (numpixels * jobnr ) / nb_jobs;
157 const unsigned slice_end = (numpixels * (jobnr+1)) / nb_jobs;
158 const float postscale = s->postscale * s->postscaleV;
159 float *buffer = s->buffer;
162 for (i = slice_start; i < slice_end; i++)
163 buffer[i] *= postscale;
168 static void gaussianiir2d(AVFilterContext *ctx, int plane)
170 GBlurContext *s = ctx->priv;
171 const int width = s->planewidth[plane];
172 const int height = s->planeheight[plane];
173 const int nb_threads = ff_filter_get_nb_threads(ctx);
176 if (s->sigma <= 0 || s->steps < 0)
181 ctx->internal->execute(ctx, filter_horizontally, &td, NULL, FFMIN(height, nb_threads));
182 ctx->internal->execute(ctx, filter_vertically, &td, NULL, FFMIN(width, nb_threads));
183 ctx->internal->execute(ctx, filter_postscale, &td, NULL, FFMIN(width * height, nb_threads));
186 static int query_formats(AVFilterContext *ctx)
188 static const enum AVPixelFormat pix_fmts[] = {
189 AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
190 AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
191 AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
192 AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
193 AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
194 AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
195 AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
196 AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
197 AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
198 AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
199 AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
200 AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
201 AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
202 AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
203 AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
204 AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
205 AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
209 return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
212 static int config_input(AVFilterLink *inlink)
214 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
215 GBlurContext *s = inlink->dst->priv;
217 s->depth = desc->comp[0].depth;
218 s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
219 s->planewidth[0] = s->planewidth[3] = inlink->w;
220 s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
221 s->planeheight[0] = s->planeheight[3] = inlink->h;
223 s->nb_planes = av_pix_fmt_count_planes(inlink->format);
225 s->buffer = av_malloc_array(inlink->w, inlink->h * sizeof(*s->buffer));
227 return AVERROR(ENOMEM);
230 s->sigmaV = s->sigma;
236 static void set_params(float sigma, int steps, float *postscale, float *boundaryscale, float *nu)
240 lambda = (sigma * sigma) / (2.0 * steps);
241 dnu = (1.0 + 2.0 * lambda - sqrt(1.0 + 4.0 * lambda)) / (2.0 * lambda);
242 *postscale = pow(dnu / lambda, steps);
243 *boundaryscale = 1.0 / (1.0 - dnu);
247 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
249 AVFilterContext *ctx = inlink->dst;
250 GBlurContext *s = ctx->priv;
251 AVFilterLink *outlink = ctx->outputs[0];
255 set_params(s->sigma, s->steps, &s->postscale, &s->boundaryscale, &s->nu);
256 set_params(s->sigmaV, s->steps, &s->postscaleV, &s->boundaryscaleV, &s->nuV);
258 if (av_frame_is_writable(in)) {
261 out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
264 return AVERROR(ENOMEM);
266 av_frame_copy_props(out, in);
269 for (plane = 0; plane < s->nb_planes; plane++) {
270 const int height = s->planeheight[plane];
271 const int width = s->planewidth[plane];
272 float *bptr = s->buffer;
273 const uint8_t *src = in->data[plane];
274 const uint16_t *src16 = (const uint16_t *)in->data[plane];
275 uint8_t *dst = out->data[plane];
276 uint16_t *dst16 = (uint16_t *)out->data[plane];
279 if (!s->sigma || !(s->planes & (1 << plane))) {
281 av_image_copy_plane(out->data[plane], out->linesize[plane],
282 in->data[plane], in->linesize[plane],
283 width * ((s->depth + 7) / 8), height);
288 for (y = 0; y < height; y++) {
289 for (x = 0; x < width; x++) {
293 src += in->linesize[plane];
296 for (y = 0; y < height; y++) {
297 for (x = 0; x < width; x++) {
301 src16 += in->linesize[plane] / 2;
305 gaussianiir2d(ctx, plane);
309 for (y = 0; y < height; y++) {
310 for (x = 0; x < width; x++) {
314 dst += out->linesize[plane];
317 for (y = 0; y < height; y++) {
318 for (x = 0; x < width; x++) {
322 dst16 += out->linesize[plane] / 2;
329 return ff_filter_frame(outlink, out);
332 static av_cold void uninit(AVFilterContext *ctx)
334 GBlurContext *s = ctx->priv;
336 av_freep(&s->buffer);
339 static const AVFilterPad gblur_inputs[] = {
342 .type = AVMEDIA_TYPE_VIDEO,
343 .config_props = config_input,
344 .filter_frame = filter_frame,
349 static const AVFilterPad gblur_outputs[] = {
352 .type = AVMEDIA_TYPE_VIDEO,
357 AVFilter ff_vf_gblur = {
359 .description = NULL_IF_CONFIG_SMALL("Apply Gaussian Blur filter."),
360 .priv_size = sizeof(GBlurContext),
361 .priv_class = &gblur_class,
363 .query_formats = query_formats,
364 .inputs = gblur_inputs,
365 .outputs = gblur_outputs,
366 .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,