2 * Copyright (c) 2013 Paul B Mahol
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "libavutil/imgutils.h"
22 #include "libavutil/eval.h"
23 #include "libavutil/opt.h"
24 #include "libavutil/pixfmt.h"
26 #include "bufferqueue.h"
29 #include "dualinput.h"
/*
 * Names of the variables usable in user-supplied blend expressions
 * (cN_expr / all_expr options), NULL-terminated for av_expr_parse().
 * The order must stay in sync with the enum below, which indexes the
 * values[] array handed to each per-band blend callback.
 */
static const char *const var_names[] = {
    "X",       /* pixel x coordinate                          */
    "Y",       /* pixel y coordinate                          */
    "W",       /* plane width                                 */
    "H",       /* plane height                                */
    "SW",      /* plane width  / frame width  (chroma scale)  */
    "SH",      /* plane height / frame height (chroma scale)  */
    "T",       /* frame time in seconds (NAN if no pts)       */
    "N",       /* input frame count                           */
    "A",       /* top input sample                            */
    "B",       /* bottom input sample                         */
    "TOP",     /* alias for A                                 */
    "BOTTOM",  /* alias for B                                 */
    NULL
};

enum {
    VAR_X,
    VAR_Y,
    VAR_W,
    VAR_H,
    VAR_SW,
    VAR_SH,
    VAR_T,
    VAR_N,
    VAR_A,
    VAR_B,
    VAR_TOP,
    VAR_BOTTOM,
    VAR_VARS_NB  /* total number of expression variables */
};
/*
 * Per-component blend configuration. One instance exists per plane
 * (params[4] in the filter context); init() fills in the callback
 * matching the selected mode or compiled expression.
 */
typedef struct FilterParams {
    /*
     * Blend one horizontal band of rows [start, end), `width` pixels wide,
     * from `top` and `bottom` into `dst`. `values` is the expression
     * variable array (var_names order); only blend_expr() reads it.
     */
    void (*blend)(const uint8_t *top, int top_linesize,
                  const uint8_t *bottom, int bottom_linesize,
                  uint8_t *dst, int dst_linesize,
                  int width, int start, int end,
                  struct FilterParams *param, double *values);
/* Per-job context passed to filter_slice() via ctx->internal->execute(). */
typedef struct ThreadData {
    const AVFrame *top, *bottom;  /* input frames (read-only in the slice) */
    FFDualInputContext dinput;    /* two-input frame sync helper */
    int hsub, vsub; ///< chroma subsampling values
    enum BlendMode all_mode;      /* mode applied to every component when >= 0 */
    FilterParams params[4];       /* one blend configuration per plane */
    AVFrame *prev_frame; /* only used with tblend */
/*
 * Option table entries shared by the "blend" and "tblend" filters.
 * Expanded inside each filter's AVOption array, so OFFSET() and FLAGS
 * resolve at the expansion site. The "mode" unit lists the named
 * constants selectable for cN_mode/all_mode; expressions (cN_expr /
 * all_expr) override the mode for that component when set.
 */
#define COMMON_OPTIONS \
    { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
    { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
    { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
    { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
    { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, "mode"},\
    { "addition", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION}, 0, 0, FLAGS, "mode" },\
    { "and", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND}, 0, 0, FLAGS, "mode" },\
    { "average", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE}, 0, 0, FLAGS, "mode" },\
    { "burn", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN}, 0, 0, FLAGS, "mode" },\
    { "darken", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN}, 0, 0, FLAGS, "mode" },\
    { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, "mode" },\
    { "difference128", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE128}, 0, 0, FLAGS, "mode" },\
    { "divide", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE}, 0, 0, FLAGS, "mode" },\
    { "dodge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE}, 0, 0, FLAGS, "mode" },\
    { "exclusion", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION}, 0, 0, FLAGS, "mode" },\
    { "glow", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_GLOW}, 0, 0, FLAGS, "mode" },\
    { "hardlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT}, 0, 0, FLAGS, "mode" },\
    { "hardmix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDMIX}, 0, 0, FLAGS, "mode" },\
    { "lighten", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN}, 0, 0, FLAGS, "mode" },\
    { "linearlight","", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LINEARLIGHT},0, 0, FLAGS, "mode" },\
    { "multiply", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY}, 0, 0, FLAGS, "mode" },\
    { "negation", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION}, 0, 0, FLAGS, "mode" },\
    { "normal", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL}, 0, 0, FLAGS, "mode" },\
    { "or", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR}, 0, 0, FLAGS, "mode" },\
    { "overlay", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY}, 0, 0, FLAGS, "mode" },\
    { "phoenix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX}, 0, 0, FLAGS, "mode" },\
    { "pinlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT}, 0, 0, FLAGS, "mode" },\
    { "reflect", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT}, 0, 0, FLAGS, "mode" },\
    { "screen", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN}, 0, 0, FLAGS, "mode" },\
    { "softlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT}, 0, 0, FLAGS, "mode" },\
    { "subtract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT}, 0, 0, FLAGS, "mode" },\
    { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, "mode" },\
    { "xor", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR}, 0, 0, FLAGS, "mode" },\
    { "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
    { "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
    { "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
    { "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
    { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS}
/* Field offsets for the AVOption tables; defined after COMMON_OPTIONS but
 * expanded only where that macro is used, so the ordering is fine. */
#define OFFSET(x) offsetof(BlendContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* Options for the two-input "blend" filter: the shared set plus the
 * dual-input EOF/repeat behavior knobs. */
static const AVOption blend_options[] = {
    { "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
    { "repeatlast", "repeat last bottom frame", OFFSET(dinput.repeatlast), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
AVFILTER_DEFINE_CLASS(blend);
/*
 * BLEND_NORMAL: output is the top input unchanged, so the band is a plain
 * plane copy. Copies (end - start) rows of `width` bytes; `bottom`,
 * `param` and `values` are unused here but keep the common signature.
 */
static void blend_normal(const uint8_t *top, int top_linesize,
                         const uint8_t *bottom, int bottom_linesize,
                         uint8_t *dst, int dst_linesize,
                         int width, int start, int end,
                         FilterParams *param, double *values)
    av_image_copy_plane(dst, dst_linesize, top, top_linesize, width, end - start);
/*
 * Generates blend_<name>(): applies `expr` to each pixel of the band
 * [start, end) x [0, width). Within `expr`, A is the top sample and B the
 * bottom sample (uppercase macros defined alongside the instantiations).
 * The result is mixed with the top input by per-component opacity:
 *   dst = top + (expr - top) * opacity
 * so opacity == 1 gives the pure blend and opacity == 0 passes top through.
 */
#define DEFINE_BLEND(name, expr) \
static void blend_## name(const uint8_t *top, int top_linesize, \
                          const uint8_t *bottom, int bottom_linesize, \
                          uint8_t *dst, int dst_linesize, \
                          int width, int start, int end, \
                          FilterParams *param, double *values) \
    double opacity = param->opacity; \
    for (i = start; i < end; i++) { \
        for (j = 0; j < width; j++) { \
            dst[j] = top[j] + ((expr) - top[j]) * opacity; \
        dst += dst_linesize; \
        top += top_linesize; \
        bottom += bottom_linesize; \
/* Photoshop-style primitives on 8-bit samples; `x` is a gain factor used
 * by overlay/hardlight (1 or 2). BURN/DODGE special-case the pole value
 * (a == 0 resp. a == 255) to avoid dividing by zero, and clamp via
 * FFMAX/FFMIN; the << 8 works in fixed point with /255 ~ >>8 scaling. */
#define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 255))
#define SCREEN(x, a, b) (255 - (x) * ((255 - (a)) * (255 - (b)) / 255))
#define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 255 - ((255 - (b)) << 8) / (a)))
#define DODGE(a, b) (((a) == 255) ? (a) : FFMIN(255, (((b) << 8) / (255 - (a)))))
/*
 * Instantiate one blend_<mode>() function per blend mode. A = top sample,
 * B = bottom sample, both 0..255. Arithmetic modes clamp explicitly
 * (FFMIN/FFMAX/av_clip_uint8); bitwise modes (and/or/xor) cannot overflow.
 */
DEFINE_BLEND(addition, FFMIN(255, A + B))
DEFINE_BLEND(average, (A + B) / 2)
DEFINE_BLEND(subtract, FFMAX(0, A - B))
DEFINE_BLEND(multiply, MULTIPLY(1, A, B))
DEFINE_BLEND(negation, 255 - FFABS(255 - A - B))
DEFINE_BLEND(difference, FFABS(A - B))
DEFINE_BLEND(difference128, av_clip_uint8(128 + A - B))
DEFINE_BLEND(screen, SCREEN(1, A, B))
/* overlay/hardlight: multiply in the dark half, screen in the bright half */
DEFINE_BLEND(overlay, (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B))
DEFINE_BLEND(hardlight, (B < 128) ? MULTIPLY(2, B, A) : SCREEN(2, B, A))
DEFINE_BLEND(hardmix, (A < (255 - B)) ? 0: 255)
DEFINE_BLEND(darken, FFMIN(A, B))
DEFINE_BLEND(lighten, FFMAX(A, B))
/* divide uses float to keep precision before the final clip */
DEFINE_BLEND(divide, av_clip_uint8(((float)A / ((float)B) * 255)))
DEFINE_BLEND(dodge, DODGE(A, B))
DEFINE_BLEND(burn, BURN(A, B))
DEFINE_BLEND(softlight, (A > 127) ? B + (255 - B) * (A - 127.5) / 127.5 * (0.5 - FFABS(B - 127.5) / 255): B - B * ((127.5 - A) / 127.5) * (0.5 - FFABS(B - 127.5)/255))
DEFINE_BLEND(exclusion, A + B - 2 * A * B / 255)
DEFINE_BLEND(pinlight, (B < 128) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 128)))
DEFINE_BLEND(phoenix, FFMIN(A, B) - FFMAX(A, B) + 255)
/* reflect/glow guard the divisor pole (B resp. A == 255) explicitly */
DEFINE_BLEND(reflect, (B == 255) ? B : FFMIN(255, (A * A / (255 - B))))
DEFINE_BLEND(glow, (A == 255) ? A : FFMIN(255, (B * B / (255 - A))))
DEFINE_BLEND(and, A & B)
DEFINE_BLEND(or, A | B)
DEFINE_BLEND(xor, A ^ B)
DEFINE_BLEND(vividlight, (A < 128) ? BURN(2 * A, B) : DODGE(2 * (A - 128), B))
DEFINE_BLEND(linearlight,av_clip_uint8((B < 128) ? B + 2 * A - 255 : B + 2 * (A - 128)))
/*
 * Expression-driven blender: evaluates the user's parsed expression
 * (param->e) once per pixel, with A/TOP and B/BOTTOM bound to the input
 * samples. Far slower than the dedicated mode callbacks; used whenever
 * cN_expr/all_expr is set.
 */
static void blend_expr(const uint8_t *top, int top_linesize,
                       const uint8_t *bottom, int bottom_linesize,
                       uint8_t *dst, int dst_linesize,
                       int width, int start, int end,
                       FilterParams *param, double *values)
    AVExpr *e = param->e;
    for (y = start; y < end; y++) {
        for (x = 0; x < width; x++) {
            /* TOP/A and BOTTOM/B are aliases for the same sample values */
            values[VAR_TOP] = values[VAR_A] = top[x];
            values[VAR_BOTTOM] = values[VAR_B] = bottom[x];
            /* result stored straight into uint8_t — truncated, not clipped */
            dst[x] = av_expr_eval(e, values, NULL);
        bottom += bottom_linesize;
/*
 * Slice worker run by the filter graph's thread pool: blends rows
 * [slice_start, slice_end) of one plane. Each job fills its own values[]
 * on the stack, so the per-pixel X/Y/A/B writes in blend_expr() never
 * race between threads.
 */
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
    ThreadData *td = arg;
    /* partition plane rows evenly across jobs */
    int slice_start = (td->h * jobnr ) / nb_jobs;
    int slice_end = (td->h * (jobnr+1)) / nb_jobs;
    const uint8_t *top = td->top->data[td->plane];
    const uint8_t *bottom = td->bottom->data[td->plane];
    uint8_t *dst = td->dst->data[td->plane];
    double values[VAR_VARS_NB];

    /* frame-constant expression variables; X/Y/A/B are set per pixel */
    values[VAR_N] = td->inlink->frame_count;
    values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base);
    values[VAR_W] = td->w;
    values[VAR_H] = td->h;
    /* plane-to-frame size ratios (0.5 for subsampled chroma planes) */
    values[VAR_SW] = td->w / (double)td->dst->width;
    values[VAR_SH] = td->h / (double)td->dst->height;

    /* advance each base pointer to the first row of this slice */
    td->param->blend(top + slice_start * td->top->linesize[td->plane],
                     td->top->linesize[td->plane],
                     bottom + slice_start * td->bottom->linesize[td->plane],
                     td->bottom->linesize[td->plane],
                     dst + slice_start * td->dst->linesize[td->plane],
                     td->dst->linesize[td->plane],
                     td->w, slice_start, slice_end, td->param, &values[0])
/*
 * Blend one pair of frames and return a newly allocated output frame.
 * Used both as the dualinput process callback (blend) and directly by
 * tblend_filter_frame(). Takes ownership of top_buf (freed below;
 * the tblend guard around the free is outside this excerpt — the caller
 * keeps the frame as prev_frame in that case).
 */
static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf,
                            const AVFrame *bottom_buf)
    BlendContext *b = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    av_frame_copy_props(dst_buf, top_buf);

    /* blend every plane; chroma planes (1, 2) use the subsampled size */
    for (plane = 0; plane < b->nb_planes; plane++) {
        int hsub = plane == 1 || plane == 2 ? b->hsub : 0;
        int vsub = plane == 1 || plane == 2 ? b->vsub : 0;
        int outw = FF_CEIL_RSHIFT(dst_buf->width, hsub);
        int outh = FF_CEIL_RSHIFT(dst_buf->height, vsub);
        FilterParams *param = &b->params[plane];
        ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf,
                          .w = outw, .h = outh, .param = param, .plane = plane,
        /* one job per row at most; capped by the graph's thread count */
        ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ctx->graph->nb_threads));
    av_frame_free(&top_buf);
/*
 * Filter init: resolves, for each of the four components, which blend
 * callback to use. all_mode/all_opacity/all_expr act as broadcast
 * overrides for the per-component options; a set expression always wins
 * over the mode. Returns 0 on success or a negative AVERROR code.
 */
static av_cold int init(AVFilterContext *ctx)
    BlendContext *b = ctx->priv;
    /* both filters share this init; distinguish by registered name */
    b->tblend = !strcmp(ctx->filter->name, "tblend");
    for (plane = 0; plane < FF_ARRAY_ELEMS(b->params); plane++) {
        FilterParams *param = &b->params[plane];
        /* all_mode defaults to -1 ("unset"), so >= 0 means override */
        if (b->all_mode >= 0)
            param->mode = b->all_mode;
        /* all_opacity defaults to 1; any lower value overrides */
        if (b->all_opacity < 1)
            param->opacity = b->all_opacity;
        switch (param->mode) {
        case BLEND_ADDITION: param->blend = blend_addition; break;
        case BLEND_AND: param->blend = blend_and; break;
        case BLEND_AVERAGE: param->blend = blend_average; break;
        case BLEND_BURN: param->blend = blend_burn; break;
        case BLEND_DARKEN: param->blend = blend_darken; break;
        case BLEND_DIFFERENCE: param->blend = blend_difference; break;
        case BLEND_DIFFERENCE128: param->blend = blend_difference128; break;
        case BLEND_DIVIDE: param->blend = blend_divide; break;
        case BLEND_DODGE: param->blend = blend_dodge; break;
        case BLEND_EXCLUSION: param->blend = blend_exclusion; break;
        case BLEND_GLOW: param->blend = blend_glow; break;
        case BLEND_HARDLIGHT: param->blend = blend_hardlight; break;
        case BLEND_HARDMIX: param->blend = blend_hardmix; break;
        case BLEND_LIGHTEN: param->blend = blend_lighten; break;
        case BLEND_LINEARLIGHT:param->blend = blend_linearlight;break;
        case BLEND_MULTIPLY: param->blend = blend_multiply; break;
        case BLEND_NEGATION: param->blend = blend_negation; break;
        case BLEND_NORMAL: param->blend = blend_normal; break;
        case BLEND_OR: param->blend = blend_or; break;
        case BLEND_OVERLAY: param->blend = blend_overlay; break;
        case BLEND_PHOENIX: param->blend = blend_phoenix; break;
        case BLEND_PINLIGHT: param->blend = blend_pinlight; break;
        case BLEND_REFLECT: param->blend = blend_reflect; break;
        case BLEND_SCREEN: param->blend = blend_screen; break;
        case BLEND_SOFTLIGHT: param->blend = blend_softlight; break;
        case BLEND_SUBTRACT: param->blend = blend_subtract; break;
        case BLEND_VIVIDLIGHT: param->blend = blend_vividlight; break;
        case BLEND_XOR: param->blend = blend_xor; break;
        /* per-component expr unset: inherit the shared all_expr string */
        if (b->all_expr && !param->expr_str) {
            param->expr_str = av_strdup(b->all_expr);
            if (!param->expr_str)
                return AVERROR(ENOMEM);
        if (param->expr_str) {
            /* NOTE(review): "¶m" below is mojibake — an HTML-entity
             * corruption of "&param" (i.e. &param->e). Restore the
             * original byte sequence when fixing the file's encoding. */
            ret = av_expr_parse(¶m->e, param->expr_str, var_names,
                                NULL, NULL, NULL, NULL, 0, ctx);
            /* a compiled expression overrides the selected mode */
            param->blend = blend_expr;
    /* dual-input helper calls blend_frame() for each synced frame pair */
    b->dinput.process = blend_frame;
/*
 * Advertise the supported pixel formats: 8-bit planar YUV(A), planar
 * GBR(A) and gray — i.e. every plane is an 8-bit sample array, which is
 * what the blend callbacks assume. Applied to all inputs and the output.
 */
static int query_formats(AVFilterContext *ctx)
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    /* allocation failure check (guard condition elided in this excerpt) */
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
/*
 * Filter teardown: releases the dual-input queues, the tblend history
 * frame, and every parsed per-component expression (av_expr_free(NULL)
 * is a no-op for components that had no expression).
 */
static av_cold void uninit(AVFilterContext *ctx)
    BlendContext *b = ctx->priv;
    ff_dualinput_uninit(&b->dinput);
    av_frame_free(&b->prev_frame);

    for (i = 0; i < FF_ARRAY_ELEMS(b->params); i++)
        av_expr_free(b->params[i].e);
402 #if CONFIG_BLEND_FILTER
/*
 * Output link configuration for the two-input blend filter: validates
 * that both inputs agree in pixel format, dimensions and SAR (no implicit
 * scaling/conversion is done), then mirrors the top input's properties
 * onto the output and caches the plane layout. Returns 0 or AVERROR.
 */
static int config_output(AVFilterLink *outlink)
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *toplink = ctx->inputs[TOP];
    AVFilterLink *bottomlink = ctx->inputs[BOTTOM];
    BlendContext *b = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);

    if (toplink->format != bottomlink->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    if (toplink->w != bottomlink->w ||
        toplink->h != bottomlink->h ||
        toplink->sample_aspect_ratio.num != bottomlink->sample_aspect_ratio.num ||
        toplink->sample_aspect_ratio.den != bottomlink->sample_aspect_ratio.den) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d, SAR %d:%d) do not match the corresponding "
               "second input link %s parameters (%dx%d, SAR %d:%d)\n",
               ctx->input_pads[TOP].name, toplink->w, toplink->h,
               toplink->sample_aspect_ratio.num,
               toplink->sample_aspect_ratio.den,
               ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h,
               bottomlink->sample_aspect_ratio.num,
               bottomlink->sample_aspect_ratio.den);
        return AVERROR(EINVAL);

    /* output inherits all timing/geometry from the top input */
    outlink->w = toplink->w;
    outlink->h = toplink->h;
    outlink->time_base = toplink->time_base;
    outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
    outlink->frame_rate = toplink->frame_rate;

    /* cache chroma subsampling and plane count for blend_frame() */
    b->hsub = pix_desc->log2_chroma_w;
    b->vsub = pix_desc->log2_chroma_h;
    b->nb_planes = av_pix_fmt_count_planes(toplink->format);

    if ((ret = ff_dualinput_init(ctx, &b->dinput)) < 0)
/* Forward output-side frame requests to the dual-input synchronizer. */
static int request_frame(AVFilterLink *outlink)
    BlendContext *b = outlink->src->priv;
    return ff_dualinput_request_frame(&b->dinput, outlink);
/* Queue an incoming frame (top or bottom pad) into the dual-input
 * synchronizer, which invokes blend_frame() once a pair is available. */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
    BlendContext *b = inlink->dst->priv;
    return ff_dualinput_filter_frame(&b->dinput, inlink, buf);
/* Input pads of the blend filter: top and bottom layers, both routed
 * through the same dualinput-backed filter_frame(). */
static const AVFilterPad blend_inputs[] = {
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
/* Single video output pad; geometry/SAR validation and setup happen in
 * config_output(). */
static const AVFilterPad blend_outputs[] = {
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
        .request_frame = request_frame,
/* Registration of the two-input "blend" filter. Slice threading is safe
 * because filter_slice() writes disjoint row bands. */
AVFilter ff_vf_blend = {
    .description   = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
    .priv_size     = sizeof(BlendContext),
    .query_formats = query_formats,
    .inputs        = blend_inputs,
    .outputs       = blend_outputs,
    .priv_class    = &blend_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
499 #if CONFIG_TBLEND_FILTER
/*
 * Output configuration for the single-input tblend filter: only caches
 * the plane layout (there is no second link to validate against) and
 * marks the output for request-loop handling.
 */
static int tblend_config_output(AVFilterLink *outlink)
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    BlendContext *b = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);

    b->hsub = pix_desc->log2_chroma_w;
    b->vsub = pix_desc->log2_chroma_h;
    b->nb_planes = av_pix_fmt_count_planes(inlink->format);
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
/*
 * tblend: blends each frame against the previous one. Once a previous
 * frame exists (guard elided in this excerpt), the pair is blended and
 * emitted; in every case the incoming frame is retained as the new
 * history frame, so the very first frame produces no output.
 */
static int tblend_filter_frame(AVFilterLink *inlink, AVFrame *frame)
    BlendContext *b = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
        /* blend_frame() allocates the output; prev_frame stays owned here */
        AVFrame *out = blend_frame(inlink->dst, frame, b->prev_frame);
        av_frame_free(&b->prev_frame);
        b->prev_frame = frame;
        return ff_filter_frame(outlink, out);
    b->prev_frame = frame;
/* tblend exposes only the shared blend options (expanded from
 * COMMON_OPTIONS in the elided interior) — no dual-input knobs. */
static const AVOption tblend_options[] = {
AVFILTER_DEFINE_CLASS(tblend);
/* Single input pad; each frame is blended with its predecessor. */
static const AVFilterPad tblend_inputs[] = {
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = tblend_filter_frame,
/* Single output pad configured by tblend_config_output(). */
static const AVFilterPad tblend_outputs[] = {
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = tblend_config_output,
/* Registration of the single-input "tblend" (temporal blend) filter;
 * shares BlendContext, init/uninit and the blend kernels with blend. */
AVFilter ff_vf_tblend = {
    .description   = NULL_IF_CONFIG_SMALL("Blend successive frames."),
    .priv_size     = sizeof(BlendContext),
    .priv_class    = &tblend_class,
    .query_formats = query_formats,
    .inputs        = tblend_inputs,
    .outputs       = tblend_outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,