#include "maskedmerge.h"
#define OFFSET(x) offsetof(MaskedMergeContext, x)
-#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+/* AV_OPT_FLAG_RUNTIME_PARAM marks the options as changeable at runtime
+ * via filter commands (handled by ff_filter_process_command, set on the
+ * filter in this same patch). */
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption maskedmerge_options[] = {
{ "planes", "set planes", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=0xF}, 0, 0xF, FLAGS },
AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
- AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV440P10,
AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
- AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
AV_PIX_FMT_NONE
};
return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}
+/* Payload handed to each worker job: the three synchronized input
+ * frames (base, overlay, mask) and the frame being written. */
+typedef struct ThreadData {
+ AVFrame *base, *overlay, *mask;
+ AVFrame *out;
+} ThreadData;
+
+/* Slice worker: job jobnr of nb_jobs processes rows
+ * [h*jobnr/nb_jobs, h*(jobnr+1)/nb_jobs) of every plane, so the jobs
+ * tile each plane exactly once with no overlap. */
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ MaskedMergeContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *base = td->base;
+ AVFrame *overlay = td->overlay;
+ AVFrame *mask = td->mask;
+ AVFrame *out = td->out;
+ int p;
+
+ for (p = 0; p < s->nb_planes; p++) {
+ const int h = s->height[p];
+ const int slice_start = (h * jobnr) / nb_jobs;
+ const int slice_end = (h * (jobnr+1)) / nb_jobs;
+
+ /* Planes deselected by the 'planes' bitmask are passed through
+ * from the base input unchanged. */
+ if (!((1 << p) & s->planes)) {
+ av_image_copy_plane(out->data[p] + slice_start * out->linesize[p],
+ out->linesize[p],
+ base->data[p] + slice_start * base->linesize[p],
+ base->linesize[p],
+ s->linesize[p], slice_end - slice_start);
+ continue;
+ }
+
+ /* s->maskedmerge is a DSP function pointer assigned elsewhere in
+ * the file (not visible in this hunk) -- presumably selected by
+ * bit depth; it merges base/overlay by mask over this slice. */
+ s->maskedmerge(base->data[p] + slice_start * base->linesize[p],
+ overlay->data[p] + slice_start * overlay->linesize[p],
+ mask->data[p] + slice_start * mask->linesize[p],
+ out->data[p] + slice_start * out->linesize[p],
+ base->linesize[p], overlay->linesize[p],
+ mask->linesize[p], out->linesize[p],
+ s->width[p], slice_end - slice_start,
+ s->half, s->depth);
+ }
+
+ return 0;
+}
+
static int process_frame(FFFrameSync *fs)
{
AVFilterContext *ctx = fs->parent;
MaskedMergeContext *s = fs->opaque;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out, *base, *overlay, *mask;
+ ThreadData td;
int ret;
if ((ret = ff_framesync_get_frame(&s->fs, 0, &base, 0)) < 0 ||
if (!out)
return AVERROR(ENOMEM);
} else {
- int p;
-
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out)
return AVERROR(ENOMEM);
av_frame_copy_props(out, base);
- for (p = 0; p < s->nb_planes; p++) {
- if (!((1 << p) & s->planes)) {
- av_image_copy_plane(out->data[p], out->linesize[p], base->data[p], base->linesize[p],
- s->linesize[p], s->height[p]);
- continue;
- }
-
- s->maskedmerge(base->data[p], overlay->data[p],
- mask->data[p], out->data[p],
- base->linesize[p], overlay->linesize[p],
- mask->linesize[p], out->linesize[p],
- s->width[p], s->height[p],
- s->half, s->depth);
- }
+ /* Fan the per-plane merge out to worker threads via filter_slice.
+ * The job count is capped by s->height[2] (the chroma height for
+ * subsampled formats, i.e. the shortest plane) so every job covers
+ * at least one row of every plane. */
+ td.out = out;
+ td.base = base;
+ td.overlay = overlay;
+ td.mask = mask;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL,
+ FFMIN(s->height[2], ff_filter_get_nb_threads(ctx)));
}
- out->pts = av_rescale_q(base->pts, s->fs.time_base, outlink->time_base);
+ /* Timestamps now come from the framesync core (s->fs.pts) rather
+ * than the base frame, rescaled into the output time base. */
+ out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
return ff_filter_frame(outlink, out);
}
outlink->w = base->w;
outlink->h = base->h;
- outlink->time_base = base->time_base;
outlink->sample_aspect_ratio = base->sample_aspect_ratio;
outlink->frame_rate = base->frame_rate;
s->fs.opaque = s;
s->fs.on_event = process_frame;
- return ff_framesync_configure(&s->fs);
+ /* Let framesync negotiate the common time base first, then adopt it
+ * on the output link -- the inputs' time bases may differ, so copying
+ * the first input's time base (old code above) was not reliable. */
+ ret = ff_framesync_configure(&s->fs);
+ outlink->time_base = s->fs.time_base;
+
+ return ret;
}
static int activate(AVFilterContext *ctx)
.inputs = maskedmerge_inputs,
.outputs = maskedmerge_outputs,
.priv_class = &maskedmerge_class,
- .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+ /* filter_slice (added in this patch) makes the filter safe for
+ * slice threading. */
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
+ /* Generic command handler; pairs with AV_OPT_FLAG_RUNTIME_PARAM on
+ * the options so e.g. 'planes' can be changed while running. */
+ .process_command = ff_filter_process_command,
};