git.sesse.net Git - ffmpeg/blobdiff - libavfilter/vf_threshold.c
avfilter: Constify all AVFilters
[ffmpeg] / libavfilter / vf_threshold.c
index bd73392863dbe40859823aa7647bd8b1c1a15365..decfbf8564c5ee7ce8577f4b1e4b3687bd1cbc88 100644 (file)
@@ -52,10 +52,12 @@ static int query_formats(AVFilterContext *ctx)
         AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
         AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
         AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
-        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV444P10,
+        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV444P12,
         AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
         AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
         AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+        AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
         AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
         AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
         AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
@@ -68,12 +70,59 @@ static int query_formats(AVFilterContext *ctx)
     return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
 }
 
+/* Per-frame job context handed to each slice worker via ctx->internal->execute().
+ * Bundles the four synchronized input frames and the output frame so
+ * filter_slice() can reach them through the single void *arg parameter. */
+typedef struct ThreadData {
+    AVFrame *in;
+    AVFrame *threshold;
+    AVFrame *min;
+    AVFrame *max;
+    AVFrame *out;
+} ThreadData;
+
+/* Slice worker: processes rows [h*jobnr/nb_jobs, h*(jobnr+1)/nb_jobs) of every
+ * plane. Planes not selected in s->planes are passed through unchanged;
+ * selected planes go through the (possibly SIMD) s->threshold() kernel.
+ * Always returns 0 (no failure path). */
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+    ThresholdContext *s = ctx->priv;
+    ThreadData *td = arg;
+    AVFrame *min = td->min;
+    AVFrame *max = td->max;
+    AVFrame *threshold = td->threshold;
+    AVFrame *in = td->in;
+    AVFrame *out = td->out;
+
+    for (int p = 0; p < s->nb_planes; p++) {
+        const int h = s->height[p];
+        /* Integer partition of the plane height; consecutive jobs cover
+         * adjacent, non-overlapping row ranges with no rows dropped. */
+        const int slice_start = (h * jobnr) / nb_jobs;
+        const int slice_end = (h * (jobnr+1)) / nb_jobs;
+
+        if (!(s->planes & (1 << p))) {
+            /* Plane not selected for thresholding: copy this job's rows
+             * verbatim. Width is scaled by s->bpc (bytes per component)
+             * to get the byte count per row. */
+            av_image_copy_plane(out->data[p] + slice_start * out->linesize[p],
+                                out->linesize[p],
+                                in->data[p] + slice_start * in->linesize[p],
+                                in->linesize[p],
+                                s->width[p] * s->bpc,
+                                slice_end - slice_start);
+            continue;
+        }
+        /* Run the threshold kernel on this job's row range only; all five
+         * plane pointers are offset by slice_start rows. */
+        s->threshold(in->data[p] + slice_start * in->linesize[p],
+                     threshold->data[p] + slice_start * threshold->linesize[p],
+                     min->data[p] + slice_start * min->linesize[p],
+                     max->data[p] + slice_start * max->linesize[p],
+                     out->data[p] + slice_start * out->linesize[p],
+                     in->linesize[p], threshold->linesize[p],
+                     min->linesize[p], max->linesize[p],
+                     out->linesize[p],
+                     s->width[p], slice_end - slice_start);
+    }
+
+    return 0;
+}
+
 static int process_frame(FFFrameSync *fs)
 {
     AVFilterContext *ctx = fs->parent;
     ThresholdContext *s = fs->opaque;
     AVFilterLink *outlink = ctx->outputs[0];
     AVFrame *out, *in, *threshold, *min, *max;
+    ThreadData td;
     int ret;
 
     if ((ret = ff_framesync_get_frame(&s->fs, 0, &in,        0)) < 0 ||
@@ -87,29 +136,18 @@ static int process_frame(FFFrameSync *fs)
         if (!out)
             return AVERROR(ENOMEM);
     } else {
-        int p;
-
         out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
         if (!out)
             return AVERROR(ENOMEM);
         av_frame_copy_props(out, in);
 
-        for (p = 0; p < s->nb_planes; p++) {
-            if (!(s->planes & (1 << p))) {
-                av_image_copy_plane(out->data[p], out->linesize[p],
-                                    in->data[p], in->linesize[p],
-                                    s->width[p] * s->bpc,
-                                    s->height[p]);
-                continue;
-            }
-            s->threshold(in->data[p], threshold->data[p],
-                         min->data[p], max->data[p],
-                         out->data[p],
-                         in->linesize[p], threshold->linesize[p],
-                         min->linesize[p], max->linesize[p],
-                         out->linesize[p],
-                         s->width[p], s->height[p]);
-        }
+        td.out = out;
+        td.in = in;
+        td.threshold = threshold;
+        td.min = min;
+        td.max = max;
+        ctx->internal->execute(ctx, filter_slice, &td, NULL,
+                               FFMIN(s->height[2], ff_filter_get_nb_threads(ctx)));
     }
 
     out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
@@ -241,7 +279,6 @@ static int config_output(AVFilterLink *outlink)
 
     outlink->w = base->w;
     outlink->h = base->h;
-    outlink->time_base = base->time_base;
     outlink->sample_aspect_ratio = base->sample_aspect_ratio;
     outlink->frame_rate = base->frame_rate;
 
@@ -268,7 +305,10 @@ static int config_output(AVFilterLink *outlink)
     s->fs.opaque   = s;
     s->fs.on_event = process_frame;
 
-    return ff_framesync_configure(&s->fs);
+    ret = ff_framesync_configure(&s->fs);
+    outlink->time_base = s->fs.time_base;
+
+    return ret;
 }
 
 static int activate(AVFilterContext *ctx)
@@ -314,7 +354,7 @@ static const AVFilterPad outputs[] = {
     { NULL }
 };
 
-AVFilter ff_vf_threshold = {
+const AVFilter ff_vf_threshold = {
     .name          = "threshold",
     .description   = NULL_IF_CONFIG_SMALL("Threshold first video stream using other video streams."),
     .priv_size     = sizeof(ThresholdContext),
@@ -324,5 +364,5 @@ AVFilter ff_vf_threshold = {
     .activate      = activate,
     .inputs        = inputs,
     .outputs       = outputs,
-    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
 };