2 * Copyright (c) 2010 Mark Heath mjpeg0 @ silicontrip dot org
3 * Copyright (c) 2014 Clément Bœsch
4 * Copyright (c) 2014 Dave Rice @dericed
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavutil/intreadwrite.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/pixdesc.h"
36 typedef struct SignalstatsContext {
38     int chromah;    // height of chroma plane
39     int chromaw;    // width of chroma plane
40     int hsub;       // horizontal subsampling
41     int vsub;       // vertical subsampling
42     int depth;      // pixel depth
43     int fs;         // pixel count per frame
44     int cfs;        // pixel count per frame of chroma planes
45     int outfilter;  // FilterMode
    // NOTE(review): additional members (the "filters" bitmask, yuv_color[3],
    // frame_prev/frame_sat/frame_hue, nb_jobs, jobs_rets) are used by the
    // functions below but their declarations are not visible in this excerpt.
48     uint8_t rgba_color[4];
    // Per-frame histograms for the >8-bit path, allocated with 1<<depth bins
    // each in config_props() and freed in uninit().
53     int *histy, *histu, *histv, *histsat;
59 typedef struct ThreadData {
    // NOTE(review): members are not visible here; the slice workers below
    // read td->in and td->out — presumably "in" is the source frame and
    // "out" the optional highlight frame — confirm against the full file.
64 typedef struct ThreadDataHueSatMetrics {
    // NOTE(review): a source-frame member (read as td->src below) is
    // declared on a line not visible in this excerpt.
66     AVFrame *dst_sat, *dst_hue;
67 } ThreadDataHueSatMetrics;
69 #define OFFSET(x) offsetof(SignalstatsContext, x)
70 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
    // "stat" is a bitmask selecting which analyses run; "out" selects at most
    // one analysis whose matches are burned into the output frame using the
    // color from "c"/"color".  Both options share the FILTER_* enum; note the
    // "stat" consts are bit values (1<<FILTER_x) while "out" consts are the
    // plain enum values.
72 static const AVOption signalstats_options[] = {
73     {"stat", "set statistics filters", OFFSET(filters), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, INT_MAX, FLAGS, "filters"},
74     {"tout", "analyze pixels for temporal outliers", 0, AV_OPT_TYPE_CONST, {.i64=1<<FILTER_TOUT}, 0, 0, FLAGS, "filters"},
75     {"vrep", "analyze video lines for vertical line repetition", 0, AV_OPT_TYPE_CONST, {.i64=1<<FILTER_VREP}, 0, 0, FLAGS, "filters"},
76     {"brng", "analyze for pixels outside of broadcast range", 0, AV_OPT_TYPE_CONST, {.i64=1<<FILTER_BRNG}, 0, 0, FLAGS, "filters"},
77     {"out", "set video filter", OFFSET(outfilter), AV_OPT_TYPE_INT, {.i64=FILTER_NONE}, -1, FILT_NUMB-1, FLAGS, "out"},
78     {"tout", "highlight pixels that depict temporal outliers", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_TOUT}, 0, 0, FLAGS, "out"},
79     {"vrep", "highlight video lines that depict vertical line repetition", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_VREP}, 0, 0, FLAGS, "out"},
80     {"brng", "highlight pixels that are outside of broadcast range", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_BRNG}, 0, 0, FLAGS, "out"},
81     {"c", "set highlight color", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str="yellow"}, .flags=FLAGS},
82     {"color", "set highlight color", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str="yellow"}, .flags=FLAGS},
    // NOTE(review): the {NULL} terminator entry is on a line not visible in
    // this excerpt.
86 AVFILTER_DEFINE_CLASS(signalstats);
88 static av_cold int init(AVFilterContext *ctx)
91 SignalstatsContext *s = ctx->priv;
93 if (s->outfilter != FILTER_NONE)
94 s->filters |= 1 << s->outfilter;
99 s->yuv_color[0] = (( 66*r + 129*g + 25*b + (1<<7)) >> 8) + 16;
100 s->yuv_color[1] = ((-38*r + -74*g + 112*b + (1<<7)) >> 8) + 128;
101 s->yuv_color[2] = ((112*r + -94*g + -18*b + (1<<7)) >> 8) + 128;
105 static av_cold void uninit(AVFilterContext *ctx)
107 SignalstatsContext *s = ctx->priv;
108 av_frame_free(&s->frame_prev);
109 av_frame_free(&s->frame_sat);
110 av_frame_free(&s->frame_hue);
111 av_freep(&s->jobs_rets);
115 av_freep(&s->histsat);
118 static int query_formats(AVFilterContext *ctx)
    // Planar YUV only, 8 through 16 bits per component; full-range YUVJ
    // variants are accepted alongside the limited-range formats.
    // NOTE(review): several list entries (e.g. the 8-bit 440/410 formats and
    // the AV_PIX_FMT_NONE terminator) are on lines not visible in this excerpt.
121     static const enum AVPixelFormat pix_fmts[] = {
122         AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
124         AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
126         AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV420P9,
127         AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10,
128         AV_PIX_FMT_YUV440P10,
129         AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
130         AV_PIX_FMT_YUV440P12,
131         AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
132         AV_PIX_FMT_YUV444P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV420P16,
136     AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    // ff_make_format_list() returns NULL on allocation failure.
138         return AVERROR(ENOMEM);
139     return ff_set_common_formats(ctx, fmts_list);
142 static AVFrame *alloc_frame(enum AVPixelFormat pixfmt, int w, int h)
144 AVFrame *frame = av_frame_alloc();
148 frame->format = pixfmt;
152 if (av_frame_get_buffer(frame, 32) < 0) {
153 av_frame_free(&frame);
160 static int config_props(AVFilterLink *outlink)
    // Per-link setup: cache subsampling/depth, allocate histograms sized to
    // the full code range (1<<depth bins), size the slice-thread job array
    // and allocate the scratch saturation/hue frames.
162     AVFilterContext *ctx = outlink->src;
163     SignalstatsContext *s = ctx->priv;
164     AVFilterLink *inlink = outlink->src->inputs[0];
165     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
166     s->hsub = desc->log2_chroma_w;
167     s->vsub = desc->log2_chroma_h;
168     s->depth = desc->comp[0].depth;
    // NOTE(review): a line between here and the allocations is missing from
    // this excerpt — possibly a "if (s->depth > 8) {" guard, since the 8-bit
    // path (filter_frame8) uses stack histograms instead. Confirm.
170     s->histy = av_malloc_array(1 << s->depth, sizeof(*s->histy));
171     s->histu = av_malloc_array(1 << s->depth, sizeof(*s->histu));
172     s->histv = av_malloc_array(1 << s->depth, sizeof(*s->histv));
173     s->histsat = av_malloc_array(1 << s->depth, sizeof(*s->histsat));
175     if (!s->histy || !s->histu || !s->histv || !s->histsat)
176         return AVERROR(ENOMEM);
179     outlink->w = inlink->w;
180     outlink->h = inlink->h;
    // Ceiling shift so odd luma dimensions still cover all chroma samples.
182     s->chromaw = AV_CEIL_RSHIFT(inlink->w, s->hsub);
183     s->chromah = AV_CEIL_RSHIFT(inlink->h, s->vsub);
185     s->fs = inlink->w * inlink->h;
186     s->cfs = s->chromaw * s->chromah;
    // One job per thread, but never more jobs than rows.
188     s->nb_jobs = FFMAX(1, FFMIN(inlink->h, ff_filter_get_nb_threads(ctx)));
189     s->jobs_rets = av_malloc_array(s->nb_jobs, sizeof(*s->jobs_rets));
191         return AVERROR(ENOMEM);
    // Saturation fits in 8 bits for 8-bit input; hue always needs 16 bits.
193     s->frame_sat = alloc_frame(s->depth > 8 ? AV_PIX_FMT_GRAY16 : AV_PIX_FMT_GRAY8, inlink->w, inlink->h);
194     s->frame_hue = alloc_frame(AV_PIX_FMT_GRAY16, inlink->w, inlink->h);
195     if (!s->frame_sat || !s->frame_hue)
196         return AVERROR(ENOMEM);
201 static void burn_frame8(const SignalstatsContext *s, AVFrame *f, int x, int y)
203 const int chromax = x >> s->hsub;
204 const int chromay = y >> s->vsub;
205 f->data[0][y * f->linesize[0] + x] = s->yuv_color[0];
206 f->data[1][chromay * f->linesize[1] + chromax] = s->yuv_color[1];
207 f->data[2][chromay * f->linesize[2] + chromax] = s->yuv_color[2];
210 static void burn_frame16(const SignalstatsContext *s, AVFrame *f, int x, int y)
212 const int chromax = x >> s->hsub;
213 const int chromay = y >> s->vsub;
214 const int mult = 1 << (s->depth - 8);
215 AV_WN16(f->data[0] + y * f->linesize[0] + x * 2, s->yuv_color[0] * mult);
216 AV_WN16(f->data[1] + chromay * f->linesize[1] + chromax * 2, s->yuv_color[1] * mult);
217 AV_WN16(f->data[2] + chromay * f->linesize[2] + chromax * 2, s->yuv_color[2] * mult);
220 static int filter8_brng(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
222 ThreadData *td = arg;
223 const SignalstatsContext *s = ctx->priv;
224 const AVFrame *in = td->in;
225 AVFrame *out = td->out;
226 const int w = in->width;
227 const int h = in->height;
228 const int slice_start = (h * jobnr ) / nb_jobs;
229 const int slice_end = (h * (jobnr+1)) / nb_jobs;
232 for (y = slice_start; y < slice_end; y++) {
233 const int yc = y >> s->vsub;
234 const uint8_t *pluma = &in->data[0][y * in->linesize[0]];
235 const uint8_t *pchromau = &in->data[1][yc * in->linesize[1]];
236 const uint8_t *pchromav = &in->data[2][yc * in->linesize[2]];
238 for (x = 0; x < w; x++) {
239 const int xc = x >> s->hsub;
240 const int luma = pluma[x];
241 const int chromau = pchromau[xc];
242 const int chromav = pchromav[xc];
243 const int filt = luma < 16 || luma > 235 ||
244 chromau < 16 || chromau > 240 ||
245 chromav < 16 || chromav > 240;
248 burn_frame8(s, out, x, y);
254 static int filter16_brng(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
256 ThreadData *td = arg;
257 const SignalstatsContext *s = ctx->priv;
258 const AVFrame *in = td->in;
259 AVFrame *out = td->out;
260 const int mult = 1 << (s->depth - 8);
261 const int w = in->width;
262 const int h = in->height;
263 const int slice_start = (h * jobnr ) / nb_jobs;
264 const int slice_end = (h * (jobnr+1)) / nb_jobs;
267 for (y = slice_start; y < slice_end; y++) {
268 const int yc = y >> s->vsub;
269 const uint16_t *pluma = (uint16_t *)&in->data[0][y * in->linesize[0]];
270 const uint16_t *pchromau = (uint16_t *)&in->data[1][yc * in->linesize[1]];
271 const uint16_t *pchromav = (uint16_t *)&in->data[2][yc * in->linesize[2]];
273 for (x = 0; x < w; x++) {
274 const int xc = x >> s->hsub;
275 const int luma = pluma[x];
276 const int chromau = pchromau[xc];
277 const int chromav = pchromav[xc];
278 const int filt = luma < 16 * mult || luma > 235 * mult ||
279 chromau < 16 * mult || chromau > 240 * mult ||
280 chromav < 16 * mult || chromav > 240 * mult;
283 burn_frame16(s, out, x, y);
/**
 * Temporal-outlier predicate: y is flagged when its mean distance to its
 * vertical neighbors x and z exceeds the x<->z distance by more than 4.
 */
static int filter_tout_outlier(uint8_t x, uint8_t y, uint8_t z)
{
    const int neighbor_dist = (abs(x - y) + abs(z - y)) / 2;
    return neighbor_dist - abs(z - x) > 4; // make 4 configurable?
}
294 static int filter8_tout(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
296 ThreadData *td = arg;
297 const SignalstatsContext *s = ctx->priv;
298 const AVFrame *in = td->in;
299 AVFrame *out = td->out;
300 const int w = in->width;
301 const int h = in->height;
302 const int slice_start = (h * jobnr ) / nb_jobs;
303 const int slice_end = (h * (jobnr+1)) / nb_jobs;
304 const uint8_t *p = in->data[0];
305 int lw = in->linesize[0];
306 int x, y, score = 0, filt;
308 for (y = slice_start; y < slice_end; y++) {
310 if (y - 1 < 0 || y + 1 >= h)
313 // detect two pixels above and below (to eliminate interlace artefacts)
314 // should check that video format is infact interlaced.
316 #define FILTER(i, j) \
317 filter_tout_outlier(p[(y-j) * lw + x + i], \
318 p[ y * lw + x + i], \
319 p[(y+j) * lw + x + i])
321 #define FILTER3(j) (FILTER(-1, j) && FILTER(0, j) && FILTER(1, j))
323 if (y - 2 >= 0 && y + 2 < h) {
324 for (x = 1; x < w - 1; x++) {
325 filt = FILTER3(2) && FILTER3(1);
328 burn_frame8(s, out, x, y);
331 for (x = 1; x < w - 1; x++) {
335 burn_frame8(s, out, x, y);
342 static int filter16_tout(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
344 ThreadData *td = arg;
345 const SignalstatsContext *s = ctx->priv;
346 const AVFrame *in = td->in;
347 AVFrame *out = td->out;
348 const int w = in->width;
349 const int h = in->height;
350 const int slice_start = (h * jobnr ) / nb_jobs;
351 const int slice_end = (h * (jobnr+1)) / nb_jobs;
352 const uint16_t *p = (uint16_t *)in->data[0];
353 int lw = in->linesize[0] / 2;
354 int x, y, score = 0, filt;
356 for (y = slice_start; y < slice_end; y++) {
358 if (y - 1 < 0 || y + 1 >= h)
361 // detect two pixels above and below (to eliminate interlace artefacts)
362 // should check that video format is infact interlaced.
364 if (y - 2 >= 0 && y + 2 < h) {
365 for (x = 1; x < w - 1; x++) {
366 filt = FILTER3(2) && FILTER3(1);
369 burn_frame16(s, out, x, y);
372 for (x = 1; x < w - 1; x++) {
376 burn_frame16(s, out, x, y);
385 static int filter8_vrep(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
387 ThreadData *td = arg;
388 const SignalstatsContext *s = ctx->priv;
389 const AVFrame *in = td->in;
390 AVFrame *out = td->out;
391 const int w = in->width;
392 const int h = in->height;
393 const int slice_start = (h * jobnr ) / nb_jobs;
394 const int slice_end = (h * (jobnr+1)) / nb_jobs;
395 const uint8_t *p = in->data[0];
396 const int lw = in->linesize[0];
399 for (y = slice_start; y < slice_end; y++) {
400 const int y2lw = (y - VREP_START) * lw;
401 const int ylw = y * lw;
402 int filt, totdiff = 0;
407 for (x = 0; x < w; x++)
408 totdiff += abs(p[y2lw + x] - p[ylw + x]);
413 for (x = 0; x < w; x++)
414 burn_frame8(s, out, x, y);
419 static int filter16_vrep(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
421 ThreadData *td = arg;
422 const SignalstatsContext *s = ctx->priv;
423 const AVFrame *in = td->in;
424 AVFrame *out = td->out;
425 const int w = in->width;
426 const int h = in->height;
427 const int slice_start = (h * jobnr ) / nb_jobs;
428 const int slice_end = (h * (jobnr+1)) / nb_jobs;
429 const uint16_t *p = (uint16_t *)in->data[0];
430 const int lw = in->linesize[0] / 2;
433 for (y = slice_start; y < slice_end; y++) {
434 const int y2lw = (y - VREP_START) * lw;
435 const int ylw = y * lw;
442 for (x = 0; x < w; x++)
443 totdiff += abs(p[y2lw + x] - p[ylw + x]);
448 for (x = 0; x < w; x++)
449 burn_frame16(s, out, x, y);
454 static const struct {
    // Dispatch table for the optional analyses: metadata key name plus the
    // 8-bit and >8-bit slice worker for each; indexed by the FILTER_* enum.
456     int (*process8)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
457     int (*process16)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
459     {"TOUT", filter8_tout, filter16_tout},
460     {"VREP", filter8_vrep, filter16_vrep},
461     {"BRNG", filter8_brng, filter16_brng},
467 static int compute_sat_hue_metrics8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
470 ThreadDataHueSatMetrics *td = arg;
471 const SignalstatsContext *s = ctx->priv;
472 const AVFrame *src = td->src;
473 AVFrame *dst_sat = td->dst_sat;
474 AVFrame *dst_hue = td->dst_hue;
476 const int slice_start = (s->chromah * jobnr ) / nb_jobs;
477 const int slice_end = (s->chromah * (jobnr+1)) / nb_jobs;
479 const int lsz_u = src->linesize[1];
480 const int lsz_v = src->linesize[2];
481 const uint8_t *p_u = src->data[1] + slice_start * lsz_u;
482 const uint8_t *p_v = src->data[2] + slice_start * lsz_v;
484 const int lsz_sat = dst_sat->linesize[0];
485 const int lsz_hue = dst_hue->linesize[0];
486 uint8_t *p_sat = dst_sat->data[0] + slice_start * lsz_sat;
487 uint8_t *p_hue = dst_hue->data[0] + slice_start * lsz_hue;
489 for (j = slice_start; j < slice_end; j++) {
490 for (i = 0; i < s->chromaw; i++) {
491 const int yuvu = p_u[i];
492 const int yuvv = p_v[i];
493 p_sat[i] = hypot(yuvu - 128, yuvv - 128); // int or round?
494 ((int16_t*)p_hue)[i] = floor((180 / M_PI) * atan2f(yuvu-128, yuvv-128) + 180);
505 static int compute_sat_hue_metrics16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
508 ThreadDataHueSatMetrics *td = arg;
509 const SignalstatsContext *s = ctx->priv;
510 const AVFrame *src = td->src;
511 AVFrame *dst_sat = td->dst_sat;
512 AVFrame *dst_hue = td->dst_hue;
513 const int mid = 1 << (s->depth - 1);
515 const int slice_start = (s->chromah * jobnr ) / nb_jobs;
516 const int slice_end = (s->chromah * (jobnr+1)) / nb_jobs;
518 const int lsz_u = src->linesize[1] / 2;
519 const int lsz_v = src->linesize[2] / 2;
520 const uint16_t *p_u = (uint16_t*)src->data[1] + slice_start * lsz_u;
521 const uint16_t *p_v = (uint16_t*)src->data[2] + slice_start * lsz_v;
523 const int lsz_sat = dst_sat->linesize[0] / 2;
524 const int lsz_hue = dst_hue->linesize[0] / 2;
525 uint16_t *p_sat = (uint16_t*)dst_sat->data[0] + slice_start * lsz_sat;
526 uint16_t *p_hue = (uint16_t*)dst_hue->data[0] + slice_start * lsz_hue;
528 for (j = slice_start; j < slice_end; j++) {
529 for (i = 0; i < s->chromaw; i++) {
530 const int yuvu = p_u[i];
531 const int yuvv = p_v[i];
532 p_sat[i] = hypot(yuvu - mid, yuvv - mid); // int or round?
533 ((int16_t*)p_hue)[i] = floor((180 / M_PI) * atan2f(yuvu-mid, yuvv-mid) + 180);
/**
 * Count the set bits in the per-plane value mask; used to report the
 * effective bit depth of each plane (a plane whose samples only ever set
 * the top N bits has N significant bits).
 */
static unsigned compute_bit_depth(uint16_t mask)
{
    unsigned bits = 0;
    while (mask) {
        bits += mask & 1;
        mask >>= 1;
    }
    return bits;
}
549 static int filter_frame8(AVFilterLink *link, AVFrame *in)
    // 8-bit path: gathers Y/U/V/saturation/hue statistics for one frame,
    // runs the selected analyses (TOUT/VREP/BRNG) across slice threads,
    // and exports everything as lavfi.signalstats.* frame metadata.
551     AVFilterContext *ctx = link->dst;
552     SignalstatsContext *s = ctx->priv;
553     AVFilterLink *outlink = ctx->outputs[0];
    // Running byte offsets into the current and previous frame's planes.
556     int w = 0, cw = 0, // in
557     pw = 0, cpw = 0; // prev
560     unsigned int histy[DEPTH] = {0},
564     histsat[DEPTH] = {0}; // limited to 8 bit data.
565     int miny = -1, minu = -1, minv = -1;
566     int maxy = -1, maxu = -1, maxv = -1;
567     int lowy = -1, lowu = -1, lowv = -1;
568     int highy = -1, highu = -1, highv = -1;
569     int minsat = -1, maxsat = -1, lowsat = -1, highsat = -1;
570     int lowp, highp, clowp, chighp;
571     int accy, accu, accv;
572     int accsat, acchue = 0;
574     int toty = 0, totu = 0, totv = 0, totsat=0;
576     int dify = 0, difu = 0, difv = 0;
    // masks accumulate which bit positions ever fire -> effective bit depth.
577     uint16_t masky = 0, masku = 0, maskv = 0;
579     int filtot[FILT_NUMB] = {0};
582     AVFrame *sat = s->frame_sat;
583     AVFrame *hue = s->frame_hue;
584     const uint8_t *p_sat = sat->data[0];
585     const uint8_t *p_hue = hue->data[0];
586     const int lsz_sat = sat->linesize[0];
587     const int lsz_hue = hue->linesize[0];
588     ThreadDataHueSatMetrics td_huesat = {
    // First frame: use the input itself as "previous" (zero temporal diff).
595     s->frame_prev = av_frame_clone(in);
597     prev = s->frame_prev;
599     if (s->outfilter != FILTER_NONE) {
600         out = av_frame_clone(in);
601         av_frame_make_writable(out);
604     ctx->internal->execute(ctx, compute_sat_hue_metrics8, &td_huesat,
605                            NULL, FFMIN(s->chromah, ff_filter_get_nb_threads(ctx)));
607     // Calculate luma histogram and difference with previous frame or field.
608     for (j = 0; j < link->h; j++) {
609         for (i = 0; i < link->w; i++) {
610             const int yuv = in->data[0][w + i];
614             dify += abs(yuv - prev->data[0][pw + i]);
616         w += in->linesize[0];
617         pw += prev->linesize[0];
620     // Calculate chroma histogram and difference with previous frame or field.
621     for (j = 0; j < s->chromah; j++) {
622         for (i = 0; i < s->chromaw; i++) {
623             const int yuvu = in->data[1][cw+i];
624             const int yuvv = in->data[2][cw+i];
629             difu += abs(yuvu - prev->data[1][cpw+i]);
631             difv += abs(yuvv - prev->data[2][cpw+i]);
    // p_hue rows hold int16 degrees in [0,360); bucket directly.
634             histhue[((int16_t*)p_hue)[i]]++;
636         cw += in->linesize[1];
637         cpw += prev->linesize[1];
    // Run each enabled analysis across the slice-thread pool and sum the
    // per-slice hit counts returned through jobs_rets.
642     for (fil = 0; fil < FILT_NUMB; fil ++) {
643         if (s->filters & 1<<fil) {
646             .out = out != in && s->outfilter == fil ? out : NULL,
648             memset(s->jobs_rets, 0, s->nb_jobs * sizeof(*s->jobs_rets));
649             ctx->internal->execute(ctx, filters_def[fil].process8,
650                                    &td, s->jobs_rets, s->nb_jobs);
651             for (i = 0; i < s->nb_jobs; i++)
652                 filtot[fil] += s->jobs_rets[i];
656     // find low / high based on histogram percentile
657     // these only need to be calculated once.
659     lowp   = lrint(s->fs  * 10 / 100.);
660     highp  = lrint(s->fs  * 90 / 100.);
661     clowp  = lrint(s->cfs * 10 / 100.);
662     chighp = lrint(s->cfs * 90 / 100.);
664     accy = accu = accv = accsat = 0;
665     for (fil = 0; fil < DEPTH; fil++) {
666         if (miny   < 0 && histy[fil])   miny = fil;
667         if (minu   < 0 && histu[fil])   minu = fil;
668         if (minv   < 0 && histv[fil])   minv = fil;
669         if (minsat < 0 && histsat[fil]) minsat = fil;
671         if (histy[fil])   maxy   = fil;
672         if (histu[fil])   maxu   = fil;
673         if (histv[fil])   maxv   = fil;
674         if (histsat[fil]) maxsat = fil;
676         toty   += histy[fil]   * fil;
677         totu   += histu[fil]   * fil;
678         totv   += histv[fil]   * fil;
679         totsat += histsat[fil] * fil;
684         accsat += histsat[fil];
686         if (lowy   == -1 && accy   >=  lowp) lowy   = fil;
687         if (lowu   == -1 && accu   >= clowp) lowu   = fil;
688         if (lowv   == -1 && accv   >= clowp) lowv   = fil;
689         if (lowsat == -1 && accsat >= clowp) lowsat = fil;
691         if (highy   == -1 && accy   >=  highp) highy   = fil;
692         if (highu   == -1 && accu   >= chighp) highu   = fil;
693         if (highv   == -1 && accv   >= chighp) highv   = fil;
694         if (highsat == -1 && accsat >= chighp) highsat = fil;
    // Hue: median = first degree where the cumulative count passes half the
    // chroma samples; mode tracking via maxhue.
699     for (fil = 0; fil < 360; fil++) {
700         tothue += histhue[fil] * fil;
701         acchue += histhue[fil];
703         if (medhue == -1 && acchue > s->cfs / 2)
705         if (histhue[fil] > maxhue) {
706             maxhue = histhue[fil];
    // Keep this frame as the reference for the next temporal diff.
710     av_frame_free(&s->frame_prev);
711     s->frame_prev = av_frame_clone(in);
713 #define SET_META(key, fmt, val) do {                                \
714     snprintf(metabuf, sizeof(metabuf), fmt, val);                   \
715     av_dict_set(&out->metadata, "lavfi.signalstats." key, metabuf, 0); \
718     SET_META("YMIN",    "%d", miny);
719     SET_META("YLOW",    "%d", lowy);
720     SET_META("YAVG",    "%g", 1.0 * toty / s->fs);
721     SET_META("YHIGH",   "%d", highy);
722     SET_META("YMAX",    "%d", maxy);
724     SET_META("UMIN",    "%d", minu);
725     SET_META("ULOW",    "%d", lowu);
726     SET_META("UAVG",    "%g", 1.0 * totu / s->cfs);
727     SET_META("UHIGH",   "%d", highu);
728     SET_META("UMAX",    "%d", maxu);
730     SET_META("VMIN",    "%d", minv);
731     SET_META("VLOW",    "%d", lowv);
732     SET_META("VAVG",    "%g", 1.0 * totv / s->cfs);
733     SET_META("VHIGH",   "%d", highv);
734     SET_META("VMAX",    "%d", maxv);
736     SET_META("SATMIN",  "%d", minsat);
737     SET_META("SATLOW",  "%d", lowsat);
738     SET_META("SATAVG",  "%g", 1.0 * totsat / s->cfs);
739     SET_META("SATHIGH", "%d", highsat);
740     SET_META("SATMAX",  "%d", maxsat);
742     SET_META("HUEMED",  "%d", medhue);
743     SET_META("HUEAVG",  "%g", 1.0 * tothue / s->cfs);
745     SET_META("YDIF",    "%g", 1.0 * dify / s->fs);
746     SET_META("UDIF",    "%g", 1.0 * difu / s->cfs);
747     SET_META("VDIF",    "%g", 1.0 * difv / s->cfs);
749     SET_META("YBITDEPTH", "%d", compute_bit_depth(masky));
750     SET_META("UBITDEPTH", "%d", compute_bit_depth(masku));
751     SET_META("VBITDEPTH", "%d", compute_bit_depth(maskv));
753     for (fil = 0; fil < FILT_NUMB; fil ++) {
754         if (s->filters & 1<<fil) {
756             snprintf(metabuf, sizeof(metabuf), "%g", 1.0 * filtot[fil] / s->fs);
757             snprintf(metaname, sizeof(metaname), "lavfi.signalstats.%s", filters_def[fil].name);
758             av_dict_set(&out->metadata, metaname, metabuf, 0);
764     return ff_filter_frame(outlink, out);
767 static int filter_frame16(AVFilterLink *link, AVFrame *in)
    // >8-bit path: mirror of filter_frame8 with 16-bit sample reads
    // (AV_RN16), 64-bit accumulators, and heap histograms of 1<<depth bins
    // (stack arrays would be too large).
769     AVFilterContext *ctx = link->dst;
770     SignalstatsContext *s = ctx->priv;
771     AVFilterLink *outlink = ctx->outputs[0];
    // Running byte offsets into the current and previous frame's planes.
774     int w = 0, cw = 0, // in
775     pw = 0, cpw = 0; // prev
778     unsigned int *histy = s->histy,
782     *histsat = s->histsat;
783     int miny = -1, minu = -1, minv = -1;
784     int maxy = -1, maxu = -1, maxv = -1;
785     int lowy = -1, lowu = -1, lowv = -1;
786     int highy = -1, highu = -1, highv = -1;
787     int minsat = -1, maxsat = -1, lowsat = -1, highsat = -1;
788     int lowp, highp, clowp, chighp;
789     int accy, accu, accv;
790     int accsat, acchue = 0;
792     int64_t toty = 0, totu = 0, totv = 0, totsat=0;
794     int64_t dify = 0, difu = 0, difv = 0;
795     uint16_t masky = 0, masku = 0, maskv = 0;
797     int filtot[FILT_NUMB] = {0};
800     AVFrame *sat = s->frame_sat;
801     AVFrame *hue = s->frame_hue;
802     const uint16_t *p_sat = (uint16_t *)sat->data[0];
803     const uint16_t *p_hue = (uint16_t *)hue->data[0];
804     const int lsz_sat = sat->linesize[0] / 2;
805     const int lsz_hue = hue->linesize[0] / 2;
806     ThreadDataHueSatMetrics td_huesat = {
    // First frame: use the input itself as "previous" (zero temporal diff).
813     s->frame_prev = av_frame_clone(in);
815     prev = s->frame_prev;
817     if (s->outfilter != FILTER_NONE) {
818         out = av_frame_clone(in);
819         av_frame_make_writable(out);
822     ctx->internal->execute(ctx, compute_sat_hue_metrics16, &td_huesat,
823                            NULL, FFMIN(s->chromah, ff_filter_get_nb_threads(ctx)));
825     // Calculate luma histogram and difference with previous frame or field.
826     memset(s->histy, 0, (1 << s->depth) * sizeof(*s->histy));
827     for (j = 0; j < link->h; j++) {
828         for (i = 0; i < link->w; i++) {
829             const int yuv = AV_RN16(in->data[0] + w + i * 2);
833             dify += abs(yuv - AV_RN16(prev->data[0] + pw + i * 2));
835         w += in->linesize[0];
836         pw += prev->linesize[0];
839     // Calculate chroma histogram and difference with previous frame or field.
840     memset(s->histu, 0, (1 << s->depth) * sizeof(*s->histu));
841     memset(s->histv, 0, (1 << s->depth) * sizeof(*s->histv));
842     memset(s->histsat, 0, (1 << s->depth) * sizeof(*s->histsat));
843     for (j = 0; j < s->chromah; j++) {
844         for (i = 0; i < s->chromaw; i++) {
845             const int yuvu = AV_RN16(in->data[1] + cw + i * 2);
846             const int yuvv = AV_RN16(in->data[2] + cw + i * 2);
851             difu += abs(yuvu - AV_RN16(prev->data[1] + cpw + i * 2));
853             difv += abs(yuvv - AV_RN16(prev->data[2] + cpw + i * 2));
    // p_hue rows hold int16 degrees in [0,360); bucket directly.
856             histhue[((int16_t*)p_hue)[i]]++;
858         cw += in->linesize[1];
859         cpw += prev->linesize[1];
    // Run each enabled analysis across the slice-thread pool and sum the
    // per-slice hit counts returned through jobs_rets.
864     for (fil = 0; fil < FILT_NUMB; fil ++) {
865         if (s->filters & 1<<fil) {
868             .out = out != in && s->outfilter == fil ? out : NULL,
870             memset(s->jobs_rets, 0, s->nb_jobs * sizeof(*s->jobs_rets));
871             ctx->internal->execute(ctx, filters_def[fil].process16,
872                                    &td, s->jobs_rets, s->nb_jobs);
873             for (i = 0; i < s->nb_jobs; i++)
874                 filtot[fil] += s->jobs_rets[i];
878     // find low / high based on histogram percentile
879     // these only need to be calculated once.
881     lowp   = lrint(s->fs  * 10 / 100.);
882     highp  = lrint(s->fs  * 90 / 100.);
883     clowp  = lrint(s->cfs * 10 / 100.);
884     chighp = lrint(s->cfs * 90 / 100.);
886     accy = accu = accv = accsat = 0;
887     for (fil = 0; fil < 1 << s->depth; fil++) {
888         if (miny   < 0 && histy[fil])   miny = fil;
889         if (minu   < 0 && histu[fil])   minu = fil;
890         if (minv   < 0 && histv[fil])   minv = fil;
891         if (minsat < 0 && histsat[fil]) minsat = fil;
893         if (histy[fil])   maxy   = fil;
894         if (histu[fil])   maxu   = fil;
895         if (histv[fil])   maxv   = fil;
896         if (histsat[fil]) maxsat = fil;
898         toty   += histy[fil]   * fil;
899         totu   += histu[fil]   * fil;
900         totv   += histv[fil]   * fil;
901         totsat += histsat[fil] * fil;
906         accsat += histsat[fil];
908         if (lowy   == -1 && accy   >=  lowp) lowy   = fil;
909         if (lowu   == -1 && accu   >= clowp) lowu   = fil;
910         if (lowv   == -1 && accv   >= clowp) lowv   = fil;
911         if (lowsat == -1 && accsat >= clowp) lowsat = fil;
913         if (highy   == -1 && accy   >=  highp) highy   = fil;
914         if (highu   == -1 && accu   >= chighp) highu   = fil;
915         if (highv   == -1 && accv   >= chighp) highv   = fil;
916         if (highsat == -1 && accsat >= chighp) highsat = fil;
    // Hue: median = first degree where the cumulative count passes half the
    // chroma samples; mode tracking via maxhue.
921     for (fil = 0; fil < 360; fil++) {
922         tothue += histhue[fil] * fil;
923         acchue += histhue[fil];
925         if (medhue == -1 && acchue > s->cfs / 2)
927         if (histhue[fil] > maxhue) {
928             maxhue = histhue[fil];
    // Keep this frame as the reference for the next temporal diff.
932     av_frame_free(&s->frame_prev);
933     s->frame_prev = av_frame_clone(in);
935     SET_META("YMIN",    "%d", miny);
936     SET_META("YLOW",    "%d", lowy);
937     SET_META("YAVG",    "%g", 1.0 * toty / s->fs);
938     SET_META("YHIGH",   "%d", highy);
939     SET_META("YMAX",    "%d", maxy);
941     SET_META("UMIN",    "%d", minu);
942     SET_META("ULOW",    "%d", lowu);
943     SET_META("UAVG",    "%g", 1.0 * totu / s->cfs);
944     SET_META("UHIGH",   "%d", highu);
945     SET_META("UMAX",    "%d", maxu);
947     SET_META("VMIN",    "%d", minv);
948     SET_META("VLOW",    "%d", lowv);
949     SET_META("VAVG",    "%g", 1.0 * totv / s->cfs);
950     SET_META("VHIGH",   "%d", highv);
951     SET_META("VMAX",    "%d", maxv);
953     SET_META("SATMIN",  "%d", minsat);
954     SET_META("SATLOW",  "%d", lowsat);
955     SET_META("SATAVG",  "%g", 1.0 * totsat / s->cfs);
956     SET_META("SATHIGH", "%d", highsat);
957     SET_META("SATMAX",  "%d", maxsat);
959     SET_META("HUEMED",  "%d", medhue);
960     SET_META("HUEAVG",  "%g", 1.0 * tothue / s->cfs);
962     SET_META("YDIF",    "%g", 1.0 * dify / s->fs);
963     SET_META("UDIF",    "%g", 1.0 * difu / s->cfs);
964     SET_META("VDIF",    "%g", 1.0 * difv / s->cfs);
966     SET_META("YBITDEPTH", "%d", compute_bit_depth(masky));
967     SET_META("UBITDEPTH", "%d", compute_bit_depth(masku));
968     SET_META("VBITDEPTH", "%d", compute_bit_depth(maskv));
970     for (fil = 0; fil < FILT_NUMB; fil ++) {
971         if (s->filters & 1<<fil) {
973             snprintf(metabuf, sizeof(metabuf), "%g", 1.0 * filtot[fil] / s->fs);
974             snprintf(metaname, sizeof(metaname), "lavfi.signalstats.%s", filters_def[fil].name);
975             av_dict_set(&out->metadata, metaname, metabuf, 0);
981     return ff_filter_frame(outlink, out);
984 static int filter_frame(AVFilterLink *link, AVFrame *in)
986 AVFilterContext *ctx = link->dst;
987 SignalstatsContext *s = ctx->priv;
990 return filter_frame16(link, in);
992 return filter_frame8(link, in);
995 static const AVFilterPad signalstats_inputs[] = {
    // Single video input; per-frame processing happens in filter_frame().
998         .type = AVMEDIA_TYPE_VIDEO,
999         .filter_frame = filter_frame,
1004 static const AVFilterPad signalstats_outputs[] = {
    // Single video output; config_props() derives depth/subsampling and
    // allocates the per-instance buffers once the format is negotiated.
1007         .config_props = config_props,
1008         .type = AVMEDIA_TYPE_VIDEO,
1013 AVFilter ff_vf_signalstats = {
1014     .name = "signalstats",
1015     .description = "Generate statistics from video analysis.",
1018     .query_formats = query_formats,
1019     .priv_size = sizeof(SignalstatsContext),
1020     .inputs = signalstats_inputs,
1021     .outputs = signalstats_outputs,
1022     .priv_class = &signalstats_class,
    // Slice threading: the per-filter workers above are invoked via
    // ctx->internal->execute over nb_jobs slices.
1023     .flags = AVFILTER_FLAG_SLICE_THREADS,