2 * Copyright (c) 2012 Fredrik Mellbin
3 * Copyright (c) 2013 Clément Bœsch
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/opt.h"
23 #include "libavutil/pixdesc.h"
24 #include "libavutil/timestamp.h"
30 #define INPUT_CLEANSRC 1
/* Per-instance state for the decimate filter.
 * NOTE(review): this excerpt elides several original lines of the struct
 * (the option-backed fields such as cycle/blockx/blocky/ppsrc/chroma and
 * the closing brace are not visible here). */
38 typedef struct DecimateContext {
40 struct qitem *queue; ///< window of cycle frames and the associated data diff
41 int fid; ///< current frame id in the queue
42 int filled; ///< 1 if the queue is filled, 0 otherwise
43 AVFrame *last; ///< last frame from the previous queue
44 AVFrame **clean_src; ///< frame queue for the clean source
45 int got_frame[2]; ///< frame request flag for each input stream
46 AVRational ts_unit; ///< timestamp units for the output frames
47 int64_t last_pts; ///< last output timestamp
48 int64_t start_pts; ///< base for output timestamps
49 uint32_t eof; ///< bitmask for end of stream
50 int hsub, vsub; ///< chroma subsampling values
52 int nxblocks, nyblocks; ///< number of metric half-blocks along x and y (see config_output)
/* AVOption table exposed to users through the decimate AVClass below.
 * NOTE(review): the terminating { NULL } sentinel of the table is outside
 * this excerpt. */
67 #define OFFSET(x) offsetof(DecimateContext, x)
68 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
70 static const AVOption decimate_options[] = {
71 { "cycle", "set the number of frame from which one will be dropped", OFFSET(cycle), AV_OPT_TYPE_INT, {.i64 = 5}, 2, 25, FLAGS },
72 { "dupthresh", "set duplicate threshold", OFFSET(dupthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl = 1.1}, 0, 100, FLAGS },
73 { "scthresh", "set scene change threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl = 15.0}, 0, 100, FLAGS },
74 { "blockx", "set the size of the x-axis blocks used during metric calculations", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
75 { "blocky", "set the size of the y-axis blocks used during metric calculations", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
76 { "ppsrc", "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
77 { "chroma", "set whether or not chroma is considered in the metric calculations", OFFSET(chroma), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
/* Defines decimate_class, referenced by ff_vf_decimate.priv_class. */
81 AVFILTER_DEFINE_CLASS(decimate);
/* Compute the block-based difference metrics between two frames.
 * Absolute per-pixel differences are accumulated into dm->bdiffs on a grid
 * of half-blocks (blockx/2 x blocky/2); the results are stored in *q
 * (q->maxbdiff = largest 2x2 group of half-blocks, q->totdiff = sum of all).
 * NOTE(review): this excerpt elides several original lines (some
 * declarations, the >8-bit CALC_DIFF branches, the maxdiff scan and
 * closing braces). */
83 static void calc_diffs(const DecimateContext *dm, struct qitem *q,
84 const AVFrame *f1, const AVFrame *f2)
87 int64_t *bdiffs = dm->bdiffs;
/* reset the per-half-block accumulators before summing this frame pair */
90 memset(bdiffs, 0, dm->bdiffsize * sizeof(*bdiffs));
/* luma only, unless chroma metrics are enabled and a chroma plane exists */
92 for (plane = 0; plane < (dm->chroma && f1->data[2] ? 3 : 1); plane++) {
94 const int linesize1 = f1->linesize[plane];
95 const int linesize2 = f2->linesize[plane];
96 const uint8_t *f1p = f1->data[plane];
97 const uint8_t *f2p = f2->data[plane];
/* chroma planes are subsampled: scale dimensions and half-block sizes */
98 int width = plane ? AV_CEIL_RSHIFT(f1->width, dm->hsub) : f1->width;
99 int height = plane ? AV_CEIL_RSHIFT(f1->height, dm->vsub) : f1->height;
100 int hblockx = dm->blockx / 2;
101 int hblocky = dm->blocky / 2;
104 hblockx >>= dm->hsub;
105 hblocky >>= dm->vsub;
108 for (y = 0; y < height; y++) {
109 int ydest = y / hblocky;
/* Sum |f1 - f2| over one row of half-blocks at the given bit depth.
 * Do not insert anything between the continuation lines below. */
112 #define CALC_DIFF(nbits) do { \
113 for (x = 0; x < width; x += hblockx) { \
115 int m = FFMIN(width, x + hblockx); \
116 for (xl = x; xl < m; xl++) \
117 acc += abs(((const uint##nbits##_t *)f1p)[xl] - \
118 ((const uint##nbits##_t *)f2p)[xl]); \
119 bdiffs[ydest * dm->nxblocks + xdest] += acc; \
123 if (dm->depth == 8) CALC_DIFF(8);
/* combine each 2x2 group of half-blocks into one full-block difference */
131 for (i = 0; i < dm->nyblocks - 1; i++) {
132 for (j = 0; j < dm->nxblocks - 1; j++) {
133 int64_t tmp = bdiffs[ i * dm->nxblocks + j ]
134 + bdiffs[ i * dm->nxblocks + j + 1]
135 + bdiffs[(i + 1) * dm->nxblocks + j ]
136 + bdiffs[(i + 1) * dm->nxblocks + j + 1];
/* total difference is the sum over every half-block of every plane */
143 for (i = 0; i < dm->bdiffsize; i++)
144 q->totdiff += bdiffs[i];
145 q->maxbdiff = maxdiff;
/* Queue one incoming frame (main or, with ppsrc, clean-source input).
 * Once both inputs for the current slot have arrived and a full cycle of
 * frames is collected, select the frame to drop (scene-change position if
 * no duplicate was found, otherwise the lowest-maxbdiff frame) and push the
 * remaining cycle-1 frames downstream with rewritten timestamps.
 * A NULL `in` is used by activate() to flush a partially filled queue at EOF.
 * NOTE(review): this excerpt elides several original lines (declarations,
 * else-branches and closing braces). */
148 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
150 int scpos = -1, duppos = -1;
151 int drop = INT_MIN, i, lowest = 0, ret;
152 AVFilterContext *ctx = inlink->dst;
153 AVFilterLink *outlink = ctx->outputs[0];
154 DecimateContext *dm = ctx->priv;
157 /* update frames queue(s) */
158 if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
159 dm->queue[dm->fid].frame = in;
160 dm->got_frame[INPUT_MAIN] = 1;
162 dm->clean_src[dm->fid] = in;
163 dm->got_frame[INPUT_CLEANSRC] = 1;
/* wait until both streams (when ppsrc) delivered a frame for this slot */
165 if (!dm->got_frame[INPUT_MAIN] || (dm->ppsrc && !dm->got_frame[INPUT_CLEANSRC]))
167 dm->got_frame[INPUT_MAIN] = dm->got_frame[INPUT_CLEANSRC] = 0;
170 in = dm->queue[dm->fid].frame;
173 /* update frame metrics */
174 prv = dm->fid ? dm->queue[dm->fid - 1].frame : dm->last;
/* INT64_MAX metrics make this slot impossible to classify as duplicate */
176 dm->queue[dm->fid].maxbdiff = INT64_MAX;
177 dm->queue[dm->fid].totdiff = INT64_MAX;
179 calc_diffs(dm, &dm->queue[dm->fid], prv, in);
/* keep buffering until the cycle is complete */
181 if (++dm->fid != dm->cycle)
/* remember the last frame so the next cycle can diff against it */
183 av_frame_free(&dm->last);
184 dm->last = av_frame_clone(in);
187 /* we have a complete cycle, select the frame to drop */
189 for (i = 0; i < dm->cycle; i++) {
190 if (dm->queue[i].totdiff > dm->scthresh)
192 if (dm->queue[i].maxbdiff < dm->queue[lowest].maxbdiff)
195 if (dm->queue[lowest].maxbdiff < dm->dupthresh)
197 drop = scpos >= 0 && duppos < 0 ? scpos : lowest;
/* dump the per-frame metrics and the chosen drop at debug log level */
201 if (av_log_get_level() >= AV_LOG_DEBUG) {
202 av_log(ctx, AV_LOG_DEBUG, "1/%d frame drop:\n", dm->cycle);
203 for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
204 av_log(ctx, AV_LOG_DEBUG," #%d: totdiff=%08"PRIx64" maxbdiff=%08"PRIx64"%s%s%s%s\n",
205 i + 1, dm->queue[i].totdiff, dm->queue[i].maxbdiff,
206 i == scpos ? " sc" : "",
207 i == duppos ? " dup" : "",
208 i == lowest ? " lowest" : "",
209 i == drop ? " [DROP]" : "");
213 /* push all frames except the drop */
215 for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
218 av_frame_free(&dm->clean_src[i]);
219 av_frame_free(&dm->queue[i].frame);
221 AVFrame *frame = dm->queue[i].frame;
222 dm->queue[i].frame = NULL;
/* latch the first valid input pts as the base for output timestamps */
223 if (frame->pts != AV_NOPTS_VALUE && dm->start_pts == AV_NOPTS_VALUE)
224 dm->start_pts = frame->pts;
/* with ppsrc the metric frame is discarded and the clean frame is sent */
226 av_frame_free(&frame);
227 frame = dm->clean_src[i];
230 dm->clean_src[i] = NULL;
/* regenerate pts from the output frame count in ts_unit steps */
232 frame->pts = av_rescale_q(outlink->frame_count_in, dm->ts_unit, (AVRational){1,1}) +
233 (dm->start_pts == AV_NOPTS_VALUE ? 0 : dm->start_pts);
234 dm->last_pts = frame->pts;
235 ret = ff_filter_frame(outlink, frame);
/* Filter activation callback (ff_filter_activate entry point).
 * Forwards output status back to the inputs, consumes at most one frame per
 * input and feeds it to filter_frame(), acknowledges EOF on either input
 * (flushing both queues with NULL frames), and finally either marks the
 * filter ready again or requests more frames from the inputs.
 * NOTE(review): this excerpt elides several original lines (declarations of
 * ret/status/pts, some condition lines and returns, closing braces). */
244 static int activate(AVFilterContext *ctx)
246 DecimateContext *dm = ctx->priv;
247 AVFrame *frame = NULL;
251 FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
/* pull one frame from the main input when its slot is still empty */
253 if ((dm->got_frame[INPUT_MAIN] == 0) && !(dm->eof & (1 << INPUT_MAIN)) &&
254 (ret = ff_inlink_consume_frame(ctx->inputs[INPUT_MAIN], &frame)) > 0) {
255 ret = filter_frame(ctx->inputs[INPUT_MAIN], frame);
262 (dm->got_frame[INPUT_CLEANSRC] == 0) && !(dm->eof & (1 << INPUT_CLEANSRC)) &&
263 (ret = ff_inlink_consume_frame(ctx->inputs[INPUT_CLEANSRC], &frame)) > 0) {
264 ret = filter_frame(ctx->inputs[INPUT_CLEANSRC], frame);
/* all participating inputs hit EOF: propagate EOF downstream */
270 } else if (dm->eof == ((1 << INPUT_MAIN) | (dm->ppsrc << INPUT_CLEANSRC))) {
271 ff_outlink_set_status(ctx->outputs[0], AVERROR_EOF, dm->last_pts);
273 } else if (!(dm->eof & (1 << INPUT_MAIN)) && ff_inlink_acknowledge_status(ctx->inputs[INPUT_MAIN], &status, &pts)) {
274 if (status == AVERROR_EOF) { // flushing
275 dm->eof |= 1 << INPUT_MAIN;
/* NULL frames flush whatever is left in the queues */
277 filter_frame(ctx->inputs[INPUT_CLEANSRC], NULL);
278 filter_frame(ctx->inputs[INPUT_MAIN], NULL);
279 ff_outlink_set_status(ctx->outputs[0], AVERROR_EOF, dm->last_pts);
282 } else if (dm->ppsrc && !(dm->eof & (1 << INPUT_CLEANSRC)) && ff_inlink_acknowledge_status(ctx->inputs[INPUT_CLEANSRC], &status, &pts)) {
283 if (status == AVERROR_EOF) { // flushing
284 dm->eof |= 1 << INPUT_CLEANSRC;
285 filter_frame(ctx->inputs[INPUT_MAIN], NULL);
286 filter_frame(ctx->inputs[INPUT_CLEANSRC], NULL);
287 ff_outlink_set_status(ctx->outputs[0], AVERROR_EOF, dm->last_pts);
/* more input already queued: reschedule; otherwise ask upstream for frames */
292 if (ff_inlink_queued_frames(ctx->inputs[INPUT_MAIN]) > 0 &&
293 (dm->ppsrc && ff_inlink_queued_frames(ctx->inputs[INPUT_CLEANSRC]) > 0)) {
294 ff_filter_set_ready(ctx, 100);
295 } else if (ff_outlink_frame_wanted(ctx->outputs[0])) {
296 if (dm->got_frame[INPUT_MAIN] == 0)
297 ff_inlink_request_frame(ctx->inputs[INPUT_MAIN]);
298 if (dm->ppsrc && (dm->got_frame[INPUT_CLEANSRC] == 0))
299 ff_inlink_request_frame(ctx->inputs[INPUT_CLEANSRC]);
/* Filter init: creates the "main" input pad, and additionally a "clean_src"
 * pad when the ppsrc option is set (the filter has AVFILTER_FLAG_DYNAMIC_INPUTS).
 * Validates that blockx/blocky are powers of two and initializes start_pts.
 * NOTE(review): this excerpt elides several original lines (the pad
 * initializer head, error returns and closing braces). */
304 static av_cold int decimate_init(AVFilterContext *ctx)
306 DecimateContext *dm = ctx->priv;
309 .type = AVMEDIA_TYPE_VIDEO,
313 if ((ret = ff_insert_inpad(ctx, INPUT_MAIN, &pad)) < 0)
/* second input pad, only inserted when ppsrc is enabled (presumably —
 * the guarding condition is outside this excerpt; verify in full source) */
317 pad.name = "clean_src";
318 pad.config_props = NULL;
319 if ((ret = ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad)) < 0)
/* power-of-two check: calc_diffs shifts the block sizes by hsub/vsub */
323 if ((dm->blockx & (dm->blockx - 1)) ||
324 (dm->blocky & (dm->blocky - 1))) {
325 av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
326 return AVERROR(EINVAL);
/* no base timestamp until the first valid input pts (see filter_frame) */
329 dm->start_pts = AV_NOPTS_VALUE;
/* Filter uninit: releases everything the filter allocated — the cached last
 * frame, the metric scratch buffer, and both frame queues together with any
 * frames still held in them.
 * NOTE(review): the guards/braces around the clean_src loop are elided in
 * this excerpt. */
334 static av_cold void decimate_uninit(AVFilterContext *ctx)
337 DecimateContext *dm = ctx->priv;
339 av_frame_free(&dm->last);
340 av_freep(&dm->bdiffs);
/* drop any frames still buffered in the main queue, then the queue itself */
342 for (i = 0; i < dm->cycle; i++)
343 av_frame_free(&dm->queue[i].frame);
345 av_freep(&dm->queue);
/* same for the clean-source queue */
347 for (i = 0; i < dm->cycle; i++)
348 av_frame_free(&dm->clean_src[i]);
350 av_freep(&dm->clean_src);
/* Advertise the supported input/output pixel formats: planar YUV (with and
 * without alpha) at various bit depths, plus grayscale.
 * NOTE(review): the tail of the pix_fmts list (e.g. its AV_PIX_FMT_NONE
 * terminator) and the fmts_list NULL check are elided in this excerpt. */
353 static int query_formats(AVFilterContext *ctx)
355 static const enum AVPixelFormat pix_fmts[] = {
356 #define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
357 #define PF_ALPHA(suf) AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
358 #define PF(suf) PF_NOALPHA(suf), PF_ALPHA(suf)
359 PF(P), PF(P9), PF(P10), PF_NOALPHA(P12), PF_NOALPHA(P14), PF(P16),
360 AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV440P12,
361 AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
362 AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
364 AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14,
368 AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
370 return AVERROR(ENOMEM);
371 return ff_set_common_formats(ctx, fmts_list);
/* Output link configuration: derives the metric parameters from the input
 * format (bit depth, chroma subsampling, block grid, absolute thresholds),
 * allocates the per-cycle queues, rejects inputs without a constant frame
 * rate, and sets the output properties — the frame rate is scaled by
 * (cycle-1)/cycle since one frame per cycle is dropped.
 * NOTE(review): this excerpt elides several original lines (declarations,
 * ppsrc guards around the clean_src allocation and the w/h selection,
 * closing braces and the return). */
374 static int config_output(AVFilterLink *outlink)
376 AVFilterContext *ctx = outlink->src;
377 DecimateContext *dm = ctx->priv;
378 const AVFilterLink *inlink = ctx->inputs[INPUT_MAIN];
379 AVRational fps = inlink->frame_rate;
381 const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
382 const int w = inlink->w;
383 const int h = inlink->h;
385 dm->hsub = pix_desc->log2_chroma_w;
386 dm->vsub = pix_desc->log2_chroma_h;
387 dm->depth = pix_desc->comp[0].depth;
388 max_value = (1 << dm->depth) - 1;
/* convert the user's percentage thresholds into absolute diff values:
 * scthresh scales with the whole frame, dupthresh with one block */
389 dm->scthresh = (int64_t)(((int64_t)max_value * w * h * dm->scthresh_flt) / 100);
390 dm->dupthresh = (int64_t)(((int64_t)max_value * dm->blockx * dm->blocky * dm->dupthresh_flt) / 100);
/* the metric grid is made of half-blocks, rounded up to cover the frame */
391 dm->nxblocks = (w + dm->blockx/2 - 1) / (dm->blockx/2);
392 dm->nyblocks = (h + dm->blocky/2 - 1) / (dm->blocky/2);
393 dm->bdiffsize = dm->nxblocks * dm->nyblocks;
394 dm->bdiffs = av_malloc_array(dm->bdiffsize, sizeof(*dm->bdiffs));
395 dm->queue = av_calloc(dm->cycle, sizeof(*dm->queue));
397 if (!dm->bdiffs || !dm->queue)
398 return AVERROR(ENOMEM);
401 dm->clean_src = av_calloc(dm->cycle, sizeof(*dm->clean_src));
403 return AVERROR(ENOMEM);
/* decimation needs CFR input: one frame out of every `cycle` is removed */
406 if (!fps.num || !fps.den) {
407 av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
408 "current rate of %d/%d is invalid\n", fps.num, fps.den);
409 return AVERROR(EINVAL);
411 fps = av_mul_q(fps, (AVRational){dm->cycle - 1, dm->cycle});
412 av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
413 inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);
414 outlink->time_base = inlink->time_base;
415 outlink->frame_rate = fps;
416 outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
/* output dimensions follow the clean source when ppsrc is enabled
 * (presumably guarded by dm->ppsrc — the condition is elided here) */
418 outlink->w = ctx->inputs[INPUT_CLEANSRC]->w;
419 outlink->h = ctx->inputs[INPUT_CLEANSRC]->h;
421 outlink->w = inlink->w;
422 outlink->h = inlink->h;
/* ts_unit = 1/(fps*time_base): the output pts step used in filter_frame */
424 dm->ts_unit = av_inv_q(av_mul_q(fps, outlink->time_base));
/* Single video output pad; its dimensions and rate are set in config_output.
 * NOTE(review): the pad name field and array terminator are elided in this
 * excerpt. */
428 static const AVFilterPad decimate_outputs[] = {
431 .type = AVMEDIA_TYPE_VIDEO,
432 .config_props = config_output,
/* Filter registration. Inputs are created dynamically in decimate_init
 * (one or two, depending on ppsrc), hence AVFILTER_FLAG_DYNAMIC_INPUTS. */
437 AVFilter ff_vf_decimate = {
439 .description = NULL_IF_CONFIG_SMALL("Decimate frames (post field matching filter)."),
440 .init = decimate_init,
441 .activate = activate,
442 .uninit = decimate_uninit,
443 .priv_size = sizeof(DecimateContext),
444 .query_formats = query_formats,
445 .outputs = decimate_outputs,
446 .priv_class = &decimate_class,
447 .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,