2 * BobWeaver Deinterlacing Filter
3 * Copyright (C) 2016 Thomas Mundt <loudmax@yahoo.de>
5 * Based on YADIF (Yet Another Deinterlacing Filter)
6 * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
7 * 2010 James Darnley <james.darnley@gmail.com>
9 * With use of Weston 3 Field Deinterlacing Filter algorithm
10 * Copyright (C) 2012 British Broadcasting Corporation, All Rights Reserved
11 * Author of de-interlace algorithm: Jim Easterbrook for BBC R&D
12 * Based on the process described by Martin Weston for BBC R&D
14 * This file is part of FFmpeg.
16 * FFmpeg is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU Lesser General Public
18 * License as published by the Free Software Foundation; either
19 * version 2.1 of the License, or (at your option) any later version.
21 * FFmpeg is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * Lesser General Public License for more details.
26 * You should have received a copy of the GNU Lesser General Public
27 * License along with FFmpeg; if not, write to the Free Software
28 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 #include "libavutil/avassert.h"
32 #include "libavutil/common.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
42 * Filter coefficients coef_lf and coef_hf taken from BBC PH-2071 (Weston 3 Field Deinterlacer).
43 * Used when there is spatial and temporal interpolation.
44 * Filter coefficients coef_sp are used when there is spatial interpolation only.
45 * Adjusted for matching visual sharpness impression of spatial and temporal interpolation.
/* 13-bit fixed-point filter coefficients (results of each weighted sum are
 * shifted right by 13 after accumulation, see the FILTER* macros below). */
static const uint16_t coef_lf[2] = { 4309, 213 };       /* low-frequency pair, temporal path */
static const uint16_t coef_hf[3] = { 5570, 3801, 1016 };/* high-frequency taps, temporal path */
static const uint16_t coef_sp[2] = { 5077, 981 };       /* spatial-only interpolation pair */
    /* BWDIFMode: output cadence (enum opening/closing lines are outside this chunk). */
    BWDIF_MODE_SEND_FRAME = 0, ///< send 1 frame for each frame
    BWDIF_MODE_SEND_FIELD = 1, ///< send 1 frame for each field
    /* BWDIFParity: assumed field order of the input stream. */
    BWDIF_PARITY_TFF  = 0, ///< top field first
    BWDIF_PARITY_BFF  = 1, ///< bottom field first
    BWDIF_PARITY_AUTO = -1, ///< auto detection
    /* BWDIFDeint: which input frames are actually deinterlaced. */
    BWDIF_DEINT_ALL        = 0, ///< deinterlace all frames
    BWDIF_DEINT_INTERLACED = 1, ///< only deinterlace frames marked as interlaced
/* Per-instance filter state.
 * NOTE(review): several fields of this struct (frame pointers, inter_field,
 * frame_pending, ...) are referenced later but not visible in this chunk. */
typedef struct BWDIFContext {
    int mode;           ///< BWDIFMode
    int parity;         ///< BWDIFParity
    int deint;          ///< BWDIFDeint

    /* Spatial-only interpolation of one missing line; used when no
     * temporal neighbour frames are available (first/last frame). */
    void (*filter_intra)(void *dst1, void *cur1, int w, int prefs, int mrefs,
                         int prefs3, int mrefs3, int parity, int clip_max);
    /* Full temporal+spatial interpolation of one interior missing line.
     * prefsN/mrefsN are +/- N-line offsets in elements (not bytes). */
    void (*filter_line)(void *dst, void *prev, void *cur, void *next,
                        int w, int prefs, int mrefs, int prefs2, int mrefs2,
                        int prefs3, int mrefs3, int prefs4, int mrefs4,
                        int parity, int clip_max);
    /* Reduced-tap variant used near the top/bottom picture edges;
     * `spat` enables the spatial consistency check. */
    void (*filter_edge)(void *dst, void *prev, void *cur, void *next,
                        int w, int prefs, int mrefs, int prefs2, int mrefs2,
                        int parity, int clip_max, int spat);

    const AVPixFmtDescriptor *csp; ///< descriptor of the negotiated input pixel format

/* Per-job payload handed to filter_slice() via ctx->internal->execute()
 * (field list is outside this chunk). */
typedef struct ThreadData {
/* FILTER_INTRA(): spatial-only interpolation using the coef_sp 4-tap kernel
 * over lines at +/-1 and +/-3; 13-bit fixed point, hence the >> 13. */
#define FILTER_INTRA() \
    for (x = 0; x < w; x++) { \
        interpol = (coef_sp[0] * (cur[mrefs] + cur[prefs]) - coef_sp[1] * (cur[mrefs3] + cur[prefs3])) >> 13; \
        dst[0] = av_clip(interpol, 0, clip_max); \
        /* FILTER() core (macro head not visible in this chunk): measure \
         * temporal activity around the missing pixel. */ \
    for (x = 0; x < w; x++) { \
        int c = cur[mrefs]; \
        int d = (prev2[0] + next2[0]) >> 1; \
        int e = cur[prefs]; \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e)) >> 1; \
        int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e)) >> 1; \
        int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
/* SPAT_CHECK(): widen `diff` with spatial neighbour information so genuine
 * vertical detail survives (uses c/e and dc/de defined by the caller macro). */
#define SPAT_CHECK() \
    int b = ((prev2[mrefs2] + next2[mrefs2]) >> 1) - c; \
    int f = ((prev2[prefs2] + next2[prefs2]) >> 1) - e; \
    int max = FFMAX3(de, dc, FFMIN(b, f)); \
    int min = FFMIN3(de, dc, FFMAX(b, f)); \
    diff = FFMAX3(diff, min, -max);
/* FILTER_LINE(): pick the 5-field temporal kernel (coef_hf/coef_lf) when the
 * vertical gradient exceeds the temporal difference, else fall back to the
 * spatial coef_sp kernel. */
#define FILTER_LINE() \
    if (FFABS(c - e) > temporal_diff0) { \
        interpol = (((coef_hf[0] * (prev2[0] + next2[0]) \
            - coef_hf[1] * (prev2[mrefs2] + next2[mrefs2] + prev2[prefs2] + next2[prefs2]) \
            + coef_hf[2] * (prev2[mrefs4] + next2[mrefs4] + prev2[prefs4] + next2[prefs4])) >> 2) \
            + coef_lf[0] * (c + e) - coef_lf[1] * (cur[mrefs3] + cur[prefs3])) >> 13; \
        interpol = (coef_sp[0] * (c + e) - coef_sp[1] * (cur[mrefs3] + cur[prefs3])) >> 13; \
/* FILTER_EDGE(): cheap vertical average near the picture borders. */
#define FILTER_EDGE() \
        interpol = (c + e) >> 1;
        /* Clamp the interpolated value to d +/- diff to suppress ghosting. */ \
        if (interpol > d + diff) \
            interpol = d + diff; \
        else if (interpol < d - diff) \
            interpol = d - diff; \
        dst[0] = av_clip(interpol, 0, clip_max); \
/* 8-bit scalar implementation of BWDIFContext.filter_intra: spatial-only
 * interpolation of one output line (body not visible in this chunk). */
static void filter_intra(void *dst1, void *cur1, int w, int prefs, int mrefs,
                         int prefs3, int mrefs3, int parity, int clip_max)
/* 8-bit scalar implementation of BWDIFContext.filter_line: temporal+spatial
 * interpolation of one interior line (loop body not visible in this chunk). */
static void filter_line(void *dst1, void *prev1, void *cur1, void *next1,
                        int w, int prefs, int mrefs, int prefs2, int mrefs2,
                        int prefs3, int mrefs3, int prefs4, int mrefs4,
                        int parity, int clip_max)
    uint8_t *prev = prev1;
    uint8_t *next = next1;
    /* prev2/next2 select the field-matched reference frames: with parity set,
     * the previous frame pairs with cur; otherwise cur pairs with next. */
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;
/* 8-bit scalar implementation of BWDIFContext.filter_edge: reduced-tap
 * interpolation near the top/bottom edges (loop body not visible here). */
static void filter_edge(void *dst1, void *prev1, void *cur1, void *next1,
                        int w, int prefs, int mrefs, int prefs2, int mrefs2,
                        int parity, int clip_max, int spat)
    uint8_t *prev = prev1;
    uint8_t *next = next1;
    /* Field-matched reference selection, same convention as filter_line(). */
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;
/* >8-bit variant of filter_intra(): identical algorithm operating on
 * uint16_t samples (body not visible in this chunk). */
static void filter_intra_16bit(void *dst1, void *cur1, int w, int prefs, int mrefs,
                               int prefs3, int mrefs3, int parity, int clip_max)
{
    uint16_t *dst = dst1;
    uint16_t *cur = cur1;
/* >8-bit variant of filter_line(): identical algorithm on uint16_t samples
 * (loop body not visible in this chunk). */
static void filter_line_16bit(void *dst1, void *prev1, void *cur1, void *next1,
                              int w, int prefs, int mrefs, int prefs2, int mrefs2,
                              int prefs3, int mrefs3, int prefs4, int mrefs4,
                              int parity, int clip_max)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    /* Field-matched reference selection, same convention as the 8-bit path. */
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
/* >8-bit variant of filter_edge(): identical algorithm on uint16_t samples
 * (loop body not visible in this chunk). */
static void filter_edge_16bit(void *dst1, void *prev1, void *cur1, void *next1,
                              int w, int prefs, int mrefs, int prefs2, int mrefs2,
                              int parity, int clip_max, int spat)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    /* Field-matched reference selection, same convention as the 8-bit path. */
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
/* Worker for ctx->internal->execute(): deinterlaces the slice of rows
 * [slice_start, slice_end) of one plane described by the ThreadData in arg.
 * Rows of the kept field are copied through; rows of the missing field are
 * interpolated. (Some brace/else lines are not visible in this chunk.) */
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
    BWDIFContext *s = ctx->priv;
    ThreadData *td  = arg;
    int linesize = s->cur->linesize[td->plane];
    /* Maximum sample value for this plane's bit depth. */
    int clip_max = (1 << (s->csp->comp[td->plane].depth)) - 1;
    /* Bytes per sample: 1 for 8-bit, 2 for 9..16-bit. */
    int df = (s->csp->comp[td->plane].depth + 7) / 8;
    /* Line stride expressed in samples, so refs offsets work for both depths. */
    int refs = linesize / df;
    int slice_start = (td->h *  jobnr   ) / nb_jobs;
    int slice_end   = (td->h * (jobnr+1)) / nb_jobs;

    for (y = slice_start; y < slice_end; y++) {
        if ((y ^ td->parity) & 1) {
            /* This row belongs to the missing field: interpolate it. */
            uint8_t *prev = &s->prev->data[td->plane][y * linesize];
            uint8_t *cur  = &s->cur ->data[td->plane][y * linesize];
            uint8_t *next = &s->next->data[td->plane][y * linesize];
            uint8_t *dst  = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
            if (!s->inter_field) {
                /* No usable temporal neighbours yet: spatial-only path.
                 * Offsets are mirrored at the picture borders. */
                s->filter_intra(dst, cur, td->w, (y + df) < td->h ? refs : -refs,
                                y > (df - 1) ? -refs : refs,
                                (y + 3*df) < td->h ? 3 * refs : -refs,
                                y > (3*df - 1) ? -3 * refs : refs,
                                td->parity ^ td->tff, clip_max);
            } else if ((y < 4) || ((y + 5) > td->h)) {
                /* Near top/bottom edge: reduced-tap kernel; spatial check
                 * only when at least 2 lines of margin exist. */
                s->filter_edge(dst, prev, cur, next, td->w,
                               (y + df) < td->h ? refs : -refs,
                               y > (df - 1) ? -refs : refs,
                               refs << 1, -(refs << 1),
                               td->parity ^ td->tff, clip_max,
                               (y < 2) || ((y + 3) > td->h) ? 0 : 1);
                /* Interior row: full temporal+spatial kernel. */
                s->filter_line(dst, prev, cur, next, td->w,
                               refs, -refs, refs << 1, -(refs << 1),
                               3 * refs, -3 * refs, refs << 2, -(refs << 2),
                               td->parity ^ td->tff, clip_max);
            /* Row of the kept field: copy it through unchanged. */
            memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]],
                   &s->cur->data[td->plane][y * linesize], td->w * df);
/* Deinterlaces one output picture: runs filter_slice() over every plane,
 * threaded across rows. `parity` selects which field is synthesised,
 * `tff` is the stream's field order. (Some lines not visible here.) */
static void filter(AVFilterContext *ctx, AVFrame *dstpic,
    BWDIFContext *bwdif = ctx->priv;
    ThreadData td = { .frame = dstpic, .parity = parity, .tff = tff };

    for (i = 0; i < bwdif->csp->nb_components; i++) {
        int w = dstpic->width;
        int h = dstpic->height;

        /* Chroma planes (indices 1 and 2) are subsampled. */
        if (i == 1 || i == 2) {
            w = AV_CEIL_RSHIFT(w, bwdif->csp->log2_chroma_w);
            h = AV_CEIL_RSHIFT(h, bwdif->csp->log2_chroma_h);

        ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ctx->graph->nb_threads));
    /* After the first output, temporal neighbours exist for later frames. */
    if (!bwdif->inter_field) {
        bwdif->inter_field = 1;
/* Produces one output frame from the current input triplet and sends it
 * downstream. `is_second` marks the second field of a frame in
 * send_field mode. (Some lines are not visible in this chunk.) */
static int return_frame(AVFilterContext *ctx, int is_second)
    BWDIFContext *bwdif = ctx->priv;
    AVFilterLink *link  = ctx->outputs[0];

    /* Resolve the field order: auto-detect from frame flags, defaulting
     * to TFF for progressive-flagged input, else use the forced option. */
    if (bwdif->parity == -1) {
        tff = bwdif->cur->interlaced_frame ?
              bwdif->cur->top_field_first : 1;
        tff = bwdif->parity ^ 1;

        bwdif->out = ff_get_video_buffer(link, link->w, link->h);
            return AVERROR(ENOMEM);

        av_frame_copy_props(bwdif->out, bwdif->cur);
        bwdif->out->interlaced_frame = 0;
        /* inter_field < 0 is the end-of-stream marker set by request_frame(). */
        if (bwdif->inter_field < 0)
            bwdif->inter_field = 0;

    filter(ctx, bwdif->out, tff ^ !is_second, tff);

        int64_t cur_pts  = bwdif->cur->pts;
        int64_t next_pts = bwdif->next->pts;

        /* Timestamps are in the doubled output time base, so cur+next is
         * the midpoint between the two input frames. */
        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
            bwdif->out->pts = cur_pts + next_pts;
            bwdif->out->pts = AV_NOPTS_VALUE;

    ret = ff_filter_frame(ctx->outputs[0], bwdif->out);

    /* In send_field mode a second field is still pending after the first. */
    bwdif->frame_pending = (bwdif->mode&1) && !is_second;
/* Returns nonzero when two frames differ in stride on any used plane
 * (the filter requires matching strides across prev/cur/next). */
static int checkstride(BWDIFContext *bwdif, const AVFrame *a, const AVFrame *b)
    for (i = 0; i < bwdif->csp->nb_components; i++)
        if (a->linesize[i] != b->linesize[i])
/* Reallocates `f` into a default-stride buffer for this link and moves the
 * pixel data across, so its stride matches freshly allocated frames.
 * NOTE(review): the allocation-failure check is not visible in this chunk. */
static void fixstride(AVFilterLink *link, AVFrame *f)
    AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height);

    av_frame_copy_props(dst, f);
    av_image_copy(dst->data, dst->linesize,
                  (const uint8_t **)f->data, f->linesize,
                  dst->format, dst->width, dst->height);

    /* Replace f's buffers with the new ones; dst is consumed. */
    av_frame_move_ref(f, dst);
/* Input-pad callback: rotates the prev/cur/next frame window, normalises
 * strides, passes progressive frames through untouched (when deint ==
 * interlaced), and otherwise emits the first deinterlaced field/frame.
 * (Several brace/condition lines are not visible in this chunk.) */
static int filter_frame(AVFilterLink *link, AVFrame *frame)
    AVFilterContext *ctx = link->dst;
    BWDIFContext *bwdif  = ctx->priv;

    /* Flush the pending second field before accepting new input. */
    if (bwdif->frame_pending)
        return_frame(ctx, 1);

    /* Slide the 3-frame window: prev <- cur <- next <- frame. */
    av_frame_free(&bwdif->prev);
    bwdif->prev = bwdif->cur;
    bwdif->cur  = bwdif->next;

        /* First frame ever: duplicate it so cur is valid, and mark that no
         * temporal neighbours exist yet (intra-only filtering). */
        bwdif->cur = av_frame_clone(bwdif->next);
            return AVERROR(ENOMEM);
        bwdif->inter_field = 0;

    /* The slice code assumes equal strides across the window; reallocate
     * whichever frames disagree with the newest one. */
    if (checkstride(bwdif, bwdif->next, bwdif->cur)) {
        av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n");
        fixstride(link, bwdif->next);
    if (checkstride(bwdif, bwdif->next, bwdif->cur))
        fixstride(link, bwdif->cur);
    if (bwdif->prev && checkstride(bwdif, bwdif->next, bwdif->prev))
        fixstride(link, bwdif->prev);
    if (checkstride(bwdif, bwdif->next, bwdif->cur) || (bwdif->prev && checkstride(bwdif, bwdif->next, bwdif->prev))) {
        av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n");

    /* Pass-through: deint==interlaced and the whole window is progressive. */
    if ((bwdif->deint && !bwdif->cur->interlaced_frame) ||
        (bwdif->deint && !bwdif->prev->interlaced_frame && bwdif->prev->repeat_pict) ||
        (bwdif->deint && !bwdif->next->interlaced_frame && bwdif->next->repeat_pict)
        bwdif->out = av_frame_clone(bwdif->cur);
            return AVERROR(ENOMEM);

        av_frame_free(&bwdif->prev);
        /* Rescale pts into the doubled output time base. */
        if (bwdif->out->pts != AV_NOPTS_VALUE)
            bwdif->out->pts *= 2;
        return ff_filter_frame(ctx->outputs[0], bwdif->out);

    bwdif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
        return AVERROR(ENOMEM);

    av_frame_copy_props(bwdif->out, bwdif->cur);
    bwdif->out->interlaced_frame = 0;

    /* Rescale pts into the doubled output time base. */
    if (bwdif->out->pts != AV_NOPTS_VALUE)
        bwdif->out->pts *= 2;

    return return_frame(ctx, 0);
/* Output-pad callback: flushes a pending second field, pulls more input,
 * and at EOF synthesises one final frame by cloning the last input with an
 * extrapolated pts. (Some lines are not visible in this chunk.) */
static int request_frame(AVFilterLink *link)
    AVFilterContext *ctx = link->src;
    BWDIFContext *bwdif  = ctx->priv;

    if (bwdif->frame_pending) {
        return_frame(ctx, 1);

    ret = ff_request_frame(link->src->inputs[0]);

    if (ret == AVERROR_EOF && bwdif->cur) {
        /* End of stream: feed a cloned last frame back through
         * filter_frame() so the final field is emitted. */
        AVFrame *next = av_frame_clone(bwdif->next);
            return AVERROR(ENOMEM);

        /* Negative inter_field flags the EOF case for return_frame(). */
        bwdif->inter_field = -1;
        /* Extrapolate the timestamp one frame interval past the last pts. */
        next->pts = bwdif->next->pts * 2 - bwdif->cur->pts;

        filter_frame(link->src->inputs[0], next);
    } else if (ret < 0) {
/* Frees the buffered frame window (av_frame_free(NULL-ptr) is a no-op). */
static av_cold void uninit(AVFilterContext *ctx)
    BWDIFContext *bwdif = ctx->priv;

    av_frame_free(&bwdif->prev);
    av_frame_free(&bwdif->cur );
    av_frame_free(&bwdif->next);
/* Advertises the supported pixel formats: planar YUV/YUVA/GBR/GRAY at
 * 8..16 bits. (The AV_PIX_FMT_NONE terminator line is not visible in
 * this chunk.) */
static int query_formats(AVFilterContext *ctx)
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
/* Output-link configuration: doubles the frame rate and time-base density
 * (one output per field), validates minimum dimensions, and selects the
 * 8-bit or 16-bit filter implementations by plane depth. */
static int config_props(AVFilterLink *link)
    AVFilterContext *ctx = link->src;
    BWDIFContext *s = link->src->priv;

    /* Field-rate output: twice as many frames in a denser time base. */
    link->time_base.num = link->src->inputs[0]->time_base.num;
    link->time_base.den = link->src->inputs[0]->time_base.den * 2;
    link->w             = link->src->inputs[0]->w;
    link->h             = link->src->inputs[0]->h;

    link->frame_rate = av_mul_q(link->src->inputs[0]->frame_rate, (AVRational){2,1});

    /* The spatial kernels need at least 3 lines/columns of context. */
    if (link->w < 3 || link->h < 3) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
        return AVERROR(EINVAL);

    s->csp = av_pix_fmt_desc_get(link->format);
    if (s->csp->comp[0].depth > 8) {
        s->filter_intra = filter_intra_16bit;
        s->filter_line  = filter_line_16bit;
        s->filter_edge  = filter_edge_16bit;
        s->filter_intra = filter_intra;
        s->filter_line  = filter_line;
        s->filter_edge  = filter_edge;
/* AVOption plumbing: field offsets into BWDIFContext and shared flags. */
#define OFFSET(x) offsetof(BWDIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Helper for declaring the named constants of an option unit. */
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }

/* User-visible options; defaults: send_field, auto parity, deint interlaced. */
static const AVOption bwdif_options[] = {
    { "mode",   "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=BWDIF_MODE_SEND_FIELD}, 0, 1, FLAGS, "mode"},
    CONST("send_frame", "send one frame for each frame", BWDIF_MODE_SEND_FRAME, "mode"),
    CONST("send_field", "send one frame for each field", BWDIF_MODE_SEND_FIELD, "mode"),

    { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=BWDIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
    CONST("tff",  "assume top field first",    BWDIF_PARITY_TFF,  "parity"),
    CONST("bff",  "assume bottom field first", BWDIF_PARITY_BFF,  "parity"),
    CONST("auto", "auto detect parity",        BWDIF_PARITY_AUTO, "parity"),

    { "deint",  "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=BWDIF_DEINT_INTERLACED}, 0, 1, FLAGS, "deint" },
    CONST("all",        "deinterlace all frames",                       BWDIF_DEINT_ALL,        "deint"),
    CONST("interlaced", "only deinterlace frames marked as interlaced", BWDIF_DEINT_INTERLACED, "deint"),
/* Generates the AVClass (bwdif_class) backing the options table above. */
AVFILTER_DEFINE_CLASS(bwdif);

/* Input pad: receives frames via filter_frame(). (Pad name/terminator
 * lines are not visible in this chunk.) */
static const AVFilterPad avfilter_vf_bwdif_inputs[] = {
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame,

/* Output pad: pull-driven via request_frame(), configured by config_props(). */
static const AVFilterPad avfilter_vf_bwdif_outputs[] = {
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_props,

/* Filter registration entry; slice-threaded with internal timeline support. */
AVFilter ff_vf_bwdif = {
    .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
    .priv_size     = sizeof(BWDIFContext),
    .priv_class    = &bwdif_class,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_bwdif_inputs,
    .outputs       = avfilter_vf_bwdif_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,