/*
 * Copyright (C) 2006-2010 Michael Niedermayer <michaelni@gmx.at>
 *               2010      James Darnley <james.darnley@gmail.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Libav; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/cpu.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "yadif.h"
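
/* CHECK(j) scores the diagonal at horizontal offset j between the line above
 * (cur[mrefs]) and the line below (cur[prefs]) the one being interpolated.
 * If that diagonal matches better than the best found so far, its average
 * becomes the new spatial prediction. The braces it opens are deliberately
 * left unclosed; the FILTER macro below closes them. */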
#define CHECK(j)\
    {   int score = FFABS(cur[mrefs - 1 + (j)] - cur[prefs - 1 - (j)])\
                  + FFABS(cur[mrefs   +(j)] - cur[prefs   -(j)])\
                  + FFABS(cur[mrefs + 1 + (j)] - cur[prefs + 1 - (j)]);\
        if (score < spatial_score) {\
            spatial_score= score;\
            spatial_pred= (cur[mrefs  +(j)] + cur[prefs  -(j)])>>1;\
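
/* FILTER interpolates one missing pixel per iteration: d is the temporal
 * average of the previous and next fields, spatial_pred is the edge-directed
 * average of the lines above and below (refined by the CHECK probes), and
 * the result is clamped so it never deviates from d by more than the local
 * temporal difference. */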
/* The is_not_edge argument here controls when the code will enter a branch
 * which reads up to and including x-3 and x+3. */
#define FILTER(start, end, is_not_edge) \
    for (x = start; x < end; x++) { \
        int c = cur[mrefs]; \
        int d = (prev2[0] + next2[0])>>1; \
        int e = cur[prefs]; \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e) )>>1; \
        int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e) )>>1; \
        int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
        int spatial_pred = (c+e) >> 1; \
 \
        if (is_not_edge) {\
            int spatial_score = FFABS(cur[mrefs - 1] - cur[prefs - 1]) + FFABS(c-e) \
                              + FFABS(cur[mrefs + 1] - cur[prefs + 1]) - 1; \
            CHECK(-1) CHECK(-2) }} }} \
            CHECK( 1) CHECK( 2) }} }} \
        }\
 \
        if (!(mode&2)) { \
            int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
            int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
            int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
            int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e)); \
 \
            diff = FFMAX3(diff, min, -max); \
        } \
 \
        if (spatial_pred > d + diff) \
            spatial_pred = d + diff; \
        else if (spatial_pred < d - diff) \
            spatial_pred = d - diff; \
 \
        dst[0] = spatial_pred; \
 \
        dst++; \
        cur++; \
        prev++; \
        next++; \
        prev2++; \
        next2++; \
    }
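
/* Interpolate the body of one line of 8-bit pixels; the three pixels at each
 * edge of the line are handled separately by filter_edges() below. */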
static void filter_line_c(void *dst1,
                          void *prev1, void *cur1, void *next1,
                          int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    /* The function is called with the pointers already pointing to data[3] and
     * with 6 subtracted from the width. This allows the FILTER macro to be
     * called so that it processes all the pixels normally. A constant value of
     * true for is_not_edge lets the compiler ignore the if statement. */
    FILTER(0, w, 1)
}
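
/* Same filtering as above, but only for the three leftmost and three
 * rightmost pixels of a line, where the diagonal checks would otherwise read
 * outside the picture. */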
static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
                         int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    /* Only edge pixels need to be processed here. A constant value of false
     * for is_not_edge should let the compiler ignore the whole branch. */
    FILTER(0, 3, 0)
    dst   = (uint8_t*)dst1  + w - 3;
    prev  = (uint8_t*)prev1 + w - 3;
    cur   = (uint8_t*)cur1  + w - 3;
    next  = (uint8_t*)next1 + w - 3;
    prev2 = (uint8_t*)(parity ? prev : cur);
    next2 = (uint8_t*)(parity ? cur  : next);

    FILTER(w - 3, w, 0)
}
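
/* 16-bit variants of the two functions above, used for pixel formats with
 * more than 8 bits per component; prefs and mrefs are passed in bytes, so
 * they are halved to step through uint16_t samples. */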
static void filter_line_c_16bit(void *dst1,
                                void *prev1, void *cur1, void *next1,
                                int w, int prefs, int mrefs, int parity,
                                int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;

    mrefs /= 2;
    prefs /= 2;

    FILTER(0, w, 1)
}
static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
                               int w, int prefs, int mrefs, int parity, int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;

    mrefs /= 2;
    prefs /= 2;

    FILTER(0, 3, 0)

    dst   = (uint16_t*)dst1  + w - 3;
    prev  = (uint16_t*)prev1 + w - 3;
    cur   = (uint16_t*)cur1  + w - 3;
    next  = (uint16_t*)next1 + w - 3;
    prev2 = (uint16_t*)(parity ? prev : cur);
    next2 = (uint16_t*)(parity ? cur  : next);

    FILTER(w - 3, w, 0)
}
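
/* Deinterlace one output frame: for every plane, lines belonging to the
 * missing field are interpolated with filter_line()/filter_edges(), while
 * lines of the kept field are copied unchanged from the current frame. */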
static void filter(AVFilterContext *ctx, AVFrame *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    int y, i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->width;
        int h = dstpic->height;
        int refs = yadif->cur->linesize[i];
        int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8;
        int pix_3 = 3 * df;

        if (i == 1 || i == 2) {
            /* Why is this not part of the per-plane description thing? */
            w >>= yadif->csp->log2_chroma_w;
            h >>= yadif->csp->log2_chroma_h;
        }

        /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
         * we need to call the c variant which avoids this for border pixels
         */
        for (y = 0; y < h; y++) {
            if ((y ^ parity) & 1) {
                uint8_t *prev = &yadif->prev->data[i][y * refs];
                uint8_t *cur  = &yadif->cur ->data[i][y * refs];
                uint8_t *next = &yadif->next->data[i][y * refs];
                uint8_t *dst  = &dstpic->data[i][y * dstpic->linesize[i]];
                int     mode  = y == 1 || y + 2 == h ? 2 : yadif->mode;
                yadif->filter_line(dst + pix_3, prev + pix_3, cur + pix_3,
                                   next + pix_3, w - 6,
                                   y + 1 < h ? refs : -refs,
                                   y ? -refs : refs,
                                   parity ^ tff, mode);
                yadif->filter_edges(dst, prev, cur, next, w,
                                    y + 1 < h ? refs : -refs,
                                    y ? -refs : refs,
                                    parity ^ tff, mode);
            } else {
                memcpy(&dstpic->data[i][y * dstpic->linesize[i]],
                       &yadif->cur->data[i][y * refs], w * df);
            }
        }
    }

    emms_c();
}
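
/* Allocate the output buffer slightly larger than requested and offset the
 * plane pointers by one line, presumably so the assembly versions of the
 * line filter can safely access memory just outside the visible area. */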
static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
{
    AVFrame *frame;
    int width  = FFALIGN(w,     32);
    int height = FFALIGN(h + 2, 32);
    int i;

    frame = ff_default_get_video_buffer(link, width, height);
    if (!frame)
        return NULL;

    for (i = 0; i < 3; i++)
        frame->data[i] += frame->linesize[i];

    return frame;
}
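
/* Build and emit one deinterlaced output frame. For the second field of a
 * frame (is_second) a new buffer is allocated and its timestamp is placed
 * halfway between the current and next input frames. */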
static int return_frame(AVFilterContext *ctx, int is_second)
{
    YADIFContext *yadif = ctx->priv;
    AVFilterLink *link  = ctx->outputs[0];
    int tff, ret;

    if (yadif->parity == -1) {
        tff = yadif->cur->interlaced_frame ?
              yadif->cur->top_field_first : 1;
    } else {
        tff = yadif->parity ^ 1;
    }

    if (is_second) {
        yadif->out = ff_get_video_buffer(link, link->w, link->h);
        if (!yadif->out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(yadif->out, yadif->cur);
        yadif->out->interlaced_frame = 0;
    }

    filter(ctx, yadif->out, tff ^ !is_second, tff);

    if (is_second) {
        int64_t cur_pts  = yadif->cur->pts;
        int64_t next_pts = yadif->next->pts;

        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
            yadif->out->pts = cur_pts + next_pts;
        } else {
            yadif->out->pts = AV_NOPTS_VALUE;
        }
    }
    ret = ff_filter_frame(ctx->outputs[0], yadif->out);

    yadif->frame_pending = (yadif->mode&1) && !is_second;

    return ret;
}
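
/* Input callback: flush any pending second field, rotate the prev/cur/next
 * frame pointers, pass progressive frames through untouched when auto mode
 * is enabled, and otherwise emit the first field of the current frame. */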
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    YADIFContext *yadif = ctx->priv;

    if (yadif->frame_pending)
        return_frame(ctx, 1);

    if (yadif->prev)
        av_frame_free(&yadif->prev);
    yadif->prev = yadif->cur;
    yadif->cur  = yadif->next;
    yadif->next = frame;

    if (!yadif->cur)
        return 0;

    if (yadif->auto_enable && !yadif->cur->interlaced_frame) {
        yadif->out = av_frame_clone(yadif->cur);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        av_frame_free(&yadif->prev);
        if (yadif->out->pts != AV_NOPTS_VALUE)
            yadif->out->pts *= 2;
        return ff_filter_frame(ctx->outputs[0], yadif->out);
    }

    if (!yadif->prev &&
        !(yadif->prev = av_frame_clone(yadif->cur)))
        return AVERROR(ENOMEM);

    yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
    if (!yadif->out)
        return AVERROR(ENOMEM);

    av_frame_copy_props(yadif->out, yadif->cur);
    yadif->out->interlaced_frame = 0;

    if (yadif->out->pts != AV_NOPTS_VALUE)
        yadif->out->pts *= 2;

    return return_frame(ctx, 0);
}
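
/* Output callback: pull frames from the previous filter until an output
 * frame can be produced; at EOF a copy of the last frame is injected (with
 * an extrapolated timestamp) so the final frame is still deinterlaced. */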
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    YADIFContext *yadif = ctx->priv;

    if (yadif->frame_pending) {
        return_frame(ctx, 1);
        return 0;
    }

    do {
        int ret;

        if (yadif->eof)
            return AVERROR_EOF;

        ret = ff_request_frame(link->src->inputs[0]);

        if (ret == AVERROR_EOF && yadif->next) {
            AVFrame *next = av_frame_clone(yadif->next);
            if (!next)
                return AVERROR(ENOMEM);

            next->pts = yadif->next->pts * 2 - yadif->cur->pts;

            filter_frame(link->src->inputs[0], next);
            yadif->eof = 1;
        } else if (ret < 0) {
            return ret;
        }
    } while (!yadif->cur);

    return 0;
}
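
/* Report how many frames can be read immediately. In field-per-frame modes
 * (mode bit 0 set) every available input frame yields two output frames,
 * hence the final multiplication. */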
static int poll_frame(AVFilterLink *link)
{
    YADIFContext *yadif = link->src->priv;
    int ret, val;

    if (yadif->frame_pending)
        return 1;

    val = ff_poll_frame(link->src->inputs[0]);
    if (val <= 0)
        return val;

    //FIXME change API to not require this red tape
    if (val == 1 && !yadif->next) {
        if ((ret = ff_request_frame(link->src->inputs[0])) < 0)
            return ret;
        val = ff_poll_frame(link->src->inputs[0]);
        if (val <= 0)
            return val;
    }
    assert(yadif->next || !val);

    if (yadif->auto_enable && yadif->next && !yadif->next->interlaced_frame)
        return val;

    return val * ((yadif->mode&1)+1);
}
static av_cold void uninit(AVFilterContext *ctx)
{
    YADIFContext *yadif = ctx->priv;

    if (yadif->prev) av_frame_free(&yadif->prev);
    if (yadif->cur ) av_frame_free(&yadif->cur );
    if (yadif->next) av_frame_free(&yadif->next);
}
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_NE( AV_PIX_FMT_GRAY16BE,    AV_PIX_FMT_GRAY16LE ),
        AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
        AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
        AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
        AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
        AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
        AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}
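
/* Configure the output link: the output uses half the input time base so
 * that double-rate (field) output can still get distinct timestamps, and the
 * line filters are selected according to the pixel depth. */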
static int config_props(AVFilterLink *link)
{
    YADIFContext *s = link->src->priv;

    link->time_base.num = link->src->inputs[0]->time_base.num;
    link->time_base.den = link->src->inputs[0]->time_base.den * 2;
    link->w             = link->src->inputs[0]->w;
    link->h             = link->src->inputs[0]->h;

    s->csp = av_pix_fmt_desc_get(link->format);
    if (s->csp->comp[0].depth_minus1 / 8 == 1) {
        s->filter_line  = filter_line_c_16bit;
        s->filter_edges = filter_edges_16bit;
    } else {
        s->filter_line  = filter_line_c;
        s->filter_edges = filter_edges;
    }

    if (ARCH_X86)
        ff_yadif_init_x86(s);

    return 0;
}
#define OFFSET(x) offsetof(YADIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
static const AVOption options[] = {
    { "mode",   NULL, OFFSET(mode),   AV_OPT_TYPE_INT, { .i64 = 0  },  0, 3, FLAGS },
    { "parity", NULL, OFFSET(parity), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, FLAGS, "parity" },
    { "auto",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, .unit = "parity" },
    { "tff",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 =  0 }, .unit = "parity" },
    { "bff",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 =  1 }, .unit = "parity" },
    { "auto",   NULL, OFFSET(auto_enable), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
    { NULL },
};
static const AVClass yadif_class = {
    .class_name = "yadif",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
static const AVFilterPad avfilter_vf_yadif_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .get_video_buffer = get_video_buffer,
        .filter_frame     = filter_frame,
    },
    { NULL }
};
static const AVFilterPad avfilter_vf_yadif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .poll_frame    = poll_frame,
        .request_frame = request_frame,
        .config_props  = config_props,
    },
    { NULL }
};
AVFilter avfilter_vf_yadif = {
    .name          = "yadif",
    .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image"),

    .priv_size     = sizeof(YADIFContext),
    .priv_class    = &yadif_class,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs        = avfilter_vf_yadif_inputs,

    .outputs       = avfilter_vf_yadif_outputs,
};