/*
 * Copyright (C) 2006-2010 Michael Niedermayer <michaelni@gmx.at>
 *               2010      James Darnley <james.darnley@gmail.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Libav; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "yadif.h"

#undef NDEBUG
#include <assert.h>

#define PERM_RWP AV_PERM_WRITE | AV_PERM_PRESERVE | AV_PERM_REUSE

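/*
 * Probe one candidate interpolation direction, (j) pixels off vertical:
 * score three pixel pairs mirrored across the missing line and, if that
 * direction matches better than the current best, adopt its average as
 * the spatial prediction.  Note that CHECK deliberately leaves two braces
 * open; they are closed by the "}} }}" sequences inside FILTER below.
 */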
#define CHECK(j)\
    {   int score = FFABS(cur[mrefs - 1 + (j)] - cur[prefs - 1 - (j)])\
                  + FFABS(cur[mrefs     + (j)] - cur[prefs     - (j)])\
                  + FFABS(cur[mrefs + 1 + (j)] - cur[prefs + 1 - (j)]);\
        if (score < spatial_score) {\
            spatial_score = score;\
            spatial_pred  = (cur[mrefs + (j)] + cur[prefs - (j)]) >> 1;

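/*
 * Compute one output pixel per iteration: d is the temporal average of the
 * same-parity field, c and e are the pixels directly above and below in the
 * current frame.  The spatial prediction starts as (c + e) / 2, is refined
 * by the directional CHECKs, and is finally clamped to the range that the
 * temporal neighbourhood allows.
 */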
#define FILTER \
    for (x = 0; x < w; x++) { \
        int c = cur[mrefs]; \
        int d = (prev2[0] + next2[0]) >> 1; \
        int e = cur[prefs]; \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 = (FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e)) >> 1; \
        int temporal_diff2 = (FFABS(next[mrefs] - c) + FFABS(next[prefs] - e)) >> 1; \
        int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
        int spatial_pred = (c + e) >> 1; \
        int spatial_score = FFABS(cur[mrefs - 1] - cur[prefs - 1]) + FFABS(c - e) \
                          + FFABS(cur[mrefs + 1] - cur[prefs + 1]) - 1; \
 \
        CHECK(-1) CHECK(-2) }} }} \
        CHECK( 1) CHECK( 2) }} }} \
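 \
        /* The spatial interlacing check: unless disabled (mode >= 2),     \
         * widen the permitted deviation from the temporal average using   \
         * the pixels two lines above (b) and below (f).  filter() forces  \
         * mode 2 next to the frame borders, where b and f would read      \
         * outside the plane. */ \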
        if (mode < 2) { \
            int b = (prev2[2 * mrefs] + next2[2 * mrefs]) >> 1; \
            int f = (prev2[2 * prefs] + next2[2 * prefs]) >> 1; \
            int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
            int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e)); \
 \
            diff = FFMAX3(diff, min, -max); \
        } \
 \
        if (spatial_pred > d + diff) \
            spatial_pred = d + diff; \
        else if (spatial_pred < d - diff) \
            spatial_pred = d - diff; \
 \
        dst[0] = spatial_pred; \
 \
        dst++; \
        cur++; \
        prev++; \
        next++; \
        prev2++; \
        next2++; \
    }

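/*
 * Filter one line of one plane.  prefs and mrefs are the byte offsets from
 * a pixel to the pixel below and above it; parity selects which pair of
 * input frames carries the field with the same parity as the line being
 * rebuilt.
 */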
static void filter_line_c(void *dst1,
                          void *prev1, void *cur1, void *next1,
                          int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur;
    uint8_t *next2 = parity ? cur  : next;

    FILTER
}

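/* As above, but for the pixel formats with more than 8 bits per sample. */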
static void filter_line_c_16bit(void *dst1,
                                void *prev1, void *cur1, void *next1,
                                int w, int prefs, int mrefs, int parity,
                                int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur;
    uint16_t *next2 = parity ? cur  : next;

    /* The line offsets are given in bytes; convert to uint16_t units. */
    mrefs /= 2;
    prefs /= 2;

    FILTER
}

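/*
 * Deinterlace one output picture: in every plane, the lines of the field
 * selected by parity are rebuilt with filter_line(), while the lines of
 * the other field are copied through from the current input frame.
 */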
static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    int y, i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->video->w;
        int h = dstpic->video->h;
        int refs = yadif->cur->linesize[i];
        int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8; /* bytes per sample */

        if (i == 1 || i == 2) {
            /* Why is this not part of the per-plane description thing? */
            w >>= yadif->csp->log2_chroma_w;
            h >>= yadif->csp->log2_chroma_h;
        }

        for (y = 0; y < h; y++) {
            if ((y ^ parity) & 1) {
                uint8_t *prev = &yadif->prev->data[i][y * refs];
                uint8_t *cur  = &yadif->cur ->data[i][y * refs];
                uint8_t *next = &yadif->next->data[i][y * refs];
                uint8_t *dst  = &dstpic->data[i][y * dstpic->linesize[i]];
                /* Force the spatial-only mode on the lines next to the
                 * frame borders, where the b/f check would read out of
                 * bounds. */
                int mode = y == 1 || y + 2 == h ? 2 : yadif->mode;
                yadif->filter_line(dst, prev, cur, next, w,
                                   y + 1 < h ? refs : -refs,
                                   y ? -refs : refs,
                                   parity ^ tff, mode);
            } else {
                memcpy(&dstpic->data[i][y * dstpic->linesize[i]],
                       &yadif->cur->data[i][y * refs], w * df);
            }
        }
    }

    emms_c();
}

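/*
 * Hand out input buffers with width and height padded to multiples of 32
 * and the data pointers offset by one line, so the filter may read one
 * line above and below the frame without leaving allocated memory.
 */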
static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms,
                                           int w, int h)
{
    AVFilterBufferRef *picref;
    int width  = FFALIGN(w,     32);
    int height = FFALIGN(h + 2, 32);
    int i;

    picref = ff_default_get_video_buffer(link, perms, width, height);

    picref->video->w = w;
    picref->video->h = h;

    for (i = 0; i < 3; i++)
        picref->data[i] += picref->linesize[i];

    return picref;
}

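/*
 * Emit one deinterlaced frame.  In the field-rate modes (mode & 1) each
 * input frame produces two calls: the first uses the output buffer set up
 * by filter_frame(), the second allocates its own and gets a pts halfway
 * between cur and next in the doubled output time base.
 */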
static int return_frame(AVFilterContext *ctx, int is_second)
{
    YADIFContext *yadif = ctx->priv;
    AVFilterLink *link  = ctx->outputs[0];
    int tff, ret;

    if (yadif->parity == -1) {
        tff = yadif->cur->video->interlaced ?
              yadif->cur->video->top_field_first : 1;
    } else {
        tff = yadif->parity ^ 1;
    }

    if (is_second) {
        yadif->out = ff_get_video_buffer(link, PERM_RWP, link->w, link->h);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
        yadif->out->video->interlaced = 0;
    }

    if (!yadif->csp)
        yadif->csp = av_pix_fmt_desc_get(link->format);
    if (yadif->csp->comp[0].depth_minus1 / 8 == 1)
        yadif->filter_line = filter_line_c_16bit;

    filter(ctx, yadif->out, tff ^ !is_second, tff);

    if (is_second) {
        int64_t cur_pts  = yadif->cur->pts;
        int64_t next_pts = yadif->next->pts;

        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
            yadif->out->pts = cur_pts + next_pts;
        } else {
            yadif->out->pts = AV_NOPTS_VALUE;
        }
    }
    ret = ff_filter_frame(ctx->outputs[0], yadif->out);

    yadif->frame_pending = (yadif->mode & 1) && !is_second;

    return ret;
}

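/*
 * Accept a new input frame: shift the prev/cur/next window, pass
 * progressive frames straight through when auto_enable is set, and
 * otherwise allocate the output buffer for the first field and filter it.
 */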
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = link->dst;
    YADIFContext *yadif = ctx->priv;

    if (yadif->frame_pending)
        return_frame(ctx, 1);

    if (yadif->prev)
        avfilter_unref_buffer(yadif->prev);
    yadif->prev = yadif->cur;
    yadif->cur  = yadif->next;
    yadif->next = picref;

    if (!yadif->cur)
        return 0;

    if (yadif->auto_enable && !yadif->cur->video->interlaced) {
        yadif->out = avfilter_ref_buffer(yadif->cur, AV_PERM_READ);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        avfilter_unref_bufferp(&yadif->prev);
        if (yadif->out->pts != AV_NOPTS_VALUE)
            yadif->out->pts *= 2;
        return ff_filter_frame(ctx->outputs[0], yadif->out);
    }

    if (!yadif->prev &&
        !(yadif->prev = avfilter_ref_buffer(yadif->cur, AV_PERM_READ)))
        return AVERROR(ENOMEM);

    yadif->out = ff_get_video_buffer(ctx->outputs[0], PERM_RWP,
                                     link->w, link->h);
    if (!yadif->out)
        return AVERROR(ENOMEM);

    avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
    yadif->out->video->interlaced = 0;

    /* All timestamps are rescaled to the doubled output time base. */
    if (yadif->out->pts != AV_NOPTS_VALUE)
        yadif->out->pts *= 2;

    return return_frame(ctx, 0);
}

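/*
 * Output side: flush a pending second field first, otherwise pull input
 * until a frame is queued.  On EOF the last frame is run through the
 * filter once more, with an extrapolated timestamp, so it is not lost to
 * the one-frame delay of the prev/cur/next window.
 */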
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    YADIFContext *yadif = ctx->priv;

    if (yadif->frame_pending) {
        return_frame(ctx, 1);
        return 0;
    }

    do {
        int ret;

        if (yadif->eof)
            return AVERROR_EOF;

        ret = ff_request_frame(link->src->inputs[0]);

        if (ret == AVERROR_EOF && yadif->next) {
            AVFilterBufferRef *next =
                avfilter_ref_buffer(yadif->next, AV_PERM_READ);
            if (!next)
                return AVERROR(ENOMEM);

            next->pts = yadif->next->pts * 2 - yadif->cur->pts;

            filter_frame(link->src->inputs[0], next);
            yadif->eof = 1;
        } else if (ret < 0) {
            return ret;
        }
    } while (!yadif->cur);

    return 0;
}

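/*
 * Report how many frames are immediately available: in the field-rate
 * modes every buffered input frame yields two output frames, hence the
 * (mode & 1) + 1 multiplier.
 */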
static int poll_frame(AVFilterLink *link)
{
    YADIFContext *yadif = link->src->priv;
    int ret, val;

    if (yadif->frame_pending)
        return 1;

    val = ff_poll_frame(link->src->inputs[0]);
    if (val <= 0)
        return val;

    //FIXME change API to not require this red tape
    if (val == 1 && !yadif->next) {
        if ((ret = ff_request_frame(link->src->inputs[0])) < 0)
            return ret;
        val = ff_poll_frame(link->src->inputs[0]);
        if (val <= 0)
            return val;
    }
    assert(yadif->next || !val);

    if (yadif->auto_enable && yadif->next && !yadif->next->video->interlaced)
        return val;

    return val * ((yadif->mode & 1) + 1);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    YADIFContext *yadif = ctx->priv;

    if (yadif->prev) avfilter_unref_bufferp(&yadif->prev);
    if (yadif->cur ) avfilter_unref_bufferp(&yadif->cur );
    if (yadif->next) avfilter_unref_bufferp(&yadif->next);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ444P,
        AV_NE( AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE ),
        AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ440P,
        AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
        AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
        AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
        AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
        AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
        AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}

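/*
 * Options are parsed positionally as "mode:parity:auto_enable", e.g.
 * "1:-1:1" selects field-rate output, automatic field order and
 * deinterlacing of only the frames flagged as interlaced.
 */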
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    YADIFContext *yadif = ctx->priv;

    yadif->mode        = 0;
    yadif->parity      = -1;
    yadif->auto_enable = 0;
    yadif->csp         = NULL;

    if (args)
        sscanf(args, "%d:%d:%d",
               &yadif->mode, &yadif->parity, &yadif->auto_enable);

    yadif->filter_line = filter_line_c;
    if (ARCH_X86)
        ff_yadif_init_x86(yadif);

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d parity:%d auto_enable:%d\n",
           yadif->mode, yadif->parity, yadif->auto_enable);

    return 0;
}

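/*
 * The output time base is half the input's; together with the doubled pts
 * values this keeps frame-rate output at the input rate and makes the
 * field-rate modes run at exactly twice it.
 */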
static int config_props(AVFilterLink *link)
{
    link->time_base.num = link->src->inputs[0]->time_base.num;
    link->time_base.den = link->src->inputs[0]->time_base.den * 2;
    link->w             = link->src->inputs[0]->w;
    link->h             = link->src->inputs[0]->h;

    return 0;
}

static const AVFilterPad avfilter_vf_yadif_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .get_video_buffer = get_video_buffer,
        .filter_frame     = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_yadif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .poll_frame    = poll_frame,
        .request_frame = request_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter avfilter_vf_yadif = {
    .name          = "yadif",
    .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image"),

    .priv_size     = sizeof(YADIFContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs    = avfilter_vf_yadif_inputs,
    .outputs   = avfilter_vf_yadif_outputs,
};