/*
 * Copyright (C) 2006-2010 Michael Niedermayer <michaelni@gmx.at>
 *               2010      James Darnley <james.darnley@gmail.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Libav; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "yadif.h"

#undef NDEBUG
#include <assert.h>

typedef struct {
    /**
     * 0: send 1 frame for each frame
     * 1: send 1 frame for each field
     * 2: like 0 but skips spatial interlacing check
     * 3: like 1 but skips spatial interlacing check
     */
    int mode;

    /**
     *  0: top field first
     *  1: bottom field first
     * -1: auto detection
     */
    int parity;

    int frame_pending;

    /**
     *  0: deinterlace all frames
     *  1: only deinterlace frames marked as interlaced
     */
    int auto_enable;

    AVFilterBufferRef *cur;
    AVFilterBufferRef *next;
    AVFilterBufferRef *prev;
    AVFilterBufferRef *out;
    void (*filter_line)(uint8_t *dst,
                        uint8_t *prev, uint8_t *cur, uint8_t *next,
                        int w, int prefs, int mrefs, int parity, int mode);

    const AVPixFmtDescriptor *csp;
    int eof;
} YADIFContext;
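
/* The yadif kernel: every pixel of the missing field gets a temporal
 * prediction (the prev2/next2 average), clamped towards an edge-directed
 * spatial prediction taken from the lines above and below in cur.  CHECK(j)
 * probes the diagonal direction j and deliberately leaves two braces open;
 * they are closed by the "}} }}" pairs inside FILTER.  The option string
 * parsed in init() fills the fields above, so e.g. "1:-1:0" should mean
 * mode=1, parity=-1 (auto detection), auto_enable=0; this reading is taken
 * from the sscanf() below, not from any formal option documentation. */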
#define CHECK(j)\
    {   int score = FFABS(cur[mrefs-1+(j)] - cur[prefs-1-(j)])\
                  + FFABS(cur[mrefs  +(j)] - cur[prefs  -(j)])\
                  + FFABS(cur[mrefs+1+(j)] - cur[prefs+1-(j)]);\
        if (score < spatial_score) {\
            spatial_score= score;\
            spatial_pred= (cur[mrefs  +(j)] + cur[prefs  -(j)])>>1;\

#define FILTER \
    for (x = 0;  x < w; x++) { \
        int c = cur[mrefs]; \
        int d = (prev2[0] + next2[0])>>1; \
        int e = cur[prefs]; \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e) )>>1; \
        int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e) )>>1; \
        int diff = FFMAX3(temporal_diff0>>1, temporal_diff1, temporal_diff2); \
        int spatial_pred = (c+e)>>1; \
        int spatial_score = FFABS(cur[mrefs-1] - cur[prefs-1]) + FFABS(c-e) \
                          + FFABS(cur[mrefs+1] - cur[prefs+1]) - 1; \
 \
        CHECK(-1) CHECK(-2) }} }} \
        CHECK( 1) CHECK( 2) }} }} \
 \
        if (mode < 2) { \
            int b = (prev2[2*mrefs] + next2[2*mrefs])>>1; \
            int f = (prev2[2*prefs] + next2[2*prefs])>>1; \
            int max = FFMAX3(d-e, d-c, FFMIN(b-c, f-e)); \
            int min = FFMIN3(d-e, d-c, FFMAX(b-c, f-e)); \
 \
            diff = FFMAX3(diff, min, -max); \
        } \
 \
        if (spatial_pred > d + diff) \
           spatial_pred = d + diff; \
        else if (spatial_pred < d - diff) \
           spatial_pred = d - diff; \
 \
        dst[0] = spatial_pred; \
 \
        dst++; \
        cur++; \
        prev++; \
        next++; \
        prev2++; \
        next2++; \
    }
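
/* In the line filters, parity tells which temporal neighbour already carries
 * the field being rebuilt: prev2/next2, the pair averaged for the temporal
 * prediction, are either (prev, cur) or (cur, next), so one side always
 * collapses onto cur.  prefs/mrefs are the offsets (in elements) of the
 * lines below/above the current one. */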
static void filter_line_c(uint8_t *dst,
                          uint8_t *prev, uint8_t *cur, uint8_t *next,
                          int w, int prefs, int mrefs, int parity, int mode)
{
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    FILTER
}

static void filter_line_c_16bit(uint16_t *dst,
                                uint16_t *prev, uint16_t *cur, uint16_t *next,
                                int w, int prefs, int mrefs, int parity, int mode)
{
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
    /* the caller passes byte offsets; convert to uint16_t elements */
    mrefs /= 2;
    prefs /= 2;

    FILTER
}
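
/* Apply the line filter to one output picture: for every plane, lines of the
 * missing field run through filter_line() while lines of the kept field are
 * copied straight from cur.  y==1 and y==h-2 force mode 2 because the
 * spatial interlacing check reads two lines above/below and would otherwise
 * step outside the plane. */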
static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    int y, i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->video->w;
        int h = dstpic->video->h;
        int refs = yadif->cur->linesize[i];
        int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8;

        if (i == 1 || i == 2) {
            /* Why is this not part of the per-plane description thing? */
            w >>= yadif->csp->log2_chroma_w;
            h >>= yadif->csp->log2_chroma_h;
        }

        for (y = 0; y < h; y++) {
            if ((y ^ parity) & 1) {
                uint8_t *prev = &yadif->prev->data[i][y*refs];
                uint8_t *cur  = &yadif->cur ->data[i][y*refs];
                uint8_t *next = &yadif->next->data[i][y*refs];
                uint8_t *dst  = &dstpic->data[i][y*dstpic->linesize[i]];
                int     mode  = y==1 || y+2==h ? 2 : yadif->mode;
                yadif->filter_line(dst, prev, cur, next, w, y+1<h ? refs : -refs, y ? -refs : refs, parity ^ tff, mode);
            } else {
                memcpy(&dstpic->data[i][y*dstpic->linesize[i]],
                       &yadif->cur->data[i][y*refs], w*df);
            }
        }
    }
#if HAVE_MMX
    __asm__ volatile("emms \n\t" : : : "memory");
#endif
}
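
/* Input buffers are over-allocated: the width is rounded up to 32, two extra
 * lines are added, and every plane pointer is shifted down by one line.
 * Presumably this is for the benefit of the asm filter_line implementations
 * and the border handling, which may touch pixels just outside the visible
 * area. */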
static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, int w, int h)
{
    AVFilterBufferRef *picref;
    int width = FFALIGN(w, 32);
    int height= FFALIGN(h+2, 32);
    int i;

    picref = avfilter_default_get_video_buffer(link, perms, width, height);

    picref->video->w = w;
    picref->video->h = h;

    for (i = 0; i < 3; i++)
        picref->data[i] += picref->linesize[i];

    return picref;
}
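
/* Emit one deinterlaced frame.  The output link runs at half the input time
 * base (see config_props()), so the first field keeps pts*2 (set in
 * start_frame()) and the second field lands halfway between two input
 * frames: cur_pts + next_pts is exactly that midpoint in the doubled
 * time base. */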
static void return_frame(AVFilterContext *ctx, int is_second)
{
    YADIFContext *yadif = ctx->priv;
    AVFilterLink *link= ctx->outputs[0];
    int tff;

    if (yadif->parity == -1) {
        tff = yadif->cur->video->interlaced ?
            yadif->cur->video->top_field_first : 1;
    } else {
        tff = yadif->parity^1;
    }

    if (is_second) {
        yadif->out = avfilter_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE |
                                               AV_PERM_REUSE, link->w, link->h);
        avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
        yadif->out->video->interlaced = 0;
    }

    if (!yadif->csp)
        yadif->csp = &av_pix_fmt_descriptors[link->format];
    if (yadif->csp->comp[0].depth_minus1 / 8 == 1)
        yadif->filter_line = filter_line_c_16bit;

    filter(ctx, yadif->out, tff ^ !is_second, tff);

    if (is_second) {
        int64_t cur_pts  = yadif->cur->pts;
        int64_t next_pts = yadif->next->pts;

        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
            yadif->out->pts = cur_pts + next_pts;
        } else {
            yadif->out->pts = AV_NOPTS_VALUE;
        }
        avfilter_start_frame(ctx->outputs[0], yadif->out);
    }
    avfilter_draw_slice(ctx->outputs[0], 0, link->h, 1);
    avfilter_end_frame(ctx->outputs[0]);

    yadif->frame_pending = (yadif->mode&1) && !is_second;
}
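
/* Input side: keep a sliding three-frame window (prev/cur/next).  A frame is
 * only filtered once its successor has arrived, because the temporal
 * predictor needs both neighbours.  The first field is started here; in
 * field-rate modes the second one is produced later by return_frame(). */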
static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = link->dst;
    YADIFContext *yadif = ctx->priv;

    if (yadif->frame_pending)
        return_frame(ctx, 1);

    if (yadif->prev)
        avfilter_unref_buffer(yadif->prev);
    yadif->prev = yadif->cur;
    yadif->cur  = yadif->next;
    yadif->next = picref;

    if (!yadif->cur)
        return;

    if (yadif->auto_enable && !yadif->cur->video->interlaced) {
        yadif->out  = avfilter_ref_buffer(yadif->cur, AV_PERM_READ);
        avfilter_unref_buffer(yadif->prev);
        yadif->prev = NULL;
        if (yadif->out->pts != AV_NOPTS_VALUE)
            yadif->out->pts *= 2;
        avfilter_start_frame(ctx->outputs[0], yadif->out);
        return;
    }

    if (!yadif->prev)
        yadif->prev = avfilter_ref_buffer(yadif->cur, AV_PERM_READ);

    yadif->out = avfilter_get_video_buffer(ctx->outputs[0], AV_PERM_WRITE | AV_PERM_PRESERVE |
                                           AV_PERM_REUSE, link->w, link->h);

    avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
    yadif->out->video->interlaced = 0;
    if (yadif->out->pts != AV_NOPTS_VALUE)
        yadif->out->pts *= 2;
    avfilter_start_frame(ctx->outputs[0], yadif->out);
}

static void end_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->dst;
    YADIFContext *yadif = ctx->priv;

    if (!yadif->out)
        return;

    if (yadif->auto_enable && !yadif->cur->video->interlaced) {
        avfilter_draw_slice(ctx->outputs[0], 0, link->h, 1);
        avfilter_end_frame(ctx->outputs[0]);
        return;
    }

    return_frame(ctx, 0);
}
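
/* EOF handling: when the input is drained there is still one buffered frame
 * (yadif->next) that never got a successor.  Feed a duplicate of it through
 * start_frame()/end_frame() with an extrapolated pts (next*2 - cur, i.e. one
 * input-frame duration past the last frame) so the final frame is
 * deinterlaced and emitted too. */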
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    YADIFContext *yadif = ctx->priv;

    if (yadif->frame_pending) {
        return_frame(ctx, 1);
        return 0;
    }

    do {
        int ret;

        if (yadif->eof)
            return AVERROR_EOF;

        ret = avfilter_request_frame(link->src->inputs[0]);

        if (ret == AVERROR_EOF && yadif->next) {
            AVFilterBufferRef *next = avfilter_ref_buffer(yadif->next, AV_PERM_READ);
            next->pts = yadif->next->pts * 2 - yadif->cur->pts;

            start_frame(link->src->inputs[0], next);
            end_frame(link->src->inputs[0]);
            yadif->eof = 1;
        } else if (ret < 0) {
            return ret;
        }
    } while (!yadif->cur);

    return 0;
}
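
/* poll_frame() must answer "how many frames can request_frame() deliver right
 * now".  In field-rate modes (mode&1) each input frame yields two output
 * frames, hence the final multiplication; the auto_enable early return keeps
 * the count at 1:1 for frames that are not interlaced anyway. */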
static int poll_frame(AVFilterLink *link)
{
    YADIFContext *yadif = link->src->priv;
    int ret, val;

    if (yadif->frame_pending)
        return 1;

    val = avfilter_poll_frame(link->src->inputs[0]);

    if (val==1 && !yadif->next) { //FIXME change API to not require this red tape
        if ((ret = avfilter_request_frame(link->src->inputs[0])) < 0)
            return ret;
        val = avfilter_poll_frame(link->src->inputs[0]);
    }
    assert(yadif->next || !val);

    if (yadif->auto_enable && yadif->next && !yadif->next->video->interlaced)
        return val;

    return val * ((yadif->mode&1)+1);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    YADIFContext *yadif = ctx->priv;

    if (yadif->prev) avfilter_unref_buffer(yadif->prev);
    if (yadif->cur ) avfilter_unref_buffer(yadif->cur );
    if (yadif->next) avfilter_unref_buffer(yadif->next);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum PixelFormat pix_fmts[] = {
        PIX_FMT_YUV420P,
        PIX_FMT_YUV422P,
        PIX_FMT_YUV444P,
        PIX_FMT_YUV410P,
        PIX_FMT_YUV411P,
        PIX_FMT_GRAY8,
        PIX_FMT_YUVJ420P,
        PIX_FMT_YUVJ422P,
        PIX_FMT_YUVJ444P,
        AV_NE( PIX_FMT_GRAY16BE, PIX_FMT_GRAY16LE ),
        PIX_FMT_YUV440P,
        PIX_FMT_YUVJ440P,
        AV_NE( PIX_FMT_YUV420P10BE, PIX_FMT_YUV420P10LE ),
        AV_NE( PIX_FMT_YUV422P10BE, PIX_FMT_YUV422P10LE ),
        AV_NE( PIX_FMT_YUV444P10BE, PIX_FMT_YUV444P10LE ),
        AV_NE( PIX_FMT_YUV420P16BE, PIX_FMT_YUV420P16LE ),
        AV_NE( PIX_FMT_YUV422P16BE, PIX_FMT_YUV422P16LE ),
        AV_NE( PIX_FMT_YUV444P16BE, PIX_FMT_YUV444P16LE ),
        PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));

    return 0;
}
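
/* Note: AV_NE(be, le) expands to its first argument on big-endian builds and
 * to the second otherwise, so only the native-endian variant of each
 * high-bit-depth format is advertised above; the 16-bit code path then never
 * has to byte-swap. */
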
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
{
    YADIFContext *yadif = ctx->priv;
    int cpu_flags = av_get_cpu_flags();

    yadif->mode = 0;
    yadif->parity = -1;
    yadif->auto_enable = 0;
    yadif->csp = NULL;

    if (args) sscanf(args, "%d:%d:%d", &yadif->mode, &yadif->parity, &yadif->auto_enable);

    yadif->filter_line = filter_line_c;
    if (HAVE_SSSE3 && cpu_flags & AV_CPU_FLAG_SSSE3)
        yadif->filter_line = ff_yadif_filter_line_ssse3;
    else if (HAVE_SSE && cpu_flags & AV_CPU_FLAG_SSE2)
        yadif->filter_line = ff_yadif_filter_line_sse2;
    else if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX)
        yadif->filter_line = ff_yadif_filter_line_mmx;

    av_log(ctx, AV_LOG_INFO, "mode:%d parity:%d auto_enable:%d\n", yadif->mode, yadif->parity, yadif->auto_enable);

    return 0;
}

static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { }
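
/* The output link advertises half the input time base (den * 2): field-rate
 * output needs pts slots between input frames, and the pts doubling in
 * start_frame()/return_frame() keeps frame timing unchanged in real time. */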
static int config_props(AVFilterLink *link)
{
    link->time_base.num = link->src->inputs[0]->time_base.num;
    link->time_base.den = link->src->inputs[0]->time_base.den * 2;
    link->w             = link->src->inputs[0]->w;
    link->h             = link->src->inputs[0]->h;

    return 0;
}

AVFilter avfilter_vf_yadif = {
    .name          = "yadif",
    .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image"),

    .priv_size     = sizeof(YADIFContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs    = (AVFilterPad[]) {{ .name             = "default",
                                    .type             = AVMEDIA_TYPE_VIDEO,
                                    .start_frame      = start_frame,
                                    .get_video_buffer = get_video_buffer,
                                    .draw_slice       = null_draw_slice,
                                    .end_frame        = end_frame, },
                                  { .name = NULL}},

    .outputs   = (AVFilterPad[]) {{ .name             = "default",
                                    .type             = AVMEDIA_TYPE_VIDEO,
                                    .poll_frame       = poll_frame,
                                    .request_frame    = request_frame,
                                    .config_props     = config_props, },
                                  { .name = NULL}},
};