X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavfilter%2Fvf_yadif.c;h=574eac4a03285e557d2a58bd7fd0125868187c8e;hb=2268db2cd052674fde55c7d48b7a5098ce89b4ba;hp=aa5c434af99b2c4d71165704bc39792d79e0c677;hpb=ab09df9deae8be0e3346e9255a80d616517c32d5;p=ffmpeg

diff --git a/libavfilter/vf_yadif.c b/libavfilter/vf_yadif.c
index aa5c434af99..574eac4a032 100644
--- a/libavfilter/vf_yadif.c
+++ b/libavfilter/vf_yadif.c
@@ -4,94 +4,76 @@
  *
  * This file is part of Libav.
  *
- * Libav is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
 *
- * You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

 #include "libavutil/cpu.h"
 #include "libavutil/common.h"
+#include "libavutil/opt.h"
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
 #include "yadif.h"

 #undef NDEBUG
 #include <assert.h>

-typedef struct {
-    /**
-     * 0: send 1 frame for each frame
-     * 1: send 1 frame for each field
-     * 2: like 0 but skips spatial interlacing check
-     * 3: like 1 but skips spatial interlacing check
-     */
-    int mode;
-
-    /**
-     * 0: bottom field first
-     * 1: top field first
-     * -1: auto-detection
-     */
+typedef struct ThreadData {
+    AVFrame *frame;
+    int plane;
+    int w, h;
     int parity;
-
-    int frame_pending;
-
-    /**
-     * 0: deinterlace all frames
-     * 1: only deinterlace frames marked as interlaced
-     */
-    int auto_enable;
-
-    AVFilterBufferRef *cur;
-    AVFilterBufferRef *next;
-    AVFilterBufferRef *prev;
-    AVFilterBufferRef *out;
-    void (*filter_line)(uint8_t *dst,
-                        uint8_t *prev, uint8_t *cur, uint8_t *next,
-                        int w, int prefs, int mrefs, int parity, int mode);
-
-    const AVPixFmtDescriptor *csp;
-} YADIFContext;
+    int tff;
+} ThreadData;

 #define CHECK(j)\
-    {   int score = FFABS(cur[mrefs-1+(j)] - cur[prefs-1-(j)])\
+    {   int score = FFABS(cur[mrefs - 1 + (j)] - cur[prefs - 1 - (j)])\
                   + FFABS(cur[mrefs  +(j)] - cur[prefs  -(j)])\
-                  + FFABS(cur[mrefs+1+(j)] - cur[prefs+1-(j)]);\
+                  + FFABS(cur[mrefs + 1 + (j)] - cur[prefs + 1 - (j)]);\
         if (score < spatial_score) {\
             spatial_score= score;\
             spatial_pred= (cur[mrefs  +(j)] + cur[prefs  -(j)])>>1;\

-#define FILTER \
-    for (x = 0;  x < w; x++) { \
+/* The is_not_edge argument here controls when the code will enter a branch
+ * which reads up to and including x-3 and x+3. */
+
+#define FILTER(start, end, is_not_edge) \
+    for (x = start;  x < end; x++) { \
         int c = cur[mrefs]; \
         int d = (prev2[0] + next2[0])>>1; \
         int e = cur[prefs]; \
         int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
         int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e) )>>1; \
         int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e) )>>1; \
-        int diff = FFMAX3(temporal_diff0>>1, temporal_diff1, temporal_diff2); \
-        int spatial_pred = (c+e)>>1; \
-        int spatial_score = FFABS(cur[mrefs-1] - cur[prefs-1]) + FFABS(c-e) \
-                          + FFABS(cur[mrefs+1] - cur[prefs+1]) - 1; \
+        int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
+        int spatial_pred = (c+e) >> 1; \
 \
-        CHECK(-1) CHECK(-2) }} }} \
-        CHECK( 1) CHECK( 2) }} }} \
+        if (is_not_edge) {\
+            int spatial_score = FFABS(cur[mrefs - 1] - cur[prefs - 1]) + FFABS(c-e) \
+                              + FFABS(cur[mrefs + 1] - cur[prefs + 1]) - 1; \
+            CHECK(-1) CHECK(-2) }} }} \
+            CHECK( 1) CHECK( 2) }} }} \
+        }\
 \
         if (mode < 2) { \
-            int b = (prev2[2*mrefs] + next2[2*mrefs])>>1; \
-            int f = (prev2[2*prefs] + next2[2*prefs])>>1; \
-            int max = FFMAX3(d-e, d-c, FFMIN(b-c, f-e)); \
-            int min = FFMIN3(d-e, d-c, FFMAX(b-c, f-e)); \
+            int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
+            int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
+            int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
+            int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e)); \
 \
             diff = FFMAX3(diff, min, -max); \
         } \
@@ -111,127 +93,219 @@ typedef struct {
             next2++; \
         }

-static void filter_line_c(uint8_t *dst,
-                          uint8_t *prev, uint8_t *cur, uint8_t *next,
+static void filter_line_c(void *dst1,
+                          void *prev1, void *cur1, void *next1,
                           int w, int prefs, int mrefs, int parity, int mode)
 {
+    uint8_t *dst  = dst1;
+    uint8_t *prev = prev1;
+    uint8_t *cur  = cur1;
+    uint8_t *next = next1;
     int x;
     uint8_t *prev2 = parity ? prev : cur ;
     uint8_t *next2 = parity ? cur  : next;

-    FILTER
+    /* The function is called with the pointers already pointing to data[3] and
+     * with 6 subtracted from the width. This allows the FILTER macro to be
+     * called so that it processes all the pixels normally. A constant value of
+     * true for is_not_edge lets the compiler ignore the if statement. */
+    FILTER(0, w, 1)
 }

-static void filter_line_c_16bit(uint16_t *dst,
-                                uint16_t *prev, uint16_t *cur, uint16_t *next,
-                                int w, int prefs, int mrefs, int parity, int mode)
+static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
+                         int w, int prefs, int mrefs, int parity, int mode)
+{
+    uint8_t *dst  = dst1;
+    uint8_t *prev = prev1;
+    uint8_t *cur  = cur1;
+    uint8_t *next = next1;
+    int x;
+    uint8_t *prev2 = parity ? prev : cur ;
+    uint8_t *next2 = parity ? cur  : next;
+
+    /* Only edge pixels need to be processed here. A constant value of false
+     * for is_not_edge should let the compiler ignore the whole branch. */
+    FILTER(0, 3, 0)
+
+    dst   = (uint8_t*)dst1  + w - 3;
+    prev  = (uint8_t*)prev1 + w - 3;
+    cur   = (uint8_t*)cur1  + w - 3;
+    next  = (uint8_t*)next1 + w - 3;
+    prev2 = (uint8_t*)(parity ? prev : cur);
+    next2 = (uint8_t*)(parity ? cur  : next);
+
+    FILTER(w - 3, w, 0)
+}
+
+
+static void filter_line_c_16bit(void *dst1,
+                                void *prev1, void *cur1, void *next1,
+                                int w, int prefs, int mrefs, int parity,
+                                int mode)
 {
+    uint16_t *dst  = dst1;
+    uint16_t *prev = prev1;
+    uint16_t *cur  = cur1;
+    uint16_t *next = next1;
     int x;
     uint16_t *prev2 = parity ? prev : cur ;
     uint16_t *next2 = parity ? cur  : next;
     mrefs /= 2;
     prefs /= 2;

-    FILTER
+    FILTER(0, w, 1)
+}
+
+static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
+                               int w, int prefs, int mrefs, int parity, int mode)
+{
+    uint16_t *dst  = dst1;
+    uint16_t *prev = prev1;
+    uint16_t *cur  = cur1;
+    uint16_t *next = next1;
+    int x;
+    uint16_t *prev2 = parity ? prev : cur ;
+    uint16_t *next2 = parity ? cur  : next;
+    mrefs /= 2;
+    prefs /= 2;
+
+    FILTER(0, 3, 0)
+
+    dst   = (uint16_t*)dst1  + w - 3;
+    prev  = (uint16_t*)prev1 + w - 3;
+    cur   = (uint16_t*)cur1  + w - 3;
+    next  = (uint16_t*)next1 + w - 3;
+    prev2 = (uint16_t*)(parity ? prev : cur);
+    next2 = (uint16_t*)(parity ? cur  : next);
+
+    FILTER(w - 3, w, 0)
+}
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+    YADIFContext *s = ctx->priv;
+    ThreadData *td  = arg;
+    int refs = s->cur->linesize[td->plane];
+    int df = (s->csp->comp[td->plane].depth + 7) / 8;
+    int pix_3 = 3 * df;
+    int slice_h     = td->h / nb_jobs;
+    int slice_start = jobnr * slice_h;
+    int slice_end   = (jobnr == nb_jobs - 1) ? td->h : (jobnr + 1) * slice_h;
+    int y;
+
+    /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
+     * we need to call the c variant which avoids this for border pixels
+     */
+    for (y = slice_start; y < slice_end; y++) {
+        if ((y ^ td->parity) & 1) {
+            uint8_t *prev = &s->prev->data[td->plane][y * refs];
+            uint8_t *cur  = &s->cur ->data[td->plane][y * refs];
+            uint8_t *next = &s->next->data[td->plane][y * refs];
+            uint8_t *dst  = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
+            int     mode  = y == 1 || y + 2 == td->h ? 2 : s->mode;
+            s->filter_line(dst + pix_3, prev + pix_3, cur + pix_3,
+                           next + pix_3, td->w - 6,
+                           y + 1 < td->h ? refs : -refs,
+                           y ? -refs : refs,
+                           td->parity ^ td->tff, mode);
+            s->filter_edges(dst, prev, cur, next, td->w,
+                            y + 1 < td->h ? refs : -refs,
+                            y ? -refs : refs,
+                            td->parity ^ td->tff, mode);
+        } else {
+            memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]],
+                   &s->cur->data[td->plane][y * refs], td->w * df);
+        }
+    }
+    return 0;
 }

-static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
+static void filter(AVFilterContext *ctx, AVFrame *dstpic,
                    int parity, int tff)
 {
     YADIFContext *yadif = ctx->priv;
-    int y, i;
+    ThreadData td = { .frame = dstpic, .parity = parity, .tff = tff };
+    int i;

     for (i = 0; i < yadif->csp->nb_components; i++) {
-        int w = dstpic->video->w;
-        int h = dstpic->video->h;
-        int refs = yadif->cur->linesize[i];
-        int df = (yadif->csp->comp[i].depth_minus1+1) / 8;
+        int w = dstpic->width;
+        int h = dstpic->height;

-        if (i) {
-        /* Why is this not part of the per-plane description thing? */
+        if (i == 1 || i == 2) {
             w >>= yadif->csp->log2_chroma_w;
             h >>= yadif->csp->log2_chroma_h;
         }

-        for (y = 0; y < h; y++) {
-            if ((y ^ parity) & 1) {
-                uint8_t *prev = &yadif->prev->data[i][y*refs];
-                uint8_t *cur  = &yadif->cur ->data[i][y*refs];
-                uint8_t *next = &yadif->next->data[i][y*refs];
-                uint8_t *dst  = &dstpic->data[i][y*dstpic->linesize[i]];
-                int     mode  = y==1 || y+2==h ? 2 : yadif->mode;
-                yadif->filter_line(dst, prev, cur, next, w, y+1<h ? refs : -refs, y ? -refs : refs, parity ^ tff, mode);
-            } else {
-                memcpy(&dstpic->data[i][y*dstpic->linesize[i]],
-                       &yadif->cur->data[i][y*refs], w*df);
-            }
-        }
+
+        td.w     = w;
+        td.h     = h;
+        td.plane = i;
+
+        ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ctx->graph->nb_threads));
     }
-#if HAVE_MMX
-    __asm__ volatile("emms \n\t" : : : "memory");
-#endif
+
+    emms_c();
 }

-static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, int w, int h)
+static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
 {
-    AVFilterBufferRef *picref;
-    int width = FFALIGN(w, 32);
-    int height= FFALIGN(h+2, 32);
+    AVFrame *frame;
+    int width  = FFALIGN(w,     32);
+    int height = FFALIGN(h + 2, 32);
     int i;

-    picref = avfilter_default_get_video_buffer(link, perms, width, height);
+    frame = ff_default_get_video_buffer(link, width, height);

-    picref->video->w = w;
-    picref->video->h = h;
+    frame->width  = w;
+    frame->height = h;

     for (i = 0; i < 3; i++)
-        picref->data[i] += picref->linesize[i];
+        frame->data[i] += frame->linesize[i];

-    return picref;
+    return frame;
 }

-static void return_frame(AVFilterContext *ctx, int is_second)
+static int return_frame(AVFilterContext *ctx, int is_second)
 {
     YADIFContext *yadif = ctx->priv;
-    AVFilterLink *link= ctx->outputs[0];
-    int tff;
+    AVFilterLink *link  = ctx->outputs[0];
+    int tff, ret;

     if (yadif->parity == -1) {
-        tff = yadif->cur->video->interlaced ?
-            yadif->cur->video->top_field_first : 1;
+        tff = yadif->cur->interlaced_frame ?
+              yadif->cur->top_field_first : 1;
     } else {
-        tff = yadif->parity^1;
+        tff = yadif->parity ^ 1;
     }

-    if (is_second)
-        yadif->out = avfilter_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE |
-                                               AV_PERM_REUSE, link->w, link->h);
+    if (is_second) {
+        yadif->out = ff_get_video_buffer(link, link->w, link->h);
+        if (!yadif->out)
+            return AVERROR(ENOMEM);

-    if (!yadif->csp)
-        yadif->csp = &av_pix_fmt_descriptors[link->format];
-    if (yadif->csp->comp[0].depth_minus1 == 15)
-        yadif->filter_line = filter_line_c_16bit;
+        av_frame_copy_props(yadif->out, yadif->cur);
+        yadif->out->interlaced_frame = 0;
+    }

     filter(ctx, yadif->out, tff ^ !is_second, tff);

     if (is_second) {
-        if (yadif->next->pts != AV_NOPTS_VALUE &&
-            yadif->cur->pts != AV_NOPTS_VALUE) {
-            yadif->out->pts =
-                (yadif->next->pts&yadif->cur->pts) +
-                ((yadif->next->pts^yadif->cur->pts)>>1);
+        int64_t cur_pts  = yadif->cur->pts;
+        int64_t next_pts = yadif->next->pts;
+
+        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
+            yadif->out->pts = cur_pts + next_pts;
         } else {
             yadif->out->pts = AV_NOPTS_VALUE;
         }
-        avfilter_start_frame(ctx->outputs[0], yadif->out);
     }
-    avfilter_draw_slice(ctx->outputs[0], 0, link->h, 1);
-    avfilter_end_frame(ctx->outputs[0]);
+    ret = ff_filter_frame(ctx->outputs[0], yadif->out);

     yadif->frame_pending = (yadif->mode&1) && !is_second;
+    return ret;
 }

-static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
 {
     AVFilterContext *ctx = link->dst;
     YADIFContext *yadif = ctx->priv;
@@ -240,48 +314,40 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
         return_frame(ctx, 1);

     if (yadif->prev)
-        avfilter_unref_buffer(yadif->prev);
+        av_frame_free(&yadif->prev);
     yadif->prev = yadif->cur;
     yadif->cur  = yadif->next;
-    yadif->next = picref;
+    yadif->next = frame;

     if (!yadif->cur)
-        return;
-
-    if (yadif->auto_enable && !yadif->cur->video->interlaced) {
-        yadif->out  = avfilter_ref_buffer(yadif->cur, AV_PERM_READ);
-        avfilter_unref_buffer(yadif->prev);
-        yadif->prev = NULL;
-        avfilter_start_frame(ctx->outputs[0], yadif->out);
-        return;
-    }
-
-    if (!yadif->prev)
-        yadif->prev = avfilter_ref_buffer(yadif->cur, AV_PERM_READ);
+        return 0;

-    yadif->out = avfilter_get_video_buffer(ctx->outputs[0], AV_PERM_WRITE | AV_PERM_PRESERVE |
-                                           AV_PERM_REUSE, link->w, link->h);
+    if (yadif->auto_enable && !yadif->cur->interlaced_frame) {
+        yadif->out = av_frame_clone(yadif->cur);
+        if (!yadif->out)
+            return AVERROR(ENOMEM);

-    avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
-    yadif->out->video->interlaced = 0;
-    avfilter_start_frame(ctx->outputs[0], yadif->out);
-}
+        av_frame_free(&yadif->prev);
+        if (yadif->out->pts != AV_NOPTS_VALUE)
+            yadif->out->pts *= 2;
+        return ff_filter_frame(ctx->outputs[0], yadif->out);
+    }

-static void end_frame(AVFilterLink *link)
-{
-    AVFilterContext *ctx = link->dst;
-    YADIFContext *yadif = ctx->priv;
+    if (!yadif->prev &&
+        !(yadif->prev = av_frame_clone(yadif->cur)))
+        return AVERROR(ENOMEM);

+    yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
     if (!yadif->out)
-        return;
+        return AVERROR(ENOMEM);

-    if (yadif->auto_enable && !yadif->cur->video->interlaced) {
-        avfilter_draw_slice(ctx->outputs[0], 0, link->h, 1);
-        avfilter_end_frame(ctx->outputs[0]);
-        return;
-    }
+    av_frame_copy_props(yadif->out, yadif->cur);
+    yadif->out->interlaced_frame = 0;

-    return_frame(ctx, 0);
+    if (yadif->out->pts != AV_NOPTS_VALUE)
+        yadif->out->pts *= 2;
+
+    return return_frame(ctx, 0);
 }

 static int request_frame(AVFilterLink *link)
@@ -297,8 +363,24 @@ static int request_frame(AVFilterLink *link)
     do {
         int ret;

-        if ((ret = avfilter_request_frame(link->src->inputs[0])))
+        if (yadif->eof)
+            return AVERROR_EOF;
+
+        ret = ff_request_frame(link->src->inputs[0]);
+
+        if (ret == AVERROR_EOF && yadif->next) {
+            AVFrame *next = av_frame_clone(yadif->next);
+
+            if (!next)
+                return AVERROR(ENOMEM);
+
+            next->pts = yadif->next->pts * 2 - yadif->cur->pts;
+
+            filter_frame(link->src->inputs[0], next);
+            yadif->eof = 1;
+        } else if (ret < 0) {
             return ret;
+        }
     } while (!yadif->cur);

     return 0;
@@ -312,16 +394,21 @@ static int poll_frame(AVFilterLink *link)
     if (yadif->frame_pending)
         return 1;

-    val = avfilter_poll_frame(link->src->inputs[0]);
+    val = ff_poll_frame(link->src->inputs[0]);
+    if (val <= 0)
+        return val;

-    if (val==1 && !yadif->next) { //FIXME change API to not require this red tape
-        if ((ret = avfilter_request_frame(link->src->inputs[0])) < 0)
+    //FIXME change API to not require this red tape
+    if (val == 1 && !yadif->next) {
+        if ((ret = ff_request_frame(link->src->inputs[0])) < 0)
             return ret;
-        val = avfilter_poll_frame(link->src->inputs[0]);
+        val = ff_poll_frame(link->src->inputs[0]);
+        if (val <= 0)
+            return val;
     }
     assert(yadif->next || !val);

-    if (yadif->auto_enable && yadif->next && !yadif->next->video->interlaced)
+    if (yadif->auto_enable && yadif->next && !yadif->next->interlaced_frame)
         return val;

     return val * ((yadif->mode&1)+1);
@@ -331,84 +418,117 @@ static av_cold void uninit(AVFilterContext *ctx)
 {
     YADIFContext *yadif = ctx->priv;

-    if (yadif->prev) avfilter_unref_buffer(yadif->prev);
-    if (yadif->cur ) avfilter_unref_buffer(yadif->cur );
-    if (yadif->next) avfilter_unref_buffer(yadif->next);
+    if (yadif->prev) av_frame_free(&yadif->prev);
+    if (yadif->cur ) av_frame_free(&yadif->cur );
+    if (yadif->next) av_frame_free(&yadif->next);
 }

 static int query_formats(AVFilterContext *ctx)
 {
-    static const enum PixelFormat pix_fmts[] = {
-        PIX_FMT_YUV420P,
-        PIX_FMT_YUV422P,
-        PIX_FMT_YUV444P,
-        PIX_FMT_YUV410P,
-        PIX_FMT_YUV411P,
-        PIX_FMT_GRAY8,
-        PIX_FMT_YUVJ420P,
-        PIX_FMT_YUVJ422P,
-        PIX_FMT_YUVJ444P,
-        AV_NE( PIX_FMT_GRAY16BE, PIX_FMT_GRAY16LE ),
-        PIX_FMT_YUV440P,
-        PIX_FMT_YUVJ440P,
-        AV_NE( PIX_FMT_YUV420P16BE, PIX_FMT_YUV420P16LE ),
-        AV_NE( PIX_FMT_YUV422P16BE, PIX_FMT_YUV422P16LE ),
-        AV_NE( PIX_FMT_YUV444P16BE, PIX_FMT_YUV444P16LE ),
-        PIX_FMT_NONE
+    static const enum AVPixelFormat pix_fmts[] = {
+        AV_PIX_FMT_YUV420P,
+        AV_PIX_FMT_YUV422P,
+        AV_PIX_FMT_YUV444P,
+        AV_PIX_FMT_YUV410P,
+        AV_PIX_FMT_YUV411P,
+        AV_PIX_FMT_GRAY8,
+        AV_PIX_FMT_YUVJ420P,
+        AV_PIX_FMT_YUVJ422P,
+        AV_PIX_FMT_YUVJ444P,
+        AV_NE( AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE ),
+        AV_PIX_FMT_YUV440P,
+        AV_PIX_FMT_YUVJ440P,
+        AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
+        AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
+        AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
+        AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
+        AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
+        AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
+        AV_PIX_FMT_YUVA420P,
+        AV_PIX_FMT_NONE
     };

-    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
+    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

     return 0;
 }

-static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
+static int config_props(AVFilterLink *link)
 {
-    YADIFContext *yadif = ctx->priv;
-    av_unused int cpu_flags = av_get_cpu_flags();
-
-    yadif->mode = 0;
-    yadif->parity = -1;
-    yadif->auto_enable = 0;
-    yadif->csp = NULL;
+    YADIFContext *s = link->src->priv;

-    if (args) sscanf(args, "%d:%d:%d", &yadif->mode, &yadif->parity, &yadif->auto_enable);
+    link->time_base.num = link->src->inputs[0]->time_base.num;
+    link->time_base.den = link->src->inputs[0]->time_base.den * 2;
+    link->w             = link->src->inputs[0]->w;
+    link->h             = link->src->inputs[0]->h;

-    yadif->filter_line = filter_line_c;
-    if (HAVE_SSSE3 && cpu_flags & AV_CPU_FLAG_SSSE3)
-        yadif->filter_line = ff_yadif_filter_line_ssse3;
-    else if (HAVE_SSE && cpu_flags & AV_CPU_FLAG_SSE2)
-        yadif->filter_line = ff_yadif_filter_line_sse2;
-    else if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX)
-        yadif->filter_line = ff_yadif_filter_line_mmx;
+    s->csp = av_pix_fmt_desc_get(link->format);
+    if (s->csp->comp[0].depth > 8) {
+        s->filter_line  = filter_line_c_16bit;
+        s->filter_edges = filter_edges_16bit;
+    } else {
+        s->filter_line  = filter_line_c;
+        s->filter_edges = filter_edges;

-    av_log(ctx, AV_LOG_INFO, "mode:%d parity:%d auto_enable:%d\n", yadif->mode, yadif->parity, yadif->auto_enable);
+        if (ARCH_X86)
+            ff_yadif_init_x86(s);
+    }

     return 0;
 }

-static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { }
+#define OFFSET(x) offsetof(YADIFContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption options[] = {
+    { "mode",   NULL, OFFSET(mode),   AV_OPT_TYPE_INT, { .i64 = 0  },  0, 3, FLAGS },
+    { "parity", NULL, OFFSET(parity), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, FLAGS, "parity" },
+    { "auto",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, .unit = "parity" },
+    { "tff",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0  }, .unit = "parity" },
+    { "bff",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1  }, .unit = "parity" },
+    { "auto",   NULL, OFFSET(auto_enable), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
+    { NULL },
+};
+
+static const AVClass yadif_class = {
+    .class_name = "yadif",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+static const AVFilterPad avfilter_vf_yadif_inputs[] = {
+    {
+        .name             = "default",
+        .type             = AVMEDIA_TYPE_VIDEO,
+        .get_video_buffer = get_video_buffer,
+        .filter_frame     = filter_frame,
+    },
+    { NULL }
+};

-AVFilter avfilter_vf_yadif = {
+static const AVFilterPad avfilter_vf_yadif_outputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .poll_frame    = poll_frame,
+        .request_frame = request_frame,
+        .config_props  = config_props,
+    },
+    { NULL }
+};
+
+AVFilter ff_vf_yadif = {
     .name          = "yadif",
     .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image"),

     .priv_size     = sizeof(YADIFContext),
-    .init          = init,
+    .priv_class    = &yadif_class,
     .uninit        = uninit,
     .query_formats = query_formats,

-    .inputs    = (AVFilterPad[]) {{ .name             = "default",
-                                    .type             = AVMEDIA_TYPE_VIDEO,
-                                    .start_frame      = start_frame,
-                                    .get_video_buffer = get_video_buffer,
-                                    .draw_slice       = null_draw_slice,
-                                    .end_frame        = end_frame, },
-                                  { .name = NULL}},
-
-    .outputs   = (AVFilterPad[]) {{ .name             = "default",
-                                    .type             = AVMEDIA_TYPE_VIDEO,
-                                    .poll_frame       = poll_frame,
-                                    .request_frame    = request_frame, },
-                                  { .name = NULL}},
+    .inputs        = avfilter_vf_yadif_inputs,
+
+    .outputs       = avfilter_vf_yadif_outputs,
+
+    .flags         = AVFILTER_FLAG_SLICE_THREADS,
 };
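
The CHECK/FILTER machinery in the patch above is easiest to follow outside of macro form. The standalone sketch below re-expresses the per-pixel decision in plain C, mirroring the macros' temporal/spatial logic including the mode < 2 clamp. It is illustrative only: the function name yadif_pixel, the frame size and the test pattern are invented here, and where the macros only try the +/-2 diagonals after +/-1 already improved the score, this version simply scans all offsets, so it can differ from the macros on some inputs.

/* Minimal sketch of yadif's per-pixel decision (not the library's API).
 * prev, cur, next point at the output pixel's position in the previous,
 * current and next frame; mrefs/prefs are the element offsets of the line
 * above/below; parity selects which two fields bracket the missing line,
 * exactly as in the FILTER macro. */
#include <stdint.h>
#include <stdio.h>

#define FFABS(a)        ((a) >= 0 ? (a) : (-(a)))
#define FFMIN(a, b)     ((a) < (b) ? (a) : (b))
#define FFMAX(a, b)     ((a) > (b) ? (a) : (b))
#define FFMIN3(a, b, c) FFMIN(FFMIN(a, b), c)
#define FFMAX3(a, b, c) FFMAX(FFMAX(a, b), c)

static int yadif_pixel(const uint8_t *prev, const uint8_t *cur,
                       const uint8_t *next, int mrefs, int prefs, int parity)
{
    const uint8_t *prev2 = parity ? prev : cur;   /* field before the gap */
    const uint8_t *next2 = parity ? cur  : next;  /* field after the gap  */
    int c = cur[mrefs];                           /* pixel above          */
    int e = cur[prefs];                           /* pixel below          */
    int d = (prev2[0] + next2[0]) >> 1;           /* temporal average     */
    int temporal_diff0 = FFABS(prev2[0] - next2[0]);
    int temporal_diff1 = (FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e)) >> 1;
    int temporal_diff2 = (FFABS(next[mrefs] - c) + FFABS(next[prefs] - e)) >> 1;
    int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2);
    int spatial_pred  = (c + e) >> 1;
    int spatial_score = FFABS(cur[mrefs - 1] - cur[prefs - 1]) + FFABS(c - e)
                      + FFABS(cur[mrefs + 1] - cur[prefs + 1]) - 1;
    int j;

    /* Edge-directed interpolation: pick the diagonal that matches best
     * (CHECK in the macro version; here all offsets are scanned). */
    for (j = -2; j <= 2; j++) {
        int score;
        if (!j)
            continue;                             /* j == 0 is the base score */
        score = FFABS(cur[mrefs - 1 + j] - cur[prefs - 1 - j])
              + FFABS(cur[mrefs     + j] - cur[prefs     - j])
              + FFABS(cur[mrefs + 1 + j] - cur[prefs + 1 - j]);
        if (score < spatial_score) {
            spatial_score = score;
            spatial_pred  = (cur[mrefs + j] + cur[prefs - j]) >> 1;
        }
    }

    /* The mode < 2 branch: tighten/widen diff from the pixels two lines
     * away so that the prediction stays temporally plausible. */
    {
        int b   = (prev2[2 * mrefs] + next2[2 * mrefs]) >> 1;
        int f   = (prev2[2 * prefs] + next2[2 * prefs]) >> 1;
        int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e));
        int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e));
        diff = FFMAX3(diff, min, -max);
    }

    /* Clamp the spatial prediction into [d - diff, d + diff]. */
    if (spatial_pred > d + diff)
        spatial_pred = d + diff;
    else if (spatial_pred < d - diff)
        spatial_pred = d - diff;
    return spatial_pred;
}

int main(void)
{
    enum { W = 16, H = 5 };
    uint8_t prevf[H * W], curf[H * W], nextf[H * W];
    int x = 6, y = 2;                 /* reconstruct the pixel at (2, 6) */
    int xx, yy;

    /* Synthetic content: a hard diagonal edge moving one pixel per frame. */
    for (yy = 0; yy < H; yy++)
        for (xx = 0; xx < W; xx++) {
            prevf[yy * W + xx] = xx + yy     > 8 ? 200 : 30;
            curf [yy * W + xx] = xx + yy + 1 > 8 ? 200 : 30;
            nextf[yy * W + xx] = xx + yy + 2 > 8 ? 200 : 30;
        }

    printf("prediction %d (pixel above %d, pixel below %d)\n",
           yadif_pixel(&prevf[y * W + x], &curf[y * W + x],
                       &nextf[y * W + x], -W, W, 0),
           curf[(y - 1) * W + x], curf[(y + 1) * W + x]);
    return 0;
}

On this pattern the diagonal search follows the moving edge and predicts 200, where the plain vertical average (30 + 200) / 2 = 115 would have blended across it; that is exactly the artifact the spatial_score search exists to avoid.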
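The priv_class added by this patch is what replaces the old positional "mode:parity:auto_enable" string parsed by init() with named AVOptions. The following sketch shows one way a caller might exercise that table; it is an assumption-laden illustration, not part of the patch, and it presumes a libavfilter build new enough to parse key=value option strings through the filter's class (older builds additionally need avfilter_register_all() first).

#include <stdio.h>

#include <libavfilter/avfilter.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>

int main(void)
{
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *yadif_ctx = NULL;
    uint8_t *mode = NULL;
    int ret;

    if (!graph)
        return 1;

    /* "mode=1:parity=auto" is parsed against the AVOption table above;
     * "deint" is just this instance's name within the graph. */
    ret = avfilter_graph_create_filter(&yadif_ctx,
                                       avfilter_get_by_name("yadif"),
                                       "deint", "mode=1:parity=auto",
                                       NULL, graph);
    if (ret < 0) {
        avfilter_graph_free(&graph);
        return 1;
    }

    /* Read one option back through the same class to show that it stuck. */
    if (av_opt_get(yadif_ctx->priv, "mode", 0, &mode) >= 0) {
        printf("yadif mode = %s\n", mode);
        av_free(mode);
    }

    avfilter_graph_free(&graph);
    return 0;
}

A real graph would still need a buffer source and sink linked around the instance before avfilter_graph_config(); the fragment only demonstrates option parsing against the new class.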