X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;ds=sidebyside;f=libavfilter%2Fvf_midequalizer.c;h=8101741bdc816f92507cadce565b0dda80827919;hb=f5a9c63401c840024defeb50a3dab9f86551b67e;hp=99d26c751ed9daf9e0d7d6b3a7c2d6e50320b22e;hpb=cb0eba71f047944ccc1220fca04c50f45f88a8ff;p=ffmpeg

diff --git a/libavfilter/vf_midequalizer.c b/libavfilter/vf_midequalizer.c
index 99d26c751ed..8101741bdc8 100644
--- a/libavfilter/vf_midequalizer.c
+++ b/libavfilter/vf_midequalizer.c
@@ -25,7 +25,7 @@
 #include "formats.h"
 #include "internal.h"
 #include "video.h"
-#include "framesync.h"
+#include "framesync2.h"
 
 typedef struct MidEqualizerContext {
     const AVClass *class;
@@ -66,7 +66,7 @@ static int query_formats(AVFilterContext *ctx)
         AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
         AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
         AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
-        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12,
+        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12,
         AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
         AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
         AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
@@ -89,8 +89,8 @@ static int process_frame(FFFrameSync *fs)
     AVFrame *out, *in0, *in1;
     int ret;
 
-    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in0, 0)) < 0 ||
-        (ret = ff_framesync_get_frame(&s->fs, 1, &in1, 0)) < 0)
+    if ((ret = ff_framesync2_get_frame(&s->fs, 0, &in0, 0)) < 0 ||
+        (ret = ff_framesync2_get_frame(&s->fs, 1, &in1, 0)) < 0)
         return ret;
 
     if (ctx->is_disabled) {
@@ -311,7 +311,7 @@ static int config_output(AVFilterLink *outlink)
     outlink->sample_aspect_ratio = in0->sample_aspect_ratio;
     outlink->frame_rate = in0->frame_rate;
 
-    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
+    if ((ret = ff_framesync2_init(&s->fs, ctx, 2)) < 0)
         return ret;
 
     in = s->fs.in;
@@ -326,26 +326,20 @@ static int config_output(AVFilterLink *outlink)
     s->fs.opaque   = s;
     s->fs.on_event = process_frame;
 
-    return ff_framesync_configure(&s->fs);
+    return ff_framesync2_configure(&s->fs);
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+static int activate(AVFilterContext *ctx)
 {
-    MidEqualizerContext *s = inlink->dst->priv;
-    return ff_framesync_filter_frame(&s->fs, inlink, buf);
-}
-
-static int request_frame(AVFilterLink *outlink)
-{
-    MidEqualizerContext *s = outlink->src->priv;
-    return ff_framesync_request_frame(&s->fs, outlink);
+    MidEqualizerContext *s = ctx->priv;
+    return ff_framesync2_activate(&s->fs);
 }
 
 static av_cold void uninit(AVFilterContext *ctx)
 {
     MidEqualizerContext *s = ctx->priv;
 
-    ff_framesync_uninit(&s->fs);
+    ff_framesync2_uninit(&s->fs);
     av_freep(&s->histogram[0]);
     av_freep(&s->histogram[1]);
     av_freep(&s->cchange);
@@ -355,13 +349,11 @@ static const AVFilterPad midequalizer_inputs[] = {
     {
         .name         = "in0",
         .type         = AVMEDIA_TYPE_VIDEO,
-        .filter_frame = filter_frame,
         .config_props = config_input0,
     },
     {
        .name         = "in1",
        .type         = AVMEDIA_TYPE_VIDEO,
-        .filter_frame = filter_frame,
        .config_props = config_input1,
     },
     { NULL }
@@ -372,7 +364,6 @@ static const AVFilterPad midequalizer_outputs[] = {
         .name          = "default",
         .type          = AVMEDIA_TYPE_VIDEO,
         .config_props  = config_output,
-        .request_frame = request_frame,
     },
     { NULL }
 };
@@ -383,6 +374,7 @@ AVFilter ff_vf_midequalizer = {
     .priv_size     = sizeof(MidEqualizerContext),
     .uninit        = uninit,
     .query_formats = query_formats,
+    .activate      = activate,
     .inputs        = midequalizer_inputs,
     .outputs       = midequalizer_outputs,
     .priv_class    = &midequalizer_class,