X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavfilter%2Ff_interleave.c;h=06f4cda7985ebdf8c9018c9ef24fd891ee14ea6f;hb=e55018ee110bf42e32739b1725411dde44e70291;hp=d8a73b52e5d36359118aece5aeafcfbb459c11db;hpb=fda424b300c1a0b991296aa585691609d01196bd;p=ffmpeg

diff --git a/libavfilter/f_interleave.c b/libavfilter/f_interleave.c
index d8a73b52e5d..06f4cda7985 100644
--- a/libavfilter/f_interleave.c
+++ b/libavfilter/f_interleave.c
@@ -27,12 +27,9 @@
 #include "libavutil/avstring.h"
 #include "libavutil/opt.h"
 
-#define FF_INTERNAL_FIELDS 1
-#include "framequeue.h"
-
 #include "avfilter.h"
-#include "bufferqueue.h"
 #include "formats.h"
+#include "filters.h"
 #include "internal.h"
 #include "audio.h"
 #include "video.h"
@@ -40,7 +37,7 @@
 typedef struct InterleaveContext {
     const AVClass *class;
     int nb_inputs;
-    struct FFBufQueue *queues;
+    int64_t pts;
 } InterleaveContext;
 
 #define OFFSET(x) offsetof(InterleaveContext, x)
@@ -52,58 +49,78 @@ static const AVOption filt_name##_options[] = {            \
    { NULL }                                                   \
 }
 
-inline static int push_frame(AVFilterContext *ctx)
+static int activate(AVFilterContext *ctx)
 {
+    AVFilterLink *outlink = ctx->outputs[0];
     InterleaveContext *s = ctx->priv;
-    AVFrame *frame;
-    int i, queue_idx = -1;
-    int64_t pts_min = INT64_MAX;
+    int64_t q_pts, pts = INT64_MAX;
+    int i, nb_eofs = 0, input_idx = -1;
+
+    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
 
-    /* look for oldest frame */
     for (i = 0; i < ctx->nb_inputs; i++) {
-        struct FFBufQueue *q = &s->queues[i];
+        if (!ff_outlink_get_status(ctx->inputs[i]) &&
+            !ff_inlink_queued_frames(ctx->inputs[i]))
+            break;
+    }
 
-        if (!q->available && !ctx->inputs[i]->status_out)
-            return 0;
-        if (q->available) {
-            frame = ff_bufqueue_peek(q, 0);
-            if (frame->pts < pts_min) {
-                pts_min = frame->pts;
-                queue_idx = i;
+    if (i == ctx->nb_inputs) {
+        for (i = 0; i < ctx->nb_inputs; i++) {
+            AVFrame *frame;
+
+            if (ff_outlink_get_status(ctx->inputs[i]))
+                continue;
+
+            frame = ff_inlink_peek_frame(ctx->inputs[i], 0);
+            if (frame->pts == AV_NOPTS_VALUE) {
+                int ret;
+
+                av_log(ctx, AV_LOG_WARNING,
+                       "NOPTS value for input frame cannot be accepted, frame discarded\n");
+                ret = ff_inlink_consume_frame(ctx->inputs[i], &frame);
+                if (ret < 0)
+                    return ret;
+                av_frame_free(&frame);
+                return AVERROR_INVALIDDATA;
+            }
+
+            q_pts = av_rescale_q(frame->pts, ctx->inputs[i]->time_base, AV_TIME_BASE_Q);
+            if (q_pts < pts) {
+                pts = q_pts;
+                input_idx = i;
             }
         }
-    }
 
-    /* all inputs are closed */
-    if (queue_idx < 0)
-        return AVERROR_EOF;
+        if (input_idx >= 0) {
+            AVFrame *frame;
+            int ret;
 
-    frame = ff_bufqueue_get(&s->queues[queue_idx]);
-    av_log(ctx, AV_LOG_DEBUG, "queue:%d -> frame time:%f\n",
-           queue_idx, frame->pts * av_q2d(AV_TIME_BASE_Q));
-    return ff_filter_frame(ctx->outputs[0], frame);
-}
+            ret = ff_inlink_consume_frame(ctx->inputs[input_idx], &frame);
+            if (ret < 0)
+                return ret;
 
-static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
-{
-    AVFilterContext *ctx = inlink->dst;
-    InterleaveContext *s = ctx->priv;
-    unsigned in_no = FF_INLINK_IDX(inlink);
+            frame->pts = s->pts = pts;
+            return ff_filter_frame(outlink, frame);
+        }
+    }
 
-    if (frame->pts == AV_NOPTS_VALUE) {
-        av_log(ctx, AV_LOG_WARNING,
-               "NOPTS value for input frame cannot be accepted, frame discarded\n");
-        av_frame_free(&frame);
-        return AVERROR_INVALIDDATA;
+    for (i = 0; i < ctx->nb_inputs; i++) {
+        if (ff_inlink_queued_frames(ctx->inputs[i]))
+            continue;
+        if (ff_outlink_frame_wanted(outlink) &&
+            !ff_outlink_get_status(ctx->inputs[i])) {
+            ff_inlink_request_frame(ctx->inputs[i]);
+            return 0;
+        }
+        nb_eofs++;
     }
 
-    /* queue frame */
-    frame->pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
-    av_log(ctx, AV_LOG_DEBUG, "frame pts:%f -> queue idx:%d available:%d\n",
-           frame->pts * av_q2d(AV_TIME_BASE_Q), in_no, s->queues[in_no].available);
-    ff_bufqueue_add(ctx, &s->queues[in_no], frame);
+    if (nb_eofs == ctx->nb_inputs) {
+        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
+        return 0;
+    }
 
-    return push_frame(ctx);
+    return FFERROR_NOT_READY;
 }
 
 static av_cold int init(AVFilterContext *ctx)
@@ -112,10 +129,6 @@ static av_cold int init(AVFilterContext *ctx)
     const AVFilterPad *outpad = &ctx->filter->outputs[0];
     int i, ret;
 
-    s->queues = av_calloc(s->nb_inputs, sizeof(s->queues[0]));
-    if (!s->queues)
-        return AVERROR(ENOMEM);
-
     for (i = 0; i < s->nb_inputs; i++) {
         AVFilterPad inpad = { 0 };
 
@@ -123,7 +136,6 @@ static av_cold int init(AVFilterContext *ctx)
         if (!inpad.name)
             return AVERROR(ENOMEM);
         inpad.type = outpad->type;
-        inpad.filter_frame = filter_frame;
 
         switch (outpad->type) {
         case AVMEDIA_TYPE_VIDEO:
@@ -144,14 +156,8 @@ static av_cold int init(AVFilterContext *ctx)
 
 static av_cold void uninit(AVFilterContext *ctx)
 {
-    InterleaveContext *s = ctx->priv;
-    int i;
-
-    for (i = 0; i < ctx->nb_inputs; i++) {
-        ff_bufqueue_discard_all(&s->queues[i]);
-        av_freep(&s->queues[i]);
+    for (int i = 0; i < ctx->nb_inputs; i++)
         av_freep(&ctx->input_pads[i].name);
-    }
 }
 
 static int config_output(AVFilterLink *outlink)
@@ -190,23 +196,6 @@ static int config_output(AVFilterLink *outlink)
     return 0;
 }
 
-static int request_frame(AVFilterLink *outlink)
-{
-    AVFilterContext *ctx = outlink->src;
-    InterleaveContext *s = ctx->priv;
-    int i, ret;
-
-    for (i = 0; i < ctx->nb_inputs; i++) {
-        if (!s->queues[i].available && !ctx->inputs[i]->status_out) {
-            ret = ff_request_frame(ctx->inputs[i]);
-            if (ret != AVERROR_EOF)
-                return ret;
-        }
-    }
-
-    return push_frame(ctx);
-}
-
 #if CONFIG_INTERLEAVE_FILTER
 
 DEFINE_OPTIONS(interleave, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
@@ -217,7 +206,6 @@ static const AVFilterPad interleave_outputs[] = {
         .name          = "default",
         .type          = AVMEDIA_TYPE_VIDEO,
         .config_props  = config_output,
-        .request_frame = request_frame,
     },
     { NULL }
 };
@@ -228,6 +216,7 @@ AVFilter ff_vf_interleave = {
     .priv_size   = sizeof(InterleaveContext),
     .init        = init,
     .uninit      = uninit,
+    .activate    = activate,
     .outputs     = interleave_outputs,
     .priv_class  = &interleave_class,
     .flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
@@ -245,7 +234,6 @@ static const AVFilterPad ainterleave_outputs[] = {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
-        .request_frame = request_frame,
     },
     { NULL }
 };
@@ -256,6 +244,7 @@ AVFilter ff_af_ainterleave = {
     .priv_size   = sizeof(InterleaveContext),
     .init        = init,
     .uninit      = uninit,
+    .activate    = activate,
     .outputs     = ainterleave_outputs,
     .priv_class  = &ainterleave_class,
     .flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
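
Note for readers skimming the patch: the heart of the new activate() callback is the selection rule in the large hunk above. Once every non-EOF input has at least one queued frame, the pts of each input's head frame is rescaled from that input's time base to the common AV_TIME_BASE_Q (1/1000000) clock, and the input with the smallest rescaled value is consumed and forwarded; the chosen value is also remembered in s->pts so it can later be reported as the EOF timestamp by ff_outlink_set_status(). The standalone sketch below illustrates only that selection rule and is not FFmpeg code: the toy_input table and the to_microseconds() helper are invented for the example (the filter itself uses ff_inlink_peek_frame() and av_rescale_q()).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* One entry per input link: the pts of its first queued frame and the
 * time base that pts is expressed in (numerator/denominator). */
struct toy_input {
    int64_t head_pts;
    int tb_num, tb_den;
};

/* Rescale a pts from tb_num/tb_den ticks to microseconds.  In the filter,
 * av_rescale_q(pts, tb, AV_TIME_BASE_Q) does this with proper rounding and
 * overflow handling; this simplified version is only good enough for a demo. */
static int64_t to_microseconds(int64_t pts, int tb_num, int tb_den)
{
    return pts * 1000000 * tb_num / tb_den;
}

int main(void)
{
    struct toy_input inputs[] = {
        { 90000, 1, 90000 }, /* head frame at 1.0 s in a 1/90000 clock */
        {    45, 1,    50 }, /* head frame at 0.9 s in a 1/50 clock    */
    };
    int64_t best_pts = INT64_MAX;
    int best_idx = -1;

    /* Same shape as the loop in activate(): rescale every head pts to the
     * common clock and keep the input with the smallest value. */
    for (int i = 0; i < 2; i++) {
        int64_t q_pts = to_microseconds(inputs[i].head_pts,
                                        inputs[i].tb_num, inputs[i].tb_den);
        if (q_pts < best_pts) {
            best_pts = q_pts;
            best_idx = i;
        }
    }

    /* Prints: forward input 1, pts 900000 us */
    printf("forward input %d, pts %" PRId64 " us\n", best_idx, best_pts);
    return 0;
}

In the filter itself the winning frame is then pulled with ff_inlink_consume_frame(), its pts is rewritten to the rescaled value (frame->pts = s->pts = pts), and it is handed to ff_filter_frame(outlink, frame); once every input has drained, nb_eofs reaches ctx->nb_inputs and the EOF status is propagated downstream with that last pts.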