#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
+#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
int64_t ignored_samples;
int loop;
+ int eof;
int64_t size;
int64_t start;
int64_t pts;
#define VFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(LoopContext, x)
+/* Warn once at link configuration when the "size" option was left at 0:
+ * with size == 0 the filter buffers nothing and cannot loop. Shared by the
+ * audio (aloop) and video (loop) variants; the media type of the first
+ * input pad selects the "frames" vs "samples" wording. */
+static void check_size(AVFilterContext *ctx)
+{
+ LoopContext *s = ctx->priv;
+
+ if (!s->size)
+ av_log(ctx, AV_LOG_WARNING, "Number of %s to loop is not set!\n",
+ ctx->input_pads[0].type == AVMEDIA_TYPE_VIDEO ? "frames" : "samples");
+}
+
#if CONFIG_ALOOP_FILTER
static int aconfig_input(AVFilterLink *inlink)
if (!s->fifo || !s->left)
return AVERROR(ENOMEM);
+ check_size(ctx);
+
return 0;
}
}
out->pts = s->pts;
out->nb_samples = ret;
- s->pts += out->nb_samples;
+ s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
i += out->nb_samples;
s->current_sample += out->nb_samples;
return ret;
if (s->current_sample >= s->nb_samples) {
+ s->duration = s->pts;
s->current_sample = 0;
if (s->loop > 0)
drain = FFMAX(0, s->start - s->ignored_samples);
s->pts = frame->pts;
av_audio_fifo_drain(s->fifo, drain);
- s->pts += s->start - s->ignored_samples;
+ s->pts += av_rescale_q(s->start - s->ignored_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
}
s->nb_samples += ret - drain;
drain = frame->nb_samples - written;
av_audio_fifo_drain(s->left, drain);
}
frame->nb_samples = ret;
- s->pts += ret;
+ s->pts += av_rescale_q(ret, (AVRational){1, outlink->sample_rate}, outlink->time_base);
ret = ff_filter_frame(outlink, frame);
} else {
int nb_samples = frame->nb_samples;
} else {
s->ignored_samples += frame->nb_samples;
frame->pts = s->pts;
- s->pts += frame->nb_samples;
+ s->pts += av_rescale_q(frame->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
ret = ff_filter_frame(outlink, frame);
}
return AVERROR(ENOMEM);
av_audio_fifo_read(s->left, (void **)out->extended_data, nb_samples);
out->pts = s->pts;
- s->pts += nb_samples;
+ s->pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
ret = ff_filter_frame(outlink, out);
if (ret < 0)
return ret;
ret = push_samples(ctx, 1024);
}
- if (ret == AVERROR_EOF && s->nb_samples > 0 && s->loop != 0) {
- ret = push_samples(ctx, outlink->sample_rate);
+ if (s->eof && s->nb_samples > 0 && s->loop != 0) {
+ ret = push_samples(ctx, 1024);
}
return ret;
}
+/* activate() callback for the audio loop filter.
+ *
+ * Replaces the old filter_frame/request_frame pad callbacks: frames are
+ * pulled from the input link with ff_inlink_consume_frame() and EOF is
+ * observed via ff_inlink_acknowledge_status().
+ *
+ * Flow per invocation:
+ *   1. forward any status from the output back to the input;
+ *   2. while still collecting (fifo not yet full, or looping disabled),
+ *      consume one input frame and hand it to afilter_frame();
+ *   3. on input EOF, clamp "size" to what was actually buffered and latch
+ *      s->eof so later calls stop consuming;
+ *   4. after EOF with nothing to loop, propagate EOF downstream
+ *      (s->duration is used as the final pts — NOTE(review): for the audio
+ *      path this relies on s->duration being kept up to date elsewhere,
+ *      e.g. the assignment visible at the loop-wrap point; confirm);
+ *   5. otherwise either request more input or emit the next looped chunk
+ *      via arequest_frame().
+ *
+ * Returns 0, a pushed-frame result, or FFERROR_NOT_READY. */
+static int aactivate(AVFilterContext *ctx)
+{
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ LoopContext *s = ctx->priv;
+ AVFrame *frame = NULL;
+ int ret, status;
+ int64_t pts;
+
+ FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
+
+ /* Keep consuming input while the loop buffer is not yet full,
+ * or when looping is effectively disabled (loop == 0 or size == 0). */
+ if (!s->eof && (s->nb_samples < s->size || !s->loop || !s->size)) {
+ ret = ff_inlink_consume_frame(inlink, &frame);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return afilter_frame(inlink, frame);
+ }
+
+ /* Input finished: loop whatever was buffered, even if less than "size". */
+ if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
+ if (status == AVERROR_EOF) {
+ s->size = s->nb_samples;
+ s->eof = 1;
+ }
+ }
+
+ /* EOF with nothing (or no request) to loop: terminate downstream. */
+ if (s->eof && (!s->loop || !s->size)) {
+ ff_outlink_set_status(outlink, AVERROR_EOF, s->duration);
+ return 0;
+ }
+
+ if (!s->eof && (!s->size ||
+ (s->nb_samples < s->size) ||
+ (s->nb_samples >= s->size && s->loop == 0))) {
+ FF_FILTER_FORWARD_WANTED(outlink, inlink);
+ } else if (s->loop && s->nb_samples == s->size) {
+ /* Buffer is complete: emit the next looped block of samples. */
+ return arequest_frame(outlink);
+ }
+
+ return FFERROR_NOT_READY;
+}
+
static const AVOption aloop_options[] = {
{ "loop", "number of loops", OFFSET(loop), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, INT_MAX, AFLAGS },
{ "size", "max number of samples to loop", OFFSET(size), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT32_MAX, AFLAGS },
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = afilter_frame,
.config_props = aconfig_input,
},
{ NULL }
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
- .request_frame = arequest_frame,
},
{ NULL }
};
-AVFilter ff_af_aloop = {
+const AVFilter ff_af_aloop = {
.name = "aloop",
.description = NULL_IF_CONFIG_SMALL("Loop audio samples."),
.priv_size = sizeof(LoopContext),
.priv_class = &aloop_class,
+ .activate = aactivate,
.uninit = auninit,
.inputs = ainputs,
.outputs = aoutputs,
if (!s->frames)
return AVERROR(ENOMEM);
+ check_size(ctx);
+
return 0;
}
{
AVFilterLink *outlink = ctx->outputs[0];
LoopContext *s = ctx->priv;
- int64_t pts;
+ int64_t pts, duration;
int ret;
AVFrame *out = av_frame_clone(s->frames[s->current_frame]);
if (!out)
return AVERROR(ENOMEM);
out->pts += s->duration - s->start_pts;
- pts = out->pts + out->pkt_duration;
+ if (out->pkt_duration)
+ duration = out->pkt_duration;
+ else
+ duration = av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
+ pts = out->pts + duration;
ret = ff_filter_frame(outlink, out);
s->current_frame++;
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
LoopContext *s = ctx->priv;
+ int64_t duration;
int ret = 0;
if (inlink->frame_count_out >= s->start && s->size > 0 && s->loop != 0) {
return AVERROR(ENOMEM);
}
s->nb_frames++;
- s->duration = frame->pts + frame->pkt_duration;
+ if (frame->pkt_duration)
+ duration = frame->pkt_duration;
+ else
+ duration = av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
+ s->duration = frame->pts + duration;
ret = ff_filter_frame(outlink, frame);
} else {
av_frame_free(&frame);
return ret;
}
+/* activate() callback for the video loop filter, mirroring aactivate():
+ * the old pull-based request_frame() is replaced by the message-based
+ * activate API. Frames are consumed with ff_inlink_consume_frame(),
+ * EOF is detected with ff_inlink_acknowledge_status(), and the final
+ * stream status carries s->duration as the terminating pts. */
-static int request_frame(AVFilterLink *outlink)
+static int activate(AVFilterContext *ctx)
{
- AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
LoopContext *s = ctx->priv;
- int ret = 0;
+ AVFrame *frame = NULL;
+ int ret, status;
+ int64_t pts;
- if ((!s->size) ||
- (s->nb_frames < s->size) ||
- (s->nb_frames >= s->size && s->loop == 0)) {
- ret = ff_request_frame(ctx->inputs[0]);
- } else {
- ret = push_frame(ctx);
+ FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
+
+ /* Still filling the frame buffer (or looping disabled): take one
+ * input frame and route it through the regular filter_frame() path. */
+ if (!s->eof && (s->nb_frames < s->size || !s->loop || !s->size)) {
+ ret = ff_inlink_consume_frame(inlink, &frame);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return filter_frame(inlink, frame);
}
- if (ret == AVERROR_EOF && s->nb_frames > 0 && s->loop != 0) {
- ret = push_frame(ctx);
+ /* On input EOF, shrink "size" to the number of frames actually stored
+ * so the loop replays exactly what was buffered. */
+ if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
+ if (status == AVERROR_EOF) {
+ s->size = s->nb_frames;
+ s->eof = 1;
+ }
}
- return ret;
+ /* Nothing to loop after EOF: forward EOF with the accumulated duration. */
+ if (s->eof && (!s->loop || !s->size)) {
+ ff_outlink_set_status(outlink, AVERROR_EOF, s->duration);
+ return 0;
+ }
+
+ if (!s->eof && (!s->size ||
+ (s->nb_frames < s->size) ||
+ (s->nb_frames >= s->size && s->loop == 0))) {
+ FF_FILTER_FORWARD_WANTED(outlink, inlink);
+ } else if (s->loop && s->nb_frames == s->size) {
+ /* Buffer complete: emit the next stored frame. */
+ return push_frame(ctx);
+ }
+
+ return FFERROR_NOT_READY;
}
static const AVOption loop_options[] = {
static const AVFilterPad inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame,
+ /* .filter_frame is intentionally dropped: input frames are now
+ * consumed inside the filter's activate() callback instead. */
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
static const AVFilterPad outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .request_frame = request_frame,
+ /* .request_frame is intentionally dropped: downstream demand is
+ * handled by the filter's activate() callback. */
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
+/* Filter definition is now const (the AVFilter table is read-only) and
+ * registers .activate, superseding the per-pad frame callbacks above. */
-AVFilter ff_vf_loop = {
+const AVFilter ff_vf_loop = {
.name = "loop",
.description = NULL_IF_CONFIG_SMALL("Loop video frames."),
.priv_size = sizeof(LoopContext),
.priv_class = &loop_class,
.init = init,
.uninit = uninit,
+ .activate = activate,
.inputs = inputs,
.outputs = outputs,
};