*/
#include "libavutil/avstring.h"
+#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
+#include "filters.h"
#include "internal.h"
typedef struct ChanDelay {
- int delay;
- unsigned delay_index;
- unsigned index;
+ int64_t delay;
+ size_t delay_index;
+ size_t index;
uint8_t *samples;
} ChanDelay;
typedef struct AudioDelayContext {
const AVClass *class;
+ int all;
char *delays;
ChanDelay *chandelay;
int nb_delays;
int block_align;
- unsigned max_delay;
+ int64_t padding;
+ int64_t max_delay;
int64_t next_pts;
+ int eof;
void (*delay_channel)(ChanDelay *d, int nb_samples,
const uint8_t *src, uint8_t *dst);
static const AVOption adelay_options[] = {
{ "delays", "set list of delays for each channel", OFFSET(delays), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
+    { "all",    "use last available delay for remaining channels", OFFSET(all),    AV_OPT_TYPE_BOOL,   {.i64=0},    0,  1, A },
{ NULL }
};
p = s->delays;
for (i = 0; i < s->nb_delays; i++) {
ChanDelay *d = &s->chandelay[i];
- float delay;
+ float delay, div;
char type = 0;
int ret;
p = NULL;
- ret = sscanf(arg, "%d%c", &d->delay, &type);
+ ret = av_sscanf(arg, "%"SCNd64"%c", &d->delay, &type);
if (ret != 2 || type != 'S') {
- sscanf(arg, "%f", &delay);
- d->delay = delay * inlink->sample_rate / 1000.0;
+ div = type == 's' ? 1.0 : 1000.0;
+ if (av_sscanf(arg, "%f", &delay) != 1) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid syntax for delay.\n");
+ return AVERROR(EINVAL);
+ }
+ d->delay = delay * inlink->sample_rate / div;
}
if (d->delay < 0) {
}
}
+ if (s->all && i) {
+ for (int j = i; j < s->nb_delays; j++)
+ s->chandelay[j].delay = s->chandelay[i-1].delay;
+ }
+
+ s->padding = s->chandelay[0].delay;
+ for (i = 1; i < s->nb_delays; i++) {
+ ChanDelay *d = &s->chandelay[i];
+
+ s->padding = FFMIN(s->padding, d->delay);
+ }
+
+ if (s->padding) {
+ for (i = 0; i < s->nb_delays; i++) {
+ ChanDelay *d = &s->chandelay[i];
+
+ d->delay -= s->padding;
+ }
+ }
+
for (i = 0; i < s->nb_delays; i++) {
ChanDelay *d = &s->chandelay[i];
if (!d->delay)
continue;
+ if (d->delay > SIZE_MAX) {
+ av_log(ctx, AV_LOG_ERROR, "Requested delay is too big.\n");
+ return AVERROR(EINVAL);
+ }
+
d->samples = av_malloc_array(d->delay, s->block_align);
if (!d->samples)
return AVERROR(ENOMEM);
s->delay_channel(d, frame->nb_samples, src, dst);
}
- s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
+ out_frame->pts = s->next_pts;
+ s->next_pts += av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
av_frame_free(&frame);
return ff_filter_frame(ctx->outputs[0], out_frame);
}
-static int request_frame(AVFilterLink *outlink)
+static int activate(AVFilterContext *ctx)
{
- AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
AudioDelayContext *s = ctx->priv;
- int ret;
+ AVFrame *frame = NULL;
+ int ret, status;
+ int64_t pts;
- ret = ff_request_frame(ctx->inputs[0]);
- if (ret == AVERROR_EOF && !ctx->is_disabled && s->max_delay) {
- int nb_samples = FFMIN(s->max_delay, 2048);
- AVFrame *frame;
+ FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
+
+ if (s->padding) {
+ int nb_samples = FFMIN(s->padding, 2048);
frame = ff_get_audio_buffer(outlink, nb_samples);
if (!frame)
return AVERROR(ENOMEM);
- s->max_delay -= nb_samples;
+ s->padding -= nb_samples;
av_samples_set_silence(frame->extended_data, 0,
frame->nb_samples,
if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
- ret = filter_frame(ctx->inputs[0], frame);
+ return ff_filter_frame(outlink, frame);
}
- return ret;
+ ret = ff_inlink_consume_frame(inlink, &frame);
+ if (ret < 0)
+ return ret;
+
+ if (ret > 0)
+ return filter_frame(inlink, frame);
+
+ if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
+ if (status == AVERROR_EOF)
+ s->eof = 1;
+ }
+
+ if (s->eof && s->max_delay) {
+ int nb_samples = FFMIN(s->max_delay, 2048);
+
+ frame = ff_get_audio_buffer(outlink, nb_samples);
+ if (!frame)
+ return AVERROR(ENOMEM);
+ s->max_delay -= nb_samples;
+
+ av_samples_set_silence(frame->extended_data, 0,
+ frame->nb_samples,
+ outlink->channels,
+ frame->format);
+
+ frame->pts = s->next_pts;
+ return filter_frame(inlink, frame);
+ }
+
+ if (s->eof && s->max_delay == 0) {
+ ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
+ return 0;
+ }
+
+ if (!s->eof)
+ FF_FILTER_FORWARD_WANTED(outlink, inlink);
+
+ return FFERROR_NOT_READY;
}
static av_cold void uninit(AVFilterContext *ctx)
{
AudioDelayContext *s = ctx->priv;
- int i;
- for (i = 0; i < s->nb_delays; i++)
- av_freep(&s->chandelay[i].samples);
+ if (s->chandelay) {
+ for (int i = 0; i < s->nb_delays; i++)
+ av_freep(&s->chandelay[i].samples);
+ }
av_freep(&s->chandelay);
}
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_input,
- .filter_frame = filter_frame,
},
{ NULL }
};
static const AVFilterPad adelay_outputs[] = {
{
- .name = "default",
- .request_frame = request_frame,
- .type = AVMEDIA_TYPE_AUDIO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
-AVFilter ff_af_adelay = {
+const AVFilter ff_af_adelay = {
.name = "adelay",
.description = NULL_IF_CONFIG_SMALL("Delay one or more audio channels."),
.query_formats = query_formats,
.priv_size = sizeof(AudioDelayContext),
.priv_class = &adelay_class,
+ .activate = activate,
.uninit = uninit,
.inputs = adelay_inputs,
.outputs = adelay_outputs,