#include "libavutil/intmath.h"
#include "libavutil/opt.h"
#include "avfilter.h"
+#include "filters.h"
#include "internal.h"
#include "audio.h"
int buffer_length; /* is: longest IR plus max. delay in all SOFA files */
/* then choose next power of 2 */
int n_fft; /* number of samples in one FFT block */
+ int nb_samples;
/* netCDF variables */
int *delay[2]; /* broadband delay for each channel/IR to be convolved */
return 0;
}
-static int parse_channel_name(char **arg, int *rchannel, char *buf)
+static int parse_channel_name(AVFilterContext *ctx, char **arg, int *rchannel)
{
int len, i, channel_id = 0;
int64_t layout, layout0;
+ char buf[8] = {0};
/* try to parse a channel name, e.g. "FL" */
if (av_sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
}
}
/* reject layouts that are not a single channel */
- if (channel_id >= 64 || layout0 != 1LL << channel_id)
+ if (channel_id >= 64 || layout0 != 1LL << channel_id) {
+ av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", buf);
+ return AVERROR(EINVAL);
+ }
+ *rchannel = channel_id;
+ *arg += len;
+ return 0;
+ } else if (av_sscanf(*arg, "%d%n", &channel_id, &len) == 1) {
+ if (channel_id < 0 || channel_id >= 64) {
+ av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%d\' as channel number.\n", channel_id);
return AVERROR(EINVAL);
+ }
*rchannel = channel_id;
*arg += len;
return 0;
p = args;
while ((arg = av_strtok(p, "|", &tokenizer))) {
- char buf[8];
float azim, elev;
int out_ch_id;
p = NULL;
- if (parse_channel_name(&arg, &out_ch_id, buf)) {
- av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", buf);
+ if (parse_channel_name(ctx, &arg, &out_ch_id)) {
continue;
}
if (av_sscanf(arg, "%f %f", &azim, &elev) == 2) {
{
struct SOFAlizerContext *s = ctx->priv;
uint64_t channels_layout = ctx->inputs[0]->channel_layout;
- float azim[16] = { 0 };
- float elev[16] = { 0 };
+ float azim[64] = { 0 };
+ float elev[64] = { 0 };
int m, ch, n_conv = ctx->inputs[0]->channels; /* get no. input channels */
- if (n_conv > 16)
+ if (n_conv < 0 || n_conv > 64)
return AVERROR(EINVAL);
s->lfe_channel = -1;
const int buffer_length = s->buffer_length;
/* -1 for AND instead of MODULO (applied to powers of 2): */
const uint32_t modulo = (uint32_t)buffer_length - 1;
- float *buffer[16]; /* holds ringbuffer for each input channel */
+ float *buffer[64]; /* holds ringbuffer for each input channel */
int wr = *write;
int read;
int i, l;
/* current read position in ringbuffer: input sample write position
* - delay for l-th ch. + diff. betw. IR length and buffer length
* (mod buffer length) */
- read = (wr - delay[l] - (n_samples - 1) + buffer_length) & modulo;
+ read = (wr - delay[l] - (ir_samples - 1) + buffer_length) & modulo;
- if (read + n_samples < buffer_length) {
- memmove(temp_src, bptr + read, n_samples * sizeof(*temp_src));
+ if (read + ir_samples < buffer_length) {
+ memmove(temp_src, bptr + read, ir_samples * sizeof(*temp_src));
} else {
- int len = FFMIN(n_samples - (read % n_samples), buffer_length - read);
+ int len = FFMIN(n_samples - (read % ir_samples), buffer_length - read);
memmove(temp_src, bptr + read, len * sizeof(*temp_src));
memmove(temp_src + len, bptr, (n_samples - len) * sizeof(*temp_src));
FFTComplex *hrtf = s->data_hrtf[jobnr]; /* get pointers to current HRTF data */
int *n_clippings = &td->n_clippings[jobnr];
float *ringbuffer = td->ringbuffer[jobnr];
- const int n_samples = s->sofa.n_samples; /* length of one IR */
+ const int ir_samples = s->sofa.ir_samples; /* length of one IR */
const int planar = in->format == AV_SAMPLE_FMT_FLTP;
const int mult = 1 + !planar;
float *dst = (float *)out->extended_data[jobnr * planar]; /* get pointer to audio output buffer */
/* find minimum between number of samples and output buffer length:
* (important, if one IR is longer than the output buffer) */
- n_read = FFMIN(s->sofa.n_samples, in->nb_samples);
+ n_read = FFMIN(ir_samples, in->nb_samples);
for (j = 0; j < n_read; j++) {
/* initialize output buf with saved signal from overflow buf */
dst[mult * j] = ringbuffer[wr];
dst[mult * j] += fft_acc[j].re * fft_scale;
}
- for (j = 0; j < n_samples - 1; j++) { /* overflow length is IR length - 1 */
+ for (j = 0; j < ir_samples - 1; j++) { /* overflow length is IR length - 1 */
/* write the rest of output signal to overflow buffer */
int write_pos = (wr + j) & modulo;
if (s->type == TIME_DOMAIN) {
ctx->internal->execute(ctx, sofalizer_convolute, &td, NULL, 2);
- } else {
+ } else if (s->type == FREQUENCY_DOMAIN) {
ctx->internal->execute(ctx, sofalizer_fast_convolute, &td, NULL, 2);
}
emms_c();
return ff_filter_frame(outlink, out);
}
+/* Activate callback for the sofalizer filter: pulls audio from the input
+ * link, runs it through filter_frame(), and forwards EOF/error status and
+ * frame-want requests between the input and output links. */
+static int activate(AVFilterContext *ctx)
+{
+    AVFilterLink *inlink = ctx->inputs[0];
+    AVFilterLink *outlink = ctx->outputs[0];
+    SOFAlizerContext *s = ctx->priv;
+    AVFrame *in;
+    int ret;
+
+    /* Propagate a status (e.g. EOF) set on the output back to the input. */
+    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
+
+    /* s->nb_samples is nonzero only in frequency-domain mode (set to
+     * s->framesize in config_input), where fixed-size blocks are required;
+     * otherwise consume whatever frame is queued on the input. */
+    if (s->nb_samples)
+        ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
+    else
+        ret = ff_inlink_consume_frame(inlink, &in);
+    if (ret < 0)
+        return ret;
+    if (ret > 0)
+        return filter_frame(inlink, in);
+
+    /* No frame available: forward input status downstream, and if the
+     * output still wants data, request more from upstream. */
+    FF_FILTER_FORWARD_STATUS(inlink, outlink);
+    FF_FILTER_FORWARD_WANTED(outlink, inlink);
+
+    return FFERROR_NOT_READY;
+}
+
static int query_formats(AVFilterContext *ctx)
{
struct SOFAlizerContext *s = ctx->priv;
if (!layouts)
return AVERROR(ENOMEM);
- ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->out_channel_layouts);
+ ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->outcfg.channel_layouts);
if (ret)
return ret;
if (ret)
return ret;
- ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
+ ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->incfg.channel_layouts);
if (ret)
return ret;
n_samples = s->sofa.n_samples;
ir_samples = s->sofa.ir_samples;
- s->data_ir[0] = av_calloc(n_samples, sizeof(float) * s->n_conv);
- s->data_ir[1] = av_calloc(n_samples, sizeof(float) * s->n_conv);
+ if (s->type == TIME_DOMAIN) {
+ s->data_ir[0] = av_calloc(n_samples, sizeof(float) * s->n_conv);
+ s->data_ir[1] = av_calloc(n_samples, sizeof(float) * s->n_conv);
+
+ if (!s->data_ir[0] || !s->data_ir[1]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+
s->delay[0] = av_calloc(s->n_conv, sizeof(int));
s->delay[1] = av_calloc(s->n_conv, sizeof(int));
- if (!s->data_ir[0] || !s->data_ir[1] || !s->delay[0] || !s->delay[1]) {
+ if (!s->delay[0] || !s->delay[1]) {
ret = AVERROR(ENOMEM);
goto fail;
}
if (s->type == FREQUENCY_DOMAIN) {
av_fft_end(s->fft[0]);
av_fft_end(s->fft[1]);
- s->fft[0] = av_fft_init(log2(s->n_fft), 0);
- s->fft[1] = av_fft_init(log2(s->n_fft), 0);
+ s->fft[0] = av_fft_init(av_log2(s->n_fft), 0);
+ s->fft[1] = av_fft_init(av_log2(s->n_fft), 0);
av_fft_end(s->ifft[0]);
av_fft_end(s->ifft[1]);
- s->ifft[0] = av_fft_init(log2(s->n_fft), 1);
- s->ifft[1] = av_fft_init(log2(s->n_fft), 1);
+ s->ifft[0] = av_fft_init(av_log2(s->n_fft), 1);
+ s->ifft[1] = av_fft_init(av_log2(s->n_fft), 1);
if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
if (s->type == TIME_DOMAIN) {
s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
- } else {
+ } else if (s->type == FREQUENCY_DOMAIN) {
/* get temporary HRTF memory for L and R channel */
data_hrtf_l = av_malloc_array(n_fft, sizeof(*data_hrtf_l) * n_conv);
data_hrtf_r = av_malloc_array(n_fft, sizeof(*data_hrtf_r) * n_conv);
s->data_ir[0][offset + j] = lir[ir_samples - 1 - j] * gain_lin;
s->data_ir[1][offset + j] = rir[ir_samples - 1 - j] * gain_lin;
}
- } else {
+ } else if (s->type == FREQUENCY_DOMAIN) {
memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));
SOFAlizerContext *s = ctx->priv;
int ret;
- if (s->type == FREQUENCY_DOMAIN) {
- inlink->partial_buf_size =
- inlink->min_samples =
- inlink->max_samples = s->framesize;
- }
+ if (s->type == FREQUENCY_DOMAIN)
+ s->nb_samples = s->framesize;
/* gain -3 dB per channel */
s->gain_lfe = expf((s->gain - 3 * inlink->channels + s->lfe_gain) / 20 * M_LN10);
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_input,
- .filter_frame = filter_frame,
},
{ NULL }
};
{ NULL }
};
-AVFilter ff_af_sofalizer = {
+const AVFilter ff_af_sofalizer = {
.name = "sofalizer",
.description = NULL_IF_CONFIG_SMALL("SOFAlizer (Spatially Oriented Format for Acoustics)."),
.priv_size = sizeof(SOFAlizerContext),
.priv_class = &sofalizer_class,
.init = init,
+ .activate = activate,
.uninit = uninit,
.query_formats = query_formats,
.inputs = inputs,