2 * Copyright (c) 2013-2014 Mozilla Corporation
3 * Copyright (c) 2017 Rostislav Pehlivanov <atomnuker@gmail.com>
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 * Celt non-power of 2 iMDCT
33 #include "libavutil/attributes.h"
34 #include "libavutil/common.h"
/* Complex multiply into separate real/imaginary lvalues:
 * (cre + i*cim) = (are + i*aim) * (bre + i*bim).
 * Wrapped in do { } while (0) so it expands as a single statement and is
 * safe inside unbraced if/else bodies. */
#define CMUL3(cre, cim, are, aim, bre, bim) \
do {                                        \
    cre = are * bre - aim * bim;            \
    cim = are * bim + aim * bre;            \
} while (0)

/* Complex multiply on .re/.im structs: c = a * b */
#define CMUL(c, a, b) CMUL3((c).re, (c).im, (a).re, (a).im, (b).re, (b).im)
48 av_cold void ff_mdct15_uninit(MDCT15Context **ps)
50 MDCT15Context *s = *ps;
55 ff_fft_end(&s->ptwo_fft);
57 av_freep(&s->pfa_prereindex);
58 av_freep(&s->pfa_postreindex);
59 av_freep(&s->twiddle_exptab);
65 static void mdct15(MDCT15Context *s, float *dst, const float *src, ptrdiff_t stride);
67 static void imdct15_half(MDCT15Context *s, float *dst, const float *src,
68 ptrdiff_t stride, float scale);
70 static inline int init_pfa_reindex_tabs(MDCT15Context *s)
73 const int b_ptwo = s->ptwo_fft.nbits; /* Bits for the power of two FFTs */
74 const int l_ptwo = 1 << b_ptwo; /* Total length for the power of two FFTs */
75 const int inv_1 = l_ptwo << ((4 - b_ptwo) & 3); /* (2^b_ptwo)^-1 mod 15 */
76 const int inv_2 = 0xeeeeeeef & ((1U << b_ptwo) - 1); /* 15^-1 mod 2^b_ptwo */
78 s->pfa_prereindex = av_malloc(15 * l_ptwo * sizeof(*s->pfa_prereindex));
79 if (!s->pfa_prereindex)
82 s->pfa_postreindex = av_malloc(15 * l_ptwo * sizeof(*s->pfa_postreindex));
83 if (!s->pfa_postreindex)
86 /* Pre/Post-reindex */
87 for (i = 0; i < l_ptwo; i++) {
88 for (j = 0; j < 15; j++) {
89 const int q_pre = ((l_ptwo * j)/15 + i) >> b_ptwo;
90 const int q_post = (((j*inv_1)/15) + (i*inv_2)) >> b_ptwo;
91 const int k_pre = 15*i + ((j - q_pre*15) << b_ptwo);
92 const int k_post = i*inv_2*15 + j*inv_1 - 15*q_post*l_ptwo;
93 s->pfa_prereindex[i*15 + j] = k_pre;
94 s->pfa_postreindex[k_post] = l_ptwo*j + i;
101 av_cold int ff_mdct15_init(MDCT15Context **ps, int inverse, int N, double scale)
105 int len2 = 15 * (1 << N);
109 /* Tested and verified to work on everything in between */
110 if ((N < 2) || (N > 13))
111 return AVERROR(EINVAL);
113 s = av_mallocz(sizeof(*s));
115 return AVERROR(ENOMEM);
120 s->inverse = inverse;
122 s->imdct_half = imdct15_half;
124 if (ff_fft_init(&s->ptwo_fft, N - 1, s->inverse) < 0)
127 if (init_pfa_reindex_tabs(s))
130 s->tmp = av_malloc_array(len, 2 * sizeof(*s->tmp));
134 s->twiddle_exptab = av_malloc_array(s->len4, sizeof(*s->twiddle_exptab));
135 if (!s->twiddle_exptab)
138 theta = 0.125f + (scale < 0 ? s->len4 : 0);
139 scale = sqrt(fabs(scale));
140 for (i = 0; i < s->len4; i++) {
141 alpha = 2 * M_PI * (i + theta) / len;
142 s->twiddle_exptab[i].re = cos(alpha) * scale;
143 s->twiddle_exptab[i].im = sin(alpha) * scale;
146 /* 15-point FFT exptab */
147 for (i = 0; i < 19; i++) {
149 double theta = (2.0f * M_PI * i) / 15.0f;
152 s->exptab[i].re = cos(theta);
153 s->exptab[i].im = sin(theta);
154 } else { /* Wrap around to simplify fft15 */
155 s->exptab[i] = s->exptab[i - 15];
159 /* 5-point FFT exptab */
160 s->exptab[19].re = cos(2.0f * M_PI / 5.0f);
161 s->exptab[19].im = sin(2.0f * M_PI / 5.0f);
162 s->exptab[20].re = cos(1.0f * M_PI / 5.0f);
163 s->exptab[20].im = sin(1.0f * M_PI / 5.0f);
165 /* Invert the phase for an inverse transform, do nothing for a forward transform */
167 s->exptab[19].im *= -1;
168 s->exptab[20].im *= -1;
176 ff_mdct15_uninit(&s);
177 return AVERROR(ENOMEM);
180 /* Stride is hardcoded to 3 */
181 static inline void fft5(const FFTComplex exptab[2], FFTComplex *out,
182 const FFTComplex *in)
184 FFTComplex z0[4], t[6];
186 t[0].re = in[3].re + in[12].re;
187 t[0].im = in[3].im + in[12].im;
188 t[1].im = in[3].re - in[12].re;
189 t[1].re = in[3].im - in[12].im;
190 t[2].re = in[6].re + in[ 9].re;
191 t[2].im = in[6].im + in[ 9].im;
192 t[3].im = in[6].re - in[ 9].re;
193 t[3].re = in[6].im - in[ 9].im;
195 out[0].re = in[0].re + in[3].re + in[6].re + in[9].re + in[12].re;
196 out[0].im = in[0].im + in[3].im + in[6].im + in[9].im + in[12].im;
198 t[4].re = exptab[0].re * t[2].re - exptab[1].re * t[0].re;
199 t[4].im = exptab[0].re * t[2].im - exptab[1].re * t[0].im;
200 t[0].re = exptab[0].re * t[0].re - exptab[1].re * t[2].re;
201 t[0].im = exptab[0].re * t[0].im - exptab[1].re * t[2].im;
202 t[5].re = exptab[0].im * t[3].re - exptab[1].im * t[1].re;
203 t[5].im = exptab[0].im * t[3].im - exptab[1].im * t[1].im;
204 t[1].re = exptab[0].im * t[1].re + exptab[1].im * t[3].re;
205 t[1].im = exptab[0].im * t[1].im + exptab[1].im * t[3].im;
207 z0[0].re = t[0].re - t[1].re;
208 z0[0].im = t[0].im - t[1].im;
209 z0[1].re = t[4].re + t[5].re;
210 z0[1].im = t[4].im + t[5].im;
212 z0[2].re = t[4].re - t[5].re;
213 z0[2].im = t[4].im - t[5].im;
214 z0[3].re = t[0].re + t[1].re;
215 z0[3].im = t[0].im + t[1].im;
217 out[1].re = in[0].re + z0[3].re;
218 out[1].im = in[0].im + z0[0].im;
219 out[2].re = in[0].re + z0[2].re;
220 out[2].im = in[0].im + z0[1].im;
221 out[3].re = in[0].re + z0[1].re;
222 out[3].im = in[0].im + z0[2].im;
223 out[4].re = in[0].re + z0[0].re;
224 out[4].im = in[0].im + z0[3].im;
227 static void fft15(const FFTComplex exptab[22], FFTComplex *out, const FFTComplex *in, size_t stride)
230 FFTComplex tmp1[5], tmp2[5], tmp3[5];
232 fft5(exptab + 19, tmp1, in + 0);
233 fft5(exptab + 19, tmp2, in + 1);
234 fft5(exptab + 19, tmp3, in + 2);
236 for (k = 0; k < 5; k++) {
239 CMUL(t[0], tmp2[k], exptab[k]);
240 CMUL(t[1], tmp3[k], exptab[2 * k]);
241 out[stride*k].re = tmp1[k].re + t[0].re + t[1].re;
242 out[stride*k].im = tmp1[k].im + t[0].im + t[1].im;
244 CMUL(t[0], tmp2[k], exptab[k + 5]);
245 CMUL(t[1], tmp3[k], exptab[2 * (k + 5)]);
246 out[stride*(k + 5)].re = tmp1[k].re + t[0].re + t[1].re;
247 out[stride*(k + 5)].im = tmp1[k].im + t[0].im + t[1].im;
249 CMUL(t[0], tmp2[k], exptab[k + 10]);
250 CMUL(t[1], tmp3[k], exptab[2 * k + 5]);
251 out[stride*(k + 10)].re = tmp1[k].re + t[0].re + t[1].re;
252 out[stride*(k + 10)].im = tmp1[k].im + t[0].im + t[1].im;
256 static void mdct15(MDCT15Context *s, float *dst, const float *src, ptrdiff_t stride)
259 const int len4 = s->len4, len3 = len4 * 3, len8 = len4 >> 1;
260 const int l_ptwo = 1 << s->ptwo_fft.nbits;
261 FFTComplex fft15in[15];
263 /* Folding and pre-reindexing */
264 for (i = 0; i < l_ptwo; i++) {
265 for (j = 0; j < 15; j++) {
267 const int k = s->pfa_prereindex[i*15 + j];
269 re = -src[2*k+len3] - src[len3-1-2*k];
270 im = -src[len4+2*k] + src[len4-1-2*k];
272 re = src[2*k-len4] - src[1*len3-1-2*k];
273 im = -src[2*k+len4] - src[5*len4-1-2*k];
275 CMUL3(fft15in[j].re, fft15in[j].im, re, im, s->twiddle_exptab[k].re, -s->twiddle_exptab[k].im);
277 fft15(s->exptab, s->tmp + s->ptwo_fft.revtab[i], fft15in, l_ptwo);
280 /* Then a 15xN FFT (where N is a power of two) */
281 for (i = 0; i < 15; i++)
282 s->ptwo_fft.fft_calc(&s->ptwo_fft, s->tmp + l_ptwo*i);
284 /* Reindex again, apply twiddles and output */
285 for (i = 0; i < len8; i++) {
286 float re0, im0, re1, im1;
287 const int i0 = len8 + i, i1 = len8 - i - 1;
288 const int s0 = s->pfa_postreindex[i0], s1 = s->pfa_postreindex[i1];
290 CMUL3(im1, re0, s->tmp[s1].re, s->tmp[s1].im, s->twiddle_exptab[i1].im, s->twiddle_exptab[i1].re);
291 CMUL3(im0, re1, s->tmp[s0].re, s->tmp[s0].im, s->twiddle_exptab[i0].im, s->twiddle_exptab[i0].re);
293 dst[2*i1*stride ] = re0;
294 dst[2*i1*stride + stride] = im0;
295 dst[2*i0*stride ] = re1;
296 dst[2*i0*stride + stride] = im1;
300 static void imdct15_half(MDCT15Context *s, float *dst, const float *src,
301 ptrdiff_t stride, float scale)
303 FFTComplex fft15in[15];
304 FFTComplex *z = (FFTComplex *)dst;
305 int i, j, len8 = s->len4 >> 1, l_ptwo = 1 << s->ptwo_fft.nbits;
306 const float *in1 = src, *in2 = src + (s->len2 - 1) * stride;
308 /* Reindex input, putting it into a buffer and doing an Nx15 FFT */
309 for (i = 0; i < l_ptwo; i++) {
310 for (j = 0; j < 15; j++) {
311 const int k = s->pfa_prereindex[i*15 + j];
312 FFTComplex tmp = { *(in2 - 2*k*stride), *(in1 + 2*k*stride) };
313 CMUL(fft15in[j], tmp, s->twiddle_exptab[k]);
315 fft15(s->exptab, s->tmp + s->ptwo_fft.revtab[i], fft15in, l_ptwo);
318 /* Then a 15xN FFT (where N is a power of two) */
319 for (i = 0; i < 15; i++)
320 s->ptwo_fft.fft_calc(&s->ptwo_fft, s->tmp + l_ptwo*i);
322 /* Reindex again, apply twiddles and output */
323 for (i = 0; i < len8; i++) {
324 float re0, im0, re1, im1;
325 const int i0 = len8 + i, i1 = len8 - i - 1;
326 const int s0 = s->pfa_postreindex[i0], s1 = s->pfa_postreindex[i1];
328 CMUL3(re0, im1, s->tmp[s1].im, s->tmp[s1].re, s->twiddle_exptab[i1].im, s->twiddle_exptab[i1].re);
329 CMUL3(re1, im0, s->tmp[s0].im, s->tmp[s0].re, s->twiddle_exptab[i0].im, s->twiddle_exptab[i0].re);
330 z[i1].re = scale * re0;
331 z[i1].im = scale * im0;
332 z[i0].re = scale * re1;
333 z[i0].im = scale * im1;