/*
 * Copyright (c) 2007-2008 CSIRO
 * Copyright (c) 2007-2009 Xiph.Org Foundation
 * Copyright (c) 2008-2009 Gregory Maxwell
 * Copyright (c) 2012 Andrew D'Addesio
 * Copyright (c) 2013-2014 Mozilla Corporation
 * Copyright (c) 2017 Rostislav Pehlivanov <atomnuker@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/* Number of PVQ codevector prefixes: U(n, k) from the PVQ codebook size
 * recursion; the table is triangular, hence the FFMIN/FFMAX index ordering. */
#define CELT_PVQ_U(n, k) (ff_celt_pvq_u_row[FFMIN(n, k)][FFMAX(n, k)])
/* Total PVQ codebook size V(n, k) = U(n, k) + U(n, k + 1). */
#define CELT_PVQ_V(n, k) (CELT_PVQ_U(n, k) + CELT_PVQ_U(n, (k) + 1))
/**
 * Fixed-point polynomial approximation of cos() used for the mid/side
 * weights (constants follow the CELT reference implementation).
 * NOTE(review): input looks like a Q14 angle and output Q15 — confirm
 * against opustab/libopus before relying on the exact scaling.
 */
static inline int16_t celt_cos(int16_t x)
{
    x = (MUL16(x, x) + 4096) >> 13;
    x = (32767-x) + ROUND_MUL16(x, (-7651 + ROUND_MUL16(x, (8277 + ROUND_MUL16(-626, x)))));
    return x + 1;
}
/**
 * Fixed-point approximation of log2(isin/icos), used to derive the
 * mid/side bit-allocation delta from the theta split angle.
 * Both inputs are normalized to 15 bits before the polynomial is applied.
 */
static inline int celt_log2tan(int isin, int icos)
{
    int lc, ls;
    lc = opus_ilog(icos);
    ls = opus_ilog(isin);
    icos <<= 15 - lc;
    isin <<= 15 - ls;
    return (ls << 11) - (lc << 11) +
           ROUND_MUL16(isin, ROUND_MUL16(isin, -2597) + 7932) -
           ROUND_MUL16(icos, ROUND_MUL16(icos, -2597) + 7932);
}
/**
 * Binary search over the cached bit-cost table: return the pulse count
 * whose cost is closest to the given bit budget.
 * cache[0] is the number of valid entries; cache[1..] are per-pulse costs.
 */
static inline int celt_bits2pulses(const uint8_t *cache, int bits)
{
    // TODO: Find the size of cache and make it into an array in the parameters list
    int i, low = 0, high;

    high = cache[0];
    bits--;

    /* 6 iterations are enough to cover the whole cache range */
    for (i = 0; i < 6; i++) {
        int center = (low + high + 1) >> 1;
        if (cache[center] >= bits)
            high = center;
        else
            low = center;
    }

    /* Pick whichever neighbor is nearer in cost; 0 pulses costs -1 by convention */
    return (bits - (low == 0 ? -1 : cache[low]) <= cache[high] - bits) ? low : high;
}
/**
 * Inverse of celt_bits2pulses(): the bit cost of coding `pulses` pulses.
 * Zero pulses are free; otherwise look up the cached cost (stored minus one).
 */
static inline int celt_pulses2bits(const uint8_t *cache, int pulses)
{
    // TODO: Find the size of cache and make it into an array in the parameters list
    return (pulses == 0) ? 0 : cache[pulses] + 1;
}
76 static inline void celt_normalize_residual(const int * av_restrict iy, float * av_restrict X,
80 for (i = 0; i < N; i++)
/**
 * Apply a Givens rotation (c, s) pairwise between samples `stride` apart,
 * in a forward pass followed by a backward pass (as in the CELT spreading
 * rotation). Operates in place on X of length len.
 */
static void celt_exp_rotation_impl(float *X, uint32_t len, uint32_t stride,
                                   float c, float s)
{
    float *Xptr;
    int i;

    Xptr = X;
    for (i = 0; i < len - stride; i++) {
        float x1     = Xptr[0];
        float x2     = Xptr[stride];
        Xptr[stride] = c * x2 + s * x1;
        *Xptr++      = c * x1 - s * x2;
    }

    Xptr = &X[len - 2 * stride - 1];
    for (i = len - 2 * stride - 1; i >= 0; i--) {
        float x1     = Xptr[0];
        float x2     = Xptr[stride];
        Xptr[stride] = c * x2 + s * x1;
        *Xptr--      = c * x1 - s * x2;
    }
}
107 static inline void celt_exp_rotation(float *X, uint32_t len,
108 uint32_t stride, uint32_t K,
109 enum CeltSpread spread, const int encode)
111 uint32_t stride2 = 0;
116 if (2*K >= len || spread == CELT_SPREAD_NONE)
119 gain = (float)len / (len + (20 - 5*spread) * K);
120 theta = M_PI * gain * gain / 4;
125 if (len >= stride << 3) {
127 /* This is just a simple (equivalent) way of computing sqrt(len/stride) with rounding.
128 It's basically incrementing long as (stride2+0.5)^2 < len/stride. */
129 while ((stride2 * stride2 + stride2) * stride + (stride >> 2) < len)
134 for (i = 0; i < stride; i++) {
136 celt_exp_rotation_impl(X + i * len, len, 1, c, -s);
138 celt_exp_rotation_impl(X + i * len, len, stride2, s, -c);
141 celt_exp_rotation_impl(X + i * len, len, stride2, s, c);
142 celt_exp_rotation_impl(X + i * len, len, 1, c, s);
/**
 * Build a per-block collapse mask: bit i is set iff interleaved block i
 * received at least one pulse. With a single block the band trivially
 * has energy, so return 1.
 */
static inline uint32_t celt_extract_collapse_mask(const int *iy, uint32_t N, uint32_t B)
{
    int i, j, N0 = N / B;
    uint32_t collapse_mask = 0;

    if (B <= 1)
        return 1;

    for (i = 0; i < B; i++)
        for (j = 0; j < N0; j++)
            collapse_mask |= (!!iy[i*N0+j]) << i;
    return collapse_mask;
}
161 static inline void celt_stereo_merge(float *X, float *Y, float mid, int N)
164 float xp = 0, side = 0;
169 /* Compute the norm of X+Y and X-Y as |X|^2 + |Y|^2 +/- sum(xy) */
170 for (i = 0; i < N; i++) {
175 /* Compensating for the mid normalization */
178 E[0] = mid2 * mid2 + side - 2 * xp;
179 E[1] = mid2 * mid2 + side + 2 * xp;
180 if (E[0] < 6e-4f || E[1] < 6e-4f) {
181 for (i = 0; i < N; i++)
186 gain[0] = 1.0f / sqrtf(E[0]);
187 gain[1] = 1.0f / sqrtf(E[1]);
189 for (i = 0; i < N; i++) {
191 /* Apply mid scaling (side is already scaled) */
192 value[0] = mid * X[i];
194 X[i] = gain[0] * (value[0] - value[1]);
195 Y[i] = gain[1] * (value[0] + value[1]);
199 static void celt_interleave_hadamard(float *tmp, float *X, int N0,
200 int stride, int hadamard)
202 int i, j, N = N0*stride;
203 const uint8_t *order = &ff_celt_hadamard_order[hadamard ? stride - 2 : 30];
205 for (i = 0; i < stride; i++)
206 for (j = 0; j < N0; j++)
207 tmp[j*stride+i] = X[order[i]*N0+j];
209 memcpy(X, tmp, N*sizeof(float));
212 static void celt_deinterleave_hadamard(float *tmp, float *X, int N0,
213 int stride, int hadamard)
215 int i, j, N = N0*stride;
216 const uint8_t *order = &ff_celt_hadamard_order[hadamard ? stride - 2 : 30];
218 for (i = 0; i < stride; i++)
219 for (j = 0; j < N0; j++)
220 tmp[order[i]*N0+j] = X[j*stride+i];
222 memcpy(X, tmp, N*sizeof(float));
/**
 * In-place orthonormal Haar (butterfly) transform on pairs of samples
 * `stride` apart; N0 is the total sample count per lane (halved to get
 * the pair count). Self-inverse, used for time/frequency resolution changes.
 */
static void celt_haar1(float *X, int N0, int stride)
{
    int i, j;
    N0 >>= 1;
    for (i = 0; i < stride; i++) {
        for (j = 0; j < N0; j++) {
            float x0 = X[stride * (2 * j + 0) + i];
            float x1 = X[stride * (2 * j + 1) + i];
            X[stride * (2 * j + 0) + i] = (x0 + x1) * M_SQRT1_2;
            X[stride * (2 * j + 1) + i] = (x0 - x1) * M_SQRT1_2;
        }
    }
}
239 static inline int celt_compute_qn(int N, int b, int offset, int pulse_cap,
244 if (stereo && N == 2)
247 /* The upper limit ensures that in a stereo split with itheta==16384, we'll
248 * always have enough bits left over to code at least one pulse in the
249 * side; otherwise it would collapse, since it doesn't get folded. */
250 qb = FFMIN3(b - pulse_cap - (4 << 3), (b + N2 * offset) / N2, 8 << 3);
251 qn = (qb < (1 << 3 >> 1)) ? 1 : ((ff_celt_qn_exp2[qb & 0x7] >> (14 - (qb >> 3))) + 1) >> 1 << 1;
/* Convert the quantized vector to an index */
static inline uint32_t celt_icwrsi(uint32_t N, uint32_t K, const int *y)
{
    int i, idx = 0, sum = 0;
    /* Accumulate the CWRS index dimension by dimension, adding the extra
     * codeword offset i_s when the pulse in this dimension is negative. */
    for (i = N - 1; i >= 0; i--) {
        const uint32_t i_s = CELT_PVQ_U(N - i, sum + FFABS(y[i]) + 1);
        idx += CELT_PVQ_U(N - i, sum) + (y[i] < 0)*i_s;
        sum += FFABS(y[i]);
    }
    return idx;
}
267 // this code was adapted from libopus
/* NOTE(review): this block is heavily truncated by the extraction — the
 * embedded original line numbers jump (268 -> 276, 320 -> 338, ...), so the
 * declarations, loop headers and the N==2/N==1 tail are missing here.
 * From its caller (celt_decode_pulses divides gain by sqrtf of the return
 * value) this decodes a CWRS index i into the pulse vector y and returns
 * the squared norm of y. Restore the full body from the upstream source
 * before attempting to compile. */
268 static inline uint64_t celt_cwrsi(uint32_t N, uint32_t K, uint32_t i, int *y)
276 /*Lots of pulses case:*/
278 const uint32_t *row = ff_celt_pvq_u_row[N];
280 /* Are the pulses in this dimension negative? */
285 /*Count how many pulses were placed in this dimension.*/
291 p = ff_celt_pvq_u_row[--K][N];
294 for (p = row[K]; p > i; p = row[K])
/* val reconstructs the signed pulse count: (magnitude) ^ sign-mask s */
298 val = (k0 - K + s) ^ s;
301 } else { /*Lots of dimensions case:*/
302 /*Are there any pulses in this dimension at all?*/
303 p = ff_celt_pvq_u_row[K ][N];
304 q = ff_celt_pvq_u_row[K + 1][N];
306 if (p <= i && i < q) {
310 /*Are the pulses in this dimension negative?*/
314 /*Count how many pulses were placed in this dimension.*/
316 do p = ff_celt_pvq_u_row[--K][N];
320 val = (k0 - K + s) ^ s;
/* NOTE(review): lines 321-337 of the original (the N==2 special case) are
 * missing from this extraction. */
338 val = (k0 - K + s) ^ s;
351 static inline void celt_encode_pulses(OpusRangeCoder *rc, int *y, uint32_t N, uint32_t K)
353 ff_opus_rc_enc_uint(rc, celt_icwrsi(N, K, y), CELT_PVQ_V(N, K));
356 static inline float celt_decode_pulses(OpusRangeCoder *rc, int *y, uint32_t N, uint32_t K)
358 const uint32_t idx = ff_opus_rc_dec_uint(rc, CELT_PVQ_V(N, K));
359 return celt_cwrsi(N, K, idx, y);
363 * Faster than libopus's search, operates entirely in the signed domain.
364 * Slightly worse/better depending on N, K and the input vector.
/* NOTE(review): truncated by the extraction — the comment opener, the
 * declarations (y_norm etc.), the projection loop body and the `while`
 * wrapper around the refinement loop are missing (numbering jumps
 * 366 -> 369, 379 -> 384, 397 -> 406). Visible logic: project X onto an
 * integer pulse vector y, then greedily add/remove pulses maximizing
 * xy_norm^2 / y_norm; the return value feeds sqrtf() in celt_alg_quant. */
366 static float ppp_pvq_search_c(float *X, int *y, int K, int N)
369 float res = 0.0f, xy_norm = 0.0f;
371 for (i = 0; i < N; i++)
374 res = K/(res + FLT_EPSILON);
376 for (i = 0; i < N; i++) {
377 y[i] = lrintf(res*X[i]);
379 xy_norm += y[i]*X[i];
/* phase is +1 when pulses still need to be added, -1 when overshot */
384 int max_idx = 0, phase = FFSIGN(K);
385 float max_num = 0.0f;
386 float max_den = 1.0f;
389 for (i = 0; i < N; i++) {
390 /* If the sum has been overshot and the best place has 0 pulses allocated
391 * to it, attempting to decrease it further will actually increase the
392 * sum. Prevent this by disregarding any 0 positions when decrementing. */
393 const int ca = 1 ^ ((y[i] == 0) & (phase < 0));
394 const int y_new = y_norm + 2*phase*FFABS(y[i]);
395 float xy_new = xy_norm + 1*phase*FFABS(X[i]);
396 xy_new = xy_new * xy_new;
/* Cross-multiplied comparison of xy_new^2/y_new against the running best */
397 if (ca && (max_den*xy_new) > (y_new*max_num)) {
406 phase *= FFSIGN(X[max_idx]);
407 xy_norm += 1*phase*X[max_idx];
408 y_norm += 2*phase*y[max_idx];
412 return (float)y_norm;
415 static uint32_t celt_alg_quant(OpusRangeCoder *rc, float *X, uint32_t N, uint32_t K,
416 enum CeltSpread spread, uint32_t blocks, float gain,
419 int *y = pvq->qcoeff;
421 celt_exp_rotation(X, N, blocks, K, spread, 1);
422 gain /= sqrtf(pvq->pvq_search(X, y, K, N));
423 celt_encode_pulses(rc, y, N, K);
424 celt_normalize_residual(y, X, N, gain);
425 celt_exp_rotation(X, N, blocks, K, spread, 0);
426 return celt_extract_collapse_mask(y, N, blocks);
429 /** Decode pulse vector and combine the result with the pitch vector to produce
430 the final normalised signal in the current band. */
431 static uint32_t celt_alg_unquant(OpusRangeCoder *rc, float *X, uint32_t N, uint32_t K,
432 enum CeltSpread spread, uint32_t blocks, float gain,
435 int *y = pvq->qcoeff;
437 gain /= sqrtf(celt_decode_pulses(rc, y, N, K));
438 celt_normalize_residual(y, X, N, gain);
439 celt_exp_rotation(X, N, blocks, K, spread, 0);
440 return celt_extract_collapse_mask(y, N, blocks);
443 static int celt_calc_theta(const float *X, const float *Y, int coupling, int N)
446 float e[2] = { 0.0f, 0.0f };
447 if (coupling) { /* Coupling case */
448 for (i = 0; i < N; i++) {
449 e[0] += (X[i] + Y[i])*(X[i] + Y[i]);
450 e[1] += (X[i] - Y[i])*(X[i] - Y[i]);
453 for (i = 0; i < N; i++) {
458 return lrintf(32768.0f*atan2f(sqrtf(e[1]), sqrtf(e[0]))/M_PI);
461 static void celt_stereo_is_decouple(float *X, float *Y, float e_l, float e_r, int N)
464 const float energy_n = 1.0f/(sqrtf(e_l*e_l + e_r*e_r) + FLT_EPSILON);
467 for (i = 0; i < N; i++)
468 X[i] = e_l*X[i] + e_r*Y[i];
/**
 * Mid/side decoupling: rotate (X, Y) to ((X+Y), (Y-X)) scaled by
 * 1/sqrt(2), i.e. an orthonormal M/S transform done in place.
 */
static void celt_stereo_ms_decouple(float *X, float *Y, int N)
{
    int i;
    for (i = 0; i < N; i++) {
        const float Xret = X[i];
        X[i] = (X[i] + Y[i])*M_SQRT1_2;
        Y[i] = (Y[i] - Xret)*M_SQRT1_2;
    }
}
/* NOTE(review): this function is severely truncated by the extraction — the
 * embedded original numbering jumps constantly (488 -> 492, 519 -> 523,
 * 553 -> 562, 707 -> 719, ...), so declarations, closing braces and whole
 * branches are missing. It is the recursive band quantizer/dequantizer
 * shared by encoder (quant=1) and decoder (quant=0): it handles sign-only
 * N==1 bands, time/frequency recombination, band splitting with a coded
 * theta angle, the PVQ no-split leaf, band folding/noise fill and the
 * resynthesis tail. Restore from upstream before compiling. */
481 static av_always_inline uint32_t quant_band_template(CeltPVQ *pvq, CeltFrame *f,
483 const int band, float *X,
484 float *Y, int N, int b,
485 uint32_t blocks, float *lowband,
486 int duration, float *lowband_out,
487 int level, float gain,
488 float *lowband_scratch,
492 const uint8_t *cache;
493 int stereo = !!Y, split = stereo;
494 int imid = 0, iside = 0;
496 int N_B = N / blocks;
502 float mid = 0, side = 0;
503 int longblocks = (B0 == 1);
/* N == 1: nothing but a sign per channel can be coded */
508 for (i = 0; i <= stereo; i++) {
510 if (f->remaining2 >= 1 << 3) {
513 ff_opus_rc_put_raw(rc, sign, 1);
515 sign = ff_opus_rc_get_raw(rc, 1);
517 f->remaining2 -= 1 << 3;
519 x[0] = 1.0f - 2.0f*sign;
523 lowband_out[0] = X[0];
/* Mono top-level bands: adjust time/frequency resolution first */
527 if (!stereo && level == 0) {
528 int tf_change = f->tf_change[band];
531 recombine = tf_change;
532 /* Band recombining to increase frequency resolution */
535 (recombine || ((N_B & 1) == 0 && tf_change < 0) || B0 > 1)) {
536 for (i = 0; i < N; i++)
537 lowband_scratch[i] = lowband[i];
538 lowband = lowband_scratch;
541 for (k = 0; k < recombine; k++) {
542 if (quant || lowband)
543 celt_haar1(quant ? X : lowband, N >> k, 1 << k);
544 fill = ff_celt_bit_interleave[fill & 0xF] | ff_celt_bit_interleave[fill >> 4] << 2;
546 blocks >>= recombine;
549 /* Increasing the time resolution */
550 while ((N_B & 1) == 0 && tf_change < 0) {
551 if (quant || lowband)
552 celt_haar1(quant ? X : lowband, N_B, blocks);
553 fill |= fill << blocks;
562 /* Reorganize the samples in time order instead of frequency order */
563 if (B0 > 1 && (quant || lowband))
564 celt_deinterleave_hadamard(pvq->hadamard_tmp, quant ? X : lowband,
565 N_B >> recombine, B0 << recombine,
569 /* If we need 1.5 more bit than we can produce, split the band in two. */
570 cache = ff_celt_cache_bits +
571 ff_celt_cache_index[(duration + 1) * CELT_MAX_BANDS + band];
572 if (!stereo && duration >= 0 && b > cache[cache[0]] + 12 && N > 2) {
578 fill = (fill & 1) | (fill << 1);
579 blocks = (blocks + 1) >> 1;
/* Split path: code the mid/side angle theta, then recurse on the halves */
584 int itheta = quant ? celt_calc_theta(X, Y, stereo, N) : 0;
585 int mbits, sbits, delta;
592 /* Decide on the resolution to give to the split parameter theta */
593 pulse_cap = ff_celt_log_freq_range[band] + duration * 8;
594 offset = (pulse_cap >> 1) - (stereo && N == 2 ? CELT_QTHETA_OFFSET_TWOPHASE :
596 qn = (stereo && band >= f->intensity_stereo) ? 1 :
597 celt_compute_qn(N, b, offset, pulse_cap, stereo);
598 tell = opus_rc_tell_frac(rc);
601 itheta = (itheta*qn + 8192) >> 14;
602 /* Entropy coding of the angle. We use a uniform pdf for the
603 * time split, a step for stereo, and a triangular one for the rest. */
606 ff_opus_rc_enc_uint_step(rc, itheta, qn / 2);
607 else if (stereo || B0 > 1)
608 ff_opus_rc_enc_uint(rc, itheta, qn + 1);
610 ff_opus_rc_enc_uint_tri(rc, itheta, qn);
611 itheta = itheta * 16384 / qn;
614 celt_stereo_is_decouple(X, Y, f->block[0].lin_energy[band],
615 f->block[1].lin_energy[band], N);
617 celt_stereo_ms_decouple(X, Y, N);
621 itheta = ff_opus_rc_dec_uint_step(rc, qn / 2);
622 else if (stereo || B0 > 1)
623 itheta = ff_opus_rc_dec_uint(rc, qn+1);
625 itheta = ff_opus_rc_dec_uint_tri(rc, qn);
626 itheta = itheta * 16384 / qn;
632 for (i = 0; i < N; i++)
635 celt_stereo_is_decouple(X, Y, f->block[0].lin_energy[band],
636 f->block[1].lin_energy[band], N);
638 if (b > 2 << 3 && f->remaining2 > 2 << 3) {
639 ff_opus_rc_enc_log(rc, inv, 2);
644 inv = (b > 2 << 3 && f->remaining2 > 2 << 3) ? ff_opus_rc_dec_log(rc, 2) : 0;
645 inv = f->apply_phase_inv ? inv : 0;
649 qalloc = opus_rc_tell_frac(rc) - tell;
656 fill = av_mod_uintp2(fill, blocks);
658 } else if (itheta == 16384) {
661 fill &= ((1 << blocks) - 1) << blocks;
664 imid = celt_cos(itheta);
665 iside = celt_cos(16384-itheta);
666 /* This is the mid vs side allocation that minimizes squared error
668 delta = ROUND_MUL16((N - 1) << 7, celt_log2tan(iside, imid));
671 mid = imid / 32768.0f;
672 side = iside / 32768.0f;
674 /* This is a special case for N=2 that only works for stereo and takes
675 advantage of the fact that mid and side are orthogonal to encode
676 the side with just one bit. */
677 if (N == 2 && stereo) {
683 /* Only need one bit for the side */
684 sbits = (itheta != 0 && itheta != 16384) ? 1 << 3 : 0;
687 f->remaining2 -= qalloc+sbits;
693 sign = x2[0]*y2[1] - x2[1]*y2[0] < 0;
694 ff_opus_rc_put_raw(rc, sign, 1);
696 sign = ff_opus_rc_get_raw(rc, 1);
700 /* We use orig_fill here because we want to fold the side, but if
701 itheta==16384, we'll have cleared the low bits of fill. */
702 cm = pvq->quant_band(pvq, f, rc, band, x2, NULL, N, mbits, blocks, lowband, duration,
703 lowband_out, level, gain, lowband_scratch, orig_fill);
704 /* We don't split N=2 bands, so cm is either 1 or 0 (for a fold-collapse),
705 and there's no need to worry about mixing with the other channel. */
706 y2[0] = -sign * x2[1];
707 y2[1] = sign * x2[0];
719 /* "Normal" split code */
720 float *next_lowband2 = NULL;
721 float *next_lowband_out1 = NULL;
726 /* Give more bits to low-energy MDCTs than they would
727 * otherwise deserve */
728 if (B0 > 1 && !stereo && (itheta & 0x3fff)) {
730 /* Rough approximation for pre-echo masking */
731 delta -= delta >> (4 - duration);
733 /* Corresponds to a forward-masking slope of
734 * 1.5 dB per 10 ms */
735 delta = FFMIN(0, delta + (N << 3 >> (5 - duration)));
737 mbits = av_clip((b - delta) / 2, 0, b);
739 f->remaining2 -= qalloc;
741 if (lowband && !stereo)
742 next_lowband2 = lowband + N; /* >32-bit split case */
744 /* Only stereo needs to pass on lowband_out.
745 * Otherwise, it's handled at the end */
747 next_lowband_out1 = lowband_out;
749 next_level = level + 1;
751 rebalance = f->remaining2;
752 if (mbits >= sbits) {
753 /* In stereo mode, we do not apply a scaling to the mid
754 * because we need the normalized mid for folding later */
755 cm = pvq->quant_band(pvq, f, rc, band, X, NULL, N, mbits, blocks,
756 lowband, duration, next_lowband_out1, next_level,
757 stereo ? 1.0f : (gain * mid), lowband_scratch, fill);
758 rebalance = mbits - (rebalance - f->remaining2);
759 if (rebalance > 3 << 3 && itheta != 0)
760 sbits += rebalance - (3 << 3);
762 /* For a stereo split, the high bits of fill are always zero,
763 * so no folding will be done to the side. */
764 cmt = pvq->quant_band(pvq, f, rc, band, Y, NULL, N, sbits, blocks,
765 next_lowband2, duration, NULL, next_level,
766 gain * side, NULL, fill >> blocks);
767 cm |= cmt << ((B0 >> 1) & (stereo - 1));
769 /* For a stereo split, the high bits of fill are always zero,
770 * so no folding will be done to the side. */
771 cm = pvq->quant_band(pvq, f, rc, band, Y, NULL, N, sbits, blocks,
772 next_lowband2, duration, NULL, next_level,
773 gain * side, NULL, fill >> blocks);
774 cm <<= ((B0 >> 1) & (stereo - 1));
775 rebalance = sbits - (rebalance - f->remaining2);
776 if (rebalance > 3 << 3 && itheta != 16384)
777 mbits += rebalance - (3 << 3);
779 /* In stereo mode, we do not apply a scaling to the mid because
780 * we need the normalized mid for folding later */
781 cm |= pvq->quant_band(pvq, f, rc, band, X, NULL, N, mbits, blocks,
782 lowband, duration, next_lowband_out1, next_level,
783 stereo ? 1.0f : (gain * mid), lowband_scratch, fill);
787 /* This is the basic no-split case */
788 uint32_t q = celt_bits2pulses(cache, b);
789 uint32_t curr_bits = celt_pulses2bits(cache, q);
790 f->remaining2 -= curr_bits;
792 /* Ensures we can never bust the budget */
793 while (f->remaining2 < 0 && q > 0) {
794 f->remaining2 += curr_bits;
795 curr_bits = celt_pulses2bits(cache, --q);
796 f->remaining2 -= curr_bits;
800 /* Finally do the actual (de)quantization */
802 cm = celt_alg_quant(rc, X, N, (q < 8) ? q : (8 + (q & 7)) << ((q >> 3) - 1),
803 f->spread, blocks, gain, pvq);
805 cm = celt_alg_unquant(rc, X, N, (q < 8) ? q : (8 + (q & 7)) << ((q >> 3) - 1),
806 f->spread, blocks, gain, pvq);
809 /* If there's no pulse, fill the band anyway */
810 uint32_t cm_mask = (1 << blocks) - 1;
815 for (i = 0; i < N; i++)
816 X[i] = (((int32_t)celt_rng(f)) >> 20);
819 /* Folded spectrum */
820 for (i = 0; i < N; i++) {
821 /* About 48 dB below the "normal" folding level */
822 X[i] = lowband[i] + (((celt_rng(f)) & 0x8000) ? 1.0f / 256 : -1.0f / 256);
826 celt_renormalize_vector(X, N, gain);
828 memset(X, 0, N*sizeof(float));
833 /* This code is used by the decoder and by the resynthesis-enabled encoder */
836 celt_stereo_merge(X, Y, mid, N);
838 for (i = 0; i < N; i++)
841 } else if (level == 0) {
844 /* Undo the sample reorganization going from time order to frequency order */
846 celt_interleave_hadamard(pvq->hadamard_tmp, X, N_B >> recombine,
847 B0 << recombine, longblocks);
849 /* Undo time-freq changes that we did earlier */
852 for (k = 0; k < time_divide; k++) {
856 celt_haar1(X, N_B, blocks);
859 for (k = 0; k < recombine; k++) {
860 cm = ff_celt_bit_deinterleave[cm];
861 celt_haar1(X, N0>>k, 1<<k);
863 blocks <<= recombine;
865 /* Scale output for later folding */
868 for (i = 0; i < N0; i++)
869 lowband_out[i] = n * X[i];
871 cm = av_mod_uintp2(cm, blocks);
/* Decoder entry point: quant_band_template with quant=0.
 * NOTE(review): the #else return value assumes the stub is never called
 * when the decoder is not configured — confirm against upstream. */
static QUANT_FN(pvq_decode_band)
{
#if CONFIG_OPUS_DECODER
    return quant_band_template(pvq, f, rc, band, X, Y, N, b, blocks, lowband, duration,
                               lowband_out, level, gain, lowband_scratch, fill, 0);
#else
    return 0;
#endif
}
/* Encoder entry point: quant_band_template with quant=1.
 * NOTE(review): the #else return value assumes the stub is never called
 * when the encoder is not configured — confirm against upstream. */
static QUANT_FN(pvq_encode_band)
{
#if CONFIG_OPUS_ENCODER
    return quant_band_template(pvq, f, rc, band, X, Y, N, b, blocks, lowband, duration,
                               lowband_out, level, gain, lowband_scratch, fill, 1);
#else
    return 0;
#endif
}
897 int av_cold ff_celt_pvq_init(CeltPVQ **pvq, int encode)
899 CeltPVQ *s = av_malloc(sizeof(CeltPVQ));
901 return AVERROR(ENOMEM);
903 s->pvq_search = ppp_pvq_search_c;
904 s->quant_band = encode ? pvq_encode_band : pvq_decode_band;
906 if (CONFIG_OPUS_ENCODER && ARCH_X86)
907 ff_celt_pvq_init_x86(s);
914 void av_cold ff_celt_pvq_uninit(CeltPVQ **pvq)