2 * G.723.1 compatible decoder
3 * Copyright (c) 2006 Benjamin Larsson
4 * Copyright (c) 2010 Mohamed Naufal Basheer
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * G.723.1 compatible decoder
28 #include "libavutil/channel_layout.h"
29 #include "libavutil/mem.h"
30 #include "libavutil/opt.h"
32 #define BITSTREAM_READER_LE
33 #include "acelp_vectors.h"
35 #include "celp_filters.h"
36 #include "celp_math.h"
41 #define CNG_RANDOM_SEED 12345
/**
 * Decoder init: select planar S16 output, validate the channel count
 * (mono/stereo only) and seed each channel's LSP and comfort-noise state
 * before the first frame.
 * NOTE(review): some lines of this function were elided in this view.
 */
43 static av_cold int g723_1_decode_init(AVCodecContext *avctx)
45 G723_1_Context *s = avctx->priv_data;
/* One int16 plane per channel. */
47 avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
48 if (avctx->channels < 1 || avctx->channels > 2) {
49 av_log(avctx, AV_LOG_ERROR, "Only mono and stereo are supported (requested channels: %d).\n", avctx->channels);
50 return AVERROR(EINVAL);
52 avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
/* Start every channel from the DC LSP table and a fixed CNG seed so the
 * first SID/untransmitted frame produces deterministic comfort noise. */
53 for (int ch = 0; ch < avctx->channels; ch++) {
54 G723_1_ChannelContext *p = &s->ch[ch];
58 memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
59 memcpy(p->sid_lsp, dc_lsp, LPC_ORDER * sizeof(*p->sid_lsp));
61 p->cng_random_seed = CNG_RANDOM_SEED;
/* Pretend the previous frame was SID so the first frame's past-type
 * dependent paths behave as after a silence period. */
62 p->past_frame_type = SID_FRAME;
69 * Unpack the frame into parameters.
71 * @param p the context
72 * @param buf pointer to the input buffer
73 * @param buf_size size of the input buffer
/**
 * Unpack one frame of the little-endian bitstream into the channel context:
 * frame type/rate, LSP indices, pitch lags, per-subframe gains, grid indices
 * and fixed-codebook pulse positions/signs for both the 6.3k and 5.3k rates.
 * NOTE(review): several lines (returns on forbidden codes, some branch
 * bodies) were elided in this view; comments describe only the visible code.
 */
75 static int unpack_bitstream(G723_1_ChannelContext *p, const uint8_t *buf,
80 int temp, info_bits, i;
82 init_get_bits(&gb, buf, buf_size * 8);
84 /* Extract frame type and rate info */
85 info_bits = get_bits(&gb, 2);
88 p->cur_frame_type = UNTRANSMITTED_FRAME;
92 /* Extract 24 bit lsp indices, 8 bit for each band */
93 p->lsp_index[2] = get_bits(&gb, 8);
94 p->lsp_index[1] = get_bits(&gb, 8);
95 p->lsp_index[0] = get_bits(&gb, 8);
/* SID (comfort noise) frames carry only a 6-bit gain besides the LSPs. */
98 p->cur_frame_type = SID_FRAME;
99 p->subframe[0].amp_index = get_bits(&gb, 6);
103 /* Extract the info common to both rates */
104 p->cur_rate = info_bits ? RATE_5300 : RATE_6300;
105 p->cur_frame_type = ACTIVE_FRAME;
/* Pitch lags are transmitted for subframes 0 and 2; subframes 1 and 3
 * encode a 2-bit delta via ad_cb_lag. */
107 p->pitch_lag[0] = get_bits(&gb, 7);
108 if (p->pitch_lag[0] > 123) /* test if forbidden code */
110 p->pitch_lag[0] += PITCH_MIN;
111 p->subframe[1].ad_cb_lag = get_bits(&gb, 2);
113 p->pitch_lag[1] = get_bits(&gb, 7);
114 if (p->pitch_lag[1] > 123)
116 p->pitch_lag[1] += PITCH_MIN;
117 p->subframe[3].ad_cb_lag = get_bits(&gb, 2);
118 p->subframe[0].ad_cb_lag = 1;
119 p->subframe[2].ad_cb_lag = 1;
121 for (i = 0; i < SUBFRAMES; i++) {
122 /* Extract combined gain */
123 temp = get_bits(&gb, 12);
125 p->subframe[i].dirac_train = 0;
/* At 6.3k with a short pitch lag, the top bit of the combined gain
 * selects the Dirac-train excitation mode. */
126 if (p->cur_rate == RATE_6300 && p->pitch_lag[i >> 1] < SUBFRAME_LEN - 2) {
127 p->subframe[i].dirac_train = temp >> 11;
/* Split the combined index into adaptive-codebook gain and amplitude. */
131 p->subframe[i].ad_cb_gain = FASTDIV(temp, GAIN_LEVELS);
132 if (p->subframe[i].ad_cb_gain < ad_cb_len) {
133 p->subframe[i].amp_index = temp - p->subframe[i].ad_cb_gain *
140 p->subframe[0].grid_index = get_bits1(&gb);
141 p->subframe[1].grid_index = get_bits1(&gb);
142 p->subframe[2].grid_index = get_bits1(&gb);
143 p->subframe[3].grid_index = get_bits1(&gb);
145 if (p->cur_rate == RATE_6300) {
146 skip_bits1(&gb); /* skip reserved bit */
148 /* Compute pulse_pos index using the 13-bit combined position index */
/* The 13-bit value packs the high parts of all four subframes' pulse
 * positions in mixed radix (810 = 9*90, 90 = 9*10, base 9). */
149 temp = get_bits(&gb, 13);
150 p->subframe[0].pulse_pos = temp / 810;
152 temp -= p->subframe[0].pulse_pos * 810;
153 p->subframe[1].pulse_pos = FASTDIV(temp, 90);
155 temp -= p->subframe[1].pulse_pos * 90;
156 p->subframe[2].pulse_pos = FASTDIV(temp, 9);
157 p->subframe[3].pulse_pos = temp - p->subframe[2].pulse_pos * 9;
/* Combine the high parts with the remaining per-subframe position bits
 * (16 bits for even subframes, 14 for odd ones). */
159 p->subframe[0].pulse_pos = (p->subframe[0].pulse_pos << 16) +
161 p->subframe[1].pulse_pos = (p->subframe[1].pulse_pos << 14) +
163 p->subframe[2].pulse_pos = (p->subframe[2].pulse_pos << 16) +
165 p->subframe[3].pulse_pos = (p->subframe[3].pulse_pos << 14) +
168 p->subframe[0].pulse_sign = get_bits(&gb, 6);
169 p->subframe[1].pulse_sign = get_bits(&gb, 5);
170 p->subframe[2].pulse_sign = get_bits(&gb, 6);
171 p->subframe[3].pulse_sign = get_bits(&gb, 5);
172 } else { /* 5300 bps */
173 p->subframe[0].pulse_pos = get_bits(&gb, 12);
174 p->subframe[1].pulse_pos = get_bits(&gb, 12);
175 p->subframe[2].pulse_pos = get_bits(&gb, 12);
176 p->subframe[3].pulse_pos = get_bits(&gb, 12);
178 p->subframe[0].pulse_sign = get_bits(&gb, 4);
179 p->subframe[1].pulse_sign = get_bits(&gb, 4);
180 p->subframe[2].pulse_sign = get_bits(&gb, 4);
181 p->subframe[3].pulse_sign = get_bits(&gb, 4);
188 * Bitexact implementation of sqrt(val/2).
/**
 * Bitexact implementation of sqrt(val/2): doubles val, takes the integer
 * square root, then halves the result and clears the LSB to match the
 * reference fixed-point codec.  The assert guards the precondition that
 * bit 31 is clear so val << 1 cannot overflow.
 */
190 static int16_t square_root(unsigned val)
192 av_assert2(!(val & 0x80000000));
194 return (ff_sqrt(val << 1) >> 1) & (~1);
198 * Generate fixed codebook excitation vector.
200 * @param vector decoded excitation vector
201 * @param subfrm current subframe
202 * @param cur_rate current bitrate
203 * @param pitch_lag closed loop pitch lag
204 * @param index current subframe index
/**
 * Generate the fixed-codebook excitation vector for one subframe.
 * 6.3k rate: decode pulse positions via the combinatorial table and place
 * signed fixed-gain pulses on the selected grid, optionally repeating them
 * as a Dirac train.  5.3k rate: place 4 alternating-sign pulses and add a
 * pitch (harmonic) contribution.
 * NOTE(review): some lines (loop early-exits, closing braces) were elided
 * in this view.
 */
206 static void gen_fcb_excitation(int16_t *vector, G723_1_Subframe *subfrm,
207 enum Rate cur_rate, int pitch_lag, int index)
211 memset(vector, 0, SUBFRAME_LEN * sizeof(*vector));
213 if (cur_rate == RATE_6300) {
/* Out-of-range combined position index: bitstream is invalid here. */
214 if (subfrm->pulse_pos >= max_pos[index])
217 /* Decode amplitudes and positions */
218 j = PULSE_MAX - pulses[index];
219 temp = subfrm->pulse_pos;
220 for (i = 0; i < SUBFRAME_LEN / GRID_SIZE; i++) {
221 temp -= combinatorial_table[j][i];
/* Position i holds a pulse: restore temp and consume one sign bit. */
224 temp += combinatorial_table[j++][i];
225 if (subfrm->pulse_sign & (1 << (PULSE_MAX - j))) {
226 vector[subfrm->grid_index + GRID_SIZE * i] =
227 -fixed_cb_gain[subfrm->amp_index];
229 vector[subfrm->grid_index + GRID_SIZE * i] =
230 fixed_cb_gain[subfrm->amp_index];
235 if (subfrm->dirac_train == 1)
236 ff_g723_1_gen_dirac_train(vector, pitch_lag);
237 } else { /* 5300 bps */
238 int cb_gain = fixed_cb_gain[subfrm->amp_index];
239 int cb_shift = subfrm->grid_index;
240 int cb_sign = subfrm->pulse_sign;
241 int cb_pos = subfrm->pulse_pos;
242 int offset, beta, lag;
/* Four pulses, 8 samples apart, offset by the grid bit. */
244 for (i = 0; i < 8; i += 2) {
245 offset = ((cb_pos & 7) << 3) + cb_shift + i;
246 vector[offset] = (cb_sign & 1) ? cb_gain : -cb_gain;
251 /* Enhance harmonic components */
252 lag = pitch_contrib[subfrm->ad_cb_gain << 1] + pitch_lag +
253 subfrm->ad_cb_lag - 1;
254 beta = pitch_contrib[(subfrm->ad_cb_gain << 1) + 1];
/* Feed back a scaled copy of the excitation at the pitch lag (Q15). */
256 if (lag < SUBFRAME_LEN - 2) {
257 for (i = lag; i < SUBFRAME_LEN; i++)
258 vector[i] += beta * vector[i - lag] >> 15;
264 * Estimate maximum auto-correlation around pitch lag.
266 * @param buf buffer with offset applied
267 * @param offset offset of the excitation vector
268 * @param ccr_max pointer to the maximum auto-correlation
269 * @param pitch_lag decoded pitch lag
270 * @param length length of autocorrelation
271 * @param dir forward lag(1) / backward lag(-1)
/**
 * Search lags in [pitch_lag - 3, limit] around the decoded pitch lag for the
 * maximum auto-correlation; updates *ccr_max and returns the best lag
 * (0 if none improves, per the caller's use).  dir selects forward (+1) or
 * backward (-1) correlation.
 * NOTE(review): the lag-recording branch body was elided in this view.
 */
273 static int autocorr_max(const int16_t *buf, int offset, int *ccr_max,
274 int pitch_lag, int length, int dir)
276 int limit, ccr, lag = 0;
/* Keep the search window inside the excitation history. */
279 pitch_lag = FFMIN(PITCH_MAX - 3, pitch_lag);
281 limit = FFMIN(FRAME_LEN + PITCH_MAX - offset - length, pitch_lag + 3);
283 limit = pitch_lag + 3;
285 for (i = pitch_lag - 3; i <= limit; i++) {
286 ccr = ff_g723_1_dot_product(buf, buf + dir * i, length);
288 if (ccr > *ccr_max) {
297 * Calculate pitch postfilter optimal and scaling gains.
299 * @param lag pitch postfilter forward/backward lag
300 * @param ppf pitch postfilter parameters
301 * @param cur_rate current bitrate
302 * @param tgt_eng target energy
303 * @param ccr cross-correlation
304 * @param res_eng residual energy
/**
 * Compute the pitch postfilter optimal gain and scaling gain for a given lag
 * (Section 3.6 of the spec): decide whether the correlation is strong enough,
 * derive opt_gain from ccr/res_eng, then sc_gain = sqrt(tgt_eng/pf_res^2)
 * so the postfiltered signal keeps the target energy.
 * NOTE(review): some early-exit branches were elided in this view.
 */
306 static void comp_ppf_gains(int lag, PPFParam *ppf, enum Rate cur_rate,
307 int tgt_eng, int ccr, int res_eng)
309 int pf_residual; /* square of postfiltered residual */
/* Gate: only filter when ccr^2 is significant vs tgt_eng*res_eng. */
314 temp1 = tgt_eng * res_eng >> 1;
315 temp2 = ccr * ccr << 1;
318 if (ccr >= res_eng) {
319 ppf->opt_gain = ppf_gain_weight[cur_rate];
321 ppf->opt_gain = (ccr << 15) / res_eng *
322 ppf_gain_weight[cur_rate] >> 15;
324 /* pf_res^2 = tgt_eng + 2*ccr*gain + res_eng*gain^2 */
325 temp1 = (tgt_eng << 15) + (ccr * ppf->opt_gain << 1);
326 temp2 = (ppf->opt_gain * ppf->opt_gain >> 15) * res_eng;
327 pf_residual = av_sat_add32(temp1, temp2 + (1 << 15)) >> 16;
329 if (tgt_eng >= pf_residual << 1) {
332 temp1 = (tgt_eng << 14) / pf_residual;
335 /* scaling_gain = sqrt(tgt_eng/pf_res^2) */
336 ppf->sc_gain = square_root(temp1 << 16);
/* sc_gain saturates at 1.0 in Q15 when the residual exceeds the target. */
339 ppf->sc_gain = 0x7fff;
/* Fold the scaling gain into opt_gain (both Q15). */
342 ppf->opt_gain = av_clip_int16(ppf->opt_gain * ppf->sc_gain >> 15);
346 * Calculate pitch postfilter parameters.
348 * @param p the context
349 * @param offset offset of the excitation vector
350 * @param pitch_lag decoded pitch lag
351 * @param ppf pitch postfilter parameters
352 * @param cur_rate current bitrate
/**
 * Compute pitch postfilter parameters for one subframe: find the best
 * forward and backward lags, compute target/residual energies, normalize
 * them to a common scale, then select the direction with the larger
 * normalized cross-correlation (cases 0-3 of Section 3.6).
 * NOTE(review): default-parameter assignments before the case analysis were
 * elided in this view.
 */
354 static void comp_ppf_coeff(G723_1_ChannelContext *p, int offset, int pitch_lag,
355 PPFParam *ppf, enum Rate cur_rate)
364 * 1 - forward cross-correlation
365 * 2 - forward residual energy
366 * 3 - backward cross-correlation
367 * 4 - backward residual energy
369 int energy[5] = {0, 0, 0, 0, 0};
370 int16_t *buf = p->audio + LPC_ORDER + offset;
371 int fwd_lag = autocorr_max(buf, offset, &energy[1], pitch_lag,
373 int back_lag = autocorr_max(buf, offset, &energy[3], pitch_lag,
/* Neutral defaults: unity scaling, no pitch contribution. */
378 ppf->sc_gain = 0x7fff;
380 /* Case 0, Section 3.6 */
381 if (!back_lag && !fwd_lag)
384 /* Compute target energy */
385 energy[0] = ff_g723_1_dot_product(buf, buf, SUBFRAME_LEN);
387 /* Compute forward residual energy */
389 energy[2] = ff_g723_1_dot_product(buf + fwd_lag, buf + fwd_lag,
392 /* Compute backward residual energy */
394 energy[4] = ff_g723_1_dot_product(buf - back_lag, buf - back_lag,
397 /* Normalize and shorten */
399 for (i = 0; i < 5; i++)
400 temp1 = FFMAX(energy[i], temp1);
/* Scale all energies by the headroom of the largest one, keep 16 bits. */
402 scale = ff_g723_1_normalize_bits(temp1, 31);
403 for (i = 0; i < 5; i++)
404 energy[i] = (energy[i] << scale) >> 16;
406 if (fwd_lag && !back_lag) { /* Case 1 */
407 comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
409 } else if (!fwd_lag) { /* Case 2 */
410 comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
412 } else { /* Case 3 */
415 * Select the largest of energy[1]^2/energy[2]
416 * and energy[3]^2/energy[4]
418 temp1 = energy[4] * ((energy[1] * energy[1] + (1 << 14)) >> 15);
419 temp2 = energy[2] * ((energy[3] * energy[3] + (1 << 14)) >> 15);
420 if (temp1 >= temp2) {
421 comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
424 comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
431 * Classify frames as voiced/unvoiced.
433 * @param p the context
434 * @param pitch_lag decoded pitch_lag
435 * @param exc_eng excitation energy estimation
436 * @param scale scaling factor of exc_eng
438 * @return residual interpolation index if voiced, 0 otherwise
/**
 * Classify the frame as voiced/unvoiced for frame-erasure concealment:
 * scale the excitation history, find the best backward correlation lag,
 * and compare ccr^2 against best_eng * exc_eng / 8.  Also outputs the
 * excitation energy estimate and its scale for the CNG path.
 * Returns the residual interpolation lag if voiced, 0 otherwise (per the
 * doxygen block above; the return statements were elided in this view).
 */
440 static int comp_interp_index(G723_1_ChannelContext *p, int pitch_lag,
441 int *exc_eng, int *scale)
443 int offset = PITCH_MAX + 2 * SUBFRAME_LEN;
444 int16_t *buf = p->audio + LPC_ORDER;
446 int index, ccr, tgt_eng, best_eng, temp;
448 *scale = ff_g723_1_scale_vector(buf, p->excitation, FRAME_LEN + PITCH_MAX);
451 /* Compute maximum backward cross-correlation */
453 index = autocorr_max(buf, offset, &ccr, pitch_lag, SUBFRAME_LEN * 2, -1);
/* Round and shorten the correlation to 16 bits. */
454 ccr = av_sat_add32(ccr, 1 << 15) >> 16;
456 /* Compute target energy */
457 tgt_eng = ff_g723_1_dot_product(buf, buf, SUBFRAME_LEN * 2);
458 *exc_eng = av_sat_add32(tgt_eng, 1 << 15) >> 16;
463 /* Compute best energy */
464 best_eng = ff_g723_1_dot_product(buf - index, buf - index,
466 best_eng = av_sat_add32(best_eng, 1 << 15) >> 16;
/* Voicing decision: ccr^2 > best_eng * exc_eng / 8. */
468 temp = best_eng * *exc_eng >> 3;
470 if (temp < ccr * ccr) {
477 * Perform residual interpolation based on frame classification.
479 * @param buf decoded excitation vector
480 * @param out output vector
481 * @param lag decoded pitch lag
482 * @param gain interpolated gain
483 * @param rseed seed for random number generator
/**
 * Conceal an erased frame by residual interpolation.
 * Voiced: repeat the last pitch period (attenuated by 3/4) across the frame
 * via a backwards self-copy.  Unvoiced: fill the output with scaled pseudo-
 * random noise from a 16-bit LCG and clear the excitation history.
 */
485 static void residual_interp(int16_t *buf, int16_t *out, int lag,
486 int gain, int *rseed)
489 if (lag) { /* Voiced */
490 int16_t *vector_ptr = buf + PITCH_MAX;
/* Seed the first `lag` samples from the previous period, at 3/4 gain. */
492 for (i = 0; i < lag; i++)
493 out[i] = vector_ptr[i - lag] * 3 >> 2;
/* Periodically extend: copy from `lag` samples back, handling overlap. */
494 av_memcpy_backptr((uint8_t*)(out + lag), lag * sizeof(*out),
495 (FRAME_LEN - lag) * sizeof(*out));
496 } else { /* Unvoiced */
497 for (i = 0; i < FRAME_LEN; i++) {
/* Same LCG as cng_rand, truncated to 16 bits. */
498 *rseed = (int16_t)(*rseed * 521 + 259);
499 out[i] = gain * *rseed >> 15;
501 memset(buf, 0, (FRAME_LEN + PITCH_MAX) * sizeof(*buf));
506 * Perform IIR filtering.
508 * @param fir_coef FIR coefficients
509 * @param iir_coef IIR coefficients
510 * @param src source vector
511 * @param dest destination vector
512 * @param width width of the output, 16 bits(0) / 32 bits(1)
/*
 * Pole/zero (FIR numerator + IIR denominator) filter over one subframe.
 * width selects the destination precision: 0 keeps 16-bit output
 * (res_shift = 16), 1 keeps the full 32-bit accumulator (res_shift = 0);
 * in_shift compensates so (dest)[m - n] is always read back at 16-bit
 * precision.  Implemented as a macro (not a function) because src/dest
 * element types differ between call sites.
 * NOTE(review): the accumulator initialisation line was elided in this view.
 */
514 #define iir_filter(fir_coef, iir_coef, src, dest, width)\
517 int res_shift = 16 & ~-(width);\
518 int in_shift = 16 - res_shift;\
520 for (m = 0; m < SUBFRAME_LEN; m++) {\
522 for (n = 1; n <= LPC_ORDER; n++) {\
523 filter -= (fir_coef)[n - 1] * (src)[m - n] -\
524 (iir_coef)[n - 1] * ((dest)[m - n] >> in_shift);\
527 (dest)[m] = av_clipl_int32(((src)[m] * 65536) + (filter * 8) +\
528 (1 << 15)) >> res_shift;\
533 * Adjust gain of postfiltered signal.
535 * @param p the context
536 * @param buf postfiltered output vector
537 * @param energy input energy coefficient
/**
 * Adjust the gain of the postfiltered subframe so its energy matches the
 * input energy: gain = sqrt(energy / out_energy) in fixed point, then apply
 * it sample-by-sample through a smoothed (15/16 IIR) gain p->pf_gain.
 * NOTE(review): num/denom initialisation and guard branches were elided in
 * this view.
 */
539 static void gain_scale(G723_1_ChannelContext *p, int16_t * buf, int energy)
541 int num, denom, gain, bits1, bits2;
/* Accumulate the output energy with saturating doubling adds. */
546 for (i = 0; i < SUBFRAME_LEN; i++) {
547 int temp = buf[i] >> 2;
549 denom = av_sat_dadd32(denom, temp);
/* Normalize both energies to use the full 31-bit range before dividing. */
553 bits1 = ff_g723_1_normalize_bits(num, 31);
554 bits2 = ff_g723_1_normalize_bits(denom, 31);
555 num = num << bits1 >> 1;
558 bits2 = 5 + bits1 - bits2;
559 bits2 = av_clip_uintp2(bits2, 5);
561 gain = (num >> 1) / (denom >> 16);
562 gain = square_root(gain << 16 >> bits2);
567 for (i = 0; i < SUBFRAME_LEN; i++) {
/* Smooth the gain: pf_gain tracks `gain` with a 15/16 leak. */
568 p->pf_gain = (15 * p->pf_gain + gain + (1 << 3)) >> 4;
/* Apply gain * 17/16 (the >> 4 term) to the sample. */
569 buf[i] = av_clip_int16((buf[i] * (p->pf_gain + (p->pf_gain >> 4)) +
575 * Perform formant filtering.
577 * @param p the context
578 * @param lpc quantized lpc coefficients
579 * @param buf input buffer
580 * @param dst output buffer
/**
 * Formant postfilter: run a pole/zero filter derived from the quantized LPC
 * coefficients over each subframe, apply a tilt-compensation filter driven
 * by the smoothed reflection coefficient, then restore the signal energy
 * with gain_scale().  Maintains FIR/IIR filter memories across frames.
 * NOTE(review): several lines (inner loop bodies, guards) were elided in
 * this view.
 */
582 static void formant_postfilter(G723_1_ChannelContext *p, int16_t *lpc,
583 int16_t *buf, int16_t *dst)
585 int16_t filter_coef[2][LPC_ORDER];
586 int filter_signal[LPC_ORDER + FRAME_LEN], *signal_ptr;
/* Restore filter memories in front of the new frame data. */
589 memcpy(buf, p->fir_mem, LPC_ORDER * sizeof(*buf));
590 memcpy(filter_signal, p->iir_mem, LPC_ORDER * sizeof(*filter_signal));
592 for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
/* Weight the LPC coefficients with the two postfilter factor tables. */
593 for (k = 0; k < LPC_ORDER; k++) {
594 filter_coef[0][k] = (-lpc[k] * postfilter_tbl[0][k] +
596 filter_coef[1][k] = (-lpc[k] * postfilter_tbl[1][k] +
599 iir_filter(filter_coef[0], filter_coef[1], buf + i, filter_signal + i, 1);
/* Save memories for the next frame. */
603 memcpy(p->fir_mem, buf + FRAME_LEN, LPC_ORDER * sizeof(int16_t));
604 memcpy(p->iir_mem, filter_signal + FRAME_LEN, LPC_ORDER * sizeof(int));
607 signal_ptr = filter_signal + LPC_ORDER;
608 for (i = 0; i < SUBFRAMES; i++) {
614 scale = ff_g723_1_scale_vector(dst, buf, SUBFRAME_LEN);
616 /* Compute auto correlation coefficients */
617 auto_corr[0] = ff_g723_1_dot_product(dst, dst + 1, SUBFRAME_LEN - 1);
618 auto_corr[1] = ff_g723_1_dot_product(dst, dst, SUBFRAME_LEN);
620 /* Compute reflection coefficient */
621 temp = auto_corr[1] >> 16;
623 temp = (auto_corr[0] >> 2) / temp;
/* Smooth the reflection coefficient (3/4 old + 1/4 new). */
625 p->reflection_coef = (3 * p->reflection_coef + temp + 2) >> 2;
626 temp = -p->reflection_coef >> 1 & ~3;
628 /* Compensation filter */
629 for (j = 0; j < SUBFRAME_LEN; j++) {
630 dst[j] = av_sat_dadd32(signal_ptr[j],
631 (signal_ptr[j - 1] >> 16) * temp) >> 16;
634 /* Compute normalized signal energy */
635 temp = 2 * scale + 4;
637 energy = av_clipl_int32((int64_t)auto_corr[1] << -temp);
639 energy = auto_corr[1] >> temp;
641 gain_scale(p, dst, energy);
644 signal_ptr += SUBFRAME_LEN;
/**
 * Map a decoded SID gain index to the gain value used for LSP inverse
 * quantization.
 *
 * The mapping is piecewise linear over three segments of the index range:
 * indices below 16 scale by 64, indices 16..31 scale by 128 with offset 8,
 * and indices 32 and above scale by 256 with offset 20.  The visible chunk
 * was missing the first segment (gain < 0x10), which is restored here; the
 * shift expressions are parenthesized so the intended (gain - k) << s
 * grouping is explicit.
 *
 * @param gain decoded SID gain index (non-negative)
 * @return scaled gain value
 */
static int sid_gain_to_lsp_index(int gain)
{
    if (gain < 0x10)
        return gain << 6;
    else if (gain < 0x20)
        return (gain - 8) << 7;
    else
        return (gain - 20) << 8;
}
/**
 * Comfort-noise pseudo-random helper.
 *
 * Advances a 16-bit linear congruential generator (x' = 521*x + 259,
 * truncated to 16 bits) stored in *state, and maps the positive 15-bit
 * part of the new state into the range [0, base).
 *
 * @param state in/out LCG state
 * @param base  exclusive upper bound of the returned value
 * @return a deterministic pseudo-random value in [0, base)
 */
static inline int cng_rand(int *state, int base)
{
    /* Step the LCG and keep only the low 16 bits of the new state. */
    int next = (*state * 521 + 259) & 0xFFFF;

    *state = next;
    /* Scale the 15-bit magnitude into [0, base) in Q15. */
    return ((next & 0x7FFF) * base) >> 15;
}
/**
 * Estimate the SID gain index from the measured excitation gain when an
 * active frame is followed by untransmitted frames: shift sid_gain to the
 * cur_gain scale (with overflow saturation), filter it, locate the segment
 * in the cng_bseg table, then refine the 4-bit value within the segment.
 * NOTE(review): this function is heavily elided in this view (early returns,
 * comparison bodies); comments are limited to what the visible code shows.
 */
665 static int estimate_sid_gain(G723_1_ChannelContext *p)
667 int i, shift, seg, seg2, t, val, val_add, x, y;
669 shift = 16 - p->cur_gain * 2;
671 if (p->sid_gain == 0) {
/* Detect that sid_gain << shift would overflow by shifting back and
 * comparing; saturate t to the int32 extreme of matching sign. */
673 } else if (shift >= 31 || (int32_t)((uint32_t)p->sid_gain << shift) >> shift != p->sid_gain) {
674 if (p->sid_gain < 0) t = INT32_MIN;
677 t = p->sid_gain << shift;
679 t = p->sid_gain >> -shift;
680 x = av_clipl_int32(t * (int64_t)cng_filt[0] >> 16);
/* Segment search in the gain boundary table. */
682 if (x >= cng_bseg[2])
685 if (x >= cng_bseg[1]) {
690 seg = (x >= cng_bseg[0]);
692 seg2 = FFMIN(seg, 3);
696 for (i = 0; i < shift; i++) {
697 t = seg * 32 + (val << seg2);
/* Refine: compare against the neighbouring quantizer cells. */
706 t = seg * 32 + (val << seg2);
709 t = seg * 32 + (val + 1 << seg2);
711 val = (seg2 - 1) * 16 + val;
715 t = seg * 32 + (val - 1 << seg2);
717 val = (seg2 - 1) * 16 + val;
/**
 * Generate comfort noise for SID/untransmitted frames: draw pseudo-random
 * pitch lags, adaptive-codebook gains and pulse positions/signs from the
 * CNG LCG, build the adaptive-codebook excitation, then solve for the pulse
 * amplitude x so the noise energy matches p->cur_gain, and inject the
 * signed pulses into the excitation.
 * NOTE(review): many lines (energy accumulation, the quadratic solve, loop
 * braces) were elided in this view.
 */
725 static void generate_noise(G723_1_ChannelContext *p)
729 int signs[SUBFRAMES / 2 * 11], pos[SUBFRAMES / 2 * 11];
730 int tmp[SUBFRAME_LEN * 2];
733 int b0, c, delta, x, shift;
/* Random pitch lags in the high (forbidden-for-speech) range. */
735 p->pitch_lag[0] = cng_rand(&p->cng_random_seed, 21) + 123;
736 p->pitch_lag[1] = cng_rand(&p->cng_random_seed, 19) + 123;
738 for (i = 0; i < SUBFRAMES; i++) {
739 p->subframe[i].ad_cb_gain = cng_rand(&p->cng_random_seed, 50) + 1;
740 p->subframe[i].ad_cb_lag = cng_adaptive_cb_lag[i];
/* Per subframe-pair: random grid offsets and 11 random pulse signs
 * (+/- 1.0 in Q14) packed from the bits of t. */
743 for (i = 0; i < SUBFRAMES / 2; i++) {
744 t = cng_rand(&p->cng_random_seed, 1 << 13);
746 off[i * 2 + 1] = ((t >> 1) & 1) + SUBFRAME_LEN;
748 for (j = 0; j < 11; j++) {
749 signs[i * 11 + j] = ((t & 1) * 2 - 1) * (1 << 14);
/* Draw pulse positions without replacement (partial Fisher-Yates). */
755 for (i = 0; i < SUBFRAMES; i++) {
756 for (j = 0; j < SUBFRAME_LEN / 2; j++)
758 t = SUBFRAME_LEN / 2;
759 for (j = 0; j < pulses[i]; j++, idx++) {
760 int idx2 = cng_rand(&p->cng_random_seed, t);
762 pos[idx] = tmp[idx2] * 2 + off[i];
763 tmp[idx2] = tmp[--t];
/* Build the adaptive-codebook part of the excitation on top of the
 * previous frame's excitation history. */
767 vector_ptr = p->audio + LPC_ORDER;
768 memcpy(vector_ptr, p->prev_excitation,
769 PITCH_MAX * sizeof(*p->excitation));
770 for (i = 0; i < SUBFRAMES; i += 2) {
771 ff_g723_1_gen_acb_excitation(vector_ptr, vector_ptr,
772 p->pitch_lag[i >> 1], &p->subframe[i],
774 ff_g723_1_gen_acb_excitation(vector_ptr + SUBFRAME_LEN,
775 vector_ptr + SUBFRAME_LEN,
776 p->pitch_lag[i >> 1], &p->subframe[i + 1],
/* Measure the magnitude of the pair to choose a normalization shift. */
780 for (j = 0; j < SUBFRAME_LEN * 2; j++)
781 t |= FFABS(vector_ptr[j]);
782 t = FFMIN(t, 0x7FFF);
786 shift = -10 + av_log2(t);
792 for (j = 0; j < SUBFRAME_LEN * 2; j++) {
793 t = vector_ptr[j] * (1 << -shift);
798 for (j = 0; j < SUBFRAME_LEN * 2; j++) {
799 t = vector_ptr[j] >> shift;
/* b0 = correlation between the excitation and the pulse pattern. */
806 for (j = 0; j < 11; j++)
807 b0 += tmp[pos[(i / 2) * 11 + j]] * signs[(i / 2) * 11 + j];
808 b0 = b0 * 2 * 2979LL + (1 << 29) >> 30; // approximated division by 11
810 c = p->cur_gain * (p->cur_gain * SUBFRAME_LEN >> 5);
811 if (shift * 2 + 3 >= 0)
814 c <<= -(shift * 2 + 3);
815 c = (av_clipl_int32(sum << 1) - c) * 2979LL >> 15;
/* Solve x from the quadratic energy equation; discriminant below. */
817 delta = b0 * b0 * 2 - c;
821 delta = square_root(delta);
824 if (FFABS(t) < FFABS(x))
832 x = av_clip(x, -10000, 10000);
/* Add the solved amplitude with the drawn signs at the drawn positions. */
834 for (j = 0; j < 11; j++) {
835 idx = (i / 2) * 11 + j;
836 vector_ptr[pos[idx]] = av_clip_int16(vector_ptr[pos[idx]] +
837 (x * signs[idx] >> 15));
840 /* copy decoded data to serve as a history for the next decoded subframes */
841 memcpy(vector_ptr + PITCH_MAX, vector_ptr,
842 sizeof(*vector_ptr) * SUBFRAME_LEN * 2);
843 vector_ptr += SUBFRAME_LEN * 2;
845 /* Save the excitation for the next frame */
846 memcpy(p->prev_excitation, p->audio + LPC_ORDER + FRAME_LEN,
847 PITCH_MAX * sizeof(*p->excitation));
/**
 * Decode one packet into FRAME_LEN samples per channel.
 * Flow per channel: unpack the bitstream (with erasure handling), then
 * either the active-frame path (LSP inverse quant + interpolation, fixed +
 * adaptive codebook excitation, pitch postfilter) or the erased-frame path
 * (residual interpolation / muting) or the SID/CNG path, followed by LPC
 * synthesis and optional formant postfiltering.
 * NOTE(review): a number of lines (erasure branches, CNG invocation, loop
 * braces) were elided in this view.
 */
850 static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
851 int *got_frame_ptr, AVPacket *avpkt)
853 G723_1_Context *s = avctx->priv_data;
854 AVFrame *frame = data;
855 const uint8_t *buf = avpkt->data;
856 int buf_size = avpkt->size;
/* The low 2 bits of the first byte select the frame size/mode. */
857 int dec_mode = buf[0] & 3;
859 PPFParam ppf[SUBFRAMES];
860 int16_t cur_lsp[LPC_ORDER];
861 int16_t lpc[SUBFRAMES * LPC_ORDER];
862 int16_t acb_vector[SUBFRAME_LEN];
864 int bad_frame = 0, i, j, ret;
866 if (buf_size < frame_size[dec_mode] * avctx->channels) {
868 av_log(avctx, AV_LOG_WARNING,
869 "Expected %d bytes, got %d - skipping packet\n",
870 frame_size[dec_mode], buf_size);
875 frame->nb_samples = FRAME_LEN;
876 if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
879 for (int ch = 0; ch < avctx->channels; ch++) {
880 G723_1_ChannelContext *p = &s->ch[ch];
881 int16_t *audio = p->audio;
/* On unpack failure, conceal: keep ACTIVE if the previous frame was
 * active (erasure), otherwise treat as untransmitted. */
883 if (unpack_bitstream(p, buf, buf_size) < 0) {
885 if (p->past_frame_type == ACTIVE_FRAME)
886 p->cur_frame_type = ACTIVE_FRAME;
888 p->cur_frame_type = UNTRANSMITTED_FRAME;
891 out = (int16_t *)frame->extended_data[ch];
893 if (p->cur_frame_type == ACTIVE_FRAME) {
895 p->erased_frames = 0;
896 else if (p->erased_frames != 3)
899 ff_g723_1_inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, bad_frame);
900 ff_g723_1_lsp_interpolate(lpc, cur_lsp, p->prev_lsp);
902 /* Save the lsp_vector for the next frame */
903 memcpy(p->prev_lsp, cur_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
905 /* Generate the excitation for the frame */
906 memcpy(p->excitation, p->prev_excitation,
907 PITCH_MAX * sizeof(*p->excitation));
908 if (!p->erased_frames) {
909 int16_t *vector_ptr = p->excitation + PITCH_MAX;
911 /* Update interpolation gain memory */
912 p->interp_gain = fixed_cb_gain[(p->subframe[2].amp_index +
913 p->subframe[3].amp_index) >> 1];
914 for (i = 0; i < SUBFRAMES; i++) {
915 gen_fcb_excitation(vector_ptr, &p->subframe[i], p->cur_rate,
916 p->pitch_lag[i >> 1], i);
917 ff_g723_1_gen_acb_excitation(acb_vector,
918 &p->excitation[SUBFRAME_LEN * i],
919 p->pitch_lag[i >> 1],
920 &p->subframe[i], p->cur_rate);
921 /* Get the total excitation */
922 for (j = 0; j < SUBFRAME_LEN; j++) {
923 int v = av_clip_int16(vector_ptr[j] * 2);
924 vector_ptr[j] = av_clip_int16(v + acb_vector[j]);
926 vector_ptr += SUBFRAME_LEN;
929 vector_ptr = p->excitation + PITCH_MAX;
/* Voicing classification feeds both erasure concealment and CNG. */
931 p->interp_index = comp_interp_index(p, p->pitch_lag[1],
932 &p->sid_gain, &p->cur_gain);
934 /* Perform pitch postfiltering */
937 for (j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
938 comp_ppf_coeff(p, i, p->pitch_lag[j >> 1],
939 ppf + j, p->cur_rate);
/* Weighted sum of excitation and its lagged copy per subframe. */
941 for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
942 ff_acelp_weighted_vector_sum(p->audio + LPC_ORDER + i,
944 vector_ptr + i + ppf[j].index,
947 1 << 14, 15, SUBFRAME_LEN);
949 audio = vector_ptr - LPC_ORDER;
952 /* Save the excitation for the next frame */
953 memcpy(p->prev_excitation, p->excitation + FRAME_LEN,
954 PITCH_MAX * sizeof(*p->excitation));
/* Erasure path: decay the interpolation gain, mute fully after 3
 * consecutive erased frames, otherwise regenerate by interpolation. */
956 p->interp_gain = (p->interp_gain * 3 + 2) >> 2;
957 if (p->erased_frames == 3) {
959 memset(p->excitation, 0,
960 (FRAME_LEN + PITCH_MAX) * sizeof(*p->excitation));
961 memset(p->prev_excitation, 0,
962 PITCH_MAX * sizeof(*p->excitation));
963 memset(frame->data[0], 0,
964 (FRAME_LEN + LPC_ORDER) * sizeof(int16_t));
966 int16_t *buf = p->audio + LPC_ORDER;
968 /* Regenerate frame */
969 residual_interp(p->excitation, buf, p->interp_index,
970 p->interp_gain, &p->random_seed);
972 /* Save the excitation for the next frame */
973 memcpy(p->prev_excitation, buf + (FRAME_LEN - PITCH_MAX),
974 PITCH_MAX * sizeof(*p->excitation));
/* Leaving the active path resets the comfort-noise generator seed. */
977 p->cng_random_seed = CNG_RANDOM_SEED;
979 if (p->cur_frame_type == SID_FRAME) {
980 p->sid_gain = sid_gain_to_lsp_index(p->subframe[0].amp_index);
981 ff_g723_1_inverse_quant(p->sid_lsp, p->prev_lsp, p->lsp_index, 0);
982 } else if (p->past_frame_type == ACTIVE_FRAME) {
983 p->sid_gain = estimate_sid_gain(p);
/* First silent frame snaps to sid_gain; later ones converge 7/8. */
986 if (p->past_frame_type == ACTIVE_FRAME)
987 p->cur_gain = p->sid_gain;
989 p->cur_gain = (p->cur_gain * 7 + p->sid_gain) >> 3;
991 ff_g723_1_lsp_interpolate(lpc, p->sid_lsp, p->prev_lsp);
992 /* Save the lsp_vector for the next frame */
993 memcpy(p->prev_lsp, p->sid_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
996 p->past_frame_type = p->cur_frame_type;
/* LPC synthesis over the four subframes using the synth memory. */
998 memcpy(p->audio, p->synth_mem, LPC_ORDER * sizeof(*p->audio));
999 for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
1000 ff_celp_lp_synthesis_filter(p->audio + i, &lpc[j * LPC_ORDER],
1001 audio + i, SUBFRAME_LEN, LPC_ORDER,
1003 memcpy(p->synth_mem, p->audio + FRAME_LEN, LPC_ORDER * sizeof(*p->audio));
1005 if (s->postfilter) {
1006 formant_postfilter(p, lpc, p->audio, out);
1007 } else { // if output is not postfiltered it should be scaled by 2
1008 for (i = 0; i < FRAME_LEN; i++)
1009 out[i] = av_clip_int16(p->audio[LPC_ORDER + i] << 1);
1015 return frame_size[dec_mode] * avctx->channels;
/* AVOption plumbing: OFFSET locates the option field inside the private
 * context; AD marks options as audio decoding parameters. */
1018 #define OFFSET(x) offsetof(G723_1_Context, x)
1019 #define AD AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM
/* Single user option: the formant/pitch postfilter, enabled by default. */
1021 static const AVOption options[] = {
1022 { "postfilter", "enable postfilter", OFFSET(postfilter), AV_OPT_TYPE_BOOL,
1023 { .i64 = 1 }, 0, 1, AD },
/* AVClass exposing the options above through the AVOptions API. */
1028 static const AVClass g723_1dec_class = {
1029 .class_name = "G.723.1 decoder",
1030 .item_name = av_default_item_name,
1032 .version = LIBAVUTIL_VERSION_INT,
1035 AVCodec ff_g723_1_decoder = {
1037 .long_name = NULL_IF_CONFIG_SMALL("G.723.1"),
1038 .type = AVMEDIA_TYPE_AUDIO,
1039 .id = AV_CODEC_ID_G723_1,
1040 .priv_data_size = sizeof(G723_1_Context),
1041 .init = g723_1_decode_init,
1042 .decode = g723_1_decode_frame,
1043 .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
1044 .priv_class = &g723_1dec_class,