*/
#define BITSTREAM_READER_LE
-#include "libavutil/audioconvert.h"
-#include "libavutil/lzo.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "get_bits.h"
#include "acelp_vectors.h"
#include "celp_filters.h"
-#include "celp_math.h"
-#include "lsp.h"
#include "g723_1_data.h"
+#include "internal.h"
+
+#define CNG_RANDOM_SEED 12345
/**
* G723.1 frame types
typedef struct g723_1_context {
AVClass *class;
- AVFrame frame;
G723_1_Subframe subframe[4];
enum FrameType cur_frame_type;
int erased_frames;
int16_t prev_lsp[LPC_ORDER];
+ int16_t sid_lsp[LPC_ORDER];
int16_t prev_excitation[PITCH_MAX];
- int16_t excitation[PITCH_MAX + FRAME_LEN];
+ int16_t excitation[PITCH_MAX + FRAME_LEN + 4];
int16_t synth_mem[LPC_ORDER];
int16_t fir_mem[LPC_ORDER];
int iir_mem[LPC_ORDER];
int random_seed;
+ int cng_random_seed;
int interp_index;
int interp_gain;
int sid_gain;
int pf_gain;
int postfilter;
- int16_t audio[FRAME_LEN + LPC_ORDER];
+ int16_t audio[FRAME_LEN + LPC_ORDER + PITCH_MAX + 4];
} G723_1_Context;
static av_cold int g723_1_decode_init(AVCodecContext *avctx)
avctx->sample_rate = 8000;
p->pf_gain = 1 << 12;
- avcodec_get_frame_defaults(&p->frame);
- avctx->coded_frame = &p->frame;
-
memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
+ memcpy(p->sid_lsp, dc_lsp, LPC_ORDER * sizeof(*p->sid_lsp));
+
+ p->cng_random_seed = CNG_RANDOM_SEED;
+ p->past_frame_type = SID_FRAME;
return 0;
}
*/
static int normalize_bits(int num, int width)
{
-    if (!num)
-        return 0;
-    if (num == -1)
-        return width;
-    if (num < 0)
-        num = ~num;
-
-    return width - av_log2(num)
+    /* Branchless form: number of left shifts needed to bring `num` up to
+     * `width` significant bits.  NOTE(review): assumes av_log2(0) == 0 so
+     * num == 0 yields width - 1, and assumes num >= 0 — the caller
+     * scale_vector() clamps its maximum to 0x7FFF before calling. */
+    return width - av_log2(num) - 1;
}
/**
* Scale vector contents based on the largest of their absolutes.
*/
-static int scale_vector(int16_t *vector, int length)
+/* Scale `vector` into `dst` so the largest magnitude is normalized to the
+ * 15-bit range minus a fixed 3-bit headroom; returns the net shift
+ * (bits - 3).  Now out-of-place: the source buffer is left untouched. */
+static int scale_vector(int16_t *dst, const int16_t *vector, int length)
{
-    int bits, scale, max = 0;
+    int bits, max = 0;
    int i;
for (i = 0; i < length; i++)
-        max = FFMAX(max, FFABS(vector[i]));
+        max |= FFABS(vector[i]);
+    /* OR-ing magnitudes preserves the highest set bit (same av_log2 as the
+     * true maximum); the clamp guards against FFABS(-32768) == 32768
+     * spilling past 15 bits. */
+    max = FFMIN(max, 0x7FFF);
bits = normalize_bits(max, 15);
-    scale = (bits == 15) ? 0x7FFF : (1 << bits);
for (i = 0; i < length; i++)
-        vector[i] = (vector[i] * scale) >> 4;
+        dst[i] = vector[i] << bits >> 3;
return bits - 3;
}
for (j = 0; j < LPC_ORDER; j++) {
int index = lpc[j] >> 7;
int offset = lpc[j] & 0x7f;
- int64_t temp1 = cos_tab[index] << 16;
+ int temp1 = cos_tab[index] << 16;
int temp2 = (cos_tab[index + 1] - cos_tab[index]) *
((offset << 8) + 0x80) << 1;
- lpc[j] = -(av_clipl_int32(((temp1 + temp2) << 1) + (1 << 15)) >> 16);
+ lpc[j] = -(av_sat_dadd32(1 << 15, temp1 + temp2) >> 16);
}
/*
* @param pitch_lag closed loop pitch lag
* @param index current subframe index
*/
-static void gen_fcb_excitation(int16_t *vector, G723_1_Subframe subfrm,
+static void gen_fcb_excitation(int16_t *vector, G723_1_Subframe *subfrm,
enum Rate cur_rate, int pitch_lag, int index)
{
int temp, i, j;
memset(vector, 0, SUBFRAME_LEN * sizeof(*vector));
if (cur_rate == RATE_6300) {
- if (subfrm.pulse_pos >= max_pos[index])
+ if (subfrm->pulse_pos >= max_pos[index])
return;
/* Decode amplitudes and positions */
j = PULSE_MAX - pulses[index];
- temp = subfrm.pulse_pos;
+ temp = subfrm->pulse_pos;
for (i = 0; i < SUBFRAME_LEN / GRID_SIZE; i++) {
temp -= combinatorial_table[j][i];
if (temp >= 0)
continue;
temp += combinatorial_table[j++][i];
- if (subfrm.pulse_sign & (1 << (PULSE_MAX - j))) {
- vector[subfrm.grid_index + GRID_SIZE * i] =
- -fixed_cb_gain[subfrm.amp_index];
+ if (subfrm->pulse_sign & (1 << (PULSE_MAX - j))) {
+ vector[subfrm->grid_index + GRID_SIZE * i] =
+ -fixed_cb_gain[subfrm->amp_index];
} else {
- vector[subfrm.grid_index + GRID_SIZE * i] =
- fixed_cb_gain[subfrm.amp_index];
+ vector[subfrm->grid_index + GRID_SIZE * i] =
+ fixed_cb_gain[subfrm->amp_index];
}
if (j == PULSE_MAX)
break;
}
- if (subfrm.dirac_train == 1)
+ if (subfrm->dirac_train == 1)
gen_dirac_train(vector, pitch_lag);
} else { /* 5300 bps */
- int cb_gain = fixed_cb_gain[subfrm.amp_index];
- int cb_shift = subfrm.grid_index;
- int cb_sign = subfrm.pulse_sign;
- int cb_pos = subfrm.pulse_pos;
+ int cb_gain = fixed_cb_gain[subfrm->amp_index];
+ int cb_shift = subfrm->grid_index;
+ int cb_sign = subfrm->pulse_sign;
+ int cb_pos = subfrm->pulse_pos;
int offset, beta, lag;
for (i = 0; i < 8; i += 2) {
}
/* Enhance harmonic components */
- lag = pitch_contrib[subfrm.ad_cb_gain << 1] + pitch_lag +
- subfrm.ad_cb_lag - 1;
- beta = pitch_contrib[(subfrm.ad_cb_gain << 1) + 1];
+ lag = pitch_contrib[subfrm->ad_cb_gain << 1] + pitch_lag +
+ subfrm->ad_cb_lag - 1;
+ beta = pitch_contrib[(subfrm->ad_cb_gain << 1) + 1];
if (lag < SUBFRAME_LEN - 2) {
for (i = lag; i < SUBFRAME_LEN; i++)
residual[i] = prev_excitation[offset + (i - 2) % lag];
}
-static int dot_product(const int16_t *a, const int16_t *b, int length,
-                       int shift)
+/* Saturated dot product of two int16 vectors.  The old `shift` parameter is
+ * gone: each product is accumulated doubled via av_sat_dadd32() (saturating
+ * sum + 2*prod), matching the previous `<< 1` behaviour without 64-bit
+ * arithmetic. */
+static int dot_product(const int16_t *a, const int16_t *b, int length)
{
int i, sum = 0;
for (i = 0; i < length; i++) {
-        int64_t prod = av_clipl_int32(MUL64(a[i], b[i]) << shift);
-        sum  = av_clipl_int32(sum + prod);
+        int prod = a[i] * b[i];
+        sum = av_sat_dadd32(sum, prod);
    }
return sum;
}
* Generate adaptive codebook excitation.
*/
static void gen_acb_excitation(int16_t *vector, int16_t *prev_excitation,
-                               int pitch_lag, G723_1_Subframe subfrm,
+                               int pitch_lag, G723_1_Subframe *subfrm,
                               enum Rate cur_rate)
+                               /* subfrm now passed by pointer: avoids
+                                * copying the struct on every call */
{
    int16_t residual[SUBFRAME_LEN + PITCH_ORDER - 1];
    const int16_t *cb_ptr;
-    int lag = pitch_lag + subfrm.ad_cb_lag - 1;
+    int lag = pitch_lag + subfrm->ad_cb_lag - 1;
    int i;
-    int64_t sum;
+    int sum;
get_residual(residual, prev_excitation, lag);
cb_ptr = adaptive_cb_gain170;
/* Calculate adaptive vector */
-    cb_ptr += subfrm.ad_cb_gain * 20;
+    cb_ptr += subfrm->ad_cb_gain * 20;
for (i = 0; i < SUBFRAME_LEN; i++) {
-        sum = dot_product(residual + i, cb_ptr, PITCH_ORDER, 1);
-        vector[i] = av_clipl_int32((sum << 1) + (1 << 15)) >> 16;
+        sum = dot_product(residual + i, cb_ptr, PITCH_ORDER);
+        /* round (add 0.5 in Q16) with saturation, then take the high word;
+         * dot_product() already returns the doubled sum */
+        sum = dot_product(residual + i, cb_ptr, PITCH_ORDER);
+        vector[i] = av_sat_dadd32(1 << 15, sum) >> 16;
    }
}
/**
* Estimate maximum auto-correlation around pitch lag.
*
- * @param p the context
+ * @param buf buffer with offset applied
* @param offset offset of the excitation vector
* @param ccr_max pointer to the maximum auto-correlation
* @param pitch_lag decoded pitch lag
* @param length length of autocorrelation
* @param dir forward lag(1) / backward lag(-1)
*/
-static int autocorr_max(G723_1_Context *p, int offset, int *ccr_max,
+static int autocorr_max(const int16_t *buf, int offset, int *ccr_max,
int pitch_lag, int length, int dir)
{
int limit, ccr, lag = 0;
- int16_t *buf = p->excitation + offset;
int i;
pitch_lag = FFMIN(PITCH_MAX - 3, pitch_lag);
- limit = FFMIN(FRAME_LEN + PITCH_MAX - offset - length, pitch_lag + 3);
+ if (dir > 0)
+ limit = FFMIN(FRAME_LEN + PITCH_MAX - offset - length, pitch_lag + 3);
+ else
+ limit = pitch_lag + 3;
for (i = pitch_lag - 3; i <= limit; i++) {
- ccr = dot_product(buf, buf + dir * i, length, 1);
+ ccr = dot_product(buf, buf + dir * i, length);
if (ccr > *ccr_max) {
*ccr_max = ccr;
int tgt_eng, int ccr, int res_eng)
{
int pf_residual; /* square of postfiltered residual */
- int64_t temp1, temp2;
+ int temp1, temp2;
ppf->index = lag;
/* pf_res^2 = tgt_eng + 2*ccr*gain + res_eng*gain^2 */
temp1 = (tgt_eng << 15) + (ccr * ppf->opt_gain << 1);
temp2 = (ppf->opt_gain * ppf->opt_gain >> 15) * res_eng;
- pf_residual = av_clipl_int32(temp1 + temp2 + (1 << 15)) >> 16;
+ pf_residual = av_sat_add32(temp1, temp2 + (1 << 15)) >> 16;
if (tgt_eng >= pf_residual << 1) {
temp1 = 0x7fff;
int16_t scale;
int i;
- int64_t temp1, temp2;
+ int temp1, temp2;
/*
* 0 - target energy
* 4 - backward residual energy
*/
int energy[5] = {0, 0, 0, 0, 0};
- int16_t *buf = p->excitation + offset;
- int fwd_lag = autocorr_max(p, offset, &energy[1], pitch_lag,
+ int16_t *buf = p->audio + LPC_ORDER + offset;
+ int fwd_lag = autocorr_max(buf, offset, &energy[1], pitch_lag,
SUBFRAME_LEN, 1);
- int back_lag = autocorr_max(p, offset, &energy[3], pitch_lag,
+ int back_lag = autocorr_max(buf, offset, &energy[3], pitch_lag,
SUBFRAME_LEN, -1);
ppf->index = 0;
return;
/* Compute target energy */
- energy[0] = dot_product(buf, buf, SUBFRAME_LEN, 1);
+ energy[0] = dot_product(buf, buf, SUBFRAME_LEN);
/* Compute forward residual energy */
if (fwd_lag)
- energy[2] = dot_product(buf + fwd_lag, buf + fwd_lag,
- SUBFRAME_LEN, 1);
+ energy[2] = dot_product(buf + fwd_lag, buf + fwd_lag, SUBFRAME_LEN);
/* Compute backward residual energy */
if (back_lag)
- energy[4] = dot_product(buf - back_lag, buf - back_lag,
- SUBFRAME_LEN, 1);
+ energy[4] = dot_product(buf - back_lag, buf - back_lag, SUBFRAME_LEN);
/* Normalize and shorten */
temp1 = 0;
int *exc_eng, int *scale)
{
int offset = PITCH_MAX + 2 * SUBFRAME_LEN;
- int16_t *buf = p->excitation + offset;
+ int16_t *buf = p->audio + LPC_ORDER;
int index, ccr, tgt_eng, best_eng, temp;
- *scale = scale_vector(p->excitation, FRAME_LEN + PITCH_MAX);
+ *scale = scale_vector(buf, p->excitation, FRAME_LEN + PITCH_MAX);
+ buf += offset;
/* Compute maximum backward cross-correlation */
ccr = 0;
- index = autocorr_max(p, offset, &ccr, pitch_lag, SUBFRAME_LEN * 2, -1);
- ccr = av_clipl_int32((int64_t)ccr + (1 << 15)) >> 16;
+ index = autocorr_max(buf, offset, &ccr, pitch_lag, SUBFRAME_LEN * 2, -1);
+ ccr = av_sat_add32(ccr, 1 << 15) >> 16;
/* Compute target energy */
- tgt_eng = dot_product(buf, buf, SUBFRAME_LEN * 2, 1);
- *exc_eng = av_clipl_int32((int64_t)tgt_eng + (1 << 15)) >> 16;
+ tgt_eng = dot_product(buf, buf, SUBFRAME_LEN * 2);
+ *exc_eng = av_sat_add32(tgt_eng, 1 << 15) >> 16;
if (ccr <= 0)
return 0;
/* Compute best energy */
- best_eng = dot_product(buf - index, buf - index,
- SUBFRAME_LEN * 2, 1);
- best_eng = av_clipl_int32((int64_t)best_eng + (1 << 15)) >> 16;
+ best_eng = dot_product(buf - index, buf - index, SUBFRAME_LEN * 2);
+ best_eng = av_sat_add32(best_eng, 1 << 15) >> 16;
temp = best_eng * *exc_eng >> 3;
int16_t *vector_ptr = buf + PITCH_MAX;
/* Attenuate */
for (i = 0; i < lag; i++)
- vector_ptr[i - lag] = vector_ptr[i - lag] * 3 >> 2;
- av_memcpy_backptr((uint8_t*)vector_ptr, lag * sizeof(*vector_ptr),
- FRAME_LEN * sizeof(*vector_ptr));
- memcpy(out, vector_ptr, FRAME_LEN * sizeof(*vector_ptr));
+ out[i] = vector_ptr[i - lag] * 3 >> 2;
+ av_memcpy_backptr((uint8_t*)(out + lag), lag * sizeof(*out),
+ (FRAME_LEN - lag) * sizeof(*out));
} else { /* Unvoiced */
for (i = 0; i < FRAME_LEN; i++) {
*rseed = *rseed * 521 + 259;
num = energy;
denom = 0;
for (i = 0; i < SUBFRAME_LEN; i++) {
- int64_t temp = buf[i] >> 2;
- temp = av_clipl_int32(MUL64(temp, temp) << 1);
- denom = av_clipl_int32(denom + temp);
+ int temp = buf[i] >> 2;
+ temp *= temp;
+ denom = av_sat_dadd32(denom, temp);
}
if (num && denom) {
}
for (i = 0; i < SUBFRAME_LEN; i++) {
- p->pf_gain = ((p->pf_gain << 4) - p->pf_gain + gain + (1 << 3)) >> 4;
+ p->pf_gain = (15 * p->pf_gain + gain + (1 << 3)) >> 4;
buf[i] = av_clip_int16((buf[i] * (p->pf_gain + (p->pf_gain >> 4)) +
(1 << 10)) >> 11);
}
*
* @param p the context
* @param lpc quantized lpc coefficients
- * @param buf output buffer
+ * @param buf input buffer
+ * @param dst output buffer
*/
-static void formant_postfilter(G723_1_Context *p, int16_t *lpc, int16_t *buf)
+static void formant_postfilter(G723_1_Context *p, int16_t *lpc,
+ int16_t *buf, int16_t *dst)
{
- int16_t filter_coef[2][LPC_ORDER], *buf_ptr;
+ int16_t filter_coef[2][LPC_ORDER];
int filter_signal[LPC_ORDER + FRAME_LEN], *signal_ptr;
int i, j, k;
}
iir_filter(filter_coef[0], filter_coef[1], buf + i,
filter_signal + i);
+ lpc += LPC_ORDER;
}
memcpy(p->fir_mem, buf + FRAME_LEN, LPC_ORDER * sizeof(*p->fir_mem));
memcpy(p->iir_mem, filter_signal + FRAME_LEN,
LPC_ORDER * sizeof(*p->iir_mem));
- buf_ptr = buf + LPC_ORDER;
+ buf += LPC_ORDER;
signal_ptr = filter_signal + LPC_ORDER;
for (i = 0; i < SUBFRAMES; i++) {
- int16_t temp_vector[SUBFRAME_LEN];
- int16_t temp;
+ int temp;
int auto_corr[2];
int scale, energy;
/* Normalize */
- memcpy(temp_vector, buf_ptr, SUBFRAME_LEN * sizeof(*temp_vector));
- scale = scale_vector(temp_vector, SUBFRAME_LEN);
+ scale = scale_vector(dst, buf, SUBFRAME_LEN);
/* Compute auto correlation coefficients */
- auto_corr[0] = dot_product(temp_vector, temp_vector + 1,
- SUBFRAME_LEN - 1, 1);
- auto_corr[1] = dot_product(temp_vector, temp_vector, SUBFRAME_LEN, 1);
+ auto_corr[0] = dot_product(dst, dst + 1, SUBFRAME_LEN - 1);
+ auto_corr[1] = dot_product(dst, dst, SUBFRAME_LEN);
/* Compute reflection coefficient */
temp = auto_corr[1] >> 16;
if (temp) {
temp = (auto_corr[0] >> 2) / temp;
}
- p->reflection_coef = ((p->reflection_coef << 2) - p->reflection_coef +
- temp + 2) >> 2;
- temp = (p->reflection_coef * 0xffffc >> 3) & 0xfffc;
+ p->reflection_coef = (3 * p->reflection_coef + temp + 2) >> 2;
+ temp = -p->reflection_coef >> 1 & ~3;
/* Compensation filter */
for (j = 0; j < SUBFRAME_LEN; j++) {
- buf_ptr[j] = av_clipl_int32(signal_ptr[j] +
- ((signal_ptr[j - 1] >> 16) *
- temp << 1)) >> 16;
+ dst[j] = av_sat_dadd32(signal_ptr[j],
+ (signal_ptr[j - 1] >> 16) * temp) >> 16;
}
/* Compute normalized signal energy */
} else
energy = auto_corr[1] >> temp;
- gain_scale(p, buf_ptr, energy);
+ gain_scale(p, dst, energy);
- buf_ptr += SUBFRAME_LEN;
+ buf += SUBFRAME_LEN;
signal_ptr += SUBFRAME_LEN;
+ dst += SUBFRAME_LEN;
+ }
+}
+
+/* Map a decoded SID frame gain to an LSP quantizer index: three linear
+ * segments with step sizes 64, 128 and 256.  Note operator precedence:
+ * the subtraction binds before the shift, i.e. (gain - 8) << 7. */
+static int sid_gain_to_lsp_index(int gain)
+{
+    if (gain < 0x10)
+        return gain << 6;
+    else if (gain < 0x20)
+        return gain - 8 << 7;
+    else
+        return gain - 20 << 8;
+}
+
+/* 16-bit linear congruential generator used by comfort-noise generation;
+ * advances *state and returns a pseudo-random value scaled into [0, base). */
+static inline int cng_rand(int *state, int base)
+{
+    *state = (*state * 521 + 259) & 0xFFFF;
+    return (*state & 0x7FFF) * base >> 15;
+}
+
+/* Estimate a SID gain index when none was transmitted (untransmitted frame
+ * following an active frame): scale the running sid_gain by the current
+ * gain, then invert the segmented quadratic gain-coding curve with a
+ * fixed-point bisection plus a one-step neighbour refinement. */
+static int estimate_sid_gain(G723_1_Context *p)
+{
+    int i, shift, seg, seg2, t, val, val_add, x, y;
+
+    shift = 16 - p->cur_gain * 2;
+    if (shift > 0)
+        t = p->sid_gain << shift;
+    else
+        t = p->sid_gain >> -shift;
+    x = t * cng_filt[0] >> 16;
+
+    /* past the top segment boundary: saturate to the maximum 6-bit index */
+    if (x >= cng_bseg[2])
+        return 0x3F;
+
+    if (x >= cng_bseg[1]) {
+        shift = 4;
+        seg = 3;
+    } else {
+        shift = 3;
+        seg = (x >= cng_bseg[0]);
}
+    seg2 = FFMIN(seg, 3);
+
+    /* bisection: find val with (seg * 32 + (val << seg2))^2 closest to x */
+    val = 1 << shift;
+    val_add = val >> 1;
+    for (i = 0; i < shift; i++) {
+        t = seg * 32 + (val << seg2);
+        t *= t;
+        if (x >= t)
+            val += val_add;
+        else
+            val -= val_add;
+        val_add >>= 1;
+    }
+
+    /* refine against the neighbouring level and fold in the segment offset */
+    t = seg * 32 + (val << seg2);
+    y = t * t - x;
+    if (y <= 0) {
+        t = seg * 32 + (val + 1 << seg2);
+        t = t * t - x;
+        val = (seg2 - 1 << 4) + val;
+        if (t >= y)
+            val++;
+    } else {
+        t = seg * 32 + (val - 1 << seg2);
+        t = t * t - x;
+        val = (seg2 - 1 << 4) + val;
+        if (t >= y)
+            val--;
+    }
+
+    return val;
+}
+
+/* Generate comfort-noise excitation for SID/untransmitted frames:
+ * randomized pitch lags and adaptive-codebook gains plus 11 random
+ * fixed-codebook pulses per subframe pair, with the pulse amplitude fitted
+ * so the subframe energy approximates the target derived from p->cur_gain.
+ * Writes the excitation into p->audio and updates p->prev_excitation. */
+static void generate_noise(G723_1_Context *p)
+{
+    int i, j, idx, t;
+    int off[SUBFRAMES];
+    int signs[SUBFRAMES / 2 * 11], pos[SUBFRAMES / 2 * 11];
+    int tmp[SUBFRAME_LEN * 2];
+    int16_t *vector_ptr;
+    int64_t sum;
+    int b0, c, delta, x, shift;
+
+    p->pitch_lag[0] = cng_rand(&p->cng_random_seed, 21) + 123;
+    p->pitch_lag[1] = cng_rand(&p->cng_random_seed, 19) + 123;
+
+    for (i = 0; i < SUBFRAMES; i++) {
+        p->subframe[i].ad_cb_gain = cng_rand(&p->cng_random_seed, 50) + 1;
+        p->subframe[i].ad_cb_lag = cng_adaptive_cb_lag[i];
+    }
+
+    /* draw per-subframe grid offsets and 11 pulse signs (Q14: +/-1 << 14)
+     * for each subframe pair from the CNG random generator */
+    for (i = 0; i < SUBFRAMES / 2; i++) {
+        t = cng_rand(&p->cng_random_seed, 1 << 13);
+        off[i * 2] = t & 1;
+        off[i * 2 + 1] = ((t >> 1) & 1) + SUBFRAME_LEN;
+        t >>= 2;
+        for (j = 0; j < 11; j++) {
+            signs[i * 11 + j] = (t & 1) * 2 - 1 << 14;
+            t >>= 1;
+        }
+    }
+
+    /* sample pulse positions without replacement (partial Fisher-Yates
+     * over the even/odd grid selected by off[]) */
+    idx = 0;
+    for (i = 0; i < SUBFRAMES; i++) {
+        for (j = 0; j < SUBFRAME_LEN / 2; j++)
+            tmp[j] = j;
+        t = SUBFRAME_LEN / 2;
+        for (j = 0; j < pulses[i]; j++, idx++) {
+            int idx2 = cng_rand(&p->cng_random_seed, t);
+
+            pos[idx] = tmp[idx2] * 2 + off[i];
+            tmp[idx2] = tmp[--t];
+        }
+    }
+
+    /* build the excitation over the PITCH_MAX history, two subframes at
+     * a time */
+    vector_ptr = p->audio + LPC_ORDER;
+    memcpy(vector_ptr, p->prev_excitation,
+           PITCH_MAX * sizeof(*p->excitation));
+    for (i = 0; i < SUBFRAMES; i += 2) {
+        gen_acb_excitation(vector_ptr, vector_ptr,
+                           p->pitch_lag[i >> 1], &p->subframe[i],
+                           p->cur_rate);
+        gen_acb_excitation(vector_ptr + SUBFRAME_LEN,
+                           vector_ptr + SUBFRAME_LEN,
+                           p->pitch_lag[i >> 1], &p->subframe[i + 1],
+                           p->cur_rate);
+
+        /* choose a scaling shift from the peak magnitude so the energy
+         * accumulation below stays in range */
+        t = 0;
+        for (j = 0; j < SUBFRAME_LEN * 2; j++)
+            t |= FFABS(vector_ptr[j]);
+        t = FFMIN(t, 0x7FFF);
+        if (!t) {
+            shift = 0;
+        } else {
+            shift = -10 + av_log2(t);
+            if (shift < -2)
+                shift = -2;
+        }
+        sum = 0;
+        if (shift < 0) {
+            for (j = 0; j < SUBFRAME_LEN * 2; j++) {
+                t = vector_ptr[j] << -shift;
+                sum += t * t;
+                tmp[j] = t;
+            }
+        } else {
+            for (j = 0; j < SUBFRAME_LEN * 2; j++) {
+                t = vector_ptr[j] >> shift;
+                sum += t * t;
+                tmp[j] = t;
+            }
+        }
+
+        /* b0: correlation of the scaled excitation with the signed pulse
+         * train, averaged over the 11 pulses */
+        b0 = 0;
+        for (j = 0; j < 11; j++)
+            b0 += tmp[pos[(i / 2) * 11 + j]] * signs[(i / 2) * 11 + j];
+        b0 = b0 * 2 * 2979LL + (1 << 29) >> 30; // approximated division by 11
+
+        c = p->cur_gain * (p->cur_gain * SUBFRAME_LEN >> 5);
+        if (shift * 2 + 3 >= 0)
+            c >>= shift * 2 + 3;
+        else
+            c <<= -(shift * 2 + 3);
+        c = (av_clipl_int32(sum << 1) - c) * 2979LL >> 15;
+
+        /* pulse amplitude x from the energy-matching quadratic;
+         * NOTE(review): c is the target-energy term derived from cur_gain —
+         * confirm the exact formulation against the G.723.1 spec */
+        delta = b0 * b0 * 2 - c;
+        if (delta <= 0) {
+            x = -b0;
+        } else {
+            delta = square_root(delta);
+            x = delta - b0;
+            t = delta + b0;
+            if (FFABS(t) < FFABS(x))
+                x = -t;
+        }
+        shift++;
+        if (shift < 0)
+            x >>= -shift;
+        else
+            x <<= shift;
+        x = av_clip(x, -10000, 10000);
+
+        for (j = 0; j < 11; j++) {
+            idx = (i / 2) * 11 + j;
+            vector_ptr[pos[idx]] = av_clip_int16(vector_ptr[pos[idx]] +
+                                                 (x * signs[idx] >> 15));
+        }
+
+        /* copy decoded data to serve as a history for the next decoded subframes */
+        memcpy(vector_ptr + PITCH_MAX, vector_ptr,
+               sizeof(*vector_ptr) * SUBFRAME_LEN * 2);
+        vector_ptr += SUBFRAME_LEN * 2;
+    }
+    /* Save the excitation for the next frame */
+    memcpy(p->prev_excitation, p->audio + LPC_ORDER + FRAME_LEN,
+           PITCH_MAX * sizeof(*p->excitation));
}
static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
G723_1_Context *p = avctx->priv_data;
+ AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
int dec_mode = buf[0] & 3;
int16_t cur_lsp[LPC_ORDER];
int16_t lpc[SUBFRAMES * LPC_ORDER];
int16_t acb_vector[SUBFRAME_LEN];
- int16_t *vector_ptr;
+ int16_t *out;
int bad_frame = 0, i, j, ret;
+ int16_t *audio = p->audio;
if (buf_size < frame_size[dec_mode]) {
if (buf_size)
p->cur_frame_type = UNTRANSMITTED_FRAME;
}
- p->frame.nb_samples = FRAME_LEN;
- if ((ret = avctx->get_buffer(avctx, &p->frame)) < 0) {
+ frame->nb_samples = FRAME_LEN;
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
+ out = (int16_t *)frame->data[0];
+
if (p->cur_frame_type == ACTIVE_FRAME) {
if (!bad_frame)
p->erased_frames = 0;
/* Generate the excitation for the frame */
memcpy(p->excitation, p->prev_excitation,
PITCH_MAX * sizeof(*p->excitation));
- vector_ptr = p->excitation + PITCH_MAX;
if (!p->erased_frames) {
+ int16_t *vector_ptr = p->excitation + PITCH_MAX;
+
/* Update interpolation gain memory */
p->interp_gain = fixed_cb_gain[(p->subframe[2].amp_index +
p->subframe[3].amp_index) >> 1];
for (i = 0; i < SUBFRAMES; i++) {
- gen_fcb_excitation(vector_ptr, p->subframe[i], p->cur_rate,
+ gen_fcb_excitation(vector_ptr, &p->subframe[i], p->cur_rate,
p->pitch_lag[i >> 1], i);
gen_acb_excitation(acb_vector, &p->excitation[SUBFRAME_LEN * i],
- p->pitch_lag[i >> 1], p->subframe[i],
+ p->pitch_lag[i >> 1], &p->subframe[i],
p->cur_rate);
/* Get the total excitation */
for (j = 0; j < SUBFRAME_LEN; j++) {
- vector_ptr[j] = av_clip_int16(vector_ptr[j] << 1);
- vector_ptr[j] = av_clip_int16(vector_ptr[j] +
- acb_vector[j]);
+ int v = av_clip_int16(vector_ptr[j] << 1);
+ vector_ptr[j] = av_clip_int16(v + acb_vector[j]);
}
vector_ptr += SUBFRAME_LEN;
}
vector_ptr = p->excitation + PITCH_MAX;
- /* Save the excitation */
- memcpy(p->audio, vector_ptr, FRAME_LEN * sizeof(*p->audio));
-
p->interp_index = comp_interp_index(p, p->pitch_lag[1],
&p->sid_gain, &p->cur_gain);
+ /* Peform pitch postfiltering */
if (p->postfilter) {
i = PITCH_MAX;
for (j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
comp_ppf_coeff(p, i, p->pitch_lag[j >> 1],
ppf + j, p->cur_rate);
- }
- /* Restore the original excitation */
- memcpy(p->excitation, p->prev_excitation,
- PITCH_MAX * sizeof(*p->excitation));
- memcpy(vector_ptr, p->audio, FRAME_LEN * sizeof(*vector_ptr));
-
- /* Peform pitch postfiltering */
- if (p->postfilter)
for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
ff_acelp_weighted_vector_sum(p->audio + LPC_ORDER + i,
vector_ptr + i,
ppf[j].sc_gain,
ppf[j].opt_gain,
1 << 14, 15, SUBFRAME_LEN);
+ } else {
+ audio = vector_ptr - LPC_ORDER;
+ }
+ /* Save the excitation for the next frame */
+ memcpy(p->prev_excitation, p->excitation + FRAME_LEN,
+ PITCH_MAX * sizeof(*p->excitation));
} else {
p->interp_gain = (p->interp_gain * 3 + 2) >> 2;
if (p->erased_frames == 3) {
/* Mute output */
memset(p->excitation, 0,
(FRAME_LEN + PITCH_MAX) * sizeof(*p->excitation));
- memset(p->frame.data[0], 0,
+ memset(p->prev_excitation, 0,
+ PITCH_MAX * sizeof(*p->excitation));
+ memset(frame->data[0], 0,
(FRAME_LEN + LPC_ORDER) * sizeof(int16_t));
} else {
+ int16_t *buf = p->audio + LPC_ORDER;
+
/* Regenerate frame */
- residual_interp(p->excitation, p->audio + LPC_ORDER, p->interp_index,
+ residual_interp(p->excitation, buf, p->interp_index,
p->interp_gain, &p->random_seed);
+
+ /* Save the excitation for the next frame */
+ memcpy(p->prev_excitation, buf + (FRAME_LEN - PITCH_MAX),
+ PITCH_MAX * sizeof(*p->excitation));
}
}
- /* Save the excitation for the next frame */
- memcpy(p->prev_excitation, p->excitation + FRAME_LEN,
- PITCH_MAX * sizeof(*p->excitation));
+ p->cng_random_seed = CNG_RANDOM_SEED;
} else {
- memset(p->frame.data[0], 0, FRAME_LEN * 2);
- av_log(avctx, AV_LOG_WARNING,
- "G.723.1: Comfort noise generation not supported yet\n");
+ if (p->cur_frame_type == SID_FRAME) {
+ p->sid_gain = sid_gain_to_lsp_index(p->subframe[0].amp_index);
+ inverse_quant(p->sid_lsp, p->prev_lsp, p->lsp_index, 0);
+ } else if (p->past_frame_type == ACTIVE_FRAME) {
+ p->sid_gain = estimate_sid_gain(p);
+ }
- *got_frame_ptr = 1;
- *(AVFrame *)data = p->frame;
- return frame_size[dec_mode];
+ if (p->past_frame_type == ACTIVE_FRAME)
+ p->cur_gain = p->sid_gain;
+ else
+ p->cur_gain = (p->cur_gain * 7 + p->sid_gain) >> 3;
+ generate_noise(p);
+ lsp_interpolate(lpc, p->sid_lsp, p->prev_lsp);
+ /* Save the lsp_vector for the next frame */
+ memcpy(p->prev_lsp, p->sid_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
}
p->past_frame_type = p->cur_frame_type;
memcpy(p->audio, p->synth_mem, LPC_ORDER * sizeof(*p->audio));
for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
ff_celp_lp_synthesis_filter(p->audio + i, &lpc[j * LPC_ORDER],
- p->audio + i, SUBFRAME_LEN, LPC_ORDER,
+ audio + i, SUBFRAME_LEN, LPC_ORDER,
0, 1, 1 << 12);
memcpy(p->synth_mem, p->audio + FRAME_LEN, LPC_ORDER * sizeof(*p->audio));
- if (p->postfilter)
- formant_postfilter(p, lpc, p->audio);
-
- memcpy(p->frame.data[0], p->audio + LPC_ORDER, FRAME_LEN * 2);
+ if (p->postfilter) {
+ formant_postfilter(p, lpc, p->audio, out);
+ } else { // if output is not postfiltered it should be scaled by 2
+ for (i = 0; i < FRAME_LEN; i++)
+ out[i] = av_clip_int16(p->audio[LPC_ORDER + i] << 1);
+ }
- *got_frame_ptr = 1;
- *(AVFrame *)data = p->frame;
+ *got_frame_ptr = 1;
return frame_size[dec_mode];
}
static const AVOption options[] = {
{ "postfilter", "postfilter on/off", OFFSET(postfilter), AV_OPT_TYPE_INT,
- { 1 }, 0, 1, AD },
+ { .i64 = 1 }, 0, 1, AD },
{ NULL }
};
AVCodec ff_g723_1_decoder = {
.name = "g723_1",
.type = AVMEDIA_TYPE_AUDIO,
- .id = CODEC_ID_G723_1,
+ .id = AV_CODEC_ID_G723_1,
.priv_data_size = sizeof(G723_1_Context),
.init = g723_1_decode_init,
.decode = g723_1_decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("G.723.1"),
- .capabilities = CODEC_CAP_SUBFRAMES,
+ .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
.priv_class = &g723_1dec_class,
};