return voice_decision;
}
+/**
+ * C fallback for the DSPContext scalarproduct_int16 hook:
+ * returns sum over i of (v1[i] * v2[i]) >> shift.
+ *
+ * Each int16*int16 product is promoted to int, so the multiply itself
+ * cannot overflow; the per-term shift keeps the running sum in range.
+ * NOTE(review): right-shifting a negative product is
+ * implementation-defined in C — an arithmetic shift is assumed here,
+ * as elsewhere in this codebase.
+ */
+static int32_t scalarproduct_int16_c(const int16_t * v1, const int16_t * v2, int order, int shift)
+{
+ int res = 0;
+
+ while (order--)
+ res += (*v1++ * *v2++) >> shift;
+
+ return res;
+}
+
static av_cold int decoder_init(AVCodecContext * avctx)
{
G729Context* ctx = avctx->priv_data;
for(i=0; i<4; i++)
ctx->quant_energy[i] = -14336; // -14 in (5.10)
- avctx->dsp_mask= ~AV_CPU_FLAG_FORCE;
dsputil_init(&ctx->dsp, avctx);
+ ctx->dsp.scalarproduct_int16 = scalarproduct_int16_c;
return 0;
}
frame_erasure |= buf[i];
frame_erasure = !frame_erasure;
- init_get_bits(&gb, buf, buf_size);
+ init_get_bits(&gb, buf, 8*buf_size);
ma_predictor = get_bits(&gb, 1);
quantizer_1st = get_bits(&gb, VQ_1ST_BITS);
SUBFRAME_SIZE,
10,
1,
+ 0,
0x800))
/* Overflow occurred, downscale excitation signal... */
for (j = 0; j < 2 * SUBFRAME_SIZE + PITCH_DELAY_MAX + INTERPOL_LEN; j++)
SUBFRAME_SIZE,
10,
0,
+ 0,
0x800);
} else {
ff_celp_lp_synthesis_filter(
SUBFRAME_SIZE,
10,
0,
+ 0,
0x800);
}
/* Save data (without postfilter) for use in next subframe. */