X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fg722dec.c;h=07af0a082a5cbe1eefbdd7ef93806f59817a6c81;hb=ac12486714b48f9bd5d9167f90b77c936751d6ef;hp=50a224ba10702e621c67a32c845ae027cc1c3945;hpb=6fcbb0f553b8e45993e2746d3763adc2faec0781;p=ffmpeg

diff --git a/libavcodec/g722dec.c b/libavcodec/g722dec.c
index 50a224ba107..07af0a082a5 100644
--- a/libavcodec/g722dec.c
+++ b/libavcodec/g722dec.c
@@ -34,15 +34,18 @@
  * respectively of each byte are ignored.
  */
 
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+
 #include "avcodec.h"
-#include "get_bits.h"
+#include "bitstream.h"
 #include "g722.h"
-#include "libavutil/opt.h"
+#include "internal.h"
 
 #define OFFSET(x) offsetof(G722Context, x)
 #define AD AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM
 static const AVOption options[] = {
-    { "bits_per_codeword", "Bits per G722 codeword", OFFSET(bits_per_codeword), AV_OPT_TYPE_FLAGS, { 8 }, 6, 8, AD },
+    { "bits_per_codeword", "Bits per G722 codeword", OFFSET(bits_per_codeword), AV_OPT_TYPE_INT, { .i64 = 8 }, 6, 8, AD },
     { NULL }
 };
 
@@ -57,18 +60,15 @@ static av_cold int g722_decode_init(AVCodecContext * avctx)
 {
     G722Context *c = avctx->priv_data;
 
-    if (avctx->channels != 1) {
-        av_log(avctx, AV_LOG_ERROR, "Only mono tracks are allowed.\n");
-        return AVERROR_INVALIDDATA;
-    }
-    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+    avctx->channels       = 1;
+    avctx->channel_layout = AV_CH_LAYOUT_MONO;
+    avctx->sample_fmt     = AV_SAMPLE_FMT_S16;
 
     c->band[0].scale_factor = 8;
     c->band[1].scale_factor = 2;
     c->prev_samples_pos = 22;
 
-    avcodec_get_frame_defaults(&c->frame);
-    avctx->coded_frame = &c->frame;
+    ff_g722dsp_init(&c->dsp);
 
     return 0;
 }
@@ -80,54 +80,54 @@ static const int16_t low_inv_quant5[32] = {
      587,   473,   370,   276,   190,   110,    35,   -35
 };
 
-static const int16_t *low_inv_quants[3] = { ff_g722_low_inv_quant6,
-                                            low_inv_quant5,
-                                            ff_g722_low_inv_quant4 };
+static const int16_t * const low_inv_quants[3] = { ff_g722_low_inv_quant6,
+                                                   low_inv_quant5,
+                                                   ff_g722_low_inv_quant4 };
 
 static int g722_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
     G722Context *c = avctx->priv_data;
+    AVFrame *frame = data;
     int16_t *out_buf;
     int j, ret;
     const int skip = 8 - c->bits_per_codeword;
     const int16_t *quantizer_table = low_inv_quants[skip];
-    GetBitContext gb;
+    BitstreamContext bc;
 
     /* get output buffer */
-    c->frame.nb_samples = avpkt->size * 2;
-    if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+    frame->nb_samples = avpkt->size * 2;
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    out_buf = (int16_t *)c->frame.data[0];
+    out_buf = (int16_t *)frame->data[0];
 
-    init_get_bits(&gb, avpkt->data, avpkt->size * 8);
+    bitstream_init8(&bc, avpkt->data, avpkt->size);
 
     for (j = 0; j < avpkt->size; j++) {
         int ilow, ihigh, rlow, rhigh, dhigh;
-        int xout1, xout2;
+        int xout[2];
 
-        ihigh = get_bits(&gb, 2);
-        ilow = get_bits(&gb, 6 - skip);
-        skip_bits(&gb, skip);
+        ihigh = bitstream_read(&bc, 2);
+        ilow = bitstream_read(&bc, 6 - skip);
+        bitstream_skip(&bc, skip);
 
-        rlow = av_clip((c->band[0].scale_factor * quantizer_table[ilow] >> 10)
-                      + c->band[0].s_predictor, -16384, 16383);
+        rlow = av_clip_intp2((c->band[0].scale_factor * quantizer_table[ilow] >> 10)
+                             + c->band[0].s_predictor, 14);
 
         ff_g722_update_low_predictor(&c->band[0], ilow >> (2 - skip));
 
         dhigh = c->band[1].scale_factor * ff_g722_high_inv_quant[ihigh] >> 10;
-        rhigh = av_clip(dhigh + c->band[1].s_predictor, -16384, 16383);
+        rhigh = av_clip_intp2(dhigh + c->band[1].s_predictor, 14);
 
         ff_g722_update_high_predictor(&c->band[1], dhigh, ihigh);
 
         c->prev_samples[c->prev_samples_pos++] = rlow + rhigh;
         c->prev_samples[c->prev_samples_pos++] = rlow - rhigh;
-        ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24,
-                          &xout1, &xout2);
-        *out_buf++ = av_clip_int16(xout1 >> 12);
-        *out_buf++ = av_clip_int16(xout2 >> 12);
+        c->dsp.apply_qmf(c->prev_samples + c->prev_samples_pos - 24, xout);
+        *out_buf++ = av_clip_int16(xout[0] >> 11);
+        *out_buf++ = av_clip_int16(xout[1] >> 11);
         if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
             memmove(c->prev_samples, c->prev_samples + c->prev_samples_pos - 22,
                     22 * sizeof(c->prev_samples[0]));
@@ -135,20 +135,19 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
         }
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = c->frame;
+    *got_frame_ptr = 1;
 
     return avpkt->size;
 }
 
 AVCodec ff_adpcm_g722_decoder = {
     .name           = "g722",
+    .long_name      = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
     .type           = AVMEDIA_TYPE_AUDIO,
-    .id             = CODEC_ID_ADPCM_G722,
+    .id             = AV_CODEC_ID_ADPCM_G722,
     .priv_data_size = sizeof(G722Context),
     .init           = g722_decode_init,
     .decode         = g722_decode_frame,
-    .capabilities   = CODEC_CAP_DR1,
-    .long_name      = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
+    .capabilities   = AV_CODEC_CAP_DR1,
    .priv_class     = &g722_decoder_class,
 };
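
Note (not part of the patch): the decode loop above consumes one byte per codeword, MSB first: 2 bits for the high-band index, 6 - skip bits for the low-band index, then skip unused bits, where skip = 8 - bits_per_codeword. The following standalone C sketch illustrates that unpacking on a plain byte; the helper name split_codeword() is hypothetical and is not the decoder's bitstream-reader path.

/* Minimal sketch, assuming the MSB-first codeword layout read by
 * bitstream_read()/bitstream_skip() in the loop above. */
#include <stdint.h>

static void split_codeword(uint8_t byte, int bits_per_codeword,
                           int *ihigh, int *ilow)
{
    int skip = 8 - bits_per_codeword;                  /* 0, 1 or 2 ignored LSBs      */
    *ihigh = byte >> 6;                                /* top 2 bits: high-band index */
    *ilow  = (byte >> skip) & ((1 << (6 - skip)) - 1); /* next 6 - skip bits: low band */
}

The resulting ihigh and ilow would then index ff_g722_high_inv_quant[] and the selected low_inv_quants[skip] table, as the loop in the diff does.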