X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;ds=sidebyside;f=libavcodec%2Fg722.c;h=257292de7f495fbddcd06bb28afb745befcad395;hb=b239526873dc81f9b66796ad4d9fe1cb93ec34d3;hp=8707d16719b9fdb4c6d01cf2869b5be54f56203f;hpb=58e37eafce10bbcf10b111cafb7ba8586c76ef4b;p=ffmpeg

diff --git a/libavcodec/g722.c b/libavcodec/g722.c
index 8707d16719b..257292de7f4 100644
--- a/libavcodec/g722.c
+++ b/libavcodec/g722.c
@@ -1,5 +1,5 @@
 /*
- * G.722 ADPCM audio decoder
+ * G.722 ADPCM audio encoder/decoder
  *
  * Copyright (c) CMU 1993 Computer Science, Speech Group
  *                        Chengxiang Lu and Alex Hauptmann
@@ -7,20 +7,20 @@
  * Copyright (c) 2009 Kenan Gillet
  * Copyright (c) 2010 Martin Storsjo
  *
- * This file is part of FFmpeg.
+ * This file is part of Libav.
  *
- * FFmpeg is free software; you can redistribute it and/or
+ * Libav is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
- * FFmpeg is distributed in the hope that it will be useful,
+ * Libav is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
+ * License along with Libav; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
@@ -43,6 +43,8 @@
 
 #define PREV_SAMPLES_BUF_SIZE 1024
 
+#define FREEZE_INTERVAL 128
+
 typedef struct {
     int16_t prev_samples[PREV_SAMPLES_BUF_SIZE]; ///< memory of past decoded samples
     int prev_samples_pos; ///< the number of values in prev_samples
@@ -61,6 +63,17 @@ typedef struct {
         int16_t log_factor;   ///< delayed 2-logarithmic quantizer factor
         int16_t scale_factor; ///< delayed quantizer scale factor
     } band[2];
+
+    struct TrellisNode {
+        struct G722Band state;
+        uint32_t ssd;
+        int path;
+    } *node_buf[2], **nodep_buf[2];
+
+    struct TrellisPath {
+        int value;
+        int prev;
+    } *paths[2];
 } G722Context;
 
 
@@ -85,6 +98,16 @@ static const int16_t low_inv_quant4[16] = {
     0, -2557, -1612, -1121, -786, -530, -323, -150,
     2557, 1612, 1121, 786, 530, 323, 150, 0
 };
+static const int16_t low_inv_quant6[64] = {
+    -17, -17, -17, -17, -3101, -2738, -2376, -2088,
+    -1873, -1689, -1535, -1399, -1279, -1170, -1072, -982,
+    -899, -822, -750, -682, -618, -558, -501, -447,
+    -396, -347, -300, -254, -211, -170, -130, -91,
+    3101, 2738, 2376, 2088, 1873, 1689, 1535, 1399,
+    1279, 1170, 1072, 982, 899, 822, 750, 682,
+    618, 558, 501, 447, 396, 347, 300, 254,
+    211, 170, 130, 91, 54, 17, -54, -17
+};
 
 /**
  * quadrature mirror filter (QMF) coefficients
@@ -193,7 +216,7 @@ static av_cold int g722_init(AVCodecContext * avctx)
         av_log(avctx, AV_LOG_ERROR, "Only mono tracks are allowed.\n");
         return AVERROR_INVALIDDATA;
     }
-    avctx->sample_fmt = SAMPLE_FMT_S16;
+    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
     switch (avctx->bits_per_coded_sample) {
     case 8:
@@ -216,25 +239,39 @@
     if (avctx->lowres)
         avctx->sample_rate /= 2;
 
+    if (avctx->trellis) {
+        int frontier = 1 << avctx->trellis;
+        int max_paths = frontier * FREEZE_INTERVAL;
+        int i;
+        for (i = 0; i < 2; i++) {
+            c->paths[i] = av_mallocz(max_paths * sizeof(**c->paths));
+            c->node_buf[i] = av_mallocz(2 * frontier * sizeof(**c->node_buf));
+            c->nodep_buf[i] = av_mallocz(2 * frontier * sizeof(**c->nodep_buf));
+        }
+    }
+
     return 0;
 }
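/*
 * Illustrative sketch, not part of the patch above: how the per-band trellis
 * buffers scale with avctx->trellis.  For a trellis depth t the encoder keeps
 * a "frontier" of 1 << t candidate states per band, two banks of frontier
 * nodes (hence 2 * frontier), and FREEZE_INTERVAL * frontier backtracking
 * entries.  The struct definitions below are simplified stand-ins for
 * TrellisNode/TrellisPath; note that the patch itself does not check the
 * av_mallocz() return values.
 */
#include <stdio.h>

#define SKETCH_FREEZE_INTERVAL 128

struct sketch_node { unsigned ssd; int path; };   /* the real node also holds a G722Band */
struct sketch_path { int value; int prev; };

int main(void)
{
    int trellis;
    for (trellis = 1; trellis <= 8; trellis++) {
        size_t frontier = (size_t)1 << trellis;
        size_t paths_sz = SKETCH_FREEZE_INTERVAL * frontier * sizeof(struct sketch_path);
        size_t nodes_sz = 2 * frontier * sizeof(struct sketch_node);
        size_t ptrs_sz  = 2 * frontier * sizeof(struct sketch_node *);
        printf("trellis=%d: paths %zu B, nodes %zu B, node pointers %zu B per band\n",
               trellis, paths_sz, nodes_sz, ptrs_sz);
    }
    return 0;
}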
 
+static av_cold int g722_close(AVCodecContext *avctx)
+{
+    G722Context *c = avctx->priv_data;
+    int i;
+    for (i = 0; i < 2; i++) {
+        av_freep(&c->paths[i]);
+        av_freep(&c->node_buf[i]);
+        av_freep(&c->nodep_buf[i]);
+    }
+    return 0;
+}
+
+#if CONFIG_ADPCM_G722_DECODER
 static const int16_t low_inv_quant5[32] = {
     -35, -35, -2919, -2195, -1765, -1458, -1219, -1023,
     -858, -714, -587, -473, -370, -276, -190, -110,
     2919, 2195, 1765, 1458, 1219, 1023, 858, 714,
     587, 473, 370, 276, 190, 110, 35, -35
 };
-static const int16_t low_inv_quant6[64] = {
-    -17, -17, -17, -17, -3101, -2738, -2376, -2088,
-    -1873, -1689, -1535, -1399, -1279, -1170, -1072, -982,
-    -899, -822, -750, -682, -618, -558, -501, -447,
-    -396, -347, -300, -254, -211, -170, -130, -91,
-    3101, 2738, 2376, 2088, 1873, 1689, 1535, 1399,
-    1279, 1170, 1072, 982, 899, 822, 750, 682,
-    618, 558, 501, 447, 396, 347, 300, 254,
-    211, 170, 130, 91, 54, 17, -54, -17
-};
 
 static const int16_t *low_inv_quants[3] = { low_inv_quant6, low_inv_quant5, low_inv_quant4 };
 
@@ -291,7 +328,7 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
     return avpkt->size;
 }
 
-AVCodec adpcm_g722_decoder = {
+AVCodec ff_adpcm_g722_decoder = {
     .name = "g722",
     .type = AVMEDIA_TYPE_AUDIO,
     .id = CODEC_ID_ADPCM_G722,
@@ -301,4 +338,246 @@ AVCodec adpcm_g722_decoder = {
     .long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
     .max_lowres = 1,
 };
+#endif
+
+#if CONFIG_ADPCM_G722_ENCODER
+static const int16_t low_quant[33] = {
+    35, 72, 110, 150, 190, 233, 276, 323,
+    370, 422, 473, 530, 587, 650, 714, 786,
+    858, 940, 1023, 1121, 1219, 1339, 1458, 1612,
+    1765, 1980, 2195, 2557, 2919
+};
+
+static inline void filter_samples(G722Context *c, const int16_t *samples,
+                                  int *xlow, int *xhigh)
+{
+    int xout1, xout2;
+    c->prev_samples[c->prev_samples_pos++] = samples[0];
+    c->prev_samples[c->prev_samples_pos++] = samples[1];
+    apply_qmf(c->prev_samples + c->prev_samples_pos - 24, &xout1, &xout2);
+    *xlow = xout1 + xout2 >> 13;
+    *xhigh = xout1 - xout2 >> 13;
+    if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
+        memmove(c->prev_samples,
+                c->prev_samples + c->prev_samples_pos - 22,
+                22 * sizeof(c->prev_samples[0]));
+        c->prev_samples_pos = 22;
+    }
+}
+
+static inline int encode_high(const struct G722Band *state, int xhigh)
+{
+    int diff = av_clip_int16(xhigh - state->s_predictor);
+    int pred = 141 * state->scale_factor >> 8;
+    /* = diff >= 0 ? (diff < pred) + 2 : diff >= -pred */
+    return ((diff ^ (diff >> (sizeof(diff)*8-1))) < pred) + 2*(diff >= 0);
+}
+
+static inline int encode_low(const struct G722Band* state, int xlow)
+{
+    int diff = av_clip_int16(xlow - state->s_predictor);
+    /* = diff >= 0 ? diff : -(diff + 1) */
+    int limit = diff ^ (diff >> (sizeof(diff)*8-1));
+    int i = 0;
+    limit = limit + 1 << 10;
+    if (limit > low_quant[8] * state->scale_factor)
+        i = 9;
+    while (i < 29 && limit > low_quant[i] * state->scale_factor)
+        i++;
+    return (diff < 0 ? (i < 2 ? 63 : 33) : 61) - i;
+}
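/*
 * Illustrative sketch, not part of the patch: the branch-free expressions in
 * encode_high()/encode_low() rely on an arithmetic right shift of a negative
 * int producing all-one bits, so that  diff ^ (diff >> 31)  equals diff for
 * diff >= 0 and -(diff + 1) (i.e. ~diff) otherwise.  The check below compares
 * the patch's encode_high() formula against the readable form given in its
 * comment; the scale_factor values are arbitrary test inputs.
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

static int encode_high_readable(int diff, int pred)
{
    return diff >= 0 ? (diff < pred) + 2 : diff >= -pred;
}

static int encode_high_branchless(int diff, int pred)
{
    return ((diff ^ (diff >> (sizeof(diff) * CHAR_BIT - 1))) < pred) + 2 * (diff >= 0);
}

int main(void)
{
    static const int scale_factors[] = { 8, 64, 1152, 14848 };
    unsigned i;
    for (i = 0; i < sizeof(scale_factors) / sizeof(scale_factors[0]); i++) {
        int pred = 141 * scale_factors[i] >> 8;
        int diff;
        for (diff = -32768; diff <= 32767; diff++)
            assert(encode_high_readable(diff, pred) ==
                   encode_high_branchless(diff, pred));
    }
    printf("encode_high formulas agree for all int16 differences\n");
    return 0;
}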
+
+static int g722_encode_trellis(AVCodecContext *avctx,
+                               uint8_t *dst, int buf_size, void *data)
+{
+    G722Context *c = avctx->priv_data;
+    const int16_t *samples = data;
+    int i, j, k;
+    int frontier = 1 << avctx->trellis;
+    struct TrellisNode **nodes[2];
+    struct TrellisNode **nodes_next[2];
+    int pathn[2] = {0, 0}, froze = -1;
+    struct TrellisPath *p[2];
+
+    for (i = 0; i < 2; i++) {
+        nodes[i] = c->nodep_buf[i];
+        nodes_next[i] = c->nodep_buf[i] + frontier;
+        memset(c->nodep_buf[i], 0, 2 * frontier * sizeof(*c->nodep_buf));
+        nodes[i][0] = c->node_buf[i] + frontier;
+        nodes[i][0]->ssd = 0;
+        nodes[i][0]->path = 0;
+        nodes[i][0]->state = c->band[i];
+    }
+
+    for (i = 0; i < buf_size >> 1; i++) {
+        int xlow, xhigh;
+        struct TrellisNode *next[2];
+        int heap_pos[2] = {0, 0};
+
+        for (j = 0; j < 2; j++) {
+            next[j] = c->node_buf[j] + frontier*(i & 1);
+            memset(nodes_next[j], 0, frontier * sizeof(**nodes_next));
+        }
+
+        filter_samples(c, &samples[2*i], &xlow, &xhigh);
+
+        for (j = 0; j < frontier && nodes[0][j]; j++) {
+            /* Only k >> 2 affects the future adaptive state, therefore testing
+             * small steps that don't change k >> 2 is useless, the original
+             * value from encode_low is better than them. Since we step k
+             * in steps of 4, make sure range is a multiple of 4, so that
+             * we don't miss the original value from encode_low. */
+            int range = j < frontier/2 ? 4 : 0;
+            struct TrellisNode *cur_node = nodes[0][j];
+
+            int ilow = encode_low(&cur_node->state, xlow);
+
+            for (k = ilow - range; k <= ilow + range && k <= 63; k += 4) {
+                int decoded, dec_diff, pos;
+                uint32_t ssd;
+                struct TrellisNode* node;
+
+                if (k < 0)
+                    continue;
+
+                decoded = av_clip((cur_node->state.scale_factor *
+                                  low_inv_quant6[k] >> 10)
+                                + cur_node->state.s_predictor, -16384, 16383);
+                dec_diff = xlow - decoded;
+
+#define STORE_NODE(index, UPDATE, VALUE)\
+    ssd = cur_node->ssd + dec_diff*dec_diff;\
+    /* Check for wraparound. Using 64 bit ssd counters would \
+     * be simpler, but is slower on x86 32 bit. */\
+    if (ssd < cur_node->ssd)\
+        continue;\
+    if (heap_pos[index] < frontier) {\
+        pos = heap_pos[index]++;\
+        assert(pathn[index] < FREEZE_INTERVAL * frontier);\
+        node = nodes_next[index][pos] = next[index]++;\
+        node->path = pathn[index]++;\
+    } else {\
+        /* Try to replace one of the leaf nodes with the new \
+         * one, but not always testing the same leaf position */\
+        pos = (frontier>>1) + (heap_pos[index] & ((frontier>>1) - 1));\
+        if (ssd >= nodes_next[index][pos]->ssd)\
+            continue;\
+        heap_pos[index]++;\
+        node = nodes_next[index][pos];\
+    }\
+    node->ssd = ssd;\
+    node->state = cur_node->state;\
+    UPDATE;\
+    c->paths[index][node->path].value = VALUE;\
+    c->paths[index][node->path].prev = cur_node->path;\
+    /* Sift the newly inserted node up in the heap to restore \
+     * the heap property */\
+    while (pos > 0) {\
+        int parent = (pos - 1) >> 1;\
+        if (nodes_next[index][parent]->ssd <= ssd)\
+            break;\
+        FFSWAP(struct TrellisNode*, nodes_next[index][parent],\
+                                    nodes_next[index][pos]);\
+        pos = parent;\
+    }
+                STORE_NODE(0, update_low_predictor(&node->state, k >> 2), k);
+            }
+        }
+
+        for (j = 0; j < frontier && nodes[1][j]; j++) {
+            int ihigh;
+            struct TrellisNode *cur_node = nodes[1][j];
+
+            /* We don't try to get any initial guess for ihigh via
+             * encode_high - since there's only 4 possible values, test
+             * them all. Testing all of these gives a much, much larger
+             * gain than testing a larger range around ilow. */
+            for (ihigh = 0; ihigh < 4; ihigh++) {
+                int dhigh, decoded, dec_diff, pos;
+                uint32_t ssd;
+                struct TrellisNode* node;
+
+                dhigh = cur_node->state.scale_factor *
+                        high_inv_quant[ihigh] >> 10;
+                decoded = av_clip(dhigh + cur_node->state.s_predictor,
+                                  -16384, 16383);
+                dec_diff = xhigh - decoded;
+
+                STORE_NODE(1, update_high_predictor(&node->state, dhigh, ihigh), ihigh);
+            }
+        }
+
+        for (j = 0; j < 2; j++) {
+            FFSWAP(struct TrellisNode**, nodes[j], nodes_next[j]);
+
+            if (nodes[j][0]->ssd > (1 << 16)) {
+                for (k = 1; k < frontier && nodes[j][k]; k++)
+                    nodes[j][k]->ssd -= nodes[j][0]->ssd;
+                nodes[j][0]->ssd = 0;
+            }
+        }
+
+        if (i == froze + FREEZE_INTERVAL) {
+            p[0] = &c->paths[0][nodes[0][0]->path];
+            p[1] = &c->paths[1][nodes[1][0]->path];
+            for (j = i; j > froze; j--) {
+                dst[j] = p[1]->value << 6 | p[0]->value;
+                p[0] = &c->paths[0][p[0]->prev];
+                p[1] = &c->paths[1][p[1]->prev];
+            }
+            froze = i;
+            pathn[0] = pathn[1] = 0;
+            memset(nodes[0] + 1, 0, (frontier - 1)*sizeof(**nodes));
+            memset(nodes[1] + 1, 0, (frontier - 1)*sizeof(**nodes));
+        }
+    }
+
+    p[0] = &c->paths[0][nodes[0][0]->path];
+    p[1] = &c->paths[1][nodes[1][0]->path];
+    for (j = i; j > froze; j--) {
+        dst[j] = p[1]->value << 6 | p[0]->value;
+        p[0] = &c->paths[0][p[0]->prev];
+        p[1] = &c->paths[1][p[1]->prev];
+    }
+    c->band[0] = nodes[0][0]->state;
+    c->band[1] = nodes[1][0]->state;
+
+    return i;
+}
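/*
 * Illustrative sketch, not part of the patch: the STORE_NODE macro above keeps
 * the 'frontier' best candidates per band in a binary min-heap ordered by ssd.
 * A new candidate either fills an empty slot or replaces one of the leaf
 * positions (rotating through them via heap_pos), and is then sifted up until
 * its parent is no worse.  The helper below reproduces just that insertion
 * policy on a plain array of costs; all names here are made up for the sketch.
 */
#include <stdio.h>

#define SWAP_U32(a, b) do { unsigned tmp = (a); (a) = (b); (b) = tmp; } while (0)

/* Insert cost 'ssd' into heap[0..frontier-1]; *heap_pos counts insertions.
 * frontier must be a power of two, as it is in the encoder. */
static void heap_store(unsigned *heap, int frontier, int *heap_pos, unsigned ssd)
{
    int pos;
    if (*heap_pos < frontier) {
        pos = (*heap_pos)++;
        heap[pos] = ssd;
    } else {
        /* replace one of the leaves, but only if the new candidate is better */
        pos = (frontier >> 1) + (*heap_pos & ((frontier >> 1) - 1));
        if (ssd >= heap[pos])
            return;
        (*heap_pos)++;
        heap[pos] = ssd;
    }
    while (pos > 0) {                  /* sift up to restore the heap property */
        int parent = (pos - 1) >> 1;
        if (heap[parent] <= heap[pos])
            break;
        SWAP_U32(heap[parent], heap[pos]);
        pos = parent;
    }
}

int main(void)
{
    unsigned heap[8];
    int heap_pos = 0;
    static const unsigned costs[] = { 90, 40, 70, 10, 95, 20, 85, 60, 5, 100, 15 };
    unsigned i;
    for (i = 0; i < sizeof(costs) / sizeof(costs[0]); i++)
        heap_store(heap, 8, &heap_pos, costs[i]);
    printf("cheapest surviving candidate: %u\n", heap[0]); /* heap[0] is the minimum */
    return 0;
}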
+
+static int g722_encode_frame(AVCodecContext *avctx,
+                             uint8_t *dst, int buf_size, void *data)
+{
+    G722Context *c = avctx->priv_data;
+    const int16_t *samples = data;
+    int i;
+
+    if (avctx->trellis)
+        return g722_encode_trellis(avctx, dst, buf_size, data);
+
+    for (i = 0; i < buf_size >> 1; i++) {
+        int xlow, xhigh, ihigh, ilow;
+        filter_samples(c, &samples[2*i], &xlow, &xhigh);
+        ihigh = encode_high(&c->band[1], xhigh);
+        ilow = encode_low(&c->band[0], xlow);
+        update_high_predictor(&c->band[1], c->band[1].scale_factor *
+                              high_inv_quant[ihigh] >> 10, ihigh);
+        update_low_predictor(&c->band[0], ilow >> 2);
+        *dst++ = ihigh << 6 | ilow;
+    }
+    return i;
+}
+
+AVCodec ff_adpcm_g722_encoder = {
+    .name = "g722",
+    .type = AVMEDIA_TYPE_AUDIO,
+    .id = CODEC_ID_ADPCM_G722,
+    .priv_data_size = sizeof(G722Context),
+    .init = g722_init,
+    .close = g722_close,
+    .encode = g722_encode_frame,
+    .long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
+    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+};
+#endif
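/*
 * Illustrative sketch, not part of the patch: the byte layout produced by
 * g722_encode_frame() and g722_encode_trellis().  Each output byte carries the
 * 2-bit high-band index in bits 7..6 and the 6-bit low-band index in bits 5..0
 * (ihigh << 6 | ilow); the lower-rate G.722 modes simply drop one or two
 * low-band LSBs, which is what the decoder's bits_per_coded_sample switch
 * accounts for.  The indices below are arbitrary example values.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int ihigh = 2, ilow = 45;                     /* arbitrary example indices */
    uint8_t byte = (uint8_t)(ihigh << 6 | ilow);

    printf("packed byte: 0x%02x\n", byte);
    printf("high-band index: %d\n", byte >> 6);                  /* 2 bits */
    printf("low-band index (64 kbit/s): %d\n", byte & 0x3f);     /* 6 bits */
    printf("low-band index (56 kbit/s): %d\n", (byte & 0x3f) >> 1); /* 5 bits */
    printf("low-band index (48 kbit/s): %d\n", (byte & 0x3f) >> 2); /* 4 bits */
    return 0;
}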