X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;ds=sidebyside;f=libavcodec%2Fg722enc.c;h=545825b057e5741bd065d0ca39280d3b2be06c7e;hb=89a13998a1b5074411dff5a461dce3837057b0b8;hp=1cb0070649254aacd9e650749318c8147485b4af;hpb=cf1a259ad6eb7ad80fce1f2c2b86fda846e401c2;p=ffmpeg

diff --git a/libavcodec/g722enc.c b/libavcodec/g722enc.c
index 1cb00706492..545825b057e 100644
--- a/libavcodec/g722enc.c
+++ b/libavcodec/g722enc.c
@@ -28,7 +28,9 @@
  */
 
 #include "avcodec.h"
+#include "internal.h"
 #include "g722.h"
+#include "libavutil/common.h"
 
 #define FREEZE_INTERVAL 128
 
@@ -41,9 +43,22 @@
 #define MIN_TRELLIS 0
 #define MAX_TRELLIS 16
 
+static av_cold int g722_encode_close(AVCodecContext *avctx)
+{
+    G722Context *c = avctx->priv_data;
+    int i;
+    for (i = 0; i < 2; i++) {
+        av_freep(&c->paths[i]);
+        av_freep(&c->node_buf[i]);
+        av_freep(&c->nodep_buf[i]);
+    }
+    return 0;
+}
+
 static av_cold int g722_encode_init(AVCodecContext * avctx)
 {
     G722Context *c = avctx->priv_data;
+    int ret;
 
     if (avctx->channels != 1) {
         av_log(avctx, AV_LOG_ERROR, "Only mono tracks are allowed.\n");
@@ -62,6 +77,10 @@ static av_cold int g722_encode_init(AVCodecContext * avctx)
             c->paths[i] = av_mallocz(max_paths * sizeof(**c->paths));
             c->node_buf[i] = av_mallocz(2 * frontier * sizeof(**c->node_buf));
             c->nodep_buf[i] = av_mallocz(2 * frontier * sizeof(**c->nodep_buf));
+            if (!c->paths[i] || !c->node_buf[i] || !c->nodep_buf[i]) {
+                ret = AVERROR(ENOMEM);
+                goto error;
+            }
         }
     }
 
@@ -87,6 +106,7 @@ static av_cold int g722_encode_init(AVCodecContext * avctx)
            a common packet size for VoIP applications */
         avctx->frame_size = 320;
     }
+    avctx->initial_padding = 22;
 
     if (avctx->trellis) {
         /* validate trellis */
@@ -99,19 +119,12 @@ static av_cold int g722_encode_init(AVCodecContext * avctx)
         }
     }
 
-    return 0;
-}
+    ff_g722dsp_init(&c->dsp);
 
-static av_cold int g722_encode_close(AVCodecContext *avctx)
-{
-    G722Context *c = avctx->priv_data;
-    int i;
-    for (i = 0; i < 2; i++) {
-        av_freep(&c->paths[i]);
-        av_freep(&c->node_buf[i]);
-        av_freep(&c->nodep_buf[i]);
-    }
     return 0;
+error:
+    g722_encode_close(avctx);
+    return ret;
 }
 
 static const int16_t low_quant[33] = {
@@ -124,12 +137,12 @@ static const int16_t low_quant[33] = {
 static inline void filter_samples(G722Context *c, const int16_t *samples,
                                   int *xlow, int *xhigh)
 {
-    int xout1, xout2;
+    int xout[2];
     c->prev_samples[c->prev_samples_pos++] = samples[0];
     c->prev_samples[c->prev_samples_pos++] = samples[1];
-    ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24, &xout1, &xout2);
-    *xlow = xout1 + xout2 >> 13;
-    *xhigh = xout1 - xout2 >> 13;
+    c->dsp.apply_qmf(c->prev_samples + c->prev_samples_pos - 24, xout);
+    *xlow = xout[0] + xout[1] >> 14;
+    *xhigh = xout[0] - xout[1] >> 14;
 
     if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
         memmove(c->prev_samples, c->prev_samples + c->prev_samples_pos - 22,
@@ -174,7 +187,7 @@ static void g722_encode_trellis(G722Context *c, int trellis,
     for (i = 0; i < 2; i++) {
         nodes[i] = c->nodep_buf[i];
         nodes_next[i] = c->nodep_buf[i] + frontier;
-        memset(c->nodep_buf[i], 0, 2 * frontier * sizeof(*c->nodep_buf));
+        memset(c->nodep_buf[i], 0, 2 * frontier * sizeof(*c->nodep_buf[i]));
         nodes[i][0] = c->node_buf[i] + frontier;
         nodes[i][0]->ssd = 0;
         nodes[i][0]->path = 0;
@@ -212,9 +225,9 @@ static void g722_encode_trellis(G722Context *c, int trellis,
             if (k < 0)
                 continue;
 
-            decoded = av_clip((cur_node->state.scale_factor *
+            decoded = av_clip_intp2((cur_node->state.scale_factor *
                               ff_g722_low_inv_quant6[k] >> 10)
-                              + cur_node->state.s_predictor, -16384, 16383);
+                              + cur_node->state.s_predictor, 14);
             dec_diff = xlow - decoded;
 
 #define STORE_NODE(index, UPDATE, VALUE)\
@@ -271,8 +284,7 @@ static void g722_encode_trellis(G722Context *c, int trellis,
             dhigh = cur_node->state.scale_factor *
                     ff_g722_high_inv_quant[ihigh] >> 10;
-            decoded = av_clip(dhigh + cur_node->state.s_predictor,
-                              -16384, 16383);
+            decoded = av_clip_intp2(dhigh + cur_node->state.s_predictor, 14);
             dec_diff = xhigh - decoded;
 
             STORE_NODE(1, ff_g722_update_high_predictor(&node->state, dhigh, ihigh), ihigh);
@@ -337,38 +349,48 @@ static void g722_encode_no_trellis(G722Context *c,
         encode_byte(c, dst++, &samples[i]);
 }
 
-static int g722_encode_frame(AVCodecContext *avctx,
-                             uint8_t *dst, int buf_size, void *data)
+static int g722_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+                             const AVFrame *frame, int *got_packet_ptr)
 {
     G722Context *c = avctx->priv_data;
-    const int16_t *samples = data;
-    int nb_samples;
+    const int16_t *samples = (const int16_t *)frame->data[0];
+    int nb_samples, out_size, ret;
+
+    out_size = (frame->nb_samples + 1) / 2;
+    if ((ret = ff_alloc_packet(avpkt, out_size))) {
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
+        return ret;
+    }
 
-    nb_samples = avctx->frame_size - (avctx->frame_size & 1);
+    nb_samples = frame->nb_samples - (frame->nb_samples & 1);
 
     if (avctx->trellis)
-        g722_encode_trellis(c, avctx->trellis, dst, nb_samples, samples);
+        g722_encode_trellis(c, avctx->trellis, avpkt->data, nb_samples, samples);
     else
-        g722_encode_no_trellis(c, dst, nb_samples, samples);
+        g722_encode_no_trellis(c, avpkt->data, nb_samples, samples);
 
     /* handle last frame with odd frame_size */
-    if (nb_samples < avctx->frame_size) {
+    if (nb_samples < frame->nb_samples) {
         int16_t last_samples[2] = { samples[nb_samples], samples[nb_samples] };
-        encode_byte(c, &dst[nb_samples >> 1], last_samples);
+        encode_byte(c, &avpkt->data[nb_samples >> 1], last_samples);
     }
 
-    return (avctx->frame_size + 1) >> 1;
+    if (frame->pts != AV_NOPTS_VALUE)
+        avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->initial_padding);
+    *got_packet_ptr = 1;
+    return 0;
 }
 
 AVCodec ff_adpcm_g722_encoder = {
     .name           = "g722",
+    .long_name      = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
     .type           = AVMEDIA_TYPE_AUDIO,
-    .id             = CODEC_ID_ADPCM_G722,
+    .id             = AV_CODEC_ID_ADPCM_G722,
     .priv_data_size = sizeof(G722Context),
     .init           = g722_encode_init,
     .close          = g722_encode_close,
-    .encode         = g722_encode_frame,
-    .capabilities   = CODEC_CAP_SMALL_LAST_FRAME,
-    .long_name      = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
-    .sample_fmts    = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+    .encode2        = g722_encode_frame,
+    .capabilities   = AV_CODEC_CAP_SMALL_LAST_FRAME,
+    .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+                                                     AV_SAMPLE_FMT_NONE },
 };