X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fg726.c;h=62aeb797e8337dd5c3fef8ba3d20358b8be54c8d;hb=18f4fa251b0eb36392839f5bf6180f280dc04d8d;hp=b17d578d53084dc5e62f0964c00433d78d780efc;hpb=6e8d4a7afbf40c0eb4bd70a6e7724d22ce7a6239;p=ffmpeg

diff --git a/libavcodec/g726.c b/libavcodec/g726.c
index b17d578d530..62aeb797e83 100644
--- a/libavcodec/g726.c
+++ b/libavcodec/g726.c
@@ -22,14 +22,19 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 #include <limits.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
 #include "avcodec.h"
+#include "internal.h"
 #include "get_bits.h"
 #include "put_bits.h"
 
 /**
  * G.726 11bit float.
  * G.726 Standard uses rather odd 11bit floating point arithmentic for
- * numerous occasions. It's a mistery to me why they did it this way
+ * numerous occasions. It's a mystery to me why they did it this way
  * instead of simply using 32bit integer arithmetic.
  */
 typedef struct Float11 {
@@ -71,6 +76,7 @@ typedef struct G726Tables {
 } G726Tables;
 
 typedef struct G726Context {
+    AVClass *class;
     G726Tables tbls;    /**< static tables needed for computation */
 
     Float11 sr[2];      /**< prev. reconstructed samples */
@@ -266,11 +272,11 @@ static int16_t g726_decode(G726Context* c, int I)
     return av_clip(re_signal << 2, -0xffff, 0xffff);
 }
 
-static av_cold int g726_reset(G726Context* c, int index)
+static av_cold int g726_reset(G726Context *c)
 {
     int i;
 
-    c->tbls = G726Tables_pool[index];
+    c->tbls = G726Tables_pool[c->code_size - 2];
     for (i=0; i<2; i++) {
         c->sr[i].mant = 1<<5;
         c->pk[i] = 1;
@@ -302,33 +308,28 @@ static av_cold int g726_encode_init(AVCodecContext *avctx)
 {
     G726Context* c = avctx->priv_data;
 
-    if (avctx->sample_rate <= 0) {
-        av_log(avctx, AV_LOG_ERROR, "Samplerate is invalid\n");
-        return -1;
+    if (avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL &&
+        avctx->sample_rate != 8000) {
+        av_log(avctx, AV_LOG_ERROR, "Sample rates other than 8kHz are not "
+               "allowed when the compliance level is higher than unofficial. "
" + "Resample or reduce the compliance level.\n"); + return AVERROR(EINVAL); } + av_assert0(avctx->sample_rate > 0); if(avctx->channels != 1){ av_log(avctx, AV_LOG_ERROR, "Only mono is supported\n"); - return -1; - } - - if (avctx->bit_rate % avctx->sample_rate) { - av_log(avctx, AV_LOG_ERROR, "Bitrate - Samplerate combination is invalid\n"); return AVERROR(EINVAL); } - c->code_size = (avctx->bit_rate + avctx->sample_rate/2) / avctx->sample_rate; - if (c->code_size < 2 || c->code_size > 5) { - av_log(avctx, AV_LOG_ERROR, "Invalid number of bits %d\n", c->code_size); - return AVERROR(EINVAL); - } - avctx->bits_per_coded_sample = c->code_size; - g726_reset(c, c->code_size - 2); + if (avctx->bit_rate) + c->code_size = (avctx->bit_rate + avctx->sample_rate/2) / avctx->sample_rate; + + c->code_size = av_clip(c->code_size, 2, 5); + avctx->bit_rate = c->code_size * avctx->sample_rate; + avctx->bits_per_coded_sample = c->code_size; - avctx->coded_frame = avcodec_alloc_frame(); - if (!avctx->coded_frame) - return AVERROR(ENOMEM); - avctx->coded_frame->key_frame = 1; + g726_reset(c); /* select a frame size that will end on a byte boundary and have a size of approximately 1024 bytes */ @@ -337,69 +338,106 @@ static av_cold int g726_encode_init(AVCodecContext *avctx) return 0; } -static av_cold int g726_encode_close(AVCodecContext *avctx) -{ - av_freep(&avctx->coded_frame); - return 0; -} - -static int g726_encode_frame(AVCodecContext *avctx, - uint8_t *dst, int buf_size, void *data) +static int g726_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr) { G726Context *c = avctx->priv_data; - const int16_t *samples = data; + const int16_t *samples = (const int16_t *)frame->data[0]; PutBitContext pb; - int i; + int i, ret, out_size; - init_put_bits(&pb, dst, 1024*1024); + out_size = (frame->nb_samples * c->code_size + 7) / 8; + if ((ret = ff_alloc_packet(avpkt, out_size))) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n"); + return ret; + } + init_put_bits(&pb, avpkt->data, avpkt->size); - for (i = 0; i < avctx->frame_size; i++) + for (i = 0; i < frame->nb_samples; i++) put_bits(&pb, c->code_size, g726_encode(c, *samples++)); flush_put_bits(&pb); - return put_bits_count(&pb)>>3; + avpkt->size = out_size; + *got_packet_ptr = 1; + return 0; } + +#define OFFSET(x) offsetof(G726Context, x) +#define AE AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM +static const AVOption options[] = { + { "code_size", "Bits per code", OFFSET(code_size), AV_OPT_TYPE_INT, { .i64 = 4 }, 2, 5, AE }, + { NULL }, +}; + +static const AVClass class = { + .class_name = "g726", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + +static const AVCodecDefault defaults[] = { + { "b", "0" }, + { NULL }, +}; + +AVCodec ff_adpcm_g726_encoder = { + .name = "g726", + .long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM"), + .type = AVMEDIA_TYPE_AUDIO, + .id = AV_CODEC_ID_ADPCM_G726, + .priv_data_size = sizeof(G726Context), + .init = g726_encode_init, + .encode2 = g726_encode_frame, + .capabilities = CODEC_CAP_SMALL_LAST_FRAME, + .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, + AV_SAMPLE_FMT_NONE }, + .priv_class = &class, + .defaults = defaults, +}; #endif +#if CONFIG_ADPCM_G726_DECODER static av_cold int g726_decode_init(AVCodecContext *avctx) { G726Context* c = avctx->priv_data; - if(avctx->channels != 1){ - av_log(avctx, AV_LOG_ERROR, "Only mono is supported\n"); - return -1; - } + avctx->channels = 1; + 
+    avctx->channel_layout = AV_CH_LAYOUT_MONO;
 
     c->code_size = avctx->bits_per_coded_sample;
     if (c->code_size < 2 || c->code_size > 5) {
         av_log(avctx, AV_LOG_ERROR, "Invalid number of bits %d\n", c->code_size);
         return AVERROR(EINVAL);
     }
-    g726_reset(c, c->code_size - 2);
+    g726_reset(c);
 
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
     return 0;
 }
 
-static int g726_decode_frame(AVCodecContext *avctx,
-                             void *data, int *data_size,
-                             AVPacket *avpkt)
+static int g726_decode_frame(AVCodecContext *avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     G726Context *c = avctx->priv_data;
-    int16_t *samples = data;
+    int16_t *samples;
     GetBitContext gb;
-    int out_samples, out_size;
+    int out_samples, ret;
 
     out_samples = buf_size * 8 / c->code_size;
-    out_size = out_samples * av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+
+    /* get output buffer */
+    frame->nb_samples = out_samples;
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (int16_t *)frame->data[0];
 
     init_get_bits(&gb, buf, buf_size * 8);
 
@@ -409,31 +447,26 @@ static int g726_decode_frame(AVCodecContext *avctx,
     if (get_bits_left(&gb) > 0)
         av_log(avctx, AV_LOG_ERROR, "Frame invalidly split, missing parser?\n");
 
-    *data_size = out_size;
+    *got_frame_ptr = 1;
+
     return buf_size;
 }
 
-#if CONFIG_ADPCM_G726_ENCODER
-AVCodec ff_adpcm_g726_encoder = {
-    .name           = "g726",
-    .type           = AVMEDIA_TYPE_AUDIO,
-    .id             = CODEC_ID_ADPCM_G726,
-    .priv_data_size = sizeof(G726Context),
-    .init           = g726_encode_init,
-    .encode         = g726_encode_frame,
-    .close          = g726_encode_close,
-    .capabilities   = CODEC_CAP_SMALL_LAST_FRAME,
-    .sample_fmts    = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
-    .long_name      = NULL_IF_CONFIG_SMALL("G.726 ADPCM"),
-};
-#endif
+static void g726_decode_flush(AVCodecContext *avctx)
+{
+    G726Context *c = avctx->priv_data;
+    g726_reset(c);
+}
 
 AVCodec ff_adpcm_g726_decoder = {
     .name           = "g726",
+    .long_name      = NULL_IF_CONFIG_SMALL("G.726 ADPCM"),
     .type           = AVMEDIA_TYPE_AUDIO,
-    .id             = CODEC_ID_ADPCM_G726,
+    .id             = AV_CODEC_ID_ADPCM_G726,
    .priv_data_size = sizeof(G726Context),
     .init           = g726_decode_init,
     .decode         = g726_decode_frame,
-    .long_name      = NULL_IF_CONFIG_SMALL("G.726 ADPCM"),
+    .flush          = g726_decode_flush,
+    .capabilities   = CODEC_CAP_DR1,
 };
+#endif
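
A minimal standalone sketch (not part of the patch above) of the arithmetic the new encoder paths rely on: g726_encode_init() maps the requested bitrate to a 2..5 bit code size and writes the effective bitrate back, and g726_encode_frame() sizes the output packet by rounding the packed bits up to whole bytes. The sample-rate, bitrate and frame-length values below are illustrative only, and the clip() helper is a stand-in for av_clip().

#include <stdio.h>

/* Stand-in for av_clip(), only so this sketch is self-contained. */
static int clip(int v, int lo, int hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    int sample_rate = 8000;   /* G.726 nominally runs at 8 kHz            */
    int bit_rate    = 32000;  /* requested bitrate, e.g. 32 kbit/s        */
    int nb_samples  = 2048;   /* samples in one frame, illustrative value */

    /* bits per code: bitrate/samplerate rounded to nearest, clipped to 2..5 */
    int code_size = (bit_rate + sample_rate / 2) / sample_rate;
    code_size = clip(code_size, 2, 5);

    /* the encoder overwrites avctx->bit_rate with the rate it will really use */
    int effective_bit_rate = code_size * sample_rate;

    /* packet size in bytes: nb_samples codes of code_size bits, rounded up */
    int out_size = (nb_samples * code_size + 7) / 8;

    printf("code_size=%d bits, bit_rate=%d b/s, packet=%d bytes\n",
           code_size, effective_bit_rate, out_size);
    return 0;
}

With the new "code_size" option and the "b" default of 0, a caller that requests no bitrate gets the option's default of 4 bits per code (32 kbit/s at 8 kHz), while an explicit bitrate is rounded to the nearest supported code size instead of being rejected as before.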