X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Falacenc.c;h=d921fa124cb381f4ecf284fd04500c710d419cc1;hb=d1a91ebe4990001e0800ee9ac54ed2207e4f56ff;hp=d337e4023d586b84fcae43cd0bcfebd04b795f63;hpb=358078d9bb89d6266e274720eba8582ec7b1c6b7;p=ffmpeg diff --git a/libavcodec/alacenc.c b/libavcodec/alacenc.c index d337e4023d5..d921fa124cb 100644 --- a/libavcodec/alacenc.c +++ b/libavcodec/alacenc.c @@ -19,16 +19,16 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "libavutil/opt.h" + #include "avcodec.h" #include "put_bits.h" -#include "dsputil.h" #include "internal.h" #include "lpc.h" #include "mathops.h" +#include "alac_data.h" #define DEFAULT_FRAME_SIZE 4096 -#define DEFAULT_SAMPLE_SIZE 16 -#define MAX_CHANNELS 8 #define ALAC_EXTRADATA_SIZE 36 #define ALAC_FRAME_HEADER_SIZE 55 #define ALAC_FRAME_FOOTER_SIZE 3 @@ -59,6 +59,8 @@ typedef struct AlacLPCContext { } AlacLPCContext; typedef struct AlacEncodeContext { + const AVClass *class; + AVCodecContext *avctx; int frame_size; /**< current frame size */ int verbatim; /**< current frame verbatim mode flag */ int compression_level; @@ -66,30 +68,38 @@ typedef struct AlacEncodeContext { int max_prediction_order; int max_coded_frame_size; int write_sample_size; - int32_t sample_buf[MAX_CHANNELS][DEFAULT_FRAME_SIZE]; + int extra_bits; + int32_t sample_buf[2][DEFAULT_FRAME_SIZE]; int32_t predictor_buf[DEFAULT_FRAME_SIZE]; int interlacing_shift; int interlacing_leftweight; PutBitContext pbctx; RiceContext rc; - AlacLPCContext lpc[MAX_CHANNELS]; + AlacLPCContext lpc[2]; LPCContext lpc_ctx; - AVCodecContext *avctx; } AlacEncodeContext; -static void init_sample_buffers(AlacEncodeContext *s, - const int16_t *input_samples) +static void init_sample_buffers(AlacEncodeContext *s, int channels, + const uint8_t *samples[2]) { int ch, i; - - for (ch = 0; ch < s->avctx->channels; ch++) { - const int16_t *sptr = input_samples + ch; - for (i = 0; i < s->frame_size; i++) { - s->sample_buf[ch][i] = *sptr; - sptr += s->avctx->channels; - } - } + int shift = av_get_bytes_per_sample(s->avctx->sample_fmt) * 8 - + s->avctx->bits_per_raw_sample; + +#define COPY_SAMPLES(type) do { \ + for (ch = 0; ch < channels; ch++) { \ + int32_t *bptr = s->sample_buf[ch]; \ + const type *sptr = (const type *)samples[ch]; \ + for (i = 0; i < s->frame_size; i++) \ + bptr[i] = sptr[i] >> shift; \ + } \ + } while (0) + + if (s->avctx->sample_fmt == AV_SAMPLE_FMT_S32P) + COPY_SAMPLES(int32_t); + else + COPY_SAMPLES(int16_t); } static void encode_scalar(AlacEncodeContext *s, int x, @@ -120,17 +130,20 @@ static void encode_scalar(AlacEncodeContext *s, int x, } } -static void write_frame_header(AlacEncodeContext *s) +static void write_element_header(AlacEncodeContext *s, + enum AlacRawDataBlockType element, + int instance) { int encode_fs = 0; if (s->frame_size < DEFAULT_FRAME_SIZE) encode_fs = 1; - put_bits(&s->pbctx, 3, s->avctx->channels-1); // No. of channels -1 - put_bits(&s->pbctx, 16, 0); // Seems to be zero + put_bits(&s->pbctx, 3, element); // element type + put_bits(&s->pbctx, 4, instance); // element instance + put_bits(&s->pbctx, 12, 0); // unused header bits put_bits(&s->pbctx, 1, encode_fs); // Sample count is in the header - put_bits(&s->pbctx, 2, 0); // FIXME: Wasted bytes field + put_bits(&s->pbctx, 2, s->extra_bits >> 3); // Extra bytes (for 24-bit) put_bits(&s->pbctx, 1, s->verbatim); // Audio block is verbatim if (encode_fs) put_bits32(&s->pbctx, s->frame_size); // No. 
of samples in the frame @@ -347,29 +360,51 @@ static void alac_entropy_coder(AlacEncodeContext *s) } } -static int write_frame(AlacEncodeContext *s, AVPacket *avpkt, - const int16_t *samples) +static void write_element(AlacEncodeContext *s, + enum AlacRawDataBlockType element, int instance, + const uint8_t *samples0, const uint8_t *samples1) { - int i, j; + const uint8_t *samples[2] = { samples0, samples1 }; + int i, j, channels; int prediction_type = 0; PutBitContext *pb = &s->pbctx; - init_put_bits(pb, avpkt->data, avpkt->size); + channels = element == TYPE_CPE ? 2 : 1; if (s->verbatim) { - write_frame_header(s); - for (i = 0; i < s->frame_size * s->avctx->channels; i++) - put_sbits(pb, 16, *samples++); + write_element_header(s, element, instance); + /* samples are channel-interleaved in verbatim mode */ + if (s->avctx->sample_fmt == AV_SAMPLE_FMT_S32P) { + int shift = 32 - s->avctx->bits_per_raw_sample; + const int32_t *samples_s32[2] = { (const int32_t *)samples0, + (const int32_t *)samples1 }; + for (i = 0; i < s->frame_size; i++) + for (j = 0; j < channels; j++) + put_sbits(pb, s->avctx->bits_per_raw_sample, + samples_s32[j][i] >> shift); + } else { + const int16_t *samples_s16[2] = { (const int16_t *)samples0, + (const int16_t *)samples1 }; + for (i = 0; i < s->frame_size; i++) + for (j = 0; j < channels; j++) + put_sbits(pb, s->avctx->bits_per_raw_sample, + samples_s16[j][i]); + } } else { - init_sample_buffers(s, samples); - write_frame_header(s); + s->write_sample_size = s->avctx->bits_per_raw_sample - s->extra_bits + + channels - 1; + + init_sample_buffers(s, channels, samples); + write_element_header(s, element, instance); - if (s->avctx->channels == 2) + if (channels == 2) alac_stereo_decorrelation(s); + else + s->interlacing_shift = s->interlacing_leftweight = 0; put_bits(pb, 8, s->interlacing_shift); put_bits(pb, 8, s->interlacing_leftweight); - for (i = 0; i < s->avctx->channels; i++) { + for (i = 0; i < channels; i++) { calc_predictor_params(s, i); put_bits(pb, 4, prediction_type); @@ -382,9 +417,19 @@ static int write_frame(AlacEncodeContext *s, AVPacket *avpkt, put_sbits(pb, 16, s->lpc[i].lpc_coeff[j]); } - // apply lpc and entropy coding to audio samples + // write extra bits if needed + if (s->extra_bits) { + uint32_t mask = (1 << s->extra_bits) - 1; + for (i = 0; i < s->frame_size; i++) { + for (j = 0; j < channels; j++) { + put_bits(pb, s->extra_bits, s->sample_buf[j][i] & mask); + s->sample_buf[j][i] >>= s->extra_bits; + } + } + } - for (i = 0; i < s->avctx->channels; i++) { + // apply lpc and entropy coding to audio samples + for (i = 0; i < channels; i++) { alac_linear_predictor(s, i); // TODO: determine when this will actually help. for now it's not used. 
@@ -393,12 +438,39 @@ static int write_frame(AlacEncodeContext *s, AVPacket *avpkt, for (j = s->frame_size - 1; j > 0; j--) s->predictor_buf[j] -= s->predictor_buf[j - 1]; } - alac_entropy_coder(s); } } - put_bits(pb, 3, 7); +} + +static int write_frame(AlacEncodeContext *s, AVPacket *avpkt, + uint8_t * const *samples) +{ + PutBitContext *pb = &s->pbctx; + const enum AlacRawDataBlockType *ch_elements = ff_alac_channel_elements[s->avctx->channels - 1]; + const uint8_t *ch_map = ff_alac_channel_layout_offsets[s->avctx->channels - 1]; + int ch, element, sce, cpe; + + init_put_bits(pb, avpkt->data, avpkt->size); + + ch = element = sce = cpe = 0; + while (ch < s->avctx->channels) { + if (ch_elements[element] == TYPE_CPE) { + write_element(s, TYPE_CPE, cpe, samples[ch_map[ch]], + samples[ch_map[ch + 1]]); + cpe++; + ch += 2; + } else { + write_element(s, TYPE_SCE, sce, samples[ch_map[ch]], NULL); + sce++; + ch++; + } + element++; + } + + put_bits(pb, 3, TYPE_END); flush_put_bits(pb); + return put_bits_count(pb) >> 3; } @@ -414,7 +486,6 @@ static av_cold int alac_encode_close(AVCodecContext *avctx) ff_lpc_end(&s->lpc_ctx); av_freep(&avctx->extradata); avctx->extradata_size = 0; - av_freep(&avctx->coded_frame); return 0; } @@ -426,12 +497,13 @@ static av_cold int alac_encode_init(AVCodecContext *avctx) avctx->frame_size = s->frame_size = DEFAULT_FRAME_SIZE; - /* TODO: Correctly implement multi-channel ALAC. - It is similar to multi-channel AAC, in that it has a series of - single-channel (SCE), channel-pair (CPE), and LFE elements. */ - if (avctx->channels > 2) { - av_log(avctx, AV_LOG_ERROR, "only mono or stereo input is currently supported\n"); - return AVERROR_PATCHWELCOME; + if (avctx->sample_fmt == AV_SAMPLE_FMT_S32P) { + if (avctx->bits_per_raw_sample != 24) + av_log(avctx, AV_LOG_WARNING, "encoding as 24 bits-per-sample\n"); + avctx->bits_per_raw_sample = 24; + } else { + avctx->bits_per_raw_sample = 16; + s->extra_bits = 0; } // Set default compression level @@ -448,12 +520,9 @@ static av_cold int alac_encode_init(AVCodecContext *avctx) s->max_coded_frame_size = get_max_frame_size(avctx->frame_size, avctx->channels, - DEFAULT_SAMPLE_SIZE); - - // FIXME: consider wasted_bytes - s->write_sample_size = DEFAULT_SAMPLE_SIZE + avctx->channels - 1; + avctx->bits_per_raw_sample); - avctx->extradata = av_mallocz(ALAC_EXTRADATA_SIZE + FF_INPUT_BUFFER_PADDING_SIZE); + avctx->extradata = av_mallocz(ALAC_EXTRADATA_SIZE + AV_INPUT_BUFFER_PADDING_SIZE); if (!avctx->extradata) { ret = AVERROR(ENOMEM); goto error; @@ -464,11 +533,11 @@ static av_cold int alac_encode_init(AVCodecContext *avctx) AV_WB32(alac_extradata, ALAC_EXTRADATA_SIZE); AV_WB32(alac_extradata+4, MKBETAG('a','l','a','c')); AV_WB32(alac_extradata+12, avctx->frame_size); - AV_WB8 (alac_extradata+17, DEFAULT_SAMPLE_SIZE); + AV_WB8 (alac_extradata+17, avctx->bits_per_raw_sample); AV_WB8 (alac_extradata+21, avctx->channels); AV_WB32(alac_extradata+24, s->max_coded_frame_size); AV_WB32(alac_extradata+28, - avctx->sample_rate * avctx->channels * DEFAULT_SAMPLE_SIZE); // average bitrate + avctx->sample_rate * avctx->channels * avctx->bits_per_raw_sample); // average bitrate AV_WB32(alac_extradata+32, avctx->sample_rate); // Set relevant extradata fields @@ -478,7 +547,8 @@ static av_cold int alac_encode_init(AVCodecContext *avctx) AV_WB8(alac_extradata+20, s->rc.k_modifier); } - s->min_prediction_order = DEFAULT_MIN_PRED_ORDER; +#if FF_API_PRIVATE_OPT +FF_DISABLE_DEPRECATION_WARNINGS if (avctx->min_prediction_order >= 0) { if 
(avctx->min_prediction_order < MIN_LPC_ORDER || avctx->min_prediction_order > ALAC_MAX_LPC_ORDER) { @@ -491,7 +561,6 @@ static av_cold int alac_encode_init(AVCodecContext *avctx) s->min_prediction_order = avctx->min_prediction_order; } - s->max_prediction_order = DEFAULT_MAX_PRED_ORDER; if (avctx->max_prediction_order >= 0) { if (avctx->max_prediction_order < MIN_LPC_ORDER || avctx->max_prediction_order > ALAC_MAX_LPC_ORDER) { @@ -503,6 +572,8 @@ static av_cold int alac_encode_init(AVCodecContext *avctx) s->max_prediction_order = avctx->max_prediction_order; } +FF_ENABLE_DEPRECATION_WARNINGS +#endif if (s->max_prediction_order < s->min_prediction_order) { av_log(avctx, AV_LOG_ERROR, @@ -512,12 +583,6 @@ static av_cold int alac_encode_init(AVCodecContext *avctx) goto error; } - avctx->coded_frame = avcodec_alloc_frame(); - if (!avctx->coded_frame) { - ret = AVERROR(ENOMEM); - goto error; - } - s->avctx = avctx; if ((ret = ff_lpc_init(&s->lpc_ctx, avctx->frame_size, @@ -537,13 +602,12 @@ static int alac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, { AlacEncodeContext *s = avctx->priv_data; int out_bytes, max_frame_size, ret; - const int16_t *samples = (const int16_t *)frame->data[0]; s->frame_size = frame->nb_samples; if (frame->nb_samples < DEFAULT_FRAME_SIZE) max_frame_size = get_max_frame_size(s->frame_size, avctx->channels, - DEFAULT_SAMPLE_SIZE); + avctx->bits_per_raw_sample); else max_frame_size = s->max_coded_frame_size; @@ -553,14 +617,21 @@ static int alac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, } /* use verbatim mode for compression_level 0 */ - s->verbatim = !s->compression_level; + if (s->compression_level) { + s->verbatim = 0; + s->extra_bits = avctx->bits_per_raw_sample - 16; + } else { + s->verbatim = 1; + s->extra_bits = 0; + } - out_bytes = write_frame(s, avpkt, samples); + out_bytes = write_frame(s, avpkt, frame->extended_data); if (out_bytes > max_frame_size) { /* frame too large. 
use verbatim mode */ s->verbatim = 1; - out_bytes = write_frame(s, avpkt, samples); + s->extra_bits = 0; + out_bytes = write_frame(s, avpkt, frame->extended_data); } avpkt->size = out_bytes; @@ -568,16 +639,35 @@ static int alac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, return 0; } +#define OFFSET(x) offsetof(AlacEncodeContext, x) +#define AE AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM +static const AVOption options[] = { + { "min_prediction_order", NULL, OFFSET(min_prediction_order), AV_OPT_TYPE_INT, { .i64 = DEFAULT_MIN_PRED_ORDER }, MIN_LPC_ORDER, ALAC_MAX_LPC_ORDER, AE }, + { "max_prediction_order", NULL, OFFSET(max_prediction_order), AV_OPT_TYPE_INT, { .i64 = DEFAULT_MAX_PRED_ORDER }, MIN_LPC_ORDER, ALAC_MAX_LPC_ORDER, AE }, + + { NULL }, +}; + +static const AVClass alacenc_class = { + .class_name = "alacenc", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + AVCodec ff_alac_encoder = { .name = "alac", + .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"), .type = AVMEDIA_TYPE_AUDIO, .id = AV_CODEC_ID_ALAC, .priv_data_size = sizeof(AlacEncodeContext), + .priv_class = &alacenc_class, .init = alac_encode_init, .encode2 = alac_encode_frame, .close = alac_encode_close, - .capabilities = CODEC_CAP_SMALL_LAST_FRAME, - .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, + .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME, + .channel_layouts = ff_alac_channel_layouts, + .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32P, + AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_NONE }, - .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"), };
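
For readers tracing the 24-bit path added above: below is a minimal, standalone sketch (not part of the patch) of the "extra bits" split that write_element() performs when extra_bits is non-zero -- the low (bits_per_raw_sample - 16) bits of each sample are emitted verbatim and the shifted-down remainder is what goes through prediction and Rice coding. The sample values, buffer names and the reconstruction step are invented here for illustration; in the encoder the split is applied to the decorrelated sample_buf and the pieces are written with put_bits().

/* Standalone illustration of the extra_bits split used for 24-bit input
 * in the patch above.  Not encoder code: names and sample data are made
 * up; the real encoder operates on the decorrelated sample_buf and emits
 * the pieces with put_bits(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EXTRA_BITS 8  /* bits_per_raw_sample (24) - 16, as set in the patch */

int main(void)
{
    int32_t  samples[4] = { -8388608, -1234567, 0, 8388607 }; /* 24-bit range */
    int32_t  core[4];   /* high bits: what prediction/entropy coding would see */
    uint32_t extra[4];  /* low bits: written to the bitstream as-is */
    const uint32_t mask = (1u << EXTRA_BITS) - 1;

    for (int i = 0; i < 4; i++) {
        extra[i] = samples[i] & mask;         /* keep the low 8 bits */
        core[i]  = samples[i] >> EXTRA_BITS;  /* arithmetic shift, sign-extending
                                                 (the encoder relies on this too) */
    }

    /* decoder-side inverse: shift the coded part back up and OR in the raw
     * low bits -- the round trip is exact, i.e. the split is lossless */
    for (int i = 0; i < 4; i++) {
        int32_t rec = (int32_t)(((uint32_t)core[i] << EXTRA_BITS) | extra[i]);
        assert(rec == samples[i]);
        printf("%8d -> core %6d, extra 0x%02x -> %8d\n",
               (int)samples[i], (int)core[i], (unsigned)extra[i], (int)rec);
    }
    return 0;
}

Built with e.g. "cc -std=c99 extra_bits_demo.c" (file name assumed), it prints the lossless round trip for the four boundary values, which is the property the patch depends on when it strips the low bits before LPC and restores them at decode time.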