#include "avcodec.h"
#include "put_bits.h"
-#include "dsputil.h"
+#include "internal.h"
#include "lpc.h"
#include "mathops.h"
+#include "alac_data.h"
#define DEFAULT_FRAME_SIZE 4096
-#define DEFAULT_SAMPLE_SIZE 16
-#define MAX_CHANNELS 8
#define ALAC_EXTRADATA_SIZE 36
#define ALAC_FRAME_HEADER_SIZE 55
#define ALAC_FRAME_FOOTER_SIZE 3
} AlacLPCContext;
typedef struct AlacEncodeContext {
+ int frame_size; /**< current frame size */
+ int verbatim; /**< current frame verbatim mode flag */
int compression_level;
int min_prediction_order;
int max_prediction_order;
int max_coded_frame_size;
int write_sample_size;
- int32_t sample_buf[MAX_CHANNELS][DEFAULT_FRAME_SIZE];
+ int extra_bits;
+ int32_t sample_buf[2][DEFAULT_FRAME_SIZE];
int32_t predictor_buf[DEFAULT_FRAME_SIZE];
int interlacing_shift;
int interlacing_leftweight;
PutBitContext pbctx;
RiceContext rc;
- AlacLPCContext lpc[MAX_CHANNELS];
+ AlacLPCContext lpc[2];
LPCContext lpc_ctx;
AVCodecContext *avctx;
} AlacEncodeContext;
-static void init_sample_buffers(AlacEncodeContext *s,
- const int16_t *input_samples)
+static void init_sample_buffers(AlacEncodeContext *s, int channels,
+ uint8_t const *samples[2])
{
int ch, i;
-
- for (ch = 0; ch < s->avctx->channels; ch++) {
- const int16_t *sptr = input_samples + ch;
- for (i = 0; i < s->avctx->frame_size; i++) {
- s->sample_buf[ch][i] = *sptr;
- sptr += s->avctx->channels;
- }
- }
+ int shift = av_get_bytes_per_sample(s->avctx->sample_fmt) * 8 -
+ s->avctx->bits_per_raw_sample;
+
+#define COPY_SAMPLES(type) do { \
+ for (ch = 0; ch < channels; ch++) { \
+ int32_t *bptr = s->sample_buf[ch]; \
+ const type *sptr = (const type *)samples[ch]; \
+ for (i = 0; i < s->frame_size; i++) \
+ bptr[i] = sptr[i] >> shift; \
+ } \
+ } while (0)
+
+ if (s->avctx->sample_fmt == AV_SAMPLE_FMT_S32P)
+ COPY_SAMPLES(int32_t);
+ else
+ COPY_SAMPLES(int16_t);
}
static void encode_scalar(AlacEncodeContext *s, int x,
}
}
-static void write_frame_header(AlacEncodeContext *s, int is_verbatim)
+static void write_element_header(AlacEncodeContext *s,
+ enum AlacRawDataBlockType element,
+ int instance)
{
- put_bits(&s->pbctx, 3, s->avctx->channels-1); // No. of channels -1
- put_bits(&s->pbctx, 16, 0); // Seems to be zero
- put_bits(&s->pbctx, 1, 1); // Sample count is in the header
- put_bits(&s->pbctx, 2, 0); // FIXME: Wasted bytes field
- put_bits(&s->pbctx, 1, is_verbatim); // Audio block is verbatim
- put_bits32(&s->pbctx, s->avctx->frame_size); // No. of samples in the frame
+ int encode_fs = 0;
+
+ if (s->frame_size < DEFAULT_FRAME_SIZE)
+ encode_fs = 1;
+
+ put_bits(&s->pbctx, 3, element); // element type
+ put_bits(&s->pbctx, 4, instance); // element instance
+ put_bits(&s->pbctx, 12, 0); // unused header bits
+ put_bits(&s->pbctx, 1, encode_fs); // Sample count is in the header
+ put_bits(&s->pbctx, 2, s->extra_bits >> 3); // Extra bytes (for 24-bit)
+ put_bits(&s->pbctx, 1, s->verbatim); // Audio block is verbatim
+ if (encode_fs)
+ put_bits32(&s->pbctx, s->frame_size); // No. of samples in the frame
}
static void calc_predictor_params(AlacEncodeContext *s, int ch)
s->lpc[ch].lpc_coeff[5] = -25;
} else {
opt_order = ff_lpc_calc_coefs(&s->lpc_ctx, s->sample_buf[ch],
- s->avctx->frame_size,
+ s->frame_size,
s->min_prediction_order,
s->max_prediction_order,
ALAC_MAX_LPC_PRECISION, coefs, shift,
/* calculate sum of 2nd order residual for each channel */
sum[0] = sum[1] = sum[2] = sum[3] = 0;
for (i = 2; i < n; i++) {
- lt = left_ch[i] - 2*left_ch[i-1] + left_ch[i-2];
- rt = right_ch[i] - 2*right_ch[i-1] + right_ch[i-2];
+ lt = left_ch[i] - 2 * left_ch[i - 1] + left_ch[i - 2];
+ rt = right_ch[i] - 2 * right_ch[i - 1] + right_ch[i - 2];
sum[2] += FFABS((lt + rt) >> 1);
sum[3] += FFABS(lt - rt);
sum[0] += FFABS(lt);
/* return mode with lowest score */
best = 0;
for (i = 1; i < 4; i++) {
- if (score[i] < score[best]) {
+ if (score[i] < score[best])
best = i;
- }
}
return best;
}
static void alac_stereo_decorrelation(AlacEncodeContext *s)
{
int32_t *left = s->sample_buf[0], *right = s->sample_buf[1];
- int i, mode, n = s->avctx->frame_size;
+ int i, mode, n = s->frame_size;
int32_t tmp;
mode = estimate_stereo_mode(left, right, n);
- switch(mode)
- {
- case ALAC_CHMODE_LEFT_RIGHT:
- s->interlacing_leftweight = 0;
- s->interlacing_shift = 0;
- break;
-
- case ALAC_CHMODE_LEFT_SIDE:
- for (i = 0; i < n; i++) {
- right[i] = left[i] - right[i];
- }
- s->interlacing_leftweight = 1;
- s->interlacing_shift = 0;
- break;
-
- case ALAC_CHMODE_RIGHT_SIDE:
- for (i = 0; i < n; i++) {
- tmp = right[i];
- right[i] = left[i] - right[i];
- left[i] = tmp + (right[i] >> 31);
- }
- s->interlacing_leftweight = 1;
- s->interlacing_shift = 31;
- break;
-
- default:
- for (i = 0; i < n; i++) {
- tmp = left[i];
- left[i] = (tmp + right[i]) >> 1;
- right[i] = tmp - right[i];
- }
- s->interlacing_leftweight = 1;
- s->interlacing_shift = 1;
- break;
+ switch (mode) {
+ case ALAC_CHMODE_LEFT_RIGHT:
+ s->interlacing_leftweight = 0;
+ s->interlacing_shift = 0;
+ break;
+ case ALAC_CHMODE_LEFT_SIDE:
+ for (i = 0; i < n; i++)
+ right[i] = left[i] - right[i];
+ s->interlacing_leftweight = 1;
+ s->interlacing_shift = 0;
+ break;
+ case ALAC_CHMODE_RIGHT_SIDE:
+ for (i = 0; i < n; i++) {
+ tmp = right[i];
+ right[i] = left[i] - right[i];
+ left[i] = tmp + (right[i] >> 31);
+ }
+ s->interlacing_leftweight = 1;
+ s->interlacing_shift = 31;
+ break;
+ default:
+ for (i = 0; i < n; i++) {
+ tmp = left[i];
+ left[i] = (tmp + right[i]) >> 1;
+ right[i] = tmp - right[i];
+ }
+ s->interlacing_leftweight = 1;
+ s->interlacing_shift = 1;
+ break;
}
}
if (lpc.lpc_order == 31) {
s->predictor_buf[0] = s->sample_buf[ch][0];
- for (i = 1; i < s->avctx->frame_size; i++)
- s->predictor_buf[i] = s->sample_buf[ch][i] - s->sample_buf[ch][i-1];
+ for (i = 1; i < s->frame_size; i++) {
+ s->predictor_buf[i] = s->sample_buf[ch][i ] -
+ s->sample_buf[ch][i - 1];
+ }
return;
}
residual[i] = samples[i] - samples[i-1];
// perform lpc on remaining samples
- for (i = lpc.lpc_order + 1; i < s->avctx->frame_size; i++) {
+ for (i = lpc.lpc_order + 1; i < s->frame_size; i++) {
int sum = 1 << (lpc.lpc_quant - 1), res_val, j;
for (j = 0; j < lpc.lpc_order; j++) {
sum += (samples[lpc.lpc_order-j] - samples[0]) *
- lpc.lpc_coeff[j];
+ lpc.lpc_coeff[j];
}
sum >>= lpc.lpc_quant;
s->write_sample_size);
res_val = residual[i];
- if(res_val) {
+ if (res_val) {
int index = lpc.lpc_order - 1;
int neg = (res_val < 0);
- while(index >= 0 && (neg ? (res_val < 0):(res_val > 0))) {
- int val = samples[0] - samples[lpc.lpc_order - index];
+ while (index >= 0 && (neg ? (res_val < 0) : (res_val > 0))) {
+ int val = samples[0] - samples[lpc.lpc_order - index];
int sign = (val ? FFSIGN(val) : 0);
- if(neg)
- sign*=-1;
+ if (neg)
+ sign *= -1;
lpc.lpc_coeff[index] -= sign;
val *= sign;
- res_val -= ((val >> lpc.lpc_quant) *
- (lpc.lpc_order - index));
+ res_val -= (val >> lpc.lpc_quant) * (lpc.lpc_order - index);
index--;
}
}
int sign_modifier = 0, i, k;
int32_t *samples = s->predictor_buf;
- for (i = 0; i < s->avctx->frame_size;) {
+ for (i = 0; i < s->frame_size;) {
int x;
k = av_log2((history >> 9) + 3);
- x = -2*(*samples)-1;
- x ^= (x>>31);
+        x = -2 * (*samples) - 1;
+        x ^= x >> 31;
samples++;
i++;
encode_scalar(s, x - sign_modifier, k, s->write_sample_size);
- history += x * s->rc.history_mult
- - ((history * s->rc.history_mult) >> 9);
+ history += x * s->rc.history_mult -
+ ((history * s->rc.history_mult) >> 9);
sign_modifier = 0;
if (x > 0xFFFF)
history = 0xFFFF;
- if (history < 128 && i < s->avctx->frame_size) {
+ if (history < 128 && i < s->frame_size) {
unsigned int block_size = 0;
k = 7 - av_log2(history) + ((history + 16) >> 6);
- while (*samples == 0 && i < s->avctx->frame_size) {
+ while (*samples == 0 && i < s->frame_size) {
samples++;
i++;
block_size++;
}
encode_scalar(s, block_size, k, 16);
-
sign_modifier = (block_size <= 0xFFFF);
-
history = 0;
}
}
}
-static void write_compressed_frame(AlacEncodeContext *s)
+static void write_element(AlacEncodeContext *s,
+ enum AlacRawDataBlockType element, int instance,
+ const uint8_t *samples0, const uint8_t *samples1)
{
- int i, j;
+ uint8_t const *samples[2] = { samples0, samples1 };
+ int i, j, channels;
int prediction_type = 0;
+ PutBitContext *pb = &s->pbctx;
- if (s->avctx->channels == 2)
- alac_stereo_decorrelation(s);
- put_bits(&s->pbctx, 8, s->interlacing_shift);
- put_bits(&s->pbctx, 8, s->interlacing_leftweight);
+ channels = element == TYPE_CPE ? 2 : 1;
+
+ if (s->verbatim) {
+ write_element_header(s, element, instance);
+ /* samples are channel-interleaved in verbatim mode */
+ if (s->avctx->sample_fmt == AV_SAMPLE_FMT_S32P) {
+ int shift = 32 - s->avctx->bits_per_raw_sample;
+ int32_t const *samples_s32[2] = { (const int32_t *)samples0,
+ (const int32_t *)samples1 };
+ for (i = 0; i < s->frame_size; i++)
+ for (j = 0; j < channels; j++)
+ put_sbits(pb, s->avctx->bits_per_raw_sample,
+ samples_s32[j][i] >> shift);
+ } else {
+ int16_t const *samples_s16[2] = { (const int16_t *)samples0,
+ (const int16_t *)samples1 };
+ for (i = 0; i < s->frame_size; i++)
+ for (j = 0; j < channels; j++)
+ put_sbits(pb, s->avctx->bits_per_raw_sample,
+ samples_s16[j][i]);
+ }
+ } else {
+ s->write_sample_size = s->avctx->bits_per_raw_sample - s->extra_bits +
+ channels - 1;
+
+ init_sample_buffers(s, channels, samples);
+ write_element_header(s, element, instance);
+
+ if (channels == 2)
+ alac_stereo_decorrelation(s);
+ else
+ s->interlacing_shift = s->interlacing_leftweight = 0;
+ put_bits(pb, 8, s->interlacing_shift);
+ put_bits(pb, 8, s->interlacing_leftweight);
+
+ for (i = 0; i < channels; i++) {
+ calc_predictor_params(s, i);
+
+ put_bits(pb, 4, prediction_type);
+ put_bits(pb, 4, s->lpc[i].lpc_quant);
+
+ put_bits(pb, 3, s->rc.rice_modifier);
+ put_bits(pb, 5, s->lpc[i].lpc_order);
+ // predictor coeff. table
+ for (j = 0; j < s->lpc[i].lpc_order; j++)
+ put_sbits(pb, 16, s->lpc[i].lpc_coeff[j]);
+ }
- for (i = 0; i < s->avctx->channels; i++) {
+ // write extra bits if needed
+ if (s->extra_bits) {
+ uint32_t mask = (1 << s->extra_bits) - 1;
+ for (i = 0; i < s->frame_size; i++) {
+ for (j = 0; j < channels; j++) {
+ put_bits(pb, s->extra_bits, s->sample_buf[j][i] & mask);
+ s->sample_buf[j][i] >>= s->extra_bits;
+ }
+ }
+ }
- calc_predictor_params(s, i);
+ // apply lpc and entropy coding to audio samples
+ for (i = 0; i < channels; i++) {
+ alac_linear_predictor(s, i);
- put_bits(&s->pbctx, 4, prediction_type);
- put_bits(&s->pbctx, 4, s->lpc[i].lpc_quant);
+ // TODO: determine when this will actually help. for now it's not used.
+ if (prediction_type == 15) {
+ // 2nd pass 1st order filter
+ for (j = s->frame_size - 1; j > 0; j--)
+ s->predictor_buf[j] -= s->predictor_buf[j - 1];
+ }
+ alac_entropy_coder(s);
+ }
+ }
+}
- put_bits(&s->pbctx, 3, s->rc.rice_modifier);
- put_bits(&s->pbctx, 5, s->lpc[i].lpc_order);
- // predictor coeff. table
- for (j = 0; j < s->lpc[i].lpc_order; j++) {
- put_sbits(&s->pbctx, 16, s->lpc[i].lpc_coeff[j]);
+static int write_frame(AlacEncodeContext *s, AVPacket *avpkt,
+ uint8_t * const *samples)
+{
+ PutBitContext *pb = &s->pbctx;
+ const enum AlacRawDataBlockType *ch_elements = ff_alac_channel_elements[s->avctx->channels - 1];
+ const uint8_t *ch_map = ff_alac_channel_layout_offsets[s->avctx->channels - 1];
+ int ch, element, sce, cpe;
+
+ init_put_bits(pb, avpkt->data, avpkt->size);
+
+ ch = element = sce = cpe = 0;
+ while (ch < s->avctx->channels) {
+ if (ch_elements[element] == TYPE_CPE) {
+ write_element(s, TYPE_CPE, cpe, samples[ch_map[ch]],
+ samples[ch_map[ch + 1]]);
+ cpe++;
+ ch += 2;
+ } else {
+ write_element(s, TYPE_SCE, sce, samples[ch_map[ch]], NULL);
+ sce++;
+ ch++;
}
+ element++;
}
- // apply lpc and entropy coding to audio samples
+ put_bits(pb, 3, TYPE_END);
+ flush_put_bits(pb);
- for (i = 0; i < s->avctx->channels; i++) {
- alac_linear_predictor(s, i);
+ return put_bits_count(pb) >> 3;
+}
- // TODO: determine when this will actually help. for now it's not used.
- if (prediction_type == 15) {
- // 2nd pass 1st order filter
- for (j = s->avctx->frame_size - 1; j > 0; j--)
- s->predictor_buf[j] -= s->predictor_buf[j - 1];
- }
+static av_always_inline int get_max_frame_size(int frame_size, int ch, int bps)
+{
+ int header_bits = 23 + 32 * (frame_size < DEFAULT_FRAME_SIZE);
+ return FFALIGN(header_bits + bps * ch * frame_size + 3, 8) / 8;
+}
- alac_entropy_coder(s);
- }
+static av_cold int alac_encode_close(AVCodecContext *avctx)
+{
+ AlacEncodeContext *s = avctx->priv_data;
+ ff_lpc_end(&s->lpc_ctx);
+ av_freep(&avctx->extradata);
+ avctx->extradata_size = 0;
+ av_freep(&avctx->coded_frame);
+ return 0;
}
static av_cold int alac_encode_init(AVCodecContext *avctx)
{
- AlacEncodeContext *s = avctx->priv_data;
+ AlacEncodeContext *s = avctx->priv_data;
int ret;
- uint8_t *alac_extradata = av_mallocz(ALAC_EXTRADATA_SIZE+1);
+ uint8_t *alac_extradata;
- avctx->frame_size = DEFAULT_FRAME_SIZE;
-
- if (avctx->sample_fmt != AV_SAMPLE_FMT_S16) {
- av_log(avctx, AV_LOG_ERROR, "only pcm_s16 input samples are supported\n");
- return -1;
- }
+ avctx->frame_size = s->frame_size = DEFAULT_FRAME_SIZE;
- /* TODO: Correctly implement multi-channel ALAC.
- It is similar to multi-channel AAC, in that it has a series of
- single-channel (SCE), channel-pair (CPE), and LFE elements. */
- if (avctx->channels > 2) {
- av_log(avctx, AV_LOG_ERROR, "only mono or stereo input is currently supported\n");
- return AVERROR_PATCHWELCOME;
+ if (avctx->sample_fmt == AV_SAMPLE_FMT_S32P) {
+ if (avctx->bits_per_raw_sample != 24)
+ av_log(avctx, AV_LOG_WARNING, "encoding as 24 bits-per-sample\n");
+ avctx->bits_per_raw_sample = 24;
+ } else {
+ avctx->bits_per_raw_sample = 16;
+ s->extra_bits = 0;
}
// Set default compression level
s->rc.k_modifier = 14;
s->rc.rice_modifier = 4;
- s->max_coded_frame_size = 8 + (avctx->frame_size * avctx->channels * DEFAULT_SAMPLE_SIZE >> 3);
+ s->max_coded_frame_size = get_max_frame_size(avctx->frame_size,
+ avctx->channels,
+ avctx->bits_per_raw_sample);
- s->write_sample_size = DEFAULT_SAMPLE_SIZE + avctx->channels - 1; // FIXME: consider wasted_bytes
+ avctx->extradata = av_mallocz(ALAC_EXTRADATA_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!avctx->extradata) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+ avctx->extradata_size = ALAC_EXTRADATA_SIZE;
+ alac_extradata = avctx->extradata;
AV_WB32(alac_extradata, ALAC_EXTRADATA_SIZE);
AV_WB32(alac_extradata+4, MKBETAG('a','l','a','c'));
AV_WB32(alac_extradata+12, avctx->frame_size);
- AV_WB8 (alac_extradata+17, DEFAULT_SAMPLE_SIZE);
+ AV_WB8 (alac_extradata+17, avctx->bits_per_raw_sample);
AV_WB8 (alac_extradata+21, avctx->channels);
AV_WB32(alac_extradata+24, s->max_coded_frame_size);
AV_WB32(alac_extradata+28,
- avctx->sample_rate * avctx->channels * DEFAULT_SAMPLE_SIZE); // average bitrate
+ avctx->sample_rate * avctx->channels * avctx->bits_per_raw_sample); // average bitrate
AV_WB32(alac_extradata+32, avctx->sample_rate);
// Set relevant extradata fields
avctx->min_prediction_order > ALAC_MAX_LPC_ORDER) {
av_log(avctx, AV_LOG_ERROR, "invalid min prediction order: %d\n",
avctx->min_prediction_order);
- return -1;
+ ret = AVERROR(EINVAL);
+ goto error;
}
s->min_prediction_order = avctx->min_prediction_order;
avctx->max_prediction_order > ALAC_MAX_LPC_ORDER) {
av_log(avctx, AV_LOG_ERROR, "invalid max prediction order: %d\n",
avctx->max_prediction_order);
- return -1;
+ ret = AVERROR(EINVAL);
+ goto error;
}
s->max_prediction_order = avctx->max_prediction_order;
av_log(avctx, AV_LOG_ERROR,
"invalid prediction orders: min=%d max=%d\n",
s->min_prediction_order, s->max_prediction_order);
- return -1;
+ ret = AVERROR(EINVAL);
+ goto error;
}
- avctx->extradata = alac_extradata;
- avctx->extradata_size = ALAC_EXTRADATA_SIZE;
-
avctx->coded_frame = avcodec_alloc_frame();
- avctx->coded_frame->key_frame = 1;
+ if (!avctx->coded_frame) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
s->avctx = avctx;
- ret = ff_lpc_init(&s->lpc_ctx, avctx->frame_size, s->max_prediction_order,
- FF_LPC_TYPE_LEVINSON);
+ if ((ret = ff_lpc_init(&s->lpc_ctx, avctx->frame_size,
+ s->max_prediction_order,
+ FF_LPC_TYPE_LEVINSON)) < 0) {
+ goto error;
+ }
+
+ return 0;
+error:
+ alac_encode_close(avctx);
return ret;
}
-static int alac_encode_frame(AVCodecContext *avctx, uint8_t *frame,
- int buf_size, void *data)
+static int alac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr)
{
AlacEncodeContext *s = avctx->priv_data;
- PutBitContext *pb = &s->pbctx;
- int i, out_bytes, verbatim_flag = 0;
+ int out_bytes, max_frame_size, ret;
- if (buf_size < 2 * s->max_coded_frame_size) {
- av_log(avctx, AV_LOG_ERROR, "buffer size is too small\n");
- return -1;
- }
+ s->frame_size = frame->nb_samples;
-verbatim:
- init_put_bits(pb, frame, buf_size);
+ if (frame->nb_samples < DEFAULT_FRAME_SIZE)
+ max_frame_size = get_max_frame_size(s->frame_size, avctx->channels,
+ avctx->bits_per_raw_sample);
+ else
+ max_frame_size = s->max_coded_frame_size;
- if (s->compression_level == 0 || verbatim_flag) {
- // Verbatim mode
- const int16_t *samples = data;
- write_frame_header(s, 1);
- for (i = 0; i < avctx->frame_size * avctx->channels; i++) {
- put_sbits(pb, 16, *samples++);
- }
+ if ((ret = ff_alloc_packet(avpkt, 2 * max_frame_size))) {
+ av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
+ return ret;
+ }
+
+ /* use verbatim mode for compression_level 0 */
+ if (s->compression_level) {
+ s->verbatim = 0;
+ s->extra_bits = avctx->bits_per_raw_sample - 16;
} else {
- init_sample_buffers(s, data);
- write_frame_header(s, 0);
- write_compressed_frame(s);
+ s->verbatim = 1;
+ s->extra_bits = 0;
}
- put_bits(pb, 3, 7);
- flush_put_bits(pb);
- out_bytes = put_bits_count(pb) >> 3;
+ out_bytes = write_frame(s, avpkt, frame->extended_data);
- if (out_bytes > s->max_coded_frame_size) {
+ if (out_bytes > max_frame_size) {
/* frame too large. use verbatim mode */
- if (verbatim_flag || s->compression_level == 0) {
- /* still too large. must be an error. */
- av_log(avctx, AV_LOG_ERROR, "error encoding frame\n");
- return -1;
- }
- verbatim_flag = 1;
- goto verbatim;
+ s->verbatim = 1;
+ s->extra_bits = 0;
+ out_bytes = write_frame(s, avpkt, frame->extended_data);
}
- return out_bytes;
-}
-
-static av_cold int alac_encode_close(AVCodecContext *avctx)
-{
- AlacEncodeContext *s = avctx->priv_data;
- ff_lpc_end(&s->lpc_ctx);
- av_freep(&avctx->extradata);
- avctx->extradata_size = 0;
- av_freep(&avctx->coded_frame);
+ avpkt->size = out_bytes;
+ *got_packet_ptr = 1;
return 0;
}
AVCodec ff_alac_encoder = {
.name = "alac",
.type = AVMEDIA_TYPE_AUDIO,
- .id = CODEC_ID_ALAC,
+ .id = AV_CODEC_ID_ALAC,
.priv_data_size = sizeof(AlacEncodeContext),
.init = alac_encode_init,
- .encode = alac_encode_frame,
+ .encode2 = alac_encode_frame,
.close = alac_encode_close,
- .capabilities = CODEC_CAP_SMALL_LAST_FRAME,
- .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
- AV_SAMPLE_FMT_NONE },
- .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"),
+ .capabilities = CODEC_CAP_SMALL_LAST_FRAME,
+ .channel_layouts = ff_alac_channel_layouts,
+ .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_NONE },
+ .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"),
};