#include "avcodec.h"
#include "put_bits.h"
#include "dsputil.h"
+#include "internal.h"
#include "lpc.h"
#include "mathops.h"
// NOTE(review): this file is an unresolved unified-diff fragment ('+' = added
// line, '-' = removed line, unprefixed = context); it is not compilable C as-is.
// This struct appears elided: fields referenced later in the patch (pbctx,
// avctx, lpc[], rc, predictor_buf, interlacing_shift/leftweight,
// max_coded_frame_size) are not shown — TODO confirm against the full source.
typedef struct AlacEncodeContext {
int frame_size; /**< current frame size */
+ int verbatim; /**< current frame verbatim mode flag */
int compression_level;
int min_prediction_order;
int max_prediction_order;
// NOTE(review): doubled closing braces below are diff-elision artifacts; the
// typedef's trailing name ("} AlacEncodeContext;") is not visible here.
}
}
// Write the ALAC frame header bit fields for the current frame.
// NOTE(review): diff fragment — the '-' line is the old signature taking an
// explicit is_verbatim argument; the '+' lines switch to reading the new
// s->verbatim context flag instead, so the helper can be re-invoked without
// the caller re-threading the flag.
-static void write_frame_header(AlacEncodeContext *s, int is_verbatim)
+static void write_frame_header(AlacEncodeContext *s)
{
// encode_fs stays 0 here, so the optional 32-bit sample count below is not
// emitted; presumably context lines that set it for short frames are elided
// from this hunk — TODO confirm.
int encode_fs = 0;
put_bits(&s->pbctx, 16, 0); // Seems to be zero
put_bits(&s->pbctx, 1, encode_fs); // Sample count is in the header
put_bits(&s->pbctx, 2, 0); // FIXME: Wasted bytes field
- put_bits(&s->pbctx, 1, is_verbatim); // Audio block is verbatim
+ put_bits(&s->pbctx, 1, s->verbatim); // Audio block is verbatim
if (encode_fs)
put_bits32(&s->pbctx, s->frame_size); // No. of samples in the frame
}
// NOTE(review): the two stray closing braces below are diff-elision artifacts,
// not part of this function.
}
}
-static void write_compressed_frame(AlacEncodeContext *s)
+static int write_frame(AlacEncodeContext *s, AVPacket *avpkt,
+ const int16_t *samples)
{
int i, j;
int prediction_type = 0;
+ PutBitContext *pb = &s->pbctx;
- if (s->avctx->channels == 2)
- alac_stereo_decorrelation(s);
- put_bits(&s->pbctx, 8, s->interlacing_shift);
- put_bits(&s->pbctx, 8, s->interlacing_leftweight);
-
- for (i = 0; i < s->avctx->channels; i++) {
- calc_predictor_params(s, i);
+ init_put_bits(pb, avpkt->data, avpkt->size);
- put_bits(&s->pbctx, 4, prediction_type);
- put_bits(&s->pbctx, 4, s->lpc[i].lpc_quant);
+ if (s->verbatim) {
+ write_frame_header(s);
+ for (i = 0; i < s->frame_size * s->avctx->channels; i++)
+ put_sbits(pb, 16, *samples++);
+ } else {
+ init_sample_buffers(s, samples);
+ write_frame_header(s);
- put_bits(&s->pbctx, 3, s->rc.rice_modifier);
- put_bits(&s->pbctx, 5, s->lpc[i].lpc_order);
- // predictor coeff. table
- for (j = 0; j < s->lpc[i].lpc_order; j++)
- put_sbits(&s->pbctx, 16, s->lpc[i].lpc_coeff[j]);
- }
+ if (s->avctx->channels == 2)
+ alac_stereo_decorrelation(s);
+ put_bits(pb, 8, s->interlacing_shift);
+ put_bits(pb, 8, s->interlacing_leftweight);
- // apply lpc and entropy coding to audio samples
+ for (i = 0; i < s->avctx->channels; i++) {
+ calc_predictor_params(s, i);
- for (i = 0; i < s->avctx->channels; i++) {
- alac_linear_predictor(s, i);
+ put_bits(pb, 4, prediction_type);
+ put_bits(pb, 4, s->lpc[i].lpc_quant);
- // TODO: determine when this will actually help. for now it's not used.
- if (prediction_type == 15) {
- // 2nd pass 1st order filter
- for (j = s->frame_size - 1; j > 0; j--)
- s->predictor_buf[j] -= s->predictor_buf[j - 1];
+ put_bits(pb, 3, s->rc.rice_modifier);
+ put_bits(pb, 5, s->lpc[i].lpc_order);
+ // predictor coeff. table
+ for (j = 0; j < s->lpc[i].lpc_order; j++)
+ put_sbits(pb, 16, s->lpc[i].lpc_coeff[j]);
}
- alac_entropy_coder(s);
+ // apply lpc and entropy coding to audio samples
+
+ for (i = 0; i < s->avctx->channels; i++) {
+ alac_linear_predictor(s, i);
+
+ // TODO: determine when this will actually help. for now it's not used.
+ if (prediction_type == 15) {
+ // 2nd pass 1st order filter
+ for (j = s->frame_size - 1; j > 0; j--)
+ s->predictor_buf[j] -= s->predictor_buf[j - 1];
+ }
+
+ alac_entropy_coder(s);
+ }
}
+ put_bits(pb, 3, 7);
+ flush_put_bits(pb);
+ return put_bits_count(pb) >> 3;
}
// NOTE(review): only the signature and the trailing 'return ret;' of this
// helper are visible in this diff hunk; the body (which computes 'ret',
// presumably a worst-case encoded-frame byte size from sample count, channel
// count and bits per sample — TODO confirm) is elided.  Left untouched.
static av_always_inline int get_max_frame_size(int frame_size, int ch, int bps)
return ret;
}
// Encoder entry point, migrated from the old .encode callback ('-' lines) to
// the .encode2 / AVPacket API ('+' lines): the caller-supplied buffer +
// buf_size pair is replaced by an allocated AVPacket, and the input frame's
// samples/nb_samples replace avctx->frame_size + raw void *data.
// NOTE(review): diff fragment — the '-'/'+' markers below are unresolved; as C
// this does not compile until the patch is applied.
-static int alac_encode_frame(AVCodecContext *avctx, uint8_t *frame,
- int buf_size, void *data)
+static int alac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr)
{
AlacEncodeContext *s = avctx->priv_data;
- PutBitContext *pb = &s->pbctx;
- int i, out_bytes, verbatim_flag = 0;
- int max_frame_size;
+ int out_bytes, max_frame_size, ret;
+ const int16_t *samples = (const int16_t *)frame->data[0];
- s->frame_size = avctx->frame_size;
+ s->frame_size = frame->nb_samples;
if (avctx->frame_size < DEFAULT_FRAME_SIZE)
// NOTE(review): the next context line is cut mid-call by diff elision — the
// final argument(s) and closing paren of get_max_frame_size() are missing.
max_frame_size = get_max_frame_size(s->frame_size, avctx->channels,
else
max_frame_size = s->max_coded_frame_size;
// Old code rejected small caller buffers; new code allocates a packet of
// 2 * max_frame_size so even the verbatim fallback below always fits.
- if (buf_size < 2 * max_frame_size) {
- av_log(avctx, AV_LOG_ERROR, "buffer size is too small\n");
- return AVERROR(EINVAL);
+ if ((ret = ff_alloc_packet(avpkt, 2 * max_frame_size))) {
+ av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
+ return ret;
}
// The old verbatim retry used a label + goto with a local flag; the new code
// stores the mode in s->verbatim and simply calls write_frame() again.
-verbatim:
- init_put_bits(pb, frame, buf_size);
+ /* use verbatim mode for compression_level 0 */
+ s->verbatim = !s->compression_level;
- if (s->compression_level == 0 || verbatim_flag) {
- // Verbatim mode
- const int16_t *samples = data;
- write_frame_header(s, 1);
- for (i = 0; i < s->frame_size * avctx->channels; i++) {
- put_sbits(pb, 16, *samples++);
- }
- } else {
- init_sample_buffers(s, data);
- write_frame_header(s, 0);
- write_compressed_frame(s);
- }
-
- put_bits(pb, 3, 7);
- flush_put_bits(pb);
- out_bytes = put_bits_count(pb) >> 3;
+ out_bytes = write_frame(s, avpkt, samples);
if (out_bytes > max_frame_size) {
/* frame too large. use verbatim mode */
- if (verbatim_flag || s->compression_level == 0) {
- /* still too large. must be an error. */
- av_log(avctx, AV_LOG_ERROR, "error encoding frame\n");
- return AVERROR_BUG;
- }
- verbatim_flag = 1;
- goto verbatim;
+ s->verbatim = 1;
+ out_bytes = write_frame(s, avpkt, samples);
}
// encode2 convention: report success via return 0, actual size via
// avpkt->size, and signal a produced packet through *got_packet_ptr.
- return out_bytes;
+ avpkt->size = out_bytes;
+ *got_packet_ptr = 1;
+ return 0;
}
AVCodec ff_alac_encoder = {
.id = CODEC_ID_ALAC,
.priv_data_size = sizeof(AlacEncodeContext),
.init = alac_encode_init,
- .encode = alac_encode_frame,
+ .encode2 = alac_encode_frame,
.close = alac_encode_close,
.capabilities = CODEC_CAP_SMALL_LAST_FRAME,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,