* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <inttypes.h>
+
#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
/**
* @brief frame-specific decoder context for a single channel
*/
-typedef struct {
+typedef struct WmallChannelCtx {
int16_t prev_block_len; ///< length of the previous block
uint8_t transmit_coefs;
uint8_t num_subframes;
typedef struct WmallDecodeCtx {
/* generic decoder variables */
AVCodecContext *avctx;
- AVFrame frame;
- uint8_t frame_data[MAX_FRAMESIZE + FF_INPUT_BUFFER_PADDING_SIZE]; ///< compressed frame data
+ AVFrame *frame;
+ uint8_t frame_data[MAX_FRAMESIZE + AV_INPUT_BUFFER_PADDING_SIZE]; ///< compressed frame data
PutBitContext pb; ///< context for filling the frame_data buffer
/* frame size dependent frame information (set during initialization) */
uint32_t frame_num; ///< current frame number (not used for decoding)
GetBitContext gb; ///< bitstream reader context
int buf_bit_size; ///< buffer size in bits
- int16_t *samples_16[WMALL_MAX_CHANNELS]; ///< current samplebuffer pointer (16-bit)
- int32_t *samples_32[WMALL_MAX_CHANNELS]; ///< current samplebuffer pointer (24-bit)
+ int16_t *samples_16[WMALL_MAX_CHANNELS]; ///< current sample buffer pointer (16-bit)
+ int32_t *samples_32[WMALL_MAX_CHANNELS]; ///< current sample buffer pointer (24-bit)
uint8_t drc_gain; ///< gain for the DRC tool
int8_t skip_frame; ///< skip output step
int8_t parsed_all_subframes; ///< all subframes decoded?
int8_t mclms_order;
int8_t mclms_scaling;
- int16_t mclms_coeffs[128];
- int16_t mclms_coeffs_cur[4];
+ int16_t mclms_coeffs[WMALL_MAX_CHANNELS * WMALL_MAX_CHANNELS * 32];
+ int16_t mclms_coeffs_cur[WMALL_MAX_CHANNELS * WMALL_MAX_CHANNELS];
int16_t mclms_prevvalues[WMALL_MAX_CHANNELS * 2 * 32];
int16_t mclms_updates[WMALL_MAX_CHANNELS * 2 * 32];
int mclms_recent;
channel_mask = AV_RL32(edata_ptr + 2);
s->bits_per_sample = AV_RL16(edata_ptr);
if (s->bits_per_sample == 16)
- avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+ avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
else if (s->bits_per_sample == 24) {
- avctx->sample_fmt = AV_SAMPLE_FMT_S32;
- av_log_missing_feature(avctx, "bit-depth higher than 16", 0);
+ avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
+ avpriv_report_missing_feature(avctx, "Bit-depth higher than 16");
return AVERROR_PATCHWELCOME;
} else {
- av_log(avctx, AV_LOG_ERROR, "Unknown bit-depth: %d\n",
+ av_log(avctx, AV_LOG_ERROR, "Unknown bit-depth: %"PRIu8"\n",
s->bits_per_sample);
return AVERROR_INVALIDDATA;
}
/* dump the extradata */
for (i = 0; i < avctx->extradata_size; i++)
- av_dlog(avctx, "[%x] ", avctx->extradata[i]);
- av_dlog(avctx, "\n");
+ ff_dlog(avctx, "[%x] ", avctx->extradata[i]);
+ ff_dlog(avctx, "\n");
} else {
- av_log_ask_for_sample(avctx, "Unsupported extradata size\n");
- return AVERROR_INVALIDDATA;
+ avpriv_request_sample(avctx, "Unsupported extradata size");
+ return AVERROR_PATCHWELCOME;
}
/* generic init */
s->bV3RTM = s->decode_flags & 0x100;
if (s->max_num_subframes > MAX_SUBFRAMES) {
- av_log(avctx, AV_LOG_ERROR, "invalid number of subframes %i\n",
+ av_log(avctx, AV_LOG_ERROR, "invalid number of subframes %"PRIu8"\n",
s->max_num_subframes);
return AVERROR_INVALIDDATA;
}
}
if (s->num_channels < 0) {
- av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n",
+ av_log(avctx, AV_LOG_ERROR, "invalid number of channels %"PRId8"\n",
s->num_channels);
return AVERROR_INVALIDDATA;
} else if (s->num_channels > WMALL_MAX_CHANNELS) {
- av_log_ask_for_sample(avctx, "unsupported number of channels\n");
+ avpriv_request_sample(avctx,
+ "More than %d channels", WMALL_MAX_CHANNELS);
return AVERROR_PATCHWELCOME;
}
- avcodec_get_frame_defaults(&s->frame);
- avctx->coded_frame = &s->frame;
+ s->frame = av_frame_alloc();
+ if (!s->frame)
+ return AVERROR(ENOMEM);
+
avctx->channel_layout = channel_mask;
return 0;
}
++chan->num_subframes;
if (num_samples[c] > s->samples_per_frame) {
av_log(s->avctx, AV_LOG_ERROR, "broken frame: "
- "channel len(%d) > samples_per_frame(%d)\n",
+ "channel len(%"PRIu16") > samples_per_frame(%"PRIu16")\n",
num_samples[c], s->samples_per_frame);
return AVERROR_INVALIDDATA;
}
s->acfilter_scaling = get_bits(&s->gb, 4);
for (i = 0; i < s->acfilter_order; i++)
- s->acfilter_coeffs[i] = (s->acfilter_scaling ?
- get_bits(&s->gb, s->acfilter_scaling) : 0) + 1;
+ s->acfilter_coeffs[i] = get_bitsz(&s->gb, s->acfilter_scaling) + 1;
}
static void decode_mclms(WmallDecodeCtx *s)
if (1 << cbits < s->mclms_scaling + 1)
cbits++;
- send_coef_bits = (cbits ? get_bits(&s->gb, cbits) : 0) + 2;
+ send_coef_bits = get_bitsz(&s->gb, cbits) + 2;
for (i = 0; i < s->mclms_order * s->num_channels * s->num_channels; i++)
s->mclms_coeffs[i] = get_bits(&s->gb, send_coef_bits);
if ((1 << cbits) < s->cdlms[c][i].scaling + 1)
cbits++;
- s->cdlms[c][i].bitsend = get_bits(&s->gb, cbits) + 2;
+ s->cdlms[c][i].bitsend = get_bitsz(&s->gb, cbits) + 2;
shift_l = 32 - s->cdlms[c][i].bitsend;
shift_r = 32 - s->cdlms[c][i].scaling - 2;
for (j = 0; j < s->cdlms[c][i].coefsend; j++)
s->do_arith_coding = get_bits1(&s->gb);
if (s->do_arith_coding) {
- av_log_missing_feature(s->avctx, "arithmetic coding", 1);
+ avpriv_request_sample(s->avctx, "Arithmetic coding");
return AVERROR_PATCHWELCOME;
}
s->do_ac_filter = get_bits1(&s->gb);
} else if (!s->cdlms[0][0].order) {
av_log(s->avctx, AV_LOG_DEBUG,
"Waiting for seekable tile\n");
- s->frame.nb_samples = 0;
+ av_frame_unref(s->frame);
return -1;
}
s->do_lpc = get_bits1(&s->gb);
if (s->do_lpc) {
decode_lpc(s);
- av_log_ask_for_sample(s->avctx, "Inverse LPC filter not "
- "implemented. Expect wrong output.\n");
+ avpriv_request_sample(s->avctx, "Expect wrong output since "
+ "inverse LPC filter");
}
} else
s->do_lpc = 0;
"Invalid number of padding bits in raw PCM tile\n");
return AVERROR_INVALIDDATA;
}
- av_dlog(s->avctx, "RAWPCM %d bits per sample. "
+ ff_dlog(s->avctx, "RAWPCM %d bits per sample. "
"total %d bits, remain=%d\n", bits,
bits * s->num_channels * subframe_len, get_bits_count(&s->gb));
for (i = 0; i < s->num_channels; i++)
for (j = 0; j < subframe_len; j++) {
if (s->bits_per_sample == 16) {
- *s->samples_16[c] = (int16_t) s->channel_residues[c][j] << padding_zeroes;
- s->samples_16[c] += s->num_channels;
+ *s->samples_16[c]++ = (int16_t) s->channel_residues[c][j] << padding_zeroes;
} else {
- *s->samples_32[c] = s->channel_residues[c][j] << padding_zeroes;
- s->samples_32[c] += s->num_channels;
+ *s->samples_32[c]++ = s->channel_residues[c][j] << padding_zeroes;
}
}
}
GetBitContext* gb = &s->gb;
int more_frames = 0, len = 0, i, ret;
- s->frame.nb_samples = s->samples_per_frame;
- if ((ret = s->avctx->get_buffer(s->avctx, &s->frame)) < 0) {
+ s->frame->nb_samples = s->samples_per_frame;
+ if ((ret = ff_get_buffer(s->avctx, s->frame, 0)) < 0) {
/* return an error if no frame could be decoded at all */
av_log(s->avctx, AV_LOG_ERROR,
"not enough space for the output samples\n");
return ret;
}
for (i = 0; i < s->num_channels; i++) {
- s->samples_16[i] = (int16_t *)s->frame.data[0] + i;
- s->samples_32[i] = (int32_t *)s->frame.data[0] + i;
+ s->samples_16[i] = (int16_t *)s->frame->extended_data[i];
+ s->samples_32[i] = (int32_t *)s->frame->extended_data[i];
}
/* get frame length */
/* usually true for the first frame */
if (get_bits1(gb)) {
skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
- av_dlog(s->avctx, "start skip: %i\n", skip);
+ ff_dlog(s->avctx, "start skip: %i\n", skip);
}
/* sometimes true for the last frame */
if (get_bits1(gb)) {
skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
- av_dlog(s->avctx, "end skip: %i\n", skip);
+ ff_dlog(s->avctx, "end skip: %i\n", skip);
}
}
}
}
- av_dlog(s->avctx, "Frame done\n");
+ ff_dlog(s->avctx, "Frame done\n");
if (s->skip_frame)
s->skip_frame = 0;
if (len != (get_bits_count(gb) - s->frame_offset) + 2) {
/* FIXME: not sure if this is always an error */
av_log(s->avctx, AV_LOG_ERROR,
- "frame[%i] would have to skip %i bits\n", s->frame_num,
+ "frame[%"PRIu32"] would have to skip %i bits\n",
+ s->frame_num,
len - (get_bits_count(gb) - s->frame_offset) - 1);
s->packet_loss = 1;
return 0;
buflen = (s->num_saved_bits + len + 8) >> 3;
if (len <= 0 || buflen > MAX_FRAMESIZE) {
- av_log_ask_for_sample(s->avctx, "input buffer too small\n");
+ avpriv_request_sample(s->avctx, "Too small input buffer");
s->packet_loss = 1;
return;
}
int buf_size = avpkt->size;
int num_bits_prev_frame, packet_sequence_number, spliced_packet;
- s->frame.nb_samples = 0;
+ s->frame->nb_samples = 0;
if (s->packet_done || s->packet_loss) {
s->packet_done = 0;
/* parse packet header */
init_get_bits(gb, buf, s->buf_bit_size);
packet_sequence_number = get_bits(gb, 4);
- skip_bits(gb, 1); // Skip seekable_frame_in_packet, currently ununused
+ skip_bits(gb, 1); // Skip seekable_frame_in_packet, currently unused
spliced_packet = get_bits1(gb);
if (spliced_packet)
- av_log_missing_feature(avctx, "Bitstream splicing", 1);
+ avpriv_request_sample(avctx, "Bitstream splicing");
/* get number of bits that need to be added to the previous frame */
num_bits_prev_frame = get_bits(gb, s->log2_frame_size);
if (!s->packet_loss &&
((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) {
s->packet_loss = 1;
- av_log(avctx, AV_LOG_ERROR, "Packet loss detected! seq %x vs %x\n",
+ av_log(avctx, AV_LOG_ERROR,
+ "Packet loss detected! seq %"PRIx8" vs %x\n",
s->packet_sequence_number, packet_sequence_number);
}
s->packet_sequence_number = packet_sequence_number;
if (num_bits_prev_frame < remaining_packet_bits && !s->packet_loss)
decode_frame(s);
} else if (s->num_saved_bits - s->frame_offset) {
- av_dlog(avctx, "ignoring %x previously saved bits\n",
+ ff_dlog(avctx, "ignoring %x previously saved bits\n",
s->num_saved_bits - s->frame_offset);
}
* to decode incomplete frames in the s->len_prefix == 0 case. */
s->num_saved_bits = 0;
s->packet_loss = 0;
+ init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
}
} else {
save_bits(s, gb, remaining_bits(s, gb), 0);
}
- *(AVFrame *)data = s->frame;
- *got_frame_ptr = s->frame.nb_samples > 0;
+ *got_frame_ptr = s->frame->nb_samples > 0;
+ av_frame_move_ref(data, s->frame);
+
s->packet_offset = get_bits_count(gb) & 7;
return (s->packet_loss) ? AVERROR_INVALIDDATA : get_bits_count(gb) >> 3;
s->frame_offset = 0;
s->next_packet_start = 0;
s->cdlms[0][0].order = 0;
- s->frame.nb_samples = 0;
+ s->frame->nb_samples = 0;
+ init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
+}
+
+static av_cold int decode_close(AVCodecContext *avctx)
+{
+ WmallDecodeCtx *s = avctx->priv_data;
+
+ /* Release the output AVFrame allocated in decode_init();
+ * av_frame_free() is NULL-safe and resets s->frame to NULL,
+ * so calling close after a failed init is harmless. */
+ av_frame_free(&s->frame);
+
+ return 0;
}
AVCodec ff_wmalossless_decoder = {
.name = "wmalossless",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Lossless"),
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_WMALOSSLESS,
.priv_data_size = sizeof(WmallDecodeCtx),
.init = decode_init,
+ /* frees the AVFrame allocated in decode_init() */
+ .close = decode_close,
.decode = decode_packet,
.flush = flush,
- .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
- .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Lossless"),
+ .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
+ /* planar formats matching the S16P/S32P selection made in decode_init() */
+ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_NONE },
};