2 * Wmapro compatible decoder
3 * Copyright (c) 2007 Baptiste Coudurier, Benjamin Larsson, Ulion
4 * Copyright (c) 2008 - 2011 Sascha Sommer, Benjamin Larsson
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * @brief wmapro decoder implementation
26 * Wmapro is an MDCT based codec comparable to the WMA standard or AAC.
27 * The decoding therefore consists of the following steps:
28 * - bitstream decoding
29 * - reconstruction of per-channel data
30 * - rescaling and inverse quantization
32 * - windowing and overlap-add
34 * The compressed wmapro bitstream is split into individual packets.
35 * Every such packet contains one or more wma frames.
36 * The compressed frames may have a variable length and frames may
37 * cross packet boundaries.
38 * Common to all wmapro frames is the number of samples that are stored in
40 * The number of samples and a few other decode flags are stored
41 * as extradata that has to be passed to the decoder.
43 * The wmapro frames themselves are again split into a variable number of
44 * subframes. Every subframe contains the data for 2^N time domain samples
45 * where N varies between 7 and 12.
47 * Example wmapro bitstream (in samples):
49 * || packet 0 || packet 1 || packet 2 packets
50 * ---------------------------------------------------
51 * || frame 0 || frame 1 || frame 2 || frames
52 * ---------------------------------------------------
53 * || | | || | | | || || subframes of channel 0
54 * ---------------------------------------------------
55 * || | | || | | | || || subframes of channel 1
56 * ---------------------------------------------------
58 * The frame layouts for the individual channels of a wma frame do not need
61 * However, if the offsets and lengths of several subframes of a frame are the
62 * same, the subframes of the channels can be grouped.
63 * Every group may then use special coding techniques like M/S stereo coding
64 * to improve the compression ratio. These channel transformations do not
65 * need to be applied to a whole subframe. Instead, they can also work on
66 * individual scale factor bands (see below).
67 * The coefficients that carry the audio signal in the frequency domain
68 * are transmitted as huffman-coded vectors with 4, 2 and 1 elements.
69 * In addition to that, the encoder can switch to a runlevel coding scheme
70 * by transmitting subframe_length / 128 zero coefficients.
72 * Before the audio signal can be converted to the time domain, the
73 * coefficients have to be rescaled and inverse quantized.
74 * A subframe is therefore split into several scale factor bands that get
75 * scaled individually.
76 * Scale factors are submitted for every frame but they might be shared
77 * between the subframes of a channel. Scale factors are initially DPCM-coded.
78 * Once scale factors are shared, the differences are transmitted as runlevel
80 * Every subframe length and offset combination in the frame layout shares a
81 * common quantization factor that can be adjusted for every channel by a
83 * After the inverse quantization, the coefficients get processed by an IMDCT.
84 * The resulting values are then windowed with a sine window and the first half
85 * of the values are added to the second half of the output from the previous
86 * subframe in order to reconstruct the output samples.
89 #include "libavutil/intfloat.h"
90 #include "libavutil/intreadwrite.h"
95 #include "wmaprodata.h"
99 #include "wma_common.h"
101 /** current decoder limitations */
102 #define WMAPRO_MAX_CHANNELS 8 ///< max number of handled channels
103 #define MAX_SUBFRAMES 32 ///< max number of subframes per channel
104 #define MAX_BANDS 29 ///< max number of scale factor bands
105 #define MAX_FRAMESIZE 32768 ///< maximum compressed frame size
107 #define WMAPRO_BLOCK_MIN_BITS 6 ///< log2 of min block size
108 #define WMAPRO_BLOCK_MAX_BITS 13 ///< log2 of max block size
109 #define WMAPRO_BLOCK_MAX_SIZE (1 << WMAPRO_BLOCK_MAX_BITS) ///< maximum block size
110 #define WMAPRO_BLOCK_SIZES (WMAPRO_BLOCK_MAX_BITS - WMAPRO_BLOCK_MIN_BITS + 1) ///< possible block sizes
114 #define SCALEVLCBITS 8 ///< number of bits read per scale factor VLC lookup
/** maximum number of VLC lookups needed to decode one symbol from each table
    (table max code length divided by the bits consumed per lookup) */
115 #define VEC4MAXDEPTH ((HUFF_VEC4_MAXBITS+VLCBITS-1)/VLCBITS)
116 #define VEC2MAXDEPTH ((HUFF_VEC2_MAXBITS+VLCBITS-1)/VLCBITS)
117 #define VEC1MAXDEPTH ((HUFF_VEC1_MAXBITS+VLCBITS-1)/VLCBITS)
118 #define SCALEMAXDEPTH ((HUFF_SCALE_MAXBITS+SCALEVLCBITS-1)/SCALEVLCBITS)
119 #define SCALERLMAXDEPTH ((HUFF_SCALE_RL_MAXBITS+VLCBITS-1)/VLCBITS)
/** shared static VLC tables, built once in decode_init() via INIT_VLC_STATIC */
121 static VLC sf_vlc; ///< scale factor DPCM vlc
122 static VLC sf_rl_vlc; ///< scale factor run length vlc
123 static VLC vec4_vlc; ///< 4 coefficients per symbol
124 static VLC vec2_vlc; ///< 2 coefficients per symbol
125 static VLC vec1_vlc; ///< 1 coefficient per symbol
126 static VLC coef_vlc[2]; ///< coefficient run length vlc codes
127 static float sin64[33]; ///< sine table for decorrelation (sin(i*pi/64), filled in decode_init)
130 * @brief frame specific decoder context for a single channel
133 int16_t prev_block_len; ///< length of the previous block
134 uint8_t transmit_coefs; ///< presumably: coefficients are present in the bitstream for this channel -- confirm against subframe decode
135 uint8_t num_subframes; ///< number of subframes for this channel in the current frame (filled by decode_tilehdr)
136 uint16_t subframe_len[MAX_SUBFRAMES] ///< subframe length in samples
137 uint16_t subframe_offset[MAX_SUBFRAMES] ///< subframe positions in the current frame
138 uint8_t cur_subframe; ///< current subframe number
139 uint16_t decoded_samples; ///< number of already processed samples
140 uint8_t grouped; ///< channel is part of a group
141 int quant_step; ///< quantization step for the current subframe
142 int8_t reuse_sf; ///< share scale factors between subframes
143 int8_t scale_factor_step; ///< scaling step for the current subframe
144 int max_scale_factor; ///< maximum scale factor for the current subframe
145 int saved_scale_factors[2][MAX_BANDS]; ///< resampled and (previously) transmitted scale factor values
146 int8_t scale_factor_idx; ///< index for the transmitted scale factor values (used for resampling)
147 int* scale_factors; ///< pointer to the scale factor values used for decoding
148 uint8_t table_idx; ///< index in sf_offsets for the scale factor reference block
149 float* coeffs; ///< pointer to the subframe decode buffer
150 uint16_t num_vec_coeffs; ///< number of vector coded coefficients
151 DECLARE_ALIGNED(32, float, out)[WMAPRO_BLOCK_MAX_SIZE + WMAPRO_BLOCK_MAX_SIZE / 2]; ///< output buffer
155 * @brief channel group for channel transformations
158 uint8_t num_channels; ///< number of channels in the group
159 int8_t transform; ///< transform on / off
160 int8_t transform_band[MAX_BANDS]; ///< controls if the transform is enabled for a certain band
161 float decorrelation_matrix[WMAPRO_MAX_CHANNELS*WMAPRO_MAX_CHANNELS]; ///< num_channels x num_channels decorrelation coefficients (see decode_decorrelation_matrix)
162 float* channel_data[WMAPRO_MAX_CHANNELS]; ///< transformation coefficients
166 * @brief main decoder context
168 typedef struct WMAProDecodeCtx {
169 /* generic decoder variables */
170 AVCodecContext* avctx; ///< codec context for av_log
171 AVFrame frame; ///< AVFrame for decoded output
172 DSPContext dsp; ///< accelerated DSP functions
173 uint8_t frame_data[MAX_FRAMESIZE +
174 FF_INPUT_BUFFER_PADDING_SIZE];///< compressed frame data
175 PutBitContext pb; ///< context for filling the frame_data buffer
176 FFTContext mdct_ctx[WMAPRO_BLOCK_SIZES]; ///< MDCT context per block size
177 DECLARE_ALIGNED(32, float, tmp)[WMAPRO_BLOCK_MAX_SIZE]; ///< IMDCT output buffer
178 float* windows[WMAPRO_BLOCK_SIZES]; ///< windows for the different block sizes
180 /* frame size dependent frame information (set during initialization) */
181 uint32_t decode_flags; ///< used compression features
182 uint8_t len_prefix; ///< frame is prefixed with its length
183 uint8_t dynamic_range_compression; ///< frame contains DRC data
184 uint8_t bits_per_sample; ///< integer audio sample size for the unscaled IMDCT output (used to scale to [-1.0, 1.0])
185 uint16_t samples_per_frame; ///< number of samples to output
186 uint16_t log2_frame_size; ///< log2 of the packet size, derived from block_align in decode_init
187 int8_t lfe_channel; ///< lfe channel index
188 uint8_t max_num_subframes; ///< maximum number of subframes per channel (1 << log2_max_num_subframes)
189 uint8_t subframe_len_bits; ///< number of bits used for the subframe length
190 uint8_t max_subframe_len_bit; ///< flag indicating that the subframe is of maximum size when the first subframe length bit is 1
191 uint16_t min_samples_per_subframe; ///< samples_per_frame / max_num_subframes
192 int8_t num_sfb[WMAPRO_BLOCK_SIZES]; ///< scale factor bands per block size
193 int16_t sfb_offsets[WMAPRO_BLOCK_SIZES][MAX_BANDS]; ///< scale factor band offsets (multiples of 4)
194 int8_t sf_offsets[WMAPRO_BLOCK_SIZES][WMAPRO_BLOCK_SIZES][MAX_BANDS]; ///< scale factor resample matrix
195 int16_t subwoofer_cutoffs[WMAPRO_BLOCK_SIZES]; ///< subwoofer cutoff values
197 /* packet decode state */
198 GetBitContext pgb; ///< bitstream reader context for the packet
199 int next_packet_start; ///< start offset of the next wma packet in the demuxer packet
200 uint8_t packet_offset; ///< frame offset in the packet
201 uint8_t packet_sequence_number; ///< current packet number
202 int num_saved_bits; ///< saved number of bits
203 int frame_offset; ///< frame offset in the bit reservoir
204 int subframe_offset; ///< subframe offset in the bit reservoir
205 uint8_t packet_loss; ///< set in case of bitstream error
206 uint8_t packet_done; ///< set when a packet is fully decoded
208 /* frame decode state */
209 uint32_t frame_num; ///< current frame number (not used for decoding)
210 GetBitContext gb; ///< bitstream reader context
211 int buf_bit_size; ///< buffer size in bits
212 uint8_t drc_gain; ///< gain for the DRC tool
213 int8_t skip_frame; ///< skip output step
214 int8_t parsed_all_subframes; ///< all subframes decoded?
216 /* subframe/block decode state */
217 int16_t subframe_len; ///< current subframe length
218 int8_t channels_for_cur_subframe; ///< number of channels that contain the subframe
219 int8_t channel_indexes_for_cur_subframe[WMAPRO_MAX_CHANNELS]; ///< channel indexes participating in the current subframe
220 int8_t num_bands; ///< number of scale factor bands
221 int8_t transmit_num_vec_coeffs; ///< number of vector coded coefficients is part of the bitstream
222 int16_t* cur_sfb_offsets; ///< sfb offsets for the current block
223 uint8_t table_idx; ///< index for the num_sfb, sfb_offsets, sf_offsets and subwoofer_cutoffs tables
224 int8_t esc_len; ///< length of escaped coefficients
226 uint8_t num_chgroups; ///< number of channel groups
227 WMAProChannelGrp chgroup[WMAPRO_MAX_CHANNELS]; ///< channel group information
229 WMAProChannelCtx channel[WMAPRO_MAX_CHANNELS]; ///< per channel data
234 *@brief helper function to print the most important members of the context
/** Log the most important members of the decoder context (debug only). */
237 static av_cold void dump_context(WMAProDecodeCtx *s)
239 #define PRINT(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %d\n", a, b);
240 #define PRINT_HEX(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %x\n", a, b);
242 PRINT("ed sample bit depth", s->bits_per_sample);
243 PRINT_HEX("ed decode flags", s->decode_flags);
244 PRINT("samples per frame", s->samples_per_frame);
245 PRINT("log2 frame size", s->log2_frame_size);
246 PRINT("max num subframes", s->max_num_subframes);
247 PRINT("len prefix", s->len_prefix);
248 PRINT("num channels", s->avctx->channels);
252 *@brief Uninitialize the decoder and free all resources.
253 *@param avctx codec context
254 *@return 0 on success, < 0 otherwise
/** Free every per-block-size MDCT context on decoder close. */
256 static av_cold int decode_end(AVCodecContext *avctx)
258 WMAProDecodeCtx *s = avctx->priv_data;
261 for (i = 0; i < WMAPRO_BLOCK_SIZES; i++)
262 ff_mdct_end(&s->mdct_ctx[i]);
268 *@brief Initialize the decoder.
269 *@param avctx codec context
270 *@return 0 on success, -1 otherwise
272 static av_cold int decode_init(AVCodecContext *avctx)
274 WMAProDecodeCtx *s = avctx->priv_data;
275 uint8_t *edata_ptr = avctx->extradata;
276 unsigned int channel_mask;
278 int log2_max_num_subframes;
279 int num_possible_block_sizes;
282 ff_dsputil_init(&s->dsp, avctx);
283 init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
284 /* decoder always outputs planar float samples */
285 avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
/** parse the mandatory 18-byte extradata: sample size, channel mask and decode flags */
287 if (avctx->extradata_size >= 18) {
288 s->decode_flags = AV_RL16(edata_ptr+14);
289 channel_mask = AV_RL32(edata_ptr+2);
290 s->bits_per_sample = AV_RL16(edata_ptr);
291 /** dump the extradata */
292 for (i = 0; i < avctx->extradata_size; i++)
293 av_dlog(avctx, "[%x] ", avctx->extradata[i]);
294 av_dlog(avctx, "\n");
297 av_log_ask_for_sample(avctx, "Unknown extradata size\n");
298 return AVERROR_INVALIDDATA;
/** packet size is derived from block_align (16 * block_align bits) */
302 s->log2_frame_size = av_log2(avctx->block_align) + 4;
305 s->skip_frame = 1; /* skip first frame */
307 s->len_prefix = (s->decode_flags & 0x40);
/** get frame len: bits > WMAPRO_BLOCK_MAX_BITS cannot be handled by this decoder */
310 bits = ff_wma_get_frame_len_bits(avctx->sample_rate, 3, s->decode_flags);
311 if (bits > WMAPRO_BLOCK_MAX_BITS) {
312 av_log_missing_feature(avctx, "14-bits block sizes", 1);
313 return AVERROR_PATCHWELCOME;
315 s->samples_per_frame = 1 << bits;
/** subframe info: bits 3-5 of the decode flags give log2 of the max subframe count */
318 log2_max_num_subframes = ((s->decode_flags & 0x38) >> 3);
319 s->max_num_subframes = 1 << log2_max_num_subframes;
320 if (s->max_num_subframes == 16 || s->max_num_subframes == 4)
321 s->max_subframe_len_bit = 1;
322 s->subframe_len_bits = av_log2(log2_max_num_subframes) + 1;
324 num_possible_block_sizes = log2_max_num_subframes + 1;
325 s->min_samples_per_subframe = s->samples_per_frame / s->max_num_subframes;
326 s->dynamic_range_compression = (s->decode_flags & 0x80);
/** sanity-check the derived parameters before using them as table sizes */
328 if (s->max_num_subframes > MAX_SUBFRAMES) {
329 av_log(avctx, AV_LOG_ERROR, "invalid number of subframes %i\n",
330 s->max_num_subframes);
331 return AVERROR_INVALIDDATA;
334 if (s->min_samples_per_subframe < (1<<WMAPRO_BLOCK_MIN_BITS)) {
335 av_log(avctx, AV_LOG_ERROR, "min_samples_per_subframe of %d too small\n",
336 s->min_samples_per_subframe);
337 return AVERROR_INVALIDDATA;
340 if (s->avctx->sample_rate <= 0) {
341 av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
342 return AVERROR_INVALIDDATA;
345 if (avctx->channels < 0) {
346 av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n",
348 return AVERROR_INVALIDDATA;
349 } else if (avctx->channels > WMAPRO_MAX_CHANNELS) {
350 av_log_ask_for_sample(avctx, "unsupported number of channels\n");
351 return AVERROR_PATCHWELCOME;
354 /** init previous block len */
355 for (i = 0; i < avctx->channels; i++)
356 s->channel[i].prev_block_len = s->samples_per_frame;
358 /** extract lfe channel position */
361 if (channel_mask & 8) {
363 for (mask = 1; mask < 16; mask <<= 1) {
364 if (channel_mask & mask)
/** build the static VLC tables (shared by all decoder instances) */
369 INIT_VLC_STATIC(&sf_vlc, SCALEVLCBITS, HUFF_SCALE_SIZE,
370 scale_huffbits, 1, 1,
371 scale_huffcodes, 2, 2, 616);
373 INIT_VLC_STATIC(&sf_rl_vlc, VLCBITS, HUFF_SCALE_RL_SIZE,
374 scale_rl_huffbits, 1, 1,
375 scale_rl_huffcodes, 4, 4, 1406);
377 INIT_VLC_STATIC(&coef_vlc[0], VLCBITS, HUFF_COEF0_SIZE,
378 coef0_huffbits, 1, 1,
379 coef0_huffcodes, 4, 4, 2108);
381 INIT_VLC_STATIC(&coef_vlc[1], VLCBITS, HUFF_COEF1_SIZE,
382 coef1_huffbits, 1, 1,
383 coef1_huffcodes, 4, 4, 3912);
385 INIT_VLC_STATIC(&vec4_vlc, VLCBITS, HUFF_VEC4_SIZE,
387 vec4_huffcodes, 2, 2, 604);
389 INIT_VLC_STATIC(&vec2_vlc, VLCBITS, HUFF_VEC2_SIZE,
391 vec2_huffcodes, 2, 2, 562);
393 INIT_VLC_STATIC(&vec1_vlc, VLCBITS, HUFF_VEC1_SIZE,
395 vec1_huffcodes, 2, 2, 562);
397 /** calculate number of scale factor bands and their offsets
398 for every possible block size */
399 for (i = 0; i < num_possible_block_sizes; i++) {
400 int subframe_len = s->samples_per_frame >> i;
404 s->sfb_offsets[i][0] = 0;
/* band edges follow the critical_freq table, scaled to the subframe length */
406 for (x = 0; x < MAX_BANDS-1 && s->sfb_offsets[i][band - 1] < subframe_len; x++) {
407 int offset = (subframe_len * 2 * critical_freq[x])
408 / s->avctx->sample_rate + 2;
/* only keep strictly increasing offsets, dropping degenerate bands */
410 if (offset > s->sfb_offsets[i][band - 1])
411 s->sfb_offsets[i][band++] = offset;
413 s->sfb_offsets[i][band - 1] = subframe_len;
414 s->num_sfb[i] = band - 1;
415 if (s->num_sfb[i] <= 0) {
416 av_log(avctx, AV_LOG_ERROR, "num_sfb invalid\n");
417 return AVERROR_INVALIDDATA;
422 /** Scale factors can be shared between blocks of different size
423 as every block has a different scale factor band layout.
424 The matrix sf_offsets is needed to find the correct scale factor.
427 for (i = 0; i < num_possible_block_sizes; i++) {
429 for (b = 0; b < s->num_sfb[i]; b++) {
/* band center (in samples) mapped into the layout of block size x */
431 int offset = ((s->sfb_offsets[i][b]
432 + s->sfb_offsets[i][b + 1] - 1) << i) >> 1;
433 for (x = 0; x < num_possible_block_sizes; x++) {
435 while (s->sfb_offsets[x][v + 1] << x < offset)
437 s->sf_offsets[i][x][b] = v;
442 /** init MDCT, FIXME: only init needed sizes */
443 for (i = 0; i < WMAPRO_BLOCK_SIZES; i++)
444 ff_mdct_init(&s->mdct_ctx[i], WMAPRO_BLOCK_MIN_BITS+1+i, 1,
445 1.0 / (1 << (WMAPRO_BLOCK_MIN_BITS + i - 1))
446 / (1 << (s->bits_per_sample - 1)));
448 /** init MDCT windows: simple sine window */
449 for (i = 0; i < WMAPRO_BLOCK_SIZES; i++) {
450 const int win_idx = WMAPRO_BLOCK_MAX_BITS - i;
451 ff_init_ff_sine_windows(win_idx);
452 s->windows[WMAPRO_BLOCK_SIZES - i - 1] = ff_sine_windows[win_idx];
455 /** calculate subwoofer cutoff values */
456 for (i = 0; i < num_possible_block_sizes; i++) {
457 int block_size = s->samples_per_frame >> i;
458 int cutoff = (440*block_size + 3 * (s->avctx->sample_rate >> 1) - 1)
459 / s->avctx->sample_rate;
460 s->subwoofer_cutoffs[i] = av_clip(cutoff, 4, block_size);
463 /** calculate sine values for the decorrelation matrix */
464 for (i = 0; i < 33; i++)
465 sin64[i] = sin(i*M_PI / 64.0);
467 if (avctx->debug & FF_DEBUG_BITSTREAM)
470 avctx->channel_layout = channel_mask;
472 avcodec_get_frame_defaults(&s->frame);
473 avctx->coded_frame = &s->frame;
479 *@brief Decode the subframe length.
481 *@param offset sample offset in the frame
482 *@return decoded subframe length on success, < 0 in case of an error
484 static int decode_subframe_length(WMAProDecodeCtx *s, int offset)
486 int frame_len_shift = 0;
489 /** no need to read from the bitstream when only one length is possible */
490 if (offset == s->samples_per_frame - s->min_samples_per_subframe)
491 return s->min_samples_per_subframe;
493 /** 1 bit indicates if the subframe is of maximum length */
494 if (s->max_subframe_len_bit) {
495 if (get_bits1(&s->gb))
496 frame_len_shift = 1 + get_bits(&s->gb, s->subframe_len_bits-1);
498 frame_len_shift = get_bits(&s->gb, s->subframe_len_bits);
/* subframe length is always a power-of-two fraction of the frame length */
500 subframe_len = s->samples_per_frame >> frame_len_shift;
502 /** sanity check the length */
503 if (subframe_len < s->min_samples_per_subframe ||
504 subframe_len > s->samples_per_frame) {
505 av_log(s->avctx, AV_LOG_ERROR, "broken frame: subframe_len %i\n",
507 return AVERROR_INVALIDDATA;
513 *@brief Decode how the data in the frame is split into subframes.
514 * Every WMA frame contains the encoded data for a fixed number of
515 * samples per channel. The data for every channel might be split
516 * into several subframes. This function will reconstruct the list of
517 * subframes for every channel.
519 * If the subframes are not evenly split, the algorithm estimates the
520 * channels with the lowest number of total samples.
521 * Afterwards, for each of these channels a bit is read from the
522 * bitstream that indicates if the channel contains a subframe with the
523 * next subframe size that is going to be read from the bitstream or not.
524 * If a channel contains such a subframe, the subframe size gets added to
525 * the channel's subframe list.
526 * The algorithm repeats these steps until the frame is properly divided
527 * between the individual channels.
530 *@return 0 on success, < 0 in case of an error
532 static int decode_tilehdr(WMAProDecodeCtx *s)
534 uint16_t num_samples[WMAPRO_MAX_CHANNELS] = { 0 };/**< sum of samples for all currently known subframes of a channel */
535 uint8_t contains_subframe[WMAPRO_MAX_CHANNELS]; /**< flag indicating if a channel contains the current subframe */
536 int channels_for_cur_subframe = s->avctx->channels; /**< number of channels that contain the current subframe */
537 int fixed_channel_layout = 0; /**< flag indicating that all channels use the same subframe offsets and sizes */
538 int min_channel_len = 0; /**< smallest sum of samples (channels with this length will be processed first) */
541 /* Should never consume more than 3073 bits (256 iterations for the
542 * while loop when always the minimum amount of 128 samples is subtracted
543 * from missing samples in the 8 channel case).
544 * 1 + BLOCK_MAX_SIZE * MAX_CHANNELS / BLOCK_MIN_SIZE * (MAX_CHANNELS + 4)
547 /** reset tiling information */
548 for (c = 0; c < s->avctx->channels; c++)
549 s->channel[c].num_subframes = 0;
/* a single possible subframe size, or an explicit bit, forces a uniform layout */
551 if (s->max_num_subframes == 1 || get_bits1(&s->gb))
552 fixed_channel_layout = 1;
554 /** loop until the frame data is split between the subframes */
558 /** check which channels contain the subframe */
559 for (c = 0; c < s->avctx->channels; c++) {
560 if (num_samples[c] == min_channel_len) {
561 if (fixed_channel_layout || channels_for_cur_subframe == 1 ||
562 (min_channel_len == s->samples_per_frame - s->min_samples_per_subframe))
563 contains_subframe[c] = 1;
565 contains_subframe[c] = get_bits1(&s->gb);
567 contains_subframe[c] = 0;
570 /** get subframe length, subframe_len == 0 is not allowed */
571 if ((subframe_len = decode_subframe_length(s, min_channel_len)) <= 0)
572 return AVERROR_INVALIDDATA;
574 /** add subframes to the individual channels and find new min_channel_len */
575 min_channel_len += subframe_len;
576 for (c = 0; c < s->avctx->channels; c++) {
577 WMAProChannelCtx* chan = &s->channel[c];
579 if (contains_subframe[c]) {
580 if (chan->num_subframes >= MAX_SUBFRAMES) {
581 av_log(s->avctx, AV_LOG_ERROR,
582 "broken frame: num subframes > 31\n");
583 return AVERROR_INVALIDDATA;
585 chan->subframe_len[chan->num_subframes] = subframe_len;
586 num_samples[c] += subframe_len;
587 ++chan->num_subframes;
588 if (num_samples[c] > s->samples_per_frame) {
589 av_log(s->avctx, AV_LOG_ERROR, "broken frame: "
590 "channel len > samples_per_frame\n");
591 return AVERROR_INVALIDDATA;
/* track the channel(s) with the fewest decoded samples; they get the next subframe */
593 } else if (num_samples[c] <= min_channel_len) {
594 if (num_samples[c] < min_channel_len) {
595 channels_for_cur_subframe = 0;
596 min_channel_len = num_samples[c];
598 ++channels_for_cur_subframe;
601 } while (min_channel_len < s->samples_per_frame);
/** derive the subframe offsets from the accumulated lengths (debug-logged) */
603 for (c = 0; c < s->avctx->channels; c++) {
606 for (i = 0; i < s->channel[c].num_subframes; i++) {
607 av_dlog(s->avctx, "frame[%i] channel[%i] subframe[%i]"
608 " len %i\n", s->frame_num, c, i,
609 s->channel[c].subframe_len[i]);
610 s->channel[c].subframe_offset[i] = offset;
611 offset += s->channel[c].subframe_len[i];
619 *@brief Calculate a decorrelation matrix from the bitstream parameters.
620 *@param s codec context
621 *@param chgroup channel group for which the matrix needs to be calculated
623 static void decode_decorrelation_matrix(WMAProDecodeCtx *s,
624 WMAProChannelGrp *chgroup)
628 int8_t rotation_offset[WMAPRO_MAX_CHANNELS * WMAPRO_MAX_CHANNELS];
629 memset(chgroup->decorrelation_matrix, 0, s->avctx->channels *
630 s->avctx->channels * sizeof(*chgroup->decorrelation_matrix));
/* one 6-bit rotation angle index per channel pair (lower triangle) */
632 for (i = 0; i < chgroup->num_channels * (chgroup->num_channels - 1) >> 1; i++)
633 rotation_offset[i] = get_bits(&s->gb, 6);
/* diagonal carries per-channel signs transmitted as single bits */
635 for (i = 0; i < chgroup->num_channels; i++)
636 chgroup->decorrelation_matrix[chgroup->num_channels * i + i] =
637 get_bits1(&s->gb) ? 1.0 : -1.0;
/* apply the Givens-style rotations pairwise to build the matrix;
the sin/cos values are looked up in the quarter-period sin64 table */
639 for (i = 1; i < chgroup->num_channels; i++) {
641 for (x = 0; x < i; x++) {
643 for (y = 0; y < i + 1; y++) {
644 float v1 = chgroup->decorrelation_matrix[x * chgroup->num_channels + y];
645 float v2 = chgroup->decorrelation_matrix[i * chgroup->num_channels + y];
646 int n = rotation_offset[offset + x];
652 cosv = sin64[32 - n];
654 sinv = sin64[64 - n];
655 cosv = -sin64[n - 32];
658 chgroup->decorrelation_matrix[y + x * chgroup->num_channels] =
659 (v1 * sinv) - (v2 * cosv);
660 chgroup->decorrelation_matrix[y + i * chgroup->num_channels] =
661 (v1 * cosv) + (v2 * sinv);
669 *@brief Decode channel transformation parameters
670 *@param s codec context
671 *@return 0 in case of success, < 0 in case of bitstream errors
673 static int decode_channel_transform(WMAProDecodeCtx* s)
676 /* should never consume more than 1921 bits for the 8 channel case
677 * 1 + MAX_CHANNELS * (MAX_CHANNELS + 2 + 3 * MAX_CHANNELS * MAX_CHANNELS
678 * + MAX_CHANNELS + MAX_BANDS + 1)
681 /** in the one channel case channel transforms are pointless */
683 if (s->avctx->channels > 1) {
684 int remaining_channels = s->channels_for_cur_subframe;
686 if (get_bits1(&s->gb)) {
687 av_log_ask_for_sample(s->avctx,
688 "unsupported channel transform bit\n");
689 return AVERROR_INVALIDDATA;
/** assign each remaining channel to a group until all are grouped */
692 for (s->num_chgroups = 0; remaining_channels &&
693 s->num_chgroups < s->channels_for_cur_subframe; s->num_chgroups++) {
694 WMAProChannelGrp* chgroup = &s->chgroup[s->num_chgroups];
695 float** channel_data = chgroup->channel_data;
696 chgroup->num_channels = 0;
697 chgroup->transform = 0;
699 /** decode channel mask */
700 if (remaining_channels > 2) {
701 for (i = 0; i < s->channels_for_cur_subframe; i++) {
702 int channel_idx = s->channel_indexes_for_cur_subframe[i];
703 if (!s->channel[channel_idx].grouped
704 && get_bits1(&s->gb)) {
705 ++chgroup->num_channels;
706 s->channel[channel_idx].grouped = 1;
707 *channel_data++ = s->channel[channel_idx].coeffs;
/* with <= 2 channels left they all join this group, no mask is transmitted */
711 chgroup->num_channels = remaining_channels;
712 for (i = 0; i < s->channels_for_cur_subframe; i++) {
713 int channel_idx = s->channel_indexes_for_cur_subframe[i];
714 if (!s->channel[channel_idx].grouped)
715 *channel_data++ = s->channel[channel_idx].coeffs;
716 s->channel[channel_idx].grouped = 1;
720 /** decode transform type */
721 if (chgroup->num_channels == 2) {
722 if (get_bits1(&s->gb)) {
723 if (get_bits1(&s->gb)) {
724 av_log_ask_for_sample(s->avctx,
725 "unsupported channel transform type\n");
/* stereo pair: fixed M/S-style matrix */
728 chgroup->transform = 1;
729 if (s->avctx->channels == 2) {
730 chgroup->decorrelation_matrix[0] = 1.0;
731 chgroup->decorrelation_matrix[1] = -1.0;
732 chgroup->decorrelation_matrix[2] = 1.0;
733 chgroup->decorrelation_matrix[3] = 1.0;
/* scaled variant (approximately 1/sqrt(2)) for a pair inside a larger layout */
736 chgroup->decorrelation_matrix[0] = 0.70703125;
737 chgroup->decorrelation_matrix[1] = -0.70703125;
738 chgroup->decorrelation_matrix[2] = 0.70703125;
739 chgroup->decorrelation_matrix[3] = 0.70703125;
742 } else if (chgroup->num_channels > 2) {
743 if (get_bits1(&s->gb)) {
744 chgroup->transform = 1;
745 if (get_bits1(&s->gb)) {
746 decode_decorrelation_matrix(s, chgroup);
748 /** FIXME: more than 6 coupled channels not supported */
749 if (chgroup->num_channels > 6) {
750 av_log_ask_for_sample(s->avctx,
751 "coupled channels > 6\n");
753 memcpy(chgroup->decorrelation_matrix,
754 default_decorrelation[chgroup->num_channels],
755 chgroup->num_channels * chgroup->num_channels *
756 sizeof(*chgroup->decorrelation_matrix));
762 /** decode transform on / off */
763 if (chgroup->transform) {
764 if (!get_bits1(&s->gb)) {
766 /** transform can be enabled for individual bands */
767 for (i = 0; i < s->num_bands; i++) {
768 chgroup->transform_band[i] = get_bits1(&s->gb);
771 memset(chgroup->transform_band, 1, s->num_bands);
774 remaining_channels -= chgroup->num_channels;
781 *@brief Extract the coefficients from the bitstream.
782 *@param s codec context
783 *@param c current channel number
784 *@return 0 on success, < 0 in case of bitstream errors
786 static int decode_coeffs(WMAProDecodeCtx *s, int c)
788 /* Integers 0..15 as single-precision floats. The table saves a
789 costly int to float conversion, and storing the values as
790 integers allows fast sign-flipping. */
791 static const uint32_t fval_tab[16] = {
792 0x00000000, 0x3f800000, 0x40000000, 0x40400000,
793 0x40800000, 0x40a00000, 0x40c00000, 0x40e00000,
794 0x41000000, 0x41100000, 0x41200000, 0x41300000,
795 0x41400000, 0x41500000, 0x41600000, 0x41700000,
799 WMAProChannelCtx* ci = &s->channel[c];
806 av_dlog(s->avctx, "decode coefficients for channel %i\n", c);
/* a single bit selects which of the two run-level coefficient VLCs is used */
808 vlctable = get_bits1(&s->gb);
809 vlc = &coef_vlc[vlctable];
819 /** decode vector coefficients (consumes up to 167 bits per iteration for
820 4 vector coded large values) */
821 while ((s->transmit_num_vec_coeffs || !rl_mode) &&
822 (cur_coeff + 3 < ci->num_vec_coeffs)) {
827 idx = get_vlc2(&s->gb, vec4_vlc.table, VLCBITS, VEC4MAXDEPTH);
/* escape symbol: fall back to two 2-element vectors, each of which may
in turn escape to two single values plus a large-value extension */
829 if (idx == HUFF_VEC4_SIZE - 1) {
830 for (i = 0; i < 4; i += 2) {
831 idx = get_vlc2(&s->gb, vec2_vlc.table, VLCBITS, VEC2MAXDEPTH);
832 if (idx == HUFF_VEC2_SIZE - 1) {
834 v0 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
835 if (v0 == HUFF_VEC1_SIZE - 1)
836 v0 += ff_wma_get_large_val(&s->gb);
837 v1 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
838 if (v1 == HUFF_VEC1_SIZE - 1)
839 v1 += ff_wma_get_large_val(&s->gb);
840 vals[i ] = av_float2int(v0);
841 vals[i+1] = av_float2int(v1);
843 vals[i] = fval_tab[symbol_to_vec2[idx] >> 4 ];
844 vals[i+1] = fval_tab[symbol_to_vec2[idx] & 0xF];
/* common case: unpack the 4 nibbles of the vec4 symbol */
848 vals[0] = fval_tab[ symbol_to_vec4[idx] >> 12 ];
849 vals[1] = fval_tab[(symbol_to_vec4[idx] >> 8) & 0xF];
850 vals[2] = fval_tab[(symbol_to_vec4[idx] >> 4) & 0xF];
851 vals[3] = fval_tab[ symbol_to_vec4[idx] & 0xF];
/** decode sign: sign is 0 or all-ones, so XOR with (sign << 31) flips the float's sign bit */
855 for (i = 0; i < 4; i++) {
857 uint32_t sign = get_bits1(&s->gb) - 1;
858 AV_WN32A(&ci->coeffs[cur_coeff], vals[i] ^ sign << 31);
861 ci->coeffs[cur_coeff] = 0;
862 /** switch to run level mode when subframe_len / 128 zeros
863 were found in a row */
864 rl_mode |= (++num_zeros > s->subframe_len >> 8);
870 /** decode run level coded coefficients */
871 if (cur_coeff < s->subframe_len) {
872 memset(&ci->coeffs[cur_coeff], 0,
873 sizeof(*ci->coeffs) * (s->subframe_len - cur_coeff));
874 if (ff_wma_run_level_decode(s->avctx, &s->gb, vlc,
875 level, run, 1, ci->coeffs,
876 cur_coeff, s->subframe_len,
877 s->subframe_len, s->esc_len, 0))
878 return AVERROR_INVALIDDATA;
885 *@brief Extract scale factors from the bitstream.
886 *@param s codec context
887 *@return 0 on success, < 0 in case of bitstream errors
889 static int decode_scale_factors(WMAProDecodeCtx* s)
893 /** should never consume more than 5344 bits
894 * MAX_CHANNELS * (1 + MAX_BANDS * 23)
897 for (i = 0; i < s->channels_for_cur_subframe; i++) {
898 int c = s->channel_indexes_for_cur_subframe[i];
/* double-buffered scale factors: decode into the slot NOT holding the saved set */
901 s->channel[c].scale_factors = s->channel[c].saved_scale_factors[!s->channel[c].scale_factor_idx];
902 sf_end = s->channel[c].scale_factors + s->num_bands;
904 /** resample scale factors for the new block size
905 * as the scale factors might need to be resampled several times
906 * before some new values are transmitted, a backup of the last
907 * transmitted scale factors is kept in saved_scale_factors
909 if (s->channel[c].reuse_sf) {
910 const int8_t* sf_offsets = s->sf_offsets[s->table_idx][s->channel[c].table_idx];
912 for (b = 0; b < s->num_bands; b++)
913 s->channel[c].scale_factors[b] =
914 s->channel[c].saved_scale_factors[s->channel[c].scale_factor_idx][*sf_offsets++];
/* first subframe always transmits scale factors; later ones signal it with a bit */
917 if (!s->channel[c].cur_subframe || get_bits1(&s->gb)) {
919 if (!s->channel[c].reuse_sf) {
921 /** decode DPCM coded scale factors */
922 s->channel[c].scale_factor_step = get_bits(&s->gb, 2) + 1;
923 val = 45 / s->channel[c].scale_factor_step;
924 for (sf = s->channel[c].scale_factors; sf < sf_end; sf++) {
925 val += get_vlc2(&s->gb, sf_vlc.table, SCALEVLCBITS, SCALEMAXDEPTH) - 60;
930 /** run level decode differences to the resampled factors */
931 for (i = 0; i < s->num_bands; i++) {
937 idx = get_vlc2(&s->gb, sf_rl_vlc.table, VLCBITS, SCALERLMAXDEPTH);
/* escape: 14-bit code packs sign (bit 0), skip (bits 1-5) and value */
940 uint32_t code = get_bits(&s->gb, 14);
942 sign = (code & 1) - 1;
943 skip = (code & 0x3f) >> 1;
944 } else if (idx == 1) {
947 skip = scale_rl_run[idx];
948 val = scale_rl_level[idx];
949 sign = get_bits1(&s->gb)-1;
953 if (i >= s->num_bands) {
954 av_log(s->avctx, AV_LOG_ERROR,
955 "invalid scale factor coding\n");
956 return AVERROR_INVALIDDATA;
/* (val ^ sign) - sign applies the sign without a branch (sign is 0 or -1) */
958 s->channel[c].scale_factors[i] += (val ^ sign) - sign;
/** swap buffers and remember the block size the factors belong to */
962 s->channel[c].scale_factor_idx = !s->channel[c].scale_factor_idx;
963 s->channel[c].table_idx = s->table_idx;
964 s->channel[c].reuse_sf = 1;
967 /** calculate new scale factor maximum */
968 s->channel[c].max_scale_factor = s->channel[c].scale_factors[0];
969 for (sf = s->channel[c].scale_factors + 1; sf < sf_end; sf++) {
970 s->channel[c].max_scale_factor =
971 FFMAX(s->channel[c].max_scale_factor, *sf);
979 *@brief Reconstruct the individual channel data.
980 *@param s codec context
982 static void inverse_channel_transform(WMAProDecodeCtx *s)
986 for (i = 0; i < s->num_chgroups; i++) {
987 if (s->chgroup[i].transform) {
/** scratch vector holding one time sample from every channel of the group */
988 float data[WMAPRO_MAX_CHANNELS];
989 const int num_channels = s->chgroup[i].num_channels;
990 float** ch_data = s->chgroup[i].channel_data;
991 float** ch_end = ch_data + num_channels;
992 const int8_t* tb = s->chgroup[i].transform_band;
995 /** multichannel decorrelation */
996 for (sfb = s->cur_sfb_offsets;
997 sfb < s->cur_sfb_offsets + s->num_bands; sfb++) {
1000 /** multiply values with the decorrelation_matrix */
1001 for (y = sfb[0]; y < FFMIN(sfb[1], s->subframe_len); y++) {
1002 const float* mat = s->chgroup[i].decorrelation_matrix;
1003 const float* data_end = data + num_channels;
1004 float* data_ptr = data;
/** gather sample y from every channel of the group into data[] ... */
1007 for (ch = ch_data; ch < ch_end; ch++)
1008 *data_ptr++ = (*ch)[y];
/** ... then write back one matrix row dot data[] per channel */
1010 for (ch = ch_data; ch < ch_end; ch++) {
1013 while (data_ptr < data_end)
1014 sum += *data_ptr++ * *mat++;
/** stereo case without a full matrix: both channels are rescaled in place;
 *  NOTE(review): the scalar factor argument is on a line not visible here
 *  — confirm against the full source */
1019 } else if (s->avctx->channels == 2) {
1020 int len = FFMIN(sfb[1], s->subframe_len) - sfb[0];
1021 s->dsp.vector_fmul_scalar(ch_data[0] + sfb[0],
1022 ch_data[0] + sfb[0],
1024 s->dsp.vector_fmul_scalar(ch_data[1] + sfb[0],
1025 ch_data[1] + sfb[0],
1034 *@brief Apply sine window and reconstruct the output buffer.
1035 *@param s codec context
1037 static void wmapro_window(WMAProDecodeCtx *s)
1040 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1041 int c = s->channel_indexes_for_cur_subframe[i];
1043 int winlen = s->channel[c].prev_block_len;
/** the window is centered on the boundary between the previous and the
 *  current block: half of it extends back into already-produced output */
1044 float* start = s->channel[c].coeffs - (winlen >> 1);
/** when the current block is shorter than the previous one, shrink the
 *  window to the overlap region that actually fits */
1046 if (s->subframe_len < winlen) {
1047 start += (winlen - s->subframe_len) >> 1;
1048 winlen = s->subframe_len;
/** select the precomputed sine window matching the (possibly reduced)
 *  power-of-two length */
1051 window = s->windows[av_log2(winlen) - WMAPRO_BLOCK_MIN_BITS];
1055 s->dsp.vector_fmul_window(start, start, start + winlen,
/** remember this block's length for windowing the next subframe */
1058 s->channel[c].prev_block_len = s->subframe_len;
1063 *@brief Decode a single subframe (block).
1064 *@param s codec context
1065 *@return 0 on success, < 0 when decoding failed
1067 static int decode_subframe(WMAProDecodeCtx *s)
1069 int offset = s->samples_per_frame;
1070 int subframe_len = s->samples_per_frame;
1072 int total_samples = s->samples_per_frame * s->avctx->channels;
1073 int transmit_coeffs = 0;
1074 int cur_subwoofer_cutoff;
/** remember the bit position of the subframe start for the length dlogs */
1076 s->subframe_offset = get_bits_count(&s->gb);
1078 /** reset channel context and find the next block offset and size
1079 == the next block of the channel with the smallest number of
1082 for (i = 0; i < s->avctx->channels; i++) {
1083 s->channel[i].grouped = 0;
1084 if (offset > s->channel[i].decoded_samples) {
1085 offset = s->channel[i].decoded_samples;
1087 s->channel[i].subframe_len[s->channel[i].cur_subframe];
1092 "processing subframe with offset %i len %i\n", offset, subframe_len);
1094 /** get a list of all channels that contain the estimated block */
1095 s->channels_for_cur_subframe = 0;
1096 for (i = 0; i < s->avctx->channels; i++) {
1097 const int cur_subframe = s->channel[i].cur_subframe;
1098 /** subtract already processed samples */
1099 total_samples -= s->channel[i].decoded_samples;
1101 /** and count if there are multiple subframes that match our profile */
1102 if (offset == s->channel[i].decoded_samples &&
1103 subframe_len == s->channel[i].subframe_len[cur_subframe]) {
1104 total_samples -= s->channel[i].subframe_len[cur_subframe];
1105 s->channel[i].decoded_samples +=
1106 s->channel[i].subframe_len[cur_subframe];
1107 s->channel_indexes_for_cur_subframe[s->channels_for_cur_subframe] = i;
1108 ++s->channels_for_cur_subframe;
1112 /** check if the frame will be complete after processing the
1115 s->parsed_all_subframes = 1;
1118 av_dlog(s->avctx, "subframe is part of %i channels\n",
1119 s->channels_for_cur_subframe);
1121 /** calculate number of scale factor bands and their offsets */
/** table_idx selects the sfb layout for this block size
 *  (0 == full-length block) */
1122 s->table_idx = av_log2(s->samples_per_frame/subframe_len);
1123 s->num_bands = s->num_sfb[s->table_idx];
1124 s->cur_sfb_offsets = s->sfb_offsets[s->table_idx];
1125 cur_subwoofer_cutoff = s->subwoofer_cutoffs[s->table_idx];
1127 /** configure the decoder for the current subframe */
1128 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1129 int c = s->channel_indexes_for_cur_subframe[i];
/** coefficients are written past the frame midpoint of the out buffer;
 *  the first half holds the previous frame's IMDCT tail for overlap-add */
1131 s->channel[c].coeffs = &s->channel[c].out[(s->samples_per_frame >> 1)
1135 s->subframe_len = subframe_len;
/** bit width needed to code an escape run/level index for this length */
1136 s->esc_len = av_log2(s->subframe_len - 1) + 1;
1138 /** skip extended header if any */
1139 if (get_bits1(&s->gb)) {
1141 if (!(num_fill_bits = get_bits(&s->gb, 2))) {
1142 int len = get_bits(&s->gb, 4);
1143 num_fill_bits = (len ? get_bits(&s->gb, len) : 0) + 1;
1146 if (num_fill_bits >= 0) {
/** the fill bits must not run past the saved bit reservoir */
1147 if (get_bits_count(&s->gb) + num_fill_bits > s->num_saved_bits) {
1148 av_log(s->avctx, AV_LOG_ERROR, "invalid number of fill bits\n");
1149 return AVERROR_INVALIDDATA;
1152 skip_bits_long(&s->gb, num_fill_bits);
1156 /** no idea for what the following bit is used */
1157 if (get_bits1(&s->gb)) {
1158 av_log_ask_for_sample(s->avctx, "reserved bit set\n");
1159 return AVERROR_INVALIDDATA;
1163 if (decode_channel_transform(s) < 0)
1164 return AVERROR_INVALIDDATA;
/** per-channel flag: does this channel transmit coefficients at all? */
1167 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1168 int c = s->channel_indexes_for_cur_subframe[i];
1169 if ((s->channel[c].transmit_coefs = get_bits1(&s->gb)))
1170 transmit_coeffs = 1;
1173 av_assert0(s->subframe_len <= WMAPRO_BLOCK_MAX_SIZE);
1174 if (transmit_coeffs) {
/** base quantizer scales with the stream's bits per sample */
1176 int quant_step = 90 * s->bits_per_sample >> 4;
1178 /** decode number of vector coded coefficients */
1179 if ((s->transmit_num_vec_coeffs = get_bits1(&s->gb))) {
/** the count is transmitted in units of 4 coefficients */
1180 int num_bits = av_log2((s->subframe_len + 3)/4) + 1;
1181 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1182 int c = s->channel_indexes_for_cur_subframe[i];
1183 int num_vec_coeffs = get_bits(&s->gb, num_bits) << 2;
1184 if (num_vec_coeffs > s->subframe_len) {
1185 av_log(s->avctx, AV_LOG_ERROR, "num_vec_coeffs %d is too large\n", num_vec_coeffs);
1186 return AVERROR_INVALIDDATA;
1188 s->channel[c].num_vec_coeffs = num_vec_coeffs;
/** not transmitted: all coefficients are vector coded */
1191 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1192 int c = s->channel_indexes_for_cur_subframe[i];
1193 s->channel[c].num_vec_coeffs = s->subframe_len;
1196 /** decode quantization step */
1197 step = get_sbits(&s->gb, 6);
/** -32/+31 are escape values: extend the step with 5-bit chunks, each
 *  value of 31 meaning "more follows"; sign is 0 or -1 for branchless
 *  application below */
1199 if (step == -32 || step == 31) {
1200 const int sign = (step == 31) - 1;
1202 while (get_bits_count(&s->gb) + 5 < s->num_saved_bits &&
1203 (step = get_bits(&s->gb, 5)) == 31) {
1206 quant_step += ((quant + step) ^ sign) - sign;
1208 if (quant_step < 0) {
1209 av_log(s->avctx, AV_LOG_DEBUG, "negative quant step\n");
1212 /** decode quantization step modifiers for every channel */
1214 if (s->channels_for_cur_subframe == 1) {
1215 s->channel[s->channel_indexes_for_cur_subframe[0]].quant_step = quant_step;
1217 int modifier_len = get_bits(&s->gb, 3);
1218 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1219 int c = s->channel_indexes_for_cur_subframe[i];
1220 s->channel[c].quant_step = quant_step;
1221 if (get_bits1(&s->gb)) {
1223 s->channel[c].quant_step += get_bits(&s->gb, modifier_len) + 1;
1225 ++s->channel[c].quant_step;
1230 /** decode scale factors */
1231 if (decode_scale_factors(s) < 0)
1232 return AVERROR_INVALIDDATA;
1235 av_dlog(s->avctx, "BITSTREAM: subframe header length was %i\n",
1236 get_bits_count(&s->gb) - s->subframe_offset);
1238 /** parse coefficients */
1239 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1240 int c = s->channel_indexes_for_cur_subframe[i];
1241 if (s->channel[c].transmit_coefs &&
1242 get_bits_count(&s->gb) < s->num_saved_bits) {
1243 decode_coeffs(s, c);
/** no coefficients transmitted for this channel: silence the block */
1245 memset(s->channel[c].coeffs, 0,
1246 sizeof(*s->channel[c].coeffs) * subframe_len);
1249 av_dlog(s->avctx, "BITSTREAM: subframe length was %i\n",
1250 get_bits_count(&s->gb) - s->subframe_offset);
1252 if (transmit_coeffs) {
1253 FFTContext *mdct = &s->mdct_ctx[av_log2(subframe_len) - WMAPRO_BLOCK_MIN_BITS];
1254 /** reconstruct the per channel data */
1255 inverse_channel_transform(s);
1256 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1257 int c = s->channel_indexes_for_cur_subframe[i];
1258 const int* sf = s->channel[c].scale_factors;
/** low-pass the LFE channel: zero everything above the subwoofer cutoff */
1261 if (c == s->lfe_channel)
1262 memset(&s->tmp[cur_subwoofer_cutoff], 0, sizeof(*s->tmp) *
1263 (subframe_len - cur_subwoofer_cutoff));
1265 /** inverse quantization and rescaling */
1266 for (b = 0; b < s->num_bands; b++) {
1267 const int end = FFMIN(s->cur_sfb_offsets[b+1], s->subframe_len);
/** per-band gain in dB: quant step minus the band's distance from the
 *  maximum scale factor, converted to linear via 10^(exp/20) */
1268 const int exp = s->channel[c].quant_step -
1269 (s->channel[c].max_scale_factor - *sf++) *
1270 s->channel[c].scale_factor_step;
1271 const float quant = pow(10.0, exp / 20.0);
1272 int start = s->cur_sfb_offsets[b];
1273 s->dsp.vector_fmul_scalar(s->tmp + start,
1274 s->channel[c].coeffs + start,
1275 quant, end - start);
1278 /** apply imdct (imdct_half == DCTIV with reverse) */
1279 mdct->imdct_half(mdct, s->channel[c].coeffs, s->tmp);
1283 /** window and overlap-add */
1286 /** handled one subframe */
1287 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1288 int c = s->channel_indexes_for_cur_subframe[i];
1289 if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) {
1290 av_log(s->avctx, AV_LOG_ERROR, "broken subframe\n");
1291 return AVERROR_INVALIDDATA;
1293 ++s->channel[c].cur_subframe;
1300 *@brief Decode one WMA frame.
1301 *@param s codec context
1302 *@return 0 if the trailer bit indicates that this is the last frame,
1303 * 1 if there are additional frames
1305 static int decode_frame(WMAProDecodeCtx *s, int *got_frame_ptr)
1307 AVCodecContext *avctx = s->avctx;
1308 GetBitContext* gb = &s->gb;
1309 int more_frames = 0;
1313 /** get frame length */
1315 len = get_bits(gb, s->log2_frame_size);
1317 av_dlog(s->avctx, "decoding frame with length %x\n", len);
1319 /** decode tile information */
1320 if (decode_tilehdr(s)) {
1325 /** read postproc transform */
/** matrix coefficients are read but apparently discarded here; the lines
 *  consuming them are not visible in this view */
1326 if (s->avctx->channels > 1 && get_bits1(gb)) {
1327 if (get_bits1(gb)) {
1328 for (i = 0; i < avctx->channels * avctx->channels; i++)
1333 /** read drc info */
1334 if (s->dynamic_range_compression) {
1335 s->drc_gain = get_bits(gb, 8);
1336 av_dlog(s->avctx, "drc_gain %i\n", s->drc_gain);
1339 /** no idea what these are for, might be the number of samples
1340 that need to be skipped at the beginning or end of a stream */
1341 if (get_bits1(gb)) {
1344 /** usually true for the first frame */
1345 if (get_bits1(gb)) {
1346 skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
1347 av_dlog(s->avctx, "start skip: %i\n", skip);
1350 /** sometimes true for the last frame */
1351 if (get_bits1(gb)) {
1352 skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
1353 av_dlog(s->avctx, "end skip: %i\n", skip);
1358 av_dlog(s->avctx, "BITSTREAM: frame header length was %i\n",
1359 get_bits_count(gb) - s->frame_offset);
1361 /** reset subframe states */
1362 s->parsed_all_subframes = 0;
1363 for (i = 0; i < avctx->channels; i++) {
1364 s->channel[i].decoded_samples = 0;
1365 s->channel[i].cur_subframe = 0;
1366 s->channel[i].reuse_sf = 0;
1369 /** decode all subframes */
/** decode_subframe() sets parsed_all_subframes once every channel has
 *  produced a full frame of samples */
1370 while (!s->parsed_all_subframes) {
1371 if (decode_subframe(s) < 0) {
1377 /* get output buffer */
1378 s->frame.nb_samples = s->samples_per_frame;
1379 if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
1380 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1385 /** copy samples to the output buffer */
/** planar float output: one contiguous plane per channel */
1386 for (i = 0; i < avctx->channels; i++)
1387 memcpy(s->frame.extended_data[i], s->channel[i].out,
1388 s->samples_per_frame * sizeof(*s->channel[i].out));
1390 for (i = 0; i < avctx->channels; i++) {
1391 /** reuse second half of the IMDCT output for the next frame */
1392 memcpy(&s->channel[i].out[0],
1393 &s->channel[i].out[s->samples_per_frame],
1394 s->samples_per_frame * sizeof(*s->channel[i].out) >> 1);
1397 if (s->skip_frame) {
1404 if (s->len_prefix) {
/** consumed bits must match the transmitted frame length (+2 trailer) */
1405 if (len != (get_bits_count(gb) - s->frame_offset) + 2) {
1406 /** FIXME: not sure if this is always an error */
1407 av_log(s->avctx, AV_LOG_ERROR,
1408 "frame[%i] would have to skip %i bits\n", s->frame_num,
1409 len - (get_bits_count(gb) - s->frame_offset) - 1);
1414 /** skip the rest of the frame data */
1415 skip_bits_long(gb, len - (get_bits_count(gb) - s->frame_offset) - 1);
/** without a length prefix, scan forward to the next set bit */
1417 while (get_bits_count(gb) < s->num_saved_bits && get_bits1(gb) == 0) {
1421 /** decode trailer bit */
1422 more_frames = get_bits1(gb);
1429 *@brief Calculate remaining input buffer length.
1430 *@param s codec context
1431 *@param gb bitstream reader context
1432 *@return remaining size in bits
1434 static int remaining_bits(WMAProDecodeCtx *s, GetBitContext *gb)
/** buf_bit_size is the total bit size of the current input buffer */
1436 return s->buf_bit_size - get_bits_count(gb);
1440 *@brief Fill the bit reservoir with a (partial) frame.
1441 *@param s codec context
1442 *@param gb bitstream reader context
1443 *@param len length of the partial frame
1444 *@param append decides whether to reset the buffer or not
1446 static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
1451 /** when the frame data does not need to be concatenated, the input buffer
1452 is reset and additional bits from the previous frame are copied
1453 and skipped later so that a fast byte copy is possible */
/** keep only the sub-byte offset so the copy below can stay byte-aligned */
1456 s->frame_offset = get_bits_count(gb) & 7;
1457 s->num_saved_bits = s->frame_offset;
1458 init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
/** total bytes the reservoir will hold after appending len bits */
1461 buflen = (put_bits_count(&s->pb) + len + 8) >> 3;
1463 if (len <= 0 || buflen > MAX_FRAMESIZE) {
1464 av_log_ask_for_sample(s->avctx, "input buffer too small\n");
1469 s->num_saved_bits += len;
1471 avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3),
/** unaligned case: copy up to 7 bits manually to reach a byte boundary,
 *  then bulk-copy the rest */
1474 int align = 8 - (get_bits_count(gb) & 7);
1475 align = FFMIN(align, len);
1476 put_bits(&s->pb, align, get_bits(gb, align));
1478 avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3), len);
1480 skip_bits_long(gb, len);
/** flush a copy of the writer so s->pb itself can keep accepting bits */
1483 PutBitContext tmp = s->pb;
1484 flush_put_bits(&tmp);
/** re-open the reservoir for reading and skip the alignment padding */
1487 init_get_bits(&s->gb, s->frame_data, s->num_saved_bits);
1488 skip_bits(&s->gb, s->frame_offset);
1492 *@brief Decode a single WMA packet.
1493 *@param avctx codec context
1494 *@param data the output buffer
1495 *@param avpkt input packet
1496 *@return number of bytes that were read from the input buffer
1498 static int decode_packet(AVCodecContext *avctx, void *data,
1499 int *got_frame_ptr, AVPacket* avpkt)
1501 WMAProDecodeCtx *s = avctx->priv_data;
1502 GetBitContext* gb = &s->pgb;
1503 const uint8_t* buf = avpkt->data;
1504 int buf_size = avpkt->size;
1505 int num_bits_prev_frame;
1506 int packet_sequence_number;
/** start of a fresh packet (previous one finished or was lost) */
1510 if (s->packet_done || s->packet_loss) {
1513 /** sanity check for the buffer length */
1514 if (buf_size < avctx->block_align)
/** only one block_align-sized packet is consumed per call; the remainder
 *  stays for the next call */
1517 s->next_packet_start = buf_size - avctx->block_align;
1518 buf_size = avctx->block_align;
1519 s->buf_bit_size = buf_size << 3;
1521 /** parse packet header */
1522 init_get_bits(gb, buf, s->buf_bit_size);
1523 packet_sequence_number = get_bits(gb, 4);
1526 /** get number of bits that need to be added to the previous frame */
1527 num_bits_prev_frame = get_bits(gb, s->log2_frame_size);
1528 av_dlog(avctx, "packet[%d]: nbpf %x\n", avctx->frame_number,
1529 num_bits_prev_frame);
1531 /** check for packet loss */
/** the 4-bit sequence number must increase by exactly one (mod 16) */
1532 if (!s->packet_loss &&
1533 ((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) {
1535 av_log(avctx, AV_LOG_ERROR, "Packet loss detected! seq %x vs %x\n",
1536 s->packet_sequence_number, packet_sequence_number);
1538 s->packet_sequence_number = packet_sequence_number;
1540 if (num_bits_prev_frame > 0) {
1541 int remaining_packet_bits = s->buf_bit_size - get_bits_count(gb);
/** clamp a corrupt count to what this packet can actually provide */
1542 if (num_bits_prev_frame >= remaining_packet_bits) {
1543 num_bits_prev_frame = remaining_packet_bits;
1547 /** append the previous frame data to the remaining data from the
1548 previous packet to create a full frame */
1549 save_bits(s, gb, num_bits_prev_frame, 1);
1550 av_dlog(avctx, "accumulated %x bits of frame data\n",
1551 s->num_saved_bits - s->frame_offset);
1553 /** decode the cross packet frame if it is valid */
1554 if (!s->packet_loss)
1555 decode_frame(s, got_frame_ptr);
1556 } else if (s->num_saved_bits - s->frame_offset) {
1557 av_dlog(avctx, "ignoring %x previously saved bits\n",
1558 s->num_saved_bits - s->frame_offset);
1561 if (s->packet_loss) {
1562 /** reset number of saved bits so that the decoder
1563 does not start to decode incomplete frames in the
1564 s->len_prefix == 0 case */
1565 s->num_saved_bits = 0;
/** continuation of the packet consumed on a previous call: re-open the
 *  reader at the stored bit offset */
1571 s->buf_bit_size = (avpkt->size - s->next_packet_start) << 3;
1572 init_get_bits(gb, avpkt->data, s->buf_bit_size);
1573 skip_bits(gb, s->packet_offset);
/** a full length-prefixed frame fits in the remaining packet data */
1574 if (s->len_prefix && remaining_bits(s, gb) > s->log2_frame_size &&
1575 (frame_size = show_bits(gb, s->log2_frame_size)) &&
1576 frame_size <= remaining_bits(s, gb)) {
1577 save_bits(s, gb, frame_size, 0);
1578 s->packet_done = !decode_frame(s, got_frame_ptr);
1579 } else if (!s->len_prefix
1580 && s->num_saved_bits > get_bits_count(&s->gb)) {
1581 /** when the frames do not have a length prefix, we don't know
1582 the compressed length of the individual frames
1583 however, we know what part of a new packet belongs to the
1585 therefore we save the incoming packet first, then we append
1586 the "previous frame" data from the next packet so that
1587 we get a buffer that only contains full frames */
1588 s->packet_done = !decode_frame(s, got_frame_ptr);
1593 if (s->packet_done && !s->packet_loss &&
1594 remaining_bits(s, gb) > 0) {
1595 /** save the rest of the data so that it can be decoded
1596 with the next packet */
1597 save_bits(s, gb, remaining_bits(s, gb), 0);
/** remember the sub-byte read position for the next call */
1600 s->packet_offset = get_bits_count(gb) & 7;
1602 return AVERROR_INVALIDDATA;
/** hand the decoded frame to the caller by value (pre-refcount API) */
1605 *(AVFrame *)data = s->frame;
1607 return get_bits_count(gb) >> 3;
1611 *@brief Clear decoder buffers (for seeking).
1612 *@param avctx codec context
1614 static void flush(AVCodecContext *avctx)
1616 WMAProDecodeCtx *s = avctx->priv_data;
1618 /** reset output buffer as a part of it is used during the windowing of a
/** zero the overlap history so stale samples cannot bleed into the first
 *  frame decoded after the seek */
1620 for (i = 0; i < avctx->channels; i++)
1621 memset(s->channel[i].out, 0, s->samples_per_frame *
1622 sizeof(*s->channel[i].out));
1628 *@brief wmapro decoder
1630 AVCodec ff_wmapro_decoder = {
1632 .type = AVMEDIA_TYPE_AUDIO,
1633 .id = AV_CODEC_ID_WMAPRO,
1634 .priv_data_size = sizeof(WMAProDecodeCtx),
1635 .init = decode_init,
1636 .close = decode_end,
1637 .decode = decode_packet,
/** CODEC_CAP_SUBFRAMES: one packet may yield several frames across calls */
1638 .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
1640 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
/** planar float only, matching the per-channel copy in decode_frame() */
1641 .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
1642 AV_SAMPLE_FMT_NONE },