2 * Wmapro compatible decoder
3 * Copyright (c) 2007 Baptiste Coudurier, Benjamin Larsson, Ulion
4 * Copyright (c) 2008 - 2011 Sascha Sommer, Benjamin Larsson
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * @brief wmapro decoder implementation
26 * Wmapro is an MDCT based codec comparable to wma standard or AAC.
27 * The decoding therefore consists of the following steps:
28 * - bitstream decoding
29 * - reconstruction of per-channel data
30 * - rescaling and inverse quantization
 * - windowing and overlap-add
34 * The compressed wmapro bitstream is split into individual packets.
35 * Every such packet contains one or more wma frames.
36 * The compressed frames may have a variable length and frames may
37 * cross packet boundaries.
38 * Common to all wmapro frames is the number of samples that are stored in
40 * The number of samples and a few other decode flags are stored
41 * as extradata that has to be passed to the decoder.
43 * The wmapro frames themselves are again split into a variable number of
44 * subframes. Every subframe contains the data for 2^N time domain samples
45 * where N varies between 7 and 12.
47 * Example wmapro bitstream (in samples):
49 * || packet 0 || packet 1 || packet 2 packets
50 * ---------------------------------------------------
51 * || frame 0 || frame 1 || frame 2 || frames
52 * ---------------------------------------------------
53 * || | | || | | | || || subframes of channel 0
54 * ---------------------------------------------------
55 * || | | || | | | || || subframes of channel 1
56 * ---------------------------------------------------
 * The frame layouts for the individual channels of a wma frame do not need
61 * However, if the offsets and lengths of several subframes of a frame are the
62 * same, the subframes of the channels can be grouped.
63 * Every group may then use special coding techniques like M/S stereo coding
64 * to improve the compression ratio. These channel transformations do not
65 * need to be applied to a whole subframe. Instead, they can also work on
66 * individual scale factor bands (see below).
67 * The coefficients that carry the audio signal in the frequency domain
68 * are transmitted as huffman-coded vectors with 4, 2 and 1 elements.
69 * In addition to that, the encoder can switch to a runlevel coding scheme
70 * by transmitting subframe_length / 128 zero coefficients.
72 * Before the audio signal can be converted to the time domain, the
73 * coefficients have to be rescaled and inverse quantized.
74 * A subframe is therefore split into several scale factor bands that get
75 * scaled individually.
76 * Scale factors are submitted for every frame but they might be shared
77 * between the subframes of a channel. Scale factors are initially DPCM-coded.
78 * Once scale factors are shared, the differences are transmitted as runlevel
80 * Every subframe length and offset combination in the frame layout shares a
81 * common quantization factor that can be adjusted for every channel by a
83 * After the inverse quantization, the coefficients get processed by an IMDCT.
84 * The resulting values are then windowed with a sine window and the first half
85 * of the values are added to the second half of the output from the previous
86 * subframe in order to reconstruct the output samples.
89 #include "libavutil/float_dsp.h"
90 #include "libavutil/intfloat.h"
91 #include "libavutil/intreadwrite.h"
96 #include "wmaprodata.h"
99 #include "wma_common.h"
101 /** current decoder limitations */
102 #define WMAPRO_MAX_CHANNELS 8 ///< max number of handled channels
103 #define MAX_SUBFRAMES 32 ///< max number of subframes per channel
104 #define MAX_BANDS 29 ///< max number of scale factor bands
105 #define MAX_FRAMESIZE 32768 ///< maximum compressed frame size
107 #define WMAPRO_BLOCK_MIN_BITS 6 ///< log2 of min block size
108 #define WMAPRO_BLOCK_MAX_BITS 13 ///< log2 of max block size
109 #define WMAPRO_BLOCK_MIN_SIZE (1 << WMAPRO_BLOCK_MIN_BITS) ///< minimum block size
110 #define WMAPRO_BLOCK_MAX_SIZE (1 << WMAPRO_BLOCK_MAX_BITS) ///< maximum block size
111 #define WMAPRO_BLOCK_SIZES (WMAPRO_BLOCK_MAX_BITS - WMAPRO_BLOCK_MIN_BITS + 1) ///< possible block sizes
115 #define SCALEVLCBITS 8
116 #define VEC4MAXDEPTH ((HUFF_VEC4_MAXBITS+VLCBITS-1)/VLCBITS)
117 #define VEC2MAXDEPTH ((HUFF_VEC2_MAXBITS+VLCBITS-1)/VLCBITS)
118 #define VEC1MAXDEPTH ((HUFF_VEC1_MAXBITS+VLCBITS-1)/VLCBITS)
119 #define SCALEMAXDEPTH ((HUFF_SCALE_MAXBITS+SCALEVLCBITS-1)/SCALEVLCBITS)
120 #define SCALERLMAXDEPTH ((HUFF_SCALE_RL_MAXBITS+VLCBITS-1)/VLCBITS)
122 static VLC sf_vlc; ///< scale factor DPCM vlc
123 static VLC sf_rl_vlc; ///< scale factor run length vlc
124 static VLC vec4_vlc; ///< 4 coefficients per symbol
125 static VLC vec2_vlc; ///< 2 coefficients per symbol
126 static VLC vec1_vlc; ///< 1 coefficient per symbol
127 static VLC coef_vlc[2]; ///< coefficient run length vlc codes
128 static float sin64[33]; ///< sinus table for decorrelation
131 * @brief frame specific decoder context for a single channel
134 int16_t prev_block_len; ///< length of the previous block
135 uint8_t transmit_coefs;
136 uint8_t num_subframes;
137 uint16_t subframe_len[MAX_SUBFRAMES]; ///< subframe length in samples
138 uint16_t subframe_offset[MAX_SUBFRAMES]; ///< subframe positions in the current frame
139 uint8_t cur_subframe; ///< current subframe number
140 uint16_t decoded_samples; ///< number of already processed samples
141 uint8_t grouped; ///< channel is part of a group
142 int quant_step; ///< quantization step for the current subframe
143 int8_t reuse_sf; ///< share scale factors between subframes
144 int8_t scale_factor_step; ///< scaling step for the current subframe
145 int max_scale_factor; ///< maximum scale factor for the current subframe
146 int saved_scale_factors[2][MAX_BANDS]; ///< resampled and (previously) transmitted scale factor values
147 int8_t scale_factor_idx; ///< index for the transmitted scale factor values (used for resampling)
148 int* scale_factors; ///< pointer to the scale factor values used for decoding
149 uint8_t table_idx; ///< index in sf_offsets for the scale factor reference block
150 float* coeffs; ///< pointer to the subframe decode buffer
151 uint16_t num_vec_coeffs; ///< number of vector coded coefficients
152 DECLARE_ALIGNED(32, float, out)[WMAPRO_BLOCK_MAX_SIZE + WMAPRO_BLOCK_MAX_SIZE / 2]; ///< output buffer
156 * @brief channel group for channel transformations
159 uint8_t num_channels; ///< number of channels in the group
160 int8_t transform; ///< transform on / off
161 int8_t transform_band[MAX_BANDS]; ///< controls if the transform is enabled for a certain band
162 float decorrelation_matrix[WMAPRO_MAX_CHANNELS*WMAPRO_MAX_CHANNELS];
163 float* channel_data[WMAPRO_MAX_CHANNELS]; ///< transformation coefficients
167 * @brief main decoder context
169 typedef struct WMAProDecodeCtx {
170 /* generic decoder variables */
171 AVCodecContext* avctx; ///< codec context for av_log
172 AVFloatDSPContext fdsp;
173 uint8_t frame_data[MAX_FRAMESIZE +
174 FF_INPUT_BUFFER_PADDING_SIZE];///< compressed frame data
175 PutBitContext pb; ///< context for filling the frame_data buffer
176 FFTContext mdct_ctx[WMAPRO_BLOCK_SIZES]; ///< MDCT context per block size
177 DECLARE_ALIGNED(32, float, tmp)[WMAPRO_BLOCK_MAX_SIZE]; ///< IMDCT output buffer
178 float* windows[WMAPRO_BLOCK_SIZES]; ///< windows for the different block sizes
180 /* frame size dependent frame information (set during initialization) */
181 uint32_t decode_flags; ///< used compression features
182 uint8_t len_prefix; ///< frame is prefixed with its length
183 uint8_t dynamic_range_compression; ///< frame contains DRC data
184 uint8_t bits_per_sample; ///< integer audio sample size for the unscaled IMDCT output (used to scale to [-1.0, 1.0])
185 uint16_t samples_per_frame; ///< number of samples to output
186 uint16_t log2_frame_size;
187 int8_t lfe_channel; ///< lfe channel index
188 uint8_t max_num_subframes;
189 uint8_t subframe_len_bits; ///< number of bits used for the subframe length
190 uint8_t max_subframe_len_bit; ///< flag indicating that the subframe is of maximum size when the first subframe length bit is 1
191 uint16_t min_samples_per_subframe;
192 int8_t num_sfb[WMAPRO_BLOCK_SIZES]; ///< scale factor bands per block size
193 int16_t sfb_offsets[WMAPRO_BLOCK_SIZES][MAX_BANDS]; ///< scale factor band offsets (multiples of 4)
194 int8_t sf_offsets[WMAPRO_BLOCK_SIZES][WMAPRO_BLOCK_SIZES][MAX_BANDS]; ///< scale factor resample matrix
195 int16_t subwoofer_cutoffs[WMAPRO_BLOCK_SIZES]; ///< subwoofer cutoff values
197 /* packet decode state */
198 GetBitContext pgb; ///< bitstream reader context for the packet
199 int next_packet_start; ///< start offset of the next wma packet in the demuxer packet
200 uint8_t packet_offset; ///< frame offset in the packet
201 uint8_t packet_sequence_number; ///< current packet number
202 int num_saved_bits; ///< saved number of bits
203 int frame_offset; ///< frame offset in the bit reservoir
204 int subframe_offset; ///< subframe offset in the bit reservoir
205 uint8_t packet_loss; ///< set in case of bitstream error
206 uint8_t packet_done; ///< set when a packet is fully decoded
208 /* frame decode state */
209 uint32_t frame_num; ///< current frame number (not used for decoding)
210 GetBitContext gb; ///< bitstream reader context
211 int buf_bit_size; ///< buffer size in bits
212 uint8_t drc_gain; ///< gain for the DRC tool
213 int8_t skip_frame; ///< skip output step
214 int8_t parsed_all_subframes; ///< all subframes decoded?
216 /* subframe/block decode state */
217 int16_t subframe_len; ///< current subframe length
218 int8_t channels_for_cur_subframe; ///< number of channels that contain the subframe
219 int8_t channel_indexes_for_cur_subframe[WMAPRO_MAX_CHANNELS];
220 int8_t num_bands; ///< number of scale factor bands
221 int8_t transmit_num_vec_coeffs; ///< number of vector coded coefficients is part of the bitstream
222 int16_t* cur_sfb_offsets; ///< sfb offsets for the current block
223 uint8_t table_idx; ///< index for the num_sfb, sfb_offsets, sf_offsets and subwoofer_cutoffs tables
224 int8_t esc_len; ///< length of escaped coefficients
226 uint8_t num_chgroups; ///< number of channel groups
227 WMAProChannelGrp chgroup[WMAPRO_MAX_CHANNELS]; ///< channel group information
229 WMAProChannelCtx channel[WMAPRO_MAX_CHANNELS]; ///< per channel data
234 *@brief helper function to print the most important members of the context
237 static av_cold void dump_context(WMAProDecodeCtx *s)
239 #define PRINT(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %d\n", a, b);
240 #define PRINT_HEX(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %x\n", a, b);
242 PRINT("ed sample bit depth", s->bits_per_sample);
243 PRINT_HEX("ed decode flags", s->decode_flags);
244 PRINT("samples per frame", s->samples_per_frame);
245 PRINT("log2 frame size", s->log2_frame_size);
246 PRINT("max num subframes", s->max_num_subframes);
247 PRINT("len prefix", s->len_prefix);
248 PRINT("num channels", s->avctx->channels);
252 *@brief Uninitialize the decoder and free all resources.
253 *@param avctx codec context
254 *@return 0 on success, < 0 otherwise
256 static av_cold int decode_end(AVCodecContext *avctx)
258 WMAProDecodeCtx *s = avctx->priv_data;
261 for (i = 0; i < WMAPRO_BLOCK_SIZES; i++)
262 ff_mdct_end(&s->mdct_ctx[i]);
268 *@brief Initialize the decoder.
269 *@param avctx codec context
270 *@return 0 on success, -1 otherwise
272 static av_cold int decode_init(AVCodecContext *avctx)
274 WMAProDecodeCtx *s = avctx->priv_data;
275 uint8_t *edata_ptr = avctx->extradata;
276 unsigned int channel_mask;
278 int log2_max_num_subframes;
279 int num_possible_block_sizes;
281 if (!avctx->block_align) {
282 av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
283 return AVERROR(EINVAL);
287 avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
289 init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
291 avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
293 if (avctx->extradata_size >= 18) {
294 s->decode_flags = AV_RL16(edata_ptr+14);
295 channel_mask = AV_RL32(edata_ptr+2);
296 s->bits_per_sample = AV_RL16(edata_ptr);
297 /** dump the extradata */
298 for (i = 0; i < avctx->extradata_size; i++)
299 av_dlog(avctx, "[%x] ", avctx->extradata[i]);
300 av_dlog(avctx, "\n");
303 avpriv_request_sample(avctx, "Unknown extradata size");
304 return AVERROR_PATCHWELCOME;
308 s->log2_frame_size = av_log2(avctx->block_align) + 4;
311 s->skip_frame = 1; /* skip first frame */
313 s->len_prefix = (s->decode_flags & 0x40);
316 bits = ff_wma_get_frame_len_bits(avctx->sample_rate, 3, s->decode_flags);
317 if (bits > WMAPRO_BLOCK_MAX_BITS) {
318 avpriv_request_sample(avctx, "14-bit block sizes");
319 return AVERROR_PATCHWELCOME;
321 s->samples_per_frame = 1 << bits;
324 log2_max_num_subframes = ((s->decode_flags & 0x38) >> 3);
325 s->max_num_subframes = 1 << log2_max_num_subframes;
326 if (s->max_num_subframes == 16 || s->max_num_subframes == 4)
327 s->max_subframe_len_bit = 1;
328 s->subframe_len_bits = av_log2(log2_max_num_subframes) + 1;
330 num_possible_block_sizes = log2_max_num_subframes + 1;
331 s->min_samples_per_subframe = s->samples_per_frame / s->max_num_subframes;
332 s->dynamic_range_compression = (s->decode_flags & 0x80);
334 if (s->max_num_subframes > MAX_SUBFRAMES) {
335 av_log(avctx, AV_LOG_ERROR, "invalid number of subframes %i\n",
336 s->max_num_subframes);
337 return AVERROR_INVALIDDATA;
340 if (s->min_samples_per_subframe < WMAPRO_BLOCK_MIN_SIZE) {
341 av_log(avctx, AV_LOG_ERROR, "min_samples_per_subframe of %d too small\n",
342 s->min_samples_per_subframe);
343 return AVERROR_INVALIDDATA;
346 if (s->avctx->sample_rate <= 0) {
347 av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
348 return AVERROR_INVALIDDATA;
351 if (avctx->channels < 0) {
352 av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n",
354 return AVERROR_INVALIDDATA;
355 } else if (avctx->channels > WMAPRO_MAX_CHANNELS) {
356 avpriv_request_sample(avctx,
357 "More than %d channels", WMAPRO_MAX_CHANNELS);
358 return AVERROR_PATCHWELCOME;
361 /** init previous block len */
362 for (i = 0; i < avctx->channels; i++)
363 s->channel[i].prev_block_len = s->samples_per_frame;
365 /** extract lfe channel position */
368 if (channel_mask & 8) {
370 for (mask = 1; mask < 16; mask <<= 1) {
371 if (channel_mask & mask)
376 INIT_VLC_STATIC(&sf_vlc, SCALEVLCBITS, HUFF_SCALE_SIZE,
377 scale_huffbits, 1, 1,
378 scale_huffcodes, 2, 2, 616);
380 INIT_VLC_STATIC(&sf_rl_vlc, VLCBITS, HUFF_SCALE_RL_SIZE,
381 scale_rl_huffbits, 1, 1,
382 scale_rl_huffcodes, 4, 4, 1406);
384 INIT_VLC_STATIC(&coef_vlc[0], VLCBITS, HUFF_COEF0_SIZE,
385 coef0_huffbits, 1, 1,
386 coef0_huffcodes, 4, 4, 2108);
388 INIT_VLC_STATIC(&coef_vlc[1], VLCBITS, HUFF_COEF1_SIZE,
389 coef1_huffbits, 1, 1,
390 coef1_huffcodes, 4, 4, 3912);
392 INIT_VLC_STATIC(&vec4_vlc, VLCBITS, HUFF_VEC4_SIZE,
394 vec4_huffcodes, 2, 2, 604);
396 INIT_VLC_STATIC(&vec2_vlc, VLCBITS, HUFF_VEC2_SIZE,
398 vec2_huffcodes, 2, 2, 562);
400 INIT_VLC_STATIC(&vec1_vlc, VLCBITS, HUFF_VEC1_SIZE,
402 vec1_huffcodes, 2, 2, 562);
404 /** calculate number of scale factor bands and their offsets
405 for every possible block size */
406 for (i = 0; i < num_possible_block_sizes; i++) {
407 int subframe_len = s->samples_per_frame >> i;
411 s->sfb_offsets[i][0] = 0;
413 for (x = 0; x < MAX_BANDS-1 && s->sfb_offsets[i][band - 1] < subframe_len; x++) {
414 int offset = (subframe_len * 2 * critical_freq[x])
415 / s->avctx->sample_rate + 2;
417 if (offset > s->sfb_offsets[i][band - 1])
418 s->sfb_offsets[i][band++] = offset;
420 s->sfb_offsets[i][band - 1] = subframe_len;
421 s->num_sfb[i] = band - 1;
422 if (s->num_sfb[i] <= 0) {
423 av_log(avctx, AV_LOG_ERROR, "num_sfb invalid\n");
424 return AVERROR_INVALIDDATA;
429 /** Scale factors can be shared between blocks of different size
430 as every block has a different scale factor band layout.
431 The matrix sf_offsets is needed to find the correct scale factor.
434 for (i = 0; i < num_possible_block_sizes; i++) {
436 for (b = 0; b < s->num_sfb[i]; b++) {
438 int offset = ((s->sfb_offsets[i][b]
439 + s->sfb_offsets[i][b + 1] - 1) << i) >> 1;
440 for (x = 0; x < num_possible_block_sizes; x++) {
442 while (s->sfb_offsets[x][v + 1] << x < offset) {
444 av_assert0(v < MAX_BANDS);
446 s->sf_offsets[i][x][b] = v;
451 /** init MDCT, FIXME: only init needed sizes */
452 for (i = 0; i < WMAPRO_BLOCK_SIZES; i++)
453 ff_mdct_init(&s->mdct_ctx[i], WMAPRO_BLOCK_MIN_BITS+1+i, 1,
454 1.0 / (1 << (WMAPRO_BLOCK_MIN_BITS + i - 1))
455 / (1 << (s->bits_per_sample - 1)));
457 /** init MDCT windows: simple sinus window */
458 for (i = 0; i < WMAPRO_BLOCK_SIZES; i++) {
459 const int win_idx = WMAPRO_BLOCK_MAX_BITS - i;
460 ff_init_ff_sine_windows(win_idx);
461 s->windows[WMAPRO_BLOCK_SIZES - i - 1] = ff_sine_windows[win_idx];
464 /** calculate subwoofer cutoff values */
465 for (i = 0; i < num_possible_block_sizes; i++) {
466 int block_size = s->samples_per_frame >> i;
467 int cutoff = (440*block_size + 3 * (s->avctx->sample_rate >> 1) - 1)
468 / s->avctx->sample_rate;
469 s->subwoofer_cutoffs[i] = av_clip(cutoff, 4, block_size);
472 /** calculate sine values for the decorrelation matrix */
473 for (i = 0; i < 33; i++)
474 sin64[i] = sin(i*M_PI / 64.0);
476 if (avctx->debug & FF_DEBUG_BITSTREAM)
479 avctx->channel_layout = channel_mask;
485 *@brief Decode the subframe length.
487 *@param offset sample offset in the frame
488 *@return decoded subframe length on success, < 0 in case of an error
490 static int decode_subframe_length(WMAProDecodeCtx *s, int offset)
492 int frame_len_shift = 0;
495 /** no need to read from the bitstream when only one length is possible */
496 if (offset == s->samples_per_frame - s->min_samples_per_subframe)
497 return s->min_samples_per_subframe;
499 /** 1 bit indicates if the subframe is of maximum length */
500 if (s->max_subframe_len_bit) {
501 if (get_bits1(&s->gb))
502 frame_len_shift = 1 + get_bits(&s->gb, s->subframe_len_bits-1);
504 frame_len_shift = get_bits(&s->gb, s->subframe_len_bits);
506 subframe_len = s->samples_per_frame >> frame_len_shift;
508 /** sanity check the length */
509 if (subframe_len < s->min_samples_per_subframe ||
510 subframe_len > s->samples_per_frame) {
511 av_log(s->avctx, AV_LOG_ERROR, "broken frame: subframe_len %i\n",
513 return AVERROR_INVALIDDATA;
519 *@brief Decode how the data in the frame is split into subframes.
520 * Every WMA frame contains the encoded data for a fixed number of
521 * samples per channel. The data for every channel might be split
522 * into several subframes. This function will reconstruct the list of
523 * subframes for every channel.
525 * If the subframes are not evenly split, the algorithm estimates the
526 * channels with the lowest number of total samples.
527 * Afterwards, for each of these channels a bit is read from the
528 * bitstream that indicates if the channel contains a subframe with the
529 * next subframe size that is going to be read from the bitstream or not.
530 * If a channel contains such a subframe, the subframe size gets added to
531 * the channel's subframe list.
532 * The algorithm repeats these steps until the frame is properly divided
533 * between the individual channels.
536 *@return 0 on success, < 0 in case of an error
538 static int decode_tilehdr(WMAProDecodeCtx *s)
540 uint16_t num_samples[WMAPRO_MAX_CHANNELS] = { 0 };/**< sum of samples for all currently known subframes of a channel */
541 uint8_t contains_subframe[WMAPRO_MAX_CHANNELS]; /**< flag indicating if a channel contains the current subframe */
542 int channels_for_cur_subframe = s->avctx->channels; /**< number of channels that contain the current subframe */
543 int fixed_channel_layout = 0; /**< flag indicating that all channels use the same subframe offsets and sizes */
544 int min_channel_len = 0; /**< smallest sum of samples (channels with this length will be processed first) */
547 /* Should never consume more than 3073 bits (256 iterations for the
548 * while loop when always the minimum amount of 128 samples is subtracted
549 * from missing samples in the 8 channel case).
550 * 1 + BLOCK_MAX_SIZE * MAX_CHANNELS / BLOCK_MIN_SIZE * (MAX_CHANNELS + 4)
553 /** reset tiling information */
554 for (c = 0; c < s->avctx->channels; c++)
555 s->channel[c].num_subframes = 0;
557 if (s->max_num_subframes == 1 || get_bits1(&s->gb))
558 fixed_channel_layout = 1;
560 /** loop until the frame data is split between the subframes */
564 /** check which channels contain the subframe */
565 for (c = 0; c < s->avctx->channels; c++) {
566 if (num_samples[c] == min_channel_len) {
567 if (fixed_channel_layout || channels_for_cur_subframe == 1 ||
568 (min_channel_len == s->samples_per_frame - s->min_samples_per_subframe))
569 contains_subframe[c] = 1;
571 contains_subframe[c] = get_bits1(&s->gb);
573 contains_subframe[c] = 0;
576 /** get subframe length, subframe_len == 0 is not allowed */
577 if ((subframe_len = decode_subframe_length(s, min_channel_len)) <= 0)
578 return AVERROR_INVALIDDATA;
580 /** add subframes to the individual channels and find new min_channel_len */
581 min_channel_len += subframe_len;
582 for (c = 0; c < s->avctx->channels; c++) {
583 WMAProChannelCtx* chan = &s->channel[c];
585 if (contains_subframe[c]) {
586 if (chan->num_subframes >= MAX_SUBFRAMES) {
587 av_log(s->avctx, AV_LOG_ERROR,
588 "broken frame: num subframes > 31\n");
589 return AVERROR_INVALIDDATA;
591 chan->subframe_len[chan->num_subframes] = subframe_len;
592 num_samples[c] += subframe_len;
593 ++chan->num_subframes;
594 if (num_samples[c] > s->samples_per_frame) {
595 av_log(s->avctx, AV_LOG_ERROR, "broken frame: "
596 "channel len > samples_per_frame\n");
597 return AVERROR_INVALIDDATA;
599 } else if (num_samples[c] <= min_channel_len) {
600 if (num_samples[c] < min_channel_len) {
601 channels_for_cur_subframe = 0;
602 min_channel_len = num_samples[c];
604 ++channels_for_cur_subframe;
607 } while (min_channel_len < s->samples_per_frame);
609 for (c = 0; c < s->avctx->channels; c++) {
612 for (i = 0; i < s->channel[c].num_subframes; i++) {
613 av_dlog(s->avctx, "frame[%i] channel[%i] subframe[%i]"
614 " len %i\n", s->frame_num, c, i,
615 s->channel[c].subframe_len[i]);
616 s->channel[c].subframe_offset[i] = offset;
617 offset += s->channel[c].subframe_len[i];
625 *@brief Calculate a decorrelation matrix from the bitstream parameters.
626 *@param s codec context
627 *@param chgroup channel group for which the matrix needs to be calculated
629 static void decode_decorrelation_matrix(WMAProDecodeCtx *s,
630 WMAProChannelGrp *chgroup)
634 int8_t rotation_offset[WMAPRO_MAX_CHANNELS * WMAPRO_MAX_CHANNELS];
635 memset(chgroup->decorrelation_matrix, 0, s->avctx->channels *
636 s->avctx->channels * sizeof(*chgroup->decorrelation_matrix));
638 for (i = 0; i < chgroup->num_channels * (chgroup->num_channels - 1) >> 1; i++)
639 rotation_offset[i] = get_bits(&s->gb, 6);
641 for (i = 0; i < chgroup->num_channels; i++)
642 chgroup->decorrelation_matrix[chgroup->num_channels * i + i] =
643 get_bits1(&s->gb) ? 1.0 : -1.0;
645 for (i = 1; i < chgroup->num_channels; i++) {
647 for (x = 0; x < i; x++) {
649 for (y = 0; y < i + 1; y++) {
650 float v1 = chgroup->decorrelation_matrix[x * chgroup->num_channels + y];
651 float v2 = chgroup->decorrelation_matrix[i * chgroup->num_channels + y];
652 int n = rotation_offset[offset + x];
658 cosv = sin64[32 - n];
660 sinv = sin64[64 - n];
661 cosv = -sin64[n - 32];
664 chgroup->decorrelation_matrix[y + x * chgroup->num_channels] =
665 (v1 * sinv) - (v2 * cosv);
666 chgroup->decorrelation_matrix[y + i * chgroup->num_channels] =
667 (v1 * cosv) + (v2 * sinv);
675 *@brief Decode channel transformation parameters
676 *@param s codec context
677 *@return 0 in case of success, < 0 in case of bitstream errors
679 static int decode_channel_transform(WMAProDecodeCtx* s)
682 /* should never consume more than 1921 bits for the 8 channel case
683 * 1 + MAX_CHANNELS * (MAX_CHANNELS + 2 + 3 * MAX_CHANNELS * MAX_CHANNELS
684 * + MAX_CHANNELS + MAX_BANDS + 1)
687 /** in the one channel case channel transforms are pointless */
689 if (s->avctx->channels > 1) {
690 int remaining_channels = s->channels_for_cur_subframe;
692 if (get_bits1(&s->gb)) {
693 avpriv_request_sample(s->avctx,
694 "Channel transform bit");
695 return AVERROR_PATCHWELCOME;
698 for (s->num_chgroups = 0; remaining_channels &&
699 s->num_chgroups < s->channels_for_cur_subframe; s->num_chgroups++) {
700 WMAProChannelGrp* chgroup = &s->chgroup[s->num_chgroups];
701 float** channel_data = chgroup->channel_data;
702 chgroup->num_channels = 0;
703 chgroup->transform = 0;
705 /** decode channel mask */
706 if (remaining_channels > 2) {
707 for (i = 0; i < s->channels_for_cur_subframe; i++) {
708 int channel_idx = s->channel_indexes_for_cur_subframe[i];
709 if (!s->channel[channel_idx].grouped
710 && get_bits1(&s->gb)) {
711 ++chgroup->num_channels;
712 s->channel[channel_idx].grouped = 1;
713 *channel_data++ = s->channel[channel_idx].coeffs;
717 chgroup->num_channels = remaining_channels;
718 for (i = 0; i < s->channels_for_cur_subframe; i++) {
719 int channel_idx = s->channel_indexes_for_cur_subframe[i];
720 if (!s->channel[channel_idx].grouped)
721 *channel_data++ = s->channel[channel_idx].coeffs;
722 s->channel[channel_idx].grouped = 1;
726 /** decode transform type */
727 if (chgroup->num_channels == 2) {
728 if (get_bits1(&s->gb)) {
729 if (get_bits1(&s->gb)) {
730 avpriv_request_sample(s->avctx,
731 "Unknown channel transform type");
732 return AVERROR_PATCHWELCOME;
735 chgroup->transform = 1;
736 if (s->avctx->channels == 2) {
737 chgroup->decorrelation_matrix[0] = 1.0;
738 chgroup->decorrelation_matrix[1] = -1.0;
739 chgroup->decorrelation_matrix[2] = 1.0;
740 chgroup->decorrelation_matrix[3] = 1.0;
743 chgroup->decorrelation_matrix[0] = 0.70703125;
744 chgroup->decorrelation_matrix[1] = -0.70703125;
745 chgroup->decorrelation_matrix[2] = 0.70703125;
746 chgroup->decorrelation_matrix[3] = 0.70703125;
749 } else if (chgroup->num_channels > 2) {
750 if (get_bits1(&s->gb)) {
751 chgroup->transform = 1;
752 if (get_bits1(&s->gb)) {
753 decode_decorrelation_matrix(s, chgroup);
755 /** FIXME: more than 6 coupled channels not supported */
756 if (chgroup->num_channels > 6) {
757 avpriv_request_sample(s->avctx,
758 "Coupled channels > 6");
760 memcpy(chgroup->decorrelation_matrix,
761 default_decorrelation[chgroup->num_channels],
762 chgroup->num_channels * chgroup->num_channels *
763 sizeof(*chgroup->decorrelation_matrix));
769 /** decode transform on / off */
770 if (chgroup->transform) {
771 if (!get_bits1(&s->gb)) {
773 /** transform can be enabled for individual bands */
774 for (i = 0; i < s->num_bands; i++) {
775 chgroup->transform_band[i] = get_bits1(&s->gb);
778 memset(chgroup->transform_band, 1, s->num_bands);
781 remaining_channels -= chgroup->num_channels;
788 *@brief Extract the coefficients from the bitstream.
789 *@param s codec context
790 *@param c current channel number
791 *@return 0 on success, < 0 in case of bitstream errors
793 static int decode_coeffs(WMAProDecodeCtx *s, int c)
795 /* Integers 0..15 as single-precision floats. The table saves a
796 costly int to float conversion, and storing the values as
797 integers allows fast sign-flipping. */
798 static const uint32_t fval_tab[16] = {
799 0x00000000, 0x3f800000, 0x40000000, 0x40400000,
800 0x40800000, 0x40a00000, 0x40c00000, 0x40e00000,
801 0x41000000, 0x41100000, 0x41200000, 0x41300000,
802 0x41400000, 0x41500000, 0x41600000, 0x41700000,
806 WMAProChannelCtx* ci = &s->channel[c];
813 av_dlog(s->avctx, "decode coefficients for channel %i\n", c);
815 vlctable = get_bits1(&s->gb);
816 vlc = &coef_vlc[vlctable];
826 /** decode vector coefficients (consumes up to 167 bits per iteration for
827 4 vector coded large values) */
828 while ((s->transmit_num_vec_coeffs || !rl_mode) &&
829 (cur_coeff + 3 < ci->num_vec_coeffs)) {
834 idx = get_vlc2(&s->gb, vec4_vlc.table, VLCBITS, VEC4MAXDEPTH);
836 if (idx == HUFF_VEC4_SIZE - 1) {
837 for (i = 0; i < 4; i += 2) {
838 idx = get_vlc2(&s->gb, vec2_vlc.table, VLCBITS, VEC2MAXDEPTH);
839 if (idx == HUFF_VEC2_SIZE - 1) {
841 v0 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
842 if (v0 == HUFF_VEC1_SIZE - 1)
843 v0 += ff_wma_get_large_val(&s->gb);
844 v1 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
845 if (v1 == HUFF_VEC1_SIZE - 1)
846 v1 += ff_wma_get_large_val(&s->gb);
847 vals[i ] = av_float2int(v0);
848 vals[i+1] = av_float2int(v1);
850 vals[i] = fval_tab[symbol_to_vec2[idx] >> 4 ];
851 vals[i+1] = fval_tab[symbol_to_vec2[idx] & 0xF];
855 vals[0] = fval_tab[ symbol_to_vec4[idx] >> 12 ];
856 vals[1] = fval_tab[(symbol_to_vec4[idx] >> 8) & 0xF];
857 vals[2] = fval_tab[(symbol_to_vec4[idx] >> 4) & 0xF];
858 vals[3] = fval_tab[ symbol_to_vec4[idx] & 0xF];
862 for (i = 0; i < 4; i++) {
864 uint32_t sign = get_bits1(&s->gb) - 1;
865 AV_WN32A(&ci->coeffs[cur_coeff], vals[i] ^ sign << 31);
868 ci->coeffs[cur_coeff] = 0;
869 /** switch to run level mode when subframe_len / 128 zeros
870 were found in a row */
871 rl_mode |= (++num_zeros > s->subframe_len >> 8);
877 /** decode run level coded coefficients */
878 if (cur_coeff < s->subframe_len) {
879 memset(&ci->coeffs[cur_coeff], 0,
880 sizeof(*ci->coeffs) * (s->subframe_len - cur_coeff));
881 if (ff_wma_run_level_decode(s->avctx, &s->gb, vlc,
882 level, run, 1, ci->coeffs,
883 cur_coeff, s->subframe_len,
884 s->subframe_len, s->esc_len, 0))
885 return AVERROR_INVALIDDATA;
892 *@brief Extract scale factors from the bitstream.
893 *@param s codec context
894 *@return 0 on success, < 0 in case of bitstream errors
896 static int decode_scale_factors(WMAProDecodeCtx* s)
900 /** should never consume more than 5344 bits
901 * MAX_CHANNELS * (1 + MAX_BANDS * 23)
904 for (i = 0; i < s->channels_for_cur_subframe; i++) {
905 int c = s->channel_indexes_for_cur_subframe[i];
/** scale factors are double-buffered: decode into the half NOT indexed
 *  by scale_factor_idx so the previously transmitted set stays intact */
908 s->channel[c].scale_factors = s->channel[c].saved_scale_factors[!s->channel[c].scale_factor_idx];
909 sf_end = s->channel[c].scale_factors + s->num_bands;
911 /** resample scale factors for the new block size
912 * as the scale factors might need to be resampled several times
913 * before some new values are transmitted, a backup of the last
914 * transmitted scale factors is kept in saved_scale_factors
916 if (s->channel[c].reuse_sf) {
917 const int8_t* sf_offsets = s->sf_offsets[s->table_idx][s->channel[c].table_idx];
/** map each band of the current block size onto the saved factors */
919 for (b = 0; b < s->num_bands; b++)
920 s->channel[c].scale_factors[b] =
921 s->channel[c].saved_scale_factors[s->channel[c].scale_factor_idx][*sf_offsets++];
/** new values are transmitted for a channel's first subframe, or when
 *  signalled by a single bit in the bitstream */
924 if (!s->channel[c].cur_subframe || get_bits1(&s->gb)) {
926 if (!s->channel[c].reuse_sf) {
928 /** decode DPCM coded scale factors */
929 s->channel[c].scale_factor_step = get_bits(&s->gb, 2) + 1;
930 val = 45 / s->channel[c].scale_factor_step;
931 for (sf = s->channel[c].scale_factors; sf < sf_end; sf++) {
932 val += get_vlc2(&s->gb, sf_vlc.table, SCALEVLCBITS, SCALEMAXDEPTH) - 60;
937 /** run level decode differences to the resampled factors */
938 for (i = 0; i < s->num_bands; i++) {
944 idx = get_vlc2(&s->gb, sf_rl_vlc.table, VLCBITS, SCALERLMAXDEPTH);
/** escape code: sign flag in bit 0, run length in bits 1..5
 *  (level extraction line not visible in this view — verify) */
947 uint32_t code = get_bits(&s->gb, 14);
949 sign = (code & 1) - 1;
950 skip = (code & 0x3f) >> 1;
951 } else if (idx == 1) {
954 skip = scale_rl_run[idx];
955 val = scale_rl_level[idx];
956 sign = get_bits1(&s->gb)-1;
960 if (i >= s->num_bands) {
961 av_log(s->avctx, AV_LOG_ERROR,
962 "invalid scale factor coding\n");
963 return AVERROR_INVALIDDATA;
/** sign is 0 or -1, so (val ^ sign) - sign conditionally negates val */
965 s->channel[c].scale_factors[i] += (val ^ sign) - sign;
/** flip the double buffer: the newly decoded set becomes the saved set
 *  that later subframes may resample from */
969 s->channel[c].scale_factor_idx = !s->channel[c].scale_factor_idx;
970 s->channel[c].table_idx = s->table_idx;
971 s->channel[c].reuse_sf = 1;
974 /** calculate new scale factor maximum */
975 s->channel[c].max_scale_factor = s->channel[c].scale_factors[0];
976 for (sf = s->channel[c].scale_factors + 1; sf < sf_end; sf++) {
977 s->channel[c].max_scale_factor =
978 FFMAX(s->channel[c].max_scale_factor, *sf);
986 *@brief Reconstruct the individual channel data.
987 *@param s codec context
989 static void inverse_channel_transform(WMAProDecodeCtx *s)
993 for (i = 0; i < s->num_chgroups; i++) {
994 if (s->chgroup[i].transform) {
995 float data[WMAPRO_MAX_CHANNELS];
996 const int num_channels = s->chgroup[i].num_channels;
997 float** ch_data = s->chgroup[i].channel_data;
998 float** ch_end = ch_data + num_channels;
999 const int8_t* tb = s->chgroup[i].transform_band;
1002 /** multichannel decorrelation */
1003 for (sfb = s->cur_sfb_offsets;
1004 sfb < s->cur_sfb_offsets + s->num_bands; sfb++) {
1007 /** multiply values with the decorrelation_matrix */
1008 for (y = sfb[0]; y < FFMIN(sfb[1], s->subframe_len); y++) {
1009 const float* mat = s->chgroup[i].decorrelation_matrix;
1010 const float* data_end = data + num_channels;
1011 float* data_ptr = data;
/** gather one sample per channel at position y into data[] */
1014 for (ch = ch_data; ch < ch_end; ch++)
1015 *data_ptr++ = (*ch)[y];
/** one dot product of data[] with a matrix row per output channel
 *  (the store of the accumulated sum is on a line not shown here) */
1017 for (ch = ch_data; ch < ch_end; ch++) {
1020 while (data_ptr < data_end)
1021 sum += *data_ptr++ * *mat++;
/** stereo fallback: rescale both channels in place over this band
 *  (the scalar argument is on a continuation line not shown here) */
1026 } else if (s->avctx->channels == 2) {
1027 int len = FFMIN(sfb[1], s->subframe_len) - sfb[0];
1028 s->fdsp.vector_fmul_scalar(ch_data[0] + sfb[0],
1029 ch_data[0] + sfb[0],
1031 s->fdsp.vector_fmul_scalar(ch_data[1] + sfb[0],
1032 ch_data[1] + sfb[0],
1041 *@brief Apply sine window and reconstruct the output buffer.
1042 *@param s codec context
1044 static void wmapro_window(WMAProDecodeCtx *s)
1047 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1048 int c = s->channel_indexes_for_cur_subframe[i];
/** the overlap region begins half the PREVIOUS block length before the
 *  current subframe's coefficients */
1050 int winlen = s->channel[c].prev_block_len;
1051 float* start = s->channel[c].coeffs - (winlen >> 1);
/** when block sizes differ, the transition window is the shorter one,
 *  centered inside the longer block's overlap region */
1053 if (s->subframe_len < winlen) {
1054 start += (winlen - s->subframe_len) >> 1;
1055 winlen = s->subframe_len;
/** windows[] is indexed by log2 of the (power-of-two) window length */
1058 window = s->windows[av_log2(winlen) - WMAPRO_BLOCK_MIN_BITS];
1062 s->fdsp.vector_fmul_window(start, start, start + winlen,
/** remember this block's length for the next subframe's transition */
1065 s->channel[c].prev_block_len = s->subframe_len;
1070 *@brief Decode a single subframe (block).
1071 *@param s codec context
1072 *@return 0 on success, < 0 when decoding failed
1074 static int decode_subframe(WMAProDecodeCtx *s)
1076 int offset = s->samples_per_frame;
1077 int subframe_len = s->samples_per_frame;
1079 int total_samples = s->samples_per_frame * s->avctx->channels;
1080 int transmit_coeffs = 0;
1081 int cur_subwoofer_cutoff;
1083 s->subframe_offset = get_bits_count(&s->gb);
1085 /** reset channel context and find the next block offset and size
1086 == the next block of the channel with the smallest number of
1089 for (i = 0; i < s->avctx->channels; i++) {
1090 s->channel[i].grouped = 0;
1091 if (offset > s->channel[i].decoded_samples) {
1092 offset = s->channel[i].decoded_samples;
1094 s->channel[i].subframe_len[s->channel[i].cur_subframe];
1099 "processing subframe with offset %i len %i\n", offset, subframe_len);
1101 /** get a list of all channels that contain the estimated block */
1102 s->channels_for_cur_subframe = 0;
1103 for (i = 0; i < s->avctx->channels; i++) {
1104 const int cur_subframe = s->channel[i].cur_subframe;
1105 /** subtract already processed samples */
1106 total_samples -= s->channel[i].decoded_samples;
1108 /** and count if there are multiple subframes that match our profile */
1109 if (offset == s->channel[i].decoded_samples &&
1110 subframe_len == s->channel[i].subframe_len[cur_subframe]) {
1111 total_samples -= s->channel[i].subframe_len[cur_subframe];
1112 s->channel[i].decoded_samples +=
1113 s->channel[i].subframe_len[cur_subframe];
1114 s->channel_indexes_for_cur_subframe[s->channels_for_cur_subframe] = i;
1115 ++s->channels_for_cur_subframe;
1119 /** check if the frame will be complete after processing the
1122 s->parsed_all_subframes = 1;
1125 av_dlog(s->avctx, "subframe is part of %i channels\n",
1126 s->channels_for_cur_subframe);
1128 /** calculate number of scale factor bands and their offsets */
1129 s->table_idx = av_log2(s->samples_per_frame/subframe_len);
1130 s->num_bands = s->num_sfb[s->table_idx];
1131 s->cur_sfb_offsets = s->sfb_offsets[s->table_idx];
1132 cur_subwoofer_cutoff = s->subwoofer_cutoffs[s->table_idx];
1134 /** configure the decoder for the current subframe */
/** coefficients live half a frame into the output buffer; the first
 *  half holds the previous frame's IMDCT tail for overlap-add */
1135 offset += s->samples_per_frame >> 1;
1137 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1138 int c = s->channel_indexes_for_cur_subframe[i];
1140 s->channel[c].coeffs = &s->channel[c].out[offset];
1143 s->subframe_len = subframe_len;
1144 s->esc_len = av_log2(s->subframe_len - 1) + 1;
1146 /** skip extended header if any */
1147 if (get_bits1(&s->gb)) {
1149 if (!(num_fill_bits = get_bits(&s->gb, 2))) {
1150 int len = get_bits(&s->gb, 4);
1151 num_fill_bits = (len ? get_bits(&s->gb, len) : 0) + 1;
/** never skip past the end of the saved bit reservoir */
1154 if (num_fill_bits >= 0) {
1155 if (get_bits_count(&s->gb) + num_fill_bits > s->num_saved_bits) {
1156 av_log(s->avctx, AV_LOG_ERROR, "invalid number of fill bits\n");
1157 return AVERROR_INVALIDDATA;
1160 skip_bits_long(&s->gb, num_fill_bits);
1164 /** no idea for what the following bit is used */
1165 if (get_bits1(&s->gb)) {
1166 avpriv_request_sample(s->avctx, "Reserved bit");
1167 return AVERROR_PATCHWELCOME;
1171 if (decode_channel_transform(s) < 0)
1172 return AVERROR_INVALIDDATA;
/** one bit per channel: does this channel transmit coefficients? */
1175 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1176 int c = s->channel_indexes_for_cur_subframe[i];
1177 if ((s->channel[c].transmit_coefs = get_bits1(&s->gb)))
1178 transmit_coeffs = 1;
1181 av_assert0(s->subframe_len <= WMAPRO_BLOCK_MAX_SIZE);
1182 if (transmit_coeffs) {
1184 int quant_step = 90 * s->bits_per_sample >> 4;
1186 /** decode number of vector coded coefficients */
1187 if ((s->transmit_num_vec_coeffs = get_bits1(&s->gb))) {
1188 int num_bits = av_log2((s->subframe_len + 3)/4) + 1;
1189 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1190 int c = s->channel_indexes_for_cur_subframe[i];
/** counts are transmitted in units of 4 coefficients */
1191 int num_vec_coeffs = get_bits(&s->gb, num_bits) << 2;
1192 if (num_vec_coeffs > s->subframe_len) {
1193 av_log(s->avctx, AV_LOG_ERROR, "num_vec_coeffs %d is too large\n", num_vec_coeffs);
1194 return AVERROR_INVALIDDATA;
1196 av_assert0(num_vec_coeffs + offset <= FF_ARRAY_ELEMS(s->channel[c].out));
1197 s->channel[c].num_vec_coeffs = num_vec_coeffs;
1200 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1201 int c = s->channel_indexes_for_cur_subframe[i];
1202 s->channel[c].num_vec_coeffs = s->subframe_len;
1205 /** decode quantization step */
1206 step = get_sbits(&s->gb, 6);
/** -32 / +31 are escape values: keep extending the step with 5-bit
 *  chunks while the chunk value 31 is read */
1208 if (step == -32 || step == 31) {
1209 const int sign = (step == 31) - 1;
1211 while (get_bits_count(&s->gb) + 5 < s->num_saved_bits &&
1212 (step = get_bits(&s->gb, 5)) == 31) {
/** sign is 0 or -1: conditionally negate the accumulated step */
1215 quant_step += ((quant + step) ^ sign) - sign;
1217 if (quant_step < 0) {
1218 av_log(s->avctx, AV_LOG_DEBUG, "negative quant step\n");
1221 /** decode quantization step modifiers for every channel */
1223 if (s->channels_for_cur_subframe == 1) {
1224 s->channel[s->channel_indexes_for_cur_subframe[0]].quant_step = quant_step;
1226 int modifier_len = get_bits(&s->gb, 3);
1227 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1228 int c = s->channel_indexes_for_cur_subframe[i];
1229 s->channel[c].quant_step = quant_step;
1230 if (get_bits1(&s->gb)) {
1232 s->channel[c].quant_step += get_bits(&s->gb, modifier_len) + 1;
1234 ++s->channel[c].quant_step;
1239 /** decode scale factors */
1240 if (decode_scale_factors(s) < 0)
1241 return AVERROR_INVALIDDATA;
1244 av_dlog(s->avctx, "BITSTREAM: subframe header length was %i\n",
1245 get_bits_count(&s->gb) - s->subframe_offset);
1247 /** parse coefficients */
1248 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1249 int c = s->channel_indexes_for_cur_subframe[i];
1250 if (s->channel[c].transmit_coefs &&
1251 get_bits_count(&s->gb) < s->num_saved_bits) {
1252 decode_coeffs(s, c);
/** channel transmitted nothing: silence this subframe */
1254 memset(s->channel[c].coeffs, 0,
1255 sizeof(*s->channel[c].coeffs) * subframe_len);
1258 av_dlog(s->avctx, "BITSTREAM: subframe length was %i\n",
1259 get_bits_count(&s->gb) - s->subframe_offset);
1261 if (transmit_coeffs) {
1262 FFTContext *mdct = &s->mdct_ctx[av_log2(subframe_len) - WMAPRO_BLOCK_MIN_BITS];
1263 /** reconstruct the per channel data */
1264 inverse_channel_transform(s);
1265 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1266 int c = s->channel_indexes_for_cur_subframe[i];
1267 const int* sf = s->channel[c].scale_factors;
/** low-pass the LFE channel: zero everything above the cutoff */
1270 if (c == s->lfe_channel)
1271 memset(&s->tmp[cur_subwoofer_cutoff], 0, sizeof(*s->tmp) *
1272 (subframe_len - cur_subwoofer_cutoff));
1274 /** inverse quantization and rescaling */
1275 for (b = 0; b < s->num_bands; b++) {
1276 const int end = FFMIN(s->cur_sfb_offsets[b+1], s->subframe_len);
/** per-band gain in dB -> linear via 10^(exp/20) */
1277 const int exp = s->channel[c].quant_step -
1278 (s->channel[c].max_scale_factor - *sf++) *
1279 s->channel[c].scale_factor_step;
1280 const float quant = pow(10.0, exp / 20.0);
1281 int start = s->cur_sfb_offsets[b];
1282 s->fdsp.vector_fmul_scalar(s->tmp + start,
1283 s->channel[c].coeffs + start,
1284 quant, end - start);
1287 /** apply imdct (imdct_half == DCTIV with reverse) */
1288 mdct->imdct_half(mdct, s->channel[c].coeffs, s->tmp);
1292 /** window and overlap-add */
1295 /** handled one subframe */
1296 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1297 int c = s->channel_indexes_for_cur_subframe[i];
1298 if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) {
1299 av_log(s->avctx, AV_LOG_ERROR, "broken subframe\n");
1300 return AVERROR_INVALIDDATA;
1302 ++s->channel[c].cur_subframe;
1309 *@brief Decode one WMA frame.
1310 *@param s codec context
1311 *@return 0 if the trailer bit indicates that this is the last frame,
1312 * 1 if there are additional frames
1314 static int decode_frame(WMAProDecodeCtx *s, AVFrame *frame, int *got_frame_ptr)
1316 AVCodecContext *avctx = s->avctx;
1317 GetBitContext* gb = &s->gb;
1318 int more_frames = 0;
1322 /** get frame length */
1324 len = get_bits(gb, s->log2_frame_size);
1326 av_dlog(s->avctx, "decoding frame with length %x\n", len);
1328 /** decode tile information */
1329 if (decode_tilehdr(s)) {
1334 /** read postproc transform */
1335 if (s->avctx->channels > 1 && get_bits1(gb)) {
1336 if (get_bits1(gb)) {
/** a channels x channels matrix is transmitted (values skipped here) */
1337 for (i = 0; i < avctx->channels * avctx->channels; i++)
1342 /** read drc info */
1343 if (s->dynamic_range_compression) {
1344 s->drc_gain = get_bits(gb, 8);
1345 av_dlog(s->avctx, "drc_gain %i\n", s->drc_gain);
1348 /** no idea what these are for, might be the number of samples
1349 that need to be skipped at the beginning or end of a stream */
1350 if (get_bits1(gb)) {
1353 /** usually true for the first frame */
1354 if (get_bits1(gb)) {
1355 skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
1356 av_dlog(s->avctx, "start skip: %i\n", skip);
1359 /** sometimes true for the last frame */
1360 if (get_bits1(gb)) {
1361 skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
1362 av_dlog(s->avctx, "end skip: %i\n", skip);
1367 av_dlog(s->avctx, "BITSTREAM: frame header length was %i\n",
1368 get_bits_count(gb) - s->frame_offset);
1370 /** reset subframe states */
1371 s->parsed_all_subframes = 0;
1372 for (i = 0; i < avctx->channels; i++) {
1373 s->channel[i].decoded_samples = 0;
1374 s->channel[i].cur_subframe = 0;
1375 s->channel[i].reuse_sf = 0;
1378 /** decode all subframes */
1379 while (!s->parsed_all_subframes) {
1380 if (decode_subframe(s) < 0) {
1386 /* get output buffer */
1387 frame->nb_samples = s->samples_per_frame;
1388 if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
1393 /** copy samples to the output buffer */
1394 for (i = 0; i < avctx->channels; i++)
1395 memcpy(frame->extended_data[i], s->channel[i].out,
1396 s->samples_per_frame * sizeof(*s->channel[i].out));
1398 for (i = 0; i < avctx->channels; i++) {
1399 /** reuse second half of the IMDCT output for the next frame */
1400 memcpy(&s->channel[i].out[0],
1401 &s->channel[i].out[s->samples_per_frame],
1402 s->samples_per_frame * sizeof(*s->channel[i].out) >> 1);
/** skip_frame set: decode but do not output this frame */
1405 if (s->skip_frame) {
1408 av_frame_unref(frame);
/** with a length prefix, verify the consumed bit count matches it */
1413 if (s->len_prefix) {
1414 if (len != (get_bits_count(gb) - s->frame_offset) + 2) {
1415 /** FIXME: not sure if this is always an error */
1416 av_log(s->avctx, AV_LOG_ERROR,
1417 "frame[%i] would have to skip %i bits\n", s->frame_num,
1418 len - (get_bits_count(gb) - s->frame_offset) - 1);
1423 /** skip the rest of the frame data */
1424 skip_bits_long(gb, len - (get_bits_count(gb) - s->frame_offset) - 1);
/** without a length prefix, scan forward to the next set bit */
1426 while (get_bits_count(gb) < s->num_saved_bits && get_bits1(gb) == 0) {
1430 /** decode trailer bit */
1431 more_frames = get_bits1(gb);
1438 *@brief Calculate remaining input buffer length.
1439 *@param s codec context
1440 *@param gb bitstream reader context
1441 *@return remaining size in bits
1443 static int remaining_bits(WMAProDecodeCtx *s, GetBitContext *gb)
/** number of not-yet-consumed bits in the current input buffer */
1445 return s->buf_bit_size - get_bits_count(gb);
1449 *@brief Fill the bit reservoir with a (partial) frame.
1450 *@param s codec context
1451 *@param gb bitstream reader context
1452 *@param len length of the partial frame
1453 *@param append decides whether to reset the buffer or not
1455 static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
1460 /** when the frame data does not need to be concatenated, the input buffer
1461 is reset and additional bits from the previous frame are copied
1462 and skipped later so that a fast byte copy is possible */
1465 s->frame_offset = get_bits_count(gb) & 7;
1466 s->num_saved_bits = s->frame_offset;
1467 init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
/** bytes the reservoir will occupy after appending len bits */
1470 buflen = (put_bits_count(&s->pb) + len + 8) >> 3;
/** NOTE(review): the message only mentions a too-small buffer, but the
 *  condition also triggers on reservoir overflow (buflen > MAX_FRAMESIZE) */
1472 if (len <= 0 || buflen > MAX_FRAMESIZE) {
1473 avpriv_request_sample(s->avctx, "Too small input buffer");
1478 av_assert0(len <= put_bits_left(&s->pb));
1480 s->num_saved_bits += len;
1482 avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3),
/** reader not byte-aligned: copy single bits up to the next byte
 *  boundary, then fall back to the fast byte-wise copy */
1485 int align = 8 - (get_bits_count(gb) & 7);
1486 align = FFMIN(align, len);
1487 put_bits(&s->pb, align, get_bits(gb, align));
1489 avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3), len);
1491 skip_bits_long(gb, len);
/** flush a copy so s->pb itself can keep accepting appended bits */
1494 PutBitContext tmp = s->pb;
1495 flush_put_bits(&tmp);
/** (re)open the frame reader over the reservoir, skipping the byte
 *  alignment bits recorded in frame_offset */
1498 init_get_bits(&s->gb, s->frame_data, s->num_saved_bits);
1499 skip_bits(&s->gb, s->frame_offset);
1503 *@brief Decode a single WMA packet.
1504 *@param avctx codec context
1505 *@param data the output buffer
1506 *@param avpkt input packet
1507 *@return number of bytes that were read from the input buffer
1509 static int decode_packet(AVCodecContext *avctx, void *data,
1510 int *got_frame_ptr, AVPacket* avpkt)
1512 WMAProDecodeCtx *s = avctx->priv_data;
1513 GetBitContext* gb = &s->pgb;
1514 const uint8_t* buf = avpkt->data;
1515 int buf_size = avpkt->size;
1516 int num_bits_prev_frame;
1517 int packet_sequence_number;
1521 if (s->packet_done || s->packet_loss) {
1524 /** sanity check for the buffer length */
1525 if (buf_size < avctx->block_align) {
1526 av_log(avctx, AV_LOG_ERROR, "Input packet too small (%d < %d)\n",
1527 buf_size, avctx->block_align);
1528 return AVERROR_INVALIDDATA;
/** only block_align bytes belong to this packet; remember where the
 *  next packet's data begins within the input buffer */
1531 s->next_packet_start = buf_size - avctx->block_align;
1532 buf_size = avctx->block_align;
1533 s->buf_bit_size = buf_size << 3;
1535 /** parse packet header */
1536 init_get_bits(gb, buf, s->buf_bit_size);
1537 packet_sequence_number = get_bits(gb, 4);
1540 /** get number of bits that need to be added to the previous frame */
1541 num_bits_prev_frame = get_bits(gb, s->log2_frame_size);
1542 av_dlog(avctx, "packet[%d]: nbpf %x\n", avctx->frame_number,
1543 num_bits_prev_frame);
1545 /** check for packet loss */
/** sequence numbers are 4 bits and must increment modulo 16 */
1546 if (!s->packet_loss &&
1547 ((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) {
1549 av_log(avctx, AV_LOG_ERROR, "Packet loss detected! seq %x vs %x\n",
1550 s->packet_sequence_number, packet_sequence_number);
1552 s->packet_sequence_number = packet_sequence_number;
1554 if (num_bits_prev_frame > 0) {
1555 int remaining_packet_bits = s->buf_bit_size - get_bits_count(gb);
/** clamp to what the packet actually contains */
1556 if (num_bits_prev_frame >= remaining_packet_bits) {
1557 num_bits_prev_frame = remaining_packet_bits;
1561 /** append the previous frame data to the remaining data from the
1562 previous packet to create a full frame */
1563 save_bits(s, gb, num_bits_prev_frame, 1);
1564 av_dlog(avctx, "accumulated %x bits of frame data\n",
1565 s->num_saved_bits - s->frame_offset);
1567 /** decode the cross packet frame if it is valid */
1568 if (!s->packet_loss)
1569 decode_frame(s, data, got_frame_ptr);
1570 } else if (s->num_saved_bits - s->frame_offset) {
1571 av_dlog(avctx, "ignoring %x previously saved bits\n",
1572 s->num_saved_bits - s->frame_offset);
1575 if (s->packet_loss) {
1576 /** reset number of saved bits so that the decoder
1577 does not start to decode incomplete frames in the
1578 s->len_prefix == 0 case */
1579 s->num_saved_bits = 0;
/** continue decoding frames from the remainder of the packet */
1585 s->buf_bit_size = (avpkt->size - s->next_packet_start) << 3;
1586 init_get_bits(gb, avpkt->data, s->buf_bit_size);
1587 skip_bits(gb, s->packet_offset);
1588 if (s->len_prefix && remaining_bits(s, gb) > s->log2_frame_size &&
1589 (frame_size = show_bits(gb, s->log2_frame_size)) &&
1590 frame_size <= remaining_bits(s, gb)) {
/** a whole length-prefixed frame fits in this packet: decode it */
1591 save_bits(s, gb, frame_size, 0);
1592 if (!s->packet_loss)
1593 s->packet_done = !decode_frame(s, data, got_frame_ptr);
1594 } else if (!s->len_prefix
1595 && s->num_saved_bits > get_bits_count(&s->gb)) {
1596 /** when the frames do not have a length prefix, we don't know
1597 the compressed length of the individual frames
1598 however, we know what part of a new packet belongs to the
1600 therefore we save the incoming packet first, then we append
1601 the "previous frame" data from the next packet so that
1602 we get a buffer that only contains full frames */
1603 s->packet_done = !decode_frame(s, data, got_frame_ptr);
1608 if (s->packet_done && !s->packet_loss &&
1609 remaining_bits(s, gb) > 0) {
1610 /** save the rest of the data so that it can be decoded
1611 with the next packet */
1612 save_bits(s, gb, remaining_bits(s, gb), 0);
/** remember sub-byte position so the next call can resume exactly here */
1615 s->packet_offset = get_bits_count(gb) & 7;
1617 return AVERROR_INVALIDDATA;
/** report the number of bytes consumed from the input packet */
1619 return get_bits_count(gb) >> 3;
1623 *@brief Clear decoder buffers (for seeking).
1624 *@param avctx codec context
1626 static void flush(AVCodecContext *avctx)
1628 WMAProDecodeCtx *s = avctx->priv_data;
1630 /** reset output buffer as a part of it is used during the windowing of a
1632 for (i = 0; i < avctx->channels; i++)
1633 memset(s->channel[i].out, 0, s->samples_per_frame *
1634 sizeof(*s->channel[i].out));
1640 *@brief wmapro decoder
1642 AVCodec ff_wmapro_decoder = {
1644 .type = AVMEDIA_TYPE_AUDIO,
1645 .id = AV_CODEC_ID_WMAPRO,
1646 .priv_data_size = sizeof(WMAProDecodeCtx),
1647 .init = decode_init,
1648 .close = decode_end,
1649 .decode = decode_packet,
1650 .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
1652 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
1653 .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
1654 AV_SAMPLE_FMT_NONE },