2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 * VC-1 and WMV3 decoder
30 #include "mpegvideo.h"
32 #include "vc1acdata.h"
37 extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
38 extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
39 extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
40 #define MB_INTRA_VLC_BITS 9
41 extern VLC ff_msmp4_mb_i_vlc;
42 extern const uint16_t ff_msmp4_mb_i_table[64][2];
/* NOTE(review): tentative zero-initialized definition — no initializer is
 * visible in this chunk and no use is visible either; confirm whether it is
 * populated elsewhere or dead. */
45 static const uint16_t table_mb_intra[64][2];
/* NOTE(review): the enum wrappers (names, opening/closing braces and several
 * enumerators) are elided in this listing — only scattered members remain. */
48 /** Available Profiles */
53 PROFILE_COMPLEX, ///< TODO: WMV9 specific
58 /** Sequence quantizer mode */
61 QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level
62 QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level
63 QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames
64 QUANT_UNIFORM ///< Uniform quant used for all frames
68 /** Where quant can be changed */
72 DQPROFILE_DOUBLE_EDGES,
73 DQPROFILE_SINGLE_EDGE,
78 /** @name Where quant can be changed
89 /** Which pair of edges is quantized with ALTPQUANT */
92 DQDOUBLE_BEDGE_TOPLEFT,
93 DQDOUBLE_BEDGE_TOPRIGHT,
94 DQDOUBLE_BEDGE_BOTTOMRIGHT,
95 DQDOUBLE_BEDGE_BOTTOMLEFT
99 /** MV modes for P frames */
102 MV_PMODE_1MV_HPEL_BILIN,
106 MV_PMODE_INTENSITY_COMP
110 /** @name MV types for B frames */
115 BMV_TYPE_INTERPOLATED
119 /** @name Block types for P/B frames */
/* NOTE(review): several enumerators (TT_8X8, TT_4X4, the _TOP/_BOTTOM and
 * _LEFT/_RIGHT half variants) and the closing brace are elided here; they are
 * referenced by the tables below. */
121 enum TransformTypes {
125 TT_8X4, //Both halves
128 TT_4X8, //Both halves
133 /** Table for conversion between TTBLK and TTMB */
/* Three alternative code tables (row presumably selected by tt_index — see
 * VC1Context.tt_index — TODO confirm); closing "};" elided from this view. */
134 static const int ttblk_to_tt[3][8] = {
135 { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
136 { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
137 { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
/* Maps the 2-bit TTFRM element to a TransformTypes value. */
140 static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };
142 /** MV P mode - the 5th element is only used for mode 1 */
/* Two alternative orderings of decoded MV-mode codes; closing "};" of each
 * initializer is elided from this listing. Row choice presumably depends on
 * PQUANT — TODO confirm against the frame-header parser. */
143 static const uint8_t mv_pmode_table[2][5] = {
144 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
145 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
/* Same tables without the INTENSITY_COMP entry (4 codes). */
147 static const uint8_t mv_pmode_table2[2][4] = {
148 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
149 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
152 /** One more frame type */
/* Frame-rate numerators/denominators used when the header signals framerate. */
155 static const int fps_nr[5] = { 24, 25, 30, 50, 60 },
156 fps_dr[2] = { 1000, 1001 };
/* PQINDEX (0..31) -> PQUANT mapping for the three quantizer signalling modes;
 * the per-row closing braces and the final "};" are elided in this listing. */
157 static const uint8_t pquant_table[3][32] = {
158 { /* Implicit quantizer */
159 0, 1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8, 9, 10, 11, 12,
160 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
162 { /* Explicit quantizer, pquantizer uniform */
163 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
164 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
166 { /* Explicit quantizer, pquantizer non-uniform */
167 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
168 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
172 /** @name VC-1 VLC tables and defines
173 * @todo TODO move this into the context
/* File-scope VLC objects initialized once in vc1_init_common(); the *_BITS
 * macros give the lookup depth passed to init_vlc()/get_vlc2(). */
176 #define VC1_BFRACTION_VLC_BITS 7
177 static VLC vc1_bfraction_vlc;
178 #define VC1_IMODE_VLC_BITS 4
179 static VLC vc1_imode_vlc;
180 #define VC1_NORM2_VLC_BITS 3
181 static VLC vc1_norm2_vlc;
182 #define VC1_NORM6_VLC_BITS 9
183 static VLC vc1_norm6_vlc;
184 /* Could be optimized, one table only needs 8 bits */
185 #define VC1_TTMB_VLC_BITS 9 //12
186 static VLC vc1_ttmb_vlc[3];
187 #define VC1_MV_DIFF_VLC_BITS 9 //15
188 static VLC vc1_mv_diff_vlc[4];
189 #define VC1_CBPCY_P_VLC_BITS 9 //14
190 static VLC vc1_cbpcy_p_vlc[4];
191 #define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
192 static VLC vc1_4mv_block_pattern_vlc[4];
193 #define VC1_TTBLK_VLC_BITS 5
194 static VLC vc1_ttblk_vlc[3];
195 #define VC1_SUBBLKPAT_VLC_BITS 6
196 static VLC vc1_subblkpat_vlc[3];
198 static VLC vc1_ac_coeff_table[8];
/* NOTE(review): member of a coding-set enum whose wrapper is elided here. */
202 CS_HIGH_MOT_INTRA = 0,
212 /** @name Overlap conditions for Advanced Profile */
/* NOTE(review): many members and the closing "} VC1Context;" are elided from
 * this listing; treat field groupings below as partial. */
223 * @fixme Change size wherever another size is more efficient
224 * Many members are only used for Advanced Profile
226 typedef struct VC1Context{
231 /** Simple/Main Profile sequence header */
233 int res_sm; ///< reserved, 2b
234 int res_x8; ///< reserved
235 int multires; ///< frame-level RESPIC syntax element present
236 int res_fasttx; ///< reserved, always 1
237 int res_transtab; ///< reserved, always 0
238 int rangered; ///< RANGEREDFRM (range reduction) syntax element present
240 int res_rtm_flag; ///< reserved, set to 1
241 int reserved; ///< reserved
244 /** Advanced Profile */
246 int level; ///< 3bits, for Advanced/Simple Profile, provided by TS layer
247 int chromaformat; ///< 2bits, 2=4:2:0, only defined
248 int postprocflag; ///< Per-frame processing suggestion flag present
249 int broadcast; ///< TFF/RFF present
250 int interlace; ///< Progressive/interlaced (RPTFTM syntax element)
251 int tfcntrflag; ///< TFCNTR present
252 int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
253 int extended_dmv; ///< Additional extended dmv range at P/B frame-level
254 int color_prim; ///< 8bits, chroma coordinates of the color primaries
255 int transfer_char; ///< 8bits, Opto-electronic transfer characteristics
256 int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
257 int hrd_param_flag; ///< Presence of Hypothetical Reference
258 ///< Decoder parameters
259 int psf; ///< Progressive Segmented Frame
262 /** Sequence header data for all Profiles
263 * TODO: choose between ints, uint8_ts and monobit flags
266 int profile; ///< 2bits, Profile
267 int frmrtq_postproc; ///< 3bits,
268 int bitrtq_postproc; ///< 5bits, quantized framerate-based postprocessing strength
269 int fastuvmc; ///< Rounding of qpel vector to hpel ? (not in Simple)
270 int extended_mv; ///< Ext MV in P/B (not in Simple)
271 int dquant; ///< How qscale varies with MBs, 2bits (not in Simple)
272 int vstransform; ///< variable-size [48]x[48] transform type + info
273 int overlap; ///< overlapped transforms in use
274 int quantizer_mode; ///< 2bits, quantizer mode used for sequence, see QUANT_*
275 int finterpflag; ///< INTERPFRM present
278 /** Frame decoding info for all profiles */
280 uint8_t mv_mode; ///< MV coding mode
281 uint8_t mv_mode2; ///< Secondary MV coding mode (B frames)
282 int k_x; ///< Number of bits for MVs (depends on MV range)
283 int k_y; ///< Number of bits for MVs (depends on MV range)
284 int range_x, range_y; ///< MV range
285 uint8_t pq, altpq; ///< Current/alternate frame quantizer scale
286 /** pquant parameters */
293 /** AC coding set indexes
294 * @see 8.1.1.10, p(1)10
297 int c_ac_table_index; ///< Chroma index from ACFRM element
298 int y_ac_table_index; ///< Luma index from AC2FRM element
300 int ttfrm; ///< Transform type info present at frame level
301 uint8_t ttmbf; ///< Transform type flag
302 uint8_t ttblk4x4; ///< Value of ttblk which indicates a 4x4 transform
303 int codingset; ///< index of current table set from 11.8 to use for luma block decoding
304 int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding
305 int pqindex; ///< raw pqindex used in coding set selection
306 int a_avail, c_avail; ///< presumably above/left-neighbour availability for prediction — TODO confirm
307 uint8_t *mb_type_base, *mb_type[3]; ///< per-block type flags; [0] is read in vc1_mc_4mv_chroma()
310 /** Luma compensation parameters */
315 int16_t bfraction; ///< Relative position % anchors=> how to scale MVs
316 uint8_t halfpq; ///< Uniform quant over image and qp+.5
317 uint8_t respic; ///< Frame-level flag for resized images
318 int buffer_fullness; ///< HRD info
320 * -# 0 -> [-64, 63.f] x [-32, 31.f]
321 * -# 1 -> [-128, 127.f] x [-64, 63.f]
322 * -# 2 -> [-512, 511.f] x [-128, 127.f]
323 * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
326 uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use
327 VLC *cbpcy_vlc; ///< CBPCY VLC table
328 int tt_index; ///< Index for Transform Type tables
329 uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
330 uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs
331 int mv_type_is_raw; ///< mv type mb plane is not coded
332 int dmb_is_raw; ///< direct mb plane is raw
333 int skip_is_raw; ///< skip mb plane is not coded
334 uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
335 int use_ic; ///< use intensity compensation in B-frames
336 int rnd; ///< rounding control
338 /** Frame decoding info for S/M profiles only */
340 uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128)
344 /** Frame decoding info for Advanced profile */
346 uint8_t fcm; ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
347 uint8_t numpanscanwin;
349 uint8_t rptfrm, tff, rff;
352 uint16_t bottomrightx;
353 uint16_t bottomrighty;
356 int hrd_num_leaky_buckets;
357 uint8_t bit_rate_exponent;
358 uint8_t buffer_size_exponent;
359 uint8_t* acpred_plane; ///< AC prediction flags bitplane
361 uint8_t* over_flags_plane; ///< Overflags bitplane
364 uint16_t *hrd_rate, *hrd_buffer; ///< HRD leaky-bucket arrays, NULLed in vc1_init_common()
365 uint8_t *hrd_fullness;
366 uint8_t range_mapy_flag;
367 uint8_t range_mapuv_flag;
377 * Get unary code of limited length
378 * @fixme FIXME Slow and ugly
379 * @param gb GetBitContext
380 * @param[in] stop The bitstop value (unary code of 1's or 0's)
381 * @param[in] len Maximum length
382 * @return Unary length/index
/* Counts bits until 'stop' is read or 'len' bits consumed. NOTE(review): the
 * body below mixes the live loop with remnants of two alternative
 * implementations (a while-loop variant and a cached-reader variant using
 * UPDATE_CACHE/GET_CACHE); surrounding comment/#if markers are elided from
 * this listing, so which path is actually compiled cannot be seen here. */
384 static int get_prefix(GetBitContext *gb, int stop, int len)
389 for(i = 0; i < len && get_bits1(gb) != stop; i++);
391 /* int i = 0, tmp = !stop;
393 while (i != len && tmp != stop)
395 tmp = get_bits(gb, 1);
398 if (i == len && tmp != stop) return len+1;
405 UPDATE_CACHE(re, gb);
406 buf=GET_CACHE(re, gb); //Still not sure
407 if (stop) buf = ~buf;
409 log= av_log2(-buf); //FIXME: -?
411 LAST_SKIP_BITS(re, gb, log+1);
412 CLOSE_READER(re, gb);
416 LAST_SKIP_BITS(re, gb, limit);
417 CLOSE_READER(re, gb);
/* Decode a {0,1,2} value: NOTE(review): the first-bit test and early return
 * are elided here; only the tail mapping (second bit -> 2 or 1) is visible. */
422 static inline int decode210(GetBitContext *gb){
428 return 2 - get_bits1(gb);
432 * Init VC-1 specific tables and VC1Context members
433 * @param v The VC1Context to initialize
/* One-time setup: clears the HRD pointers and builds every static VLC table
 * used by the decoder. NOTE(review): the for-loops that iterate the indexed
 * tables ([i] below), the "done" guard that makes init run once, and the
 * return statement are elided from this listing. */
436 static int vc1_init_common(VC1Context *v)
441 v->hrd_rate = v->hrd_buffer = NULL;
447 init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
448 vc1_bfraction_bits, 1, 1,
449 vc1_bfraction_codes, 1, 1, 1);
450 init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
451 vc1_norm2_bits, 1, 1,
452 vc1_norm2_codes, 1, 1, 1);
453 init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
454 vc1_norm6_bits, 1, 1,
455 vc1_norm6_codes, 2, 2, 1);
456 init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
457 vc1_imode_bits, 1, 1,
458 vc1_imode_codes, 1, 1, 1);
461 init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
462 vc1_ttmb_bits[i], 1, 1,
463 vc1_ttmb_codes[i], 2, 2, 1);
464 init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
465 vc1_ttblk_bits[i], 1, 1,
466 vc1_ttblk_codes[i], 1, 1, 1);
467 init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
468 vc1_subblkpat_bits[i], 1, 1,
469 vc1_subblkpat_codes[i], 1, 1, 1);
473 init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
474 vc1_4mv_block_pattern_bits[i], 1, 1,
475 vc1_4mv_block_pattern_codes[i], 1, 1, 1);
476 init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
477 vc1_cbpcy_p_bits[i], 1, 1,
478 vc1_cbpcy_p_codes[i], 2, 2, 1);
479 init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
480 vc1_mv_diff_bits[i], 1, 1,
481 vc1_mv_diff_codes[i], 2, 2, 1);
484 init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
485 &vc1_ac_tables[i][0][1], 8, 4,
486 &vc1_ac_tables[i][0][0], 8, 4, 1);
487 init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
488 &ff_msmp4_mb_i_table[0][1], 4, 2,
489 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
494 v->mvrange = 0; /* 7.1.1.18, p80 */
499 /***********************************************************************/
501 * @defgroup bitplane VC9 Bitplane decoding
506 /** @addtogroup bitplane
519 /** @} */ //imode defines
521 /** Decode rows by checking if they are skipped
522 * @param plane Buffer to store decoded bits
523 * @param[in] width Width of this buffer
524 * @param[in] height Height of this buffer
525 * @param[in] stride of this buffer
/* Per row: a skip bit; 0 -> row is all zeros, 1 -> read one bit per column.
 * NOTE(review): the else branch marker and the "plane += stride" row advance
 * are elided from this listing. */
527 static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
530 for (y=0; y<height; y++){
531 if (!get_bits(gb, 1)) //rowskip
532 memset(plane, 0, width);
534 for (x=0; x<width; x++)
535 plane[x] = get_bits(gb, 1);
540 /** Decode columns by checking if they are skipped
541 * @param plane Buffer to store decoded bits
542 * @param[in] width Width of this buffer
543 * @param[in] height Height of this buffer
544 * @param[in] stride of this buffer
545 * @fixme FIXME: Optimize
/* Column-wise analogue of decode_rowskip(): skip bit per column, then one bit
 * per row written at plane[y*stride]. NOTE(review): the zero-fill body of the
 * skipped-column branch and the "plane++" column advance are elided here. */
547 static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
550 for (x=0; x<width; x++){
551 if (!get_bits(gb, 1)) //colskip
552 for (y=0; y<height; y++)
555 for (y=0; y<height; y++)
556 plane[y*stride] = get_bits(gb, 1);
561 /** Decode a bitplane's bits
562 * @param bp Bitplane where to store the decode bits
563 * @param v VC-1 context for bit reading and logging
565 * @fixme FIXME: Optimize
/* Reads INVERT + IMODE, then decodes one bit per macroblock into 'data'
 * (mb_width x mb_height, row stride mb_stride) using the signalled coding
 * mode: raw, NORM-2 (pairs), NORM-6 (2x3 or 3x2 tiles with colskip/rowskip
 * for the remainder), rowskip or colskip; DIFF modes apply an XOR "diff"
 * post-pass. Returns (imode<<1)+invert. NOTE(review): the switch statement,
 * several braces and error-return paths are elided from this listing. */
567 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
569 GetBitContext *gb = &v->s.gb;
571 int imode, x, y, code, offset;
572 uint8_t invert, *planep = data;
573 int width, height, stride;
575 width = v->s.mb_width;
576 height = v->s.mb_height;
577 stride = v->s.mb_stride;
578 invert = get_bits(gb, 1);
579 imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
585 //Data is actually read in the MB layer (same for all tests == "raw")
586 *raw_flag = 1; //invert ignored
/* NORM-2: odd plane size reads one leading bit, then pairs via the VLC. */
590 if ((height * width) & 1)
592 *planep++ = get_bits(gb, 1);
596 // decode bitplane as one long line
597 for (y = offset; y < height * width; y += 2) {
598 code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
599 *planep++ = code & 1;
601 if(offset == width) {
603 planep += stride - width;
605 *planep++ = code >> 1;
607 if(offset == width) {
609 planep += stride - width;
/* NORM-6: each VLC code carries 6 bits laid out as a 2x3 or 3x2 tile. */
615 if(!(height % 3) && (width % 3)) { // use 2x3 decoding
616 for(y = 0; y < height; y+= 3) {
617 for(x = width & 1; x < width; x += 2) {
618 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
620 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
623 planep[x + 0] = (code >> 0) & 1;
624 planep[x + 1] = (code >> 1) & 1;
625 planep[x + 0 + stride] = (code >> 2) & 1;
626 planep[x + 1 + stride] = (code >> 3) & 1;
627 planep[x + 0 + stride * 2] = (code >> 4) & 1;
628 planep[x + 1 + stride * 2] = (code >> 5) & 1;
630 planep += stride * 3;
632 if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
634 planep += (height & 1) * stride;
635 for(y = height & 1; y < height; y += 2) {
636 for(x = width % 3; x < width; x += 3) {
637 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
639 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
642 planep[x + 0] = (code >> 0) & 1;
643 planep[x + 1] = (code >> 1) & 1;
644 planep[x + 2] = (code >> 2) & 1;
645 planep[x + 0 + stride] = (code >> 3) & 1;
646 planep[x + 1 + stride] = (code >> 4) & 1;
647 planep[x + 2 + stride] = (code >> 5) & 1;
649 planep += stride * 2;
652 if(x) decode_colskip(data , x, height , stride, &v->s.gb);
653 if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
657 decode_rowskip(data, width, height, stride, &v->s.gb);
660 decode_colskip(data, width, height, stride, &v->s.gb);
665 /* Applying diff operator */
666 if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
670 for (x=1; x<width; x++)
671 planep[x] ^= planep[x-1];
672 for (y=1; y<height; y++)
675 planep[0] ^= planep[-stride];
676 for (x=1; x<width; x++)
678 if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
679 else planep[x] ^= planep[x-1];
686 for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
688 return (imode<<1) + invert;
691 /** @} */ //Bitplane group
693 /***********************************************************************/
694 /** VOP Dquant decoding
695 * @param v VC-1 Context
/* Parses the per-frame DQUANT syntax: either a direct ALTPQUANT (pqdiff, with
 * 7 as escape for a raw 5-bit value) or, for dquant==2 path, DQUANTFRM /
 * DQPROFILE and the dependent edge/bilevel fields. NOTE(review): the
 * enclosing if/else on v->dquant, `break`s in the switch, and the return are
 * elided; the SINGLE_EDGE/DOUBLE_EDGES cases visibly fall through into
 * ALL_MBS reading dqbilevel — confirm against the complete source whether a
 * break was elided or fallthrough is intended. */
697 static int vop_dquant_decoding(VC1Context *v)
699 GetBitContext *gb = &v->s.gb;
705 pqdiff = get_bits(gb, 3);
706 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
707 else v->altpq = v->pq + pqdiff + 1;
711 v->dquantfrm = get_bits(gb, 1);
714 v->dqprofile = get_bits(gb, 2);
715 switch (v->dqprofile)
717 case DQPROFILE_SINGLE_EDGE:
718 case DQPROFILE_DOUBLE_EDGES:
719 v->dqsbedge = get_bits(gb, 2);
721 case DQPROFILE_ALL_MBS:
722 v->dqbilevel = get_bits(gb, 1);
723 default: break; //Forbidden ?
725 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
727 pqdiff = get_bits(gb, 3);
728 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
729 else v->altpq = v->pq + pqdiff + 1;
736 /** Put block onto picture
/* Writes the six 8x8 blocks (4 luma + 2 chroma) of one macroblock into the
 * current picture via put_pixels_clamped(). The leading loop rescales sample
 * values as ((x-128)<<1)+128 — visibly conditional on something elided here
 * (presumably rangeredfrm — TODO confirm). Y pointer setup and the luma
 * row-advance between block pairs are also elided from this listing. */
738 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
742 DSPContext *dsp = &v->s.dsp;
746 for(k = 0; k < 6; k++)
747 for(j = 0; j < 8; j++)
748 for(i = 0; i < 8; i++)
749 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
752 ys = v->s.current_picture.linesize[0];
753 us = v->s.current_picture.linesize[1];
754 vs = v->s.current_picture.linesize[2];
757 dsp->put_pixels_clamped(block[0], Y, ys);
758 dsp->put_pixels_clamped(block[1], Y + 8, ys);
760 dsp->put_pixels_clamped(block[2], Y, ys);
761 dsp->put_pixels_clamped(block[3], Y + 8, ys);
763 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
764 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
765 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
769 /** Do motion compensation over 1 macroblock
770 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
/* Single-MV motion compensation for a whole macroblock: picks last/next
 * reference by 'dir', derives the chroma MV from the luma MV, clips source
 * coordinates, spills to edge_emu_buffer when the block overlaps a picture
 * edge (or when range reduction / intensity compensation must rewrite the
 * source pixels), then does qpel (mspel) or hpel luma MC plus H.264-style
 * bilinear chroma MC. NOTE(review): several condition/brace lines are elided
 * from this listing. */
772 static void vc1_mc_1mv(VC1Context *v, int dir)
774 MpegEncContext *s = &v->s;
775 DSPContext *dsp = &v->s.dsp;
776 uint8_t *srcY, *srcU, *srcV;
777 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
779 if(!v->s.last_picture.data[0])return;
781 mx = s->mv[dir][0][0];
782 my = s->mv[dir][0][1];
784 // store motion vectors for further use in B frames
785 if(s->pict_type == P_TYPE) {
786 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
787 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
/* Chroma MV: halve the luma MV, rounding qpel phase 3 up. */
789 uvmx = (mx + ((mx & 3) == 3)) >> 1;
790 uvmy = (my + ((my & 3) == 3)) >> 1;
792 srcY = s->last_picture.data[0];
793 srcU = s->last_picture.data[1];
794 srcV = s->last_picture.data[2];
796 srcY = s->next_picture.data[0];
797 srcU = s->next_picture.data[1];
798 srcV = s->next_picture.data[2];
801 src_x = s->mb_x * 16 + (mx >> 2);
802 src_y = s->mb_y * 16 + (my >> 2);
803 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
804 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
806 src_x = clip( src_x, -16, s->mb_width * 16);
807 src_y = clip( src_y, -16, s->mb_height * 16);
808 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
809 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
811 srcY += src_y * s->linesize + src_x;
812 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
813 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
815 /* for grayscale we should not try to read from unknown area */
816 if(s->flags & CODEC_FLAG_GRAY) {
817 srcU = s->edge_emu_buffer + 18 * s->linesize;
818 srcV = s->edge_emu_buffer + 18 * s->linesize;
821 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
822 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
823 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
824 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
826 srcY -= s->mspel * (1 + s->linesize);
827 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
828 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
829 srcY = s->edge_emu_buffer;
830 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
831 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
832 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
833 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
836 /* if we deal with range reduction we need to scale source blocks */
842 for(j = 0; j < 17 + s->mspel*2; j++) {
843 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
846 src = srcU; src2 = srcV;
847 for(j = 0; j < 9; j++) {
848 for(i = 0; i < 9; i++) {
849 src[i] = ((src[i] - 128) >> 1) + 128;
850 src2[i] = ((src2[i] - 128) >> 1) + 128;
852 src += s->uvlinesize;
853 src2 += s->uvlinesize;
856 /* if we deal with intensity compensation we need to scale source blocks */
857 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
862 for(j = 0; j < 17 + s->mspel*2; j++) {
863 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
866 src = srcU; src2 = srcV;
867 for(j = 0; j < 9; j++) {
868 for(i = 0; i < 9; i++) {
869 src[i] = v->lutuv[src[i]];
870 src2[i] = v->lutuv[src2[i]];
872 src += s->uvlinesize;
873 src2 += s->uvlinesize;
876 srcY += s->mspel * (1 + s->linesize);
/* Drop qpel phase from chroma MV (sign-aware) before bilinear chroma MC. */
880 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
881 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
885 dxy = ((my & 3) << 2) | (mx & 3);
886 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
887 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
888 srcY += s->linesize * 8;
889 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
890 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
891 } else { // hpel mc - always used for luma
892 dxy = (my & 2) | ((mx & 2) >> 1);
895 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
897 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
900 if(s->flags & CODEC_FLAG_GRAY) return;
901 /* Chroma MC always uses qpel bilinear */
902 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
906 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
907 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
909 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
910 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
914 /** Do motion compensation for 4-MV macroblock - luminance block
/* MC for one 8x8 luma block (n = 0..3) of a 4-MV macroblock; 'off' places the
 * block in the 16x16 destination. Same edge-emulation / range-reduction /
 * intensity-compensation handling as vc1_mc_1mv(), at 8x8 size.
 * NOTE(review): the mx/my loads and several braces are elided here. */
916 static void vc1_mc_4mv_luma(VC1Context *v, int n)
918 MpegEncContext *s = &v->s;
919 DSPContext *dsp = &v->s.dsp;
921 int dxy, mx, my, src_x, src_y;
924 if(!v->s.last_picture.data[0])return;
927 srcY = s->last_picture.data[0];
929 off = s->linesize * 4 * (n&2) + (n&1) * 8;
931 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
932 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
934 src_x = clip( src_x, -16, s->mb_width * 16);
935 src_y = clip( src_y, -16, s->mb_height * 16);
937 srcY += src_y * s->linesize + src_x;
939 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
940 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
941 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
942 srcY -= s->mspel * (1 + s->linesize);
943 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
944 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
945 srcY = s->edge_emu_buffer;
946 /* if we deal with range reduction we need to scale source blocks */
952 for(j = 0; j < 9 + s->mspel*2; j++) {
953 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
957 /* if we deal with intensity compensation we need to scale source blocks */
958 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
963 for(j = 0; j < 9 + s->mspel*2; j++) {
964 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
968 srcY += s->mspel * (1 + s->linesize);
972 dxy = ((my & 3) << 2) | (mx & 3);
973 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
974 } else { // hpel mc - always used for luma
975 dxy = (my & 2) | ((mx & 2) >> 1);
977 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
979 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Median of four: averages the two middle values of {a,b,c,d}.
 * NOTE(review): the outer comparison (presumably on a vs b) selecting between
 * the two if/else pairs is elided from this listing. */
983 static inline int median4(int a, int b, int c, int d)
986 if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
987 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
989 if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
990 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
995 /** Do motion compensation for 4-MV macroblock - both chroma blocks
/* Derives a single chroma MV (tx,ty) from the four luma MVs, skipping intra
 * luma blocks: median4 when all four are inter, mid_pred of the three inter
 * ones when one is intra (selection branches partly elided), the average of
 * the two inter ones when two are intra, and no MC at all otherwise. Then
 * performs edge-emulated, range-reduction / intensity-compensation aware
 * bilinear chroma MC, as in vc1_mc_1mv(). NOTE(review): several if/else and
 * brace lines are elided from this listing. */
997 static void vc1_mc_4mv_chroma(VC1Context *v)
999 MpegEncContext *s = &v->s;
1000 DSPContext *dsp = &v->s.dsp;
1001 uint8_t *srcU, *srcV;
1002 int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
1003 int i, idx, tx = 0, ty = 0;
1004 int mvx[4], mvy[4], intra[4];
/* count[idx] = number of set bits in idx = number of intra luma blocks. */
1005 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
1007 if(!v->s.last_picture.data[0])return;
1008 if(s->flags & CODEC_FLAG_GRAY) return;
1010 for(i = 0; i < 4; i++) {
1011 mvx[i] = s->mv[0][i][0];
1012 mvy[i] = s->mv[0][i][1];
1013 intra[i] = v->mb_type[0][s->block_index[i]];
1016 /* calculate chroma MV vector from four luma MVs */
1017 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
1018 if(!idx) { // all blocks are inter
1019 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
1020 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
1021 } else if(count[idx] == 1) { // 3 inter blocks
1024 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
1025 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
1028 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
1029 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
1032 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
1033 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
1036 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
1037 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
1040 } else if(count[idx] == 2) {
1042 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
1043 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
1044 tx = (mvx[t1] + mvx[t2]) / 2;
1045 ty = (mvy[t1] + mvy[t2]) / 2;
1047 return; //no need to do MC for inter blocks
1049 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
1050 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
/* Halve to chroma resolution, rounding qpel phase 3 up. */
1051 uvmx = (tx + ((tx&3) == 3)) >> 1;
1052 uvmy = (ty + ((ty&3) == 3)) >> 1;
1054 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1055 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1057 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
1058 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
1059 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1060 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1061 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1062 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1063 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
1064 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
1065 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1066 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
1067 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1068 srcU = s->edge_emu_buffer;
1069 srcV = s->edge_emu_buffer + 16;
1071 /* if we deal with range reduction we need to scale source blocks */
1072 if(v->rangeredfrm) {
1074 uint8_t *src, *src2;
1076 src = srcU; src2 = srcV;
1077 for(j = 0; j < 9; j++) {
1078 for(i = 0; i < 9; i++) {
1079 src[i] = ((src[i] - 128) >> 1) + 128;
1080 src2[i] = ((src2[i] - 128) >> 1) + 128;
1082 src += s->uvlinesize;
1083 src2 += s->uvlinesize;
1086 /* if we deal with intensity compensation we need to scale source blocks */
1087 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1089 uint8_t *src, *src2;
1091 src = srcU; src2 = srcV;
1092 for(j = 0; j < 9; j++) {
1093 for(i = 0; i < 9; i++) {
1094 src[i] = v->lutuv[src[i]];
1095 src2[i] = v->lutuv[src2[i]];
1097 src += s->uvlinesize;
1098 src2 += s->uvlinesize;
1104 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
1105 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
1108 /* Chroma MC always uses qpel bilinear */
1109 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
1113 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1114 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1116 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1117 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1121 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
1124 * Decode Simple/Main Profiles sequence header
1125 * @see Figure 7-8, p16-17
1126 * @param avctx Codec context
1127 * @param gb GetBit context initialized from Codec context extra_data
/* Parses the STRUCT_C / sequence-header bits for Simple and Main profile,
 * dispatching to decode_sequence_header_adv() for Advanced profile, and
 * validates the reserved fields. NOTE(review): braces, several error
 * `return`s and the final return are elided from this listing. Also note the
 * runtime error string at original line 1165 reads "shell not be enabled" —
 * "shall" was intended; left unchanged here since strings are behavior. */
1130 static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
1132 VC1Context *v = avctx->priv_data;
1134 av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
1135 v->profile = get_bits(gb, 2);
1136 if (v->profile == 2)
1138 av_log(avctx, AV_LOG_ERROR, "Profile value 2 is forbidden (and WMV3 Complex Profile is unsupported)\n");
1142 if (v->profile == PROFILE_ADVANCED)
1144 return decode_sequence_header_adv(v, gb);
1148 v->res_sm = get_bits(gb, 2); //reserved
1151 av_log(avctx, AV_LOG_ERROR,
1152 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
1158 v->frmrtq_postproc = get_bits(gb, 3); //common
1159 // (bitrate-32kbps)/64kbps
1160 v->bitrtq_postproc = get_bits(gb, 5); //common
1161 v->s.loop_filter = get_bits(gb, 1); //common
1162 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
1164 av_log(avctx, AV_LOG_ERROR,
1165 "LOOPFILTER shell not be enabled in simple profile\n");
1168 v->res_x8 = get_bits(gb, 1); //reserved
1171 av_log(avctx, AV_LOG_ERROR,
1172 "1 for reserved RES_X8 is forbidden\n");
1175 v->multires = get_bits(gb, 1);
1176 v->res_fasttx = get_bits(gb, 1);
1179 av_log(avctx, AV_LOG_ERROR,
1180 "0 for reserved RES_FASTTX is forbidden\n");
1184 v->fastuvmc = get_bits(gb, 1); //common
1185 if (!v->profile && !v->fastuvmc)
1187 av_log(avctx, AV_LOG_ERROR,
1188 "FASTUVMC unavailable in Simple Profile\n");
1191 v->extended_mv = get_bits(gb, 1); //common
1192 if (!v->profile && v->extended_mv)
1194 av_log(avctx, AV_LOG_ERROR,
1195 "Extended MVs unavailable in Simple Profile\n");
1198 v->dquant = get_bits(gb, 2); //common
1199 v->vstransform = get_bits(gb, 1); //common
1201 v->res_transtab = get_bits(gb, 1);
1202 if (v->res_transtab)
1204 av_log(avctx, AV_LOG_ERROR,
1205 "1 for reserved RES_TRANSTAB is forbidden\n");
1209 v->overlap = get_bits(gb, 1); //common
1211 v->s.resync_marker = get_bits(gb, 1);
1212 v->rangered = get_bits(gb, 1);
1213 if (v->rangered && v->profile == PROFILE_SIMPLE)
1215 av_log(avctx, AV_LOG_INFO,
1216 "RANGERED should be set to 0 in simple profile\n");
1219 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
1220 v->quantizer_mode = get_bits(gb, 2); //common
1222 v->finterpflag = get_bits(gb, 1); //common
1223 v->res_rtm_flag = get_bits(gb, 1); //reserved
/* res_rtm_flag == 0 identifies an old WMV3 variant: decoder degrades to
 * I-frames only rather than erroring out. */
1224 if (!v->res_rtm_flag)
1226 // av_log(avctx, AV_LOG_ERROR,
1227 // "0 for reserved RES_RTM_FLAG is forbidden\n");
1228 av_log(avctx, AV_LOG_ERROR,
1229 "Old WMV3 version detected, only I-frames will be decoded\n");
1232 av_log(avctx, AV_LOG_DEBUG,
1233 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1234 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
1235 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
1236 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
1237 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
1238 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
1239 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
1240 v->dquant, v->quantizer_mode, avctx->max_b_frames
/* Parse the Advanced Profile sequence header.
 * Reads level, chroma format, coded picture dimensions, interlace/display
 * metadata and HRD (leaky-bucket) parameters into the VC1Context.
 * NOTE(review): several interior lines (braces, error returns, the `w`, `h`,
 * `ar`, `i` declarations) are not visible in this chunk. */
1245 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
1247 v->res_rtm_flag = 1;
/* LEVEL: 3 bits; values above the defined range are only reported, not fatal */
1248 v->level = get_bits(gb, 3);
1251 av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
/* COLORDIFF_FORMAT: only value 1 (4:2:0) is supported by this decoder */
1253 v->chromaformat = get_bits(gb, 2);
1254 if (v->chromaformat != 1)
1256 av_log(v->s.avctx, AV_LOG_ERROR,
1257 "Only 4:2:0 chroma format supported\n");
1262 v->frmrtq_postproc = get_bits(gb, 3); //common
1263 // (bitrate-32kbps)/64kbps
1264 v->bitrtq_postproc = get_bits(gb, 5); //common
1265 v->postprocflag = get_bits(gb, 1); //common
/* coded size is stored as (dim/2 - 1) in 12 bits each */
1267 v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
1268 v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
1269 v->broadcast = get_bits1(gb);
1270 v->interlace = get_bits1(gb);
1271 v->tfcntrflag = get_bits1(gb);
1272 v->finterpflag = get_bits1(gb);
1273 get_bits1(gb); // reserved
1274 v->psf = get_bits1(gb);
1275 if(v->psf) { //PsF, 6.1.13
1276 av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
/* optional display-extension block: size, aspect ratio, framerate, color info.
 * All of it is informational only; it does not change decoding. */
1279 if(get_bits1(gb)) { //Display Info - decoding is not affected by it
1281 av_log(v->s.avctx, AV_LOG_INFO, "Display extended info:\n");
1282 w = get_bits(gb, 14);
1283 h = get_bits(gb, 14);
1284 av_log(v->s.avctx, AV_LOG_INFO, "Display dimensions: %ix%i\n", w, h);
1285 //TODO: store aspect ratio in AVCodecContext
1287 ar = get_bits(gb, 4);
1289 w = get_bits(gb, 8);
1290 h = get_bits(gb, 8);
1293 if(get_bits1(gb)){ //framerate stuff
1303 v->color_prim = get_bits(gb, 8);
1304 v->transfer_char = get_bits(gb, 8);
1305 v->matrix_coef = get_bits(gb, 8);
/* HRD parameters: bucket count kept (needed by decode_entry_point), the
 * per-bucket rate/size values are skipped */
1309 v->hrd_param_flag = get_bits1(gb);
1310 if(v->hrd_param_flag) {
1312 v->hrd_num_leaky_buckets = get_bits(gb, 5);
1313 get_bits(gb, 4); //bitrate exponent
1314 get_bits(gb, 4); //buffer size exponent
1315 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1316 get_bits(gb, 16); //hrd_rate[n]
1317 get_bits(gb, 16); //hrd_buffer[n]
/* Parse an Advanced Profile entry-point header.
 * Refreshes the per-entry-point coding controls (loop filter, fast UV MC,
 * extended MV, dquant, transforms, quantizer mode) and optionally overrides
 * the coded dimensions.  Relies on hrd_param_flag/hrd_num_leaky_buckets set
 * earlier by decode_sequence_header_adv().
 * NOTE(review): some interior lines (the `i` declaration, the conditions
 * guarding the coded-size and range-mapping reads) are not visible here. */
1323 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
1325 VC1Context *v = avctx->priv_data;
1328 av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
1329 get_bits1(gb); // broken link
1330 avctx->max_b_frames = 1 - get_bits1(gb); // 'closed entry' also signals possible B-frames
1331 v->panscanflag = get_bits1(gb);
1332 get_bits1(gb); // refdist flag
1333 v->s.loop_filter = get_bits1(gb);
1334 v->fastuvmc = get_bits1(gb);
1335 v->extended_mv = get_bits1(gb);
1336 v->dquant = get_bits(gb, 2);
1337 v->vstransform = get_bits1(gb);
1338 v->overlap = get_bits1(gb);
1339 v->quantizer_mode = get_bits(gb, 2);
/* skip the HRD fullness bytes, one per leaky bucket */
1341 if(v->hrd_param_flag){
1342 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1343 get_bits(gb, 8); //hrd_full[n]
1348 avctx->coded_width = (get_bits(gb, 12)+1)<<1;
1349 avctx->coded_height = (get_bits(gb, 12)+1)<<1;
1352 v->extended_dmv = get_bits1(gb);
/* range mapping (luma/chroma scaling) is parsed but not applied yet */
1354 av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
1355 skip_bits(gb, 3); // Y range, ignored for now
1358 av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
1359 skip_bits(gb, 3); // UV range, ignored for now
/* Parse a Simple/Main Profile (WMV3) frame header.
 * Determines picture type, quantizer parameters, MV mode and range,
 * bitplanes and VLC table selections for the frame.
 * Returns -1 on bitplane decoding failure.
 * NOTE(review): interior lines (case labels, else branches, final return)
 * are missing from this chunk; comments cover only visible code. */
1365 static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
1367 int pqindex, lowquant, status;
1369 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1370 skip_bits(gb, 2); //framecnt unused
1372 if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
/* picture type: 1 bit, with an extra bit distinguishing I/B when the
 * stream can contain B-frames */
1373 v->s.pict_type = get_bits(gb, 1);
1374 if (v->s.avctx->max_b_frames) {
1375 if (!v->s.pict_type) {
1376 if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
1377 else v->s.pict_type = B_TYPE;
1378 } else v->s.pict_type = P_TYPE;
1379 } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
/* BFRACTION; the special value 0 promotes the frame to BI type */
1382 if(v->s.pict_type == B_TYPE) {
1383 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1384 v->bfraction = vc1_bfraction_lut[v->bfraction];
1385 if(v->bfraction == 0) {
1386 v->s.pict_type = BI_TYPE;
1389 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1390 get_bits(gb, 7); // skip buffer fullness
1393 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1395 if(v->s.pict_type == P_TYPE)
1398 /* Quantizer stuff */
1399 pqindex = get_bits(gb, 5);
1400 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1401 v->pq = pquant_table[0][pqindex];
1403 v->pq = pquant_table[1][pqindex];
/* implicit mode: uniform quantizer iff pqindex < 9 (table-driven) */
1406 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1407 v->pquantizer = pqindex < 9;
1408 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1410 v->pqindex = pqindex;
1411 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1413 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1414 v->pquantizer = get_bits(gb, 1);
/* MV range: k_x/k_y are bit lengths of MV components, range_x/y the
 * signed modulus used when storing predicted MVs */
1416 if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
1417 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1418 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1419 v->range_x = 1 << (v->k_x - 1);
1420 v->range_y = 1 << (v->k_y - 1);
1421 if (v->profile == PROFILE_ADVANCED)
1423 if (v->postprocflag) v->postproc = get_bits(gb, 1);
1426 if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);
1428 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
1429 // (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
1431 if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
/* per-picture-type parsing: P-frame path first, then B-frame path */
1433 switch(v->s.pict_type) {
1435 if (v->pq < 5) v->tt_index = 0;
1436 else if(v->pq < 13) v->tt_index = 1;
1437 else v->tt_index = 2;
1439 lowquant = (v->pq > 12) ? 0 : 1;
1440 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1441 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1443 int scale, shift, i;
1444 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1445 v->lumscale = get_bits(gb, 6);
1446 v->lumshift = get_bits(gb, 6);
1448 /* fill lookup tables for intensity compensation */
1451 shift = (255 - v->lumshift * 2) << 6;
1452 if(v->lumshift > 31)
1455 scale = v->lumscale + 32;
1456 if(v->lumshift > 31)
1457 shift = (v->lumshift - 64) << 6;
1459 shift = v->lumshift << 6;
/* luty/lutuv: 256-entry LUTs applying the linear luma/chroma transform
 * with rounding (+32) and clipping to 8 bits */
1461 for(i = 0; i < 256; i++) {
1462 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1463 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1466 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1467 v->s.quarter_sample = 0;
1468 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1469 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1470 v->s.quarter_sample = 0;
1472 v->s.quarter_sample = 1;
1474 v->s.quarter_sample = 1;
1475 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
/* MVTYPEMB bitplane only present in mixed-MV mode */
1477 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1478 v->mv_mode2 == MV_PMODE_MIXED_MV)
1479 || v->mv_mode == MV_PMODE_MIXED_MV)
1481 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1482 if (status < 0) return -1;
1483 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1484 "Imode: %i, Invert: %i\n", status>>1, status&1);
1486 v->mv_type_is_raw = 0;
1487 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1489 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1490 if (status < 0) return -1;
1491 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1492 "Imode: %i, Invert: %i\n", status>>1, status&1);
1494 /* Hopefully this is correct for P frames */
1495 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1496 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1500 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1501 vop_dquant_decoding(v);
1504 v->ttfrm = 0; //FIXME Is that so ?
1507 v->ttmbf = get_bits(gb, 1);
1510 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* B-frame path */
1518 if (v->pq < 5) v->tt_index = 0;
1519 else if(v->pq < 13) v->tt_index = 1;
1520 else v->tt_index = 2;
1522 lowquant = (v->pq > 12) ? 0 : 1;
1523 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1524 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1525 v->s.mspel = v->s.quarter_sample;
1527 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1528 if (status < 0) return -1;
1529 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1530 "Imode: %i, Invert: %i\n", status>>1, status&1);
1531 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1532 if (status < 0) return -1;
1533 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1534 "Imode: %i, Invert: %i\n", status>>1, status&1);
1536 v->s.mv_table_index = get_bits(gb, 2);
1537 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1541 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1542 vop_dquant_decoding(v);
1548 v->ttmbf = get_bits(gb, 1);
1551 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* AC/DC VLC table selection, shared by all picture types */
1561 v->c_ac_table_index = decode012(gb);
1562 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1564 v->y_ac_table_index = decode012(gb);
1567 v->s.dc_table_index = get_bits(gb, 1);
/* BI frames are decoded as B frames internally */
1569 if(v->s.pict_type == BI_TYPE) {
1570 v->s.pict_type = B_TYPE;
/* Parse an Advanced Profile frame header.
 * Like vc1_parse_frame_header() but with the AP picture-type prefix code,
 * skipped-picture handling, AC prediction / conditional-overlap bitplanes
 * for I frames, and no range reduction.
 * Returns -1 on bitplane decoding failure.
 * NOTE(review): interior lines (case labels, else branches, the `fcm` and
 * `status` declarations, final return) are missing from this chunk. */
1576 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
1579 int pqindex, lowquant;
1582 v->p_frame_skipped = 0;
1585 fcm = decode012(gb);
/* picture type is a unary prefix code: P, B, I, BI, skipped-P */
1586 switch(get_prefix(gb, 0, 4)) {
1588 v->s.pict_type = P_TYPE;
1591 v->s.pict_type = B_TYPE;
1595 v->s.pict_type = I_TYPE;
1598 v->s.pict_type = BI_TYPE;
1601 v->s.pict_type = P_TYPE; // skipped pic
1602 v->p_frame_skipped = 1;
1608 if(!v->interlace || v->panscanflag) {
1615 if(v->panscanflag) {
1618 v->rnd = get_bits1(gb);
1620 v->uvsamp = get_bits1(gb);
1621 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
/* quantizer setup, same scheme as the Simple/Main path */
1622 pqindex = get_bits(gb, 5);
1623 v->pqindex = pqindex;
1624 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1625 v->pq = pquant_table[0][pqindex];
1627 v->pq = pquant_table[1][pqindex];
1630 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1631 v->pquantizer = pqindex < 9;
1632 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1634 v->pqindex = pqindex;
1635 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1637 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1638 v->pquantizer = get_bits(gb, 1);
1640 switch(v->s.pict_type) {
/* I/BI: AC prediction bitplane, then conditional overlap smoothing */
1643 status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
1644 if (status < 0) return -1;
1645 av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
1646 "Imode: %i, Invert: %i\n", status>>1, status&1);
1647 v->condover = CONDOVER_NONE;
1648 if(v->overlap && v->pq <= 8) {
1649 v->condover = decode012(gb);
1650 if(v->condover == CONDOVER_SELECT) {
1651 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
1652 if (status < 0) return -1;
1653 av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
1654 "Imode: %i, Invert: %i\n", status>>1, status&1);
/* P-frame path: MV range, mode, intensity compensation, bitplanes */
1660 v->postproc = get_bits1(gb);
1661 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1662 else v->mvrange = 0;
1663 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1664 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1665 v->range_x = 1 << (v->k_x - 1);
1666 v->range_y = 1 << (v->k_y - 1);
1668 if (v->pq < 5) v->tt_index = 0;
1669 else if(v->pq < 13) v->tt_index = 1;
1670 else v->tt_index = 2;
1672 lowquant = (v->pq > 12) ? 0 : 1;
1673 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1674 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1676 int scale, shift, i;
1677 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1678 v->lumscale = get_bits(gb, 6);
1679 v->lumshift = get_bits(gb, 6);
1680 /* fill lookup tables for intensity compensation */
1683 shift = (255 - v->lumshift * 2) << 6;
1684 if(v->lumshift > 31)
1687 scale = v->lumscale + 32;
1688 if(v->lumshift > 31)
1689 shift = (v->lumshift - 64) << 6;
1691 shift = v->lumshift << 6;
1693 for(i = 0; i < 256; i++) {
1694 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1695 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1698 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1699 v->s.quarter_sample = 0;
1700 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1701 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1702 v->s.quarter_sample = 0;
1704 v->s.quarter_sample = 1;
1706 v->s.quarter_sample = 1;
1707 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1709 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1710 v->mv_mode2 == MV_PMODE_MIXED_MV)
1711 || v->mv_mode == MV_PMODE_MIXED_MV)
1713 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1714 if (status < 0) return -1;
1715 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1716 "Imode: %i, Invert: %i\n", status>>1, status&1);
1718 v->mv_type_is_raw = 0;
1719 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1721 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1722 if (status < 0) return -1;
1723 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1724 "Imode: %i, Invert: %i\n", status>>1, status&1);
1726 /* Hopefully this is correct for P frames */
1727 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1728 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1731 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1732 vop_dquant_decoding(v);
1735 v->ttfrm = 0; //FIXME Is that so ?
1738 v->ttmbf = get_bits(gb, 1);
1741 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* AC/DC table selection, shared by all picture types */
1751 v->c_ac_table_index = decode012(gb);
1752 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1754 v->y_ac_table_index = decode012(gb);
1757 v->s.dc_table_index = get_bits(gb, 1);
1758 if (v->s.pict_type == I_TYPE && v->dquant) {
1759 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1760 vop_dquant_decoding(v);
/* BI frames are decoded as B frames internally */
1764 if(v->s.pict_type == BI_TYPE) {
1765 v->s.pict_type = B_TYPE;
1771 /***********************************************************************/
1773 * @defgroup block VC-1 Block-level functions
1774 * @see 7.1.4, p91 and 8.1.1.7, p104
1780 * @brief Get macroblock-level quantizer scale
/* Computes the per-macroblock quantizer `mquant` from v->pq and v->altpq
 * according to the DQUANT profile: all-MBs (per-MB flag or 3-bit MQDIFF /
 * escaped 5-bit absolute value), or edge-based selection where MBs on the
 * chosen picture edges use ALTPQUANT.  Expects `v`, `s`, `gb`, `mquant`,
 * `mqdiff` and `edges` to be in scope at the expansion site.
 * NOTE(review): several continuation lines of this macro are not visible
 * in this chunk. */
1782 #define GET_MQUANT() \
1786 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1790 mquant = (get_bits(gb, 1)) ? v->altpq : v->pq; \
1794 mqdiff = get_bits(gb, 3); \
1795 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1796 else mquant = get_bits(gb, 5); \
1799 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1800 edges = 1 << v->dqsbedge; \
1801 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1802 edges = (3 << v->dqsbedge) % 15; \
1803 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
1805 if((edges&1) && !s->mb_x) \
1806 mquant = v->altpq; \
1807 if((edges&2) && s->first_slice_line) \
1808 mquant = v->altpq; \
1809 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1810 mquant = v->altpq; \
1811 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1812 mquant = v->altpq; \
1816 * @def GET_MVDATA(_dmv_x, _dmv_y)
1817 * @brief Get MV differentials
1818 * @see MVDATA decoding from 8.3.5.2, p120
1819 * @param _dmv_x Horizontal differential for decoded MV
1820 * @param _dmv_y Vertical differential for decoded MV
/* Decodes one MVDATA element: a VLC index selects one of 37 cases; index 0
 * is a zero differential, 35 reads raw k_x/k_y-bit values, 36 signals an
 * intra block (handling not visible here); otherwise the differential is
 * reconstructed from size/offset tables with a sign folded into the LSB.
 * Also sets `mb_has_coeffs`.  Expects `gb`, `s`, `v`, `index`, `index1`,
 * `val`, `sign`, `size_table` and `offset_table` in scope at the expansion
 * site.  NOTE(review): several continuation lines are not visible. */
1822 #define GET_MVDATA(_dmv_x, _dmv_y) \
1823 index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
1824 VC1_MV_DIFF_VLC_BITS, 2); \
1827 mb_has_coeffs = 1; \
1830 else mb_has_coeffs = 0; \
1832 if (!index) { _dmv_x = _dmv_y = 0; } \
1833 else if (index == 35) \
1835 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1836 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1838 else if (index == 36) \
1847 if (!s->quarter_sample && index1 == 5) val = 1; \
1849 if(size_table[index1] - val > 0) \
1850 val = get_bits(gb, size_table[index1] - val); \
1852 sign = 0 - (val&1); \
1853 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1856 if (!s->quarter_sample && index1 == 5) val = 1; \
1858 if(size_table[index1] - val > 0) \
1859 val = get_bits(gb, size_table[index1] - val); \
1861 sign = 0 - (val&1); \
1862 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1865 /** Predict and set motion vector
 * Median-predicts the MV for block n from neighbours A (above), B
 * (above-right/left) and C (left), pulls the result back inside the
 * picture (8.3.5.3.4), optionally applies hybrid prediction
 * (8.3.5.3.5), adds the differential and stores the MV with signed
 * modulus of the MV range (4.11).  mv1 != 0 means 1-MV mode: the MV is
 * duplicated into all four 8x8 positions of the MB.
 * NOTE(review): interior lines (the A/B/C/px/py declarations, several
 * branches, the intra early-out condition) are not visible here. */
1867 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
1869 int xy, wrap, off = 0;
1874 /* scale MV difference to be quad-pel */
1875 dmv_x <<= 1 - s->quarter_sample;
1876 dmv_y <<= 1 - s->quarter_sample;
1878 wrap = s->b8_stride;
1879 xy = s->block_index[n];
/* intra block: zero MV, duplicated for 1-MV mode */
1882 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1883 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1884 if(mv1) { /* duplicate motion data for 1-MV block */
1885 s->current_picture.motion_val[0][xy + 1][0] = 0;
1886 s->current_picture.motion_val[0][xy + 1][1] = 0;
1887 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1888 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1889 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1890 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
1895 C = s->current_picture.motion_val[0][xy - 1];
1896 A = s->current_picture.motion_val[0][xy - wrap];
1898 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1900 //in 4-MV mode different blocks have different B predictor position
1903 off = (s->mb_x > 0) ? -1 : 1;
1906 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1915 B = s->current_picture.motion_val[0][xy - wrap + off];
1917 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
1918 if(s->mb_width == 1) {
1922 px = mid_pred(A[0], B[0], C[0]);
1923 py = mid_pred(A[1], B[1], C[1]);
1925 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
1931 /* Pullback MV as specified in 8.3.5.3.4 */
/* qx/qy: block position in qpel units; bounds differ for 1-MV vs 4-MV */
1934 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
1935 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
1936 X = (s->mb_width << 6) - 4;
1937 Y = (s->mb_height << 6) - 4;
1939 if(qx + px < -60) px = -60 - qx;
1940 if(qy + py < -60) py = -60 - qy;
1942 if(qx + px < -28) px = -28 - qx;
1943 if(qy + py < -28) py = -28 - qy;
1945 if(qx + px > X) px = X - qx;
1946 if(qy + py > Y) py = Y - qy;
1948 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
1949 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
1950 if(is_intra[xy - wrap])
1951 sum = ABS(px) + ABS(py);
1953 sum = ABS(px - A[0]) + ABS(py - A[1]);
/* explicit hybrid-prediction bit chooses between A and C predictor */
1955 if(get_bits1(&s->gb)) {
1963 if(is_intra[xy - 1])
1964 sum = ABS(px) + ABS(py);
1966 sum = ABS(px - C[0]) + ABS(py - C[1]);
1968 if(get_bits1(&s->gb)) {
1978 /* store MV using signed modulus of MV range defined in 4.11 */
1979 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1980 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1981 if(mv1) { /* duplicate motion data for 1-MV block */
1982 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
1983 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
1984 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
1985 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
1986 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
1987 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
1991 /** Motion compensation for direct or interpolated blocks in B-frames
 * Averages the backward-reference (next_picture) prediction into the
 * already-rendered forward prediction using avg_* DSP functions.
 * Handles edge emulation, range-reduction scaling of the source, and
 * grayscale-only decoding.  No-op if no next picture is available.
 * NOTE(review): interior lines (the emulation condition head, some loop
 * setup) are not visible here. */
1993 static void vc1_interp_mc(VC1Context *v)
1995 MpegEncContext *s = &v->s;
1996 DSPContext *dsp = &v->s.dsp;
1997 uint8_t *srcY, *srcU, *srcV;
1998 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
2000 if(!v->s.next_picture.data[0])return;
/* luma MV in qpel; chroma MV derived by halving with VC-1 rounding */
2002 mx = s->mv[1][0][0];
2003 my = s->mv[1][0][1];
2004 uvmx = (mx + ((mx & 3) == 3)) >> 1;
2005 uvmy = (my + ((my & 3) == 3)) >> 1;
2006 srcY = s->next_picture.data[0];
2007 srcU = s->next_picture.data[1];
2008 srcV = s->next_picture.data[2];
2010 src_x = s->mb_x * 16 + (mx >> 2);
2011 src_y = s->mb_y * 16 + (my >> 2);
2012 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2013 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2015 src_x = clip( src_x, -16, s->mb_width * 16);
2016 src_y = clip( src_y, -16, s->mb_height * 16);
2017 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
2018 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
2020 srcY += src_y * s->linesize + src_x;
2021 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2022 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2024 /* for grayscale we should not try to read from unknown area */
2025 if(s->flags & CODEC_FLAG_GRAY) {
2026 srcU = s->edge_emu_buffer + 18 * s->linesize;
2027 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* copy out-of-frame sources into the edge emulation buffer */
2031 || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
2032 || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
2033 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
2035 srcY -= s->mspel * (1 + s->linesize);
2036 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
2037 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
2038 srcY = s->edge_emu_buffer;
2039 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
2040 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2041 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
2042 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2045 /* if we deal with range reduction we need to scale source blocks */
2046 if(v->rangeredfrm) {
2048 uint8_t *src, *src2;
2051 for(j = 0; j < 17 + s->mspel*2; j++) {
2052 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
2055 src = srcU; src2 = srcV;
2056 for(j = 0; j < 9; j++) {
2057 for(i = 0; i < 9; i++) {
2058 src[i] = ((src[i] - 128) >> 1) + 128;
2059 src2[i] = ((src2[i] - 128) >> 1) + 128;
2061 src += s->uvlinesize;
2062 src2 += s->uvlinesize;
2065 srcY += s->mspel * (1 + s->linesize);
/* drop the hpel fractional bits of the chroma MV toward zero */
2069 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
2070 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
2075 dxy = ((my & 1) << 1) | (mx & 1);
2077 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
2079 if(s->flags & CODEC_FLAG_GRAY) return;
2080 /* Chroma MC always uses qpel bilinear */
2081 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
2084 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
2085 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Scale a reference-picture MV component by the B-frame fraction
 * (forward when inv==0, backward when inv!=0 — selection logic not
 * visible here).  `qs` selects quarter-sample precision; the hpel path
 * halves before rounding and doubles afterwards to keep hpel alignment.
 * The B_FRACTION_DEN==256 branch replaces the division by shifts.
 * NOTE(review): the `n` setup and the surrounding #else/#endif lines are
 * not visible in this chunk. */
2088 static always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2092 #if B_FRACTION_DEN==256
2096 return 2 * ((value * n + 255) >> 9);
2097 return (value * n + 128) >> 8;
2100 n -= B_FRACTION_DEN;
2102 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2103 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2107 /** Reconstruct motion vector for B-frame and do motion compensation
 * Dispatches to the forward/backward/interpolated MC paths.  When
 * intensity compensation is active (v->use_ic), the MV mode is
 * temporarily forced to MV_PMODE_INTENSITY_COMP so the MC routines
 * apply the luma/chroma LUTs, and restored afterwards.
 * NOTE(review): the `direct` branch head and some surrounding lines are
 * not visible in this chunk. */
2109 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
2114 v->mv_mode2 = v->mv_mode;
2115 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2120 if(v->use_ic) v->mv_mode = v->mv_mode2;
2123 if(mode == BMV_TYPE_INTERPOLATED) {
2126 if(v->use_ic) v->mv_mode = v->mv_mode2;
/* IC only applies to the forward (backward-mode==false) reference */
2130 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
2131 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2132 if(v->use_ic) v->mv_mode = v->mv_mode2;
/* Predict B-frame motion vectors.
 * direct mode: both forward and backward MVs are scaled from the
 * co-located next-picture MV via scale_mv().  Otherwise median
 * prediction from neighbours A/B/C is done independently for the
 * forward (mvtype FORWARD/INTERPOLATED) and backward
 * (BACKWARD/INTERPOLATED) directions, with pullback (8.3.5.3.4);
 * the hybrid-prediction branches are disabled with `if(0 && ...)`.
 * Results are stored with signed modulus of the MV range (4.11).
 * NOTE(review): interior lines (A/B/C/px/py/r_x/r_y declarations, some
 * branches and returns) are not visible in this chunk. */
2135 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
2137 MpegEncContext *s = &v->s;
2138 int xy, wrap, off = 0;
2143 const uint8_t *is_intra = v->mb_type[0];
2147 /* scale MV difference to be quad-pel */
2148 dmv_x[0] <<= 1 - s->quarter_sample;
2149 dmv_y[0] <<= 1 - s->quarter_sample;
2150 dmv_x[1] <<= 1 - s->quarter_sample;
2151 dmv_y[1] <<= 1 - s->quarter_sample;
2153 wrap = s->b8_stride;
2154 xy = s->block_index[0];
2157 s->current_picture.motion_val[0][xy][0] =
2158 s->current_picture.motion_val[0][xy][1] =
2159 s->current_picture.motion_val[1][xy][0] =
2160 s->current_picture.motion_val[1][xy][1] = 0;
/* direct mode: derive both MVs from the co-located backward MV */
2163 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2164 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2165 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2166 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2168 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2169 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2170 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2171 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
/* forward direction: median predict from list-0 neighbours */
2175 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2176 C = s->current_picture.motion_val[0][xy - 2];
2177 A = s->current_picture.motion_val[0][xy - wrap*2];
2178 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2179 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
2181 if(!s->first_slice_line) { // predictor A is not out of bounds
2182 if(s->mb_width == 1) {
2186 px = mid_pred(A[0], B[0], C[0]);
2187 py = mid_pred(A[1], B[1], C[1]);
2189 } else if(s->mb_x) { // predictor C is not out of bounds
2195 /* Pullback MV as specified in 8.3.5.3.4 */
2198 if(v->profile < PROFILE_ADVANCED) {
2199 qx = (s->mb_x << 5);
2200 qy = (s->mb_y << 5);
2201 X = (s->mb_width << 5) - 4;
2202 Y = (s->mb_height << 5) - 4;
2203 if(qx + px < -28) px = -28 - qx;
2204 if(qy + py < -28) py = -28 - qy;
2205 if(qx + px > X) px = X - qx;
2206 if(qy + py > Y) py = Y - qy;
2208 qx = (s->mb_x << 6);
2209 qy = (s->mb_y << 6);
2210 X = (s->mb_width << 6) - 4;
2211 Y = (s->mb_height << 6) - 4;
2212 if(qx + px < -60) px = -60 - qx;
2213 if(qy + py < -60) py = -60 - qy;
2214 if(qx + px > X) px = X - qx;
2215 if(qy + py > Y) py = Y - qy;
2218 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2219 if(0 && !s->first_slice_line && s->mb_x) { /* disabled on purpose (if(0)) */
2220 if(is_intra[xy - wrap])
2221 sum = ABS(px) + ABS(py);
2223 sum = ABS(px - A[0]) + ABS(py - A[1]);
2225 if(get_bits1(&s->gb)) {
2233 if(is_intra[xy - 2])
2234 sum = ABS(px) + ABS(py);
2236 sum = ABS(px - C[0]) + ABS(py - C[1]);
2238 if(get_bits1(&s->gb)) {
2248 /* store MV using signed modulus of MV range defined in 4.11 */
2249 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2250 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* backward direction: same procedure on list-1 neighbours */
2252 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2253 C = s->current_picture.motion_val[1][xy - 2];
2254 A = s->current_picture.motion_val[1][xy - wrap*2];
2255 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2256 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
2258 if(!s->first_slice_line) { // predictor A is not out of bounds
2259 if(s->mb_width == 1) {
2263 px = mid_pred(A[0], B[0], C[0]);
2264 py = mid_pred(A[1], B[1], C[1]);
2266 } else if(s->mb_x) { // predictor C is not out of bounds
2272 /* Pullback MV as specified in 8.3.5.3.4 */
2275 if(v->profile < PROFILE_ADVANCED) {
2276 qx = (s->mb_x << 5);
2277 qy = (s->mb_y << 5);
2278 X = (s->mb_width << 5) - 4;
2279 Y = (s->mb_height << 5) - 4;
2280 if(qx + px < -28) px = -28 - qx;
2281 if(qy + py < -28) py = -28 - qy;
2282 if(qx + px > X) px = X - qx;
2283 if(qy + py > Y) py = Y - qy;
2285 qx = (s->mb_x << 6);
2286 qy = (s->mb_y << 6);
2287 X = (s->mb_width << 6) - 4;
2288 Y = (s->mb_height << 6) - 4;
2289 if(qx + px < -60) px = -60 - qx;
2290 if(qy + py < -60) py = -60 - qy;
2291 if(qx + px > X) px = X - qx;
2292 if(qy + py > Y) py = Y - qy;
2295 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2296 if(0 && !s->first_slice_line && s->mb_x) { /* disabled on purpose (if(0)) */
2297 if(is_intra[xy - wrap])
2298 sum = ABS(px) + ABS(py);
2300 sum = ABS(px - A[0]) + ABS(py - A[1]);
2302 if(get_bits1(&s->gb)) {
2310 if(is_intra[xy - 2])
2311 sum = ABS(px) + ABS(py);
2313 sum = ABS(px - C[0]) + ABS(py - C[1]);
2315 if(get_bits1(&s->gb)) {
2325 /* store MV using signed modulus of MV range defined in 4.11 */
2327 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2328 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2330 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2331 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2332 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2333 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2336 /** Get predicted DC value for I-frames only
2337 * prediction dir: left=0, top=1
2338 * @param s MpegEncContext
2339 * @param[in] n block index in the current MB
2340 * @param dc_val_ptr Pointer to DC predictor
2341 * @param dir_ptr Prediction direction for use in AC prediction
 * Out-of-picture neighbours are replaced by dcpred[scale] (the midpoint
 * DC for that scale) when pq<9 or overlap is off, otherwise by 0.
 * Chooses the left (c) or top (a) predictor by comparing gradients
 * against the top-left (b).
 * NOTE(review): interior lines (dc_val/c declarations, the pred
 * assignment and dir_ptr update, the return) are not visible here. */
2343 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2344 uint16_t **dc_val_ptr, int *dir_ptr)
2346 int a, b, c, wrap, pred, scale;
/* dcpred[scale] = (1024 + scale/2) / scale, precomputed */
2348 static const uint16_t dcpred[32] = {
2349 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2350 114, 102, 93, 85, 79, 73, 68, 64,
2351 60, 57, 54, 51, 49, 47, 45, 43,
2352 41, 39, 38, 37, 35, 34, 33
2355 /* find prediction - wmv3_dc_scale always used here in fact */
2356 if (n < 4) scale = s->y_dc_scale;
2357 else scale = s->c_dc_scale;
2359 wrap = s->block_wrap[n];
2360 dc_val= s->dc_val[0] + s->block_index[n];
/* b = top-left, a = top neighbour DC */
2366 b = dc_val[ - 1 - wrap];
2367 a = dc_val[ - wrap];
2369 if (pq < 9 || !overlap)
2371 /* Set outer values */
2372 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2373 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2377 /* Set outer values */
2378 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2379 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
/* gradient test picks horizontal (c) vs vertical (a) prediction */
2382 if (abs(a - b) <= abs(b - c)) {
2390 /* update predictor */
2391 *dc_val_ptr = &dc_val[0];
2396 /** Get predicted DC value
2397 * prediction dir: left=0, top=1
2398 * @param s MpegEncContext
2399 * @param[in] n block index in the current MB
2400 * @param dc_val_ptr Pointer to DC predictor
2401 * @param dir_ptr Prediction direction for use in AC prediction
 * Unlike vc1_i_pred_dc(), neighbour availability is passed in
 * (a_avail/c_avail) and neighbour DC values are rescaled when the
 * neighbouring MB used a different quantizer (via qscale_table and
 * vc1_dqscale fixed-point reciprocals).
 * NOTE(review): interior lines (dc_val/c/q1/q2 declarations, the q1!=q2
 * guards, pred selection and return) are not visible in this chunk. */
2403 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2404 int a_avail, int c_avail,
2405 uint16_t **dc_val_ptr, int *dir_ptr)
2407 int a, b, c, wrap, pred, scale;
2409 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2412 /* find prediction - wmv3_dc_scale always used here in fact */
2413 if (n < 4) scale = s->y_dc_scale;
2414 else scale = s->c_dc_scale;
2416 wrap = s->block_wrap[n];
2417 dc_val= s->dc_val[0] + s->block_index[n];
/* b = top-left, a = top neighbour DC */
2423 b = dc_val[ - 1 - wrap];
2424 a = dc_val[ - wrap];
2425 /* scale predictors if needed */
2426 q1 = s->current_picture.qscale_table[mb_pos];
2427 if(c_avail && (n!= 1 && n!=3)) {
2428 q2 = s->current_picture.qscale_table[mb_pos - 1];
2430 c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2432 if(a_avail && (n!= 2 && n!=3)) {
2433 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2435 a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2437 if(a_avail && c_avail && (n!=3)) {
/* the top-left neighbour lives one MB left and (except block 2) one row up */
2440 if(n != 2) off -= s->mb_stride;
2441 q2 = s->current_picture.qscale_table[off];
2443 b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* pick direction: gradient test when both available, else whichever exists */
2446 if(a_avail && c_avail) {
2447 if(abs(a - b) <= abs(b - c)) {
2454 } else if(a_avail) {
2457 } else if(c_avail) {
2465 /* update predictor */
2466 *dc_val_ptr = &dc_val[0];
2472 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2473 * @see 7.1.4, p91 and 8.1.1.7, p104
/* Predict the coded/not-coded status of block n from the left (a),
 * top-left (b) and top (c) entries of s->coded_block, and return a
 * pointer to this block's slot so the caller can update it.
 * NOTE(review): the prediction formula and return are on lines not
 * visible in this chunk. */
2477 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
2479 int xy, wrap, pred, a, b, c;
2481 xy = s->block_index[n];
2482 wrap = s->b8_stride;
2487 a = s->coded_block[xy - 1 ];
2488 b = s->coded_block[xy - 1 - wrap];
2489 c = s->coded_block[xy - wrap];
2498 *coded_block_ptr = &s->coded_block[xy];
2504 * Decode one AC coefficient
2505 * @param v The VC1 context
2506 * @param last Last coefficient
2507 * @param skip How much zero coefficients to skip
2508 * @param value Decoded AC coefficient value
/* Decode one (run, level, last) AC triple from the chosen coding set.
 * Normal indices map straight through the index/last decode tables; the
 * top index is an escape, refined by decode210() into three escape modes:
 * delta-level, delta-run, or fully explicit run/level whose bit lengths
 * (esc3_*) are read once per frame on first use.
 * NOTE(review): elided listing — the sign handling, the branches selecting
 * between the escape modes, and the *last/*skip/*value stores are missing
 * from this excerpt. */
2511 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
2513 GetBitContext *gb = &v->s.gb;
2514 int index, escape, run = 0, level = 0, lst = 0;
2516 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
/* Non-escape index: table lookup gives run/level, position gives "last". */
2517 if (index != vc1_ac_sizes[codingset] - 1) {
2518 run = vc1_index_decode_table[codingset][index][0];
2519 level = vc1_index_decode_table[codingset][index][1];
2520 lst = index >= vc1_last_decode_table[codingset];
2524 escape = decode210(gb);
2526 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2527 run = vc1_index_decode_table[codingset][index][0];
2528 level = vc1_index_decode_table[codingset][index][1];
2529 lst = index >= vc1_last_decode_table[codingset];
/* escape mode 1: level is a delta on top of the table value */
2532 level += vc1_last_delta_level_table[codingset][run];
2534 level += vc1_delta_level_table[codingset][run];
/* escape mode 2: run is a delta on top of the table value */
2537 run += vc1_last_delta_run_table[codingset][level] + 1;
2539 run += vc1_delta_run_table[codingset][level] + 1;
/* escape mode 3: explicit run/level; field widths are decoded lazily the
 * first time this mode is hit in the frame (table 59 vs. prefix code). */
2545 lst = get_bits(gb, 1);
2546 if(v->s.esc3_level_length == 0) {
2547 if(v->pq < 8 || v->dquantfrm) { // table 59
2548 v->s.esc3_level_length = get_bits(gb, 3);
2549 if(!v->s.esc3_level_length)
2550 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2552 v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
2554 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2556 run = get_bits(gb, v->s.esc3_run_length);
2557 sign = get_bits(gb, 1);
2558 level = get_bits(gb, v->s.esc3_level_length);
2569 /** Decode intra block in intra frames - should be faster than decode_intra_block
2570 * @param v VC1Context
2571 * @param block block to decode
2572 * @param coded are AC coeffs present or not
2573 * @param codingset set of VLC to decode data
/* Decode one intra block of an I frame (Simple/Main profile path):
 * DC differential via the MSMPEG4 DC VLCs (with 119 as escape), then the
 * AC run/level pairs along a zigzag chosen by the DC prediction direction,
 * with AC prediction from the row/column of the neighbouring block.
 * NOTE(review): elided listing — variable declarations (dcdiff, dc_val,
 * i, k, scale, ac_val2 assignment), several if/else heads, the sign flip
 * after the escape read, and the coded/uncoded branch structure are
 * missing from this excerpt. */
2575 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
2577 GetBitContext *gb = &v->s.gb;
2578 MpegEncContext *s = &v->s;
2579 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2582 int16_t *ac_val, *ac_val2;
2585 /* Get DC differential */
2587 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2589 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2592 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2597 if (dcdiff == 119 /* ESC index value */)
2599 /* TODO: Optimize */
/* Escape: DC read as a fixed-width field whose width depends on PQUANT. */
2600 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2601 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2602 else dcdiff = get_bits(gb, 8);
/* Non-escape at low PQUANT: refine the VLC value with extra bits. */
2607 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2608 else if (v->pq == 2)
2609 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2611 if (get_bits(gb, 1))
2616 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2619 /* Store the quantized DC coeff, used for prediction */
2621 block[0] = dcdiff * s->y_dc_scale;
2623 block[0] = dcdiff * s->c_dc_scale;
2636 int last = 0, skip, value;
2637 const int8_t *zz_table;
2641 scale = v->pq * 2 + v->halfpq;
/* Zigzag pattern follows the DC prediction direction. */
2645 zz_table = vc1_horizontal_zz;
2647 zz_table = vc1_vertical_zz;
2649 zz_table = vc1_normal_zz;
2651 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2653 if(dc_pred_dir) //left
2656 ac_val -= 16 * s->block_wrap[n];
2659 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2663 block[zz_table[i++]] = value;
2666 /* apply AC prediction if needed */
/* Left prediction adds the saved first column; top adds the first row. */
2668 if(dc_pred_dir) { //left
2669 for(k = 1; k < 8; k++)
2670 block[k << 3] += ac_val[k];
2672 for(k = 1; k < 8; k++)
2673 block[k] += ac_val[k + 8];
2676 /* save AC coeffs for further prediction */
2677 for(k = 1; k < 8; k++) {
2678 ac_val2[k] = block[k << 3];
2679 ac_val2[k + 8] = block[k];
2682 /* scale AC coeffs */
2683 for(k = 1; k < 64; k++)
2687 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2690 if(s->ac_pred) i = 63;
/* Uncoded branch: only predicted ACs (if any) populate the block. */
2696 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2699 scale = v->pq * 2 + v->halfpq;
2700 memset(ac_val2, 0, 16 * 2);
2701 if(dc_pred_dir) {//left
2704 memcpy(ac_val2, ac_val, 8 * 2);
2706 ac_val -= 16 * s->block_wrap[n];
2708 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2711 /* apply AC prediction if needed */
2713 if(dc_pred_dir) { //left
2714 for(k = 1; k < 8; k++) {
2715 block[k << 3] = ac_val[k] * scale;
2716 if(!v->pquantizer && block[k << 3])
2717 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2720 for(k = 1; k < 8; k++) {
2721 block[k] = ac_val[k + 8] * scale;
2722 if(!v->pquantizer && block[k])
2723 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2729 s->block_last_index[n] = i;
2734 /** Decode intra block in intra frames - should be faster than decode_intra_block
2735 * @param v VC1Context
2736 * @param block block to decode
2737 * @param coded are AC coeffs present or not
2738 * @param codingset set of VLC to decode data
/* Decode one intra block of an Advanced-profile I frame. Same structure as
 * vc1_decode_i_block, plus: per-MB quantizer (mquant), explicit A/C
 * neighbour availability, and rescaling of the predicted AC row/column
 * through vc1_dqscale when the neighbour MB used a different quantizer
 * (q2 != q1).
 * NOTE(review): elided listing — declarations (dcdiff, dc_val, i, k, scale,
 * q1, q2, ac_val2), sign handling, and several if/else heads are missing
 * from this excerpt. */
2740 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
2742 GetBitContext *gb = &v->s.gb;
2743 MpegEncContext *s = &v->s;
2744 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2747 int16_t *ac_val, *ac_val2;
2749 int a_avail = v->a_avail, c_avail = v->c_avail;
2750 int use_pred = s->ac_pred;
2753 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2755 /* Get DC differential */
2757 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2759 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2762 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2767 if (dcdiff == 119 /* ESC index value */)
2769 /* TODO: Optimize */
/* Escape width and extra refinement bits depend on mquant, not v->pq. */
2770 if (mquant == 1) dcdiff = get_bits(gb, 10);
2771 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2772 else dcdiff = get_bits(gb, 8);
2777 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2778 else if (mquant == 2)
2779 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2781 if (get_bits(gb, 1))
2786 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2789 /* Store the quantized DC coeff, used for prediction */
2791 block[0] = dcdiff * s->y_dc_scale;
2793 block[0] = dcdiff * s->c_dc_scale;
2802 /* check if AC is needed at all and adjust direction if needed */
2803 if(!a_avail) dc_pred_dir = 1;
2804 if(!c_avail) dc_pred_dir = 0;
2805 if(!a_avail && !c_avail) use_pred = 0;
2806 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2809 scale = mquant * 2 + v->halfpq;
2811 if(dc_pred_dir) //left
2814 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer, q2 = the predicting neighbour's; blocks 1-3
 * (luma, not block 0) predict from inside the same MB, so q2 = q1. */
2816 q1 = s->current_picture.qscale_table[mb_pos];
2817 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
2818 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2819 if(n && n<4) q2 = q1;
2822 int last = 0, skip, value;
2823 const int8_t *zz_table;
2828 zz_table = vc1_horizontal_zz;
2830 zz_table = vc1_vertical_zz;
2832 zz_table = vc1_normal_zz;
2835 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2839 block[zz_table[i++]] = value;
2842 /* apply AC prediction if needed */
2844 /* scale predictors if needed*/
/* Neighbour used a different quantizer: rescale its saved AC line with
 * 18-bit fixed-point rounding before adding it. */
2849 if(dc_pred_dir) { //left
2850 for(k = 1; k < 8; k++)
2851 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2853 for(k = 1; k < 8; k++)
2854 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2857 if(dc_pred_dir) { //left
2858 for(k = 1; k < 8; k++)
2859 block[k << 3] += ac_val[k];
2861 for(k = 1; k < 8; k++)
2862 block[k] += ac_val[k + 8];
2866 /* save AC coeffs for further prediction */
2867 for(k = 1; k < 8; k++) {
2868 ac_val2[k] = block[k << 3];
2869 ac_val2[k + 8] = block[k];
2872 /* scale AC coeffs */
2873 for(k = 1; k < 64; k++)
2877 block[k] += (block[k] < 0) ? -mquant : mquant;
2880 if(use_pred) i = 63;
2881 } else { // no AC coeffs
2884 memset(ac_val2, 0, 16 * 2);
2885 if(dc_pred_dir) {//left
2887 memcpy(ac_val2, ac_val, 8 * 2);
/* Rescale the copied predictor line if the neighbour's quantizer differs. */
2891 for(k = 1; k < 8; k++)
2892 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2897 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2901 for(k = 1; k < 8; k++)
2902 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2907 /* apply AC prediction if needed */
2909 if(dc_pred_dir) { //left
2910 for(k = 1; k < 8; k++) {
2911 block[k << 3] = ac_val2[k] * scale;
2912 if(!v->pquantizer && block[k << 3])
2913 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
2916 for(k = 1; k < 8; k++) {
2917 block[k] = ac_val2[k + 8] * scale;
2918 if(!v->pquantizer && block[k])
2919 block[k] += (block[k] < 0) ? -mquant : mquant;
2925 s->block_last_index[n] = i;
2930 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2931 * @param v VC1Context
2932 * @param block block to decode
2933 * @param coded are AC coeffs present or not
2934 * @param mquant block quantizer
2935 * @param codingset set of VLC to decode data
/* Decode an intra block inside a P/B (inter) frame — the generic variant of
 * vc1_decode_i_block: per-block quantizer, explicit neighbour availability,
 * quantizer-rescaled AC prediction, but always the simple-progressive 8x8
 * zigzag (no direction-dependent scan).
 * NOTE(review): elided listing — declarations (dcdiff, dc_val, i, k, scale,
 * q1, q2, ac_val2), sign handling and several branch heads are missing from
 * this excerpt. */
2937 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
2939 GetBitContext *gb = &v->s.gb;
2940 MpegEncContext *s = &v->s;
2941 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2944 int16_t *ac_val, *ac_val2;
2946 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2947 int a_avail = v->a_avail, c_avail = v->c_avail;
2948 int use_pred = s->ac_pred;
2952 /* XXX: Guard against dumb values of mquant */
/* NOTE(review): clamps mquant < 1 to 0, not 1 — the DC-scale tables are
 * then indexed with 0; verify this is the intended lower bound. */
2953 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
2955 /* Set DC scale - y and c use the same */
2956 s->y_dc_scale = s->y_dc_scale_table[mquant];
2957 s->c_dc_scale = s->c_dc_scale_table[mquant];
2959 /* Get DC differential */
2961 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2963 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2966 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2971 if (dcdiff == 119 /* ESC index value */)
2973 /* TODO: Optimize */
2974 if (mquant == 1) dcdiff = get_bits(gb, 10);
2975 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2976 else dcdiff = get_bits(gb, 8);
2981 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2982 else if (mquant == 2)
2983 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2985 if (get_bits(gb, 1))
2990 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2993 /* Store the quantized DC coeff, used for prediction */
2996 block[0] = dcdiff * s->y_dc_scale;
2998 block[0] = dcdiff * s->c_dc_scale;
3007 /* check if AC is needed at all and adjust direction if needed */
3008 if(!a_avail) dc_pred_dir = 1;
3009 if(!c_avail) dc_pred_dir = 0;
3010 if(!a_avail && !c_avail) use_pred = 0;
3011 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3014 scale = mquant * 2 + v->halfpq;
3016 if(dc_pred_dir) //left
3019 ac_val -= 16 * s->block_wrap[n];
/* q1/q2 as in vc1_decode_i_block_adv; intra-MB luma predictors (n = 1..3)
 * share the current quantizer. */
3021 q1 = s->current_picture.qscale_table[mb_pos];
3022 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
3023 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3024 if(n && n<4) q2 = q1;
3027 int last = 0, skip, value;
3028 const int8_t *zz_table;
3031 zz_table = vc1_simple_progressive_8x8_zz;
3034 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3038 block[zz_table[i++]] = value;
3041 /* apply AC prediction if needed */
3043 /* scale predictors if needed*/
3048 if(dc_pred_dir) { //left
3049 for(k = 1; k < 8; k++)
3050 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3052 for(k = 1; k < 8; k++)
3053 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3056 if(dc_pred_dir) { //left
3057 for(k = 1; k < 8; k++)
3058 block[k << 3] += ac_val[k];
3060 for(k = 1; k < 8; k++)
3061 block[k] += ac_val[k + 8];
3065 /* save AC coeffs for further prediction */
3066 for(k = 1; k < 8; k++) {
3067 ac_val2[k] = block[k << 3];
3068 ac_val2[k + 8] = block[k];
3071 /* scale AC coeffs */
3072 for(k = 1; k < 64; k++)
3076 block[k] += (block[k] < 0) ? -mquant : mquant;
3079 if(use_pred) i = 63;
3080 } else { // no AC coeffs
3083 memset(ac_val2, 0, 16 * 2);
3084 if(dc_pred_dir) {//left
3086 memcpy(ac_val2, ac_val, 8 * 2);
3090 for(k = 1; k < 8; k++)
3091 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3096 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3100 for(k = 1; k < 8; k++)
3101 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3106 /* apply AC prediction if needed */
3108 if(dc_pred_dir) { //left
3109 for(k = 1; k < 8; k++) {
3110 block[k << 3] = ac_val2[k] * scale;
3111 if(!v->pquantizer && block[k << 3])
3112 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3115 for(k = 1; k < 8; k++) {
3116 block[k] = ac_val2[k + 8] * scale;
3117 if(!v->pquantizer && block[k])
3118 block[k] += (block[k] < 0) ? -mquant : mquant;
3124 s->block_last_index[n] = i;
/* Decode one inter (residual) block. The transform type (8x8, two 8x4,
 * two 4x8, or four 4x4 sub-blocks) comes from the MB-level TTMB or a
 * per-block TTBLK VLC; subblkpat selects which sub-blocks carry
 * coefficients, and each coded sub-block is run/level decoded along its
 * own zigzag and inverse-transformed in place.
 * NOTE(review): elided listing — declarations (i, j, subblkpat), the
 * switch/case heads over ttblk, some off computations and the return are
 * missing from this excerpt. */
3131 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block)
3133 MpegEncContext *s = &v->s;
3134 GetBitContext *gb = &s->gb;
3137 int scale, off, idx, last, skip, value;
3138 int ttblk = ttmb & 7;
3141 ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3143 if(ttblk == TT_4X4) {
3144 subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3146 if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
3147 subblkpat = decode012(gb);
3148 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
3149 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
3150 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
3152 scale = 2 * mquant + v->halfpq;
3154 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3155 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3156 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3159 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3160 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* 8x8: single run/level pass over the whole block, then one transform. */
3168 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3172 idx = vc1_simple_progressive_8x8_zz[i++];
3173 block[idx] = value * scale;
3175 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3177 s->dsp.vc1_inv_trans_8x8(block);
/* 4x4: four sub-blocks, gated by subblkpat bits (MSB = sub-block 0). */
3180 for(j = 0; j < 4; j++) {
3181 last = subblkpat & (1 << (3 - j));
3183 off = (j & 1) * 4 + (j & 2) * 16;
3185 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3189 idx = vc1_simple_progressive_4x4_zz[i++];
3190 block[idx + off] = value * scale;
3192 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3194 if(!(subblkpat & (1 << (3 - j))))
3195 s->dsp.vc1_inv_trans_4x4(block, j);
/* 8x4: two horizontal halves; Advanced profile uses its own zigzag. */
3199 for(j = 0; j < 2; j++) {
3200 last = subblkpat & (1 << (1 - j));
3204 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3208 if(v->profile < PROFILE_ADVANCED)
3209 idx = vc1_simple_progressive_8x4_zz[i++];
3211 idx = vc1_adv_progressive_8x4_zz[i++];
3212 block[idx + off] = value * scale;
3214 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3216 if(!(subblkpat & (1 << (1 - j))))
3217 s->dsp.vc1_inv_trans_8x4(block, j);
/* 4x8: two vertical halves, same scheme. */
3221 for(j = 0; j < 2; j++) {
3222 last = subblkpat & (1 << (1 - j));
3226 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3230 if(v->profile < PROFILE_ADVANCED)
3231 idx = vc1_simple_progressive_4x8_zz[i++];
3233 idx = vc1_adv_progressive_4x8_zz[i++];
3234 block[idx + off] = value * scale;
3236 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3238 if(!(subblkpat & (1 << (1 - j))))
3239 s->dsp.vc1_inv_trans_4x8(block, j);
3247 /** Decode one P-frame MB (in Simple/Main profile)
/* Decode one P-frame macroblock (Simple/Main profile). Four top-level
 * paths: 1MV coded, 1MV skipped, 4MV coded, 4MV skipped. Intra blocks go
 * through vc1_decode_intra_block, inter residuals through
 * vc1_decode_p_block; overlap smoothing is applied for pq >= 9.
 * NOTE(review): elided listing — declarations (i, j, off, dst_idx, status,
 * coded_val, intrapred), GET_MQUANT expansions, several braces and the
 * return statements are missing from this excerpt. */
3249 static int vc1_decode_p_mb(VC1Context *v)
3251 MpegEncContext *s = &v->s;
3252 GetBitContext *gb = &s->gb;
3254 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3255 int cbp; /* cbp decoding stuff */
3256 int mqdiff, mquant; /* MB quantization */
3257 int ttmb = v->ttfrm; /* MB Transform type */
3260 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3261 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3262 int mb_has_coeffs = 1; /* last_flag */
3263 int dmv_x, dmv_y; /* Differential MV components */
3264 int index, index1; /* LUT indices */
3265 int val, sign; /* temp values */
3266 int first_block = 1;
3268 int skipped, fourmv;
3270 mquant = v->pq; /* Lossy initialization */
/* MVTYPEMB and SKIPMB may be raw (read here) or bitplane-coded (read in
 * the picture header and looked up per MB). */
3272 if (v->mv_type_is_raw)
3273 fourmv = get_bits1(gb);
3275 fourmv = v->mv_type_mb_plane[mb_pos];
3277 skipped = get_bits1(gb);
3279 skipped = v->s.mbskip_table[mb_pos];
3281 s->dsp.clear_blocks(s->block[0]);
3283 if (!fourmv) /* 1MV mode */
3287 GET_MVDATA(dmv_x, dmv_y);
3290 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3291 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3293 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3294 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3296 /* FIXME Set DC val for inter block ? */
3297 if (s->mb_intra && !mb_has_coeffs)
3300 s->ac_pred = get_bits(gb, 1);
3303 else if (mb_has_coeffs)
3305 if (s->mb_intra) s->ac_pred = get_bits(gb, 1);
3306 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3314 s->current_picture.qscale_table[mb_pos] = mquant;
3316 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3317 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table,
3318 VC1_TTMB_VLC_BITS, 2);
3319 if(!s->mb_intra) vc1_mc_1mv(v, 0);
3323 s->dc_val[0][s->block_index[i]] = 0;
/* Per-block loop: val = coded bit for block i (bit 5 is block 0). */
3325 val = ((cbp >> (5 - i)) & 1);
3326 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3327 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3329 /* check if prediction blocks A and C are available */
3330 v->a_avail = v->c_avail = 0;
3331 if(i == 2 || i == 3 || !s->first_slice_line)
3332 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3333 if(i == 1 || i == 3 || s->mb_x)
3334 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3336 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3337 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3338 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3339 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3340 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3341 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3342 if(v->pq >= 9 && v->overlap) {
3344 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
3346 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
3349 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3350 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3352 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3353 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* Skipped 1MV MB: zero block state, mark skip, still predict + MC. */
3360 for(i = 0; i < 6; i++) {
3361 v->mb_type[0][s->block_index[i]] = 0;
3362 s->dc_val[0][s->block_index[i]] = 0;
3364 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3365 s->current_picture.qscale_table[mb_pos] = 0;
3366 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
/* 4MV mode. */
3373 if (!skipped /* unskipped MB */)
3375 int intra_count = 0, coded_inter = 0;
3376 int is_intra[6], is_coded[6];
3378 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3381 val = ((cbp >> (5 - i)) & 1);
3382 s->dc_val[0][s->block_index[i]] = 0;
3389 GET_MVDATA(dmv_x, dmv_y);
3391 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3392 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3393 intra_count += s->mb_intra;
3394 is_intra[i] = s->mb_intra;
3395 is_coded[i] = mb_has_coeffs;
/* Chroma blocks are intra iff at least 3 of the 4 luma blocks are. */
3398 is_intra[i] = (intra_count >= 3);
3401 if(i == 4) vc1_mc_4mv_chroma(v);
3402 v->mb_type[0][s->block_index[i]] = is_intra[i];
3403 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
3405 // if there are no coded blocks then don't do anything more
3406 if(!intra_count && !coded_inter) return 0;
3409 s->current_picture.qscale_table[mb_pos] = mquant;
3410 /* test if block is intra and has pred */
3415 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3416 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
3421 if(intrapred)s->ac_pred = get_bits(gb, 1);
3422 else s->ac_pred = 0;
3424 if (!v->ttmbf && coded_inter)
3425 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3429 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3430 s->mb_intra = is_intra[i];
3432 /* check if prediction blocks A and C are available */
3433 v->a_avail = v->c_avail = 0;
3434 if(i == 2 || i == 3 || !s->first_slice_line)
3435 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3436 if(i == 1 || i == 3 || s->mb_x)
3437 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3439 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
3440 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3441 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3442 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3443 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3444 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3445 if(v->pq >= 9 && v->overlap) {
3447 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
3449 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
3451 } else if(is_coded[i]) {
3452 status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3453 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3455 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3456 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* Skipped 4MV MB: predict + MC each luma block, then chroma. */
3464 s->current_picture.qscale_table[mb_pos] = 0;
3465 for (i=0; i<6; i++) {
3466 v->mb_type[0][s->block_index[i]] = 0;
3467 s->dc_val[0][s->block_index[i]] = 0;
3471 vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
3472 vc1_mc_4mv_luma(v, i);
3474 vc1_mc_4mv_chroma(v);
3475 s->current_picture.qscale_table[mb_pos] = 0;
3480 /* Should never happen */
3484 /** Decode one B-frame MB (in Main profile)
/* Decode one B-frame macroblock (Main profile). Handles direct and
 * non-direct prediction (forward / backward / interpolated, chosen by
 * decode012 and BFRACTION), skipped MBs, and intra MBs; residual blocks
 * reuse vc1_decode_intra_block / vc1_decode_p_block.
 * NOTE(review): elided listing — declarations (i, j, off, dst_idx),
 * GET_MQUANT expansions, branch heads and several closing braces are
 * missing from this excerpt. */
3486 static void vc1_decode_b_mb(VC1Context *v)
3488 MpegEncContext *s = &v->s;
3489 GetBitContext *gb = &s->gb;
3491 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3492 int cbp = 0; /* cbp decoding stuff */
3493 int mqdiff, mquant; /* MB quantization */
3494 int ttmb = v->ttfrm; /* MB Transform type */
3496 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3497 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3498 int mb_has_coeffs = 0; /* last_flag */
3499 int index, index1; /* LUT indices */
3500 int val, sign; /* temp values */
3501 int first_block = 1;
3503 int skipped, direct;
3504 int dmv_x[2], dmv_y[2];
3505 int bmvtype = BMV_TYPE_BACKWARD;
3507 mquant = v->pq; /* Lossy initialization */
/* DIRECTMB and SKIPMB: raw bit here or bitplane lookup. */
3511 direct = get_bits1(gb);
3513 direct = v->direct_mb_plane[mb_pos];
3515 skipped = get_bits1(gb);
3517 skipped = v->s.mbskip_table[mb_pos];
3519 s->dsp.clear_blocks(s->block[0]);
3520 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
3521 for(i = 0; i < 6; i++) {
3522 v->mb_type[0][s->block_index[i]] = 0;
3523 s->dc_val[0][s->block_index[i]] = 0;
3525 s->current_picture.qscale_table[mb_pos] = 0;
3529 GET_MVDATA(dmv_x[0], dmv_y[0]);
3530 dmv_x[1] = dmv_x[0];
3531 dmv_y[1] = dmv_y[0];
/* Non-direct: BMVTYPE via decode012; the default direction flips around
 * the BFRACTION midpoint. */
3533 if(skipped || !s->mb_intra) {
3534 bmvtype = decode012(gb);
3537 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
3540 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
3543 bmvtype = BMV_TYPE_INTERPOLATED;
3544 dmv_x[0] = dmv_y[0] = 0;
3548 for(i = 0; i < 6; i++)
3549 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* Skipped MB: direct implies interpolated; predict + MC, nothing else. */
3552 if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
3553 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3554 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3558 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3562 s->current_picture.qscale_table[mb_pos] = mquant;
3564 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3565 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
3566 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3567 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3569 if(!mb_has_coeffs && !s->mb_intra) {
3570 /* no coded blocks - effectively skipped */
3571 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3572 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3575 if(s->mb_intra && !mb_has_coeffs) {
3577 s->current_picture.qscale_table[mb_pos] = mquant;
3578 s->ac_pred = get_bits1(gb);
3580 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* Interpolated non-direct MBs carry a second MV. */
3582 if(bmvtype == BMV_TYPE_INTERPOLATED) {
3583 GET_MVDATA(dmv_x[0], dmv_y[0]);
3584 if(!mb_has_coeffs) {
3585 /* interpolated skipped block */
3586 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3587 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3591 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3593 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3596 s->ac_pred = get_bits1(gb);
3597 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3599 s->current_picture.qscale_table[mb_pos] = mquant;
3600 if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3601 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Per-block residual loop — mirrors the P-MB block loop. */
3607 s->dc_val[0][s->block_index[i]] = 0;
3609 val = ((cbp >> (5 - i)) & 1);
3610 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3611 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3613 /* check if prediction blocks A and C are available */
3614 v->a_avail = v->c_avail = 0;
3615 if(i == 2 || i == 3 || !s->first_slice_line)
3616 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3617 if(i == 1 || i == 3 || s->mb_x)
3618 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3620 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3621 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3622 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3623 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3624 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3625 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3627 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3628 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3630 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3631 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3636 /** Decode blocks of I-frame
/* Decode all macroblocks of a Simple/Main profile I frame: choose the
 * AC coding sets from the picture-header table indices, then per MB decode
 * CBP (with coded-block prediction), the six blocks, and apply overlap
 * smoothing across block edges when pq >= 9 and OVERLAP is set.
 * NOTE(review): elided listing — declarations (k, j, cbp, val, mb_pos,
 * coded_val), the case labels in both switches, and loop-closing braces
 * are missing from this excerpt. */
3638 static void vc1_decode_i_blocks(VC1Context *v)
3641 MpegEncContext *s = &v->s;
3646 /* select codingmode used for VLC tables selection */
3647 switch(v->y_ac_table_index){
3649 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3652 v->codingset = CS_HIGH_MOT_INTRA;
3655 v->codingset = CS_MID_RATE_INTRA;
3659 switch(v->c_ac_table_index){
3661 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3664 v->codingset2 = CS_HIGH_MOT_INTER;
3667 v->codingset2 = CS_MID_RATE_INTER;
3671 /* Set DC scale - y and c use the same */
3672 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3673 s->c_dc_scale = s->c_dc_scale_table[v->pq];
3676 s->mb_x = s->mb_y = 0;
3678 s->first_slice_line = 1;
3679 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3680 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3681 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3682 ff_init_block_index(s);
3683 ff_update_block_index(s);
3684 s->dsp.clear_blocks(s->block[0]);
/* NOTE(review): mb_pos computed with s->mb_width here, while the other MB
 * decoders index qscale_table with s->mb_stride — verify intentional. */
3685 mb_pos = s->mb_x + s->mb_y * s->mb_width;
3686 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3687 s->current_picture.qscale_table[mb_pos] = v->pq;
3688 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3689 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3691 // do actual MB decoding and displaying
3692 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
3693 v->s.ac_pred = get_bits(&v->s.gb, 1);
3695 for(k = 0; k < 6; k++) {
3696 val = ((cbp >> (5 - k)) & 1);
/* Coded-block flag is XORed with its spatial prediction. */
3699 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3703 cbp |= val << (5 - k);
3705 vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
3707 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3708 if(v->pq >= 9 && v->overlap) {
3709 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3713 vc1_put_block(v, s->block);
/* Overlap smoothing: vertical edges first (top row only when a row above
 * exists), then horizontal edges (left column only when a MB to the left
 * exists); chroma is skipped under CODEC_FLAG_GRAY. */
3714 if(v->pq >= 9 && v->overlap) {
3715 if(!s->first_slice_line) {
3716 s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
3717 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
3718 if(!(s->flags & CODEC_FLAG_GRAY)) {
3719 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
3720 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
3723 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
3724 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3726 s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
3727 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
3728 if(!(s->flags & CODEC_FLAG_GRAY)) {
3729 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
3730 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
3733 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
3734 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3737 if(get_bits_count(&s->gb) > v->bits) {
3738 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3742 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3743 s->first_slice_line = 0;
3747 /** Decode blocks of I-frame for advanced profile
/* Decode all macroblocks of an Advanced-profile I frame. Adds over the
 * Simple/Main loop: ACPRED and conditional-overlap flags may come from
 * raw bits or bitplanes, per-MB quantizer (mquant), and per-block A/C
 * availability feeding vc1_decode_i_block_adv.
 * NOTE(review): elided listing — declarations (k, j, cbp, val, mb_pos,
 * mquant, overlap, coded_val), GET_MQUANT expansion, switch case labels
 * and loop-closing braces are missing from this excerpt. */
3749 static void vc1_decode_i_blocks_adv(VC1Context *v)
3752 MpegEncContext *s = &v->s;
3759 GetBitContext *gb = &s->gb;
3761 /* select codingmode used for VLC tables selection */
3762 switch(v->y_ac_table_index){
3764 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3767 v->codingset = CS_HIGH_MOT_INTRA;
3770 v->codingset = CS_MID_RATE_INTRA;
3774 switch(v->c_ac_table_index){
3776 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3779 v->codingset2 = CS_HIGH_MOT_INTER;
3782 v->codingset2 = CS_MID_RATE_INTER;
3786 /* Set DC scale - y and c use the same */
3787 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3788 s->c_dc_scale = s->c_dc_scale_table[v->pq];
3791 s->mb_x = s->mb_y = 0;
3793 s->first_slice_line = 1;
3794 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3795 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3796 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3797 ff_init_block_index(s);
3798 ff_update_block_index(s);
3799 s->dsp.clear_blocks(s->block[0]);
3800 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3801 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3802 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3803 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3805 // do actual MB decoding and displaying
3806 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
/* ACPRED: raw bit per MB, or looked up from the decoded bitplane. */
3807 if(v->acpred_is_raw)
3808 v->s.ac_pred = get_bits(&v->s.gb, 1);
3810 v->s.ac_pred = v->acpred_plane[mb_pos];
/* CONDOVER: per-MB overlap flag only in SELECT mode; otherwise all/none. */
3812 if(v->condover == CONDOVER_SELECT) {
3813 if(v->overflg_is_raw)
3814 overlap = get_bits(&v->s.gb, 1);
3816 overlap = v->over_flags_plane[mb_pos];
3818 overlap = (v->condover == CONDOVER_ALL);
3822 s->current_picture.qscale_table[mb_pos] = mquant;
3824 for(k = 0; k < 6; k++) {
3825 val = ((cbp >> (5 - k)) & 1);
3828 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3832 cbp |= val << (5 - k);
/* Neighbour availability: row above (or intra-MB for k==2,3), MB to the
 * left (or intra-MB for k==1,3). */
3834 v->a_avail = !s->first_slice_line || (k==2 || k==3);
3835 v->c_avail = !!s->mb_x || (k==1 || k==3);
3837 vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
3839 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3840 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3843 vc1_put_block(v, s->block);
3845 if(!s->first_slice_line) {
3846 s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
3847 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
3848 if(!(s->flags & CODEC_FLAG_GRAY)) {
3849 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
3850 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
3853 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
3854 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3856 s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
3857 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
3858 if(!(s->flags & CODEC_FLAG_GRAY)) {
3859 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
3860 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
3863 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
3864 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3867 if(get_bits_count(&s->gb) > v->bits) {
3868 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3872 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3873 s->first_slice_line = 0;
/** Decode the macroblocks of a P (predicted) frame.
 * First selects the intra (codingset) and inter (codingset2) AC VLC coding
 * sets from c_ac_table_index, then walks the whole macroblock grid.
 * NOTE(review): case labels/closing braces appear elided in this excerpt.
 */
3877 static void vc1_decode_p_blocks(VC1Context *v)
3879     MpegEncContext *s = &v->s;
3881     /* select codingmode used for VLC tables selection */
3882     switch(v->c_ac_table_index){
3884         v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3887         v->codingset = CS_HIGH_MOT_INTRA;
3890         v->codingset = CS_MID_RATE_INTRA;
/* a second coding set, chosen by the same index, is used for inter blocks */
3894     switch(v->c_ac_table_index){
3896         v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3899         v->codingset2 = CS_HIGH_MOT_INTER;
3902         v->codingset2 = CS_MID_RATE_INTER;
/* register the whole frame as a single error-resilience slice up front */
3906     ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3907     s->first_slice_line = 1;
3908     for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3909         for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3910             ff_init_block_index(s);
3911             ff_update_block_index(s);
3912             s->dsp.clear_blocks(s->block[0]);
/* stop decoding if the bit reader ran past the buffer (or wrapped negative) */
3915             if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3916                 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3920         ff_draw_horiz_band(s, s->mb_y * 16, 16);
3921         s->first_slice_line = 0;
/** Decode the macroblocks of a B (bidirectionally predicted) frame.
 * Mirrors vc1_decode_p_blocks: pick intra/inter AC coding sets, then loop
 * over the macroblock grid with the same bit-overconsumption guard.
 * NOTE(review): case labels/closing braces appear elided in this excerpt.
 */
3925 static void vc1_decode_b_blocks(VC1Context *v)
3927     MpegEncContext *s = &v->s;
3929     /* select codingmode used for VLC tables selection */
3930     switch(v->c_ac_table_index){
3932         v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3935         v->codingset = CS_HIGH_MOT_INTRA;
3938         v->codingset = CS_MID_RATE_INTRA;
/* a second coding set, chosen by the same index, is used for inter blocks */
3942     switch(v->c_ac_table_index){
3944         v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3947         v->codingset2 = CS_HIGH_MOT_INTER;
3950         v->codingset2 = CS_MID_RATE_INTER;
/* register the whole frame as a single error-resilience slice up front */
3954     ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3955     s->first_slice_line = 1;
3956     for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3957         for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3958             ff_init_block_index(s);
3959             ff_update_block_index(s);
3960             s->dsp.clear_blocks(s->block[0]);
/* stop decoding if the bit reader ran past the buffer (or wrapped negative) */
3963             if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3964                 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3968         ff_draw_horiz_band(s, s->mb_y * 16, 16);
3969         s->first_slice_line = 0;
/** Decode a skipped P frame: the picture is an exact copy of the previous
 * reference, so each macroblock row of luma and chroma is memcpy'd from
 * last_picture instead of being decoded.
 */
3973 static void vc1_decode_skip_blocks(VC1Context *v)
3975     MpegEncContext *s = &v->s;
3977     ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3978     s->first_slice_line = 1;
3979     for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3981         ff_init_block_index(s);
3982         ff_update_block_index(s);
/* copy one 16-line luma row and the matching 8-line chroma rows */
3983         memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
3984         memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3985         memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3986         ff_draw_horiz_band(s, s->mb_y * 16, 16);
3987         s->first_slice_line = 0;
/* NOTE(review): forced to P_TYPE — presumably so the copied frame is kept
 * as a reference like a normal P frame; confirm against callers */
3989     s->pict_type = P_TYPE;
/** Picture-layer entry point: dispatch block decoding on picture type.
 * I frames use the advanced-profile decoder when applicable; P frames may
 * be fully skipped; B frames fall back to intra decoding in one branch.
 * NOTE(review): the switch's case labels appear elided in this excerpt.
 */
3992 static void vc1_decode_blocks(VC1Context *v)
/* reset escape-mode level length before each picture */
3995     v->s.esc3_level_length = 0;
3997     switch(v->s.pict_type) {
3999         if(v->profile == PROFILE_ADVANCED)
4000             vc1_decode_i_blocks_adv(v);
4002             vc1_decode_i_blocks(v);
4005         if(v->p_frame_skipped)
4006             vc1_decode_skip_blocks(v);
4008             vc1_decode_p_blocks(v);
4012             vc1_decode_i_blocks(v);
4014             vc1_decode_b_blocks(v);
4020 /** Initialize a VC1/WMV3 decoder
4021 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4022 * @todo TODO: Decipher remaining bits in extra_data
4024 static int vc1_decode_init(AVCodecContext *avctx)
4026     VC1Context *v = avctx->priv_data;
4027     MpegEncContext *s = &v->s;
/* a sequence header in extradata is mandatory for both WMV3 and VC-1 */
4030     if (!avctx->extradata_size || !avctx->extradata) return -1;
4031     if (!(avctx->flags & CODEC_FLAG_GRAY))
4032         avctx->pix_fmt = PIX_FMT_YUV420P;
4034         avctx->pix_fmt = PIX_FMT_GRAY8;
/* decoder always uses edge emulation, so force the flag on both contexts */
4036     avctx->flags |= CODEC_FLAG_EMU_EDGE;
4037     v->s.flags |= CODEC_FLAG_EMU_EDGE;
4039     if(ff_h263_decode_init(avctx) < 0)
4041     if (vc1_init_common(v) < 0) return -1;
4043     avctx->coded_width = avctx->width;
4044     avctx->coded_height = avctx->height;
4045     if (avctx->codec_id == CODEC_ID_WMV3)
4049         // looks like WMV3 has a sequence header stored in the extradata
4050         // advanced sequence header may be before the first frame
4051         // the last byte of the extradata is a version number, 1 for the
4052         // samples we can decode
4054         init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
4056         if (decode_sequence_header(avctx, &gb) < 0)
/* report any unparsed extradata bits (or an over-read) for debugging */
4059         count = avctx->extradata_size*8 - get_bits_count(&gb);
4062             av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
4063             count, get_bits(&gb, count));
4067             av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
4069     } else { // VC1/WVC1
4070         int edata_size = avctx->extradata_size;
4071         uint8_t *edata = avctx->extradata;
4073         if(avctx->extradata_size < 16) {
4074             av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", edata_size);
/* scan extradata for the sequence-header start code (0x0000010F) */
4077         while(edata_size > 8) {
4078             // test if we've found header
4079             if(BE_32(edata) == 0x0000010F) {
4088         init_get_bits(&gb, edata, edata_size*8);
4090         if (decode_sequence_header(avctx, &gb) < 0)
/* then scan for the entry-point start code (0x0000010E) */
4093         while(edata_size > 8) {
4094             // test if we've found entry point
4095             if(BE_32(edata) == 0x0000010E) {
4104         init_get_bits(&gb, edata, edata_size*8);
4106         if (decode_entry_point(avctx, &gb) < 0)
4109     avctx->has_b_frames= !!(avctx->max_b_frames);
4110     s->low_delay = !avctx->has_b_frames;
/* macroblock grid dimensions, rounded up to whole 16x16 blocks */
4112     s->mb_width = (avctx->coded_width+15)>>4;
4113     s->mb_height = (avctx->coded_height+15)>>4;
4115     /* Allocate mb bitplanes */
4116     v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4117     v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4118     v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
4119     v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
/* NOTE(review): av_malloc results are not checked here — OOM would crash
 * later; confirm whether callers expect init to validate allocations */
4121     /* allocate block type info in that way so it could be used with s->block_index[] */
4122     v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
4123     v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
4124     v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
4125     v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
4127     /* Init coded blocks info */
4128     if (v->profile == PROFILE_ADVANCED)
4130 //        if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
4132 //        if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
4140 /** Decode a VC1/WMV3 frame
4141 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4143 static int vc1_decode_frame(AVCodecContext *avctx,
4144 void *data, int *data_size,
4145 uint8_t *buf, int buf_size)
4147     VC1Context *v = avctx->priv_data;
4148     MpegEncContext *s = &v->s;
4149     AVFrame *pict = data;
4150     uint8_t *buf2 = NULL;
4152     /* no supplementary picture */
4153     if (buf_size == 0) {
4154         /* special case for last picture */
4155         if (s->low_delay==0 && s->next_picture_ptr) {
4156             *pict= *(AVFrame*)s->next_picture_ptr;
4157             s->next_picture_ptr= NULL;
4159         *data_size = sizeof(AVFrame);
4165     // we need to set current_picture_ptr before reading the header, otherwise we can't store anything in there
4166     if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
4167         int i= ff_find_unused_picture(s, 0);
4168         s->current_picture_ptr= &s->picture[i];
4171     //for advanced profile we need to unescape buffer
4172     if (avctx->codec_id == CODEC_ID_VC1) {
4174         buf2 = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
/* strip 0x03 emulation-prevention bytes following a 00 00 pair */
4176         for(i = 0; i < buf_size; i++) {
4177             if(buf[i] == 3 && i >= 2 && !buf[i-1] && !buf[i-2] && i < buf_size-1 && buf[i+1] < 4) {
4178                 buf2[buf_size2++] = buf[i+1];
4181                 buf2[buf_size2++] = buf[i];
4183         init_get_bits(&s->gb, buf2, buf_size2*8);
4185         init_get_bits(&s->gb, buf, buf_size*8);
4186     // do parse frame header
4187     if(v->profile < PROFILE_ADVANCED) {
4188         if(vc1_parse_frame_header(v, &s->gb) == -1) {
4193         if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
4199     if(s->pict_type != I_TYPE && !v->res_rtm_flag){
4205     s->current_picture.pict_type= s->pict_type;
4206     s->current_picture.key_frame= s->pict_type == I_TYPE;
4208     /* skip B-frames if we don't have reference frames */
4209     if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){
4211         return -1;//buf_size;
4213     /* skip b frames if we are in a hurry */
4214     if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size;
4215     if(   (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
4216        || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
4217        ||  avctx->skip_frame >= AVDISCARD_ALL) {
4221     /* skip everything if we are in a hurry>=5 */
4222     if(avctx->hurry_up>=5) {
4224         return -1;//buf_size;
4227     if(s->next_p_frame_damaged){
4228         if(s->pict_type==B_TYPE)
4231             s->next_p_frame_damaged=0;
4234     if(MPV_frame_start(s, avctx) < 0) {
4239     ff_er_frame_start(s);
/* remember the total bit budget so block decoders can detect over-read */
4241     v->bits = buf_size * 8;
4242     vc1_decode_blocks(v);
4243 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8);
4244 //  if(get_bits_count(&s->gb) > buf_size * 8)
4250 assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
4251 assert(s->current_picture.pict_type == s->pict_type);
/* B frames and low-delay streams output the current picture; otherwise
 * output the delayed reference (last_picture) */
4252     if (s->pict_type == B_TYPE || s->low_delay) {
4253         *pict= *(AVFrame*)s->current_picture_ptr;
4254     } else if (s->last_picture_ptr != NULL) {
4255         *pict= *(AVFrame*)s->last_picture_ptr;
4258     if(s->last_picture_ptr || s->low_delay){
4259         *data_size = sizeof(AVFrame);
4260         ff_print_debug_info(s, pict);
4263     /* Return the Picture timestamp as the frame number */
4264     /* we subtract 1 because it is added in utils.c */
4265     avctx->frame_number = s->picture_number - 1;
4272 /** Close a VC1/WMV3 decoder
4273 * @warning Initial try at using MpegEncContext stuff
4275 static int vc1_decode_end(AVCodecContext *avctx)
4277     VC1Context *v = avctx->priv_data;
/* release HRD parameter buffers first */
4279     av_freep(&v->hrd_rate);
4280     av_freep(&v->hrd_buffer);
/* shut down the shared mpegvideo state before freeing our own planes */
4281     MPV_common_end(&v->s);
/* bitplanes and block-type storage allocated in vc1_decode_init */
4282     av_freep(&v->mv_type_mb_plane);
4283     av_freep(&v->direct_mb_plane);
4284     av_freep(&v->acpred_plane);
4285     av_freep(&v->over_flags_plane);
4286     av_freep(&v->mb_type_base);
4291 AVCodec vc1_decoder = {
4304 AVCodec wmv3_decoder = {