/*
 * VC-1 and WMV3 decoder
 * Copyright (c) 2006 Konstantin Shishkov
 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VC-1 and WMV3 decoder
 */
30 #include "mpegvideo.h"
32 #include "vc1acdata.h"
37 extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
38 extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
39 extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
40 #define MB_INTRA_VLC_BITS 9
41 extern VLC ff_msmp4_mb_i_vlc;
42 extern const uint16_t ff_msmp4_mb_i_table[64][2];
/* NOTE(review): tentative (zero-initialized) definition; no initializer or use
 * is visible in this chunk -- presumably filled/used elsewhere. TODO confirm. */
45 static const uint16_t table_mb_intra[64][2];
/* NOTE(review): the enum definitions below survive only partially in this
 * chunk -- the "enum ... {" openers, several enumerators and the closing
 * "};" lines were lost during extraction.  Do not infer enumerator values
 * from what is visible here; consult the full file. */
48 /** Available Profiles */
53 PROFILE_COMPLEX, ///< TODO: WMV9 specific
58 /** Sequence quantizer mode */
61 QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level
62 QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level
63 QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames
64 QUANT_UNIFORM ///< Uniform quant used for all frames
68 /** Where quant can be changed */
72 DQPROFILE_DOUBLE_EDGES,
73 DQPROFILE_SINGLE_EDGE,
78 /** @name Where quant can be changed
89 /** Which pair of edges is quantized with ALTPQUANT */
92 DQDOUBLE_BEDGE_TOPLEFT,
93 DQDOUBLE_BEDGE_TOPRIGHT,
94 DQDOUBLE_BEDGE_BOTTOMRIGHT,
95 DQDOUBLE_BEDGE_BOTTOMLEFT
99 /** MV modes for P frames */
102 MV_PMODE_1MV_HPEL_BILIN,
106 MV_PMODE_INTENSITY_COMP
110 /** @name MV types for B frames */
115 BMV_TYPE_INTERPOLATED
119 /** @name Block types for P/B frames */
121 enum TransformTypes {
125 TT_8X4, //Both halves
128 TT_4X8, //Both halves
133 /** Table for conversion between TTBLK and TTMB */
134 static const int ttblk_to_tt[3][8] = {
135 { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
136 { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
137 { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
/* Maps the 2-bit frame-level TTFRM index directly to a transform type. */
140 static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };
142 /** MV P mode - the 5th element is only used for mode 1 */
143 static const uint8_t mv_pmode_table[2][5] = {
144 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
145 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
147 static const uint8_t mv_pmode_table2[2][4] = {
148 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
149 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
152 /** One more frame type */
/* Frame-rate numerator/denominator candidates read from the bitstream's
 * display-info framerate fields. */
static const int fps_nr[5] = { 24, 25, 30, 50, 60 };
static const int fps_dr[2] = { 1000, 1001 };
/** PQINDEX -> PQUANT reconstruction tables.
 * Row 0: implicit quantizer; row 1: explicit uniform; row 2: explicit
 * non-uniform.  All 3x32 values were intact in the fragment; only the
 * row/array closers (lost lines) had to be restored. */
static const uint8_t pquant_table[3][32] = {
  { /* Implicit quantizer */
     0,  1,  2,  3,  4,  5,  6,  7,  8,  6,  7,  8,  9, 10, 11, 12,
    13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
  },
  { /* Explicit quantizer, pquantizer uniform */
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
  },
  { /* Explicit quantizer, pquantizer non-uniform */
     0,  1,  1,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,
    14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
  }
};
/* NOTE(review): static VLC storage + table-size defines for the decoder.
 * Several lines of this region were lost, including the AC_VLC_BITS
 * #define that vc1_init_common() uses and the opener of the coding-set
 * enum whose first member survives at the bottom. */
172 /** @name VC-1 VLC tables and defines
173 * @todo TODO move this into the context
176 #define VC1_BFRACTION_VLC_BITS 7
177 static VLC vc1_bfraction_vlc;
178 #define VC1_IMODE_VLC_BITS 4
179 static VLC vc1_imode_vlc;
180 #define VC1_NORM2_VLC_BITS 3
181 static VLC vc1_norm2_vlc;
182 #define VC1_NORM6_VLC_BITS 9
183 static VLC vc1_norm6_vlc;
184 /* Could be optimized, one table only needs 8 bits */
185 #define VC1_TTMB_VLC_BITS 9 //12
186 static VLC vc1_ttmb_vlc[3];
187 #define VC1_MV_DIFF_VLC_BITS 9 //15
188 static VLC vc1_mv_diff_vlc[4];
189 #define VC1_CBPCY_P_VLC_BITS 9 //14
190 static VLC vc1_cbpcy_p_vlc[4];
191 #define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
192 static VLC vc1_4mv_block_pattern_vlc[4];
193 #define VC1_TTBLK_VLC_BITS 5
194 static VLC vc1_ttblk_vlc[3];
195 #define VC1_SUBBLKPAT_VLC_BITS 6
196 static VLC vc1_subblkpat_vlc[3];
/* NOTE(review): the AC_VLC_BITS #define (used in vc1_init_common) was on a lost line. */
198 static VLC vc1_ac_coeff_table[8];
/* NOTE(review): first member of a coding-set enum whose "enum ... {" opener was lost. */
202 CS_HIGH_MOT_INTRA = 0,
/* NOTE(review): the VC1Context struct definition below is incomplete in this
 * chunk -- several member lines, some doc-comment delimiters, and the closing
 * "} VC1Context;" were lost during extraction.  The surviving members and
 * their ///< docs are reproduced byte-for-byte. */
212 /** @name Overlap conditions for Advanced Profile */
223 * @fixme Change size wherever another size is more efficient
224 * Many members are only used for Advanced Profile
226 typedef struct VC1Context{
231 /** Simple/Main Profile sequence header */
233 int res_sm; ///< reserved, 2b
234 int res_x8; ///< reserved
235 int multires; ///< frame-level RESPIC syntax element present
236 int res_fasttx; ///< reserved, always 1
237 int res_transtab; ///< reserved, always 0
238 int rangered; ///< RANGEREDFRM (range reduction) syntax element present
240 int res_rtm_flag; ///< reserved, set to 1
241 int reserved; ///< reserved
244 /** Advanced Profile */
246 int level; ///< 3bits, for Advanced/Simple Profile, provided by TS layer
247 int chromaformat; ///< 2bits, 2=4:2:0, only defined
248 int postprocflag; ///< Per-frame processing suggestion flag present
249 int broadcast; ///< TFF/RFF present
250 int interlace; ///< Progressive/interlaced (RPTFTM syntax element)
251 int tfcntrflag; ///< TFCNTR present
252 int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
253 int extended_dmv; ///< Additional extended dmv range at P/B frame-level
254 int color_prim; ///< 8bits, chroma coordinates of the color primaries
255 int transfer_char; ///< 8bits, Opto-electronic transfer characteristics
256 int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
257 int hrd_param_flag; ///< Presence of Hypothetical Reference
258 ///< Decoder parameters
259 int psf; ///< Progressive Segmented Frame
262 /** Sequence header data for all Profiles
263 * TODO: choose between ints, uint8_ts and monobit flags
266 int profile; ///< 2bits, Profile
267 int frmrtq_postproc; ///< 3bits,
268 int bitrtq_postproc; ///< 5bits, quantized framerate-based postprocessing strength
269 int fastuvmc; ///< Rounding of qpel vector to hpel ? (not in Simple)
270 int extended_mv; ///< Ext MV in P/B (not in Simple)
271 int dquant; ///< How qscale varies with MBs, 2bits (not in Simple)
272 int vstransform; ///< variable-size [48]x[48] transform type + info
273 int overlap; ///< overlapped transforms in use
274 int quantizer_mode; ///< 2bits, quantizer mode used for sequence, see QUANT_*
275 int finterpflag; ///< INTERPFRM present
278 /** Frame decoding info for all profiles */
280 uint8_t mv_mode; ///< MV coding monde
281 uint8_t mv_mode2; ///< Secondary MV coding mode (B frames)
282 int k_x; ///< Number of bits for MVs (depends on MV range)
283 int k_y; ///< Number of bits for MVs (depends on MV range)
284 int range_x, range_y; ///< MV range
285 uint8_t pq, altpq; ///< Current/alternate frame quantizer scale
286 /** pquant parameters */
293 /** AC coding set indexes
294 * @see 8.1.1.10, p(1)10
297 int c_ac_table_index; ///< Chroma index from ACFRM element
298 int y_ac_table_index; ///< Luma index from AC2FRM element
300 int ttfrm; ///< Transform type info present at frame level
301 uint8_t ttmbf; ///< Transform type flag
302 uint8_t ttblk4x4; ///< Value of ttblk which indicates a 4x4 transform
303 int codingset; ///< index of current table set from 11.8 to use for luma block decoding
304 int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding
305 int pqindex; ///< raw pqindex used in coding set selection
306 int a_avail, c_avail; ///< NOTE(review): presumably above/left neighbour availability -- confirm against prediction code
307 uint8_t *mb_type_base, *mb_type[3]; ///< NOTE(review): per-block MB type planes; mb_type_base owns the storage -- confirm
310 /** Luma compensation parameters */
315 int16_t bfraction; ///< Relative position % anchors=> how to scale MVs
316 uint8_t halfpq; ///< Uniform quant over image and qp+.5
317 uint8_t respic; ///< Frame-level flag for resized images
318 int buffer_fullness; ///< HRD info
320 * -# 0 -> [-64n 63.f] x [-32, 31.f]
321 * -# 1 -> [-128, 127.f] x [-64, 63.f]
322 * -# 2 -> [-512, 511.f] x [-128, 127.f]
323 * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
326 uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use
327 VLC *cbpcy_vlc; ///< CBPCY VLC table
328 int tt_index; ///< Index for Transform Type tables
329 uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
330 uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs
331 int mv_type_is_raw; ///< mv type mb plane is not coded
332 int dmb_is_raw; ///< direct mb plane is raw
333 int skip_is_raw; ///< skip mb plane is not coded
334 uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
335 int rnd; ///< rounding control
337 /** Frame decoding info for S/M profiles only */
339 uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128)
343 /** Frame decoding info for Advanced profile */
345 uint8_t fcm; ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
346 uint8_t numpanscanwin; ///< NOTE(review): presumably the NUMPANSCANWIN count gated by panscanflag -- confirm
348 uint8_t rptfrm, tff, rff; ///< NOTE(review): presumably repeat-frame / top-field-first / repeat-first-field flags (see broadcast) -- confirm
351 uint16_t bottomrightx;
352 uint16_t bottomrighty;
355 int hrd_num_leaky_buckets;
356 uint8_t bit_rate_exponent;
357 uint8_t buffer_size_exponent;
358 uint8_t* acpred_plane; ///< AC prediction flags bitplane
360 uint8_t* over_flags_plane; ///< Overflags bitplane
363 uint16_t *hrd_rate, *hrd_buffer;
364 uint8_t *hrd_fullness;
365 uint8_t range_mapy_flag;
366 uint8_t range_mapuv_flag;
/* NOTE(review): this fragment of get_prefix() mixes the one active loop
 * (orig line 388) with two commented-out alternative implementations (a
 * manual while-loop and a cached-reader version using UPDATE_CACHE /
 * GET_CACHE).  Opening/closing braces and the comment delimiters around
 * the dead code were lost; do not treat the cached-reader lines as live. */
376 * Get unary code of limited length
377 * @fixme FIXME Slow and ugly
378 * @param gb GetBitContext
379 * @param[in] stop The bitstop value (unary code of 1's or 0's)
380 * @param[in] len Maximum length
381 * @return Unary length/index
383 static int get_prefix(GetBitContext *gb, int stop, int len)
388 for(i = 0; i < len && get_bits1(gb) != stop; i++);
390 /* int i = 0, tmp = !stop;
392 while (i != len && tmp != stop)
394 tmp = get_bits(gb, 1);
397 if (i == len && tmp != stop) return len+1;
404 UPDATE_CACHE(re, gb);
405 buf=GET_CACHE(re, gb); //Still not sure
406 if (stop) buf = ~buf;
408 log= av_log2(-buf); //FIXME: -?
410 LAST_SKIP_BITS(re, gb, log+1);
411 CLOSE_READER(re, gb);
415 LAST_SKIP_BITS(re, gb, limit);
416 CLOSE_READER(re, gb);
421 static inline int decode210(GetBitContext *gb){
427 return 2 - get_bits1(gb);
/* NOTE(review): fragment of vc1_init_common().  The function opener, the
 * static run-once guard (if any), and the for(i = ...) loop headers that
 * group the init_vlc() calls for the [3]- and [4]-element table arrays were
 * lost; the surviving calls are reproduced byte-for-byte.  AC_VLC_BITS is
 * used below but its #define was also on a lost line. */
431 * Init VC-1 specific tables and VC1Context members
432 * @param v The VC1Context to initialize
435 static int vc1_init_common(VC1Context *v)
440 v->hrd_rate = v->hrd_buffer = NULL;
/* one-shot VLC tables: single instances */
446 init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
447 vc1_bfraction_bits, 1, 1,
448 vc1_bfraction_codes, 1, 1, 1);
449 init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
450 vc1_norm2_bits, 1, 1,
451 vc1_norm2_codes, 1, 1, 1);
452 init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
453 vc1_norm6_bits, 1, 1,
454 vc1_norm6_codes, 2, 2, 1);
455 init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
456 vc1_imode_bits, 1, 1,
457 vc1_imode_codes, 1, 1, 1);
/* NOTE(review): calls below index [i] -- originally inside for-loops whose headers were lost */
460 init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
461 vc1_ttmb_bits[i], 1, 1,
462 vc1_ttmb_codes[i], 2, 2, 1);
463 init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
464 vc1_ttblk_bits[i], 1, 1,
465 vc1_ttblk_codes[i], 1, 1, 1);
466 init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
467 vc1_subblkpat_bits[i], 1, 1,
468 vc1_subblkpat_codes[i], 1, 1, 1);
472 init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
473 vc1_4mv_block_pattern_bits[i], 1, 1,
474 vc1_4mv_block_pattern_codes[i], 1, 1, 1);
475 init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
476 vc1_cbpcy_p_bits[i], 1, 1,
477 vc1_cbpcy_p_codes[i], 2, 2, 1);
478 init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
479 vc1_mv_diff_bits[i], 1, 1,
480 vc1_mv_diff_codes[i], 2, 2, 1);
483 init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
484 &vc1_ac_tables[i][0][1], 8, 4,
485 &vc1_ac_tables[i][0][0], 8, 4, 1);
486 init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
487 &ff_msmp4_mb_i_table[0][1], 4, 2,
488 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
/* default context state */
493 v->mvrange = 0; /* 7.1.1.18, p80 */
498 /***********************************************************************/
500 * @defgroup bitplane VC9 Bitplane decoding
505 /** @addtogroup bitplane
518 /** @} */ //imode defines
520 /** Decode rows by checking if they are skipped
521 * @param plane Buffer to store decoded bits
522 * @param[in] width Width of this buffer
523 * @param[in] height Height of this buffer
524 * @param[in] stride of this buffer
526 static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
529 for (y=0; y<height; y++){
530 if (!get_bits(gb, 1)) //rowskip
531 memset(plane, 0, width);
533 for (x=0; x<width; x++)
534 plane[x] = get_bits(gb, 1);
539 /** Decode columns by checking if they are skipped
540 * @param plane Buffer to store decoded bits
541 * @param[in] width Width of this buffer
542 * @param[in] height Height of this buffer
543 * @param[in] stride of this buffer
544 * @fixme FIXME: Optimize
546 static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
549 for (x=0; x<width; x++){
550 if (!get_bits(gb, 1)) //colskip
551 for (y=0; y<height; y++)
554 for (y=0; y<height; y++)
555 plane[y*stride] = get_bits(gb, 1);
/* NOTE(review): fragment of bitplane_decoding().  The switch(imode) skeleton,
 * several case labels (raw / norm-2 / diff-2 / norm-6 / diff-6 / rowskip /
 * colskip), the IMODE_* enumerators they use, and various braces were lost;
 * the surviving statements are reproduced byte-for-byte.  Reads the bitplane
 * header (invert bit + imode VLC), fills `data` (mb_width x mb_height, with
 * mb_stride row pitch), applies the differential operator for the DIFF modes,
 * optionally inverts, and returns (imode<<1)+invert. */
560 /** Decode a bitplane's bits
561 * @param bp Bitplane where to store the decode bits
562 * @param v VC-1 context for bit reading and logging
564 * @fixme FIXME: Optimize
566 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
568 GetBitContext *gb = &v->s.gb;
570 int imode, x, y, code, offset;
571 uint8_t invert, *planep = data;
572 int width, height, stride;
574 width = v->s.mb_width;
575 height = v->s.mb_height;
576 stride = v->s.mb_stride;
577 invert = get_bits(gb, 1);
578 imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
/* raw mode: plane bits arrive in the MB layer instead */
584 //Data is actually read in the MB layer (same for all tests == "raw")
585 *raw_flag = 1; //invert ignored
/* norm-2 mode: pairs of bits over the plane treated as one long line */
589 if ((height * width) & 1)
591 *planep++ = get_bits(gb, 1);
595 // decode bitplane as one long line
596 for (y = offset; y < height * width; y += 2) {
597 code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
598 *planep++ = code & 1;
600 if(offset == width) {
602 planep += stride - width;
604 *planep++ = code >> 1;
606 if(offset == width) {
608 planep += stride - width;
/* norm-6 mode: 2x3 or 3x2 tiles, leftovers handled by row/colskip */
614 if(!(height % 3) && (width % 3)) { // use 2x3 decoding
615 for(y = 0; y < height; y+= 3) {
616 for(x = width & 1; x < width; x += 2) {
617 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
619 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
622 planep[x + 0] = (code >> 0) & 1;
623 planep[x + 1] = (code >> 1) & 1;
624 planep[x + 0 + stride] = (code >> 2) & 1;
625 planep[x + 1 + stride] = (code >> 3) & 1;
626 planep[x + 0 + stride * 2] = (code >> 4) & 1;
627 planep[x + 1 + stride * 2] = (code >> 5) & 1;
629 planep += stride * 3;
631 if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
633 planep += (height & 1) * stride;
634 for(y = height & 1; y < height; y += 2) {
635 for(x = width % 3; x < width; x += 3) {
636 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
638 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
641 planep[x + 0] = (code >> 0) & 1;
642 planep[x + 1] = (code >> 1) & 1;
643 planep[x + 2] = (code >> 2) & 1;
644 planep[x + 0 + stride] = (code >> 3) & 1;
645 planep[x + 1 + stride] = (code >> 4) & 1;
646 planep[x + 2 + stride] = (code >> 5) & 1;
648 planep += stride * 2;
651 if(x) decode_colskip(data , x, height , stride, &v->s.gb);
652 if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
/* rowskip / colskip modes */
656 decode_rowskip(data, width, height, stride, &v->s.gb);
659 decode_colskip(data, width, height, stride, &v->s.gb);
/* post-pass: undo differential coding for the DIFF modes */
664 /* Applying diff operator */
665 if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
669 for (x=1; x<width; x++)
670 planep[x] ^= planep[x-1];
671 for (y=1; y<height; y++)
674 planep[0] ^= planep[-stride];
675 for (x=1; x<width; x++)
677 if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
678 else planep[x] ^= planep[x-1];
685 for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
687 return (imode<<1) + invert;
690 /** @} */ //Bitplane group
692 /***********************************************************************/
/* NOTE(review): fragment of vop_dquant_decoding().  Reads the frame-level
 * DQUANT syntax: either a direct pqdiff/absolute altpq (dquant==2 path at
 * the top), or DQUANTFRM + DQPROFILE with edge/MB sub-modes.  The enclosing
 * if/else skeleton, a break after dqsbedge, and the closing braces were on
 * lost lines; surviving statements reproduced byte-for-byte. */
693 /** VOP Dquant decoding
694 * @param v VC-1 Context
696 static int vop_dquant_decoding(VC1Context *v)
698 GetBitContext *gb = &v->s.gb;
/* altpq: pqdiff==7 escapes to an explicit 5-bit value */
704 pqdiff = get_bits(gb, 3);
705 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
706 else v->altpq = v->pq + pqdiff + 1;
710 v->dquantfrm = get_bits(gb, 1);
713 v->dqprofile = get_bits(gb, 2);
714 switch (v->dqprofile)
716 case DQPROFILE_SINGLE_EDGE:
717 case DQPROFILE_DOUBLE_EDGES:
718 v->dqsbedge = get_bits(gb, 2);
/* NOTE(review): a break statement between these cases was on a lost line */
720 case DQPROFILE_ALL_MBS:
721 v->dqbilevel = get_bits(gb, 1);
722 default: break; //Forbidden ?
724 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
726 pqdiff = get_bits(gb, 3);
727 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
728 else v->altpq = v->pq + pqdiff + 1;
/* NOTE(review): fragment of vc1_put_block().  Writes the six decoded 8x8
 * blocks (4 luma + 2 chroma) of one macroblock into the current picture via
 * dsp->put_pixels_clamped.  The local declarations, the condition guarding
 * the range-expansion loop (presumably rangeredfrm -- confirm), the Y
 * destination setup, and closing braces were on lost lines. */
735 /** Put block onto picture
737 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
741 DSPContext *dsp = &v->s.dsp;
/* range expansion of sample values: (x-128)*2+128 */
745 for(k = 0; k < 6; k++)
746 for(j = 0; j < 8; j++)
747 for(i = 0; i < 8; i++)
748 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
751 ys = v->s.current_picture.linesize[0];
752 us = v->s.current_picture.linesize[1];
753 vs = v->s.current_picture.linesize[2];
/* four luma blocks: top pair then bottom pair */
756 dsp->put_pixels_clamped(block[0], Y, ys);
757 dsp->put_pixels_clamped(block[1], Y + 8, ys);
759 dsp->put_pixels_clamped(block[2], Y, ys);
760 dsp->put_pixels_clamped(block[3], Y + 8, ys);
762 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
763 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
764 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
/* NOTE(review): fragment of vc1_mc_1mv() -- single-MV motion compensation for
 * one 16x16 macroblock (luma + both chroma).  Lost lines include the
 * B-frame reference selection branch, the rangeredfrm condition opening the
 * range-reduction scaling, the mspel (quarter-pel) dispatch condition, and
 * assorted braces; surviving statements reproduced byte-for-byte. */
768 /** Do motion compensation over 1 macroblock
769 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
771 static void vc1_mc_1mv(VC1Context *v, int dir)
773 MpegEncContext *s = &v->s;
774 DSPContext *dsp = &v->s.dsp;
775 uint8_t *srcY, *srcU, *srcV;
776 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
778 if(!v->s.last_picture.data[0])return;
780 mx = s->mv[dir][0][0];
781 my = s->mv[dir][0][1];
783 // store motion vectors for further use in B frames
784 if(s->pict_type == P_TYPE) {
785 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
786 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
/* derive chroma MV from luma MV (round 3/4-pel up to half-pel) */
788 uvmx = (mx + ((mx & 3) == 3)) >> 1;
789 uvmy = (my + ((my & 3) == 3)) >> 1;
791 srcY = s->last_picture.data[0];
792 srcU = s->last_picture.data[1];
793 srcV = s->last_picture.data[2];
/* NOTE(review): presumably the else-branch for backward/B prediction -- the condition was lost */
795 srcY = s->next_picture.data[0];
796 srcU = s->next_picture.data[1];
797 srcV = s->next_picture.data[2];
800 src_x = s->mb_x * 16 + (mx >> 2);
801 src_y = s->mb_y * 16 + (my >> 2);
802 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
803 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
805 src_x = clip( src_x, -16, s->mb_width * 16);
806 src_y = clip( src_y, -16, s->mb_height * 16);
807 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
808 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
810 srcY += src_y * s->linesize + src_x;
811 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
812 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
814 /* for grayscale we should not try to read from unknown area */
815 if(s->flags & CODEC_FLAG_GRAY) {
816 srcU = s->edge_emu_buffer + 18 * s->linesize;
817 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* slow path: edge emulation when the source block may fall outside the frame,
 * or when range reduction / intensity compensation must rewrite samples */
820 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
821 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
822 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
823 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
825 srcY -= s->mspel * (1 + s->linesize);
826 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
827 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
828 srcY = s->edge_emu_buffer;
829 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
830 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
831 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
832 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
835 /* if we deal with range reduction we need to scale source blocks */
841 for(j = 0; j < 17 + s->mspel*2; j++) {
842 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
845 src = srcU; src2 = srcV;
846 for(j = 0; j < 9; j++) {
847 for(i = 0; i < 9; i++) {
848 src[i] = ((src[i] - 128) >> 1) + 128;
849 src2[i] = ((src2[i] - 128) >> 1) + 128;
851 src += s->uvlinesize;
852 src2 += s->uvlinesize;
855 /* if we deal with intensity compensation we need to scale source blocks */
856 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
861 for(j = 0; j < 17 + s->mspel*2; j++) {
862 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
865 src = srcU; src2 = srcV;
866 for(j = 0; j < 9; j++) {
867 for(i = 0; i < 9; i++) {
868 src[i] = v->lutuv[src[i]];
869 src2[i] = v->lutuv[src2[i]];
871 src += s->uvlinesize;
872 src2 += s->uvlinesize;
875 srcY += s->mspel * (1 + s->linesize);
/* drop to half-pel chroma precision when fastuvmc-style rounding applies
 * -- the guarding condition was on a lost line */
879 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
880 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
/* luma: quarter-pel (mspel) path ... */
884 dxy = ((my & 3) << 2) | (mx & 3);
885 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
886 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
887 srcY += s->linesize * 8;
888 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
889 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
890 } else { // hpel mc - always used for luma
891 dxy = (my & 2) | ((mx & 2) >> 1);
894 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
896 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
899 if(s->flags & CODEC_FLAG_GRAY) return;
900 /* Chroma MC always uses qpel bilinear */
901 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
905 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
906 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
908 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
909 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/* NOTE(review): fragment of vc1_mc_4mv_luma() -- motion compensation for one
 * of the four 8x8 luma blocks (index n: bit 0 = right half, bit 1 = bottom
 * half) of a 4-MV macroblock.  Lost lines include the mx/my assignment from
 * s->mv, local declarations, the rangeredfrm condition, the mspel dispatch
 * condition and several braces; surviving statements reproduced byte-for-byte. */
913 /** Do motion compensation for 4-MV macroblock - luminance block
915 static void vc1_mc_4mv_luma(VC1Context *v, int n)
917 MpegEncContext *s = &v->s;
918 DSPContext *dsp = &v->s.dsp;
920 int dxy, mx, my, src_x, src_y;
923 if(!v->s.last_picture.data[0])return;
926 srcY = s->last_picture.data[0];
/* destination offset of the n-th 8x8 block inside the macroblock */
928 off = s->linesize * 4 * (n&2) + (n&1) * 8;
930 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
931 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
933 src_x = clip( src_x, -16, s->mb_width * 16);
934 src_y = clip( src_y, -16, s->mb_height * 16);
936 srcY += src_y * s->linesize + src_x;
938 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
939 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
940 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
941 srcY -= s->mspel * (1 + s->linesize);
942 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
943 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
944 srcY = s->edge_emu_buffer;
945 /* if we deal with range reduction we need to scale source blocks */
951 for(j = 0; j < 9 + s->mspel*2; j++) {
952 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
956 /* if we deal with intensity compensation we need to scale source blocks */
957 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
962 for(j = 0; j < 9 + s->mspel*2; j++) {
963 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
967 srcY += s->mspel * (1 + s->linesize);
/* quarter-pel (mspel) path -- guarding condition lost */
971 dxy = ((my & 3) << 2) | (mx & 3);
972 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
973 } else { // hpel mc - always used for luma
974 dxy = (my & 2) | ((mx & 2) >> 1);
976 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
978 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/**
 * Median of four integers, computed as the mean of the two middle values.
 * NOTE(review): only the if/else skeleton was on lost lines; the four return
 * branches survived intact and force this reconstruction -- each branch
 * averages the smaller of the two larger candidates with the larger of the
 * two smaller candidates.
 * @return (second-smallest + second-largest) / 2 of {a, b, c, d}
 */
static inline int median4(int a, int b, int c, int d)
{
    if(a < b) {
        if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else      return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else      return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}
/* NOTE(review): fragment of vc1_mc_4mv_chroma() -- derives one chroma MV
 * (tx,ty) from the four luma MVs of a 4-MV macroblock (median4 when all four
 * blocks are inter, mid_pred of the three inter MVs when one is intra,
 * average when two are intra, no MC when fewer than two inter) and performs
 * chroma motion compensation.  Lost lines include the switch/case labels
 * selecting which mid_pred triplet to use, the t1/t2 declarations, the
 * fastuvmc rounding condition and assorted braces. */
994 /** Do motion compensation for 4-MV macroblock - both chroma blocks
996 static void vc1_mc_4mv_chroma(VC1Context *v)
998 MpegEncContext *s = &v->s;
999 DSPContext *dsp = &v->s.dsp;
1000 uint8_t *srcU, *srcV;
1001 int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
1002 int i, idx, tx = 0, ty = 0;
1003 int mvx[4], mvy[4], intra[4];
1004 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
1006 if(!v->s.last_picture.data[0])return;
1007 if(s->flags & CODEC_FLAG_GRAY) return;
1009 for(i = 0; i < 4; i++) {
1010 mvx[i] = s->mv[0][i][0];
1011 mvy[i] = s->mv[0][i][1];
1012 intra[i] = v->mb_type[0][s->block_index[i]];
1015 /* calculate chroma MV vector from four luma MVs */
1016 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
1017 if(!idx) { // all blocks are inter
1018 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
1019 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
1020 } else if(count[idx] == 1) { // 3 inter blocks
/* NOTE(review): case labels (one per intra-block position) were on lost lines */
1023 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
1024 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
1027 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
1028 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
1031 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
1032 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
1035 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
1036 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
1039 } else if(count[idx] == 2) {
1041 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
1042 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
1043 tx = (mvx[t1] + mvx[t2]) / 2;
1044 ty = (mvy[t1] + mvy[t2]) / 2;
1046 return; //no need to do MC for inter blocks
1048 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
1049 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
/* round 3/4-pel luma MV to chroma half-pel precision */
1050 uvmx = (tx + ((tx&3) == 3)) >> 1;
1051 uvmy = (ty + ((ty&3) == 3)) >> 1;
1053 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1054 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1056 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
1057 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
1058 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1059 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1060 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1061 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1062 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
1063 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
1064 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1065 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
1066 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1067 srcU = s->edge_emu_buffer;
1068 srcV = s->edge_emu_buffer + 16;
1070 /* if we deal with range reduction we need to scale source blocks */
1071 if(v->rangeredfrm) {
1073 uint8_t *src, *src2;
1075 src = srcU; src2 = srcV;
1076 for(j = 0; j < 9; j++) {
1077 for(i = 0; i < 9; i++) {
1078 src[i] = ((src[i] - 128) >> 1) + 128;
1079 src2[i] = ((src2[i] - 128) >> 1) + 128;
1081 src += s->uvlinesize;
1082 src2 += s->uvlinesize;
1085 /* if we deal with intensity compensation we need to scale source blocks */
1086 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1088 uint8_t *src, *src2;
1090 src = srcU; src2 = srcV;
1091 for(j = 0; j < 9; j++) {
1092 for(i = 0; i < 9; i++) {
1093 src[i] = v->lutuv[src[i]];
1094 src2[i] = v->lutuv[src2[i]];
1096 src += s->uvlinesize;
1097 src2 += s->uvlinesize;
/* drop to half-pel chroma precision -- guarding condition on a lost line */
1103 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
1104 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
1107 /* Chroma MC always uses qpel bilinear */
1108 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
1112 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1113 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1115 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1116 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Forward declaration: Advanced Profile path, dispatched below. */
1120 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
/* NOTE(review): fragment of decode_sequence_header().  Parses the Simple/
 * Main Profile sequence header fields in bitstream order and validates
 * reserved values.  The braces around most error branches, the "return -1"
 * statements inside them, and the final return were on lost lines;
 * surviving statements reproduced byte-for-byte. */
1123 * Decode Simple/Main Profiles sequence header
1124 * @see Figure 7-8, p16-17
1125 * @param avctx Codec context
1126 * @param gb GetBit context initialized from Codec context extra_data
1129 static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
1131 VC1Context *v = avctx->priv_data;
1133 av_log(avctx, AV_LOG_INFO, "Header: %0X\n", show_bits(gb, 32));
1134 v->profile = get_bits(gb, 2);
1135 if (v->profile == 2)
1137 av_log(avctx, AV_LOG_ERROR, "Profile value 2 is forbidden (and WMV3 Complex Profile is unsupported)\n");
1141 if (v->profile == PROFILE_ADVANCED)
1143 return decode_sequence_header_adv(v, gb);
1147 v->res_sm = get_bits(gb, 2); //reserved
1150 av_log(avctx, AV_LOG_ERROR,
1151 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
1157 v->frmrtq_postproc = get_bits(gb, 3); //common
1158 // (bitrate-32kbps)/64kbps
1159 v->bitrtq_postproc = get_bits(gb, 5); //common
1160 v->s.loop_filter = get_bits(gb, 1); //common
1161 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
1163 av_log(avctx, AV_LOG_ERROR,
1164 "LOOPFILTER shell not be enabled in simple profile\n");
1167 v->res_x8 = get_bits(gb, 1); //reserved
1170 av_log(avctx, AV_LOG_ERROR,
1171 "1 for reserved RES_X8 is forbidden\n");
1174 v->multires = get_bits(gb, 1);
1175 v->res_fasttx = get_bits(gb, 1);
1178 av_log(avctx, AV_LOG_ERROR,
1179 "0 for reserved RES_FASTTX is forbidden\n");
1183 v->fastuvmc = get_bits(gb, 1); //common
1184 if (!v->profile && !v->fastuvmc)
1186 av_log(avctx, AV_LOG_ERROR,
1187 "FASTUVMC unavailable in Simple Profile\n");
1190 v->extended_mv = get_bits(gb, 1); //common
1191 if (!v->profile && v->extended_mv)
1193 av_log(avctx, AV_LOG_ERROR,
1194 "Extended MVs unavailable in Simple Profile\n");
1197 v->dquant = get_bits(gb, 2); //common
1198 v->vstransform = get_bits(gb, 1); //common
1200 v->res_transtab = get_bits(gb, 1);
1201 if (v->res_transtab)
1203 av_log(avctx, AV_LOG_ERROR,
1204 "1 for reserved RES_TRANSTAB is forbidden\n");
1208 v->overlap = get_bits(gb, 1); //common
1210 v->s.resync_marker = get_bits(gb, 1);
1211 v->rangered = get_bits(gb, 1);
1212 if (v->rangered && v->profile == PROFILE_SIMPLE)
1214 av_log(avctx, AV_LOG_INFO,
1215 "RANGERED should be set to 0 in simple profile\n");
1218 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
1219 v->quantizer_mode = get_bits(gb, 2); //common
1221 v->finterpflag = get_bits(gb, 1); //common
1222 v->res_rtm_flag = get_bits(gb, 1); //reserved
1223 if (!v->res_rtm_flag)
1225 // av_log(avctx, AV_LOG_ERROR,
1226 // "0 for reserved RES_RTM_FLAG is forbidden\n");
1227 av_log(avctx, AV_LOG_ERROR,
1228 "Old WMV3 version detected, only I-frames will be decoded\n");
1231 av_log(avctx, AV_LOG_DEBUG,
1232 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1233 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
1234 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
1235 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
1236 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
1237 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
1238 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
1239 v->dquant, v->quantizer_mode, avctx->max_b_frames
1244 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
1246 v->res_rtm_flag = 1;
1247 v->level = get_bits(gb, 3);
1250 av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
1252 v->chromaformat = get_bits(gb, 2);
1253 if (v->chromaformat != 1)
1255 av_log(v->s.avctx, AV_LOG_ERROR,
1256 "Only 4:2:0 chroma format supported\n");
1261 v->frmrtq_postproc = get_bits(gb, 3); //common
1262 // (bitrate-32kbps)/64kbps
1263 v->bitrtq_postproc = get_bits(gb, 5); //common
1264 v->postprocflag = get_bits(gb, 1); //common
1266 v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
1267 v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
1268 v->broadcast = get_bits1(gb);
1269 v->interlace = get_bits1(gb);
1270 v->tfcntrflag = get_bits1(gb);
1271 v->finterpflag = get_bits1(gb);
1272 get_bits1(gb); // reserved
1273 v->psf = get_bits1(gb);
1274 if(v->psf) { //PsF, 6.1.13
1275 av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
1278 if(get_bits1(gb)) { //Display Info - decoding is not affected by it
1280 av_log(v->s.avctx, AV_LOG_INFO, "Display extended info:\n");
1281 w = get_bits(gb, 14);
1282 h = get_bits(gb, 14);
1283 av_log(v->s.avctx, AV_LOG_INFO, "Display dimensions: %ix%i\n", w, h);
1284 //TODO: store aspect ratio in AVCodecContext
1286 ar = get_bits(gb, 4);
1288 w = get_bits(gb, 8);
1289 h = get_bits(gb, 8);
1292 if(get_bits1(gb)){ //framerate stuff
1302 v->color_prim = get_bits(gb, 8);
1303 v->transfer_char = get_bits(gb, 8);
1304 v->matrix_coef = get_bits(gb, 8);
1308 v->hrd_param_flag = get_bits1(gb);
1309 if(v->hrd_param_flag) {
1311 v->hrd_num_leaky_buckets = get_bits(gb, 5);
1312 get_bits(gb, 4); //bitrate exponent
1313 get_bits(gb, 4); //buffer size exponent
1314 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1315 get_bits(gb, 16); //hrd_rate[n]
1316 get_bits(gb, 16); //hrd_buffer[n]
// Parse an Advanced Profile entry-point header.  Refreshes the per-
// entry-point coding tools (loop filter, fast UV MC, extended MV,
// DQUANT, transform/overlap/quantizer modes) and optionally new coded
// dimensions and luma/chroma range-mapping fields.
// NOTE(review): listing is gappy -- loop/branch braces and the
// surrounding conditionals for some reads are elided.
1322 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
1324 VC1Context *v = avctx->priv_data;
1327 av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
// Broken-link / closed-entry flags are consumed but unused here.
1328 get_bits1(gb); // broken link
1329 get_bits1(gb); // closed entry
1330 v->panscanflag = get_bits1(gb);
1331 get_bits1(gb); // refdist flag
1332 v->s.loop_filter = get_bits1(gb);
1333 v->fastuvmc = get_bits1(gb);
1334 v->extended_mv = get_bits1(gb);
1335 v->dquant = get_bits(gb, 2);
1336 v->vstransform = get_bits1(gb);
1337 v->overlap = get_bits1(gb);
1338 v->quantizer_mode = get_bits(gb, 2);
// HRD fullness: one byte per leaky bucket declared in the sequence
// header; values are discarded.
1340 if(v->hrd_param_flag){
1341 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1342 get_bits(gb, 8); //hrd_full[n]
// Optional coded-size override, same (value+1)*2 encoding as the
// sequence header (the guarding flag read is on an elided line).
1347 avctx->coded_width = (get_bits(gb, 12)+1)<<1;
1348 avctx->coded_height = (get_bits(gb, 12)+1)<<1;
1351 v->extended_dmv = get_bits1(gb);
// Range mapping is parsed but not applied -- decoder warns and skips.
1353 av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
1354 skip_bits(gb, 3); // Y range, ignored for now
1357 av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
1358 skip_bits(gb, 3); // UV range, ignored for now
// Parse a Simple/Main Profile frame header: picture type, quantizer,
// MV range/mode, intensity compensation, coded bitplanes and the
// per-frame VLC table selections.  Returns -1 on bitplane decoding
// failure (0-return path is on an elided line).
// NOTE(review): listing is gappy -- case labels, braces and some else
// branches of this function are elided between the numbered lines.
1364 static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
1366 int pqindex, lowquant, status;
1368 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1369 skip_bits(gb, 2); //framecnt unused
1371 if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
// Picture type: 1 bit, plus a second bit to separate I from B when the
// stream can contain B-frames.
1372 v->s.pict_type = get_bits(gb, 1);
1373 if (v->s.avctx->max_b_frames) {
1374 if (!v->s.pict_type) {
1375 if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
1376 else v->s.pict_type = B_TYPE;
1377 } else v->s.pict_type = P_TYPE;
1378 } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
// BFRACTION == 0 is the escape that turns a B picture into BI.
1381 if(v->s.pict_type == B_TYPE) {
1382 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1383 v->bfraction = vc1_bfraction_lut[v->bfraction];
1384 if(v->bfraction == 0) {
1385 v->s.pict_type = BI_TYPE;
1388 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1389 get_bits(gb, 7); // skip buffer fullness
1392 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1394 if(v->s.pict_type == P_TYPE)
// PQINDEX selects PQUANT via the implicit or explicit table; the
// uniform/non-uniform quantizer choice follows the sequence-level mode.
1397 /* Quantizer stuff */
1398 pqindex = get_bits(gb, 5);
1399 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1400 v->pq = pquant_table[0][pqindex];
1402 v->pq = pquant_table[1][pqindex];
1405 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1406 v->pquantizer = pqindex < 9;
1407 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1409 v->pqindex = pqindex;
1410 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1412 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1413 v->pquantizer = get_bits(gb, 1);
// MVRANGE widens the MV component bit widths (see comments below).
1415 if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
1416 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1417 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1418 v->range_x = 1 << (v->k_x - 1);
1419 v->range_y = 1 << (v->k_y - 1);
1420 if (v->profile == PROFILE_ADVANCED)
1422 if (v->postprocflag) v->postproc = get_bits(gb, 1);
1425 if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);
1427 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
1428 // (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
// Per-picture-type parsing; the case labels are on elided lines, but
// the first group below matches the P-picture syntax and the second
// group (from line 1514) the B-picture syntax.
1430 switch(v->s.pict_type) {
1432 if (v->pq < 5) v->tt_index = 0;
1433 else if(v->pq < 13) v->tt_index = 1;
1434 else v->tt_index = 2;
1436 lowquant = (v->pq > 12) ? 0 : 1;
1437 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1438 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1440 int scale, shift, i;
1441 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1442 v->lumscale = get_bits(gb, 6);
1443 v->lumshift = get_bits(gb, 6);
// Build the 256-entry luma/chroma remap LUTs used by intensity
// compensation; scale/shift derivation for lumscale==0 and the
// branch structure around lumshift are partly on elided lines.
1444 /* fill lookup tables for intensity compensation */
1447 shift = (255 - v->lumshift * 2) << 6;
1448 if(v->lumshift > 31)
1451 scale = v->lumscale + 32;
1452 if(v->lumshift > 31)
1453 shift = (v->lumshift - 64) << 6;
1455 shift = v->lumshift << 6;
1457 for(i = 0; i < 256; i++) {
1458 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1459 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
// Derive sub-pel precision: half-pel modes clear quarter_sample; for
// intensity compensation the effective mode is mv_mode2.
1462 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1463 v->s.quarter_sample = 0;
1464 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1465 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1466 v->s.quarter_sample = 0;
1468 v->s.quarter_sample = 1;
1470 v->s.quarter_sample = 1;
1471 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
// Mixed-MV pictures carry a coded MB-level MV-type bitplane; otherwise
// the plane is cleared to all-1MV.
1473 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1474 v->mv_mode2 == MV_PMODE_MIXED_MV)
1475 || v->mv_mode == MV_PMODE_MIXED_MV)
1477 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1478 if (status < 0) return -1;
1479 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1480 "Imode: %i, Invert: %i\n", status>>1, status&1);
1482 v->mv_type_is_raw = 0;
1483 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1485 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1486 if (status < 0) return -1;
1487 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1488 "Imode: %i, Invert: %i\n", status>>1, status&1);
1490 /* Hopefully this is correct for P frames */
1491 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1492 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1496 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1497 vop_dquant_decoding(v);
1500 v->ttfrm = 0; //FIXME Is that so ?
1503 v->ttmbf = get_bits(gb, 1);
1506 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
// --- B-picture syntax (case label elided) ---
1514 if (v->pq < 5) v->tt_index = 0;
1515 else if(v->pq < 13) v->tt_index = 1;
1516 else v->tt_index = 2;
1518 lowquant = (v->pq > 12) ? 0 : 1;
// B pictures use only 1MV full-pel/quarter-pel vs half-pel-bilinear.
1519 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1520 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1521 v->s.mspel = v->s.quarter_sample;
1523 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1524 if (status < 0) return -1;
1525 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1526 "Imode: %i, Invert: %i\n", status>>1, status&1);
1527 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1528 if (status < 0) return -1;
1529 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1530 "Imode: %i, Invert: %i\n", status>>1, status&1);
1532 v->s.mv_table_index = get_bits(gb, 2);
1533 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1537 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1538 vop_dquant_decoding(v);
1544 v->ttmbf = get_bits(gb, 1);
1547 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
// AC/DC coding-set selection shared by all picture types.
1557 v->c_ac_table_index = decode012(gb);
1558 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1560 v->y_ac_table_index = decode012(gb);
1563 v->s.dc_table_index = get_bits(gb, 1);
// BI pictures are decoded with the B machinery, so fold the type back.
1565 if(v->s.pict_type == BI_TYPE) {
1566 v->s.pict_type = B_TYPE;
// Parse an Advanced Profile frame header.  Mirrors
// vc1_parse_frame_header() for the P-picture payload, but adds FCM,
// the unary picture-type code (incl. skipped pictures), pan-scan and
// ACPRED/CONDOVER bitplanes for I pictures.
// NOTE(review): listing is gappy -- case labels, braces, else branches
// and several reads are elided between the numbered lines.
1572 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
1575 int pqindex, lowquant;
1578 v->p_frame_skipped = 0;
// Frame coding mode (progressive/interlace); declaration of fcm is on
// an elided line.
1581 fcm = decode012(gb);
// Unary picture-type code: P, B, I, BI, skipped-P (case labels elided).
1582 switch(get_prefix(gb, 0, 4)) {
1584 v->s.pict_type = P_TYPE;
1587 v->s.pict_type = B_TYPE;
1591 v->s.pict_type = I_TYPE;
1594 v->s.pict_type = BI_TYPE;
1598 v->s.pict_type = P_TYPE; // skipped pic
1599 v->p_frame_skipped = 1;
1605 if(!v->interlace || v->panscanflag) {
1612 if(v->panscanflag) {
1615 v->rnd = get_bits1(gb);
1617 v->uvsamp = get_bits1(gb);
1618 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
// Quantizer selection, identical to the Simple/Main path.
1619 pqindex = get_bits(gb, 5);
1620 v->pqindex = pqindex;
1621 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1622 v->pq = pquant_table[0][pqindex];
1624 v->pq = pquant_table[1][pqindex];
1627 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1628 v->pquantizer = pqindex < 9;
1629 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1631 v->pqindex = pqindex;
1632 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1634 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1635 v->pquantizer = get_bits(gb, 1);
1637 switch(v->s.pict_type) {
// --- I-picture syntax (case label elided): ACPRED bitplane plus
// conditional-overlap smoothing selection for pq <= 8.
1639 status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
1640 if (status < 0) return -1;
1641 av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
1642 "Imode: %i, Invert: %i\n", status>>1, status&1);
1643 v->condover = CONDOVER_NONE;
1644 if(v->overlap && v->pq <= 8) {
1645 v->condover = decode012(gb);
1646 if(v->condover == CONDOVER_SELECT) {
1647 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
1648 if (status < 0) return -1;
1649 av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
1650 "Imode: %i, Invert: %i\n", status>>1, status&1);
// --- P-picture syntax (case label elided) ---
1656 v->postproc = get_bits1(gb);
1657 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1658 else v->mvrange = 0;
1659 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1660 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1661 v->range_x = 1 << (v->k_x - 1);
1662 v->range_y = 1 << (v->k_y - 1);
1664 if (v->pq < 5) v->tt_index = 0;
1665 else if(v->pq < 13) v->tt_index = 1;
1666 else v->tt_index = 2;
1668 lowquant = (v->pq > 12) ? 0 : 1;
1669 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1670 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1672 int scale, shift, i;
1673 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1674 v->lumscale = get_bits(gb, 6);
1675 v->lumshift = get_bits(gb, 6);
// LUT construction duplicated from vc1_parse_frame_header(); the same
// elided branch structure applies here.
1676 /* fill lookup tables for intensity compensation */
1679 shift = (255 - v->lumshift * 2) << 6;
1680 if(v->lumshift > 31)
1683 scale = v->lumscale + 32;
1684 if(v->lumshift > 31)
1685 shift = (v->lumshift - 64) << 6;
1687 shift = v->lumshift << 6;
1689 for(i = 0; i < 256; i++) {
1690 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1691 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1694 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1695 v->s.quarter_sample = 0;
1696 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1697 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1698 v->s.quarter_sample = 0;
1700 v->s.quarter_sample = 1;
1702 v->s.quarter_sample = 1;
1703 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1705 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1706 v->mv_mode2 == MV_PMODE_MIXED_MV)
1707 || v->mv_mode == MV_PMODE_MIXED_MV)
1709 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1710 if (status < 0) return -1;
1711 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1712 "Imode: %i, Invert: %i\n", status>>1, status&1);
1714 v->mv_type_is_raw = 0;
1715 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1717 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1718 if (status < 0) return -1;
1719 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1720 "Imode: %i, Invert: %i\n", status>>1, status&1);
1722 /* Hopefully this is correct for P frames */
1723 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1724 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1727 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1728 vop_dquant_decoding(v);
1731 v->ttfrm = 0; //FIXME Is that so ?
1734 v->ttmbf = get_bits(gb, 1);
1737 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
// AC/DC coding sets, then an extra DQuant pass for I pictures.
1747 v->c_ac_table_index = decode012(gb);
1748 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1750 v->y_ac_table_index = decode012(gb);
1753 v->s.dc_table_index = get_bits(gb, 1);
1754 if (v->s.pict_type == I_TYPE && v->dquant) {
1755 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1756 vop_dquant_decoding(v);
1762 /***********************************************************************/
1764 * @defgroup block VC-1 Block-level functions
1765 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
// Expands in MB decoding loops; expects gb, v, s, mquant, mqdiff and
// edges to be in scope at the expansion site.  Resolves the per-MB
// quantizer from v->dqprofile: either every MB carries MQDIFF
// (DQPROFILE_ALL_MBS), or ALTPQUANT applies along the selected frame
// edges.  NOTE(review): several continuation lines (the surrounding
// if/else structure and the edges=15 case) are elided in this listing;
// no comments are interleaved below to keep the macro continuations
// intact.
1771 * @brief Get macroblock-level quantizer scale
1773 #define GET_MQUANT() \
1777 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1781 mquant = (get_bits(gb, 1)) ? v->altpq : v->pq; \
1785 mqdiff = get_bits(gb, 3); \
1786 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1787 else mquant = get_bits(gb, 5); \
1790 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1791 edges = 1 << v->dqsbedge; \
1792 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1793 edges = (3 << v->dqsbedge) % 15; \
1794 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
1796 if((edges&1) && !s->mb_x) \
1797 mquant = v->altpq; \
1798 if((edges&2) && s->first_slice_line) \
1799 mquant = v->altpq; \
1800 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1801 mquant = v->altpq; \
1802 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1803 mquant = v->altpq; \
// Expands in MB decoding loops; expects gb, v, s, index, index1, val,
// sign, size_table, offset_table and mb_has_coeffs in scope.  Decodes
// one MVDATA element: index 0 = zero MV, 35 = raw k_x/k_y-bit
// differentials, 36 = intra escape (its handling is on elided lines);
// otherwise the differential is reconstructed from size/offset tables
// with a sign folded into the low bit.  NOTE(review): several
// continuation lines are elided; comments are kept outside the macro
// to avoid breaking line continuations.
1807 * @def GET_MVDATA(_dmv_x, _dmv_y)
1808 * @brief Get MV differentials
1809 * @see MVDATA decoding from 8.3.5.2, p(1)20
1810 * @param _dmv_x Horizontal differential for decoded MV
1811 * @param _dmv_y Vertical differential for decoded MV
1813 #define GET_MVDATA(_dmv_x, _dmv_y) \
1814 index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
1815 VC1_MV_DIFF_VLC_BITS, 2); \
1818 mb_has_coeffs = 1; \
1821 else mb_has_coeffs = 0; \
1823 if (!index) { _dmv_x = _dmv_y = 0; } \
1824 else if (index == 35) \
1826 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1827 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1829 else if (index == 36) \
1838 if (!s->quarter_sample && index1 == 5) val = 1; \
1840 if(size_table[index1] - val > 0) \
1841 val = get_bits(gb, size_table[index1] - val); \
1843 sign = 0 - (val&1); \
1844 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1847 if (!s->quarter_sample && index1 == 5) val = 1; \
1849 if(size_table[index1] - val > 0) \
1850 val = get_bits(gb, size_table[index1] - val); \
1852 sign = 0 - (val&1); \
1853 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
// Median-predict one forward MV from neighbours A (above), B
// (above-off) and C (left), add the decoded differential, clamp
// ("pull back") into the picture, optionally apply hybrid prediction,
// and store the result into s->mv and motion_val.  mv1 != 0 means
// 1-MV mode: the result is replicated to all four 8x8 positions.
// NOTE(review): listing is gappy -- the intra-block early-out, parts of
// the predictor-selection branches and the hybrid-prediction
// assignments are on elided lines.
1856 /** Predict and set motion vector
1858 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
1860 int xy, wrap, off = 0;
// Differentials arrive in the frame's native precision; bring half-pel
// streams up to quarter-pel units.
1865 /* scale MV difference to be quad-pel */
1866 dmv_x <<= 1 - s->quarter_sample;
1867 dmv_y <<= 1 - s->quarter_sample;
1869 wrap = s->b8_stride;
1870 xy = s->block_index[n];
// Intra path (guard is on an elided line): zero this block's MV and,
// in 1-MV mode, the three sibling positions too.
1873 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1874 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1875 if(mv1) { /* duplicate motion data for 1-MV block */
1876 s->current_picture.motion_val[0][xy + 1][0] = 0;
1877 s->current_picture.motion_val[0][xy + 1][1] = 0;
1878 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1879 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1880 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1881 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
// Neighbour predictors: C = left, A = above; B's column offset depends
// on block index n and picture-edge position.
1886 C = s->current_picture.motion_val[0][xy - 1];
1887 A = s->current_picture.motion_val[0][xy - wrap];
1889 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1891 //in 4-MV mode different blocks have different B predictor position
1894 off = (s->mb_x > 0) ? -1 : 1;
1897 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1906 B = s->current_picture.motion_val[0][xy - wrap + off];
1908 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
1909 if(s->mb_width == 1) {
1913 px = mid_pred(A[0], B[0], C[0]);
1914 py = mid_pred(A[1], B[1], C[1]);
1916 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
// Clamp the predictor so the referenced block stays inside the frame
// (quarter-pel units; the -60 bounds apply in quarter-pel, -28 in
// half-pel -- the selecting conditional is on an elided line).
1922 /* Pullback MV as specified in 8.3.5.3.4 */
1925 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
1926 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
1927 X = (s->mb_width << 6) - 4;
1928 Y = (s->mb_height << 6) - 4;
1930 if(qx + px < -60) px = -60 - qx;
1931 if(qy + py < -60) py = -60 - qy;
1933 if(qx + px < -28) px = -28 - qx;
1934 if(qy + py < -28) py = -28 - qy;
1936 if(qx + px > X) px = X - qx;
1937 if(qy + py > Y) py = Y - qy;
// Hybrid prediction: when A or C differs strongly from the median, one
// explicit bit selects A or C directly (assignments elided).
1939 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
1940 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
1941 if(is_intra[xy - wrap])
1942 sum = ABS(px) + ABS(py);
1944 sum = ABS(px - A[0]) + ABS(py - A[1]);
1946 if(get_bits1(&s->gb)) {
1954 if(is_intra[xy - 1])
1955 sum = ABS(px) + ABS(py);
1957 sum = ABS(px - C[0]) + ABS(py - C[1]);
1959 if(get_bits1(&s->gb)) {
// Wrap the final MV into the legal signed range [-r, r).
1969 /* store MV using signed modulus of MV range defined in 4.11 */
1970 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1971 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1972 if(mv1) { /* duplicate motion data for 1-MV block */
1973 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
1974 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
1975 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
1976 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
1977 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
1978 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
// Average the backward (next-picture) prediction into the already-
// rendered forward prediction for direct/interpolated B-frame MBs:
// clamp source coordinates, handle edge emulation, undo range
// reduction if active, then avg luma with [hv]-pel taps and chroma
// with the qpel bilinear averager.
// NOTE(review): listing is gappy -- the emulated-edge guard's first
// clause and several closing braces are elided.
1982 /** Motion compensation for direct or interpolated blocks in B-frames
1984 static void vc1_interp_mc(VC1Context *v)
1986 MpegEncContext *s = &v->s;
1987 DSPContext *dsp = &v->s.dsp;
1988 uint8_t *srcY, *srcU, *srcV;
1989 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
// Nothing to blend without a reference next picture.
1991 if(!v->s.next_picture.data[0])return;
// Backward MV in quarter-pel; chroma MV is luma/2 with odd rounding.
1993 mx = s->mv[1][0][0];
1994 my = s->mv[1][0][1];
1995 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1996 uvmy = (my + ((my & 3) == 3)) >> 1;
1997 srcY = s->next_picture.data[0];
1998 srcU = s->next_picture.data[1];
1999 srcV = s->next_picture.data[2];
2001 src_x = s->mb_x * 16 + (mx >> 2);
2002 src_y = s->mb_y * 16 + (my >> 2);
2003 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2004 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2006 src_x = clip( src_x, -16, s->mb_width * 16);
2007 src_y = clip( src_y, -16, s->mb_height * 16);
2008 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
2009 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
2011 srcY += src_y * s->linesize + src_x;
2012 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2013 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
// Grayscale decoding: point chroma at scratch so no real chroma is read.
2015 /* for grayscale we should not try to read from unknown area */
2016 if(s->flags & CODEC_FLAG_GRAY) {
2017 srcU = s->edge_emu_buffer + 18 * s->linesize;
2018 srcV = s->edge_emu_buffer + 18 * s->linesize;
// Edge emulation when the 17x17 luma / 9x9 chroma fetch would leave
// the padded frame (first clause of this condition is elided).
2022 || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
2023 || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
2024 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
2026 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17, 17,
2027 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
2028 srcY = s->edge_emu_buffer;
2029 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
2030 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2031 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
2032 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
// Range reduction maps samples toward 128 before interpolation.
2035 /* if we deal with range reduction we need to scale source blocks */
2036 if(v->rangeredfrm) {
2038 uint8_t *src, *src2;
2041 for(j = 0; j < 17; j++) {
2042 for(i = 0; i < 17; i++) src[i] = ((src[i] - 128) >> 1) + 128;
2045 src = srcU; src2 = srcV;
2046 for(j = 0; j < 9; j++) {
2047 for(i = 0; i < 9; i++) {
2048 src[i] = ((src[i] - 128) >> 1) + 128;
2049 src2[i] = ((src2[i] - 128) >> 1) + 128;
2051 src += s->uvlinesize;
2052 src2 += s->uvlinesize;
// Round chroma MVs toward zero on the half-pel positions.
2058 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
2059 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
// Luma: half-pel dxy select + averaging put (blends with forward pred).
2064 dxy = ((my & 1) << 1) | (mx & 1);
2066 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
2068 if(s->flags & CODEC_FLAG_GRAY) return;
2069 /* Chroma MC always uses qpel bilinear */
2070 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
2071 dsp->avg_qpel_pixels_tab[1][uvdxy](s->dest[1], srcU, s->uvlinesize);
2072 dsp->avg_qpel_pixels_tab[1][uvdxy](s->dest[2], srcV, s->uvlinesize);
// Scale a co-located MV component by the B-frame temporal fraction
// (forward when inv==0, inverse/backward when inv!=0 -- the lines
// deriving n from bfrac/inv are elided; TODO confirm).  qs selects
// quarter-sample streams, which keep the result even via the
// 2*((...)>>...) forms.  A fast shift path exists when B_FRACTION_DEN
// is the compile-time constant 256.
2075 static always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2079 #if B_FRACTION_DEN==256
2083 return 2 * ((value * n + 255) >> 9);
2084 return (value * n + 128) >> 8;
2087 n -= B_FRACTION_DEN;
2089 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2090 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
// Dispatch B-frame motion compensation by mode; almost all of this
// function (the direct path and the interpolated-mode body) is elided
// in this listing -- only the mode test and the final single-direction
// 1MV call are visible.
2094 /** Reconstruct motion vector for B-frame and do motion compensation
2096 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
2103 if(mode == BMV_TYPE_INTERPOLATED) {
// Forward or backward prediction via the shared 1MV compensator.
2109 vc1_mc_1mv(v, (mode == BMV_TYPE_FORWARD));
// Predict both B-frame MVs for the macroblock.  Direct mode scales the
// co-located next-picture MV by BFRACTION (forward + inverse) via
// scale_mv(); explicit mode runs the same median-predict / pullback /
// store pipeline as vc1_pred_mv() once per direction.
// NOTE(review): listing is gappy -- intra early-out, r_x/r_y setup and
// several branch bodies are on elided lines.
2112 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
2114 MpegEncContext *s = &v->s;
2115 int xy, wrap, off = 0;
2120 const uint8_t *is_intra = v->mb_type[0];
// Index [0] = forward differential, [1] = backward; normalise both to
// quarter-pel units.
2124 /* scale MV difference to be quad-pel */
2125 dmv_x[0] <<= 1 - s->quarter_sample;
2126 dmv_y[0] <<= 1 - s->quarter_sample;
2127 dmv_x[1] <<= 1 - s->quarter_sample;
2128 dmv_y[1] <<= 1 - s->quarter_sample;
2130 wrap = s->b8_stride;
2131 xy = s->block_index[0];
// Intra path (guard elided): clear both directions' stored MVs.
2134 s->current_picture.motion_val[0][xy][0] =
2135 s->current_picture.motion_val[0][xy][1] =
2136 s->current_picture.motion_val[1][xy][0] =
2137 s->current_picture.motion_val[1][xy][1] = 0;
// Direct mode: derive both MVs from the co-located MV in the next
// picture, scaled by BFRACTION (inv=1 forward, inv=0 backward).
2140 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2141 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2142 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2143 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2145 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2146 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2147 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2148 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
// --- Forward direction (list 0) prediction ---
2152 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2153 C = s->current_picture.motion_val[0][xy - 2];
2154 A = s->current_picture.motion_val[0][xy - wrap*2];
2155 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2156 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
2158 if(!s->first_slice_line) { // predictor A is not out of bounds
2159 if(s->mb_width == 1) {
2163 px = mid_pred(A[0], B[0], C[0]);
2164 py = mid_pred(A[1], B[1], C[1]);
2166 } else if(s->mb_x) { // predictor C is not out of bounds
// Pullback: half-pel (<<5) bounds for Simple/Main, quarter-pel (<<6)
// for Advanced profile.
2172 /* Pullback MV as specified in 8.3.5.3.4 */
2175 if(v->profile < PROFILE_ADVANCED) {
2176 qx = (s->mb_x << 5);
2177 qy = (s->mb_y << 5);
2178 X = (s->mb_width << 5) - 4;
2179 Y = (s->mb_height << 5) - 4;
2180 if(qx + px < -28) px = -28 - qx;
2181 if(qy + py < -28) py = -28 - qy;
2182 if(qx + px > X) px = X - qx;
2183 if(qy + py > Y) py = Y - qy;
2185 qx = (s->mb_x << 6);
2186 qy = (s->mb_y << 6);
2187 X = (s->mb_width << 6) - 4;
2188 Y = (s->mb_height << 6) - 4;
2189 if(qx + px < -60) px = -60 - qx;
2190 if(qy + py < -60) py = -60 - qy;
2191 if(qx + px > X) px = X - qx;
2192 if(qy + py > Y) py = Y - qy;
// NOTE(review): `if(0 && ...)` deliberately disables hybrid prediction
// for B frames here -- the block below is dead code.
2195 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2196 if(0 && !s->first_slice_line && s->mb_x) {
2197 if(is_intra[xy - wrap])
2198 sum = ABS(px) + ABS(py);
2200 sum = ABS(px - A[0]) + ABS(py - A[1]);
2202 if(get_bits1(&s->gb)) {
2210 if(is_intra[xy - 2])
2211 sum = ABS(px) + ABS(py);
2213 sum = ABS(px - C[0]) + ABS(py - C[1]);
2215 if(get_bits1(&s->gb)) {
2225 /* store MV using signed modulus of MV range defined in 4.11 */
2226 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2227 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
// --- Backward direction (list 1) prediction, mirror of the above ---
2229 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2230 C = s->current_picture.motion_val[1][xy - 2];
2231 A = s->current_picture.motion_val[1][xy - wrap*2];
2232 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2233 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
2235 if(!s->first_slice_line) { // predictor A is not out of bounds
2236 if(s->mb_width == 1) {
2240 px = mid_pred(A[0], B[0], C[0]);
2241 py = mid_pred(A[1], B[1], C[1]);
2243 } else if(s->mb_x) { // predictor C is not out of bounds
2249 /* Pullback MV as specified in 8.3.5.3.4 */
2252 if(v->profile < PROFILE_ADVANCED) {
2253 qx = (s->mb_x << 5);
2254 qy = (s->mb_y << 5);
2255 X = (s->mb_width << 5) - 4;
2256 Y = (s->mb_height << 5) - 4;
2257 if(qx + px < -28) px = -28 - qx;
2258 if(qy + py < -28) py = -28 - qy;
2259 if(qx + px > X) px = X - qx;
2260 if(qy + py > Y) py = Y - qy;
2262 qx = (s->mb_x << 6);
2263 qy = (s->mb_y << 6);
2264 X = (s->mb_width << 6) - 4;
2265 Y = (s->mb_height << 6) - 4;
2266 if(qx + px < -60) px = -60 - qx;
2267 if(qy + py < -60) py = -60 - qy;
2268 if(qx + px > X) px = X - qx;
2269 if(qy + py > Y) py = Y - qy;
// NOTE(review): hybrid prediction is disabled here as well (if(0 ...)).
2272 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2273 if(0 && !s->first_slice_line && s->mb_x) {
2274 if(is_intra[xy - wrap])
2275 sum = ABS(px) + ABS(py);
2277 sum = ABS(px - A[0]) + ABS(py - A[1]);
2279 if(get_bits1(&s->gb)) {
2287 if(is_intra[xy - 2])
2288 sum = ABS(px) + ABS(py);
2290 sum = ABS(px - C[0]) + ABS(py - C[1]);
2292 if(get_bits1(&s->gb)) {
2302 /* store MV using signed modulus of MV range defined in 4.11 */
2304 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2305 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2307 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2308 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2309 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2310 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
// DC prediction for I frames: pick left (c) or top (a) neighbour,
// whichever is the better gradient match against corner b, seeding
// out-of-frame neighbours from the dcpred table (~1024/scale) when the
// quantizer/overlap conditions allow.  Sets *dir_ptr for AC prediction
// and returns the predictor through dc_val_ptr.
// NOTE(review): listing is gappy -- dc_val declaration, the c=dc_val[-1]
// read, the pred/dir assignments and the return are on elided lines.
2313 /** Get predicted DC value for I-frames only
2314 * prediction dir: left=0, top=1
2315 * @param s MpegEncContext
2316 * @param[in] n block index in the current MB
2317 * @param dc_val_ptr Pointer to DC predictor
2318 * @param dir_ptr Prediction direction for use in AC prediction
2320 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2321 int16_t **dc_val_ptr, int *dir_ptr)
2323 int a, b, c, wrap, pred, scale;
// dcpred[scale] approximates 1024/scale; index 0 is unused (-1).
2325 static const uint16_t dcpred[32] = {
2326 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2327 114, 102, 93, 85, 79, 73, 68, 64,
2328 60, 57, 54, 51, 49, 47, 45, 43,
2329 41, 39, 38, 37, 35, 34, 33
2332 /* find prediction - wmv3_dc_scale always used here in fact */
2333 if (n < 4) scale = s->y_dc_scale;
2334 else scale = s->c_dc_scale;
2336 wrap = s->block_wrap[n];
2337 dc_val= s->dc_val[0] + s->block_index[n];
// b = above-left corner, a = above (c = left is on an elided line).
2343 b = dc_val[ - 1 - wrap];
2344 a = dc_val[ - wrap];
// Low-quant or non-overlapped blocks seed missing neighbours with the
// scaled default; otherwise they default to 0.
2346 if (pq < 9 || !overlap)
2348 /* Set outer values */
2349 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2350 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2354 /* Set outer values */
2355 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2356 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
// Gradient test selects left vs top predictor (assignments elided).
2359 if (abs(a - b) <= abs(b - c)) {
2367 /* update predictor */
2368 *dc_val_ptr = &dc_val[0];
// General DC prediction (P/B capable): like vc1_i_pred_dc() but with
// explicit neighbour availability flags, and neighbours re-scaled when
// the neighbouring MB used a different quantizer (q2 vs q1) via the
// vc1_dqscale reciprocal table in 18-bit fixed point.
// NOTE(review): listing is gappy -- dc_val/q1/q2 declarations, the
// c=dc_val[-1] read, the q1!=q2 guards, b's `off` initialisation and
// the pred/dir selection assignments are on elided lines.
2373 /** Get predicted DC value
2374 * prediction dir: left=0, top=1
2375 * @param s MpegEncContext
2376 * @param[in] n block index in the current MB
2377 * @param dc_val_ptr Pointer to DC predictor
2378 * @param dir_ptr Prediction direction for use in AC prediction
2380 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2381 int a_avail, int c_avail,
2382 int16_t **dc_val_ptr, int *dir_ptr)
2384 int a, b, c, wrap, pred, scale;
2386 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2389 /* find prediction - wmv3_dc_scale always used here in fact */
2390 if (n < 4) scale = s->y_dc_scale;
2391 else scale = s->c_dc_scale;
2393 wrap = s->block_wrap[n];
2394 dc_val= s->dc_val[0] + s->block_index[n];
2400 b = dc_val[ - 1 - wrap];
2401 a = dc_val[ - wrap];
// Re-quantise each available neighbour from its MB's scale (q2) to the
// current MB's scale (q1): x * scale(q2) / scale(q1), rounded.
2402 /* scale predictors if needed */
2403 q1 = s->current_picture.qscale_table[mb_pos];
2404 if(c_avail && (n!= 1 && n!=3)) {
2405 q2 = s->current_picture.qscale_table[mb_pos - 1];
2407 c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2409 if(a_avail && (n!= 2 && n!=3)) {
2410 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2412 a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2414 if(a_avail && c_avail && (n!=3)) {
2417 if(n != 2) off -= s->mb_stride;
2418 q2 = s->current_picture.qscale_table[off];
2420 b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
// Predictor choice: gradient test when both neighbours exist,
// otherwise whichever single neighbour is available (bodies elided).
2423 if(a_avail && c_avail) {
2424 if(abs(a - b) <= abs(b - c)) {
2431 } else if(a_avail) {
2434 } else if(c_avail) {
2442 /* update predictor */
2443 *dc_val_ptr = &dc_val[0];
// Predict the coded/not-coded status of block n from neighbours a
// (left), b (above-left) and c (above) in the coded_block array, and
// hand back a pointer so the caller can update the entry.  The pred
// computation and return are on elided lines.
2449 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2450 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
2454 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
2456 int xy, wrap, pred, a, b, c;
2458 xy = s->block_index[n];
2459 wrap = s->b8_stride;
// Neighbour fetch in 8x8-block coordinates.
2464 a = s->coded_block[xy - 1 ];
2465 b = s->coded_block[xy - 1 - wrap];
2466 c = s->coded_block[xy - wrap];
2475 *coded_block_ptr = &s->coded_block[xy];
// Decode one (run, level, last) AC triple from the selected coding
// set.  Non-escape indices read straight from the index tables; escape
// modes add delta-level (mode 1) or delta-run (mode 2), and escape
// mode 3 reads explicit run/sign/level fields whose widths are cached
// per frame in esc3_run_length / esc3_level_length.
// NOTE(review): listing is gappy -- sign handling, the escape-mode
// switch labels and the final *last/*skip/*value stores are on elided
// lines.
2481 * Decode one AC coefficient
2482 * @param v The VC1 context
2483 * @param last Last coefficient
2484 * @param skip How much zero coefficients to skip
2485 * @param value Decoded AC coefficient value
2488 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
2490 GetBitContext *gb = &v->s.gb;
2491 int index, escape, run = 0, level = 0, lst = 0;
2493 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
// Regular (non-escape) symbol: table lookup of run/level/last.
2494 if (index != vc1_ac_sizes[codingset] - 1) {
2495 run = vc1_index_decode_table[codingset][index][0];
2496 level = vc1_index_decode_table[codingset][index][1];
2497 lst = index >= vc1_last_decode_table[codingset];
// Escape symbol: decode210 selects one of three escape modes.
2501 escape = decode210(gb);
2503 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2504 run = vc1_index_decode_table[codingset][index][0];
2505 level = vc1_index_decode_table[codingset][index][1];
2506 lst = index >= vc1_last_decode_table[codingset];
// Mode 1: same symbol plus a run-dependent level delta.
2509 level += vc1_last_delta_level_table[codingset][run];
2511 level += vc1_delta_level_table[codingset][run];
// Mode 2: same symbol plus a level-dependent run delta.
2514 run += vc1_last_delta_run_table[codingset][level] + 1;
2516 run += vc1_delta_run_table[codingset][level] + 1;
// Mode 3: explicit fields; first occurrence in a frame also reads the
// field-width configuration (table 59 vs prefix code).
2522 lst = get_bits(gb, 1);
2523 if(v->s.esc3_level_length == 0) {
2524 if(v->pq < 8 || v->dquantfrm) { // table 59
2525 v->s.esc3_level_length = get_bits(gb, 3);
2526 if(!v->s.esc3_level_length)
2527 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2529 v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
2531 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2533 run = get_bits(gb, v->s.esc3_run_length);
2534 sign = get_bits(gb, 1);
2535 level = get_bits(gb, v->s.esc3_level_length);
2546 /** Decode intra block in intra frames - should be faster than decode_intra_block
2547 * @param v VC1Context
2548 * @param block block to decode
2549 * @param coded are AC coeffs present or not
2550 * @param codingset set of VLC to decode data
/* Decode one intra block of an I frame (simple/main profile).
 * Reads the DC differential (MSMPEG4-style luma/chroma DC VLCs, with an
 * escape at index 119), applies DC prediction, then decodes AC
 * coefficients with a zigzag pattern chosen by the DC prediction
 * direction, applies AC prediction and saves the row/column of AC
 * coefficients for the next block's prediction.
 * NOTE(review): gapped listing — several conditions, else-branches and
 * closing braces are not visible here. */
2552 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
2554     GetBitContext *gb = &v->s.gb;
2555     MpegEncContext *s = &v->s;
2556     int dc_pred_dir = 0; /* Direction of the DC prediction used */
2559     int16_t *ac_val, *ac_val2;
2562     /* Get DC differential */
/* blocks 0-3 are luma, 4-5 chroma — presumably selected by n (branch not visible) */
2564         dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2566         dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2569         av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2574         if (dcdiff == 119 /* ESC index value */)
2576             /* TODO: Optimize */
/* escape: raw DC differential, width depends on picture quantizer */
2577             if (v->pq == 1) dcdiff = get_bits(gb, 10);
2578             else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2579             else dcdiff = get_bits(gb, 8);
/* low quantizers carry extra precision bits for the differential */
2584                 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2585             else if (v->pq == 2)
2586                 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
/* sign bit for the DC differential */
2588         if (get_bits(gb, 1))
2593     dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2596     /* Store the quantized DC coeff, used for prediction */
2598         block[0] = dcdiff * s->y_dc_scale;
2600         block[0] = dcdiff * s->c_dc_scale;
/* --- AC decoding path (block is coded) --- */
2613         int last = 0, skip, value;
2614         const int8_t *zz_table;
2618         scale = v->pq * 2 + v->halfpq;
/* zigzag pattern follows the DC prediction direction */
2622             zz_table = vc1_horizontal_zz;
2624             zz_table = vc1_vertical_zz;
2626             zz_table = vc1_normal_zz;
2628         ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2630         if(dc_pred_dir) //left
2633             ac_val -= 16 * s->block_wrap[n];
2636             vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2640             block[zz_table[i++]] = value;
2643         /* apply AC prediction if needed */
2645             if(dc_pred_dir) { //left
2646                 for(k = 1; k < 8; k++)
2647                     block[k << 3] += ac_val[k];
2649                 for(k = 1; k < 8; k++)
2650                     block[k] += ac_val[k + 8];
2653         /* save AC coeffs for further prediction */
2654         for(k = 1; k < 8; k++) {
2655             ac_val2[k] = block[k << 3];
2656             ac_val2[k + 8] = block[k];
2659         /* scale AC coeffs */
2660         for(k = 1; k < 64; k++)
2664                 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2667         if(s->ac_pred) i = 63;
/* --- no coded AC coefficients: predictors still have to be maintained --- */
2673         ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2676         scale = v->pq * 2 + v->halfpq;
2677         memset(ac_val2, 0, 16 * 2);
2678         if(dc_pred_dir) {//left
2681                 memcpy(ac_val2, ac_val, 8 * 2);
2683             ac_val -= 16 * s->block_wrap[n];
2685                 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2688         /* apply AC prediction if needed */
2690             if(dc_pred_dir) { //left
2691                 for(k = 1; k < 8; k++) {
2692                     block[k << 3] = ac_val[k] * scale;
2693                     if(!v->pquantizer && block[k << 3])
2694                         block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2697                 for(k = 1; k < 8; k++) {
2698                     block[k] = ac_val[k + 8] * scale;
2699                     if(!v->pquantizer && block[k])
2700                         block[k] += (block[k] < 0) ? -v->pq : v->pq;
2706     s->block_last_index[n] = i;
2711 /** Decode intra block in intra frames - advanced-profile variant (per-MB quantizer, availability-aware prediction)
2712 * @param v VC1Context
2713 * @param block block to decode
2714 * @param coded are AC coeffs present or not
2715 * @param codingset set of VLC to decode data
/* Decode one intra block of an I frame, advanced-profile variant.
 * Differs from vc1_decode_i_block in that the block quantizer (mquant)
 * is passed in per macroblock, A/C predictor availability comes from the
 * context, and AC/DC predictors from neighbours with a different
 * quantizer are rescaled via vc1_dqscale (q1 = this MB, q2 = neighbour).
 * NOTE(review): gapped listing — several conditions and braces are not
 * visible here. */
2717 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
2719     GetBitContext *gb = &v->s.gb;
2720     MpegEncContext *s = &v->s;
2721     int dc_pred_dir = 0; /* Direction of the DC prediction used */
2724     int16_t *ac_val, *ac_val2;
2726     int a_avail = v->a_avail, c_avail = v->c_avail;
2727     int use_pred = s->ac_pred;
2730     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2732     /* Get DC differential */
2734         dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2736         dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2739         av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2744         if (dcdiff == 119 /* ESC index value */)
2746             /* TODO: Optimize */
/* escape: raw DC differential, width depends on the MB quantizer */
2747             if (mquant == 1) dcdiff = get_bits(gb, 10);
2748             else if (mquant == 2) dcdiff = get_bits(gb, 9);
2749             else dcdiff = get_bits(gb, 8);
2754                 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2755             else if (mquant == 2)
2756                 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
/* sign bit for the DC differential */
2758         if (get_bits(gb, 1))
2763     dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2766     /* Store the quantized DC coeff, used for prediction */
2768         block[0] = dcdiff * s->y_dc_scale;
2770         block[0] = dcdiff * s->c_dc_scale;
2779     /* check if AC is needed at all and adjust direction if needed */
2780     if(!a_avail) dc_pred_dir = 1;
2781     if(!c_avail) dc_pred_dir = 0;
2782     if(!a_avail && !c_avail) use_pred = 0;
2783     ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2786     scale = mquant * 2 + v->halfpq;
2788     if(dc_pred_dir) //left
2791         ac_val -= 16 * s->block_wrap[n];
/* q1 = quantizer of this MB, q2 = quantizer of the predictor's MB */
2793     q1 = s->current_picture.qscale_table[mb_pos];
2794     if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
2795     if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
/* luma blocks 1-3 predict from inside the same MB: quantizers match */
2796     if(n && n<4) q2 = q1;
2799         int last = 0, skip, value;
2800         const int8_t *zz_table;
/* zigzag pattern follows the DC prediction direction */
2805             zz_table = vc1_horizontal_zz;
2807             zz_table = vc1_vertical_zz;
2809             zz_table = vc1_normal_zz;
2812             vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2816             block[zz_table[i++]] = value;
2819         /* apply AC prediction if needed */
2821             /* scale predictors if needed*/
/* neighbour used a different quantizer: rescale its AC predictors */
2826                 if(dc_pred_dir) { //left
2827                     for(k = 1; k < 8; k++)
2828                         block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2830                     for(k = 1; k < 8; k++)
2831                         block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2834                 if(dc_pred_dir) { //left
2835                     for(k = 1; k < 8; k++)
2836                         block[k << 3] += ac_val[k];
2838                     for(k = 1; k < 8; k++)
2839                         block[k] += ac_val[k + 8];
2843         /* save AC coeffs for further prediction */
2844         for(k = 1; k < 8; k++) {
2845             ac_val2[k] = block[k << 3];
2846             ac_val2[k + 8] = block[k];
2849         /* scale AC coeffs */
2850         for(k = 1; k < 64; k++)
2854                 block[k] += (block[k] < 0) ? -mquant : mquant;
2857         if(use_pred) i = 63;
2858     } else { // no AC coeffs
/* still maintain the AC predictor arrays, rescaling if quantizers differ */
2861         memset(ac_val2, 0, 16 * 2);
2862         if(dc_pred_dir) {//left
2864                 memcpy(ac_val2, ac_val, 8 * 2);
2868                     for(k = 1; k < 8; k++)
2869                         ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2874                 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2878                     for(k = 1; k < 8; k++)
2879                         ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2884         /* apply AC prediction if needed */
2886             if(dc_pred_dir) { //left
2887                 for(k = 1; k < 8; k++) {
2888                     block[k << 3] = ac_val2[k] * scale;
2889                     if(!v->pquantizer && block[k << 3])
2890                         block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
2893                 for(k = 1; k < 8; k++) {
2894                     block[k] = ac_val2[k + 8] * scale;
2895                     if(!v->pquantizer && block[k])
2896                         block[k] += (block[k] < 0) ? -mquant : mquant;
2902     s->block_last_index[n] = i;
2907 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2908 * @param v VC1Context
2909 * @param block block to decode
2910 * @param coded are AC coeffs present or not
2911 * @param mquant block quantizer
2912 * @param codingset set of VLC to decode data
/* Decode an intra block inside an inter (P/B) frame — the generic,
 * slower path. Same overall flow as vc1_decode_i_block_adv: DC VLC with
 * escape, DC prediction, AC decoding with quantizer-rescaled predictors.
 * Uses a single zigzag table (vc1_simple_progressive_8x8_zz) regardless
 * of prediction direction.
 * NOTE(review): gapped listing — several conditions and braces are not
 * visible here. */
2914 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
2916     GetBitContext *gb = &v->s.gb;
2917     MpegEncContext *s = &v->s;
2918     int dc_pred_dir = 0; /* Direction of the DC prediction used */
2921     int16_t *ac_val, *ac_val2;
2923     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2924     int a_avail = v->a_avail, c_avail = v->c_avail;
2925     int use_pred = s->ac_pred;
2929     /* XXX: Guard against dumb values of mquant */
/* NOTE(review): values < 1 clamp to 0, not 1 — verify that index 0 of the
 * dc_scale tables is intended here */
2930     mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
2932     /* Set DC scale - y and c use the same */
2933     s->y_dc_scale = s->y_dc_scale_table[mquant];
2934     s->c_dc_scale = s->c_dc_scale_table[mquant];
2936     /* Get DC differential */
2938         dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2940         dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2943         av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2948         if (dcdiff == 119 /* ESC index value */)
2950             /* TODO: Optimize */
2951             if (mquant == 1) dcdiff = get_bits(gb, 10);
2952             else if (mquant == 2) dcdiff = get_bits(gb, 9);
2953             else dcdiff = get_bits(gb, 8);
2958                 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2959             else if (mquant == 2)
2960                 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
/* sign bit for the DC differential */
2962         if (get_bits(gb, 1))
2967     dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2970     /* Store the quantized DC coeff, used for prediction */
2973         block[0] = dcdiff * s->y_dc_scale;
2975         block[0] = dcdiff * s->c_dc_scale;
2984     /* check if AC is needed at all and adjust direction if needed */
2985     if(!a_avail) dc_pred_dir = 1;
2986     if(!c_avail) dc_pred_dir = 0;
2987     if(!a_avail && !c_avail) use_pred = 0;
2988     ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2991     scale = mquant * 2 + v->halfpq;
2993     if(dc_pred_dir) //left
2996         ac_val -= 16 * s->block_wrap[n];
/* q1 = quantizer of this MB, q2 = quantizer of the predictor's MB */
2998     q1 = s->current_picture.qscale_table[mb_pos];
2999     if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
3000     if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
/* luma blocks 1-3 predict from inside the same MB: quantizers match */
3001     if(n && n<4) q2 = q1;
3004         int last = 0, skip, value;
3005         const int8_t *zz_table;
3008         zz_table = vc1_simple_progressive_8x8_zz;
3011             vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3015             block[zz_table[i++]] = value;
3018         /* apply AC prediction if needed */
3020             /* scale predictors if needed*/
/* neighbour used a different quantizer: rescale its AC predictors */
3025                 if(dc_pred_dir) { //left
3026                     for(k = 1; k < 8; k++)
3027                         block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3029                     for(k = 1; k < 8; k++)
3030                         block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3033                 if(dc_pred_dir) { //left
3034                     for(k = 1; k < 8; k++)
3035                         block[k << 3] += ac_val[k];
3037                     for(k = 1; k < 8; k++)
3038                         block[k] += ac_val[k + 8];
3042         /* save AC coeffs for further prediction */
3043         for(k = 1; k < 8; k++) {
3044             ac_val2[k] = block[k << 3];
3045             ac_val2[k + 8] = block[k];
3048         /* scale AC coeffs */
3049         for(k = 1; k < 64; k++)
3053                 block[k] += (block[k] < 0) ? -mquant : mquant;
3056         if(use_pred) i = 63;
3057     } else { // no AC coeffs
/* still maintain the AC predictor arrays, rescaling if quantizers differ */
3060         memset(ac_val2, 0, 16 * 2);
3061         if(dc_pred_dir) {//left
3063                 memcpy(ac_val2, ac_val, 8 * 2);
3067                     for(k = 1; k < 8; k++)
3068                         ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3073                 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3077                     for(k = 1; k < 8; k++)
3078                         ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3083         /* apply AC prediction if needed */
3085             if(dc_pred_dir) { //left
3086                 for(k = 1; k < 8; k++) {
3087                     block[k << 3] = ac_val2[k] * scale;
3088                     if(!v->pquantizer && block[k << 3])
3089                         block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3092                 for(k = 1; k < 8; k++) {
3093                     block[k] = ac_val2[k + 8] * scale;
3094                     if(!v->pquantizer && block[k])
3095                         block[k] += (block[k] < 0) ? -mquant : mquant;
3101     s->block_last_index[n] = i;
/* Decode one inter (residual) block of a P frame.
 * Determines the block transform type (8x8, two 8x4, two 4x8 or four
 * 4x4) from ttmb / a per-block VLC, decodes a subblock coded-pattern
 * where applicable, then decodes coefficients per subblock and runs the
 * matching inverse transform.
 * NOTE(review): gapped listing — the switch dispatch over ttblk and some
 * loop bodies are not fully visible. */
3108 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block)
3110     MpegEncContext *s = &v->s;
3111     GetBitContext *gb = &s->gb;
3114     int scale, off, idx, last, skip, value;
3115     int ttblk = ttmb & 7;
/* per-block transform type signalled by its own VLC */
3118         ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3120     if(ttblk == TT_4X4) {
3121         subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3123     if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
3124         subblkpat = decode012(gb);
3125         if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
3126         if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
3127         if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
3129     scale = 2 * mquant + v->halfpq;
3131     // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3132     if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3133         subblkpat = 2 - (ttblk == TT_8X4_TOP);
3136     if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3137         subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* --- 8x8: single transform over the whole block --- */
3145             vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3149             idx = vc1_simple_progressive_8x8_zz[i++];
3150             block[idx] = value * scale;
3152                 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3154         s->dsp.vc1_inv_trans_8x8(block);
/* --- 4x4: up to four subblocks, selected by subblkpat --- */
3157         for(j = 0; j < 4; j++) {
3158             last = subblkpat & (1 << (3 - j));
3160             off = (j & 1) * 4 + (j & 2) * 16;
3162                 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3166                 idx = vc1_simple_progressive_4x4_zz[i++];
3167                 block[idx + off] = value * scale;
3169                     block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3171             if(!(subblkpat & (1 << (3 - j))))
3172                 s->dsp.vc1_inv_trans_4x4(block, j);
/* --- 8x4: top/bottom halves --- */
3176         for(j = 0; j < 2; j++) {
3177             last = subblkpat & (1 << (1 - j));
3181                 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
/* advanced profile uses its own 8x4 zigzag */
3185                 if(v->profile < PROFILE_ADVANCED)
3186                     idx = vc1_simple_progressive_8x4_zz[i++];
3188                     idx = vc1_adv_progressive_8x4_zz[i++];
3189                 block[idx + off] = value * scale;
3191                     block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3193             if(!(subblkpat & (1 << (1 - j))))
3194                 s->dsp.vc1_inv_trans_8x4(block, j);
/* --- 4x8: left/right halves --- */
3198         for(j = 0; j < 2; j++) {
3199             last = subblkpat & (1 << (1 - j));
3203                 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3207                 if(v->profile < PROFILE_ADVANCED)
3208                     idx = vc1_simple_progressive_4x8_zz[i++];
3210                     idx = vc1_adv_progressive_4x8_zz[i++];
3211                 block[idx + off] = value * scale;
3213                     block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3215             if(!(subblkpat & (1 << (1 - j))))
3216                 s->dsp.vc1_inv_trans_4x8(block, j);
3224 /** Decode one P-frame MB (in Simple/Main profile)
/* Decode one P-frame macroblock (simple/main profile).
 * Handles 1MV and 4MV modes, skipped MBs, per-block intra/inter decode,
 * motion compensation, overlap filtering and writing pixels to s->dest.
 * NOTE(review): gapped listing — many branch headers, braces and the
 * final return are not visible here. */
3226 static int vc1_decode_p_mb(VC1Context *v)
3228     MpegEncContext *s = &v->s;
3229     GetBitContext *gb = &s->gb;
3231     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3232     int cbp; /* cbp decoding stuff */
3233     int mqdiff, mquant; /* MB quantization */
3234     int ttmb = v->ttfrm; /* MB Transform type */
3237     static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3238       offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3239     int mb_has_coeffs = 1; /* last_flag */
3240     int dmv_x, dmv_y; /* Differential MV components */
3241     int index, index1; /* LUT indices */
3242     int val, sign; /* temp values */
3243     int first_block = 1;
3245     int skipped, fourmv;
3247     mquant = v->pq; /* Lossy initialization */
/* MV-type and skip flags come either raw from the bitstream or from
 * bitplanes decoded at frame level */
3249     if (v->mv_type_is_raw)
3250         fourmv = get_bits1(gb);
3252         fourmv = v->mv_type_mb_plane[mb_pos];
3254         skipped = get_bits1(gb);
3256         skipped = v->s.mbskip_table[mb_pos];
3258     s->dsp.clear_blocks(s->block[0]);
3260     if (!fourmv) /* 1MV mode */
3264             GET_MVDATA(dmv_x, dmv_y);
3267                 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3268                 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3270             s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3271             vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3273             /* FIXME Set DC val for inter block ? */
3274             if (s->mb_intra && !mb_has_coeffs)
3277                 s->ac_pred = get_bits(gb, 1);
3280             else if (mb_has_coeffs)
3282                 if (s->mb_intra) s->ac_pred = get_bits(gb, 1);
3283                 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3291             s->current_picture.qscale_table[mb_pos] = mquant;
3293             if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3294                 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table,
3295                                 VC1_TTMB_VLC_BITS, 2);
3296             if(!s->mb_intra) vc1_mc_1mv(v, 0);
/* per-block decode loop (header not visible): i indexes the 6 blocks */
3300                 s->dc_val[0][s->block_index[i]] = 0;
3302                 val = ((cbp >> (5 - i)) & 1);
3303                 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3304                 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3306                     /* check if prediction blocks A and C are available */
3307                     v->a_avail = v->c_avail = 0;
3308                     if(i == 2 || i == 3 || !s->first_slice_line)
3309                         v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3310                     if(i == 1 || i == 3 || s->mb_x)
3311                         v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3313                     vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3314                     if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3315                     s->dsp.vc1_inv_trans_8x8(s->block[i]);
3316                     if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3317                     for(j = 0; j < 64; j++) s->block[i][j] += 128;
3318                     s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3319                     if(v->pq >= 9 && v->overlap) {
3321                             s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
3323                             s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
/* inter block: decode residual and add it to the MC prediction */
3326                     vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3327                     if(!v->ttmbf && ttmb < 8) ttmb = -1;
3329                     if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3330                         s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* skipped 1MV macroblock: reset types/DC and just motion-compensate */
3337             for(i = 0; i < 6; i++) {
3338                 v->mb_type[0][s->block_index[i]] = 0;
3339                 s->dc_val[0][s->block_index[i]] = 0;
3341             s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3342             s->current_picture.qscale_table[mb_pos] = 0;
3343             vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
/* --- 4MV mode --- */
3350         if (!skipped /* unskipped MB */)
3352             int intra_count = 0, coded_inter = 0;
3353             int is_intra[6], is_coded[6];
3355             cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3358                 val = ((cbp >> (5 - i)) & 1);
3359                 s->dc_val[0][s->block_index[i]] = 0;
3366                     GET_MVDATA(dmv_x, dmv_y);
3368                     vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3369                     if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3370                     intra_count += s->mb_intra;
3371                     is_intra[i] = s->mb_intra;
3372                     is_coded[i] = mb_has_coeffs;
/* chroma blocks are intra if at least 3 luma blocks are intra */
3375                     is_intra[i] = (intra_count >= 3);
3378                 if(i == 4) vc1_mc_4mv_chroma(v);
3379                 v->mb_type[0][s->block_index[i]] = is_intra[i];
3380                 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
3382             // if there are no coded blocks then don't do anything more
3383             if(!intra_count && !coded_inter) return 0;
3386             s->current_picture.qscale_table[mb_pos] = mquant;
3387             /* test if block is intra and has pred */
3392                     if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3393                         || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
3398             if(intrapred)s->ac_pred = get_bits(gb, 1);
3399             else s->ac_pred = 0;
3401             if (!v->ttmbf && coded_inter)
3402                 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3406                 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3407                 s->mb_intra = is_intra[i];
3409                     /* check if prediction blocks A and C are available */
3410                     v->a_avail = v->c_avail = 0;
3411                     if(i == 2 || i == 3 || !s->first_slice_line)
3412                         v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3413                     if(i == 1 || i == 3 || s->mb_x)
3414                         v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3416                     vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
3417                     if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3418                     s->dsp.vc1_inv_trans_8x8(s->block[i]);
3419                     if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3420                     for(j = 0; j < 64; j++) s->block[i][j] += 128;
3421                     s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3422                     if(v->pq >= 9 && v->overlap) {
3424                             s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
3426                             s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
3428                 } else if(is_coded[i]) {
3429                     status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3430                     if(!v->ttmbf && ttmb < 8) ttmb = -1;
3432                     if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3433                         s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* skipped 4MV macroblock: per-block MC with zero differentials */
3441             s->current_picture.qscale_table[mb_pos] = 0;
3442             for (i=0; i<6; i++) {
3443                 v->mb_type[0][s->block_index[i]] = 0;
3444                 s->dc_val[0][s->block_index[i]] = 0;
3448                     vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
3449                     vc1_mc_4mv_luma(v, i);
3451             vc1_mc_4mv_chroma(v);
3452             s->current_picture.qscale_table[mb_pos] = 0;
3457     /* Should never happen */
3461 /** Decode one B-frame MB (in Main profile)
/* Decode one B-frame macroblock (main profile).
 * Handles direct and non-direct MBs, skipped MBs, the forward/backward/
 * interpolated MV types (bmvtype), per-block intra/inter decode and
 * motion compensation via vc1_pred_b_mv / vc1_b_mc.
 * NOTE(review): gapped listing — many branch headers and braces are not
 * visible here. */
3463 static void vc1_decode_b_mb(VC1Context *v)
3465     MpegEncContext *s = &v->s;
3466     GetBitContext *gb = &s->gb;
3468     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3469     int cbp = 0; /* cbp decoding stuff */
3470     int mqdiff, mquant; /* MB quantization */
3471     int ttmb = v->ttfrm; /* MB Transform type */
3473     static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3474       offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3475     int mb_has_coeffs = 0; /* last_flag */
3476     int index, index1; /* LUT indices */
3477     int val, sign; /* temp values */
3478     int first_block = 1;
3480     int skipped, direct;
3481     int dmv_x[2], dmv_y[2];
3482     int bmvtype = BMV_TYPE_BACKWARD;
3484     mquant = v->pq; /* Lossy initialization */
/* direct and skip flags: raw from bitstream or from frame-level bitplanes */
3488         direct = get_bits1(gb);
3490         direct = v->direct_mb_plane[mb_pos];
3492         skipped = get_bits1(gb);
3494         skipped = v->s.mbskip_table[mb_pos];
3496     s->dsp.clear_blocks(s->block[0]);
3497     dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
3498     for(i = 0; i < 6; i++) {
3499         v->mb_type[0][s->block_index[i]] = 0;
3500         s->dc_val[0][s->block_index[i]] = 0;
3502     s->current_picture.qscale_table[mb_pos] = 0;
3506         GET_MVDATA(dmv_x[0], dmv_y[0]);
3507         dmv_x[1] = dmv_x[0];
3508         dmv_y[1] = dmv_y[0];
/* non-direct MB: decode MV type (forward/backward/interpolated) */
3510     if(skipped || !s->mb_intra) {
3511         bmvtype = decode012(gb);
/* half-swapped depending on the B fraction (temporal position) */
3514             bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
3517             bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
3520             bmvtype = BMV_TYPE_INTERPOLATED;
3521             dmv_x[1] = dmv_y[1] = 0;
3525         for(i = 0; i < 6; i++)
3526             v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* skipped MB: predict MVs and motion-compensate, nothing else to decode */
3529         if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
3530         vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3531         vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3535             cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3539             s->current_picture.qscale_table[mb_pos] = mquant;
3541                 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3542             dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
3543             vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3544             vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3546             if(!mb_has_coeffs && !s->mb_intra) {
3547                 /* no coded blocks - effectively skipped */
3548                 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3549                 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3552             if(s->mb_intra && !mb_has_coeffs) {
3554                 s->current_picture.qscale_table[mb_pos] = mquant;
3555                 s->ac_pred = get_bits1(gb);
3557                 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* interpolated mode carries a second MV differential */
3559                 if(bmvtype == BMV_TYPE_INTERPOLATED) {
3560                     GET_MVDATA(dmv_x[1], dmv_y[1]);
3561                     if(!mb_has_coeffs) {
3562                         /* interpolated skipped block */
3563                         vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3564                         vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3568                 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3570                     vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3573                     s->ac_pred = get_bits1(gb);
3574                 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3576                 s->current_picture.qscale_table[mb_pos] = mquant;
3577                 if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3578                     ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* per-block decode loop (header not visible): i indexes the 6 blocks */
3584         s->dc_val[0][s->block_index[i]] = 0;
3586         val = ((cbp >> (5 - i)) & 1);
3587         off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3588         v->mb_type[0][s->block_index[i]] = s->mb_intra;
3590             /* check if prediction blocks A and C are available */
3591             v->a_avail = v->c_avail = 0;
3592             if(i == 2 || i == 3 || !s->first_slice_line)
3593                 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3594             if(i == 1 || i == 3 || s->mb_x)
3595                 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3597             vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3598             if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3599             s->dsp.vc1_inv_trans_8x8(s->block[i]);
3600             if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3601             for(j = 0; j < 64; j++) s->block[i][j] += 128;
3602             s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3604             vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3605             if(!v->ttmbf && ttmb < 8) ttmb = -1;
3607             if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3608                 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3613 /** Decode blocks of I-frame
/* Decode all macroblocks of an I frame (simple/main profile).
 * Selects the AC coding sets, walks the MB grid, decodes each of the six
 * blocks per MB, writes pixels and applies the overlap filter when
 * pq >= 9 and overlap is enabled.
 * NOTE(review): gapped listing — several braces and conditions are not
 * visible here. */
3615 static void vc1_decode_i_blocks(VC1Context *v)
3618     MpegEncContext *s = &v->s;
3623     /* select codingmode used for VLC tables selection */
3624     switch(v->y_ac_table_index){
3626         v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3629         v->codingset = CS_HIGH_MOT_INTRA;
3632         v->codingset = CS_MID_RATE_INTRA;
3636     switch(v->c_ac_table_index){
3638         v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3641         v->codingset2 = CS_HIGH_MOT_INTER;
3644         v->codingset2 = CS_MID_RATE_INTER;
3648     /* Set DC scale - y and c use the same */
3649     s->y_dc_scale = s->y_dc_scale_table[v->pq];
3650     s->c_dc_scale = s->c_dc_scale_table[v->pq];
3653     s->mb_x = s->mb_y = 0;
3655     s->first_slice_line = 1;
3656     ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3657     for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3658         for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3659             ff_init_block_index(s);
3660             ff_update_block_index(s);
3661             s->dsp.clear_blocks(s->block[0]);
/* NOTE(review): uses mb_width here, while every other function in this
 * file indexes qscale_table/mb_type with mb_stride — verify this is
 * intentional and not an indexing bug */
3662             mb_pos = s->mb_x + s->mb_y * s->mb_width;
3663             s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3664             s->current_picture.qscale_table[mb_pos] = v->pq;
3666             // do actual MB decoding and displaying
3667             cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
3668             v->s.ac_pred = get_bits(&v->s.gb, 1);
3670             for(k = 0; k < 6; k++) {
3671                 val = ((cbp >> (5 - k)) & 1);
/* luma coded bits are predicted from neighbours (see vc1_coded_block_pred) */
3674                     int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3678                     cbp |= val << (5 - k);
3680                 vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
3682                 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3683                 if(v->pq >= 9 && v->overlap) {
3684                     for(j = 0; j < 64; j++) s->block[k][j] += 128;
3688             vc1_put_block(v, s->block);
/* overlap smoothing across block edges (top row / left column skipped) */
3689             if(v->pq >= 9 && v->overlap) {
3690                 if(!s->first_slice_line) {
3691                     s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
3692                     s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
3693                     if(!(s->flags & CODEC_FLAG_GRAY)) {
3694                         s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
3695                         s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
3698                 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
3699                 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3701                     s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
3702                     s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
3703                     if(!(s->flags & CODEC_FLAG_GRAY)) {
3704                         s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
3705                         s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
3708                 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
3709                 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
/* bail out if the bitstream position ran past the picture size */
3712             if(get_bits_count(&s->gb) > v->bits) {
3713                 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3717         ff_draw_horiz_band(s, s->mb_y * 16, 16);
3718         s->first_slice_line = 0;
3722 /** Decode blocks of I-frame for advanced profile
/* Decode all macroblocks of an I frame, advanced profile.
 * Like vc1_decode_i_blocks, but AC prediction and overlap flags may come
 * from bitplanes (acpred_plane / over_flags_plane), conditional overlap
 * (condover) is supported, and per-MB quantizer is stored in
 * qscale_table before calling vc1_decode_i_block_adv.
 * NOTE(review): gapped listing — several braces and conditions are not
 * visible here. */
3724 static void vc1_decode_i_blocks_adv(VC1Context *v)
3727     MpegEncContext *s = &v->s;
3734     GetBitContext *gb = &s->gb;
3736     /* select codingmode used for VLC tables selection */
3737     switch(v->y_ac_table_index){
3739         v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3742         v->codingset = CS_HIGH_MOT_INTRA;
3745         v->codingset = CS_MID_RATE_INTRA;
3749     switch(v->c_ac_table_index){
3751         v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3754         v->codingset2 = CS_HIGH_MOT_INTER;
3757         v->codingset2 = CS_MID_RATE_INTER;
3761     /* Set DC scale - y and c use the same */
3762     s->y_dc_scale = s->y_dc_scale_table[v->pq];
3763     s->c_dc_scale = s->c_dc_scale_table[v->pq];
3766     s->mb_x = s->mb_y = 0;
3768     s->first_slice_line = 1;
3769     ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3770     for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3771         for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3772             ff_init_block_index(s);
3773             ff_update_block_index(s);
3774             s->dsp.clear_blocks(s->block[0]);
3775             mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3776             s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3778             // do actual MB decoding and displaying
3779             cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
/* AC prediction flag: raw bit or frame-level bitplane */
3780             if(v->acpred_is_raw)
3781                 v->s.ac_pred = get_bits(&v->s.gb, 1);
3783                 v->s.ac_pred = v->acpred_plane[mb_pos];
/* conditional overlap: per-MB flag only in CONDOVER_SELECT mode */
3785             if(v->condover == CONDOVER_SELECT) {
3786                 if(v->overflg_is_raw)
3787                     overlap = get_bits(&v->s.gb, 1);
3789                     overlap = v->over_flags_plane[mb_pos];
3791                 overlap = (v->condover == CONDOVER_ALL);
3795             s->current_picture.qscale_table[mb_pos] = mquant;
3797             for(k = 0; k < 6; k++) {
3798                 val = ((cbp >> (5 - k)) & 1);
3801                     int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3805                     cbp |= val << (5 - k);
/* A/C predictor availability for this block within the MB grid */
3807                 v->a_avail = !s->first_slice_line || (k==2 || k==3);
3808                 v->c_avail = !!s->mb_x || (k==1 || k==3);
3810                 vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
3812                 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3813                 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3816             vc1_put_block(v, s->block);
/* overlap smoothing across block edges (top row / left column skipped) */
3818                 if(!s->first_slice_line) {
3819                     s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
3820                     s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
3821                     if(!(s->flags & CODEC_FLAG_GRAY)) {
3822                         s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
3823                         s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
3826                 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
3827                 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3829                     s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
3830                     s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
3831                     if(!(s->flags & CODEC_FLAG_GRAY)) {
3832                         s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
3833                         s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
3836                 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
3837                 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
/* bail out if the bitstream position ran past the picture size */
3840             if(get_bits_count(&s->gb) > v->bits) {
3841                 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3845         ff_draw_horiz_band(s, s->mb_y * 16, 16);
3846         s->first_slice_line = 0;
/** Decode all macroblocks of a P frame.
 *  Selects the intra/inter AC coding sets from v->c_ac_table_index (with a
 *  pqindex-dependent choice for index 0), registers the whole picture as one
 *  error-resilience slice, then iterates over every macroblock.
 *  NOTE(review): this listing elides several lines (switch case labels, the
 *  per-MB decode call, closing braces) — see the gaps in the original numbering.
 */
3850 static void vc1_decode_p_blocks(VC1Context *v)
3852 MpegEncContext *s = &v->s;
3854 /* select codingmode used for VLC tables selection */
3855 switch(v->c_ac_table_index){
3857 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3860 v->codingset = CS_HIGH_MOT_INTRA;
3863 v->codingset = CS_MID_RATE_INTRA;
/* same index drives the second (inter) coding set selection */
3867 switch(v->c_ac_table_index){
3869 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3872 v->codingset2 = CS_HIGH_MOT_INTER;
3875 v->codingset2 = CS_MID_RATE_INTER;
/* mark the entire picture as a single slice for the error concealment code */
3879 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3880 s->first_slice_line = 1;
3881 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3882 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3883 ff_init_block_index(s);
3884 ff_update_block_index(s);
3885 s->dsp.clear_blocks(s->block[0]);
/* abort decoding when the bit reader has consumed more than the frame's bit budget */
3888 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3889 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3893 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3894 s->first_slice_line = 0;
/** Decode all macroblocks of a B frame.
 *  Mirrors vc1_decode_p_blocks(): chooses the intra/inter AC coding sets from
 *  v->c_ac_table_index, registers one whole-picture error-resilience slice and
 *  loops over all macroblocks.
 *  NOTE(review): switch case labels and the per-MB decode call are elided in
 *  this listing (numbering gaps).
 */
3898 static void vc1_decode_b_blocks(VC1Context *v)
3900 MpegEncContext *s = &v->s;
3902 /* select codingmode used for VLC tables selection */
3903 switch(v->c_ac_table_index){
3905 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3908 v->codingset = CS_HIGH_MOT_INTRA;
3911 v->codingset = CS_MID_RATE_INTRA;
/* second (inter) coding set from the same table index */
3915 switch(v->c_ac_table_index){
3917 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3920 v->codingset2 = CS_HIGH_MOT_INTER;
3923 v->codingset2 = CS_MID_RATE_INTER;
/* whole picture is one slice for error concealment purposes */
3927 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3928 s->first_slice_line = 1;
3929 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3930 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3931 ff_init_block_index(s);
3932 ff_update_block_index(s);
3933 s->dsp.clear_blocks(s->block[0]);
/* stop when more bits were consumed than the frame contains */
3936 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3937 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3941 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3942 s->first_slice_line = 0;
/** Handle a skipped P frame: reproduce the previous picture unchanged.
 *  Copies the luma and both chroma planes of last_picture into the current
 *  destination, one macroblock row at a time, then forces pict_type to P so
 *  the frame is treated as a (trivial) P frame downstream.
 */
3946 static void vc1_decode_skip_blocks(VC1Context *v)
3948 MpegEncContext *s = &v->s;
3950 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3951 s->first_slice_line = 1;
3952 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3954 ff_init_block_index(s);
3955 ff_update_block_index(s);
/* copy one 16-pixel-high luma row and the matching 8-pixel chroma rows */
3956 memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
3957 memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3958 memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3959 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3960 s->first_slice_line = 0;
3962 s->pict_type = P_TYPE;
/** Dispatch macroblock decoding according to the picture type.
 *  I frames use the advanced-profile path when applicable; skipped P frames
 *  are handled by vc1_decode_skip_blocks().
 *  NOTE(review): the switch case labels are elided in this listing.
 */
3965 static void vc1_decode_blocks(VC1Context *v)
/* reset the escape-mode level length carried between blocks */
3968 v->s.esc3_level_length = 0;
3970 switch(v->s.pict_type) {
3972 if(v->profile == PROFILE_ADVANCED)
3973 vc1_decode_i_blocks_adv(v);
3975 vc1_decode_i_blocks(v);
3978 if(v->p_frame_skipped)
3979 vc1_decode_skip_blocks(v);
3981 vc1_decode_p_blocks(v);
3985 vc1_decode_i_blocks(v);
3987 vc1_decode_b_blocks(v);
3993 /** Initialize a VC1/WMV3 decoder
3994 * @todo TODO: Handle VC-1 IDUs (Transport level?)
3995 * @todo TODO: Decipher remaining bits in extra_data
3997 static int vc1_decode_init(AVCodecContext *avctx)
3999 VC1Context *v = avctx->priv_data;
4000 MpegEncContext *s = &v->s;
/* a sequence header in extradata is mandatory for both WMV3 and VC-1 */
4003 if (!avctx->extradata_size || !avctx->extradata) return -1;
4004 if (!(avctx->flags & CODEC_FLAG_GRAY))
4005 avctx->pix_fmt = PIX_FMT_YUV420P;
4007 avctx->pix_fmt = PIX_FMT_GRAY8;
/* this decoder does not pad picture edges itself, so force emulated edges */
4009 avctx->flags |= CODEC_FLAG_EMU_EDGE;
4010 v->s.flags |= CODEC_FLAG_EMU_EDGE;
4012 if(ff_h263_decode_init(avctx) < 0)
4014 if (vc1_init_common(v) < 0) return -1;
4016 avctx->coded_width = avctx->width;
4017 avctx->coded_height = avctx->height;
4018 if (avctx->codec_id == CODEC_ID_WMV3)
4022 // looks like WMV3 has a sequence header stored in the extradata
4023 // advanced sequence header may be before the first frame
4024 // the last byte of the extradata is a version number, 1 for the
4025 // samples we can decode
4027 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
4029 if (decode_sequence_header(avctx, &gb) < 0)
/* report any bits left over (or overread) after the sequence header */
4032 count = avctx->extradata_size*8 - get_bits_count(&gb);
4035 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
4036 count, get_bits(&gb, count));
4040 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
4042 } else { // VC1/WVC1
4043 int edata_size = avctx->extradata_size;
4044 uint8_t *edata = avctx->extradata;
4046 if(avctx->extradata_size < 16) {
4047 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", edata_size);
/* scan extradata for the 0x0000010F sequence-header start code */
4050 while(edata_size > 8) {
4051 // test if we've found header
4052 if(BE_32(edata) == 0x0000010F) {
4061 init_get_bits(&gb, edata, edata_size*8);
4063 if (decode_sequence_header(avctx, &gb) < 0)
/* then scan the remainder for the 0x0000010E entry-point start code */
4066 while(edata_size > 8) {
4067 // test if we've found entry point
4068 if(BE_32(edata) == 0x0000010E) {
4077 init_get_bits(&gb, edata, edata_size*8);
4079 if (decode_entry_point(avctx, &gb) < 0)
4082 avctx->has_b_frames= !!(avctx->max_b_frames);
/* macroblock dimensions, rounded up to a multiple of 16 pixels */
4084 s->mb_width = (avctx->coded_width+15)>>4;
4085 s->mb_height = (avctx->coded_height+15)>>4;
4087 /* Allocate mb bitplanes */
4088 v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4089 v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4090 v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
4091 v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
/* NOTE(review): the av_malloc() results above and below are not NULL-checked
   in the visible code — confirm allocation failure is handled elsewhere */
4093 /* allocate block type info in that way so it could be used with s->block_index[] */
4094 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
4095 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
4096 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
4097 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
4099 /* Init coded blocks info */
4100 if (v->profile == PROFILE_ADVANCED)
4102 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
4104 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
4112 /** Decode a VC1/WMV3 frame
4113 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4115 static int vc1_decode_frame(AVCodecContext *avctx,
4116 void *data, int *data_size,
4117 uint8_t *buf, int buf_size)
4119 VC1Context *v = avctx->priv_data;
4120 MpegEncContext *s = &v->s;
4121 AVFrame *pict = data;
4122 uint8_t *buf2 = NULL;
4124 /* no supplementary picture */
4125 if (buf_size == 0) {
4126 /* special case for last picture */
4127 if (s->low_delay==0 && s->next_picture_ptr) {
4128 *pict= *(AVFrame*)s->next_picture_ptr;
4129 s->next_picture_ptr= NULL;
4131 *data_size = sizeof(AVFrame);
4137 //we need to set current_picture_ptr before reading the header, otherwise we can't store anything in there
4138 if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
4139 int i= ff_find_unused_picture(s, 0);
4140 s->current_picture_ptr= &s->picture[i];
4143 avctx->has_b_frames= !s->low_delay;
4145 //for advanced profile we need to unescape buffer
4146 if (avctx->codec_id == CODEC_ID_VC1) {
4148 buf2 = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
/* strip 0x03 emulation-prevention bytes that follow a 0x00 0x00 pair */
4150 for(i = 0; i < buf_size; i++) {
4151 if(buf[i] == 3 && i >= 2 && !buf[i-1] && !buf[i-2] && i < buf_size-1 && buf[i+1] < 4) {
4152 buf2[buf_size2++] = buf[i+1];
4155 buf2[buf_size2++] = buf[i];
4157 init_get_bits(&s->gb, buf2, buf_size2*8);
4159 init_get_bits(&s->gb, buf, buf_size*8);
4160 // do parse frame header
4161 if(v->profile < PROFILE_ADVANCED) {
4162 if(vc1_parse_frame_header(v, &s->gb) == -1) {
4163 if(buf2)av_free(buf2);
4167 if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
4168 if(buf2)av_free(buf2);
/* only I frames are decodable without the res_rtm_flag set */
4173 if(s->pict_type != I_TYPE && !v->res_rtm_flag){
4174 if(buf2)av_free(buf2);
4179 s->current_picture.pict_type= s->pict_type;
4180 s->current_picture.key_frame= s->pict_type == I_TYPE;
4182 /* skip B-frames if we don't have reference frames */
4183 if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){
4184 if(buf2)av_free(buf2);
4185 return -1;//buf_size;
4187 /* skip b frames if we are in a hurry */
4188 if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size;
4189 if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
4190 || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
4191 || avctx->skip_frame >= AVDISCARD_ALL) {
4192 if(buf2)av_free(buf2);
4195 /* skip everything if we are in a hurry>=5 */
4196 if(avctx->hurry_up>=5) {
4197 if(buf2)av_free(buf2);
4198 return -1;//buf_size;
4201 if(s->next_p_frame_damaged){
4202 if(s->pict_type==B_TYPE)
4205 s->next_p_frame_damaged=0;
4208 if(MPV_frame_start(s, avctx) < 0) {
4209 if(buf2)av_free(buf2);
4213 ff_er_frame_start(s);
/* frame bit budget used by the block decoders for overconsumption checks */
4215 v->bits = buf_size * 8;
4216 vc1_decode_blocks(v);
4217 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8);
4218 // if(get_bits_count(&s->gb) > buf_size * 8)
4224 assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
4225 assert(s->current_picture.pict_type == s->pict_type);
/* B frames (and low_delay streams) are output immediately; otherwise the
   previous reference picture is returned */
4226 if (s->pict_type == B_TYPE || s->low_delay) {
4227 *pict= *(AVFrame*)s->current_picture_ptr;
4228 } else if (s->last_picture_ptr != NULL) {
4229 *pict= *(AVFrame*)s->last_picture_ptr;
4232 if(s->last_picture_ptr || s->low_delay){
4233 *data_size = sizeof(AVFrame);
4234 ff_print_debug_info(s, pict);
4237 /* Return the Picture timestamp as the frame number */
4238 /* we subtract 1 because it is added on utils.c */
4239 avctx->frame_number = s->picture_number - 1;
4241 if(buf2)av_free(buf2);
4246 /** Close a VC1/WMV3 decoder
4247 * @warning Initial try at using MpegEncContext stuff
4249 static int vc1_decode_end(AVCodecContext *avctx)
4251 VC1Context *v = avctx->priv_data;
/* free HRD parameter arrays allocated during sequence-header parsing */
4253 av_freep(&v->hrd_rate);
4254 av_freep(&v->hrd_buffer);
/* tear down the shared MPEG decoder state */
4255 MPV_common_end(&v->s);
/* release the per-macroblock bitplanes and block-type info allocated in init */
4256 av_freep(&v->mv_type_mb_plane);
4257 av_freep(&v->direct_mb_plane);
4258 av_freep(&v->acpred_plane);
4259 av_freep(&v->over_flags_plane);
4260 av_freep(&v->mb_type_base);
4265 AVCodec vc1_decoder = {
4278 AVCodec wmv3_decoder = {