2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 * VC-1 and WMV3 decoder
30 #include "mpegvideo.h"
32 #include "vc1acdata.h"
37 extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
38 extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
39 extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
40 #define MB_INTRA_VLC_BITS 9
41 extern VLC ff_msmp4_mb_i_vlc;
42 extern const uint16_t ff_msmp4_mb_i_table[64][2];
45 static const uint16_t table_mb_intra[64][2];
48 /** Available Profiles */
53 PROFILE_COMPLEX, ///< TODO: WMV9 specific
58 /** Sequence quantizer mode */
61 QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level
62 QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level
63 QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames
64 QUANT_UNIFORM ///< Uniform quant used for all frames
68 /** Where quant can be changed */
72 DQPROFILE_DOUBLE_EDGES,
73 DQPROFILE_SINGLE_EDGE,
78 /** @name Where quant can be changed
89 /** Which pair of edges is quantized with ALTPQUANT */
92 DQDOUBLE_BEDGE_TOPLEFT,
93 DQDOUBLE_BEDGE_TOPRIGHT,
94 DQDOUBLE_BEDGE_BOTTOMRIGHT,
95 DQDOUBLE_BEDGE_BOTTOMLEFT
99 /** MV modes for P frames */
102 MV_PMODE_1MV_HPEL_BILIN,
106 MV_PMODE_INTENSITY_COMP
110 /** @name MV types for B frames */
115 BMV_TYPE_INTERPOLATED
119 /** @name Block types for P/B frames */
121 enum TransformTypes {
125 TT_8X4, //Both halves
128 TT_4X8, //Both halves
/* NOTE(review): this listing appears sampled — several closing braces of the
 * table initializers below are missing from view; code bytes left untouched. */
133 /** Table for conversion between TTBLK and TTMB */
134 static const int ttblk_to_tt[3][8] = {
135 { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
136 { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
137 { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
/* Maps the 2-bit TTFRM frame-level transform type to a TransformTypes value. */
140 static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };
142 /** MV P mode - the 5th element is only used for mode 1 */
143 static const uint8_t mv_pmode_table[2][5] = {
144 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
145 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
/* Variant of the table above without the intensity-compensation entry —
 * presumably used after INTENSITY_COMP has already been signalled (TODO confirm). */
147 static const uint8_t mv_pmode_table2[2][4] = {
148 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
149 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
152 /** One more frame type */
/* Frame-rate numerators/denominators used when the header carries an
 * enumerated frame rate (29.97 etc. built from nr/dr pairs). */
155 static const int fps_nr[5] = { 24, 25, 30, 50, 60 },
156 fps_dr[2] = { 1000, 1001 };
/* PQINDEX -> PQUANT mapping, one row per quantizer signalling mode. */
157 static const uint8_t pquant_table[3][32] = {
158 { /* Implicit quantizer */
159 0, 1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8, 9, 10, 11, 12,
160 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
162 { /* Explicit quantizer, pquantizer uniform */
163 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
164 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
166 { /* Explicit quantizer, pquantizer non-uniform */
167 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
168 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
172 /** @name VC-1 VLC tables and defines
173 * @todo TODO move this into the context
/* File-scope VLC tables, filled once by vc1_init_common(); the *_VLC_BITS
 * defines give the first-stage lookup width passed to init_vlc(). */
176 #define VC1_BFRACTION_VLC_BITS 7
177 static VLC vc1_bfraction_vlc;
178 #define VC1_IMODE_VLC_BITS 4
179 static VLC vc1_imode_vlc;
180 #define VC1_NORM2_VLC_BITS 3
181 static VLC vc1_norm2_vlc;
182 #define VC1_NORM6_VLC_BITS 9
183 static VLC vc1_norm6_vlc;
184 /* Could be optimized, one table only needs 8 bits */
185 #define VC1_TTMB_VLC_BITS 9 //12
186 static VLC vc1_ttmb_vlc[3];
187 #define VC1_MV_DIFF_VLC_BITS 9 //15
188 static VLC vc1_mv_diff_vlc[4];
189 #define VC1_CBPCY_P_VLC_BITS 9 //14
190 static VLC vc1_cbpcy_p_vlc[4];
191 #define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
192 static VLC vc1_4mv_block_pattern_vlc[4];
193 #define VC1_TTBLK_VLC_BITS 5
194 static VLC vc1_ttblk_vlc[3];
195 #define VC1_SUBBLKPAT_VLC_BITS 6
196 static VLC vc1_subblkpat_vlc[3];
/* AC coefficient VLCs, one per coding set (see vc1acdata.h). */
198 static VLC vc1_ac_coeff_table[8];
202 CS_HIGH_MOT_INTRA = 0,
212 /** @name Overlap conditions for Advanced Profile */
223 * @fixme Change size wherever another size is more efficient
224 * Many members are only used for Advanced Profile
/* Decoder state for one VC-1/WMV3 stream. Embeds an MpegEncContext (field `s`,
 * not visible in this sampled listing) and groups sequence-header, frame-header
 * and per-frame working data. NOTE(review): the closing `} VC1Context;` and a
 * number of members are missing from this sampled view. */
226 typedef struct VC1Context{
231 /** Simple/Main Profile sequence header */
233 int res_sm; ///< reserved, 2b
234 int res_x8; ///< reserved
235 int multires; ///< frame-level RESPIC syntax element present
236 int res_fasttx; ///< reserved, always 1
237 int res_transtab; ///< reserved, always 0
238 int rangered; ///< RANGEREDFRM (range reduction) syntax element present
240 int res_rtm_flag; ///< reserved, set to 1
241 int reserved; ///< reserved
244 /** Advanced Profile */
246 int level; ///< 3bits, for Advanced/Simple Profile, provided by TS layer
247 int chromaformat; ///< 2bits, 2=4:2:0, only defined
248 int postprocflag; ///< Per-frame processing suggestion flag present
249 int broadcast; ///< TFF/RFF present
250 int interlace; ///< Progressive/interlaced (RPTFTM syntax element)
251 int tfcntrflag; ///< TFCNTR present
252 int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
253 int extended_dmv; ///< Additional extended dmv range at P/B frame-level
254 int color_prim; ///< 8bits, chroma coordinates of the color primaries
255 int transfer_char; ///< 8bits, Opto-electronic transfer characteristics
256 int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
257 int hrd_param_flag; ///< Presence of Hypothetical Reference
258 ///< Decoder parameters
259 int psf; ///< Progressive Segmented Frame
262 /** Sequence header data for all Profiles
263 * TODO: choose between ints, uint8_ts and monobit flags
266 int profile; ///< 2bits, Profile
267 int frmrtq_postproc; ///< 3bits,
268 int bitrtq_postproc; ///< 5bits, quantized framerate-based postprocessing strength
269 int fastuvmc; ///< Rounding of qpel vector to hpel ? (not in Simple)
270 int extended_mv; ///< Ext MV in P/B (not in Simple)
271 int dquant; ///< How qscale varies with MBs, 2bits (not in Simple)
272 int vstransform; ///< variable-size [48]x[48] transform type + info
273 int overlap; ///< overlapped transforms in use
274 int quantizer_mode; ///< 2bits, quantizer mode used for sequence, see QUANT_*
275 int finterpflag; ///< INTERPFRM present
278 /** Frame decoding info for all profiles */
280 uint8_t mv_mode; ///< MV coding mode
281 uint8_t mv_mode2; ///< Secondary MV coding mode (B frames)
282 int k_x; ///< Number of bits for MVs (depends on MV range)
283 int k_y; ///< Number of bits for MVs (depends on MV range)
284 int range_x, range_y; ///< MV range
285 uint8_t pq, altpq; ///< Current/alternate frame quantizer scale
286 /** pquant parameters */
293 /** AC coding set indexes
294 * @see 8.1.1.10, p(1)10
297 int c_ac_table_index; ///< Chroma index from ACFRM element
298 int y_ac_table_index; ///< Luma index from AC2FRM element
300 int ttfrm; ///< Transform type info present at frame level
301 uint8_t ttmbf; ///< Transform type flag
302 uint8_t ttblk4x4; ///< Value of ttblk which indicates a 4x4 transform
303 int codingset; ///< index of current table set from 11.8 to use for luma block decoding
304 int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding
305 int pqindex; ///< raw pqindex used in coding set selection
306 int a_avail, c_avail; ///< availability of above/left neighbours for prediction
307 uint8_t *mb_type_base, *mb_type[3]; ///< per-block intra/inter type planes
310 /** Luma compensation parameters */
315 int16_t bfraction; ///< Relative position % anchors=> how to scale MVs
316 uint8_t halfpq; ///< Uniform quant over image and qp+.5
317 uint8_t respic; ///< Frame-level flag for resized images
318 int buffer_fullness; ///< HRD info
320 * -# 0 -> [-64n 63.f] x [-32, 31.f]
321 * -# 1 -> [-128, 127.f] x [-64, 63.f]
322 * -# 2 -> [-512, 511.f] x [-128, 127.f]
323 * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
326 uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use
327 VLC *cbpcy_vlc; ///< CBPCY VLC table
328 int tt_index; ///< Index for Transform Type tables
329 uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
330 uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs
331 int mv_type_is_raw; ///< mv type mb plane is not coded
332 int dmb_is_raw; ///< direct mb plane is raw
333 int skip_is_raw; ///< skip mb plane is not coded
334 uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
335 int rnd; ///< rounding control
337 /** Frame decoding info for S/M profiles only */
339 uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128)
343 /** Frame decoding info for Advanced profile */
345 uint8_t fcm; ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
346 uint8_t numpanscanwin; ///< number of pan/scan windows (NUMPANSCANWIN)
348 uint8_t rptfrm, tff, rff; ///< repeat-frame count, top-field-first, repeat-first-field
351 uint16_t bottomrightx; ///< pan/scan window bottom-right X
352 uint16_t bottomrighty; ///< pan/scan window bottom-right Y
355 int hrd_num_leaky_buckets; ///< HRD leaky-bucket count (5 bits)
356 uint8_t bit_rate_exponent; ///< HRD bit-rate exponent
357 uint8_t buffer_size_exponent; ///< HRD buffer-size exponent
358 uint8_t* acpred_plane; ///< AC prediction flags bitplane
360 uint8_t* over_flags_plane; ///< Overflags bitplane
363 uint16_t *hrd_rate, *hrd_buffer; ///< per-bucket HRD rate/buffer arrays
364 uint8_t *hrd_fullness; ///< per-bucket HRD fullness
365 uint8_t range_mapy_flag; ///< luma range-mapping flag (Advanced)
366 uint8_t range_mapuv_flag; ///< chroma range-mapping flag (Advanced)
/* NOTE(review): heavily sampled fragment — the function braces, a commented-out
 * alternative implementation's closing, and the bitstream-reader OPEN_READER
 * setup lines are missing from this view. Bytes left untouched; the visible
 * logic counts bits until `stop` is read or `len` bits are consumed, with a
 * cached-reader fast path below. */
376 * Get unary code of limited length
377 * @fixme FIXME Slow and ugly
378 * @param gb GetBitContext
379 * @param[in] stop The bitstop value (unary code of 1's or 0's)
380 * @param[in] len Maximum length
381 * @return Unary length/index
383 static int get_prefix(GetBitContext *gb, int stop, int len)
388 for(i = 0; i < len && get_bits1(gb) != stop; i++);
390 /* int i = 0, tmp = !stop;
392 while (i != len && tmp != stop)
394 tmp = get_bits(gb, 1);
397 if (i == len && tmp != stop) return len+1;
404 UPDATE_CACHE(re, gb);
405 buf=GET_CACHE(re, gb); //Still not sure
406 if (stop) buf = ~buf;
408 log= av_log2(-buf); //FIXME: -?
410 LAST_SKIP_BITS(re, gb, log+1);
411 CLOSE_READER(re, gb);
415 LAST_SKIP_BITS(re, gb, limit);
416 CLOSE_READER(re, gb);
421 static inline int decode210(GetBitContext *gb){
427 return 2 - get_bits1(gb);
/* One-time initialization of the file-scope VLC tables and of a few
 * VC1Context defaults. NOTE(review): sampled listing — the `/**` opener,
 * function braces, the `done` guard and the `for(i=...)` loop headers that
 * enclose the indexed init_vlc() calls are missing from this view. */
431 * Init VC-1 specific tables and VC1Context members
432 * @param v The VC1Context to initialize
435 static int vc1_init_common(VC1Context *v)
440 v->hrd_rate = v->hrd_buffer = NULL;
/* Scalar VLCs: BFRACTION, NORM-2, NORM-6 (16-bit codes), IMODE. */
446 init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
447 vc1_bfraction_bits, 1, 1,
448 vc1_bfraction_codes, 1, 1, 1);
449 init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
450 vc1_norm2_bits, 1, 1,
451 vc1_norm2_codes, 1, 1, 1);
452 init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
453 vc1_norm6_bits, 1, 1,
454 vc1_norm6_codes, 2, 2, 1);
455 init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
456 vc1_imode_bits, 1, 1,
457 vc1_imode_codes, 1, 1, 1);
/* Three-table sets indexed by [i]: TTMB, TTBLK, SUBBLKPAT. */
460 init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
461 vc1_ttmb_bits[i], 1, 1,
462 vc1_ttmb_codes[i], 2, 2, 1);
463 init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
464 vc1_ttblk_bits[i], 1, 1,
465 vc1_ttblk_codes[i], 1, 1, 1);
466 init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
467 vc1_subblkpat_bits[i], 1, 1,
468 vc1_subblkpat_codes[i], 1, 1, 1);
/* Four-table sets indexed by [i]: 4MV block pattern, CBPCY (P), MV diff. */
472 init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
473 vc1_4mv_block_pattern_bits[i], 1, 1,
474 vc1_4mv_block_pattern_codes[i], 1, 1, 1);
475 init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
476 vc1_cbpcy_p_bits[i], 1, 1,
477 vc1_cbpcy_p_codes[i], 2, 2, 1);
478 init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
479 vc1_mv_diff_bits[i], 1, 1,
480 vc1_mv_diff_codes[i], 2, 2, 1);
/* AC coefficient tables (interleaved bits/codes in vc1_ac_tables) and the
 * shared MSMPEG-4 intra-MB VLC. */
483 init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
484 &vc1_ac_tables[i][0][1], 8, 4,
485 &vc1_ac_tables[i][0][0], 8, 4, 1);
486 init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
487 &ff_msmp4_mb_i_table[0][1], 4, 2,
488 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
493 v->mvrange = 0; /* 7.1.1.18, p80 */
498 /***********************************************************************/
500 * @defgroup bitplane VC9 Bitplane decoding
505 /** @addtogroup bitplane
518 /** @} */ //imode defines
520 /** Decode rows by checking if they are skipped
521 * @param plane Buffer to store decoded bits
522 * @param[in] width Width of this buffer
523 * @param[in] height Height of this buffer
524 * @param[in] stride of this buffer
526 static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
529 for (y=0; y<height; y++){
530 if (!get_bits(gb, 1)) //rowskip
531 memset(plane, 0, width);
533 for (x=0; x<width; x++)
534 plane[x] = get_bits(gb, 1);
539 /** Decode columns by checking if they are skipped
540 * @param plane Buffer to store decoded bits
541 * @param[in] width Width of this buffer
542 * @param[in] height Height of this buffer
543 * @param[in] stride of this buffer
544 * @fixme FIXME: Optimize
546 static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
549 for (x=0; x<width; x++){
550 if (!get_bits(gb, 1)) //colskip
551 for (y=0; y<height; y++)
554 for (y=0; y<height; y++)
555 plane[y*stride] = get_bits(gb, 1);
/* Decode one per-macroblock boolean bitplane (VC-1 spec section 8.7): reads the
 * INVERT bit and the IMODE VLC, then decodes via one of the modes visible
 * below — raw (read later in the MB layer), NORM-2 (pairs), NORM-6 (2x3 or 3x2
 * tiles with colskip/rowskip for the remainder), ROWSKIP, COLSKIP — applies the
 * DIFF operator for DIFF2/DIFF6 and finally the inversion. Returns
 * (imode<<1)+invert. NOTE(review): sampled listing — the switch/case lines,
 * several braces and error-return paths are missing from this view; bytes
 * left untouched. */
560 /** Decode a bitplane's bits
561 * @param bp Bitplane where to store the decode bits
562 * @param v VC-1 context for bit reading and logging
564 * @fixme FIXME: Optimize
566 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
568 GetBitContext *gb = &v->s.gb;
570 int imode, x, y, code, offset;
571 uint8_t invert, *planep = data;
572 int width, height, stride;
574 width = v->s.mb_width;
575 height = v->s.mb_height;
576 stride = v->s.mb_stride;
577 invert = get_bits(gb, 1);
578 imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
584 //Data is actually read in the MB layer (same for all tests == "raw")
585 *raw_flag = 1; //invert ignored
/* NORM-2 / DIFF-2: an odd plane size consumes one leading raw bit, then each
 * NORM-2 VLC yields two plane bits. */
589 if ((height * width) & 1)
591 *planep++ = get_bits(gb, 1);
595 // decode bitplane as one long line
596 for (y = offset; y < height * width; y += 2) {
597 code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
598 *planep++ = code & 1;
600 if(offset == width) {
602 planep += stride - width;
604 *planep++ = code >> 1;
606 if(offset == width) {
608 planep += stride - width;
/* NORM-6 / DIFF-6: each VLC carries 6 plane bits, laid out as 2x3 tiles when
 * the height is divisible by 3 (and width is not), else as 3x2 tiles; the
 * leftover columns/rows are handled with colskip/rowskip decoding. */
614 if(!(height % 3) && (width % 3)) { // use 2x3 decoding
615 for(y = 0; y < height; y+= 3) {
616 for(x = width & 1; x < width; x += 2) {
617 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
619 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
622 planep[x + 0] = (code >> 0) & 1;
623 planep[x + 1] = (code >> 1) & 1;
624 planep[x + 0 + stride] = (code >> 2) & 1;
625 planep[x + 1 + stride] = (code >> 3) & 1;
626 planep[x + 0 + stride * 2] = (code >> 4) & 1;
627 planep[x + 1 + stride * 2] = (code >> 5) & 1;
629 planep += stride * 3;
631 if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
633 planep += (height & 1) * stride;
634 for(y = height & 1; y < height; y += 2) {
635 for(x = width % 3; x < width; x += 3) {
636 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
638 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
641 planep[x + 0] = (code >> 0) & 1;
642 planep[x + 1] = (code >> 1) & 1;
643 planep[x + 2] = (code >> 2) & 1;
644 planep[x + 0 + stride] = (code >> 3) & 1;
645 planep[x + 1 + stride] = (code >> 4) & 1;
646 planep[x + 2 + stride] = (code >> 5) & 1;
648 planep += stride * 2;
651 if(x) decode_colskip(data , x, height , stride, &v->s.gb);
652 if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
656 decode_rowskip(data, width, height, stride, &v->s.gb);
659 decode_colskip(data, width, height, stride, &v->s.gb);
/* DIFF2/DIFF6 post-pass: XOR-predict each bit from its left/top neighbours. */
664 /* Applying diff operator */
665 if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
669 for (x=1; x<width; x++)
670 planep[x] ^= planep[x-1];
671 for (y=1; y<height; y++)
674 planep[0] ^= planep[-stride];
675 for (x=1; x<width; x++)
677 if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
678 else planep[x] ^= planep[x-1];
685 for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
687 return (imode<<1) + invert;
690 /** @} */ //Bitplane group
692 /***********************************************************************/
/* Parse the picture-layer VOPDQUANT syntax: derives the alternate quantizer
 * (altpq) directly when DQUANT==2, otherwise reads DQUANTFRM/DQPROFILE and
 * the edge/bilevel refinements. NOTE(review): sampled listing — function
 * braces, the dquant==2 branch header and `break` statements are missing
 * from this view; bytes left untouched. */
693 /** VOP Dquant decoding
694 * @param v VC-1 Context
696 static int vop_dquant_decoding(VC1Context *v)
698 GetBitContext *gb = &v->s.gb;
/* PQDIFF==7 escapes to a raw 5-bit ABSPQ, else altpq = pq + pqdiff + 1. */
704 pqdiff = get_bits(gb, 3);
705 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
706 else v->altpq = v->pq + pqdiff + 1;
710 v->dquantfrm = get_bits(gb, 1);
713 v->dqprofile = get_bits(gb, 2);
714 switch (v->dqprofile)
716 case DQPROFILE_SINGLE_EDGE:
717 case DQPROFILE_DOUBLE_EDGES:
718 v->dqsbedge = get_bits(gb, 2);
720 case DQPROFILE_ALL_MBS:
721 v->dqbilevel = get_bits(gb, 1);
722 default: break; //Forbidden ?
724 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
726 pqdiff = get_bits(gb, 3);
727 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
728 else v->altpq = v->pq + pqdiff + 1;
/* Write one decoded macroblock (4 luma + 2 chroma 8x8 blocks) into the current
 * picture via put_pixels_clamped. The first loop undoes range reduction on the
 * samples when active. NOTE(review): sampled listing — the rangeredfrm guard,
 * the Y destination-pointer setup and several braces are missing from view. */
735 /** Put block onto picture
737 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
741 DSPContext *dsp = &v->s.dsp;
/* Range reduction: scale every coefficient out from around 128 by 2. */
745 for(k = 0; k < 6; k++)
746 for(j = 0; j < 8; j++)
747 for(i = 0; i < 8; i++)
748 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
751 ys = v->s.current_picture.linesize[0];
752 us = v->s.current_picture.linesize[1];
753 vs = v->s.current_picture.linesize[2];
/* Top luma pair, then (after a row advance not visible here) bottom pair. */
756 dsp->put_pixels_clamped(block[0], Y, ys);
757 dsp->put_pixels_clamped(block[1], Y + 8, ys);
759 dsp->put_pixels_clamped(block[2], Y, ys);
760 dsp->put_pixels_clamped(block[3], Y + 8, ys);
762 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
763 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
764 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
/* Motion-compensate a whole 16x16 macroblock from one MV: derives the chroma
 * MV from the luma MV, clips source coordinates, falls back to edge emulation
 * (and range-reduction / intensity-compensation sample scaling) when needed,
 * then runs quarter-pel (mspel) or half-pel luma MC plus qpel-bilinear chroma
 * MC. NOTE(review): sampled listing — braces, the B-frame direction branch
 * header and the rangeredfrm/mspel condition lines are missing from view. */
768 /** Do motion compensation over 1 macroblock
769 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
771 static void vc1_mc_1mv(VC1Context *v, int dir)
773 MpegEncContext *s = &v->s;
774 DSPContext *dsp = &v->s.dsp;
775 uint8_t *srcY, *srcU, *srcV;
776 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
778 if(!v->s.last_picture.data[0])return;
780 mx = s->mv[dir][0][0];
781 my = s->mv[dir][0][1];
783 // store motion vectors for further use in B frames
784 if(s->pict_type == P_TYPE) {
785 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
786 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
/* Chroma MV = luma MV halved, rounding 3/4-pel positions up (8.3.5.4.4). */
788 uvmx = (mx + ((mx & 3) == 3)) >> 1;
789 uvmy = (my + ((my & 3) == 3)) >> 1;
791 srcY = s->last_picture.data[0];
792 srcU = s->last_picture.data[1];
793 srcV = s->last_picture.data[2];
795 srcY = s->next_picture.data[0];
796 srcU = s->next_picture.data[1];
797 srcV = s->next_picture.data[2];
800 src_x = s->mb_x * 16 + (mx >> 2);
801 src_y = s->mb_y * 16 + (my >> 2);
802 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
803 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
805 src_x = clip( src_x, -16, s->mb_width * 16);
806 src_y = clip( src_y, -16, s->mb_height * 16);
807 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
808 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
810 srcY += src_y * s->linesize + src_x;
811 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
812 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
814 /* for grayscale we should not try to read from unknown area */
815 if(s->flags & CODEC_FLAG_GRAY) {
816 srcU = s->edge_emu_buffer + 18 * s->linesize;
817 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* Slow path: copy through edge_emu_buffer whenever samples must be modified
 * (range reduction, intensity comp) or the block reaches past picture edges. */
820 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
821 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
822 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
823 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
825 srcY -= s->mspel * (1 + s->linesize);
826 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
827 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
828 srcY = s->edge_emu_buffer;
829 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
830 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
831 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
832 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
835 /* if we deal with range reduction we need to scale source blocks */
841 for(j = 0; j < 17 + s->mspel*2; j++) {
842 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
845 src = srcU; src2 = srcV;
846 for(j = 0; j < 9; j++) {
847 for(i = 0; i < 9; i++) {
848 src[i] = ((src[i] - 128) >> 1) + 128;
849 src2[i] = ((src2[i] - 128) >> 1) + 128;
851 src += s->uvlinesize;
852 src2 += s->uvlinesize;
855 /* if we deal with intensity compensation we need to scale source blocks */
856 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
861 for(j = 0; j < 17 + s->mspel*2; j++) {
862 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
865 src = srcU; src2 = srcV;
866 for(j = 0; j < 9; j++) {
867 for(i = 0; i < 9; i++) {
868 src[i] = v->lutuv[src[i]];
869 src2[i] = v->lutuv[src2[i]];
871 src += s->uvlinesize;
872 src2 += s->uvlinesize;
875 srcY += s->mspel * (1 + s->linesize);
/* FASTUVMC: force chroma MV components to half-pel positions. */
879 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
880 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
/* Luma MC: quarter-pel mspel path (four 8x8 calls) or half-pel 16x16 path. */
884 dxy = ((my & 3) << 2) | (mx & 3);
885 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
886 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
887 srcY += s->linesize * 8;
888 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
889 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
890 } else { // hpel mc - always used for luma
891 dxy = (my & 2) | ((mx & 2) >> 1);
894 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
896 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
899 if(s->flags & CODEC_FLAG_GRAY) return;
900 /* Chroma MC always uses qpel bilinear */
901 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
905 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
906 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
908 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
909 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Motion-compensate one 8x8 luma block (block n, 0..3) of a 4-MV macroblock,
 * mirroring the per-block half of vc1_mc_1mv: clip, optionally edge-emulate
 * and rescale samples, then quarter-pel or half-pel MC into s->dest[0]+off.
 * NOTE(review): sampled listing — braces, the mx/my assignments from
 * s->mv[0][n] and the intra-block early-out are missing from view. */
913 /** Do motion compensation for 4-MV macroblock - luminance block
915 static void vc1_mc_4mv_luma(VC1Context *v, int n)
917 MpegEncContext *s = &v->s;
918 DSPContext *dsp = &v->s.dsp;
920 int dxy, mx, my, src_x, src_y;
923 if(!v->s.last_picture.data[0])return;
926 srcY = s->last_picture.data[0];
/* Destination offset of block n inside the 16x16 macroblock. */
928 off = s->linesize * 4 * (n&2) + (n&1) * 8;
930 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
931 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
933 src_x = clip( src_x, -16, s->mb_width * 16);
934 src_y = clip( src_y, -16, s->mb_height * 16);
936 srcY += src_y * s->linesize + src_x;
938 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
939 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
940 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
941 srcY -= s->mspel * (1 + s->linesize);
942 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
943 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
944 srcY = s->edge_emu_buffer;
945 /* if we deal with range reduction we need to scale source blocks */
951 for(j = 0; j < 9 + s->mspel*2; j++) {
952 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
956 /* if we deal with intensity compensation we need to scale source blocks */
957 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
962 for(j = 0; j < 9 + s->mspel*2; j++) {
963 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
967 srcY += s->mspel * (1 + s->linesize);
971 dxy = ((my & 3) << 2) | (mx & 3);
972 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
973 } else { // hpel mc - always used for luma
974 dxy = (my & 2) | ((mx & 2) >> 1);
976 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
978 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/**
 * Median of four integers, defined as the mean of the two middle values
 * (integer division truncating toward zero). Used to derive the chroma MV
 * from the four luma MVs of a 4-MV macroblock.
 * NOTE(review): the sampled listing had lost the outer `a < b` branch and
 * the braces, leaving the last two returns unreachable; restored the
 * canonical four-way selection, written with plain ternaries so the helper
 * carries no FFMIN/FFMAX macro dependency.
 */
static inline int median4(int a, int b, int c, int d)
{
    int lo, hi;
    if (a < b) {
        lo = c < d ? (b < d ? b : d) : (b < c ? b : c);
        hi = c < d ? (a > c ? a : c) : (a > d ? a : d);
    } else {
        lo = c < d ? (a < d ? a : d) : (a < c ? a : c);
        hi = c < d ? (b > c ? b : c) : (b > d ? b : d);
    }
    return (lo + hi) / 2;
}
994 /** Do motion compensation for 4-MV macroblock - both chroma blocks
996 static void vc1_mc_4mv_chroma(VC1Context *v)
998 MpegEncContext *s = &v->s;
999 DSPContext *dsp = &v->s.dsp;
1000 uint8_t *srcU, *srcV;
1001 int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
1002 int i, idx, tx = 0, ty = 0;
1003 int mvx[4], mvy[4], intra[4];
1004 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
1006 if(!v->s.last_picture.data[0])return;
1007 if(s->flags & CODEC_FLAG_GRAY) return;
1009 for(i = 0; i < 4; i++) {
1010 mvx[i] = s->mv[0][i][0];
1011 mvy[i] = s->mv[0][i][1];
1012 intra[i] = v->mb_type[0][s->block_index[i]];
1015 /* calculate chroma MV vector from four luma MVs */
1016 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
1017 if(!idx) { // all blocks are inter
1018 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
1019 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
1020 } else if(count[idx] == 1) { // 3 inter blocks
1023 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
1024 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
1027 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
1028 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
1031 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
1032 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
1035 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
1036 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
1039 } else if(count[idx] == 2) {
1041 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
1042 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
1043 tx = (mvx[t1] + mvx[t2]) / 2;
1044 ty = (mvy[t1] + mvy[t2]) / 2;
1046 return; //no need to do MC for inter blocks
1048 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
1049 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
1050 uvmx = (tx + ((tx&3) == 3)) >> 1;
1051 uvmy = (ty + ((ty&3) == 3)) >> 1;
1053 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1054 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1056 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
1057 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
1058 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1059 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1060 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1061 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1062 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
1063 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
1064 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1065 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
1066 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1067 srcU = s->edge_emu_buffer;
1068 srcV = s->edge_emu_buffer + 16;
1070 /* if we deal with range reduction we need to scale source blocks */
1071 if(v->rangeredfrm) {
1073 uint8_t *src, *src2;
1075 src = srcU; src2 = srcV;
1076 for(j = 0; j < 9; j++) {
1077 for(i = 0; i < 9; i++) {
1078 src[i] = ((src[i] - 128) >> 1) + 128;
1079 src2[i] = ((src2[i] - 128) >> 1) + 128;
1081 src += s->uvlinesize;
1082 src2 += s->uvlinesize;
1085 /* if we deal with intensity compensation we need to scale source blocks */
1086 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1088 uint8_t *src, *src2;
1090 src = srcU; src2 = srcV;
1091 for(j = 0; j < 9; j++) {
1092 for(i = 0; i < 9; i++) {
1093 src[i] = v->lutuv[src[i]];
1094 src2[i] = v->lutuv[src2[i]];
1096 src += s->uvlinesize;
1097 src2 += s->uvlinesize;
1103 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
1104 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
1107 /* Chroma MC always uses qpel bilinear */
1108 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
1112 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1113 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1115 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1116 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1120 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
/**
 * (comment delimiters restored — the sampled listing had lost the opener)
1123 * Decode Simple/Main Profiles sequence header
1124 * @see Figure 7-8, p16-17
1125 * @param avctx Codec context
1126 * @param gb GetBit context initialized from Codec context extra_data
 */
/* Parses PROFILE and, for Simple/Main, the remaining sequence-layer fields,
 * validating reserved bits as it goes. Delegates to
 * decode_sequence_header_adv() for Advanced Profile.
 * NOTE(review): sampled listing — braces, `return -1;` error exits and a few
 * guard lines are missing from view; the only code change made here is the
 * error-message typo fix "shell" -> "shall". */
1129 static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
1131 VC1Context *v = avctx->priv_data;
1133 av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
1134 v->profile = get_bits(gb, 2);
1135 if (v->profile == 2)
1137 av_log(avctx, AV_LOG_ERROR, "Profile value 2 is forbidden (and WMV3 Complex Profile is unsupported)\n");
1141 if (v->profile == PROFILE_ADVANCED)
1143 return decode_sequence_header_adv(v, gb);
1147 v->res_sm = get_bits(gb, 2); //reserved
1150 av_log(avctx, AV_LOG_ERROR,
1151 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
1157 v->frmrtq_postproc = get_bits(gb, 3); //common
1158 // (bitrate-32kbps)/64kbps
1159 v->bitrtq_postproc = get_bits(gb, 5); //common
1160 v->s.loop_filter = get_bits(gb, 1); //common
1161 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
1163 av_log(avctx, AV_LOG_ERROR,
1164 "LOOPFILTER shall not be enabled in simple profile\n");
1167 v->res_x8 = get_bits(gb, 1); //reserved
1170 av_log(avctx, AV_LOG_ERROR,
1171 "1 for reserved RES_X8 is forbidden\n");
1174 v->multires = get_bits(gb, 1);
1175 v->res_fasttx = get_bits(gb, 1);
1178 av_log(avctx, AV_LOG_ERROR,
1179 "0 for reserved RES_FASTTX is forbidden\n");
1183 v->fastuvmc = get_bits(gb, 1); //common
1184 if (!v->profile && !v->fastuvmc)
1186 av_log(avctx, AV_LOG_ERROR,
1187 "FASTUVMC unavailable in Simple Profile\n");
1190 v->extended_mv = get_bits(gb, 1); //common
1191 if (!v->profile && v->extended_mv)
1193 av_log(avctx, AV_LOG_ERROR,
1194 "Extended MVs unavailable in Simple Profile\n");
1197 v->dquant = get_bits(gb, 2); //common
1198 v->vstransform = get_bits(gb, 1); //common
1200 v->res_transtab = get_bits(gb, 1);
1201 if (v->res_transtab)
1203 av_log(avctx, AV_LOG_ERROR,
1204 "1 for reserved RES_TRANSTAB is forbidden\n");
1208 v->overlap = get_bits(gb, 1); //common
1210 v->s.resync_marker = get_bits(gb, 1);
1211 v->rangered = get_bits(gb, 1);
1212 if (v->rangered && v->profile == PROFILE_SIMPLE)
1214 av_log(avctx, AV_LOG_INFO,
1215 "RANGERED should be set to 0 in simple profile\n");
1218 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
1219 v->quantizer_mode = get_bits(gb, 2); //common
1221 v->finterpflag = get_bits(gb, 1); //common
1222 v->res_rtm_flag = get_bits(gb, 1); //reserved
1223 if (!v->res_rtm_flag)
1225 // av_log(avctx, AV_LOG_ERROR,
1226 // "0 for reserved RES_RTM_FLAG is forbidden\n");
1227 av_log(avctx, AV_LOG_ERROR,
1228 "Old WMV3 version detected, only I-frames will be decoded\n");
1231 av_log(avctx, AV_LOG_DEBUG,
1232 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1233 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
1234 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
1235 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
1236 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
1237 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
1238 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
1239 v->dquant, v->quantizer_mode, avctx->max_b_frames
/**
 * Parse the Advanced-Profile sequence header (SMPTE 421M 6.1).
 * Reads level, chroma format, coded dimensions, interlace/broadcast flags and
 * optional display/HRD metadata from @p gb into @p v.
 * NOTE(review): this extraction is missing interleaved lines (braces, local
 * declarations, returns) relative to the upstream file — verify there.
 */
1244 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
1246 v->res_rtm_flag = 1; // advanced profile has no "old WMV3" mode
1247 v->level = get_bits(gb, 3);
1250 av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
1252 v->chromaformat = get_bits(gb, 2);
1253 if (v->chromaformat != 1) // 1 == 4:2:0; decoder supports nothing else
1255 av_log(v->s.avctx, AV_LOG_ERROR,
1256 "Only 4:2:0 chroma format supported\n");
1261 v->frmrtq_postproc = get_bits(gb, 3); //common
1262 // (bitrate-32kbps)/64kbps
1263 v->bitrtq_postproc = get_bits(gb, 5); //common
1264 v->postprocflag = get_bits(gb, 1); //common
// Coded size fields hold (dimension/2 - 1), hence the +1 and <<1.
1266 v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
1267 v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
1268 v->broadcast = get_bits1(gb);
1269 v->interlace = get_bits1(gb);
1270 v->tfcntrflag = get_bits1(gb);
1271 v->finterpflag = get_bits1(gb);
1272 get_bits1(gb); // reserved
1273 v->psf = get_bits1(gb);
1274 if(v->psf) { //PsF, 6.1.13
1275 av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
1278 if(get_bits1(gb)) { //Display Info - decoding is not affected by it
1280 av_log(v->s.avctx, AV_LOG_INFO, "Display extended info:\n");
1281 w = get_bits(gb, 14);
1282 h = get_bits(gb, 14);
1283 av_log(v->s.avctx, AV_LOG_INFO, "Display dimensions: %ix%i\n", w, h);
1284 //TODO: store aspect ratio in AVCodecContext
1286 ar = get_bits(gb, 4); // aspect-ratio code; presumably 15 = custom w/h below — confirm against spec
1288 w = get_bits(gb, 8);
1289 h = get_bits(gb, 8);
1292 if(get_bits1(gb)){ //framerate stuff
// Color description (consumed but only stored; not applied to output).
1302 v->color_prim = get_bits(gb, 8);
1303 v->transfer_char = get_bits(gb, 8);
1304 v->matrix_coef = get_bits(gb, 8);
1308 v->hrd_param_flag = get_bits1(gb);
1309 if(v->hrd_param_flag) {
// HRD (hypothetical reference decoder) parameters: sizes are parsed and
// discarded except for the leaky-bucket count, reused by decode_entry_point().
1311 v->hrd_num_leaky_buckets = get_bits(gb, 5);
1312 get_bits(gb, 4); //bitrate exponent
1313 get_bits(gb, 4); //buffer size exponent
1314 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1315 get_bits(gb, 16); //hrd_rate[n]
1316 get_bits(gb, 16); //hrd_buffer[n]
/**
 * Parse an Advanced-Profile entry-point header (SMPTE 421M 6.2).
 * Refreshes the coding-tool flags that may change at each entry point
 * (pan-scan, loop filter, MV extensions, quantizer mode, optional new
 * coded size and luma/chroma range mapping).
 * NOTE(review): interleaved lines (braces, locals, returns) are missing
 * from this extraction — verify against the upstream file.
 */
1322 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
1324 VC1Context *v = avctx->priv_data;
1327 av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
1328 get_bits1(gb); // broken link
1329 avctx->max_b_frames = 1 - get_bits1(gb); // 'closed entry' also signalize possible B-frames
1330 v->panscanflag = get_bits1(gb);
1331 get_bits1(gb); // refdist flag
1332 v->s.loop_filter = get_bits1(gb);
1333 v->fastuvmc = get_bits1(gb);
1334 v->extended_mv = get_bits1(gb);
1335 v->dquant = get_bits(gb, 2);
1336 v->vstransform = get_bits1(gb);
1337 v->overlap = get_bits1(gb);
1338 v->quantizer_mode = get_bits(gb, 2);
// Buffer-fullness bytes, one per leaky bucket announced in the sequence header.
1340 if(v->hrd_param_flag){
1341 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1342 get_bits(gb, 8); //hrd_full[n]
// Optional coded-size override, same (n+1)*2 encoding as the sequence header.
1347 avctx->coded_width = (get_bits(gb, 12)+1)<<1;
1348 avctx->coded_height = (get_bits(gb, 12)+1)<<1;
1351 v->extended_dmv = get_bits1(gb);
1353 av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
1354 skip_bits(gb, 3); // Y range, ignored for now
1357 av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
1358 skip_bits(gb, 3); // UV range, ignored for now
/**
 * Parse a Simple/Main-Profile frame (picture) header.
 * Determines picture type, per-frame quantizer, MV range/mode, decodes the
 * MB-level bitplanes (MV type, skip, direct) and selects the VLC tables
 * used for the rest of the frame.
 * @return 0 on success, -1 on bitplane decoding failure.
 * NOTE(review): interleaved lines (braces, case labels, returns) are missing
 * from this extraction — verify against the upstream file.
 */
1364 static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
1366 int pqindex, lowquant, status;
1368 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1369 skip_bits(gb, 2); //framecnt unused
1371 if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
// Picture type: 1 bit, plus an extra bit to tell I from B when B-frames exist.
1372 v->s.pict_type = get_bits(gb, 1);
1373 if (v->s.avctx->max_b_frames) {
1374 if (!v->s.pict_type) {
1375 if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
1376 else v->s.pict_type = B_TYPE;
1377 } else v->s.pict_type = P_TYPE;
1378 } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
1381 if(v->s.pict_type == B_TYPE) {
1382 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1383 v->bfraction = vc1_bfraction_lut[v->bfraction];
1384 if(v->bfraction == 0) { // BFRACTION == 0 signals a BI picture
1385 v->s.pict_type = BI_TYPE;
1388 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1389 get_bits(gb, 7); // skip buffer fullness
1392 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1394 if(v->s.pict_type == P_TYPE)
1397 /* Quantizer stuff */
1398 pqindex = get_bits(gb, 5);
// PQINDEX maps through one of two tables depending on the sequence-level mode.
1399 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1400 v->pq = pquant_table[0][pqindex];
1402 v->pq = pquant_table[1][pqindex];
1405 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1406 v->pquantizer = pqindex < 9; // implicit: uniform quant for low indices
1407 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1409 v->pqindex = pqindex;
1410 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1412 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1413 v->pquantizer = get_bits(gb, 1);
// MV range: MVRANGE is a unary-prefixed code widening k_x/k_y.
1415 if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
1416 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1417 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1418 v->range_x = 1 << (v->k_x - 1);
1419 v->range_y = 1 << (v->k_y - 1);
1420 if (v->profile == PROFILE_ADVANCED)
1422 if (v->postprocflag) v->postproc = get_bits(gb, 1);
1425 if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);
1427 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
1428 // (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
1430 switch(v->s.pict_type) {
// --- P-frame branch: MV mode, optional intensity compensation ---
1432 if (v->pq < 5) v->tt_index = 0;
1433 else if(v->pq < 13) v->tt_index = 1;
1434 else v->tt_index = 2;
1436 lowquant = (v->pq > 12) ? 0 : 1;
1437 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1438 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1440 int scale, shift, i;
1441 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1442 v->lumscale = get_bits(gb, 6);
1443 v->lumshift = get_bits(gb, 6);
1444 /* fill lookup tables for intensity compensation */
1447 shift = (255 - v->lumshift * 2) << 6;
1448 if(v->lumshift > 31)
1451 scale = v->lumscale + 32;
1452 if(v->lumshift > 31)
1453 shift = (v->lumshift - 64) << 6; // lumshift > 31 wraps to a negative offset
1455 shift = v->lumshift << 6;
1457 for(i = 0; i < 256; i++) {
1458 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1459 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
// Derive sample precision from the (possibly two-level) MV mode.
1462 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1463 v->s.quarter_sample = 0;
1464 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1465 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1466 v->s.quarter_sample = 0;
1468 v->s.quarter_sample = 1;
1470 v->s.quarter_sample = 1;
1471 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1473 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1474 v->mv_mode2 == MV_PMODE_MIXED_MV)
1475 || v->mv_mode == MV_PMODE_MIXED_MV)
1477 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1478 if (status < 0) return -1;
1479 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1480 "Imode: %i, Invert: %i\n", status>>1, status&1);
1482 v->mv_type_is_raw = 0;
1483 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1485 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1486 if (status < 0) return -1;
1487 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1488 "Imode: %i, Invert: %i\n", status>>1, status&1);
1490 /* Hopefully this is correct for P frames */
1491 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1492 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1496 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1497 vop_dquant_decoding(v);
1500 v->ttfrm = 0; //FIXME Is that so ?
1503 v->ttmbf = get_bits(gb, 1);
1506 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
// --- B-frame branch: only 1MV / 1MV-HPEL-bilinear modes exist ---
1514 if (v->pq < 5) v->tt_index = 0;
1515 else if(v->pq < 13) v->tt_index = 1;
1516 else v->tt_index = 2;
1518 lowquant = (v->pq > 12) ? 0 : 1;
1519 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1520 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1521 v->s.mspel = v->s.quarter_sample;
1523 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1524 if (status < 0) return -1;
1525 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1526 "Imode: %i, Invert: %i\n", status>>1, status&1);
1527 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1528 if (status < 0) return -1;
1529 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1530 "Imode: %i, Invert: %i\n", status>>1, status&1);
1532 v->s.mv_table_index = get_bits(gb, 2);
1533 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1537 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1538 vop_dquant_decoding(v);
1544 v->ttmbf = get_bits(gb, 1);
1547 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
// AC/DC coding-set selection, common to all picture types.
1557 v->c_ac_table_index = decode012(gb);
1558 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1560 v->y_ac_table_index = decode012(gb);
1563 v->s.dc_table_index = get_bits(gb, 1);
// BI pictures are decoded as B internally; the flag keeps the distinction.
1565 if(v->s.pict_type == BI_TYPE) {
1566 v->s.pict_type = B_TYPE;
/**
 * Parse an Advanced-Profile frame (picture) header.
 * Mirrors vc1_parse_frame_header() but uses the advanced-profile syntax:
 * FCM, unary picture-type code, AC-prediction / overlap bitplanes for
 * I frames, and skipped-picture signalling.
 * @return 0 on success, -1 on bitplane decoding failure.
 * NOTE(review): interleaved lines (braces, case labels, returns) are missing
 * from this extraction — verify against the upstream file.
 */
1572 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
1575 int pqindex, lowquant;
1578 v->p_frame_skipped = 0;
1581 fcm = decode012(gb); // frame coding mode (progressive/interlace); value unused below
// Picture type is a unary prefix code: 0=P, 10=B, 110=I, 1110=BI, 1111=skipped.
1582 switch(get_prefix(gb, 0, 4)) {
1584 v->s.pict_type = P_TYPE;
1587 v->s.pict_type = B_TYPE;
1591 v->s.pict_type = I_TYPE;
1594 v->s.pict_type = BI_TYPE;
1597 v->s.pict_type = P_TYPE; // skipped pic
1598 v->p_frame_skipped = 1;
1604 if(!v->interlace || v->panscanflag) {
1611 if(v->panscanflag) {
1614 v->rnd = get_bits1(gb); // rounding control
1616 v->uvsamp = get_bits1(gb);
1617 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1618 pqindex = get_bits(gb, 5);
1619 v->pqindex = pqindex;
1620 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1621 v->pq = pquant_table[0][pqindex];
1623 v->pq = pquant_table[1][pqindex];
1626 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1627 v->pquantizer = pqindex < 9;
1628 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1630 v->pqindex = pqindex;
1631 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1633 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1634 v->pquantizer = get_bits(gb, 1);
1636 switch(v->s.pict_type) {
// --- I-frame branch: AC prediction and conditional-overlap bitplanes ---
1639 status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
1640 if (status < 0) return -1;
1641 av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
1642 "Imode: %i, Invert: %i\n", status>>1, status&1);
1643 v->condover = CONDOVER_NONE;
1644 if(v->overlap && v->pq <= 8) {
1645 v->condover = decode012(gb);
1646 if(v->condover == CONDOVER_SELECT) {
1647 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
1648 if (status < 0) return -1;
1649 av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
1650 "Imode: %i, Invert: %i\n", status>>1, status&1);
// --- P-frame branch: same MV mode / intensity-compensation logic as
// the simple/main-profile parser (see vc1_parse_frame_header) ---
1656 v->postproc = get_bits1(gb);
1657 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1658 else v->mvrange = 0;
1659 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1660 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1661 v->range_x = 1 << (v->k_x - 1);
1662 v->range_y = 1 << (v->k_y - 1);
1664 if (v->pq < 5) v->tt_index = 0;
1665 else if(v->pq < 13) v->tt_index = 1;
1666 else v->tt_index = 2;
1668 lowquant = (v->pq > 12) ? 0 : 1;
1669 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1670 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1672 int scale, shift, i;
1673 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1674 v->lumscale = get_bits(gb, 6);
1675 v->lumshift = get_bits(gb, 6);
1676 /* fill lookup tables for intensity compensation */
1679 shift = (255 - v->lumshift * 2) << 6;
1680 if(v->lumshift > 31)
1683 scale = v->lumscale + 32;
1684 if(v->lumshift > 31)
1685 shift = (v->lumshift - 64) << 6;
1687 shift = v->lumshift << 6;
1689 for(i = 0; i < 256; i++) {
1690 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1691 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1694 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1695 v->s.quarter_sample = 0;
1696 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1697 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1698 v->s.quarter_sample = 0;
1700 v->s.quarter_sample = 1;
1702 v->s.quarter_sample = 1;
1703 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1705 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1706 v->mv_mode2 == MV_PMODE_MIXED_MV)
1707 || v->mv_mode == MV_PMODE_MIXED_MV)
1709 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1710 if (status < 0) return -1;
1711 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1712 "Imode: %i, Invert: %i\n", status>>1, status&1);
1714 v->mv_type_is_raw = 0;
1715 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1717 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1718 if (status < 0) return -1;
1719 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1720 "Imode: %i, Invert: %i\n", status>>1, status&1);
1722 /* Hopefully this is correct for P frames */
1723 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1724 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1727 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1728 vop_dquant_decoding(v);
1731 v->ttfrm = 0; //FIXME Is that so ?
1734 v->ttmbf = get_bits(gb, 1);
1737 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1747 v->c_ac_table_index = decode012(gb);
1748 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1750 v->y_ac_table_index = decode012(gb);
1753 v->s.dc_table_index = get_bits(gb, 1);
// Unlike simple/main profile, I frames may carry per-MB quantizer info here.
1754 if (v->s.pict_type == I_TYPE && v->dquant) {
1755 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1756 vop_dquant_decoding(v);
1760 if(v->s.pict_type == BI_TYPE) {
1761 v->s.pict_type = B_TYPE;
1767 /***********************************************************************/
1769 * @defgroup block VC-1 Block-level functions
1770 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1776 * @brief Get macroblock-level quantizer scale
/* GET_MQUANT(): compute the macroblock-level quantizer `mquant` when DQUANT
 * is active. Depending on v->dqprofile, ALTPQUANT applies to all MBs (with a
 * per-MB flag or a 3/5-bit differential), to a single edge, to a pair of
 * edges, or to all four frame edges; `edges` is a 4-bit mask L/T/R/B.
 * Expects `gb`, `s`, `v`, and locals `mquant`, `mqdiff`, `edges` in scope.
 * Comments cannot be placed inside the macro (backslash continuations). */
1778 #define GET_MQUANT() \
1782 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1786 mquant = (get_bits(gb, 1)) ? v->altpq : v->pq; \
1790 mqdiff = get_bits(gb, 3); \
1791 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1792 else mquant = get_bits(gb, 5); \
1795 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1796 edges = 1 << v->dqsbedge; \
1797 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1798 edges = (3 << v->dqsbedge) % 15; \
1799 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
1801 if((edges&1) && !s->mb_x) \
1802 mquant = v->altpq; \
1803 if((edges&2) && s->first_slice_line) \
1804 mquant = v->altpq; \
1805 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1806 mquant = v->altpq; \
1807 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1808 mquant = v->altpq; \
1812 * @def GET_MVDATA(_dmv_x, _dmv_y)
1813 * @brief Get MV differentials
1814 * @see MVDATA decoding from 8.3.5.2, p(1)20
1815 * @param _dmv_x Horizontal differential for decoded MV
1816 * @param _dmv_y Vertical differential for decoded MV
/* GET_MVDATA(_dmv_x, _dmv_y): decode one MVDATA VLC into MV differentials.
 * index 0 = zero MV; index 35 = raw k_x/k_y-bit escape; index 36 = intra
 * (per the doc comment above); otherwise the index splits into per-axis
 * size/offset table lookups with a sign folded into the low bit.
 * Also sets `mb_has_coeffs`. Expects `gb`, `s`, `v` and locals `index`,
 * `index1`, `val`, `sign` in scope; comments cannot go inside the macro. */
1818 #define GET_MVDATA(_dmv_x, _dmv_y) \
1819 index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
1820 VC1_MV_DIFF_VLC_BITS, 2); \
1823 mb_has_coeffs = 1; \
1826 else mb_has_coeffs = 0; \
1828 if (!index) { _dmv_x = _dmv_y = 0; } \
1829 else if (index == 35) \
1831 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1832 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1834 else if (index == 36) \
1843 if (!s->quarter_sample && index1 == 5) val = 1; \
1845 if(size_table[index1] - val > 0) \
1846 val = get_bits(gb, size_table[index1] - val); \
1848 sign = 0 - (val&1); \
1849 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1852 if (!s->quarter_sample && index1 == 5) val = 1; \
1854 if(size_table[index1] - val > 0) \
1855 val = get_bits(gb, size_table[index1] - val); \
1857 sign = 0 - (val&1); \
1858 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1861 /** Predict and set motion vector
/**
 * Predict and store the motion vector for block n of the current MB.
 * Median-predicts from neighbours A (above), B (above-diagonal), C (left),
 * applies the MV pullback of 8.3.5.3.4 and the hybrid-prediction bit of
 * 8.3.5.3.5, then stores the MV with signed modulus of the r_x/r_y range.
 * @param mv1  non-zero for 1-MV mode (result duplicated to all 4 luma blocks)
 * NOTE(review): interleaved lines (braces, locals such as A/B/C/px/py/sum)
 * are missing from this extraction — verify against the upstream file.
 */
1863 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
1865 int xy, wrap, off = 0;
1870 /* scale MV difference to be quad-pel */
1871 dmv_x <<= 1 - s->quarter_sample;
1872 dmv_y <<= 1 - s->quarter_sample;
1874 wrap = s->b8_stride;
1875 xy = s->block_index[n];
// (Intra path) zero the stored MV, duplicating over the 2x2 block for 1-MV.
1878 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1879 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1880 if(mv1) { /* duplicate motion data for 1-MV block */
1881 s->current_picture.motion_val[0][xy + 1][0] = 0;
1882 s->current_picture.motion_val[0][xy + 1][1] = 0;
1883 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1884 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1885 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1886 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
// Gather candidate predictors: C = left, A = above, B = above +/- off.
1891 C = s->current_picture.motion_val[0][xy - 1];
1892 A = s->current_picture.motion_val[0][xy - wrap];
1894 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1896 //in 4-MV mode different blocks have different B predictor position
1899 off = (s->mb_x > 0) ? -1 : 1;
1902 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1911 B = s->current_picture.motion_val[0][xy - wrap + off];
1913 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
1914 if(s->mb_width == 1) {
1918 px = mid_pred(A[0], B[0], C[0]);
1919 py = mid_pred(A[1], B[1], C[1]);
1921 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
1927 /* Pullback MV as specified in 8.3.5.3.4 */
// Block position in quarter-pel units; subblocks offset by 32 (half an MB).
1930 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
1931 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
1932 X = (s->mb_width << 6) - 4;
1933 Y = (s->mb_height << 6) - 4;
1935 if(qx + px < -60) px = -60 - qx;
1936 if(qy + py < -60) py = -60 - qy;
1938 if(qx + px < -28) px = -28 - qx;
1939 if(qy + py < -28) py = -28 - qy;
1941 if(qx + px > X) px = X - qx;
1942 if(qy + py > Y) py = Y - qy;
1944 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
1945 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
1946 if(is_intra[xy - wrap])
1947 sum = ABS(px) + ABS(py);
1949 sum = ABS(px - A[0]) + ABS(py - A[1]);
1951 if(get_bits1(&s->gb)) { // hybrid bit: replace median with A or C
1959 if(is_intra[xy - 1])
1960 sum = ABS(px) + ABS(py);
1962 sum = ABS(px - C[0]) + ABS(py - C[1]);
1964 if(get_bits1(&s->gb)) {
1974 /* store MV using signed modulus of MV range defined in 4.11 */
1975 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1976 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1977 if(mv1) { /* duplicate motion data for 1-MV block */
1978 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
1979 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
1980 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
1981 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
1982 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
1983 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
1987 /** Motion compensation for direct or interpolated blocks in B-frames
/**
 * Motion compensation from the NEXT (backward) reference, averaged into
 * s->dest — used for direct/interpolated blocks in B-frames.
 * Clips the source position, falls back to edge emulation when the MV
 * points outside the padded frame, undoes range reduction if active, then
 * runs half-pel luma averaging and quarter-pel bilinear chroma averaging.
 * NOTE(review): interleaved lines (locals, braces, the boundary condition
 * before line 2027) are missing from this extraction — verify upstream.
 */
1989 static void vc1_interp_mc(VC1Context *v)
1991 MpegEncContext *s = &v->s;
1992 DSPContext *dsp = &v->s.dsp;
1993 uint8_t *srcY, *srcU, *srcV;
1994 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1996 if(!v->s.next_picture.data[0])return; // no backward reference available
1998 mx = s->mv[1][0][0];
1999 my = s->mv[1][0][1];
// Chroma MV is luma MV halved, rounding 3/4-pel positions up.
2000 uvmx = (mx + ((mx & 3) == 3)) >> 1;
2001 uvmy = (my + ((my & 3) == 3)) >> 1;
2002 srcY = s->next_picture.data[0];
2003 srcU = s->next_picture.data[1];
2004 srcV = s->next_picture.data[2];
2006 src_x = s->mb_x * 16 + (mx >> 2);
2007 src_y = s->mb_y * 16 + (my >> 2);
2008 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2009 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2011 src_x = clip( src_x, -16, s->mb_width * 16);
2012 src_y = clip( src_y, -16, s->mb_height * 16);
2013 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
2014 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
2016 srcY += src_y * s->linesize + src_x;
2017 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2018 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2020 /* for grayscale we should not try to read from unknown area */
2021 if(s->flags & CODEC_FLAG_GRAY) {
2022 srcU = s->edge_emu_buffer + 18 * s->linesize;
2023 srcV = s->edge_emu_buffer + 18 * s->linesize;
// Edge emulation when the 17x17 luma patch crosses the frame border.
2027 || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
2028 || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
2029 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
2031 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17, 17,
2032 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
2033 srcY = s->edge_emu_buffer;
2034 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
2035 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2036 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
2037 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2040 /* if we deal with range reduction we need to scale source blocks */
2041 if(v->rangeredfrm) {
2043 uint8_t *src, *src2;
2046 for(j = 0; j < 17; j++) {
2047 for(i = 0; i < 17; i++) src[i] = ((src[i] - 128) >> 1) + 128;
2050 src = srcU; src2 = srcV;
2051 for(j = 0; j < 9; j++) {
2052 for(i = 0; i < 9; i++) {
2053 src[i] = ((src[i] - 128) >> 1) + 128;
2054 src2[i] = ((src2[i] - 128) >> 1) + 128;
2056 src += s->uvlinesize;
2057 src2 += s->uvlinesize;
// Force even chroma MVs (drop the quarter-pel bit toward zero).
2063 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
2064 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
2069 dxy = ((my & 1) << 1) | (mx & 1);
2071 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
2073 if(s->flags & CODEC_FLAG_GRAY) return;
2074 /* Chroma MC always uses qpel bilinear */
2075 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
2076 dsp->avg_qpel_pixels_tab[1][uvdxy](s->dest[1], srcU, s->uvlinesize);
2077 dsp->avg_qpel_pixels_tab[1][uvdxy](s->dest[2], srcV, s->uvlinesize);
/**
 * Scale a reference MV component by the B-frame fraction (bfrac out of
 * B_FRACTION_DEN), for forward (inv=0) or backward (inv=1, negative weight)
 * prediction; qs selects quarter-sample (1) vs half-sample (0) rounding.
 * The B_FRACTION_DEN==256 branch replaces the division with shifts.
 * NOTE(review): lines declaring `n` and the if(inv)/if(!qs) conditions are
 * missing from this extraction — verify against the upstream file.
 */
2080 static always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2084 #if B_FRACTION_DEN==256
2088 return 2 * ((value * n + 255) >> 9); // half-pel: round up, keep even
2089 return (value * n + 128) >> 8;
2092 n -= B_FRACTION_DEN;
2094 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2095 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2099 /** Reconstruct motion vector for B-frame and do motion compensation
/**
 * Run motion compensation for one B-frame MB: both directions for
 * interpolated mode, otherwise a single vc1_mc_1mv() call whose direction
 * flag is derived from the mode.
 * NOTE(review): most of this function's body is missing from this
 * extraction — verify against the upstream file.
 */
2101 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
2108 if(mode == BMV_TYPE_INTERPOLATED) {
2114 vc1_mc_1mv(v, (mode == BMV_TYPE_FORWARD));
/**
 * Predict and store forward/backward motion vectors for a B-frame MB.
 * Direct mode scales the co-located next-picture MV with scale_mv();
 * otherwise the forward (mv[0]) and/or backward (mv[1]) MV is median-
 * predicted from A/B/C neighbours, pulled back to the frame (8.3.5.3.4),
 * and stored with signed modulus of the MV range — the same pipeline as
 * vc1_pred_mv(), repeated per direction.
 * NOTE(review): the hybrid-prediction sections are disabled with `if(0 &&
 * ...)` — apparently intentional (reads bits from s->gb if enabled), so it
 * must stay dead unless re-validated. Interleaved lines are also missing
 * from this extraction — verify against the upstream file.
 */
2117 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
2119 MpegEncContext *s = &v->s;
2120 int xy, wrap, off = 0;
2125 const uint8_t *is_intra = v->mb_type[0];
2129 /* scale MV difference to be quad-pel */
2130 dmv_x[0] <<= 1 - s->quarter_sample;
2131 dmv_y[0] <<= 1 - s->quarter_sample;
2132 dmv_x[1] <<= 1 - s->quarter_sample;
2133 dmv_y[1] <<= 1 - s->quarter_sample;
2135 wrap = s->b8_stride;
2136 xy = s->block_index[0];
// (Intra path) clear both directions' stored MVs.
2139 s->current_picture.motion_val[0][xy][0] =
2140 s->current_picture.motion_val[0][xy][1] =
2141 s->current_picture.motion_val[1][xy][0] =
2142 s->current_picture.motion_val[1][xy][1] = 0;
// Direct mode: derive both MVs from the co-located MV in the next picture.
2145 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2146 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2147 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2148 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2150 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2151 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2152 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2153 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
// Forward-list prediction (list 0); stride is wrap*2 since MBs are 2 blocks wide.
2157 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2158 C = s->current_picture.motion_val[0][xy - 2];
2159 A = s->current_picture.motion_val[0][xy - wrap*2];
2160 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2161 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
2163 if(!s->first_slice_line) { // predictor A is not out of bounds
2164 if(s->mb_width == 1) {
2168 px = mid_pred(A[0], B[0], C[0]);
2169 py = mid_pred(A[1], B[1], C[1]);
2171 } else if(s->mb_x) { // predictor C is not out of bounds
2177 /* Pullback MV as specified in 8.3.5.3.4 */
// Half-pel coordinates for simple/main profile, quarter-pel for advanced.
2180 if(v->profile < PROFILE_ADVANCED) {
2181 qx = (s->mb_x << 5);
2182 qy = (s->mb_y << 5);
2183 X = (s->mb_width << 5) - 4;
2184 Y = (s->mb_height << 5) - 4;
2185 if(qx + px < -28) px = -28 - qx;
2186 if(qy + py < -28) py = -28 - qy;
2187 if(qx + px > X) px = X - qx;
2188 if(qy + py > Y) py = Y - qy;
2190 qx = (s->mb_x << 6);
2191 qy = (s->mb_y << 6);
2192 X = (s->mb_width << 6) - 4;
2193 Y = (s->mb_height << 6) - 4;
2194 if(qx + px < -60) px = -60 - qx;
2195 if(qy + py < -60) py = -60 - qy;
2196 if(qx + px > X) px = X - qx;
2197 if(qy + py > Y) py = Y - qy;
2200 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2201 if(0 && !s->first_slice_line && s->mb_x) { // disabled: see NOTE in header
2202 if(is_intra[xy - wrap])
2203 sum = ABS(px) + ABS(py);
2205 sum = ABS(px - A[0]) + ABS(py - A[1]);
2207 if(get_bits1(&s->gb)) {
2215 if(is_intra[xy - 2])
2216 sum = ABS(px) + ABS(py);
2218 sum = ABS(px - C[0]) + ABS(py - C[1]);
2220 if(get_bits1(&s->gb)) {
2230 /* store MV using signed modulus of MV range defined in 4.11 */
2231 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2232 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
// Backward-list prediction (list 1): mirrors the list-0 pipeline above.
2234 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2235 C = s->current_picture.motion_val[1][xy - 2];
2236 A = s->current_picture.motion_val[1][xy - wrap*2];
2237 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2238 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
2240 if(!s->first_slice_line) { // predictor A is not out of bounds
2241 if(s->mb_width == 1) {
2245 px = mid_pred(A[0], B[0], C[0]);
2246 py = mid_pred(A[1], B[1], C[1]);
2248 } else if(s->mb_x) { // predictor C is not out of bounds
2254 /* Pullback MV as specified in 8.3.5.3.4 */
2257 if(v->profile < PROFILE_ADVANCED) {
2258 qx = (s->mb_x << 5);
2259 qy = (s->mb_y << 5);
2260 X = (s->mb_width << 5) - 4;
2261 Y = (s->mb_height << 5) - 4;
2262 if(qx + px < -28) px = -28 - qx;
2263 if(qy + py < -28) py = -28 - qy;
2264 if(qx + px > X) px = X - qx;
2265 if(qy + py > Y) py = Y - qy;
2267 qx = (s->mb_x << 6);
2268 qy = (s->mb_y << 6);
2269 X = (s->mb_width << 6) - 4;
2270 Y = (s->mb_height << 6) - 4;
2271 if(qx + px < -60) px = -60 - qx;
2272 if(qy + py < -60) py = -60 - qy;
2273 if(qx + px > X) px = X - qx;
2274 if(qy + py > Y) py = Y - qy;
2277 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2278 if(0 && !s->first_slice_line && s->mb_x) { // disabled: see NOTE in header
2279 if(is_intra[xy - wrap])
2280 sum = ABS(px) + ABS(py);
2282 sum = ABS(px - A[0]) + ABS(py - A[1]);
2284 if(get_bits1(&s->gb)) {
2292 if(is_intra[xy - 2])
2293 sum = ABS(px) + ABS(py);
2295 sum = ABS(px - C[0]) + ABS(py - C[1]);
2297 if(get_bits1(&s->gb)) {
2307 /* store MV using signed modulus of MV range defined in 4.11 */
2309 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2310 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2312 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2313 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2314 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2315 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2318 /** Get predicted DC value for I-frames only
2319 * prediction dir: left=0, top=1
2320 * @param s MpegEncContext
2321 * @param[in] n block index in the current MB
2322 * @param dc_val_ptr Pointer to DC predictor
2323 * @param dir_ptr Prediction direction for use in AC prediction
/**
 * Predicted DC value for I-frames: picks the left (c) or top (a) neighbour
 * depending on which gradient |a-b| vs |b-c| is smaller, substituting a
 * quantizer-dependent default (dcpred[scale]) or 0 at frame edges.
 * Returns via *dc_val_ptr the predictor storage slot; *dir_ptr gets the
 * prediction direction for AC prediction (left=0, top=1).
 * NOTE(review): lines declaring c/dc_val, the edge checks and the returns
 * are missing from this extraction — verify against the upstream file.
 */
2325 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2326 int16_t **dc_val_ptr, int *dir_ptr)
2328 int a, b, c, wrap, pred, scale;
// dcpred[k] ~ 1024/k (rounded): default DC for a missing neighbour.
// NOTE(review): -1 wraps to 65535 in uint16_t; index 0 is presumably never
// used since qscale >= 1 — confirm.
2330 static const uint16_t dcpred[32] = {
2331 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2332 114, 102, 93, 85, 79, 73, 68, 64,
2333 60, 57, 54, 51, 49, 47, 45, 43,
2334 41, 39, 38, 37, 35, 34, 33
2337 /* find prediction - wmv3_dc_scale always used here in fact */
2338 if (n < 4) scale = s->y_dc_scale;
2339 else scale = s->c_dc_scale;
2341 wrap = s->block_wrap[n];
2342 dc_val= s->dc_val[0] + s->block_index[n];
// b = top-left neighbour, a = top neighbour (c = left, set on a missing line).
2348 b = dc_val[ - 1 - wrap];
2349 a = dc_val[ - wrap];
2351 if (pq < 9 || !overlap)
2353 /* Set outer values */
2354 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2355 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2359 /* Set outer values */
2360 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2361 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
2364 if (abs(a - b) <= abs(b - c)) { // smaller top gradient -> predict from left
2372 /* update predictor */
2373 *dc_val_ptr = &dc_val[0];
2378 /** Get predicted DC value
2379 * prediction dir: left=0, top=1
2380 * @param s MpegEncContext
2381 * @param[in] n block index in the current MB
2382 * @param dc_val_ptr Pointer to DC predictor
2383 * @param dir_ptr Prediction direction for use in AC prediction
/**
 * Predicted DC value for P/B frames: like vc1_i_pred_dc() but neighbour
 * availability is passed in (a_avail/c_avail) and each neighbour DC is
 * rescaled when its MB used a different quantizer, via vc1_dqscale
 * fixed-point reciprocals (the +0x20000 >> 18 rounding).
 * Returns via *dc_val_ptr the predictor storage slot; *dir_ptr gets the
 * prediction direction for AC prediction (left=0, top=1).
 * NOTE(review): the c/dc_val/q1/q2 declarations, the if(q1!=q2) guards and
 * the returns are missing from this extraction — verify upstream.
 */
2385 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2386 int a_avail, int c_avail,
2387 int16_t **dc_val_ptr, int *dir_ptr)
2389 int a, b, c, wrap, pred, scale;
2391 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2394 /* find prediction - wmv3_dc_scale always used here in fact */
2395 if (n < 4) scale = s->y_dc_scale;
2396 else scale = s->c_dc_scale;
2398 wrap = s->block_wrap[n];
2399 dc_val= s->dc_val[0] + s->block_index[n];
2405 b = dc_val[ - 1 - wrap];
2406 a = dc_val[ - wrap];
2407 /* scale predictors if needed */
2408 q1 = s->current_picture.qscale_table[mb_pos];
// n==1/3: left neighbour is inside this MB, so no cross-MB rescale needed.
2409 if(c_avail && (n!= 1 && n!=3)) {
2410 q2 = s->current_picture.qscale_table[mb_pos - 1];
2412 c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2414 if(a_avail && (n!= 2 && n!=3)) {
2415 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2417 a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2419 if(a_avail && c_avail && (n!=3)) {
// Top-left neighbour MB offset: one column left, one row up except for n==2.
2422 if(n != 2) off -= s->mb_stride;
2423 q2 = s->current_picture.qscale_table[off];
2425 b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2428 if(a_avail && c_avail) {
2429 if(abs(a - b) <= abs(b - c)) { // smaller top gradient -> predict from left
2436 } else if(a_avail) {
2439 } else if(c_avail) {
2447 /* update predictor */
2448 *dc_val_ptr = &dc_val[0];
2454 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2455 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
/**
 * Predict the coded/not-coded flag of block n from its left (a), top-left
 * (b) and top (c) neighbours, and hand back via *coded_block_ptr the slot
 * where the caller stores the actual flag.
 * NOTE(review): the prediction expression and return are on lines missing
 * from this extraction — verify against the upstream file.
 */
2459 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
2461 int xy, wrap, pred, a, b, c;
2463 xy = s->block_index[n];
2464 wrap = s->b8_stride;
2469 a = s->coded_block[xy - 1 ];
2470 b = s->coded_block[xy - 1 - wrap];
2471 c = s->coded_block[xy - wrap];
2480 *coded_block_ptr = &s->coded_block[xy];
2486 * Decode one AC coefficient
2487 * @param v The VC1 context
2488 * @param last Last coefficient
2489 * @param skip How much zero coefficients to skip
2490 * @param value Decoded AC coefficient value
/**
 * Decode one AC coefficient (run/level/last triple) from the bitstream.
 * Normal indices come straight from the per-codingset tables; otherwise a
 * 2-bit escape selects: mode-0/1 delta coding of level or run on top of a
 * second VLC read, or mode-2 fully explicit coding whose field widths
 * (esc3_*) are read once per frame and cached in the context.
 * @param last  [out] set when this is the last coefficient of the block
 * @param skip  [out] zero-run length preceding the coefficient
 * @param value [out] signed coefficient value
 * NOTE(review): sign handling and the final *last/*skip/*value stores are
 * on lines missing from this extraction — verify upstream.
 */
2493 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
2495 GetBitContext *gb = &v->s.gb;
2496 int index, escape, run = 0, level = 0, lst = 0;
2498 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2499 if (index != vc1_ac_sizes[codingset] - 1) { // not the escape symbol
2500 run = vc1_index_decode_table[codingset][index][0];
2501 level = vc1_index_decode_table[codingset][index][1];
2502 lst = index >= vc1_last_decode_table[codingset];
2506 escape = decode210(gb);
// Escape modes 0/1: re-read the VLC, then add a table delta to level or run.
2508 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2509 run = vc1_index_decode_table[codingset][index][0];
2510 level = vc1_index_decode_table[codingset][index][1];
2511 lst = index >= vc1_last_decode_table[codingset];
2514 level += vc1_last_delta_level_table[codingset][run];
2516 level += vc1_delta_level_table[codingset][run];
2519 run += vc1_last_delta_run_table[codingset][level] + 1;
2521 run += vc1_delta_run_table[codingset][level] + 1;
// Escape mode 2: explicit run/sign/level; field widths decoded lazily once
// per frame (table 59 vs the prefix-coded form) and cached in the context.
2527 lst = get_bits(gb, 1);
2528 if(v->s.esc3_level_length == 0) {
2529 if(v->pq < 8 || v->dquantfrm) { // table 59
2530 v->s.esc3_level_length = get_bits(gb, 3);
2531 if(!v->s.esc3_level_length)
2532 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2534 v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
2536 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2538 run = get_bits(gb, v->s.esc3_run_length);
2539 sign = get_bits(gb, 1);
2540 level = get_bits(gb, v->s.esc3_level_length);
2551 /** Decode intra block in intra frames - should be faster than decode_intra_block
2552 * @param v VC1Context
2553 * @param block block to decode
2554 * @param coded are AC coeffs present or not
2555 * @param codingset set of VLC to decode data
2557 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
2559 GetBitContext *gb = &v->s.gb;
2560 MpegEncContext *s = &v->s;
2561 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2564 int16_t *ac_val, *ac_val2;
2567 /* Get DC differential */
/* luma blocks (n < 4, presumably) use the luma DC VLC, chroma the chroma one
 * -- TODO confirm: the selecting condition is elided in this listing */
2569 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2571 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2574 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* index 119 escapes to a fixed-length DC differential whose width
 * depends on the picture quantizer */
2579 if (dcdiff == 119 /* ESC index value */)
2581 /* TODO: Optimize */
2582 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2583 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2584 else dcdiff = get_bits(gb, 8);
/* for pq 1/2 the VLC value carries extra low-order precision bits */
2589 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2590 else if (v->pq == 2)
2591 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2593 if (get_bits(gb, 1))
/* add DC prediction from the neighbour chosen by vc1_i_pred_dc */
2598 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2601 /* Store the quantized DC coeff, used for prediction */
2603 block[0] = dcdiff * s->y_dc_scale;
2605 block[0] = dcdiff * s->c_dc_scale;
/* ---- coded path: decode AC run/level pairs ---- */
2618 int last = 0, skip, value;
2619 const int8_t *zz_table;
2623 scale = v->pq * 2 + v->halfpq;
/* scan pattern follows the DC prediction direction */
2627 zz_table = vc1_horizontal_zz;
2629 zz_table = vc1_vertical_zz;
2631 zz_table = vc1_normal_zz;
2633 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2635 if(dc_pred_dir) //left
2638 ac_val -= 16 * s->block_wrap[n];
2641 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2645 block[zz_table[i++]] = value;
2648 /* apply AC prediction if needed */
2650 if(dc_pred_dir) { //left
2651 for(k = 1; k < 8; k++)
2652 block[k << 3] += ac_val[k];
2654 for(k = 1; k < 8; k++)
2655 block[k] += ac_val[k + 8];
2658 /* save AC coeffs for further prediction */
/* ac_val2 layout: [1..7] = first column, [9..15] = first row */
2659 for(k = 1; k < 8; k++) {
2660 ac_val2[k] = block[k << 3];
2661 ac_val2[k + 8] = block[k];
2664 /* scale AC coeffs */
2665 for(k = 1; k < 64; k++)
/* non-uniform quantizer: add +-pq toward larger magnitude */
2669 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2672 if(s->ac_pred) i = 63;
/* ---- uncoded path: only predicted ACs (if any) are reconstructed ---- */
2678 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2681 scale = v->pq * 2 + v->halfpq;
2682 memset(ac_val2, 0, 16 * 2);
2683 if(dc_pred_dir) {//left
2686 memcpy(ac_val2, ac_val, 8 * 2);
2688 ac_val -= 16 * s->block_wrap[n];
2690 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2693 /* apply AC prediction if needed */
2695 if(dc_pred_dir) { //left
2696 for(k = 1; k < 8; k++) {
2697 block[k << 3] = ac_val[k] * scale;
2698 if(!v->pquantizer && block[k << 3])
2699 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2702 for(k = 1; k < 8; k++) {
2703 block[k] = ac_val[k + 8] * scale;
2704 if(!v->pquantizer && block[k])
2705 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2711 s->block_last_index[n] = i;
2716 /** Decode intra block in intra frames - advanced-profile variant of
2717 * vc1_decode_i_block, taking the macroblock quantizer and using the
2718 * caller-set a_avail/c_avail neighbour flags
2719 * @param v VC1Context
2720 * @param block block to decode
2721 * @param coded are AC coeffs present or not
2722 * @param codingset set of VLC to decode data
2722 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
2724 GetBitContext *gb = &v->s.gb;
2725 MpegEncContext *s = &v->s;
2726 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2729 int16_t *ac_val, *ac_val2;
/* neighbour availability and AC-prediction flag, precomputed by the caller */
2731 int a_avail = v->a_avail, c_avail = v->c_avail;
2732 int use_pred = s->ac_pred;
2735 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2737 /* Get DC differential */
2739 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2741 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2744 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape: fixed-length DC differential, width chosen by the MB quantizer
 * (mquant) rather than the picture quantizer as in vc1_decode_i_block */
2749 if (dcdiff == 119 /* ESC index value */)
2751 /* TODO: Optimize */
2752 if (mquant == 1) dcdiff = get_bits(gb, 10);
2753 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2754 else dcdiff = get_bits(gb, 8);
2759 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2760 else if (mquant == 2)
2761 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2763 if (get_bits(gb, 1))
2768 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2771 /* Store the quantized DC coeff, used for prediction */
2773 block[0] = dcdiff * s->y_dc_scale;
2775 block[0] = dcdiff * s->c_dc_scale;
2784 /* check if AC is needed at all and adjust direction if needed */
2785 if(!a_avail) dc_pred_dir = 1;
2786 if(!c_avail) dc_pred_dir = 0;
2787 if(!a_avail && !c_avail) use_pred = 0;
2788 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2791 scale = mquant * 2 + v->halfpq;
2793 if(dc_pred_dir) //left
2796 ac_val -= 16 * s->block_wrap[n];
/* q1/q2: quantizers of the current and the predicting MB; when they
 * differ, stored AC predictors must be rescaled (vc1_dqscale below) */
2798 q1 = s->current_picture.qscale_table[mb_pos];
2799 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
2800 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
/* luma blocks 1..3 predict from inside the same MB, so same quantizer */
2801 if(n && n<4) q2 = q1;
2804 int last = 0, skip, value;
2805 const int8_t *zz_table;
2810 zz_table = vc1_horizontal_zz;
2812 zz_table = vc1_vertical_zz;
2814 zz_table = vc1_normal_zz;
2817 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2821 block[zz_table[i++]] = value;
2824 /* apply AC prediction if needed */
2826 /* scale predictors if needed*/
/* fixed-point rescale: x * q2 / q1 via vc1_dqscale (18-bit reciprocal) */
2831 if(dc_pred_dir) { //left
2832 for(k = 1; k < 8; k++)
2833 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2835 for(k = 1; k < 8; k++)
2836 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2839 if(dc_pred_dir) { //left
2840 for(k = 1; k < 8; k++)
2841 block[k << 3] += ac_val[k];
2843 for(k = 1; k < 8; k++)
2844 block[k] += ac_val[k + 8];
2848 /* save AC coeffs for further prediction */
2849 for(k = 1; k < 8; k++) {
2850 ac_val2[k] = block[k << 3];
2851 ac_val2[k + 8] = block[k];
2854 /* scale AC coeffs */
2855 for(k = 1; k < 64; k++)
2859 block[k] += (block[k] < 0) ? -mquant : mquant;
2862 if(use_pred) i = 63;
2863 } else { // no AC coeffs
2866 memset(ac_val2, 0, 16 * 2);
2867 if(dc_pred_dir) {//left
2869 memcpy(ac_val2, ac_val, 8 * 2);
/* rescale the copied predictors if neighbour quantizer differs */
2873 for(k = 1; k < 8; k++)
2874 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2879 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2883 for(k = 1; k < 8; k++)
2884 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2889 /* apply AC prediction if needed */
2891 if(dc_pred_dir) { //left
2892 for(k = 1; k < 8; k++) {
2893 block[k << 3] = ac_val2[k] * scale;
2894 if(!v->pquantizer && block[k << 3])
2895 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
2898 for(k = 1; k < 8; k++) {
2899 block[k] = ac_val2[k + 8] * scale;
2900 if(!v->pquantizer && block[k])
2901 block[k] += (block[k] < 0) ? -mquant : mquant;
2907 s->block_last_index[n] = i;
2912 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2913 * @param v VC1Context
2914 * @param block block to decode
2915 * @param coded are AC coeffs present or not
2916 * @param mquant block quantizer
2917 * @param codingset set of VLC to decode data
2919 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
2921 GetBitContext *gb = &v->s.gb;
2922 MpegEncContext *s = &v->s;
2923 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2926 int16_t *ac_val, *ac_val2;
2928 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2929 int a_avail = v->a_avail, c_avail = v->c_avail;
2930 int use_pred = s->ac_pred;
2934 /* XXX: Guard against dumb values of mquant */
/* NOTE(review): clamps mquant < 1 to 0, not 1 — verify that index 0 of the
 * dc scale tables is valid, otherwise this should clamp to 1 */
2935 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
2937 /* Set DC scale - y and c use the same */
2938 s->y_dc_scale = s->y_dc_scale_table[mquant];
2939 s->c_dc_scale = s->c_dc_scale_table[mquant];
2941 /* Get DC differential */
2943 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2945 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2948 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2953 if (dcdiff == 119 /* ESC index value */)
2955 /* TODO: Optimize */
2956 if (mquant == 1) dcdiff = get_bits(gb, 10);
2957 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2958 else dcdiff = get_bits(gb, 8);
2963 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2964 else if (mquant == 2)
2965 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2967 if (get_bits(gb, 1))
2972 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2975 /* Store the quantized DC coeff, used for prediction */
2978 block[0] = dcdiff * s->y_dc_scale;
2980 block[0] = dcdiff * s->c_dc_scale;
2989 /* check if AC is needed at all and adjust direction if needed */
2990 if(!a_avail) dc_pred_dir = 1;
2991 if(!c_avail) dc_pred_dir = 0;
2992 if(!a_avail && !c_avail) use_pred = 0;
2993 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2996 scale = mquant * 2 + v->halfpq;
2998 if(dc_pred_dir) //left
3001 ac_val -= 16 * s->block_wrap[n];
/* quantizers of current (q1) and predicting (q2) MB, for AC rescaling */
3003 q1 = s->current_picture.qscale_table[mb_pos];
3004 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
3005 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3006 if(n && n<4) q2 = q1;
3009 int last = 0, skip, value;
3010 const int8_t *zz_table;
/* unlike the I-frame path, inter-frame intra blocks always use the
 * 8x8 progressive scan (no per-direction scan tables visible here) */
3013 zz_table = vc1_simple_progressive_8x8_zz;
3016 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3020 block[zz_table[i++]] = value;
3023 /* apply AC prediction if needed */
3025 /* scale predictors if needed*/
3030 if(dc_pred_dir) { //left
3031 for(k = 1; k < 8; k++)
3032 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3034 for(k = 1; k < 8; k++)
3035 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3038 if(dc_pred_dir) { //left
3039 for(k = 1; k < 8; k++)
3040 block[k << 3] += ac_val[k];
3042 for(k = 1; k < 8; k++)
3043 block[k] += ac_val[k + 8];
3047 /* save AC coeffs for further prediction */
3048 for(k = 1; k < 8; k++) {
3049 ac_val2[k] = block[k << 3];
3050 ac_val2[k + 8] = block[k];
3053 /* scale AC coeffs */
3054 for(k = 1; k < 64; k++)
3058 block[k] += (block[k] < 0) ? -mquant : mquant;
3061 if(use_pred) i = 63;
3062 } else { // no AC coeffs
3065 memset(ac_val2, 0, 16 * 2);
3066 if(dc_pred_dir) {//left
3068 memcpy(ac_val2, ac_val, 8 * 2);
3072 for(k = 1; k < 8; k++)
3073 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3078 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3082 for(k = 1; k < 8; k++)
3083 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3088 /* apply AC prediction if needed */
3090 if(dc_pred_dir) { //left
3091 for(k = 1; k < 8; k++) {
3092 block[k << 3] = ac_val2[k] * scale;
3093 if(!v->pquantizer && block[k << 3])
3094 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3097 for(k = 1; k < 8; k++) {
3098 block[k] = ac_val2[k + 8] * scale;
3099 if(!v->pquantizer && block[k])
3100 block[k] += (block[k] < 0) ? -mquant : mquant;
3106 s->block_last_index[n] = i;
/* Decode one inter (residual) block: pick the block transform type
 * (8x8 / two 8x4 / two 4x8 / four 4x4), decode the coded sub-blocks'
 * AC coefficients, dequantize and apply the matching inverse transform
 * in place on `block`. `ttmb` carries the MB-level transform type,
 * -1 meaning "signalled per block". */
3113 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block)
3115 MpegEncContext *s = &v->s;
3116 GetBitContext *gb = &s->gb;
3119 int scale, off, idx, last, skip, value;
3120 int ttblk = ttmb & 7;
/* per-block transform type signalled in the bitstream */
3123 ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3125 if(ttblk == TT_4X4) {
3126 subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3128 if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
3129 subblkpat = decode012(gb);
3130 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
3131 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
3132 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
3134 scale = 2 * mquant + v->halfpq;
3136 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3137 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3138 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3141 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3142 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* ---- TT_8X8: single transform over the whole block ---- */
3150 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3154 idx = vc1_simple_progressive_8x8_zz[i++];
3155 block[idx] = value * scale;
3157 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3159 s->dsp.vc1_inv_trans_8x8(block);
/* ---- TT_4X4: up to four 4x4 sub-blocks, presence from subblkpat ---- */
3162 for(j = 0; j < 4; j++) {
3163 last = subblkpat & (1 << (3 - j));
3165 off = (j & 1) * 4 + (j & 2) * 16;
3167 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3171 idx = vc1_simple_progressive_4x4_zz[i++];
3172 block[idx + off] = value * scale;
3174 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3176 if(!(subblkpat & (1 << (3 - j))))
3177 s->dsp.vc1_inv_trans_4x4(block, j);
/* ---- TT_8X4: top/bottom halves; scan differs below/at advanced profile */
3181 for(j = 0; j < 2; j++) {
3182 last = subblkpat & (1 << (1 - j));
3186 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3190 if(v->profile < PROFILE_ADVANCED)
3191 idx = vc1_simple_progressive_8x4_zz[i++];
3193 idx = vc1_adv_progressive_8x4_zz[i++];
3194 block[idx + off] = value * scale;
3196 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3198 if(!(subblkpat & (1 << (1 - j))))
3199 s->dsp.vc1_inv_trans_8x4(block, j);
/* ---- TT_4X8: left/right halves ---- */
3203 for(j = 0; j < 2; j++) {
3204 last = subblkpat & (1 << (1 - j));
3208 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3212 if(v->profile < PROFILE_ADVANCED)
3213 idx = vc1_simple_progressive_4x8_zz[i++];
3215 idx = vc1_adv_progressive_4x8_zz[i++];
3216 block[idx + off] = value * scale;
3218 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3220 if(!(subblkpat & (1 << (1 - j))))
3221 s->dsp.vc1_inv_trans_4x8(block, j);
3229 /** Decode one P-frame MB (in Simple/Main profile)
3231 static int vc1_decode_p_mb(VC1Context *v)
3233 MpegEncContext *s = &v->s;
3234 GetBitContext *gb = &s->gb;
3236 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3237 int cbp; /* cbp decoding stuff */
3238 int mqdiff, mquant; /* MB quantization */
3239 int ttmb = v->ttfrm; /* MB Transform type */
3242 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3243 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3244 int mb_has_coeffs = 1; /* last_flag */
3245 int dmv_x, dmv_y; /* Differential MV components */
3246 int index, index1; /* LUT indices */
3247 int val, sign; /* temp values */
3248 int first_block = 1;
3250 int skipped, fourmv;
3252 mquant = v->pq; /* Loosy initialization */
/* MVTYPEMB and SKIPMB may be raw (coded per MB) or come from a
 * pre-decoded bitplane */
3254 if (v->mv_type_is_raw)
3255 fourmv = get_bits1(gb);
3257 fourmv = v->mv_type_mb_plane[mb_pos];
3259 skipped = get_bits1(gb);
3261 skipped = v->s.mbskip_table[mb_pos];
3263 s->dsp.clear_blocks(s->block[0]);
3265 if (!fourmv) /* 1MV mode */
/* --- 1MV, not skipped --- */
3269 GET_MVDATA(dmv_x, dmv_y);
3272 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3273 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3275 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3276 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3278 /* FIXME Set DC val for inter block ? */
3279 if (s->mb_intra && !mb_has_coeffs)
3282 s->ac_pred = get_bits(gb, 1);
3285 else if (mb_has_coeffs)
3287 if (s->mb_intra) s->ac_pred = get_bits(gb, 1);
3288 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3296 s->current_picture.qscale_table[mb_pos] = mquant;
3298 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3299 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table,
3300 VC1_TTMB_VLC_BITS, 2);
3301 if(!s->mb_intra) vc1_mc_1mv(v, 0);
/* per-block decode loop (loop header elided in this listing) */
3305 s->dc_val[0][s->block_index[i]] = 0;
3307 val = ((cbp >> (5 - i)) & 1);
3308 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3309 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3311 /* check if prediction blocks A and C are available */
3312 v->a_avail = v->c_avail = 0;
3313 if(i == 2 || i == 3 || !s->first_slice_line)
3314 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3315 if(i == 1 || i == 3 || s->mb_x)
3316 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3318 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3319 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3320 s->dsp.vc1_inv_trans_8x8(s->block[i]);
/* range reduction doubles reconstructed values */
3321 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3322 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3323 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3324 if(v->pq >= 9 && v->overlap) {
3326 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
3328 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
3331 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3332 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3334 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3335 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* --- 1MV, skipped: predict MV only, no residual --- */
3342 for(i = 0; i < 6; i++) {
3343 v->mb_type[0][s->block_index[i]] = 0;
3344 s->dc_val[0][s->block_index[i]] = 0;
3346 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3347 s->current_picture.qscale_table[mb_pos] = 0;
3348 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
/* --- 4MV mode --- */
3355 if (!skipped /* unskipped MB */)
3357 int intra_count = 0, coded_inter = 0;
3358 int is_intra[6], is_coded[6];
3360 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3363 val = ((cbp >> (5 - i)) & 1);
3364 s->dc_val[0][s->block_index[i]] = 0;
3371 GET_MVDATA(dmv_x, dmv_y);
3373 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3374 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3375 intra_count += s->mb_intra;
3376 is_intra[i] = s->mb_intra;
3377 is_coded[i] = mb_has_coeffs;
/* chroma blocks are intra iff a majority (>=3) of luma blocks are */
3380 is_intra[i] = (intra_count >= 3);
3383 if(i == 4) vc1_mc_4mv_chroma(v);
3384 v->mb_type[0][s->block_index[i]] = is_intra[i];
/* NOTE(review): bitwise '&' on flag values — OK only if both are 0/1;
 * '&&' would be the safer spelling */
3385 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
3387 // if there are no coded blocks then don't do anything more
3388 if(!intra_count && !coded_inter) return 0;
3391 s->current_picture.qscale_table[mb_pos] = mquant;
3392 /* test if block is intra and has pred */
3397 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3398 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
3403 if(intrapred)s->ac_pred = get_bits(gb, 1);
3404 else s->ac_pred = 0;
3406 if (!v->ttmbf && coded_inter)
3407 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3411 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3412 s->mb_intra = is_intra[i];
3414 /* check if prediction blocks A and C are available */
3415 v->a_avail = v->c_avail = 0;
3416 if(i == 2 || i == 3 || !s->first_slice_line)
3417 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3418 if(i == 1 || i == 3 || s->mb_x)
3419 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3421 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
3422 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3423 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3424 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3425 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3426 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3427 if(v->pq >= 9 && v->overlap) {
3429 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
3431 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
3433 } else if(is_coded[i]) {
3434 status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3435 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3437 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3438 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* --- 4MV, skipped: predict and apply MC for all four luma MVs --- */
3446 s->current_picture.qscale_table[mb_pos] = 0;
3447 for (i=0; i<6; i++) {
3448 v->mb_type[0][s->block_index[i]] = 0;
3449 s->dc_val[0][s->block_index[i]] = 0;
3453 vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
3454 vc1_mc_4mv_luma(v, i);
3456 vc1_mc_4mv_chroma(v);
3457 s->current_picture.qscale_table[mb_pos] = 0;
3462 /* Should never happen */
3466 /** Decode one B-frame MB (in Main profile)
3468 static void vc1_decode_b_mb(VC1Context *v)
3470 MpegEncContext *s = &v->s;
3471 GetBitContext *gb = &s->gb;
3473 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3474 int cbp = 0; /* cbp decoding stuff */
3475 int mqdiff, mquant; /* MB quantization */
3476 int ttmb = v->ttfrm; /* MB Transform type */
3478 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3479 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3480 int mb_has_coeffs = 0; /* last_flag */
3481 int index, index1; /* LUT indices */
3482 int val, sign; /* temp values */
3483 int first_block = 1;
3485 int skipped, direct;
3486 int dmv_x[2], dmv_y[2];
3487 int bmvtype = BMV_TYPE_BACKWARD;
3489 mquant = v->pq; /* Loosy initialization */
/* DIRECTMB and SKIPMB may be raw or come from pre-decoded bitplanes */
3493 direct = get_bits1(gb);
3495 direct = v->direct_mb_plane[mb_pos];
3497 skipped = get_bits1(gb);
3499 skipped = v->s.mbskip_table[mb_pos];
3501 s->dsp.clear_blocks(s->block[0]);
3502 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
3503 for(i = 0; i < 6; i++) {
3504 v->mb_type[0][s->block_index[i]] = 0;
3505 s->dc_val[0][s->block_index[i]] = 0;
3507 s->current_picture.qscale_table[mb_pos] = 0;
/* non-direct: read forward MV data; both MV slots start identical */
3511 GET_MVDATA(dmv_x[0], dmv_y[0]);
3512 dmv_x[1] = dmv_x[0];
3513 dmv_y[1] = dmv_y[0];
3515 if(skipped || !s->mb_intra) {
3516 bmvtype = decode012(gb);
/* 0/1 map to forward/backward depending on which half of the
 * B-fraction the frame sits in; 2 means interpolated */
3519 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
3522 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
3525 bmvtype = BMV_TYPE_INTERPOLATED;
3526 dmv_x[1] = dmv_y[1] = 0;
3530 for(i = 0; i < 6; i++)
3531 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* skipped MB: direct implies interpolated; predict MV and do MC only */
3534 if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
3535 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3536 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
/* direct, not skipped: CBP then MQUANT/TTMB before MC */
3540 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3544 s->current_picture.qscale_table[mb_pos] = mquant;
3546 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3547 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
3548 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3549 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3551 if(!mb_has_coeffs && !s->mb_intra) {
3552 /* no coded blocks - effectively skipped */
3553 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3554 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3557 if(s->mb_intra && !mb_has_coeffs) {
3559 s->current_picture.qscale_table[mb_pos] = mquant;
3560 s->ac_pred = get_bits1(gb);
3562 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* interpolated: second (backward) MV is coded explicitly */
3564 if(bmvtype == BMV_TYPE_INTERPOLATED) {
3565 GET_MVDATA(dmv_x[1], dmv_y[1]);
3566 if(!mb_has_coeffs) {
3567 /* interpolated skipped block */
3568 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3569 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3573 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3575 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3578 s->ac_pred = get_bits1(gb);
3579 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3581 s->current_picture.qscale_table[mb_pos] = mquant;
3582 if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3583 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* per-block residual decode, same structure as in vc1_decode_p_mb */
3589 s->dc_val[0][s->block_index[i]] = 0;
3591 val = ((cbp >> (5 - i)) & 1);
3592 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3593 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3595 /* check if prediction blocks A and C are available */
3596 v->a_avail = v->c_avail = 0;
3597 if(i == 2 || i == 3 || !s->first_slice_line)
3598 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3599 if(i == 1 || i == 3 || s->mb_x)
3600 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3602 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3603 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3604 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3605 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3606 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3607 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3609 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3610 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3612 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3613 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3618 /** Decode blocks of I-frame
/* Decode all macroblocks of a Simple/Main-profile I frame: select the
 * AC coding sets from the transmitted table indices, then per MB decode
 * CBP + the six intra blocks, apply the inverse transform and, for
 * pq >= 9 with OVERLAP set, the overlap smoothing filter.
 *
 * FIX: mb_pos was computed with s->mb_width, but mb_type[] and
 * qscale_table[] are laid out (and read everywhere else in this file,
 * including vc1_decode_i_blocks_adv and vc1_pred_dc) with s->mb_stride.
 * Since mb_stride != mb_width in mpegvideo, every row after the first
 * wrote qscale/mb_type to the wrong cell, corrupting DC/AC prediction
 * rescaling. Use mb_stride. */
3620 static void vc1_decode_i_blocks(VC1Context *v)
3623 MpegEncContext *s = &v->s;
3628 /* select codingmode used for VLC tables selection */
3629 switch(v->y_ac_table_index){
3631 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3634 v->codingset = CS_HIGH_MOT_INTRA;
3637 v->codingset = CS_MID_RATE_INTRA;
3641 switch(v->c_ac_table_index){
3643 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3646 v->codingset2 = CS_HIGH_MOT_INTER;
3649 v->codingset2 = CS_MID_RATE_INTER;
3653 /* Set DC scale - y and c use the same */
3654 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3655 s->c_dc_scale = s->c_dc_scale_table[v->pq];
3658 s->mb_x = s->mb_y = 0;
3660 s->first_slice_line = 1;
3661 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3662 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3663 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3664 ff_init_block_index(s);
3665 ff_update_block_index(s);
3666 s->dsp.clear_blocks(s->block[0]);
3667 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3668 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3669 s->current_picture.qscale_table[mb_pos] = v->pq;
3671 // do actual MB decoding and displaying
3672 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
3673 v->s.ac_pred = get_bits(&v->s.gb, 1);
3675 for(k = 0; k < 6; k++) {
3676 val = ((cbp >> (5 - k)) & 1);
/* coded-flag prediction: decoded bit is a difference vs. prediction */
3679 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3683 cbp |= val << (5 - k);
3685 vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
3687 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3688 if(v->pq >= 9 && v->overlap) {
3689 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3693 vc1_put_block(v, s->block);
/* overlap smoothing across the top and left MB edges */
3694 if(v->pq >= 9 && v->overlap) {
3695 if(!s->first_slice_line) {
3696 s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
3697 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
3698 if(!(s->flags & CODEC_FLAG_GRAY)) {
3699 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
3700 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
3703 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
3704 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3706 s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
3707 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
3708 if(!(s->flags & CODEC_FLAG_GRAY)) {
3709 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
3710 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
3713 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
3714 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3717 if(get_bits_count(&s->gb) > v->bits) {
3718 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3722 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3723 s->first_slice_line = 0;
3727 /** Decode blocks of I-frame for advanced profile
3729 static void vc1_decode_i_blocks_adv(VC1Context *v)
3732 MpegEncContext *s = &v->s;
3739 GetBitContext *gb = &s->gb;
3741 /* select codingmode used for VLC tables selection */
3742 switch(v->y_ac_table_index){
3744 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3747 v->codingset = CS_HIGH_MOT_INTRA;
3750 v->codingset = CS_MID_RATE_INTRA;
3754 switch(v->c_ac_table_index){
3756 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3759 v->codingset2 = CS_HIGH_MOT_INTER;
3762 v->codingset2 = CS_MID_RATE_INTER;
3766 /* Set DC scale - y and c use the same */
3767 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3768 s->c_dc_scale = s->c_dc_scale_table[v->pq];
3771 s->mb_x = s->mb_y = 0;
3773 s->first_slice_line = 1;
3774 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3775 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3776 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3777 ff_init_block_index(s);
3778 ff_update_block_index(s);
3779 s->dsp.clear_blocks(s->block[0]);
/* note: stride-based MB index, consistent with qscale_table layout */
3780 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3781 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3783 // do actual MB decoding and displaying
3784 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
/* ACPRED may be raw per-MB or come from a pre-decoded bitplane */
3785 if(v->acpred_is_raw)
3786 v->s.ac_pred = get_bits(&v->s.gb, 1);
3788 v->s.ac_pred = v->acpred_plane[mb_pos];
/* conditional overlap (CONDOVER): per-MB flag, bitplane, or all MBs */
3790 if(v->condover == CONDOVER_SELECT) {
3791 if(v->overflg_is_raw)
3792 overlap = get_bits(&v->s.gb, 1);
3794 overlap = v->over_flags_plane[mb_pos];
3796 overlap = (v->condover == CONDOVER_ALL);
3800 s->current_picture.qscale_table[mb_pos] = mquant;
3802 for(k = 0; k < 6; k++) {
3803 val = ((cbp >> (5 - k)) & 1);
3806 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3810 cbp |= val << (5 - k);
/* neighbour availability for DC/AC prediction: blocks 2/3 always have a
 * top neighbour inside the MB; 1/3 always have a left one */
3812 v->a_avail = !s->first_slice_line || (k==2 || k==3);
3813 v->c_avail = !!s->mb_x || (k==1 || k==3);
3815 vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
3817 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3818 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3821 vc1_put_block(v, s->block);
/* overlap smoothing across the top and left MB edges */
3823 if(!s->first_slice_line) {
3824 s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
3825 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
3826 if(!(s->flags & CODEC_FLAG_GRAY)) {
3827 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
3828 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
3831 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
3832 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3834 s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
3835 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
3836 if(!(s->flags & CODEC_FLAG_GRAY)) {
3837 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
3838 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
3841 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
3842 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3845 if(get_bits_count(&s->gb) > v->bits) {
3846 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3850 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3851 s->first_slice_line = 0;
3855 static void vc1_decode_p_blocks(VC1Context *v)
3857 MpegEncContext *s = &v->s;
3859 /* select codingmode used for VLC tables selection */
3860 switch(v->c_ac_table_index){
3862 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3865 v->codingset = CS_HIGH_MOT_INTRA;
3868 v->codingset = CS_MID_RATE_INTRA;
3872 switch(v->c_ac_table_index){
3874 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3877 v->codingset2 = CS_HIGH_MOT_INTER;
3880 v->codingset2 = CS_MID_RATE_INTER;
3884 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3885 s->first_slice_line = 1;
3886 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3887 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3888 ff_init_block_index(s);
3889 ff_update_block_index(s);
3890 s->dsp.clear_blocks(s->block[0]);
3893 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3894 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3898 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3899 s->first_slice_line = 0;
/** Decode the macroblock layer of a B frame.
 * Mirrors vc1_decode_p_blocks: picks the intra/inter AC coding sets from
 * c_ac_table_index/pqindex and walks all macroblocks of the frame.
 * NOTE(review): non-contiguous embedded line numbers indicate elided case
 * labels, else branches and braces; comments cover only visible statements.
 */
3903 static void vc1_decode_b_blocks(VC1Context *v)
3905 MpegEncContext *s = &v->s;
3907 /* select codingmode used for VLC tables selection */
3908 switch(v->c_ac_table_index){
// first table index: choose by quantizer index (high-rate vs. low-motion intra set)
3910 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3913 v->codingset = CS_HIGH_MOT_INTRA;
3916 v->codingset = CS_MID_RATE_INTRA;
// same selection logic for the inter-block coding set
3920 switch(v->c_ac_table_index){
3922 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3925 v->codingset2 = CS_HIGH_MOT_INTER;
3928 v->codingset2 = CS_MID_RATE_INTER;
// register the whole frame as a single slice with the error-resilience code
3932 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3933 s->first_slice_line = 1;
3934 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3935 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3936 ff_init_block_index(s);
3937 ff_update_block_index(s);
3938 s->dsp.clear_blocks(s->block[0]);
// abort decoding if the bit reader ran past the frame's bit budget (v->bits)
3941 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3942 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
// hand each completed 16-pixel macroblock row to the display callback
3946 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3947 s->first_slice_line = 0;
/** Reconstruct a skipped P frame by copying the previous reference picture.
 * No bitstream is parsed here: the decoded frame is a verbatim copy of
 * s->last_picture, done one macroblock row at a time.
 * NOTE(review): some braces/lines are elided from this view (gaps in the
 * embedded numbering).
 */
3951 static void vc1_decode_skip_blocks(VC1Context *v)
3953 MpegEncContext *s = &v->s;
// register the whole frame as one slice with the error-resilience code
3955 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3956 s->first_slice_line = 1;
// copy one 16-pixel-high macroblock row per iteration
3957 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3959 ff_init_block_index(s);
3960 ff_update_block_index(s);
// luma row is 16 lines; each chroma row is 8 lines (4:2:0 subsampling)
3961 memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
3962 memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3963 memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3964 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3965 s->first_slice_line = 0;
// mark the reconstructed frame as a P picture for downstream handling
3967 s->pict_type = P_TYPE;
/** Entry point for decoding the macroblock layer of one frame.
 * Dispatches to the I/P/B decoders based on the parsed picture type.
 * NOTE(review): the switch's case labels and break statements are elided
 * from this view (gaps in the embedded numbering).
 */
3970 static void vc1_decode_blocks(VC1Context *v)
// reset the escape-mode-3 fixed code length before each frame
3973 v->s.esc3_level_length = 0;
3975 switch(v->s.pict_type) {
// I frame: advanced profile has its own variant of the I-block decoder
3977 if(v->profile == PROFILE_ADVANCED)
3978 vc1_decode_i_blocks_adv(v);
3980 vc1_decode_i_blocks(v);
// P frame: a skipped P frame is reconstructed as a copy of the last reference
3983 if(v->p_frame_skipped)
3984 vc1_decode_skip_blocks(v);
3986 vc1_decode_p_blocks(v);
// B frame: the guard around this call is elided — presumably the intra-coded
// (BI) case uses the I-block decoder; confirm against the full source
3990 vc1_decode_i_blocks(v);
3992 vc1_decode_b_blocks(v);
3998 /** Initialize a VC1/WMV3 decoder
3999 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4000 * @todo TODO: Decipher remaining bits in extra_data
/** Initialize the VC-1/WMV3 decoder context.
 * Parses the sequence header (and, for VC-1/WVC1, the entry point) from
 * avctx->extradata and allocates the per-macroblock bitplanes.
 * @return -1 on missing/undersized extradata or failed sub-initialization
 * NOTE(review): gaps in the embedded numbering indicate elided lines
 * (returns, else branches, braces, local declarations such as `gb`/`count`).
 */
4002 static int vc1_decode_init(AVCodecContext *avctx)
4004 VC1Context *v = avctx->priv_data;
4005 MpegEncContext *s = &v->s;
// extradata carries the sequence header; without it we cannot configure
4008 if (!avctx->extradata_size || !avctx->extradata) return -1;
4009 if (!(avctx->flags & CODEC_FLAG_GRAY))
4010 avctx->pix_fmt = PIX_FMT_YUV420P;
4012 avctx->pix_fmt = PIX_FMT_GRAY8;
// decoder always uses edge emulation rather than padded frame borders
4014 avctx->flags |= CODEC_FLAG_EMU_EDGE;
4015 v->s.flags |= CODEC_FLAG_EMU_EDGE;
4017 if(ff_h263_decode_init(avctx) < 0)
4019 if (vc1_init_common(v) < 0) return -1;
4021 avctx->coded_width = avctx->width;
4022 avctx->coded_height = avctx->height;
4023 if (avctx->codec_id == CODEC_ID_WMV3)
4027 // looks like WMV3 has a sequence header stored in the extradata
4028 // advanced sequence header may be before the first frame
4029 // the last byte of the extradata is a version number, 1 for the
4030 // samples we can decode
4032 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
4034 if (decode_sequence_header(avctx, &gb) < 0)
// report leftover (positive) or overconsumed (negative) extradata bits
4037 count = avctx->extradata_size*8 - get_bits_count(&gb);
4040 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
4041 count, get_bits(&gb, count));
4045 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
4047 } else { // VC1/WVC1
4048 int edata_size = avctx->extradata_size;
4049 uint8_t *edata = avctx->extradata;
4051 if(avctx->extradata_size < 16) {
4052 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", edata_size);
// scan extradata for the big-endian sequence-header startcode 0x0000010F
4055 while(edata_size > 8) {
4056 // test if we've found header
4057 if(BE_32(edata) == 0x0000010F) {
4066 init_get_bits(&gb, edata, edata_size*8);
4068 if (decode_sequence_header(avctx, &gb) < 0)
// then scan for the entry-point startcode 0x0000010E
4071 while(edata_size > 8) {
4072 // test if we've found entry point
4073 if(BE_32(edata) == 0x0000010E) {
4082 init_get_bits(&gb, edata, edata_size*8);
4084 if (decode_entry_point(avctx, &gb) < 0)
4087 avctx->has_b_frames= !!(avctx->max_b_frames);
// macroblock dimensions: round pixel dimensions up to a multiple of 16
4089 s->mb_width = (avctx->coded_width+15)>>4;
4090 s->mb_height = (avctx->coded_height+15)>>4;
4092 /* Allocate mb bitplanes */
// NOTE(review): these av_malloc results are not NULL-checked here; an OOM
// would surface later as a crash — verify against the elided error paths
4093 v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4094 v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4095 v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
4096 v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
4098 /* allocate block type info in that way so it could be used with s->block_index[] */
// one base buffer holds three views: mb_type[0] at b8 (8x8 block) resolution,
// mb_type[1]/mb_type[2] at macroblock resolution, each offset past a guard row
4099 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
4100 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
4101 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
4102 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
4104 /* Init coded blocks info */
4105 if (v->profile == PROFILE_ADVANCED)
4107 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
4109 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
4117 /** Decode a VC1/WMV3 frame
4118 * @todo TODO: Handle VC-1 IDUs (Transport level?)
/** Decode one VC-1/WMV3 frame.
 * @param data      output AVFrame (filled on success)
 * @param data_size set to sizeof(AVFrame) when a picture is returned
 * @param buf       input bitstream for one frame (NULL/0 to flush)
 * @return -1 on error or skipped frame; consumed byte count otherwise
 * NOTE(review): gaps in the embedded numbering indicate elided lines
 * (returns, frees of buf2, closing braces); comments cover visible code only.
 */
4120 static int vc1_decode_frame(AVCodecContext *avctx,
4121 void *data, int *data_size,
4122 uint8_t *buf, int buf_size)
4124 VC1Context *v = avctx->priv_data;
4125 MpegEncContext *s = &v->s;
4126 AVFrame *pict = data;
4127 uint8_t *buf2 = NULL;
4129 /* no supplementary picture */
4130 if (buf_size == 0) {
4131 /* special case for last picture */
// flush: in delayed-output mode, emit the buffered next picture
4132 if (s->low_delay==0 && s->next_picture_ptr) {
4133 *pict= *(AVFrame*)s->next_picture_ptr;
4134 s->next_picture_ptr= NULL;
4136 *data_size = sizeof(AVFrame);
4142 //we need to set current_picture_ptr before reading the header, otherwise we can't store anything in there
4143 if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
4144 int i= ff_find_unused_picture(s, 0);
4145 s->current_picture_ptr= &s->picture[i];
4148 avctx->has_b_frames= !s->low_delay;
4150 //for advanced profile we need to unescape buffer
// remove the 0x03 startcode-emulation-prevention bytes (00 00 03 0x, x<4)
4151 if (avctx->codec_id == CODEC_ID_VC1) {
4153 buf2 = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
4155 for(i = 0; i < buf_size; i++) {
4156 if(buf[i] == 3 && i >= 2 && !buf[i-1] && !buf[i-2] && i < buf_size-1 && buf[i+1] < 4) {
4157 buf2[buf_size2++] = buf[i+1];
4160 buf2[buf_size2++] = buf[i];
4162 init_get_bits(&s->gb, buf2, buf_size2*8);
// simple profiles read the buffer directly, no unescaping needed
4164 init_get_bits(&s->gb, buf, buf_size*8);
4165 // do parse frame header
4166 if(v->profile < PROFILE_ADVANCED) {
4167 if(vc1_parse_frame_header(v, &s->gb) == -1) {
4172 if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
// NOTE(review): guard on res_rtm_flag for non-I frames — elided body; the
// intent (reject old WMV3 betas?) should be confirmed against the full source
4178 if(s->pict_type != I_TYPE && !v->res_rtm_flag){
4183 /* skip B frames as they are not decoded correctly */
4184 if(s->pict_type == B_TYPE){
4190 s->current_picture.pict_type= s->pict_type;
4191 s->current_picture.key_frame= s->pict_type == I_TYPE;
4193 /* skip B-frames if we don't have reference frames */
4194 if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){
4196 return -1;//buf_size;
4198 /* skip b frames if we are in a hurry */
4199 if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size;
// honor the user's frame-skipping policy
4200 if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
4201 || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
4202 || avctx->skip_frame >= AVDISCARD_ALL) {
4206 /* skip everything if we are in a hurry>=5 */
4207 if(avctx->hurry_up>=5) {
4209 return -1;//buf_size;
4212 if(s->next_p_frame_damaged){
4213 if(s->pict_type==B_TYPE)
4216 s->next_p_frame_damaged=0;
4219 if(MPV_frame_start(s, avctx) < 0) {
4224 ff_er_frame_start(s);
// record the frame's bit budget so block decoders can detect overruns
4226 v->bits = buf_size * 8;
4227 vc1_decode_blocks(v);
4228 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8);
4229 // if(get_bits_count(&s->gb) > buf_size * 8)
4235 assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
4236 assert(s->current_picture.pict_type == s->pict_type);
// output: B frames and low-delay streams emit the current picture,
// otherwise the (delayed) previous reference is emitted
4237 if (s->pict_type == B_TYPE || s->low_delay) {
4238 *pict= *(AVFrame*)s->current_picture_ptr;
4239 } else if (s->last_picture_ptr != NULL) {
4240 *pict= *(AVFrame*)s->last_picture_ptr;
4243 if(s->last_picture_ptr || s->low_delay){
4244 *data_size = sizeof(AVFrame);
4245 ff_print_debug_info(s, pict);
4248 /* Return the Picture timestamp as the frame number */
4249 /* we subtract 1 because it is added in utils.c */
4250 avctx->frame_number = s->picture_number - 1;
4257 /** Close a VC1/WMV3 decoder
4258 * @warning Initial try at using MpegEncContext stuff
/** Free all decoder-owned allocations and tear down the MPEG context.
 * Counterpart of vc1_decode_init; av_freep() NULLs each pointer so a
 * double close is harmless.
 * NOTE(review): opening/closing braces and the return are elided from view.
 */
4260 static int vc1_decode_end(AVCodecContext *avctx)
4262 VC1Context *v = avctx->priv_data;
// HRD (hypothetical reference decoder) parameter arrays
4264 av_freep(&v->hrd_rate);
4265 av_freep(&v->hrd_buffer);
4266 MPV_common_end(&v->s);
// per-macroblock bitplanes allocated in vc1_decode_init
4267 av_freep(&v->mv_type_mb_plane);
4268 av_freep(&v->direct_mb_plane);
4269 av_freep(&v->acpred_plane);
4270 av_freep(&v->over_flags_plane);
// mb_type[0..2] all point into this one base buffer
4271 av_freep(&v->mb_type_base);
4276 AVCodec vc1_decoder = {
4289 AVCodec wmv3_decoder = {