2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
34 #include "vc1acdata.h"
39 extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
40 extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
41 extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
42 #define MB_INTRA_VLC_BITS 9
43 extern VLC ff_msmp4_mb_i_vlc;
44 extern const uint16_t ff_msmp4_mb_i_table[64][2];
47 static const uint16_t table_mb_intra[64][2];
49 /** Markers used in VC-1 AP frame data */
52 VC1_CODE_RES0 = 0x00000100,
53 VC1_CODE_ENDOFSEQ = 0x0000010A,
62 /** Available Profiles */
67 PROFILE_COMPLEX, ///< TODO: WMV9 specific
72 /** Sequence quantizer mode */
75 QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level
76 QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level
77 QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames
78 QUANT_UNIFORM ///< Uniform quant used for all frames
82 /** Where quant can be changed */
86 DQPROFILE_DOUBLE_EDGES,
87 DQPROFILE_SINGLE_EDGE,
92 /** @name Where quant can be changed
103 /** Which pair of edges is quantized with ALTPQUANT */
106 DQDOUBLE_BEDGE_TOPLEFT,
107 DQDOUBLE_BEDGE_TOPRIGHT,
108 DQDOUBLE_BEDGE_BOTTOMRIGHT,
109 DQDOUBLE_BEDGE_BOTTOMLEFT
113 /** MV modes for P frames */
116 MV_PMODE_1MV_HPEL_BILIN,
120 MV_PMODE_INTENSITY_COMP
124 /** @name MV types for B frames */
129 BMV_TYPE_INTERPOLATED
133 /** @name Block types for P/B frames */
135 enum TransformTypes {
139 TT_8X4, //Both halves
142 TT_4X8, //Both halves
147 /** Table for conversion between TTBLK and TTMB */
148 static const int ttblk_to_tt[3][8] = {
149 { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
150 { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
151 { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
154 static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };
156 /** MV P mode - the 5th element is only used for mode 1 */
157 static const uint8_t mv_pmode_table[2][5] = {
158 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
159 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
161 static const uint8_t mv_pmode_table2[2][4] = {
162 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
163 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
166 /** One more frame type */
169 static const int fps_nr[5] = { 24, 25, 30, 50, 60 },
170 fps_dr[2] = { 1000, 1001 };
171 static const uint8_t pquant_table[3][32] = {
172 { /* Implicit quantizer */
173 0, 1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8, 9, 10, 11, 12,
174 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
176 { /* Explicit quantizer, pquantizer uniform */
177 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
178 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
180 { /* Explicit quantizer, pquantizer non-uniform */
181 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
182 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
186 /** @name VC-1 VLC tables and defines
187 * @todo TODO move this into the context
190 #define VC1_BFRACTION_VLC_BITS 7
191 static VLC vc1_bfraction_vlc;
192 #define VC1_IMODE_VLC_BITS 4
193 static VLC vc1_imode_vlc;
194 #define VC1_NORM2_VLC_BITS 3
195 static VLC vc1_norm2_vlc;
196 #define VC1_NORM6_VLC_BITS 9
197 static VLC vc1_norm6_vlc;
198 /* Could be optimized, one table only needs 8 bits */
199 #define VC1_TTMB_VLC_BITS 9 //12
200 static VLC vc1_ttmb_vlc[3];
201 #define VC1_MV_DIFF_VLC_BITS 9 //15
202 static VLC vc1_mv_diff_vlc[4];
203 #define VC1_CBPCY_P_VLC_BITS 9 //14
204 static VLC vc1_cbpcy_p_vlc[4];
205 #define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
206 static VLC vc1_4mv_block_pattern_vlc[4];
207 #define VC1_TTBLK_VLC_BITS 5
208 static VLC vc1_ttblk_vlc[3];
209 #define VC1_SUBBLKPAT_VLC_BITS 6
210 static VLC vc1_subblkpat_vlc[3];
212 static VLC vc1_ac_coeff_table[8];
216 CS_HIGH_MOT_INTRA = 0,
226 /** @name Overlap conditions for Advanced Profile */
237 * @fixme Change size wherever another size is more efficient
238 * Many members are only used for Advanced Profile
/* NOTE(review): this file is an elided extraction — the interleaved numbers at
 * the start of each line are original-line-number artifacts, and several
 * members (and the struct's closing brace) are missing; verify against the
 * upstream vc1.c before relying on field layout. */
240 typedef struct VC1Context{
245 /** Simple/Main Profile sequence header */
247 int res_sm; ///< reserved, 2b
248 int res_x8; ///< reserved
249 int multires; ///< frame-level RESPIC syntax element present
250 int res_fasttx; ///< reserved, always 1
251 int res_transtab; ///< reserved, always 0
252 int rangered; ///< RANGEREDFRM (range reduction) syntax element present
254 int res_rtm_flag; ///< reserved, set to 1
255 int reserved; ///< reserved
258 /** Advanced Profile */
260 int level; ///< 3bits, for Advanced/Simple Profile, provided by TS layer
261 int chromaformat; ///< 2bits, 2=4:2:0, only defined
262 int postprocflag; ///< Per-frame processing suggestion flag present
263 int broadcast; ///< TFF/RFF present
264 int interlace; ///< Progressive/interlaced (RPTFTM syntax element)
265 int tfcntrflag; ///< TFCNTR present
266 int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
267 int extended_dmv; ///< Additional extended dmv range at P/B frame-level
268 int color_prim; ///< 8bits, chroma coordinates of the color primaries
269 int transfer_char; ///< 8bits, Opto-electronic transfer characteristics
270 int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
271 int hrd_param_flag; ///< Presence of Hypothetical Reference
272 ///< Decoder parameters
273 int psf; ///< Progressive Segmented Frame
276 /** Sequence header data for all Profiles
277 * TODO: choose between ints, uint8_ts and monobit flags
280 int profile; ///< 2bits, Profile
281 int frmrtq_postproc; ///< 3bits, quantized framerate-based postprocessing strength
282 int bitrtq_postproc; ///< 5bits, quantized framerate-based postprocessing strength
283 int fastuvmc; ///< Rounding of qpel vector to hpel ? (not in Simple)
284 int extended_mv; ///< Ext MV in P/B (not in Simple)
285 int dquant; ///< How qscale varies with MBs, 2bits (not in Simple)
286 int vstransform; ///< variable-size [48]x[48] transform type + info
287 int overlap; ///< overlapped transforms in use
288 int quantizer_mode; ///< 2bits, quantizer mode used for sequence, see QUANT_*
289 int finterpflag; ///< INTERPFRM present
292 /** Frame decoding info for all profiles */
294 uint8_t mv_mode; ///< MV coding mode
295 uint8_t mv_mode2; ///< Secondary MV coding mode (B frames)
296 int k_x; ///< Number of bits for MVs (depends on MV range)
297 int k_y; ///< Number of bits for MVs (depends on MV range)
298 int range_x, range_y; ///< MV range
299 uint8_t pq, altpq; ///< Current/alternate frame quantizer scale
300 /** pquant parameters */
307 /** AC coding set indexes
308 * @see 8.1.1.10, p(1)10
311 int c_ac_table_index; ///< Chroma index from ACFRM element
312 int y_ac_table_index; ///< Luma index from AC2FRM element
314 int ttfrm; ///< Transform type info present at frame level
315 uint8_t ttmbf; ///< Transform type flag
316 uint8_t ttblk4x4; ///< Value of ttblk which indicates a 4x4 transform
317 int codingset; ///< index of current table set from 11.8 to use for luma block decoding
318 int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding
319 int pqindex; ///< raw pqindex used in coding set selection
320 int a_avail, c_avail; ///< availability of above/left neighbours for prediction
321 uint8_t *mb_type_base, *mb_type[3]; ///< per-block intra/inter type flags
324 /** Luma compensation parameters */
329 int16_t bfraction; ///< Relative position % anchors=> how to scale MVs
330 uint8_t halfpq; ///< Uniform quant over image and qp+.5
331 uint8_t respic; ///< Frame-level flag for resized images
332 int buffer_fullness; ///< HRD info
334 * -# 0 -> [-64, 63.f] x [-32, 31.f]
335 * -# 1 -> [-128, 127.f] x [-64, 63.f]
336 * -# 2 -> [-512, 511.f] x [-128, 127.f]
337 * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
340 uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use
341 VLC *cbpcy_vlc; ///< CBPCY VLC table
342 int tt_index; ///< Index for Transform Type tables
343 uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
344 uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs
345 int mv_type_is_raw; ///< mv type mb plane is not coded
346 int dmb_is_raw; ///< direct mb plane is raw
347 int skip_is_raw; ///< skip mb plane is not coded
348 uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
349 int use_ic; ///< use intensity compensation in B-frames
350 int rnd; ///< rounding control
352 /** Frame decoding info for S/M profiles only */
354 uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128)
358 /** Frame decoding info for Advanced profile */
360 uint8_t fcm; ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
361 uint8_t numpanscanwin; ///< number of pan-scan windows (NUMPANSCANWIN)
363 uint8_t rptfrm, tff, rff; ///< picture repeat/field-order flags
366 uint16_t bottomrightx; ///< pan-scan window bottom-right x
367 uint16_t bottomrighty; ///< pan-scan window bottom-right y
370 int hrd_num_leaky_buckets; ///< HRD leaky-bucket count
371 uint8_t bit_rate_exponent; ///< HRD rate exponent
372 uint8_t buffer_size_exponent; ///< HRD buffer size exponent
373 uint8_t* acpred_plane; ///< AC prediction flags bitplane
375 uint8_t* over_flags_plane; ///< Overflags bitplane
378 uint16_t *hrd_rate, *hrd_buffer; ///< per-bucket HRD rate/buffer arrays
379 uint8_t *hrd_fullness; ///< per-bucket HRD fullness
380 uint8_t range_mapy_flag; ///< luma range mapping present
381 uint8_t range_mapuv_flag; ///< chroma range mapping present
391 * Get unary code of limited length
392 * @fixme FIXME Slow and ugly
393 * @param gb GetBitContext
394 * @param[in] stop The bitstop value (unary code of 1's or 0's)
395 * @param[in] len Maximum length
396 * @return Unary length/index
/* NOTE(review): body below is truncated by extraction (declarations, the
 * OPEN_READER setup and several closing lines are missing) — restore from
 * upstream vc1.c before compiling. */
398 static int get_prefix(GetBitContext *gb, int stop, int len)
/* Simple form: count bits until the 'stop' bit value appears, capped at len. */
403 for(i = 0; i < len && get_bits1(gb) != stop; i++);
405 /* int i = 0, tmp = !stop;
407 while (i != len && tmp != stop)
409 tmp = get_bits(gb, 1);
412 if (i == len && tmp != stop) return len+1;
/* Cached-bitstream-reader variant: scan the bit cache for the stop bit. */
419 UPDATE_CACHE(re, gb);
420 buf=GET_CACHE(re, gb); //Still not sure
421 if (stop) buf = ~buf;
423 log= av_log2(-buf); //FIXME: -?
425 LAST_SKIP_BITS(re, gb, log+1);
426 CLOSE_READER(re, gb);
430 LAST_SKIP_BITS(re, gb, limit);
431 CLOSE_READER(re, gb);
436 static inline int decode210(GetBitContext *gb){
442 return 2 - get_bits1(gb);
446 * Init VC-1 specific tables and VC1Context members
447 * @param v The VC1Context to initialize
448 * @return 0 (the elided tail presumably returns a status — TODO confirm)
/* NOTE(review): the "done" guard, loop headers (for(i=0;...)) and closing
 * braces of this function are elided in this extraction; the static VLC
 * tables below must only be initialized once. */
450 static int vc1_init_common(VC1Context *v)
455 v->hrd_rate = v->hrd_buffer = NULL;
/* One-time init of the shared (static) VLC tables. */
461 init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
462 vc1_bfraction_bits, 1, 1,
463 vc1_bfraction_codes, 1, 1, 1);
464 init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
465 vc1_norm2_bits, 1, 1,
466 vc1_norm2_codes, 1, 1, 1);
467 init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
468 vc1_norm6_bits, 1, 1,
469 vc1_norm6_codes, 2, 2, 1);
470 init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
471 vc1_imode_bits, 1, 1,
472 vc1_imode_codes, 1, 1, 1);
/* Per-index tables (loop over i elided). */
475 init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
476 vc1_ttmb_bits[i], 1, 1,
477 vc1_ttmb_codes[i], 2, 2, 1);
478 init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
479 vc1_ttblk_bits[i], 1, 1,
480 vc1_ttblk_codes[i], 1, 1, 1);
481 init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
482 vc1_subblkpat_bits[i], 1, 1,
483 vc1_subblkpat_codes[i], 1, 1, 1);
487 init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
488 vc1_4mv_block_pattern_bits[i], 1, 1,
489 vc1_4mv_block_pattern_codes[i], 1, 1, 1);
490 init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
491 vc1_cbpcy_p_bits[i], 1, 1,
492 vc1_cbpcy_p_codes[i], 2, 2, 1);
493 init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
494 vc1_mv_diff_bits[i], 1, 1,
495 vc1_mv_diff_codes[i], 2, 2, 1);
498 init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
499 &vc1_ac_tables[i][0][1], 8, 4,
500 &vc1_ac_tables[i][0][0], 8, 4, 1);
501 init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
502 &ff_msmp4_mb_i_table[0][1], 4, 2,
503 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
/* Per-context member defaults. */
508 v->mvrange = 0; /* 7.1.1.18, p80 */
513 /***********************************************************************/
515 * @defgroup bitplane VC9 Bitplane decoding
520 /** @addtogroup bitplane
533 /** @} */ //imode defines
535 /** Decode rows by checking if they are skipped
536 * @param plane Buffer to store decoded bits
537 * @param[in] width Width of this buffer
538 * @param[in] height Height of this buffer
539 * @param[in] stride Stride of this buffer
540 * @param gb GetBitContext to read the ROWSKIP flags and bits from
/* NOTE(review): the else-branch line and the per-row "plane += stride"
 * advance (plus closing braces) are elided in this extraction. */
541 static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
544 for (y=0; y<height; y++){
545 if (!get_bits(gb, 1)) //rowskip
546 memset(plane, 0, width)
/* skipped row is all-zero; otherwise one bit per column: */;
548 for (x=0; x<width; x++)
549 plane[x] = get_bits(gb, 1);
554 /** Decode columns by checking if they are skipped
555 * @param plane Buffer to store decoded bits
556 * @param[in] width Width of this buffer
557 * @param[in] height Height of this buffer
558 * @param[in] stride Stride of this buffer
559 * @fixme FIXME: Optimize
560 * @param gb GetBitContext to read the COLSKIP flags and bits from
/* NOTE(review): the zero-fill body of the skipped-column branch, the else
 * keyword and the per-column "plane++" advance are elided here. */
561 static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
564 for (x=0; x<width; x++){
565 if (!get_bits(gb, 1)) //colskip
566 for (y=0; y<height; y++)
/* skipped column zeroed; otherwise one bit per row: */
569 for (y=0; y<height; y++)
570 plane[y*stride] = get_bits(gb, 1);
575 /** Decode a bitplane's bits
576 * @param data Buffer (mb_width x mb_height, stride mb_stride) receiving the decoded bits
577 * @param raw_flag Set to 1 when the plane is coded "raw" (read later in the MB layer)
578 * @param v VC-1 context for bit reading and logging
579 * @fixme FIXME: Optimize
580 * @return (imode<<1) + invert
/* NOTE(review): this function is elided in this extraction — the switch on
 * imode, several braces and error-return lines are missing; the visible
 * lines cover the raw/NORM2/NORM6/rowskip/colskip cases and the DIFF
 * post-pass. Verify against upstream vc1.c. */
581 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
583 GetBitContext *gb = &v->s.gb;
585 int imode, x, y, code, offset;
586 uint8_t invert, *planep = data;
587 int width, height, stride;
/* Plane geometry is in macroblock units. */
589 width = v->s.mb_width;
590 height = v->s.mb_height;
591 stride = v->s.mb_stride;
592 invert = get_bits(gb, 1);
593 imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
/* IMODE_RAW: */
599 //Data is actually read in the MB layer (same for all tests == "raw")
600 *raw_flag = 1; //invert ignored
/* IMODE_NORM2 / IMODE_DIFF2: pairs of bits, odd plane size handled first. */
604 if ((height * width) & 1)
606 *planep++ = get_bits(gb, 1);
610 // decode bitplane as one long line
611 for (y = offset; y < height * width; y += 2) {
612 code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
613 *planep++ = code & 1;
615 if(offset == width) {
617 planep += stride - width;
619 *planep++ = code >> 1;
621 if(offset == width) {
623 planep += stride - width;
/* IMODE_NORM6 / IMODE_DIFF6: 6 bits per VLC, laid out 2x3 or 3x2. */
629 if(!(height % 3) && (width % 3)) { // use 2x3 decoding
630 for(y = 0; y < height; y+= 3) {
631 for(x = width & 1; x < width; x += 2) {
632 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
634 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
637 planep[x + 0] = (code >> 0) & 1;
638 planep[x + 1] = (code >> 1) & 1;
639 planep[x + 0 + stride] = (code >> 2) & 1;
640 planep[x + 1 + stride] = (code >> 3) & 1;
641 planep[x + 0 + stride * 2] = (code >> 4) & 1;
642 planep[x + 1 + stride * 2] = (code >> 5) & 1;
644 planep += stride * 3;
/* Left-over odd column decoded with colskip. */
646 if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
/* 3x2 tiles otherwise; odd first row skipped into the residual pass. */
648 planep += (height & 1) * stride;
649 for(y = height & 1; y < height; y += 2) {
650 for(x = width % 3; x < width; x += 3) {
651 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
653 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
656 planep[x + 0] = (code >> 0) & 1;
657 planep[x + 1] = (code >> 1) & 1;
658 planep[x + 2] = (code >> 2) & 1;
659 planep[x + 0 + stride] = (code >> 3) & 1;
660 planep[x + 1 + stride] = (code >> 4) & 1;
661 planep[x + 2 + stride] = (code >> 5) & 1;
663 planep += stride * 2;
/* Residual left columns / top row. */
666 if(x) decode_colskip(data , x, height , stride, &v->s.gb);
667 if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
/* IMODE_ROWSKIP / IMODE_COLSKIP: */
671 decode_rowskip(data, width, height, stride, &v->s.gb);
674 decode_colskip(data, width, height, stride, &v->s.gb);
679 /* Applying diff operator */
680 if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
/* First row is XOR-chained left-to-right; remaining rows predict from
 * top/left neighbours, falling back to 'invert' on disagreement. */
684 for (x=1; x<width; x++)
685 planep[x] ^= planep[x-1];
686 for (y=1; y<height; y++)
689 planep[0] ^= planep[-stride];
690 for (x=1; x<width; x++)
692 if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
693 else planep[x] ^= planep[x-1];
/* Global inversion pass (INVERT bit), applied over the full stride. */
700 for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
702 return (imode<<1) + invert;
705 /** @} */ //Bitplane group
707 /***********************************************************************/
708 /** VOP Dquant decoding
709 * @param v VC-1 Context
710 * @return 0 (elided tail presumably — TODO confirm against upstream)
/* NOTE(review): the dquant==2 / dquant==1 branch structure and several
 * braces are elided here; visible lines show both ALTPQUANT paths. */
711 static int vop_dquant_decoding(VC1Context *v)
713 GetBitContext *gb = &v->s.gb;
/* dquant == 2: ALTPQUANT coded directly (escape pqdiff==7 -> 5-bit value). */
719 pqdiff = get_bits(gb, 3);
720 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
721 else v->altpq = v->pq + pqdiff + 1;
/* dquant == 1: DQUANTFRM + DQPROFILE select where quant may change. */
725 v->dquantfrm = get_bits(gb, 1);
728 v->dqprofile = get_bits(gb, 2);
729 switch (v->dqprofile)
731 case DQPROFILE_SINGLE_EDGE:
732 case DQPROFILE_DOUBLE_EDGES:
733 v->dqsbedge = get_bits(gb, 2);
735 case DQPROFILE_ALL_MBS:
736 v->dqbilevel = get_bits(gb, 1);
737 default: break; //Forbidden ?
739 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
741 pqdiff = get_bits(gb, 3);
742 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
743 else v->altpq = v->pq + pqdiff + 1;
750 /** Put block onto picture
751 * @param v VC-1 context (destination picture, DSP functions, flags)
752 * @param block Six 8x8 DCT blocks (4 luma + 2 chroma) to write out
/* NOTE(review): declarations (Y, ys/us/vs, i/j/k), the rangeredfrm guard
 * around the rescale loop, and several braces are elided here. */
752 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
756 DSPContext *dsp = &v->s.dsp;
/* Range-reduction upscale: sample = (sample-128)*2 + 128. */
760 for(k = 0; k < 6; k++)
761 for(j = 0; j < 8; j++)
762 for(i = 0; i < 8; i++)
763 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
766 ys = v->s.current_picture.linesize[0];
767 us = v->s.current_picture.linesize[1];
768 vs = v->s.current_picture.linesize[2];
/* Four luma blocks in 2x2 layout, then chroma unless grayscale-only. */
771 dsp->put_pixels_clamped(block[0], Y, ys);
772 dsp->put_pixels_clamped(block[1], Y + 8, ys);
774 dsp->put_pixels_clamped(block[2], Y, ys);
775 dsp->put_pixels_clamped(block[3], Y + 8, ys);
777 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
778 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
779 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
783 /** Do motion compensation over 1 macroblock
784 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
785 * @param v VC-1 context
786 * @param dir 0 = forward (last picture), 1 = backward (next picture)
/* NOTE(review): many lines (brace closers, the fastuvmc guard, the
 * rangeredfrm/mspel conditionals and some declarations) are elided in this
 * extraction — compare with upstream vc1.c before editing logic. */
786 static void vc1_mc_1mv(VC1Context *v, int dir)
788 MpegEncContext *s = &v->s;
789 DSPContext *dsp = &v->s.dsp;
790 uint8_t *srcY, *srcU, *srcV;
791 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
793 if(!v->s.last_picture.data[0])return;
795 mx = s->mv[dir][0][0];
796 my = s->mv[dir][0][1];
798 // store motion vectors for further use in B frames
799 if(s->pict_type == P_TYPE) {
800 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
801 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
/* Derive chroma MV from luma MV (quarter-pel -> chroma scale). */
803 uvmx = (mx + ((mx & 3) == 3)) >> 1;
804 uvmy = (my + ((my & 3) == 3)) >> 1;
/* fastuvmc rounding of the chroma vector toward zero (guard elided). */
806 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
807 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
/* Reference selection: forward uses last picture, backward uses next. */
810 srcY = s->last_picture.data[0];
811 srcU = s->last_picture.data[1];
812 srcV = s->last_picture.data[2];
814 srcY = s->next_picture.data[0];
815 srcU = s->next_picture.data[1];
816 srcV = s->next_picture.data[2];
819 src_x = s->mb_x * 16 + (mx >> 2);
820 src_y = s->mb_y * 16 + (my >> 2);
821 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
822 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
824 src_x = clip( src_x, -16, s->mb_width * 16);
825 src_y = clip( src_y, -16, s->mb_height * 16);
826 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
827 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
829 srcY += src_y * s->linesize + src_x;
830 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
831 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
833 /* for grayscale we should not try to read from unknown area */
834 if(s->flags & CODEC_FLAG_GRAY) {
835 srcU = s->edge_emu_buffer + 18 * s->linesize;
836 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* Edge emulation whenever the read can fall outside the padded frame, or
 * the source must be rescaled (range reduction / intensity comp). */
839 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
840 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
841 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
842 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
844 srcY -= s->mspel * (1 + s->linesize);
845 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
846 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
847 srcY = s->edge_emu_buffer;
848 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
849 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
850 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
851 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
854 /* if we deal with range reduction we need to scale source blocks */
860 for(j = 0; j < 17 + s->mspel*2; j++) {
861 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
864 src = srcU; src2 = srcV;
865 for(j = 0; j < 9; j++) {
866 for(i = 0; i < 9; i++) {
867 src[i] = ((src[i] - 128) >> 1) + 128;
868 src2[i] = ((src2[i] - 128) >> 1) + 128;
870 src += s->uvlinesize;
871 src2 += s->uvlinesize;
874 /* if we deal with intensity compensation we need to scale source blocks */
875 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
880 for(j = 0; j < 17 + s->mspel*2; j++) {
881 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
884 src = srcU; src2 = srcV;
885 for(j = 0; j < 9; j++) {
886 for(i = 0; i < 9; i++) {
887 src[i] = v->lutuv[src[i]];
888 src2[i] = v->lutuv[src2[i]];
890 src += s->uvlinesize;
891 src2 += s->uvlinesize;
894 srcY += s->mspel * (1 + s->linesize);
/* Luma MC: quarter-pel mspel path (four 8x8 quadrants)... */
898 dxy = ((my & 3) << 2) | (mx & 3);
899 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
900 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
901 srcY += s->linesize * 8;
902 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
903 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
904 } else { // hpel mc - always used for luma
905 dxy = (my & 2) | ((mx & 2) >> 1);
/* rounded vs non-rounded half-pel put, selected by v->rnd (guard elided). */
908 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
910 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
913 if(s->flags & CODEC_FLAG_GRAY) return;
914 /* Chroma MC always uses qpel bilinear */
915 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
919 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
920 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
922 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
923 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
927 /** Do motion compensation for 4-MV macroblock - luminance block
928 * @param v VC-1 context
929 * @param n Block index 0..3 selecting the 8x8 quadrant of the macroblock
/* NOTE(review): declarations (srcY, off, i/j), the mv assignments from
 * s->mv[0][n] and several guards/braces are elided in this extraction. */
929 static void vc1_mc_4mv_luma(VC1Context *v, int n)
931 MpegEncContext *s = &v->s;
932 DSPContext *dsp = &v->s.dsp;
934 int dxy, mx, my, src_x, src_y;
937 if(!v->s.last_picture.data[0])return;
940 srcY = s->last_picture.data[0];
/* Destination offset of quadrant n inside the 16x16 macroblock. */
942 off = s->linesize * 4 * (n&2) + (n&1) * 8;
944 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
945 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
947 src_x = clip( src_x, -16, s->mb_width * 16);
948 src_y = clip( src_y, -16, s->mb_height * 16);
950 srcY += src_y * s->linesize + src_x;
/* Edge emulation for out-of-frame reads or sources needing rescale. */
952 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
953 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
954 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
955 srcY -= s->mspel * (1 + s->linesize);
956 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
957 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
958 srcY = s->edge_emu_buffer;
959 /* if we deal with range reduction we need to scale source blocks */
965 for(j = 0; j < 9 + s->mspel*2; j++) {
966 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
970 /* if we deal with intensity compensation we need to scale source blocks */
971 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
976 for(j = 0; j < 9 + s->mspel*2; j++) {
977 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
981 srcY += s->mspel * (1 + s->linesize);
/* Quarter-pel mspel put for this 8x8 block... */
985 dxy = ((my & 3) << 2) | (mx & 3);
986 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
987 } else { // hpel mc - always used for luma
988 dxy = (my & 2) | ((mx & 2) >> 1);
/* rounded vs non-rounded half-pel put, by v->rnd (guard elided). */
990 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
992 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/**
 * Median of four integers, computed as the mean of the two middle values.
 * (Reconstructed: the outer "a < b" branch lines of this function were lost
 * in extraction, and the stray leading line-number artifacts were removed.)
 * @return (second-smallest + second-largest) / 2 of {a, b, c, d}
 */
static inline int median4(int a, int b, int c, int d)
{
    if(a < b) {
        if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else      return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else      return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}
1008 /** Do motion compensation for 4-MV macroblock - both chroma blocks
1009 * @param v VC-1 context
/* NOTE(review): the switch over which block is intra, the fastuvmc guard,
 * the intra>1 early path and various braces are elided in this extraction. */
1010 static void vc1_mc_4mv_chroma(VC1Context *v)
1012 MpegEncContext *s = &v->s;
1013 DSPContext *dsp = &v->s.dsp;
1014 uint8_t *srcU, *srcV;
1015 int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
1016 int i, idx, tx = 0, ty = 0;
1017 int mvx[4], mvy[4], intra[4];
/* count[idx] = number of set bits in idx = number of intra luma blocks. */
1018 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
1020 if(!v->s.last_picture.data[0])return;
1021 if(s->flags & CODEC_FLAG_GRAY) return;
1023 for(i = 0; i < 4; i++) {
1024 mvx[i] = s->mv[0][i][0];
1025 mvy[i] = s->mv[0][i][1];
1026 intra[i] = v->mb_type[0][s->block_index[i]];
1029 /* calculate chroma MV vector from four luma MVs */
1030 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
1031 if(!idx) { // all blocks are inter
1032 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
1033 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
1034 } else if(count[idx] == 1) { // 3 inter blocks
/* One case per possible intra block (switch header elided). */
1037 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
1038 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
1041 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
1042 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
1045 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
1046 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
1049 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
1050 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
1053 } else if(count[idx] == 2) {
/* Exactly two inter blocks: average their MVs. */
1055 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
1056 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
1057 tx = (mvx[t1] + mvx[t2]) / 2;
1058 ty = (mvy[t1] + mvy[t2]) / 2;
1060 return; //no need to do MC for inter blocks
1062 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
1063 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
/* Scale the derived luma MV to the chroma grid. */
1064 uvmx = (tx + ((tx&3) == 3)) >> 1;
1065 uvmy = (ty + ((ty&3) == 3)) >> 1;
/* fastuvmc rounding toward zero (guard elided). */
1067 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
1068 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
1071 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1072 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1074 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
1075 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
1076 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1077 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
/* Edge emulation for out-of-frame reads or sources needing rescale. */
1078 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1079 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1080 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
1081 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
1082 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1083 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
1084 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1085 srcU = s->edge_emu_buffer;
1086 srcV = s->edge_emu_buffer + 16;
1088 /* if we deal with range reduction we need to scale source blocks */
1089 if(v->rangeredfrm) {
1091 uint8_t *src, *src2;
1093 src = srcU; src2 = srcV;
1094 for(j = 0; j < 9; j++) {
1095 for(i = 0; i < 9; i++) {
1096 src[i] = ((src[i] - 128) >> 1) + 128;
1097 src2[i] = ((src2[i] - 128) >> 1) + 128;
1099 src += s->uvlinesize;
1100 src2 += s->uvlinesize;
1103 /* if we deal with intensity compensation we need to scale source blocks */
1104 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1106 uint8_t *src, *src2;
1108 src = srcU; src2 = srcV;
1109 for(j = 0; j < 9; j++) {
1110 for(i = 0; i < 9; i++) {
1111 src[i] = v->lutuv[src[i]];
1112 src2[i] = v->lutuv[src2[i]];
1114 src += s->uvlinesize;
1115 src2 += s->uvlinesize;
1120 /* Chroma MC always uses qpel bilinear */
1121 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
/* rounded vs non-rounded bilinear put, by v->rnd (guard elided). */
1125 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1126 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1128 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1129 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1133 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
1136 * Decode Simple/Main Profiles sequence header
1137 * @see Figure 7-8, p16-17
1138 * @param avctx Codec context
1139 * @param gb GetBit context initialized from Codec context extra_data
1140 * @return 0 on success, negative on invalid/forbidden header fields
/* NOTE(review): error returns, braces and a few lines between the av_log
 * calls are elided in this extraction — each forbidden-value log is
 * presumably followed by an error return (verify against upstream). */
1142 static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
1144 VC1Context *v = avctx->priv_data;
1146 av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
1147 v->profile = get_bits(gb, 2);
1148 if (v->profile == 2)
1150 av_log(avctx, AV_LOG_ERROR, "Profile value 2 is forbidden (and WMV3 Complex Profile is unsupported)\n");
/* Advanced Profile has its own header layout. */
1154 if (v->profile == PROFILE_ADVANCED)
1156 return decode_sequence_header_adv(v, gb);
1160 v->res_sm = get_bits(gb, 2); //reserved
1163 av_log(avctx, AV_LOG_ERROR,
1164 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
1170 v->frmrtq_postproc = get_bits(gb, 3); //common
1171 // (bitrate-32kbps)/64kbps
1172 v->bitrtq_postproc = get_bits(gb, 5); //common
1173 v->s.loop_filter = get_bits(gb, 1); //common
1174 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
1176 av_log(avctx, AV_LOG_ERROR,
/* NOTE(review): "shell" should read "shall" in this user-visible message. */
1177 "LOOPFILTER shell not be enabled in simple profile\n");
1180 v->res_x8 = get_bits(gb, 1); //reserved
1183 av_log(avctx, AV_LOG_ERROR,
1184 "1 for reserved RES_X8 is forbidden\n");
1187 v->multires = get_bits(gb, 1);
1188 v->res_fasttx = get_bits(gb, 1);
1191 av_log(avctx, AV_LOG_ERROR,
1192 "0 for reserved RES_FASTTX is forbidden\n");
1196 v->fastuvmc = get_bits(gb, 1); //common
1197 if (!v->profile && !v->fastuvmc)
1199 av_log(avctx, AV_LOG_ERROR,
1200 "FASTUVMC unavailable in Simple Profile\n");
1203 v->extended_mv = get_bits(gb, 1); //common
1204 if (!v->profile && v->extended_mv)
1206 av_log(avctx, AV_LOG_ERROR,
1207 "Extended MVs unavailable in Simple Profile\n");
1210 v->dquant = get_bits(gb, 2); //common
1211 v->vstransform = get_bits(gb, 1); //common
1213 v->res_transtab = get_bits(gb, 1);
1214 if (v->res_transtab)
1216 av_log(avctx, AV_LOG_ERROR,
1217 "1 for reserved RES_TRANSTAB is forbidden\n");
1221 v->overlap = get_bits(gb, 1); //common
1223 v->s.resync_marker = get_bits(gb, 1);
1224 v->rangered = get_bits(gb, 1);
1225 if (v->rangered && v->profile == PROFILE_SIMPLE)
1227 av_log(avctx, AV_LOG_INFO,
1228 "RANGERED should be set to 0 in simple profile\n");
1231 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
1232 v->quantizer_mode = get_bits(gb, 2); //common
1234 v->finterpflag = get_bits(gb, 1); //common
1235 v->res_rtm_flag = get_bits(gb, 1); //reserved
1236 if (!v->res_rtm_flag)
1238 // av_log(avctx, AV_LOG_ERROR,
1239 // "0 for reserved RES_RTM_FLAG is forbidden\n");
1240 av_log(avctx, AV_LOG_ERROR,
1241 "Old WMV3 version detected, only I-frames will be decoded\n");
/* Summary dump of all parsed sequence-header fields. */
1244 av_log(avctx, AV_LOG_DEBUG,
1245 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1246 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
1247 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
1248 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
1249 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
1250 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
1251 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
1252 v->dquant, v->quantizer_mode, avctx->max_b_frames
/* Parse the Advanced Profile sequence header (SMPTE 421M Annex, levels,
 * chroma format, coded dimensions, display metadata, frame rate, HRD).
 * Reads bits from gb and fills the corresponding VC1Context fields.
 * NOTE(review): the embedded numbering has gaps (e.g. 1258, 1261-1262,
 * 1264, 1267, 1270-1273 missing) — braces/else branches/returns were
 * elided from this dump; verify every conditional against upstream vc1.c. */
1257 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
1259 v->res_rtm_flag = 1;
1260 v->level = get_bits(gb, 3);
/* error path for a reserved LEVEL value (condition line elided) */
1263 av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
1265 v->chromaformat = get_bits(gb, 2);
/* only CHROMAFORMAT == 1 (4:2:0) is decodable here */
1266 if (v->chromaformat != 1)
1268 av_log(v->s.avctx, AV_LOG_ERROR,
1269 "Only 4:2:0 chroma format supported\n");
1274 v->frmrtq_postproc = get_bits(gb, 3); //common
1275 // (bitrate-32kbps)/64kbps
1276 v->bitrtq_postproc = get_bits(gb, 5); //common
1277 v->postprocflag = get_bits(gb, 1); //common
/* coded dimensions are stored as (value+1)*2, i.e. always even */
1279 v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
1280 v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
1281 v->s.avctx->width = v->s.avctx->coded_width;
1282 v->s.avctx->height = v->s.avctx->coded_height;
1283 v->broadcast = get_bits1(gb);
1284 v->interlace = get_bits1(gb);
1285 v->tfcntrflag = get_bits1(gb);
1286 v->finterpflag = get_bits1(gb);
1287 get_bits1(gb); // reserved
1289 av_log(v->s.avctx, AV_LOG_DEBUG,
1290 "Advanced Profile level %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1291 "LoopFilter=%i, ChromaFormat=%i, Pulldown=%i, Interlace: %i\n"
1292 "TFCTRflag=%i, FINTERPflag=%i\n",
1293 v->level, v->frmrtq_postproc, v->bitrtq_postproc,
1294 v->s.loop_filter, v->chromaformat, v->broadcast, v->interlace,
1295 v->tfcntrflag, v->finterpflag
1298 v->psf = get_bits1(gb);
1299 if(v->psf) { //PsF, 6.1.13
1300 av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
/* Advanced profile allows up to 7 consecutive B frames */
1303 v->s.max_b_frames = v->s.avctx->max_b_frames = 7;
1304 if(get_bits1(gb)) { //Display Info - decoding is not affected by it
1306 av_log(v->s.avctx, AV_LOG_DEBUG, "Display extended info:\n");
1307 v->s.avctx->width = v->s.width = w = get_bits(gb, 14) + 1;
1308 v->s.avctx->height = v->s.height = h = get_bits(gb, 14) + 1;
1309 av_log(v->s.avctx, AV_LOG_DEBUG, "Display dimensions: %ix%i\n", w, h);
/* aspect ratio: 4-bit index into a table, or (presumably when ar==15)
 * an explicit 8-bit w/h pair — selector lines elided, confirm upstream */
1311 ar = get_bits(gb, 4);
1313 v->s.avctx->sample_aspect_ratio = vc1_pixel_aspect[ar];
1315 w = get_bits(gb, 8);
1316 h = get_bits(gb, 8);
1317 v->s.avctx->sample_aspect_ratio = (AVRational){w, h};
1320 if(get_bits1(gb)){ //framerate stuff
/* one branch: explicit 16-bit frame rate with fixed num=32 */
1322 v->s.avctx->time_base.num = 32;
1323 v->s.avctx->time_base.den = get_bits(gb, 16) + 1;
/* other branch: table-driven nr/dr pair (fps_nr/fps_dr lookup) */
1326 nr = get_bits(gb, 8);
1327 dr = get_bits(gb, 4);
1328 if(nr && nr < 8 && dr && dr < 3){
1329 v->s.avctx->time_base.num = fps_dr[dr - 1];
1330 v->s.avctx->time_base.den = fps_nr[nr - 1] * 1000;
/* optional colour description metadata (stored, not yet used) */
1336 v->color_prim = get_bits(gb, 8);
1337 v->transfer_char = get_bits(gb, 8);
1338 v->matrix_coef = get_bits(gb, 8);
1342 v->hrd_param_flag = get_bits1(gb);
1343 if(v->hrd_param_flag) {
/* HRD (hypothetical reference decoder) parameters: count kept, the
 * per-bucket rate/size values are read and discarded */
1345 v->hrd_num_leaky_buckets = get_bits(gb, 5);
1346 get_bits(gb, 4); //bitrate exponent
1347 get_bits(gb, 4); //buffer size exponent
1348 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1349 get_bits(gb, 16); //hrd_rate[n]
1350 get_bits(gb, 16); //hrd_buffer[n]
/* Parse an Advanced Profile entry-point header: per-entry-point coding
 * tool flags (loop filter, fastuvmc, extended MV, dquant, vstransform,
 * overlap, quantizer mode), optional HRD fullness, optional new coded
 * dimensions and extended DMV / range-mapping flags.
 * NOTE(review): numbering gaps (1357, 1360, 1373, 1377-1380, 1383-1384,
 * 1386, 1389-1390, 1393-1394, 1401+) mean the surrounding conditionals
 * and the function epilogue are elided here — check against upstream. */
1356 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
1358 VC1Context *v = avctx->priv_data;
1359 int i, blink, clentry, refdist;
1361 av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
1362 blink = get_bits1(gb); // broken link
1363 clentry = get_bits1(gb); // closed entry
1364 v->panscanflag = get_bits1(gb);
1365 refdist = get_bits1(gb); // refdist flag
1366 v->s.loop_filter = get_bits1(gb);
1367 v->fastuvmc = get_bits1(gb);
1368 v->extended_mv = get_bits1(gb);
1369 v->dquant = get_bits(gb, 2);
1370 v->vstransform = get_bits1(gb);
1371 v->overlap = get_bits1(gb);
1372 v->quantizer_mode = get_bits(gb, 2);
/* skip per-bucket HRD fullness written by the sequence header's count */
1374 if(v->hrd_param_flag){
1375 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1376 get_bits(gb, 8); //hrd_full[n]
/* optional new coded size, same (val+1)*2 encoding as the seq header */
1381 avctx->coded_width = (get_bits(gb, 12)+1)<<1;
1382 avctx->coded_height = (get_bits(gb, 12)+1)<<1;
1385 v->extended_dmv = get_bits1(gb);
/* luma/chroma range mapping is parsed but not applied — warn the user */
1387 av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
1388 skip_bits(gb, 3); // Y range, ignored for now
1391 av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
1392 skip_bits(gb, 3); // UV range, ignored for now
1395 av_log(avctx, AV_LOG_DEBUG, "Entry point info:\n"
1396 "BrokenLink=%i, ClosedEntry=%i, PanscanFlag=%i\n"
1397 "RefDist=%i, Postproc=%i, FastUVMC=%i, ExtMV=%i\n"
1398 "DQuant=%i, VSTransform=%i, Overlap=%i, Qmode=%i\n",
1399 blink, clentry, v->panscanflag, refdist, v->s.loop_filter,
1400 v->fastuvmc, v->extended_mv, v->dquant, v->vstransform, v->overlap, v->quantizer_mode);
/* Parse a Simple/Main Profile frame (picture) header: picture type,
 * quantizer, MV range, per-type MV mode, intensity-compensation LUTs,
 * bitplanes, MV/CBPCY table indices, transform-type flags and AC/DC
 * coding-set indices.
 * NOTE(review): numbering gaps throughout (e.g. 1406, 1408, 1411,
 * 1420-1421, 1427-1428, …) — opening braces, case labels, else branches
 * and returns are elided in this dump; verify against upstream vc1.c. */
1405 static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
1407 int pqindex, lowquant, status;
1409 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1410 skip_bits(gb, 2); //framecnt unused
1412 if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
/* picture type: 1 bit, plus a second bit (I vs B) only when the stream
 * advertises B frames; otherwise 0/1 maps to I/P */
1413 v->s.pict_type = get_bits(gb, 1);
1414 if (v->s.avctx->max_b_frames) {
1415 if (!v->s.pict_type) {
1416 if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
1417 else v->s.pict_type = B_TYPE;
1418 } else v->s.pict_type = P_TYPE;
1419 } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
/* BFRACTION: VLC-coded; a decoded value of 0 reclassifies the frame as BI */
1422 if(v->s.pict_type == B_TYPE) {
1423 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1424 v->bfraction = vc1_bfraction_lut[v->bfraction];
1425 if(v->bfraction == 0) {
1426 v->s.pict_type = BI_TYPE;
1429 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1430 get_bits(gb, 7); // skip buffer fullness
/* bodies of the next two conditionals are elided in this dump */
1433 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1435 if(v->s.pict_type == P_TYPE)
1438 /* Quantizer stuff */
1439 pqindex = get_bits(gb, 5);
/* PQINDEX maps to PQUANT through one of two tables depending on mode */
1440 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1441 v->pq = pquant_table[0][pqindex];
1443 v->pq = pquant_table[1][pqindex];
/* pquantizer: 1 = uniform, implicit mode derives it from pqindex < 9 */
1446 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1447 v->pquantizer = pqindex < 9;
1448 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1450 v->pqindex = pqindex;
1451 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1453 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1454 v->pquantizer = get_bits(gb, 1);
/* MVRANGE selects the MV component bit widths k_x/k_y and wrap ranges */
1456 if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
1457 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1458 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1459 v->range_x = 1 << (v->k_x - 1);
1460 v->range_y = 1 << (v->k_y - 1);
1461 if (v->profile == PROFILE_ADVANCED)
1463 if (v->postprocflag) v->postproc = get_bits(gb, 1);
1466 if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);
1468 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
1469 // (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
1471 if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
/* per-picture-type parsing; case labels elided (presumably P then B) */
1473 switch(v->s.pict_type) {
1475 if (v->pq < 5) v->tt_index = 0;
1476 else if(v->pq < 13) v->tt_index = 1;
1477 else v->tt_index = 2;
1479 lowquant = (v->pq > 12) ? 0 : 1;
1480 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1481 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1483 int scale, shift, i;
1484 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1485 v->lumscale = get_bits(gb, 6);
1486 v->lumshift = get_bits(gb, 6);
1488 /* fill lookup tables for intensity compensation */
/* NOTE(review): branch structure around lumscale==0 / lumshift>31 is
 * elided (lines 1489-1490, 1493-1494, 1498 missing) — confirm upstream */
1491 shift = (255 - v->lumshift * 2) << 6;
1492 if(v->lumshift > 31)
1495 scale = v->lumscale + 32;
1496 if(v->lumshift > 31)
1497 shift = (v->lumshift - 64) << 6;
1499 shift = v->lumshift << 6;
/* build 256-entry luma and chroma remap tables, rounded, clipped to u8 */
1501 for(i = 0; i < 256; i++) {
1502 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1503 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
/* quarter_sample/mspel derived from (possibly two-level) MV mode */
1506 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1507 v->s.quarter_sample = 0;
1508 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1509 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1510 v->s.quarter_sample = 0;
1512 v->s.quarter_sample = 1;
1514 v->s.quarter_sample = 1;
1515 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
/* mixed-MV pictures carry a per-MB MV-type bitplane; otherwise raw zeros */
1517 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1518 v->mv_mode2 == MV_PMODE_MIXED_MV)
1519 || v->mv_mode == MV_PMODE_MIXED_MV)
1521 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1522 if (status < 0) return -1;
1523 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1524 "Imode: %i, Invert: %i\n", status>>1, status&1);
1526 v->mv_type_is_raw = 0;
1527 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1529 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1530 if (status < 0) return -1;
1531 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1532 "Imode: %i, Invert: %i\n", status>>1, status&1);
1534 /* Hopefully this is correct for P frames */
1535 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1536 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1540 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1541 vop_dquant_decoding(v);
1544 v->ttfrm = 0; //FIXME Is that so ?
1547 v->ttmbf = get_bits(gb, 1);
1550 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* B-frame branch: simpler MV mode (1MV vs HPEL bilinear), direct and
 * skip bitplanes, then the same table-index / dquant / TT parsing */
1558 if (v->pq < 5) v->tt_index = 0;
1559 else if(v->pq < 13) v->tt_index = 1;
1560 else v->tt_index = 2;
1562 lowquant = (v->pq > 12) ? 0 : 1;
1563 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1564 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1565 v->s.mspel = v->s.quarter_sample;
1567 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1568 if (status < 0) return -1;
1569 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1570 "Imode: %i, Invert: %i\n", status>>1, status&1);
1571 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1572 if (status < 0) return -1;
1573 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1574 "Imode: %i, Invert: %i\n", status>>1, status&1);
1576 v->s.mv_table_index = get_bits(gb, 2);
1577 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1581 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1582 vop_dquant_decoding(v);
1588 v->ttmbf = get_bits(gb, 1);
1591 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* AC/DC coding-set selection, common to all picture types */
1601 v->c_ac_table_index = decode012(gb);
1602 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1604 v->y_ac_table_index = decode012(gb);
1607 v->s.dc_table_index = get_bits(gb, 1);
/* internally a BI picture is decoded as B from here on */
1609 if(v->s.pict_type == BI_TYPE) {
1610 v->s.pict_type = B_TYPE;
/* Parse an Advanced Profile frame header. Largely parallels
 * vc1_parse_frame_header() but adds: FCM (frame coding mode), a prefix
 * code for the picture type (P/B/I/BI/skipped), pulldown/interlace
 * repeat fields, UV sampling flag, and I/BI-frame ACPRED and CONDOVER
 * bitplanes.
 * NOTE(review): like the rest of this dump, numbering gaps show elided
 * lines (braces, else branches, case labels) — verify upstream. */
1616 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
1618 int pqindex, lowquant;
1621 v->p_frame_skipped = 0;
/* FCM != 0 means interlaced frame/field coding — not implemented */
1624 v->fcm = decode012(gb);
1625 if(v->fcm) return -1; // interlaced frames/fields are not implemented
/* picture type is a unary prefix code: P, B, I, BI, skipped-P */
1627 switch(get_prefix(gb, 0, 4)) {
1629 v->s.pict_type = P_TYPE;
1632 v->s.pict_type = B_TYPE;
1635 v->s.pict_type = I_TYPE;
1638 v->s.pict_type = BI_TYPE;
1641 v->s.pict_type = P_TYPE; // skipped pic
1642 v->p_frame_skipped = 1;
/* pulldown/repeat info: progressive uses 2-bit RPTFRM, interlaced
 * (condition apparently inverted vs. the field names — confirm upstream)
 * uses TFF + 1-bit RPTFRM */
1648 if(!v->interlace || v->psf) {
1649 v->rptfrm = get_bits(gb, 2);
1651 v->tff = get_bits1(gb);
1652 v->rptfrm = get_bits1(gb);
1655 if(v->panscanflag) {
1658 v->rnd = get_bits1(gb);
1660 v->uvsamp = get_bits1(gb);
1661 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1662 if(v->s.pict_type == B_TYPE) {
1663 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1664 v->bfraction = vc1_bfraction_lut[v->bfraction];
1665 if(v->bfraction == 0) {
1666 v->s.pict_type = BI_TYPE; /* XXX: should not happen here */
/* quantizer parsing — identical scheme to the simple/main header */
1669 pqindex = get_bits(gb, 5);
1670 v->pqindex = pqindex;
1671 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1672 v->pq = pquant_table[0][pqindex];
1674 v->pq = pquant_table[1][pqindex];
1677 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1678 v->pquantizer = pqindex < 9;
1679 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1681 v->pqindex = pqindex;
1682 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1684 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1685 v->pquantizer = get_bits(gb, 1);
1687 switch(v->s.pict_type) {
/* I/BI: ACPRED bitplane plus conditional-overlap (CONDOVER) handling */
1690 status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
1691 if (status < 0) return -1;
1692 av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
1693 "Imode: %i, Invert: %i\n", status>>1, status&1);
1694 v->condover = CONDOVER_NONE;
1695 if(v->overlap && v->pq <= 8) {
1696 v->condover = decode012(gb);
1697 if(v->condover == CONDOVER_SELECT) {
1698 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
1699 if (status < 0) return -1;
1700 av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
1701 "Imode: %i, Invert: %i\n", status>>1, status&1);
/* P-frame branch: MV range, MV mode, intensity compensation, bitplanes */
1707 v->postproc = get_bits1(gb);
1708 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1709 else v->mvrange = 0;
1710 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1711 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1712 v->range_x = 1 << (v->k_x - 1);
1713 v->range_y = 1 << (v->k_y - 1);
1715 if (v->pq < 5) v->tt_index = 0;
1716 else if(v->pq < 13) v->tt_index = 1;
1717 else v->tt_index = 2;
1719 lowquant = (v->pq > 12) ? 0 : 1;
1720 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1721 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1723 int scale, shift, i;
1724 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1725 v->lumscale = get_bits(gb, 6);
1726 v->lumshift = get_bits(gb, 6);
1727 /* fill lookup tables for intensity compensation */
/* NOTE(review): same elided branch structure as in
 * vc1_parse_frame_header() around lumscale/lumshift — confirm upstream */
1730 shift = (255 - v->lumshift * 2) << 6;
1731 if(v->lumshift > 31)
1734 scale = v->lumscale + 32;
1735 if(v->lumshift > 31)
1736 shift = (v->lumshift - 64) << 6;
1738 shift = v->lumshift << 6;
1740 for(i = 0; i < 256; i++) {
1741 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1742 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1745 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1746 v->s.quarter_sample = 0;
1747 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1748 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1749 v->s.quarter_sample = 0;
1751 v->s.quarter_sample = 1;
1753 v->s.quarter_sample = 1;
1754 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1756 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1757 v->mv_mode2 == MV_PMODE_MIXED_MV)
1758 || v->mv_mode == MV_PMODE_MIXED_MV)
1760 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1761 if (status < 0) return -1;
1762 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1763 "Imode: %i, Invert: %i\n", status>>1, status&1);
1765 v->mv_type_is_raw = 0;
1766 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1768 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1769 if (status < 0) return -1;
1770 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1771 "Imode: %i, Invert: %i\n", status>>1, status&1);
1773 /* Hopefully this is correct for P frames */
1774 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1775 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1778 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1779 vop_dquant_decoding(v);
1782 v->ttfrm = 0; //FIXME Is that so ?
1785 v->ttmbf = get_bits(gb, 1);
1788 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* B-frame branch: mirrors the P branch with the simpler two-mode MV */
1797 v->postproc = get_bits1(gb);
1798 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1799 else v->mvrange = 0;
1800 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1801 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1802 v->range_x = 1 << (v->k_x - 1);
1803 v->range_y = 1 << (v->k_y - 1);
1805 if (v->pq < 5) v->tt_index = 0;
1806 else if(v->pq < 13) v->tt_index = 1;
1807 else v->tt_index = 2;
1809 lowquant = (v->pq > 12) ? 0 : 1;
1810 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1811 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1812 v->s.mspel = v->s.quarter_sample;
1814 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1815 if (status < 0) return -1;
1816 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1817 "Imode: %i, Invert: %i\n", status>>1, status&1);
1818 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1819 if (status < 0) return -1;
1820 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1821 "Imode: %i, Invert: %i\n", status>>1, status&1);
1823 v->s.mv_table_index = get_bits(gb, 2);
1824 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1828 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1829 vop_dquant_decoding(v);
1835 v->ttmbf = get_bits(gb, 1);
1838 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* AC/DC coding sets; unlike the simple/main header, I/BI pictures with
 * dquant also parse VOPDQUANT here */
1848 v->c_ac_table_index = decode012(gb);
1849 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1851 v->y_ac_table_index = decode012(gb);
1854 v->s.dc_table_index = get_bits(gb, 1);
1855 if ((v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE) && v->dquant) {
1856 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1857 vop_dquant_decoding(v);
/* internally a BI picture is decoded as B from here on */
1861 if(v->s.pict_type == BI_TYPE) {
1862 v->s.pict_type = B_TYPE;
1868 /***********************************************************************/
1870 * @defgroup block VC-1 Block-level functions
1871 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1877 * @brief Get macroblock-level quantizer scale
/* GET_MQUANT(): compute the macroblock quantizer `mquant` from v->pq,
 * v->altpq and the DQUANT profile. DQPROFILE_ALL_MBS either picks
 * pq/altpq with 1 bit or reads a 3-bit MQDIFF (7 = escape to explicit
 * 5-bit mquant); edge profiles override mquant with altpq on the
 * selected picture edges. Expects `gb`, `s`, `mquant`, `mqdiff` and
 * `edges` in scope at the expansion site.
 * NOTE(review): comment lines cannot be inserted between the
 * backslash-continued lines below without breaking the macro, and this
 * dump has elided several of them (1880-1882, 1884-1886, …) —
 * verify the full macro against upstream vc1.c. */
1879 #define GET_MQUANT() \
1883 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1887 mquant = (get_bits(gb, 1)) ? v->altpq : v->pq; \
1891 mqdiff = get_bits(gb, 3); \
1892 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1893 else mquant = get_bits(gb, 5); \
1896 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1897 edges = 1 << v->dqsbedge; \
1898 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1899 edges = (3 << v->dqsbedge) % 15; \
1900 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
1902 if((edges&1) && !s->mb_x) \
1903 mquant = v->altpq; \
1904 if((edges&2) && s->first_slice_line) \
1905 mquant = v->altpq; \
1906 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1907 mquant = v->altpq; \
1908 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1909 mquant = v->altpq; \
/* GET_MVDATA: decode one MVDATA element. Index 0 = zero MV; 35 = raw
 * k_x/k_y-bit components; 36 appears to be another escape (its body is
 * elided here); otherwise index1-driven table decode with sign folding.
 * Expects `gb`, `s`, `v`, `index`, `index1`, `val`, `sign`,
 * `mb_has_coeffs`, `size_table`, `offset_table` in scope.
 * NOTE(review): no comment lines can go between the continued lines,
 * and several are elided from this dump (1922-1923, 1925-1926, 1928,
 * 1931, 1934, 1936-1943, …) — verify against upstream vc1.c. */
1913 * @def GET_MVDATA(_dmv_x, _dmv_y)
1914 * @brief Get MV differentials
1915 * @see MVDATA decoding from 8.3.5.2, p(1)20
1916 * @param _dmv_x Horizontal differential for decoded MV
1917 * @param _dmv_y Vertical differential for decoded MV
1919 #define GET_MVDATA(_dmv_x, _dmv_y) \
1920 index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
1921 VC1_MV_DIFF_VLC_BITS, 2); \
1924 mb_has_coeffs = 1; \
1927 else mb_has_coeffs = 0; \
1929 if (!index) { _dmv_x = _dmv_y = 0; } \
1930 else if (index == 35) \
1932 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1933 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1935 else if (index == 36) \
1944 if (!s->quarter_sample && index1 == 5) val = 1; \
1946 if(size_table[index1] - val > 0) \
1947 val = get_bits(gb, size_table[index1] - val); \
1949 sign = 0 - (val&1); \
1950 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1953 if (!s->quarter_sample && index1 == 5) val = 1; \
1955 if(size_table[index1] - val > 0) \
1956 val = get_bits(gb, size_table[index1] - val); \
1958 sign = 0 - (val&1); \
1959 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1962 /** Predict and set motion vector
/* Predict a forward motion vector for luma block n (median of the A/B/C
 * neighbour MVs), apply the 8.3.5.3.4 pullback and the 8.3.5.3.5 hybrid
 * override, add the decoded differential, wrap into the signed MV range,
 * and store it into s->mv and current_picture.motion_val.
 * @param n     block index within the MB (0..3); in 1-MV mode (mv1) the
 *              result is copied to all four 8x8 positions
 * @param mv1   nonzero = 1-MV macroblock, duplicate MV over the 2x2 blocks
 * @param r_x,r_y  MV wrap ranges (v->range_x / v->range_y at call sites)
 * @param is_intra per-position intra flags used by hybrid prediction
 * NOTE(review): elided lines again (1965, 1967-1970, 1974, 1977-1978,
 * 1988-1991, 1994, 1996, 1998-1999, 2001-2011, 2013, 2016-2018, 2021,
 * 2023-2027, 2029-2030, 2035, 2038, 2041, 2044, 2049, 2051, 2053-2059,
 * 2062, 2064, 2066-2074, 2085-2087) — intra early-out, the per-block B
 * offset switch and the px/py assignment branches are missing here. */
1964 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
1966 int xy, wrap, off = 0;
1971 /* scale MV difference to be quad-pel */
1972 dmv_x <<= 1 - s->quarter_sample;
1973 dmv_y <<= 1 - s->quarter_sample;
1975 wrap = s->b8_stride;
1976 xy = s->block_index[n];
/* intra block: zero the MV (and all four positions in 1-MV mode) */
1979 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1980 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1981 if(mv1) { /* duplicate motion data for 1-MV block */
1982 s->current_picture.motion_val[0][xy + 1][0] = 0;
1983 s->current_picture.motion_val[0][xy + 1][1] = 0;
1984 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1985 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1986 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1987 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
/* gather neighbour predictors: C = left, A = above, B = above-right
 * (or above-left at the right picture edge / depending on block index) */
1992 C = s->current_picture.motion_val[0][xy - 1];
1993 A = s->current_picture.motion_val[0][xy - wrap];
1995 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1997 //in 4-MV mode different blocks have different B predictor position
2000 off = (s->mb_x > 0) ? -1 : 1;
2003 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
2012 B = s->current_picture.motion_val[0][xy - wrap + off];
2014 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
2015 if(s->mb_width == 1) {
2019 px = mid_pred(A[0], B[0], C[0]);
2020 py = mid_pred(A[1], B[1], C[1]);
2022 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
2028 /* Pullback MV as specified in 8.3.5.3.4 */
2031 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
2032 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
2033 X = (s->mb_width << 6) - 4;
2034 Y = (s->mb_height << 6) - 4;
/* two clamp bounds (-60 vs -28) — the selecting condition is elided;
 * presumably quarter- vs half-pel, confirm upstream */
2036 if(qx + px < -60) px = -60 - qx;
2037 if(qy + py < -60) py = -60 - qy;
2039 if(qx + px < -28) px = -28 - qx;
2040 if(qy + py < -28) py = -28 - qy;
2042 if(qx + px > X) px = X - qx;
2043 if(qy + py > Y) py = Y - qy;
2045 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2046 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
2047 if(is_intra[xy - wrap])
2048 sum = FFABS(px) + FFABS(py);
2050 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
/* hybrid bit read from the bitstream chooses A or C explicitly */
2052 if(get_bits1(&s->gb)) {
2060 if(is_intra[xy - 1])
2061 sum = FFABS(px) + FFABS(py);
2063 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2065 if(get_bits1(&s->gb)) {
2075 /* store MV using signed modulus of MV range defined in 4.11 */
2076 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
2077 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
2078 if(mv1) { /* duplicate motion data for 1-MV block */
2079 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
2080 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
2081 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
2082 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
2083 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
2084 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
2088 /** Motion compensation for direct or interpolated blocks in B-frames
/* Motion compensation from the NEXT (backward) reference for direct /
 * interpolated B-frame blocks: derives luma MV (s->mv[1][0]) and the
 * rounded chroma MV, clips the source position, uses the edge-emulation
 * buffer near picture borders, undoes range reduction if active, then
 * AVERAGES (avg_*) the prediction into s->dest — i.e. the forward
 * prediction must already be in dest.
 * NOTE(review): elided lines (2091, 2096, 2098, 2103, 2106, 2110, 2115,
 * 2120, 2124, 2129-2131, 2135, 2144-2145, 2148, 2150-2151, 2154-2155,
 * 2161, 2164-2165, 2167-2170, 2172, 2174, 2178-2179, 2182-2183) hide
 * the fastuvmc condition, the emu-edge trigger and the mspel dispatch. */
2090 static void vc1_interp_mc(VC1Context *v)
2092 MpegEncContext *s = &v->s;
2093 DSPContext *dsp = &v->s.dsp;
2094 uint8_t *srcY, *srcU, *srcV;
2095 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
2097 if(!v->s.next_picture.data[0])return;
/* luma MV in quarter-pel; chroma MV = rounded halved luma MV */
2099 mx = s->mv[1][0][0];
2100 my = s->mv[1][0][1];
2101 uvmx = (mx + ((mx & 3) == 3)) >> 1;
2102 uvmy = (my + ((my & 3) == 3)) >> 1;
/* FASTUVMC rounding toward zero (guard condition elided) */
2104 uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
2105 uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
2107 srcY = s->next_picture.data[0];
2108 srcU = s->next_picture.data[1];
2109 srcV = s->next_picture.data[2];
2111 src_x = s->mb_x * 16 + (mx >> 2);
2112 src_y = s->mb_y * 16 + (my >> 2);
2113 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2114 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2116 src_x = clip( src_x, -16, s->mb_width * 16);
2117 src_y = clip( src_y, -16, s->mb_height * 16);
2118 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
2119 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
2121 srcY += src_y * s->linesize + src_x;
2122 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2123 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2125 /* for grayscale we should not try to read from unknown area */
2126 if(s->flags & CODEC_FLAG_GRAY) {
2127 srcU = s->edge_emu_buffer + 18 * s->linesize;
2128 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* near-border sources are copied through the edge-emulation buffer */
2132 || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
2133 || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
2134 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
2136 srcY -= s->mspel * (1 + s->linesize);
2137 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
2138 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
2139 srcY = s->edge_emu_buffer;
2140 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
2141 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2142 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
2143 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2146 /* if we deal with range reduction we need to scale source blocks */
2147 if(v->rangeredfrm) {
2149 uint8_t *src, *src2;
2152 for(j = 0; j < 17 + s->mspel*2; j++) {
2153 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
2156 src = srcU; src2 = srcV;
2157 for(j = 0; j < 9; j++) {
2158 for(i = 0; i < 9; i++) {
2159 src[i] = ((src[i] - 128) >> 1) + 128;
2160 src2[i] = ((src2[i] - 128) >> 1) + 128;
2162 src += s->uvlinesize;
2163 src2 += s->uvlinesize;
2166 srcY += s->mspel * (1 + s->linesize);
/* non-mspel luma path: half-pel dxy index, averaged into dest */
2171 dxy = ((my & 1) << 1) | (mx & 1);
2173 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
2175 if(s->flags & CODEC_FLAG_GRAY) return;
2176 /* Chroma MC always uses qpel blilinear */
2177 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
2180 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
2181 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Scale a reference MV component by the B-frame fraction `bfrac`
 * (inv selects the complementary 1-bfrac weight; qs keeps half-pel
 * results on the half-pel grid by doubling a halved rounded value).
 * The #if arm uses shifts for the fixed B_FRACTION_DEN==256 case, the
 * #else arm generic division.
 * NOTE(review): lines 2185-2187, 2189-2191, 2194-2195, 2197, 2200-2202
 * (opening brace, `int n = bfrac;`, the inv/qs conditionals, closing
 * brace) are elided from this dump. */
2184 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2188 #if B_FRACTION_DEN==256
2192 return 2 * ((value * n + 255) >> 9);
2193 return (value * n + 128) >> 8;
2196 n -= B_FRACTION_DEN;
2198 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2199 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2203 /** Reconstruct motion vector for B-frame and do motion compensation
/* Dispatch B-frame motion compensation by mode. When intensity
 * compensation is active (v->use_ic), mv_mode is temporarily forced to
 * MV_PMODE_INTENSITY_COMP around the backward call (so the IC LUTs are
 * applied) and restored from mv_mode2 afterwards.
 * INTERPOLATED averages forward + backward; other modes do a single
 * 1-MV compensation in the given direction.
 * NOTE(review): the direct branch, the vc1_mc_1mv/vc1_interp_mc calls
 * of the interpolated path and several braces are elided here
 * (2206-2207, 2210-2213, 2215-2216, 2218-2219, 2221-2223, 2227-2228). */
2205 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
2208 v->mv_mode2 = v->mv_mode;
2209 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2214 if(v->use_ic) v->mv_mode = v->mv_mode2;
2217 if(mode == BMV_TYPE_INTERPOLATED) {
2220 if(v->use_ic) v->mv_mode = v->mv_mode2;
2224 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
2225 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2226 if(v->use_ic) v->mv_mode = v->mv_mode2;
/* Predict B-frame MVs for the whole macroblock (block_index[0] only —
 * B frames are 1-MV). Direct mode scales the co-located next-picture MV
 * by bfraction via scale_mv(); otherwise the forward and/or backward MV
 * is median-predicted from neighbours, pulled back into the picture
 * (8.3.5.3.4), optionally hybrid-corrected (8.3.5.3.5 — note the
 * `if(0 && …)` makes both hybrid blocks dead code here), then wrapped
 * into the signed MV range and stored into s->mv / motion_val.
 * NOTE(review): many lines elided (2230, 2233-2236, 2238-2240, 2246,
 * 2249-2250, 2255-2256, 2261, 2266-2268, 2274, 2277-2279, 2282,
 * 2284-2288, 2290-2291, 2301, 2310-2311, 2316, 2318, 2320-2326, 2329,
 * 2331, 2333-2341, 2345, 2351, 2354-2356, 2359, 2361-2365, 2367-2368,
 * 2378, 2387-2388, 2393, 2395, 2397-2403, 2406, 2408, 2410-2418, 2420,
 * 2423, 2428-2429) — r_x/r_y setup and px/py branches are missing. */
2229 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
2231 MpegEncContext *s = &v->s;
2232 int xy, wrap, off = 0;
2237 const uint8_t *is_intra = v->mb_type[0];
2241 /* scale MV difference to be quad-pel */
2242 dmv_x[0] <<= 1 - s->quarter_sample;
2243 dmv_y[0] <<= 1 - s->quarter_sample;
2244 dmv_x[1] <<= 1 - s->quarter_sample;
2245 dmv_y[1] <<= 1 - s->quarter_sample;
2247 wrap = s->b8_stride;
2248 xy = s->block_index[0];
/* intra MB: zero both direction MVs (condition line elided) */
2251 s->current_picture.motion_val[0][xy][0] =
2252 s->current_picture.motion_val[0][xy][1] =
2253 s->current_picture.motion_val[1][xy][0] =
2254 s->current_picture.motion_val[1][xy][1] = 0;
/* direct mode: scale co-located next-picture backward MV by bfraction */
2257 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2258 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2259 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2260 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2262 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2263 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2264 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2265 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
/* ---- forward (list 0) prediction ---- */
2269 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2270 C = s->current_picture.motion_val[0][xy - 2];
2271 A = s->current_picture.motion_val[0][xy - wrap*2];
2272 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2273 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
2275 if(!s->first_slice_line) { // predictor A is not out of bounds
2276 if(s->mb_width == 1) {
2280 px = mid_pred(A[0], B[0], C[0]);
2281 py = mid_pred(A[1], B[1], C[1]);
2283 } else if(s->mb_x) { // predictor C is not out of bounds
2289 /* Pullback MV as specified in 8.3.5.3.4 */
/* simple/main uses half-pel units (<<5), advanced quarter-pel (<<6) */
2292 if(v->profile < PROFILE_ADVANCED) {
2293 qx = (s->mb_x << 5);
2294 qy = (s->mb_y << 5);
2295 X = (s->mb_width << 5) - 4;
2296 Y = (s->mb_height << 5) - 4;
2297 if(qx + px < -28) px = -28 - qx;
2298 if(qy + py < -28) py = -28 - qy;
2299 if(qx + px > X) px = X - qx;
2300 if(qy + py > Y) py = Y - qy;
2302 qx = (s->mb_x << 6);
2303 qy = (s->mb_y << 6);
2304 X = (s->mb_width << 6) - 4;
2305 Y = (s->mb_height << 6) - 4;
2306 if(qx + px < -60) px = -60 - qx;
2307 if(qy + py < -60) py = -60 - qy;
2308 if(qx + px > X) px = X - qx;
2309 if(qy + py > Y) py = Y - qy;
2312 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* deliberately disabled via `0 &&` — hybrid prediction dead code */
2313 if(0 && !s->first_slice_line && s->mb_x) {
2314 if(is_intra[xy - wrap])
2315 sum = FFABS(px) + FFABS(py);
2317 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2319 if(get_bits1(&s->gb)) {
2327 if(is_intra[xy - 2])
2328 sum = FFABS(px) + FFABS(py);
2330 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2332 if(get_bits1(&s->gb)) {
2342 /* store MV using signed modulus of MV range defined in 4.11 */
2343 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2344 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* ---- backward (list 1) prediction, mirror of the forward path ---- */
2346 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2347 C = s->current_picture.motion_val[1][xy - 2];
2348 A = s->current_picture.motion_val[1][xy - wrap*2];
2349 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2350 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
2352 if(!s->first_slice_line) { // predictor A is not out of bounds
2353 if(s->mb_width == 1) {
2357 px = mid_pred(A[0], B[0], C[0]);
2358 py = mid_pred(A[1], B[1], C[1]);
2360 } else if(s->mb_x) { // predictor C is not out of bounds
2366 /* Pullback MV as specified in 8.3.5.3.4 */
2369 if(v->profile < PROFILE_ADVANCED) {
2370 qx = (s->mb_x << 5);
2371 qy = (s->mb_y << 5);
2372 X = (s->mb_width << 5) - 4;
2373 Y = (s->mb_height << 5) - 4;
2374 if(qx + px < -28) px = -28 - qx;
2375 if(qy + py < -28) py = -28 - qy;
2376 if(qx + px > X) px = X - qx;
2377 if(qy + py > Y) py = Y - qy;
2379 qx = (s->mb_x << 6);
2380 qy = (s->mb_y << 6);
2381 X = (s->mb_width << 6) - 4;
2382 Y = (s->mb_height << 6) - 4;
2383 if(qx + px < -60) px = -60 - qx;
2384 if(qy + py < -60) py = -60 - qy;
2385 if(qx + px > X) px = X - qx;
2386 if(qy + py > Y) py = Y - qy;
2389 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* also disabled via `0 &&` — kept for reference */
2390 if(0 && !s->first_slice_line && s->mb_x) {
2391 if(is_intra[xy - wrap])
2392 sum = FFABS(px) + FFABS(py);
2394 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2396 if(get_bits1(&s->gb)) {
2404 if(is_intra[xy - 2])
2405 sum = FFABS(px) + FFABS(py);
2407 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2409 if(get_bits1(&s->gb)) {
2419 /* store MV using signed modulus of MV range defined in 4.11 */
2421 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2422 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2424 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2425 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2426 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2427 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2430 /** Get predicted DC value for I-frames only
2431 * prediction dir: left=0, top=1
2432 * @param s MpegEncContext
2433 * @param[in] n block index in the current MB
2434 * @param dc_val_ptr Pointer to DC predictor
2435 * @param dir_ptr Prediction direction for use in AC prediction
2437 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2438 int16_t **dc_val_ptr, int *dir_ptr)
2440 int a, b, c, wrap, pred, scale;
/* Default DC predictor used at picture edges, indexed by the DC scale
 * (roughly 1024/scale); entry 0 is a sentinel since scale 0 never occurs. */
2442 static const uint16_t dcpred[32] = {
2443 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2444 114, 102, 93, 85, 79, 73, 68, 64,
2445 60, 57, 54, 51, 49, 47, 45, 43,
2446 41, 39, 38, 37, 35, 34, 33
2449 /* find prediction - wmv3_dc_scale always used here in fact */
2450 if (n < 4) scale = s->y_dc_scale;
2451 else scale = s->c_dc_scale;
/* dc_val points at this block's slot in the per-block DC history. */
2453 wrap = s->block_wrap[n];
2454 dc_val= s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour DC, a = top neighbour DC.
 * NOTE(review): c (left neighbour) is loaded on a line not shown here. */
2460 b = dc_val[ - 1 - wrap];
2461 a = dc_val[ - wrap];
/* Edge blocks have no real neighbours: substitute defaults.  With
 * overlap smoothing active (pq >= 9 && overlap) the default is 0,
 * otherwise the scale-dependent dcpred[] value. */
2463 if (pq < 9 || !overlap)
2465 /* Set outer values */
2466 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2467 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2471 /* Set outer values */
2472 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2473 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
/* Choose prediction direction from the neighbour gradients; the
 * selected predictor and *dir_ptr are set in the elided branch bodies. */
2476 if (abs(a - b) <= abs(b - c)) {
2484 /* update predictor */
2485 *dc_val_ptr = &dc_val[0];
2490 /** Get predicted DC value
2491 * prediction dir: left=0, top=1
2492 * @param s MpegEncContext
2493 * @param[in] n block index in the current MB
2494 * @param dc_val_ptr Pointer to DC predictor
2495 * @param dir_ptr Prediction direction for use in AC prediction
2497 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2498 int a_avail, int c_avail,
2499 int16_t **dc_val_ptr, int *dir_ptr)
2501 int a, b, c, wrap, pred, scale;
2503 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2506 /* find prediction - wmv3_dc_scale always used here in fact */
2507 if (n < 4) scale = s->y_dc_scale;
2508 else scale = s->c_dc_scale;
2510 wrap = s->block_wrap[n];
2511 dc_val= s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour DC, a = top neighbour DC.
 * NOTE(review): c (left neighbour) is loaded on a line not shown here. */
2517 b = dc_val[ - 1 - wrap];
2518 a = dc_val[ - wrap];
2519 /* scale predictors if needed */
2520 q1 = s->current_picture.qscale_table[mb_pos];
/* Neighbouring MBs may use a different quantizer (DQUANT); rescale each
 * available predictor from its MB's DC scale to ours.  The fixed-point
 * form x * scale2 * (2^18/scale1) with 0x20000 rounding avoids a divide. */
2521 if(c_avail && (n!= 1 && n!=3)) {
2522 q2 = s->current_picture.qscale_table[mb_pos - 1];
2524 c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2526 if(a_avail && (n!= 2 && n!=3)) {
2527 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2529 a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2531 if(a_avail && c_avail && (n!=3)) {
/* off selects the MB holding the top-left neighbour of block n. */
2534 if(n != 2) off -= s->mb_stride;
2535 q2 = s->current_picture.qscale_table[off];
2537 b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* Direction choice: gradient test when both neighbours exist, else
 * fall back to whichever single neighbour is available (bodies elided). */
2540 if(a_avail && c_avail) {
2541 if(abs(a - b) <= abs(b - c)) {
2548 } else if(a_avail) {
2551 } else if(c_avail) {
2559 /* update predictor */
2560 *dc_val_ptr = &dc_val[0];
2566 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2567  * @see 7.1.4, p91 and 8.1.1.7, p104
2571 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
2573 int xy, wrap, pred, a, b, c;
2575 xy = s->block_index[n];
2576 wrap = s->b8_stride;
/* Neighbour coded-flags: a = left, b = top-left, c = top.
 * NOTE(review): the prediction computed from a/b/c is on elided lines. */
2581 a = s->coded_block[xy - 1 ];
2582 b = s->coded_block[xy - 1 - wrap];
2583 c = s->coded_block[xy - wrap];
/* Hand back this block's slot so the caller can store the actual flag. */
2592 *coded_block_ptr = &s->coded_block[xy];
2598 * Decode one AC coefficient
2599 * @param v The VC1 context
2600 * @param last Last coefficient
2601 * @param skip How much zero coefficients to skip
2602 * @param value Decoded AC coefficient value
2605 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
2607 GetBitContext *gb = &v->s.gb;
2608 int index, escape, run = 0, level = 0, lst = 0;
/* Normal (non-escape) case: the VLC index maps directly to run/level,
 * and indices past the per-set threshold mark the last coefficient. */
2610 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2611 if (index != vc1_ac_sizes[codingset] - 1) {
2612 run = vc1_index_decode_table[codingset][index][0];
2613 level = vc1_index_decode_table[codingset][index][1];
2614 lst = index >= vc1_last_decode_table[codingset];
/* Escape: decode210 selects one of three escape modes. */
2618 escape = decode210(gb);
/* Escape mode 1/2: re-read a VLC, then extend level or run by the
 * per-codingset delta tables (last vs non-last variants). */
2620 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2621 run = vc1_index_decode_table[codingset][index][0];
2622 level = vc1_index_decode_table[codingset][index][1];
2623 lst = index >= vc1_last_decode_table[codingset];
2626 level += vc1_last_delta_level_table[codingset][run];
2628 level += vc1_delta_level_table[codingset][run];
2631 run += vc1_last_delta_run_table[codingset][level] + 1;
2633 run += vc1_delta_run_table[codingset][level] + 1;
/* Escape mode 3: fully explicit coding.  On first use per picture the
 * run/level field widths are read from the bitstream and cached in
 * esc3_run_length / esc3_level_length. */
2639 lst = get_bits(gb, 1);
2640 if(v->s.esc3_level_length == 0) {
2641 if(v->pq < 8 || v->dquantfrm) { // table 59
2642 v->s.esc3_level_length = get_bits(gb, 3);
2643 if(!v->s.esc3_level_length)
2644 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2646 v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
2648 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2650 run = get_bits(gb, v->s.esc3_run_length);
2651 sign = get_bits(gb, 1);
2652 level = get_bits(gb, v->s.esc3_level_length);
2663 /** Decode intra block in intra frames - should be faster than decode_intra_block
2664 * @param v VC1Context
2665 * @param block block to decode
2666 * @param coded are AC coeffs present or not
2667 * @param codingset set of VLC to decode data
2669 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
2671 GetBitContext *gb = &v->s.gb;
2672 MpegEncContext *s = &v->s;
2673 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2676 int16_t *ac_val, *ac_val2;
2679 /* Get DC differential */
/* Luma blocks (n < 4) and chroma blocks use separate DC VLC tables. */
2681 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2683 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2686 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape value: DC differential is coded in fixed bits, width
 * depending on the picture quantizer. */
2691 if (dcdiff == 119 /* ESC index value */)
2693 /* TODO: Optimize */
2694 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2695 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2696 else dcdiff = get_bits(gb, 8);
/* For pq 1/2 the differential carries extra precision bits. */
2701 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2702 else if (v->pq == 2)
2703 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2705 if (get_bits(gb, 1))
/* Add the DC predictor from the chosen neighbour (also sets dc_pred_dir). */
2710 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2713 /* Store the quantized DC coeff, used for prediction */
2715 block[0] = dcdiff * s->y_dc_scale;
2717 block[0] = dcdiff * s->c_dc_scale;
/* --- coded block: decode AC coefficients ------------------------------ */
2730 int last = 0, skip, value;
2731 const int8_t *zz_table;
2735 scale = v->pq * 2 + v->halfpq;
/* Zigzag pattern follows the DC prediction direction (8.1.1 of the spec). */
2739 zz_table = vc1_horizontal_zz;
2741 zz_table = vc1_vertical_zz;
2743 zz_table = vc1_normal_zz;
2745 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
/* Predictor row: entries 0..7 hold the left column, 8..15 the top row. */
2747 if(dc_pred_dir) //left
2750 ac_val -= 16 * s->block_wrap[n];
/* run/level decode loop body (partially elided): place coefficients
 * along the zigzag order. */
2753 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2757 block[zz_table[i++]] = value;
2760 /* apply AC prediction if needed */
2762 if(dc_pred_dir) { //left
2763 for(k = 1; k < 8; k++)
2764 block[k << 3] += ac_val[k];
2766 for(k = 1; k < 8; k++)
2767 block[k] += ac_val[k + 8];
2770 /* save AC coeffs for further prediction */
2771 for(k = 1; k < 8; k++) {
2772 ac_val2[k] = block[k << 3];
2773 ac_val2[k + 8] = block[k];
2776 /* scale AC coeffs */
/* Non-uniform quantizer adds a pq-sized deadzone offset to each coeff. */
2777 for(k = 1; k < 64; k++)
2781 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2784 if(s->ac_pred) i = 63;
/* --- uncoded block: only predicted ACs (if any) contribute ----------- */
2790 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2793 scale = v->pq * 2 + v->halfpq;
2794 memset(ac_val2, 0, 16 * 2);
2795 if(dc_pred_dir) {//left
2798 memcpy(ac_val2, ac_val, 8 * 2);
2800 ac_val -= 16 * s->block_wrap[n];
2802 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2805 /* apply AC prediction if needed */
2807 if(dc_pred_dir) { //left
2808 for(k = 1; k < 8; k++) {
2809 block[k << 3] = ac_val[k] * scale;
2810 if(!v->pquantizer && block[k << 3])
2811 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2814 for(k = 1; k < 8; k++) {
2815 block[k] = ac_val[k + 8] * scale;
2816 if(!v->pquantizer && block[k])
2817 block[k] += (block[k] < 0) ? -v->pq : v->pq;
/* Remember how far the coefficients extend for the IDCT. */
2823 s->block_last_index[n] = i;
2828 /** Decode intra block in intra frames (advanced profile variant) - should be faster than decode_intra_block
2829 * @param v VC1Context
2830 * @param block block to decode
2831 * @param coded are AC coeffs present or not
2832 * @param codingset set of VLC to decode data
2834 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
2836 GetBitContext *gb = &v->s.gb;
2837 MpegEncContext *s = &v->s;
2838 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2841 int16_t *ac_val, *ac_val2;
2843 int a_avail = v->a_avail, c_avail = v->c_avail;
2844 int use_pred = s->ac_pred;
2847 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2849 /* Get DC differential */
/* Luma blocks (n < 4) and chroma blocks use separate DC VLC tables. */
2851 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2853 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2856 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape value: fixed-bit DC differential, width depends on mquant. */
2861 if (dcdiff == 119 /* ESC index value */)
2863 /* TODO: Optimize */
2864 if (mquant == 1) dcdiff = get_bits(gb, 10);
2865 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2866 else dcdiff = get_bits(gb, 8);
/* For mquant 1/2 the differential carries extra precision bits. */
2871 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2872 else if (mquant == 2)
2873 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2875 if (get_bits(gb, 1))
/* Quantizer-aware DC prediction (per-MB mquant, availability flags). */
2880 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2883 /* Store the quantized DC coeff, used for prediction */
2885 block[0] = dcdiff * s->y_dc_scale;
2887 block[0] = dcdiff * s->c_dc_scale;
2896 /* check if AC is needed at all and adjust direction if needed */
2897 if(!a_avail) dc_pred_dir = 1;
2898 if(!c_avail) dc_pred_dir = 0;
2899 if(!a_avail && !c_avail) use_pred = 0;
2900 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2903 scale = mquant * 2 + v->halfpq;
/* Predictor row: entries 0..7 hold the left column, 8..15 the top row. */
2905 if(dc_pred_dir) //left
2908 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer, q2 = the predictor MB's quantizer; when they
 * differ the predicted ACs must be rescaled below.  Chroma-internal luma
 * blocks (0 < n < 4) predict from the same MB, so q2 == q1. */
2910 q1 = s->current_picture.qscale_table[mb_pos];
2911 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
2912 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2913 if(n && n<4) q2 = q1;
/* --- coded block: decode AC coefficients ------------------------------ */
2916 int last = 0, skip, value;
2917 const int8_t *zz_table;
/* Zigzag pattern follows the DC prediction direction. */
2922 zz_table = vc1_horizontal_zz;
2924 zz_table = vc1_vertical_zz;
2926 zz_table = vc1_normal_zz;
2929 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2933 block[zz_table[i++]] = value;
2936 /* apply AC prediction if needed */
2938 /* scale predictors if needed*/
/* Convert both quantizers to step sizes, then rescale the predicted ACs
 * from q2 to q1 in 18-bit fixed point (vc1_dqscale = 2^18/x table). */
2940 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2941 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2943 if(dc_pred_dir) { //left
2944 for(k = 1; k < 8; k++)
2945 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2947 for(k = 1; k < 8; k++)
2948 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2951 if(dc_pred_dir) { //left
2952 for(k = 1; k < 8; k++)
2953 block[k << 3] += ac_val[k];
2955 for(k = 1; k < 8; k++)
2956 block[k] += ac_val[k + 8];
2960 /* save AC coeffs for further prediction */
2961 for(k = 1; k < 8; k++) {
2962 ac_val2[k] = block[k << 3];
2963 ac_val2[k + 8] = block[k];
2966 /* scale AC coeffs */
/* Non-uniform quantizer adds an mquant-sized deadzone offset. */
2967 for(k = 1; k < 64; k++)
2971 block[k] += (block[k] < 0) ? -mquant : mquant;
2974 if(use_pred) i = 63;
2975 } else { // no AC coeffs
/* Uncoded block: copy (and if needed rescale) the neighbour's AC row
 * into ac_val2, then synthesize the predicted coefficients from it. */
2978 memset(ac_val2, 0, 16 * 2);
2979 if(dc_pred_dir) {//left
2981 memcpy(ac_val2, ac_val, 8 * 2);
2983 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2984 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2985 for(k = 1; k < 8; k++)
2986 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2991 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2993 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2994 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2995 for(k = 1; k < 8; k++)
2996 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3001 /* apply AC prediction if needed */
3003 if(dc_pred_dir) { //left
3004 for(k = 1; k < 8; k++) {
3005 block[k << 3] = ac_val2[k] * scale;
3006 if(!v->pquantizer && block[k << 3])
3007 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3010 for(k = 1; k < 8; k++) {
3011 block[k] = ac_val2[k + 8] * scale;
3012 if(!v->pquantizer && block[k])
3013 block[k] += (block[k] < 0) ? -mquant : mquant;
/* Remember how far the coefficients extend for the IDCT. */
3019 s->block_last_index[n] = i;
3024 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3025 * @param v VC1Context
3026 * @param block block to decode
3027 * @param coded are AC coeffs present or not
3028 * @param mquant block quantizer
3029 * @param codingset set of VLC to decode data
3031 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
3033 GetBitContext *gb = &v->s.gb;
3034 MpegEncContext *s = &v->s;
3035 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3038 int16_t *ac_val, *ac_val2;
3040 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3041 int a_avail = v->a_avail, c_avail = v->c_avail;
3042 int use_pred = s->ac_pred;
3046 /* XXX: Guard against dumb values of mquant */
/* Clamp the per-MB quantizer to the table range [0, 31]. */
3047 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
3049 /* Set DC scale - y and c use the same */
3050 s->y_dc_scale = s->y_dc_scale_table[mquant];
3051 s->c_dc_scale = s->c_dc_scale_table[mquant];
3053 /* Get DC differential */
/* Luma blocks (n < 4) and chroma blocks use separate DC VLC tables. */
3055 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3057 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3060 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape value: fixed-bit DC differential, width depends on mquant. */
3065 if (dcdiff == 119 /* ESC index value */)
3067 /* TODO: Optimize */
3068 if (mquant == 1) dcdiff = get_bits(gb, 10);
3069 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3070 else dcdiff = get_bits(gb, 8);
/* For mquant 1/2 the differential carries extra precision bits. */
3075 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
3076 else if (mquant == 2)
3077 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
3079 if (get_bits(gb, 1))
/* Quantizer-aware DC prediction using the caller-supplied availability. */
3084 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3087 /* Store the quantized DC coeff, used for prediction */
3090 block[0] = dcdiff * s->y_dc_scale;
3092 block[0] = dcdiff * s->c_dc_scale;
3101 /* check if AC is needed at all and adjust direction if needed */
3102 if(!a_avail) dc_pred_dir = 1;
3103 if(!c_avail) dc_pred_dir = 0;
3104 if(!a_avail && !c_avail) use_pred = 0;
3105 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3108 scale = mquant * 2 + v->halfpq;
/* Predictor row: entries 0..7 hold the left column, 8..15 the top row. */
3110 if(dc_pred_dir) //left
3113 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer, q2 = the predictor MB's quantizer; when they
 * differ the predicted ACs must be rescaled below.  Blocks 1..3 predict
 * from inside the same MB, so q2 == q1. */
3115 q1 = s->current_picture.qscale_table[mb_pos];
3116 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
3117 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3118 if(n && n<4) q2 = q1;
/* --- coded block: decode AC coefficients ------------------------------ */
3121 int last = 0, skip, value;
3122 const int8_t *zz_table;
/* Intra blocks in inter frames always use the plain 8x8 zigzag. */
3125 zz_table = vc1_simple_progressive_8x8_zz;
3128 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3132 block[zz_table[i++]] = value;
3135 /* apply AC prediction if needed */
3137 /* scale predictors if needed*/
/* Convert both quantizers to step sizes, then rescale the predicted ACs
 * from q2 to q1 in 18-bit fixed point (vc1_dqscale = 2^18/x table). */
3139 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3140 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3142 if(dc_pred_dir) { //left
3143 for(k = 1; k < 8; k++)
3144 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3146 for(k = 1; k < 8; k++)
3147 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3150 if(dc_pred_dir) { //left
3151 for(k = 1; k < 8; k++)
3152 block[k << 3] += ac_val[k];
3154 for(k = 1; k < 8; k++)
3155 block[k] += ac_val[k + 8];
3159 /* save AC coeffs for further prediction */
3160 for(k = 1; k < 8; k++) {
3161 ac_val2[k] = block[k << 3];
3162 ac_val2[k + 8] = block[k];
3165 /* scale AC coeffs */
/* Non-uniform quantizer adds an mquant-sized deadzone offset. */
3166 for(k = 1; k < 64; k++)
3170 block[k] += (block[k] < 0) ? -mquant : mquant;
3173 if(use_pred) i = 63;
3174 } else { // no AC coeffs
/* Uncoded block: copy (and if needed rescale) the neighbour's AC row
 * into ac_val2, then synthesize the predicted coefficients from it. */
3177 memset(ac_val2, 0, 16 * 2);
3178 if(dc_pred_dir) {//left
3180 memcpy(ac_val2, ac_val, 8 * 2);
3182 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3183 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3184 for(k = 1; k < 8; k++)
3185 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3190 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3192 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3193 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3194 for(k = 1; k < 8; k++)
3195 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3200 /* apply AC prediction if needed */
3202 if(dc_pred_dir) { //left
3203 for(k = 1; k < 8; k++) {
3204 block[k << 3] = ac_val2[k] * scale;
3205 if(!v->pquantizer && block[k << 3])
3206 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3209 for(k = 1; k < 8; k++) {
3210 block[k] = ac_val2[k + 8] * scale;
3211 if(!v->pquantizer && block[k])
3212 block[k] += (block[k] < 0) ? -mquant : mquant;
/* Remember how far the coefficients extend for the IDCT. */
3218 s->block_last_index[n] = i;
/** Decode one inter-coded block: read its transform type (8x8/8x4/4x8/4x4),
 * decode the coefficients of each coded subblock and apply the matching
 * inverse transform in place.
 */
3225 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block)
3227 MpegEncContext *s = &v->s;
3228 GetBitContext *gb = &s->gb;
3231 int scale, off, idx, last, skip, value;
3232 int ttblk = ttmb & 7;
/* Block-level TT signalled in the bitstream (when not fixed per MB/frame). */
3235 ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3237 if(ttblk == TT_4X4) {
3238 subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
/* For split transforms the coded-subblock pattern may be read explicitly. */
3240 if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
3241 subblkpat = decode012(gb);
3242 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
3243 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
3244 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
3246 scale = 2 * mquant + v->halfpq;
3248 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3249 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3250 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3253 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3254 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* --- TT_8X8: single transform over the whole block -------------------- */
3262 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3266 idx = vc1_simple_progressive_8x8_zz[i++];
3267 block[idx] = value * scale;
/* Non-uniform quantizer: mquant-sized deadzone offset per coefficient. */
3269 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3271 s->dsp.vc1_inv_trans_8x8(block);
/* --- TT_4X4: four 4x4 subblocks, gated by subblkpat ------------------- */
3274 for(j = 0; j < 4; j++) {
3275 last = subblkpat & (1 << (3 - j));
3277 off = (j & 1) * 4 + (j & 2) * 16;
3279 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3283 idx = vc1_simple_progressive_4x4_zz[i++];
3284 block[idx + off] = value * scale;
3286 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3288 if(!(subblkpat & (1 << (3 - j))))
3289 s->dsp.vc1_inv_trans_4x4(block, j);
/* --- TT_8X4: two 8x4 halves; zigzag differs by profile ---------------- */
3293 for(j = 0; j < 2; j++) {
3294 last = subblkpat & (1 << (1 - j));
3298 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3302 if(v->profile < PROFILE_ADVANCED)
3303 idx = vc1_simple_progressive_8x4_zz[i++];
3305 idx = vc1_adv_progressive_8x4_zz[i++];
3306 block[idx + off] = value * scale;
3308 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3310 if(!(subblkpat & (1 << (1 - j))))
3311 s->dsp.vc1_inv_trans_8x4(block, j);
/* --- TT_4X8: two 4x8 halves; zigzag differs by profile ---------------- */
3315 for(j = 0; j < 2; j++) {
3316 last = subblkpat & (1 << (1 - j));
3320 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3324 if(v->profile < PROFILE_ADVANCED)
3325 idx = vc1_simple_progressive_4x8_zz[i++];
3327 idx = vc1_adv_progressive_4x8_zz[i++];
3328 block[idx + off] = value * scale;
3330 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3332 if(!(subblkpat & (1 << (1 - j))))
3333 s->dsp.vc1_inv_trans_4x8(block, j);
3341 /** Decode one P-frame MB (in Simple/Main profile)
3343 static int vc1_decode_p_mb(VC1Context *v)
3345 MpegEncContext *s = &v->s;
3346 GetBitContext *gb = &s->gb;
3348 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3349 int cbp; /* cbp decoding stuff */
3350 int mqdiff, mquant; /* MB quantization */
3351 int ttmb = v->ttfrm; /* MB Transform type */
/* Tables used by the GET_MVDATA macro for MV differential decoding. */
3354 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3355 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3356 int mb_has_coeffs = 1; /* last_flag */
3357 int dmv_x, dmv_y; /* Differential MV components */
3358 int index, index1; /* LUT indices */
3359 int val, sign; /* temp values */
3360 int first_block = 1;
3362 int skipped, fourmv;
3364 mquant = v->pq; /* Lossy initialization */
/* MVTYPEMB / SKIPMB may be raw (read per MB) or come from decoded bitplanes. */
3366 if (v->mv_type_is_raw)
3367 fourmv = get_bits1(gb);
3369 fourmv = v->mv_type_mb_plane[mb_pos];
3371 skipped = get_bits1(gb);
3373 skipped = v->s.mbskip_table[mb_pos];
3375 s->dsp.clear_blocks(s->block[0]);
3377 if (!fourmv) /* 1MV mode */
/* ---- 1MV, not skipped: one MV for the whole MB ----------------------- */
3381 GET_MVDATA(dmv_x, dmv_y);
/* Intra MB in a P frame: zero the stored MV. */
3384 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3385 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3387 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3388 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3390 /* FIXME Set DC val for inter block ? */
3391 if (s->mb_intra && !mb_has_coeffs)
3394 s->ac_pred = get_bits(gb, 1);
3397 else if (mb_has_coeffs)
3399 if (s->mb_intra) s->ac_pred = get_bits(gb, 1);
3400 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3408 s->current_picture.qscale_table[mb_pos] = mquant;
3410 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3411 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table,
3412 VC1_TTMB_VLC_BITS, 2);
3413 if(!s->mb_intra) vc1_mc_1mv(v, 0);
/* Per-block loop (header elided): decode, transform and output each of
 * the 6 blocks (4 luma + 2 chroma). */
3417 s->dc_val[0][s->block_index[i]] = 0;
3419 val = ((cbp >> (5 - i)) & 1);
3420 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3421 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3423 /* check if prediction blocks A and C are available */
3424 v->a_avail = v->c_avail = 0;
3425 if(i == 2 || i == 3 || !s->first_slice_line)
3426 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3427 if(i == 1 || i == 3 || s->mb_x)
3428 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3430 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3431 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3432 s->dsp.vc1_inv_trans_8x8(s->block[i]);
/* Range reduction doubles sample amplitudes before output. */
3433 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3434 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3435 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
/* Overlap smoothing across block edges when pq >= 9. */
3436 if(v->pq >= 9 && v->overlap) {
3438 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3440 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3443 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3444 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3446 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3447 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* ---- 1MV, skipped MB: predicted MV only, no residual ----------------- */
3454 for(i = 0; i < 6; i++) {
3455 v->mb_type[0][s->block_index[i]] = 0;
3456 s->dc_val[0][s->block_index[i]] = 0;
3458 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3459 s->current_picture.qscale_table[mb_pos] = 0;
3460 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
/* ---- 4MV mode: one MV per luma block ---------------------------------- */
3467 if (!skipped /* unskipped MB */)
3469 int intra_count = 0, coded_inter = 0;
3470 int is_intra[6], is_coded[6];
3472 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3475 val = ((cbp >> (5 - i)) & 1);
3476 s->dc_val[0][s->block_index[i]] = 0;
3483 GET_MVDATA(dmv_x, dmv_y);
3485 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3486 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3487 intra_count += s->mb_intra;
3488 is_intra[i] = s->mb_intra;
3489 is_coded[i] = mb_has_coeffs;
/* Chroma blocks are intra iff at least 3 luma blocks are intra. */
3492 is_intra[i] = (intra_count >= 3);
3495 if(i == 4) vc1_mc_4mv_chroma(v);
3496 v->mb_type[0][s->block_index[i]] = is_intra[i];
3497 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
3499 // if there are no coded blocks then don't do anything more
3500 if(!intra_count && !coded_inter) return 0;
3503 s->current_picture.qscale_table[mb_pos] = mquant;
3504 /* test if block is intra and has pred */
3509 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3510 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
3515 if(intrapred)s->ac_pred = get_bits(gb, 1);
3516 else s->ac_pred = 0;
3518 if (!v->ttmbf && coded_inter)
3519 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Per-block decode/transform/output loop (header elided). */
3523 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3524 s->mb_intra = is_intra[i];
3526 /* check if prediction blocks A and C are available */
3527 v->a_avail = v->c_avail = 0;
3528 if(i == 2 || i == 3 || !s->first_slice_line)
3529 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3530 if(i == 1 || i == 3 || s->mb_x)
3531 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3533 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
3534 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3535 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3536 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3537 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3538 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3539 if(v->pq >= 9 && v->overlap) {
3541 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3543 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3545 } else if(is_coded[i]) {
3546 status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3547 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3549 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3550 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* ---- 4MV, skipped MB: predicted MVs only, no residual ---------------- */
3558 s->current_picture.qscale_table[mb_pos] = 0;
3559 for (i=0; i<6; i++) {
3560 v->mb_type[0][s->block_index[i]] = 0;
3561 s->dc_val[0][s->block_index[i]] = 0;
3565 vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
3566 vc1_mc_4mv_luma(v, i);
3568 vc1_mc_4mv_chroma(v);
3569 s->current_picture.qscale_table[mb_pos] = 0;
3574 /* Should never happen */
3578 /** Decode one B-frame MB (in Main profile)
3580 static void vc1_decode_b_mb(VC1Context *v)
3582 MpegEncContext *s = &v->s;
3583 GetBitContext *gb = &s->gb;
3585 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3586 int cbp = 0; /* cbp decoding stuff */
3587 int mqdiff, mquant; /* MB quantization */
3588 int ttmb = v->ttfrm; /* MB Transform type */
/* Tables used by the GET_MVDATA macro for MV differential decoding. */
3590 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3591 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3592 int mb_has_coeffs = 0; /* last_flag */
3593 int index, index1; /* LUT indices */
3594 int val, sign; /* temp values */
3595 int first_block = 1;
3597 int skipped, direct;
/* Forward ([0]) and backward ([1]) differential MV components. */
3598 int dmv_x[2], dmv_y[2];
3599 int bmvtype = BMV_TYPE_BACKWARD;
3601 mquant = v->pq; /* Lossy initialization */
/* DIRECTMB / SKIPMB may be raw (read per MB) or come from decoded bitplanes. */
3605 direct = get_bits1(gb);
3607 direct = v->direct_mb_plane[mb_pos];
3609 skipped = get_bits1(gb);
3611 skipped = v->s.mbskip_table[mb_pos];
3613 s->dsp.clear_blocks(s->block[0]);
3614 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
3615 for(i = 0; i < 6; i++) {
3616 v->mb_type[0][s->block_index[i]] = 0;
3617 s->dc_val[0][s->block_index[i]] = 0;
3619 s->current_picture.qscale_table[mb_pos] = 0;
/* Non-direct MB: read the MV differential once, used for both directions. */
3623 GET_MVDATA(dmv_x[0], dmv_y[0]);
3624 dmv_x[1] = dmv_x[0];
3625 dmv_y[1] = dmv_y[0];
/* BMV type: backward / forward / interpolated, with the fallback
 * direction chosen from BFRACTION's position relative to 1/2. */
3627 if(skipped || !s->mb_intra) {
3628 bmvtype = decode012(gb);
3631 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
3634 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
3637 bmvtype = BMV_TYPE_INTERPOLATED;
3638 dmv_x[0] = dmv_y[0] = 0;
3642 for(i = 0; i < 6; i++)
3643 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* Skipped MB: predict MVs and motion-compensate, no residual. */
3646 if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
3647 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3648 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
/* Direct MB path: CBP, quantizer and TT, then MC from scaled P-frame MVs. */
3652 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3656 s->current_picture.qscale_table[mb_pos] = mquant;
3658 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3659 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
3660 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3661 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3663 if(!mb_has_coeffs && !s->mb_intra) {
3664 /* no coded blocks - effectively skipped */
3665 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3666 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3669 if(s->mb_intra && !mb_has_coeffs) {
3671 s->current_picture.qscale_table[mb_pos] = mquant;
3672 s->ac_pred = get_bits1(gb);
3674 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* Interpolated MBs carry a second MV differential (forward part). */
3676 if(bmvtype == BMV_TYPE_INTERPOLATED) {
3677 GET_MVDATA(dmv_x[0], dmv_y[0]);
3678 if(!mb_has_coeffs) {
3679 /* interpolated skipped block */
3680 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3681 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3685 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3687 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3690 s->ac_pred = get_bits1(gb);
3691 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3693 s->current_picture.qscale_table[mb_pos] = mquant;
3694 if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3695 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Per-block decode/transform/output loop (header elided), same shape
 * as the P-frame path. */
3701 s->dc_val[0][s->block_index[i]] = 0;
3703 val = ((cbp >> (5 - i)) & 1);
3704 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3705 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3707 /* check if prediction blocks A and C are available */
3708 v->a_avail = v->c_avail = 0;
3709 if(i == 2 || i == 3 || !s->first_slice_line)
3710 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3711 if(i == 1 || i == 3 || s->mb_x)
3712 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3714 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3715 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3716 s->dsp.vc1_inv_trans_8x8(s->block[i]);
/* Range reduction doubles sample amplitudes before output. */
3717 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3718 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3719 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3721 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3722 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3724 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3725 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3730 /** Decode blocks of I-frame
3732 static void vc1_decode_i_blocks(VC1Context *v)
3735 MpegEncContext *s = &v->s;
3740 /* select codingmode used for VLC tables selection */
/* Map Y/C AC table indices to intra/inter coding sets; index 0 further
 * splits on pqindex (high-rate vs low-motion set). */
3741 switch(v->y_ac_table_index){
3743 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3746 v->codingset = CS_HIGH_MOT_INTRA;
3749 v->codingset = CS_MID_RATE_INTRA;
3753 switch(v->c_ac_table_index){
3755 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3758 v->codingset2 = CS_HIGH_MOT_INTER;
3761 v->codingset2 = CS_MID_RATE_INTER;
3765 /* Set DC scale - y and c use the same */
3766 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3767 s->c_dc_scale = s->c_dc_scale_table[v->pq];
3770 s->mb_x = s->mb_y = 0;
3772 s->first_slice_line = 1;
3773 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
/* Raster-scan over all macroblocks of the picture. */
3774 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3775 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3776 ff_init_block_index(s);
3777 ff_update_block_index(s);
3778 s->dsp.clear_blocks(s->block[0]);
/* NOTE(review): mb_pos uses mb_width here, while the P/B MB decoders
 * use mb_stride for the same tables — confirm the indexing is intended. */
3779 mb_pos = s->mb_x + s->mb_y * s->mb_width;
3780 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3781 s->current_picture.qscale_table[mb_pos] = v->pq;
3782 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3783 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3785 // do actual MB decoding and displaying
3786 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
3787 v->s.ac_pred = get_bits(&v->s.gb, 1);
3789 for(k = 0; k < 6; k++) {
3790 val = ((cbp >> (5 - k)) & 1);
/* Coded-block flag is itself predicted from neighbours (XORed). */
3793 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3797 cbp |= val << (5 - k);
3799 vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
3801 s->dsp.vc1_inv_trans_8x8(s->block[k]);
/* With overlap smoothing the +128 bias is applied before filtering. */
3802 if(v->pq >= 9 && v->overlap) {
3803 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3807 vc1_put_block(v, s->block);
/* Overlap smoothing across MB and internal block edges (pq >= 9):
 * horizontal edges first, then vertical; skipped at picture borders
 * and for chroma when decoding grayscale only. */
3808 if(v->pq >= 9 && v->overlap) {
3810 s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
3811 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3812 if(!(s->flags & CODEC_FLAG_GRAY)) {
3813 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
3814 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
3817 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
3818 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
3819 if(!s->first_slice_line) {
3820 s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
3821 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
3822 if(!(s->flags & CODEC_FLAG_GRAY)) {
3823 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
3824 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
3827 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3828 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
/* Sanity check: bail out if more bits were consumed than the frame has. */
3831 if(get_bits_count(&s->gb) > v->bits) {
3832 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3836 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3837 s->first_slice_line = 0;
3841 /** Decode blocks of I-frame for advanced profile
/* Advanced-profile I-frame decoder.  Differs from the simple/main variant
 * above in that AC prediction and the overlap decision may come from coded
 * bitplanes (acpred_plane / over_flags_plane) instead of raw per-MB bits,
 * the quantizer (mquant) is per-MB, and mb_stride indexes the MB arrays.
 * NOTE(review): structural lines (braces, case labels, breaks) are missing
 * from this listing; comments cover only the visible statements. */
3843 static void vc1_decode_i_blocks_adv(VC1Context *v)
3846 MpegEncContext *s = &v->s;
3853 GetBitContext *gb = &s->gb;
3855 /* select codingmode used for VLC tables selection */
/* Luma AC coding set from the picture-level table index. */
3856 switch(v->y_ac_table_index){
3858 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3861 v->codingset = CS_HIGH_MOT_INTRA;
3864 v->codingset = CS_MID_RATE_INTRA;
/* Chroma AC coding set. */
3868 switch(v->c_ac_table_index){
3870 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3873 v->codingset2 = CS_HIGH_MOT_INTER;
3876 v->codingset2 = CS_MID_RATE_INTER;
3881 s->mb_x = s->mb_y = 0;
3883 s->first_slice_line = 1;
3884 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3885 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3886 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3887 ff_init_block_index(s);
3888 ff_update_block_index(s);
3889 s->dsp.clear_blocks(s->block[0]);
/* mb_stride (not mb_width) is the row stride for MB-indexed arrays here. */
3890 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3891 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3892 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3893 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3895 // do actual MB decoding and displaying
3896 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
/* AC prediction: raw bit in the MB layer, or looked up from the ACPRED
 * bitplane decoded in the picture header. */
3897 if(v->acpred_is_raw)
3898 v->s.ac_pred = get_bits(&v->s.gb, 1);
3900 v->s.ac_pred = v->acpred_plane[mb_pos];
/* CONDOVER: per-MB overlap decision (raw bit or OVERFLAGS bitplane);
 * otherwise overlap is on for all MBs only when condover == CONDOVER_ALL. */
3902 if(v->condover == CONDOVER_SELECT) {
3903 if(v->overflg_is_raw)
3904 overlap = get_bits(&v->s.gb, 1);
3906 overlap = v->over_flags_plane[mb_pos];
3908 overlap = (v->condover == CONDOVER_ALL);
/* Per-MB quantizer (mquant) drives the DC scale, unlike the frame-level
 * v->pq used by the simple/main profile path. */
3912 s->current_picture.qscale_table[mb_pos] = mquant;
3913 /* Set DC scale - y and c use the same */
3914 s->y_dc_scale = s->y_dc_scale_table[mquant];
3915 s->c_dc_scale = s->c_dc_scale_table[mquant];
3917 for(k = 0; k < 6; k++) {
/* Coded bit for block k, MSB-first in the 6-bit CBP. */
3918 val = ((cbp >> (5 - k)) & 1);
3921 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3925 cbp |= val << (5 - k);
/* Availability of top/left neighbours for DC/AC prediction: bottom luma
 * blocks (k==2,3) always have a block above inside this MB, right-column
 * blocks (k==1,3) always have one to the left. */
3927 v->a_avail = !s->first_slice_line || (k==2 || k==3);
3928 v->c_avail = !!s->mb_x || (k==1 || k==3);
3930 vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
3932 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3933 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3936 vc1_put_block(v, s->block);
/* Overlap smoothing, same edge pattern as the simple/main profile path. */
3939 s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
3940 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3941 if(!(s->flags & CODEC_FLAG_GRAY)) {
3942 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
3943 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
3946 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
3947 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
3948 if(!s->first_slice_line) {
3949 s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
3950 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
3951 if(!(s->flags & CODEC_FLAG_GRAY)) {
3952 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
3953 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
3956 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3957 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
/* Abort once more bits were consumed than the frame contains. */
3960 if(get_bits_count(&s->gb) > v->bits) {
3961 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3965 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3966 s->first_slice_line = 0;
/* Decode all macroblocks of a P-frame: select the AC coding sets, then
 * walk the MB grid (the per-MB decode call is not visible in this listing)
 * with an overconsumption guard per macroblock.
 * NOTE(review): both coding-set switches key off c_ac_table_index, whereas
 * the I-frame paths use y_ac_table_index for the first — confirm against
 * the spec / upstream that this is intentional for P frames. */
3970 static void vc1_decode_p_blocks(VC1Context *v)
3972 MpegEncContext *s = &v->s;
3974 /* select codingmode used for VLC tables selection */
3975 switch(v->c_ac_table_index){
3977 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3980 v->codingset = CS_HIGH_MOT_INTRA;
3983 v->codingset = CS_MID_RATE_INTRA;
3987 switch(v->c_ac_table_index){
3989 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3992 v->codingset2 = CS_HIGH_MOT_INTER;
3995 v->codingset2 = CS_MID_RATE_INTER;
3999 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
4000 s->first_slice_line = 1;
4001 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4002 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
4003 ff_init_block_index(s);
4004 ff_update_block_index(s);
4005 s->dsp.clear_blocks(s->block[0]);
/* Also guards against bit-count wraparound (< 0) from a corrupt stream. */
4008 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4009 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
4013 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4014 s->first_slice_line = 0;
/* Decode all macroblocks of a B-frame.  Structure mirrors
 * vc1_decode_p_blocks: pick the AC coding sets (both switches key off
 * c_ac_table_index), then iterate the MB grid with a per-MB bit
 * overconsumption guard.  The per-MB decode call itself is not visible
 * in this listing. */
4018 static void vc1_decode_b_blocks(VC1Context *v)
4020 MpegEncContext *s = &v->s;
4022 /* select codingmode used for VLC tables selection */
4023 switch(v->c_ac_table_index){
4025 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4028 v->codingset = CS_HIGH_MOT_INTRA;
4031 v->codingset = CS_MID_RATE_INTRA;
4035 switch(v->c_ac_table_index){
4037 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4040 v->codingset2 = CS_HIGH_MOT_INTER;
4043 v->codingset2 = CS_MID_RATE_INTER;
4047 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
4048 s->first_slice_line = 1;
4049 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4050 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
4051 ff_init_block_index(s);
4052 ff_update_block_index(s);
4053 s->dsp.clear_blocks(s->block[0]);
/* Guard against both overconsumption and bit-count wraparound. */
4056 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4057 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
4061 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4062 s->first_slice_line = 0;
/* Handle a skipped P-frame: copy the previous reference picture into the
 * current one row-of-MBs at a time (16 luma / 8 chroma lines per MB row)
 * and display each band as it is copied.
 * NOTE(review): the memcpy length uses linesize (the allocated stride)
 * rather than the visible picture width — presumably safe because both
 * pictures share the same stride; confirm. */
4066 static void vc1_decode_skip_blocks(VC1Context *v)
4068 MpegEncContext *s = &v->s;
4070 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
4071 s->first_slice_line = 1;
4072 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4074 ff_init_block_index(s);
4075 ff_update_block_index(s);
4076 memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4077 memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4078 memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4079 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4080 s->first_slice_line = 0;
/* Force the picture type back to P after the skipped-frame copy. */
4082 s->pict_type = P_TYPE;
/* Top-level block decoding dispatcher: resets the escape-mode level length,
 * then routes by picture type — I frames to the profile-specific intra
 * decoder, P frames to either the skip handler or the P decoder, and the
 * remaining case (B, with intra fallback for advanced profile) to the B
 * decoder.  The case labels themselves are missing from this listing. */
4085 static void vc1_decode_blocks(VC1Context *v)
/* esc3_level_length is stream state for escape-coded AC levels; reset per frame. */
4088 v->s.esc3_level_length = 0;
4090 switch(v->s.pict_type) {
4092 if(v->profile == PROFILE_ADVANCED)
4093 vc1_decode_i_blocks_adv(v);
4095 vc1_decode_i_blocks(v);
/* Skipped P-frames copy the reference instead of decoding MBs. */
4098 if(v->p_frame_skipped)
4099 vc1_decode_skip_blocks(v);
4101 vc1_decode_p_blocks(v);
4105 if(v->profile == PROFILE_ADVANCED)
4106 vc1_decode_i_blocks_adv(v);
4108 vc1_decode_i_blocks(v);
4110 vc1_decode_b_blocks(v);
/* True when x is a VC-1 start code: any 32-bit value of the form 0x000001xx. */
4115 #define IS_MARKER(x) (((x) & ~0xFF) == VC1_CODE_RES0)
4117 /** Find VC-1 marker in buffer
4118 * @return position where next marker starts or end of buffer if no marker found
/* Byte-wise scan accumulating a rolling 32-bit word; the loop/match logic
 * that tests IS_MARKER is not visible in this listing. */
4120 static av_always_inline uint8_t* find_next_marker(uint8_t *src, uint8_t *end)
4122 uint32_t mrk = 0xFFFFFFFF;
/* A marker needs at least 4 bytes; shorter buffers cannot contain one. */
4124 if(end-src < 4) return end;
4126 mrk = (mrk << 8) | *src++;
/* Remove VC-1 start-code emulation prevention: a 0x03 byte following two
 * zero bytes is dropped when the next byte is < 4.  Returns the unescaped
 * size written to dst.  The early plain-copy path (first loop) appears to
 * be a fallback whose guard condition is not visible in this listing. */
4133 static av_always_inline int vc1_unescape_buffer(uint8_t *src, int size, uint8_t *dst)
4138 for(dsize = 0; dsize < size; dsize++) *dst++ = *src++;
4141 for(i = 0; i < size; i++, src++) {
/* 00 00 03 xx with xx < 4: emit xx and skip the 03 escape byte. */
4142 if(src[0] == 3 && i >= 2 && !src[-1] && !src[-2] && i < size-1 && src[1] < 4) {
4143 dst[dsize++] = src[1];
4147 dst[dsize++] = *src;
4152 /** Initialize a VC1/WMV3 decoder
4153 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4154 * @todo TODO: Decypher remaining bits in extra_data
/* Sets up pixel format, parses the codec-specific extradata (a raw sequence
 * header for WMV3; marker-delimited, escaped sequence header + entry point
 * for VC-1/WVC1), then allocates the per-MB bitplanes and block-type arrays.
 * Returns negative on any failure. */
4156 static int vc1_decode_init(AVCodecContext *avctx)
4158 VC1Context *v = avctx->priv_data;
4159 MpegEncContext *s = &v->s;
/* Extradata is mandatory: the sequence header lives there. */
4162 if (!avctx->extradata_size || !avctx->extradata) return -1;
4163 if (!(avctx->flags & CODEC_FLAG_GRAY))
4164 avctx->pix_fmt = PIX_FMT_YUV420P;
4166 avctx->pix_fmt = PIX_FMT_GRAY8;
/* The decoder always renders with emulated edges. */
4168 avctx->flags |= CODEC_FLAG_EMU_EDGE;
4169 v->s.flags |= CODEC_FLAG_EMU_EDGE;
4171 if(ff_h263_decode_init(avctx) < 0)
4173 if (vc1_init_common(v) < 0) return -1;
4175 avctx->coded_width = avctx->width;
4176 avctx->coded_height = avctx->height;
4177 if (avctx->codec_id == CODEC_ID_WMV3)
4181 // looks like WMV3 has a sequence header stored in the extradata
4182 // advanced sequence header may be before the first frame
4183 // the last byte of the extradata is a version number, 1 for the
4184 // samples we can decode
4186 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
4188 if (decode_sequence_header(avctx, &gb) < 0)
/* Report leftover (or overread) bits after the sequence header. */
4191 count = avctx->extradata_size*8 - get_bits_count(&gb);
4194 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
4195 count, get_bits(&gb, count));
4199 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
4201 } else { // VC1/WVC1
4202 uint8_t *start = avctx->extradata, *end = avctx->extradata + avctx->extradata_size;
4203 uint8_t *next; int size, buf2_size;
4204 uint8_t *buf2 = NULL;
4205 int seq_inited = 0, ep_inited = 0;
4207 if(avctx->extradata_size < 16) {
4208 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
/* Scratch buffer for unescaped header payloads.
 * NOTE(review): the av_mallocz result is not checked for NULL here. */
4212 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
4213 if(start[0]) start++; // in WVC1 extradata first byte is its size
/* Walk marker-delimited IDUs; each payload is unescaped before parsing. */
4215 for(; next < end; start = next){
4216 next = find_next_marker(start + 4, end);
4217 size = next - start - 4;
4218 if(size <= 0) continue;
4219 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
4220 init_get_bits(&gb, buf2, buf2_size * 8);
4221 switch(AV_RB32(start)){
4222 case VC1_CODE_SEQHDR:
4223 if(decode_sequence_header(avctx, &gb) < 0){
4229 case VC1_CODE_ENTRYPOINT:
4230 if(decode_entry_point(avctx, &gb) < 0){
/* Both a sequence header and an entry point are required for VC-1. */
4239 if(!seq_inited || !ep_inited){
4240 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
4244 avctx->has_b_frames= !!(avctx->max_b_frames);
4245 s->low_delay = !avctx->has_b_frames;
4247 s->mb_width = (avctx->coded_width+15)>>4;
4248 s->mb_height = (avctx->coded_height+15)>>4;
4250 /* Allocate mb bitplanes */
/* NOTE(review): none of these av_malloc results are NULL-checked. */
4251 v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4252 v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4253 v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
4254 v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
4256 /* allocate block type info in that way so it could be used with s->block_index[] */
4257 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
4258 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
4259 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
4260 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
4262 /* Init coded blocks info */
4263 if (v->profile == PROFILE_ADVANCED)
4265 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
4267 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
4275 /** Decode a VC1/WMV3 frame
4276 * @todo TODO: Handle VC-1 IDUs (Transport level?)
/* Main per-frame entry point: handles the flush call (buf_size == 0),
 * unescapes advanced-profile payloads into buf2, parses the frame header,
 * applies skip/hurry policies, then decodes all blocks and returns the
 * picture to display.  Returns a negative value on error.
 * NOTE(review): buf2 lifetime across the later error paths is not fully
 * visible in this listing — verify every return frees it. */
4278 static int vc1_decode_frame(AVCodecContext *avctx,
4279 void *data, int *data_size,
4280 uint8_t *buf, int buf_size)
4282 VC1Context *v = avctx->priv_data;
4283 MpegEncContext *s = &v->s;
4284 AVFrame *pict = data;
4285 uint8_t *buf2 = NULL;
4287 /* no supplementary picture */
4288 if (buf_size == 0) {
4289 /* special case for last picture */
/* On flush, emit the buffered next picture once (delayed-output case). */
4290 if (s->low_delay==0 && s->next_picture_ptr) {
4291 *pict= *(AVFrame*)s->next_picture_ptr;
4292 s->next_picture_ptr= NULL;
4294 *data_size = sizeof(AVFrame);
4300 //we need to set current_picture_ptr before reading the header, otherwise we cant store anyting im there
4301 if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
4302 int i= ff_find_unused_picture(s, 0);
4303 s->current_picture_ptr= &s->picture[i];
4306 //for advanced profile we may need to parse and unescape data
4307 if (avctx->codec_id == CODEC_ID_VC1) {
/* NOTE(review): av_mallocz result is not NULL-checked before use. */
4309 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
4311 if(IS_MARKER(AV_RB32(buf))){ /* frame starts with marker and needs to be parsed */
4312 uint8_t *dst = buf2, *start, *end, *next;
/* Walk the marker-delimited IDUs of this access unit. */
4316 for(start = buf, end = buf + buf_size; next < end; start = next){
4317 next = find_next_marker(start + 4, end);
4318 size = next - start - 4;
4319 if(size <= 0) continue;
4320 switch(AV_RB32(start)){
4321 case VC1_CODE_FRAME:
4322 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
4324 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
4325 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
4326 init_get_bits(&s->gb, buf2, buf_size2*8);
4327 decode_entry_point(avctx, &s->gb);
4329 case VC1_CODE_SLICE:
4330 av_log(avctx, AV_LOG_ERROR, "Sliced decoding is not implemented (yet)\n");
4335 }else if(v->interlace && ((buf[0] & 0xC0) == 0xC0)){ /* WVC1 interlaced stores both fields divided by marker */
/* Interlaced field pair: only the first field (up to the divider) is used. */
4338 divider = find_next_marker(buf, buf + buf_size);
4339 if((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD){
4340 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
4344 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
4346 av_free(buf2);return -1;
4348 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
4350 init_get_bits(&s->gb, buf2, buf_size2*8);
/* Simple/main profile (WMV3): the input needs no unescaping. */
4352 init_get_bits(&s->gb, buf, buf_size*8);
4353 // do parse frame header
4354 if(v->profile < PROFILE_ADVANCED) {
4355 if(vc1_parse_frame_header(v, &s->gb) == -1) {
4360 if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
/* Old WMV3 betas (res_rtm_flag unset) only support I-frames here. */
4366 if(s->pict_type != I_TYPE && !v->res_rtm_flag){
4372 s->current_picture.pict_type= s->pict_type;
4373 s->current_picture.key_frame= s->pict_type == I_TYPE;
4375 /* skip B-frames if we don't have reference frames */
4376 if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){
4378 return -1;//buf_size;
4380 /* skip b frames if we are in a hurry */
4381 if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size;
4382 if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
4383 || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
4384 || avctx->skip_frame >= AVDISCARD_ALL) {
4388 /* skip everything if we are in a hurry>=5 */
4389 if(avctx->hurry_up>=5) {
4391 return -1;//buf_size;
/* After a damaged P-frame, drop B-frames that reference it. */
4394 if(s->next_p_frame_damaged){
4395 if(s->pict_type==B_TYPE)
4398 s->next_p_frame_damaged=0;
4401 if(MPV_frame_start(s, avctx) < 0) {
4406 ff_er_frame_start(s);
/* Total coded bits; used by the MB loops to detect overconsumption. */
4408 v->bits = buf_size * 8;
4409 vc1_decode_blocks(v);
4410 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8);
4411 // if(get_bits_count(&s->gb) > buf_size * 8)
4417 assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
4418 assert(s->current_picture.pict_type == s->pict_type);
/* B-frames and low-delay streams display the just-decoded picture;
 * otherwise output is delayed by one reference picture. */
4419 if (s->pict_type == B_TYPE || s->low_delay) {
4420 *pict= *(AVFrame*)s->current_picture_ptr;
4421 } else if (s->last_picture_ptr != NULL) {
4422 *pict= *(AVFrame*)s->last_picture_ptr;
4425 if(s->last_picture_ptr || s->low_delay){
4426 *data_size = sizeof(AVFrame);
4427 ff_print_debug_info(s, pict);
4430 /* Return the Picture timestamp as the frame number */
4431 /* we substract 1 because it is added on utils.c */
4432 avctx->frame_number = s->picture_number - 1;
4439 /** Close a VC1/WMV3 decoder
4440 * @warning Initial try at using MpegEncContext stuff
/* Frees all decoder-owned allocations (HRD arrays, per-MB bitplanes and the
 * mb_type base array) and tears down the shared MpegEncContext state.
 * av_freep also NULLs each pointer, so a double close is safe for these. */
4442 static int vc1_decode_end(AVCodecContext *avctx)
4444 VC1Context *v = avctx->priv_data;
4446 av_freep(&v->hrd_rate);
4447 av_freep(&v->hrd_buffer);
4448 MPV_common_end(&v->s);
4449 av_freep(&v->mv_type_mb_plane);
4450 av_freep(&v->direct_mb_plane);
4451 av_freep(&v->acpred_plane);
4452 av_freep(&v->over_flags_plane);
/* mb_type[0..2] are offsets into mb_type_base; only the base is freed. */
4453 av_freep(&v->mb_type_base);
/* Codec registration for the VC-1 (advanced profile) decoder; the
 * initializer fields are not visible in this listing. */
4458 AVCodec vc1_decoder = {
/* Codec registration for the WMV3 (simple/main profile) decoder; the
 * initializer fields are not visible in this listing. */
4471 AVCodec wmv3_decoder = {
4484 #ifdef CONFIG_VC1_PARSER
4486 * finds the end of the current frame in the bitstream.
4487 * @return the position of the first byte of the next frame, or -1
/* Two-phase scan over a rolling 32-bit state: first find a frame/field
 * start code, then find the next start code that is not a field or slice
 * marker — that position ends the current frame.  State is carried in the
 * ParseContext across calls so frames split over packets are handled. */
4489 static int vc1_find_frame_end(ParseContext *pc, const uint8_t *buf,
4494 pic_found= pc->frame_start_found;
4499 for(i=0; i<buf_size; i++){
4500 state= (state<<8) | buf[i];
4501 if(state == VC1_CODE_FRAME || state == VC1_CODE_FIELD){
4510 /* EOF considered as end of frame */
/* Phase 2: any marker other than FIELD/SLICE terminates the frame. */
4513 for(; i<buf_size; i++){
4514 state= (state<<8) | buf[i];
4515 if(IS_MARKER(state) && state != VC1_CODE_FIELD && state != VC1_CODE_SLICE){
4516 pc->frame_start_found=0;
/* No terminator in this packet: save progress for the next call. */
4522 pc->frame_start_found= pic_found;
4524 return END_NOT_FOUND;
/* AVCodecParser callback: with PARSER_FLAG_COMPLETE_FRAMES the input is
 * passed through unchanged; otherwise vc1_find_frame_end +
 * ff_combine_frame reassemble complete frames across input packets. */
4527 static int vc1_parse(AVCodecParserContext *s,
4528 AVCodecContext *avctx,
4529 uint8_t **poutbuf, int *poutbuf_size,
4530 const uint8_t *buf, int buf_size)
4532 ParseContext *pc = s->priv_data;
4535 if(s->flags & PARSER_FLAG_COMPLETE_FRAMES){
4538 next= vc1_find_frame_end(pc, buf, buf_size);
/* ff_combine_frame buffers partial frames; a negative return means
 * "need more data" and nothing is output this call. */
4540 if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) {
4546 *poutbuf = (uint8_t *)buf;
4547 *poutbuf_size = buf_size;
/* Split callback: scans for the first start code that is neither a
 * sequence header nor an entry point, i.e. the boundary between the
 * stream's header data and its frame data.
 * NOTE(review): declared without static — presumably referenced from
 * another translation unit; confirm before narrowing linkage. */
4551 int vc1_split(AVCodecContext *avctx,
4552 const uint8_t *buf, int buf_size)
4557 for(i=0; i<buf_size; i++){
4558 state= (state<<8) | buf[i];
4559 if(IS_MARKER(state) && state != VC1_CODE_SEQHDR && state != VC1_CODE_ENTRYPOINT)
/* Parser registration for VC-1/WMV3 elementary streams.
 * NOTE(review): priv_data is sized as ParseContext1 while vc1_parse casts
 * it to ParseContext — presumably ParseContext is its first member;
 * confirm. Remaining initializer fields are not visible in this listing. */
4565 AVCodecParser vc1_parser = {
4567 sizeof(ParseContext1),
4573 #endif /* CONFIG_VC1_PARSER */