2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
34 #include "vc1acdata.h"
39 extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
40 extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
41 extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
42 #define MB_INTRA_VLC_BITS 9
43 extern VLC ff_msmp4_mb_i_vlc;
44 extern const uint16_t ff_msmp4_mb_i_table[64][2];
47 static const uint16_t table_mb_intra[64][2];
50 /** Available Profiles */
55 PROFILE_COMPLEX, ///< TODO: WMV9 specific
60 /** Sequence quantizer mode */
63 QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level
64 QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level
65 QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames
66 QUANT_UNIFORM ///< Uniform quant used for all frames
70 /** Where quant can be changed */
74 DQPROFILE_DOUBLE_EDGES,
75 DQPROFILE_SINGLE_EDGE,
80 /** @name Where quant can be changed
91 /** Which pair of edges is quantized with ALTPQUANT */
94 DQDOUBLE_BEDGE_TOPLEFT,
95 DQDOUBLE_BEDGE_TOPRIGHT,
96 DQDOUBLE_BEDGE_BOTTOMRIGHT,
97 DQDOUBLE_BEDGE_BOTTOMLEFT
101 /** MV modes for P frames */
104 MV_PMODE_1MV_HPEL_BILIN,
108 MV_PMODE_INTENSITY_COMP
112 /** @name MV types for B frames */
117 BMV_TYPE_INTERPOLATED
121 /** @name Block types for P/B frames */
123 enum TransformTypes {
127 TT_8X4, //Both halves
130 TT_4X8, //Both halves
135 /** Table for conversion between TTBLK and TTMB */
136 static const int ttblk_to_tt[3][8] = {
137 { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
138 { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
139 { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
142 static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };
144 /** MV P mode - the 5th element is only used for mode 1 */
145 static const uint8_t mv_pmode_table[2][5] = {
146 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
147 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
149 static const uint8_t mv_pmode_table2[2][4] = {
150 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
151 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
154 /** One more frame type */
/* Frame-rate code tables: numerators (FRAMERATENR) and denominators
 * (FRAMERATEDR) — presumably per SMPTE 421M display metadata; confirm
 * against the spec. */
static const int fps_nr[5] = { 24, 25, 30, 50, 60 },
                 fps_dr[2] = { 1000, 1001 };
/** PQINDEX -> PQUANT mapping for the three quantizer signalling modes */
static const uint8_t pquant_table[3][32] = {
  { /* Implicit quantizer */
    0, 1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8, 9, 10, 11, 12,
    13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
  },
  { /* Explicit quantizer, pquantizer uniform */
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
  },
  { /* Explicit quantizer, pquantizer non-uniform */
    0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
    14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
  }
};
174 /** @name VC-1 VLC tables and defines
175 * @todo TODO move this into the context
178 #define VC1_BFRACTION_VLC_BITS 7
179 static VLC vc1_bfraction_vlc;
180 #define VC1_IMODE_VLC_BITS 4
181 static VLC vc1_imode_vlc;
182 #define VC1_NORM2_VLC_BITS 3
183 static VLC vc1_norm2_vlc;
184 #define VC1_NORM6_VLC_BITS 9
185 static VLC vc1_norm6_vlc;
186 /* Could be optimized, one table only needs 8 bits */
187 #define VC1_TTMB_VLC_BITS 9 //12
188 static VLC vc1_ttmb_vlc[3];
189 #define VC1_MV_DIFF_VLC_BITS 9 //15
190 static VLC vc1_mv_diff_vlc[4];
191 #define VC1_CBPCY_P_VLC_BITS 9 //14
192 static VLC vc1_cbpcy_p_vlc[4];
193 #define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
194 static VLC vc1_4mv_block_pattern_vlc[4];
195 #define VC1_TTBLK_VLC_BITS 5
196 static VLC vc1_ttblk_vlc[3];
197 #define VC1_SUBBLKPAT_VLC_BITS 6
198 static VLC vc1_subblkpat_vlc[3];
200 static VLC vc1_ac_coeff_table[8];
204 CS_HIGH_MOT_INTRA = 0,
214 /** @name Overlap conditions for Advanced Profile */
225 * @fixme Change size wherever another size is more efficient
226 * Many members are only used for Advanced Profile
228 typedef struct VC1Context{
233 /** Simple/Main Profile sequence header */
235 int res_sm; ///< reserved, 2b
236 int res_x8; ///< reserved
237 int multires; ///< frame-level RESPIC syntax element present
238 int res_fasttx; ///< reserved, always 1
239 int res_transtab; ///< reserved, always 0
240 int rangered; ///< RANGEREDFRM (range reduction) syntax element present
242 int res_rtm_flag; ///< reserved, set to 1
243 int reserved; ///< reserved
246 /** Advanced Profile */
248 int level; ///< 3bits, for Advanced/Simple Profile, provided by TS layer
249 int chromaformat; ///< 2bits, 2=4:2:0, only defined
250 int postprocflag; ///< Per-frame processing suggestion flag present
251 int broadcast; ///< TFF/RFF present
252 int interlace; ///< Progressive/interlaced (RPTFTM syntax element)
253 int tfcntrflag; ///< TFCNTR present
254 int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
255 int extended_dmv; ///< Additional extended dmv range at P/B frame-level
256 int color_prim; ///< 8bits, chroma coordinates of the color primaries
257 int transfer_char; ///< 8bits, Opto-electronic transfer characteristics
258 int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
259 int hrd_param_flag; ///< Presence of Hypothetical Reference
260 ///< Decoder parameters
261 int psf; ///< Progressive Segmented Frame
264 /** Sequence header data for all Profiles
265 * TODO: choose between ints, uint8_ts and monobit flags
268 int profile; ///< 2bits, Profile
269 int frmrtq_postproc; ///< 3bits,
270 int bitrtq_postproc; ///< 5bits, quantized framerate-based postprocessing strength
271 int fastuvmc; ///< Rounding of qpel vector to hpel ? (not in Simple)
272 int extended_mv; ///< Ext MV in P/B (not in Simple)
273 int dquant; ///< How qscale varies with MBs, 2bits (not in Simple)
274 int vstransform; ///< variable-size [48]x[48] transform type + info
275 int overlap; ///< overlapped transforms in use
276 int quantizer_mode; ///< 2bits, quantizer mode used for sequence, see QUANT_*
277 int finterpflag; ///< INTERPFRM present
280 /** Frame decoding info for all profiles */
282 uint8_t mv_mode; ///< MV coding monde
283 uint8_t mv_mode2; ///< Secondary MV coding mode (B frames)
284 int k_x; ///< Number of bits for MVs (depends on MV range)
285 int k_y; ///< Number of bits for MVs (depends on MV range)
286 int range_x, range_y; ///< MV range
287 uint8_t pq, altpq; ///< Current/alternate frame quantizer scale
288 /** pquant parameters */
295 /** AC coding set indexes
296 * @see 8.1.1.10, p(1)10
299 int c_ac_table_index; ///< Chroma index from ACFRM element
300 int y_ac_table_index; ///< Luma index from AC2FRM element
302 int ttfrm; ///< Transform type info present at frame level
303 uint8_t ttmbf; ///< Transform type flag
304 uint8_t ttblk4x4; ///< Value of ttblk which indicates a 4x4 transform
305 int codingset; ///< index of current table set from 11.8 to use for luma block decoding
306 int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding
307 int pqindex; ///< raw pqindex used in coding set selection
308 int a_avail, c_avail;
309 uint8_t *mb_type_base, *mb_type[3];
312 /** Luma compensation parameters */
317 int16_t bfraction; ///< Relative position % anchors=> how to scale MVs
318 uint8_t halfpq; ///< Uniform quant over image and qp+.5
319 uint8_t respic; ///< Frame-level flag for resized images
320 int buffer_fullness; ///< HRD info
322 * -# 0 -> [-64n 63.f] x [-32, 31.f]
323 * -# 1 -> [-128, 127.f] x [-64, 63.f]
324 * -# 2 -> [-512, 511.f] x [-128, 127.f]
325 * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
328 uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use
329 VLC *cbpcy_vlc; ///< CBPCY VLC table
330 int tt_index; ///< Index for Transform Type tables
331 uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
332 uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs
333 int mv_type_is_raw; ///< mv type mb plane is not coded
334 int dmb_is_raw; ///< direct mb plane is raw
335 int skip_is_raw; ///< skip mb plane is not coded
336 uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
337 int use_ic; ///< use intensity compensation in B-frames
338 int rnd; ///< rounding control
340 /** Frame decoding info for S/M profiles only */
342 uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128)
346 /** Frame decoding info for Advanced profile */
348 uint8_t fcm; ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
349 uint8_t numpanscanwin;
351 uint8_t rptfrm, tff, rff;
354 uint16_t bottomrightx;
355 uint16_t bottomrighty;
358 int hrd_num_leaky_buckets;
359 uint8_t bit_rate_exponent;
360 uint8_t buffer_size_exponent;
361 uint8_t* acpred_plane; ///< AC prediction flags bitplane
363 uint8_t* over_flags_plane; ///< Overflags bitplane
366 uint16_t *hrd_rate, *hrd_buffer;
367 uint8_t *hrd_fullness;
368 uint8_t range_mapy_flag;
369 uint8_t range_mapuv_flag;
379 * Get unary code of limited length
380 * @fixme FIXME Slow and ugly
381 * @param gb GetBitContext
382 * @param[in] stop The bitstop value (unary code of 1's or 0's)
383 * @param[in] len Maximum length
384 * @return Unary length/index
386 static int get_prefix(GetBitContext *gb, int stop, int len)
391 for(i = 0; i < len && get_bits1(gb) != stop; i++);
393 /* int i = 0, tmp = !stop;
395 while (i != len && tmp != stop)
397 tmp = get_bits(gb, 1);
400 if (i == len && tmp != stop) return len+1;
407 UPDATE_CACHE(re, gb);
408 buf=GET_CACHE(re, gb); //Still not sure
409 if (stop) buf = ~buf;
411 log= av_log2(-buf); //FIXME: -?
413 LAST_SKIP_BITS(re, gb, log+1);
414 CLOSE_READER(re, gb);
418 LAST_SKIP_BITS(re, gb, limit);
419 CLOSE_READER(re, gb);
424 static inline int decode210(GetBitContext *gb){
430 return 2 - get_bits1(gb);
434 * Init VC-1 specific tables and VC1Context members
435 * @param v The VC1Context to initialize
438 static int vc1_init_common(VC1Context *v)
443 v->hrd_rate = v->hrd_buffer = NULL;
449 init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
450 vc1_bfraction_bits, 1, 1,
451 vc1_bfraction_codes, 1, 1, 1);
452 init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
453 vc1_norm2_bits, 1, 1,
454 vc1_norm2_codes, 1, 1, 1);
455 init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
456 vc1_norm6_bits, 1, 1,
457 vc1_norm6_codes, 2, 2, 1);
458 init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
459 vc1_imode_bits, 1, 1,
460 vc1_imode_codes, 1, 1, 1);
463 init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
464 vc1_ttmb_bits[i], 1, 1,
465 vc1_ttmb_codes[i], 2, 2, 1);
466 init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
467 vc1_ttblk_bits[i], 1, 1,
468 vc1_ttblk_codes[i], 1, 1, 1);
469 init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
470 vc1_subblkpat_bits[i], 1, 1,
471 vc1_subblkpat_codes[i], 1, 1, 1);
475 init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
476 vc1_4mv_block_pattern_bits[i], 1, 1,
477 vc1_4mv_block_pattern_codes[i], 1, 1, 1);
478 init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
479 vc1_cbpcy_p_bits[i], 1, 1,
480 vc1_cbpcy_p_codes[i], 2, 2, 1);
481 init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
482 vc1_mv_diff_bits[i], 1, 1,
483 vc1_mv_diff_codes[i], 2, 2, 1);
486 init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
487 &vc1_ac_tables[i][0][1], 8, 4,
488 &vc1_ac_tables[i][0][0], 8, 4, 1);
489 init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
490 &ff_msmp4_mb_i_table[0][1], 4, 2,
491 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
496 v->mvrange = 0; /* 7.1.1.18, p80 */
501 /***********************************************************************/
503 * @defgroup bitplane VC9 Bitplane decoding
508 /** @addtogroup bitplane
521 /** @} */ //imode defines
523 /** Decode rows by checking if they are skipped
524 * @param plane Buffer to store decoded bits
525 * @param[in] width Width of this buffer
526 * @param[in] height Height of this buffer
527 * @param[in] stride of this buffer
529 static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
532 for (y=0; y<height; y++){
533 if (!get_bits(gb, 1)) //rowskip
534 memset(plane, 0, width);
536 for (x=0; x<width; x++)
537 plane[x] = get_bits(gb, 1);
542 /** Decode columns by checking if they are skipped
543 * @param plane Buffer to store decoded bits
544 * @param[in] width Width of this buffer
545 * @param[in] height Height of this buffer
546 * @param[in] stride of this buffer
547 * @fixme FIXME: Optimize
549 static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
552 for (x=0; x<width; x++){
553 if (!get_bits(gb, 1)) //colskip
554 for (y=0; y<height; y++)
557 for (y=0; y<height; y++)
558 plane[y*stride] = get_bits(gb, 1);
563 /** Decode a bitplane's bits
564 * @param bp Bitplane where to store the decode bits
565 * @param v VC-1 context for bit reading and logging
567 * @fixme FIXME: Optimize
569 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
571 GetBitContext *gb = &v->s.gb;
573 int imode, x, y, code, offset;
574 uint8_t invert, *planep = data;
575 int width, height, stride;
577 width = v->s.mb_width;
578 height = v->s.mb_height;
579 stride = v->s.mb_stride;
580 invert = get_bits(gb, 1);
581 imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
587 //Data is actually read in the MB layer (same for all tests == "raw")
588 *raw_flag = 1; //invert ignored
592 if ((height * width) & 1)
594 *planep++ = get_bits(gb, 1);
598 // decode bitplane as one long line
599 for (y = offset; y < height * width; y += 2) {
600 code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
601 *planep++ = code & 1;
603 if(offset == width) {
605 planep += stride - width;
607 *planep++ = code >> 1;
609 if(offset == width) {
611 planep += stride - width;
617 if(!(height % 3) && (width % 3)) { // use 2x3 decoding
618 for(y = 0; y < height; y+= 3) {
619 for(x = width & 1; x < width; x += 2) {
620 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
622 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
625 planep[x + 0] = (code >> 0) & 1;
626 planep[x + 1] = (code >> 1) & 1;
627 planep[x + 0 + stride] = (code >> 2) & 1;
628 planep[x + 1 + stride] = (code >> 3) & 1;
629 planep[x + 0 + stride * 2] = (code >> 4) & 1;
630 planep[x + 1 + stride * 2] = (code >> 5) & 1;
632 planep += stride * 3;
634 if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
636 planep += (height & 1) * stride;
637 for(y = height & 1; y < height; y += 2) {
638 for(x = width % 3; x < width; x += 3) {
639 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
641 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
644 planep[x + 0] = (code >> 0) & 1;
645 planep[x + 1] = (code >> 1) & 1;
646 planep[x + 2] = (code >> 2) & 1;
647 planep[x + 0 + stride] = (code >> 3) & 1;
648 planep[x + 1 + stride] = (code >> 4) & 1;
649 planep[x + 2 + stride] = (code >> 5) & 1;
651 planep += stride * 2;
654 if(x) decode_colskip(data , x, height , stride, &v->s.gb);
655 if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
659 decode_rowskip(data, width, height, stride, &v->s.gb);
662 decode_colskip(data, width, height, stride, &v->s.gb);
667 /* Applying diff operator */
668 if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
672 for (x=1; x<width; x++)
673 planep[x] ^= planep[x-1];
674 for (y=1; y<height; y++)
677 planep[0] ^= planep[-stride];
678 for (x=1; x<width; x++)
680 if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
681 else planep[x] ^= planep[x-1];
688 for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
690 return (imode<<1) + invert;
693 /** @} */ //Bitplane group
695 /***********************************************************************/
696 /** VOP Dquant decoding
697 * @param v VC-1 Context
699 static int vop_dquant_decoding(VC1Context *v)
701 GetBitContext *gb = &v->s.gb;
707 pqdiff = get_bits(gb, 3);
708 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
709 else v->altpq = v->pq + pqdiff + 1;
713 v->dquantfrm = get_bits(gb, 1);
716 v->dqprofile = get_bits(gb, 2);
717 switch (v->dqprofile)
719 case DQPROFILE_SINGLE_EDGE:
720 case DQPROFILE_DOUBLE_EDGES:
721 v->dqsbedge = get_bits(gb, 2);
723 case DQPROFILE_ALL_MBS:
724 v->dqbilevel = get_bits(gb, 1);
725 default: break; //Forbidden ?
727 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
729 pqdiff = get_bits(gb, 3);
730 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
731 else v->altpq = v->pq + pqdiff + 1;
738 /** Put block onto picture
740 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
744 DSPContext *dsp = &v->s.dsp;
748 for(k = 0; k < 6; k++)
749 for(j = 0; j < 8; j++)
750 for(i = 0; i < 8; i++)
751 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
754 ys = v->s.current_picture.linesize[0];
755 us = v->s.current_picture.linesize[1];
756 vs = v->s.current_picture.linesize[2];
759 dsp->put_pixels_clamped(block[0], Y, ys);
760 dsp->put_pixels_clamped(block[1], Y + 8, ys);
762 dsp->put_pixels_clamped(block[2], Y, ys);
763 dsp->put_pixels_clamped(block[3], Y + 8, ys);
765 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
766 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
767 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
771 /** Do motion compensation over 1 macroblock
772 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
774 static void vc1_mc_1mv(VC1Context *v, int dir)
776 MpegEncContext *s = &v->s;
777 DSPContext *dsp = &v->s.dsp;
778 uint8_t *srcY, *srcU, *srcV;
779 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
781 if(!v->s.last_picture.data[0])return;
783 mx = s->mv[dir][0][0];
784 my = s->mv[dir][0][1];
786 // store motion vectors for further use in B frames
787 if(s->pict_type == P_TYPE) {
788 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
789 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
791 uvmx = (mx + ((mx & 3) == 3)) >> 1;
792 uvmy = (my + ((my & 3) == 3)) >> 1;
794 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
795 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
798 srcY = s->last_picture.data[0];
799 srcU = s->last_picture.data[1];
800 srcV = s->last_picture.data[2];
802 srcY = s->next_picture.data[0];
803 srcU = s->next_picture.data[1];
804 srcV = s->next_picture.data[2];
807 src_x = s->mb_x * 16 + (mx >> 2);
808 src_y = s->mb_y * 16 + (my >> 2);
809 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
810 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
812 src_x = clip( src_x, -16, s->mb_width * 16);
813 src_y = clip( src_y, -16, s->mb_height * 16);
814 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
815 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
817 srcY += src_y * s->linesize + src_x;
818 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
819 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
821 /* for grayscale we should not try to read from unknown area */
822 if(s->flags & CODEC_FLAG_GRAY) {
823 srcU = s->edge_emu_buffer + 18 * s->linesize;
824 srcV = s->edge_emu_buffer + 18 * s->linesize;
827 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
828 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
829 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
830 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
832 srcY -= s->mspel * (1 + s->linesize);
833 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
834 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
835 srcY = s->edge_emu_buffer;
836 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
837 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
838 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
839 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
842 /* if we deal with range reduction we need to scale source blocks */
848 for(j = 0; j < 17 + s->mspel*2; j++) {
849 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
852 src = srcU; src2 = srcV;
853 for(j = 0; j < 9; j++) {
854 for(i = 0; i < 9; i++) {
855 src[i] = ((src[i] - 128) >> 1) + 128;
856 src2[i] = ((src2[i] - 128) >> 1) + 128;
858 src += s->uvlinesize;
859 src2 += s->uvlinesize;
862 /* if we deal with intensity compensation we need to scale source blocks */
863 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
868 for(j = 0; j < 17 + s->mspel*2; j++) {
869 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
872 src = srcU; src2 = srcV;
873 for(j = 0; j < 9; j++) {
874 for(i = 0; i < 9; i++) {
875 src[i] = v->lutuv[src[i]];
876 src2[i] = v->lutuv[src2[i]];
878 src += s->uvlinesize;
879 src2 += s->uvlinesize;
882 srcY += s->mspel * (1 + s->linesize);
886 dxy = ((my & 3) << 2) | (mx & 3);
887 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
888 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
889 srcY += s->linesize * 8;
890 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
891 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
892 } else { // hpel mc - always used for luma
893 dxy = (my & 2) | ((mx & 2) >> 1);
896 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
898 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
901 if(s->flags & CODEC_FLAG_GRAY) return;
902 /* Chroma MC always uses qpel bilinear */
903 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
907 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
908 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
910 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
911 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
915 /** Do motion compensation for 4-MV macroblock - luminance block
917 static void vc1_mc_4mv_luma(VC1Context *v, int n)
919 MpegEncContext *s = &v->s;
920 DSPContext *dsp = &v->s.dsp;
922 int dxy, mx, my, src_x, src_y;
925 if(!v->s.last_picture.data[0])return;
928 srcY = s->last_picture.data[0];
930 off = s->linesize * 4 * (n&2) + (n&1) * 8;
932 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
933 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
935 src_x = clip( src_x, -16, s->mb_width * 16);
936 src_y = clip( src_y, -16, s->mb_height * 16);
938 srcY += src_y * s->linesize + src_x;
940 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
941 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
942 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
943 srcY -= s->mspel * (1 + s->linesize);
944 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
945 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
946 srcY = s->edge_emu_buffer;
947 /* if we deal with range reduction we need to scale source blocks */
953 for(j = 0; j < 9 + s->mspel*2; j++) {
954 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
958 /* if we deal with intensity compensation we need to scale source blocks */
959 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
964 for(j = 0; j < 9 + s->mspel*2; j++) {
965 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
969 srcY += s->mspel * (1 + s->linesize);
973 dxy = ((my & 3) << 2) | (mx & 3);
974 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
975 } else { // hpel mc - always used for luma
976 dxy = (my & 2) | ((mx & 2) >> 1);
978 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
980 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/**
 * Median of four ints, computed as the average of the two middle values.
 * NOTE(review): the outer "if (a < b)" split and braces were lost in
 * extraction and are reconstructed from the four visible return
 * expressions — verify against upstream.
 */
static inline int median4(int a, int b, int c, int d)
{
    if(a < b) {
        if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else      return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else      return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}
996 /** Do motion compensation for 4-MV macroblock - both chroma blocks
998 static void vc1_mc_4mv_chroma(VC1Context *v)
1000 MpegEncContext *s = &v->s;
1001 DSPContext *dsp = &v->s.dsp;
1002 uint8_t *srcU, *srcV;
1003 int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
1004 int i, idx, tx = 0, ty = 0;
1005 int mvx[4], mvy[4], intra[4];
1006 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
1008 if(!v->s.last_picture.data[0])return;
1009 if(s->flags & CODEC_FLAG_GRAY) return;
1011 for(i = 0; i < 4; i++) {
1012 mvx[i] = s->mv[0][i][0];
1013 mvy[i] = s->mv[0][i][1];
1014 intra[i] = v->mb_type[0][s->block_index[i]];
1017 /* calculate chroma MV vector from four luma MVs */
1018 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
1019 if(!idx) { // all blocks are inter
1020 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
1021 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
1022 } else if(count[idx] == 1) { // 3 inter blocks
1025 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
1026 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
1029 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
1030 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
1033 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
1034 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
1037 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
1038 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
1041 } else if(count[idx] == 2) {
1043 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
1044 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
1045 tx = (mvx[t1] + mvx[t2]) / 2;
1046 ty = (mvy[t1] + mvy[t2]) / 2;
1048 return; //no need to do MC for inter blocks
1050 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
1051 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
1052 uvmx = (tx + ((tx&3) == 3)) >> 1;
1053 uvmy = (ty + ((ty&3) == 3)) >> 1;
1055 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
1056 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
1059 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1060 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1062 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
1063 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
1064 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1065 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1066 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1067 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1068 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
1069 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
1070 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1071 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
1072 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1073 srcU = s->edge_emu_buffer;
1074 srcV = s->edge_emu_buffer + 16;
1076 /* if we deal with range reduction we need to scale source blocks */
1077 if(v->rangeredfrm) {
1079 uint8_t *src, *src2;
1081 src = srcU; src2 = srcV;
1082 for(j = 0; j < 9; j++) {
1083 for(i = 0; i < 9; i++) {
1084 src[i] = ((src[i] - 128) >> 1) + 128;
1085 src2[i] = ((src2[i] - 128) >> 1) + 128;
1087 src += s->uvlinesize;
1088 src2 += s->uvlinesize;
1091 /* if we deal with intensity compensation we need to scale source blocks */
1092 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1094 uint8_t *src, *src2;
1096 src = srcU; src2 = srcV;
1097 for(j = 0; j < 9; j++) {
1098 for(i = 0; i < 9; i++) {
1099 src[i] = v->lutuv[src[i]];
1100 src2[i] = v->lutuv[src2[i]];
1102 src += s->uvlinesize;
1103 src2 += s->uvlinesize;
1108 /* Chroma MC always uses qpel bilinear */
1109 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
1113 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1114 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1116 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1117 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1121 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
1124 * Decode Simple/Main Profiles sequence header
1125 * @see Figure 7-8, p16-17
1126 * @param avctx Codec context
1127 * @param gb GetBit context initialized from Codec context extra_data
1130 static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
1132 VC1Context *v = avctx->priv_data;
1134 av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
1135 v->profile = get_bits(gb, 2);
1136 if (v->profile == 2)
1138 av_log(avctx, AV_LOG_ERROR, "Profile value 2 is forbidden (and WMV3 Complex Profile is unsupported)\n");
1142 if (v->profile == PROFILE_ADVANCED)
1144 return decode_sequence_header_adv(v, gb);
1148 v->res_sm = get_bits(gb, 2); //reserved
1151 av_log(avctx, AV_LOG_ERROR,
1152 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
1158 v->frmrtq_postproc = get_bits(gb, 3); //common
1159 // (bitrate-32kbps)/64kbps
1160 v->bitrtq_postproc = get_bits(gb, 5); //common
1161 v->s.loop_filter = get_bits(gb, 1); //common
1162 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
1164 av_log(avctx, AV_LOG_ERROR,
1165 "LOOPFILTER shell not be enabled in simple profile\n");
1168 v->res_x8 = get_bits(gb, 1); //reserved
1171 av_log(avctx, AV_LOG_ERROR,
1172 "1 for reserved RES_X8 is forbidden\n");
1175 v->multires = get_bits(gb, 1);
1176 v->res_fasttx = get_bits(gb, 1);
1179 av_log(avctx, AV_LOG_ERROR,
1180 "0 for reserved RES_FASTTX is forbidden\n");
1184 v->fastuvmc = get_bits(gb, 1); //common
1185 if (!v->profile && !v->fastuvmc)
1187 av_log(avctx, AV_LOG_ERROR,
1188 "FASTUVMC unavailable in Simple Profile\n");
1191 v->extended_mv = get_bits(gb, 1); //common
1192 if (!v->profile && v->extended_mv)
1194 av_log(avctx, AV_LOG_ERROR,
1195 "Extended MVs unavailable in Simple Profile\n");
1198 v->dquant = get_bits(gb, 2); //common
1199 v->vstransform = get_bits(gb, 1); //common
1201 v->res_transtab = get_bits(gb, 1);
1202 if (v->res_transtab)
1204 av_log(avctx, AV_LOG_ERROR,
1205 "1 for reserved RES_TRANSTAB is forbidden\n");
1209 v->overlap = get_bits(gb, 1); //common
1211 v->s.resync_marker = get_bits(gb, 1);
1212 v->rangered = get_bits(gb, 1);
1213 if (v->rangered && v->profile == PROFILE_SIMPLE)
1215 av_log(avctx, AV_LOG_INFO,
1216 "RANGERED should be set to 0 in simple profile\n");
1219 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
1220 v->quantizer_mode = get_bits(gb, 2); //common
1222 v->finterpflag = get_bits(gb, 1); //common
1223 v->res_rtm_flag = get_bits(gb, 1); //reserved
1224 if (!v->res_rtm_flag)
1226 // av_log(avctx, AV_LOG_ERROR,
1227 // "0 for reserved RES_RTM_FLAG is forbidden\n");
1228 av_log(avctx, AV_LOG_ERROR,
1229 "Old WMV3 version detected, only I-frames will be decoded\n");
1232 av_log(avctx, AV_LOG_DEBUG,
1233 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1234 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
1235 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
1236 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
1237 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
1238 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
1239 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
1240 v->dquant, v->quantizer_mode, avctx->max_b_frames
/**
 * Decode the Advanced Profile sequence header.
 * Reads level, chroma format, post-processing hints, coded dimensions,
 * interlace/broadcast flags, optional display metadata and optional HRD
 * (leaky-bucket) parameters from the bitstream into the VC1Context.
 */
1245 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
1247 v->res_rtm_flag = 1;
1248 v->level = get_bits(gb, 3);
1251 av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
/* CHROMAFORMAT: only value 1 (4:2:0) is handled by this decoder */
1253 v->chromaformat = get_bits(gb, 2);
1254 if (v->chromaformat != 1)
1256 av_log(v->s.avctx, AV_LOG_ERROR,
1257 "Only 4:2:0 chroma format supported\n");
1262 v->frmrtq_postproc = get_bits(gb, 3); //common
1263 // (bitrate-32kbps)/64kbps
1264 v->bitrtq_postproc = get_bits(gb, 5); //common
1265 v->postprocflag = get_bits(gb, 1); //common
/* coded size is signalled in units of 2 pixels, biased by 1 */
1267 v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
1268 v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
1269 v->broadcast = get_bits1(gb);
1270 v->interlace = get_bits1(gb);
1271 v->tfcntrflag = get_bits1(gb);
1272 v->finterpflag = get_bits1(gb);
1273 get_bits1(gb); // reserved
1274 v->psf = get_bits1(gb);
1275 if(v->psf) { //PsF, 6.1.13
1276 av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
/* optional display-extension block: parsed for logging only, does not
 * affect decoding */
1279 if(get_bits1(gb)) { //Display Info - decoding is not affected by it
1281 av_log(v->s.avctx, AV_LOG_INFO, "Display extended info:\n");
1282 w = get_bits(gb, 14);
1283 h = get_bits(gb, 14);
1284 av_log(v->s.avctx, AV_LOG_INFO, "Display dimensions: %ix%i\n", w, h);
1285 //TODO: store aspect ratio in AVCodecContext
1287 ar = get_bits(gb, 4);
1289 w = get_bits(gb, 8);
1290 h = get_bits(gb, 8);
1293 if(get_bits1(gb)){ //framerate stuff
1303 v->color_prim = get_bits(gb, 8);
1304 v->transfer_char = get_bits(gb, 8);
1305 v->matrix_coef = get_bits(gb, 8);
/* HRD parameters: only the leaky-bucket count is kept (needed to skip
 * per-bucket fullness in the entry-point header); rates/sizes are
 * discarded */
1309 v->hrd_param_flag = get_bits1(gb);
1310 if(v->hrd_param_flag) {
1312 v->hrd_num_leaky_buckets = get_bits(gb, 5);
1313 get_bits(gb, 4); //bitrate exponent
1314 get_bits(gb, 4); //buffer size exponent
1315 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1316 get_bits(gb, 16); //hrd_rate[n]
1317 get_bits(gb, 16); //hrd_buffer[n]
/**
 * Decode an Advanced Profile entry-point header.
 * Refreshes the per-entry-point coding tools (loop filter, fast UV MC,
 * extended MV, dquant, variable-size transform, overlap, quantizer mode),
 * skips HRD fullness if the sequence header signalled HRD parameters, and
 * optionally overrides the coded dimensions and luma/chroma ranges.
 */
1323 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
1325 VC1Context *v = avctx->priv_data;
1328 av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
1329 get_bits1(gb); // broken link
/* CLOSED_ENTRY == 1 means no B-frames reference across this point */
1330 avctx->max_b_frames = 1 - get_bits1(gb); // 'closed entry' also signalize possible B-frames
1331 v->panscanflag = get_bits1(gb);
1332 get_bits1(gb); // refdist flag
1333 v->s.loop_filter = get_bits1(gb);
1334 v->fastuvmc = get_bits1(gb);
1335 v->extended_mv = get_bits1(gb);
1336 v->dquant = get_bits(gb, 2);
1337 v->vstransform = get_bits1(gb);
1338 v->overlap = get_bits1(gb);
1339 v->quantizer_mode = get_bits(gb, 2);
/* skip HRD buffer fullness, one byte per leaky bucket (count comes from
 * the sequence header) */
1341 if(v->hrd_param_flag){
1342 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1343 get_bits(gb, 8); //hrd_full[n]
1348 avctx->coded_width = (get_bits(gb, 12)+1)<<1;
1349 avctx->coded_height = (get_bits(gb, 12)+1)<<1;
1352 v->extended_dmv = get_bits1(gb);
/* luma/chroma range mapping is parsed but not applied — warn the user */
1354 av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
1355 skip_bits(gb, 3); // Y range, ignored for now
1358 av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
1359 skip_bits(gb, 3); // UV range, ignored for now
/**
 * Parse a Simple/Main profile frame (picture) header.
 * Determines picture type, quantizer parameters, MV range and mode,
 * decodes the required bitplanes, and selects VLC tables for the frame.
 * @return 0 on success, -1 on bitplane decoding failure
 */
1365 static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
1367 int pqindex, lowquant, status;
1369 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1370 skip_bits(gb, 2); //framecnt unused
1372 if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
/* picture type: 1 bit normally; with B-frames enabled a second bit
 * distinguishes I from B when the first bit is 0 */
1373 v->s.pict_type = get_bits(gb, 1);
1374 if (v->s.avctx->max_b_frames) {
1375 if (!v->s.pict_type) {
1376 if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
1377 else v->s.pict_type = B_TYPE;
1378 } else v->s.pict_type = P_TYPE;
1379 } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
/* BFRACTION == 0 marks a BI picture */
1382 if(v->s.pict_type == B_TYPE) {
1383 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1384 v->bfraction = vc1_bfraction_lut[v->bfraction];
1385 if(v->bfraction == 0) {
1386 v->s.pict_type = BI_TYPE;
1389 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1390 get_bits(gb, 7); // skip buffer fullness
1393 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1395 if(v->s.pict_type == P_TYPE)
1398 /* Quantizer stuff */
/* PQINDEX selects PQUANT; implicit mode uses the non-linear table and
 * implies uniform quantizer for PQINDEX < 9 */
1399 pqindex = get_bits(gb, 5);
1400 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1401 v->pq = pquant_table[0][pqindex];
1403 v->pq = pquant_table[1][pqindex];
1406 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1407 v->pquantizer = pqindex < 9;
1408 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1410 v->pqindex = pqindex;
1411 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1413 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1414 v->pquantizer = get_bits(gb, 1);
/* MVRANGE extends the MV coding range; k_x/k_y are the MV component bit
 * lengths and range_x/range_y the signed wrap ranges */
1416 if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
1417 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1418 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1419 v->range_x = 1 << (v->k_x - 1);
1420 v->range_y = 1 << (v->k_y - 1);
1421 if (v->profile == PROFILE_ADVANCED)
1423 if (v->postprocflag) v->postproc = get_bits(gb, 1);
1426 if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);
1428 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
1429 // (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
/* intensity compensation only persists across B frames */
1431 if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
1433 switch(v->s.pict_type) {
1435 if (v->pq < 5) v->tt_index = 0;
1436 else if(v->pq < 13) v->tt_index = 1;
1437 else v->tt_index = 2;
1439 lowquant = (v->pq > 12) ? 0 : 1;
1440 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1441 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1443 int scale, shift, i;
1444 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1445 v->lumscale = get_bits(gb, 6);
1446 v->lumshift = get_bits(gb, 6);
1448 /* fill lookup tables for intensity compensation */
/* LUMSHIFT > 31 is treated as a negative shift (wraps at 64) */
1451 shift = (255 - v->lumshift * 2) << 6;
1452 if(v->lumshift > 31)
1455 scale = v->lumscale + 32;
1456 if(v->lumshift > 31)
1457 shift = (v->lumshift - 64) << 6;
1459 shift = v->lumshift << 6;
/* luty/lutuv remap reference samples; 6.6 fixed-point, rounded */
1461 for(i = 0; i < 256; i++) {
1462 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1463 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
/* derive sub-pel precision flags from the (possibly two-level) MV mode */
1466 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1467 v->s.quarter_sample = 0;
1468 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1469 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1470 v->s.quarter_sample = 0;
1472 v->s.quarter_sample = 1;
1474 v->s.quarter_sample = 1;
1475 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
/* mixed-MV frames carry a per-MB MV-type bitplane */
1477 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1478 v->mv_mode2 == MV_PMODE_MIXED_MV)
1479 || v->mv_mode == MV_PMODE_MIXED_MV)
1481 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1482 if (status < 0) return -1;
1483 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1484 "Imode: %i, Invert: %i\n", status>>1, status&1);
1486 v->mv_type_is_raw = 0;
1487 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1489 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1490 if (status < 0) return -1;
1491 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1492 "Imode: %i, Invert: %i\n", status>>1, status&1);
1494 /* Hopefully this is correct for P frames */
1495 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1496 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1500 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1501 vop_dquant_decoding(v);
1504 v->ttfrm = 0; //FIXME Is that so ?
1507 v->ttmbf = get_bits(gb, 1);
1510 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* B-frame case: single MV mode bit, direct-MB and skip bitplanes */
1518 if (v->pq < 5) v->tt_index = 0;
1519 else if(v->pq < 13) v->tt_index = 1;
1520 else v->tt_index = 2;
1522 lowquant = (v->pq > 12) ? 0 : 1;
1523 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1524 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1525 v->s.mspel = v->s.quarter_sample;
1527 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1528 if (status < 0) return -1;
1529 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1530 "Imode: %i, Invert: %i\n", status>>1, status&1);
1531 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1532 if (status < 0) return -1;
1533 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1534 "Imode: %i, Invert: %i\n", status>>1, status&1);
1536 v->s.mv_table_index = get_bits(gb, 2);
1537 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1541 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1542 vop_dquant_decoding(v);
1548 v->ttmbf = get_bits(gb, 1);
1551 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* AC/DC coefficient coding-set selection (common tail) */
1561 v->c_ac_table_index = decode012(gb);
1562 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1564 v->y_ac_table_index = decode012(gb);
1567 v->s.dc_table_index = get_bits(gb, 1);
/* BI pictures are decoded as B internally */
1569 if(v->s.pict_type == BI_TYPE) {
1570 v->s.pict_type = B_TYPE;
/**
 * Parse an Advanced Profile frame (picture) header.
 * Reads frame coding mode and picture type, pan-scan/rounding/sampling
 * flags, quantizer parameters, MV range and mode, bitplanes and VLC table
 * selections.
 * @return 0 on success, -1 on bitplane decoding failure
 */
1576 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
1579 int pqindex, lowquant;
1582 v->p_frame_skipped = 0;
/* FCM: 0 progressive, other values interlaced (decode012) */
1585 fcm = decode012(gb);
/* PTYPE is a unary code: P, B, I, BI, skipped-P in that order */
1586 switch(get_prefix(gb, 0, 4)) {
1588 v->s.pict_type = P_TYPE;
1591 v->s.pict_type = B_TYPE;
1594 v->s.pict_type = I_TYPE;
1597 v->s.pict_type = BI_TYPE;
1600 v->s.pict_type = P_TYPE; // skipped pic
1601 v->p_frame_skipped = 1;
1607 if(!v->interlace || v->panscanflag) {
1614 if(v->panscanflag) {
1617 v->rnd = get_bits1(gb);
1619 v->uvsamp = get_bits1(gb);
1620 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
/* BFRACTION == 0 marks a BI picture (should not occur in AP streams) */
1621 if(v->s.pict_type == B_TYPE) {
1622 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1623 v->bfraction = vc1_bfraction_lut[v->bfraction];
1624 if(v->bfraction == 0) {
1625 v->s.pict_type = BI_TYPE; /* XXX: should not happen here */
/* quantizer setup, same logic as the Simple/Main profile header */
1628 pqindex = get_bits(gb, 5);
1629 v->pqindex = pqindex;
1630 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1631 v->pq = pquant_table[0][pqindex];
1633 v->pq = pquant_table[1][pqindex];
1636 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1637 v->pquantizer = pqindex < 9;
1638 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1640 v->pqindex = pqindex;
1641 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1643 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1644 v->pquantizer = get_bits(gb, 1);
1646 switch(v->s.pict_type) {
/* I/BI: AC-prediction bitplane plus conditional-overlap signalling */
1649 status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
1650 if (status < 0) return -1;
1651 av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
1652 "Imode: %i, Invert: %i\n", status>>1, status&1);
1653 v->condover = CONDOVER_NONE;
1654 if(v->overlap && v->pq <= 8) {
1655 v->condover = decode012(gb);
1656 if(v->condover == CONDOVER_SELECT) {
1657 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
1658 if (status < 0) return -1;
1659 av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
1660 "Imode: %i, Invert: %i\n", status>>1, status&1);
/* P-frame path: MV range, MV mode (with optional intensity comp),
 * bitplanes and table selection */
1666 v->postproc = get_bits1(gb);
1667 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1668 else v->mvrange = 0;
1669 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1670 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1671 v->range_x = 1 << (v->k_x - 1);
1672 v->range_y = 1 << (v->k_y - 1);
1674 if (v->pq < 5) v->tt_index = 0;
1675 else if(v->pq < 13) v->tt_index = 1;
1676 else v->tt_index = 2;
1678 lowquant = (v->pq > 12) ? 0 : 1;
1679 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1680 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1682 int scale, shift, i;
1683 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1684 v->lumscale = get_bits(gb, 6);
1685 v->lumshift = get_bits(gb, 6);
1686 /* fill lookup tables for intensity compensation */
/* LUMSHIFT > 31 is treated as a negative shift (wraps at 64) */
1689 shift = (255 - v->lumshift * 2) << 6;
1690 if(v->lumshift > 31)
1693 scale = v->lumscale + 32;
1694 if(v->lumshift > 31)
1695 shift = (v->lumshift - 64) << 6;
1697 shift = v->lumshift << 6;
1699 for(i = 0; i < 256; i++) {
1700 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1701 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
/* derive sub-pel precision flags from the (possibly two-level) MV mode */
1704 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1705 v->s.quarter_sample = 0;
1706 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1707 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1708 v->s.quarter_sample = 0;
1710 v->s.quarter_sample = 1;
1712 v->s.quarter_sample = 1;
1713 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1715 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1716 v->mv_mode2 == MV_PMODE_MIXED_MV)
1717 || v->mv_mode == MV_PMODE_MIXED_MV)
1719 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1720 if (status < 0) return -1;
1721 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1722 "Imode: %i, Invert: %i\n", status>>1, status&1);
1724 v->mv_type_is_raw = 0;
1725 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1727 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1728 if (status < 0) return -1;
1729 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1730 "Imode: %i, Invert: %i\n", status>>1, status&1);
1732 /* Hopefully this is correct for P frames */
1733 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1734 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1737 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1738 vop_dquant_decoding(v);
1741 v->ttfrm = 0; //FIXME Is that so ?
1744 v->ttmbf = get_bits(gb, 1);
1747 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* B-frame path: single MV mode bit, direct-MB and skip bitplanes */
1756 v->postproc = get_bits1(gb);
1757 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1758 else v->mvrange = 0;
1759 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1760 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1761 v->range_x = 1 << (v->k_x - 1);
1762 v->range_y = 1 << (v->k_y - 1);
1764 if (v->pq < 5) v->tt_index = 0;
1765 else if(v->pq < 13) v->tt_index = 1;
1766 else v->tt_index = 2;
1768 lowquant = (v->pq > 12) ? 0 : 1;
1769 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1770 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1771 v->s.mspel = v->s.quarter_sample;
1773 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1774 if (status < 0) return -1;
1775 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1776 "Imode: %i, Invert: %i\n", status>>1, status&1);
1777 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1778 if (status < 0) return -1;
1779 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1780 "Imode: %i, Invert: %i\n", status>>1, status&1);
1782 v->s.mv_table_index = get_bits(gb, 2);
1783 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1787 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1788 vop_dquant_decoding(v);
1794 v->ttmbf = get_bits(gb, 1);
1797 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* AC/DC coefficient coding-set selection (common tail) */
1807 v->c_ac_table_index = decode012(gb);
1808 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1810 v->y_ac_table_index = decode012(gb);
1813 v->s.dc_table_index = get_bits(gb, 1);
/* unlike Simple/Main, AP I-frames can carry VOPDQUANT */
1814 if (v->s.pict_type == I_TYPE && v->dquant) {
1815 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1816 vop_dquant_decoding(v);
/* BI pictures are decoded as B internally */
1820 if(v->s.pict_type == BI_TYPE) {
1821 v->s.pict_type = B_TYPE;
1827 /***********************************************************************/
1829 * @defgroup block VC-1 Block-level functions
1830 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1836 * @brief Get macroblock-level quantizer scale
 *
 * Sets the local variable `mquant` according to v->dqprofile:
 * ALL_MBS with a per-MB flag chooses between altpq and pq (or reads a
 * MQDIFF escape), while the edge profiles (SINGLE_EDGE, DOUBLE_EDGES,
 * FOUR_EDGES) apply altpq only to macroblocks on the selected picture
 * edges. Expects `v`, `gb`, `s`, `mquant`, `mqdiff` and `edges` to be in
 * scope at the expansion site.
 * NOTE(review): implemented as a macro because it assigns several caller
 * locals; do not convert to a function without restructuring callers.
1838 #define GET_MQUANT() \
1842 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1846 mquant = (get_bits(gb, 1)) ? v->altpq : v->pq; \
1850 mqdiff = get_bits(gb, 3); \
1851 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1852 else mquant = get_bits(gb, 5); \
1855 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1856 edges = 1 << v->dqsbedge; \
1857 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1858 edges = (3 << v->dqsbedge) % 15; \
1859 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
1861 if((edges&1) && !s->mb_x) \
1862 mquant = v->altpq; \
1863 if((edges&2) && s->first_slice_line) \
1864 mquant = v->altpq; \
1865 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1866 mquant = v->altpq; \
1867 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1868 mquant = v->altpq; \
1872 * @def GET_MVDATA(_dmv_x, _dmv_y)
1873 * @brief Get MV differentials
1874 * @see MVDATA decoding from 8.3.5.2, p(1)20
1875 * @param _dmv_x Horizontal differential for decoded MV
1876 * @param _dmv_y Vertical differential for decoded MV
 *
 * Decodes one MVDATA element: a joint VLC gives `index`, which encodes
 * whether the MB has coefficients and how the differentials follow.
 * index == 0: zero MV diff; index == 35: raw k_x/k_y-bit escape values;
 * otherwise the diff is reconstructed from size_table/offset_table with
 * a branch-free sign decode (sign = 0 - (val&1); xor/subtract restores
 * the signed value). Expects `gb`, `s`, `v`, `index`, `index1`, `val`,
 * `sign`, `size_table`, `offset_table` and `mb_has_coeffs` in scope at
 * the expansion site.
1878 #define GET_MVDATA(_dmv_x, _dmv_y) \
1879 index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
1880 VC1_MV_DIFF_VLC_BITS, 2); \
1883 mb_has_coeffs = 1; \
1886 else mb_has_coeffs = 0; \
1888 if (!index) { _dmv_x = _dmv_y = 0; } \
1889 else if (index == 35) \
1891 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1892 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1894 else if (index == 36) \
1903 if (!s->quarter_sample && index1 == 5) val = 1; \
1905 if(size_table[index1] - val > 0) \
1906 val = get_bits(gb, size_table[index1] - val); \
1908 sign = 0 - (val&1); \
1909 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1912 if (!s->quarter_sample && index1 == 5) val = 1; \
1914 if(size_table[index1] - val > 0) \
1915 val = get_bits(gb, size_table[index1] - val); \
1917 sign = 0 - (val&1); \
1918 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1921 /** Predict and set motion vector
 *
 * Median-predicts the MV for luma block n from neighbours A (above),
 * B (above-right/left) and C (left), pulls the result back into the
 * picture area, optionally applies hybrid prediction, then stores the
 * final MV (with signed-modulus wrapping) into s->mv and motion_val.
 * @param n      luma block index (0..3); in 1-MV mode block 0 stands for
 *               the whole MB and the result is duplicated to all 4 blocks
 * @param mv1    non-zero for 1-MV mode
 * @param r_x,r_y signed MV range (v->range_x / v->range_y)
 * @param is_intra per-block intra flags used by hybrid prediction
1923 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
1925 int xy, wrap, off = 0;
1930 /* scale MV difference to be quad-pel */
1931 dmv_x <<= 1 - s->quarter_sample;
1932 dmv_y <<= 1 - s->quarter_sample;
1934 wrap = s->b8_stride;
1935 xy = s->block_index[n];
/* intra case (reached via code not shown here): zero MVs are stored */
1938 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1939 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1940 if(mv1) { /* duplicate motion data for 1-MV block */
1941 s->current_picture.motion_val[0][xy + 1][0] = 0;
1942 s->current_picture.motion_val[0][xy + 1][1] = 0;
1943 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1944 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1945 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1946 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
/* gather the three candidate predictors */
1951 C = s->current_picture.motion_val[0][xy - 1];
1952 A = s->current_picture.motion_val[0][xy - wrap];
1954 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1956 //in 4-MV mode different blocks have different B predictor position
1959 off = (s->mb_x > 0) ? -1 : 1;
1962 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1971 B = s->current_picture.motion_val[0][xy - wrap + off];
1973 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
1974 if(s->mb_width == 1) {
1978 px = mid_pred(A[0], B[0], C[0]);
1979 py = mid_pred(A[1], B[1], C[1]);
1981 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
1987 /* Pullback MV as specified in 8.3.5.3.4 */
/* positions in quarter-pel units; -60 bound for quarter-sample,
 * -28 otherwise */
1990 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
1991 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
1992 X = (s->mb_width << 6) - 4;
1993 Y = (s->mb_height << 6) - 4;
1995 if(qx + px < -60) px = -60 - qx;
1996 if(qy + py < -60) py = -60 - qy;
1998 if(qx + px < -28) px = -28 - qx;
1999 if(qy + py < -28) py = -28 - qy;
2001 if(qx + px > X) px = X - qx;
2002 if(qy + py > Y) py = Y - qy;
2004 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2005 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
2006 if(is_intra[xy - wrap])
2007 sum = FFABS(px) + FFABS(py);
2009 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
/* explicit hybrid-pred bit in the bitstream selects A or C */
2011 if(get_bits1(&s->gb)) {
2019 if(is_intra[xy - 1])
2020 sum = FFABS(px) + FFABS(py);
2022 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2024 if(get_bits1(&s->gb)) {
2034 /* store MV using signed modulus of MV range defined in 4.11 */
2035 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
2036 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
2037 if(mv1) { /* duplicate motion data for 1-MV block */
2038 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
2039 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
2040 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
2041 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
2042 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
2043 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
2047 /** Motion compensation for direct or interpolated blocks in B-frames
 *
 * Averages the backward (next-picture) prediction into s->dest using the
 * backward MV s->mv[1][0]. Handles edge emulation, grayscale mode and
 * range-reduction rescaling of the reference samples. Luma uses the
 * avg_pixels tables (plus an mspel path not fully visible here); chroma
 * always uses the H.264 bilinear averager.
2049 static void vc1_interp_mc(VC1Context *v)
2051 MpegEncContext *s = &v->s;
2052 DSPContext *dsp = &v->s.dsp;
2053 uint8_t *srcY, *srcU, *srcV;
2054 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
2056 if(!v->s.next_picture.data[0])return;
/* backward MV in quarter-pel; chroma MV derived by the VC-1 rounding
 * rule, then forced to half-pel parity */
2058 mx = s->mv[1][0][0];
2059 my = s->mv[1][0][1];
2060 uvmx = (mx + ((mx & 3) == 3)) >> 1;
2061 uvmy = (my + ((my & 3) == 3)) >> 1;
2063 uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
2064 uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
2066 srcY = s->next_picture.data[0];
2067 srcU = s->next_picture.data[1];
2068 srcV = s->next_picture.data[2];
2070 src_x = s->mb_x * 16 + (mx >> 2);
2071 src_y = s->mb_y * 16 + (my >> 2);
2072 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2073 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2075 src_x = clip( src_x, -16, s->mb_width * 16);
2076 src_y = clip( src_y, -16, s->mb_height * 16);
2077 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
2078 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
2080 srcY += src_y * s->linesize + src_x;
2081 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2082 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2084 /* for grayscale we should not try to read from unknown area */
2085 if(s->flags & CODEC_FLAG_GRAY) {
2086 srcU = s->edge_emu_buffer + 18 * s->linesize;
2087 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* emulate picture edges when the (possibly mspel-extended) source block
 * reaches outside the decoded area */
2091 || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
2092 || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
2093 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
2095 srcY -= s->mspel * (1 + s->linesize);
2096 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
2097 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
2098 srcY = s->edge_emu_buffer;
2099 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
2100 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2101 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
2102 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2105 /* if we deal with range reduction we need to scale source blocks */
2106 if(v->rangeredfrm) {
2108 uint8_t *src, *src2;
/* halve the dynamic range around 128 (RANGEREDFRM), luma then chroma */
2111 for(j = 0; j < 17 + s->mspel*2; j++) {
2112 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
2115 src = srcU; src2 = srcV;
2116 for(j = 0; j < 9; j++) {
2117 for(i = 0; i < 9; i++) {
2118 src[i] = ((src[i] - 128) >> 1) + 128;
2119 src2[i] = ((src2[i] - 128) >> 1) + 128;
2121 src += s->uvlinesize;
2122 src2 += s->uvlinesize;
2125 srcY += s->mspel * (1 + s->linesize);
/* half-pel luma averaging (non-mspel path) */
2130 dxy = ((my & 1) << 1) | (mx & 1);
2132 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
2134 if(s->flags & CODEC_FLAG_GRAY) return;
2135 /* Chroma MC always uses qpel blilinear */
2136 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
2139 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
2140 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Scale a co-located MV component by BFRACTION for B-frame direct mode.
 * @param value MV component from the next picture
 * @param bfrac BFRACTION numerator (denominator is B_FRACTION_DEN)
 * @param inv   non-zero to scale by (1 - bfrac) for the backward MV
 * @param qs    quarter_sample flag; the half-pel path rounds to even
 * The B_FRACTION_DEN==256 branch replaces the division with shifts.
 */
2143 static always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2147 #if B_FRACTION_DEN==256
2151 return 2 * ((value * n + 255) >> 9);
2152 return (value * n + 128) >> 8;
2155 n -= B_FRACTION_DEN;
2157 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2158 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2162 /** Reconstruct motion vector for B-frame and do motion compensation
 *
 * Dispatches to forward/backward/interpolated MC. While intensity
 * compensation is active (v->use_ic), v->mv_mode is temporarily swapped
 * to MV_PMODE_INTENSITY_COMP around each MC call and restored afterwards
 * from v->mv_mode2.
 * @param direct non-zero for direct-mode MBs
 * @param mode   BMV_TYPE_* motion type for non-direct MBs
2164 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
2167 v->mv_mode2 = v->mv_mode;
2168 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2173 if(v->use_ic) v->mv_mode = v->mv_mode2;
2176 if(mode == BMV_TYPE_INTERPOLATED) {
2179 if(v->use_ic) v->mv_mode = v->mv_mode2;
/* forward/backward single-reference MC; IC only applies to the
 * backward (next-picture) reference here */
2183 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
2184 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2185 if(v->use_ic) v->mv_mode = v->mv_mode2;
/**
 * Predict and store both B-frame motion vectors for the current MB.
 * Direct mode derives forward/backward MVs by scaling the co-located MV
 * from the next picture with BFRACTION (scale_mv); otherwise each
 * direction is median-predicted from neighbours A/B/C, pulled back into
 * the picture, and wrapped with the signed-modulus range.
 * @param direct non-zero for direct-mode prediction
 * @param mvtype BMV_TYPE_* selecting which direction(s) to predict
 */
2188 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
2190 MpegEncContext *s = &v->s;
2191 int xy, wrap, off = 0;
2196 const uint8_t *is_intra = v->mb_type[0];
2200 /* scale MV difference to be quad-pel */
2201 dmv_x[0] <<= 1 - s->quarter_sample;
2202 dmv_y[0] <<= 1 - s->quarter_sample;
2203 dmv_x[1] <<= 1 - s->quarter_sample;
2204 dmv_y[1] <<= 1 - s->quarter_sample;
2206 wrap = s->b8_stride;
2207 xy = s->block_index[0];
/* intra MB: store zero MVs in both directions */
2210 s->current_picture.motion_val[0][xy][0] =
2211 s->current_picture.motion_val[0][xy][1] =
2212 s->current_picture.motion_val[1][xy][0] =
2213 s->current_picture.motion_val[1][xy][1] = 0;
/* direct mode: scale the co-located backward-reference MV by
 * BFRACTION (forward) and 1-BFRACTION (backward) */
2216 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2217 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2218 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2219 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2221 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2222 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2223 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2224 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
/* forward MV prediction (list 0) */
2228 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2229 C = s->current_picture.motion_val[0][xy - 2];
2230 A = s->current_picture.motion_val[0][xy - wrap*2];
2231 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2232 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
2234 if(!s->first_slice_line) { // predictor A is not out of bounds
2235 if(s->mb_width == 1) {
2239 px = mid_pred(A[0], B[0], C[0]);
2240 py = mid_pred(A[1], B[1], C[1]);
2242 } else if(s->mb_x) { // predictor C is not out of bounds
2248 /* Pullback MV as specified in 8.3.5.3.4 */
/* half-pel units for pre-advanced profiles, quarter-pel for advanced */
2251 if(v->profile < PROFILE_ADVANCED) {
2252 qx = (s->mb_x << 5);
2253 qy = (s->mb_y << 5);
2254 X = (s->mb_width << 5) - 4;
2255 Y = (s->mb_height << 5) - 4;
2256 if(qx + px < -28) px = -28 - qx;
2257 if(qy + py < -28) py = -28 - qy;
2258 if(qx + px > X) px = X - qx;
2259 if(qy + py > Y) py = Y - qy;
2261 qx = (s->mb_x << 6);
2262 qy = (s->mb_y << 6);
2263 X = (s->mb_width << 6) - 4;
2264 Y = (s->mb_height << 6) - 4;
2265 if(qx + px < -60) px = -60 - qx;
2266 if(qy + py < -60) py = -60 - qy;
2267 if(qx + px > X) px = X - qx;
2268 if(qy + py > Y) py = Y - qy;
2271 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): hybrid prediction is deliberately disabled here — the
 * leading `0 &&` short-circuits the whole branch, so no hybrid bit is
 * read for B-frames */
2272 if(0 && !s->first_slice_line && s->mb_x) {
2273 if(is_intra[xy - wrap])
2274 sum = FFABS(px) + FFABS(py);
2276 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2278 if(get_bits1(&s->gb)) {
2286 if(is_intra[xy - 2])
2287 sum = FFABS(px) + FFABS(py);
2289 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2291 if(get_bits1(&s->gb)) {
2301 /* store MV using signed modulus of MV range defined in 4.11 */
2302 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2303 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* backward MV prediction (list 1), mirroring the forward logic */
2305 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2306 C = s->current_picture.motion_val[1][xy - 2];
2307 A = s->current_picture.motion_val[1][xy - wrap*2];
2308 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2309 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
2311 if(!s->first_slice_line) { // predictor A is not out of bounds
2312 if(s->mb_width == 1) {
2316 px = mid_pred(A[0], B[0], C[0]);
2317 py = mid_pred(A[1], B[1], C[1]);
2319 } else if(s->mb_x) { // predictor C is not out of bounds
2325 /* Pullback MV as specified in 8.3.5.3.4 */
2328 if(v->profile < PROFILE_ADVANCED) {
2329 qx = (s->mb_x << 5);
2330 qy = (s->mb_y << 5);
2331 X = (s->mb_width << 5) - 4;
2332 Y = (s->mb_height << 5) - 4;
2333 if(qx + px < -28) px = -28 - qx;
2334 if(qy + py < -28) py = -28 - qy;
2335 if(qx + px > X) px = X - qx;
2336 if(qy + py > Y) py = Y - qy;
2338 qx = (s->mb_x << 6);
2339 qy = (s->mb_y << 6);
2340 X = (s->mb_width << 6) - 4;
2341 Y = (s->mb_height << 6) - 4;
2342 if(qx + px < -60) px = -60 - qx;
2343 if(qy + py < -60) py = -60 - qy;
2344 if(qx + px > X) px = X - qx;
2345 if(qy + py > Y) py = Y - qy;
2348 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): disabled as in the forward path (leading `0 &&`) */
2349 if(0 && !s->first_slice_line && s->mb_x) {
2350 if(is_intra[xy - wrap])
2351 sum = FFABS(px) + FFABS(py);
2353 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2355 if(get_bits1(&s->gb)) {
2363 if(is_intra[xy - 2])
2364 sum = FFABS(px) + FFABS(py);
2366 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2368 if(get_bits1(&s->gb)) {
2378 /* store MV using signed modulus of MV range defined in 4.11 */
2380 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2381 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2383 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2384 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2385 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2386 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2389 /** Get predicted DC value for I-frames only
2390 * prediction dir: left=0, top=1
2391 * @param s MpegEncContext
2392 * @param[in] n block index in the current MB
2393 * @param dc_val_ptr Pointer to DC predictor
2394 * @param dir_ptr Prediction direction for use in AC prediction
 *
 * Chooses between the left (c) and top (a) stored DC values using the
 * gradient rule |a-b| <= |b-c| (b is the top-left). Out-of-picture
 * neighbours are replaced by dcpred[scale] (≈1024/scale) for low-PQ or
 * non-overlap frames, by 0 otherwise.
2396 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2397 int16_t **dc_val_ptr, int *dir_ptr)
2399 int a, b, c, wrap, pred, scale;
/* dcpred[k] ~ 1024/k; index 0 is a -1 sentinel (stored as 65535 in
 * uint16_t) — presumably scale is never 0 here; TODO confirm */
2401 static const uint16_t dcpred[32] = {
2402 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2403 114, 102, 93, 85, 79, 73, 68, 64,
2404 60, 57, 54, 51, 49, 47, 45, 43,
2405 41, 39, 38, 37, 35, 34, 33
2408 /* find prediction - wmv3_dc_scale always used here in fact */
2409 if (n < 4) scale = s->y_dc_scale;
2410 else scale = s->c_dc_scale;
2412 wrap = s->block_wrap[n];
2413 dc_val= s->dc_val[0] + s->block_index[n];
/* b = top-left, a = top neighbour DC */
2419 b = dc_val[ - 1 - wrap];
2420 a = dc_val[ - wrap];
2422 if (pq < 9 || !overlap)
2424 /* Set outer values */
2425 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2426 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2430 /* Set outer values */
2431 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2432 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
/* gradient decision: pick top predictor when horizontal gradient is
 * smaller or equal */
2435 if (abs(a - b) <= abs(b - c)) {
2443 /* update predictor */
2444 *dc_val_ptr = &dc_val[0];
2449 /** Get predicted DC value
2450 * prediction dir: left=0, top=1
2451 * @param s MpegEncContext
2452 * @param[in] n block index in the current MB
2453 * @param dc_val_ptr Pointer to DC predictor
2454 * @param dir_ptr Prediction direction for use in AC prediction
 *
 * P-frame variant: neighbour DC values are first rescaled to the current
 * MB's quantizer (via vc1_dqscale fixed-point reciprocal, Q18 rounding)
 * when the neighbour MB used a different qscale; availability flags then
 * select between gradient-based, top-only or left-only prediction.
 * @param a_avail non-zero when the top neighbour block is available
 * @param c_avail non-zero when the left neighbour block is available
2456 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2457 int a_avail, int c_avail,
2458 int16_t **dc_val_ptr, int *dir_ptr)
2460 int a, b, c, wrap, pred, scale;
2462 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2465 /* find prediction - wmv3_dc_scale always used here in fact */
2466 if (n < 4) scale = s->y_dc_scale;
2467 else scale = s->c_dc_scale;
2469 wrap = s->block_wrap[n];
2470 dc_val= s->dc_val[0] + s->block_index[n];
/* b = top-left, a = top neighbour DC */
2476 b = dc_val[ - 1 - wrap];
2477 a = dc_val[ - wrap];
2478 /* scale predictors if needed */
2479 q1 = s->current_picture.qscale_table[mb_pos];
/* left neighbour is in another MB only for blocks 0 and 2 */
2480 if(c_avail && (n!= 1 && n!=3)) {
2481 q2 = s->current_picture.qscale_table[mb_pos - 1];
2483 c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2485 if(a_avail && (n!= 2 && n!=3)) {
2486 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2488 a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2490 if(a_avail && c_avail && (n!=3)) {
2493 if(n != 2) off -= s->mb_stride;
2494 q2 = s->current_picture.qscale_table[off];
2496 b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* choose predictor: gradient rule when both neighbours exist,
 * otherwise whichever single neighbour is available */
2499 if(a_avail && c_avail) {
2500 if(abs(a - b) <= abs(b - c)) {
2507 } else if(a_avail) {
2510 } else if(c_avail) {
2518 /* update predictor */
2519 *dc_val_ptr = &dc_val[0];
2525 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2526 * @see 7.1.4, p91 and 8.1.1.7, p104
/** Predict the coded-block flag for block n from its three decoded
 * neighbors (same scheme as DC direction selection); stores the address
 * of this block's flag so the caller can update it. */
2530 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
2532 int xy, wrap, pred, a, b, c;
2534 xy = s->block_index[n];
2535 wrap = s->b8_stride;
/* a = left, b = top-left, c = top coded-block flags. */
2540 a = s->coded_block[xy - 1 ];
2541 b = s->coded_block[xy - 1 - wrap];
2542 c = s->coded_block[xy - wrap];
/* Hand back the slot for the current block so the caller can store the
 * actual decoded flag for future predictions. */
2551 *coded_block_ptr = &s->coded_block[xy];
2557 * Decode one AC coefficient
2558 * @param v The VC1 context
2559 * @param last Last coefficient
2560 * @param skip How much zero coefficients to skip
2561 * @param value Decoded AC coefficient value
2562 * @param codingset index of the (run,level) VLC coding set to use
2564 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
2566 GetBitContext *gb = &v->s.gb;
2567 int index, escape, run = 0, level = 0, lst = 0;
/* Non-escape index: direct (run, level, last) lookup from the tables. */
2569 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2570 if (index != vc1_ac_sizes[codingset] - 1) {
2571 run = vc1_index_decode_table[codingset][index][0];
2572 level = vc1_index_decode_table[codingset][index][1];
2573 lst = index >= vc1_last_decode_table[codingset];
/* Escape: decode210 selects one of three escape modes. */
2577 escape = decode210(gb);
/* Escape modes 1/2: re-decode an index, then bump level (mode 1) or
 * run (mode 2) by the delta tables. */
2579 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2580 run = vc1_index_decode_table[codingset][index][0];
2581 level = vc1_index_decode_table[codingset][index][1];
2582 lst = index >= vc1_last_decode_table[codingset];
2585 level += vc1_last_delta_level_table[codingset][run];
2587 level += vc1_delta_level_table[codingset][run];
2590 run += vc1_last_delta_run_table[codingset][level] + 1;
2592 run += vc1_delta_run_table[codingset][level] + 1;
/* Escape mode 3: fully explicit coding. Field widths are decided once
 * per frame (cached in esc3_level_length / esc3_run_length). */
2598 lst = get_bits(gb, 1);
2599 if(v->s.esc3_level_length == 0) {
2600 if(v->pq < 8 || v->dquantfrm) { // table 59
2601 v->s.esc3_level_length = get_bits(gb, 3);
2602 if(!v->s.esc3_level_length)
2603 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2605 v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
2607 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2609 run = get_bits(gb, v->s.esc3_run_length);
2610 sign = get_bits(gb, 1);
2611 level = get_bits(gb, v->s.esc3_level_length);
2622 /** Decode intra block in intra frames - should be faster than decode_intra_block
2623 * @param v VC1Context
2624 * @param block block to decode
2625 * @param coded are AC coeffs present or not
2626 * @param codingset set of VLC to decode data
2627 * @return 0 on success (negative on illegal DC VLC)
2628 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
2630 GetBitContext *gb = &v->s.gb;
2631 MpegEncContext *s = &v->s;
2632 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2635 int16_t *ac_val, *ac_val2;
2638 /* Get DC differential */
/* Luma (n<4) and chroma blocks use separate MSMPEG4-style DC VLC tables. */
2640 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2642 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2645 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape value: DC differential coded with a fixed-length field whose
 * width depends on the picture quantizer. */
2650 if (dcdiff == 119 /* ESC index value */)
2652 /* TODO: Optimize */
2653 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2654 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2655 else dcdiff = get_bits(gb, 8);
/* Low quantizers carry extra precision bits for the differential. */
2660 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2661 else if (v->pq == 2)
2662 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
/* Sign bit for the DC differential. */
2664 if (get_bits(gb, 1))
/* Add the spatial DC prediction; dc_pred_dir is set for AC prediction. */
2669 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2672 /* Store the quantized DC coeff, used for prediction */
2674 block[0] = dcdiff * s->y_dc_scale;
2676 block[0] = dcdiff * s->c_dc_scale;
/* --- coded path: decode AC run/level pairs --- */
2689 int last = 0, skip, value;
2690 const int8_t *zz_table;
2694 scale = v->pq * 2 + v->halfpq;
/* Scan pattern follows the AC prediction direction (left -> horizontal
 * scan, top -> vertical scan, none -> normal zigzag). */
2698 zz_table = vc1_horizontal_zz;
2700 zz_table = vc1_vertical_zz;
2702 zz_table = vc1_normal_zz;
2704 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
/* Point ac_val at the predictor row/column of the chosen neighbor. */
2706 if(dc_pred_dir) //left
2709 ac_val -= 16 * s->block_wrap[n];
/* Decode coefficients until the 'last' flag is seen. */
2712 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2716 block[zz_table[i++]] = value;
2719 /* apply AC prediction if needed */
/* Left prediction adds to the first column; top prediction to the first
 * row (ac_val layout: [0..7] column, [8..15] row). */
2721 if(dc_pred_dir) { //left
2722 for(k = 1; k < 8; k++)
2723 block[k << 3] += ac_val[k];
2725 for(k = 1; k < 8; k++)
2726 block[k] += ac_val[k + 8];
2729 /* save AC coeffs for further prediction */
2730 for(k = 1; k < 8; k++) {
2731 ac_val2[k] = block[k << 3];
2732 ac_val2[k + 8] = block[k];
2735 /* scale AC coeffs */
/* Inverse-quantize; non-uniform quantizer adds a pq-sized dead-zone step. */
2736 for(k = 1; k < 64; k++)
2740 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2743 if(s->ac_pred) i = 63;
/* --- uncoded path: block has only DC + possibly predicted AC --- */
2749 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2752 scale = v->pq * 2 + v->halfpq;
2753 memset(ac_val2, 0, 16 * 2);
/* Copy the neighbor's saved AC row/column so it propagates further. */
2754 if(dc_pred_dir) {//left
2757 memcpy(ac_val2, ac_val, 8 * 2);
2759 ac_val -= 16 * s->block_wrap[n];
2761 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2764 /* apply AC prediction if needed */
2766 if(dc_pred_dir) { //left
2767 for(k = 1; k < 8; k++) {
2768 block[k << 3] = ac_val[k] * scale;
2769 if(!v->pquantizer && block[k << 3])
2770 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2773 for(k = 1; k < 8; k++) {
2774 block[k] = ac_val[k + 8] * scale;
2775 if(!v->pquantizer && block[k])
2776 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2782 s->block_last_index[n] = i;
2787 /** Decode intra block in intra frames - should be faster than decode_intra_block
2788 * @param v VC1Context
2789 * @param block block to decode
2790 * @param coded are AC coeffs present or not
2791 * @param codingset set of VLC to decode data
2792 * @param mquant quantizer for this macroblock (advanced profile: may vary per MB)
2793 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
2795 GetBitContext *gb = &v->s.gb;
2796 MpegEncContext *s = &v->s;
2797 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2800 int16_t *ac_val, *ac_val2;
/* Neighbor availability computed by the caller (slice/MB boundaries). */
2802 int a_avail = v->a_avail, c_avail = v->c_avail;
2803 int use_pred = s->ac_pred;
2806 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2808 /* Get DC differential */
2810 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2812 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2815 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape: fixed-length DC differential, width chosen by mquant (note:
 * mquant here, not v->pq as in the simple-profile variant). */
2820 if (dcdiff == 119 /* ESC index value */)
2822 /* TODO: Optimize */
2823 if (mquant == 1) dcdiff = get_bits(gb, 10);
2824 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2825 else dcdiff = get_bits(gb, 8);
2830 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2831 else if (mquant == 2)
2832 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2834 if (get_bits(gb, 1))
/* DC prediction with per-MB quantizer rescaling of the neighbors. */
2839 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2842 /* Store the quantized DC coeff, used for prediction */
2844 block[0] = dcdiff * s->y_dc_scale;
2846 block[0] = dcdiff * s->c_dc_scale;
2855 /* check if AC is needed at all and adjust direction if needed */
2856 if(!a_avail) dc_pred_dir = 1;
2857 if(!c_avail) dc_pred_dir = 0;
2858 if(!a_avail && !c_avail) use_pred = 0;
2859 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2862 scale = mquant * 2 + v->halfpq;
2864 if(dc_pred_dir) //left
2867 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer, q2 = predictor MB's quantizer; inner luma
 * blocks (1..3) always predict from inside the same MB, so q2 = q1. */
2869 q1 = s->current_picture.qscale_table[mb_pos];
2870 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
2871 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2872 if(n && n<4) q2 = q1;
2875 int last = 0, skip, value;
2876 const int8_t *zz_table;
/* Scan pattern depends on prediction direction. */
2881 zz_table = vc1_horizontal_zz;
2883 zz_table = vc1_vertical_zz;
2885 zz_table = vc1_normal_zz;
2888 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2892 block[zz_table[i++]] = value;
2895 /* apply AC prediction if needed */
2897 /* scale predictors if needed*/
/* Different quantizers: convert both steps to doubled-quant domain and
 * rescale saved AC predictors via the fixed-point reciprocal table. */
2899 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2900 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2902 if(dc_pred_dir) { //left
2903 for(k = 1; k < 8; k++)
2904 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2906 for(k = 1; k < 8; k++)
2907 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
/* Same quantizer: predictors add directly. */
2910 if(dc_pred_dir) { //left
2911 for(k = 1; k < 8; k++)
2912 block[k << 3] += ac_val[k];
2914 for(k = 1; k < 8; k++)
2915 block[k] += ac_val[k + 8];
2919 /* save AC coeffs for further prediction */
2920 for(k = 1; k < 8; k++) {
2921 ac_val2[k] = block[k << 3];
2922 ac_val2[k + 8] = block[k];
2925 /* scale AC coeffs */
2926 for(k = 1; k < 64; k++)
2930 block[k] += (block[k] < 0) ? -mquant : mquant;
2933 if(use_pred) i = 63;
2934 } else { // no AC coeffs
2937 memset(ac_val2, 0, 16 * 2);
/* Uncoded block: propagate the (possibly rescaled) neighbor predictors. */
2938 if(dc_pred_dir) {//left
2940 memcpy(ac_val2, ac_val, 8 * 2);
2942 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2943 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2944 for(k = 1; k < 8; k++)
2945 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2950 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2952 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2953 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2954 for(k = 1; k < 8; k++)
2955 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2960 /* apply AC prediction if needed */
2962 if(dc_pred_dir) { //left
2963 for(k = 1; k < 8; k++) {
2964 block[k << 3] = ac_val2[k] * scale;
2965 if(!v->pquantizer && block[k << 3])
2966 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
2969 for(k = 1; k < 8; k++) {
2970 block[k] = ac_val2[k + 8] * scale;
2971 if(!v->pquantizer && block[k])
2972 block[k] += (block[k] < 0) ? -mquant : mquant;
2978 s->block_last_index[n] = i;
2983 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2984 * @param v VC1Context
2985 * @param block block to decode
2986 * @param coded are AC coeffs present or not
2987 * @param mquant block quantizer
2988 * @param codingset set of VLC to decode data
2990 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
2992 GetBitContext *gb = &v->s.gb;
2993 MpegEncContext *s = &v->s;
2994 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2997 int16_t *ac_val, *ac_val2;
2999 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3000 int a_avail = v->a_avail, c_avail = v->c_avail;
3001 int use_pred = s->ac_pred;
3005 /* XXX: Guard against dumb values of mquant */
/* NOTE(review): clamps to [0,31] but the low clamp maps <1 to 0, not 1 —
 * index 0 into the dc_scale tables may be intentional; verify. */
3006 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
3008 /* Set DC scale - y and c use the same */
3009 s->y_dc_scale = s->y_dc_scale_table[mquant];
3010 s->c_dc_scale = s->c_dc_scale_table[mquant];
3012 /* Get DC differential */
3014 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3016 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3019 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape: fixed-length DC differential sized by mquant. */
3024 if (dcdiff == 119 /* ESC index value */)
3026 /* TODO: Optimize */
3027 if (mquant == 1) dcdiff = get_bits(gb, 10);
3028 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3029 else dcdiff = get_bits(gb, 8);
3034 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
3035 else if (mquant == 2)
3036 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
3038 if (get_bits(gb, 1))
3043 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3046 /* Store the quantized DC coeff, used for prediction */
3049 block[0] = dcdiff * s->y_dc_scale;
3051 block[0] = dcdiff * s->c_dc_scale;
3060 /* check if AC is needed at all and adjust direction if needed */
3061 if(!a_avail) dc_pred_dir = 1;
3062 if(!c_avail) dc_pred_dir = 0;
3063 if(!a_avail && !c_avail) use_pred = 0;
3064 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3067 scale = mquant * 2 + v->halfpq;
3069 if(dc_pred_dir) //left
3072 ac_val -= 16 * s->block_wrap[n];
/* q1/q2: current vs predictor MB quantizers; inner luma blocks always
 * predict from within the current MB. */
3074 q1 = s->current_picture.qscale_table[mb_pos];
3075 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
3076 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3077 if(n && n<4) q2 = q1;
3080 int last = 0, skip, value;
3081 const int8_t *zz_table;
/* Unlike the I-frame variants, intra blocks in inter frames always use
 * the normal 8x8 zigzag (no direction-dependent scan visible here). */
3084 zz_table = vc1_simple_progressive_8x8_zz;
3087 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3091 block[zz_table[i++]] = value;
3094 /* apply AC prediction if needed */
3096 /* scale predictors if needed*/
3098 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3099 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3101 if(dc_pred_dir) { //left
3102 for(k = 1; k < 8; k++)
3103 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3105 for(k = 1; k < 8; k++)
3106 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3109 if(dc_pred_dir) { //left
3110 for(k = 1; k < 8; k++)
3111 block[k << 3] += ac_val[k];
3113 for(k = 1; k < 8; k++)
3114 block[k] += ac_val[k + 8];
3118 /* save AC coeffs for further prediction */
3119 for(k = 1; k < 8; k++) {
3120 ac_val2[k] = block[k << 3];
3121 ac_val2[k + 8] = block[k];
3124 /* scale AC coeffs */
3125 for(k = 1; k < 64; k++)
3129 block[k] += (block[k] < 0) ? -mquant : mquant;
3132 if(use_pred) i = 63;
3133 } else { // no AC coeffs
3136 memset(ac_val2, 0, 16 * 2);
3137 if(dc_pred_dir) {//left
3139 memcpy(ac_val2, ac_val, 8 * 2);
3141 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3142 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3143 for(k = 1; k < 8; k++)
3144 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3149 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3151 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3152 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3153 for(k = 1; k < 8; k++)
3154 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3159 /* apply AC prediction if needed */
3161 if(dc_pred_dir) { //left
3162 for(k = 1; k < 8; k++) {
3163 block[k << 3] = ac_val2[k] * scale;
3164 if(!v->pquantizer && block[k << 3])
3165 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3168 for(k = 1; k < 8; k++) {
3169 block[k] = ac_val2[k + 8] * scale;
3170 if(!v->pquantizer && block[k])
3171 block[k] += (block[k] < 0) ? -mquant : mquant;
3177 s->block_last_index[n] = i;
/** Decode a P-frame residual block: read per-block transform type
 * (8x8/8x4/4x8/4x4), decode the coded subblocks and apply the matching
 * inverse transform in place.
 * @param ttmb MB-level transform type, or -1 when signalled per block
 * @param first_block whether this is the first coded block of the MB
 */
3184 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block)
3186 MpegEncContext *s = &v->s;
3187 GetBitContext *gb = &s->gb;
3190 int scale, off, idx, last, skip, value;
3191 int ttblk = ttmb & 7;
/* Per-block transform type signalled in the bitstream. */
3194 ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3196 if(ttblk == TT_4X4) {
3197 subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3199 if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
3200 subblkpat = decode012(gb);
3201 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
3202 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
3203 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
3205 scale = 2 * mquant + v->halfpq;
3207 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3208 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3209 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3212 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3213 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* --- TT_8X8: single transform over the whole block --- */
3221 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3225 idx = vc1_simple_progressive_8x8_zz[i++];
3226 block[idx] = value * scale;
3228 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3230 s->dsp.vc1_inv_trans_8x8(block);
/* --- TT_4X4: up to four 4x4 subblocks, selected by subblkpat bits --- */
3233 for(j = 0; j < 4; j++) {
3234 last = subblkpat & (1 << (3 - j));
3236 off = (j & 1) * 4 + (j & 2) * 16;
3238 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3242 idx = vc1_simple_progressive_4x4_zz[i++];
3243 block[idx + off] = value * scale;
3245 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3247 if(!(subblkpat & (1 << (3 - j))))
3248 s->dsp.vc1_inv_trans_4x4(block, j);
/* --- TT_8X4: two horizontal 8x4 halves --- */
3252 for(j = 0; j < 2; j++) {
3253 last = subblkpat & (1 << (1 - j));
3257 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
/* Advanced profile uses a different 8x4 scan order. */
3261 if(v->profile < PROFILE_ADVANCED)
3262 idx = vc1_simple_progressive_8x4_zz[i++];
3264 idx = vc1_adv_progressive_8x4_zz[i++];
3265 block[idx + off] = value * scale;
3267 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3269 if(!(subblkpat & (1 << (1 - j))))
3270 s->dsp.vc1_inv_trans_8x4(block, j);
/* --- TT_4X8: two vertical 4x8 halves --- */
3274 for(j = 0; j < 2; j++) {
3275 last = subblkpat & (1 << (1 - j));
3279 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3283 if(v->profile < PROFILE_ADVANCED)
3284 idx = vc1_simple_progressive_4x8_zz[i++];
3286 idx = vc1_adv_progressive_4x8_zz[i++];
3287 block[idx + off] = value * scale;
3289 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3291 if(!(subblkpat & (1 << (1 - j))))
3292 s->dsp.vc1_inv_trans_4x8(block, j);
3300 /** Decode one P-frame MB (in Simple/Main profile)
3302 static int vc1_decode_p_mb(VC1Context *v)
3304 MpegEncContext *s = &v->s;
3305 GetBitContext *gb = &s->gb;
3307 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3308 int cbp; /* cbp decoding stuff */
3309 int mqdiff, mquant; /* MB quantization */
3310 int ttmb = v->ttfrm; /* MB Transform type */
3313 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3314 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3315 int mb_has_coeffs = 1; /* last_flag */
3316 int dmv_x, dmv_y; /* Differential MV components */
3317 int index, index1; /* LUT indices */
3318 int val, sign; /* temp values */
3319 int first_block = 1;
3321 int skipped, fourmv;
3323 mquant = v->pq; /* Loosy initialization */
/* MVTYPE and SKIP flags: either raw in the bitstream or from the decoded
 * bitplanes. */
3325 if (v->mv_type_is_raw)
3326 fourmv = get_bits1(gb);
3328 fourmv = v->mv_type_mb_plane[mb_pos];
3330 skipped = get_bits1(gb);
3332 skipped = v->s.mbskip_table[mb_pos];
3334 s->dsp.clear_blocks(s->block[0]);
3336 if (!fourmv) /* 1MV mode */
/* --- 1MV, not skipped --- */
3340 GET_MVDATA(dmv_x, dmv_y);
/* No B-frame second-direction MVs in P frames. */
3343 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3344 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3346 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3347 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3349 /* FIXME Set DC val for inter block ? */
3350 if (s->mb_intra && !mb_has_coeffs)
3353 s->ac_pred = get_bits(gb, 1);
3356 else if (mb_has_coeffs)
3358 if (s->mb_intra) s->ac_pred = get_bits(gb, 1);
3359 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3367 s->current_picture.qscale_table[mb_pos] = mquant;
3369 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3370 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table,
3371 VC1_TTMB_VLC_BITS, 2);
3372 if(!s->mb_intra) vc1_mc_1mv(v, 0);
/* Per-block loop: decode residuals, reconstruct, optionally overlap-filter. */
3376 s->dc_val[0][s->block_index[i]] = 0;
3378 val = ((cbp >> (5 - i)) & 1);
3379 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3380 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3382 /* check if prediction blocks A and C are available */
3383 v->a_avail = v->c_avail = 0;
3384 if(i == 2 || i == 3 || !s->first_slice_line)
3385 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3386 if(i == 1 || i == 3 || s->mb_x)
3387 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3389 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3390 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3391 s->dsp.vc1_inv_trans_8x8(s->block[i]);
/* Range reduction doubles sample amplitude before the +128 bias. */
3392 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3393 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3394 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
/* Overlap smoothing only at high quantizers. */
3395 if(v->pq >= 9 && v->overlap) {
3397 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3399 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3402 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3403 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3405 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3406 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* --- 1MV, skipped: predict MV and motion-compensate, no residual --- */
3413 for(i = 0; i < 6; i++) {
3414 v->mb_type[0][s->block_index[i]] = 0;
3415 s->dc_val[0][s->block_index[i]] = 0;
3417 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3418 s->current_picture.qscale_table[mb_pos] = 0;
3419 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
/* --- 4MV mode --- */
3426 if (!skipped /* unskipped MB */)
3428 int intra_count = 0, coded_inter = 0;
3429 int is_intra[6], is_coded[6];
3431 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* First pass: per-luma-block MV data, intra flags, luma MC. */
3434 val = ((cbp >> (5 - i)) & 1);
3435 s->dc_val[0][s->block_index[i]] = 0;
3442 GET_MVDATA(dmv_x, dmv_y);
3444 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3445 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3446 intra_count += s->mb_intra;
3447 is_intra[i] = s->mb_intra;
3448 is_coded[i] = mb_has_coeffs;
/* Chroma blocks are intra when at least 3 of 4 luma blocks are intra. */
3451 is_intra[i] = (intra_count >= 3);
3454 if(i == 4) vc1_mc_4mv_chroma(v);
3455 v->mb_type[0][s->block_index[i]] = is_intra[i];
3456 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
3458 // if there are no coded blocks then don't do anything more
3459 if(!intra_count && !coded_inter) return 0;
3462 s->current_picture.qscale_table[mb_pos] = mquant;
3463 /* test if block is intra and has pred */
3468 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3469 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
3474 if(intrapred)s->ac_pred = get_bits(gb, 1);
3475 else s->ac_pred = 0;
3477 if (!v->ttmbf && coded_inter)
3478 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Second pass: residual decode and reconstruction per block. */
3482 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3483 s->mb_intra = is_intra[i];
3485 /* check if prediction blocks A and C are available */
3486 v->a_avail = v->c_avail = 0;
3487 if(i == 2 || i == 3 || !s->first_slice_line)
3488 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3489 if(i == 1 || i == 3 || s->mb_x)
3490 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3492 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
3493 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3494 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3495 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3496 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3497 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3498 if(v->pq >= 9 && v->overlap) {
3500 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3502 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3504 } else if(is_coded[i]) {
3505 status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3506 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3508 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3509 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* --- 4MV, skipped: per-block MV prediction + MC only --- */
3517 s->current_picture.qscale_table[mb_pos] = 0;
3518 for (i=0; i<6; i++) {
3519 v->mb_type[0][s->block_index[i]] = 0;
3520 s->dc_val[0][s->block_index[i]] = 0;
3524 vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
3525 vc1_mc_4mv_luma(v, i);
3527 vc1_mc_4mv_chroma(v);
3528 s->current_picture.qscale_table[mb_pos] = 0;
3533 /* Should never happen */
3537 /** Decode one B-frame MB (in Main profile)
3539 static void vc1_decode_b_mb(VC1Context *v)
3541 MpegEncContext *s = &v->s;
3542 GetBitContext *gb = &s->gb;
3544 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3545 int cbp = 0; /* cbp decoding stuff */
3546 int mqdiff, mquant; /* MB quantization */
3547 int ttmb = v->ttfrm; /* MB Transform type */
3549 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3550 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3551 int mb_has_coeffs = 0; /* last_flag */
3552 int index, index1; /* LUT indices */
3553 int val, sign; /* temp values */
3554 int first_block = 1;
3556 int skipped, direct;
3557 int dmv_x[2], dmv_y[2];
3558 int bmvtype = BMV_TYPE_BACKWARD;
3560 mquant = v->pq; /* Loosy initialization */
/* DIRECT and SKIP flags come raw from the bitstream or from bitplanes. */
3564 direct = get_bits1(gb);
3566 direct = v->direct_mb_plane[mb_pos];
3568 skipped = get_bits1(gb);
3570 skipped = v->s.mbskip_table[mb_pos];
3572 s->dsp.clear_blocks(s->block[0]);
3573 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
3574 for(i = 0; i < 6; i++) {
3575 v->mb_type[0][s->block_index[i]] = 0;
3576 s->dc_val[0][s->block_index[i]] = 0;
3578 s->current_picture.qscale_table[mb_pos] = 0;
/* Non-direct MB: read MV data; both directions share the same delta. */
3582 GET_MVDATA(dmv_x[0], dmv_y[0]);
3583 dmv_x[1] = dmv_x[0];
3584 dmv_y[1] = dmv_y[0];
/* BMV type (fwd/bwd/interpolated); decode012 order depends on BFRACTION
 * (which reference is temporally closer). */
3586 if(skipped || !s->mb_intra) {
3587 bmvtype = decode012(gb);
3590 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
3593 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
3596 bmvtype = BMV_TYPE_INTERPOLATED;
3597 dmv_x[0] = dmv_y[0] = 0;
3601 for(i = 0; i < 6; i++)
3602 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* Skipped MB: MV prediction + MC only. */
3605 if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
3606 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3607 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
/* Direct MB with residual: CBP, quant, transform type, then MC. */
3611 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3615 s->current_picture.qscale_table[mb_pos] = mquant;
3617 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3618 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
3619 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3620 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3622 if(!mb_has_coeffs && !s->mb_intra) {
3623 /* no coded blocks - effectively skipped */
3624 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3625 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3628 if(s->mb_intra && !mb_has_coeffs) {
3630 s->current_picture.qscale_table[mb_pos] = mquant;
3631 s->ac_pred = get_bits1(gb);
3633 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* Interpolated type carries a second MV delta. */
3635 if(bmvtype == BMV_TYPE_INTERPOLATED) {
3636 GET_MVDATA(dmv_x[0], dmv_y[0]);
3637 if(!mb_has_coeffs) {
3638 /* interpolated skipped block */
3639 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3640 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3644 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3646 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3649 s->ac_pred = get_bits1(gb);
3650 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3652 s->current_picture.qscale_table[mb_pos] = mquant;
3653 if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3654 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Per-block residual decode and reconstruction (no overlap filtering in
 * B frames — compare vc1_decode_p_mb). */
3660 s->dc_val[0][s->block_index[i]] = 0;
3662 val = ((cbp >> (5 - i)) & 1);
3663 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3664 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3666 /* check if prediction blocks A and C are available */
3667 v->a_avail = v->c_avail = 0;
3668 if(i == 2 || i == 3 || !s->first_slice_line)
3669 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3670 if(i == 1 || i == 3 || s->mb_x)
3671 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3673 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3674 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3675 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3676 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3677 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3678 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3680 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3681 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3683 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3684 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3689 /** Decode blocks of I-frame
3691 static void vc1_decode_i_blocks(VC1Context *v)
3694 MpegEncContext *s = &v->s;
3699 /* select codingmode used for VLC tables selection */
3700 switch(v->y_ac_table_index){
3702 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3705 v->codingset = CS_HIGH_MOT_INTRA;
3708 v->codingset = CS_MID_RATE_INTRA;
3712 switch(v->c_ac_table_index){
3714 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3717 v->codingset2 = CS_HIGH_MOT_INTER;
3720 v->codingset2 = CS_MID_RATE_INTER;
3724 /* Set DC scale - y and c use the same */
3725 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3726 s->c_dc_scale = s->c_dc_scale_table[v->pq];
3729 s->mb_x = s->mb_y = 0;
3731 s->first_slice_line = 1;
3732 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3733 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3734 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3735 ff_init_block_index(s);
3736 ff_update_block_index(s);
3737 s->dsp.clear_blocks(s->block[0]);
/* NOTE(review): mb_pos is computed with s->mb_width here, but every other
 * function in this file (including the advanced-profile variant) indexes
 * qscale_table/mb_type with s->mb_stride — verify; looks like a bug when
 * mb_stride != mb_width. */
3738 mb_pos = s->mb_x + s->mb_y * s->mb_width;
3739 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3740 s->current_picture.qscale_table[mb_pos] = v->pq;
3741 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3742 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3744 // do actual MB decoding and displaying
3745 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
3746 v->s.ac_pred = get_bits(&v->s.gb, 1);
3748 for(k = 0; k < 6; k++) {
3749 val = ((cbp >> (5 - k)) & 1);
/* Luma coded flags are predicted from neighbors; XOR with prediction. */
3752 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3756 cbp |= val << (5 - k);
3758 vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
3760 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3761 if(v->pq >= 9 && v->overlap) {
3762 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3766 vc1_put_block(v, s->block);
/* Overlap smoothing across 8x8 edges, skipped on picture borders and
 * (for chroma) in grayscale mode. */
3767 if(v->pq >= 9 && v->overlap) {
3769 s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
3770 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3771 if(!(s->flags & CODEC_FLAG_GRAY)) {
3772 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
3773 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
3776 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
3777 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
3778 if(!s->first_slice_line) {
3779 s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
3780 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
3781 if(!(s->flags & CODEC_FLAG_GRAY)) {
3782 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
3783 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
3786 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3787 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
/* Bitstream sanity check: bail out if we consumed more than advertised. */
3790 if(get_bits_count(&s->gb) > v->bits) {
3791 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3795 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3796 s->first_slice_line = 0;
3800 /** Decode blocks of I-frame for advanced profile
3802 static void vc1_decode_i_blocks_adv(VC1Context *v)
// NOTE(review): this listing is a sampled excerpt -- case labels, braces and
// several statements (e.g. the derivation of mquant used at line 3871) are
// not visible here. Comments below describe only what the visible lines show.
3805 MpegEncContext *s = &v->s;
3812 GetBitContext *gb = &s->gb;
3814 /* select codingmode used for VLC tables selection */
// Intra (luma) AC coding set: table index 0 additionally depends on pqindex
// (low quantizer -> high-rate tables); indices 1/2 select fixed sets.
3815 switch(v->y_ac_table_index){
3817 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3820 v->codingset = CS_HIGH_MOT_INTRA;
3823 v->codingset = CS_MID_RATE_INTRA;
// Inter (chroma) AC coding set, chosen the same way from c_ac_table_index.
3827 switch(v->c_ac_table_index){
3829 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3832 v->codingset2 = CS_HIGH_MOT_INTER;
3835 v->codingset2 = CS_MID_RATE_INTER;
// Treat the whole frame as one error-resilience slice, then walk the
// macroblocks in raster order.
3840 s->mb_x = s->mb_y = 0;
3842 s->first_slice_line = 1;
3843 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3844 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3845 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3846 ff_init_block_index(s);
3847 ff_update_block_index(s);
3848 s->dsp.clear_blocks(s->block[0]);
3849 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3850 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
// I-frame macroblocks carry no motion: store a zero MV for this MB.
3851 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3852 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3854 // do actual MB decoding and displaying
3855 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
// AC-prediction flag: either a raw bit in the MB layer, or looked up in the
// previously decoded ACPRED bitplane.
3856 if(v->acpred_is_raw)
3857 v->s.ac_pred = get_bits(&v->s.gb, 1);
3859 v->s.ac_pred = v->acpred_plane[mb_pos];
// Per-MB overlap-smoothing decision (CONDOVER): raw bit, OVERFLAGS bitplane,
// or forced on/off when condover is ALL/NONE.
3861 if(v->condover == CONDOVER_SELECT) {
3862 if(v->overflg_is_raw)
3863 overlap = get_bits(&v->s.gb, 1);
3865 overlap = v->over_flags_plane[mb_pos];
3867 overlap = (v->condover == CONDOVER_ALL);
// mquant is produced by code not visible in this excerpt (presumably the
// GET_MQUANT() macro) -- TODO confirm against the full file.
3871 s->current_picture.qscale_table[mb_pos] = mquant;
3872 /* Set DC scale - y and c use the same */
3873 s->y_dc_scale = s->y_dc_scale_table[mquant];
3874 s->c_dc_scale = s->c_dc_scale_table[mquant];
// Decode the six 8x8 blocks of the MB (k=0..3 luma, k=4..5 chroma).
3876 for(k = 0; k < 6; k++) {
3877 val = ((cbp >> (5 - k)) & 1);
3880 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3884 cbp |= val << (5 - k);
// Neighbour availability for DC/AC prediction: the bottom luma blocks
// (k==2,3) always have an above neighbour inside the MB; the right-hand
// blocks (k==1,3) always have a left neighbour inside the MB.
3886 v->a_avail = !s->first_slice_line || (k==2 || k==3);
3887 v->c_avail = !!s->mb_x || (k==1 || k==3);
3889 vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
3891 s->dsp.vc1_inv_trans_8x8(s->block[k]);
// Bias the inverse-transformed residual into the unsigned pixel range
// before vc1_put_block() writes it out.
3892 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3895 vc1_put_block(v, s->block);
// Overlap smoothing of the internal horizontal 8x8 edges of this MB
// (luma, and chroma unless decoding grayscale-only).
3898 s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
3899 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3900 if(!(s->flags & CODEC_FLAG_GRAY)) {
3901 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
3902 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
3905 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
3906 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
// Vertical-edge smoothing against the MB row above; skipped on the first
// row because there is no neighbour above.
3907 if(!s->first_slice_line) {
3908 s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
3909 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
3910 if(!(s->flags & CODEC_FLAG_GRAY)) {
3911 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
3912 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
3915 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3916 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
// Sanity check: bail out with a log message if we read past the frame's
// bit budget (v->bits = buf_size * 8, set by vc1_decode_frame()).
3919 if(get_bits_count(&s->gb) > v->bits) {
3920 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3924 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3925 s->first_slice_line = 0;
/** Decode all macroblocks of a P-frame (simple/main profile).
 *  Selects the AC coding sets, then iterates MBs in raster order; the
 *  per-MB decode call sits in lines not visible in this excerpt.
 */
3929 static void vc1_decode_p_blocks(VC1Context *v)
3931 MpegEncContext *s = &v->s;
3933 /* select codingmode used for VLC tables selection */
// NOTE(review): both the intra and inter coding-set switches below read
// c_ac_table_index, whereas the I-frame decoders select the intra set from
// y_ac_table_index -- confirm this asymmetry is intentional per the spec.
3934 switch(v->c_ac_table_index){
3936 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3939 v->codingset = CS_HIGH_MOT_INTRA;
3942 v->codingset = CS_MID_RATE_INTRA;
3946 switch(v->c_ac_table_index){
3948 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3951 v->codingset2 = CS_HIGH_MOT_INTER;
3954 v->codingset2 = CS_MID_RATE_INTER;
// One error-resilience slice covering the whole frame.
3958 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3959 s->first_slice_line = 1;
3960 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3961 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3962 ff_init_block_index(s);
3963 ff_update_block_index(s);
3964 s->dsp.clear_blocks(s->block[0]);
// The actual per-MB decode (presumably vc1_decode_p_mb()) is in lines
// dropped from this excerpt -- TODO confirm.
// Abort on bit over/under-consumption; the "< 0" guard also catches
// get_bits_count() wrap-around after a severely corrupt stream.
3967 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3968 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3972 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3973 s->first_slice_line = 0;
/** Decode all macroblocks of a B-frame.
 *  Structure mirrors vc1_decode_p_blocks(): pick AC coding sets, mark one
 *  ER slice, loop over MBs; the per-MB decode call is not visible here.
 */
3977 static void vc1_decode_b_blocks(VC1Context *v)
3979 MpegEncContext *s = &v->s;
3981 /* select codingmode used for VLC tables selection */
// As in the P path, both switches read c_ac_table_index (the I-frame path
// uses y_ac_table_index for the intra set) -- see note in vc1_decode_p_blocks.
3982 switch(v->c_ac_table_index){
3984 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3987 v->codingset = CS_HIGH_MOT_INTRA;
3990 v->codingset = CS_MID_RATE_INTRA;
3994 switch(v->c_ac_table_index){
3996 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3999 v->codingset2 = CS_HIGH_MOT_INTER;
4002 v->codingset2 = CS_MID_RATE_INTER;
4006 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
4007 s->first_slice_line = 1;
4008 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4009 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
4010 ff_init_block_index(s);
4011 ff_update_block_index(s);
4012 s->dsp.clear_blocks(s->block[0]);
// Per-MB decode (presumably vc1_decode_b_mb()) is in dropped lines.
4015 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4016 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
4020 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4021 s->first_slice_line = 0;
/** Handle a skipped P-frame: no residual data, simply copy the previous
 *  picture row by row into the current one (16 luma / 8 chroma lines per
 *  macroblock row).
 */
4025 static void vc1_decode_skip_blocks(VC1Context *v)
4027 MpegEncContext *s = &v->s;
4029 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
4030 s->first_slice_line = 1;
4031 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4033 ff_init_block_index(s);
4034 ff_update_block_index(s);
// Copy one full MB row from the reference: 16 luma lines, 8 per chroma plane.
4035 memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4036 memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4037 memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4038 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4039 s->first_slice_line = 0;
// Force the picture type to P -- presumably so the copied frame is treated
// as a regular reference downstream; confirm against callers.
4041 s->pict_type = P_TYPE;
/** Top-level block-decoding dispatch: route to the I/P/B decoder matching
 *  the current picture type. (The switch's case labels are in lines dropped
 *  from this excerpt.)
 */
4044 static void vc1_decode_blocks(VC1Context *v)
// Reset escape-mode level length so ESCMODE3 coding state does not leak
// between frames.
4047 v->s.esc3_level_length = 0;
4049 switch(v->s.pict_type) {
// I-frame: advanced profile has its own decoder (bitplanes, CONDOVER).
4051 if(v->profile == PROFILE_ADVANCED)
4052 vc1_decode_i_blocks_adv(v);
4054 vc1_decode_i_blocks(v);
// P-frame: a skipped frame carries no MB data at all.
4057 if(v->p_frame_skipped)
4058 vc1_decode_skip_blocks(v);
4060 vc1_decode_p_blocks(v);
// B-frame: the guard selecting between these two calls is not visible --
// presumably BI-frames decode as intra (vc1_decode_i_blocks); confirm.
4064 vc1_decode_i_blocks(v);
4066 vc1_decode_b_blocks(v);
4072 /** Initialize a VC1/WMV3 decoder
 * @todo TODO: Handle VC-1 IDUs (Transport level?)
 * @todo TODO: Decipher remaining bits in extra_data
 * @return 0 on success, -1 on missing/invalid extradata or init failure
 */
4076 static int vc1_decode_init(AVCodecContext *avctx)
4078 VC1Context *v = avctx->priv_data;
4079 MpegEncContext *s = &v->s;
// The sequence header lives in extradata; without it we cannot decode.
4082 if (!avctx->extradata_size || !avctx->extradata) return -1;
4083 if (!(avctx->flags & CODEC_FLAG_GRAY))
4084 avctx->pix_fmt = PIX_FMT_YUV420P;
4086 avctx->pix_fmt = PIX_FMT_GRAY8;
// The decoder never produces padded edges, so force edge emulation.
4088 avctx->flags |= CODEC_FLAG_EMU_EDGE;
4089 v->s.flags |= CODEC_FLAG_EMU_EDGE;
4091 if(ff_h263_decode_init(avctx) < 0)
4093 if (vc1_init_common(v) < 0) return -1;
4095 avctx->coded_width = avctx->width;
4096 avctx->coded_height = avctx->height;
4097 if (avctx->codec_id == CODEC_ID_WMV3)
4101 // looks like WMV3 has a sequence header stored in the extradata
4102 // advanced sequence header may be before the first frame
4103 // the last byte of the extradata is a version number, 1 for the
4104 // samples we can decode
4106 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8)
4108 if (decode_sequence_header(avctx, &gb) < 0)
// Report leftover (or over-read) extradata bits for debugging.
4111 count = avctx->extradata_size*8 - get_bits_count(&gb);
4114 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
4115 count, get_bits(&gb, count));
4119 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
4121 } else { // VC1/WVC1
4122 int edata_size = avctx->extradata_size;
4123 uint8_t *edata = avctx->extradata;
4125 if(avctx->extradata_size < 16) {
4126 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", edata_size);
// Scan extradata for the sequence-header start code (0x0000010F), then
// parse the header that follows it.
4129 while(edata_size > 8) {
4130 // test if we've found header
4131 if(BE_32(edata) == 0x0000010F) {
4140 init_get_bits(&gb, edata, edata_size*8);
4142 if (decode_sequence_header(avctx, &gb) < 0)
// Likewise scan for the entry-point start code (0x0000010E).
4145 while(edata_size > 8) {
4146 // test if we've found entry point
4147 if(BE_32(edata) == 0x0000010E) {
4156 init_get_bits(&gb, edata, edata_size*8);
4158 if (decode_entry_point(avctx, &gb) < 0)
4161 avctx->has_b_frames= !!(avctx->max_b_frames);
4162 s->low_delay = !avctx->has_b_frames;
4164 s->mb_width = (avctx->coded_width+15)>>4;
4165 s->mb_height = (avctx->coded_height+15)>>4;
4167 /* Allocate mb bitplanes */
// NOTE(review): these av_malloc() results are never checked -- a failed
// allocation would be dereferenced later. s->mb_stride is assumed to be
// valid here (presumably set by ff_h263_decode_init above) -- confirm.
4168 v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4169 v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4170 v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
4171 v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
4173 /* allocate block type info in that way so it could be used with s->block_index[] */
4174 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
4175 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
4176 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
4177 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
4179 /* Init coded blocks info */
4180 if (v->profile == PROFILE_ADVANCED)
4182 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
4184 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
4192 /** Decode a VC1/WMV3 frame
 * @todo TODO: Handle VC-1 IDUs (Transport level?)
 * @param data     output AVFrame
 * @param data_size set to sizeof(AVFrame) when a picture is returned
 * @param buf/buf_size one coded frame (advanced profile: escaped bitstream)
 */
4195 static int vc1_decode_frame(AVCodecContext *avctx,
4196 void *data, int *data_size,
4197 uint8_t *buf, int buf_size)
4199 VC1Context *v = avctx->priv_data;
4200 MpegEncContext *s = &v->s;
4201 AVFrame *pict = data;
4202 uint8_t *buf2 = NULL;
4204 /* no supplementary picture */
4205 if (buf_size == 0) {
4206 /* special case for last picture */
// Flush: with B-frames (low_delay==0) one decoded picture is still queued;
// emit it now and clear the pointer so it is returned only once.
4207 if (s->low_delay==0 && s->next_picture_ptr) {
4208 *pict= *(AVFrame*)s->next_picture_ptr;
4209 s->next_picture_ptr= NULL;
4211 *data_size = sizeof(AVFrame);
4217 // we need to set current_picture_ptr before reading the header,
4218 // otherwise we can't store anything in there
if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
4219 int i= ff_find_unused_picture(s, 0);
4220 s->current_picture_ptr= &s->picture[i];
4223 //for advanced profile we need to unescape buffer
// Remove start-code emulation-prevention bytes: a 0x03 following 0x00 0x00
// and preceding a byte < 4 is stuffing and gets dropped.
4224 if (avctx->codec_id == CODEC_ID_VC1) {
4226 buf2 = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
4228 for(i = 0; i < buf_size; i++) {
4229 if(buf[i] == 3 && i >= 2 && !buf[i-1] && !buf[i-2] && i < buf_size-1 && buf[i+1] < 4) {
4230 buf2[buf_size2++] = buf[i+1];
4233 buf2[buf_size2++] = buf[i];
4235 init_get_bits(&s->gb, buf2, buf_size2*8);
4237 init_get_bits(&s->gb, buf, buf_size*8);
4238 // do parse frame header
4239 if(v->profile < PROFILE_ADVANCED) {
4240 if(vc1_parse_frame_header(v, &s->gb) == -1) {
4245 if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
// Old WMV3 (res_rtm_flag clear) inter frames are not supported.
4251 if(s->pict_type != I_TYPE && !v->res_rtm_flag){
4257 s->current_picture.pict_type= s->pict_type;
4258 s->current_picture.key_frame= s->pict_type == I_TYPE;
4260 /* skip B-frames if we don't have reference frames */
4261 if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){
4263 return -1;//buf_size;
4265 /* skip b frames if we are in a hurry */
4266 if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size;
4267 if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
4268 || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
4269 || avctx->skip_frame >= AVDISCARD_ALL) {
4273 /* skip everything if we are in a hurry>=5 */
4274 if(avctx->hurry_up>=5) {
4276 return -1;//buf_size;
4279 if(s->next_p_frame_damaged){
4280 if(s->pict_type==B_TYPE)
4283 s->next_p_frame_damaged=0;
4286 if(MPV_frame_start(s, avctx) < 0) {
4291 ff_er_frame_start(s);
// Record the bit budget so the block decoders can detect overconsumption.
4293 v->bits = buf_size * 8;
4294 vc1_decode_blocks(v);
4295 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8);
4296 // if(get_bits_count(&s->gb) > buf_size * 8)
// NOTE(review): buf2 is allocated above for the advanced profile; its
// av_free() is in lines not visible in this excerpt -- confirm it is freed
// on every path.
4302 assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
4303 assert(s->current_picture.pict_type == s->pict_type);
// Output ordering: B-frames and low-delay streams return the current
// picture; otherwise the previous reference is the one due for display.
4304 if (s->pict_type == B_TYPE || s->low_delay) {
4305 *pict= *(AVFrame*)s->current_picture_ptr;
4306 } else if (s->last_picture_ptr != NULL) {
4307 *pict= *(AVFrame*)s->last_picture_ptr;
4310 if(s->last_picture_ptr || s->low_delay){
4311 *data_size = sizeof(AVFrame);
4312 ff_print_debug_info(s, pict);
4315 /* Return the Picture timestamp as the frame number */
4316 /* we subtract 1 because it is added in utils.c */
avctx->frame_number = s->picture_number - 1;
4324 /** Close a VC1/WMV3 decoder
 * @warning Initial try at using MpegEncContext stuff
 * Frees every buffer allocated in vc1_decode_init() (and the HRD arrays
 * allocated during sequence-header parsing), then tears down the shared
 * MPEG context. av_freep() NULLs each pointer, so double-close is safe.
 */
4327 static int vc1_decode_end(AVCodecContext *avctx)
4329 VC1Context *v = avctx->priv_data;
4331 av_freep(&v->hrd_rate);
4332 av_freep(&v->hrd_buffer);
4333 MPV_common_end(&v->s);
// MB bitplanes and block-type info from vc1_decode_init().
4334 av_freep(&v->mv_type_mb_plane);
4335 av_freep(&v->direct_mb_plane);
4336 av_freep(&v->acpred_plane);
4337 av_freep(&v->over_flags_plane);
4338 av_freep(&v->mb_type_base);
4343 AVCodec vc1_decoder = {
4356 AVCodec wmv3_decoder = {