2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 * VC-1 and WMV3 decoder
30 #include "mpegvideo.h"
32 #include "vc1acdata.h"
37 extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
38 extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
39 extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
40 #define MB_INTRA_VLC_BITS 9
41 extern VLC ff_msmp4_mb_i_vlc;
42 extern const uint16_t ff_msmp4_mb_i_table[64][2];
/* NOTE(review): partial extraction -- the leading integers on each line are
 * stray line-number prefixes, and the gaps in them show dropped source lines
 * (enum heads, closing braces).  The fragments below belong to the VC-1
 * profile, quantizer-mode, dquant-profile, dquant-edge, P-frame MV-mode,
 * B-frame MV-type and transform-type enums.  Kept byte-identical. */
45 static const uint16_t table_mb_intra[64][2];
48 /** Available Profiles */
53 PROFILE_COMPLEX, ///< TODO: WMV9 specific
58 /** Sequence quantizer mode */
61 QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level
62 QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level
63 QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames
64 QUANT_UNIFORM ///< Uniform quant used for all frames
68 /** Where quant can be changed */
72 DQPROFILE_DOUBLE_EDGES,
73 DQPROFILE_SINGLE_EDGE,
78 /** @name Where quant can be changed
89 /** Which pair of edges is quantized with ALTPQUANT */
92 DQDOUBLE_BEDGE_TOPLEFT,
93 DQDOUBLE_BEDGE_TOPRIGHT,
94 DQDOUBLE_BEDGE_BOTTOMRIGHT,
95 DQDOUBLE_BEDGE_BOTTOMLEFT
99 /** MV modes for P frames */
102 MV_PMODE_1MV_HPEL_BILIN,
106 MV_PMODE_INTENSITY_COMP
110 /** @name MV types for B frames */
115 BMV_TYPE_INTERPOLATED
119 /** @name Block types for P/B frames */
121 enum TransformTypes {
125 TT_8X4, //Both halves
128 TT_4X8, //Both halves
/* NOTE(review): lookup tables mapping bitstream codes to decoder values.
 * Closing "};" lines of several initializers were dropped by the extraction
 * (see the gaps in the stray line-number prefixes); content kept as-is. */
133 /** Table for conversion between TTBLK and TTMB */
134 static const int ttblk_to_tt[3][8] = {
135 { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
136 { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
137 { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
140 static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };
142 /** MV P mode - the 5th element is only used for mode 1 */
143 static const uint8_t mv_pmode_table[2][5] = {
144 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
145 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
147 static const uint8_t mv_pmode_table2[2][4] = {
148 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
149 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
152 /** One more frame type */
/* Frame-rate numerators/denominators; 1000/1001 is the NTSC drop-frame pair. */
155 static const int fps_nr[5] = { 24, 25, 30, 50, 60 },
156 fps_dr[2] = { 1000, 1001 };
/* PQINDEX -> PQUANT mapping, one row per quantizer signalling mode. */
157 static const uint8_t pquant_table[3][32] = {
158 { /* Implicit quantizer */
159 0, 1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8, 9, 10, 11, 12,
160 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
162 { /* Explicit quantizer, pquantizer uniform */
163 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
164 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
166 { /* Explicit quantizer, pquantizer non-uniform */
167 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
168 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
/* NOTE(review): file-scope VLC tables filled once by vc1_init_common();
 * the *_VLC_BITS defines are the table-lookup bit widths passed to
 * init_vlc()/get_vlc2().  Some intervening lines were dropped by the
 * extraction (gaps in the stray numeric prefixes). */
172 /** @name VC-1 VLC tables and defines
173 * @todo TODO move this into the context
176 #define VC1_BFRACTION_VLC_BITS 7
177 static VLC vc1_bfraction_vlc;
178 #define VC1_IMODE_VLC_BITS 4
179 static VLC vc1_imode_vlc;
180 #define VC1_NORM2_VLC_BITS 3
181 static VLC vc1_norm2_vlc;
182 #define VC1_NORM6_VLC_BITS 9
183 static VLC vc1_norm6_vlc;
184 /* Could be optimized, one table only needs 8 bits */
185 #define VC1_TTMB_VLC_BITS 9 //12
186 static VLC vc1_ttmb_vlc[3];
187 #define VC1_MV_DIFF_VLC_BITS 9 //15
188 static VLC vc1_mv_diff_vlc[4];
189 #define VC1_CBPCY_P_VLC_BITS 9 //14
190 static VLC vc1_cbpcy_p_vlc[4];
191 #define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
192 static VLC vc1_4mv_block_pattern_vlc[4];
193 #define VC1_TTBLK_VLC_BITS 5
194 static VLC vc1_ttblk_vlc[3];
195 #define VC1_SUBBLKPAT_VLC_BITS 6
196 static VLC vc1_subblkpat_vlc[3];
198 static VLC vc1_ac_coeff_table[8];
/* Fragment of the AC coding-set enum (its head was dropped). */
202 CS_HIGH_MOT_INTRA = 0,
212 /** @name Overlap conditions for Advanced Profile */
/* NOTE(review): the VC1Context decoder state.  Several member lines were
 * dropped by the extraction (gaps in the stray numeric prefixes), including
 * the embedded MpegEncContext member the code elsewhere accesses as v->s. */
223 * @fixme Change size wherever another size is more efficient
224 * Many members are only used for Advanced Profile
226 typedef struct VC1Context{
231 /** Simple/Main Profile sequence header */
233 int res_sm; ///< reserved, 2b
234 int res_x8; ///< reserved
235 int multires; ///< frame-level RESPIC syntax element present
236 int res_fasttx; ///< reserved, always 1
237 int res_transtab; ///< reserved, always 0
238 int rangered; ///< RANGEREDFRM (range reduction) syntax element present
240 int res_rtm_flag; ///< reserved, set to 1
241 int reserved; ///< reserved
244 /** Advanced Profile */
246 int level; ///< 3bits, for Advanced/Simple Profile, provided by TS layer
247 int chromaformat; ///< 2bits; 1=4:2:0 is the only value this decoder accepts (see decode_sequence_header_adv)
248 int postprocflag; ///< Per-frame processing suggestion flag present
249 int broadcast; ///< TFF/RFF present
250 int interlace; ///< Progressive/interlaced (RPTFTM syntax element)
251 int tfcntrflag; ///< TFCNTR present
252 int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
253 int extended_dmv; ///< Additional extended dmv range at P/B frame-level
254 int color_prim; ///< 8bits, chroma coordinates of the color primaries
255 int transfer_char; ///< 8bits, Opto-electronic transfer characteristics
256 int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
257 int hrd_param_flag; ///< Presence of Hypothetical Reference
258 ///< Decoder parameters
259 int psf; ///< Progressive Segmented Frame
262 /** Sequence header data for all Profiles
263 * TODO: choose between ints, uint8_ts and monobit flags
266 int profile; ///< 2bits, Profile
267 int frmrtq_postproc; ///< 3bits,
268 int bitrtq_postproc; ///< 5bits, quantized framerate-based postprocessing strength
269 int fastuvmc; ///< Rounding of qpel vector to hpel ? (not in Simple)
270 int extended_mv; ///< Ext MV in P/B (not in Simple)
271 int dquant; ///< How qscale varies with MBs, 2bits (not in Simple)
272 int vstransform; ///< variable-size [48]x[48] transform type + info
273 int overlap; ///< overlapped transforms in use
274 int quantizer_mode; ///< 2bits, quantizer mode used for sequence, see QUANT_*
275 int finterpflag; ///< INTERPFRM present
278 /** Frame decoding info for all profiles */
280 uint8_t mv_mode; ///< MV coding mode
281 uint8_t mv_mode2; ///< Secondary MV coding mode (B frames)
282 int k_x; ///< Number of bits for MVs (depends on MV range)
283 int k_y; ///< Number of bits for MVs (depends on MV range)
284 int range_x, range_y; ///< MV range
285 uint8_t pq, altpq; ///< Current/alternate frame quantizer scale
286 /** pquant parameters */
293 /** AC coding set indexes
294 * @see 8.1.1.10, p(1)10
297 int c_ac_table_index; ///< Chroma index from ACFRM element
298 int y_ac_table_index; ///< Luma index from AC2FRM element
300 int ttfrm; ///< Transform type info present at frame level
301 uint8_t ttmbf; ///< Transform type flag
302 uint8_t ttblk4x4; ///< Value of ttblk which indicates a 4x4 transform
303 int codingset; ///< index of current table set from 11.8 to use for luma block decoding
304 int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding
305 int pqindex; ///< raw pqindex used in coding set selection
306 int a_avail, c_avail;
307 uint8_t *mb_type_base, *mb_type[3];
310 /** Luma compensation parameters */
315 int16_t bfraction; ///< Relative position % anchors=> how to scale MVs
316 uint8_t halfpq; ///< Uniform quant over image and qp+.5
317 uint8_t respic; ///< Frame-level flag for resized images
318 int buffer_fullness; ///< HRD info
320 * -# 0 -> [-64, 63.f] x [-32, 31.f]
321 * -# 1 -> [-128, 127.f] x [-64, 63.f]
322 * -# 2 -> [-512, 511.f] x [-128, 127.f]
323 * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
326 uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use
327 VLC *cbpcy_vlc; ///< CBPCY VLC table
328 int tt_index; ///< Index for Transform Type tables
329 uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
330 uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs
331 int mv_type_is_raw; ///< mv type mb plane is not coded
332 int dmb_is_raw; ///< direct mb plane is raw
333 int skip_is_raw; ///< skip mb plane is not coded
334 uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
335 int use_ic; ///< use intensity compensation in B-frames
336 int rnd; ///< rounding control
338 /** Frame decoding info for S/M profiles only */
340 uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128)
344 /** Frame decoding info for Advanced profile */
346 uint8_t fcm; ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
347 uint8_t numpanscanwin;
349 uint8_t rptfrm, tff, rff;
352 uint16_t bottomrightx;
353 uint16_t bottomrighty;
356 int hrd_num_leaky_buckets;
357 uint8_t bit_rate_exponent;
358 uint8_t buffer_size_exponent;
359 uint8_t* acpred_plane; ///< AC prediction flags bitplane
361 uint8_t* over_flags_plane; ///< Overflags bitplane
364 uint16_t *hrd_rate, *hrd_buffer;
365 uint8_t *hrd_fullness;
366 uint8_t range_mapy_flag;
367 uint8_t range_mapuv_flag;
/* NOTE(review): partial extraction -- braces, the `return i;` of the active
 * implementation and the markers around the alternative implementations were
 * dropped (gaps in the stray numeric prefixes).  The live code is the single
 * counting loop; the UPDATE_CACHE/GET_CACHE lines below it appear to belong
 * to a commented-out cached-bit-reader variant -- TODO confirm against the
 * full source. */
377 * Get unary code of limited length
378 * @fixme FIXME Slow and ugly
379 * @param gb GetBitContext
380 * @param[in] stop The bitstop value (unary code of 1's or 0's)
381 * @param[in] len Maximum length
382 * @return Unary length/index
384 static int get_prefix(GetBitContext *gb, int stop, int len)
/* Count bits read until `stop` is seen or `len` bits are consumed. */
389 for(i = 0; i < len && get_bits1(gb) != stop; i++);
391 /* int i = 0, tmp = !stop;
393 while (i != len && tmp != stop)
395 tmp = get_bits(gb, 1);
398 if (i == len && tmp != stop) return len+1;
405 UPDATE_CACHE(re, gb);
406 buf=GET_CACHE(re, gb); //Still not sure
407 if (stop) buf = ~buf;
409 log= av_log2(-buf); //FIXME: -?
411 LAST_SKIP_BITS(re, gb, log+1);
412 CLOSE_READER(re, gb);
416 LAST_SKIP_BITS(re, gb, limit);
417 CLOSE_READER(re, gb);
/* Decode a 2-1-0 coded value from the bitstream.  NOTE(review): the lines
 * between the signature and the final return were dropped by the extraction;
 * presumably a leading get_bits1() handles the short-code case -- confirm
 * against the full source.  The visible return maps the last bit to 2 or 1. */
422 static inline int decode210(GetBitContext *gb){
428 return 2 - get_bits1(gb);
/* NOTE(review): one-time setup of all static VLC tables via init_vlc() plus
 * default member values.  Loop heads, the init-once guard and the return were
 * dropped by the extraction (gaps in the stray numeric prefixes); the
 * init_vlc calls visibly use the *_bits/*_codes tables from vc1data/vc1acdata. */
432 * Init VC-1 specific tables and VC1Context members
433 * @param v The VC1Context to initialize
436 static int vc1_init_common(VC1Context *v)
441 v->hrd_rate = v->hrd_buffer = NULL;
447 init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
448 vc1_bfraction_bits, 1, 1,
449 vc1_bfraction_codes, 1, 1, 1);
450 init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
451 vc1_norm2_bits, 1, 1,
452 vc1_norm2_codes, 1, 1, 1);
453 init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
454 vc1_norm6_bits, 1, 1,
455 vc1_norm6_codes, 2, 2, 1);
456 init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
457 vc1_imode_bits, 1, 1,
458 vc1_imode_codes, 1, 1, 1);
/* Per-index table groups (the surrounding for-loops were dropped). */
461 init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
462 vc1_ttmb_bits[i], 1, 1,
463 vc1_ttmb_codes[i], 2, 2, 1);
464 init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
465 vc1_ttblk_bits[i], 1, 1,
466 vc1_ttblk_codes[i], 1, 1, 1);
467 init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
468 vc1_subblkpat_bits[i], 1, 1,
469 vc1_subblkpat_codes[i], 1, 1, 1);
473 init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
474 vc1_4mv_block_pattern_bits[i], 1, 1,
475 vc1_4mv_block_pattern_codes[i], 1, 1, 1);
476 init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
477 vc1_cbpcy_p_bits[i], 1, 1,
478 vc1_cbpcy_p_codes[i], 2, 2, 1);
479 init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
480 vc1_mv_diff_bits[i], 1, 1,
481 vc1_mv_diff_codes[i], 2, 2, 1);
484 init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
485 &vc1_ac_tables[i][0][1], 8, 4,
486 &vc1_ac_tables[i][0][0], 8, 4, 1);
487 init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
488 &ff_msmp4_mb_i_table[0][1], 4, 2,
489 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
494 v->mvrange = 0; /* 7.1.1.18, p80 */
499 /***********************************************************************/
501 * @defgroup bitplane VC9 Bitplane decoding
506 /** @addtogroup bitplane
519 /** @} */ //imode defines
/* NOTE(review): per-row bitplane decode: a zero ROWSKIP bit clears the row,
 * otherwise `width` bits are read into it.  The `else`, the row-advance
 * (presumably `plane += stride`) and closing braces were dropped by the
 * extraction -- confirm against the full source. */
521 /** Decode rows by checking if they are skipped
522 * @param plane Buffer to store decoded bits
523 * @param[in] width Width of this buffer
524 * @param[in] height Height of this buffer
525 * @param[in] stride of this buffer
527 static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
530 for (y=0; y<height; y++){
531 if (!get_bits(gb, 1)) //rowskip
532 memset(plane, 0, width);
534 for (x=0; x<width; x++)
535 plane[x] = get_bits(gb, 1);
/* NOTE(review): per-column analogue of decode_rowskip: a zero COLSKIP bit
 * clears the column, otherwise `height` bits are read down it at `stride`
 * spacing.  The clearing statement, `else`, column advance (`plane++`?) and
 * closing braces were dropped by the extraction -- confirm against source. */
540 /** Decode columns by checking if they are skipped
541 * @param plane Buffer to store decoded bits
542 * @param[in] width Width of this buffer
543 * @param[in] height Height of this buffer
544 * @param[in] stride of this buffer
545 * @fixme FIXME: Optimize
547 static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
550 for (x=0; x<width; x++){
551 if (!get_bits(gb, 1)) //colskip
552 for (y=0; y<height; y++)
555 for (y=0; y<height; y++)
556 plane[y*stride] = get_bits(gb, 1);
/* NOTE(review): decodes one MB-granularity bitplane using the invert flag,
 * an IMODE VLC, then one of the coding modes (raw / NORM-2 / NORM-6 /
 * rowskip / colskip), followed by an optional DIFF post-pass and inversion.
 * The switch statement, several case labels, error `return`s after the
 * "invalid NORM-6 VLC" logs and various braces were dropped by the
 * extraction (gaps in the stray numeric prefixes). */
561 /** Decode a bitplane's bits
562 * @param bp Bitplane where to store the decode bits
563 * @param v VC-1 context for bit reading and logging
565 * @fixme FIXME: Optimize
567 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
569 GetBitContext *gb = &v->s.gb;
571 int imode, x, y, code, offset;
572 uint8_t invert, *planep = data;
573 int width, height, stride;
575 width = v->s.mb_width;
576 height = v->s.mb_height;
577 stride = v->s.mb_stride;
578 invert = get_bits(gb, 1);
579 imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
/* Raw mode: bits arrive interleaved with the MB layer, nothing to read here. */
585 //Data is actually read in the MB layer (same for all tests == "raw")
586 *raw_flag = 1; //invert ignored
/* NORM-2: odd plane size needs one standalone leading bit. */
590 if ((height * width) & 1)
592 *planep++ = get_bits(gb, 1);
596 // decode bitplane as one long line
597 for (y = offset; y < height * width; y += 2) {
598 code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
599 *planep++ = code & 1;
601 if(offset == width) {
603 planep += stride - width;
605 *planep++ = code >> 1;
607 if(offset == width) {
609 planep += stride - width;
/* NORM-6: 2x3 or 3x2 tiles, remainders handled by col/row-skip below. */
615 if(!(height % 3) && (width % 3)) { // use 2x3 decoding
616 for(y = 0; y < height; y+= 3) {
617 for(x = width & 1; x < width; x += 2) {
618 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
620 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
623 planep[x + 0] = (code >> 0) & 1;
624 planep[x + 1] = (code >> 1) & 1;
625 planep[x + 0 + stride] = (code >> 2) & 1;
626 planep[x + 1 + stride] = (code >> 3) & 1;
627 planep[x + 0 + stride * 2] = (code >> 4) & 1;
628 planep[x + 1 + stride * 2] = (code >> 5) & 1;
630 planep += stride * 3;
632 if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
634 planep += (height & 1) * stride;
635 for(y = height & 1; y < height; y += 2) {
636 for(x = width % 3; x < width; x += 3) {
637 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
639 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
642 planep[x + 0] = (code >> 0) & 1;
643 planep[x + 1] = (code >> 1) & 1;
644 planep[x + 2] = (code >> 2) & 1;
645 planep[x + 0 + stride] = (code >> 3) & 1;
646 planep[x + 1 + stride] = (code >> 4) & 1;
647 planep[x + 2 + stride] = (code >> 5) & 1;
649 planep += stride * 2;
652 if(x) decode_colskip(data , x, height , stride, &v->s.gb);
653 if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
657 decode_rowskip(data, width, height, stride, &v->s.gb);
660 decode_colskip(data, width, height, stride, &v->s.gb);
/* DIFF modes reuse NORM-2/NORM-6 then XOR-predict each bit from neighbors. */
665 /* Applying diff operator */
666 if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
670 for (x=1; x<width; x++)
671 planep[x] ^= planep[x-1];
672 for (y=1; y<height; y++)
675 planep[0] ^= planep[-stride];
676 for (x=1; x<width; x++)
678 if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
679 else planep[x] ^= planep[x-1];
686 for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
688 return (imode<<1) + invert;
691 /** @} */ //Bitplane group
693 /***********************************************************************/
/* NOTE(review): parses the per-picture VOPDQUANT layer.  ALTPQUANT is either
 * read directly (PQDIFF==7 -> 5-bit escape) or derived as pq+pqdiff+1.  The
 * outer branch on v->dquant, a `break` after DQPROFILE_DOUBLE_EDGES and the
 * return were dropped by the extraction (gaps in stray numeric prefixes). */
694 /** VOP Dquant decoding
695 * @param v VC-1 Context
697 static int vop_dquant_decoding(VC1Context *v)
699 GetBitContext *gb = &v->s.gb;
705 pqdiff = get_bits(gb, 3);
706 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
707 else v->altpq = v->pq + pqdiff + 1;
711 v->dquantfrm = get_bits(gb, 1);
714 v->dqprofile = get_bits(gb, 2);
715 switch (v->dqprofile)
717 case DQPROFILE_SINGLE_EDGE:
718 case DQPROFILE_DOUBLE_EDGES:
719 v->dqsbedge = get_bits(gb, 2);
721 case DQPROFILE_ALL_MBS:
722 v->dqbilevel = get_bits(gb, 1);
723 default: break; //Forbidden ?
725 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
727 pqdiff = get_bits(gb, 3);
728 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
729 else v->altpq = v->pq + pqdiff + 1;
/* NOTE(review): writes the six decoded 8x8 blocks (4 luma + 2 chroma) of a
 * macroblock into current_picture via put_pixels_clamped.  The initial
 * (block-128)*2+128 rescale is visibly gated on something dropped by the
 * extraction (presumably v->rangeredfrm -- confirm); the Y pointer setup
 * and some braces are missing too. */
736 /** Put block onto picture
738 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
742 DSPContext *dsp = &v->s.dsp;
746 for(k = 0; k < 6; k++)
747 for(j = 0; j < 8; j++)
748 for(i = 0; i < 8; i++)
749 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
752 ys = v->s.current_picture.linesize[0];
753 us = v->s.current_picture.linesize[1];
754 vs = v->s.current_picture.linesize[2];
757 dsp->put_pixels_clamped(block[0], Y, ys);
758 dsp->put_pixels_clamped(block[1], Y + 8, ys);
760 dsp->put_pixels_clamped(block[2], Y, ys);
761 dsp->put_pixels_clamped(block[3], Y + 8, ys);
763 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
764 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
765 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
/* NOTE(review): single-MV motion compensation for a whole macroblock:
 * derive chroma MV from the luma MV, clip source coordinates, fall back to
 * ff_emulated_edge_mc near picture edges or when range reduction /
 * intensity compensation forces a copy, rescale the copied pixels, then run
 * the luma (mspel or hpel) and chroma (qpel bilinear) put routines.
 * Branch heads (e.g. the dir==0/last vs next picture selector, the mspel
 * test) and braces were dropped by the extraction. */
769 /** Do motion compensation over 1 macroblock
770 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
772 static void vc1_mc_1mv(VC1Context *v, int dir)
774 MpegEncContext *s = &v->s;
775 DSPContext *dsp = &v->s.dsp;
776 uint8_t *srcY, *srcU, *srcV;
777 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
779 if(!v->s.last_picture.data[0])return;
781 mx = s->mv[dir][0][0];
782 my = s->mv[dir][0][1];
784 // store motion vectors for further use in B frames
785 if(s->pict_type == P_TYPE) {
786 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
787 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
/* Chroma MV = luma MV halved, with 3/4-pel positions rounded up. */
789 uvmx = (mx + ((mx & 3) == 3)) >> 1;
790 uvmy = (my + ((my & 3) == 3)) >> 1;
792 srcY = s->last_picture.data[0];
793 srcU = s->last_picture.data[1];
794 srcV = s->last_picture.data[2];
796 srcY = s->next_picture.data[0];
797 srcU = s->next_picture.data[1];
798 srcV = s->next_picture.data[2];
801 src_x = s->mb_x * 16 + (mx >> 2);
802 src_y = s->mb_y * 16 + (my >> 2);
803 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
804 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
806 src_x = clip( src_x, -16, s->mb_width * 16);
807 src_y = clip( src_y, -16, s->mb_height * 16);
808 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
809 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
811 srcY += src_y * s->linesize + src_x;
812 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
813 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
815 /* for grayscale we should not try to read from unknown area */
816 if(s->flags & CODEC_FLAG_GRAY) {
817 srcU = s->edge_emu_buffer + 18 * s->linesize;
818 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* Edge emulation also taken whenever source pixels must be rewritten. */
821 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
822 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
823 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
824 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
826 srcY -= s->mspel * (1 + s->linesize);
827 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
828 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
829 srcY = s->edge_emu_buffer;
830 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
831 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
832 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
833 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
836 /* if we deal with range reduction we need to scale source blocks */
842 for(j = 0; j < 17 + s->mspel*2; j++) {
843 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
846 src = srcU; src2 = srcV;
847 for(j = 0; j < 9; j++) {
848 for(i = 0; i < 9; i++) {
849 src[i] = ((src[i] - 128) >> 1) + 128;
850 src2[i] = ((src2[i] - 128) >> 1) + 128;
852 src += s->uvlinesize;
853 src2 += s->uvlinesize;
856 /* if we deal with intensity compensation we need to scale source blocks */
857 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
862 for(j = 0; j < 17 + s->mspel*2; j++) {
863 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
866 src = srcU; src2 = srcV;
867 for(j = 0; j < 9; j++) {
868 for(i = 0; i < 9; i++) {
869 src[i] = v->lutuv[src[i]];
870 src2[i] = v->lutuv[src2[i]];
872 src += s->uvlinesize;
873 src2 += s->uvlinesize;
876 srcY += s->mspel * (1 + s->linesize);
/* fastuvmc path (guard dropped): force chroma MV to hpel precision. */
880 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
881 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
885 dxy = ((my & 3) << 2) | (mx & 3);
886 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
887 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
888 srcY += s->linesize * 8;
889 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
890 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
891 } else { // hpel mc - always used for luma
892 dxy = (my & 2) | ((mx & 2) >> 1);
895 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
897 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
900 if(s->flags & CODEC_FLAG_GRAY) return;
901 /* Chroma MC always uses qpel bilinear */
902 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
906 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
907 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
909 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
910 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/* NOTE(review): per-8x8 luma motion compensation for a 4-MV macroblock;
 * block index n in 0..3 selects the quadrant via (n&1, n&2).  The mx/my
 * reads from s->mv, the mspel/rnd branch heads and braces were dropped by
 * the extraction (gaps in the stray numeric prefixes). */
914 /** Do motion compensation for 4-MV macroblock - luminance block
916 static void vc1_mc_4mv_luma(VC1Context *v, int n)
918 MpegEncContext *s = &v->s;
919 DSPContext *dsp = &v->s.dsp;
921 int dxy, mx, my, src_x, src_y;
924 if(!v->s.last_picture.data[0])return;
927 srcY = s->last_picture.data[0];
/* Destination offset of quadrant n inside the 16x16 luma block. */
929 off = s->linesize * 4 * (n&2) + (n&1) * 8;
931 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
932 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
934 src_x = clip( src_x, -16, s->mb_width * 16);
935 src_y = clip( src_y, -16, s->mb_height * 16);
937 srcY += src_y * s->linesize + src_x;
939 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
940 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
941 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
942 srcY -= s->mspel * (1 + s->linesize);
943 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
944 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
945 srcY = s->edge_emu_buffer;
946 /* if we deal with range reduction we need to scale source blocks */
952 for(j = 0; j < 9 + s->mspel*2; j++) {
953 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
957 /* if we deal with intensity compensation we need to scale source blocks */
958 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
963 for(j = 0; j < 9 + s->mspel*2; j++) {
964 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
968 srcY += s->mspel * (1 + s->linesize);
972 dxy = ((my & 3) << 2) | (mx & 3);
973 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
974 } else { // hpel mc - always used for luma
975 dxy = (my & 2) | ((mx & 2) >> 1);
977 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
979 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Median of four values, computed as the mean of the two middle elements.
 * NOTE(review): the opening brace and the outer `if(a < b)` / `else`
 * selecting between the two visible pairs of returns were dropped by the
 * extraction -- confirm against the full source. */
983 static inline int median4(int a, int b, int c, int d)
986 if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
987 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
989 if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
990 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
/* NOTE(review): chroma motion compensation for a 4-MV macroblock.  Derives
 * one chroma MV from the four luma MVs: median4 when all four blocks are
 * inter, mid_pred of the three inter MVs when one block is intra, average
 * when two are intra, and no MC at all when fewer than two are inter.
 * Some branch heads (the idx dispatch for the 3-inter case, the fastuvmc
 * guard) and braces were dropped by the extraction. */
995 /** Do motion compensation for 4-MV macroblock - both chroma blocks
997 static void vc1_mc_4mv_chroma(VC1Context *v)
999 MpegEncContext *s = &v->s;
1000 DSPContext *dsp = &v->s.dsp;
1001 uint8_t *srcU, *srcV;
1002 int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
1003 int i, idx, tx = 0, ty = 0;
1004 int mvx[4], mvy[4], intra[4];
/* count[mask] = number of set bits = number of intra blocks in the MB. */
1005 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
1007 if(!v->s.last_picture.data[0])return;
1008 if(s->flags & CODEC_FLAG_GRAY) return;
1010 for(i = 0; i < 4; i++) {
1011 mvx[i] = s->mv[0][i][0];
1012 mvy[i] = s->mv[0][i][1];
1013 intra[i] = v->mb_type[0][s->block_index[i]];
1016 /* calculate chroma MV vector from four luma MVs */
1017 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
1018 if(!idx) { // all blocks are inter
1019 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
1020 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
1021 } else if(count[idx] == 1) { // 3 inter blocks
1024 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
1025 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
1028 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
1029 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
1032 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
1033 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
1036 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
1037 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
1040 } else if(count[idx] == 2) {
1042 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
1043 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
1044 tx = (mvx[t1] + mvx[t2]) / 2;
1045 ty = (mvy[t1] + mvy[t2]) / 2;
1047 return; //no need to do MC for inter blocks
1049 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
1050 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
/* Halve the derived MV, rounding 3/4-pel positions up (same as vc1_mc_1mv). */
1051 uvmx = (tx + ((tx&3) == 3)) >> 1;
1052 uvmy = (ty + ((ty&3) == 3)) >> 1;
1054 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1055 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1057 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
1058 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
1059 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1060 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1061 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1062 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1063 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
1064 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
1065 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1066 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
1067 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1068 srcU = s->edge_emu_buffer;
1069 srcV = s->edge_emu_buffer + 16;
1071 /* if we deal with range reduction we need to scale source blocks */
1072 if(v->rangeredfrm) {
1074 uint8_t *src, *src2;
1076 src = srcU; src2 = srcV;
1077 for(j = 0; j < 9; j++) {
1078 for(i = 0; i < 9; i++) {
1079 src[i] = ((src[i] - 128) >> 1) + 128;
1080 src2[i] = ((src2[i] - 128) >> 1) + 128;
1082 src += s->uvlinesize;
1083 src2 += s->uvlinesize;
1086 /* if we deal with intensity compensation we need to scale source blocks */
1087 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1089 uint8_t *src, *src2;
1091 src = srcU; src2 = srcV;
1092 for(j = 0; j < 9; j++) {
1093 for(i = 0; i < 9; i++) {
1094 src[i] = v->lutuv[src[i]];
1095 src2[i] = v->lutuv[src2[i]];
1097 src += s->uvlinesize;
1098 src2 += s->uvlinesize;
/* fastuvmc path (guard dropped): force chroma MV to hpel precision. */
1104 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
1105 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
1108 /* Chroma MC always uses qpel bilinear */
1109 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
1113 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1114 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1116 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1117 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1121 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
/* NOTE(review): parses the Simple/Main Profile sequence header field by
 * field, validating reserved bits; Advanced Profile is delegated to
 * decode_sequence_header_adv().  Error `return`s after the av_log calls and
 * several braces were dropped by the extraction (gaps in the stray numeric
 * prefixes).  Fixed here: the user-visible error message typo
 * "shell not be" -> "shall not be". */
1124 * Decode Simple/Main Profiles sequence header
1125 * @see Figure 7-8, p16-17
1126 * @param avctx Codec context
1127 * @param gb GetBit context initialized from Codec context extra_data
1130 static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
1132 VC1Context *v = avctx->priv_data;
1134 av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
1135 v->profile = get_bits(gb, 2);
1136 if (v->profile == 2)
1138 av_log(avctx, AV_LOG_ERROR, "Profile value 2 is forbidden (and WMV3 Complex Profile is unsupported)\n");
1142 if (v->profile == PROFILE_ADVANCED)
1144 return decode_sequence_header_adv(v, gb);
1148 v->res_sm = get_bits(gb, 2); //reserved
1151 av_log(avctx, AV_LOG_ERROR,
1152 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
1158 v->frmrtq_postproc = get_bits(gb, 3); //common
1159 // (bitrate-32kbps)/64kbps
1160 v->bitrtq_postproc = get_bits(gb, 5); //common
1161 v->s.loop_filter = get_bits(gb, 1); //common
1162 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
1164 av_log(avctx, AV_LOG_ERROR,
1165 "LOOPFILTER shall not be enabled in simple profile\n");
1168 v->res_x8 = get_bits(gb, 1); //reserved
1171 av_log(avctx, AV_LOG_ERROR,
1172 "1 for reserved RES_X8 is forbidden\n");
1175 v->multires = get_bits(gb, 1);
1176 v->res_fasttx = get_bits(gb, 1);
1179 av_log(avctx, AV_LOG_ERROR,
1180 "0 for reserved RES_FASTTX is forbidden\n");
1184 v->fastuvmc = get_bits(gb, 1); //common
1185 if (!v->profile && !v->fastuvmc)
1187 av_log(avctx, AV_LOG_ERROR,
1188 "FASTUVMC unavailable in Simple Profile\n");
1191 v->extended_mv = get_bits(gb, 1); //common
1192 if (!v->profile && v->extended_mv)
1194 av_log(avctx, AV_LOG_ERROR,
1195 "Extended MVs unavailable in Simple Profile\n");
1198 v->dquant = get_bits(gb, 2); //common
1199 v->vstransform = get_bits(gb, 1); //common
1201 v->res_transtab = get_bits(gb, 1);
1202 if (v->res_transtab)
1204 av_log(avctx, AV_LOG_ERROR,
1205 "1 for reserved RES_TRANSTAB is forbidden\n");
1209 v->overlap = get_bits(gb, 1); //common
1211 v->s.resync_marker = get_bits(gb, 1);
1212 v->rangered = get_bits(gb, 1);
1213 if (v->rangered && v->profile == PROFILE_SIMPLE)
1215 av_log(avctx, AV_LOG_INFO,
1216 "RANGERED should be set to 0 in simple profile\n");
1219 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
1220 v->quantizer_mode = get_bits(gb, 2); //common
1222 v->finterpflag = get_bits(gb, 1); //common
1223 v->res_rtm_flag = get_bits(gb, 1); //reserved
/* Old (pre-final) WMV3 streams clear this reserved flag; decode I-frames only. */
1224 if (!v->res_rtm_flag)
1226 // av_log(avctx, AV_LOG_ERROR,
1227 // "0 for reserved RES_RTM_FLAG is forbidden\n");
1228 av_log(avctx, AV_LOG_ERROR,
1229 "Old WMV3 version detected, only I-frames will be decoded\n");
1232 av_log(avctx, AV_LOG_DEBUG,
1233 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1234 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
1235 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
1236 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
1237 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
1238 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
1239 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
1240 v->dquant, v->quantizer_mode, avctx->max_b_frames
/* Parse an Advanced Profile (VC-1 / SMPTE 421M) sequence header.
 * Reads level, chroma format, coded dimensions, display-extension
 * metadata and HRD (hypothetical reference decoder) parameters into *v.
 * NOTE(review): this excerpt has lines elided between the embedded
 * original line numbers (braces, declarations, returns are missing),
 * so the visible control flow is incomplete; comments below describe
 * only the statements actually shown. */
1245 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
1247 v->res_rtm_flag = 1;
1248 v->level = get_bits(gb, 3);
1251 av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
1253 v->chromaformat = get_bits(gb, 2);
/* only CHROMAFORMAT == 1 (4:2:0) is decodable by this code */
1254 if (v->chromaformat != 1)
1256 av_log(v->s.avctx, AV_LOG_ERROR,
1257 "Only 4:2:0 chroma format supported\n");
1262 v->frmrtq_postproc = get_bits(gb, 3); //common
1263 // (bitrate-32kbps)/64kbps
1264 v->bitrtq_postproc = get_bits(gb, 5); //common
1265 v->postprocflag = get_bits(gb, 1); //common
/* coded size is stored as (dimension/2 - 1) in 12 bits each */
1267 v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
1268 v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
1269 v->broadcast = get_bits1(gb);
1270 v->interlace = get_bits1(gb);
1271 v->tfcntrflag = get_bits1(gb);
1272 v->finterpflag = get_bits1(gb);
1273 get_bits1(gb); // reserved
1274 v->psf = get_bits1(gb);
1275 if(v->psf) { //PsF, 6.1.13
1276 av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
/* optional display-extension metadata: parsed for logging only,
 * decoding itself is not affected */
1279 if(get_bits1(gb)) { //Display Info - decoding is not affected by it
1281 av_log(v->s.avctx, AV_LOG_INFO, "Display extended info:\n");
1282 w = get_bits(gb, 14);
1283 h = get_bits(gb, 14);
1284 av_log(v->s.avctx, AV_LOG_INFO, "Display dimensions: %ix%i\n", w, h);
1285 //TODO: store aspect ratio in AVCodecContext
1287 ar = get_bits(gb, 4);
1289 w = get_bits(gb, 8);
1290 h = get_bits(gb, 8);
1293 if(get_bits1(gb)){ //framerate stuff
1303 v->color_prim = get_bits(gb, 8);
1304 v->transfer_char = get_bits(gb, 8);
1305 v->matrix_coef = get_bits(gb, 8);
/* HRD leaky-bucket parameters: only the bucket count is kept,
 * per-bucket rate/size values are read and discarded */
1309 v->hrd_param_flag = get_bits1(gb);
1310 if(v->hrd_param_flag) {
1312 v->hrd_num_leaky_buckets = get_bits(gb, 5);
1313 get_bits(gb, 4); //bitrate exponent
1314 get_bits(gb, 4); //buffer size exponent
1315 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1316 get_bits(gb, 16); //hrd_rate[n]
1317 get_bits(gb, 16); //hrd_buffer[n]
/* Parse an Advanced Profile entry-point header (SMPTE 421M 6.2).
 * Updates per-entry-point coding flags (loop filter, fast UV MC,
 * extended MV, dquant, transform/overlap modes) and optionally a new
 * coded size.  NOTE(review): lines are elided in this excerpt; the
 * conditions guarding some reads (e.g. the coded-size and range
 * mapping fields) are not fully visible. */
1323 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
1325 VC1Context *v = avctx->priv_data;
1328 av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
1329 get_bits1(gb); // broken link
1330 avctx->max_b_frames = 1 - get_bits1(gb); // 'closed entry' also signalize possible B-frames
1331 v->panscanflag = get_bits1(gb);
1332 get_bits1(gb); // refdist flag
1333 v->s.loop_filter = get_bits1(gb);
1334 v->fastuvmc = get_bits1(gb);
1335 v->extended_mv = get_bits1(gb);
1336 v->dquant = get_bits(gb, 2);
1337 v->vstransform = get_bits1(gb);
1338 v->overlap = get_bits1(gb);
1339 v->quantizer_mode = get_bits(gb, 2);
/* HRD fullness values are read but not stored */
1341 if(v->hrd_param_flag){
1342 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1343 get_bits(gb, 8); //hrd_full[n]
1348 avctx->coded_width = (get_bits(gb, 12)+1)<<1;
1349 avctx->coded_height = (get_bits(gb, 12)+1)<<1;
1352 v->extended_dmv = get_bits1(gb);
/* range mapping (luma/chroma scaling) is parsed but unimplemented;
 * decoding proceeds with a warning */
1354 av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
1355 skip_bits(gb, 3); // Y range, ignored for now
1358 av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
1359 skip_bits(gb, 3); // UV range, ignored for now
/* Parse a Simple/Main profile frame (picture) header.
 * Determines picture type, per-frame quantizer, MV range/mode,
 * decodes the bitplanes for P/B frames and selects the VLC tables.
 * Returns -1 on bitplane decoding failure (visible paths).
 * NOTE(review): lines are elided in this excerpt, so several
 * if/else and switch-case boundaries are not visible. */
1365 static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
1367 int pqindex, lowquant, status;
1369 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1370 skip_bits(gb, 2); //framecnt unused
1372 if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
/* picture type: 1 bit, plus a second bit to distinguish I from B
 * when B-frames are possible */
1373 v->s.pict_type = get_bits(gb, 1);
1374 if (v->s.avctx->max_b_frames) {
1375 if (!v->s.pict_type) {
1376 if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
1377 else v->s.pict_type = B_TYPE;
1378 } else v->s.pict_type = P_TYPE;
1379 } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
/* BFRACTION: a bfraction of 0 signals a BI (intra B) picture */
1382 if(v->s.pict_type == B_TYPE) {
1383 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1384 v->bfraction = vc1_bfraction_lut[v->bfraction];
1385 if(v->bfraction == 0) {
1386 v->s.pict_type = BI_TYPE;
1389 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1390 get_bits(gb, 7); // skip buffer fullness
1393 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1395 if(v->s.pict_type == P_TYPE)
1398 /* Quantizer stuff */
1399 pqindex = get_bits(gb, 5);
1400 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1401 v->pq = pquant_table[0][pqindex];
1403 v->pq = pquant_table[1][pqindex];
1406 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1407 v->pquantizer = pqindex < 9;
1408 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1410 v->pqindex = pqindex;
1411 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1413 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1414 v->pquantizer = get_bits(gb, 1);
/* MVRANGE (unary, max 3) widens the MV component bit lengths */
1416 if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
1417 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1418 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1419 v->range_x = 1 << (v->k_x - 1);
1420 v->range_y = 1 << (v->k_y - 1);
1421 if (v->profile == PROFILE_ADVANCED)
1423 if (v->postprocflag) v->postproc = get_bits(gb, 1);
1426 if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);
1428 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
1429 // (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
/* intensity compensation only persists across B frames */
1431 if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
1433 switch(v->s.pict_type) {
1435 if (v->pq < 5) v->tt_index = 0;
1436 else if(v->pq < 13) v->tt_index = 1;
1437 else v->tt_index = 2;
1439 lowquant = (v->pq > 12) ? 0 : 1;
1440 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1441 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1443 int scale, shift, i;
1444 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1445 v->lumscale = get_bits(gb, 6);
1446 v->lumshift = get_bits(gb, 6);
1448 /* fill lookup tables for intensity compensation */
1451 shift = (255 - v->lumshift * 2) << 6;
1452 if(v->lumshift > 31)
1455 scale = v->lumscale + 32;
1456 if(v->lumshift > 31)
1457 shift = (v->lumshift - 64) << 6;
1459 shift = v->lumshift << 6;
/* luty/lutuv: per-pixel remap tables, fixed point with 6 frac bits */
1461 for(i = 0; i < 256; i++) {
1462 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1463 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
/* derive quarter-pel and mspel flags from the (effective) MV mode */
1466 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1467 v->s.quarter_sample = 0;
1468 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1469 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1470 v->s.quarter_sample = 0;
1472 v->s.quarter_sample = 1;
1474 v->s.quarter_sample = 1;
1475 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
/* MVTYPEMB bitplane only present in mixed-MV mode */
1477 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1478 v->mv_mode2 == MV_PMODE_MIXED_MV)
1479 || v->mv_mode == MV_PMODE_MIXED_MV)
1481 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1482 if (status < 0) return -1;
1483 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1484 "Imode: %i, Invert: %i\n", status>>1, status&1);
1486 v->mv_type_is_raw = 0;
1487 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1489 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1490 if (status < 0) return -1;
1491 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1492 "Imode: %i, Invert: %i\n", status>>1, status&1);
1494 /* Hopefully this is correct for P frames */
1495 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1496 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1500 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1501 vop_dquant_decoding(v);
1504 v->ttfrm = 0; //FIXME Is that so ?
1507 v->ttmbf = get_bits(gb, 1);
1510 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* B-frame case: same quantizer/transform setup, simpler MV mode */
1518 if (v->pq < 5) v->tt_index = 0;
1519 else if(v->pq < 13) v->tt_index = 1;
1520 else v->tt_index = 2;
1522 lowquant = (v->pq > 12) ? 0 : 1;
1523 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1524 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1525 v->s.mspel = v->s.quarter_sample;
1527 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1528 if (status < 0) return -1;
1529 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1530 "Imode: %i, Invert: %i\n", status>>1, status&1);
1531 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1532 if (status < 0) return -1;
1533 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1534 "Imode: %i, Invert: %i\n", status>>1, status&1);
1536 v->s.mv_table_index = get_bits(gb, 2);
1537 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1541 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1542 vop_dquant_decoding(v);
1548 v->ttmbf = get_bits(gb, 1);
1551 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* AC/DC coefficient table selection (shared by all picture types) */
1561 v->c_ac_table_index = decode012(gb);
1562 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1564 v->y_ac_table_index = decode012(gb);
1567 v->s.dc_table_index = get_bits(gb, 1);
/* BI frames are handled as B internally from here on */
1569 if(v->s.pict_type == BI_TYPE) {
1570 v->s.pict_type = B_TYPE;
/* Parse an Advanced Profile frame (picture) header.
 * Mirrors vc1_parse_frame_header() but adds FCM, the unary picture
 * type code, pan-scan/rnd/uvsamp fields and the ACPRED/CONDOVER
 * bitplanes for I/BI pictures.  Returns -1 on bitplane failure
 * (visible paths).  NOTE(review): lines are elided in this excerpt;
 * switch-case labels and several else-branches are not visible. */
1576 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
1579 int pqindex, lowquant;
1582 v->p_frame_skipped = 0;
/* FCM: frame coding mode (progressive/interlace), 1-2 bits */
1585 fcm = decode012(gb);
/* unary picture-type code: P, B, I, BI, skipped-P */
1586 switch(get_prefix(gb, 0, 4)) {
1588 v->s.pict_type = P_TYPE;
1591 v->s.pict_type = B_TYPE;
1595 v->s.pict_type = I_TYPE;
1598 v->s.pict_type = BI_TYPE;
1601 v->s.pict_type = P_TYPE; // skipped pic
1602 v->p_frame_skipped = 1;
1608 if(!v->interlace || v->panscanflag) {
1615 if(v->panscanflag) {
1618 v->rnd = get_bits1(gb);
1620 v->uvsamp = get_bits1(gb);
1621 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
/* frame quantizer, same derivation as in the simple/main parser */
1622 pqindex = get_bits(gb, 5);
1623 v->pqindex = pqindex;
1624 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1625 v->pq = pquant_table[0][pqindex];
1627 v->pq = pquant_table[1][pqindex];
1630 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1631 v->pquantizer = pqindex < 9;
1632 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1634 v->pqindex = pqindex;
1635 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1637 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1638 v->pquantizer = get_bits(gb, 1);
1640 switch(v->s.pict_type) {
/* I/BI: ACPRED bitplane plus conditional-overlap (CONDOVER) info */
1643 status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
1644 if (status < 0) return -1;
1645 av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
1646 "Imode: %i, Invert: %i\n", status>>1, status&1);
1647 v->condover = CONDOVER_NONE;
1648 if(v->overlap && v->pq <= 8) {
1649 v->condover = decode012(gb);
1650 if(v->condover == CONDOVER_SELECT) {
1651 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
1652 if (status < 0) return -1;
1653 av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
1654 "Imode: %i, Invert: %i\n", status>>1, status&1);
1660 v->postproc = get_bits1(gb);
1661 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1662 else v->mvrange = 0;
1663 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1664 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1665 v->range_x = 1 << (v->k_x - 1);
1666 v->range_y = 1 << (v->k_y - 1);
1668 if (v->pq < 5) v->tt_index = 0;
1669 else if(v->pq < 13) v->tt_index = 1;
1670 else v->tt_index = 2;
1672 lowquant = (v->pq > 12) ? 0 : 1;
1673 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1674 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1676 int scale, shift, i;
1677 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1678 v->lumscale = get_bits(gb, 6);
1679 v->lumshift = get_bits(gb, 6);
1680 /* fill lookup tables for intensity compensation */
1683 shift = (255 - v->lumshift * 2) << 6;
1684 if(v->lumshift > 31)
1687 scale = v->lumscale + 32;
1688 if(v->lumshift > 31)
1689 shift = (v->lumshift - 64) << 6;
1691 shift = v->lumshift << 6;
/* luty/lutuv: per-pixel remap tables, fixed point with 6 frac bits */
1693 for(i = 0; i < 256; i++) {
1694 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1695 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1698 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1699 v->s.quarter_sample = 0;
1700 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1701 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1702 v->s.quarter_sample = 0;
1704 v->s.quarter_sample = 1;
1706 v->s.quarter_sample = 1;
1707 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1709 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1710 v->mv_mode2 == MV_PMODE_MIXED_MV)
1711 || v->mv_mode == MV_PMODE_MIXED_MV)
1713 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1714 if (status < 0) return -1;
1715 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1716 "Imode: %i, Invert: %i\n", status>>1, status&1);
1718 v->mv_type_is_raw = 0;
1719 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1721 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1722 if (status < 0) return -1;
1723 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1724 "Imode: %i, Invert: %i\n", status>>1, status&1);
1726 /* Hopefully this is correct for P frames */
1727 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1728 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1731 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1732 vop_dquant_decoding(v);
1735 v->ttfrm = 0; //FIXME Is that so ?
1738 v->ttmbf = get_bits(gb, 1);
1741 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
/* AC/DC coefficient table selection */
1751 v->c_ac_table_index = decode012(gb);
1752 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1754 v->y_ac_table_index = decode012(gb);
1757 v->s.dc_table_index = get_bits(gb, 1);
1758 if (v->s.pict_type == I_TYPE && v->dquant) {
1759 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1760 vop_dquant_decoding(v);
/* BI frames are handled as B internally from here on */
1764 if(v->s.pict_type == BI_TYPE) {
1765 v->s.pict_type = B_TYPE;
1771 /***********************************************************************/
1773 * @defgroup block VC-1 Block-level functions
1774 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1780 * @brief Get macroblock-level quantizer scale
/* GET_MQUANT(): compute the macroblock quantizer `mquant` according to
 * the sequence-level dquant profile: whole-frame, all-MBs (explicit
 * per-MB MQDIFF), single/double edge, or four edges.  Expects `v`,
 * `gb`, `s` and `mquant`/`mqdiff`/`edges` in the expanding scope.
 * NOTE(review): macro continuation lines are elided in this excerpt;
 * no comments are inserted inside the macro body to keep the
 * backslash continuations intact. */
1782 #define GET_MQUANT() \
1786 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1790 mquant = (get_bits(gb, 1)) ? v->altpq : v->pq; \
1794 mqdiff = get_bits(gb, 3); \
1795 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1796 else mquant = get_bits(gb, 5); \
1799 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1800 edges = 1 << v->dqsbedge; \
1801 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1802 edges = (3 << v->dqsbedge) % 15; \
1803 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
1805 if((edges&1) && !s->mb_x) \
1806 mquant = v->altpq; \
1807 if((edges&2) && s->first_slice_line) \
1808 mquant = v->altpq; \
1809 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1810 mquant = v->altpq; \
1811 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1812 mquant = v->altpq; \
1816 * @def GET_MVDATA(_dmv_x, _dmv_y)
1817 * @brief Get MV differentials
1818 * @see MVDATA decoding from 8.3.5.2, p(1)20
1819 * @param _dmv_x Horizontal differential for decoded MV
1820 * @param _dmv_y Vertical differential for decoded MV
/* GET_MVDATA(_dmv_x, _dmv_y): decode one MVDATA element (8.3.5.2) into
 * horizontal/vertical MV differentials.  index==0 means zero MV;
 * index 35 is the escape (raw k_x/k_y-bit values); index 36 marks an
 * intra block (per the table layout); otherwise the differential is
 * reconstructed from size/offset tables.  Also sets `mb_has_coeffs`.
 * Expects `gb`, `s`, `v`, `index`, `index1`, `val`, `sign` in scope.
 * NOTE(review): continuation lines are elided in this excerpt; no
 * comments are inserted inside the macro body. */
1822 #define GET_MVDATA(_dmv_x, _dmv_y) \
1823 index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
1824 VC1_MV_DIFF_VLC_BITS, 2); \
1827 mb_has_coeffs = 1; \
1830 else mb_has_coeffs = 0; \
1832 if (!index) { _dmv_x = _dmv_y = 0; } \
1833 else if (index == 35) \
1835 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1836 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1838 else if (index == 36) \
1847 if (!s->quarter_sample && index1 == 5) val = 1; \
1849 if(size_table[index1] - val > 0) \
1850 val = get_bits(gb, size_table[index1] - val); \
1852 sign = 0 - (val&1); \
1853 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1856 if (!s->quarter_sample && index1 == 5) val = 1; \
1858 if(size_table[index1] - val > 0) \
1859 val = get_bits(gb, size_table[index1] - val); \
1861 sign = 0 - (val&1); \
1862 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1865 /** Predict and set motion vector
1867 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
/* Predict and set the forward MV for luma block n of the current MB.
 *  n      : block index (0..3); with mv1 set, one MV covers the MB
 *  dmv_x/y: decoded MV differentials (scaled to quad-pel below)
 *  mv1    : nonzero for 1-MV mode (result duplicated to all 4 blocks)
 *  r_x/r_y: MV range for the signed-modulus wrap (4.11)
 *  is_intra: per-block intra flags used by hybrid prediction
 * Median-predicts from neighbours A (above), B (above-corner),
 * C (left), pulls the MV back inside the frame (8.3.5.3.4), applies
 * hybrid prediction (8.3.5.3.5), then stores the final MV.
 * NOTE(review): lines are elided in this excerpt (px/py declarations,
 * some branches), so parts of the flow must be read from the spec. */
1869 int xy, wrap, off = 0;
1874 /* scale MV difference to be quad-pel */
1875 dmv_x <<= 1 - s->quarter_sample;
1876 dmv_y <<= 1 - s->quarter_sample;
1878 wrap = s->b8_stride;
1879 xy = s->block_index[n];
/* intra case (visible here): zero the stored MV */
1882 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1883 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1884 if(mv1) { /* duplicate motion data for 1-MV block */
1885 s->current_picture.motion_val[0][xy + 1][0] = 0;
1886 s->current_picture.motion_val[0][xy + 1][1] = 0;
1887 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1888 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1889 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1890 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
/* gather candidate predictors: C = left, A = above, B = above+off */
1895 C = s->current_picture.motion_val[0][xy - 1];
1896 A = s->current_picture.motion_val[0][xy - wrap];
1898 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1900 //in 4-MV mode different blocks have different B predictor position
1903 off = (s->mb_x > 0) ? -1 : 1;
1906 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1915 B = s->current_picture.motion_val[0][xy - wrap + off];
1917 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
1918 if(s->mb_width == 1) {
1922 px = mid_pred(A[0], B[0], C[0]);
1923 py = mid_pred(A[1], B[1], C[1]);
1925 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
1931 /* Pullback MV as specified in 8.3.5.3.4 */
1934 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
1935 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
1936 X = (s->mb_width << 6) - 4;
1937 Y = (s->mb_height << 6) - 4;
1939 if(qx + px < -60) px = -60 - qx;
1940 if(qy + py < -60) py = -60 - qy;
1942 if(qx + px < -28) px = -28 - qx;
1943 if(qy + py < -28) py = -28 - qy;
1945 if(qx + px > X) px = X - qx;
1946 if(qy + py > Y) py = Y - qy;
1948 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
1949 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
1950 if(is_intra[xy - wrap])
1951 sum = ABS(px) + ABS(py);
1953 sum = ABS(px - A[0]) + ABS(py - A[1]);
/* explicit hybrid-pred bit chooses between A and C as predictor */
1955 if(get_bits1(&s->gb)) {
1963 if(is_intra[xy - 1])
1964 sum = ABS(px) + ABS(py);
1966 sum = ABS(px - C[0]) + ABS(py - C[1]);
1968 if(get_bits1(&s->gb)) {
1978 /* store MV using signed modulus of MV range defined in 4.11 */
1979 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1980 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1981 if(mv1) { /* duplicate motion data for 1-MV block */
1982 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
1983 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
1984 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
1985 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
1986 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
1987 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
1991 /** Motion compensation for direct or interpolated blocks in B-frames
1993 static void vc1_interp_mc(VC1Context *v)
/* Backward-reference motion compensation (averaging into s->dest) for
 * direct/interpolated B-frame macroblocks, using the MV in
 * s->mv[1][0].  Handles edge emulation, grayscale-only decoding and
 * range-reduction rescaling of the source samples.
 * NOTE(review): lines are elided in this excerpt (the edge-emulation
 * condition head and some loop bodies), so read the flow with care. */
1995 MpegEncContext *s = &v->s;
1996 DSPContext *dsp = &v->s.dsp;
1997 uint8_t *srcY, *srcU, *srcV;
1998 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
2000 if(!v->s.next_picture.data[0])return;
/* luma MV in quarter-pel; chroma MV derived by halving with rounding */
2002 mx = s->mv[1][0][0];
2003 my = s->mv[1][0][1];
2004 uvmx = (mx + ((mx & 3) == 3)) >> 1;
2005 uvmy = (my + ((my & 3) == 3)) >> 1;
2006 srcY = s->next_picture.data[0];
2007 srcU = s->next_picture.data[1];
2008 srcV = s->next_picture.data[2];
2010 src_x = s->mb_x * 16 + (mx >> 2);
2011 src_y = s->mb_y * 16 + (my >> 2);
2012 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2013 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2015 src_x = clip( src_x, -16, s->mb_width * 16);
2016 src_y = clip( src_y, -16, s->mb_height * 16);
2017 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
2018 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
2020 srcY += src_y * s->linesize + src_x;
2021 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2022 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2024 /* for grayscale we should not try to read from unknown area */
2025 if(s->flags & CODEC_FLAG_GRAY) {
2026 srcU = s->edge_emu_buffer + 18 * s->linesize;
2027 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* out-of-picture reads go through the edge-emulation buffer */
2031 || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
2032 || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
2033 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
2035 srcY -= s->mspel * (1 + s->linesize);
2036 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
2037 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
2038 srcY = s->edge_emu_buffer;
2039 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
2040 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2041 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
2042 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2045 /* if we deal with range reduction we need to scale source blocks */
2046 if(v->rangeredfrm) {
2048 uint8_t *src, *src2;
2051 for(j = 0; j < 17 + s->mspel*2; j++) {
2052 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
2055 src = srcU; src2 = srcV;
2056 for(j = 0; j < 9; j++) {
2057 for(i = 0; i < 9; i++) {
2058 src[i] = ((src[i] - 128) >> 1) + 128;
2059 src2[i] = ((src2[i] - 128) >> 1) + 128;
2061 src += s->uvlinesize;
2062 src2 += s->uvlinesize;
2065 srcY += s->mspel * (1 + s->linesize);
/* reduce chroma MV to half-pel precision for the bilinear filter */
2069 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
2070 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
2075 dxy = ((my & 1) << 1) | (mx & 1);
/* avg_* tables blend the prediction with what is already in dest */
2077 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
2079 if(s->flags & CODEC_FLAG_GRAY) return;
2080 /* Chroma MC always uses qpel blilinear */
2081 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
2084 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
2085 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Scale a co-located MV component by the B-frame fraction `bfrac`
 * (forward when inv==0, backward when inv!=0); `qs` selects
 * quarter-sample rounding.  A fast path exists when B_FRACTION_DEN is
 * 256 (shift instead of divide).
 * NOTE(review): several lines (the body braces, the computation of
 * `n` from bfrac/inv, and the qs branches) are elided in this
 * excerpt; only the return expressions are visible. */
2088 static always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2092 #if B_FRACTION_DEN==256
2096 return 2 * ((value * n + 255) >> 9);
2097 return (value * n + 128) >> 8;
2100 n -= B_FRACTION_DEN;
2102 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2103 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2107 /** Reconstruct motion vector for B-frame and do motion compensation
2109 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
/* Run motion compensation for one B-frame macroblock.
 *  direct: direct-mode MB (both directions), otherwise `mode` selects
 *          forward / backward / interpolated compensation.
 * When intensity compensation is active (v->use_ic), mv_mode is
 * temporarily forced to MV_PMODE_INTENSITY_COMP around the MC calls
 * and restored from mv_mode2 afterwards.
 * NOTE(review): lines are elided in this excerpt (the vc1_mc_1mv /
 * vc1_interp_mc calls for the direct and interpolated paths are not
 * all visible). */
2112 v->mv_mode2 = v->mv_mode;
2113 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2118 if(v->use_ic) v->mv_mode = v->mv_mode2;
2121 if(mode == BMV_TYPE_INTERPOLATED) {
2124 if(v->use_ic) v->mv_mode = v->mv_mode2;
2128 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
2129 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2130 if(v->use_ic) v->mv_mode = v->mv_mode2;
2133 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
/* Predict and set both forward ([0]) and backward ([1]) MVs for a
 * B-frame macroblock.  In direct mode the MVs are scaled from the
 * co-located MV of the next picture via scale_mv(); otherwise each
 * direction is median-predicted from neighbours A/B/C, pulled back
 * into the picture (8.3.5.3.4) and wrapped to the MV range (4.11).
 * The two hybrid-prediction sections are disabled with `if(0 && ...)`
 * — deliberately compiled out in this revision.
 * NOTE(review): lines are elided in this excerpt (declarations of
 * px/py/qx/qy/X/Y/sum/r_x/r_y and several branch bodies). */
2135 MpegEncContext *s = &v->s;
2136 int xy, wrap, off = 0;
2141 const uint8_t *is_intra = v->mb_type[0];
2145 /* scale MV difference to be quad-pel */
2146 dmv_x[0] <<= 1 - s->quarter_sample;
2147 dmv_y[0] <<= 1 - s->quarter_sample;
2148 dmv_x[1] <<= 1 - s->quarter_sample;
2149 dmv_y[1] <<= 1 - s->quarter_sample;
2151 wrap = s->b8_stride;
2152 xy = s->block_index[0];
/* intra case (visible here): clear stored MVs in both directions */
2155 s->current_picture.motion_val[0][xy][0] =
2156 s->current_picture.motion_val[0][xy][1] =
2157 s->current_picture.motion_val[1][xy][0] =
2158 s->current_picture.motion_val[1][xy][1] = 0;
/* direct mode: derive both MVs from the co-located next-picture MV */
2161 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2162 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2163 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2164 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2166 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2167 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2168 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2169 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
/* ---- forward direction ([0]) prediction ---- */
2173 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2174 C = s->current_picture.motion_val[0][xy - 2];
2175 A = s->current_picture.motion_val[0][xy - wrap*2];
2176 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2177 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
2179 if(!s->first_slice_line) { // predictor A is not out of bounds
2180 if(s->mb_width == 1) {
2184 px = mid_pred(A[0], B[0], C[0]);
2185 py = mid_pred(A[1], B[1], C[1]);
2187 } else if(s->mb_x) { // predictor C is not out of bounds
2193 /* Pullback MV as specified in 8.3.5.3.4 */
/* half-pel units pre-advanced-profile, quarter-pel for advanced */
2196 if(v->profile < PROFILE_ADVANCED) {
2197 qx = (s->mb_x << 5);
2198 qy = (s->mb_y << 5);
2199 X = (s->mb_width << 5) - 4;
2200 Y = (s->mb_height << 5) - 4;
2201 if(qx + px < -28) px = -28 - qx;
2202 if(qy + py < -28) py = -28 - qy;
2203 if(qx + px > X) px = X - qx;
2204 if(qy + py > Y) py = Y - qy;
2206 qx = (s->mb_x << 6);
2207 qy = (s->mb_y << 6);
2208 X = (s->mb_width << 6) - 4;
2209 Y = (s->mb_height << 6) - 4;
2210 if(qx + px < -60) px = -60 - qx;
2211 if(qy + py < -60) py = -60 - qy;
2212 if(qx + px > X) px = X - qx;
2213 if(qy + py > Y) py = Y - qy;
2216 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* intentionally disabled (if(0 && ...)) in this revision */
2217 if(0 && !s->first_slice_line && s->mb_x) {
2218 if(is_intra[xy - wrap])
2219 sum = ABS(px) + ABS(py);
2221 sum = ABS(px - A[0]) + ABS(py - A[1]);
2223 if(get_bits1(&s->gb)) {
2231 if(is_intra[xy - 2])
2232 sum = ABS(px) + ABS(py);
2234 sum = ABS(px - C[0]) + ABS(py - C[1]);
2236 if(get_bits1(&s->gb)) {
2246 /* store MV using signed modulus of MV range defined in 4.11 */
2247 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2248 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* ---- backward direction ([1]) prediction (mirror of the above) ---- */
2250 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2251 C = s->current_picture.motion_val[1][xy - 2];
2252 A = s->current_picture.motion_val[1][xy - wrap*2];
2253 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2254 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
2256 if(!s->first_slice_line) { // predictor A is not out of bounds
2257 if(s->mb_width == 1) {
2261 px = mid_pred(A[0], B[0], C[0]);
2262 py = mid_pred(A[1], B[1], C[1]);
2264 } else if(s->mb_x) { // predictor C is not out of bounds
2270 /* Pullback MV as specified in 8.3.5.3.4 */
2273 if(v->profile < PROFILE_ADVANCED) {
2274 qx = (s->mb_x << 5);
2275 qy = (s->mb_y << 5);
2276 X = (s->mb_width << 5) - 4;
2277 Y = (s->mb_height << 5) - 4;
2278 if(qx + px < -28) px = -28 - qx;
2279 if(qy + py < -28) py = -28 - qy;
2280 if(qx + px > X) px = X - qx;
2281 if(qy + py > Y) py = Y - qy;
2283 qx = (s->mb_x << 6);
2284 qy = (s->mb_y << 6);
2285 X = (s->mb_width << 6) - 4;
2286 Y = (s->mb_height << 6) - 4;
2287 if(qx + px < -60) px = -60 - qx;
2288 if(qy + py < -60) py = -60 - qy;
2289 if(qx + px > X) px = X - qx;
2290 if(qy + py > Y) py = Y - qy;
2293 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* intentionally disabled (if(0 && ...)) in this revision */
2294 if(0 && !s->first_slice_line && s->mb_x) {
2295 if(is_intra[xy - wrap])
2296 sum = ABS(px) + ABS(py);
2298 sum = ABS(px - A[0]) + ABS(py - A[1]);
2300 if(get_bits1(&s->gb)) {
2308 if(is_intra[xy - 2])
2309 sum = ABS(px) + ABS(py);
2311 sum = ABS(px - C[0]) + ABS(py - C[1]);
2313 if(get_bits1(&s->gb)) {
2323 /* store MV using signed modulus of MV range defined in 4.11 */
2325 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2326 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2328 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2329 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2330 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2331 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2334 /** Get predicted DC value for I-frames only
2335 * prediction dir: left=0, top=1
2336 * @param s MpegEncContext
2337 * @param[in] n block index in the current MB
2338 * @param dc_val_ptr Pointer to DC predictor
2339 * @param dir_ptr Prediction direction for use in AC prediction
2341 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2342 int16_t **dc_val_ptr, int *dir_ptr)
/* DC prediction for I-frames: choose left (c) or top (a) predictor
 * using the gradient rule |a-b| <= |b-c|, where b is the top-left
 * neighbour.  Out-of-picture neighbours are seeded from dcpred[scale]
 * (low pq / no overlap) or 0.  Writes the chosen direction to
 * *dir_ptr (left=0, top=1 per the header comment) and the predictor
 * slot to *dc_val_ptr.
 * NOTE(review): lines are elided in this excerpt (the c= read, the
 * dir_ptr assignments and the return). */
2344 int a, b, c, wrap, pred, scale;
/* dcpred[q] ~ 1024/q: default DC predictor per quantizer scale */
2346 static const uint16_t dcpred[32] = {
2347 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2348 114, 102, 93, 85, 79, 73, 68, 64,
2349 60, 57, 54, 51, 49, 47, 45, 43,
2350 41, 39, 38, 37, 35, 34, 33
2353 /* find prediction - wmv3_dc_scale always used here in fact */
2354 if (n < 4) scale = s->y_dc_scale;
2355 else scale = s->c_dc_scale;
2357 wrap = s->block_wrap[n];
2358 dc_val= s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour, a = top neighbour */
2364 b = dc_val[ - 1 - wrap];
2365 a = dc_val[ - wrap];
2367 if (pq < 9 || !overlap)
2369 /* Set outer values */
2370 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2371 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2375 /* Set outer values */
2376 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2377 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
/* gradient test: pick the neighbour along the smoother direction */
2380 if (abs(a - b) <= abs(b - c)) {
2388 /* update predictor */
2389 *dc_val_ptr = &dc_val[0];
2394 /** Get predicted DC value
2395 * prediction dir: left=0, top=1
2396 * @param s MpegEncContext
2397 * @param[in] n block index in the current MB
2398 * @param dc_val_ptr Pointer to DC predictor
2399 * @param dir_ptr Prediction direction for use in AC prediction
2401 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2402 int a_avail, int c_avail,
2403 int16_t **dc_val_ptr, int *dir_ptr)
/* DC prediction for P-frames: like vc1_i_pred_dc() but neighbour
 * availability is explicit (a_avail/c_avail) and each neighbour DC is
 * rescaled when its MB used a different quantizer (q2) than the
 * current one (q1), via the vc1_dqscale fixed-point reciprocal table.
 * Writes prediction direction to *dir_ptr and the predictor slot to
 * *dc_val_ptr.  NOTE(review): lines are elided in this excerpt
 * (the c= read, dir/pred assignments and the return). */
2405 int a, b, c, wrap, pred, scale;
2407 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2410 /* find prediction - wmv3_dc_scale always used here in fact */
2411 if (n < 4) scale = s->y_dc_scale;
2412 else scale = s->c_dc_scale;
2414 wrap = s->block_wrap[n];
2415 dc_val= s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour, a = top neighbour */
2421 b = dc_val[ - 1 - wrap];
2422 a = dc_val[ - wrap];
2423 /* scale predictors if needed */
2424 q1 = s->current_picture.qscale_table[mb_pos];
/* left neighbour (c): rescale q2 -> q1, 18-bit fixed point rounding */
2425 if(c_avail && (n!= 1 && n!=3)) {
2426 q2 = s->current_picture.qscale_table[mb_pos - 1];
2428 c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2430 if(a_avail && (n!= 2 && n!=3)) {
2431 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2433 a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* top-left neighbour (b): its MB position depends on block index n */
2435 if(a_avail && c_avail && (n!=3)) {
2438 if(n != 2) off -= s->mb_stride;
2439 q2 = s->current_picture.qscale_table[off];
2441 b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* choose predictor by gradient when both are available, else
 * whichever neighbour exists */
2444 if(a_avail && c_avail) {
2445 if(abs(a - b) <= abs(b - c)) {
2452 } else if(a_avail) {
2455 } else if(c_avail) {
2463 /* update predictor */
2464 *dc_val_ptr = &dc_val[0];
2470 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2471 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
2475 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
/* Predict the coded-block flag for block n from the left (a),
 * top-left (b) and top (c) neighbours in s->coded_block, and return
 * the slot for the current block in *coded_block_ptr so the caller
 * can store the actual flag.  NOTE(review): the prediction rule
 * combining a/b/c and the return statement are elided in this
 * excerpt. */
2477 int xy, wrap, pred, a, b, c;
2479 xy = s->block_index[n];
2480 wrap = s->b8_stride;
2485 a = s->coded_block[xy - 1 ];
2486 b = s->coded_block[xy - 1 - wrap];
2487 c = s->coded_block[xy - wrap];
2496 *coded_block_ptr = &s->coded_block[xy];
2502 * Decode one AC coefficient
2503 * @param v The VC1 context
2504 * @param last Last coefficient
2505 * @param skip How much zero coefficients to skip
2506 * @param value Decoded AC coefficient value
2509 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
/* Decode one (run, level, last) AC triplet from the bitstream using the
 * VLC tables selected by codingset, handling the three VC-1 escape modes
 * (decode210 selects mode 1/2/3; mode 3 is a fully explicit fixed-length code). */
2511 GetBitContext *gb = &v->s.gb;
2512 int index, escape, run = 0, level = 0, lst = 0;
2514 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
/* non-escape index: run/level/last come straight from the tables */
2515 if (index != vc1_ac_sizes[codingset] - 1) {
2516 run = vc1_index_decode_table[codingset][index][0];
2517 level = vc1_index_decode_table[codingset][index][1];
2518 lst = index >= vc1_last_decode_table[codingset];
2522 escape = decode210(gb);
/* escape modes 1/2: re-read a VLC, then bias level (mode 1) or run (mode 2)
 * by the delta tables */
2524 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2525 run = vc1_index_decode_table[codingset][index][0];
2526 level = vc1_index_decode_table[codingset][index][1];
2527 lst = index >= vc1_last_decode_table[codingset];
2530 level += vc1_last_delta_level_table[codingset][run];
2532 level += vc1_delta_level_table[codingset][run];
2535 run += vc1_last_delta_run_table[codingset][level] + 1;
2537 run += vc1_delta_run_table[codingset][level] + 1;
/* escape mode 3: explicit last/run/sign/level; the field widths are decoded
 * once per frame and cached in esc3_level_length / esc3_run_length */
2543 lst = get_bits(gb, 1);
2544 if(v->s.esc3_level_length == 0) {
2545 if(v->pq < 8 || v->dquantfrm) { // table 59
2546 v->s.esc3_level_length = get_bits(gb, 3);
2547 if(!v->s.esc3_level_length)
2548 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2550 v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
2552 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2554 run = get_bits(gb, v->s.esc3_run_length);
2555 sign = get_bits(gb, 1);
2556 level = get_bits(gb, v->s.esc3_level_length);
2567 /** Decode intra block in intra frames - should be faster than decode_intra_block
2568 * @param v VC1Context
2569 * @param block block to decode
2570 * @param coded are AC coeffs present or not
2571 * @param codingset set of VLC to decode data
2573 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
/* Decode one intra block of an I frame (Simple/Main profile): DC differential,
 * DC prediction, then AC coefficients with optional AC prediction along the
 * DC prediction direction. Writes dequantized coefficients into block[]. */
2575 GetBitContext *gb = &v->s.gb;
2576 MpegEncContext *s = &v->s;
2577 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2580 int16_t *ac_val, *ac_val2;
2583 /* Get DC differential */
/* luma (n<4) and chroma (n>=4) use separate MSMPEG4-style DC VLC tables */
2585 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2587 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2590 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2595 if (dcdiff == 119 /* ESC index value */)
2597 /* TODO: Optimize */
/* escape: raw DC differential; width depends on PQUANT (10/9/8 bits) */
2598 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2599 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2600 else dcdiff = get_bits(gb, 8);
/* for PQUANT 1/2 extra low-order bits refine the differential */
2605 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2606 else if (v->pq == 2)
2607 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2609 if (get_bits(gb, 1))
/* add the I-frame DC predictor; dc_pred_dir is set to the direction used */
2614 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2617 /* Store the quantized DC coeff, used for prediction */
2619 block[0] = dcdiff * s->y_dc_scale;
2621 block[0] = dcdiff * s->c_dc_scale;
/* ---- coded path: decode AC run/level pairs ---- */
2634 int last = 0, skip, value;
2635 const int8_t *zz_table;
2639 scale = v->pq * 2 + v->halfpq;
/* zigzag pattern follows the DC prediction direction */
2643 zz_table = vc1_horizontal_zz;
2645 zz_table = vc1_vertical_zz;
2647 zz_table = vc1_normal_zz;
2649 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2651 if(dc_pred_dir) //left
2654 ac_val -= 16 * s->block_wrap[n];
2657 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2661 block[zz_table[i++]] = value;
2664 /* apply AC prediction if needed */
2666 if(dc_pred_dir) { //left
2667 for(k = 1; k < 8; k++)
2668 block[k << 3] += ac_val[k];
2670 for(k = 1; k < 8; k++)
2671 block[k] += ac_val[k + 8];
2674 /* save AC coeffs for further prediction */
/* ac_val2 layout: [1..7] = first column, [9..15] = first row */
2675 for(k = 1; k < 8; k++) {
2676 ac_val2[k] = block[k << 3];
2677 ac_val2[k + 8] = block[k];
2680 /* scale AC coeffs */
2681 for(k = 1; k < 64; k++)
2685 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2688 if(s->ac_pred) i = 63;
/* ---- not-coded path: block is pure prediction ---- */
2694 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2697 scale = v->pq * 2 + v->halfpq;
2698 memset(ac_val2, 0, 16 * 2);
2699 if(dc_pred_dir) {//left
2702 memcpy(ac_val2, ac_val, 8 * 2);
2704 ac_val -= 16 * s->block_wrap[n];
2706 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2709 /* apply AC prediction if needed */
2711 if(dc_pred_dir) { //left
2712 for(k = 1; k < 8; k++) {
/* non-uniform quantizer adds a pq-sized dead-zone offset */
2713 block[k << 3] = ac_val[k] * scale;
2714 if(!v->pquantizer && block[k << 3])
2715 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2718 for(k = 1; k < 8; k++) {
2719 block[k] = ac_val[k + 8] * scale;
2720 if(!v->pquantizer && block[k])
2721 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2727 s->block_last_index[n] = i;
2732 /** Decode intra block in intra frames - should be faster than decode_intra_block
2733 * @param v VC1Context
2734 * @param block block to decode
2735 * @param coded are AC coeffs present or not
2736 * @param codingset set of VLC to decode data
2738 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
/* Decode one intra block of an Advanced-profile I frame. Like
 * vc1_decode_i_block() but quantizer is the per-MB mquant, neighbour
 * availability (a_avail/c_avail) is explicit, and AC predictors from a
 * neighbour with a different quantizer are rescaled via vc1_dqscale[]. */
2740 GetBitContext *gb = &v->s.gb;
2741 MpegEncContext *s = &v->s;
2742 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2745 int16_t *ac_val, *ac_val2;
2747 int a_avail = v->a_avail, c_avail = v->c_avail;
2748 int use_pred = s->ac_pred;
2751 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2753 /* Get DC differential */
2755 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2757 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2760 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2765 if (dcdiff == 119 /* ESC index value */)
2767 /* TODO: Optimize */
/* escape DC width depends on mquant here (not the frame pq) */
2768 if (mquant == 1) dcdiff = get_bits(gb, 10);
2769 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2770 else dcdiff = get_bits(gb, 8);
2775 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2776 else if (mquant == 2)
2777 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2779 if (get_bits(gb, 1))
2784 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2787 /* Store the quantized DC coeff, used for prediction */
2789 block[0] = dcdiff * s->y_dc_scale;
2791 block[0] = dcdiff * s->c_dc_scale;
2800 /* check if AC is needed at all and adjust direction if needed */
2801 if(!a_avail) dc_pred_dir = 1;
2802 if(!c_avail) dc_pred_dir = 0;
2803 if(!a_avail && !c_avail) use_pred = 0;
2804 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2807 scale = mquant * 2 + v->halfpq;
2809 if(dc_pred_dir) //left
2812 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer, q2 = predictor MB's quantizer;
 * inner luma blocks (0 < n < 4) predict within the same MB, so q2 = q1 */
2814 q1 = s->current_picture.qscale_table[mb_pos];
2815 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
2816 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2817 if(n && n<4) q2 = q1;
2820 int last = 0, skip, value;
2821 const int8_t *zz_table;
2826 zz_table = vc1_horizontal_zz;
2828 zz_table = vc1_vertical_zz;
2830 zz_table = vc1_normal_zz;
2833 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2837 block[zz_table[i++]] = value;
2840 /* apply AC prediction if needed */
2842 /* scale predictors if needed*/
/* rescale neighbour's AC values from q2 to q1 (fixed-point reciprocal) */
2847 if(dc_pred_dir) { //left
2848 for(k = 1; k < 8; k++)
2849 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2851 for(k = 1; k < 8; k++)
2852 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2855 if(dc_pred_dir) { //left
2856 for(k = 1; k < 8; k++)
2857 block[k << 3] += ac_val[k];
2859 for(k = 1; k < 8; k++)
2860 block[k] += ac_val[k + 8];
2864 /* save AC coeffs for further prediction */
2865 for(k = 1; k < 8; k++) {
2866 ac_val2[k] = block[k << 3];
2867 ac_val2[k + 8] = block[k];
2870 /* scale AC coeffs */
2871 for(k = 1; k < 64; k++)
2875 block[k] += (block[k] < 0) ? -mquant : mquant;
2878 if(use_pred) i = 63;
2879 } else { // no AC coeffs
/* not-coded: copy (and if needed rescale) the predictor row/column */
2882 memset(ac_val2, 0, 16 * 2);
2883 if(dc_pred_dir) {//left
2885 memcpy(ac_val2, ac_val, 8 * 2);
2889 for(k = 1; k < 8; k++)
2890 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2895 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2899 for(k = 1; k < 8; k++)
2900 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2905 /* apply AC prediction if needed */
2907 if(dc_pred_dir) { //left
2908 for(k = 1; k < 8; k++) {
2909 block[k << 3] = ac_val2[k] * scale;
2910 if(!v->pquantizer && block[k << 3])
2911 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
2914 for(k = 1; k < 8; k++) {
2915 block[k] = ac_val2[k + 8] * scale;
2916 if(!v->pquantizer && block[k])
2917 block[k] += (block[k] < 0) ? -mquant : mquant;
2923 s->block_last_index[n] = i;
2928 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2929 * @param v VC1Context
2930 * @param block block to decode
2931 * @param coded are AC coeffs present or not
2932 * @param mquant block quantizer
2933 * @param codingset set of VLC to decode data
2935 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
/* Decode an intra-coded block inside a P/B frame. Structurally parallel to
 * vc1_decode_i_block_adv(), but clamps mquant, sets the DC scale itself,
 * and always uses the simple-progressive 8x8 zigzag for the AC scan. */
2937 GetBitContext *gb = &v->s.gb;
2938 MpegEncContext *s = &v->s;
2939 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2942 int16_t *ac_val, *ac_val2;
2944 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2945 int a_avail = v->a_avail, c_avail = v->c_avail;
2946 int use_pred = s->ac_pred;
2950 /* XXX: Guard against dumb values of mquant */
2951 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
2953 /* Set DC scale - y and c use the same */
2954 s->y_dc_scale = s->y_dc_scale_table[mquant];
2955 s->c_dc_scale = s->c_dc_scale_table[mquant];
2957 /* Get DC differential */
2959 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2961 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2964 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2969 if (dcdiff == 119 /* ESC index value */)
2971 /* TODO: Optimize */
2972 if (mquant == 1) dcdiff = get_bits(gb, 10);
2973 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2974 else dcdiff = get_bits(gb, 8);
2979 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2980 else if (mquant == 2)
2981 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2983 if (get_bits(gb, 1))
2988 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2991 /* Store the quantized DC coeff, used for prediction */
2994 block[0] = dcdiff * s->y_dc_scale;
2996 block[0] = dcdiff * s->c_dc_scale;
3005 /* check if AC is needed at all and adjust direction if needed */
3006 if(!a_avail) dc_pred_dir = 1;
3007 if(!c_avail) dc_pred_dir = 0;
3008 if(!a_avail && !c_avail) use_pred = 0;
3009 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3012 scale = mquant * 2 + v->halfpq;
3014 if(dc_pred_dir) //left
3017 ac_val -= 16 * s->block_wrap[n];
/* q1/q2: current vs predictor-MB quantizer, as in vc1_decode_i_block_adv() */
3019 q1 = s->current_picture.qscale_table[mb_pos];
3020 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
3021 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3022 if(n && n<4) q2 = q1;
3025 int last = 0, skip, value;
3026 const int8_t *zz_table;
/* inter-frame intra blocks always use the progressive 8x8 scan */
3029 zz_table = vc1_simple_progressive_8x8_zz;
3032 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3036 block[zz_table[i++]] = value;
3039 /* apply AC prediction if needed */
3041 /* scale predictors if needed*/
3046 if(dc_pred_dir) { //left
3047 for(k = 1; k < 8; k++)
3048 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3050 for(k = 1; k < 8; k++)
3051 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3054 if(dc_pred_dir) { //left
3055 for(k = 1; k < 8; k++)
3056 block[k << 3] += ac_val[k];
3058 for(k = 1; k < 8; k++)
3059 block[k] += ac_val[k + 8];
3063 /* save AC coeffs for further prediction */
3064 for(k = 1; k < 8; k++) {
3065 ac_val2[k] = block[k << 3];
3066 ac_val2[k + 8] = block[k];
3069 /* scale AC coeffs */
3070 for(k = 1; k < 64; k++)
3074 block[k] += (block[k] < 0) ? -mquant : mquant;
3077 if(use_pred) i = 63;
3078 } else { // no AC coeffs
3081 memset(ac_val2, 0, 16 * 2);
3082 if(dc_pred_dir) {//left
3084 memcpy(ac_val2, ac_val, 8 * 2);
3088 for(k = 1; k < 8; k++)
3089 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3094 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3098 for(k = 1; k < 8; k++)
3099 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3104 /* apply AC prediction if needed */
3106 if(dc_pred_dir) { //left
3107 for(k = 1; k < 8; k++) {
3108 block[k << 3] = ac_val2[k] * scale;
3109 if(!v->pquantizer && block[k << 3])
3110 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3113 for(k = 1; k < 8; k++) {
3114 block[k] = ac_val2[k + 8] * scale;
3115 if(!v->pquantizer && block[k])
3116 block[k] += (block[k] < 0) ? -mquant : mquant;
3122 s->block_last_index[n] = i;
3129 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block)
/* Decode one inter (residual) block: pick the transform type (8x8, 2x 8x4,
 * 2x 4x8 or 4x 4x4), decode AC coefficients per sub-block, dequantize, and
 * run the matching inverse transform in place. */
3131 MpegEncContext *s = &v->s;
3132 GetBitContext *gb = &s->gb;
3135 int scale, off, idx, last, skip, value;
3136 int ttblk = ttmb & 7;
3139 ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3141 if(ttblk == TT_4X4) {
/* SUBBLKPAT VLC yields a 4-bit coded-subblock pattern (inverted encoding) */
3142 subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3144 if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
3145 subblkpat = decode012(gb);
3146 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
3147 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
3148 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
3150 scale = 2 * mquant + v->halfpq;
3152 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3153 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3154 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3157 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3158 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* ---- TT_8X8: single transform over the whole block ---- */
3166 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3170 idx = vc1_simple_progressive_8x8_zz[i++];
3171 block[idx] = value * scale;
3173 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3175 s->dsp.vc1_inv_trans_8x8(block);
/* ---- TT_4X4: four 4x4 sub-blocks, gated by subblkpat ---- */
3178 for(j = 0; j < 4; j++) {
3179 last = subblkpat & (1 << (3 - j));
3181 off = (j & 1) * 4 + (j & 2) * 16;
3183 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3187 idx = vc1_simple_progressive_4x4_zz[i++];
3188 block[idx + off] = value * scale;
3190 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3192 if(!(subblkpat & (1 << (3 - j))))
3193 s->dsp.vc1_inv_trans_4x4(block, j);
/* ---- TT_8X4: top/bottom 8x4 halves; scan differs per profile ---- */
3197 for(j = 0; j < 2; j++) {
3198 last = subblkpat & (1 << (1 - j));
3202 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3206 if(v->profile < PROFILE_ADVANCED)
3207 idx = vc1_simple_progressive_8x4_zz[i++];
3209 idx = vc1_adv_progressive_8x4_zz[i++];
3210 block[idx + off] = value * scale;
3212 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3214 if(!(subblkpat & (1 << (1 - j))))
3215 s->dsp.vc1_inv_trans_8x4(block, j);
/* ---- TT_4X8: left/right 4x8 halves ---- */
3219 for(j = 0; j < 2; j++) {
3220 last = subblkpat & (1 << (1 - j));
3224 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3228 if(v->profile < PROFILE_ADVANCED)
3229 idx = vc1_simple_progressive_4x8_zz[i++];
3231 idx = vc1_adv_progressive_4x8_zz[i++];
3232 block[idx + off] = value * scale;
3234 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3236 if(!(subblkpat & (1 << (1 - j))))
3237 s->dsp.vc1_inv_trans_4x8(block, j);
3245 /** Decode one P-frame MB (in Simple/Main profile)
3247 static int vc1_decode_p_mb(VC1Context *v)
/* Decode one macroblock of a P frame: handles the 1MV and 4MV modes, the
 * skipped-MB shortcuts, per-block intra/inter decisions, motion compensation,
 * residual decoding and the in-loop overlap filter. */
3249 MpegEncContext *s = &v->s;
3250 GetBitContext *gb = &s->gb;
3252 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3253 int cbp; /* cbp decoding stuff */
3254 int mqdiff, mquant; /* MB quantization */
3255 int ttmb = v->ttfrm; /* MB Transform type */
/* size/offset tables for differential MV decoding (GET_MVDATA) */
3258 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3259 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3260 int mb_has_coeffs = 1; /* last_flag */
3261 int dmv_x, dmv_y; /* Differential MV components */
3262 int index, index1; /* LUT indices */
3263 int val, sign; /* temp values */
3264 int first_block = 1;
3266 int skipped, fourmv;
3268 mquant = v->pq; /* Lousy (provisional) initialization */
/* MVTYPEMB / SKIPMB flags: either raw in the bitstream or from bitplanes */
3270 if (v->mv_type_is_raw)
3271 fourmv = get_bits1(gb);
3273 fourmv = v->mv_type_mb_plane[mb_pos];
3275 skipped = get_bits1(gb);
3277 skipped = v->s.mbskip_table[mb_pos];
3279 s->dsp.clear_blocks(s->block[0]);
3281 if (!fourmv) /* 1MV mode */
/* -------- 1MV, not skipped -------- */
3285 GET_MVDATA(dmv_x, dmv_y);
3288 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3289 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3291 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3292 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3294 /* FIXME Set DC val for inter block ? */
3295 if (s->mb_intra && !mb_has_coeffs)
3298 s->ac_pred = get_bits(gb, 1);
3301 else if (mb_has_coeffs)
3303 if (s->mb_intra) s->ac_pred = get_bits(gb, 1);
3304 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3312 s->current_picture.qscale_table[mb_pos] = mquant;
3314 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3315 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table,
3316 VC1_TTMB_VLC_BITS, 2);
3317 if(!s->mb_intra) vc1_mc_1mv(v, 0);
/* per-block loop: decode residuals, reconstruct, overlap-filter */
3321 s->dc_val[0][s->block_index[i]] = 0;
3323 val = ((cbp >> (5 - i)) & 1);
3324 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3325 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3327 /* check if prediction blocks A and C are available */
3328 v->a_avail = v->c_avail = 0;
3329 if(i == 2 || i == 3 || !s->first_slice_line)
3330 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3331 if(i == 1 || i == 3 || s->mb_x)
3332 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3334 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3335 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3336 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3337 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3338 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3339 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3340 if(v->pq >= 9 && v->overlap) {
3342 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
3344 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
3347 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3348 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3350 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3351 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* -------- 1MV, skipped: just predict MV and motion-compensate -------- */
3358 for(i = 0; i < 6; i++) {
3359 v->mb_type[0][s->block_index[i]] = 0;
3360 s->dc_val[0][s->block_index[i]] = 0;
3362 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3363 s->current_picture.qscale_table[mb_pos] = 0;
3364 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
/* -------- 4MV mode -------- */
3371 if (!skipped /* unskipped MB */)
3373 int intra_count = 0, coded_inter = 0;
3374 int is_intra[6], is_coded[6];
3376 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* first pass: per-luma-block MV decode + intra/coded bookkeeping */
3379 val = ((cbp >> (5 - i)) & 1);
3380 s->dc_val[0][s->block_index[i]] = 0;
3387 GET_MVDATA(dmv_x, dmv_y);
3389 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3390 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3391 intra_count += s->mb_intra;
3392 is_intra[i] = s->mb_intra;
3393 is_coded[i] = mb_has_coeffs;
/* chroma blocks follow the majority intra decision of the luma blocks */
3396 is_intra[i] = (intra_count >= 3);
3399 if(i == 4) vc1_mc_4mv_chroma(v);
3400 v->mb_type[0][s->block_index[i]] = is_intra[i];
3401 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
3403 // if there are no coded blocks then don't do anything more
3404 if(!intra_count && !coded_inter) return 0;
3407 s->current_picture.qscale_table[mb_pos] = mquant;
3408 /* test if block is intra and has pred */
3413 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3414 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
3419 if(intrapred)s->ac_pred = get_bits(gb, 1);
3420 else s->ac_pred = 0;
3422 if (!v->ttmbf && coded_inter)
3423 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* second pass: residual decode and reconstruction per block */
3427 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3428 s->mb_intra = is_intra[i];
3430 /* check if prediction blocks A and C are available */
3431 v->a_avail = v->c_avail = 0;
3432 if(i == 2 || i == 3 || !s->first_slice_line)
3433 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3434 if(i == 1 || i == 3 || s->mb_x)
3435 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3437 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
3438 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3439 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3440 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3441 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3442 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3443 if(v->pq >= 9 && v->overlap) {
3445 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
3447 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
3449 } else if(is_coded[i]) {
3450 status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3451 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3453 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3454 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* -------- 4MV, skipped: MC with zero differentials -------- */
3462 s->current_picture.qscale_table[mb_pos] = 0;
3463 for (i=0; i<6; i++) {
3464 v->mb_type[0][s->block_index[i]] = 0;
3465 s->dc_val[0][s->block_index[i]] = 0;
3469 vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
3470 vc1_mc_4mv_luma(v, i);
3472 vc1_mc_4mv_chroma(v);
3473 s->current_picture.qscale_table[mb_pos] = 0;
3478 /* Should never happen */
3482 /** Decode one B-frame MB (in Main profile)
3484 static void vc1_decode_b_mb(VC1Context *v)
/* Decode one macroblock of a B frame: direct vs explicit MV modes, the
 * forward/backward/interpolated BMV types, skipped-MB handling, then the
 * same per-block residual/reconstruction loop as P frames. */
3486 MpegEncContext *s = &v->s;
3487 GetBitContext *gb = &s->gb;
3489 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3490 int cbp = 0; /* cbp decoding stuff */
3491 int mqdiff, mquant; /* MB quantization */
3492 int ttmb = v->ttfrm; /* MB Transform type */
3494 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3495 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3496 int mb_has_coeffs = 0; /* last_flag */
3497 int index, index1; /* LUT indices */
3498 int val, sign; /* temp values */
3499 int first_block = 1;
3501 int skipped, direct;
3502 int dmv_x[2], dmv_y[2];
3503 int bmvtype = BMV_TYPE_BACKWARD;
3505 mquant = v->pq; /* Lousy (provisional) initialization */
/* DIRECTMB / SKIPMB: raw bits or bitplane, as signalled per frame */
3509 direct = get_bits1(gb);
3511 direct = v->direct_mb_plane[mb_pos];
3513 skipped = get_bits1(gb);
3515 skipped = v->s.mbskip_table[mb_pos];
3517 s->dsp.clear_blocks(s->block[0]);
3518 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
3519 for(i = 0; i < 6; i++) {
3520 v->mb_type[0][s->block_index[i]] = 0;
3521 s->dc_val[0][s->block_index[i]] = 0;
3523 s->current_picture.qscale_table[mb_pos] = 0;
/* non-direct: decode one MV pair; both directions start from the same diff */
3527 GET_MVDATA(dmv_x[0], dmv_y[0]);
3528 dmv_x[1] = dmv_x[0];
3529 dmv_y[1] = dmv_y[0];
3531 if(skipped || !s->mb_intra) {
3532 bmvtype = decode012(gb);
/* BFRACTION >= 1/2 flips which reference the short code refers to */
3535 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
3538 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
3541 bmvtype = BMV_TYPE_INTERPOLATED;
3542 dmv_x[0] = dmv_y[0] = 0;
3546 for(i = 0; i < 6; i++)
3547 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* skipped MB: predict and motion-compensate, no residual */
3550 if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
3551 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3552 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
/* direct mode: CBPCY then MQUANT/TTMB, MVs derived from the co-located MB */
3556 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3560 s->current_picture.qscale_table[mb_pos] = mquant;
3562 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3563 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
3564 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3565 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3567 if(!mb_has_coeffs && !s->mb_intra) {
3568 /* no coded blocks - effectively skipped */
3569 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3570 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3573 if(s->mb_intra && !mb_has_coeffs) {
3575 s->current_picture.qscale_table[mb_pos] = mquant;
3576 s->ac_pred = get_bits1(gb);
3578 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3580 if(bmvtype == BMV_TYPE_INTERPOLATED) {
/* interpolated: a second MV differential follows in the bitstream */
3581 GET_MVDATA(dmv_x[0], dmv_y[0]);
3582 if(!mb_has_coeffs) {
3583 /* interpolated skipped block */
3584 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3585 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3589 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3591 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3594 s->ac_pred = get_bits1(gb);
3595 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3597 s->current_picture.qscale_table[mb_pos] = mquant;
3598 if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3599 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* per-block residual decode and reconstruction (mirrors vc1_decode_p_mb) */
3605 s->dc_val[0][s->block_index[i]] = 0;
3607 val = ((cbp >> (5 - i)) & 1);
3608 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3609 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3611 /* check if prediction blocks A and C are available */
3612 v->a_avail = v->c_avail = 0;
3613 if(i == 2 || i == 3 || !s->first_slice_line)
3614 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3615 if(i == 1 || i == 3 || s->mb_x)
3616 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3618 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3619 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3620 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3621 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3622 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3623 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3625 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3626 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3628 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3629 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3634 /** Decode blocks of I-frame
3636 static void vc1_decode_i_blocks(VC1Context *v)
/* Decode all macroblocks of a Simple/Main-profile I frame: select VLC coding
 * sets, then per MB decode CBPCY + 6 intra blocks, reconstruct, and apply
 * the overlap smoothing filter when pq >= 9 and OVERLAP is on. */
3639 MpegEncContext *s = &v->s;
3644 /* select codingmode used for VLC tables selection */
3645 switch(v->y_ac_table_index){
3647 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3650 v->codingset = CS_HIGH_MOT_INTRA;
3653 v->codingset = CS_MID_RATE_INTRA;
3657 switch(v->c_ac_table_index){
3659 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3662 v->codingset2 = CS_HIGH_MOT_INTER;
3665 v->codingset2 = CS_MID_RATE_INTER;
3669 /* Set DC scale - y and c use the same */
3670 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3671 s->c_dc_scale = s->c_dc_scale_table[v->pq];
3674 s->mb_x = s->mb_y = 0;
3676 s->first_slice_line = 1;
3677 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3678 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3679 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3680 ff_init_block_index(s);
3681 ff_update_block_index(s);
3682 s->dsp.clear_blocks(s->block[0]);
/* NOTE(review): mb_width is used as the stride here, while the advanced-
 * profile variant below uses mb_stride — verify against the full source */
3683 mb_pos = s->mb_x + s->mb_y * s->mb_width;
3684 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3685 s->current_picture.qscale_table[mb_pos] = v->pq;
3686 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3687 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3689 // do actual MB decoding and displaying
3690 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
3691 v->s.ac_pred = get_bits(&v->s.gb, 1);
3693 for(k = 0; k < 6; k++) {
3694 val = ((cbp >> (5 - k)) & 1);
/* luma CBP bits are predicted from neighbouring coded_block entries */
3697 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3701 cbp |= val << (5 - k);
3703 vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
3705 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3706 if(v->pq >= 9 && v->overlap) {
3707 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3711 vc1_put_block(v, s->block);
/* overlap smoothing: vertical edges first, then horizontal */
3712 if(v->pq >= 9 && v->overlap) {
3713 if(!s->first_slice_line) {
3714 s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
3715 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
3716 if(!(s->flags & CODEC_FLAG_GRAY)) {
3717 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
3718 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
3721 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
3722 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3724 s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
3725 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
3726 if(!(s->flags & CODEC_FLAG_GRAY)) {
3727 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
3728 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
3731 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
3732 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
/* bail out if we read past the announced frame size */
3735 if(get_bits_count(&s->gb) > v->bits) {
3736 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3740 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3741 s->first_slice_line = 0;
3745 /** Decode blocks of I-frame for advanced profile
3747 static void vc1_decode_i_blocks_adv(VC1Context *v)
/* Decode all macroblocks of an Advanced-profile I frame. Differs from
 * vc1_decode_i_blocks() in: ACPRED and conditional-overlap flags may come
 * from bitplanes, per-MB quantizer (mquant) is used, and mb_stride indexes
 * the picture tables. */
3750 MpegEncContext *s = &v->s;
3757 GetBitContext *gb = &s->gb;
3759 /* select codingmode used for VLC tables selection */
3760 switch(v->y_ac_table_index){
3762 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3765 v->codingset = CS_HIGH_MOT_INTRA;
3768 v->codingset = CS_MID_RATE_INTRA;
3772 switch(v->c_ac_table_index){
3774 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3777 v->codingset2 = CS_HIGH_MOT_INTER;
3780 v->codingset2 = CS_MID_RATE_INTER;
3784 /* Set DC scale - y and c use the same */
3785 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3786 s->c_dc_scale = s->c_dc_scale_table[v->pq];
3789 s->mb_x = s->mb_y = 0;
3791 s->first_slice_line = 1;
3792 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3793 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3794 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3795 ff_init_block_index(s);
3796 ff_update_block_index(s);
3797 s->dsp.clear_blocks(s->block[0]);
3798 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3799 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3800 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3801 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3803 // do actual MB decoding and displaying
3804 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
/* ACPRED: raw bit or bitplane, depending on frame header */
3805 if(v->acpred_is_raw)
3806 v->s.ac_pred = get_bits(&v->s.gb, 1);
3808 v->s.ac_pred = v->acpred_plane[mb_pos];
/* conditional overlap: per-MB flag only in CONDOVER_SELECT mode */
3810 if(v->condover == CONDOVER_SELECT) {
3811 if(v->overflg_is_raw)
3812 overlap = get_bits(&v->s.gb, 1);
3814 overlap = v->over_flags_plane[mb_pos];
3816 overlap = (v->condover == CONDOVER_ALL);
3820 s->current_picture.qscale_table[mb_pos] = mquant;
3822 for(k = 0; k < 6; k++) {
3823 val = ((cbp >> (5 - k)) & 1);
3826 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3830 cbp |= val << (5 - k);
/* neighbour availability: top row/left column exist except at frame edges */
3832 v->a_avail = !s->first_slice_line || (k==2 || k==3);
3833 v->c_avail = !!s->mb_x || (k==1 || k==3);
3835 vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
3837 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3838 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3841 vc1_put_block(v, s->block);
/* overlap smoothing, gated by the per-MB overlap flag (elided condition) */
3843 if(!s->first_slice_line) {
3844 s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
3845 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
3846 if(!(s->flags & CODEC_FLAG_GRAY)) {
3847 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
3848 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
3851 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
3852 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3854 s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
3855 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
3856 if(!(s->flags & CODEC_FLAG_GRAY)) {
3857 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
3858 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
3861 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
3862 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3865 if(get_bits_count(&s->gb) > v->bits) {
3866 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3870 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3871 s->first_slice_line = 0;
/**
 * Decode the macroblocks of a P frame.
 *
 * First selects the AC coding sets used for VLC table lookup:
 * v->codingset for intra blocks and v->codingset2 for inter blocks,
 * both keyed off v->c_ac_table_index (v->pqindex additionally selects
 * between the high-rate and low-motion sets for index 0).
 * The whole frame is registered as a single error-resilience slice,
 * then the macroblock grid is walked row by row; decoding aborts with
 * an error log if the bitstream reader has consumed more than v->bits
 * bits (or its counter went negative).
 *
 * NOTE(review): this excerpt is missing interleaved source lines
 * (switch case labels, break statements, closing braces and the actual
 * per-MB decode call), so the control flow shown here is incomplete.
 */
3875 static void vc1_decode_p_blocks(VC1Context *v)
3877 MpegEncContext *s = &v->s;
3879 /* select codingmode used for VLC tables selection */
3880 switch(v->c_ac_table_index){
3882 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3885 v->codingset = CS_HIGH_MOT_INTRA;
3888 v->codingset = CS_MID_RATE_INTRA;
/* second switch picks the inter-block coding set the same way */
3892 switch(v->c_ac_table_index){
3894 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3897 v->codingset2 = CS_HIGH_MOT_INTER;
3900 v->codingset2 = CS_MID_RATE_INTER;
/* mark the entire frame as one slice for the error resilience code */
3904 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3905 s->first_slice_line = 1;
3906 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3907 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3908 ff_init_block_index(s);
3909 ff_update_block_index(s);
3910 s->dsp.clear_blocks(s->block[0]);
/* the "< 0" test guards against the bit counter wrapping around */
3913 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3914 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
/* emit the finished 16-pixel-high macroblock row for display */
3918 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3919 s->first_slice_line = 0;
/**
 * Decode the macroblocks of a B frame.
 *
 * Structurally identical to vc1_decode_p_blocks(): choose the intra
 * (codingset) and inter (codingset2) AC coding sets from
 * v->c_ac_table_index, register the frame as one error-resilience
 * slice, then iterate the MB grid with the same bit-overconsumption
 * guard.
 *
 * NOTE(review): interleaved source lines (case labels, breaks, closing
 * braces, the per-MB decode call) are missing from this excerpt.
 */
3923 static void vc1_decode_b_blocks(VC1Context *v)
3925 MpegEncContext *s = &v->s;
3927 /* select codingmode used for VLC tables selection */
3928 switch(v->c_ac_table_index){
3930 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3933 v->codingset = CS_HIGH_MOT_INTRA;
3936 v->codingset = CS_MID_RATE_INTRA;
/* second switch picks the inter-block coding set */
3940 switch(v->c_ac_table_index){
3942 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3945 v->codingset2 = CS_HIGH_MOT_INTER;
3948 v->codingset2 = CS_MID_RATE_INTER;
/* whole frame is one slice for error resilience purposes */
3952 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3953 s->first_slice_line = 1;
3954 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3955 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3956 ff_init_block_index(s);
3957 ff_update_block_index(s);
3958 s->dsp.clear_blocks(s->block[0]);
/* abort if the reader consumed more bits than the packet contains */
3961 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3962 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3966 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3967 s->first_slice_line = 0;
/**
 * Reconstruct a skipped P frame by copying the previous picture.
 *
 * For each macroblock row, one 16-line luma stripe and the matching
 * 8-line chroma stripes are memcpy'd from s->last_picture into the
 * current destination planes, and the row is handed to the display
 * callback. Afterwards the frame is tagged as a P frame so the
 * reference-picture bookkeeping treats it like a normal P picture.
 *
 * NOTE(review): some lines are missing from this excerpt (e.g. the
 * s->mb_x reset before ff_init_block_index and the loop's closing
 * brace), so the visible body is incomplete.
 */
3971 static void vc1_decode_skip_blocks(VC1Context *v)
3973 MpegEncContext *s = &v->s;
3975 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3976 s->first_slice_line = 1;
3977 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3979 ff_init_block_index(s);
3980 ff_update_block_index(s);
/* copy a full MB row: 16 luma lines, 8 lines per chroma plane */
3981 memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
3982 memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3983 memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3984 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3985 s->first_slice_line = 0;
/* a skipped frame is still accounted for as a P picture */
3987 s->pict_type = P_TYPE;
/**
 * Dispatch macroblock decoding according to the picture type.
 *
 * Resets the escape-mode level length (esc3_level_length) so each
 * frame starts with fresh escape coding state, then calls the I/P/B
 * specific decoder; I frames use the advanced-profile variant when
 * v->profile == PROFILE_ADVANCED, and skipped P frames are handled by
 * vc1_decode_skip_blocks().
 *
 * NOTE(review): the switch's case labels and break statements are
 * missing from this excerpt; the pairing of branches to picture types
 * is inferred from the calls and should be confirmed against the full
 * file.
 */
3990 static void vc1_decode_blocks(VC1Context *v)
3993 v->s.esc3_level_length = 0;
3995 switch(v->s.pict_type) {
3997 if(v->profile == PROFILE_ADVANCED)
3998 vc1_decode_i_blocks_adv(v);
4000 vc1_decode_i_blocks(v);
4003 if(v->p_frame_skipped)
4004 vc1_decode_skip_blocks(v);
4006 vc1_decode_p_blocks(v);
/* B pictures: intra-coded fallback path, then the B-frame decoder */
4010 vc1_decode_i_blocks(v);
4012 vc1_decode_b_blocks(v);
4018 /** Initialize a VC1/WMV3 decoder
4019 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4020 * @todo TODO: Decipher remaining bits in extra_data
4022 static int vc1_decode_init(AVCodecContext *avctx)
4024 VC1Context *v = avctx->priv_data;
4025 MpegEncContext *s = &v->s;
/* a sequence header in extradata is mandatory for this decoder */
4028 if (!avctx->extradata_size || !avctx->extradata) return -1;
4029 if (!(avctx->flags & CODEC_FLAG_GRAY))
4030 avctx->pix_fmt = PIX_FMT_YUV420P;
4032 avctx->pix_fmt = PIX_FMT_GRAY8;
/* edge emulation is forced on; the decoder never pads its references */
4034 avctx->flags |= CODEC_FLAG_EMU_EDGE;
4035 v->s.flags |= CODEC_FLAG_EMU_EDGE;
4037 if(ff_h263_decode_init(avctx) < 0)
4039 if (vc1_init_common(v) < 0) return -1;
4041 avctx->coded_width = avctx->width;
4042 avctx->coded_height = avctx->height;
4043 if (avctx->codec_id == CODEC_ID_WMV3)
4047 // looks like WMV3 has a sequence header stored in the extradata
4048 // advanced sequence header may be before the first frame
4049 // the last byte of the extradata is a version number, 1 for the
4050 // samples we can decode
4052 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
4054 if (decode_sequence_header(avctx, &gb) < 0)
/* report (or warn about) any bits left over after the header */
4057 count = avctx->extradata_size*8 - get_bits_count(&gb);
4060 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
4061 count, get_bits(&gb, count));
4065 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
4067 } else { // VC1/WVC1
4068 int edata_size = avctx->extradata_size;
4069 uint8_t *edata = avctx->extradata;
4071 if(avctx->extradata_size < 16) {
4072 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", edata_size);
/* scan extradata for the 0x0000010F sequence-header start code */
4075 while(edata_size > 8) {
4076 // test if we've found header
4077 if(BE_32(edata) == 0x0000010F) {
4086 init_get_bits(&gb, edata, edata_size*8);
4088 if (decode_sequence_header(avctx, &gb) < 0)
/* then scan for the 0x0000010E entry-point start code */
4091 while(edata_size > 8) {
4092 // test if we've found entry point
4093 if(BE_32(edata) == 0x0000010E) {
4102 init_get_bits(&gb, edata, edata_size*8);
4104 if (decode_entry_point(avctx, &gb) < 0)
4107 avctx->has_b_frames= !!(avctx->max_b_frames);
4108 s->low_delay = !avctx->has_b_frames;
/* macroblock grid dimensions, rounded up to whole 16x16 MBs */
4110 s->mb_width = (avctx->coded_width+15)>>4;
4111 s->mb_height = (avctx->coded_height+15)>>4;
/* Allocate mb bitplanes */
/* NOTE(review): these av_malloc() results are not checked for NULL
 * in the visible code -- confirm against the full file */
4114 v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4115 v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4116 v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
4117 v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
/* allocate block type info in that way so it could be used with s->block_index[] */
4120 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
4121 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
4122 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
4123 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
/* Init coded blocks info */
4126 if (v->profile == PROFILE_ADVANCED)
4128 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
4130 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
4138 /** Decode a VC1/WMV3 frame
4139 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4141 static int vc1_decode_frame(AVCodecContext *avctx,
4142 void *data, int *data_size,
4143 uint8_t *buf, int buf_size)
4145 VC1Context *v = avctx->priv_data;
4146 MpegEncContext *s = &v->s;
4147 AVFrame *pict = data;
4148 uint8_t *buf2 = NULL;
/* no supplementary picture */
4151 if (buf_size == 0) {
/* special case for last picture: flush the delayed frame at EOF */
4153 if (s->low_delay==0 && s->next_picture_ptr) {
4154 *pict= *(AVFrame*)s->next_picture_ptr;
4155 s->next_picture_ptr= NULL;
4157 *data_size = sizeof(AVFrame);
//we need to set current_picture_ptr before reading the header, otherwise we can't store anything in there
4164 if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
4165 int i= ff_find_unused_picture(s, 0);
4166 s->current_picture_ptr= &s->picture[i];
//for advanced profile we need to unescape buffer
4170 if (avctx->codec_id == CODEC_ID_VC1) {
4172 buf2 = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
/* strip the 0x03 emulation-prevention byte after 0x0000 sequences */
4174 for(i = 0; i < buf_size; i++) {
4175 if(buf[i] == 3 && i >= 2 && !buf[i-1] && !buf[i-2] && i < buf_size-1 && buf[i+1] < 4) {
4176 buf2[buf_size2++] = buf[i+1];
4179 buf2[buf_size2++] = buf[i];
4181 init_get_bits(&s->gb, buf2, buf_size2*8);
4183 init_get_bits(&s->gb, buf, buf_size*8);
// do parse frame header
4185 if(v->profile < PROFILE_ADVANCED) {
4186 if(vc1_parse_frame_header(v, &s->gb) == -1) {
4191 if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
/* only I frames are decodable without the res_rtm feature flag */
4197 if(s->pict_type != I_TYPE && !v->res_rtm_flag){
4203 s->current_picture.pict_type= s->pict_type;
4204 s->current_picture.key_frame= s->pict_type == I_TYPE;
/* skip B-frames if we don't have reference frames */
4207 if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){
4209 return -1;//buf_size;
/* skip b frames if we are in a hurry */
4212 if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size;
4213 if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
4214 || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
4215 || avctx->skip_frame >= AVDISCARD_ALL) {
/* skip everything if we are in a hurry>=5 */
4220 if(avctx->hurry_up>=5) {
4222 return -1;//buf_size;
/* after a damaged P frame the next B frames are unusable too */
4225 if(s->next_p_frame_damaged){
4226 if(s->pict_type==B_TYPE)
4229 s->next_p_frame_damaged=0;
4232 if(MPV_frame_start(s, avctx) < 0) {
4237 ff_er_frame_start(s);
/* record the packet size so the block decoders can detect overreads */
4239 v->bits = buf_size * 8;
4240 vc1_decode_blocks(v);
//av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8);
// if(get_bits_count(&s->gb) > buf_size * 8)
4248 assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
4249 assert(s->current_picture.pict_type == s->pict_type);
/* B frames (and low-delay streams) are output immediately; otherwise
 * output the delayed previous reference picture */
4250 if (s->pict_type == B_TYPE || s->low_delay) {
4251 *pict= *(AVFrame*)s->current_picture_ptr;
4252 } else if (s->last_picture_ptr != NULL) {
4253 *pict= *(AVFrame*)s->last_picture_ptr;
4256 if(s->last_picture_ptr || s->low_delay){
4257 *data_size = sizeof(AVFrame);
4258 ff_print_debug_info(s, pict);
/* Return the Picture timestamp as the frame number */
/* we subtract 1 because it is added on utils.c */
4263 avctx->frame_number = s->picture_number - 1;
4270 /** Close a VC1/WMV3 decoder
4271 * @warning Initial try at using MpegEncContext stuff
4273 static int vc1_decode_end(AVCodecContext *avctx)
4275 VC1Context *v = avctx->priv_data;
/* release HRD parameter arrays parsed from the sequence header */
4277 av_freep(&v->hrd_rate);
4278 av_freep(&v->hrd_buffer);
/* tear down the shared MpegEncContext (pictures, tables, ...) */
4279 MPV_common_end(&v->s);
/* free the per-MB bitplanes and block-type array allocated in init;
 * av_freep() also NULLs the pointers, so a double close is safe */
4280 av_freep(&v->mv_type_mb_plane);
4281 av_freep(&v->direct_mb_plane);
4282 av_freep(&v->acpred_plane);
4283 av_freep(&v->over_flags_plane);
4284 av_freep(&v->mb_type_base);
4289 AVCodec vc1_decoder = {
4302 AVCodec wmv3_decoder = {