2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
34 #include "vc1acdata.h"
/* DC and MB-intra VLC tables declared extern — presumably defined and
 * initialized by the shared MSMPEG4 code (ff_msmp4_* naming) — TODO confirm. */
39 extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
40 extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
41 extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
42 #define MB_INTRA_VLC_BITS 9
43 extern VLC ff_msmp4_mb_i_vlc;
44 extern const uint16_t ff_msmp4_mb_i_table[64][2];
/* NOTE(review): tentative definition with no initializer visible in this
 * chunk — verify it is actually referenced/filled elsewhere in the file. */
47 static const uint16_t table_mb_intra[64][2];
/* NOTE(review): the enum opening/closing lines (and several enumerators)
 * are elided in this listing; only fragments of each enum are visible. */
50 /** Available Profiles */
55 PROFILE_COMPLEX, ///< TODO: WMV9 specific
60 /** Sequence quantizer mode */
63 QUANT_FRAME_IMPLICIT, ///< Implicitly specified at frame level
64 QUANT_FRAME_EXPLICIT, ///< Explicitly specified at frame level
65 QUANT_NON_UNIFORM, ///< Non-uniform quant used for all frames
66 QUANT_UNIFORM ///< Uniform quant used for all frames
70 /** Where quant can be changed */
74 DQPROFILE_DOUBLE_EDGES,
75 DQPROFILE_SINGLE_EDGE,
80 /** @name Where quant can be changed
91 /** Which pair of edges is quantized with ALTPQUANT */
94 DQDOUBLE_BEDGE_TOPLEFT,
95 DQDOUBLE_BEDGE_TOPRIGHT,
96 DQDOUBLE_BEDGE_BOTTOMRIGHT,
97 DQDOUBLE_BEDGE_BOTTOMLEFT
101 /** MV modes for P frames */
104 MV_PMODE_1MV_HPEL_BILIN,
108 MV_PMODE_INTENSITY_COMP
112 /** @name MV types for B frames */
117 BMV_TYPE_INTERPOLATED
121 /** @name Block types for P/B frames */
123 enum TransformTypes {
127 TT_8X4, //Both halves
130 TT_4X8, //Both halves
/* Conversion tables between the bitstream TTBLK/TTFRM indices and the
 * TransformTypes enum. NOTE(review): closing braces of the enum and of
 * ttblk_to_tt are elided in this listing; row selection for ttblk_to_tt
 * (3 rows, 8 entries) presumably depends on PQ-derived tt_index — confirm. */
135 /** Table for conversion between TTBLK and TTMB */
136 static const int ttblk_to_tt[3][8] = {
137 { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
138 { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
139 { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
/* Maps the 2-bit TTFRM element directly to a transform type. */
142 static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };
144 /** MV P mode - the 5th element is only used for mode 1 */
/* Two rows: selection between them presumably depends on PQ (low/high
 * quantizer) — the selecting code is not visible here; confirm. Closing
 * braces of both tables are elided in this listing. */
145 static const uint8_t mv_pmode_table[2][5] = {
146 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
147 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
/* Same, without the intensity-compensation entry (4 modes). */
149 static const uint8_t mv_pmode_table2[2][4] = {
150 { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
151 { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
154 /** One more frame type */
/* Frame-rate numerator/denominator lookup used by the sequence header
 * (FRAMERATENR/FRAMERATEDR syntax elements — consumer not visible here). */
157 static const int fps_nr[5] = { 24, 25, 30, 50, 60 },
158 fps_dr[2] = { 1000, 1001 };
/* PQINDEX (0..31) -> PQUANT mapping for the three quantizer signalling
 * modes. NOTE(review): closing braces between rows and after the table
 * are elided in this listing. */
159 static const uint8_t pquant_table[3][32] = {
160 { /* Implicit quantizer */
161 0, 1, 2, 3, 4, 5, 6, 7, 8, 6, 7, 8, 9, 10, 11, 12,
162 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
164 { /* Explicit quantizer, pquantizer uniform */
165 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
166 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
168 { /* Explicit quantizer, pquantizer non-uniform */
169 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
170 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
174 /** @name VC-1 VLC tables and defines
175 * @todo TODO move this into the context
/* File-scope VLC tables, filled once in vc1_init_common(). The *_VLC_BITS
 * macros are the table lookup depths passed to init_vlc()/get_vlc2(). */
178 #define VC1_BFRACTION_VLC_BITS 7
179 static VLC vc1_bfraction_vlc;
180 #define VC1_IMODE_VLC_BITS 4
181 static VLC vc1_imode_vlc;
182 #define VC1_NORM2_VLC_BITS 3
183 static VLC vc1_norm2_vlc;
184 #define VC1_NORM6_VLC_BITS 9
185 static VLC vc1_norm6_vlc;
186 /* Could be optimized, one table only needs 8 bits */
187 #define VC1_TTMB_VLC_BITS 9 //12
188 static VLC vc1_ttmb_vlc[3];
189 #define VC1_MV_DIFF_VLC_BITS 9 //15
190 static VLC vc1_mv_diff_vlc[4];
191 #define VC1_CBPCY_P_VLC_BITS 9 //14
192 static VLC vc1_cbpcy_p_vlc[4];
193 #define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
194 static VLC vc1_4mv_block_pattern_vlc[4];
195 #define VC1_TTBLK_VLC_BITS 5
196 static VLC vc1_ttblk_vlc[3];
197 #define VC1_SUBBLKPAT_VLC_BITS 6
198 static VLC vc1_subblkpat_vlc[3];
/* AC coefficient tables built from vc1acdata.h (see vc1_init_common). */
200 static VLC vc1_ac_coeff_table[8];
/* NOTE(review): fragment of a coding-set enum; the enum keyword, the
 * remaining enumerators and the closing brace are elided in this listing. */
204 CS_HIGH_MOT_INTRA = 0,
214 /** @name Overlap conditions for Advanced Profile */
225 * @fixme Change size wherever another size is more efficient
226 * Many members are only used for Advanced Profile
/* Decoder state for one VC-1/WMV3 stream. NOTE(review): several members
 * and the closing "} VC1Context;" are elided in this listing. */
228 typedef struct VC1Context{
233 /** Simple/Main Profile sequence header */
235 int res_sm; ///< reserved, 2b
236 int res_x8; ///< reserved
237 int multires; ///< frame-level RESPIC syntax element present
238 int res_fasttx; ///< reserved, always 1
239 int res_transtab; ///< reserved, always 0
240 int rangered; ///< RANGEREDFRM (range reduction) syntax element present
242 int res_rtm_flag; ///< reserved, set to 1
243 int reserved; ///< reserved
246 /** Advanced Profile */
248 int level; ///< 3bits, for Advanced/Simple Profile, provided by TS layer
249 int chromaformat; ///< 2bits, 2=4:2:0, only defined
250 int postprocflag; ///< Per-frame processing suggestion flag present
251 int broadcast; ///< TFF/RFF present
252 int interlace; ///< Progressive/interlaced (RPTFTM syntax element)
253 int tfcntrflag; ///< TFCNTR present
254 int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
255 int extended_dmv; ///< Additional extended dmv range at P/B frame-level
256 int color_prim; ///< 8bits, chroma coordinates of the color primaries
257 int transfer_char; ///< 8bits, Opto-electronic transfer characteristics
258 int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
259 int hrd_param_flag; ///< Presence of Hypothetical Reference
260 ///< Decoder parameters
261 int psf; ///< Progressive Segmented Frame
264 /** Sequence header data for all Profiles
265 * TODO: choose between ints, uint8_ts and monobit flags
268 int profile; ///< 2bits, Profile
269 int frmrtq_postproc; ///< 3bits, quantized framerate-based postprocessing strength
270 int bitrtq_postproc; ///< 5bits, quantized framerate-based postprocessing strength
271 int fastuvmc; ///< Rounding of qpel vector to hpel ? (not in Simple)
272 int extended_mv; ///< Ext MV in P/B (not in Simple)
273 int dquant; ///< How qscale varies with MBs, 2bits (not in Simple)
274 int vstransform; ///< variable-size [48]x[48] transform type + info
275 int overlap; ///< overlapped transforms in use
276 int quantizer_mode; ///< 2bits, quantizer mode used for sequence, see QUANT_*
277 int finterpflag; ///< INTERPFRM present
280 /** Frame decoding info for all profiles */
282 uint8_t mv_mode; ///< MV coding mode
283 uint8_t mv_mode2; ///< Secondary MV coding mode (B frames)
284 int k_x; ///< Number of bits for MVs (depends on MV range)
285 int k_y; ///< Number of bits for MVs (depends on MV range)
286 int range_x, range_y; ///< MV range
287 uint8_t pq, altpq; ///< Current/alternate frame quantizer scale
288 /** pquant parameters */
295 /** AC coding set indexes
296 * @see 8.1.1.10, p(1)10
299 int c_ac_table_index; ///< Chroma index from ACFRM element
300 int y_ac_table_index; ///< Luma index from AC2FRM element
302 int ttfrm; ///< Transform type info present at frame level
303 uint8_t ttmbf; ///< Transform type flag
304 uint8_t ttblk4x4; ///< Value of ttblk which indicates a 4x4 transform
305 int codingset; ///< index of current table set from 11.8 to use for luma block decoding
306 int codingset2; ///< index of current table set from 11.8 to use for chroma block decoding
307 int pqindex; ///< raw pqindex used in coding set selection
308 int a_avail, c_avail; ///< presumably above/left neighbour availability for prediction — TODO confirm
309 uint8_t *mb_type_base, *mb_type[3]; ///< per-MB type storage; intra flags read in vc1_mc_4mv_chroma()
312 /** Luma compensation parameters */
317 int16_t bfraction; ///< Relative position % anchors=> how to scale MVs
318 uint8_t halfpq; ///< Uniform quant over image and qp+.5
319 uint8_t respic; ///< Frame-level flag for resized images
320 int buffer_fullness; ///< HRD info
322 * -# 0 -> [-64, 63.f] x [-32, 31.f]
323 * -# 1 -> [-128, 127.f] x [-64, 63.f]
324 * -# 2 -> [-512, 511.f] x [-128, 127.f]
325 * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
328 uint8_t pquantizer; ///< Uniform (over sequence) quantizer in use
329 VLC *cbpcy_vlc; ///< CBPCY VLC table
330 int tt_index; ///< Index for Transform Type tables
331 uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
332 uint8_t* direct_mb_plane; ///< bitplane for "direct" MBs
333 int mv_type_is_raw; ///< mv type mb plane is not coded
334 int dmb_is_raw; ///< direct mb plane is raw
335 int skip_is_raw; ///< skip mb plane is not coded
336 uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
337 int use_ic; ///< use intensity compensation in B-frames
338 int rnd; ///< rounding control
340 /** Frame decoding info for S/M profiles only */
342 uint8_t rangeredfrm; ///< out_sample = CLIP((in_sample-128)*2+128)
346 /** Frame decoding info for Advanced profile */
348 uint8_t fcm; ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
349 uint8_t numpanscanwin; ///< NUMPANSCANWIN syntax element — TODO confirm
351 uint8_t rptfrm, tff, rff; ///< picture-layer repeat/field-order flags — TODO confirm
354 uint16_t bottomrightx; ///< pan-scan window coordinate — TODO confirm
355 uint16_t bottomrighty; ///< pan-scan window coordinate — TODO confirm
358 int hrd_num_leaky_buckets; ///< 5 bits, read in decode_sequence_header_adv()
359 uint8_t bit_rate_exponent; ///< HRD parameter — TODO confirm
360 uint8_t buffer_size_exponent; ///< HRD parameter — TODO confirm
361 uint8_t* acpred_plane; ///< AC prediction flags bitplane
363 uint8_t* over_flags_plane; ///< Overflags bitplane
366 uint16_t *hrd_rate, *hrd_buffer; ///< per-leaky-bucket HRD arrays (NULLed in vc1_init_common)
367 uint8_t *hrd_fullness; ///< HRD buffer fullness per bucket — TODO confirm
368 uint8_t range_mapy_flag; ///< RANGE_MAPY present — TODO confirm
369 uint8_t range_mapuv_flag; ///< RANGE_MAPUV present — TODO confirm
379 * Get unary code of limited length
380 * @fixme FIXME Slow and ugly
381 * @param gb GetBitContext
382 * @param[in] stop The bitstop value (unary code of 1's or 0's)
383 * @param[in] len Maximum length
384 * @return Unary length/index
/* Read a unary code: count bits until the 'stop' bit value is seen or
 * 'len' bits have been consumed; returns the count (len+1 on overrun,
 * per the commented-out variant below). NOTE(review): this listing elides
 * lines — the tail (UPDATE_CACHE/LAST_SKIP_BITS) appears to belong to
 * disabled alternative implementations; confirm against the full file. */
386 static int get_prefix(GetBitContext *gb, int stop, int len)
391 for(i = 0; i < len && get_bits1(gb) != stop; i++);
393 /* int i = 0, tmp = !stop;
395 while (i != len && tmp != stop)
397 tmp = get_bits(gb, 1);
400 if (i == len && tmp != stop) return len+1;
407 UPDATE_CACHE(re, gb);
408 buf=GET_CACHE(re, gb); //Still not sure
409 if (stop) buf = ~buf;
411 log= av_log2(-buf); //FIXME: -?
413 LAST_SKIP_BITS(re, gb, log+1);
414 CLOSE_READER(re, gb);
418 LAST_SKIP_BITS(re, gb, limit);
419 CLOSE_READER(re, gb);
/* Decode a 0/1/2 value: the visible tail returns 2 - next_bit (i.e. 2 or 1);
 * the branch handling the leading bit is elided in this listing —
 * presumably a leading '1' returns 0 directly; TODO confirm. */
424 static inline int decode210(GetBitContext *gb){
430 return 2 - get_bits1(gb);
434 * Init VC-1 specific tables and VC1Context members
435 * @param v The VC1Context to initialize
/* Build all static VC-1 VLC tables and reset per-context defaults.
 * NOTE(review): this listing elides the function braces, the one-time
 * init guard and the for-loops surrounding the indexed init_vlc() calls. */
438 static int vc1_init_common(VC1Context *v)
443 v->hrd_rate = v->hrd_buffer = NULL; /* HRD arrays allocated later if hrd_param_flag set */
449 init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
450 vc1_bfraction_bits, 1, 1,
451 vc1_bfraction_codes, 1, 1, 1);
452 init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
453 vc1_norm2_bits, 1, 1,
454 vc1_norm2_codes, 1, 1, 1);
455 init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
456 vc1_norm6_bits, 1, 1,
457 vc1_norm6_codes, 2, 2, 1);
458 init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
459 vc1_imode_bits, 1, 1,
460 vc1_imode_codes, 1, 1, 1);
/* Indexed tables below are presumably filled inside loops over i (elided). */
463 init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
464 vc1_ttmb_bits[i], 1, 1,
465 vc1_ttmb_codes[i], 2, 2, 1);
466 init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
467 vc1_ttblk_bits[i], 1, 1,
468 vc1_ttblk_codes[i], 1, 1, 1);
469 init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
470 vc1_subblkpat_bits[i], 1, 1,
471 vc1_subblkpat_codes[i], 1, 1, 1);
475 init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
476 vc1_4mv_block_pattern_bits[i], 1, 1,
477 vc1_4mv_block_pattern_codes[i], 1, 1, 1);
478 init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
479 vc1_cbpcy_p_bits[i], 1, 1,
480 vc1_cbpcy_p_codes[i], 2, 2, 1);
481 init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
482 vc1_mv_diff_bits[i], 1, 1,
483 vc1_mv_diff_codes[i], 2, 2, 1);
/* AC coefficient tables: codes/bits interleaved in vc1_ac_tables (vc1acdata.h). */
486 init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
487 &vc1_ac_tables[i][0][1], 8, 4,
488 &vc1_ac_tables[i][0][0], 8, 4, 1);
489 init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
490 &ff_msmp4_mb_i_table[0][1], 4, 2,
491 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
496 v->mvrange = 0; /* 7.1.1.18, p80 */
501 /***********************************************************************/
503 * @defgroup bitplane VC9 Bitplane decoding
508 /** @addtogroup bitplane
521 /** @} */ //imode defines
523 /** Decode rows by checking if they are skipped
524 * @param plane Buffer to store decoded bits
525 * @param[in] width Width of this buffer
526 * @param[in] height Height of this buffer
527 * @param[in] stride of this buffer
/* ROWSKIP bitplane mode: one flag per row; 0 => whole row is zero,
 * 1 => row bits follow explicitly. NOTE(review): braces and the
 * else/plane-advance lines are elided in this listing. */
529 static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
532 for (y=0; y<height; y++){
533 if (!get_bits(gb, 1)) //rowskip
534 memset(plane, 0, width);
536 for (x=0; x<width; x++)
537 plane[x] = get_bits(gb, 1);
542 /** Decode columns by checking if they are skipped
543 * @param plane Buffer to store decoded bits
544 * @param[in] width Width of this buffer
545 * @param[in] height Height of this buffer
546 * @param[in] stride of this buffer
547 * @fixme FIXME: Optimize
/* COLSKIP bitplane mode: column-wise analogue of decode_rowskip();
 * 0 => column zeroed, 1 => column bits follow, stored with 'stride'
 * between vertically adjacent elements. NOTE(review): braces and the
 * zeroing/else lines are elided in this listing. */
549 static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
552 for (x=0; x<width; x++){
553 if (!get_bits(gb, 1)) //colskip
554 for (y=0; y<height; y++)
557 for (y=0; y<height; y++)
558 plane[y*stride] = get_bits(gb, 1);
563 /** Decode a bitplane's bits
564 * @param bp Bitplane where to store the decode bits
565 * @param v VC-1 context for bit reading and logging
567 * @fixme FIXME: Optimize
/* Decode one MB-granularity bitplane (width x height MB flags, stride
 * s.mb_stride) into 'data', selecting among the IMODE coding methods
 * (raw / NORM2 / NORM6 / rowskip / colskip, plus DIFF2/DIFF6 post-pass).
 * Returns (imode << 1) | invert. NOTE(review): the switch statement, some
 * braces and a few error-path lines are elided in this listing. */
569 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
571 GetBitContext *gb = &v->s.gb;
573 int imode, x, y, code, offset;
574 uint8_t invert, *planep = data;
575 int width, height, stride;
577 width = v->s.mb_width;
578 height = v->s.mb_height;
579 stride = v->s.mb_stride;
580 invert = get_bits(gb, 1);
581 imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
587 //Data is actually read in the MB layer (same for all tests == "raw")
588 *raw_flag = 1; //invert ignored
/* NORM2/DIFF2: pairs of bits; odd plane size needs one leading single bit. */
592 if ((height * width) & 1)
594 *planep++ = get_bits(gb, 1);
598 // decode bitplane as one long line
599 for (y = offset; y < height * width; y += 2) {
600 code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
601 *planep++ = code & 1;
603 if(offset == width) {
605 planep += stride - width; /* jump over the stride padding at row end */
607 *planep++ = code >> 1;
609 if(offset == width) {
611 planep += stride - width;
/* NORM6/DIFF6: 6 bits per tile, laid out as 2x3 or 3x2 depending on
 * which dimension divides evenly; leftover edge strips use col/rowskip. */
617 if(!(height % 3) && (width % 3)) { // use 2x3 decoding
618 for(y = 0; y < height; y+= 3) {
619 for(x = width & 1; x < width; x += 2) {
620 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
622 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
625 planep[x + 0] = (code >> 0) & 1;
626 planep[x + 1] = (code >> 1) & 1;
627 planep[x + 0 + stride] = (code >> 2) & 1;
628 planep[x + 1 + stride] = (code >> 3) & 1;
629 planep[x + 0 + stride * 2] = (code >> 4) & 1;
630 planep[x + 1 + stride * 2] = (code >> 5) & 1;
632 planep += stride * 3;
634 if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
636 planep += (height & 1) * stride;
637 for(y = height & 1; y < height; y += 2) {
638 for(x = width % 3; x < width; x += 3) {
639 code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
641 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
644 planep[x + 0] = (code >> 0) & 1;
645 planep[x + 1] = (code >> 1) & 1;
646 planep[x + 2] = (code >> 2) & 1;
647 planep[x + 0 + stride] = (code >> 3) & 1;
648 planep[x + 1 + stride] = (code >> 4) & 1;
649 planep[x + 2 + stride] = (code >> 5) & 1;
651 planep += stride * 2;
654 if(x) decode_colskip(data , x, height , stride, &v->s.gb);
655 if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
659 decode_rowskip(data, width, height, stride, &v->s.gb);
662 decode_colskip(data, width, height, stride, &v->s.gb);
/* DIFF modes: XOR-predict each bit from left/top neighbour (invert when
 * the two disagree), then optionally invert the whole plane. */
667 /* Applying diff operator */
668 if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
672 for (x=1; x<width; x++)
673 planep[x] ^= planep[x-1];
674 for (y=1; y<height; y++)
677 planep[0] ^= planep[-stride];
678 for (x=1; x<width; x++)
680 if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
681 else planep[x] ^= planep[x-1];
688 for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
690 return (imode<<1) + invert;
693 /** @} */ //Bitplane group
695 /***********************************************************************/
696 /** VOP Dquant decoding
697 * @param v VC-1 Context
/* Parse the picture-layer VOPDQUANT fields: derive the alternate
 * quantizer v->altpq either directly (PQDIFF==7 => explicit 5-bit ABSPQ)
 * or as pq + pqdiff + 1; for dquant==2-style streams also read DQPROFILE
 * and its edge/bilevel sub-fields. NOTE(review): the enclosing if/else on
 * v->dquant and several braces are elided in this listing. */
699 static int vop_dquant_decoding(VC1Context *v)
701 GetBitContext *gb = &v->s.gb;
707 pqdiff = get_bits(gb, 3);
708 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
709 else v->altpq = v->pq + pqdiff + 1;
713 v->dquantfrm = get_bits(gb, 1);
716 v->dqprofile = get_bits(gb, 2);
717 switch (v->dqprofile)
719 case DQPROFILE_SINGLE_EDGE:
720 case DQPROFILE_DOUBLE_EDGES:
721 v->dqsbedge = get_bits(gb, 2); /* which edge(s) use ALTPQUANT */
723 case DQPROFILE_ALL_MBS:
724 v->dqbilevel = get_bits(gb, 1);
725 default: break; //Forbidden ?
727 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
729 pqdiff = get_bits(gb, 3);
730 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
731 else v->altpq = v->pq + pqdiff + 1;
738 /** Put block onto picture
/* Write the six 8x8 blocks of one MB (4 luma + 2 chroma) into the current
 * picture. The (x-128)*2+128 expansion undoes range reduction — the guard
 * condition for it (presumably v->rangeredfrm) is elided in this listing. */
740 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
744 DSPContext *dsp = &v->s.dsp;
748 for(k = 0; k < 6; k++)
749 for(j = 0; j < 8; j++)
750 for(i = 0; i < 8; i++)
751 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
754 ys = v->s.current_picture.linesize[0];
755 us = v->s.current_picture.linesize[1];
756 vs = v->s.current_picture.linesize[2];
/* Luma: blocks 0/1 top row, 2/3 bottom row (Y advance elided here). */
759 dsp->put_pixels_clamped(block[0], Y, ys);
760 dsp->put_pixels_clamped(block[1], Y + 8, ys);
762 dsp->put_pixels_clamped(block[2], Y, ys);
763 dsp->put_pixels_clamped(block[3], Y + 8, ys);
765 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
766 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
767 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
771 /** Do motion compensation over 1 macroblock
772 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
/* Motion-compensate one whole macroblock with a single MV ('dir' selects
 * forward/backward reference). Handles quarter-pel luma (mspel), half-pel
 * fallback, chroma derived from the luma MV, edge emulation, and the
 * range-reduction / intensity-compensation source rescaling.
 * NOTE(review): several braces and a few guard conditions are elided in
 * this listing. */
774 static void vc1_mc_1mv(VC1Context *v, int dir)
776 MpegEncContext *s = &v->s;
777 DSPContext *dsp = &v->s.dsp;
778 uint8_t *srcY, *srcU, *srcV;
779 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
781 if(!v->s.last_picture.data[0])return; /* no reference yet */
783 mx = s->mv[dir][0][0];
784 my = s->mv[dir][0][1];
786 // store motion vectors for further use in B frames
787 if(s->pict_type == P_TYPE) {
788 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
789 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
/* Chroma MV = luma MV halved, with 3/4-pel positions rounded (VC-1 rule). */
791 uvmx = (mx + ((mx & 3) == 3)) >> 1;
792 uvmy = (my + ((my & 3) == 3)) >> 1;
794 srcY = s->last_picture.data[0];
795 srcU = s->last_picture.data[1];
796 srcV = s->last_picture.data[2];
798 srcY = s->next_picture.data[0];
799 srcU = s->next_picture.data[1];
800 srcV = s->next_picture.data[2];
803 src_x = s->mb_x * 16 + (mx >> 2);
804 src_y = s->mb_y * 16 + (my >> 2);
805 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
806 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
808 src_x = clip( src_x, -16, s->mb_width * 16);
809 src_y = clip( src_y, -16, s->mb_height * 16);
810 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
811 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
813 srcY += src_y * s->linesize + src_x;
814 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
815 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
817 /* for grayscale we should not try to read from unknown area */
818 if(s->flags & CODEC_FLAG_GRAY) {
819 srcU = s->edge_emu_buffer + 18 * s->linesize;
820 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* Edge emulation: also forced when the source must be rescaled in place
 * (range reduction / intensity comp) so the reference is not modified. */
823 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
824 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
825 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
826 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
828 srcY -= s->mspel * (1 + s->linesize);
829 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
830 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
831 srcY = s->edge_emu_buffer;
832 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
833 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
834 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
835 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
838 /* if we deal with range reduction we need to scale source blocks */
844 for(j = 0; j < 17 + s->mspel*2; j++) {
845 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
848 src = srcU; src2 = srcV;
849 for(j = 0; j < 9; j++) {
850 for(i = 0; i < 9; i++) {
851 src[i] = ((src[i] - 128) >> 1) + 128;
852 src2[i] = ((src2[i] - 128) >> 1) + 128;
854 src += s->uvlinesize;
855 src2 += s->uvlinesize;
858 /* if we deal with intensity compensation we need to scale source blocks */
859 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
864 for(j = 0; j < 17 + s->mspel*2; j++) {
865 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
868 src = srcU; src2 = srcV;
869 for(j = 0; j < 9; j++) {
870 for(i = 0; i < 9; i++) {
871 src[i] = v->lutuv[src[i]];
872 src2[i] = v->lutuv[src2[i]];
874 src += s->uvlinesize;
875 src2 += s->uvlinesize;
878 srcY += s->mspel * (1 + s->linesize); /* undo the earlier offset */
/* FASTUVMC-style chroma rounding toward zero — guard elided in listing. */
882 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
883 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
887 dxy = ((my & 3) << 2) | (mx & 3);
888 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
889 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
890 srcY += s->linesize * 8;
891 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
892 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
893 } else { // hpel mc - always used for luma
894 dxy = (my & 2) | ((mx & 2) >> 1);
897 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
899 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
902 if(s->flags & CODEC_FLAG_GRAY) return;
903 /* Chroma MC always uses qpel bilinear */
904 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
908 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
909 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
911 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
912 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
916 /** Do motion compensation for 4-MV macroblock - luminance block
/* Motion-compensate one 8x8 luma block (n = 0..3) of a 4-MV macroblock;
 * same mspel/hpel, edge-emulation and source-rescaling logic as
 * vc1_mc_1mv() but at 8x8 granularity. NOTE(review): several braces and
 * the mx/my assignment lines are elided in this listing. */
918 static void vc1_mc_4mv_luma(VC1Context *v, int n)
920 MpegEncContext *s = &v->s;
921 DSPContext *dsp = &v->s.dsp;
923 int dxy, mx, my, src_x, src_y;
926 if(!v->s.last_picture.data[0])return;
929 srcY = s->last_picture.data[0];
/* Destination offset of block n inside the MB: (n&1) column, (n&2) row. */
931 off = s->linesize * 4 * (n&2) + (n&1) * 8;
933 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
934 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
936 src_x = clip( src_x, -16, s->mb_width * 16);
937 src_y = clip( src_y, -16, s->mb_height * 16);
939 srcY += src_y * s->linesize + src_x;
941 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
942 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
943 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
944 srcY -= s->mspel * (1 + s->linesize);
945 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
946 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
947 srcY = s->edge_emu_buffer;
948 /* if we deal with range reduction we need to scale source blocks */
954 for(j = 0; j < 9 + s->mspel*2; j++) {
955 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
959 /* if we deal with intensity compensation we need to scale source blocks */
960 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
965 for(j = 0; j < 9 + s->mspel*2; j++) {
966 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
970 srcY += s->mspel * (1 + s->linesize);
974 dxy = ((my & 3) << 2) | (mx & 3);
975 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
976 } else { // hpel mc - always used for luma
977 dxy = (my & 2) | ((mx & 2) >> 1);
979 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
981 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Median of four values, computed as the mean of the two middle elements.
 * NOTE(review): the outer comparison (presumably on a<b) selecting between
 * the two visible branches, and the braces, are elided in this listing. */
985 static inline int median4(int a, int b, int c, int d)
988 if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
989 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
991 if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
992 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
997 /** Do motion compensation for 4-MV macroblock - both chroma blocks
/* Derive a single chroma MV from the four luma MVs of a 4-MV macroblock
 * (median of 4, median of 3, or average of 2, depending on how many luma
 * blocks are intra), then motion-compensate both chroma blocks with qpel
 * bilinear filtering. NOTE(review): some braces and the case-selection
 * lines inside the 3-inter branch are elided in this listing. */
999 static void vc1_mc_4mv_chroma(VC1Context *v)
1001 MpegEncContext *s = &v->s;
1002 DSPContext *dsp = &v->s.dsp;
1003 uint8_t *srcU, *srcV;
1004 int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
1005 int i, idx, tx = 0, ty = 0;
1006 int mvx[4], mvy[4], intra[4];
/* count[idx] = popcount(idx): number of intra luma blocks in the MB. */
1007 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
1009 if(!v->s.last_picture.data[0])return;
1010 if(s->flags & CODEC_FLAG_GRAY) return;
1012 for(i = 0; i < 4; i++) {
1013 mvx[i] = s->mv[0][i][0];
1014 mvy[i] = s->mv[0][i][1];
1015 intra[i] = v->mb_type[0][s->block_index[i]];
1018 /* calculate chroma MV vector from four luma MVs */
1019 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
1020 if(!idx) { // all blocks are inter
1021 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
1022 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
1023 } else if(count[idx] == 1) { // 3 inter blocks
/* median of the three inter blocks; which triple depends on the intra one */
1026 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
1027 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
1030 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
1031 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
1034 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
1035 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
1038 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
1039 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
1042 } else if(count[idx] == 2) {
1044 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
1045 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
1046 tx = (mvx[t1] + mvx[t2]) / 2;
1047 ty = (mvy[t1] + mvy[t2]) / 2;
1049 return; //no need to do MC for inter blocks
1051 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
1052 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
/* Same chroma halving/rounding rule as vc1_mc_1mv(). */
1053 uvmx = (tx + ((tx&3) == 3)) >> 1;
1054 uvmy = (ty + ((ty&3) == 3)) >> 1;
1056 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1057 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1059 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
1060 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
1061 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1062 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1063 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1064 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1065 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
1066 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
1067 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1068 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
1069 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1070 srcU = s->edge_emu_buffer;
1071 srcV = s->edge_emu_buffer + 16;
1073 /* if we deal with range reduction we need to scale source blocks */
1074 if(v->rangeredfrm) {
1076 uint8_t *src, *src2;
1078 src = srcU; src2 = srcV;
1079 for(j = 0; j < 9; j++) {
1080 for(i = 0; i < 9; i++) {
1081 src[i] = ((src[i] - 128) >> 1) + 128;
1082 src2[i] = ((src2[i] - 128) >> 1) + 128;
1084 src += s->uvlinesize;
1085 src2 += s->uvlinesize;
1088 /* if we deal with intensity compensation we need to scale source blocks */
1089 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1091 uint8_t *src, *src2;
1093 src = srcU; src2 = srcV;
1094 for(j = 0; j < 9; j++) {
1095 for(i = 0; i < 9; i++) {
1096 src[i] = v->lutuv[src[i]];
1097 src2[i] = v->lutuv[src2[i]];
1099 src += s->uvlinesize;
1100 src2 += s->uvlinesize;
1106 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
1107 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
1110 /* Chroma MC always uses qpel bilinear */
1111 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
1115 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1116 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1118 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1119 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Forward declaration: Advanced-Profile path split out of
 * decode_sequence_header() below. */
1123 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
1126 * Decode Simple/Main Profiles sequence header
1127 * @see Figure 7-8, p16-17
1128 * @param avctx Codec context
1129 * @param gb GetBit context initialized from Codec context extra_data
/* Parse the Simple/Main Profile sequence header from extradata; dispatches
 * to decode_sequence_header_adv() for Advanced Profile. Validates the
 * reserved RES_* fields and profile-restricted flags, logging errors for
 * forbidden combinations. NOTE(review): the return statements, braces and
 * a few guard conditions are elided in this listing. */
1132 static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
1134 VC1Context *v = avctx->priv_data;
1136 av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
1137 v->profile = get_bits(gb, 2);
1138 if (v->profile == 2)
1140 av_log(avctx, AV_LOG_ERROR, "Profile value 2 is forbidden (and WMV3 Complex Profile is unsupported)\n");
1144 if (v->profile == PROFILE_ADVANCED)
1146 return decode_sequence_header_adv(v, gb);
1150 v->res_sm = get_bits(gb, 2); //reserved
1153 av_log(avctx, AV_LOG_ERROR,
1154 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
1160 v->frmrtq_postproc = get_bits(gb, 3); //common
1161 // (bitrate-32kbps)/64kbps
1162 v->bitrtq_postproc = get_bits(gb, 5); //common
1163 v->s.loop_filter = get_bits(gb, 1); //common
1164 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
1166 av_log(avctx, AV_LOG_ERROR,
/* NOTE(review): typo in the runtime message below ("shell" should be
 * "shall") — left untouched here; fixing it changes program output. */
1167 "LOOPFILTER shell not be enabled in simple profile\n");
1170 v->res_x8 = get_bits(gb, 1); //reserved
1173 av_log(avctx, AV_LOG_ERROR,
1174 "1 for reserved RES_X8 is forbidden\n");
1177 v->multires = get_bits(gb, 1);
1178 v->res_fasttx = get_bits(gb, 1);
1181 av_log(avctx, AV_LOG_ERROR,
1182 "0 for reserved RES_FASTTX is forbidden\n");
1186 v->fastuvmc = get_bits(gb, 1); //common
/* !v->profile tests for PROFILE_SIMPLE — presumably enum value 0; confirm. */
1187 if (!v->profile && !v->fastuvmc)
1189 av_log(avctx, AV_LOG_ERROR,
1190 "FASTUVMC unavailable in Simple Profile\n");
1193 v->extended_mv = get_bits(gb, 1); //common
1194 if (!v->profile && v->extended_mv)
1196 av_log(avctx, AV_LOG_ERROR,
1197 "Extended MVs unavailable in Simple Profile\n");
1200 v->dquant = get_bits(gb, 2); //common
1201 v->vstransform = get_bits(gb, 1); //common
1203 v->res_transtab = get_bits(gb, 1);
1204 if (v->res_transtab)
1206 av_log(avctx, AV_LOG_ERROR,
1207 "1 for reserved RES_TRANSTAB is forbidden\n");
1211 v->overlap = get_bits(gb, 1); //common
1213 v->s.resync_marker = get_bits(gb, 1);
1214 v->rangered = get_bits(gb, 1);
1215 if (v->rangered && v->profile == PROFILE_SIMPLE)
1217 av_log(avctx, AV_LOG_INFO,
1218 "RANGERED should be set to 0 in simple profile\n");
1221 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
1222 v->quantizer_mode = get_bits(gb, 2); //common
1224 v->finterpflag = get_bits(gb, 1); //common
1225 v->res_rtm_flag = get_bits(gb, 1); //reserved
1226 if (!v->res_rtm_flag)
1228 // av_log(avctx, AV_LOG_ERROR,
1229 // "0 for reserved RES_RTM_FLAG is forbidden\n");
1230 av_log(avctx, AV_LOG_ERROR,
1231 "Old WMV3 version detected, only I-frames will be decoded\n");
1234 av_log(avctx, AV_LOG_DEBUG,
1235 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1236 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
1237 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
1238 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
1239 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
1240 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
1241 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
1242 v->dquant, v->quantizer_mode, avctx->max_b_frames
// Parse the Advanced Profile sequence header (SMPTE 421M, 6.1):
// level/chroma format, coded dimensions, interlace/broadcast flags,
// optional display-info and HRD (leaky-bucket) parameters.
// NOTE(review): this chunk has lines elided (gaps in the embedded original
// line numbering) — braces/returns are not all visible; code left untouched.
1247 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
1249 v->res_rtm_flag = 1;
// LEVEL (3 bits); values above the defined range are reserved.
1250 v->level = get_bits(gb, 3);
1253 av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
// COLORDIFF_FORMAT: only value 1 (4:2:0) is supported by this decoder.
1255 v->chromaformat = get_bits(gb, 2);
1256 if (v->chromaformat != 1)
1258 av_log(v->s.avctx, AV_LOG_ERROR,
1259 "Only 4:2:0 chroma format supported\n");
1264 v->frmrtq_postproc = get_bits(gb, 3); //common
1265 // (bitrate-32kbps)/64kbps
1266 v->bitrtq_postproc = get_bits(gb, 5); //common
1267 v->postprocflag = get_bits(gb, 1); //common
// Coded size is stored as (value+1)*2, i.e. always even dimensions.
1269 v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
1270 v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
1271 v->broadcast = get_bits1(gb);
1272 v->interlace = get_bits1(gb);
1273 v->tfcntrflag = get_bits1(gb);
1274 v->finterpflag = get_bits1(gb);
1275 get_bits1(gb); // reserved
// Progressive Segmented Frame mode — parsed but not decoded.
1276 v->psf = get_bits1(gb);
1277 if(v->psf) { //PsF, 6.1.13
1278 av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
1281 if(get_bits1(gb)) { //Display Info - decoding is not affected by it
1283 av_log(v->s.avctx, AV_LOG_INFO, "Display extended info:\n");
1284 w = get_bits(gb, 14);
1285 h = get_bits(gb, 14);
1286 av_log(v->s.avctx, AV_LOG_INFO, "Display dimensions: %ix%i\n", w, h);
1287 //TODO: store aspect ratio in AVCodecContext
1289 ar = get_bits(gb, 4);
1291 w = get_bits(gb, 8);
1292 h = get_bits(gb, 8);
1295 if(get_bits1(gb)){ //framerate stuff
// Colorimetry info is stored but otherwise unused here.
1305 v->color_prim = get_bits(gb, 8);
1306 v->transfer_char = get_bits(gb, 8);
1307 v->matrix_coef = get_bits(gb, 8);
// HRD parameters: bucket count is needed later by decode_entry_point().
1311 v->hrd_param_flag = get_bits1(gb);
1312 if(v->hrd_param_flag) {
1314 v->hrd_num_leaky_buckets = get_bits(gb, 5);
1315 get_bits(gb, 4); //bitrate exponent
1316 get_bits(gb, 4); //buffer size exponent
1317 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1318 get_bits(gb, 16); //hrd_rate[n]
1319 get_bits(gb, 16); //hrd_buffer[n]
// Parse an Advanced Profile entry-point header (SMPTE 421M, 6.2):
// refreshes the per-entry-point coding flags (loop filter, fast UV MC,
// extended MV, dquant, quantizer mode, ...) and optional new coded size.
// NOTE(review): lines are elided here (embedded numbering has gaps);
// conditions guarding some statements are not visible.
1325 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
1327 VC1Context *v = avctx->priv_data;
1330 av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
1331 get_bits1(gb); // broken link
// CLOSED_ENTRY == 1 means no B-frames can reference across this point.
1332 avctx->max_b_frames = 1 - get_bits1(gb); // 'closed entry' also signalize possible B-frames
1333 v->panscanflag = get_bits1(gb);
1334 get_bits1(gb); // refdist flag
1335 v->s.loop_filter = get_bits1(gb);
1336 v->fastuvmc = get_bits1(gb);
1337 v->extended_mv = get_bits1(gb);
1338 v->dquant = get_bits(gb, 2);
1339 v->vstransform = get_bits1(gb);
1340 v->overlap = get_bits1(gb);
1341 v->quantizer_mode = get_bits(gb, 2);
// HRD fullness per leaky bucket; count comes from the sequence header.
1343 if(v->hrd_param_flag){
1344 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1345 get_bits(gb, 8); //hrd_full[n]
1350 avctx->coded_width = (get_bits(gb, 12)+1)<<1;
1351 avctx->coded_height = (get_bits(gb, 12)+1)<<1;
1354 v->extended_dmv = get_bits1(gb);
// Range mapping (Y/UV scaling) is parsed but not applied — output may be wrong.
1356 av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
1357 skip_bits(gb, 3); // Y range, ignored for now
1360 av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
1361 skip_bits(gb, 3); // UV range, ignored for now
// Parse a Simple/Main Profile frame (picture) header:
// picture type, quantizer, MV range/mode, intensity compensation,
// bitplanes (MV type, skip, direct), MV/CBPCY VLC selection, transform
// type, and AC/DC table indices.
// NOTE(review): lines are elided (gaps in the embedded numbering); several
// braces/cases of the switch are not visible. Code left byte-identical.
1367 static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
1369 int pqindex, lowquant, status;
1371 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
1372 skip_bits(gb, 2); //framecnt unused
1374 if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
// Picture type: with B-frames enabled a second bit distinguishes I from B.
1375 v->s.pict_type = get_bits(gb, 1);
1376 if (v->s.avctx->max_b_frames) {
1377 if (!v->s.pict_type) {
1378 if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
1379 else v->s.pict_type = B_TYPE;
1380 } else v->s.pict_type = P_TYPE;
1381 } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
// BFRACTION == 0 signals a BI (intra B) picture.
1384 if(v->s.pict_type == B_TYPE) {
1385 v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1386 v->bfraction = vc1_bfraction_lut[v->bfraction];
1387 if(v->bfraction == 0) {
1388 v->s.pict_type = BI_TYPE;
1391 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1392 get_bits(gb, 7); // skip buffer fullness
1395 if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1397 if(v->s.pict_type == P_TYPE)
1400 /* Quantizer stuff */
// PQINDEX maps to PQUANT via implicit or explicit table (SMPTE 421M 7.1.1.6).
1401 pqindex = get_bits(gb, 5);
1402 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1403 v->pq = pquant_table[0][pqindex];
1405 v->pq = pquant_table[1][pqindex];
1408 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1409 v->pquantizer = pqindex < 9;
1410 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1412 v->pqindex = pqindex;
1413 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1415 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1416 v->pquantizer = get_bits(gb, 1);
// Extended MV range: MVRANGE is a unary-coded prefix (0..3).
1418 if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
1419 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1420 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1421 v->range_x = 1 << (v->k_x - 1);
1422 v->range_y = 1 << (v->k_y - 1);
1423 if (v->profile == PROFILE_ADVANCED)
1425 if (v->postprocflag) v->postproc = get_bits(gb, 1);
1428 if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);
1430 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
1431 // (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
// Intensity compensation only persists across B-frames; reset otherwise.
1433 if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
1435 switch(v->s.pict_type) {
1437 if (v->pq < 5) v->tt_index = 0;
1438 else if(v->pq < 13) v->tt_index = 1;
1439 else v->tt_index = 2;
1441 lowquant = (v->pq > 12) ? 0 : 1;
1442 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1443 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1445 int scale, shift, i;
1446 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1447 v->lumscale = get_bits(gb, 6);
1448 v->lumshift = get_bits(gb, 6);
1450 /* fill lookup tables for intensity compensation */
1453 shift = (255 - v->lumshift * 2) << 6;
1454 if(v->lumshift > 31)
1457 scale = v->lumscale + 32;
1458 if(v->lumshift > 31)
// LUMSHIFT values > 31 are treated as negative (6-bit two's complement).
1459 shift = (v->lumshift - 64) << 6;
1461 shift = v->lumshift << 6;
// Build 256-entry LUTs mapping reference pixels for luma/chroma IC.
1463 for(i = 0; i < 256; i++) {
1464 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1465 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1468 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1469 v->s.quarter_sample = 0;
1470 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1471 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1472 v->s.quarter_sample = 0;
1474 v->s.quarter_sample = 1;
1476 v->s.quarter_sample = 1;
1477 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
// Mixed-MV mode needs a per-MB MV-type bitplane; otherwise it is cleared.
1479 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1480 v->mv_mode2 == MV_PMODE_MIXED_MV)
1481 || v->mv_mode == MV_PMODE_MIXED_MV)
1483 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1484 if (status < 0) return -1;
1485 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1486 "Imode: %i, Invert: %i\n", status>>1, status&1);
1488 v->mv_type_is_raw = 0;
1489 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1491 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1492 if (status < 0) return -1;
1493 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1494 "Imode: %i, Invert: %i\n", status>>1, status&1);
1496 /* Hopefully this is correct for P frames */
1497 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1498 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1502 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1503 vop_dquant_decoding(v);
1506 v->ttfrm = 0; //FIXME Is that so ?
1509 v->ttmbf = get_bits(gb, 1);
1512 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
// B-frame path: simplified MV mode (1MV or HPEL bilinear only).
1520 if (v->pq < 5) v->tt_index = 0;
1521 else if(v->pq < 13) v->tt_index = 1;
1522 else v->tt_index = 2;
1524 lowquant = (v->pq > 12) ? 0 : 1;
1525 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1526 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1527 v->s.mspel = v->s.quarter_sample;
1529 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1530 if (status < 0) return -1;
1531 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1532 "Imode: %i, Invert: %i\n", status>>1, status&1);
1533 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1534 if (status < 0) return -1;
1535 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1536 "Imode: %i, Invert: %i\n", status>>1, status&1);
1538 v->s.mv_table_index = get_bits(gb, 2);
1539 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1543 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1544 vop_dquant_decoding(v);
1550 v->ttmbf = get_bits(gb, 1);
1553 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
// AC/DC coding set selection (decode012 == 0, 10 or 11 -> index 0..2).
1563 v->c_ac_table_index = decode012(gb);
1564 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1566 v->y_ac_table_index = decode012(gb);
1569 v->s.dc_table_index = get_bits(gb, 1);
// BI pictures are decoded as B internally; flag marks the distinction.
1571 if(v->s.pict_type == BI_TYPE) {
1572 v->s.pict_type = B_TYPE;
// Parse an Advanced Profile frame header: frame coding mode, picture type
// (unary prefix), per-frame flags, quantizer, then the same per-type
// decoding setup as the Simple/Main path (MV modes, bitplanes, transform
// and AC/DC table selection).
// NOTE(review): lines are elided (gaps in embedded numbering); switch
// cases and some closing braces are not visible. Code left byte-identical.
1578 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
1581 int pqindex, lowquant;
1584 v->p_frame_skipped = 0;
// FCM: frame coding mode (progressive/interlaced) — decode012 coded.
1587 fcm = decode012(gb);
// PTYPE is a unary prefix: 0=P, 10=B, 110=I, 1110=BI, 1111=skipped.
1588 switch(get_prefix(gb, 0, 4)) {
1590 v->s.pict_type = P_TYPE;
1593 v->s.pict_type = B_TYPE;
1597 v->s.pict_type = I_TYPE;
1600 v->s.pict_type = BI_TYPE;
1603 v->s.pict_type = P_TYPE; // skipped pic
1604 v->p_frame_skipped = 1;
1610 if(!v->interlace || v->panscanflag) {
1617 if(v->panscanflag) {
1620 v->rnd = get_bits1(gb);
1622 v->uvsamp = get_bits1(gb);
1623 if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
// Quantizer selection — mirrors the Simple/Main profile logic.
1624 pqindex = get_bits(gb, 5);
1625 v->pqindex = pqindex;
1626 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1627 v->pq = pquant_table[0][pqindex];
1629 v->pq = pquant_table[1][pqindex];
1632 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1633 v->pquantizer = pqindex < 9;
1634 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1636 v->pqindex = pqindex;
1637 if (pqindex < 9) v->halfpq = get_bits(gb, 1);
1639 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1640 v->pquantizer = get_bits(gb, 1);
1642 switch(v->s.pict_type) {
// I/BI path: AC-prediction bitplane and conditional overlap smoothing.
1645 status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
1646 if (status < 0) return -1;
1647 av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
1648 "Imode: %i, Invert: %i\n", status>>1, status&1);
1649 v->condover = CONDOVER_NONE;
1650 if(v->overlap && v->pq <= 8) {
1651 v->condover = decode012(gb);
1652 if(v->condover == CONDOVER_SELECT) {
1653 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
1654 if (status < 0) return -1;
1655 av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
1656 "Imode: %i, Invert: %i\n", status>>1, status&1);
1662 v->postproc = get_bits1(gb);
1663 if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
1664 else v->mvrange = 0;
1665 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1666 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1667 v->range_x = 1 << (v->k_x - 1);
1668 v->range_y = 1 << (v->k_y - 1);
1670 if (v->pq < 5) v->tt_index = 0;
1671 else if(v->pq < 13) v->tt_index = 1;
1672 else v->tt_index = 2;
1674 lowquant = (v->pq > 12) ? 0 : 1;
1675 v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
1676 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1678 int scale, shift, i;
1679 v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
1680 v->lumscale = get_bits(gb, 6);
1681 v->lumshift = get_bits(gb, 6);
1682 /* fill lookup tables for intensity compensation */
1685 shift = (255 - v->lumshift * 2) << 6;
1686 if(v->lumshift > 31)
1689 scale = v->lumscale + 32;
1690 if(v->lumshift > 31)
// LUMSHIFT values > 31 are treated as negative (6-bit two's complement).
1691 shift = (v->lumshift - 64) << 6;
1693 shift = v->lumshift << 6;
1695 for(i = 0; i < 256; i++) {
1696 v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
1697 v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1700 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1701 v->s.quarter_sample = 0;
1702 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1703 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1704 v->s.quarter_sample = 0;
1706 v->s.quarter_sample = 1;
1708 v->s.quarter_sample = 1;
1709 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
// Mixed-MV mode needs a per-MB MV-type bitplane; otherwise clear it.
1711 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1712 v->mv_mode2 == MV_PMODE_MIXED_MV)
1713 || v->mv_mode == MV_PMODE_MIXED_MV)
1715 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1716 if (status < 0) return -1;
1717 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1718 "Imode: %i, Invert: %i\n", status>>1, status&1);
1720 v->mv_type_is_raw = 0;
1721 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1723 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1724 if (status < 0) return -1;
1725 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1726 "Imode: %i, Invert: %i\n", status>>1, status&1);
1728 /* Hopefully this is correct for P frames */
1729 v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
1730 v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1733 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1734 vop_dquant_decoding(v);
1737 v->ttfrm = 0; //FIXME Is that so ?
1740 v->ttmbf = get_bits(gb, 1);
1743 v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
1753 v->c_ac_table_index = decode012(gb);
1754 if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
1756 v->y_ac_table_index = decode012(gb);
1759 v->s.dc_table_index = get_bits(gb, 1);
1760 if (v->s.pict_type == I_TYPE && v->dquant) {
1761 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1762 vop_dquant_decoding(v);
// BI pictures are decoded as B internally.
1766 if(v->s.pict_type == BI_TYPE) {
1767 v->s.pict_type = B_TYPE;
1773 /***********************************************************************/
1775 * @defgroup block VC-1 Block-level functions
1776 * @see 7.1.4, p91 and 8.1.1.7, p104
1782 * @brief Get macroblock-level quantizer scale
// GET_MQUANT: derive the per-MB quantizer `mquant` according to the
// frame's DQPROFILE. Edge profiles override mquant with v->altpq on the
// selected picture edges; ALL_MBS reads the choice/value from the stream.
// NOTE(review): several macro lines are elided here; the surrounding
// if/else structure is only partially visible.
1784 #define GET_MQUANT() \
1788 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1792 mquant = (get_bits(gb, 1)) ? v->altpq : v->pq; \
// MQDIFF: 3 bits; the escape value 7 means a full 5-bit PQUANT follows.
1796 mqdiff = get_bits(gb, 3); \
1797 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1798 else mquant = get_bits(gb, 5); \
1801 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1802 edges = 1 << v->dqsbedge; \
1803 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
// (3 << dqsbedge) % 15 maps the double-edge code to a 4-bit edge mask.
1804 edges = (3 << v->dqsbedge) % 15; \
1805 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
// edge mask bits: 1=left, 2=top, 4=right, 8=bottom of the picture.
1807 if((edges&1) && !s->mb_x) \
1808 mquant = v->altpq; \
1809 if((edges&2) && s->first_slice_line) \
1810 mquant = v->altpq; \
1811 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1812 mquant = v->altpq; \
1813 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1814 mquant = v->altpq; \
1818 * @def GET_MVDATA(_dmv_x, _dmv_y)
1819 * @brief Get MV differentials
1820 * @see MVDATA decoding from 8.3.5.2, p120
1821 * @param _dmv_x Horizontal differential for decoded MV
1822 * @param _dmv_y Vertical differential for decoded MV
// NOTE(review): some macro lines are elided; the index==36 branch body
// and parts of the common path are not fully visible.
1824 #define GET_MVDATA(_dmv_x, _dmv_y) \
1825 index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
1826 VC1_MV_DIFF_VLC_BITS, 2); \
1829 mb_has_coeffs = 1; \
1832 else mb_has_coeffs = 0; \
// index 0 => zero differential; 35 => escape-coded raw differentials.
1834 if (!index) { _dmv_x = _dmv_y = 0; } \
1835 else if (index == 35) \
1837 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1838 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1840 else if (index == 36) \
// Common path: split index into per-axis size/offset table lookups,
// then undo the sign folding (LSB carries the sign).
1849 if (!s->quarter_sample && index1 == 5) val = 1; \
1851 if(size_table[index1] - val > 0) \
1852 val = get_bits(gb, size_table[index1] - val); \
1854 sign = 0 - (val&1); \
1855 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1858 if (!s->quarter_sample && index1 == 5) val = 1; \
1860 if(size_table[index1] - val > 0) \
1861 val = get_bits(gb, size_table[index1] - val); \
1863 sign = 0 - (val&1); \
1864 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1867 /** Predict and set motion vector
// Computes the median MV predictor from neighbours A (top), B (top-right/
// top-left) and C (left), applies pullback clamping (8.3.5.3.4), optional
// hybrid prediction (8.3.5.3.5), adds the differential and stores the
// result with signed-modulus wrapping into the MV range.
// NOTE(review): lines are elided (gaps in embedded numbering); some
// branches (e.g. the C-predictor-only path) are not fully visible.
1869 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
1871 int xy, wrap, off = 0;
1876 /* scale MV difference to be quad-pel */
1877 dmv_x <<= 1 - s->quarter_sample;
1878 dmv_y <<= 1 - s->quarter_sample;
1880 wrap = s->b8_stride;
1881 xy = s->block_index[n];
// Intra block: zero this MV and, in 1-MV mode, all four 8x8 entries.
1884 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1885 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1886 if(mv1) { /* duplicate motion data for 1-MV block */
1887 s->current_picture.motion_val[0][xy + 1][0] = 0;
1888 s->current_picture.motion_val[0][xy + 1][1] = 0;
1889 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1890 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1891 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1892 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
// Gather neighbour predictors; B's offset depends on position/block index.
1897 C = s->current_picture.motion_val[0][xy - 1];
1898 A = s->current_picture.motion_val[0][xy - wrap];
1900 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1902 //in 4-MV mode different blocks have different B predictor position
1905 off = (s->mb_x > 0) ? -1 : 1;
1908 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1917 B = s->current_picture.motion_val[0][xy - wrap + off];
1919 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
1920 if(s->mb_width == 1) {
1924 px = mid_pred(A[0], B[0], C[0]);
1925 py = mid_pred(A[1], B[1], C[1]);
1927 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
1933 /* Pullback MV as specified in 8.3.5.3.4 */
// Positions in quarter-pel units (<<6 = MB*64); limits keep the MV
// pointing no more than one MB outside the picture.
1936 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
1937 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
1938 X = (s->mb_width << 6) - 4;
1939 Y = (s->mb_height << 6) - 4;
1941 if(qx + px < -60) px = -60 - qx;
1942 if(qy + py < -60) py = -60 - qy;
1944 if(qx + px < -28) px = -28 - qx;
1945 if(qy + py < -28) py = -28 - qy;
1947 if(qx + px > X) px = X - qx;
1948 if(qy + py > Y) py = Y - qy;
1950 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
// When both A and C exist: if the predictor disagrees strongly with a
// neighbour (or the neighbour is intra), one bit selects A or C outright.
1951 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
1952 if(is_intra[xy - wrap])
1953 sum = FFABS(px) + FFABS(py);
1955 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
1957 if(get_bits1(&s->gb)) {
1965 if(is_intra[xy - 1])
1966 sum = FFABS(px) + FFABS(py);
1968 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
1970 if(get_bits1(&s->gb)) {
1980 /* store MV using signed modulus of MV range defined in 4.11 */
1981 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1982 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1983 if(mv1) { /* duplicate motion data for 1-MV block */
1984 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
1985 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
1986 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
1987 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
1988 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
1989 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
1993 /** Motion compensation for direct or interpolated blocks in B-frames
// Averages the backward (next-picture) reference into the already-motion-
// compensated forward prediction: edge emulation when the source block
// crosses picture bounds, optional range-reduction scaling of the source,
// then avg_ luma (qpel/mspel) and chroma (H.264-style bilinear) MC.
// NOTE(review): lines are elided; the mspel vs. non-mspel luma branch and
// some loop bodies are only partially visible.
1995 static void vc1_interp_mc(VC1Context *v)
1997 MpegEncContext *s = &v->s;
1998 DSPContext *dsp = &v->s.dsp;
1999 uint8_t *srcY, *srcU, *srcV;
2000 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
// Nothing to blend without a backward reference picture.
2002 if(!v->s.next_picture.data[0])return;
2004 mx = s->mv[1][0][0];
2005 my = s->mv[1][0][1];
// Chroma MV derived from luma MV; the (mx&3)==3 term rounds 3/4-pel up.
2006 uvmx = (mx + ((mx & 3) == 3)) >> 1;
2007 uvmy = (my + ((my & 3) == 3)) >> 1;
2008 srcY = s->next_picture.data[0];
2009 srcU = s->next_picture.data[1];
2010 srcV = s->next_picture.data[2];
2012 src_x = s->mb_x * 16 + (mx >> 2);
2013 src_y = s->mb_y * 16 + (my >> 2);
2014 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2015 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2017 src_x = clip( src_x, -16, s->mb_width * 16);
2018 src_y = clip( src_y, -16, s->mb_height * 16);
2019 uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
2020 uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
2022 srcY += src_y * s->linesize + src_x;
2023 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2024 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2026 /* for grayscale we should not try to read from unknown area */
2027 if(s->flags & CODEC_FLAG_GRAY) {
2028 srcU = s->edge_emu_buffer + 18 * s->linesize;
2029 srcV = s->edge_emu_buffer + 18 * s->linesize;
// Edge emulation: copy into edge_emu_buffer when the source block would
// read outside the padded picture edges.
2033 || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
2034 || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
2035 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
2037 srcY -= s->mspel * (1 + s->linesize);
2038 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
2039 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
2040 srcY = s->edge_emu_buffer;
2041 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
2042 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2043 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
2044 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
2047 /* if we deal with range reduction we need to scale source blocks */
2048 if(v->rangeredfrm) {
2050 uint8_t *src, *src2;
2053 for(j = 0; j < 17 + s->mspel*2; j++) {
2054 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
2057 src = srcU; src2 = srcV;
2058 for(j = 0; j < 9; j++) {
2059 for(i = 0; i < 9; i++) {
2060 src[i] = ((src[i] - 128) >> 1) + 128;
2061 src2[i] = ((src2[i] - 128) >> 1) + 128;
2063 src += s->uvlinesize;
2064 src2 += s->uvlinesize;
2067 srcY += s->mspel * (1 + s->linesize);
// Drop the quarter-pel fraction for chroma (rounded toward zero).
2071 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
2072 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
2077 dxy = ((my & 1) << 1) | (mx & 1);
2079 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
2081 if(s->flags & CODEC_FLAG_GRAY) return;
2082 /* Chroma MC always uses qpel blilinear */
2083 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
2086 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
2087 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
// Scale a co-located next-picture MV by BFRACTION to produce the forward
// (inv==0) or backward (inv!=0) direct-mode MV component; `qs` selects
// quarter-sample rounding. The B_FRACTION_DEN==256 fast path replaces the
// generic divide with shifts.
// NOTE(review): lines are elided (branch setup of `n` and the function's
// closing lines are not visible); code left byte-identical.
2090 static always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2094 #if B_FRACTION_DEN==256
2098 return 2 * ((value * n + 255) >> 9);
2099 return (value * n + 128) >> 8;
2102 n -= B_FRACTION_DEN;
2104 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2105 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2109 /** Reconstruct motion vector for B-frame and do motion compensation
// Dispatches motion compensation by B-MV type; intensity compensation is
// temporarily forced on (mv_mode swapped in/out around the MC calls) when
// v->use_ic is set, since only the forward reference is IC-adjusted.
// NOTE(review): lines elided — the direct/forward branches and the
// interpolated-mode forward MC call are not fully visible.
2111 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
2114 v->mv_mode2 = v->mv_mode;
2115 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2120 if(v->use_ic) v->mv_mode = v->mv_mode2;
2123 if(mode == BMV_TYPE_INTERPOLATED) {
2126 if(v->use_ic) v->mv_mode = v->mv_mode2;
2130 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
2131 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2132 if(v->use_ic) v->mv_mode = v->mv_mode2;
// Predict B-frame motion vectors: direct mode scales the co-located
// next-picture MV via scale_mv(); otherwise median prediction from
// neighbours A/B/C is done independently for the forward ([0]) and
// backward ([1]) directions, with pullback clamping per 8.3.5.3.4.
// NOTE(review): lines are elided; intra handling and some predictor
// branches are only partially visible. Code left byte-identical.
2135 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
2137 MpegEncContext *s = &v->s;
2138 int xy, wrap, off = 0;
2143 const uint8_t *is_intra = v->mb_type[0];
2147 /* scale MV difference to be quad-pel */
2148 dmv_x[0] <<= 1 - s->quarter_sample;
2149 dmv_y[0] <<= 1 - s->quarter_sample;
2150 dmv_x[1] <<= 1 - s->quarter_sample;
2151 dmv_y[1] <<= 1 - s->quarter_sample;
2153 wrap = s->b8_stride;
2154 xy = s->block_index[0];
2157 s->current_picture.motion_val[0][xy][0] =
2158 s->current_picture.motion_val[0][xy][1] =
2159 s->current_picture.motion_val[1][xy][0] =
2160 s->current_picture.motion_val[1][xy][1] = 0;
// Direct mode: derive both directions from the co-located next-picture MV.
2163 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2164 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2165 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2166 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2168 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2169 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2170 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2171 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
// Forward direction: median-predict from left (C), top (A), top-side (B).
2175 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2176 C = s->current_picture.motion_val[0][xy - 2];
2177 A = s->current_picture.motion_val[0][xy - wrap*2];
2178 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2179 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
2181 if(!s->first_slice_line) { // predictor A is not out of bounds
2182 if(s->mb_width == 1) {
2186 px = mid_pred(A[0], B[0], C[0]);
2187 py = mid_pred(A[1], B[1], C[1]);
2189 } else if(s->mb_x) { // predictor C is not out of bounds
2195 /* Pullback MV as specified in 8.3.5.3.4 */
// Half-pel (<<5) units for Simple/Main; quarter-pel (<<6) for Advanced.
2198 if(v->profile < PROFILE_ADVANCED) {
2199 qx = (s->mb_x << 5);
2200 qy = (s->mb_y << 5);
2201 X = (s->mb_width << 5) - 4;
2202 Y = (s->mb_height << 5) - 4;
2203 if(qx + px < -28) px = -28 - qx;
2204 if(qy + py < -28) py = -28 - qy;
2205 if(qx + px > X) px = X - qx;
2206 if(qy + py > Y) py = Y - qy;
2208 qx = (s->mb_x << 6);
2209 qy = (s->mb_y << 6);
2210 X = (s->mb_width << 6) - 4;
2211 Y = (s->mb_height << 6) - 4;
2212 if(qx + px < -60) px = -60 - qx;
2213 if(qy + py < -60) py = -60 - qy;
2214 if(qx + px > X) px = X - qx;
2215 if(qy + py > Y) py = Y - qy;
2218 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
// NOTE(review): the `0 &&` deliberately disables hybrid prediction for
// B-frames here — presumably intentional (no hybrid bit in this path);
// confirm against the spec before "fixing".
2219 if(0 && !s->first_slice_line && s->mb_x) {
2220 if(is_intra[xy - wrap])
2221 sum = FFABS(px) + FFABS(py);
2223 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2225 if(get_bits1(&s->gb)) {
2233 if(is_intra[xy - 2])
2234 sum = FFABS(px) + FFABS(py);
2236 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2238 if(get_bits1(&s->gb)) {
2248 /* store MV using signed modulus of MV range defined in 4.11 */
2249 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2250 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
// Backward direction: same procedure against motion_val[1] neighbours.
2252 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2253 C = s->current_picture.motion_val[1][xy - 2];
2254 A = s->current_picture.motion_val[1][xy - wrap*2];
2255 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2256 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
2258 if(!s->first_slice_line) { // predictor A is not out of bounds
2259 if(s->mb_width == 1) {
2263 px = mid_pred(A[0], B[0], C[0]);
2264 py = mid_pred(A[1], B[1], C[1]);
2266 } else if(s->mb_x) { // predictor C is not out of bounds
2272 /* Pullback MV as specified in 8.3.5.3.4 */
2275 if(v->profile < PROFILE_ADVANCED) {
2276 qx = (s->mb_x << 5);
2277 qy = (s->mb_y << 5);
2278 X = (s->mb_width << 5) - 4;
2279 Y = (s->mb_height << 5) - 4;
2280 if(qx + px < -28) px = -28 - qx;
2281 if(qy + py < -28) py = -28 - qy;
2282 if(qx + px > X) px = X - qx;
2283 if(qy + py > Y) py = Y - qy;
2285 qx = (s->mb_x << 6);
2286 qy = (s->mb_y << 6);
2287 X = (s->mb_width << 6) - 4;
2288 Y = (s->mb_height << 6) - 4;
2289 if(qx + px < -60) px = -60 - qx;
2290 if(qy + py < -60) py = -60 - qy;
2291 if(qx + px > X) px = X - qx;
2292 if(qy + py > Y) py = Y - qy;
2295 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
// NOTE(review): disabled with `0 &&` like the forward path above.
2296 if(0 && !s->first_slice_line && s->mb_x) {
2297 if(is_intra[xy - wrap])
2298 sum = FFABS(px) + FFABS(py);
2300 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2302 if(get_bits1(&s->gb)) {
2310 if(is_intra[xy - 2])
2311 sum = FFABS(px) + FFABS(py);
2313 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2315 if(get_bits1(&s->gb)) {
2325 /* store MV using signed modulus of MV range defined in 4.11 */
2327 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2328 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2330 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2331 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2332 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2333 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2336 /** Get predicted DC value for I-frames only
2337 * prediction dir: left=0, top=1
2338 * @param s MpegEncContext
2339 * @param[in] n block index in the current MB
2340 * @param dc_val_ptr Pointer to DC predictor
2341 * @param dir_ptr Prediction direction for use in AC prediction
// Picks the left (c) or top (a) DC predictor by comparing gradients
// against the top-left (b); out-of-picture neighbours are substituted
// with dcpred[scale] (or 0 for high PQ / overlap).
// NOTE(review): lines elided — `c` assignment, gradient else-branch and
// the return are not visible.
2343 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2344 int16_t **dc_val_ptr, int *dir_ptr)
2346 int a, b, c, wrap, pred, scale;
// dcpred[q] approximates 1024/q: default DC prediction per quantizer.
2348 static const uint16_t dcpred[32] = {
2349 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2350 114, 102, 93, 85, 79, 73, 68, 64,
2351 60, 57, 54, 51, 49, 47, 45, 43,
2352 41, 39, 38, 37, 35, 34, 33
2355 /* find prediction - wmv3_dc_scale always used here in fact */
2356 if (n < 4) scale = s->y_dc_scale;
2357 else scale = s->c_dc_scale;
2359 wrap = s->block_wrap[n];
2360 dc_val= s->dc_val[0] + s->block_index[n];
// b = top-left neighbour, a = top neighbour (c = left, set elsewhere).
2366 b = dc_val[ - 1 - wrap];
2367 a = dc_val[ - wrap];
2369 if (pq < 9 || !overlap)
2371 /* Set outer values */
2372 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2373 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2377 /* Set outer values */
2378 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2379 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
// Horizontal gradient <= vertical gradient => predict from the left.
2382 if (abs(a - b) <= abs(b - c)) {
2390 /* update predictor */
2391 *dc_val_ptr = &dc_val[0];
2396 /** Get predicted DC value
2397 * prediction dir: left=0, top=1
2398 * @param s MpegEncContext
2399 * @param[in] n block index in the current MB
2400 * @param dc_val_ptr Pointer to DC predictor
2401 * @param dir_ptr Prediction direction for use in AC prediction
// P-frame variant: neighbours may use different quantizers, so a/b/c are
// rescaled to the current MB's quantizer via vc1_dqscale before the
// gradient comparison; a_avail/c_avail gate which neighbours exist.
// NOTE(review): lines elided — `c` assignment, `q2` guards and the return
// are not visible. Code left byte-identical.
2403 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2404 int a_avail, int c_avail,
2405 int16_t **dc_val_ptr, int *dir_ptr)
2407 int a, b, c, wrap, pred, scale;
2409 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2412 /* find prediction - wmv3_dc_scale always used here in fact */
2413 if (n < 4) scale = s->y_dc_scale;
2414 else scale = s->c_dc_scale;
2416 wrap = s->block_wrap[n];
2417 dc_val= s->dc_val[0] + s->block_index[n];
2423 b = dc_val[ - 1 - wrap];
2424 a = dc_val[ - wrap];
2425 /* scale predictors if needed */
// Rescale: pred * dc_scale(q2) / dc_scale(q1), done with a reciprocal
// table (vc1_dqscale) and fixed-point rounding (>>18).
2426 q1 = s->current_picture.qscale_table[mb_pos];
2427 if(c_avail && (n!= 1 && n!=3)) {
2428 q2 = s->current_picture.qscale_table[mb_pos - 1];
2430 c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2432 if(a_avail && (n!= 2 && n!=3)) {
2433 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2435 a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2437 if(a_avail && c_avail && (n!=3)) {
2440 if(n != 2) off -= s->mb_stride;
2441 q2 = s->current_picture.qscale_table[off];
2443 b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
// Choose predictor: gradient rule when both exist, else whichever exists.
2446 if(a_avail && c_avail) {
2447 if(abs(a - b) <= abs(b - c)) {
2454 } else if(a_avail) {
2457 } else if(c_avail) {
2465 /* update predictor */
2466 *dc_val_ptr = &dc_val[0];
2472 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2473 * @see 7.1.4, p91 and 8.1.1.7, p104
// Predict the coded-block flag from the left (a), top-left (b) and top (c)
// neighbours (MPEG-4-style CBP prediction) and return a pointer to this
// block's coded_block slot for the caller to update.
// NOTE(review): the prediction selection and return lines are elided;
// code left byte-identical.
2477 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
2479 int xy, wrap, pred, a, b, c;
2481 xy = s->block_index[n];
2482 wrap = s->b8_stride;
2487 a = s->coded_block[xy - 1 ];
2488 b = s->coded_block[xy - 1 - wrap];
2489 c = s->coded_block[xy - wrap];
2498 *coded_block_ptr = &s->coded_block[xy];
2504 * Decode one AC coefficient
2505 * @param v The VC1 context
2506 * @param last Last coefficient
2507 * @param skip How much zero coefficients to skip
2508 * @param value Decoded AC coefficient value
/* Decode one AC run/level pair from the bitstream using the VLC tables of
 * the given coding set, handling the three VC-1 escape modes.
 * Results are returned through *last / *skip / *value (stores not visible
 * in this gapped listing).
 * NOTE(review): non-contiguous listing — branch structure between the
 * escape modes is partially missing. */
2511 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
2513 GetBitContext *gb = &v->s.gb;
2514 int index, escape, run = 0, level = 0, lst = 0;
2516 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
/* non-escape: direct table lookup of (run, level, last) */
2517 if (index != vc1_ac_sizes[codingset] - 1) {
2518 run = vc1_index_decode_table[codingset][index][0];
2519 level = vc1_index_decode_table[codingset][index][1];
2520 lst = index >= vc1_last_decode_table[codingset];
/* escape: decode210 selects escape mode 1/2/3 */
2524 escape = decode210(gb);
2526 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2527 run = vc1_index_decode_table[codingset][index][0];
2528 level = vc1_index_decode_table[codingset][index][1];
2529 lst = index >= vc1_last_decode_table[codingset];
/* escape mode 1: add a delta to the table level */
2532 level += vc1_last_delta_level_table[codingset][run];
2534 level += vc1_delta_level_table[codingset][run];
/* escape mode 2: add a delta to the table run */
2537 run += vc1_last_delta_run_table[codingset][level] + 1;
2539 run += vc1_delta_run_table[codingset][level] + 1;
/* escape mode 3: fully explicit coding; run/level field widths are
 * established once per frame (esc3_*_length) then reused */
2545 lst = get_bits(gb, 1);
2546 if(v->s.esc3_level_length == 0) {
2547 if(v->pq < 8 || v->dquantfrm) { // table 59
2548 v->s.esc3_level_length = get_bits(gb, 3);
2549 if(!v->s.esc3_level_length)
2550 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2552 v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
2554 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2556 run = get_bits(gb, v->s.esc3_run_length);
2557 sign = get_bits(gb, 1);
2558 level = get_bits(gb, v->s.esc3_level_length);
2569 /** Decode intra block in intra frames - should be faster than decode_intra_block
2570 * @param v VC1Context
2571 * @param block block to decode
2572 * @param coded are AC coeffs present or not
2573 * @param codingset set of VLC to decode data
/* Decode one intra block of an I-frame (Simple/Main profile path): DC
 * differential, optional AC coefficients with zigzag scan, AC prediction
 * from the left or top neighbour, and saving of this block's AC row/column
 * for future prediction.
 * NOTE(review): gapped listing — if/else structure and several closing
 * braces are not visible; comments describe only visible lines. */
2575 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
2577 GetBitContext *gb = &v->s.gb;
2578 MpegEncContext *s = &v->s;
2579 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2582 int16_t *ac_val, *ac_val2;
2585 /* Get DC differential */
/* luma blocks (n < 4, presumably — selection line not visible) use the
 * luma DC VLC, chroma blocks the chroma DC VLC */
2587 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2589 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2592 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape value: DC differential coded explicitly, width depends on pq */
2597 if (dcdiff == 119 /* ESC index value */)
2599 /* TODO: Optimize */
2600 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2601 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2602 else dcdiff = get_bits(gb, 8);
/* low quantizers: refine the differential with extra bits */
2607 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2608 else if (v->pq == 2)
2609 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
/* sign bit */
2611 if (get_bits(gb, 1))
2616 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2619 /* Store the quantized DC coeff, used for prediction */
2621 block[0] = dcdiff * s->y_dc_scale;
2623 block[0] = dcdiff * s->c_dc_scale;
/* --- coded path: decode AC coefficients --- */
2636 int last = 0, skip, value;
2637 const int8_t *zz_table;
2641 scale = v->pq * 2 + v->halfpq;
/* zigzag scan pattern depends on the DC prediction direction */
2645 zz_table = vc1_horizontal_zz;
2647 zz_table = vc1_vertical_zz;
2649 zz_table = vc1_normal_zz;
2651 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
/* point ac_val at the predictor block: left neighbour or (minus one row
 * of blocks) the top neighbour */
2653 if(dc_pred_dir) //left
2656 ac_val -= 16 * s->block_wrap[n];
/* run/level decode loop body (loop header not visible) */
2659 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2663 block[zz_table[i++]] = value;
2666 /* apply AC prediction if needed */
/* left prediction adds the neighbour's first column; top prediction adds
 * its first row (ac_val[0..7] = column, ac_val[8..15] = row) */
2668 if(dc_pred_dir) { //left
2669 for(k = 1; k < 8; k++)
2670 block[k << 3] += ac_val[k];
2672 for(k = 1; k < 8; k++)
2673 block[k] += ac_val[k + 8];
2676 /* save AC coeffs for further prediction */
2677 for(k = 1; k < 8; k++) {
2678 ac_val2[k] = block[k << 3];
2679 ac_val2[k + 8] = block[k];
2682 /* scale AC coeffs */
2683 for(k = 1; k < 64; k++)
/* non-uniform quantizer: add +/-pq rounding term (guard line not visible) */
2687 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2690 if(s->ac_pred) i = 63;
/* --- uncoded path: block has DC only; still maintain AC predictors --- */
2696 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2699 scale = v->pq * 2 + v->halfpq;
2700 memset(ac_val2, 0, 16 * 2);
2701 if(dc_pred_dir) {//left
2704 memcpy(ac_val2, ac_val, 8 * 2);
2706 ac_val -= 16 * s->block_wrap[n];
2708 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2711 /* apply AC prediction if needed */
2713 if(dc_pred_dir) { //left
2714 for(k = 1; k < 8; k++) {
2715 block[k << 3] = ac_val[k] * scale;
2716 if(!v->pquantizer && block[k << 3])
2717 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2720 for(k = 1; k < 8; k++) {
2721 block[k] = ac_val[k + 8] * scale;
2722 if(!v->pquantizer && block[k])
2723 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2729 s->block_last_index[n] = i;
2734 /** Decode intra block in intra frames - should be faster than decode_intra_block
2735 * @param v VC1Context
2736 * @param block block to decode
2737 * @param coded are AC coeffs present or not
2738 * @param codingset set of VLC to decode data
/* Advanced-profile variant of vc1_decode_i_block: same DC/AC decode flow,
 * but uses the per-MB quantizer mquant instead of the frame pq, honours the
 * explicit A (top) / C (left) availability flags, and rescales neighbour AC
 * predictors when the neighbour MB used a different quantizer (q2 != q1).
 * NOTE(review): gapped listing — several branches/braces not visible. */
2740 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
2742 GetBitContext *gb = &v->s.gb;
2743 MpegEncContext *s = &v->s;
2744 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2747 int16_t *ac_val, *ac_val2;
2749 int a_avail = v->a_avail, c_avail = v->c_avail;
2750 int use_pred = s->ac_pred;
2753 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2755 /* Get DC differential */
2757 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2759 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2762 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape DC differential; width driven by mquant (not frame pq) here */
2767 if (dcdiff == 119 /* ESC index value */)
2769 /* TODO: Optimize */
2770 if (mquant == 1) dcdiff = get_bits(gb, 10);
2771 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2772 else dcdiff = get_bits(gb, 8);
2777 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2778 else if (mquant == 2)
2779 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
/* sign bit */
2781 if (get_bits(gb, 1))
2786 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2789 /* Store the quantized DC coeff, used for prediction */
2791 block[0] = dcdiff * s->y_dc_scale;
2793 block[0] = dcdiff * s->c_dc_scale;
2802 /* check if AC is needed at all and adjust direction if needed */
2803 if(!a_avail) dc_pred_dir = 1;
2804 if(!c_avail) dc_pred_dir = 0;
2805 if(!a_avail && !c_avail) use_pred = 0;
2806 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2809 scale = mquant * 2 + v->halfpq;
/* predictor source: left neighbour block, or one block-row up for top */
2811 if(dc_pred_dir) //left
2814 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer, q2 = predictor MB's quantizer; inner luma
 * blocks (n 1..3) predict from within the same MB, so q2 = q1 */
2816 q1 = s->current_picture.qscale_table[mb_pos];
2817 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
2818 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2819 if(n && n<4) q2 = q1;
/* --- coded path --- */
2822 int last = 0, skip, value;
2823 const int8_t *zz_table;
2828 zz_table = vc1_horizontal_zz;
2830 zz_table = vc1_vertical_zz;
2832 zz_table = vc1_normal_zz;
2835 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2839 block[zz_table[i++]] = value;
2842 /* apply AC prediction if needed */
2844 /* scale predictors if needed*/
/* q2 != q1 (presumably the guard, not visible): requantize the neighbour
 * AC values with fixed-point multiply before adding them */
2849 if(dc_pred_dir) { //left
2850 for(k = 1; k < 8; k++)
2851 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2853 for(k = 1; k < 8; k++)
2854 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
/* same-quantizer case: add predictors directly */
2857 if(dc_pred_dir) { //left
2858 for(k = 1; k < 8; k++)
2859 block[k << 3] += ac_val[k];
2861 for(k = 1; k < 8; k++)
2862 block[k] += ac_val[k + 8];
2866 /* save AC coeffs for further prediction */
2867 for(k = 1; k < 8; k++) {
2868 ac_val2[k] = block[k << 3];
2869 ac_val2[k + 8] = block[k];
2872 /* scale AC coeffs */
2873 for(k = 1; k < 64; k++)
2877 block[k] += (block[k] < 0) ? -mquant : mquant;
2880 if(use_pred) i = 63;
2881 } else { // no AC coeffs
2884 memset(ac_val2, 0, 16 * 2);
2885 if(dc_pred_dir) {//left
2887 memcpy(ac_val2, ac_val, 8 * 2);
/* requantize saved predictors too when quantizers differ */
2891 for(k = 1; k < 8; k++)
2892 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2897 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2901 for(k = 1; k < 8; k++)
2902 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2907 /* apply AC prediction if needed */
2909 if(dc_pred_dir) { //left
2910 for(k = 1; k < 8; k++) {
2911 block[k << 3] = ac_val2[k] * scale;
2912 if(!v->pquantizer && block[k << 3])
2913 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
2916 for(k = 1; k < 8; k++) {
2917 block[k] = ac_val2[k + 8] * scale;
2918 if(!v->pquantizer && block[k])
2919 block[k] += (block[k] < 0) ? -mquant : mquant;
2925 s->block_last_index[n] = i;
2930 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2931 * @param v VC1Context
2932 * @param block block to decode
2933 * @param coded are AC coeffs present or not
2934 * @param mquant block quantizer
2935 * @param codingset set of VLC to decode data
/* Decode an intra block inside an inter (P/B) frame. Same structure as
 * vc1_decode_i_block_adv but additionally clamps mquant and reloads the
 * DC scale tables per block, and always uses the 8x8 progressive zigzag.
 * NOTE(review): gapped listing — branches/braces partially missing. */
2937 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
2939 GetBitContext *gb = &v->s.gb;
2940 MpegEncContext *s = &v->s;
2941 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2944 int16_t *ac_val, *ac_val2;
2946 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2947 int a_avail = v->a_avail, c_avail = v->c_avail;
2948 int use_pred = s->ac_pred;
2952 /* XXX: Guard against dumb values of mquant */
/* NOTE(review): mquant < 1 clamps to 0, not 1 — index 0 into the DC scale
 * tables; verify this is the intended lower bound. */
2953 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
2955 /* Set DC scale - y and c use the same */
2956 s->y_dc_scale = s->y_dc_scale_table[mquant];
2957 s->c_dc_scale = s->c_dc_scale_table[mquant];
2959 /* Get DC differential */
2961 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2963 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2966 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape DC differential, width driven by mquant */
2971 if (dcdiff == 119 /* ESC index value */)
2973 /* TODO: Optimize */
2974 if (mquant == 1) dcdiff = get_bits(gb, 10);
2975 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2976 else dcdiff = get_bits(gb, 8);
2981 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2982 else if (mquant == 2)
2983 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
/* sign bit */
2985 if (get_bits(gb, 1))
2990 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2993 /* Store the quantized DC coeff, used for prediction */
2996 block[0] = dcdiff * s->y_dc_scale;
2998 block[0] = dcdiff * s->c_dc_scale;
3007 /* check if AC is needed at all and adjust direction if needed */
3008 if(!a_avail) dc_pred_dir = 1;
3009 if(!c_avail) dc_pred_dir = 0;
3010 if(!a_avail && !c_avail) use_pred = 0;
3011 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3014 scale = mquant * 2 + v->halfpq;
/* predictor source: left neighbour, or one block-row up for top */
3016 if(dc_pred_dir) //left
3019 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer, q2 = predictor MB's; intra-MB neighbours share
 * the quantizer */
3021 q1 = s->current_picture.qscale_table[mb_pos];
3022 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
3023 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3024 if(n && n<4) q2 = q1;
/* --- coded path: single fixed zigzag here (unlike the I-frame variants) --- */
3027 int last = 0, skip, value;
3028 const int8_t *zz_table;
3031 zz_table = vc1_simple_progressive_8x8_zz;
3034 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3038 block[zz_table[i++]] = value;
3041 /* apply AC prediction if needed */
3043 /* scale predictors if needed*/
/* requantize neighbour AC predictors when quantizers differ */
3048 if(dc_pred_dir) { //left
3049 for(k = 1; k < 8; k++)
3050 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3052 for(k = 1; k < 8; k++)
3053 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
/* same-quantizer case: add predictors directly */
3056 if(dc_pred_dir) { //left
3057 for(k = 1; k < 8; k++)
3058 block[k << 3] += ac_val[k];
3060 for(k = 1; k < 8; k++)
3061 block[k] += ac_val[k + 8];
3065 /* save AC coeffs for further prediction */
3066 for(k = 1; k < 8; k++) {
3067 ac_val2[k] = block[k << 3];
3068 ac_val2[k + 8] = block[k];
3071 /* scale AC coeffs */
3072 for(k = 1; k < 64; k++)
3076 block[k] += (block[k] < 0) ? -mquant : mquant;
3079 if(use_pred) i = 63;
3080 } else { // no AC coeffs
3083 memset(ac_val2, 0, 16 * 2);
3084 if(dc_pred_dir) {//left
3086 memcpy(ac_val2, ac_val, 8 * 2);
/* requantize saved predictors when quantizers differ */
3090 for(k = 1; k < 8; k++)
3091 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3096 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3100 for(k = 1; k < 8; k++)
3101 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3106 /* apply AC prediction if needed */
3108 if(dc_pred_dir) { //left
3109 for(k = 1; k < 8; k++) {
3110 block[k << 3] = ac_val2[k] * scale;
3111 if(!v->pquantizer && block[k << 3])
3112 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3115 for(k = 1; k < 8; k++) {
3116 block[k] = ac_val2[k + 8] * scale;
3117 if(!v->pquantizer && block[k])
3118 block[k] += (block[k] < 0) ? -mquant : mquant;
3124 s->block_last_index[n] = i;
/* Decode one inter (residual) block of a P/B frame: pick the block
 * transform type (8x8, two 8x4, two 4x8 or four 4x4), decode run/level
 * pairs per (sub)block, dequantize, and apply the matching inverse
 * transform. subblkpat bit i set == subblock i NOT coded (inverted mask).
 * NOTE(review): gapped listing — the switch dispatching on ttblk and
 * several loop/brace lines are not visible. */
3131 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block)
3133 MpegEncContext *s = &v->s;
3134 GetBitContext *gb = &s->gb;
3137 int scale, off, idx, last, skip, value;
3138 int ttblk = ttmb & 7;
/* per-block TT signalled in the stream (when not inherited from the MB) */
3141 ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3143 if(ttblk == TT_4X4) {
3144 subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3146 if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
3147 subblkpat = decode012(gb);
3148 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
3149 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
3150 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
3152 scale = 2 * mquant + v->halfpq;
3154 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3155 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3156 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3159 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3160 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* --- TT_8X8: one pass over the whole block --- */
3168 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3172 idx = vc1_simple_progressive_8x8_zz[i++];
3173 block[idx] = value * scale;
/* non-uniform quantizer rounding (guard line not visible) */
3175 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3177 s->dsp.vc1_inv_trans_8x8(block);
/* --- TT_4X4: four 4x4 subblocks; off places each quadrant --- */
3180 for(j = 0; j < 4; j++) {
3181 last = subblkpat & (1 << (3 - j));
3183 off = (j & 1) * 4 + (j & 2) * 16;
3185 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3189 idx = vc1_simple_progressive_4x4_zz[i++];
3190 block[idx + off] = value * scale;
3192 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3194 if(!(subblkpat & (1 << (3 - j))))
3195 s->dsp.vc1_inv_trans_4x4(block, j);
/* --- TT_8X4: two horizontal halves; scan table differs per profile --- */
3199 for(j = 0; j < 2; j++) {
3200 last = subblkpat & (1 << (1 - j));
3204 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3208 if(v->profile < PROFILE_ADVANCED)
3209 idx = vc1_simple_progressive_8x4_zz[i++];
3211 idx = vc1_adv_progressive_8x4_zz[i++];
3212 block[idx + off] = value * scale;
3214 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3216 if(!(subblkpat & (1 << (1 - j))))
3217 s->dsp.vc1_inv_trans_8x4(block, j);
/* --- TT_4X8: two vertical halves --- */
3221 for(j = 0; j < 2; j++) {
3222 last = subblkpat & (1 << (1 - j));
3226 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3230 if(v->profile < PROFILE_ADVANCED)
3231 idx = vc1_simple_progressive_4x8_zz[i++];
3233 idx = vc1_adv_progressive_4x8_zz[i++];
3234 block[idx + off] = value * scale;
3236 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3238 if(!(subblkpat & (1 << (1 - j))))
3239 s->dsp.vc1_inv_trans_4x8(block, j);
3247 /** Decode one P-frame MB (in Simple/Main profile)
/* Decode one P-frame macroblock (Simple/Main profile): handles 1MV and
 * 4MV modes, skipped MBs, per-block intra/inter decisions, motion
 * compensation, residual decoding and overlap filtering.
 * NOTE(review): gapped listing — large parts of the control flow (loop
 * headers, else branches, braces) are not visible; comments below only
 * annotate visible lines. */
3249 static int vc1_decode_p_mb(VC1Context *v)
3251 MpegEncContext *s = &v->s;
3252 GetBitContext *gb = &s->gb;
3254 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3255 int cbp; /* cbp decoding stuff */
3256 int mqdiff, mquant; /* MB quantization */
3257 int ttmb = v->ttfrm; /* MB Transform type */
3260 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3261 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3262 int mb_has_coeffs = 1; /* last_flag */
3263 int dmv_x, dmv_y; /* Differential MV components */
3264 int index, index1; /* LUT indices */
3265 int val, sign; /* temp values */
3266 int first_block = 1;
3268 int skipped, fourmv;
3270 mquant = v->pq; /* Loosy initialization */
/* MVTYPE: raw bit per MB, or read from the decoded bitplane */
3272 if (v->mv_type_is_raw)
3273 fourmv = get_bits1(gb);
3275 fourmv = v->mv_type_mb_plane[mb_pos];
/* SKIP flag: raw bit or bitplane */
3277 skipped = get_bits1(gb);
3279 skipped = v->s.mbskip_table[mb_pos];
3281 s->dsp.clear_blocks(s->block[0]);
3283 if (!fourmv) /* 1MV mode */
3287 GET_MVDATA(dmv_x, dmv_y);
3290 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3291 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3293 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3294 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3296 /* FIXME Set DC val for inter block ? */
3297 if (s->mb_intra && !mb_has_coeffs)
3300 s->ac_pred = get_bits(gb, 1);
3303 else if (mb_has_coeffs)
3305 if (s->mb_intra) s->ac_pred = get_bits(gb, 1);
3306 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3314 s->current_picture.qscale_table[mb_pos] = mquant;
/* per-MB transform type unless fixed at frame level */
3316 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3317 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table,
3318 VC1_TTMB_VLC_BITS, 2);
3319 if(!s->mb_intra) vc1_mc_1mv(v, 0);
/* per-block decode loop (header not visible): i = block index 0..5 */
3323 s->dc_val[0][s->block_index[i]] = 0;
3325 val = ((cbp >> (5 - i)) & 1);
3326 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3327 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3329 /* check if prediction blocks A and C are available */
3330 v->a_avail = v->c_avail = 0;
3331 if(i == 2 || i == 3 || !s->first_slice_line)
3332 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3333 if(i == 1 || i == 3 || s->mb_x)
3334 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
/* intra block: decode, inverse transform, bias to mid-gray, write out */
3336 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3337 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3338 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3339 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3340 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3341 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3342 if(v->pq >= 9 && v->overlap) {
3344 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
3346 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
/* inter block: decode residual and add to the MC prediction */
3349 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3350 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3352 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3353 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* skipped 1MV MB: reset per-block state and do plain MC */
3360 for(i = 0; i < 6; i++) {
3361 v->mb_type[0][s->block_index[i]] = 0;
3362 s->dc_val[0][s->block_index[i]] = 0;
3364 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3365 s->current_picture.qscale_table[mb_pos] = 0;
3366 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
/* ---- 4MV mode ---- */
3373 if (!skipped /* unskipped MB */)
3375 int intra_count = 0, coded_inter = 0;
3376 int is_intra[6], is_coded[6];
3378 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* first pass over the 4 luma blocks: MV data + per-block intra flags */
3381 val = ((cbp >> (5 - i)) & 1);
3382 s->dc_val[0][s->block_index[i]] = 0;
3389 GET_MVDATA(dmv_x, dmv_y);
3391 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3392 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3393 intra_count += s->mb_intra;
3394 is_intra[i] = s->mb_intra;
3395 is_coded[i] = mb_has_coeffs;
/* chroma inherits intra when >= 3 luma blocks are intra */
3398 is_intra[i] = (intra_count >= 3);
3401 if(i == 4) vc1_mc_4mv_chroma(v);
3402 v->mb_type[0][s->block_index[i]] = is_intra[i];
3403 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
3405 // if there are no coded blocks then don't do anything more
3406 if(!intra_count && !coded_inter) return 0;
3409 s->current_picture.qscale_table[mb_pos] = mquant;
3410 /* test if block is intra and has pred */
3415 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3416 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
3421 if(intrapred)s->ac_pred = get_bits(gb, 1);
3422 else s->ac_pred = 0;
3424 if (!v->ttmbf && coded_inter)
3425 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* second pass: per-block decode (loop header not visible) */
3429 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3430 s->mb_intra = is_intra[i];
3432 /* check if prediction blocks A and C are available */
3433 v->a_avail = v->c_avail = 0;
3434 if(i == 2 || i == 3 || !s->first_slice_line)
3435 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3436 if(i == 1 || i == 3 || s->mb_x)
3437 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3439 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
3440 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3441 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3442 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3443 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3444 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3445 if(v->pq >= 9 && v->overlap) {
3447 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
3449 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
3451 } else if(is_coded[i]) {
3452 status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3453 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3455 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3456 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
/* skipped 4MV MB: zero MVs per luma block, then chroma MC */
3464 s->current_picture.qscale_table[mb_pos] = 0;
3465 for (i=0; i<6; i++) {
3466 v->mb_type[0][s->block_index[i]] = 0;
3467 s->dc_val[0][s->block_index[i]] = 0;
3471 vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
3472 vc1_mc_4mv_luma(v, i);
3474 vc1_mc_4mv_chroma(v);
3475 s->current_picture.qscale_table[mb_pos] = 0;
3480 /* Should never happen */
3484 /** Decode one B-frame MB (in Main profile)
/* Decode one B-frame macroblock (Main profile): direct vs explicit
 * prediction, forward/backward/interpolated MV types, then per-block
 * intra/residual decoding as in the P path.
 * NOTE(review): gapped listing — much of the branch structure between the
 * direct/skipped/coded cases is not visible. */
3486 static void vc1_decode_b_mb(VC1Context *v)
3488 MpegEncContext *s = &v->s;
3489 GetBitContext *gb = &s->gb;
3491 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3492 int cbp = 0; /* cbp decoding stuff */
3493 int mqdiff, mquant; /* MB quantization */
3494 int ttmb = v->ttfrm; /* MB Transform type */
3496 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3497 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3498 int mb_has_coeffs = 0; /* last_flag */
3499 int index, index1; /* LUT indices */
3500 int val, sign; /* temp values */
3501 int first_block = 1;
3503 int skipped, direct;
3504 int dmv_x[2], dmv_y[2];
3505 int bmvtype = BMV_TYPE_BACKWARD;
3507 mquant = v->pq; /* Loosy initialization */
/* DIRECT flag: raw bit or decoded bitplane */
3511 direct = get_bits1(gb);
3513 direct = v->direct_mb_plane[mb_pos];
/* SKIP flag: raw bit or bitplane */
3515 skipped = get_bits1(gb);
3517 skipped = v->s.mbskip_table[mb_pos];
3519 s->dsp.clear_blocks(s->block[0]);
3520 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
3521 for(i = 0; i < 6; i++) {
3522 v->mb_type[0][s->block_index[i]] = 0;
3523 s->dc_val[0][s->block_index[i]] = 0;
3525 s->current_picture.qscale_table[mb_pos] = 0;
/* non-direct: read one differential MV, mirrored into both directions */
3529 GET_MVDATA(dmv_x[0], dmv_y[0]);
3530 dmv_x[1] = dmv_x[0];
3531 dmv_y[1] = dmv_y[0];
3533 if(skipped || !s->mb_intra) {
3534 bmvtype = decode012(gb);
/* decode012 result maps to fwd/bwd depending on B-fraction half */
3537 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
3540 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
3543 bmvtype = BMV_TYPE_INTERPOLATED;
3544 dmv_x[0] = dmv_y[0] = 0;
3548 for(i = 0; i < 6; i++)
3549 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* skipped MB: direct implies interpolated; predict + MC, nothing else */
3552 if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
3553 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3554 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
/* direct coded MB: CBP + quantizer + TT, zero differentials, MC */
3558 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3562 s->current_picture.qscale_table[mb_pos] = mquant;
3564 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3565 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
3566 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3567 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3569 if(!mb_has_coeffs && !s->mb_intra) {
3570 /* no coded blocks - effectively skipped */
3571 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3572 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3575 if(s->mb_intra && !mb_has_coeffs) {
3577 s->current_picture.qscale_table[mb_pos] = mquant;
3578 s->ac_pred = get_bits1(gb);
3580 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* interpolated: a second MV differential follows */
3582 if(bmvtype == BMV_TYPE_INTERPOLATED) {
3583 GET_MVDATA(dmv_x[0], dmv_y[0]);
3584 if(!mb_has_coeffs) {
3585 /* interpolated skipped block */
3586 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3587 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3591 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3593 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3596 s->ac_pred = get_bits1(gb);
3597 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3599 s->current_picture.qscale_table[mb_pos] = mquant;
3600 if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3601 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* per-block decode loop (header not visible): same layout as the P path */
3607 s->dc_val[0][s->block_index[i]] = 0;
3609 val = ((cbp >> (5 - i)) & 1);
3610 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3611 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3613 /* check if prediction blocks A and C are available */
3614 v->a_avail = v->c_avail = 0;
3615 if(i == 2 || i == 3 || !s->first_slice_line)
3616 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3617 if(i == 1 || i == 3 || s->mb_x)
3618 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
/* intra block: decode, inverse transform, bias to mid-gray, write out */
3620 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3621 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3622 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3623 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3624 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3625 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
/* inter block: residual added onto the bidirectional MC prediction */
3627 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3628 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3630 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3631 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3636 /** Decode blocks of I-frame
/* Decode all macroblocks of a Simple/Main-profile I-frame: select the AC
 * coding sets, then per MB decode CBP + 6 intra blocks, write them out and
 * run the overlap filters.
 * NOTE(review): gapped listing — switch case labels, some closing braces
 * and the block-write loop body are not visible. */
3638 static void vc1_decode_i_blocks(VC1Context *v)
3641 MpegEncContext *s = &v->s;
3646 /* select codingmode used for VLC tables selection */
3647 switch(v->y_ac_table_index){
3649 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3652 v->codingset = CS_HIGH_MOT_INTRA;
3655 v->codingset = CS_MID_RATE_INTRA;
3659 switch(v->c_ac_table_index){
3661 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3664 v->codingset2 = CS_HIGH_MOT_INTER;
3667 v->codingset2 = CS_MID_RATE_INTER;
3671 /* Set DC scale - y and c use the same */
3672 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3673 s->c_dc_scale = s->c_dc_scale_table[v->pq];
3676 s->mb_x = s->mb_y = 0;
3678 s->first_slice_line = 1;
3679 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3680 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3681 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3682 ff_init_block_index(s);
3683 ff_update_block_index(s);
3684 s->dsp.clear_blocks(s->block[0]);
/* NOTE(review): mb_pos uses s->mb_width here, while the advanced-profile
 * variant and all other functions in this file use s->mb_stride — verify
 * whether qscale_table/mb_type indexing expects the stride. */
3685 mb_pos = s->mb_x + s->mb_y * s->mb_width;
3686 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3687 s->current_picture.qscale_table[mb_pos] = v->pq;
3688 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3689 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3691 // do actual MB decoding and displaying
3692 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
3693 v->s.ac_pred = get_bits(&v->s.gb, 1);
3695 for(k = 0; k < 6; k++) {
3696 val = ((cbp >> (5 - k)) & 1);
/* luma coded bits are predicted from neighbour coded flags */
3699 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3703 cbp |= val << (5 - k);
3705 vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
3707 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3708 if(v->pq >= 9 && v->overlap) {
3709 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3713 vc1_put_block(v, s->block);
/* overlap smoothing across 8x8 edges when pq >= 9 and OVERLAP is set */
3714 if(v->pq >= 9 && v->overlap) {
3715 if(!s->first_slice_line) {
3716 s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
3717 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
3718 if(!(s->flags & CODEC_FLAG_GRAY)) {
3719 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
3720 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
3723 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
3724 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3726 s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
3727 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
3728 if(!(s->flags & CODEC_FLAG_GRAY)) {
3729 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
3730 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
3733 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
3734 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
/* bitstream sanity check against the announced frame size */
3737 if(get_bits_count(&s->gb) > v->bits) {
3738 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3742 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3743 s->first_slice_line = 0;
3747 /** Decode blocks of I-frame for advanced profile
/* Decode all macroblocks of an Advanced-profile I-frame: like
 * vc1_decode_i_blocks but with per-MB AC prediction / conditional-overlap
 * bitplanes and the per-MB quantizer (mquant) passed into the block
 * decoder.
 * NOTE(review): gapped listing — switch labels and several braces are not
 * visible. */
3749 static void vc1_decode_i_blocks_adv(VC1Context *v)
3752 MpegEncContext *s = &v->s;
3759 GetBitContext *gb = &s->gb;
3761 /* select codingmode used for VLC tables selection */
3762 switch(v->y_ac_table_index){
3764 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3767 v->codingset = CS_HIGH_MOT_INTRA;
3770 v->codingset = CS_MID_RATE_INTRA;
3774 switch(v->c_ac_table_index){
3776 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3779 v->codingset2 = CS_HIGH_MOT_INTER;
3782 v->codingset2 = CS_MID_RATE_INTER;
3786 /* Set DC scale - y and c use the same */
3787 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3788 s->c_dc_scale = s->c_dc_scale_table[v->pq];
3791 s->mb_x = s->mb_y = 0;
3793 s->first_slice_line = 1;
3794 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3795 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3796 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3797 ff_init_block_index(s);
3798 ff_update_block_index(s);
3799 s->dsp.clear_blocks(s->block[0]);
3800 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3801 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3802 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3803 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3805 // do actual MB decoding and displaying
3806 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
/* ACPRED: raw per-MB bit or decoded bitplane */
3807 if(v->acpred_is_raw)
3808 v->s.ac_pred = get_bits(&v->s.gb, 1);
3810 v->s.ac_pred = v->acpred_plane[mb_pos];
/* conditional overlap: per-MB flag only in CONDOVER_SELECT mode */
3812 if(v->condover == CONDOVER_SELECT) {
3813 if(v->overflg_is_raw)
3814 overlap = get_bits(&v->s.gb, 1);
3816 overlap = v->over_flags_plane[mb_pos];
3818 overlap = (v->condover == CONDOVER_ALL);
3822 s->current_picture.qscale_table[mb_pos] = mquant;
3824 for(k = 0; k < 6; k++) {
3825 val = ((cbp >> (5 - k)) & 1);
/* luma coded bits predicted from neighbour coded flags */
3828 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3832 cbp |= val << (5 - k);
/* A/C availability: top row exists unless first slice line (except inner
 * blocks 2/3); left exists unless at MB column 0 (except blocks 1/3) */
3834 v->a_avail = !s->first_slice_line || (k==2 || k==3);
3835 v->c_avail = !!s->mb_x || (k==1 || k==3);
3837 vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
3839 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3840 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3843 vc1_put_block(v, s->block);
/* overlap smoothing (gated on 'overlap', guard line not visible) */
3845 if(!s->first_slice_line) {
3846 s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
3847 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
3848 if(!(s->flags & CODEC_FLAG_GRAY)) {
3849 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
3850 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
3853 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
3854 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3856 s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
3857 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
3858 if(!(s->flags & CODEC_FLAG_GRAY)) {
3859 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
3860 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
3863 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
3864 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
/* bitstream sanity check against the announced frame size */
3867 if(get_bits_count(&s->gb) > v->bits) {
3868 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3872 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3873 s->first_slice_line = 0;
/** Decode all macroblocks of a P-frame.
 * Selects the AC VLC coding sets (codingset for luma/intra, codingset2 for
 * chroma/inter blocks), marks the whole frame as one error-resilience slice,
 * then walks every macroblock row-by-row, aborting the scan on bitstream
 * over-consumption.
 * NOTE(review): the luma coding set is chosen from c_ac_table_index here,
 * whereas the advanced I-frame path keys off y_ac_table_index — confirm
 * against the VC-1 spec that P-frames really share one AC table index.
 */
3877 static void vc1_decode_p_blocks(VC1Context *v)
3879     MpegEncContext *s = &v->s;
3881     /* select codingmode used for VLC tables selection */
3882     switch(v->c_ac_table_index){
3884         v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3887         v->codingset = CS_HIGH_MOT_INTRA;
3890         v->codingset = CS_MID_RATE_INTRA;
/* second switch picks the inter (chroma) coding set from the same index */
3894     switch(v->c_ac_table_index){
3896         v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3899         v->codingset2 = CS_HIGH_MOT_INTER;
3902         v->codingset2 = CS_MID_RATE_INTER;
/* one slice spanning the whole frame for the error concealment code */
3906     ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3907     s->first_slice_line = 1;
3908     for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3909         for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3910             ff_init_block_index(s);
3911             ff_update_block_index(s);
3912             s->dsp.clear_blocks(s->block[0]);
/* bail out if the MB decoder read past the frame's bit budget */
3915             if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3916                 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3920         ff_draw_horiz_band(s, s->mb_y * 16, 16);
3921         s->first_slice_line = 0;
/** Decode all macroblocks of a B-frame.
 * Same structure as vc1_decode_p_blocks(): choose the intra/inter AC VLC
 * coding sets from c_ac_table_index, register a single whole-frame slice
 * with the error-resilience layer, then iterate over every macroblock and
 * stop when more bits were consumed than the frame contains.
 */
3925 static void vc1_decode_b_blocks(VC1Context *v)
3927     MpegEncContext *s = &v->s;
3929     /* select codingmode used for VLC tables selection */
3930     switch(v->c_ac_table_index){
3932         v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3935         v->codingset = CS_HIGH_MOT_INTRA;
3938         v->codingset = CS_MID_RATE_INTRA;
/* inter (chroma) coding set, selected from the same table index */
3942     switch(v->c_ac_table_index){
3944         v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3947         v->codingset2 = CS_HIGH_MOT_INTER;
3950         v->codingset2 = CS_MID_RATE_INTER;
/* whole frame registered as one slice for error concealment */
3954     ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3955     s->first_slice_line = 1;
3956     for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3957         for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3958             ff_init_block_index(s);
3959             ff_update_block_index(s);
3960             s->dsp.clear_blocks(s->block[0]);
/* guard against bitstream over-read by the MB decoder */
3963             if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3964                 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3968         ff_draw_horiz_band(s, s->mb_y * 16, 16);
3969         s->first_slice_line = 0;
/** Handle a skipped P-frame: copy the previous reference picture.
 * No macroblock data is parsed; each MB row of the luma plane (16 lines)
 * and the two chroma planes (8 lines each) is memcpy'd from last_picture,
 * and the picture type is forced to P so it is still usable as a reference.
 * NOTE(review): copying s->linesize*16 bytes per row assumes the allocated
 * stride covers the full copy width — true for mpegvideo-allocated frames,
 * but worth confirming.
 */
3973 static void vc1_decode_skip_blocks(VC1Context *v)
3975     MpegEncContext *s = &v->s;
3977     ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3978     s->first_slice_line = 1;
3979     for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3981         ff_init_block_index(s);
3982         ff_update_block_index(s);
/* duplicate one macroblock row of each plane from the last reference */
3983         memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
3984         memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3985         memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3986         ff_draw_horiz_band(s, s->mb_y * 16, 16);
3987         s->first_slice_line = 0;
3989     s->pict_type = P_TYPE;
/** Dispatch block decoding according to the current picture type.
 * I-frames go to the advanced- or simple/main-profile intra decoder,
 * P-frames either reuse the previous frame (skipped) or are decoded
 * normally, and B-frames fall back to intra decoding in one branch
 * (case labels elided here) before the regular B decoder.
 */
3992 static void vc1_decode_blocks(VC1Context *v)
/* reset escape-mode level length carried across blocks in the bitstream */
3995     v->s.esc3_level_length = 0;
3997     switch(v->s.pict_type) {
3999         if(v->profile == PROFILE_ADVANCED)
4000             vc1_decode_i_blocks_adv(v);
4002             vc1_decode_i_blocks(v);
4005         if(v->p_frame_skipped)
4006             vc1_decode_skip_blocks(v);
4008             vc1_decode_p_blocks(v);
4012             vc1_decode_i_blocks(v);
4014             vc1_decode_b_blocks(v);
4020 /** Initialize a VC1/WMV3 decoder
4021 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4022  * @todo TODO: Decipher remaining bits in extra_data
/** Initialize a VC-1/WMV3 decoder instance.
 * Validates extradata, sets the pixel format, initializes the shared
 * H.263/MPEG decoder core, parses the sequence header (and, for
 * VC-1/WVC1, scans extradata for the 0x0000010F sequence-header and
 * 0x0000010E entry-point start codes), then allocates the per-MB
 * bitplanes and block-type arrays.
 * @return 0 on success, -1 on failure (missing extradata, bad header, ...)
 */
4024 static int vc1_decode_init(AVCodecContext *avctx)
4026     VC1Context *v = avctx->priv_data;
4027     MpegEncContext *s = &v->s;
/* extradata carries the sequence header; without it we cannot configure */
4030     if (!avctx->extradata_size || !avctx->extradata) return -1;
4031     if (!(avctx->flags & CODEC_FLAG_GRAY))
4032         avctx->pix_fmt = PIX_FMT_YUV420P;
4034         avctx->pix_fmt = PIX_FMT_GRAY8;
/* the decoder draws outside the visible area, so edge emulation is forced */
4036     avctx->flags |= CODEC_FLAG_EMU_EDGE;
4037     v->s.flags |= CODEC_FLAG_EMU_EDGE;
4039     if(ff_h263_decode_init(avctx) < 0)
4041     if (vc1_init_common(v) < 0) return -1;
4043     avctx->coded_width = avctx->width;
4044     avctx->coded_height = avctx->height;
4045     if (avctx->codec_id == CODEC_ID_WMV3)
4049         // looks like WMV3 has a sequence header stored in the extradata
4050         // advanced sequence header may be before the first frame
4051         // the last byte of the extradata is a version number, 1 for the
4052         // samples we can decode
4054         init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
4056         if (decode_sequence_header(avctx, &gb) < 0)
/* report any unparsed bits left over in the extradata */
4059         count = avctx->extradata_size*8 - get_bits_count(&gb);
4062             av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
4063                 count, get_bits(&gb, count));
4067             av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
4069     } else { // VC1/WVC1
4070         int edata_size = avctx->extradata_size;
4071         uint8_t *edata = avctx->extradata;
4073         if(avctx->extradata_size < 16) {
4074             av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", edata_size);
/* scan for the sequence-header start code (0x0000010F, big-endian) */
4077         while(edata_size > 8) {
4078             // test if we've found header
4079             if(BE_32(edata) == 0x0000010F) {
4088         init_get_bits(&gb, edata, edata_size*8);
4090         if (decode_sequence_header(avctx, &gb) < 0)
/* scan for the entry-point start code (0x0000010E, big-endian) */
4093         while(edata_size > 8) {
4094             // test if we've found entry point
4095             if(BE_32(edata) == 0x0000010E) {
4104         init_get_bits(&gb, edata, edata_size*8);
4106         if (decode_entry_point(avctx, &gb) < 0)
4109     avctx->has_b_frames= !!(avctx->max_b_frames);
4110     s->low_delay = !avctx->has_b_frames;
4112     s->mb_width = (avctx->coded_width+15)>>4;
4113     s->mb_height = (avctx->coded_height+15)>>4;
/* NOTE(review): these av_malloc results are not checked for NULL —
 * an OOM here would be dereferenced later during decoding. */
4115     /* Allocate mb bitplanes */
4116     v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4117     v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4118     v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
4119     v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
4121     /* allocate block type info in that way so it could be used with s->block_index[] */
4122     v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
4123     v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
4124     v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
4125     v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
4127     /* Init coded blocks info */
4128     if (v->profile == PROFILE_ADVANCED)
4130 //        if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
4132 //        if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
4140 /** Decode a VC1/WMV3 frame
4141 * @todo TODO: Handle VC-1 IDUs (Transport level?)
/** Decode one VC-1/WMV3 frame.
 * Handles the end-of-stream flush (empty buf), unescapes the bitstream
 * for advanced profile (removes 0x03 emulation-prevention bytes), parses
 * the frame header, applies frame-skipping policy (hurry_up/skip_frame),
 * decodes all blocks and returns the picture to the caller.
 * @param data      output AVFrame
 * @param data_size set to sizeof(AVFrame) when a picture is returned
 * @return buf_size on success (not visible here), negative on error
 * NOTE(review): buf2 is allocated for the unescaped advanced-profile
 * bitstream; its av_free is outside the sampled lines — confirm it is
 * released on every exit path.
 */
4143 static int vc1_decode_frame(AVCodecContext *avctx,
4144                             void *data, int *data_size,
4145                             uint8_t *buf, int buf_size)
4147     VC1Context *v = avctx->priv_data;
4148     MpegEncContext *s = &v->s;
4149     AVFrame *pict = data;
4150     uint8_t *buf2 = NULL;
4152     /* no supplementary picture */
4153     if (buf_size == 0) {
4154         /* special case for last picture */
4155         if (s->low_delay==0 && s->next_picture_ptr) {
4156             *pict= *(AVFrame*)s->next_picture_ptr;
4157             s->next_picture_ptr= NULL;
4159             *data_size = sizeof(AVFrame);
4165     //we need to set current_picture_ptr before reading the header, otherwise we can't store anything in there
4166     if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
4167         int i= ff_find_unused_picture(s, 0);
4168         s->current_picture_ptr= &s->picture[i];
4171     //for advanced profile we need to unescape buffer
4172     if (avctx->codec_id == CODEC_ID_VC1) {
4174         buf2 = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
/* drop the 0x03 emulation-prevention byte after a 00 00 sequence */
4176         for(i = 0; i < buf_size; i++) {
4177             if(buf[i] == 3 && i >= 2 && !buf[i-1] && !buf[i-2] && i < buf_size-1 && buf[i+1] < 4) {
4178                 buf2[buf_size2++] = buf[i+1];
4181                 buf2[buf_size2++] = buf[i];
4183         init_get_bits(&s->gb, buf2, buf_size2*8);
4185         init_get_bits(&s->gb, buf, buf_size*8);
4186     // do parse frame header
4187     if(v->profile < PROFILE_ADVANCED) {
4188         if(vc1_parse_frame_header(v, &s->gb) == -1) {
4193         if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
4199     if(s->pict_type != I_TYPE && !v->res_rtm_flag){
4205     s->current_picture.pict_type= s->pict_type;
4206     s->current_picture.key_frame= s->pict_type == I_TYPE;
4208     /* skip B-frames if we don't have reference frames */
4209     if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){
4211         return -1;//buf_size;
4213     /* skip b frames if we are in a hurry */
4214     if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size;
4215     if(   (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
4216        || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
4217        ||  avctx->skip_frame >= AVDISCARD_ALL) {
4221     /* skip everything if we are in a hurry>=5 */
4222     if(avctx->hurry_up>=5) {
4224         return -1;//buf_size;
4227     if(s->next_p_frame_damaged){
4228         if(s->pict_type==B_TYPE)
4231             s->next_p_frame_damaged=0;
4234     if(MPV_frame_start(s, avctx) < 0) {
4239     ff_er_frame_start(s);
/* frame bit budget used by the block decoders' over-read checks */
4241     v->bits = buf_size * 8;
4242     vc1_decode_blocks(v);
4243 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8);
4244 //  if(get_bits_count(&s->gb) > buf_size * 8)
4250 assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
4251 assert(s->current_picture.pict_type == s->pict_type);
/* B-frames and low-delay streams output the current picture immediately;
 * otherwise the delayed previous reference is returned */
4252     if (s->pict_type == B_TYPE || s->low_delay) {
4253         *pict= *(AVFrame*)s->current_picture_ptr;
4254     } else if (s->last_picture_ptr != NULL) {
4255         *pict= *(AVFrame*)s->last_picture_ptr;
4258     if(s->last_picture_ptr || s->low_delay){
4259         *data_size = sizeof(AVFrame);
4260         ff_print_debug_info(s, pict);
4263     /* Return the Picture timestamp as the frame number */
4264     /* we subtract 1 because it is added in utils.c */
4265     avctx->frame_number = s->picture_number - 1;
4272 /** Close a VC1/WMV3 decoder
4273 * @warning Initial try at using MpegEncContext stuff
/** Close a VC-1/WMV3 decoder: release HRD buffers, shut down the shared
 * mpegvideo core, and free all per-MB bitplanes and block-type storage.
 * av_freep NULLs the pointers, so a double close is safe for these fields.
 */
4275 static int vc1_decode_end(AVCodecContext *avctx)
4277     VC1Context *v = avctx->priv_data;
4279     av_freep(&v->hrd_rate);
4280     av_freep(&v->hrd_buffer);
4281     MPV_common_end(&v->s);
4282     av_freep(&v->mv_type_mb_plane);
4283     av_freep(&v->direct_mb_plane);
4284     av_freep(&v->acpred_plane);
4285     av_freep(&v->over_flags_plane);
4286     av_freep(&v->mb_type_base);
4291 AVCodec vc1_decoder = {
4304 AVCodec wmv3_decoder = {