2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of Libav.
9 * Libav is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * Libav is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with Libav; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
36 #include "vc1acdata.h"
37 #include "msmpeg4data.h"
39 #include "simple_idct.h"
41 #include "vdpau_internal.h"
46 #define MB_INTRA_VLC_BITS 9
/* Byte offsets into the single shared static VLC backing store used by
 * vc1_init_common() below: each VLC's table pointer is set to
 * &vlc_table[vlc_offs[k]] and its allocated size to
 * vlc_offs[k+1] - vlc_offs[k]. The final entry (32372) equals the total
 * size of vlc_table.
 * NOTE(review): this extraction has original file line numbers fused into
 * each line and some lines are missing (e.g. the closing "};" of this
 * array is not visible) — reconcile against the upstream vc1dec.c. */
51 static const uint16_t vlc_offs[] = {
52 0, 520, 552, 616, 1128, 1160, 1224, 1740, 1772, 1836, 1900, 2436,
53 2986, 3050, 3610, 4154, 4218, 4746, 5326, 5390, 5902, 6554, 7658, 8342,
54 9304, 9988, 10630, 11234, 12174, 13006, 13560, 14232, 14786, 15432, 16350, 17522,
55 20372, 21818, 22330, 22394, 23166, 23678, 23742, 24820, 25332, 25396, 26460, 26980,
56 27048, 27592, 27600, 27608, 27616, 27624, 28224, 28258, 28290, 28802, 28834, 28866,
57 29378, 29412, 29444, 29960, 29994, 30026, 30538, 30572, 30604, 31120, 31154, 31186,
58 31714, 31746, 31778, 32306, 32340, 32372
61 // offset tables for interlaced picture MVDATA decoding
/* Presumably index->base-offset lookup tables for interlaced MV
 * differentials (per the comment above); table2's steps are 2^n - 1.
 * Their use site is not visible in this chunk — verify against the
 * MVDATA decoding code. */
62 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
63 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
66 * Init VC-1 specific tables and VC1Context members
67 * @param v The VC1Context to initialize
/* One-time initialization of all static VC-1 VLC tables plus a few
 * VC1Context fields. All dynamically-placed VLCs share the single static
 * vlc_table[] below; each one's slice is described by consecutive entries
 * of vlc_offs[] (pointer = &vlc_table[vlc_offs[k]],
 * size = vlc_offs[k+1] - vlc_offs[k]).
 * NOTE(review): lines are missing from this extraction (opening brace,
 * loop-variable declaration, guard against re-initialization, return,
 * closing braces) — the fused original line numbers jump accordingly. */
70 static int vc1_init_common(VC1Context *v)
/* shared backing store for every init_vlc() call below; 32372 matches the
 * last vlc_offs[] entry */
74 static VLC_TYPE vlc_table[32372][2];
76 v->hrd_rate = v->hrd_buffer = NULL;
/* stand-alone static VLCs with their own storage */
80 INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
81 ff_vc1_bfraction_bits, 1, 1,
82 ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS);
83 INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
84 ff_vc1_norm2_bits, 1, 1,
85 ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS);
86 INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
87 ff_vc1_norm6_bits, 1, 1,
88 ff_vc1_norm6_codes, 2, 2, 556);
89 INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
90 ff_vc1_imode_bits, 1, 1,
91 ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS);
/* TTMB / TTBLK / SUBBLKPAT table triples, slices 0..9 of vlc_offs */
92 for (i = 0; i < 3; i++) {
93 ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 0]];
94 ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i * 3 + 1] - vlc_offs[i * 3 + 0];
95 init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
96 ff_vc1_ttmb_bits[i], 1, 1,
97 ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
98 ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 1]];
99 ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i * 3 + 2] - vlc_offs[i * 3 + 1];
100 init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
101 ff_vc1_ttblk_bits[i], 1, 1,
102 ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
103 ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 2]];
104 ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i * 3 + 3] - vlc_offs[i * 3 + 2];
105 init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
106 ff_vc1_subblkpat_bits[i], 1, 1,
107 ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* 4MV block pattern / CBPCY-P / MV diff table triples, slices 9..21 */
109 for (i = 0; i < 4; i++) {
110 ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 9]];
111 ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i * 3 + 10] - vlc_offs[i * 3 + 9];
112 init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
113 ff_vc1_4mv_block_pattern_bits[i], 1, 1,
114 ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
115 ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 10]];
116 ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i * 3 + 11] - vlc_offs[i * 3 + 10];
117 init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
118 ff_vc1_cbpcy_p_bits[i], 1, 1,
119 ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
120 ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 11]];
121 ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i * 3 + 12] - vlc_offs[i * 3 + 11];
122 init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
123 ff_vc1_mv_diff_bits[i], 1, 1,
124 ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
/* AC coefficient tables plus interlaced 2-ref MVDATA, slices 21..37 */
126 for (i = 0; i < 8; i++) {
127 ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i * 2 + 21]];
128 ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i * 2 + 22] - vlc_offs[i * 2 + 21];
129 init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
130 &vc1_ac_tables[i][0][1], 8, 4,
131 &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC);
132 /* initialize interlaced MVDATA tables (2-Ref) */
133 ff_vc1_2ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 2 + 22]];
134 ff_vc1_2ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 2 + 23] - vlc_offs[i * 2 + 22];
135 init_vlc(&ff_vc1_2ref_mvdata_vlc[i], VC1_2REF_MVDATA_VLC_BITS, 126,
136 ff_vc1_2ref_mvdata_bits[i], 1, 1,
137 ff_vc1_2ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
/* interlaced frame P picture MBMODE and 1-ref MVDATA, slices 37..49 */
139 for (i = 0; i < 4; i++) {
140 /* initialize 4MV MBMODE VLC tables for interlaced frame P picture */
141 ff_vc1_intfr_4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 37]];
142 ff_vc1_intfr_4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 38] - vlc_offs[i * 3 + 37];
143 init_vlc(&ff_vc1_intfr_4mv_mbmode_vlc[i], VC1_INTFR_4MV_MBMODE_VLC_BITS, 15,
144 ff_vc1_intfr_4mv_mbmode_bits[i], 1, 1,
145 ff_vc1_intfr_4mv_mbmode_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
146 /* initialize NON-4MV MBMODE VLC tables for the same */
147 ff_vc1_intfr_non4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 38]];
148 ff_vc1_intfr_non4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 39] - vlc_offs[i * 3 + 38];
149 init_vlc(&ff_vc1_intfr_non4mv_mbmode_vlc[i], VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 9,
150 ff_vc1_intfr_non4mv_mbmode_bits[i], 1, 1,
151 ff_vc1_intfr_non4mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
152 /* initialize interlaced MVDATA tables (1-Ref) */
153 ff_vc1_1ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 39]];
154 ff_vc1_1ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 3 + 40] - vlc_offs[i * 3 + 39];
155 init_vlc(&ff_vc1_1ref_mvdata_vlc[i], VC1_1REF_MVDATA_VLC_BITS, 72,
156 ff_vc1_1ref_mvdata_bits[i], 1, 1,
157 ff_vc1_1ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
/* 2MV block pattern, single-slice entries 49..53 */
159 for (i = 0; i < 4; i++) {
160 /* Initialize 2MV Block pattern VLC tables */
161 ff_vc1_2mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i + 49]];
162 ff_vc1_2mv_block_pattern_vlc[i].table_allocated = vlc_offs[i + 50] - vlc_offs[i + 49];
163 init_vlc(&ff_vc1_2mv_block_pattern_vlc[i], VC1_2MV_BLOCK_PATTERN_VLC_BITS, 4,
164 ff_vc1_2mv_block_pattern_bits[i], 1, 1,
165 ff_vc1_2mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* interlaced CBPCY and field-picture MBMODE, slices 53..77 */
167 for (i = 0; i < 8; i++) {
168 /* Initialize interlaced CBPCY VLC tables (Table 124 - Table 131) */
169 ff_vc1_icbpcy_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 53]];
170 ff_vc1_icbpcy_vlc[i].table_allocated = vlc_offs[i * 3 + 54] - vlc_offs[i * 3 + 53];
171 init_vlc(&ff_vc1_icbpcy_vlc[i], VC1_ICBPCY_VLC_BITS, 63,
172 ff_vc1_icbpcy_p_bits[i], 1, 1,
173 ff_vc1_icbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
174 /* Initialize interlaced field picture MBMODE VLC tables */
175 ff_vc1_if_mmv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 54]];
176 ff_vc1_if_mmv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 55] - vlc_offs[i * 3 + 54];
177 init_vlc(&ff_vc1_if_mmv_mbmode_vlc[i], VC1_IF_MMV_MBMODE_VLC_BITS, 8,
178 ff_vc1_if_mmv_mbmode_bits[i], 1, 1,
179 ff_vc1_if_mmv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
180 ff_vc1_if_1mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 55]];
181 ff_vc1_if_1mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 56] - vlc_offs[i * 3 + 55];
182 init_vlc(&ff_vc1_if_1mv_mbmode_vlc[i], VC1_IF_1MV_MBMODE_VLC_BITS, 6,
183 ff_vc1_if_1mv_mbmode_bits[i], 1, 1,
184 ff_vc1_if_1mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
191 v->mvrange = 0; /* 7.1.1.18, p80 */
196 /***********************************************************************/
198 * @name VC-1 Bitplane decoding
216 /** @} */ //imode defines
219 /** @} */ //Bitplane group
/* Write the (already overlap-filtered) top-left neighbour MB's residual
 * blocks into the output frame, clamped to [0,255]. Runs one MB row and
 * one MB column behind the decode loop (see comment below); at the end of
 * a row, the top MB is flushed as well. fieldtx doubles the luma stride
 * and changes the vertical block split for field-transformed MBs.
 * NOTE(review): declarations of v_dist and several braces are missing
 * from this extraction. */
221 static void vc1_put_signed_blocks_clamped(VC1Context *v)
223 MpegEncContext *s = &v->s;
224 int topleft_mb_pos, top_mb_pos;
225 int stride_y, fieldtx;
228 /* The put pixels loop is always one MB row behind the decoding loop,
229 * because we can only put pixels when overlap filtering is done, and
230 * for filtering of the bottom edge of a MB, we need the next MB row
232 * Within the row, the put pixels loop is also one MB col behind the
233 * decoding loop. The reason for this is again, because for filtering
234 * of the right MB edge, we need the next MB present. */
235 if (!s->first_slice_line) {
237 topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
238 fieldtx = v->fieldtx_plane[topleft_mb_pos];
/* field transform: interleaved fields -> double stride, blocks 2/3 start
 * one line down (v_dist = 15) instead of 8 lines down */
239 stride_y = (s->linesize) << fieldtx;
240 v_dist = (16 - fieldtx) >> (fieldtx == 0);
241 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
242 s->dest[0] - 16 * s->linesize - 16,
244 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
245 s->dest[0] - 16 * s->linesize - 8,
247 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
248 s->dest[0] - v_dist * s->linesize - 16,
250 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
251 s->dest[0] - v_dist * s->linesize - 8,
253 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
254 s->dest[1] - 8 * s->uvlinesize - 8,
256 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
257 s->dest[2] - 8 * s->uvlinesize - 8,
/* last MB of the row: also flush the MB directly above */
260 if (s->mb_x == s->mb_width - 1) {
261 top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
262 fieldtx = v->fieldtx_plane[top_mb_pos];
263 stride_y = s->linesize << fieldtx;
264 v_dist = fieldtx ? 15 : 8;
265 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
266 s->dest[0] - 16 * s->linesize,
268 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
269 s->dest[0] - 16 * s->linesize + 8,
271 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
272 s->dest[0] - v_dist * s->linesize,
274 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
275 s->dest[0] - v_dist * s->linesize + 8,
277 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
278 s->dest[1] - 8 * s->uvlinesize,
280 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
281 s->dest[2] - 8 * s->uvlinesize,
/* rotate the delayed-block ring buffer indices, wrapping at
 * n_allocated_blks (macro body partially missing in this extraction) */
286 #define inc_blk_idx(idx) do { \
288 if (idx >= v->n_allocated_blks) \
292 inc_blk_idx(v->topleft_blk_idx);
293 inc_blk_idx(v->top_blk_idx);
294 inc_blk_idx(v->left_blk_idx);
295 inc_blk_idx(v->cur_blk_idx);
/* In-loop deblocking for an intra MB at (s->mb_x, s->mb_y): vertical
 * filtering across the top MB edge (when not on the first slice line),
 * horizontal filtering on the left edges, plus the internal 8-pixel
 * boundaries; pq is the picture quantizer driving the filter strength.
 * NOTE(review): several lines (loop variable declaration, braces,
 * conditions guarding the mb_x-dependent calls) are missing from this
 * extraction. */
298 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
300 MpegEncContext *s = &v->s;
302 if (!s->first_slice_line) {
303 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
305 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
306 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
307 for (j = 0; j < 2; j++) {
308 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
310 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* internal horizontal luma boundary at y = 8 */
313 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
/* last MB row: filter the current row's vertical edges as well */
315 if (s->mb_y == s->end_mb_y - 1) {
317 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
318 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
319 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
321 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/* Delayed variant of the intra-MB loop filter: runs one row/column behind
 * the overlap filter (i.e. two behind the decode loop), hence all the
 * -16/-32 line offsets into s->dest[]. The trailing section (mb_y ==
 * end_mb_y) flushes the final row. pq is the picture quantizer.
 * NOTE(review): braces, the loop-variable declaration and several guard
 * conditions are missing from this extraction. */
325 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
327 MpegEncContext *s = &v->s;
330 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
331 * means it runs two rows/cols behind the decoding loop. */
332 if (!s->first_slice_line) {
334 if (s->mb_y >= s->start_mb_y + 2) {
335 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
338 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
339 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
340 for (j = 0; j < 2; j++) {
341 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
343 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
347 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
/* end of row: process the MB directly above the current column */
350 if (s->mb_x == s->mb_width - 1) {
351 if (s->mb_y >= s->start_mb_y + 2) {
352 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
355 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
356 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
357 for (j = 0; j < 2; j++) {
358 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
360 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
364 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
/* final MB row: flush remaining horizontal filtering */
367 if (s->mb_y == s->end_mb_y) {
370 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
371 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
373 for (j = 0; j < 2; j++) {
374 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
379 if (s->mb_x == s->mb_width - 1) {
381 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
382 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
384 for (j = 0; j < 2; j++) {
385 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Conditional overlap smoothing for intra blocks (VC-1 advanced profile):
 * applies h/v overlap filters between the current MB's blocks and its
 * left/top neighbours, gated by v->condover, v->pq >= 9 and the
 * per-MB over_flags_plane bitplane. H overlap runs on the left/internal
 * edges first; V overlap trails by one MB column, which is why the
 * left/topleft delayed block indices are used (see comment below).
 * Chroma blocks (indices 4,5) are skipped under CODEC_FLAG_GRAY.
 * NOTE(review): closing braces and some lines are missing from this
 * extraction. */
393 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
395 MpegEncContext *s = &v->s;
398 if (v->condover == CONDOVER_NONE)
401 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
403 /* Within a MB, the horizontal overlap always runs before the vertical.
404 * To accomplish that, we run the H on left and internal borders of the
405 * currently decoded MB. Then, we wait for the next overlap iteration
406 * to do H overlap on the right edge of this MB, before moving over and
407 * running the V overlap. Therefore, the V overlap makes us trail by one
408 * MB col and the H overlap filter makes us trail by one MB row. This
409 * is reflected in the time at which we run the put_pixels loop. */
410 if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
411 if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
412 v->over_flags_plane[mb_pos - 1])) {
413 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
414 v->block[v->cur_blk_idx][0]);
415 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
416 v->block[v->cur_blk_idx][2]);
417 if (!(s->flags & CODEC_FLAG_GRAY)) {
418 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
419 v->block[v->cur_blk_idx][4]);
420 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
421 v->block[v->cur_blk_idx][5]);
/* internal horizontal overlap between block columns of the current MB */
424 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
425 v->block[v->cur_blk_idx][1]);
426 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
427 v->block[v->cur_blk_idx][3]);
/* last MB of the row: run the V overlap for the current column now */
429 if (s->mb_x == s->mb_width - 1) {
430 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
431 v->over_flags_plane[mb_pos - s->mb_stride])) {
432 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
433 v->block[v->cur_blk_idx][0]);
434 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
435 v->block[v->cur_blk_idx][1]);
436 if (!(s->flags & CODEC_FLAG_GRAY)) {
437 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
438 v->block[v->cur_blk_idx][4]);
439 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
440 v->block[v->cur_blk_idx][5]);
443 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
444 v->block[v->cur_blk_idx][2]);
445 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
446 v->block[v->cur_blk_idx][3]);
/* V overlap for the (delayed) left MB column */
449 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
450 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
451 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
452 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
453 v->block[v->left_blk_idx][0]);
454 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
455 v->block[v->left_blk_idx][1]);
456 if (!(s->flags & CODEC_FLAG_GRAY)) {
457 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
458 v->block[v->left_blk_idx][4]);
459 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
460 v->block[v->left_blk_idx][5]);
463 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
464 v->block[v->left_blk_idx][2]);
465 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
466 v->block[v->left_blk_idx][3]);
470 /** Do motion compensation over 1 macroblock
471 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
/* 1-MV luma+chroma motion compensation: derives the chroma MV from the
 * luma MV, selects the reference picture (last/next/current field
 * depending on field_mode and dir), clips source coordinates, runs
 * edge emulation plus optional range-reduction / intensity-compensation
 * scaling of the source, then calls the qpel/hpel luma and bilinear
 * chroma MC DSP routines.
 * NOTE(review): many lines (braces, some declarations, else branches,
 * trailing arguments of several calls) are missing from this extraction —
 * the fused original line numbers jump accordingly. */
473 static void vc1_mc_1mv(VC1Context *v, int dir)
475 MpegEncContext *s = &v->s;
476 DSPContext *dsp = &v->s.dsp;
477 uint8_t *srcY, *srcU, *srcV;
478 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
/* in field mode only half the lines belong to the current field */
480 int v_edge_pos = s->v_edge_pos >> v->field_mode;
481 if (!v->field_mode && !v->s.last_picture.f.data[0])
484 mx = s->mv[dir][0][0];
485 my = s->mv[dir][0][1];
487 // store motion vectors for further use in B frames
488 if (s->pict_type == AV_PICTURE_TYPE_P) {
489 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
490 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
/* derive chroma MV: halve, rounding 3-remainders up */
493 uvmx = (mx + ((mx & 3) == 3)) >> 1;
494 uvmy = (my + ((my & 3) == 3)) >> 1;
495 v->luma_mv[s->mb_x][0] = uvmx;
496 v->luma_mv[s->mb_x][1] = uvmy;
/* opposite-field reference: bias the vertical MV by +/-2 qpel */
499 v->cur_field_type != v->ref_field_type[dir]) {
500 my = my - 2 + 4 * v->cur_field_type;
501 uvmy = uvmy - 2 + 4 * v->cur_field_type;
/* fastuvmc: round chroma MV toward zero to even values */
504 if (v->fastuvmc && (v->fcm != 1)) { // fastuvmc shall be ignored for interlaced frame picture
505 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
506 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
508 if (v->field_mode) { // interlaced field picture
/* second field referencing the opposite field of the same frame */
510 if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) {
511 srcY = s->current_picture.f.data[0];
512 srcU = s->current_picture.f.data[1];
513 srcV = s->current_picture.f.data[2];
515 srcY = s->last_picture.f.data[0];
516 srcU = s->last_picture.f.data[1];
517 srcV = s->last_picture.f.data[2];
520 srcY = s->next_picture.f.data[0];
521 srcU = s->next_picture.f.data[1];
522 srcV = s->next_picture.f.data[2];
526 srcY = s->last_picture.f.data[0];
527 srcU = s->last_picture.f.data[1];
528 srcV = s->last_picture.f.data[2];
530 srcY = s->next_picture.f.data[0];
531 srcU = s->next_picture.f.data[1];
532 srcV = s->next_picture.f.data[2];
/* integer source position (MVs are in quarter-pel units) */
536 src_x = s->mb_x * 16 + (mx >> 2);
537 src_y = s->mb_y * 16 + (my >> 2);
538 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
539 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
541 if (v->profile != PROFILE_ADVANCED) {
542 src_x = av_clip( src_x, -16, s->mb_width * 16);
543 src_y = av_clip( src_y, -16, s->mb_height * 16);
544 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
545 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
547 src_x = av_clip( src_x, -17, s->avctx->coded_width);
548 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
549 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
550 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
553 srcY += src_y * s->linesize + src_x;
554 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
555 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* bottom-field reference: step one line into the frame buffer */
557 if (v->field_mode && v->ref_field_type[dir]) {
558 srcY += s->current_picture_ptr->f.linesize[0];
559 srcU += s->current_picture_ptr->f.linesize[1];
560 srcV += s->current_picture_ptr->f.linesize[2];
563 /* for grayscale we should not try to read from unknown area */
564 if (s->flags & CODEC_FLAG_GRAY) {
565 srcU = s->edge_emu_buffer + 18 * s->linesize;
566 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* edge emulation needed for range reduction, intensity compensation,
 * or when the MC window reaches outside the picture */
569 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
570 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
571 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my&3) - 16 - s->mspel * 3) {
572 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
574 srcY -= s->mspel * (1 + s->linesize);
575 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
576 17 + s->mspel * 2, 17 + s->mspel * 2,
577 src_x - s->mspel, src_y - s->mspel,
578 s->h_edge_pos, v_edge_pos);
579 srcY = s->edge_emu_buffer;
580 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
581 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
582 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
583 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
586 /* if we deal with range reduction we need to scale source blocks */
587 if (v->rangeredfrm) {
592 for (j = 0; j < 17 + s->mspel * 2; j++) {
593 for (i = 0; i < 17 + s->mspel * 2; i++)
594 src[i] = ((src[i] - 128) >> 1) + 128;
599 for (j = 0; j < 9; j++) {
600 for (i = 0; i < 9; i++) {
601 src[i] = ((src[i] - 128) >> 1) + 128;
602 src2[i] = ((src2[i] - 128) >> 1) + 128;
604 src += s->uvlinesize;
605 src2 += s->uvlinesize;
608 /* if we deal with intensity compensation we need to scale source blocks */
609 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
614 for (j = 0; j < 17 + s->mspel * 2; j++) {
615 for (i = 0; i < 17 + s->mspel * 2; i++)
616 src[i] = v->luty[src[i]];
621 for (j = 0; j < 9; j++) {
622 for (i = 0; i < 9; i++) {
623 src[i] = v->lutuv[src[i]];
624 src2[i] = v->lutuv[src2[i]];
626 src += s->uvlinesize;
627 src2 += s->uvlinesize;
630 srcY += s->mspel * (1 + s->linesize);
/* current field is bottom: offset the destination by one line */
633 if (v->field_mode && v->cur_field_type) {
634 off = s->current_picture_ptr->f.linesize[0];
635 off_uv = s->current_picture_ptr->f.linesize[1];
/* luma MC: qpel (mspel) path or hpel path */
641 dxy = ((my & 3) << 2) | (mx & 3);
642 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
643 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
644 srcY += s->linesize * 8;
645 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
646 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
647 } else { // hpel mc - always used for luma
648 dxy = (my & 2) | ((mx & 2) >> 1);
650 dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
652 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
655 if (s->flags & CODEC_FLAG_GRAY) return;
656 /* Chroma MC always uses qpel bilinear */
657 uvmx = (uvmx & 3) << 1;
658 uvmy = (uvmy & 3) << 1;
660 dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
661 dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
663 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
664 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Median of four integers, computed as the mean of the two middle values:
 * drop the minimum and the maximum, average the remaining pair
 * (C integer division, truncating toward zero).
 *
 * Reconstruction note: the extracted source had lost the outer
 * `if (a < b)` / `else` and all braces (only the four return statements
 * survived); the structure below is the unique one consistent with those
 * four symmetric returns.
 */
static inline int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else       return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else       return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}
679 /** Do motion compensation for 4-MV macroblock - luminance block
/* Per-block (8x8, or 8x4-pair when fieldmv) luma MC for 4-MV macroblocks:
 * block n in 0..3. For field P pictures, block 3 additionally computes
 * and stores the dominant-field median/average MV used later for chroma.
 * NOTE(review): many lines (braces, declarations of off/tx/ty/qx/qy,
 * else branches, case labels of the switch) are missing from this
 * extraction — fused original line numbers jump accordingly. */
681 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
683 MpegEncContext *s = &v->s;
684 DSPContext *dsp = &v->s.dsp;
686 int dxy, mx, my, src_x, src_y;
/* fieldmv: per-block field MV in interlaced frame pictures (fcm == 1) */
688 int fieldmv = (v->fcm == 1) ? v->blk_mv_type[s->block_index[n]] : 0;
689 int v_edge_pos = s->v_edge_pos >> v->field_mode;
691 if (!v->field_mode && !v->s.last_picture.f.data[0])
694 mx = s->mv[dir][n][0];
695 my = s->mv[dir][n][1];
/* reference picture selection (field or frame) */
699 if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
700 srcY = s->current_picture.f.data[0];
702 srcY = s->last_picture.f.data[0];
704 srcY = s->last_picture.f.data[0];
706 srcY = s->next_picture.f.data[0];
/* opposite-field reference: +/-2 qpel vertical bias */
709 if (v->cur_field_type != v->ref_field_type[dir])
710 my = my - 2 + 4 * v->cur_field_type;
/* after the 4th block of a field P MB, derive the MV stored for chroma
 * from the blocks that use the dominant field */
713 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
714 int same_count = 0, opp_count = 0, k;
715 int chosen_mv[2][4][2], f;
717 for (k = 0; k < 4; k++) {
718 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
719 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
720 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
724 f = opp_count > same_count;
725 switch (f ? opp_count : same_count) {
727 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
728 chosen_mv[f][2][0], chosen_mv[f][3][0]);
729 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
730 chosen_mv[f][2][1], chosen_mv[f][3][1]);
733 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
734 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
737 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
738 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
741 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
742 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
743 for (k = 0; k < 4; k++)
744 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* interlaced frame picture: pull MVs that point too far outside back in */
747 if (v->fcm == 1) { // not sure if needed for other types of picture
749 int width = s->avctx->coded_width;
750 int height = s->avctx->coded_height >> 1;
751 qx = (s->mb_x * 16) + (mx >> 2);
752 qy = (s->mb_y * 8) + (my >> 3);
757 mx -= 4 * (qx - width);
760 else if (qy > height + 1)
761 my -= 8 * (qy - height - 1);
/* destination offset of block n inside the MB */
764 if ((v->fcm == 1) && fieldmv)
765 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
767 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
768 if (v->field_mode && v->cur_field_type)
769 off += s->current_picture_ptr->f.linesize[0];
771 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
773 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
775 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
777 if (v->profile != PROFILE_ADVANCED) {
778 src_x = av_clip(src_x, -16, s->mb_width * 16);
779 src_y = av_clip(src_y, -16, s->mb_height * 16);
781 src_x = av_clip(src_x, -17, s->avctx->coded_width);
784 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
786 src_y = av_clip(src_y, -18, s->avctx->coded_height);
788 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
792 srcY += src_y * s->linesize + src_x;
793 if (v->field_mode && v->ref_field_type[dir])
794 srcY += s->current_picture_ptr->f.linesize[0];
796 if (fieldmv && !(src_y & 1))
798 if (fieldmv && (src_y & 1) && src_y < 4)
/* edge emulation / range reduction / intensity compensation, as in
 * vc1_mc_1mv but on an 8-pixel-wide window (doubled lines if fieldmv) */
800 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
801 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
802 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
803 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
804 /* check emulate edge stride and offset */
805 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
806 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
807 src_x - s->mspel, src_y - (s->mspel << fieldmv),
808 s->h_edge_pos, v_edge_pos);
809 srcY = s->edge_emu_buffer;
810 /* if we deal with range reduction we need to scale source blocks */
811 if (v->rangeredfrm) {
816 for (j = 0; j < 9 + s->mspel * 2; j++) {
817 for (i = 0; i < 9 + s->mspel * 2; i++)
818 src[i] = ((src[i] - 128) >> 1) + 128;
819 src += s->linesize << fieldmv;
822 /* if we deal with intensity compensation we need to scale source blocks */
823 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
828 for (j = 0; j < 9 + s->mspel * 2; j++) {
829 for (i = 0; i < 9 + s->mspel * 2; i++)
830 src[i] = v->luty[src[i]];
831 src += s->linesize << fieldmv;
834 srcY += s->mspel * (1 + (s->linesize << fieldmv));
/* 8x8 luma MC: qpel (mspel) path or hpel path */
838 dxy = ((my & 3) << 2) | (mx & 3);
839 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
840 } else { // hpel mc - always used for luma
841 dxy = (my & 2) | ((mx & 2) >> 1);
843 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
845 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Derive the chroma MV (*tx, *ty) from the four luma MVs of a 4-MV MB.
 * a[k] is compared against `flag` per block (intra flag or field flag);
 * idx collects the mismatches as a 4-bit mask and count[idx] is the
 * popcount: 0 mismatches -> median4 of all four, 1 -> mid_pred of the
 * other three, 2 -> average of the two matching blocks. The return value
 * (number of valid blocks) is established outside the visible lines.
 * NOTE(review): braces, the idx bit-0 term, case labels and the returns
 * are missing from this extraction. */
849 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
/* popcount of a 4-bit value */
852 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
854 idx = ((a[3] != flag) << 3)
855 | ((a[2] != flag) << 2)
856 | ((a[1] != flag) << 1)
859 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
860 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
862 } else if (count[idx] == 1) {
865 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
866 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
869 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
870 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
873 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
874 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
877 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
878 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
881 } else if (count[idx] == 2) {
883 for (i = 0; i < 3; i++)
888 for (i = t1 + 1; i < 4; i++)
893 *tx = (mvx[t1] + mvx[t2]) / 2;
894 *ty = (mvy[t1] + mvy[t2]) / 2;
902 /** Do motion compensation for 4-MV macroblock - both chroma blocks
904 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
906 MpegEncContext *s = &v->s;
907 DSPContext *dsp = &v->s.dsp;
908 uint8_t *srcU, *srcV;
909 int uvmx, uvmy, uvsrc_x, uvsrc_y;
910 int k, tx = 0, ty = 0;
911 int mvx[4], mvy[4], intra[4], mv_f[4];
913 int chroma_ref_type = v->cur_field_type, off = 0;
914 int v_edge_pos = s->v_edge_pos >> v->field_mode;
916 if (!v->field_mode && !v->s.last_picture.f.data[0])
918 if (s->flags & CODEC_FLAG_GRAY)
921 for (k = 0; k < 4; k++) {
922 mvx[k] = s->mv[dir][k][0];
923 mvy[k] = s->mv[dir][k][1];
924 intra[k] = v->mb_type[0][s->block_index[k]];
926 mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
929 /* calculate chroma MV vector from four luma MVs */
930 if (!v->field_mode || (v->field_mode && !v->numref)) {
931 valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
933 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
934 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
935 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
936 return; //no need to do MC for intra blocks
940 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
942 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
944 chroma_ref_type = !v->cur_field_type;
946 s->current_picture.f.motion_val[1][s->block_index[0]][0] = tx;
947 s->current_picture.f.motion_val[1][s->block_index[0]][1] = ty;
948 uvmx = (tx + ((tx & 3) == 3)) >> 1;
949 uvmy = (ty + ((ty & 3) == 3)) >> 1;
951 v->luma_mv[s->mb_x][0] = uvmx;
952 v->luma_mv[s->mb_x][1] = uvmy;
955 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
956 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
958 // Field conversion bias
959 if (v->cur_field_type != chroma_ref_type)
960 uvmy += 2 - 4 * chroma_ref_type;
962 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
963 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
965 if (v->profile != PROFILE_ADVANCED) {
966 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
967 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
969 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
970 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
975 if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
976 srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
977 srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
979 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
980 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
983 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
984 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
987 srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
988 srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
992 if (chroma_ref_type) {
993 srcU += s->current_picture_ptr->f.linesize[1];
994 srcV += s->current_picture_ptr->f.linesize[2];
996 off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
999 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1000 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1001 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
1002 s->dsp.emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize,
1003 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1004 s->h_edge_pos >> 1, v_edge_pos >> 1);
1005 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1006 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1007 s->h_edge_pos >> 1, v_edge_pos >> 1);
1008 srcU = s->edge_emu_buffer;
1009 srcV = s->edge_emu_buffer + 16;
1011 /* if we deal with range reduction we need to scale source blocks */
1012 if (v->rangeredfrm) {
1014 uint8_t *src, *src2;
1018 for (j = 0; j < 9; j++) {
1019 for (i = 0; i < 9; i++) {
1020 src[i] = ((src[i] - 128) >> 1) + 128;
1021 src2[i] = ((src2[i] - 128) >> 1) + 128;
1023 src += s->uvlinesize;
1024 src2 += s->uvlinesize;
1027 /* if we deal with intensity compensation we need to scale source blocks */
1028 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1030 uint8_t *src, *src2;
1034 for (j = 0; j < 9; j++) {
1035 for (i = 0; i < 9; i++) {
1036 src[i] = v->lutuv[src[i]];
1037 src2[i] = v->lutuv[src2[i]];
1039 src += s->uvlinesize;
1040 src2 += s->uvlinesize;
1045 /* Chroma MC always uses qpel bilinear */
1046 uvmx = (uvmx & 3) << 1;
1047 uvmy = (uvmy & 3) << 1;
1049 dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1050 dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1052 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1053 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1057 /** Do motion compensation for 4-MV field chroma macroblock (both U and V)
1059 static void vc1_mc_4mv_chroma4(VC1Context *v)
1061 MpegEncContext *s = &v->s;
1062 DSPContext *dsp = &v->s.dsp;
1063 uint8_t *srcU, *srcV;
1064 int uvsrc_x, uvsrc_y;
1065 int uvmx_field[4], uvmy_field[4];
1067 int fieldmv = v->blk_mv_type[s->block_index[0]];
1068 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
1069 int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
1070 int v_edge_pos = s->v_edge_pos >> 1;
1072 if (!v->s.last_picture.f.data[0])
1074 if (s->flags & CODEC_FLAG_GRAY)
1077 for (i = 0; i < 4; i++) {
1078 tx = s->mv[0][i][0];
1079 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1080 ty = s->mv[0][i][1];
1082 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1084 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
1087 for (i = 0; i < 4; i++) {
1088 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1089 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1090 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1091 // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1092 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1093 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1094 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1095 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1096 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1097 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
1099 if (fieldmv && !(uvsrc_y & 1))
1101 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
1103 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
1104 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1105 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1106 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
1107 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1108 s->h_edge_pos >> 1, v_edge_pos);
1109 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1110 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1111 s->h_edge_pos >> 1, v_edge_pos);
1112 srcU = s->edge_emu_buffer;
1113 srcV = s->edge_emu_buffer + 16;
1115 /* if we deal with intensity compensation we need to scale source blocks */
1116 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1118 uint8_t *src, *src2;
1122 for (j = 0; j < 5; j++) {
1123 for (i = 0; i < 5; i++) {
1124 src[i] = v->lutuv[src[i]];
1125 src2[i] = v->lutuv[src2[i]];
1127 src += s->uvlinesize << 1;
1128 src2 += s->uvlinesize << 1;
1133 dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1134 dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1136 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1137 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1142 /***********************************************************************/
1144 * @name VC-1 Block-level functions
1145 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1151 * @brief Get macroblock-level quantizer scale
1153 #define GET_MQUANT() \
1154 if (v->dquantfrm) { \
1156 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1157 if (v->dqbilevel) { \
1158 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1160 mqdiff = get_bits(gb, 3); \
1162 mquant = v->pq + mqdiff; \
1164 mquant = get_bits(gb, 5); \
1167 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1168 edges = 1 << v->dqsbedge; \
1169 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1170 edges = (3 << v->dqsbedge) % 15; \
1171 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1173 if ((edges&1) && !s->mb_x) \
1174 mquant = v->altpq; \
1175 if ((edges&2) && s->first_slice_line) \
1176 mquant = v->altpq; \
1177 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1178 mquant = v->altpq; \
1179 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1180 mquant = v->altpq; \
1184 * @def GET_MVDATA(_dmv_x, _dmv_y)
1185 * @brief Get MV differentials
1186 * @see MVDATA decoding from 8.3.5.2, p(1)20
1187 * @param _dmv_x Horizontal differential for decoded MV
1188 * @param _dmv_y Vertical differential for decoded MV
1190 #define GET_MVDATA(_dmv_x, _dmv_y) \
1191 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1192 VC1_MV_DIFF_VLC_BITS, 2); \
1194 mb_has_coeffs = 1; \
1197 mb_has_coeffs = 0; \
1200 _dmv_x = _dmv_y = 0; \
1201 } else if (index == 35) { \
1202 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1203 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1204 } else if (index == 36) { \
1209 index1 = index % 6; \
1210 if (!s->quarter_sample && index1 == 5) val = 1; \
1212 if (size_table[index1] - val > 0) \
1213 val = get_bits(gb, size_table[index1] - val); \
1215 sign = 0 - (val&1); \
1216 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1218 index1 = index / 6; \
1219 if (!s->quarter_sample && index1 == 5) val = 1; \
1221 if (size_table[index1] - val > 0) \
1222 val = get_bits(gb, size_table[index1] - val); \
1224 sign = 0 - (val & 1); \
1225 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
1228 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1229 int *dmv_y, int *pred_flag)
1232 int extend_x = 0, extend_y = 0;
1233 GetBitContext *gb = &v->s.gb;
1236 const int* offs_tab;
1239 bits = VC1_2REF_MVDATA_VLC_BITS;
1242 bits = VC1_1REF_MVDATA_VLC_BITS;
1245 switch (v->dmvrange) {
1253 extend_x = extend_y = 1;
1256 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1258 *dmv_x = get_bits(gb, v->k_x);
1259 *dmv_y = get_bits(gb, v->k_y);
1261 *pred_flag = *dmv_y & 1;
1262 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1267 offs_tab = offset_table2;
1269 offs_tab = offset_table1;
1270 index1 = (index + 1) % 9;
1272 val = get_bits(gb, index1 + extend_x);
1273 sign = 0 -(val & 1);
1274 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1278 offs_tab = offset_table2;
1280 offs_tab = offset_table1;
1281 index1 = (index + 1) / 9;
1282 if (index1 > v->numref) {
1283 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1284 sign = 0 - (val & 1);
1285 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1289 *pred_flag = index1 & 1;
1293 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1295 int scaledvalue, refdist;
1296 int scalesame1, scalesame2;
1297 int scalezone1_x, zone1offset_x;
1298 int table_index = dir ^ v->second_field;
1300 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1301 refdist = v->refdist;
1303 refdist = dir ? v->brfd : v->frfd;
1306 scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1307 scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1308 scalezone1_x = vc1_field_mvpred_scales[table_index][3][refdist];
1309 zone1offset_x = vc1_field_mvpred_scales[table_index][5][refdist];
1314 if (FFABS(n) < scalezone1_x)
1315 scaledvalue = (n * scalesame1) >> 8;
1318 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1320 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1323 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1326 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1328 int scaledvalue, refdist;
1329 int scalesame1, scalesame2;
1330 int scalezone1_y, zone1offset_y;
1331 int table_index = dir ^ v->second_field;
1333 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1334 refdist = v->refdist;
1336 refdist = dir ? v->brfd : v->frfd;
1339 scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1340 scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1341 scalezone1_y = vc1_field_mvpred_scales[table_index][4][refdist];
1342 zone1offset_y = vc1_field_mvpred_scales[table_index][6][refdist];
1347 if (FFABS(n) < scalezone1_y)
1348 scaledvalue = (n * scalesame1) >> 8;
1351 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1353 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1357 if (v->cur_field_type && !v->ref_field_type[dir])
1358 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1360 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1363 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1365 int scalezone1_x, zone1offset_x;
1366 int scaleopp1, scaleopp2, brfd;
1369 brfd = FFMIN(v->brfd, 3);
1370 scalezone1_x = vc1_b_field_mvpred_scales[3][brfd];
1371 zone1offset_x = vc1_b_field_mvpred_scales[5][brfd];
1372 scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1373 scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
1378 if (FFABS(n) < scalezone1_x)
1379 scaledvalue = (n * scaleopp1) >> 8;
1382 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1384 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1387 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1390 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1392 int scalezone1_y, zone1offset_y;
1393 int scaleopp1, scaleopp2, brfd;
1396 brfd = FFMIN(v->brfd, 3);
1397 scalezone1_y = vc1_b_field_mvpred_scales[4][brfd];
1398 zone1offset_y = vc1_b_field_mvpred_scales[6][brfd];
1399 scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1400 scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
1405 if (FFABS(n) < scalezone1_y)
1406 scaledvalue = (n * scaleopp1) >> 8;
1409 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1411 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1414 if (v->cur_field_type && !v->ref_field_type[dir]) {
1415 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1417 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1421 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1424 int brfd, scalesame;
1426 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1428 return scaleforsame_y(v, i, n, dir);
1430 return scaleforsame_x(v, n, dir);
1432 brfd = FFMIN(v->brfd, 3);
1433 scalesame = vc1_b_field_mvpred_scales[0][brfd];
1435 return n * scalesame >> 8;
1438 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1441 int refdist, scaleopp;
1443 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1445 return scaleforopp_y(v, n, dir);
1447 return scaleforopp_x(v, n);
1449 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1450 refdist = FFMIN(v->refdist, 3);
1452 refdist = dir ? v->brfd : v->frfd;
1453 scaleopp = vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1455 return n * scaleopp >> 8;
1458 /** Predict and set motion vector
1460 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1461 int mv1, int r_x, int r_y, uint8_t* is_intra,
1462 int pred_flag, int dir)
1464 MpegEncContext *s = &v->s;
1465 int xy, wrap, off = 0;
1469 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1471 int16_t samefield_pred[2], oppfield_pred[2];
1472 int16_t samefield_predA[2], oppfield_predA[2];
1473 int16_t samefield_predB[2], oppfield_predB[2];
1474 int16_t samefield_predC[2], oppfield_predC[2];
1475 int16_t *predA, *predC;
1476 int a_valid, b_valid, c_valid;
1477 int hybridmv_thresh, y_bias = 0;
1479 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1480 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1484 /* scale MV difference to be quad-pel */
1485 dmv_x <<= 1 - s->quarter_sample;
1486 dmv_y <<= 1 - s->quarter_sample;
1488 wrap = s->b8_stride;
1489 xy = s->block_index[n];
1492 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
1493 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
1494 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
1495 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
1496 if (mv1) { /* duplicate motion data for 1-MV block */
1497 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1498 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1499 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1500 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1501 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1502 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1503 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1504 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1505 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1506 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1507 s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1508 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1509 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1514 C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
1515 A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
1517 if (v->field_mode && mixedmv_pic)
1518 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1520 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1522 //in 4-MV mode different blocks have different B predictor position
1525 off = (s->mb_x > 0) ? -1 : 1;
1528 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1537 B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
1539 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1540 b_valid = a_valid && (s->mb_width > 1);
1541 c_valid = s->mb_x || (n == 1 || n == 3);
1542 if (v->field_mode) {
1543 a_valid = a_valid && !is_intra[xy - wrap];
1544 b_valid = b_valid && !is_intra[xy - wrap + off];
1545 c_valid = c_valid && !is_intra[xy - 1];
1549 f = v->mv_f[dir][xy - wrap + v->blocks_off];
1551 num_samefield += 1 - f;
1553 oppfield_predA[0] = A[0];
1554 oppfield_predA[1] = A[1];
1555 samefield_predA[0] = scaleforsame(v, 0, A[0], 0, dir);
1556 samefield_predA[1] = scaleforsame(v, n, A[1], 1, dir);
1558 samefield_predA[0] = A[0];
1559 samefield_predA[1] = A[1];
1561 oppfield_predA[0] = scaleforopp(v, A[0], 0, dir);
1563 oppfield_predA[1] = scaleforopp(v, A[1], 1, dir);
1566 samefield_predA[0] = samefield_predA[1] = 0;
1567 oppfield_predA[0] = oppfield_predA[1] = 0;
1570 f = v->mv_f[dir][xy - 1 + v->blocks_off];
1572 num_samefield += 1 - f;
1574 oppfield_predC[0] = C[0];
1575 oppfield_predC[1] = C[1];
1576 samefield_predC[0] = scaleforsame(v, 0, C[0], 0, dir);
1577 samefield_predC[1] = scaleforsame(v, n, C[1], 1, dir);
1579 samefield_predC[0] = C[0];
1580 samefield_predC[1] = C[1];
1582 oppfield_predC[0] = scaleforopp(v, C[0], 0, dir);
1584 oppfield_predC[1] = scaleforopp(v, C[1], 1, dir);
1587 samefield_predC[0] = samefield_predC[1] = 0;
1588 oppfield_predC[0] = oppfield_predC[1] = 0;
1591 f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1593 num_samefield += 1 - f;
1595 oppfield_predB[0] = B[0];
1596 oppfield_predB[1] = B[1];
1597 samefield_predB[0] = scaleforsame(v, 0, B[0], 0, dir);
1598 samefield_predB[1] = scaleforsame(v, n, B[1], 1, dir);
1600 samefield_predB[0] = B[0];
1601 samefield_predB[1] = B[1];
1603 oppfield_predB[0] = scaleforopp(v, B[0], 0, dir);
1605 oppfield_predB[1] = scaleforopp(v, B[1], 1, dir);
1608 samefield_predB[0] = samefield_predB[1] = 0;
1609 oppfield_predB[0] = oppfield_predB[1] = 0;
1613 samefield_pred[0] = samefield_predA[0];
1614 samefield_pred[1] = samefield_predA[1];
1615 oppfield_pred[0] = oppfield_predA[0];
1616 oppfield_pred[1] = oppfield_predA[1];
1617 } else if (c_valid) {
1618 samefield_pred[0] = samefield_predC[0];
1619 samefield_pred[1] = samefield_predC[1];
1620 oppfield_pred[0] = oppfield_predC[0];
1621 oppfield_pred[1] = oppfield_predC[1];
1622 } else if (b_valid) {
1623 samefield_pred[0] = samefield_predB[0];
1624 samefield_pred[1] = samefield_predB[1];
1625 oppfield_pred[0] = oppfield_predB[0];
1626 oppfield_pred[1] = oppfield_predB[1];
1628 samefield_pred[0] = samefield_pred[1] = 0;
1629 oppfield_pred[0] = oppfield_pred[1] = 0;
1632 if (num_samefield + num_oppfield > 1) {
1633 samefield_pred[0] = mid_pred(samefield_predA[0], samefield_predB[0], samefield_predC[0]);
1634 samefield_pred[1] = mid_pred(samefield_predA[1], samefield_predB[1], samefield_predC[1]);
1636 oppfield_pred[0] = mid_pred(oppfield_predA[0], oppfield_predB[0], oppfield_predC[0]);
1638 oppfield_pred[1] = mid_pred(oppfield_predA[1], oppfield_predB[1], oppfield_predC[1]);
1641 if (v->field_mode) {
1642 if (num_samefield <= num_oppfield)
1643 opposit = 1 - pred_flag;
1645 opposit = pred_flag;
1649 px = oppfield_pred[0];
1650 py = oppfield_pred[1];
1651 predA = oppfield_predA;
1652 predC = oppfield_predC;
1653 v->mv_f[dir][xy + v->blocks_off] = f = 1;
1654 v->ref_field_type[dir] = !v->cur_field_type;
1656 px = samefield_pred[0];
1657 py = samefield_pred[1];
1658 predA = samefield_predA;
1659 predC = samefield_predC;
1660 v->mv_f[dir][xy + v->blocks_off] = f = 0;
1661 v->ref_field_type[dir] = v->cur_field_type;
1664 /* Pullback MV as specified in 8.3.5.3.4 */
1665 if (!v->field_mode) {
1667 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1668 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1669 X = (s->mb_width << 6) - 4;
1670 Y = (s->mb_height << 6) - 4;
1672 if (qx + px < -60) px = -60 - qx;
1673 if (qy + py < -60) py = -60 - qy;
1675 if (qx + px < -28) px = -28 - qx;
1676 if (qy + py < -28) py = -28 - qy;
1678 if (qx + px > X) px = X - qx;
1679 if (qy + py > Y) py = Y - qy;
1682 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1683 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1684 if (v->field_mode && !s->quarter_sample)
1685 hybridmv_thresh = 16;
1687 hybridmv_thresh = 32;
1688 if (a_valid && c_valid) {
1689 if (is_intra[xy - wrap])
1690 sum = FFABS(px) + FFABS(py);
1692 sum = FFABS(px - predA[0]) + FFABS(py - predA[1]);
1693 if (sum > hybridmv_thresh) {
1694 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1702 if (is_intra[xy - 1])
1703 sum = FFABS(px) + FFABS(py);
1705 sum = FFABS(px - predC[0]) + FFABS(py - predC[1]);
1706 if (sum > hybridmv_thresh) {
1707 if (get_bits1(&s->gb)) {
1719 if (v->field_mode && !s->quarter_sample) {
1723 if (v->field_mode && v->numref)
1725 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1727 /* store MV using signed modulus of MV range defined in 4.11 */
1728 s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1729 s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1730 if (mv1) { /* duplicate motion data for 1-MV block */
1731 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1732 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1733 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1734 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1735 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1736 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1737 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1738 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1742 /** Predict and set motion vector for interlaced frame picture MBs
1744 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1745 int mvn, int r_x, int r_y, uint8_t* is_intra)
1747 MpegEncContext *s = &v->s;
1748 int xy, wrap, off = 0;
1749 int A[2], B[2], C[2];
1751 int a_valid = 0, b_valid = 0, c_valid = 0;
1752 int field_a, field_b, field_c; // 0: same, 1: opposit
1753 int total_valid, num_samefield, num_oppfield;
1754 int pos_c, pos_b, n_adj;
1756 wrap = s->b8_stride;
1757 xy = s->block_index[n];
1760 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
1761 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
1762 s->current_picture.f.motion_val[1][xy][0] = 0;
1763 s->current_picture.f.motion_val[1][xy][1] = 0;
1764 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1765 s->current_picture.f.motion_val[0][xy + 1][0] = 0;
1766 s->current_picture.f.motion_val[0][xy + 1][1] = 0;
1767 s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
1768 s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
1769 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
1770 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
1771 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1772 s->current_picture.f.motion_val[1][xy + 1][0] = 0;
1773 s->current_picture.f.motion_val[1][xy + 1][1] = 0;
1774 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1775 s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
1776 s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
1777 s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
1782 off = ((n == 0) || (n == 1)) ? 1 : -1;
1784 if (s->mb_x || (n == 1) || (n == 3)) {
1785 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1786 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1787 A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
1788 A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
1790 } else { // current block has frame mv and cand. has field MV (so average)
1791 A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
1792 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
1793 A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
1794 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
1797 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1803 /* Predict B and C */
1804 B[0] = B[1] = C[0] = C[1] = 0;
1805 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1806 if (!s->first_slice_line) {
1807 if (!v->is_intra[s->mb_x - s->mb_stride]) {
1810 pos_b = s->block_index[n_adj] - 2 * wrap;
1811 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1812 n_adj = (n & 2) | (n & 1);
1814 B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
1815 B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
1816 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1817 B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1818 B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
1821 if (s->mb_width > 1) {
1822 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1825 pos_c = s->block_index[2] - 2 * wrap + 2;
1826 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1829 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
1830 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
1831 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1832 C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1833 C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1835 if (s->mb_x == s->mb_width - 1) {
1836 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1839 pos_c = s->block_index[3] - 2 * wrap - 2;
1840 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1843 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
1844 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
1845 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1846 C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1847 C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
1856 pos_b = s->block_index[1];
1858 B[0] = s->current_picture.f.motion_val[0][pos_b][0];
1859 B[1] = s->current_picture.f.motion_val[0][pos_b][1];
1860 pos_c = s->block_index[0];
1862 C[0] = s->current_picture.f.motion_val[0][pos_c][0];
1863 C[1] = s->current_picture.f.motion_val[0][pos_c][1];
1866 total_valid = a_valid + b_valid + c_valid;
1867 // check if predictor A is out of bounds
1868 if (!s->mb_x && !(n == 1 || n == 3)) {
1871 // check if predictor B is out of bounds
1872 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1873 B[0] = B[1] = C[0] = C[1] = 0;
1875 if (!v->blk_mv_type[xy]) {
1876 if (s->mb_width == 1) {
1880 if (total_valid >= 2) {
1881 px = mid_pred(A[0], B[0], C[0]);
1882 py = mid_pred(A[1], B[1], C[1]);
1883 } else if (total_valid) {
1884 if (a_valid) { px = A[0]; py = A[1]; }
1885 if (b_valid) { px = B[0]; py = B[1]; }
1886 if (c_valid) { px = C[0]; py = C[1]; }
1892 field_a = (A[1] & 4) ? 1 : 0;
1896 field_b = (B[1] & 4) ? 1 : 0;
1900 field_c = (C[1] & 4) ? 1 : 0;
1904 num_oppfield = field_a + field_b + field_c;
1905 num_samefield = total_valid - num_oppfield;
1906 if (total_valid == 3) {
1907 if ((num_samefield == 3) || (num_oppfield == 3)) {
1908 px = mid_pred(A[0], B[0], C[0]);
1909 py = mid_pred(A[1], B[1], C[1]);
1910 } else if (num_samefield >= num_oppfield) {
1911 /* take one MV from same field set depending on priority
1912 the check for B may not be necessary */
1913 px = !field_a ? A[0] : B[0];
1914 py = !field_a ? A[1] : B[1];
1916 px = field_a ? A[0] : B[0];
1917 py = field_a ? A[1] : B[1];
1919 } else if (total_valid == 2) {
1920 if (num_samefield >= num_oppfield) {
1921 if (!field_a && a_valid) {
1924 } else if (!field_b && b_valid) {
1927 } else if (c_valid) {
1932 if (field_a && a_valid) {
1935 } else if (field_b && b_valid) {
1938 } else if (c_valid) {
1943 } else if (total_valid == 1) {
1944 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1945 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1950 /* store MV using signed modulus of MV range defined in 4.11 */
1951 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1952 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1953 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1954 s->current_picture.f.motion_val[0][xy + 1 ][0] = s->current_picture.f.motion_val[0][xy][0];
1955 s->current_picture.f.motion_val[0][xy + 1 ][1] = s->current_picture.f.motion_val[0][xy][1];
1956 s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
1957 s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
1958 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1959 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1960 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1961 s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1962 s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1963 s->mv[0][n + 1][0] = s->mv[0][n][0];
1964 s->mv[0][n + 1][1] = s->mv[0][n][1];
1968 /** Motion compensation for direct or interpolated blocks in B-frames
/**
 * Motion compensation for the backward/interpolated part of a B-frame
 * macroblock: fetch a 16x16 luma block and two 8x8 chroma blocks from the
 * next (backward reference) picture using s->mv[1][0], and AVERAGE them
 * into s->dest[], which already holds the forward prediction.
 * @param v VC-1 decoder context
 */
1970 static void vc1_interp_mc(VC1Context *v)
1972 MpegEncContext *s = &v->s;
1973 DSPContext *dsp = &v->s.dsp;
1974 uint8_t *srcY, *srcU, *srcV;
1975 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
/* in field mode only half the picture height is addressable */
1977 int v_edge_pos = s->v_edge_pos >> v->field_mode;
/* bail out when there is no backward reference picture to read from */
1979 if (!v->field_mode && !v->s.next_picture.f.data[0])
/* backward MV; chroma MVs are derived from luma at half resolution */
1982 mx = s->mv[1][0][0];
1983 my = s->mv[1][0][1];
1984 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1985 uvmy = (my + ((my & 3) == 3)) >> 1;
1986 if (v->field_mode) {
/* shift the vertical MV when referencing the opposite field parity */
1987 if (v->cur_field_type != v->ref_field_type[1])
1988 my = my - 2 + 4 * v->cur_field_type;
1989 uvmy = uvmy - 2 + 4 * v->cur_field_type;
/* round chroma MVs away from zero to an even (half-pel) value */
1992 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1993 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1995 srcY = s->next_picture.f.data[0];
1996 srcU = s->next_picture.f.data[1];
1997 srcV = s->next_picture.f.data[2];
/* integer-pel source position; MVs are in quarter-pel units */
1999 src_x = s->mb_x * 16 + (mx >> 2);
2000 src_y = s->mb_y * 16 + (my >> 2);
2001 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2002 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
/* clamp source coordinates; simple/main profile clamps to MB grid,
 * advanced profile clamps to the coded picture size */
2004 if (v->profile != PROFILE_ADVANCED) {
2005 src_x = av_clip( src_x, -16, s->mb_width * 16);
2006 src_y = av_clip( src_y, -16, s->mb_height * 16);
2007 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
2008 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
2010 src_x = av_clip( src_x, -17, s->avctx->coded_width);
2011 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
2012 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
2013 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
2016 srcY += src_y * s->linesize + src_x;
2017 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2018 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* reading from the second (bottom) field: step one line into the frame */
2020 if (v->field_mode && v->ref_field_type[1]) {
2021 srcY += s->current_picture_ptr->f.linesize[0];
2022 srcU += s->current_picture_ptr->f.linesize[1];
2023 srcV += s->current_picture_ptr->f.linesize[2];
2026 /* for grayscale we should not try to read from unknown area */
2027 if (s->flags & CODEC_FLAG_GRAY) {
2028 srcU = s->edge_emu_buffer + 18 * s->linesize;
2029 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* source block (plus interpolation margin) crosses the picture edge:
 * copy it through the edge-emulation buffer with replicated borders */
2033 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 16 - s->mspel * 3
2034 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my & 3) - 16 - s->mspel * 3) {
2035 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
2037 srcY -= s->mspel * (1 + s->linesize);
2038 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
2039 17 + s->mspel * 2, 17 + s->mspel * 2,
2040 src_x - s->mspel, src_y - s->mspel,
2041 s->h_edge_pos, v_edge_pos);
2042 srcY = s->edge_emu_buffer;
2043 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
2044 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2045 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
2046 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2049 /* if we deal with range reduction we need to scale source blocks */
2050 if (v->rangeredfrm) {
2052 uint8_t *src, *src2;
/* luma: halve the deviation from mid-grey, per range reduction */
2055 for (j = 0; j < 17 + s->mspel * 2; j++) {
2056 for (i = 0; i < 17 + s->mspel * 2; i++)
2057 src[i] = ((src[i] - 128) >> 1) + 128;
/* chroma: same scaling on both U and V planes */
2062 for (j = 0; j < 9; j++) {
2063 for (i = 0; i < 9; i++) {
2064 src[i] = ((src[i] - 128) >> 1) + 128;
2065 src2[i] = ((src2[i] - 128) >> 1) + 128;
2067 src += s->uvlinesize;
2068 src2 += s->uvlinesize;
2071 srcY += s->mspel * (1 + s->linesize);
/* bottom field is written one line into the destination frame */
2074 if (v->field_mode && v->cur_field_type) {
2075 off = s->current_picture_ptr->f.linesize[0];
2076 off_uv = s->current_picture_ptr->f.linesize[1];
/* quarter-pel: use the VC-1 bicubic averager on four 8x8 quadrants */
2083 dxy = ((my & 3) << 2) | (mx & 3);
2084 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2085 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2086 srcY += s->linesize * 8;
2087 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2088 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
/* half-pel: plain 16x16 averaging, rounding controlled by v->rnd */
2090 dxy = (my & 2) | ((mx & 2) >> 1);
2093 dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2095 dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2098 if (s->flags & CODEC_FLAG_GRAY) return;
2099 /* Chroma MC always uses qpel bilinear */
2100 uvmx = (uvmx & 3) << 1;
2101 uvmy = (uvmy & 3) << 1;
2103 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2104 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2106 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2107 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Scale a co-located motion vector component by the B-frame temporal
 * position (bfrac / B_FRACTION_DEN).
 * @param value co-located MV component from the next reference picture
 * @param bfrac B-fraction numerator
 * @param inv   0 = scale for the forward MV, 1 = for the backward MV
 *              (uses the complementary fraction)
 * @param qs    quarter-sample flag; when clear, scaling is done at half
 *              precision and the result is forced back to an even value
 * @return the scaled MV component
 */
2111 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
/* fast path: a power-of-two denominator turns divisions into shifts */
2115 #if B_FRACTION_DEN==256
2119 return 2 * ((value * n + 255) >> 9);
2120 return (value * n + 128) >> 8;
/* backward MV uses the complementary fraction n - B_FRACTION_DEN */
2123 n -= B_FRACTION_DEN;
2125 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2126 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
/**
 * Scale a co-located motion vector component for B frames in interlaced
 * field mode. Counterpart of scale_mv() with separate quarter-sample flags
 * for the current picture (qs) and the reference picture (qs_last).
 * @return the scaled MV component
 */
2130 static av_always_inline int scale_mv_intfi(int value, int bfrac, int inv,
2131 int qs, int qs_last)
/* half-precision rounding path vs. full quarter-sample path */
2139 return (value * n + 255) >> 9;
2141 return (value * n + 128) >> 8;
2144 /** Reconstruct motion vector for B-frame and do motion compensation
 * @param v      VC-1 decoder context
 * @param dmv_x  differential MV x components, [0] forward / [1] backward
 * @param dmv_y  differential MV y components, [0] forward / [1] backward
 * @param direct nonzero for direct-mode macroblocks
 * @param mode   BMV_TYPE_* prediction type of this macroblock
 */
2146 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2147 int direct, int mode)
/* switch to intensity-compensation MV mode around the forward MC call,
 * remembering the previous mode in mv_mode2 so it can be restored */
2150 v->mv_mode2 = v->mv_mode;
2151 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2157 v->mv_mode = v->mv_mode2;
/* interpolated MBs do a forward MC pass then average in the backward one */
2160 if (mode == BMV_TYPE_INTERPOLATED) {
2164 v->mv_mode = v->mv_mode2;
/* backward prediction must not use intensity compensation: restore the
 * saved mode before the single-MV motion compensation call */
2168 if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2169 v->mv_mode = v->mv_mode2;
2170 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2172 v->mv_mode = v->mv_mode2;
/**
 * Predict and reconstruct the forward (s->mv[0][0]) and backward
 * (s->mv[1][0]) motion vectors of a B-frame macroblock: direct-mode MVs are
 * scaled from the co-located MV; otherwise a median of the neighbours
 * A (top), B (top-right/left) and C (left) is taken, pulled back into the
 * picture, combined with the differential MV, and stored in the current
 * picture's motion_val arrays.
 * @param dmv_x,dmv_y differential MVs, index 0 = forward, 1 = backward
 * @param direct nonzero: derive both MVs by scaling the co-located MV
 * @param mvtype BMV_TYPE_* of this macroblock
 */
2175 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2176 int direct, int mvtype)
2178 MpegEncContext *s = &v->s;
2179 int xy, wrap, off = 0;
2184 const uint8_t *is_intra = v->mb_type[0];
2188 /* scale MV difference to be quad-pel */
2189 dmv_x[0] <<= 1 - s->quarter_sample;
2190 dmv_y[0] <<= 1 - s->quarter_sample;
2191 dmv_x[1] <<= 1 - s->quarter_sample;
2192 dmv_y[1] <<= 1 - s->quarter_sample;
2194 wrap = s->b8_stride;
2195 xy = s->block_index[0];
/* intra MB: both MV pairs are zeroed */
2198 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
2199 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
2200 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
2201 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
/* direct mode: scale the co-located MV of the next picture by the
 * B-fraction for both prediction directions */
2204 if (!v->field_mode) {
2205 s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2206 s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2207 s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2208 s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2210 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2211 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2212 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2213 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2214 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2217 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2218 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2219 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2220 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
/* ---- forward MV prediction ---- */
2224 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2225 C = s->current_picture.f.motion_val[0][xy - 2];
2226 A = s->current_picture.f.motion_val[0][xy - wrap * 2];
/* B is top-right, or top-left on the last MB of the row */
2227 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2228 B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
2230 if (!s->mb_x) C[0] = C[1] = 0;
2231 if (!s->first_slice_line) { // predictor A is not out of bounds
2232 if (s->mb_width == 1) {
2236 px = mid_pred(A[0], B[0], C[0]);
2237 py = mid_pred(A[1], B[1], C[1]);
2239 } else if (s->mb_x) { // predictor C is not out of bounds
2245 /* Pullback MV as specified in 8.3.5.3.4 */
2248 if (v->profile < PROFILE_ADVANCED) {
2249 qx = (s->mb_x << 5);
2250 qy = (s->mb_y << 5);
2251 X = (s->mb_width << 5) - 4;
2252 Y = (s->mb_height << 5) - 4;
2253 if (qx + px < -28) px = -28 - qx;
2254 if (qy + py < -28) py = -28 - qy;
2255 if (qx + px > X) px = X - qx;
2256 if (qy + py > Y) py = Y - qy;
2258 qx = (s->mb_x << 6);
2259 qy = (s->mb_y << 6);
2260 X = (s->mb_width << 6) - 4;
2261 Y = (s->mb_height << 6) - 4;
2262 if (qx + px < -60) px = -60 - qx;
2263 if (qy + py < -60) py = -60 - qy;
2264 if (qx + px > X) px = X - qx;
2265 if (qy + py > Y) py = Y - qy;
2268 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE: the leading "0 &&" makes this branch dead code — hybrid MV
 * prediction is deliberately disabled for B frames here */
2269 if (0 && !s->first_slice_line && s->mb_x) {
2270 if (is_intra[xy - wrap])
2271 sum = FFABS(px) + FFABS(py);
2273 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2275 if (get_bits1(&s->gb)) {
2283 if (is_intra[xy - 2])
2284 sum = FFABS(px) + FFABS(py);
2286 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2288 if (get_bits1(&s->gb)) {
2298 /* store MV using signed modulus of MV range defined in 4.11 */
2299 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2300 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* ---- backward MV prediction (mirror of the forward case) ---- */
2302 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2303 C = s->current_picture.f.motion_val[1][xy - 2];
2304 A = s->current_picture.f.motion_val[1][xy - wrap * 2];
2305 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2306 B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
2310 if (!s->first_slice_line) { // predictor A is not out of bounds
2311 if (s->mb_width == 1) {
2315 px = mid_pred(A[0], B[0], C[0]);
2316 py = mid_pred(A[1], B[1], C[1]);
2318 } else if (s->mb_x) { // predictor C is not out of bounds
2324 /* Pullback MV as specified in 8.3.5.3.4 */
2327 if (v->profile < PROFILE_ADVANCED) {
2328 qx = (s->mb_x << 5);
2329 qy = (s->mb_y << 5);
2330 X = (s->mb_width << 5) - 4;
2331 Y = (s->mb_height << 5) - 4;
2332 if (qx + px < -28) px = -28 - qx;
2333 if (qy + py < -28) py = -28 - qy;
2334 if (qx + px > X) px = X - qx;
2335 if (qy + py > Y) py = Y - qy;
2337 qx = (s->mb_x << 6);
2338 qy = (s->mb_y << 6);
2339 X = (s->mb_width << 6) - 4;
2340 Y = (s->mb_height << 6) - 4;
2341 if (qx + px < -60) px = -60 - qx;
2342 if (qy + py < -60) py = -60 - qy;
2343 if (qx + px > X) px = X - qx;
2344 if (qy + py > Y) py = Y - qy;
2347 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE: dead code as above — hybrid prediction disabled for B frames */
2348 if (0 && !s->first_slice_line && s->mb_x) {
2349 if (is_intra[xy - wrap])
2350 sum = FFABS(px) + FFABS(py);
2352 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2354 if (get_bits1(&s->gb)) {
2362 if (is_intra[xy - 2])
2363 sum = FFABS(px) + FFABS(py);
2365 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2367 if (get_bits1(&s->gb)) {
2377 /* store MV using signed modulus of MV range defined in 4.11 */
2379 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2380 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
/* publish the reconstructed MVs for use as neighbour predictors */
2382 s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
2383 s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
2384 s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
2385 s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
/**
 * Predict B-frame motion vectors in interlaced field mode. Direct-mode MBs
 * scale the co-located MV of the next picture; otherwise prediction is
 * delegated to vc1_pred_mv() for the required direction(s).
 * @param n        block index within the MB (0..3)
 * @param mv1      nonzero when a single MV covers the whole macroblock
 * @param pred_flag predictor-selection flags, [0] forward / [1] backward
 */
2388 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2390 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2391 MpegEncContext *s = &v->s;
2392 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2394 if (v->bmvtype == BMV_TYPE_DIRECT) {
2395 int total_opp, k, f;
/* co-located MB is inter-coded: scale its MV for both directions */
2396 if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2397 s->mv[0][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2398 v->bfraction, 0, s->quarter_sample, v->qs_last);
2399 s->mv[0][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2400 v->bfraction, 0, s->quarter_sample, v->qs_last);
2401 s->mv[1][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2402 v->bfraction, 1, s->quarter_sample, v->qs_last);
2403 s->mv[1][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2404 v->bfraction, 1, s->quarter_sample, v->qs_last);
/* choose the reference field parity f by majority vote over the four
 * co-located blocks' opposite-field flags */
2406 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2407 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2408 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2409 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2410 f = (total_opp > 2) ? 1 : 0;
/* co-located MB is intra: fall back to zero MVs */
2412 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2413 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2416 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
/* replicate the direct-mode MVs and field flag to all four blocks */
2417 for (k = 0; k < 4; k++) {
2418 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2419 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2420 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2421 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2422 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2423 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* interpolated: predict both directions for the whole MB */
2427 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2428 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2429 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
/* single-direction MBs: predict the used direction with the decoded
 * differential, and refresh the unused direction's predictor */
2432 if (dir) { // backward
2433 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2434 if (n == 3 || mv1) {
2435 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2438 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2439 if (n == 3 || mv1) {
2440 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2445 /** Get predicted DC value for I-frames only
2446 * prediction dir: left=0, top=1
2447 * @param s MpegEncContext
2448 * @param overlap flag indicating that overlap filtering is used
2449 * @param pq integer part of picture quantizer
2450 * @param[in] n block index in the current MB
2451 * @param dc_val_ptr Pointer to DC predictor
2452 * @param dir_ptr Prediction direction for use in AC prediction
 * @return the predicted DC value
 */
2454 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2455 int16_t **dc_val_ptr, int *dir_ptr)
2457 int a, b, c, wrap, pred, scale;
/* dcpred[scale] approximates 1024/scale; index 0 is a sentinel and is
 * never used as a valid scale */
2459 static const uint16_t dcpred[32] = {
2460 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2461 114, 102, 93, 85, 79, 73, 68, 64,
2462 60, 57, 54, 51, 49, 47, 45, 43,
2463 41, 39, 38, 37, 35, 34, 33
2466 /* find prediction - wmv3_dc_scale always used here in fact */
2467 if (n < 4) scale = s->y_dc_scale;
2468 else scale = s->c_dc_scale;
2470 wrap = s->block_wrap[n];
2471 dc_val = s->dc_val[0] + s->block_index[n];
/* neighbours: a = top, b = top-left, c = left */
2477 b = dc_val[ - 1 - wrap];
2478 a = dc_val[ - wrap];
2480 if (pq < 9 || !overlap) {
2481 /* Set outer values */
2482 if (s->first_slice_line && (n != 2 && n != 3))
2483 b = a = dcpred[scale];
2484 if (s->mb_x == 0 && (n != 1 && n != 3))
2485 b = c = dcpred[scale];
2487 /* Set outer values */
2488 if (s->first_slice_line && (n != 2 && n != 3))
2490 if (s->mb_x == 0 && (n != 1 && n != 3))
/* pick the neighbour along the smoother gradient as the predictor */
2494 if (abs(a - b) <= abs(b - c)) {
2496 *dir_ptr = 1; // left
2499 *dir_ptr = 0; // top
2502 /* update predictor */
2503 *dc_val_ptr = &dc_val[0];
2508 /** Get predicted DC value
2509 * prediction dir: left=0, top=1
2510 * @param s MpegEncContext
2511 * @param overlap flag indicating that overlap filtering is used
2512 * @param pq integer part of picture quantizer
2513 * @param[in] n block index in the current MB
2514 * @param a_avail flag indicating top block availability
2515 * @param c_avail flag indicating left block availability
2516 * @param dc_val_ptr Pointer to DC predictor
2517 * @param dir_ptr Prediction direction for use in AC prediction
 * @return the predicted DC value, rescaled to the current quantizer
 */
2519 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2520 int a_avail, int c_avail,
2521 int16_t **dc_val_ptr, int *dir_ptr)
2523 int a, b, c, wrap, pred;
2525 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2528 wrap = s->block_wrap[n];
2529 dc_val = s->dc_val[0] + s->block_index[n];
/* neighbours: a = top, b = top-left, c = left */
2535 b = dc_val[ - 1 - wrap];
2536 a = dc_val[ - wrap];
2537 /* scale predictors if needed */
2538 q1 = s->current_picture.f.qscale_table[mb_pos];
/* left neighbour coded with a different quantizer: rescale its DC
 * (fixed-point multiply by dqscale, see ff_vc1_dqscale) */
2539 if (c_avail && (n != 1 && n != 3)) {
2540 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2542 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2544 if (a_avail && (n != 2 && n != 3)) {
2545 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2547 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* top-left neighbour: only meaningful when both top and left exist */
2549 if (a_avail && c_avail && (n != 3)) {
2554 off -= s->mb_stride;
2555 q2 = s->current_picture.f.qscale_table[off];
2557 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* choose the predictor along the smoother gradient; fall back to the
 * single available neighbour, defaulting to "left" when neither exists */
2560 if (a_avail && c_avail) {
2561 if (abs(a - b) <= abs(b - c)) {
2563 *dir_ptr = 1; // left
2566 *dir_ptr = 0; // top
2568 } else if (a_avail) {
2570 *dir_ptr = 0; // top
2571 } else if (c_avail) {
2573 *dir_ptr = 1; // left
2576 *dir_ptr = 1; // left
2579 /* update predictor */
2580 *dc_val_ptr = &dc_val[0];
2584 /** @} */ // Block group
2587 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2588 * @see 7.1.4, p91 and 8.1.1.7, p104
/**
 * Predict the "coded" flag of block n from its left (a), top-left (b) and
 * top (c) neighbours, and hand back a pointer to this block's entry in
 * s->coded_block so the caller can store the decoded flag.
 * @return the predicted coded-block flag
 */
2592 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2593 uint8_t **coded_block_ptr)
2595 int xy, wrap, pred, a, b, c;
2597 xy = s->block_index[n];
2598 wrap = s->b8_stride;
/* neighbour layout:  B C
 *                    A X   (X = current block) */
2603 a = s->coded_block[xy - 1 ];
2604 b = s->coded_block[xy - 1 - wrap];
2605 c = s->coded_block[xy - wrap];
/* slot for the current block, updated by the caller */
2614 *coded_block_ptr = &s->coded_block[xy];
2620 * Decode one AC coefficient
2621 * @param v The VC1 context
2622 * @param last Last coefficient
2623 * @param skip How much zero coefficients to skip
2624 * @param value Decoded AC coefficient value
2625 * @param codingset set of VLC to decode data
 * @see 8.1.3.4
 */
2628 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2629 int *value, int codingset)
2631 GetBitContext *gb = &v->s.gb;
2632 int index, escape, run = 0, level = 0, lst = 0;
2634 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
/* regular (non-escape) symbol: run/level come straight from the tables */
2635 if (index != vc1_ac_sizes[codingset] - 1) {
2636 run = vc1_index_decode_table[codingset][index][0];
2637 level = vc1_index_decode_table[codingset][index][1];
/* also terminate on bitstream exhaustion to avoid looping forever */
2638 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
/* escape coding: mode selected by decode210() */
2642 escape = decode210(gb);
/* escape modes 0/1: decode a base symbol, then add a delta to its
 * level (mode 0) or to its run (mode 1) from the delta tables */
2644 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2645 run = vc1_index_decode_table[codingset][index][0];
2646 level = vc1_index_decode_table[codingset][index][1];
2647 lst = index >= vc1_last_decode_table[codingset];
2650 level += vc1_last_delta_level_table[codingset][run];
2652 level += vc1_delta_level_table[codingset][run];
2655 run += vc1_last_delta_run_table[codingset][level] + 1;
2657 run += vc1_delta_run_table[codingset][level] + 1;
/* escape mode 3: run/level sizes are coded explicitly; the sizes are
 * read once per picture and cached in esc3_run_length/esc3_level_length */
2663 lst = get_bits1(gb);
2664 if (v->s.esc3_level_length == 0) {
2665 if (v->pq < 8 || v->dquantfrm) { // table 59
2666 v->s.esc3_level_length = get_bits(gb, 3);
2667 if (!v->s.esc3_level_length)
2668 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2669 } else { // table 60
2670 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2672 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2674 run = get_bits(gb, v->s.esc3_run_length);
2675 sign = get_bits1(gb);
2676 level = get_bits(gb, v->s.esc3_level_length);
2687 /** Decode intra block in intra frames - should be faster than decode_intra_block
2688 * @param v VC1Context
2689 * @param block block to decode
2690 * @param[in] n subblock index
2691 * @param coded are AC coeffs present or not
2692 * @param codingset set of VLC to decode data
 * @return 0 on success, negative on an invalid DC VLC
 */
2694 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n,
2695 int coded, int codingset)
2697 GetBitContext *gb = &v->s.gb;
2698 MpegEncContext *s = &v->s;
2699 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2702 int16_t *ac_val, *ac_val2;
2705 /* Get DC differential */
/* luma blocks (n < 4) and chroma blocks use separate DC VLC tables */
2707 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2709 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2712 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* 119 is the escape index: the differential is coded as a fixed-length
 * field whose width depends on the picture quantizer */
2716 if (dcdiff == 119 /* ESC index value */) {
2717 /* TODO: Optimize */
2718 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2719 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2720 else dcdiff = get_bits(gb, 8);
/* low quantizers carry extra precision bits for the differential */
2723 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2724 else if (v->pq == 2)
2725 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* add the DC prediction (also selects the AC prediction direction) */
2732 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2735 /* Store the quantized DC coeff, used for prediction */
2737 block[0] = dcdiff * s->y_dc_scale;
2739 block[0] = dcdiff * s->c_dc_scale;
/* ---- AC coefficients present ---- */
2750 int last = 0, skip, value;
2751 const uint8_t *zz_table;
2755 scale = v->pq * 2 + v->halfpq;
/* choose the zigzag scan matching the AC prediction direction */
2759 zz_table = v->zz_8x8[2];
2761 zz_table = v->zz_8x8[3];
2763 zz_table = v->zz_8x8[1];
2765 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
/* point ac_val at the predictor row/column of the chosen neighbour */
2767 if (dc_pred_dir) // left
2770 ac_val -= 16 * s->block_wrap[n];
/* run-level decode loop: place each coefficient via the zigzag table */
2773 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2777 block[zz_table[i++]] = value;
2780 /* apply AC prediction if needed */
2782 if (dc_pred_dir) { // left
2783 for (k = 1; k < 8; k++)
2784 block[k << v->left_blk_sh] += ac_val[k];
2786 for (k = 1; k < 8; k++)
2787 block[k << v->top_blk_sh] += ac_val[k + 8];
2790 /* save AC coeffs for further prediction */
2791 for (k = 1; k < 8; k++) {
2792 ac_val2[k] = block[k << v->left_blk_sh];
2793 ac_val2[k + 8] = block[k << v->top_blk_sh];
2796 /* scale AC coeffs */
2797 for (k = 1; k < 64; k++)
/* non-uniform quantizer: bias nonzero coefficients away from zero */
2801 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2804 if (s->ac_pred) i = 63;
/* ---- no AC coefficients coded ---- */
2810 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2814 scale = v->pq * 2 + v->halfpq;
2815 memset(ac_val2, 0, 16 * 2);
/* copy the neighbour's stored AC values for this block's predictors */
2816 if (dc_pred_dir) { // left
2819 memcpy(ac_val2, ac_val, 8 * 2);
2821 ac_val -= 16 * s->block_wrap[n];
2823 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2826 /* apply AC prediction if needed */
2828 if (dc_pred_dir) { //left
2829 for (k = 1; k < 8; k++) {
2830 block[k << v->left_blk_sh] = ac_val[k] * scale;
2831 if (!v->pquantizer && block[k << v->left_blk_sh])
2832 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2835 for (k = 1; k < 8; k++) {
2836 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2837 if (!v->pquantizer && block[k << v->top_blk_sh])
2838 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2844 s->block_last_index[n] = i;
2849 /** Decode intra block in intra frames - should be faster than decode_intra_block
 * (advanced-profile variant: per-MB quantizer, neighbour availability and
 * cross-quantizer rescaling of the AC/DC predictors)
2850 * @param v VC1Context
2851 * @param block block to decode
2852 * @param[in] n subblock number
2853 * @param coded are AC coeffs present or not
2854 * @param codingset set of VLC to decode data
2855 * @param mquant quantizer value for this macroblock
 * @return 0 on success, negative on an invalid DC VLC
 */
2857 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
2858 int coded, int codingset, int mquant)
2860 GetBitContext *gb = &v->s.gb;
2861 MpegEncContext *s = &v->s;
2862 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2865 int16_t *ac_val, *ac_val2;
2867 int a_avail = v->a_avail, c_avail = v->c_avail;
2868 int use_pred = s->ac_pred;
2871 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2873 /* Get DC differential */
2875 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2877 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2880 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* 119 is the escape index: fixed-length differential, width depends on
 * the macroblock quantizer */
2884 if (dcdiff == 119 /* ESC index value */) {
2885 /* TODO: Optimize */
2886 if (mquant == 1) dcdiff = get_bits(gb, 10);
2887 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2888 else dcdiff = get_bits(gb, 8);
2891 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2892 else if (mquant == 2)
2893 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* add DC prediction, honouring neighbour availability */
2900 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2903 /* Store the quantized DC coeff, used for prediction */
2905 block[0] = dcdiff * s->y_dc_scale;
2907 block[0] = dcdiff * s->c_dc_scale;
2913 /* check if AC is needed at all */
2914 if (!a_avail && !c_avail)
2916 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2919 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
/* point ac_val at the predictor row/column of the chosen neighbour */
2921 if (dc_pred_dir) // left
2924 ac_val -= 16 * s->block_wrap[n];
/* q1 = current MB quantizer, q2 = the predictor MB's quantizer;
 * when they differ the stored predictors must be rescaled */
2926 q1 = s->current_picture.f.qscale_table[mb_pos];
2927 if ( dc_pred_dir && c_avail && mb_pos)
2928 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2929 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2930 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
/* blocks 1/2 predict from inside the same MB, so q2 = q1 */
2931 if ( dc_pred_dir && n == 1)
2933 if (!dc_pred_dir && n == 2)
/* ---- AC coefficients present ---- */
2939 int last = 0, skip, value;
2940 const uint8_t *zz_table;
/* interlaced frame pictures (fcm == 1) use the interlaced zigzag */
2944 if (!use_pred && v->fcm == 1) {
2945 zz_table = v->zzi_8x8;
2947 if (!dc_pred_dir) // top
2948 zz_table = v->zz_8x8[2];
2950 zz_table = v->zz_8x8[3];
2954 zz_table = v->zz_8x8[1];
2956 zz_table = v->zzi_8x8;
2960 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2964 block[zz_table[i++]] = value;
2967 /* apply AC prediction if needed */
2969 /* scale predictors if needed*/
2970 if (q2 && q1 != q2) {
2971 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2972 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
/* fixed-point rescale of the predictors from q2 to q1 */
2974 if (dc_pred_dir) { // left
2975 for (k = 1; k < 8; k++)
2976 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2978 for (k = 1; k < 8; k++)
2979 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2982 if (dc_pred_dir) { //left
2983 for (k = 1; k < 8; k++)
2984 block[k << v->left_blk_sh] += ac_val[k];
2986 for (k = 1; k < 8; k++)
2987 block[k << v->top_blk_sh] += ac_val[k + 8];
2991 /* save AC coeffs for further prediction */
2992 for (k = 1; k < 8; k++) {
2993 ac_val2[k ] = block[k << v->left_blk_sh];
2994 ac_val2[k + 8] = block[k << v->top_blk_sh];
2997 /* scale AC coeffs */
2998 for (k = 1; k < 64; k++)
/* non-uniform quantizer: bias nonzero coefficients away from zero */
3002 block[k] += (block[k] < 0) ? -mquant : mquant;
3005 if (use_pred) i = 63;
3006 } else { // no AC coeffs
3009 memset(ac_val2, 0, 16 * 2);
3010 if (dc_pred_dir) { // left
3012 memcpy(ac_val2, ac_val, 8 * 2);
/* rescale the copied predictors if the neighbour used another quantizer */
3013 if (q2 && q1 != q2) {
3014 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3015 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3016 for (k = 1; k < 8; k++)
3017 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3022 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3023 if (q2 && q1 != q2) {
3024 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3025 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3026 for (k = 1; k < 8; k++)
3027 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3032 /* apply AC prediction if needed */
3034 if (dc_pred_dir) { // left
3035 for (k = 1; k < 8; k++) {
3036 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3037 if (!v->pquantizer && block[k << v->left_blk_sh])
3038 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3041 for (k = 1; k < 8; k++) {
3042 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3043 if (!v->pquantizer && block[k << v->top_blk_sh])
3044 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3050 s->block_last_index[n] = i;
3055 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3056 * @param v VC1Context
3057 * @param block block to decode
3058 * @param[in] n subblock index
3059 * @param coded are AC coeffs present or not
3060 * @param mquant block quantizer
3061 * @param codingset set of VLC to decode data
 * @return 0 on success, negative on an invalid DC VLC
 */
3063 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
3064 int coded, int mquant, int codingset)
3066 GetBitContext *gb = &v->s.gb;
3067 MpegEncContext *s = &v->s;
3068 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3071 int16_t *ac_val, *ac_val2;
3073 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3074 int a_avail = v->a_avail, c_avail = v->c_avail;
3075 int use_pred = s->ac_pred;
3079 s->dsp.clear_block(block);
3081 /* XXX: Guard against dumb values of mquant */
3082 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3084 /* Set DC scale - y and c use the same */
3085 s->y_dc_scale = s->y_dc_scale_table[mquant];
3086 s->c_dc_scale = s->c_dc_scale_table[mquant];
3088 /* Get DC differential */
3090 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3092 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3095 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* 119 is the escape index: fixed-length differential, width depends on
 * the block quantizer */
3099 if (dcdiff == 119 /* ESC index value */) {
3100 /* TODO: Optimize */
3101 if (mquant == 1) dcdiff = get_bits(gb, 10);
3102 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3103 else dcdiff = get_bits(gb, 8);
3106 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3107 else if (mquant == 2)
3108 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* add DC prediction, honouring neighbour availability */
3115 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3118 /* Store the quantized DC coeff, used for prediction */
3121 block[0] = dcdiff * s->y_dc_scale;
3123 block[0] = dcdiff * s->c_dc_scale;
3129 /* check if AC is needed at all and adjust direction if needed */
3130 if (!a_avail) dc_pred_dir = 1;
3131 if (!c_avail) dc_pred_dir = 0;
3132 if (!a_avail && !c_avail) use_pred = 0;
3133 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3136 scale = mquant * 2 + v->halfpq;
/* point ac_val at the predictor row/column of the chosen neighbour */
3138 if (dc_pred_dir) //left
3141 ac_val -= 16 * s->block_wrap[n];
/* q1 = current MB quantizer, q2 = predictor MB's quantizer */
3143 q1 = s->current_picture.f.qscale_table[mb_pos];
3144 if (dc_pred_dir && c_avail && mb_pos)
3145 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3146 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3147 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
/* blocks 1/2/3 predict from inside the same MB, so q2 = q1 */
3148 if ( dc_pred_dir && n == 1)
3150 if (!dc_pred_dir && n == 2)
3152 if (n == 3) q2 = q1;
/* ---- AC coefficients present ---- */
3155 int last = 0, skip, value;
3159 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
/* progressive: normal zigzag; interlaced (fcm == 1): direction-dependent
 * or interlaced zigzag tables */
3164 block[v->zz_8x8[0][i++]] = value;
3166 if (use_pred && (v->fcm == 1)) {
3167 if (!dc_pred_dir) // top
3168 block[v->zz_8x8[2][i++]] = value;
3170 block[v->zz_8x8[3][i++]] = value;
3172 block[v->zzi_8x8[i++]] = value;
3177 /* apply AC prediction if needed */
3179 /* scale predictors if needed*/
3180 if (q2 && q1 != q2) {
3181 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3182 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
/* fixed-point rescale of the predictors from q2 to q1 */
3184 if (dc_pred_dir) { // left
3185 for (k = 1; k < 8; k++)
3186 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3188 for (k = 1; k < 8; k++)
3189 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3192 if (dc_pred_dir) { // left
3193 for (k = 1; k < 8; k++)
3194 block[k << v->left_blk_sh] += ac_val[k];
3196 for (k = 1; k < 8; k++)
3197 block[k << v->top_blk_sh] += ac_val[k + 8];
3201 /* save AC coeffs for further prediction */
3202 for (k = 1; k < 8; k++) {
3203 ac_val2[k ] = block[k << v->left_blk_sh];
3204 ac_val2[k + 8] = block[k << v->top_blk_sh];
3207 /* scale AC coeffs */
3208 for (k = 1; k < 64; k++)
/* non-uniform quantizer: bias nonzero coefficients away from zero */
3212 block[k] += (block[k] < 0) ? -mquant : mquant;
3215 if (use_pred) i = 63;
3216 } else { // no AC coeffs
3219 memset(ac_val2, 0, 16 * 2);
3220 if (dc_pred_dir) { // left
3222 memcpy(ac_val2, ac_val, 8 * 2);
/* rescale the copied predictors if the neighbour used another quantizer */
3223 if (q2 && q1 != q2) {
3224 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3225 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3226 for (k = 1; k < 8; k++)
3227 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3232 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3233 if (q2 && q1 != q2) {
3234 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3235 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3236 for (k = 1; k < 8; k++)
3237 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3242 /* apply AC prediction if needed */
3244 if (dc_pred_dir) { // left
3245 for (k = 1; k < 8; k++) {
3246 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3247 if (!v->pquantizer && block[k << v->left_blk_sh])
3248 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3251 for (k = 1; k < 8; k++) {
3252 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3253 if (!v->pquantizer && block[k << v->top_blk_sh])
3254 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3260 s->block_last_index[n] = i;
/**
 * Decode one inter-coded (P) residual block: select the block transform type
 * (8x8, two 8x4, two 4x8 or four 4x4), decode AC run/level pairs, dequantize,
 * and add the inverse-transformed residual to @p dst.
 * NOTE(review): this listing is a sampled excerpt — statements between the
 * visible lines (loop headers, closing braces, the final return) are not
 * shown here; comments below describe only what is visible.
 * @param mquant      block quantizer scale
 * @param ttmb        macroblock-level transform type (-1 = per-block signalled)
 * @param first_block set for the first coded block of the MB (affects
 *                    sub-block pattern signalling, see condition at 3288-3290)
 * @param skip_block  if set, only parse the bitstream — no inverse transform
 * @param ttmb_out    accumulates the chosen transform type, 4 bits per block
 */
3267 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n,
3268 int mquant, int ttmb, int first_block,
3269 uint8_t *dst, int linesize, int skip_block,
3272 MpegEncContext *s = &v->s;
3273 GetBitContext *gb = &s->gb;
3276 int scale, off, idx, last, skip, value;
3277 int ttblk = ttmb & 7;
3280 s->dsp.clear_block(block);
/* per-block transform type signalled in the bitstream */
3283 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3285 if (ttblk == TT_4X4) {
3286 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3288 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3289 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3290 || (!v->res_rtm_flag && !first_block))) {
3291 subblkpat = decode012(gb);
3293 subblkpat ^= 3; // swap decoded pattern bits
3294 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3296 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
/* dequant step: 2*MQUANT, plus half-step when the MB uses the picture quantizer */
3299 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3301 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3302 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3303 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3306 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3307 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* --- TT_8X8: single 8x8 transform, zigzag scan per progressive/interlaced --- */
3316 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3321 idx = v->zz_8x8[0][i++];
3323 idx = v->zzi_8x8[i++];
3324 block[idx] = value * scale;
/* non-uniform quantizer: add/subtract MQUANT depending on sign */
3326 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3330 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3332 v->vc1dsp.vc1_inv_trans_8x8(block);
3333 s->dsp.add_pixels_clamped(block, dst, linesize);
/* --- TT_4X4: up to four 4x4 sub-blocks, selected by subblkpat --- */
3338 pat = ~subblkpat & 0xF;
3339 for (j = 0; j < 4; j++) {
3340 last = subblkpat & (1 << (3 - j));
3342 off = (j & 1) * 4 + (j & 2) * 16;
3344 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3349 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3351 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3352 block[idx + off] = value * scale;
3354 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3356 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3358 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3360 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* --- TT_8X4: top/bottom 8x4 halves --- */
3365 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3366 for (j = 0; j < 2; j++) {
3367 last = subblkpat & (1 << (1 - j));
3371 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3376 idx = v->zz_8x4[i++] + off;
3378 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3379 block[idx] = value * scale;
3381 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3383 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3385 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3387 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* --- TT_4X8: left/right 4x8 halves --- */
3392 pat = ~(subblkpat * 5) & 0xF;
3393 for (j = 0; j < 2; j++) {
3394 last = subblkpat & (1 << (1 - j));
3398 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3403 idx = v->zz_4x8[i++] + off;
3405 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3406 block[idx] = value * scale;
3408 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3410 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3412 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3414 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* record this block's transform type for the loop filter (4 bits per block) */
3420 *ttmb_out |= ttblk << (n * 4);
3424 /** @} */ // Macroblock group
/* Lookup tables for differential motion-vector decoding: index -> extra bit
 * count (size_table) and value offset (offset_table). Presumably consumed by
 * the GET_MVDATA macro used in vc1_decode_p_mb — TODO confirm against the
 * macro definition (outside this excerpt). */
3426 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3427 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/**
 * Apply the in-loop vertical deblocking filter for one block of the
 * macroblock ABOVE the current one (note the `s->mb_x - s->mb_stride`
 * indexing into the cbp/is_intra history arrays).
 * Filters the horizontal edge between that block and its bottom neighbour
 * with an 8-wide or two 4-wide filters depending on coded sub-block pattern,
 * intra status and motion-vector (dis)continuity across the edge.
 * NOTE(review): sampled excerpt — some closing braces/else branches between
 * the visible lines are not shown.
 * @param block_num 0-3 = luma 8x8 blocks, 4-5 = chroma planes
 */
3429 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3431 MpegEncContext *s = &v->s;
3432 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3433 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3434 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3435 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3436 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3439 if (block_num > 3) {
3440 dst = s->dest[block_num - 3];
3442 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
/* skip the MB-boundary edge on the last row (except for the top luma blocks) */
3444 if (s->mb_y != s->end_mb_y || block_num < 2) {
3448 if (block_num > 3) {
3449 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3450 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3451 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3452 mv_stride = s->mb_stride;
/* luma: bottom neighbour is either in the same MB (blocks 0/1) or the MB below */
3454 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3455 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3456 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3457 : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3458 mv_stride = s->b8_stride;
3459 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* filter the full 8-wide edge if either side is intra or MVs differ across it */
3462 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3463 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3464 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3466 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3468 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3471 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3473 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* internal (mid-block) edge: only when the block used a transform with a
 * horizontal split (4x4 or 8x4) */
3478 dst -= 4 * linesize;
3479 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3480 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3481 idx = (block_cbp | (block_cbp >> 2)) & 3;
3483 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3486 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3488 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/**
 * Apply the in-loop horizontal deblocking filter for one block of the
 * macroblock to the UPPER-LEFT of the current one (`s->mb_x - 1 -
 * s->mb_stride` indexing). Mirror of vc1_apply_p_v_loop_filter for
 * vertical edges between a block and its right-hand neighbour.
 * NOTE(review): sampled excerpt — some branches between the visible lines
 * are not shown.
 * @param block_num 0-3 = luma 8x8 blocks, 4-5 = chroma planes
 */
3493 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3495 MpegEncContext *s = &v->s;
3496 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3497 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3498 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3499 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3500 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3503 if (block_num > 3) {
3504 dst = s->dest[block_num - 3] - 8 * linesize;
3506 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
/* skip the MB-boundary edge at the picture's right border
 * (!(block_num & 5) selects blocks 0 and 2, the left luma column) */
3509 if (s->mb_x != s->mb_width || !(block_num & 5)) {
3512 if (block_num > 3) {
3513 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3514 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3515 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
/* luma: right neighbour is either in the same MB (blocks 0/2) or the MB to the right */
3517 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3518 : (mb_cbp >> ((block_num + 1) * 4));
3519 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3520 : (mb_is_intra >> ((block_num + 1) * 4));
3521 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3523 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3524 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3526 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3528 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3531 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3533 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* internal (mid-block) edge: only for transforms with a vertical split (4x4 or 4x8) */
3539 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3540 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3541 idx = (block_cbp | (block_cbp >> 1)) & 5;
3543 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3546 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3548 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/**
 * Run the P-frame in-loop deblocking filter for the current macroblock
 * position: vertical edges first, then horizontal edges one MB behind
 * (the H filter needs the V-filtered pixels of its right neighbour),
 * catching up on the trailing MB at the end of each row.
 * NOTE(review): sampled excerpt — loop/closing braces between visible
 * lines are not shown.
 */
3553 static void vc1_apply_p_loop_filter(VC1Context *v)
3555 MpegEncContext *s = &v->s;
3558 for (i = 0; i < 6; i++) {
3559 vc1_apply_p_v_loop_filter(v, i);
3562 /* V always preceedes H, therefore we run H one MB before V;
3563 * at the end of a row, we catch up to complete the row */
3565 for (i = 0; i < 6; i++) {
3566 vc1_apply_p_h_loop_filter(v, i);
3568 if (s->mb_x == s->mb_width - 1) {
3570 ff_update_block_index(s);
3571 for (i = 0; i < 6; i++) {
3572 vc1_apply_p_h_loop_filter(v, i);
3578 /** Decode one P-frame MB
/**
 * Decode one macroblock of a progressive P frame.
 * Handles the two MB modes: 1MV (single motion vector for the whole MB,
 * possibly intra) and 4MV (one MV per luma block), each with a skipped
 * variant. Per block, either decodes an intra block (with AC prediction and
 * optional overlap smoothing) or an inter residual via vc1_decode_p_block.
 * Stores per-MB cbp / transform-type / intra masks for the loop filter.
 * NOTE(review): sampled excerpt — closing braces, some else branches and
 * the final return between visible lines are not shown.
 */
3580 static int vc1_decode_p_mb(VC1Context *v)
3582 MpegEncContext *s = &v->s;
3583 GetBitContext *gb = &s->gb;
3585 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3586 int cbp; /* cbp decoding stuff */
3587 int mqdiff, mquant; /* MB quantization */
3588 int ttmb = v->ttfrm; /* MB Transform type */
3590 int mb_has_coeffs = 1; /* last_flag */
3591 int dmv_x, dmv_y; /* Differential MV components */
3592 int index, index1; /* LUT indexes */
3593 int val, sign; /* temp values */
3594 int first_block = 1;
3596 int skipped, fourmv;
3597 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3599 mquant = v->pq; /* Loosy initialization */
/* MV-type and skip flags: raw (in-bitstream) or from the decoded bitplane */
3601 if (v->mv_type_is_raw)
3602 fourmv = get_bits1(gb);
3604 fourmv = v->mv_type_mb_plane[mb_pos];
3606 skipped = get_bits1(gb);
3608 skipped = v->s.mbskip_table[mb_pos];
3610 if (!fourmv) { /* 1MV mode */
3612 GET_MVDATA(dmv_x, dmv_y);
3615 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3616 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3618 s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3619 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3621 /* FIXME Set DC val for inter block ? */
3622 if (s->mb_intra && !mb_has_coeffs) {
3624 s->ac_pred = get_bits1(gb);
3626 } else if (mb_has_coeffs) {
3628 s->ac_pred = get_bits1(gb);
3629 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3635 s->current_picture.f.qscale_table[mb_pos] = mquant;
3637 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3638 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3639 VC1_TTMB_VLC_BITS, 2);
3640 if (!s->mb_intra) vc1_mc_1mv(v, 0);
/* decode the 6 blocks (4 luma + 2 chroma) */
3642 for (i = 0; i < 6; i++) {
3643 s->dc_val[0][s->block_index[i]] = 0;
3645 val = ((cbp >> (5 - i)) & 1);
3646 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3647 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3649 /* check if prediction blocks A and C are available */
3650 v->a_avail = v->c_avail = 0;
3651 if (i == 2 || i == 3 || !s->first_slice_line)
3652 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3653 if (i == 1 || i == 3 || s->mb_x)
3654 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3656 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3657 (i & 4) ? v->codingset2 : v->codingset);
3658 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3660 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3662 for (j = 0; j < 64; j++)
3663 s->block[i][j] <<= 1;
3664 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* overlap smoothing at high quantizers, when enabled */
3665 if (v->pq >= 9 && v->overlap) {
3667 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3669 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3671 block_cbp |= 0xF << (i << 2);
3672 block_intra |= 1 << i;
3674 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3675 s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3676 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3677 block_cbp |= pat << (i << 2);
3678 if (!v->ttmbf && ttmb < 8)
/* skipped 1MV MB: clear block state, predict MV with zero differential */
3685 for (i = 0; i < 6; i++) {
3686 v->mb_type[0][s->block_index[i]] = 0;
3687 s->dc_val[0][s->block_index[i]] = 0;
3689 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3690 s->current_picture.f.qscale_table[mb_pos] = 0;
3691 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3694 } else { // 4MV mode
3695 if (!skipped /* unskipped MB */) {
3696 int intra_count = 0, coded_inter = 0;
3697 int is_intra[6], is_coded[6];
3699 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* first pass: per-luma-block MV decode + luma MC, collect intra/coded flags */
3700 for (i = 0; i < 6; i++) {
3701 val = ((cbp >> (5 - i)) & 1);
3702 s->dc_val[0][s->block_index[i]] = 0;
3709 GET_MVDATA(dmv_x, dmv_y);
3711 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3713 vc1_mc_4mv_luma(v, i, 0);
3714 intra_count += s->mb_intra;
3715 is_intra[i] = s->mb_intra;
3716 is_coded[i] = mb_has_coeffs;
/* chroma blocks inherit intra status by majority vote of luma blocks */
3719 is_intra[i] = (intra_count >= 3);
3723 vc1_mc_4mv_chroma(v, 0);
3724 v->mb_type[0][s->block_index[i]] = is_intra[i];
3726 coded_inter = !is_intra[i] & is_coded[i];
3728 // if there are no coded blocks then don't do anything more
3730 if (!intra_count && !coded_inter)
3733 s->current_picture.f.qscale_table[mb_pos] = mquant;
3734 /* test if block is intra and has pred */
3737 for (i = 0; i < 6; i++)
3739 if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3740 || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3746 s->ac_pred = get_bits1(gb);
3750 if (!v->ttmbf && coded_inter)
3751 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* second pass: decode intra blocks / inter residuals */
3752 for (i = 0; i < 6; i++) {
3754 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3755 s->mb_intra = is_intra[i];
3757 /* check if prediction blocks A and C are available */
3758 v->a_avail = v->c_avail = 0;
3759 if (i == 2 || i == 3 || !s->first_slice_line)
3760 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3761 if (i == 1 || i == 3 || s->mb_x)
3762 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3764 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3765 (i & 4) ? v->codingset2 : v->codingset);
3766 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3768 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3770 for (j = 0; j < 64; j++)
3771 s->block[i][j] <<= 1;
3772 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3773 (i & 4) ? s->uvlinesize : s->linesize);
3774 if (v->pq >= 9 && v->overlap) {
3776 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3778 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3780 block_cbp |= 0xF << (i << 2);
3781 block_intra |= 1 << i;
3782 } else if (is_coded[i]) {
3783 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3784 first_block, s->dest[dst_idx] + off,
3785 (i & 4) ? s->uvlinesize : s->linesize,
3786 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3788 block_cbp |= pat << (i << 2);
3789 if (!v->ttmbf && ttmb < 8)
3794 } else { // skipped MB
3796 s->current_picture.f.qscale_table[mb_pos] = 0;
3797 for (i = 0; i < 6; i++) {
3798 v->mb_type[0][s->block_index[i]] = 0;
3799 s->dc_val[0][s->block_index[i]] = 0;
/* skipped 4MV MB: zero-differential MV prediction + MC for each luma block */
3801 for (i = 0; i < 4; i++) {
3802 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3803 vc1_mc_4mv_luma(v, i, 0);
3805 vc1_mc_4mv_chroma(v, 0);
3806 s->current_picture.f.qscale_table[mb_pos] = 0;
/* save per-MB masks used later by the loop filter */
3810 v->cbp[s->mb_x] = block_cbp;
3811 v->ttblk[s->mb_x] = block_tt;
3812 v->is_intra[s->mb_x] = block_intra;
3817 /* Decode one macroblock in an interlaced frame p picture */
/* Decode one macroblock in an interlaced frame p picture */
/**
 * MB modes here come from ff_vc1_mbmode_intfrp and select 1MV, 2MV-field,
 * 4MV, 4MV-field or intra coding; field-transform (fieldtx) doubles the luma
 * line stride for intra output. NOTE(review): sampled excerpt — closing
 * braces, some else branches and the final return are not visible here.
 */
3819 static int vc1_decode_p_mb_intfr(VC1Context *v)
3821 MpegEncContext *s = &v->s;
3822 GetBitContext *gb = &s->gb;
3824 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3825 int cbp = 0; /* cbp decoding stuff */
3826 int mqdiff, mquant; /* MB quantization */
3827 int ttmb = v->ttfrm; /* MB Transform type */
3829 int mb_has_coeffs = 1; /* last_flag */
3830 int dmv_x, dmv_y; /* Differential MV components */
3831 int val; /* temp value */
3832 int first_block = 1;
3834 int skipped, fourmv = 0, twomv = 0;
3835 int block_cbp = 0, pat, block_tt = 0;
3836 int idx_mbmode = 0, mvbp;
3837 int stride_y, fieldtx;
3839 mquant = v->pq; /* Loosy initialization */
3842 skipped = get_bits1(gb);
3844 skipped = v->s.mbskip_table[mb_pos];
3846 if (v->fourmvswitch)
3847 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3849 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3850 switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3851 /* store the motion vector type in a flag (useful later) */
3852 case MV_PMODE_INTFR_4MV:
3854 v->blk_mv_type[s->block_index[0]] = 0;
3855 v->blk_mv_type[s->block_index[1]] = 0;
3856 v->blk_mv_type[s->block_index[2]] = 0;
3857 v->blk_mv_type[s->block_index[3]] = 0;
3859 case MV_PMODE_INTFR_4MV_FIELD:
3861 v->blk_mv_type[s->block_index[0]] = 1;
3862 v->blk_mv_type[s->block_index[1]] = 1;
3863 v->blk_mv_type[s->block_index[2]] = 1;
3864 v->blk_mv_type[s->block_index[3]] = 1;
3866 case MV_PMODE_INTFR_2MV_FIELD:
3868 v->blk_mv_type[s->block_index[0]] = 1;
3869 v->blk_mv_type[s->block_index[1]] = 1;
3870 v->blk_mv_type[s->block_index[2]] = 1;
3871 v->blk_mv_type[s->block_index[3]] = 1;
3873 case MV_PMODE_INTFR_1MV:
3874 v->blk_mv_type[s->block_index[0]] = 0;
3875 v->blk_mv_type[s->block_index[1]] = 0;
3876 v->blk_mv_type[s->block_index[2]] = 0;
3877 v->blk_mv_type[s->block_index[3]] = 0;
3880 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3881 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3882 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3883 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3884 s->mb_intra = v->is_intra[s->mb_x] = 1;
3885 for (i = 0; i < 6; i++)
3886 v->mb_type[0][s->block_index[i]] = 1;
3887 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3888 mb_has_coeffs = get_bits1(gb);
3890 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3891 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3893 s->current_picture.f.qscale_table[mb_pos] = mquant;
3894 /* Set DC scale - y and c use the same (not sure if necessary here) */
3895 s->y_dc_scale = s->y_dc_scale_table[mquant];
3896 s->c_dc_scale = s->c_dc_scale_table[mquant];
3898 for (i = 0; i < 6; i++) {
3899 s->dc_val[0][s->block_index[i]] = 0;
3901 val = ((cbp >> (5 - i)) & 1);
3902 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3903 v->a_avail = v->c_avail = 0;
3904 if (i == 2 || i == 3 || !s->first_slice_line)
3905 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3906 if (i == 1 || i == 3 || s->mb_x)
3907 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3909 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3910 (i & 4) ? v->codingset2 : v->codingset);
3911 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3912 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* fieldtx interleaves the two fields: doubled stride and remapped offsets */
3914 stride_y = s->linesize << fieldtx;
3915 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3917 stride_y = s->uvlinesize;
3920 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3924 } else { // inter MB
3925 mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3927 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3928 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3929 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3931 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3932 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3933 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3936 s->mb_intra = v->is_intra[s->mb_x] = 0;
3937 for (i = 0; i < 6; i++)
3938 v->mb_type[0][s->block_index[i]] = 0;
3939 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3940 /* for all motion vector read MVDATA and motion compensate each block */
/* 4MV path: one MV per coded luma block, then chroma MC */
3944 for (i = 0; i < 6; i++) {
3947 val = ((mvbp >> (3 - i)) & 1);
3949 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3951 vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3952 vc1_mc_4mv_luma(v, i, 0);
3953 } else if (i == 4) {
3954 vc1_mc_4mv_chroma4(v);
/* 2MV-field path: one MV per field, each covering two luma blocks */
3961 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3963 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3964 vc1_mc_4mv_luma(v, 0, 0);
3965 vc1_mc_4mv_luma(v, 1, 0);
3968 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3970 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3971 vc1_mc_4mv_luma(v, 2, 0);
3972 vc1_mc_4mv_luma(v, 3, 0);
3973 vc1_mc_4mv_chroma4(v);
/* 1MV path */
3975 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3977 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3979 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3983 GET_MQUANT(); // p. 227
3984 s->current_picture.f.qscale_table[mb_pos] = mquant;
3985 if (!v->ttmbf && cbp)
3986 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3987 for (i = 0; i < 6; i++) {
3988 s->dc_val[0][s->block_index[i]] = 0;
3990 val = ((cbp >> (5 - i)) & 1);
3992 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3994 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3996 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3997 first_block, s->dest[dst_idx] + off,
3998 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3999 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4000 block_cbp |= pat << (i << 2);
4001 if (!v->ttmbf && ttmb < 8)
/* skipped MB: clear state and predict with a zero-differential 1MV */
4008 s->mb_intra = v->is_intra[s->mb_x] = 0;
4009 for (i = 0; i < 6; i++) {
4010 v->mb_type[0][s->block_index[i]] = 0;
4011 s->dc_val[0][s->block_index[i]] = 0;
4013 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
4014 s->current_picture.f.qscale_table[mb_pos] = 0;
4015 v->blk_mv_type[s->block_index[0]] = 0;
4016 v->blk_mv_type[s->block_index[1]] = 0;
4017 v->blk_mv_type[s->block_index[2]] = 0;
4018 v->blk_mv_type[s->block_index[3]] = 0;
4019 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
/* at end of row, snapshot intra flags for next row's prediction */
4022 if (s->mb_x == s->mb_width - 1)
4023 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/**
 * Decode one macroblock of an interlaced-field P picture.
 * idx_mbmode selects intra (<= 1), 1MV (<= 5) or 4MV coding; in field
 * pictures the destination offset is shifted by one picture line for the
 * second (bottom) field (v->cur_field_type).
 * NOTE(review): sampled excerpt — closing braces and the final return
 * between visible lines are not shown.
 */
4027 static int vc1_decode_p_mb_intfi(VC1Context *v)
4029 MpegEncContext *s = &v->s;
4030 GetBitContext *gb = &s->gb;
4032 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4033 int cbp = 0; /* cbp decoding stuff */
4034 int mqdiff, mquant; /* MB quantization */
4035 int ttmb = v->ttfrm; /* MB Transform type */
4037 int mb_has_coeffs = 1; /* last_flag */
4038 int dmv_x, dmv_y; /* Differential MV components */
4039 int val; /* temp values */
4040 int first_block = 1;
4043 int block_cbp = 0, pat, block_tt = 0;
4046 mquant = v->pq; /* Loosy initialization */
4048 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4049 if (idx_mbmode <= 1) { // intra MB
4050 s->mb_intra = v->is_intra[s->mb_x] = 1;
4051 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4052 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4053 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4055 s->current_picture.f.qscale_table[mb_pos] = mquant;
4056 /* Set DC scale - y and c use the same (not sure if necessary here) */
4057 s->y_dc_scale = s->y_dc_scale_table[mquant];
4058 s->c_dc_scale = s->c_dc_scale_table[mquant];
4059 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4060 mb_has_coeffs = idx_mbmode & 1;
4062 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4064 for (i = 0; i < 6; i++) {
4065 s->dc_val[0][s->block_index[i]] = 0;
4066 v->mb_type[0][s->block_index[i]] = 1;
4068 val = ((cbp >> (5 - i)) & 1);
4069 v->a_avail = v->c_avail = 0;
4070 if (i == 2 || i == 3 || !s->first_slice_line)
4071 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4072 if (i == 1 || i == 3 || s->mb_x)
4073 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4075 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4076 (i & 4) ? v->codingset2 : v->codingset);
4077 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4079 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4080 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
/* bottom field: shift output down by one full-picture line */
4081 off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4082 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4083 // TODO: loop filter
4086 s->mb_intra = v->is_intra[s->mb_x] = 0;
4087 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4088 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4089 if (idx_mbmode <= 5) { // 1-MV
4091 if (idx_mbmode & 1) {
4092 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4094 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4096 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: per-block coded pattern, MV decode + luma MC, then chroma MC */
4098 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4099 for (i = 0; i < 6; i++) {
4101 dmv_x = dmv_y = pred_flag = 0;
4102 val = ((v->fourmvbp >> (3 - i)) & 1);
4104 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4106 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4107 vc1_mc_4mv_luma(v, i, 0);
4109 vc1_mc_4mv_chroma(v, 0);
4111 mb_has_coeffs = idx_mbmode & 1;
4114 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4118 s->current_picture.f.qscale_table[mb_pos] = mquant;
4119 if (!v->ttmbf && cbp) {
4120 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4123 for (i = 0; i < 6; i++) {
4124 s->dc_val[0][s->block_index[i]] = 0;
4126 val = ((cbp >> (5 - i)) & 1);
4127 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4128 if (v->cur_field_type)
4129 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4131 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4132 first_block, s->dest[dst_idx] + off,
4133 (i & 4) ? s->uvlinesize : s->linesize,
4134 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4136 block_cbp |= pat << (i << 2);
4137 if (!v->ttmbf && ttmb < 8) ttmb = -1;
4142 if (s->mb_x == s->mb_width - 1)
4143 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4147 /** Decode one B-frame MB (in Main profile)
/** Decode one B-frame MB (in Main profile)
 * Decodes direct/skip flags (raw or from bitplanes), the B-MV type
 * (forward / backward / interpolated / direct), performs bidirectional
 * motion compensation via vc1_pred_b_mv + vc1_b_mc, then decodes intra
 * blocks or inter residuals per block.
 * NOTE(review): sampled excerpt — closing braces and some else branches
 * between visible lines are not shown.
 */
4149 static void vc1_decode_b_mb(VC1Context *v)
4151 MpegEncContext *s = &v->s;
4152 GetBitContext *gb = &s->gb;
4154 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4155 int cbp = 0; /* cbp decoding stuff */
4156 int mqdiff, mquant; /* MB quantization */
4157 int ttmb = v->ttfrm; /* MB Transform type */
4158 int mb_has_coeffs = 0; /* last_flag */
4159 int index, index1; /* LUT indexes */
4160 int val, sign; /* temp values */
4161 int first_block = 1;
4163 int skipped, direct;
4164 int dmv_x[2], dmv_y[2];
4165 int bmvtype = BMV_TYPE_BACKWARD;
4167 mquant = v->pq; /* Loosy initialization */
/* direct/skip flags: raw bits or decoded bitplanes */
4171 direct = get_bits1(gb);
4173 direct = v->direct_mb_plane[mb_pos];
4175 skipped = get_bits1(gb);
4177 skipped = v->s.mbskip_table[mb_pos];
4179 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4180 for (i = 0; i < 6; i++) {
4181 v->mb_type[0][s->block_index[i]] = 0;
4182 s->dc_val[0][s->block_index[i]] = 0;
4184 s->current_picture.f.qscale_table[mb_pos] = 0;
4188 GET_MVDATA(dmv_x[0], dmv_y[0]);
4189 dmv_x[1] = dmv_x[0];
4190 dmv_y[1] = dmv_y[0];
4192 if (skipped || !s->mb_intra) {
4193 bmvtype = decode012(gb);
/* BFRACTION >= 1/2 flips which reference the shorter code maps to */
4196 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4199 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4202 bmvtype = BMV_TYPE_INTERPOLATED;
4203 dmv_x[0] = dmv_y[0] = 0;
4207 for (i = 0; i < 6; i++)
4208 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* skipped MB: predict and motion-compensate, nothing else to decode */
4212 bmvtype = BMV_TYPE_INTERPOLATED;
4213 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4214 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4218 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4221 s->current_picture.f.qscale_table[mb_pos] = mquant;
4223 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4224 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4225 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4226 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4228 if (!mb_has_coeffs && !s->mb_intra) {
4229 /* no coded blocks - effectively skipped */
4230 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4231 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4234 if (s->mb_intra && !mb_has_coeffs) {
4236 s->current_picture.f.qscale_table[mb_pos] = mquant;
4237 s->ac_pred = get_bits1(gb);
4239 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* interpolated mode carries a second MV differential */
4241 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4242 GET_MVDATA(dmv_x[0], dmv_y[0]);
4243 if (!mb_has_coeffs) {
4244 /* interpolated skipped block */
4245 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4246 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4250 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4252 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4255 s->ac_pred = get_bits1(gb);
4256 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4258 s->current_picture.f.qscale_table[mb_pos] = mquant;
4259 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4260 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* per-block decode: intra block or inter residual */
4264 for (i = 0; i < 6; i++) {
4265 s->dc_val[0][s->block_index[i]] = 0;
4267 val = ((cbp >> (5 - i)) & 1);
4268 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4269 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4271 /* check if prediction blocks A and C are available */
4272 v->a_avail = v->c_avail = 0;
4273 if (i == 2 || i == 3 || !s->first_slice_line)
4274 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4275 if (i == 1 || i == 3 || s->mb_x)
4276 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4278 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4279 (i & 4) ? v->codingset2 : v->codingset);
4280 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4282 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4284 for (j = 0; j < 64; j++)
4285 s->block[i][j] <<= 1;
4286 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4288 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4289 first_block, s->dest[dst_idx] + off,
4290 (i & 4) ? s->uvlinesize : s->linesize,
4291 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4292 if (!v->ttmbf && ttmb < 8)
4299 /** Decode one B-frame MB (in interlaced field B picture)
4301 static void vc1_decode_b_mb_intfi(VC1Context *v)
4303 MpegEncContext *s = &v->s;
4304 GetBitContext *gb = &s->gb;
4306 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4307 int cbp = 0; /* cbp decoding stuff */
4308 int mqdiff, mquant; /* MB quantization */
4309 int ttmb = v->ttfrm; /* MB Transform type */
4310 int mb_has_coeffs = 0; /* last_flag */
4311 int val; /* temp value */
4312 int first_block = 1;
4315 int dmv_x[2], dmv_y[2], pred_flag[2];
4316 int bmvtype = BMV_TYPE_BACKWARD;
4317 int idx_mbmode, interpmvp;
4319 mquant = v->pq; /* Loosy initialization */
4322 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4323 if (idx_mbmode <= 1) { // intra MB
4324 s->mb_intra = v->is_intra[s->mb_x] = 1;
4325 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4326 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4327 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4329 s->current_picture.f.qscale_table[mb_pos] = mquant;
4330 /* Set DC scale - y and c use the same (not sure if necessary here) */
4331 s->y_dc_scale = s->y_dc_scale_table[mquant];
4332 s->c_dc_scale = s->c_dc_scale_table[mquant];
4333 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4334 mb_has_coeffs = idx_mbmode & 1;
4336 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4338 for (i = 0; i < 6; i++) {
4339 s->dc_val[0][s->block_index[i]] = 0;
4341 val = ((cbp >> (5 - i)) & 1);
4342 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4343 v->a_avail = v->c_avail = 0;
4344 if (i == 2 || i == 3 || !s->first_slice_line)
4345 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4346 if (i == 1 || i == 3 || s->mb_x)
4347 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4349 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4350 (i & 4) ? v->codingset2 : v->codingset);
4351 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4353 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4355 for (j = 0; j < 64; j++)
4356 s->block[i][j] <<= 1;
4357 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4358 off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4359 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4360 // TODO: yet to perform loop filter
4363 s->mb_intra = v->is_intra[s->mb_x] = 0;
4364 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4365 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4367 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4369 fwd = v->forward_mb_plane[mb_pos];
4370 if (idx_mbmode <= 5) { // 1-MV
4371 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4372 pred_flag[0] = pred_flag[1] = 0;
4374 bmvtype = BMV_TYPE_FORWARD;
4376 bmvtype = decode012(gb);
4379 bmvtype = BMV_TYPE_BACKWARD;
4382 bmvtype = BMV_TYPE_DIRECT;
4385 bmvtype = BMV_TYPE_INTERPOLATED;
4386 interpmvp = get_bits1(gb);
4389 v->bmvtype = bmvtype;
4390 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4391 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4393 if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4394 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4396 if (bmvtype == BMV_TYPE_DIRECT) {
4397 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4398 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4400 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4401 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4402 mb_has_coeffs = !(idx_mbmode & 2);
4405 bmvtype = BMV_TYPE_FORWARD;
4406 v->bmvtype = bmvtype;
4407 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4408 for (i = 0; i < 6; i++) {
4410 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4411 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4412 val = ((v->fourmvbp >> (3 - i)) & 1);
4414 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4415 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4416 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4418 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4419 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4421 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4423 mb_has_coeffs = idx_mbmode & 1;
4426 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4430 s->current_picture.f.qscale_table[mb_pos] = mquant;
4431 if (!v->ttmbf && cbp) {
4432 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4435 for (i = 0; i < 6; i++) {
4436 s->dc_val[0][s->block_index[i]] = 0;
4438 val = ((cbp >> (5 - i)) & 1);
4439 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4440 if (v->cur_field_type)
4441 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4443 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4444 first_block, s->dest[dst_idx] + off,
4445 (i & 4) ? s->uvlinesize : s->linesize,
4446 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4447 if (!v->ttmbf && ttmb < 8)
4455 /** Decode blocks of I-frame
 * Simple/Main-profile I-frame path: fixed per-frame quantizer (v->pq),
 * per-MB coded-block pattern + AC-prediction flag, 8x8 inverse transform,
 * optional overlap smoothing and in-loop filtering.
 */
4457 static void vc1_decode_i_blocks(VC1Context *v)
4460 MpegEncContext *s = &v->s;
4465 /* select codingmode used for VLC tables selection */
4466 switch (v->y_ac_table_index) {
4468 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4471 v->codingset = CS_HIGH_MOT_INTRA;
4474 v->codingset = CS_MID_RATE_INTRA;
4478 switch (v->c_ac_table_index) {
4480 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4483 v->codingset2 = CS_HIGH_MOT_INTER;
4486 v->codingset2 = CS_MID_RATE_INTER;
4490 /* Set DC scale - y and c use the same */
4491 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4492 s->c_dc_scale = s->c_dc_scale_table[v->pq];
/* walk the whole frame one macroblock at a time */
4495 s->mb_x = s->mb_y = 0;
4497 s->first_slice_line = 1;
4498 for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4500 ff_init_block_index(s);
4501 for (; s->mb_x < s->mb_width; s->mb_x++) {
4503 ff_update_block_index(s);
/* destinations of the four 8x8 luma blocks (2x2 grid) and the two chroma blocks */
4504 dst[0] = s->dest[0];
4505 dst[1] = dst[0] + 8;
4506 dst[2] = s->dest[0] + s->linesize * 8;
4507 dst[3] = dst[2] + 8;
4508 dst[4] = s->dest[1];
4509 dst[5] = s->dest[2];
4510 s->dsp.clear_blocks(s->block[0]);
4511 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4512 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4513 s->current_picture.f.qscale_table[mb_pos] = v->pq;
/* I-frame MBs carry no motion; zero the stored vector */
4514 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4515 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4517 // do actual MB decoding and displaying
4518 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4519 v->s.ac_pred = get_bits1(&v->s.gb);
4521 for (k = 0; k < 6; k++) {
4522 val = ((cbp >> (5 - k)) & 1);
/* coded-block flag is differentially coded against neighbors */
4525 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4529 cbp |= val << (5 - k);
4531 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
/* skip chroma reconstruction in grayscale-only mode */
4533 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4535 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4536 if (v->pq >= 9 && v->overlap) {
/* overlap path: samples stay signed; scale by 2 before clamped store */
4538 for (j = 0; j < 64; j++)
4539 s->block[k][j] <<= 1;
4540 s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
/* no-overlap path: remove the +64 bias, then scale and store unsigned */
4543 for (j = 0; j < 64; j++)
4544 s->block[k][j] = (s->block[k][j] - 64) << 1;
4545 s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
/* overlap smoothing across the 8x8 block edges (horizontal, then vertical
 * for all rows but the first slice line) */
4549 if (v->pq >= 9 && v->overlap) {
4551 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4552 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4553 if (!(s->flags & CODEC_FLAG_GRAY)) {
4554 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4555 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4558 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4559 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4560 if (!s->first_slice_line) {
4561 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4562 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4563 if (!(s->flags & CODEC_FLAG_GRAY)) {
4564 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4565 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4568 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4569 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4571 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* bail out (with error concealment) if we read past the picture payload */
4573 if (get_bits_count(&s->gb) > v->bits) {
4574 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4575 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4576 get_bits_count(&s->gb), v->bits);
/* with the loop filter active, output lags one MB row behind */
4580 if (!v->s.loop_filter)
4581 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4583 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4585 s->first_slice_line = 0;
4587 if (v->s.loop_filter)
4588 ff_draw_horiz_band(s, (s->mb_height - 1) * 16, 16);
4589 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
4592 /** Decode blocks of I-frame for advanced profile
 * Differences vs. the simple-profile path: per-MB quantizer (mquant),
 * raw/bitplane-coded FIELDTX / ACPRED / OVERFLAGS syntax, delayed
 * (one-MB-behind) output via vc1_put_signed_blocks_clamped(), and
 * slice support (start_mb_y/end_mb_y).
 */
4594 static void vc1_decode_i_blocks_adv(VC1Context *v)
4597 MpegEncContext *s = &v->s;
4603 GetBitContext *gb = &s->gb;
4605 /* select codingmode used for VLC tables selection */
4606 switch (v->y_ac_table_index) {
4608 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4611 v->codingset = CS_HIGH_MOT_INTRA;
4614 v->codingset = CS_MID_RATE_INTRA;
4618 switch (v->c_ac_table_index) {
4620 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4623 v->codingset2 = CS_HIGH_MOT_INTER;
4626 v->codingset2 = CS_MID_RATE_INTER;
4631 s->mb_x = s->mb_y = 0;
4633 s->first_slice_line = 1;
4634 s->mb_y = s->start_mb_y;
/* non-first slice: clear the coded-block predictors of the row above so
 * DC/CBP prediction does not leak across the slice boundary */
4635 if (s->start_mb_y) {
4637 ff_init_block_index(s);
4638 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4639 (1 + s->b8_stride) * sizeof(*s->coded_block));
4641 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4643 ff_init_block_index(s);
4644 for (;s->mb_x < s->mb_width; s->mb_x++) {
/* decode into a rotating block buffer; output is emitted one MB late */
4645 DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
4646 ff_update_block_index(s);
4647 s->dsp.clear_blocks(block[0]);
4648 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4649 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4650 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4651 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4653 // do actual MB decoding and displaying
/* raw-coded bitplanes are read per MB; otherwise the plane was decoded upfront */
4654 if (v->fieldtx_is_raw)
4655 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4656 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4657 if ( v->acpred_is_raw)
4658 v->s.ac_pred = get_bits1(&v->s.gb);
4660 v->s.ac_pred = v->acpred_plane[mb_pos];
4662 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4663 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4667 s->current_picture.f.qscale_table[mb_pos] = mquant;
4668 /* Set DC scale - y and c use the same */
4669 s->y_dc_scale = s->y_dc_scale_table[mquant];
4670 s->c_dc_scale = s->c_dc_scale_table[mquant];
4672 for (k = 0; k < 6; k++) {
4673 val = ((cbp >> (5 - k)) & 1);
4676 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4680 cbp |= val << (5 - k);
/* neighbor availability for AC/DC prediction: blocks 2/3 always have a
 * top neighbor inside the MB, blocks 1/3 always have a left neighbor */
4682 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4683 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4685 vc1_decode_i_block_adv(v, block[k], k, val,
4686 (k < 4) ? v->codingset : v->codingset2, mquant);
4688 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4690 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
4693 vc1_smooth_overlap_filter_iblk(v);
4694 vc1_put_signed_blocks_clamped(v);
4695 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4697 if (get_bits_count(&s->gb) > v->bits) {
4698 // TODO: may need modification to handle slice coding
4699 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4700 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4701 get_bits_count(&s->gb), v->bits);
4705 if (!v->s.loop_filter)
4706 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4708 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4709 s->first_slice_line = 0;
4712 /* raw bottom MB row */
/* flush the delayed output/loop-filter pipeline for the last MB row */
4714 ff_init_block_index(s);
4715 for (;s->mb_x < s->mb_width; s->mb_x++) {
4716 ff_update_block_index(s);
4717 vc1_put_signed_blocks_clamped(v);
4718 if (v->s.loop_filter)
4719 vc1_loop_filter_iblk_delayed(v, v->pq);
4721 if (v->s.loop_filter)
4722 ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
/* field pictures cover half the MB rows, hence the field_mode shift */
4723 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4724 (s->end_mb_y << v->field_mode) - 1, (AC_END|DC_END|MV_END));
/* Decode all macroblocks of a P-frame, dispatching on the frame coding
 * mode (fcm): interlaced field, interlaced frame, or progressive. */
4727 static void vc1_decode_p_blocks(VC1Context *v)
4729 MpegEncContext *s = &v->s;
4730 int apply_loop_filter;
4732 /* select codingmode used for VLC tables selection */
/* NOTE(review): both switches key off c_ac_table_index (not
 * y_ac_table_index) — for inter pictures luma and chroma share the
 * table index; confirm against the other *_decode_*_blocks variants */
4733 switch (v->c_ac_table_index) {
4735 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4738 v->codingset = CS_HIGH_MOT_INTRA;
4741 v->codingset = CS_MID_RATE_INTRA;
4745 switch (v->c_ac_table_index) {
4747 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4750 v->codingset2 = CS_HIGH_MOT_INTER;
4753 v->codingset2 = CS_MID_RATE_INTER;
4757 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4758 s->first_slice_line = 1;
4759 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4760 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4762 ff_init_block_index(s);
4763 for (; s->mb_x < s->mb_width; s->mb_x++) {
4764 ff_update_block_index(s);
/* fcm: 2 = interlaced field, 1 = interlaced frame, 0 = progressive */
4767 vc1_decode_p_mb_intfi(v);
4768 else if (v->fcm == 1)
4769 vc1_decode_p_mb_intfr(v);
4770 else vc1_decode_p_mb(v);
/* deferred loop filter: progressive only, and one row behind */
4771 if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == 0)
4772 vc1_apply_p_loop_filter(v);
/* < 0 catches bit-counter overflow on corrupt streams */
4773 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4774 // TODO: may need modification to handle slice coding
4775 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4776 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4777 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
/* shift per-row caches (CBP, TT blocks, intra flags, chroma MVs) so the
 * just-decoded row becomes the "previous row" for prediction */
4781 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4782 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4783 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4784 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4785 if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4786 s->first_slice_line = 0;
/* filter the final MB row that the one-row-delayed filter skipped */
4788 if (apply_loop_filter) {
4790 ff_init_block_index(s);
4791 for (; s->mb_x < s->mb_width; s->mb_x++) {
4792 ff_update_block_index(s);
4793 vc1_apply_p_loop_filter(v);
4796 if (s->end_mb_y >= s->start_mb_y)
4797 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4798 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4799 (s->end_mb_y << v->field_mode) - 1, (AC_END|DC_END|MV_END));
/* Decode all macroblocks of a B-frame. Structure mirrors
 * vc1_decode_p_blocks() but without the deferred P loop filter. */
4802 static void vc1_decode_b_blocks(VC1Context *v)
4804 MpegEncContext *s = &v->s;
4806 /* select codingmode used for VLC tables selection */
/* inter pictures: c_ac_table_index selects both coding sets */
4807 switch (v->c_ac_table_index) {
4809 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4812 v->codingset = CS_HIGH_MOT_INTRA;
4815 v->codingset = CS_MID_RATE_INTRA;
4819 switch (v->c_ac_table_index) {
4821 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4824 v->codingset2 = CS_HIGH_MOT_INTER;
4827 v->codingset2 = CS_MID_RATE_INTER;
4831 s->first_slice_line = 1;
4832 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4834 ff_init_block_index(s);
4835 for (; s->mb_x < s->mb_width; s->mb_x++) {
4836 ff_update_block_index(s);
/* interlaced-field B MB; other fcm variants handled on elided branches */
4839 vc1_decode_b_mb_intfi(v);
/* < 0 catches bit-counter overflow on corrupt streams */
4842 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4843 // TODO: may need modification to handle slice coding
4844 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4845 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4846 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4849 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* with the loop filter active, output lags one MB row behind */
4851 if (!v->s.loop_filter)
4852 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4854 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4855 s->first_slice_line = 0;
4857 if (v->s.loop_filter)
4858 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4859 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4860 (s->end_mb_y << v->field_mode) - 1, (AC_END|DC_END|MV_END));
/* Handle a skipped P-frame: copy the previous picture row by row into
 * the current output and present it as a P picture. */
4863 static void vc1_decode_skip_blocks(VC1Context *v)
4865 MpegEncContext *s = &v->s;
/* whole frame is "decoded" at once; mark all MBs done for error resilience */
4867 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, (AC_END|DC_END|MV_END));
4868 s->first_slice_line = 1;
4869 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4871 ff_init_block_index(s);
4872 ff_update_block_index(s);
/* copy one 16-pixel-high luma stripe and the matching 8-high chroma stripes */
4873 memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4874 memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4875 memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4876 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4877 s->first_slice_line = 0;
4879 s->pict_type = AV_PICTURE_TYPE_P;
/* Top-level block-layer dispatcher: route the picture to the decoder
 * matching its type (I/P/B) and profile (simple/main vs. advanced). */
4882 static void vc1_decode_blocks(VC1Context *v)
4885 v->s.esc3_level_length = 0;
/* X8-coded intra frame (WMV2-style) is handled by the IntraX8 decoder */
4887 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
/* reset the rotating block-buffer indices used by the adv. I path */
4890 v->left_blk_idx = -1;
4891 v->topleft_blk_idx = 1;
4893 switch (v->s.pict_type) {
4894 case AV_PICTURE_TYPE_I:
4895 if (v->profile == PROFILE_ADVANCED)
4896 vc1_decode_i_blocks_adv(v);
4898 vc1_decode_i_blocks(v);
4900 case AV_PICTURE_TYPE_P:
4901 if (v->p_frame_skipped)
4902 vc1_decode_skip_blocks(v);
4904 vc1_decode_p_blocks(v);
4906 case AV_PICTURE_TYPE_B:
/* BI pictures are decoded with the I-frame machinery */
4908 if (v->profile == PROFILE_ADVANCED)
4909 vc1_decode_i_blocks_adv(v);
4911 vc1_decode_i_blocks(v);
4913 vc1_decode_b_blocks(v);
4919 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
/* Parsed sprite (WMV3Image/VC1Image) header data. */
4923 * Transform coefficients for both sprites in 16.16 fixed point format,
4924 * in the order they appear in the bitstream:
4926 * rotation 1 (unused)
4928 * rotation 2 (unused)
4935 int effect_type, effect_flag;
4936 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
4937 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
4940 static inline int get_fp_val(GetBitContext* gb)
4942 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
/* Parse one sprite affine-transform coefficient set c[0..6] (16.16 fixed
 * point). A 2-bit mode selects how many of the scale/rotation terms are
 * explicitly coded; translation (c[2], c[5]) and opacity (c[6]) follow. */
4945 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4949 switch (get_bits(gb, 2)) {
/* mode 0: identity scale, translation only */
4952 c[2] = get_fp_val(gb);
/* mode 1: single shared scale for x and y */
4956 c[0] = c[4] = get_fp_val(gb);
4957 c[2] = get_fp_val(gb);
/* mode 2: independent x scale, x translation, y scale */
4960 c[0] = get_fp_val(gb);
4961 c[2] = get_fp_val(gb);
4962 c[4] = get_fp_val(gb);
/* mode 3: full coefficient set including rotation terms c[1]/c[3] */
4965 c[0] = get_fp_val(gb);
4966 c[1] = get_fp_val(gb);
4967 c[2] = get_fp_val(gb);
4968 c[3] = get_fp_val(gb);
4969 c[4] = get_fp_val(gb);
/* y translation and opacity are always present */
4972 c[5] = get_fp_val(gb);
4974 c[6] = get_fp_val(gb);
/* Parse the sprite header: one or two transform coefficient sets,
 * optional effect parameters, and sanity-check bitstream consumption. */
4979 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4981 AVCodecContext *avctx = v->s.avctx;
4984 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4985 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
/* rotation is not implemented anywhere in the draw path */
4986 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4987 av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4988 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
4989 for (i = 0; i < 7; i++)
4990 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4991 sd->coefs[sprite][i] / (1<<16),
4992 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4993 av_log(avctx, AV_LOG_DEBUG, "\n");
/* NOTE(review): assignment inside the condition is intentional but
 * unparenthesized — compilers will warn (-Wparentheses); same at 5036 */
4997 if (sd->effect_type = get_bits_long(gb, 30)) {
4998 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
5000 vc1_sprite_parse_transform(gb, sd->effect_params1);
5003 vc1_sprite_parse_transform(gb, sd->effect_params1);
5004 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5007 for (i = 0; i < sd->effect_pcount1; i++)
5008 sd->effect_params1[i] = get_fp_val(gb);
5010 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5011 // effect 13 is simple alpha blending and matches the opacity above
5012 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5013 for (i = 0; i < sd->effect_pcount1; i++)
5014 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5015 sd->effect_params1[i] / (1 << 16),
5016 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5017 av_log(avctx, AV_LOG_DEBUG, "\n");
5020 sd->effect_pcount2 = get_bits(gb, 16);
/* effect_params2[] has room for 10 entries — reject anything larger */
5021 if (sd->effect_pcount2 > 10) {
5022 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5024 } else if (sd->effect_pcount2) {
5026 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5027 while (++i < sd->effect_pcount2) {
5028 sd->effect_params2[i] = get_fp_val(gb);
5029 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5030 sd->effect_params2[i] / (1 << 16),
5031 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5033 av_log(avctx, AV_LOG_DEBUG, "\n");
5036 if (sd->effect_flag = get_bits1(gb))
5037 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
/* WMV3IMAGE carries 64 bits of trailing padding we are allowed to eat */
5039 if (get_bits_count(gb) >= gb->size_in_bits +
5040 (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
5041 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5042 if (get_bits_count(gb) < gb->size_in_bits - 8)
5043 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
/* Render one or two sprites into v->sprite_output_frame: horizontal
 * scaling through a two-row cache (sr_rows), then vertical interpolation
 * and (for two sprites) alpha blending per output row. */
5046 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5048 int i, plane, row, sprite;
/* sr_cache[sprite][0/1] remembers which source lines currently sit in
 * v->sr_rows; -1 = empty */
5049 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5050 uint8_t* src_h[2][2];
5051 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5053 MpegEncContext *s = &v->s;
/* derive clamped 16.16 offsets/steps from the parsed transform coeffs */
5055 for (i = 0; i < 2; i++) {
5056 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5057 xadv[i] = sd->coefs[i][0];
5058 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5059 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5061 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5062 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
/* blend factor between the two sprites, from sprite 2's opacity coeff */
5064 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5066 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5067 int width = v->output_width>>!!plane;
5069 for (row = 0; row < v->output_height>>!!plane; row++) {
5070 uint8_t *dst = v->sprite_output_frame.data[plane] +
5071 v->sprite_output_frame.linesize[plane] * row;
5073 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
/* sprite 0 reads the current picture; the elided branch below switches
 * to last_picture for the second sprite */
5074 uint8_t *iplane = s->current_picture.f.data[plane];
5075 int iline = s->current_picture.f.linesize[plane];
5076 int ycoord = yoff[sprite] + yadv[sprite] * row;
5077 int yline = ycoord >> 16;
5078 ysub[sprite] = ycoord & 0xFFFF;
5080 iplane = s->last_picture.f.data[plane];
5081 iline = s->last_picture.f.linesize[plane];
/* fast path: no horizontal scaling needed, read the source directly */
5083 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5084 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5086 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + (yline + 1) * iline;
/* slow path: horizontally scale into sr_rows, reusing cached lines */
5088 if (sr_cache[sprite][0] != yline) {
5089 if (sr_cache[sprite][1] == yline) {
5090 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5091 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5093 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5094 sr_cache[sprite][0] = yline;
/* second row only needed when vertically interpolating (ysub != 0) */
5097 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5098 v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane + (yline + 1) * iline, xoff[sprite], xadv[sprite], width);
5099 sr_cache[sprite][1] = yline + 1;
5101 src_h[sprite][0] = v->sr_rows[sprite][0];
5102 src_h[sprite][1] = v->sr_rows[sprite][1];
/* vertical stage: single sprite with/without interpolation, or the
 * two-sprite blend variants depending on which sprites need scaling */
5106 if (!v->two_sprites) {
5108 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5110 memcpy(dst, src_h[0][0], width);
5113 if (ysub[0] && ysub[1]) {
5114 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5115 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5116 } else if (ysub[0]) {
5117 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5118 src_h[1][0], alpha, width);
5119 } else if (ysub[1]) {
/* sprites swapped, so the blend factor must be inverted */
5120 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5121 src_h[0][0], (1<<16)-1-alpha, width);
5123 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5129 for (i = 0; i < 2; i++) {
/* Parse sprite headers for the current packet, (re)acquire the dedicated
 * sprite output frame and render into it. Returns negative on failure
 * (no decoded sprite picture, or buffer allocation error). */
5139 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5141 MpegEncContext *s = &v->s;
5142 AVCodecContext *avctx = s->avctx;
5145 vc1_parse_sprites(v, gb, &sd);
/* sprite 1 lives in current_picture; without it there is nothing to draw */
5147 if (!s->current_picture.f.data[0]) {
5148 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
/* sprite 2 (when used) comes from last_picture; missing one is non-fatal */
5152 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5153 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
/* release any previous output buffer before requesting a fresh one */
5157 if (v->sprite_output_frame.data[0])
5158 avctx->release_buffer(avctx, &v->sprite_output_frame);
5160 v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
5161 v->sprite_output_frame.reference = 0;
5162 if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
5163 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5167 vc1_draw_sprites(v, &sd);
/* Flush callback for the sprite (WMImage) decoders: blank the pending
 * sprite picture so a seek does not display half-converged data. */
5172 static void vc1_sprite_flush(AVCodecContext *avctx)
5174 VC1Context *v = avctx->priv_data;
5175 MpegEncContext *s = &v->s;
5176 AVFrame *f = &s->current_picture.f;
5179 /* Windows Media Image codecs have a convergence interval of two keyframes.
5180 Since we can't enforce it, clear to black the missing sprite. This is
5181 wrong but it looks better than doing nothing. */
5184 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5185 for (i = 0; i < v->sprite_height>>!!plane; i++)
/* luma cleared to 0, chroma to 128 (neutral gray in YUV) */
5186 memset(f->data[plane] + i * f->linesize[plane],
5187 plane ? 128 : 0, f->linesize[plane]);
/* Allocate all per-picture side tables (bitplanes, CBP/TT caches,
 * MB/block type maps, field-MV tables). Returns -1 on allocation
 * failure detected by the final check. */
5192 static av_cold int vc1_decode_init_alloc_tables(VC1Context *v)
5194 MpegEncContext *s = &v->s;
5197 /* Allocate mb bitplanes */
5198 v->mv_type_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5199 v->direct_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5200 v->forward_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5201 v->fieldtx_plane = av_mallocz(s->mb_stride * s->mb_height);
5202 v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5203 v->over_flags_plane = av_malloc (s->mb_stride * s->mb_height);
/* +2 extra MBs for the delayed-output block buffers (left/topleft) */
5205 v->n_allocated_blks = s->mb_width + 2;
5206 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
/* two-row (current + previous) caches; the *_base/offset split lets the
 * decoder index the previous row with negative-free arithmetic */
5207 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5208 v->cbp = v->cbp_base + s->mb_stride;
5209 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5210 v->ttblk = v->ttblk_base + s->mb_stride;
5211 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5212 v->is_intra = v->is_intra_base + s->mb_stride;
5213 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5214 v->luma_mv = v->luma_mv_base + s->mb_stride;
5216 /* allocate block type info in that way so it could be used with s->block_index[] */
5217 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5218 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5219 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5220 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5222 /* allocate memory to store block level MV info */
5223 v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5224 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5225 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5226 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5227 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5228 v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5229 v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5230 v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5231 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5232 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5233 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5235 /* Init coded blocks info */
5236 if (v->profile == PROFILE_ADVANCED) {
5237 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5239 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5243 ff_intrax8_common_init(&v->x8,s);
5245 if (s->avctx->codec_id == CODEC_ID_WMV3IMAGE || s->avctx->codec_id == CODEC_ID_VC1IMAGE) {
5246 for (i = 0; i < 4; i++)
5247 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
/* NOTE(review): the final NULL check below does not cover mb_type_base,
 * blk_mv_type_base or the mv_f_* tables — an OOM on those would go
 * undetected here; verify against the cleanup in vc1_decode_end() */
5250 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5251 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5258 /** Initialize a VC1/WMV3 decoder
5259 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5260 * @todo TODO: Decypher remaining bits in extra_data
 *
 * Parses the extradata (WMV3: raw sequence header; VC-1: marker-delimited
 * SEQHDR + ENTRYPOINT units), sets up pixel format / hwaccel, scan tables
 * and — for the image codecs — the sprite geometry.
 */
5262 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5264 VC1Context *v = avctx->priv_data;
5265 MpegEncContext *s = &v->s;
5269 /* save the container output size for WMImage */
5270 v->output_width = avctx->width;
5271 v->output_height = avctx->height;
/* sequence header is mandatory; it only ever arrives via extradata */
5273 if (!avctx->extradata_size || !avctx->extradata)
5275 if (!(avctx->flags & CODEC_FLAG_GRAY))
5276 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5278 avctx->pix_fmt = PIX_FMT_GRAY8;
5279 avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
5281 avctx->flags |= CODEC_FLAG_EMU_EDGE;
5282 v->s.flags |= CODEC_FLAG_EMU_EDGE;
5284 if (avctx->idct_algo == FF_IDCT_AUTO) {
5285 avctx->idct_algo = FF_IDCT_WMV2;
5288 if (vc1_init_common(v) < 0)
5290 ff_vc1dsp_init(&v->vc1dsp);
5292 if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE) {
5295 // looks like WMV3 has a sequence header stored in the extradata
5296 // advanced sequence header may be before the first frame
5297 // the last byte of the extradata is a version number, 1 for the
5298 // samples we can decode
5300 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5302 if (vc1_decode_sequence_header(avctx, v, &gb) < 0)
/* report leftover / overread extradata bits for diagnostics only */
5305 count = avctx->extradata_size*8 - get_bits_count(&gb);
5307 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5308 count, get_bits(&gb, count));
5309 } else if (count < 0) {
5310 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5312 } else { // VC1/WVC1/WVP2
/* advanced profile: walk marker-delimited units, unescaping each */
5313 const uint8_t *start = avctx->extradata;
5314 uint8_t *end = avctx->extradata + avctx->extradata_size;
5315 const uint8_t *next;
5316 int size, buf2_size;
5317 uint8_t *buf2 = NULL;
5318 int seq_initialized = 0, ep_initialized = 0;
5320 if (avctx->extradata_size < 16) {
5321 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5325 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5326 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5328 for (; next < end; start = next) {
5329 next = find_next_marker(start + 4, end);
5330 size = next - start - 4;
5333 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5334 init_get_bits(&gb, buf2, buf2_size * 8);
5335 switch (AV_RB32(start)) {
5336 case VC1_CODE_SEQHDR:
5337 if (vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5341 seq_initialized = 1;
5343 case VC1_CODE_ENTRYPOINT:
5344 if (vc1_decode_entry_point(avctx, v, &gb) < 0) {
/* both a sequence header and an entry point are required to decode */
5353 if (!seq_initialized || !ep_initialized) {
5354 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5357 v->res_sprite = (avctx->codec_tag == MKTAG('W','V','P','2'));
5360 avctx->profile = v->profile;
5361 if (v->profile == PROFILE_ADVANCED)
5362 avctx->level = v->level;
5364 avctx->has_b_frames = !!(avctx->max_b_frames);
5366 s->mb_width = (avctx->coded_width + 15) >> 4;
5367 s->mb_height = (avctx->coded_height + 15) >> 4;
/* build transposed zigzag tables (VC-1 scans column-first vs. WMV1) */
5369 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5370 for (i = 0; i < 64; i++) {
5371 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5372 v->zz_8x8[0][i] = transpose(wmv1_scantable[0][i]);
5373 v->zz_8x8[1][i] = transpose(wmv1_scantable[1][i]);
5374 v->zz_8x8[2][i] = transpose(wmv1_scantable[2][i]);
5375 v->zz_8x8[3][i] = transpose(wmv1_scantable[3][i]);
5376 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5381 memcpy(v->zz_8x8, wmv1_scantable, 4*64);
/* image codecs: coded size is the sprite size, displayed size is the
 * container size saved above */
5386 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5387 v->sprite_width = avctx->coded_width;
5388 v->sprite_height = avctx->coded_height;
5390 avctx->coded_width = avctx->width = v->output_width;
5391 avctx->coded_height = avctx->height = v->output_height;
5393 // prevent 16.16 overflows
5394 if (v->sprite_width > 1 << 14 ||
5395 v->sprite_height > 1 << 14 ||
5396 v->output_width > 1 << 14 ||
5397 v->output_height > 1 << 14) return -1;
5402 /** Close a VC1/WMV3 decoder
5403 * @warning Initial try at using MpegEncContext stuff
 *
 * Releases the sprite output frame (image codecs only), the horizontal
 * scaling rows, and every table allocated in
 * vc1_decode_init_alloc_tables(), then tears down the shared
 * MpegEncContext and IntraX8 state.
 */
5405 static av_cold int vc1_decode_end(AVCodecContext *avctx)
5407 VC1Context *v = avctx->priv_data;
5410 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5411 && v->sprite_output_frame.data[0])
5412 avctx->release_buffer(avctx, &v->sprite_output_frame);
5413 for (i = 0; i < 4; i++)
5414 av_freep(&v->sr_rows[i >> 1][i & 1]);
5415 av_freep(&v->hrd_rate);
5416 av_freep(&v->hrd_buffer);
5417 MPV_common_end(&v->s);
/* free the *_base pointers, never the offset aliases derived from them */
5418 av_freep(&v->mv_type_mb_plane);
5419 av_freep(&v->direct_mb_plane);
5420 av_freep(&v->forward_mb_plane);
5421 av_freep(&v->fieldtx_plane);
5422 av_freep(&v->acpred_plane);
5423 av_freep(&v->over_flags_plane);
5424 av_freep(&v->mb_type_base);
5425 av_freep(&v->blk_mv_type_base);
5426 av_freep(&v->mv_f_base);
5427 av_freep(&v->mv_f_last_base);
5428 av_freep(&v->mv_f_next_base);
5429 av_freep(&v->block);
5430 av_freep(&v->cbp_base);
5431 av_freep(&v->ttblk_base);
5432 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5433 av_freep(&v->luma_mv_base);
5434 ff_intrax8_common_end(&v->x8);
5439 /** Decode a VC1/WMV3 frame
5440 * @todo TODO: Handle VC-1 IDUs (Transport level?)
/* Top-level per-packet entry point: unescapes/splits the bitstream into
 * frame/field/slice units, parses the frame header, decodes all slices
 * (via software, VDPAU or a generic hwaccel), then selects which picture
 * to output in *pict.
 * NOTE(review): this listing is an excerpt — many intervening lines
 * (error labels, 'break' statements, allocation checks, variable
 * declarations such as 'slices', 'size', 'buf_size2/3', 'tmp') are elided
 * between the numbered lines. Comments below describe only what the
 * visible code demonstrates. */
5442 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5443 int *data_size, AVPacket *avpkt)
5445 const uint8_t *buf = avpkt->data;
5446 int buf_size = avpkt->size, n_slices = 0, i;
5447 VC1Context *v = avctx->priv_data;
5448 MpegEncContext *s = &v->s;
5449 AVFrame *pict = data;
5450 uint8_t *buf2 = NULL;
5451 uint8_t *buf_field2 = NULL;
5452 const uint8_t *buf_start = buf;
5453 int mb_height, n_slices1;
5460 /* no supplementary picture */
5461 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5462 /* special case for last picture */
5463 if (s->low_delay == 0 && s->next_picture_ptr) {
5464 *pict = *(AVFrame*)s->next_picture_ptr;
5465 s->next_picture_ptr = NULL;
5467 *data_size = sizeof(AVFrame);
/* VDPAU decoding advertises its own pixel formats, chosen by profile. */
5473 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5474 if (v->profile < PROFILE_ADVANCED)
5475 avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
5477 avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
5480 //for advanced profile we may need to parse and unescape data
5481 if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5483 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
/* Marker-led packets: walk the start codes and dispatch each bitstream
 * data unit (frame / field / entry point / slice), unescaping each payload
 * into its own buffer. NOTE(review): 'next' is presumably initialized to
 * 'buf' in an elided line before the loop condition reads it — confirm. */
5485 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5486 const uint8_t *start, *end, *next;
5490 for (start = buf, end = buf + buf_size; next < end; start = next) {
5491 next = find_next_marker(start + 4, end);
5492 size = next - start - 4;
5493 if (size <= 0) continue;
5494 switch (AV_RB32(start)) {
5495 case VC1_CODE_FRAME:
5496 if (avctx->hwaccel ||
5497 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5499 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
/* A FIELD unit is recorded as an extra "slice" entry so the second field
 * is decoded by the same per-slice loop below. */
5501 case VC1_CODE_FIELD: {
5503 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5506 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5507 if (!slices[n_slices].buf)
5509 buf_size3 = vc1_unescape_buffer(start + 4, size,
5510 slices[n_slices].buf);
5511 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5513 /* assuming that the field marker is at the exact middle,
5514 hope it's correct */
5515 slices[n_slices].mby_start = s->mb_height >> 1;
5516 n_slices1 = n_slices - 1; // index of the last slice of the first field
5518 // not necessary, ad hoc until I find a way to handle WVC1i
5519 buf_field2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5520 vc1_unescape_buffer(start + 4, size, buf_field2);
5523 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5524 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5525 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5526 vc1_decode_entry_point(avctx, v, &s->gb);
/* SLICE units carry their starting MB row in a 9-bit field of the slice
 * header, read here after unescaping. */
5528 case VC1_CODE_SLICE: {
5530 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5533 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5534 if (!slices[n_slices].buf)
5536 buf_size3 = vc1_unescape_buffer(start + 4, size,
5537 slices[n_slices].buf);
5538 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5540 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5546 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5547 const uint8_t *divider;
5549 divider = find_next_marker(buf, buf + buf_size);
5550 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5551 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5553 } else { // found field marker, unescape second field
5554 buf_field2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5555 vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, buf_field2);
5557 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5559 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5561 init_get_bits(&s->gb, buf2, buf_size2*8);
/* Simple/main profile (and non-VC1 ids): read the packet bytes directly. */
5563 init_get_bits(&s->gb, buf, buf_size*8);
/* WMVP/WVP2 sprite streams prepend two flag bits before the frame header. */
5565 if (v->res_sprite) {
5566 v->new_sprite = !get_bits1(&s->gb);
5567 v->two_sprites = get_bits1(&s->gb);
5568 /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
5569 we're using the sprite compositor. These are intentionally kept separate
5570 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5571 the vc1 one for WVP2 */
5572 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5573 if (v->new_sprite) {
5574 // switch AVCodecContext parameters to those of the sprites
5575 avctx->width = avctx->coded_width = v->sprite_width;
5576 avctx->height = avctx->coded_height = v->sprite_height;
/* Re-initialize the context whenever the coded dimensions changed. */
5583 if (s->context_initialized &&
5584 (s->width != avctx->coded_width ||
5585 s->height != avctx->coded_height)) {
5586 vc1_decode_end(avctx);
5589 if (!s->context_initialized) {
5590 if (ff_msmpeg4_decode_init(avctx) < 0 || vc1_decode_init_alloc_tables(v) < 0)
5593 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5595 if (v->profile == PROFILE_ADVANCED) {
5596 s->h_edge_pos = avctx->coded_width;
5597 s->v_edge_pos = avctx->coded_height;
5601 /* We need to set current_picture_ptr before reading the header,
5602 * otherwise we cannot store anything in there. */
5603 if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5604 int i = ff_find_unused_picture(s, 0);
5605 s->current_picture_ptr = &s->picture[i];
5608 // do parse frame header
5609 v->pic_header_flag = 0;
5610 if (v->profile < PROFILE_ADVANCED) {
5611 if (vc1_parse_frame_header(v, &s->gb) == -1) {
5615 if (vc1_parse_frame_header_adv(v, &s->gb) == -1) {
/* The sprite compositor only accepts intra frames. */
5620 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5621 && s->pict_type != AV_PICTURE_TYPE_I) {
5622 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5626 // process pulldown flags
5627 s->current_picture_ptr->f.repeat_pict = 0;
5628 // Pulldown flags are only valid when 'broadcast' has been set.
5629 // So ticks_per_frame will be 2
5632 s->current_picture_ptr->f.repeat_pict = 1;
5633 } else if (v->rptfrm) {
5635 s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5638 // for skipping the frame
5639 s->current_picture.f.pict_type = s->pict_type;
5640 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5642 /* skip B-frames if we don't have reference frames */
5643 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) {
/* Honor the caller's frame-skipping policy before any decoding work. */
5646 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5647 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5648 avctx->skip_frame >= AVDISCARD_ALL) {
5652 if (s->next_p_frame_damaged) {
5653 if (s->pict_type == AV_PICTURE_TYPE_B)
5656 s->next_p_frame_damaged = 0;
5659 if (MPV_frame_start(s, avctx) < 0) {
5663 s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
5664 s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
/* Hardware paths: hand the (still escaped) packet bytes to VDPAU or a
 * generic hwaccel instead of decoding in software below. */
5666 if ((CONFIG_VC1_VDPAU_DECODER)
5667 &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5668 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5669 else if (avctx->hwaccel) {
5670 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5672 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5674 if (avctx->hwaccel->end_frame(avctx) < 0)
5677 ff_er_frame_start(s);
5679 v->bits = buf_size * 8;
/* Field pictures: double the line sizes so each field is addressed as a
 * half-height frame, and rotate the mv_f / mv_f_next / mv_f_last motion
 * vector field-select buffers. */
5680 if (v->field_mode) {
5682 s->current_picture.f.linesize[0] <<= 1;
5683 s->current_picture.f.linesize[1] <<= 1;
5684 s->current_picture.f.linesize[2] <<= 1;
5686 s->uvlinesize <<= 1;
5687 tmp[0] = v->mv_f_last[0];
5688 tmp[1] = v->mv_f_last[1];
5689 v->mv_f_last[0] = v->mv_f_next[0];
5690 v->mv_f_last[1] = v->mv_f_next[1];
5691 v->mv_f_next[0] = v->mv_f[0];
5692 v->mv_f_next[1] = v->mv_f[1];
5693 v->mv_f[0] = tmp[0];
5694 v->mv_f[1] = tmp[1];
/* Per-slice decode loop: slice i runs from slices[i-1].mby_start to
 * slices[i].mby_start (modulo mb_height for field pictures); entry i == 0
 * is the headerless first slice, i == n_slices1 + 2 is the second field's
 * frame header. */
5696 mb_height = s->mb_height >> v->field_mode;
5697 for (i = 0; i <= n_slices; i++) {
5698 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5699 v->second_field = 1;
5700 v->blocks_off = s->mb_width * s->mb_height << 1;
5701 v->mb_off = s->mb_stride * s->mb_height >> 1;
5703 v->second_field = 0;
5708 v->pic_header_flag = 0;
5709 if (v->field_mode && i == n_slices1 + 2)
5710 vc1_parse_frame_header_adv(v, &s->gb);
5711 else if (get_bits1(&s->gb)) {
5712 v->pic_header_flag = 1;
5713 vc1_parse_frame_header_adv(v, &s->gb);
5716 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5717 if (!v->field_mode || v->second_field)
5718 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5720 s->end_mb_y = (i == n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5721 vc1_decode_blocks(v);
5723 s->gb = slices[i].gb;
/* NOTE(review): buf_field2 is freed here and again at the tail below
 * (line 5784); the elided lines presumably make these paths exclusive or
 * reset the pointer — confirm against the full source (double-free risk). */
5725 if (v->field_mode) {
5726 av_free(buf_field2);
5727 v->second_field = 0;
/* Undo the field-mode doubling and, for B-fields, restore mv_f from
 * mv_f_next so reference MV state is consistent for the next picture. */
5729 if (v->field_mode) {
5730 if (s->pict_type == AV_PICTURE_TYPE_B) {
5731 memcpy(v->mv_f_base, v->mv_f_next_base,
5732 2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5734 s->current_picture.f.linesize[0] >>= 1;
5735 s->current_picture.f.linesize[1] >>= 1;
5736 s->current_picture.f.linesize[2] >>= 1;
5738 s->uvlinesize >>= 1;
5740 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits);
5741 // if (get_bits_count(&s->gb) > buf_size * 8)
/* Output selection: image codecs composite and emit the sprite frame;
 * otherwise emit the current picture (B or low-delay) or the last
 * reference picture, one frame late. */
5748 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5750 avctx->width = avctx->coded_width = v->output_width;
5751 avctx->height = avctx->coded_height = v->output_height;
5752 if (avctx->skip_frame >= AVDISCARD_NONREF)
5754 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5755 if (vc1_decode_sprites(v, &s->gb))
5758 *pict = v->sprite_output_frame;
5759 *data_size = sizeof(AVFrame);
5761 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5762 *pict = *(AVFrame*)s->current_picture_ptr;
5763 } else if (s->last_picture_ptr != NULL) {
5764 *pict = *(AVFrame*)s->last_picture_ptr;
5766 if (s->last_picture_ptr || s->low_delay) {
5767 *data_size = sizeof(AVFrame);
5768 ff_print_debug_info(s, pict);
/* Cleanup (success path, then error path): release all per-slice buffers
 * and the second-field buffer. */
5774 for (i = 0; i < n_slices; i++)
5775 av_free(slices[i].buf);
5781 for (i = 0; i < n_slices; i++)
5782 av_free(slices[i].buf);
5784 av_free(buf_field2);
/* Profile names reported through AVCodecContext.profile for all VC-1
 * decoder variants below; list is FF_PROFILE_UNKNOWN-terminated.
 * (Closing brace of the initializer is elided in this excerpt.) */
5789 static const AVProfile profiles[] = {
5790 { FF_PROFILE_VC1_SIMPLE, "Simple" },
5791 { FF_PROFILE_VC1_MAIN, "Main" },
5792 { FF_PROFILE_VC1_COMPLEX, "Complex" },
5793 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5794 { FF_PROFILE_UNKNOWN },
/* Software SMPTE VC-1 decoder registration. CODEC_CAP_DELAY is required
 * because non-low-delay streams output one frame late (see
 * vc1_decode_frame's output selection). The .name and .id initializers
 * are elided in this excerpt. */
5797 AVCodec ff_vc1_decoder = {
5799     .type           = AVMEDIA_TYPE_VIDEO,
5801     .priv_data_size = sizeof(VC1Context),
5802     .init           = vc1_decode_init,
5803     .close          = vc1_decode_end,
5804     .decode         = vc1_decode_frame,
5805     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5806     .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5807     .pix_fmts       = ff_hwaccel_pixfmt_list_420,
5808     .profiles       = NULL_IF_CONFIG_SMALL(profiles)
/* Software WMV3 (Windows Media Video 9) decoder: same callbacks as the
 * VC-1 decoder, distinguished only by codec id. (.name initializer and
 * the matching #endif are elided in this excerpt.) */
5811 #if CONFIG_WMV3_DECODER
5812 AVCodec ff_wmv3_decoder = {
5814     .type           = AVMEDIA_TYPE_VIDEO,
5815     .id             = CODEC_ID_WMV3,
5816     .priv_data_size = sizeof(VC1Context),
5817     .init           = vc1_decode_init,
5818     .close          = vc1_decode_end,
5819     .decode         = vc1_decode_frame,
5820     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5821     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5822     .pix_fmts       = ff_hwaccel_pixfmt_list_420,
5823     .profiles       = NULL_IF_CONFIG_SMALL(profiles)
/* VDPAU-accelerated WMV3 decoder: CODEC_CAP_HWACCEL_VDPAU routes
 * vc1_decode_frame to ff_vdpau_vc1_decode_picture, and the pix_fmts list
 * advertises only the VDPAU surface format. */
5827 #if CONFIG_WMV3_VDPAU_DECODER
5828 AVCodec ff_wmv3_vdpau_decoder = {
5829     .name           = "wmv3_vdpau",
5830     .type           = AVMEDIA_TYPE_VIDEO,
5831     .id             = CODEC_ID_WMV3,
5832     .priv_data_size = sizeof(VC1Context),
5833     .init           = vc1_decode_init,
5834     .close          = vc1_decode_end,
5835     .decode         = vc1_decode_frame,
5836     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5837     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5838     .pix_fmts       = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE},
5839     .profiles       = NULL_IF_CONFIG_SMALL(profiles)
/* VDPAU-accelerated VC-1 decoder (advanced profile surface format).
 * The .id initializer is elided in this excerpt. */
5843 #if CONFIG_VC1_VDPAU_DECODER
5844 AVCodec ff_vc1_vdpau_decoder = {
5845     .name           = "vc1_vdpau",
5846     .type           = AVMEDIA_TYPE_VIDEO,
5848     .priv_data_size = sizeof(VC1Context),
5849     .init           = vc1_decode_init,
5850     .close          = vc1_decode_end,
5851     .decode         = vc1_decode_frame,
5852     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5853     .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5854     .pix_fmts       = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE},
5855     .profiles       = NULL_IF_CONFIG_SMALL(profiles)
/* WMVP sprite-compositing decoder ("Windows Media Video 9 Image").
 * Uses the sprite path in vc1_decode_frame (sprite_output_frame),
 * hence the vc1_sprite_flush callback and no CODEC_CAP_DELAY;
 * plain yuv420 output, no .profiles list. */
5859 #if CONFIG_WMV3IMAGE_DECODER
5860 AVCodec ff_wmv3image_decoder = {
5861     .name           = "wmv3image",
5862     .type           = AVMEDIA_TYPE_VIDEO,
5863     .id             = CODEC_ID_WMV3IMAGE,
5864     .priv_data_size = sizeof(VC1Context),
5865     .init           = vc1_decode_init,
5866     .close          = vc1_decode_end,
5867     .decode         = vc1_decode_frame,
5868     .capabilities   = CODEC_CAP_DR1,
5869     .flush          = vc1_sprite_flush,
5870     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5871     .pix_fmts       = ff_pixfmt_list_420
5875 #if CONFIG_VC1IMAGE_DECODER
5876 AVCodec ff_vc1image_decoder = {
5878 .type = AVMEDIA_TYPE_VIDEO,
5879 .id = CODEC_ID_VC1IMAGE,
5880 .priv_data_size = sizeof(VC1Context),
5881 .init = vc1_decode_init,
5882 .close = vc1_decode_end,
5883 .decode = vc1_decode_frame,
5884 .capabilities = CODEC_CAP_DR1,
5885 .flush = vc1_sprite_flush,
5886 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5887 .pix_fmts = ff_pixfmt_list_420