2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of Libav.
9 * Libav is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * Libav is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with Libav; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
36 #include "vc1acdata.h"
37 #include "msmpeg4data.h"
39 #include "simple_idct.h"
41 #include "vdpau_internal.h"
46 #define MB_INTRA_VLC_BITS 9
/* Cumulative start offsets (in table entries) of each static VLC sub-table
 * inside the single shared vlc_table[] backing store used by
 * vc1_init_common().  The difference between consecutive entries is the
 * number of entries allocated to one VLC; the final value (32372) must
 * match the declared size of vlc_table[]. */
51 static const uint16_t vlc_offs[] = {
52 0, 520, 552, 616, 1128, 1160, 1224, 1740, 1772, 1836, 1900, 2436,
53 2986, 3050, 3610, 4154, 4218, 4746, 5326, 5390, 5902, 6554, 7658, 8342,
54 9304, 9988, 10630, 11234, 12174, 13006, 13560, 14232, 14786, 15432, 16350, 17522,
55 20372, 21818, 22330, 22394, 23166, 23678, 23742, 24820, 25332, 25396, 26460, 26980,
56 27048, 27592, 27600, 27608, 27616, 27624, 28224, 28258, 28290, 28802, 28834, 28866,
57 29378, 29412, 29444, 29960, 29994, 30026, 30538, 30572, 30604, 31120, 31154, 31186,
58 31714, 31746, 31778, 32306, 32340, 32372
61 // offset tables for interlaced picture MVDATA decoding
/* table1 holds 0 followed by powers of two (2^(k-1)); table2 holds 2^k - 1.
 * They map a decoded size index k to the base offset added to the extra
 * bits read for an interlaced motion-vector component. */
62 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
63 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
66 * Init VC-1 specific tables and VC1Context members
67 * @param v The VC1Context to initialize
/* Builds every static VLC table used by the decoder.  All init_vlc() calls
 * below use INIT_VLC_USE_NEW_STATIC with .table/.table_allocated pointing
 * into the shared vlc_table[] at the vlc_offs[] boundaries, so the offsets
 * in vlc_offs[] and the loop indexing (i * 3 + k, i * 2 + k, i + k) must
 * stay exactly in sync with the order of initialization here. */
70 static int vc1_init_common(VC1Context *v)
/* Shared backing store for all statically carved VLC tables; its size
 * must equal the last element of vlc_offs[] (32372). */
74 static VLC_TYPE vlc_table[32372][2];
76 v->hrd_rate = v->hrd_buffer = NULL;
/* Simple one-table VLCs: B-fraction, NORM2/NORM6 bitplane codes, IMODE. */
80 INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
81 ff_vc1_bfraction_bits, 1, 1,
82 ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS);
83 INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
84 ff_vc1_norm2_bits, 1, 1,
85 ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS);
86 INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
87 ff_vc1_norm6_bits, 1, 1,
88 ff_vc1_norm6_codes, 2, 2, 556);
89 INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
90 ff_vc1_imode_bits, 1, 1,
91 ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS);
/* Three variants each of the TTMB, TTBLK and SUBBLKPAT tables,
 * interleaved in vlc_offs[] as triplets starting at offset index 0. */
92 for (i = 0; i < 3; i++) {
93 ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 0]];
94 ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i * 3 + 1] - vlc_offs[i * 3 + 0];
95 init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
96 ff_vc1_ttmb_bits[i], 1, 1,
97 ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
98 ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 1]];
99 ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i * 3 + 2] - vlc_offs[i * 3 + 1];
100 init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
101 ff_vc1_ttblk_bits[i], 1, 1,
102 ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
103 ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 2]];
104 ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i * 3 + 3] - vlc_offs[i * 3 + 2];
105 init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
106 ff_vc1_subblkpat_bits[i], 1, 1,
107 ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* Four variants each of the 4MV block pattern, P-picture CBPCY and
 * MV-diff tables; triplets start at offset index 9. */
109 for (i = 0; i < 4; i++) {
110 ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 9]];
111 ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i * 3 + 10] - vlc_offs[i * 3 + 9];
112 init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
113 ff_vc1_4mv_block_pattern_bits[i], 1, 1,
114 ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
115 ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 10]];
116 ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i * 3 + 11] - vlc_offs[i * 3 + 10];
117 init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
118 ff_vc1_cbpcy_p_bits[i], 1, 1,
119 ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
120 ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 11]];
121 ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i * 3 + 12] - vlc_offs[i * 3 + 11];
122 init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
123 ff_vc1_mv_diff_bits[i], 1, 1,
124 ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
/* Eight AC coefficient tables plus eight 2-reference interlaced MVDATA
 * tables, interleaved in pairs starting at offset index 21.  Note the AC
 * tables read bits/codes from an interleaved [len][2] array (stride 8). */
126 for (i = 0; i < 8; i++) {
127 ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i * 2 + 21]];
128 ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i * 2 + 22] - vlc_offs[i * 2 + 21];
129 init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
130 &vc1_ac_tables[i][0][1], 8, 4,
131 &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC);
132 /* initialize interlaced MVDATA tables (2-Ref) */
133 ff_vc1_2ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 2 + 22]];
134 ff_vc1_2ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 2 + 23] - vlc_offs[i * 2 + 22];
135 init_vlc(&ff_vc1_2ref_mvdata_vlc[i], VC1_2REF_MVDATA_VLC_BITS, 126,
136 ff_vc1_2ref_mvdata_bits[i], 1, 1,
137 ff_vc1_2ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
/* Interlaced-frame P-picture MBMODE tables and 1-reference MVDATA;
 * triplets start at offset index 37. */
139 for (i = 0; i < 4; i++) {
140 /* initialize 4MV MBMODE VLC tables for interlaced frame P picture */
141 ff_vc1_intfr_4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 37]];
142 ff_vc1_intfr_4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 38] - vlc_offs[i * 3 + 37];
143 init_vlc(&ff_vc1_intfr_4mv_mbmode_vlc[i], VC1_INTFR_4MV_MBMODE_VLC_BITS, 15,
144 ff_vc1_intfr_4mv_mbmode_bits[i], 1, 1,
145 ff_vc1_intfr_4mv_mbmode_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
146 /* initialize NON-4MV MBMODE VLC tables for the same */
147 ff_vc1_intfr_non4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 38]];
148 ff_vc1_intfr_non4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 39] - vlc_offs[i * 3 + 38];
149 init_vlc(&ff_vc1_intfr_non4mv_mbmode_vlc[i], VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 9,
150 ff_vc1_intfr_non4mv_mbmode_bits[i], 1, 1,
151 ff_vc1_intfr_non4mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
152 /* initialize interlaced MVDATA tables (1-Ref) */
153 ff_vc1_1ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 39]];
154 ff_vc1_1ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 3 + 40] - vlc_offs[i * 3 + 39];
155 init_vlc(&ff_vc1_1ref_mvdata_vlc[i], VC1_1REF_MVDATA_VLC_BITS, 72,
156 ff_vc1_1ref_mvdata_bits[i], 1, 1,
157 ff_vc1_1ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
/* 2MV block pattern tables use single-slot offsets starting at index 49. */
159 for (i = 0; i < 4; i++) {
160 /* Initialize 2MV Block pattern VLC tables */
161 ff_vc1_2mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i + 49]];
162 ff_vc1_2mv_block_pattern_vlc[i].table_allocated = vlc_offs[i + 50] - vlc_offs[i + 49];
163 init_vlc(&ff_vc1_2mv_block_pattern_vlc[i], VC1_2MV_BLOCK_PATTERN_VLC_BITS, 4,
164 ff_vc1_2mv_block_pattern_bits[i], 1, 1,
165 ff_vc1_2mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* Interlaced CBPCY and field-picture MBMODE tables; triplets start at
 * offset index 53 and consume the remainder of vlc_table[]. */
167 for (i = 0; i < 8; i++) {
168 /* Initialize interlaced CBPCY VLC tables (Table 124 - Table 131) */
169 ff_vc1_icbpcy_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 53]];
170 ff_vc1_icbpcy_vlc[i].table_allocated = vlc_offs[i * 3 + 54] - vlc_offs[i * 3 + 53];
171 init_vlc(&ff_vc1_icbpcy_vlc[i], VC1_ICBPCY_VLC_BITS, 63,
172 ff_vc1_icbpcy_p_bits[i], 1, 1,
173 ff_vc1_icbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
174 /* Initialize interlaced field picture MBMODE VLC tables */
175 ff_vc1_if_mmv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 54]];
176 ff_vc1_if_mmv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 55] - vlc_offs[i * 3 + 54];
177 init_vlc(&ff_vc1_if_mmv_mbmode_vlc[i], VC1_IF_MMV_MBMODE_VLC_BITS, 8,
178 ff_vc1_if_mmv_mbmode_bits[i], 1, 1,
179 ff_vc1_if_mmv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
180 ff_vc1_if_1mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 55]];
181 ff_vc1_if_1mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 56] - vlc_offs[i * 3 + 55];
182 init_vlc(&ff_vc1_if_1mv_mbmode_vlc[i], VC1_IF_1MV_MBMODE_VLC_BITS, 6,
183 ff_vc1_if_1mv_mbmode_bits[i], 1, 1,
184 ff_vc1_if_1mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
191 v->mvrange = 0; /* 7.1.1.18, p80 */
196 /***********************************************************************/
198 * @name VC-1 Bitplane decoding
216 /** @} */ //imode defines
219 /** @} */ //Bitplane group
/* Write the (signed, IDCT-output) residual blocks of already-filtered
 * macroblocks to the picture planes, clamped to [0,255].  Runs one MB row
 * and one MB column behind the decode loop (see comment below), cycling
 * through the delayed block buffers via the topleft/top/left/cur indices. */
221 static void vc1_put_signed_blocks_clamped(VC1Context *v)
223 MpegEncContext *s = &v->s;
224 int topleft_mb_pos, top_mb_pos;
225 int stride_y, fieldtx;
228 /* The put pixels loop is always one MB row behind the decoding loop,
229 * because we can only put pixels when overlap filtering is done, and
230 * for filtering of the bottom edge of a MB, we need the next MB row
232 * Within the row, the put pixels loop is also one MB col behind the
233 * decoding loop. The reason for this is again, because for filtering
234 * of the right MB edge, we need the next MB present. */
235 if (!s->first_slice_line) {
237 topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
238 fieldtx = v->fieldtx_plane[topleft_mb_pos];
/* With field transform (fieldtx=1) the two luma fields are interleaved,
 * so the effective luma stride doubles... */
239 stride_y = (s->linesize) << fieldtx;
/* ...and the lower blocks start 15 lines down instead of 8
 * (fieldtx=0 -> 16>>1 = 8; fieldtx=1 -> 15>>0 = 15). */
240 v_dist = (16 - fieldtx) >> (fieldtx == 0);
/* Emit the six blocks (4 luma + 2 chroma) of the top-left delayed MB. */
241 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
242 s->dest[0] - 16 * s->linesize - 16,
244 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
245 s->dest[0] - 16 * s->linesize - 8,
247 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
248 s->dest[0] - v_dist * s->linesize - 16,
250 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
251 s->dest[0] - v_dist * s->linesize - 8,
253 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
254 s->dest[1] - 8 * s->uvlinesize - 8,
256 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
257 s->dest[2] - 8 * s->uvlinesize - 8,
/* At the end of a row there is no "next" MB, so flush the top MB too. */
260 if (s->mb_x == s->mb_width - 1) {
261 top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
262 fieldtx = v->fieldtx_plane[top_mb_pos];
263 stride_y = s->linesize << fieldtx;
264 v_dist = fieldtx ? 15 : 8;
265 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
266 s->dest[0] - 16 * s->linesize,
268 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
269 s->dest[0] - 16 * s->linesize + 8,
271 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
272 s->dest[0] - v_dist * s->linesize,
274 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
275 s->dest[0] - v_dist * s->linesize + 8,
277 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
278 s->dest[1] - 8 * s->uvlinesize,
280 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
281 s->dest[2] - 8 * s->uvlinesize,
/* Advance an index through the ring of delayed block buffers,
 * wrapping at v->n_allocated_blks. */
286 #define inc_blk_idx(idx) do { \
288 if (idx >= v->n_allocated_blks) \
292 inc_blk_idx(v->topleft_blk_idx);
293 inc_blk_idx(v->top_blk_idx);
294 inc_blk_idx(v->left_blk_idx);
295 inc_blk_idx(v->cur_blk_idx);
/* In-loop deblocking for an intra macroblock at (s->mb_x, s->mb_y):
 * filters the top and internal edges of the current MB (luma 16-wide,
 * both chroma planes 8-wide) with quantizer-derived strength pq. */
298 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
300 MpegEncContext *s = &v->s;
/* Top MB edge only exists from the second MB row of the slice on. */
302 if (!s->first_slice_line) {
303 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
/* Vertical block edges of the MB above (filtered now that this row
 * provides the needed bottom context). */
305 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
306 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
307 for (j = 0; j < 2; j++) {
308 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
310 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Internal horizontal luma edge at row 8 of the current MB. */
313 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
/* On the last MB row, also filter this row's own vertical edges since
 * no further row will do it. */
315 if (s->mb_y == s->end_mb_y - 1) {
317 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
318 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
319 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
321 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/* Delayed variant of the intra-block loop filter, used when the overlap
 * smoothing filter is active: the deblocker must not touch pixels until
 * overlap filtering has finished, so it trails the decode loop by two
 * MB rows/columns instead of one. */
325 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
327 MpegEncContext *s = &v->s;
330 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
331 * means it runs two rows/cols behind the decoding loop. */
332 if (!s->first_slice_line) {
/* Filter the MB two rows up / one col left, once its full context exists. */
334 if (s->mb_y >= s->start_mb_y + 2) {
335 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
338 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
339 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
340 for (j = 0; j < 2; j++) {
341 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
343 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
347 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
/* End of row: the MB directly above has no right neighbour coming,
 * so process it now with the same two-row delay logic. */
350 if (s->mb_x == s->mb_width - 1) {
351 if (s->mb_y >= s->start_mb_y + 2) {
352 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
355 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
356 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
357 for (j = 0; j < 2; j++) {
358 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
360 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
364 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
/* Past the last decoded row: drain the final row of pending MBs. */
367 if (s->mb_y == s->end_mb_y) {
370 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
371 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
373 for (j = 0; j < 2; j++) {
374 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
379 if (s->mb_x == s->mb_width - 1) {
381 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
382 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
384 for (j = 0; j < 2; j++) {
385 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Overlap smoothing for intra blocks, applied on the IDCT coefficients in
 * the delayed block buffers (cur/left/top/topleft) before they are written
 * to the frame.  Edges are smoothed when CONDOVER says so, when pq >= 9,
 * or when the per-MB over_flags bitplane marks both neighbours. */
393 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
395 MpegEncContext *s = &v->s;
398 if (v->condover == CONDOVER_NONE)
401 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
403 /* Within a MB, the horizontal overlap always runs before the vertical.
404 * To accomplish that, we run the H on left and internal borders of the
405 * currently decoded MB. Then, we wait for the next overlap iteration
406 * to do H overlap on the right edge of this MB, before moving over and
407 * running the V overlap. Therefore, the V overlap makes us trail by one
408 * MB col and the H overlap filter makes us trail by one MB row. This
409 * is reflected in the time at which we run the put_pixels loop. */
410 if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
411 if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
412 v->over_flags_plane[mb_pos - 1])) {
/* H overlap across the left MB boundary (left MB's right blocks
 * against current MB's left blocks), luma then chroma. */
413 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
414 v->block[v->cur_blk_idx][0]);
415 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
416 v->block[v->cur_blk_idx][2]);
417 if (!(s->flags & CODEC_FLAG_GRAY)) {
418 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
419 v->block[v->cur_blk_idx][4]);
420 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
421 v->block[v->cur_blk_idx][5]);
/* H overlap on the internal vertical edge of the current MB. */
424 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
425 v->block[v->cur_blk_idx][1]);
426 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
427 v->block[v->cur_blk_idx][3]);
/* Last MB of the row: no further iteration will trigger the V pass
 * for this column, so run it here against the MB above. */
429 if (s->mb_x == s->mb_width - 1) {
430 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
431 v->over_flags_plane[mb_pos - s->mb_stride])) {
432 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
433 v->block[v->cur_blk_idx][0]);
434 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
435 v->block[v->cur_blk_idx][1]);
436 if (!(s->flags & CODEC_FLAG_GRAY)) {
437 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
438 v->block[v->cur_blk_idx][4]);
439 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
440 v->block[v->cur_blk_idx][5]);
443 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
444 v->block[v->cur_blk_idx][2]);
445 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
446 v->block[v->cur_blk_idx][3]);
/* NOTE(review): unlike every analogous check above, this condition omits
 * the "v->pq >= 9" clause — confirm whether that asymmetry is intended. */
449 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
450 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
451 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
/* V overlap for the trailing (left) column against its top neighbour. */
452 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
453 v->block[v->left_blk_idx][0]);
454 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
455 v->block[v->left_blk_idx][1]);
456 if (!(s->flags & CODEC_FLAG_GRAY)) {
457 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
458 v->block[v->left_blk_idx][4]);
459 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
460 v->block[v->left_blk_idx][5]);
463 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
464 v->block[v->left_blk_idx][2]);
465 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
466 v->block[v->left_blk_idx][3]);
470 /** Do motion compensation over 1 macroblock
471 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
 * @param v   decoder context
 * @param dir 0 = forward prediction (last picture), 1 = backward (next)
 */
473 static void vc1_mc_1mv(VC1Context *v, int dir)
475 MpegEncContext *s = &v->s;
476 DSPContext *dsp = &v->s.dsp;
477 uint8_t *srcY, *srcU, *srcV;
478 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
/* In field mode the vertical edge limit applies per field (half height). */
480 int v_edge_pos = s->v_edge_pos >> v->field_mode;
481 if (!v->field_mode && !v->s.last_picture.f.data[0])
484 mx = s->mv[dir][0][0];
485 my = s->mv[dir][0][1];
487 // store motion vectors for further use in B frames
488 if (s->pict_type == AV_PICTURE_TYPE_P) {
489 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
490 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
/* Derive the chroma MV from the luma MV: halve it, bumping 3/4-pel
 * fractions up so they round to the nearest chroma quarter-pel. */
493 uvmx = (mx + ((mx & 3) == 3)) >> 1;
494 uvmy = (my + ((my & 3) == 3)) >> 1;
495 v->luma_mv[s->mb_x][0] = uvmx;
496 v->luma_mv[s->mb_x][1] = uvmy;
/* Cross-field reference: bias the vertical MV by +/-2 quarter-pels. */
499 v->cur_field_type != v->ref_field_type[dir]) {
500 my = my - 2 + 4 * v->cur_field_type;
501 uvmy = uvmy - 2 + 4 * v->cur_field_type;
/* FASTUVMC: round chroma MVs to half-pel (toward zero).
 * NOTE(review): fcm == 1 appears to denote interlaced-frame coding —
 * confirm against the FCM enum values. */
504 if (v->fastuvmc && (v->fcm != 1)) { // fastuvmc shall be ignored for interlaced frame picture
505 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
506 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
/* Select the reference planes: same-polarity field of the current
 * picture for intra-field references, otherwise last/next picture. */
508 if (v->field_mode) { // interlaced field picture
510 if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) {
511 srcY = s->current_picture.f.data[0];
512 srcU = s->current_picture.f.data[1];
513 srcV = s->current_picture.f.data[2];
515 srcY = s->last_picture.f.data[0];
516 srcU = s->last_picture.f.data[1];
517 srcV = s->last_picture.f.data[2];
520 srcY = s->next_picture.f.data[0];
521 srcU = s->next_picture.f.data[1];
522 srcV = s->next_picture.f.data[2];
526 srcY = s->last_picture.f.data[0];
527 srcU = s->last_picture.f.data[1];
528 srcV = s->last_picture.f.data[2];
530 srcY = s->next_picture.f.data[0];
531 srcU = s->next_picture.f.data[1];
532 srcV = s->next_picture.f.data[2];
/* Integer source position: MVs are in quarter-pel units (>> 2). */
536 src_x = s->mb_x * 16 + (mx >> 2);
537 src_y = s->mb_y * 16 + (my >> 2);
538 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
539 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
541 if (v->profile != PROFILE_ADVANCED) {
542 src_x = av_clip( src_x, -16, s->mb_width * 16);
543 src_y = av_clip( src_y, -16, s->mb_height * 16);
544 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
545 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
547 src_x = av_clip( src_x, -17, s->avctx->coded_width);
548 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
549 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
550 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
553 srcY += src_y * s->linesize + src_x;
554 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
555 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* Referencing the bottom field: skip one line into the plane. */
557 if (v->field_mode && v->ref_field_type[dir]) {
558 srcY += s->current_picture_ptr->f.linesize[0];
559 srcU += s->current_picture_ptr->f.linesize[1];
560 srcV += s->current_picture_ptr->f.linesize[2];
563 /* for grayscale we should not try to read from unknown area */
564 if (s->flags & CODEC_FLAG_GRAY) {
565 srcU = s->edge_emu_buffer + 18 * s->linesize;
566 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* Use the edge emulation buffer whenever the source must be modified
 * (range reduction / intensity compensation) or the read would cross
 * the picture edge. */
569 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
570 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
571 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my&3) - 16 - s->mspel * 3) {
572 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
574 srcY -= s->mspel * (1 + s->linesize);
575 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
576 17 + s->mspel * 2, 17 + s->mspel * 2,
577 src_x - s->mspel, src_y - s->mspel,
578 s->h_edge_pos, v_edge_pos);
579 srcY = s->edge_emu_buffer;
580 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
581 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
582 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
583 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
586 /* if we deal with range reduction we need to scale source blocks */
587 if (v->rangeredfrm) {
592 for (j = 0; j < 17 + s->mspel * 2; j++) {
593 for (i = 0; i < 17 + s->mspel * 2; i++)
594 src[i] = ((src[i] - 128) >> 1) + 128;
599 for (j = 0; j < 9; j++) {
600 for (i = 0; i < 9; i++) {
601 src[i] = ((src[i] - 128) >> 1) + 128;
602 src2[i] = ((src2[i] - 128) >> 1) + 128;
604 src += s->uvlinesize;
605 src2 += s->uvlinesize;
608 /* if we deal with intensity compensation we need to scale source blocks */
609 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
614 for (j = 0; j < 17 + s->mspel * 2; j++) {
615 for (i = 0; i < 17 + s->mspel * 2; i++)
616 src[i] = v->luty[src[i]];
621 for (j = 0; j < 9; j++) {
622 for (i = 0; i < 9; i++) {
623 src[i] = v->lutuv[src[i]];
624 src2[i] = v->lutuv[src2[i]];
626 src += s->uvlinesize;
627 src2 += s->uvlinesize;
630 srcY += s->mspel * (1 + s->linesize);
/* Bottom field of the current picture: write one line down. */
633 if (v->field_mode && v->cur_field_type) {
634 off = s->current_picture_ptr->f.linesize[0];
635 off_uv = s->current_picture_ptr->f.linesize[1];
/* Quarter-pel luma MC via the four 8x8 mspel filters... */
641 dxy = ((my & 3) << 2) | (mx & 3);
642 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
643 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
644 srcY += s->linesize * 8;
645 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
646 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
647 } else { // hpel mc - always used for luma
648 dxy = (my & 2) | ((mx & 2) >> 1);
650 dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
652 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
655 if (s->flags & CODEC_FLAG_GRAY) return;
656 /* Chroma MC always uses qpel bilinear */
657 uvmx = (uvmx & 3) << 1;
658 uvmy = (uvmy & 3) << 1;
660 dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
661 dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
663 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
664 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Median of four values: the (integer) average of the two middle values.
 * Used to combine the four luma motion vectors of a 4-MV macroblock.
 *
 * The fragment was missing the outer branch selector — it contained two
 * identical "if (c < d)" tests with conflicting returns and no "if (a < b)"
 * split, which is not valid C.  Reconstructed: each branch discards the
 * overall min and max and averages the remaining pair.
 *
 * @return (sum of the two middle values) / 2, truncated toward zero
 */
static inline int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d)
            /* min(a,c) and max(b,d) are the extremes; average the rest. */
            return ((b < d ? b : d) + (a > c ? a : c)) / 2;
        else
            return ((b < c ? b : c) + (a > d ? a : d)) / 2;
    } else {
        if (c < d)
            return ((a < d ? a : d) + (b > c ? b : c)) / 2;
        else
            return ((a < c ? a : c) + (b > d ? b : d)) / 2;
    }
}
679 /** Do motion compensation for 4-MV macroblock - luminance block
 * @param v   decoder context
 * @param n   luma block index within the MB (0..3)
 * @param dir 0 = forward reference, 1 = backward reference
 */
681 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
683 MpegEncContext *s = &v->s;
684 DSPContext *dsp = &v->s.dsp;
686 int dxy, mx, my, src_x, src_y;
/* Per-block field MV flag, only meaningful for interlaced frames.
 * NOTE(review): fcm == 1 appears to be the interlaced-frame mode —
 * confirm against the FCM enum. */
688 int fieldmv = (v->fcm == 1) ? v->blk_mv_type[s->block_index[n]] : 0;
689 int v_edge_pos = s->v_edge_pos >> v->field_mode;
691 if (!v->field_mode && !v->s.last_picture.f.data[0])
694 mx = s->mv[dir][n][0];
695 my = s->mv[dir][n][1];
/* Pick the reference plane; opposite-polarity field references in the
 * second field read from the current picture. */
699 if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
700 srcY = s->current_picture.f.data[0];
702 srcY = s->last_picture.f.data[0];
704 srcY = s->last_picture.f.data[0];
706 srcY = s->next_picture.f.data[0];
709 if (v->cur_field_type != v->ref_field_type[dir])
710 my = my - 2 + 4 * v->cur_field_type;
/* On the last luma block of a field P MB, derive and store the MB's
 * representative MV from the dominant field's vectors (median / mid /
 * average depending on how many agree). */
713 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
714 int same_count = 0, opp_count = 0, k;
715 int chosen_mv[2][4][2], f;
717 for (k = 0; k < 4; k++) {
718 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
719 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
720 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
724 f = opp_count > same_count;
725 switch (f ? opp_count : same_count) {
727 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
728 chosen_mv[f][2][0], chosen_mv[f][3][0]);
729 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
730 chosen_mv[f][2][1], chosen_mv[f][3][1]);
733 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
734 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
737 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
738 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
741 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
742 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
743 for (k = 0; k < 4; k++)
744 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* Interlaced frame: pull MVs that point outside the coded area back in
 * (vertical units are 1/8 field lines here, hence my >> 3). */
747 if (v->fcm == 1) { // not sure if needed for other types of picture
749 int width = s->avctx->coded_width;
750 int height = s->avctx->coded_height >> 1;
751 qx = (s->mb_x * 16) + (mx >> 2);
752 qy = (s->mb_y * 8) + (my >> 3);
757 mx -= 4 * (qx - width);
760 else if (qy > height + 1)
761 my -= 8 * (qy - height - 1);
/* Destination offset of this 8x8 block inside the MB; with field MVs the
 * two vertical blocks land on adjacent (interleaved) lines. */
764 if ((v->fcm == 1) && fieldmv)
765 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
767 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
768 if (v->field_mode && v->cur_field_type)
769 off += s->current_picture_ptr->f.linesize[0];
771 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
773 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
775 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
777 if (v->profile != PROFILE_ADVANCED) {
778 src_x = av_clip(src_x, -16, s->mb_width * 16);
779 src_y = av_clip(src_y, -16, s->mb_height * 16);
781 src_x = av_clip(src_x, -17, s->avctx->coded_width);
784 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
786 src_y = av_clip(src_y, -18, s->avctx->coded_height);
788 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
792 srcY += src_y * s->linesize + src_x;
793 if (v->field_mode && v->ref_field_type[dir])
794 srcY += s->current_picture_ptr->f.linesize[0];
796 if (fieldmv && !(src_y & 1))
798 if (fieldmv && (src_y & 1) && src_y < 4)
/* Edge emulation needed when the source is modified (range reduction /
 * intensity compensation) or the read crosses the picture boundary;
 * with field MVs the vertical extent doubles (<< fieldmv). */
800 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
801 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
802 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
803 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
804 /* check emulate edge stride and offset */
805 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
806 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
807 src_x - s->mspel, src_y - (s->mspel << fieldmv),
808 s->h_edge_pos, v_edge_pos);
809 srcY = s->edge_emu_buffer;
810 /* if we deal with range reduction we need to scale source blocks */
811 if (v->rangeredfrm) {
816 for (j = 0; j < 9 + s->mspel * 2; j++) {
817 for (i = 0; i < 9 + s->mspel * 2; i++)
818 src[i] = ((src[i] - 128) >> 1) + 128;
819 src += s->linesize << fieldmv;
822 /* if we deal with intensity compensation we need to scale source blocks */
823 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
828 for (j = 0; j < 9 + s->mspel * 2; j++) {
829 for (i = 0; i < 9 + s->mspel * 2; i++)
830 src[i] = v->luty[src[i]];
831 src += s->linesize << fieldmv;
834 srcY += s->mspel * (1 + (s->linesize << fieldmv));
/* Quarter-pel (mspel) or half-pel MC for this single 8x8 block. */
838 dxy = ((my & 3) << 2) | (mx & 3);
839 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
840 } else { // hpel mc - always used for luma
841 dxy = (my & 2) | ((mx & 2) >> 1);
843 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
845 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Derive the chroma MV (*tx, *ty) for a 4-MV macroblock from the four
 * luma MVs, ignoring blocks whose marker a[k] differs from `flag`
 * (intra blocks, or blocks referencing the non-dominant field).
 * Combines 4 valid MVs with median4, 3 with mid_pred, 2 by averaging.
 * Returns the number of MVs used (visible paths; the tail of the
 * function, including its remaining returns, lies outside this view). */
849 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
/* count[idx] = number of set bits in idx = number of INVALID blocks. */
852 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
/* Bit k of idx is set when block k does not match `flag`. */
854 idx = ((a[3] != flag) << 3)
855 | ((a[2] != flag) << 2)
856 | ((a[1] != flag) << 1)
859 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
860 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
/* Exactly one invalid block: take the component-wise median of the
 * other three (the case arms select which block is excluded). */
862 } else if (count[idx] == 1) {
865 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
866 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
869 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
870 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
873 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
874 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
877 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
878 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
/* Two valid blocks: locate them (t1, t2) and average. */
881 } else if (count[idx] == 2) {
883 for (i = 0; i < 3; i++)
888 for (i = t1 + 1; i < 4; i++)
893 *tx = (mvx[t1] + mvx[t2]) / 2;
894 *ty = (mvy[t1] + mvy[t2]) / 2;
902 /** Do motion compensation for 4-MV macroblock - both chroma blocks
904 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
906 MpegEncContext *s = &v->s;
907 DSPContext *dsp = &v->s.dsp;
908 uint8_t *srcU, *srcV;
909 int uvmx, uvmy, uvsrc_x, uvsrc_y;
910 int k, tx = 0, ty = 0;
911 int mvx[4], mvy[4], intra[4], mv_f[4];
913 int chroma_ref_type = v->cur_field_type, off = 0;
914 int v_edge_pos = s->v_edge_pos >> v->field_mode;
916 if (!v->field_mode && !v->s.last_picture.f.data[0])
918 if (s->flags & CODEC_FLAG_GRAY)
921 for (k = 0; k < 4; k++) {
922 mvx[k] = s->mv[dir][k][0];
923 mvy[k] = s->mv[dir][k][1];
924 intra[k] = v->mb_type[0][s->block_index[k]];
926 mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
929 /* calculate chroma MV vector from four luma MVs */
930 if (!v->field_mode || (v->field_mode && !v->numref)) {
931 valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
933 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
934 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
935 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
936 return; //no need to do MC for intra blocks
940 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
942 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
944 chroma_ref_type = !v->cur_field_type;
946 s->current_picture.f.motion_val[1][s->block_index[0]][0] = tx;
947 s->current_picture.f.motion_val[1][s->block_index[0]][1] = ty;
948 uvmx = (tx + ((tx & 3) == 3)) >> 1;
949 uvmy = (ty + ((ty & 3) == 3)) >> 1;
951 v->luma_mv[s->mb_x][0] = uvmx;
952 v->luma_mv[s->mb_x][1] = uvmy;
955 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
956 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
958 // Field conversion bias
959 if (v->cur_field_type != chroma_ref_type)
960 uvmy += 2 - 4 * chroma_ref_type;
962 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
963 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
965 if (v->profile != PROFILE_ADVANCED) {
966 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
967 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
969 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
970 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
975 if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
976 srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
977 srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
979 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
980 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
983 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
984 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
987 srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
988 srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
992 if (chroma_ref_type) {
993 srcU += s->current_picture_ptr->f.linesize[1];
994 srcV += s->current_picture_ptr->f.linesize[2];
996 off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
999 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1000 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1001 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
1002 s->dsp.emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize,
1003 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1004 s->h_edge_pos >> 1, v_edge_pos >> 1);
1005 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1006 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1007 s->h_edge_pos >> 1, v_edge_pos >> 1);
1008 srcU = s->edge_emu_buffer;
1009 srcV = s->edge_emu_buffer + 16;
1011 /* if we deal with range reduction we need to scale source blocks */
1012 if (v->rangeredfrm) {
1014 uint8_t *src, *src2;
1018 for (j = 0; j < 9; j++) {
1019 for (i = 0; i < 9; i++) {
1020 src[i] = ((src[i] - 128) >> 1) + 128;
1021 src2[i] = ((src2[i] - 128) >> 1) + 128;
1023 src += s->uvlinesize;
1024 src2 += s->uvlinesize;
1027 /* if we deal with intensity compensation we need to scale source blocks */
1028 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1030 uint8_t *src, *src2;
1034 for (j = 0; j < 9; j++) {
1035 for (i = 0; i < 9; i++) {
1036 src[i] = v->lutuv[src[i]];
1037 src2[i] = v->lutuv[src2[i]];
1039 src += s->uvlinesize;
1040 src2 += s->uvlinesize;
1045 /* Chroma MC always uses qpel bilinear */
1046 uvmx = (uvmx & 3) << 1;
1047 uvmy = (uvmy & 3) << 1;
1049 dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1050 dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1052 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1053 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1057 /** Do motion compensation for 4-MV field chroma macroblock (both U and V)
 *
 * NOTE(review): this listing is truncated — braces, `return` statements and
 * several lines are missing between the surviving lines; comments below
 * describe only what the visible code shows.
 *
 * Derives four chroma sub-block MVs from the luma MVs in s->mv[0][i],
 * fetches 4x4 chroma source blocks from the last picture, and runs
 * quarter-pel bilinear chroma MC per sub-block.
1059 static void vc1_mc_4mv_chroma4(VC1Context *v)
1061     MpegEncContext *s = &v->s;
1062     DSPContext *dsp = &v->s.dsp;
1063     uint8_t *srcU, *srcV;
1064     int uvsrc_x, uvsrc_y;
1065     int uvmx_field[4], uvmy_field[4];
// fieldmv: current block uses field MVs; selects sub-block layout and stride doubling
1067     int fieldmv = v->blk_mv_type[s->block_index[0]];
// rounding table applied to the vertical luma->chroma MV conversion in field-MV mode
1068     static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
1069     int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
1070     int v_edge_pos = s->v_edge_pos >> 1;
// no reference picture available (visible guard; the skipped action is on a missing line)
1072     if (!v->s.last_picture.f.data[0])
1074     if (s->flags & CODEC_FLAG_GRAY)
// derive the four chroma MVs from the four luma MVs
1077     for (i = 0; i < 4; i++) {
1078         tx = s->mv[0][i][0];
// luma qpel -> chroma: halve, rounding 3-in-4 positions up
1079         uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1080         ty = s->mv[0][i][1];
// field-MV path uses the rounding LUT on the low 4 bits
1082             uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1084             uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
// per-sub-block fetch + MC
1087     for (i = 0; i < 4; i++) {
1088         off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1089         uvsrc_x = s->mb_x * 8 +  (i & 1) * 4           + (uvmx_field[i] >> 2);
1090         uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1091         // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1092         uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width  >> 1);
1093         uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1094         srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1095         srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
// keep only the fractional part (doubled) for the bilinear interpolator
1096         uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1097         uvmy_field[i] = (uvmy_field[i] & 3) << 1;
// field parity adjustments near the top edge (actions are on missing lines)
1099         if (fieldmv && !(uvsrc_y & 1))
1101         if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
// use edge emulation when intensity compensation is on or the 5x5 fetch
// (doubled vertically for field MVs) would read outside the picture
1103         if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
1104             || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1105             || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1106             s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
1107                                     5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1108                                     s->h_edge_pos >> 1, v_edge_pos);
1109             s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1110                                     5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1111                                     s->h_edge_pos >> 1, v_edge_pos);
1112             srcU = s->edge_emu_buffer;
1113             srcV = s->edge_emu_buffer + 16;
1115             /* if we deal with intensity compensation we need to scale source blocks */
1116             if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1118                 uint8_t *src, *src2;
// remap the 5x5 source area through the chroma intensity-compensation LUT
1122                 for (j = 0; j < 5; j++) {
1123                     for (i = 0; i < 5; i++) {
1124                         src[i]  = v->lutuv[src[i]];
1125                         src2[i] = v->lutuv[src2[i]];
1127                     src  += s->uvlinesize << 1;
1128                     src2 += s->uvlinesize << 1;
// 4x4 chroma MC; tab[1] is the 4-pixel-wide variant; stride doubled for field MVs.
// Which of the two code paths runs is decided by a condition on a missing line
// (presumably the rounding flag — TODO confirm against the full source).
1133             dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1134             dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1136             v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1137             v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1142 /***********************************************************************/
1144 * @name VC-1 Block-level functions
1145 * @see 7.1.4, p91 and 8.1.1.7, p104
1151 * @brief Get macroblock-level quantizer scale
 * Expands in a context providing `v`, `s`, `gb`, and locals `mquant`,
 * `mqdiff`, `edges`. For DQPROFILE_ALL_MBS it reads the per-MB quantizer
 * from the bitstream (1-bit bilevel select, 3-bit differential, or escape
 * to a raw 5-bit value); for the edge profiles it computes an `edges`
 * bitmask (bit0=left, bit1=top, bit2=right, bit3=bottom) and overrides
 * mquant with v->altpq on matching picture-edge macroblocks.
 * NOTE(review): listing is truncated — several macro lines (braces/else)
 * are missing; no comments are inserted inside the macro because a
 * non-continued line would terminate the `\`-continuation chain.
1153 #define GET_MQUANT()                                           \
1154     if (v->dquantfrm) {                                        \
1156         if (v->dqprofile == DQPROFILE_ALL_MBS) {               \
1157             if (v->dqbilevel) {                                \
1158                 mquant = (get_bits1(gb)) ? v->altpq : v->pq;   \
1160                 mqdiff = get_bits(gb, 3);                      \
1162                     mquant = v->pq + mqdiff;                   \
1164                     mquant = get_bits(gb, 5);                  \
1167             if (v->dqprofile == DQPROFILE_SINGLE_EDGE)         \
1168                 edges = 1 << v->dqsbedge;                      \
1169             else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES)   \
1170                 edges = (3 << v->dqsbedge) % 15;               \
1171             else if (v->dqprofile == DQPROFILE_FOUR_EDGES)     \
1173             if ((edges&1) && !s->mb_x)                         \
1174                 mquant = v->altpq;                             \
1175             if ((edges&2) && s->first_slice_line)              \
1176                 mquant = v->altpq;                             \
1177             if ((edges&4) && s->mb_x == (s->mb_width - 1))     \
1178                 mquant = v->altpq;                             \
1179             if ((edges&8) && s->mb_y == (s->mb_height - 1))    \
1180                 mquant = v->altpq;                             \
1184 * @def GET_MVDATA(_dmv_x, _dmv_y)
1185 * @brief Get MV differentials
1186 * @see MVDATA decoding from 8.3.5.2, p120
1187 * @param _dmv_x Horizontal differential for decoded MV
1188 * @param _dmv_y Vertical differential for decoded MV
 *
 * Expands in a context providing `gb`, `s`, `v`, `index`, `index1`, `val`,
 * `sign`, `mb_has_coeffs`, `size_table`, `offset_table`. Reads one MVDATA
 * VLC code; index 35 escapes to raw k_x/k_y-bit values, the generic path
 * splits the code into x (index % 6) and y (index / 6) components decoded
 * as sign/magnitude against size_table/offset_table.
 * NOTE(review): listing is truncated — several macro lines are missing;
 * no comments are inserted inside the `\`-continued macro body.
1190 #define GET_MVDATA(_dmv_x, _dmv_y)                                      \
1191     index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1192                          VC1_MV_DIFF_VLC_BITS, 2);                      \
1194         mb_has_coeffs = 1;                                              \
1197         mb_has_coeffs = 0;                                              \
1200         _dmv_x = _dmv_y = 0;                                            \
1201     } else if (index == 35) {                                           \
1202         _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample);          \
1203         _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample);          \
1204     } else if (index == 36) {                                           \
1209         index1 = index % 6;                                             \
1210         if (!s->quarter_sample && index1 == 5) val = 1;                 \
1212         if (size_table[index1] - val > 0)                               \
1213             val = get_bits(gb, size_table[index1] - val);               \
1215         sign = 0 - (val&1);                                             \
1216         _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign;     \
1218         index1 = index / 6;                                             \
1219         if (!s->quarter_sample && index1 == 5) val = 1;                 \
1221         if (size_table[index1] - val > 0)                               \
1222             val = get_bits(gb, size_table[index1] - val);               \
1224         sign = 0 - (val & 1);                                           \
1225         _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign;   \
/* Decode interlaced-field MVDATA: reads one code from the 1-ref or 2-ref
 * interlaced MV VLC, then decodes the x and y differentials against
 * offset_table1/offset_table2 with optional extended-range (dmvrange) bits.
 * For two-reference fields the low bit of the y component carries the
 * reference-field prediction flag (*pred_flag).
 * NOTE(review): listing is truncated — braces, case labels and else
 * branches are missing between the surviving lines. */
1228 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1229                                                    int *dmv_y, int *pred_flag)
1232     int extend_x = 0, extend_y = 0;
1233     GetBitContext *gb = &v->s.gb;
1236     const int* offs_tab;
// VLC table width depends on the number of reference fields (numref)
1239         bits = VC1_2REF_MVDATA_VLC_BITS;
1242         bits = VC1_1REF_MVDATA_VLC_BITS;
// DMVRANGE selects which component(s) get one extra magnitude bit
1245     switch (v->dmvrange) {
1253         extend_x = extend_y = 1;
1256     index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
// escape path: raw k_x/k_y-bit MV components (guard is on a missing line)
1258         *dmv_x = get_bits(gb, v->k_x);
1259         *dmv_y = get_bits(gb, v->k_y);
// with two reference fields the LSB of y is the prediction flag
1261             *pred_flag = *dmv_y & 1;
1262             *dmv_y     = (*dmv_y + *pred_flag) >> 1;
// x component: sign/magnitude against offset table (table choice condition
// is on a missing line — presumably extend_x; TODO confirm)
1267             offs_tab = offset_table2;
1269             offs_tab = offset_table1;
1270         index1 = (index + 1) % 9;
1272             val = get_bits(gb, index1 + extend_x);
1273         sign = 0 -(val & 1);
1274         *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
// y component, analogous; for 2-ref fields the index is halved and the
// parity bit becomes the prediction flag
1278             offs_tab = offset_table2;
1280             offs_tab = offset_table1;
1281         index1 = (index + 1) / 9;
1282         if (index1 > v->numref) {
1283             val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1284             sign = 0 - (val & 1);
1285             *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1289             *pred_flag = index1 & 1;
/* Scale a horizontal MV predictor taken from the opposite field so it can be
 * used as a same-field predictor (field pictures, 8.3.5.4). Uses the
 * refdist-indexed scalesame1/scalesame2/scalezone1/zone1offset entries of
 * vc1_field_mvpred_scales and clips to the picture's MV range.
 * NOTE(review): listing is truncated — braces/else lines are missing. */
1293 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1295     int scaledvalue, refdist;
1296     int scalesame1, scalesame2;
1297     int scalezone1_x, zone1offset_x;
// table row depends on prediction direction XOR field parity
1298     int table_index = dir ^ v->second_field;
1300     if (v->s.pict_type != AV_PICTURE_TYPE_B)
1301         refdist = v->refdist;
1303         refdist = dir ? v->brfd : v->frfd;
1306     scalesame1    = vc1_field_mvpred_scales[table_index][1][refdist];
1307     scalesame2    = vc1_field_mvpred_scales[table_index][2][refdist];
1308     scalezone1_x  = vc1_field_mvpred_scales[table_index][3][refdist];
1309     zone1offset_x = vc1_field_mvpred_scales[table_index][5][refdist];
// two-zone piecewise-linear scaling: small MVs use scalesame1, larger MVs
// use scalesame2 with a sign-dependent zone offset
1314         if (FFABS(n) < scalezone1_x)
1315             scaledvalue = (n * scalesame1) >> 8;
1318                 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1320                 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1323     return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/* Vertical counterpart of scaleforsame_x(): scales an opposite-field MV
 * predictor for same-field use, with table columns 4/6 (y zone/offset) and
 * an asymmetric clip range when current and reference field parities differ.
 * NOTE(review): listing is truncated — braces/else lines are missing;
 * parameter `i` is unused in the visible lines. */
1326 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1328     int scaledvalue, refdist;
1329     int scalesame1, scalesame2;
1330     int scalezone1_y, zone1offset_y;
1331     int table_index = dir ^ v->second_field;
1333     if (v->s.pict_type != AV_PICTURE_TYPE_B)
1334         refdist = v->refdist;
1336         refdist = dir ? v->brfd : v->frfd;
1339     scalesame1    = vc1_field_mvpred_scales[table_index][1][refdist];
1340     scalesame2    = vc1_field_mvpred_scales[table_index][2][refdist];
1341     scalezone1_y  = vc1_field_mvpred_scales[table_index][4][refdist];
1342     zone1offset_y = vc1_field_mvpred_scales[table_index][6][refdist];
// two-zone piecewise-linear scaling, as in scaleforsame_x()
1347         if (FFABS(n) < scalezone1_y)
1348             scaledvalue = (n * scalesame1) >> 8;
1351                 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1353                 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
// clip range shifted by one when predicting bottom field from top field
1357     if (v->cur_field_type && !v->ref_field_type[dir])
1358         return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1360         return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/* Scale a horizontal same-field MV predictor for use as an opposite-field
 * predictor in B field pictures; coefficients come from
 * vc1_b_field_mvpred_scales indexed by the (clamped) backward ref distance.
 * NOTE(review): listing is truncated — braces/else lines are missing. */
1363 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1365     int scalezone1_x, zone1offset_x;
1366     int scaleopp1, scaleopp2, brfd;
1369     brfd = FFMIN(v->brfd, 3);
1370     scalezone1_x  = vc1_b_field_mvpred_scales[3][brfd];
1371     zone1offset_x = vc1_b_field_mvpred_scales[5][brfd];
1372     scaleopp1     = vc1_b_field_mvpred_scales[1][brfd];
1373     scaleopp2     = vc1_b_field_mvpred_scales[2][brfd];
// two-zone piecewise-linear scaling (same shape as scaleforsame_x)
1378         if (FFABS(n) < scalezone1_x)
1379             scaledvalue = (n * scaleopp1) >> 8;
1382                 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1384                 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1387     return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/* Vertical counterpart of scaleforopp_x(): table columns 4/6 for the y
 * zone/offset, with the same parity-dependent asymmetric clip as
 * scaleforsame_y().
 * NOTE(review): listing is truncated — braces/else lines are missing. */
1390 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1392     int scalezone1_y, zone1offset_y;
1393     int scaleopp1, scaleopp2, brfd;
1396     brfd = FFMIN(v->brfd, 3);
1397     scalezone1_y  = vc1_b_field_mvpred_scales[4][brfd];
1398     zone1offset_y = vc1_b_field_mvpred_scales[6][brfd];
1399     scaleopp1     = vc1_b_field_mvpred_scales[1][brfd];
1400     scaleopp2     = vc1_b_field_mvpred_scales[2][brfd];
1405         if (FFABS(n) < scalezone1_y)
1406             scaledvalue = (n * scaleopp1) >> 8;
1409                 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1411                 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
// clip range shifted by one when predicting bottom field from top field
1414     if (v->cur_field_type && !v->ref_field_type[dir]) {
1415         return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1417         return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/* Dispatch same-field MV-predictor scaling: for non-B pictures (or second
 * field / forward direction) use the per-component scaleforsame_x/_y
 * helpers; otherwise apply the single B-picture scalesame factor. The
 * result is shifted left by `hpel` to stay in quarter-pel units when the
 * picture is half-pel.
 * NOTE(review): listing is truncated — braces/else/return lines are
 * missing; the component selector condition (x vs y) is on a missing line. */
1421 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1424     int brfd, scalesame;
1425     int hpel = 1 - v->s.quarter_sample;
1428     if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1430             n = scaleforsame_y(v, i, n, dir) << hpel;
1432             n = scaleforsame_x(v, n, dir) << hpel;
1435     brfd      = FFMIN(v->brfd, 3);
1436     scalesame = vc1_b_field_mvpred_scales[0][brfd];
1438     n = (n * scalesame >> 8) << hpel;
/* Dispatch opposite-field MV-predictor scaling: B-picture first-field
 * backward prediction uses scaleforopp_x/_y; otherwise a single
 * refdist-indexed scaleopp factor from vc1_field_mvpred_scales is applied.
 * Result shifted by `hpel` as in scaleforsame().
 * NOTE(review): listing is truncated — braces/else/return lines are
 * missing; the component selector condition (x vs y) is on a missing line. */
1442 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1445     int refdist, scaleopp;
1446     int hpel = 1 - v->s.quarter_sample;
1449     if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1451             n = scaleforopp_y(v, n, dir) << hpel;
1453             n = scaleforopp_x(v, n) << hpel;
1456     if (v->s.pict_type != AV_PICTURE_TYPE_B)
1457         refdist = FFMIN(v->refdist, 3);
1459         refdist = dir ? v->brfd : v->frfd;
1460     scaleopp = vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1462     n = (n * scaleopp >> 8) << hpel;
1466 /** Predict and set motion vector
 *
 * Median-predicts the MV for luma block n from neighbours A (above),
 * B (above-diagonal) and C (left), handling field pictures by keeping
 * parallel same-field / opposite-field candidate sets scaled via
 * scaleforsame()/scaleforopp(). Applies the MV pull-back of 8.3.5.3.4,
 * the hybrid-predictor selection of 8.3.5.3.5, and stores the final MV
 * (signed-modulus wrapped to the MV range, 4.11) into s->mv and
 * current_picture.f.motion_val, duplicating it over the MB for 1-MV mode.
 *
 * NOTE(review): this listing is truncated — braces, else branches and many
 * statements are missing between the surviving lines; comments describe
 * only the visible code.
1468 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1469                                int mv1, int r_x, int r_y, uint8_t* is_intra,
1470                                int pred_flag, int dir)
1472     MpegEncContext *s = &v->s;
1473     int xy, wrap, off = 0;
1477     int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1479     int16_t samefield_pred[2],  oppfield_pred[2];
1480     int16_t samefield_predA[2], oppfield_predA[2];
1481     int16_t samefield_predB[2], oppfield_predB[2];
1482     int16_t samefield_predC[2], oppfield_predC[2];
1483     int16_t *predA, *predC;
1484     int a_valid, b_valid, c_valid;
1485     int hybridmv_thresh, y_bias = 0;
1487     if (v->mv_mode == MV_PMODE_MIXED_MV ||
1488         ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1492     /* scale MV difference to be quad-pel */
1493     dmv_x <<= 1 - s->quarter_sample;
1494     dmv_y <<= 1 - s->quarter_sample;
1496     wrap = s->b8_stride;
1497     xy   = s->block_index[n];
// intra block: zero all stored MVs and bail out (guard is on a missing line)
1500         s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
1501         s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
1502         s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
1503         s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
1504         if (mv1) { /* duplicate motion data for 1-MV block */
1505             s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0]        = 0;
1506             s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1]        = 0;
1507             s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0]     = 0;
1508             s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1]     = 0;
1509             s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1510             s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1511             v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1512             s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0]        = 0;
1513             s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1]        = 0;
// NOTE(review): the next two lines index [xy + wrap][0] / [xy + wrap + ...][1]
// inconsistently with their siblings (missing "+ v->blocks_off" on the [0]
// element) — looks like a latent bug; verify against the upstream fix.
1514             s->current_picture.f.motion_val[1][xy + wrap][0]                     = 0;
1515             s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1]     = 0;
1516             s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1517             s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
// gather neighbour predictors: C = left, A = above, B = above + `off`
1522     C = s->current_picture.f.motion_val[dir][xy -    1 + v->blocks_off];
1523     A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
1525         if (v->field_mode && mixedmv_pic)
1526             off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1528             off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1530         //in 4-MV mode different blocks have different B predictor position
1533             off = (s->mb_x > 0) ? -1 : 1;
1536             off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1545     B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
// neighbour availability: A/B need a row above, C needs a block to the left
1547     a_valid = !s->first_slice_line || (n == 2 || n == 3);
1548     b_valid = a_valid && (s->mb_width > 1);
1549     c_valid = s->mb_x || (n == 1 || n == 3);
1550     if (v->field_mode) {
// in field mode intra neighbours are not valid predictors
1551         a_valid = a_valid && !is_intra[xy - wrap];
1552         b_valid = b_valid && !is_intra[xy - wrap + off];
1553         c_valid = c_valid && !is_intra[xy -    1];
// for each valid neighbour build both a same-field and an opposite-field
// candidate; `f` = neighbour's field flag (1 = opposite field)
1557         f = v->mv_f[dir][xy - wrap + v->blocks_off];
1559         num_samefield += 1 - f;
1561             oppfield_predA[0]  = A[0];
1562             oppfield_predA[1]  = A[1];
1563             samefield_predA[0] = scaleforsame(v, 0, A[0], 0, dir);
1564             samefield_predA[1] = scaleforsame(v, n, A[1], 1, dir);
1566             samefield_predA[0] = A[0];
1567             samefield_predA[1] = A[1];
1569                 oppfield_predA[0] = scaleforopp(v, A[0], 0, dir);
1571                 oppfield_predA[1] = scaleforopp(v, A[1], 1, dir);
1574         samefield_predA[0] = samefield_predA[1] = 0;
1575         oppfield_predA[0]  = oppfield_predA[1]  = 0;
1578         f = v->mv_f[dir][xy - 1 + v->blocks_off];
1580         num_samefield += 1 - f;
1582             oppfield_predC[0]  = C[0];
1583             oppfield_predC[1]  = C[1];
1584             samefield_predC[0] = scaleforsame(v, 0, C[0], 0, dir);
1585             samefield_predC[1] = scaleforsame(v, n, C[1], 1, dir);
1587             samefield_predC[0] = C[0];
1588             samefield_predC[1] = C[1];
1590                 oppfield_predC[0] = scaleforopp(v, C[0], 0, dir);
1592                 oppfield_predC[1] = scaleforopp(v, C[1], 1, dir);
1595         samefield_predC[0] = samefield_predC[1] = 0;
1596         oppfield_predC[0]  = oppfield_predC[1]  = 0;
1599         f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1601         num_samefield += 1 - f;
1603             oppfield_predB[0]  = B[0];
1604             oppfield_predB[1]  = B[1];
1605             samefield_predB[0] = scaleforsame(v, 0, B[0], 0, dir);
1606             samefield_predB[1] = scaleforsame(v, n, B[1], 1, dir);
1608             samefield_predB[0] = B[0];
1609             samefield_predB[1] = B[1];
1611                 oppfield_predB[0] = scaleforopp(v, B[0], 0, dir);
1613                 oppfield_predB[1] = scaleforopp(v, B[1], 1, dir);
1616         samefield_predB[0] = samefield_predB[1] = 0;
1617         oppfield_predB[0]  = oppfield_predB[1]  = 0;
// single-candidate fallback: prefer A, then C, then B (outer guard on a
// missing line)
1621         samefield_pred[0] = samefield_predA[0];
1622         samefield_pred[1] = samefield_predA[1];
1623         oppfield_pred[0]  = oppfield_predA[0];
1624         oppfield_pred[1]  = oppfield_predA[1];
1625     } else if (c_valid) {
1626         samefield_pred[0] = samefield_predC[0];
1627         samefield_pred[1] = samefield_predC[1];
1628         oppfield_pred[0]  = oppfield_predC[0];
1629         oppfield_pred[1]  = oppfield_predC[1];
1630     } else if (b_valid) {
1631         samefield_pred[0] = samefield_predB[0];
1632         samefield_pred[1] = samefield_predB[1];
1633         oppfield_pred[0]  = oppfield_predB[0];
1634         oppfield_pred[1]  = oppfield_predB[1];
1636         samefield_pred[0] = samefield_pred[1] = 0;
1637         oppfield_pred[0]  = oppfield_pred[1]  = 0;
// with two or more candidates, use the component-wise median
1640     if (num_samefield + num_oppfield > 1) {
1641         samefield_pred[0] = mid_pred(samefield_predA[0], samefield_predB[0], samefield_predC[0]);
1642         samefield_pred[1] = mid_pred(samefield_predA[1], samefield_predB[1], samefield_predC[1]);
1644             oppfield_pred[0] = mid_pred(oppfield_predA[0], oppfield_predB[0], oppfield_predC[0]);
1646             oppfield_pred[1] = mid_pred(oppfield_predA[1], oppfield_predB[1], oppfield_predC[1]);
// choose between same-field and opposite-field prediction sets
1649     if (v->field_mode) {
1650         if (num_samefield <= num_oppfield)
1651             opposit = 1 - pred_flag;
1653             opposit = pred_flag;
1657         px = oppfield_pred[0];
1658         py = oppfield_pred[1];
1659         predA = oppfield_predA;
1660         predC = oppfield_predC;
1661         v->mv_f[dir][xy + v->blocks_off] = f = 1;
1662         v->ref_field_type[dir] = !v->cur_field_type;
1664         px = samefield_pred[0];
1665         py = samefield_pred[1];
1666         predA = samefield_predA;
1667         predC = samefield_predC;
1668         v->mv_f[dir][xy + v->blocks_off] = f = 0;
1669         v->ref_field_type[dir] = v->cur_field_type;
1672     /* Pullback MV as specified in 8.3.5.3.4 */
1673     if (!v->field_mode) {
// qx/qy: block position in quarter-pel units; X/Y: picture extent
1675         qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1676         qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1677         X  = (s->mb_width  << 6) - 4;
1678         Y  = (s->mb_height << 6) - 4;
1680             if (qx + px < -60) px = -60 - qx;
1681             if (qy + py < -60) py = -60 - qy;
1683             if (qx + px < -28) px = -28 - qx;
1684             if (qy + py < -28) py = -28 - qy;
1686         if (qx + px > X) px = X - qx;
1687         if (qy + py > Y) py = Y - qy;
1690     if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1691         /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1692         if (v->field_mode && !s->quarter_sample)
1693             hybridmv_thresh = 16;
1695             hybridmv_thresh = 32;
1696         if (a_valid && c_valid) {
1697             if (is_intra[xy - wrap])
1698                 sum = FFABS(px) + FFABS(py);
1700                 sum = FFABS(px - predA[0]) + FFABS(py - predA[1]);
1701             if (sum > hybridmv_thresh) {
1702                 if (get_bits1(&s->gb)) {    // read HYBRIDPRED bit
1710                 if (is_intra[xy - 1])
1711                     sum = FFABS(px) + FFABS(py);
1713                     sum = FFABS(px - predC[0]) + FFABS(py - predC[1]);
1714                 if (sum > hybridmv_thresh) {
1715                     if (get_bits1(&s->gb)) {
// field-picture vertical adjustments (bodies are on missing lines)
1727     if (v->field_mode && !s->quarter_sample) {
1731     if (v->field_mode && v->numref)
1733     if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1735     /* store MV using signed modulus of MV range defined in 4.11 */
1736     s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1737     s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1738     if (mv1) { /* duplicate motion data for 1-MV block */
1739         s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0]        = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1740         s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1]        = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1741         s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0]     = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1742         s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1]     = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1743         s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1744         s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1745         v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1746         v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1750 /** Predict and set motion vector for interlaced frame picture MBs
 *
 * Builds neighbour predictors A (left), B (above) and C (above-right, or
 * above-left at the right picture edge), averaging a pair of candidates
 * when current and neighbour blocks disagree on field vs frame MV type,
 * then median/priority-selects px,py based on how many valid neighbours
 * share the same field parity, and finally stores the MV with
 * signed-modulus wrapping, duplicating it for 1-MV and 2-field-MV modes.
 *
 * NOTE(review): this listing is truncated — braces, else branches and many
 * statements are missing between the surviving lines; comments describe
 * only the visible code.
1752 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1753                                      int mvn, int r_x, int r_y, uint8_t* is_intra)
1755     MpegEncContext *s = &v->s;
1756     int xy, wrap, off = 0;
1757     int A[2], B[2], C[2];
1759     int a_valid = 0, b_valid = 0, c_valid = 0;
1760     int field_a, field_b, field_c; // 0: same, 1: opposite
1761     int total_valid, num_samefield, num_oppfield;
1762     int pos_c, pos_b, n_adj;
1764     wrap = s->b8_stride;
1765     xy = s->block_index[n];
// intra block: zero all stored MVs (guard is on a missing line)
1768         s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
1769         s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
1770         s->current_picture.f.motion_val[1][xy][0] = 0;
1771         s->current_picture.f.motion_val[1][xy][1] = 0;
1772         if (mvn == 1) { /* duplicate motion data for 1-MV block */
1773             s->current_picture.f.motion_val[0][xy + 1][0]        = 0;
1774             s->current_picture.f.motion_val[0][xy + 1][1]        = 0;
1775             s->current_picture.f.motion_val[0][xy + wrap][0]     = 0;
1776             s->current_picture.f.motion_val[0][xy + wrap][1]     = 0;
1777             s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
1778             s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
1779             v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1780             s->current_picture.f.motion_val[1][xy + 1][0]        = 0;
1781             s->current_picture.f.motion_val[1][xy + 1][1]        = 0;
1782             s->current_picture.f.motion_val[1][xy + wrap][0]     = 0;
1783             s->current_picture.f.motion_val[1][xy + wrap][1]     = 0;
1784             s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
1785             s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
// off: vertical direction to the paired field row (top blocks look down,
// bottom blocks look up)
1790     off = ((n == 0) || (n == 1)) ? 1 : -1;
// Predict A (left neighbour)
1792     if (s->mb_x || (n == 1) || (n == 3)) {
1793         if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1794             || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1795             A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
1796             A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
1798         } else { // current block has frame mv and cand. has field MV (so average)
1799             A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
1800                     + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
1801             A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
1802                     + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
// left MB intra invalidates A for left-column sub-blocks
1805         if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1811     /* Predict B and C */
1812     B[0] = B[1] = C[0] = C[1] = 0;
1813     if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1814         if (!s->first_slice_line) {
1815             if (!v->is_intra[s->mb_x - s->mb_stride]) {
1818                 pos_b   = s->block_index[n_adj] - 2 * wrap;
1819                 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1820                     n_adj = (n & 2) | (n & 1);
1822                 B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
1823                 B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
// average with paired field candidate when MV types differ
1824                 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1825                     B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1826                     B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
// C from above-right when not at the right picture edge
1829             if (s->mb_width > 1) {
1830                 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1833                     pos_c   = s->block_index[2] - 2 * wrap + 2;
1834                     if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1837                     C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
1838                     C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
1839                     if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1840                         C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1841                         C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
// at the right edge, take C from above-left instead
1843                 if (s->mb_x == s->mb_width - 1) {
1844                     if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1847                         pos_c   = s->block_index[3] - 2 * wrap - 2;
1848                         if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1851                         C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
1852                         C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
1853                         if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1854                             C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1855                             C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
// lower sub-blocks: B/C come from the row above inside the same MB
1864         pos_b   = s->block_index[1];
1866         B[0]    = s->current_picture.f.motion_val[0][pos_b][0];
1867         B[1]    = s->current_picture.f.motion_val[0][pos_b][1];
1868         pos_c   = s->block_index[0];
1870         C[0]    = s->current_picture.f.motion_val[0][pos_c][0];
1871         C[1]    = s->current_picture.f.motion_val[0][pos_c][1];
1874     total_valid = a_valid + b_valid + c_valid;
1875     // check if predictor A is out of bounds
1876     if (!s->mb_x && !(n == 1 || n == 3)) {
1879     // check if predictor B is out of bounds
1880     if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1881         B[0] = B[1] = C[0] = C[1] = 0;
// frame-MV block: plain median (or single-candidate) selection
1883     if (!v->blk_mv_type[xy]) {
1884         if (s->mb_width == 1) {
1888             if (total_valid >= 2) {
1889                 px = mid_pred(A[0], B[0], C[0]);
1890                 py = mid_pred(A[1], B[1], C[1]);
1891             } else if (total_valid) {
1892                 if (a_valid) { px = A[0]; py = A[1]; }
1893                 if (b_valid) { px = B[0]; py = B[1]; }
1894                 if (c_valid) { px = C[0]; py = C[1]; }
// field-MV block: bit 2 of the y component encodes the field parity
1900             field_a = (A[1] & 4) ? 1 : 0;
1904             field_b = (B[1] & 4) ? 1 : 0;
1908             field_c = (C[1] & 4) ? 1 : 0;
1912         num_oppfield  = field_a + field_b + field_c;
1913         num_samefield = total_valid - num_oppfield;
1914         if (total_valid == 3) {
1915             if ((num_samefield == 3) || (num_oppfield == 3)) {
1916                 px = mid_pred(A[0], B[0], C[0]);
1917                 py = mid_pred(A[1], B[1], C[1]);
1918             } else if (num_samefield >= num_oppfield) {
1919                 /* take one MV from same field set depending on priority
1920                 the check for B may not be necessary */
1921                 px = !field_a ? A[0] : B[0];
1922                 py = !field_a ? A[1] : B[1];
1924                 px = field_a ? A[0] : B[0];
1925                 py = field_a ? A[1] : B[1];
1927         } else if (total_valid == 2) {
// majority-parity candidate in A > B > C priority order
1928             if (num_samefield >= num_oppfield) {
1929                 if (!field_a && a_valid) {
1932                 } else if (!field_b && b_valid) {
1935                 } else if (c_valid) {
1940                 if (field_a && a_valid) {
1943                 } else if (field_b && b_valid) {
1946                 } else if (c_valid) {
1951         } else if (total_valid == 1) {
1952             px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1953             py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1958     /* store MV using signed modulus of MV range defined in 4.11 */
1959     s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1960     s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1961     if (mvn == 1) { /* duplicate motion data for 1-MV block */
1962         s->current_picture.f.motion_val[0][xy +    1    ][0] = s->current_picture.f.motion_val[0][xy][0];
1963         s->current_picture.f.motion_val[0][xy +    1    ][1] = s->current_picture.f.motion_val[0][xy][1];
1964         s->current_picture.f.motion_val[0][xy + wrap    ][0] = s->current_picture.f.motion_val[0][xy][0];
1965         s->current_picture.f.motion_val[0][xy + wrap    ][1] = s->current_picture.f.motion_val[0][xy][1];
1966         s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1967         s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1968     } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1969         s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1970         s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1971         s->mv[0][n + 1][0] = s->mv[0][n][0];
1972         s->mv[0][n + 1][1] = s->mv[0][n][1];
1976 /** Motion compensation for direct or interpolated blocks in B-frames
1978 static void vc1_interp_mc(VC1Context *v)
1980 MpegEncContext *s = &v->s;
1981 DSPContext *dsp = &v->s.dsp;
1982 uint8_t *srcY, *srcU, *srcV;
1983 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1985 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1987 if (!v->field_mode && !v->s.next_picture.f.data[0])
1990 mx = s->mv[1][0][0];
1991 my = s->mv[1][0][1];
1992 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1993 uvmy = (my + ((my & 3) == 3)) >> 1;
1994 if (v->field_mode) {
1995 if (v->cur_field_type != v->ref_field_type[1])
1996 my = my - 2 + 4 * v->cur_field_type;
1997 uvmy = uvmy - 2 + 4 * v->cur_field_type;
2000 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
2001 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
2003 srcY = s->next_picture.f.data[0];
2004 srcU = s->next_picture.f.data[1];
2005 srcV = s->next_picture.f.data[2];
2007 src_x = s->mb_x * 16 + (mx >> 2);
2008 src_y = s->mb_y * 16 + (my >> 2);
2009 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
2010 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2012 if (v->profile != PROFILE_ADVANCED) {
2013 src_x = av_clip( src_x, -16, s->mb_width * 16);
2014 src_y = av_clip( src_y, -16, s->mb_height * 16);
2015 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
2016 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
2018 src_x = av_clip( src_x, -17, s->avctx->coded_width);
2019 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
2020 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
2021 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
2024 srcY += src_y * s->linesize + src_x;
2025 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2026 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2028 if (v->field_mode && v->ref_field_type[1]) {
2029 srcY += s->current_picture_ptr->f.linesize[0];
2030 srcU += s->current_picture_ptr->f.linesize[1];
2031 srcV += s->current_picture_ptr->f.linesize[2];
2034 /* for grayscale we should not try to read from unknown area */
2035 if (s->flags & CODEC_FLAG_GRAY) {
2036 srcU = s->edge_emu_buffer + 18 * s->linesize;
2037 srcV = s->edge_emu_buffer + 18 * s->linesize;
2041 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 16 - s->mspel * 3
2042 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my & 3) - 16 - s->mspel * 3) {
2043 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
2045 srcY -= s->mspel * (1 + s->linesize);
2046 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
2047 17 + s->mspel * 2, 17 + s->mspel * 2,
2048 src_x - s->mspel, src_y - s->mspel,
2049 s->h_edge_pos, v_edge_pos);
2050 srcY = s->edge_emu_buffer;
2051 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
2052 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2053 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
2054 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2057 /* if we deal with range reduction we need to scale source blocks */
2058 if (v->rangeredfrm) {
2060 uint8_t *src, *src2;
2063 for (j = 0; j < 17 + s->mspel * 2; j++) {
2064 for (i = 0; i < 17 + s->mspel * 2; i++)
2065 src[i] = ((src[i] - 128) >> 1) + 128;
2070 for (j = 0; j < 9; j++) {
2071 for (i = 0; i < 9; i++) {
2072 src[i] = ((src[i] - 128) >> 1) + 128;
2073 src2[i] = ((src2[i] - 128) >> 1) + 128;
2075 src += s->uvlinesize;
2076 src2 += s->uvlinesize;
2079 srcY += s->mspel * (1 + s->linesize);
2082 if (v->field_mode && v->cur_field_type) {
2083 off = s->current_picture_ptr->f.linesize[0];
2084 off_uv = s->current_picture_ptr->f.linesize[1];
2091 dxy = ((my & 3) << 2) | (mx & 3);
2092 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2093 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2094 srcY += s->linesize * 8;
2095 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2096 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2098 dxy = (my & 2) | ((mx & 2) >> 1);
2101 dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2103 dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2106 if (s->flags & CODEC_FLAG_GRAY) return;
2107 /* Chroma MC always uses qpel blilinear */
2108 uvmx = (uvmx & 3) << 1;
2109 uvmy = (uvmy & 3) << 1;
2111 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2112 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2114 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2115 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
// Scale a collocated MV by the B-frame fraction (bfrac); 'inv' selects the
// complementary (backward) fraction, 'qs' the quarter-sample mode.
// NOTE(review): intermediate lines are missing from this extract.
2119 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2123 #if B_FRACTION_DEN==256
// Fast path when the fraction denominator is 256: shifts instead of divides.
2127 return 2 * ((value * n + 255) >> 9);
2128 return (value * n + 128) >> 8;
2131 n -= B_FRACTION_DEN;
// Generic path: rounded division by the fraction denominator.
2133 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2134 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
// Interlaced-field variant of scale_mv; 'qs_last' carries the quarter-sample
// mode of the reference picture. Body lines are partially elided here.
2138 static av_always_inline int scale_mv_intfi(int value, int bfrac, int inv,
2139 int qs, int qs_last)
2147 return (value * n + 255) >> 9;
2149 return (value * n + 128) >> 8;
2152 /** Reconstruct motion vector for B-frame and do motion compensation
// NOTE(review): several branch headers/closers were elided from this extract;
// the visible pattern saves/restores v->mv_mode around intensity compensation
// (v->mv_mode2 holds the saved mode) before dispatching the MC calls.
2154 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2155 int direct, int mode)
2158 v->mv_mode2 = v->mv_mode;
2159 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2165 v->mv_mode = v->mv_mode2;
2168 if (mode == BMV_TYPE_INTERPOLATED) {
2172 v->mv_mode = v->mv_mode2;
// Backward MC with intensity compensation active: temporarily restore the
// original prediction mode before the single-MV motion compensation.
2176 if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2177 v->mv_mode = v->mv_mode2;
2178 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2180 v->mv_mode = v->mv_mode2;
// Predict the forward/backward motion vectors of a B-frame macroblock
// (median of neighbors A/B/C, spec pullback, signed-modulus wrap) and store
// them in s->mv and the current picture's motion_val arrays.
// NOTE(review): this extract is missing intermediate lines; tokens below are
// preserved verbatim, comments only were added.
2183 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2184 int direct, int mvtype)
2186 MpegEncContext *s = &v->s;
2187 int xy, wrap, off = 0;
2192 const uint8_t *is_intra = v->mb_type[0];
2196 /* scale MV difference to be quad-pel */
2197 dmv_x[0] <<= 1 - s->quarter_sample;
2198 dmv_y[0] <<= 1 - s->quarter_sample;
2199 dmv_x[1] <<= 1 - s->quarter_sample;
2200 dmv_y[1] <<= 1 - s->quarter_sample;
2202 wrap = s->b8_stride;
2203 xy = s->block_index[0];
// Intra MB (branch header elided): zero both direction MVs and return.
2206 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
2207 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
2208 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
2209 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
// Direct mode (progressive): derive both MVs from the collocated MV of the
// next (backward) reference picture, scaled by bfraction.
2212 if (!v->field_mode) {
2213 s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2214 s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2215 s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2216 s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2218 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2219 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2220 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2221 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2222 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2225 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2226 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2227 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2228 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
// ---- Forward MV prediction ----
2232 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2233 C = s->current_picture.f.motion_val[0][xy - 2];
2234 A = s->current_picture.f.motion_val[0][xy - wrap * 2];
// On the last MB of a row, take B from the left instead of the right.
2235 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2236 B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
2238 if (!s->mb_x) C[0] = C[1] = 0;
2239 if (!s->first_slice_line) { // predictor A is not out of bounds
2240 if (s->mb_width == 1) {
2244 px = mid_pred(A[0], B[0], C[0]);
2245 py = mid_pred(A[1], B[1], C[1]);
2247 } else if (s->mb_x) { // predictor C is not out of bounds
2253 /* Pullback MV as specified in 8.3.5.3.4 */
2256 if (v->profile < PROFILE_ADVANCED) {
2257 qx = (s->mb_x << 5);
2258 qy = (s->mb_y << 5);
2259 X = (s->mb_width << 5) - 4;
2260 Y = (s->mb_height << 5) - 4;
2261 if (qx + px < -28) px = -28 - qx;
2262 if (qy + py < -28) py = -28 - qy;
2263 if (qx + px > X) px = X - qx;
2264 if (qy + py > Y) py = Y - qy;
2266 qx = (s->mb_x << 6);
2267 qy = (s->mb_y << 6);
2268 X = (s->mb_width << 6) - 4;
2269 Y = (s->mb_height << 6) - 4;
2270 if (qx + px < -60) px = -60 - qx;
2271 if (qy + py < -60) py = -60 - qy;
2272 if (qx + px > X) px = X - qx;
2273 if (qy + py > Y) py = Y - qy;
2276 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
// NOTE(review): the "0 &&" deliberately disables this hybrid-prediction
// branch (dead code kept for reference) — confirm against upstream history.
2277 if (0 && !s->first_slice_line && s->mb_x) {
2278 if (is_intra[xy - wrap])
2279 sum = FFABS(px) + FFABS(py);
2281 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2283 if (get_bits1(&s->gb)) {
2291 if (is_intra[xy - 2])
2292 sum = FFABS(px) + FFABS(py);
2294 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2296 if (get_bits1(&s->gb)) {
2306 /* store MV using signed modulus of MV range defined in 4.11 */
2307 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2308 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
// ---- Backward MV prediction (mirror of the forward path) ----
2310 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2311 C = s->current_picture.f.motion_val[1][xy - 2];
2312 A = s->current_picture.f.motion_val[1][xy - wrap * 2];
2313 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2314 B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
2318 if (!s->first_slice_line) { // predictor A is not out of bounds
2319 if (s->mb_width == 1) {
2323 px = mid_pred(A[0], B[0], C[0]);
2324 py = mid_pred(A[1], B[1], C[1]);
2326 } else if (s->mb_x) { // predictor C is not out of bounds
2332 /* Pullback MV as specified in 8.3.5.3.4 */
2335 if (v->profile < PROFILE_ADVANCED) {
2336 qx = (s->mb_x << 5);
2337 qy = (s->mb_y << 5);
2338 X = (s->mb_width << 5) - 4;
2339 Y = (s->mb_height << 5) - 4;
2340 if (qx + px < -28) px = -28 - qx;
2341 if (qy + py < -28) py = -28 - qy;
2342 if (qx + px > X) px = X - qx;
2343 if (qy + py > Y) py = Y - qy;
2345 qx = (s->mb_x << 6);
2346 qy = (s->mb_y << 6);
2347 X = (s->mb_width << 6) - 4;
2348 Y = (s->mb_height << 6) - 4;
2349 if (qx + px < -60) px = -60 - qx;
2350 if (qy + py < -60) py = -60 - qy;
2351 if (qx + px > X) px = X - qx;
2352 if (qy + py > Y) py = Y - qy;
2355 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
// NOTE(review): disabled with "0 &&" exactly like the forward path above.
2356 if (0 && !s->first_slice_line && s->mb_x) {
2357 if (is_intra[xy - wrap])
2358 sum = FFABS(px) + FFABS(py);
2360 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2362 if (get_bits1(&s->gb)) {
2370 if (is_intra[xy - 2])
2371 sum = FFABS(px) + FFABS(py);
2373 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2375 if (get_bits1(&s->gb)) {
2385 /* store MV using signed modulus of MV range defined in 4.11 */
2387 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2388 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
// Publish the final MVs for later prediction from this picture.
2390 s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
2391 s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
2392 s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
2393 s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
// B-frame MV prediction for interlaced-field pictures. Handles direct mode
// (scaled collocated MVs plus reference-field majority vote), interpolated
// mode, and single-direction modes via vc1_pred_mv.
// NOTE(review): intermediate lines are elided in this extract.
2396 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2398 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2399 MpegEncContext *s = &v->s;
2400 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2402 if (v->bmvtype == BMV_TYPE_DIRECT) {
2403 int total_opp, k, f;
2404 if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2405 s->mv[0][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2406 v->bfraction, 0, s->quarter_sample, v->qs_last);
2407 s->mv[0][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2408 v->bfraction, 0, s->quarter_sample, v->qs_last);
2409 s->mv[1][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2410 v->bfraction, 1, s->quarter_sample, v->qs_last);
2411 s->mv[1][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2412 v->bfraction, 1, s->quarter_sample, v->qs_last);
// Majority vote over the 4 collocated blocks: if more than 2 used the
// opposite field, this MB references the opposite field too.
2414 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2415 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2416 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2417 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2418 f = (total_opp > 2) ? 1 : 0;
// Collocated MB was intra: fall back to zero MVs.
2420 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2421 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2424 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2425 for (k = 0; k < 4; k++) {
2426 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2427 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2428 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2429 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2430 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2431 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2435 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2436 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2437 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2440 if (dir) { // backward
2441 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
// Also refresh the opposite-direction predictor for the whole MB.
2442 if (n == 3 || mv1) {
2443 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2446 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2447 if (n == 3 || mv1) {
2448 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2453 /** Get predicted DC value for I-frames only
2454 * prediction dir: left=0, top=1
2455 * @param s MpegEncContext
2456 * @param overlap flag indicating that overlap filtering is used
2457 * @param pq integer part of picture quantizer
2458 * @param[in] n block index in the current MB
2459 * @param dc_val_ptr Pointer to DC predictor
2460 * @param dir_ptr Prediction direction for use in AC prediction
// NOTE(review): extract is missing intermediate lines (e.g. the return and
// some assignments); tokens below are preserved verbatim.
2462 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2463 int16_t **dc_val_ptr, int *dir_ptr)
2465 int a, b, c, wrap, pred, scale;
// Default DC predictor per quantizer scale (index 0 is unused, hence -1).
2467 static const uint16_t dcpred[32] = {
2468 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2469 114, 102, 93, 85, 79, 73, 68, 64,
2470 60, 57, 54, 51, 49, 47, 45, 43,
2471 41, 39, 38, 37, 35, 34, 33
2474 /* find prediction - wmv3_dc_scale always used here in fact */
2475 if (n < 4) scale = s->y_dc_scale;
2476 else scale = s->c_dc_scale;
2478 wrap = s->block_wrap[n];
2479 dc_val = s->dc_val[0] + s->block_index[n];
// Neighbors: b = top-left, a = top (c = left, assignment elided here).
2485 b = dc_val[ - 1 - wrap];
2486 a = dc_val[ - wrap];
2488 if (pq < 9 || !overlap) {
2489 /* Set outer values */
2490 if (s->first_slice_line && (n != 2 && n != 3))
2491 b = a = dcpred[scale];
2492 if (s->mb_x == 0 && (n != 1 && n != 3))
2493 b = c = dcpred[scale];
2495 /* Set outer values */
2496 if (s->first_slice_line && (n != 2 && n != 3))
2498 if (s->mb_x == 0 && (n != 1 && n != 3))
// Choose prediction direction: horizontal gradient vs vertical gradient.
2502 if (abs(a - b) <= abs(b - c)) {
2504 *dir_ptr = 1; // left
2507 *dir_ptr = 0; // top
2510 /* update predictor */
2511 *dc_val_ptr = &dc_val[0];
2516 /** Get predicted DC value
2517 * prediction dir: left=0, top=1
2518 * @param s MpegEncContext
2519 * @param overlap flag indicating that overlap filtering is used
2520 * @param pq integer part of picture quantizer
2521 * @param[in] n block index in the current MB
2522 * @param a_avail flag indicating top block availability
2523 * @param c_avail flag indicating left block availability
2524 * @param dc_val_ptr Pointer to DC predictor
2525 * @param dir_ptr Prediction direction for use in AC prediction
// NOTE(review): intermediate lines are elided from this extract.
2527 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2528 int a_avail, int c_avail,
2529 int16_t **dc_val_ptr, int *dir_ptr)
2531 int a, b, c, wrap, pred;
2533 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2536 wrap = s->block_wrap[n];
2537 dc_val = s->dc_val[0] + s->block_index[n];
// Neighbors: b = top-left, a = top (left neighbor c is fetched on an
// elided line).
2543 b = dc_val[ - 1 - wrap];
2544 a = dc_val[ - wrap];
2545 /* scale predictors if needed */
// Each predictor is rescaled from its MB's quantizer (q2) to the current
// one (q1) using the fixed-point ff_vc1_dqscale reciprocal table.
2546 q1 = s->current_picture.f.qscale_table[mb_pos];
2547 if (c_avail && (n != 1 && n != 3)) {
2548 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2550 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2552 if (a_avail && (n != 2 && n != 3)) {
2553 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2555 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2557 if (a_avail && c_avail && (n != 3)) {
2562 off -= s->mb_stride;
2563 q2 = s->current_picture.f.qscale_table[off];
2565 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
// Direction choice mirrors vc1_i_pred_dc, but honors availability flags.
2568 if (a_avail && c_avail) {
2569 if (abs(a - b) <= abs(b - c)) {
2571 *dir_ptr = 1; // left
2574 *dir_ptr = 0; // top
2576 } else if (a_avail) {
2578 *dir_ptr = 0; // top
2579 } else if (c_avail) {
2581 *dir_ptr = 1; // left
2584 *dir_ptr = 1; // left
2587 /* update predictor */
2588 *dc_val_ptr = &dc_val[0];
2592 /** @} */ // Block group
2595 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2596 * @see 7.1.4, p91 and 8.1.1.7, p104
// Predict the coded-block flag from the left (a), top-left (b) and top (c)
// neighbors, and hand back a pointer for the caller to store this block's
// flag. NOTE(review): the prediction formula and return are on elided lines.
2600 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2601 uint8_t **coded_block_ptr)
2603 int xy, wrap, pred, a, b, c;
2605 xy = s->block_index[n];
2606 wrap = s->b8_stride;
2611 a = s->coded_block[xy - 1 ];
2612 b = s->coded_block[xy - 1 - wrap];
2613 c = s->coded_block[xy - wrap];
2622 *coded_block_ptr = &s->coded_block[xy];
2628 * Decode one AC coefficient
2629 * @param v The VC1 context
2630 * @param last Last coefficient
2631 * @param skip How many zero coefficients to skip
2632 * @param value Decoded AC coefficient value
2633 * @param codingset set of VLC to decode data
// NOTE(review): intermediate lines (escape-mode branch headers, sign
// handling, output stores) are elided from this extract.
2636 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2637 int *value, int codingset)
2639 GetBitContext *gb = &v->s.gb;
2640 int index, escape, run = 0, level = 0, lst = 0;
2642 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
// Non-escape symbol: (run, level, last) come straight from the tables.
2643 if (index != vc1_ac_sizes[codingset] - 1) {
2644 run = vc1_index_decode_table[codingset][index][0];
2645 level = vc1_index_decode_table[codingset][index][1];
// Bitstream exhaustion also forces "last" to terminate the block safely.
2646 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
// Escape symbol: mode 0/1 add a delta to level or run, mode 2 reads
// run/level directly with explicit field widths.
2650 escape = decode210(gb);
2652 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2653 run = vc1_index_decode_table[codingset][index][0];
2654 level = vc1_index_decode_table[codingset][index][1];
2655 lst = index >= vc1_last_decode_table[codingset];
2658 level += vc1_last_delta_level_table[codingset][run];
2660 level += vc1_delta_level_table[codingset][run];
2663 run += vc1_last_delta_run_table[codingset][level] + 1;
2665 run += vc1_delta_run_table[codingset][level] + 1;
2671 lst = get_bits1(gb);
// First escape-3 symbol of the picture establishes the field widths.
2672 if (v->s.esc3_level_length == 0) {
2673 if (v->pq < 8 || v->dquantfrm) { // table 59
2674 v->s.esc3_level_length = get_bits(gb, 3);
2675 if (!v->s.esc3_level_length)
2676 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2677 } else { // table 60
2678 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2680 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2682 run = get_bits(gb, v->s.esc3_run_length);
2683 sign = get_bits1(gb);
2684 level = get_bits(gb, v->s.esc3_level_length);
2695 /** Decode intra block in intra frames - should be faster than decode_intra_block
2696 * @param v VC1Context
2697 * @param block block to decode
2698 * @param[in] n subblock index
2699 * @param coded are AC coeffs present or not
2700 * @param codingset set of VLC to decode data
// NOTE(review): this extract is missing intermediate lines (branch headers,
// returns); tokens below are preserved verbatim, comments only were added.
2702 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n,
2703 int coded, int codingset)
2705 GetBitContext *gb = &v->s.gb;
2706 MpegEncContext *s = &v->s;
2707 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2710 int16_t *ac_val, *ac_val2;
2713 /* Get DC differential */
// Luma blocks (n < 4) use the luma DC VLC, chroma blocks the chroma one.
2715 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2717 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2720 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2724 if (dcdiff == 119 /* ESC index value */) {
2725 /* TODO: Optimize */
// Escape value: read the raw differential; width depends on quantizer pq.
2726 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2727 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2728 else dcdiff = get_bits(gb, 8);
2731 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2732 else if (v->pq == 2)
2733 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
// Add the DC predictor chosen by vc1_i_pred_dc (also sets dc_pred_dir).
2740 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2743 /* Store the quantized DC coeff, used for prediction */
2745 block[0] = dcdiff * s->y_dc_scale;
2747 block[0] = dcdiff * s->c_dc_scale;
// ---- Coded path: decode AC run/level pairs ----
2758 int last = 0, skip, value;
2759 const uint8_t *zz_table;
2763 scale = v->pq * 2 + v->halfpq;
// Scan table selection (selecting conditions are on elided lines).
2767 zz_table = v->zz_8x8[2];
2769 zz_table = v->zz_8x8[3];
2771 zz_table = v->zz_8x8[1];
2773 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2775 if (dc_pred_dir) // left
2778 ac_val -= 16 * s->block_wrap[n];
2781 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2785 block[zz_table[i++]] = value;
2788 /* apply AC prediction if needed */
2790 if (dc_pred_dir) { // left
2791 for (k = 1; k < 8; k++)
2792 block[k << v->left_blk_sh] += ac_val[k];
2794 for (k = 1; k < 8; k++)
2795 block[k << v->top_blk_sh] += ac_val[k + 8];
2798 /* save AC coeffs for further prediction */
2799 for (k = 1; k < 8; k++) {
2800 ac_val2[k] = block[k << v->left_blk_sh];
2801 ac_val2[k + 8] = block[k << v->top_blk_sh];
2804 /* scale AC coeffs */
2805 for (k = 1; k < 64; k++)
2809 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2812 if (s->ac_pred) i = 63;
// ---- Not-coded path: predict ACs entirely from the neighbor ----
2818 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2822 scale = v->pq * 2 + v->halfpq;
2823 memset(ac_val2, 0, 16 * 2);
2824 if (dc_pred_dir) { // left
2827 memcpy(ac_val2, ac_val, 8 * 2);
2829 ac_val -= 16 * s->block_wrap[n];
2831 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2834 /* apply AC prediction if needed */
2836 if (dc_pred_dir) { //left
2837 for (k = 1; k < 8; k++) {
2838 block[k << v->left_blk_sh] = ac_val[k] * scale;
// Non-uniform quantizer: bias nonzero coefficients away from zero.
2839 if (!v->pquantizer && block[k << v->left_blk_sh])
2840 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2843 for (k = 1; k < 8; k++) {
2844 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2845 if (!v->pquantizer && block[k << v->top_blk_sh])
2846 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2852 s->block_last_index[n] = i;
2857 /** Decode intra block in intra frames - should be faster than decode_intra_block
2858 * @param v VC1Context
2859 * @param block block to decode
2860 * @param[in] n subblock number
2861 * @param coded are AC coeffs present or not
2862 * @param codingset set of VLC to decode data
2863 * @param mquant quantizer value for this macroblock
// NOTE(review): advanced-profile variant — takes a per-MB quantizer and
// rescales neighbor AC predictors across quantizer changes. This extract is
// missing intermediate lines; tokens below are preserved verbatim.
2865 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
2866 int coded, int codingset, int mquant)
2868 GetBitContext *gb = &v->s.gb;
2869 MpegEncContext *s = &v->s;
2870 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2873 int16_t *ac_val, *ac_val2;
2875 int a_avail = v->a_avail, c_avail = v->c_avail;
2876 int use_pred = s->ac_pred;
2879 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2881 /* Get DC differential */
2883 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2885 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2888 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2892 if (dcdiff == 119 /* ESC index value */) {
2893 /* TODO: Optimize */
2894 if (mquant == 1) dcdiff = get_bits(gb, 10);
2895 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2896 else dcdiff = get_bits(gb, 8);
2899 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2900 else if (mquant == 2)
2901 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2908 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2911 /* Store the quantized DC coeff, used for prediction */
2913 block[0] = dcdiff * s->y_dc_scale;
2915 block[0] = dcdiff * s->c_dc_scale;
2921 /* check if AC is needed at all */
2922 if (!a_avail && !c_avail)
2924 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
// Half-step correction applies only when the MB quantizer equals pq.
2927 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2929 if (dc_pred_dir) // left
2932 ac_val -= 16 * s->block_wrap[n];
// q1 = current MB quantizer, q2 = quantizer of the predicting neighbor.
2934 q1 = s->current_picture.f.qscale_table[mb_pos];
2935 if ( dc_pred_dir && c_avail && mb_pos)
2936 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2937 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2938 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2939 if ( dc_pred_dir && n == 1)
2941 if (!dc_pred_dir && n == 2)
// ---- Coded path ----
2947 int last = 0, skip, value;
2948 const uint8_t *zz_table;
// Interlaced frame coding (fcm == 1) uses the interlaced scan table.
2952 if (!use_pred && v->fcm == 1) {
2953 zz_table = v->zzi_8x8;
2955 if (!dc_pred_dir) // top
2956 zz_table = v->zz_8x8[2];
2958 zz_table = v->zz_8x8[3];
2962 zz_table = v->zz_8x8[1];
2964 zz_table = v->zzi_8x8;
2968 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2972 block[zz_table[i++]] = value;
2975 /* apply AC prediction if needed */
2977 /* scale predictors if needed*/
// Rescale neighbor ACs from the neighbor's quantizer step to ours using
// the fixed-point reciprocal table ff_vc1_dqscale.
2978 if (q2 && q1 != q2) {
2979 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2980 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2982 if (dc_pred_dir) { // left
2983 for (k = 1; k < 8; k++)
2984 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2986 for (k = 1; k < 8; k++)
2987 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2990 if (dc_pred_dir) { //left
2991 for (k = 1; k < 8; k++)
2992 block[k << v->left_blk_sh] += ac_val[k];
2994 for (k = 1; k < 8; k++)
2995 block[k << v->top_blk_sh] += ac_val[k + 8];
2999 /* save AC coeffs for further prediction */
3000 for (k = 1; k < 8; k++) {
3001 ac_val2[k ] = block[k << v->left_blk_sh];
3002 ac_val2[k + 8] = block[k << v->top_blk_sh];
3005 /* scale AC coeffs */
3006 for (k = 1; k < 64; k++)
3010 block[k] += (block[k] < 0) ? -mquant : mquant;
3013 if (use_pred) i = 63;
3014 } else { // no AC coeffs
3017 memset(ac_val2, 0, 16 * 2);
3018 if (dc_pred_dir) { // left
3020 memcpy(ac_val2, ac_val, 8 * 2);
3021 if (q2 && q1 != q2) {
3022 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3023 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3024 for (k = 1; k < 8; k++)
3025 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3030 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3031 if (q2 && q1 != q2) {
3032 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3033 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3034 for (k = 1; k < 8; k++)
3035 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3040 /* apply AC prediction if needed */
3042 if (dc_pred_dir) { // left
3043 for (k = 1; k < 8; k++) {
3044 block[k << v->left_blk_sh] = ac_val2[k] * scale;
// Non-uniform quantizer: bias nonzero coefficients away from zero.
3045 if (!v->pquantizer && block[k << v->left_blk_sh])
3046 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3049 for (k = 1; k < 8; k++) {
3050 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3051 if (!v->pquantizer && block[k << v->top_blk_sh])
3052 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3058 s->block_last_index[n] = i;
3063 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3064 * @param v VC1Context
3065 * @param block block to decode
3066 * @param[in] n subblock index
3067 * @param coded are AC coeffs present or not
3068 * @param mquant block quantizer
3069 * @param codingset set of VLC to decode data
// NOTE(review): this extract is missing intermediate lines; tokens below are
// preserved verbatim, comments only were added.
3071 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
3072 int coded, int mquant, int codingset)
3074 GetBitContext *gb = &v->s.gb;
3075 MpegEncContext *s = &v->s;
3076 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3079 int16_t *ac_val, *ac_val2;
3081 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3082 int a_avail = v->a_avail, c_avail = v->c_avail;
3083 int use_pred = s->ac_pred;
3087 s->dsp.clear_block(block);
3089 /* XXX: Guard against dumb values of mquant */
3090 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3092 /* Set DC scale - y and c use the same */
3093 s->y_dc_scale = s->y_dc_scale_table[mquant];
3094 s->c_dc_scale = s->c_dc_scale_table[mquant];
3096 /* Get DC differential */
3098 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3100 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3103 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3107 if (dcdiff == 119 /* ESC index value */) {
3108 /* TODO: Optimize */
3109 if (mquant == 1) dcdiff = get_bits(gb, 10);
3110 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3111 else dcdiff = get_bits(gb, 8);
3114 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3115 else if (mquant == 2)
3116 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3123 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3126 /* Store the quantized DC coeff, used for prediction */
3129 block[0] = dcdiff * s->y_dc_scale;
3131 block[0] = dcdiff * s->c_dc_scale;
3137 /* check if AC is needed at all and adjust direction if needed */
3138 if (!a_avail) dc_pred_dir = 1;
3139 if (!c_avail) dc_pred_dir = 0;
3140 if (!a_avail && !c_avail) use_pred = 0;
3141 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3144 scale = mquant * 2 + v->halfpq;
3146 if (dc_pred_dir) //left
3149 ac_val -= 16 * s->block_wrap[n];
// q1 = current MB quantizer, q2 = quantizer of the predicting neighbor.
3151 q1 = s->current_picture.f.qscale_table[mb_pos];
3152 if (dc_pred_dir && c_avail && mb_pos)
3153 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3154 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3155 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
3156 if ( dc_pred_dir && n == 1)
3158 if (!dc_pred_dir && n == 2)
3160 if (n == 3) q2 = q1;
// ---- Coded path ----
3163 int last = 0, skip, value;
3167 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
// Scan-order store; table depends on AC prediction and frame coding mode.
3172 block[v->zz_8x8[0][i++]] = value;
3174 if (use_pred && (v->fcm == 1)) {
3175 if (!dc_pred_dir) // top
3176 block[v->zz_8x8[2][i++]] = value;
3178 block[v->zz_8x8[3][i++]] = value;
3180 block[v->zzi_8x8[i++]] = value;
3185 /* apply AC prediction if needed */
3187 /* scale predictors if needed*/
3188 if (q2 && q1 != q2) {
3189 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3190 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3192 if (dc_pred_dir) { // left
3193 for (k = 1; k < 8; k++)
3194 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3196 for (k = 1; k < 8; k++)
3197 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3200 if (dc_pred_dir) { // left
3201 for (k = 1; k < 8; k++)
3202 block[k << v->left_blk_sh] += ac_val[k];
3204 for (k = 1; k < 8; k++)
3205 block[k << v->top_blk_sh] += ac_val[k + 8];
3209 /* save AC coeffs for further prediction */
3210 for (k = 1; k < 8; k++) {
3211 ac_val2[k ] = block[k << v->left_blk_sh];
3212 ac_val2[k + 8] = block[k << v->top_blk_sh];
3215 /* scale AC coeffs */
3216 for (k = 1; k < 64; k++)
3220 block[k] += (block[k] < 0) ? -mquant : mquant;
3223 if (use_pred) i = 63;
3224 } else { // no AC coeffs
3227 memset(ac_val2, 0, 16 * 2);
3228 if (dc_pred_dir) { // left
3230 memcpy(ac_val2, ac_val, 8 * 2);
3231 if (q2 && q1 != q2) {
3232 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3233 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3234 for (k = 1; k < 8; k++)
3235 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3240 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3241 if (q2 && q1 != q2) {
3242 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3243 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3244 for (k = 1; k < 8; k++)
3245 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3250 /* apply AC prediction if needed */
3252 if (dc_pred_dir) { // left
3253 for (k = 1; k < 8; k++) {
3254 block[k << v->left_blk_sh] = ac_val2[k] * scale;
// Non-uniform quantizer: bias nonzero coefficients away from zero.
3255 if (!v->pquantizer && block[k << v->left_blk_sh])
3256 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3259 for (k = 1; k < 8; k++) {
3260 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3261 if (!v->pquantizer && block[k << v->top_blk_sh])
3262 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3268 s->block_last_index[n] = i;
/**
 * Decode one inter-coded block of a P frame: parse the per-block transform
 * type (8x8, two 8x4, two 4x8 or four 4x4 sub-blocks) and its sub-block
 * pattern, decode the AC run/level coefficients, then inverse-transform and
 * add the residual into dst (transforms are skipped when skip_block is set).
 * NOTE(review): the caller uses the return value as the coded sub-block
 * pattern ("pat") and the final parameter accumulates the transform type
 * into *ttmb_out -- confirm against the full function body.
 */
3275 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n,
3276                               int mquant, int ttmb, int first_block,
3277                               uint8_t *dst, int linesize, int skip_block,
3280     MpegEncContext *s = &v->s;
3281     GetBitContext *gb = &s->gb;
3284     int scale, off, idx, last, skip, value;
3285     int ttblk = ttmb & 7;   /* transform type for this block: low 3 bits of MB-level ttmb */
3288     s->dsp.clear_block(block);
3291         ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3293     if (ttblk == TT_4X4) {
3294         subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3296     if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3297         && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3298             || (!v->res_rtm_flag && !first_block))) {
3299         subblkpat = decode012(gb);
3301             subblkpat ^= 3; // swap decoded pattern bits
3302         if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3304         if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
     /* AC scale = 2 * MQUANT, plus the half-QP step when this MB uses the picture quantizer */
3307     scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3309     // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3310     if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3311         subblkpat = 2 - (ttblk == TT_8X4_TOP);
3314     if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3315         subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3324             vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3329                 idx = v->zz_8x8[0][i++];
3331                 idx = v->zzi_8x8[i++];
3332             block[idx] = value * scale;
     /* non-uniform quantizer: push coefficient magnitude away from zero by MQUANT */
3334                 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3338                 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3340                 v->vc1dsp.vc1_inv_trans_8x8(block);
3341                 s->dsp.add_pixels_clamped(block, dst, linesize);
3346         pat = ~subblkpat & 0xF;
3347         for (j = 0; j < 4; j++) {
3348             last = subblkpat & (1 << (3 - j));
3350             off = (j & 1) * 4 + (j & 2) * 16;
3352                     vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3357                         idx = ff_vc1_simple_progressive_4x4_zz[i++];
3359                         idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3360                     block[idx + off] = value * scale;
3362                         block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3364                 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3366                         v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3368                         v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3373         pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3374         for (j = 0; j < 2; j++) {
3375             last = subblkpat & (1 << (1 - j));
3379                     vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3384                         idx = v->zz_8x4[i++] + off;
3386                         idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3387                     block[idx] = value * scale;
3389                         block[idx] += (block[idx] < 0) ? -mquant : mquant;
3391                 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3393                         v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3395                         v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
3400         pat = ~(subblkpat * 5) & 0xF;
3401         for (j = 0; j < 2; j++) {
3402             last = subblkpat & (1 << (1 - j));
3406                     vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3411                         idx = v->zz_4x8[i++] + off;
3413                         idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3414                     block[idx] = value * scale;
3416                         block[idx] += (block[idx] < 0) ? -mquant : mquant;
3418                 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3420                         v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3422                         v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
     /* record this block's transform type in the MB-level map (4 bits per block) */
3428         *ttmb_out |= ttblk << (n * 4);
3432 /** @} */ // Macroblock group
/* MV differential decoding LUTs: per-index escape size and offset.
 * NOTE(review): presumably consumed by the GET_MVDATA macro used below --
 * confirm against the macro definition elsewhere in this file. */
3434 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3435 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/**
 * Apply the in-loop vertical deblocking filter for one block of a P frame.
 * Works one MB row behind the current decoding position (reads v->cbp /
 * v->is_intra at s->mb_x - s->mb_stride) so that the MB below the filtered
 * edge is already reconstructed.
 * @param block_num  block index within the MB: 0-3 luma, 4-5 chroma
 */
3437 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3439     MpegEncContext *s = &v->s;
3440     int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3441         block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3442         mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3443         block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3444     int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3447     if (block_num > 3) {
3448         dst = s->dest[block_num - 3];
3450         dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
     /* filter the bottom edge only if there is an MB below (or for the top luma blocks) */
3452     if (s->mb_y != s->end_mb_y || block_num < 2) {
3456         if (block_num > 3) {
3457             bottom_cbp      = v->cbp[s->mb_x]      >> (block_num * 4);
3458             bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3459             mv              = &v->luma_mv[s->mb_x - s->mb_stride];
3460             mv_stride       = s->mb_stride;
3462             bottom_cbp      = (block_num < 2) ? (mb_cbp               >> ((block_num + 2) * 4))
3463                                               : (v->cbp[s->mb_x]      >> ((block_num - 2) * 4));
3464             bottom_is_intra = (block_num < 2) ? (mb_is_intra          >> ((block_num + 2) * 4))
3465                                               : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3466             mv_stride       = s->b8_stride;
3467             mv              = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
     /* filter the full 8-pel edge when either side is intra or the MVs differ across it */
3470         if (bottom_is_intra & 1 || block_is_intra & 1 ||
3471             mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3472             v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3474             idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3476                 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3479                     v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3481                     v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
     /* internal horizontal edge: only for transforms that split the block vertically */
3486     dst -= 4 * linesize;
3487     ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3488     if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3489         idx = (block_cbp | (block_cbp >> 2)) & 3;
3491             v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3494                 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3496                 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/**
 * Apply the in-loop horizontal deblocking filter for one block of a P frame.
 * Works one MB behind and one row above the current decoding position (reads
 * v->cbp / v->is_intra at s->mb_x - 1 - s->mb_stride) so the MB to the right
 * of the filtered edge is already reconstructed.
 * @param block_num  block index within the MB: 0-3 luma, 4-5 chroma
 */
3501 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3503     MpegEncContext *s = &v->s;
3504     int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3505         block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3506         mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3507         block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3508     int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3511     if (block_num > 3) {
3512         dst = s->dest[block_num - 3] - 8 * linesize;
3514         dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3517     if (s->mb_x != s->mb_width || !(block_num & 5)) {
3520         if (block_num > 3) {
3521             right_cbp      = v->cbp[s->mb_x - s->mb_stride]      >> (block_num * 4);
3522             right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3523             mv             = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3525             right_cbp      = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride]      >> ((block_num - 1) * 4))
3526                                              : (mb_cbp                              >> ((block_num + 1) * 4));
3527             right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3528                                              : (mb_is_intra                         >> ((block_num + 1) * 4));
3529             mv             = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
     /* filter the full 8-pel edge when either side is intra or the MVs differ across it */
3531         if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3532             v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3534             idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3536                 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3539                     v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3541                     v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
     /* internal vertical edge: only for transforms that split the block horizontally */
3547     ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3548     if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3549         idx = (block_cbp | (block_cbp >> 1)) & 5;
3551             v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3554                 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3556                 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/**
 * Run both deblocking filters for all six blocks of the current P-frame MB.
 * Vertical filtering is applied here; horizontal filtering is deliberately
 * run one MB later (see comment below), with a catch-up pass at row end.
 */
3561 static void vc1_apply_p_loop_filter(VC1Context *v)
3563     MpegEncContext *s = &v->s;
3566     for (i = 0; i < 6; i++) {
3567         vc1_apply_p_v_loop_filter(v, i);
3570     /* V always precedes H, therefore we run H one MB before V;
3571      * at the end of a row, we catch up to complete the row */
3573         for (i = 0; i < 6; i++) {
3574             vc1_apply_p_h_loop_filter(v, i);
3576     if (s->mb_x == s->mb_width - 1) {
3578         ff_update_block_index(s);
3579         for (i = 0; i < 6; i++) {
3580             vc1_apply_p_h_loop_filter(v, i);
3586 /** Decode one P-frame MB
3588 static int vc1_decode_p_mb(VC1Context *v)
     /* Handles progressive P macroblocks in both 1MV and 4MV modes, including
      * skipped MBs; writes cbp / transform-type / intra maps for the loop filter. */
3590     MpegEncContext *s = &v->s;
3591     GetBitContext *gb = &s->gb;
3593     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3594     int cbp; /* cbp decoding stuff */
3595     int mqdiff, mquant; /* MB quantization */
3596     int ttmb = v->ttfrm; /* MB Transform type */
3598     int mb_has_coeffs = 1; /* last_flag */
3599     int dmv_x, dmv_y; /* Differential MV components */
3600     int index, index1; /* LUT indexes */
3601     int val, sign; /* temp values */
3602     int first_block = 1;
3604     int skipped, fourmv;
3605     int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3607     mquant = v->pq; /* Lossy initialization */
     /* MV-type and skip flags: either raw-coded per MB or from a decoded bitplane */
3609     if (v->mv_type_is_raw)
3610         fourmv = get_bits1(gb);
3612         fourmv = v->mv_type_mb_plane[mb_pos];
3614         skipped = get_bits1(gb);
3616         skipped = v->s.mbskip_table[mb_pos];
3618     if (!fourmv) { /* 1MV mode */
3620             GET_MVDATA(dmv_x, dmv_y);
3623                 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3624                 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3626             s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3627             vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3629             /* FIXME Set DC val for inter block ? */
3630             if (s->mb_intra && !mb_has_coeffs) {
3632                 s->ac_pred = get_bits1(gb);
3634             } else if (mb_has_coeffs) {
3636                     s->ac_pred = get_bits1(gb);
3637                 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3643             s->current_picture.f.qscale_table[mb_pos] = mquant;
3645             if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3646                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3647                                 VC1_TTMB_VLC_BITS, 2);
3648             if (!s->mb_intra) vc1_mc_1mv(v, 0);
3650             for (i = 0; i < 6; i++) {
3651                 s->dc_val[0][s->block_index[i]] = 0;
3653                 val = ((cbp >> (5 - i)) & 1);
3654                 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3655                 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3657                     /* check if prediction blocks A and C are available */
3658                     v->a_avail = v->c_avail = 0;
3659                     if (i == 2 || i == 3 || !s->first_slice_line)
3660                         v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3661                     if (i == 1 || i == 3 || s->mb_x)
3662                         v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3664                     vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3665                                            (i & 4) ? v->codingset2 : v->codingset);
3666                     if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3668                     v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3670                         for (j = 0; j < 64; j++)
3671                             s->block[i][j] <<= 1;
3672                     s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3673                     if (v->pq >= 9 && v->overlap) {
3675                             v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3677                             v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3679                     block_cbp |= 0xF << (i << 2);
3680                     block_intra |= 1 << i;
3682                     pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3683                                              s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3684                                              (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3685                     block_cbp |= pat << (i << 2);
3686                     if (!v->ttmbf && ttmb < 8)
     /* skipped MB in 1MV mode: clear block state and predict MV with zero differential */
3693             for (i = 0; i < 6; i++) {
3694                 v->mb_type[0][s->block_index[i]] = 0;
3695                 s->dc_val[0][s->block_index[i]] = 0;
3697             s->current_picture.f.mb_type[mb_pos]      = MB_TYPE_SKIP;
3698             s->current_picture.f.qscale_table[mb_pos] = 0;
3699             vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3702     } else { // 4MV mode
3703         if (!skipped /* unskipped MB */) {
3704             int intra_count = 0, coded_inter = 0;
3705             int is_intra[6], is_coded[6];
3707             cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3708             for (i = 0; i < 6; i++) {
3709                 val = ((cbp >> (5 - i)) & 1);
3710                 s->dc_val[0][s->block_index[i]] = 0;
3717                     GET_MVDATA(dmv_x, dmv_y);
3719                     vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3721                         vc1_mc_4mv_luma(v, i, 0);
3722                     intra_count += s->mb_intra;
3723                     is_intra[i]  = s->mb_intra;
3724                     is_coded[i]  = mb_has_coeffs;
     /* chroma blocks are intra if at least 3 of the 4 luma blocks are intra */
3727                     is_intra[i] = (intra_count >= 3);
3731                         vc1_mc_4mv_chroma(v, 0);
3732                 v->mb_type[0][s->block_index[i]] = is_intra[i];
3734                     coded_inter = !is_intra[i] & is_coded[i];
3736             // if there are no coded blocks then don't do anything more
3738             if (!intra_count && !coded_inter)
3741             s->current_picture.f.qscale_table[mb_pos] = mquant;
3742             /* test if block is intra and has pred */
3745                 for (i = 0; i < 6; i++)
3747                     if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3748                         || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3754                     s->ac_pred = get_bits1(gb);
3758             if (!v->ttmbf && coded_inter)
3759                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3760             for (i = 0; i < 6; i++) {
3762                 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3763                 s->mb_intra = is_intra[i];
3765                     /* check if prediction blocks A and C are available */
3766                     v->a_avail = v->c_avail = 0;
3767                     if (i == 2 || i == 3 || !s->first_slice_line)
3768                         v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3769                     if (i == 1 || i == 3 || s->mb_x)
3770                         v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3772                     vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3773                                            (i & 4) ? v->codingset2 : v->codingset);
3774                     if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3776                     v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3778                         for (j = 0; j < 64; j++)
3779                             s->block[i][j] <<= 1;
3780                     s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3781                                                      (i & 4) ? s->uvlinesize : s->linesize);
3782                     if (v->pq >= 9 && v->overlap) {
3784                             v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3786                             v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3788                     block_cbp |= 0xF << (i << 2);
3789                     block_intra |= 1 << i;
3790                 } else if (is_coded[i]) {
3791                     pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3792                                              first_block, s->dest[dst_idx] + off,
3793                                              (i & 4) ? s->uvlinesize : s->linesize,
3794                                              (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3796                     block_cbp |= pat << (i << 2);
3797                     if (!v->ttmbf && ttmb < 8)
3802         } else { // skipped MB
3804             s->current_picture.f.qscale_table[mb_pos] = 0;
3805             for (i = 0; i < 6; i++) {
3806                 v->mb_type[0][s->block_index[i]] = 0;
3807                 s->dc_val[0][s->block_index[i]] = 0;
3809             for (i = 0; i < 4; i++) {
3810                 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3811                 vc1_mc_4mv_luma(v, i, 0);
3813             vc1_mc_4mv_chroma(v, 0);
3814             s->current_picture.f.qscale_table[mb_pos] = 0;
     /* publish per-MB maps consumed by vc1_apply_p_*_loop_filter one row later */
3818     v->cbp[s->mb_x]      = block_cbp;
3819     v->ttblk[s->mb_x]    = block_tt;
3820     v->is_intra[s->mb_x] = block_intra;
3825 /* Decode one macroblock in an interlaced frame p picture */
3827 static int vc1_decode_p_mb_intfr(VC1Context *v)
     /* Dispatches on the MBMODE VLC (1MV / 2MV-field / 4MV / 4MV-field / intra),
      * decodes MVs, coefficients and field-transform flags for one interlaced MB. */
3829     MpegEncContext *s = &v->s;
3830     GetBitContext *gb = &s->gb;
3832     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3833     int cbp = 0; /* cbp decoding stuff */
3834     int mqdiff, mquant; /* MB quantization */
3835     int ttmb = v->ttfrm; /* MB Transform type */
3837     int mb_has_coeffs = 1; /* last_flag */
3838     int dmv_x, dmv_y; /* Differential MV components */
3839     int val; /* temp value */
3840     int first_block = 1;
3842     int skipped, fourmv = 0, twomv = 0;
3843     int block_cbp = 0, pat, block_tt = 0;
3844     int idx_mbmode = 0, mvbp;
3845     int stride_y, fieldtx;
3847     mquant = v->pq; /* Lossy initialization */
3850         skipped = get_bits1(gb);
3852         skipped = v->s.mbskip_table[mb_pos];
3854         if (v->fourmvswitch)
3855             idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3857             idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3858         switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3859         /* store the motion vector type in a flag (useful later) */
3860         case MV_PMODE_INTFR_4MV:
3862             v->blk_mv_type[s->block_index[0]] = 0;
3863             v->blk_mv_type[s->block_index[1]] = 0;
3864             v->blk_mv_type[s->block_index[2]] = 0;
3865             v->blk_mv_type[s->block_index[3]] = 0;
3867         case MV_PMODE_INTFR_4MV_FIELD:
3869             v->blk_mv_type[s->block_index[0]] = 1;
3870             v->blk_mv_type[s->block_index[1]] = 1;
3871             v->blk_mv_type[s->block_index[2]] = 1;
3872             v->blk_mv_type[s->block_index[3]] = 1;
3874         case MV_PMODE_INTFR_2MV_FIELD:
3876             v->blk_mv_type[s->block_index[0]] = 1;
3877             v->blk_mv_type[s->block_index[1]] = 1;
3878             v->blk_mv_type[s->block_index[2]] = 1;
3879             v->blk_mv_type[s->block_index[3]] = 1;
3881         case MV_PMODE_INTFR_1MV:
3882             v->blk_mv_type[s->block_index[0]] = 0;
3883             v->blk_mv_type[s->block_index[1]] = 0;
3884             v->blk_mv_type[s->block_index[2]] = 0;
3885             v->blk_mv_type[s->block_index[3]] = 0;
3888         if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3889             s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3890             s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3891             s->current_picture.f.mb_type[mb_pos]                     = MB_TYPE_INTRA;
3892             s->mb_intra = v->is_intra[s->mb_x] = 1;
3893             for (i = 0; i < 6; i++)
3894                 v->mb_type[0][s->block_index[i]] = 1;
3895             fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3896             mb_has_coeffs = get_bits1(gb);
3898                 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3899             v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3901             s->current_picture.f.qscale_table[mb_pos] = mquant;
3902             /* Set DC scale - y and c use the same (not sure if necessary here) */
3903             s->y_dc_scale = s->y_dc_scale_table[mquant];
3904             s->c_dc_scale = s->c_dc_scale_table[mquant];
3906             for (i = 0; i < 6; i++) {
3907                 s->dc_val[0][s->block_index[i]] = 0;
3909                 val = ((cbp >> (5 - i)) & 1);
3910                 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3911                 v->a_avail = v->c_avail          = 0;
3912                 if (i == 2 || i == 3 || !s->first_slice_line)
3913                     v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3914                 if (i == 1 || i == 3 || s->mb_x)
3915                     v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3917                 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3918                                        (i & 4) ? v->codingset2 : v->codingset);
3919                 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3920                 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
     /* fieldtx doubles the luma stride so the two fields interleave correctly */
3922                     stride_y = s->linesize << fieldtx;
3923                     off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3925                     stride_y = s->uvlinesize;
3928                 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3932         } else { // inter MB
3933             mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3935                 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3936             if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3937                 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3939                 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3940                     || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3941                     v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3944             s->mb_intra = v->is_intra[s->mb_x] = 0;
3945             for (i = 0; i < 6; i++)
3946                 v->mb_type[0][s->block_index[i]] = 0;
3947             fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3948             /* for all motion vector read MVDATA and motion compensate each block */
3952                 for (i = 0; i < 6; i++) {
3955                     val = ((mvbp >> (3 - i)) & 1);
3957                         get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3959                         vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3960                         vc1_mc_4mv_luma(v, i, 0);
3961                     } else if (i == 4) {
3962                         vc1_mc_4mv_chroma4(v);
     /* 2MV-field mode: one MV for the top-field pair, one for the bottom-field pair */
3969                     get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3971                 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3972                 vc1_mc_4mv_luma(v, 0, 0);
3973                 vc1_mc_4mv_luma(v, 1, 0);
3976                     get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3978                 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3979                 vc1_mc_4mv_luma(v, 2, 0);
3980                 vc1_mc_4mv_luma(v, 3, 0);
3981                 vc1_mc_4mv_chroma4(v);
3983                 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3985                     get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3987                 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3991                 GET_MQUANT(); // p. 227
3992                 s->current_picture.f.qscale_table[mb_pos] = mquant;
3993                 if (!v->ttmbf && cbp)
3994                     ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3995                 for (i = 0; i < 6; i++) {
3996                     s->dc_val[0][s->block_index[i]] = 0;
3998                     val = ((cbp >> (5 - i)) & 1);
4000                         off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4002                         off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4004                         pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4005                                                  first_block, s->dest[dst_idx] + off,
4006                                                  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4007                                                  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4008                         block_cbp |= pat << (i << 2);
4009                         if (!v->ttmbf && ttmb < 8)
     /* skipped MB: clear state and motion-compensate with a zero-differential 1MV */
4016         s->mb_intra = v->is_intra[s->mb_x] = 0;
4017         for (i = 0; i < 6; i++) {
4018             v->mb_type[0][s->block_index[i]] = 0;
4019             s->dc_val[0][s->block_index[i]] = 0;
4021         s->current_picture.f.mb_type[mb_pos]      = MB_TYPE_SKIP;
4022         s->current_picture.f.qscale_table[mb_pos] = 0;
4023         v->blk_mv_type[s->block_index[0]] = 0;
4024         v->blk_mv_type[s->block_index[1]] = 0;
4025         v->blk_mv_type[s->block_index[2]] = 0;
4026         v->blk_mv_type[s->block_index[3]] = 0;
4027         vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
     /* preserve the finished row's intra map before the next row overwrites it */
4030     if (s->mb_x == s->mb_width - 1)
4031         memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/**
 * Decode one P macroblock of an interlaced field picture.
 * MBMODE selects intra (idx <= 1), 1-MV (idx <= 5) or 4-MV coding;
 * cur_field_type offsets dst into the correct field of the frame buffer.
 */
4035 static int vc1_decode_p_mb_intfi(VC1Context *v)
4037     MpegEncContext *s = &v->s;
4038     GetBitContext *gb = &s->gb;
4040     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4041     int cbp = 0; /* cbp decoding stuff */
4042     int mqdiff, mquant; /* MB quantization */
4043     int ttmb = v->ttfrm; /* MB Transform type */
4045     int mb_has_coeffs = 1; /* last_flag */
4046     int dmv_x, dmv_y; /* Differential MV components */
4047     int val; /* temp values */
4048     int first_block = 1;
4051     int block_cbp = 0, pat, block_tt = 0;
4054     mquant = v->pq; /* Lossy initialization */
4056     idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4057     if (idx_mbmode <= 1) { // intra MB
4058         s->mb_intra = v->is_intra[s->mb_x] = 1;
4059         s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4060         s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4061         s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4063         s->current_picture.f.qscale_table[mb_pos] = mquant;
4064         /* Set DC scale - y and c use the same (not sure if necessary here) */
4065         s->y_dc_scale = s->y_dc_scale_table[mquant];
4066         s->c_dc_scale = s->c_dc_scale_table[mquant];
4067         v->s.ac_pred  = v->acpred_plane[mb_pos] = get_bits1(gb);
4068         mb_has_coeffs = idx_mbmode & 1;
4070             cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4072         for (i = 0; i < 6; i++) {
4073             s->dc_val[0][s->block_index[i]]  = 0;
4074             v->mb_type[0][s->block_index[i]] = 1;
4076             val = ((cbp >> (5 - i)) & 1);
4077             v->a_avail = v->c_avail = 0;
4078             if (i == 2 || i == 3 || !s->first_slice_line)
4079                 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4080             if (i == 1 || i == 3 || s->mb_x)
4081                 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4083             vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4084                                    (i & 4) ? v->codingset2 : v->codingset);
4085             if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4087             v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4088             off  = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
     /* bottom field: step one field line down into the frame-interleaved buffer */
4089             off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4090             s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4091             // TODO: loop filter
4094         s->mb_intra = v->is_intra[s->mb_x] = 0;
4095         s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4096         for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4097         if (idx_mbmode <= 5) { // 1-MV
4099             if (idx_mbmode & 1) {
4100                 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4102             vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4104             mb_has_coeffs = !(idx_mbmode & 2);
4106             v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4107             for (i = 0; i < 6; i++) {
4109                     dmv_x = dmv_y = pred_flag = 0;
4110                     val = ((v->fourmvbp >> (3 - i)) & 1);
4112                         get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4114                     vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4115                     vc1_mc_4mv_luma(v, i, 0);
4117                     vc1_mc_4mv_chroma(v, 0);
4119             mb_has_coeffs = idx_mbmode & 1;
4122             cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4126         s->current_picture.f.qscale_table[mb_pos] = mquant;
4127         if (!v->ttmbf && cbp) {
4128             ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4131         for (i = 0; i < 6; i++) {
4132             s->dc_val[0][s->block_index[i]] = 0;
4134             val = ((cbp >> (5 - i)) & 1);
4135             off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4136             if (v->cur_field_type)
4137                 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4139                 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4140                                          first_block, s->dest[dst_idx] + off,
4141                                          (i & 4) ? s->uvlinesize : s->linesize,
4142                                          (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4144                 block_cbp |= pat << (i << 2);
4145                 if (!v->ttmbf && ttmb < 8) ttmb = -1;
     /* preserve the finished row's intra map before the next row overwrites it */
4150     if (s->mb_x == s->mb_width - 1)
4151         memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4155 /** Decode one B-frame MB (in Main profile)
4157 static void vc1_decode_b_mb(VC1Context *v)
     /* Progressive B MB: decodes direct/non-direct mode, the B MV type
      * (forward/backward/interpolated), MVs, and coefficients. */
4159     MpegEncContext *s = &v->s;
4160     GetBitContext *gb = &s->gb;
4162     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4163     int cbp = 0; /* cbp decoding stuff */
4164     int mqdiff, mquant; /* MB quantization */
4165     int ttmb = v->ttfrm; /* MB Transform type */
4166     int mb_has_coeffs = 0; /* last_flag */
4167     int index, index1; /* LUT indexes */
4168     int val, sign; /* temp values */
4169     int first_block = 1;
4171     int skipped, direct;
4172     int dmv_x[2], dmv_y[2];
4173     int bmvtype = BMV_TYPE_BACKWARD;
4175     mquant = v->pq; /* Lossy initialization */
     /* direct and skip flags: raw-coded per MB or taken from decoded bitplanes */
4179         direct = get_bits1(gb);
4181         direct = v->direct_mb_plane[mb_pos];
4183         skipped = get_bits1(gb);
4185         skipped = v->s.mbskip_table[mb_pos];
4187     dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4188     for (i = 0; i < 6; i++) {
4189         v->mb_type[0][s->block_index[i]] = 0;
4190         s->dc_val[0][s->block_index[i]]  = 0;
4192     s->current_picture.f.qscale_table[mb_pos] = 0;
4196             GET_MVDATA(dmv_x[0], dmv_y[0]);
4197             dmv_x[1] = dmv_x[0];
4198             dmv_y[1] = dmv_y[0];
4200         if (skipped || !s->mb_intra) {
4201             bmvtype = decode012(gb);
     /* 0/1 map to forward or backward depending on which reference is closer (BFRACTION) */
4204                 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4207                 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4210                 bmvtype  = BMV_TYPE_INTERPOLATED;
4211                 dmv_x[0] = dmv_y[0] = 0;
4215         for (i = 0; i < 6; i++)
4216             v->mb_type[0][s->block_index[i]] = s->mb_intra;
4220             bmvtype = BMV_TYPE_INTERPOLATED;
4221         vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4222         vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4226         cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4229         s->current_picture.f.qscale_table[mb_pos] = mquant;
4231             ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4232         dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4233         vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4234         vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4236         if (!mb_has_coeffs && !s->mb_intra) {
4237             /* no coded blocks - effectively skipped */
4238             vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4239             vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4242         if (s->mb_intra && !mb_has_coeffs) {
4244             s->current_picture.f.qscale_table[mb_pos] = mquant;
4245             s->ac_pred = get_bits1(gb);
4247             vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4249             if (bmvtype == BMV_TYPE_INTERPOLATED) {
4250                 GET_MVDATA(dmv_x[0], dmv_y[0]);
4251                 if (!mb_has_coeffs) {
4252                     /* interpolated skipped block */
4253                     vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4254                     vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4258             vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4260                 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4263                 s->ac_pred = get_bits1(gb);
4264             cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4266             s->current_picture.f.qscale_table[mb_pos] = mquant;
4267             if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4268                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4272     for (i = 0; i < 6; i++) {
4273         s->dc_val[0][s->block_index[i]] = 0;
4275         val = ((cbp >> (5 - i)) & 1);
4276         off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4277         v->mb_type[0][s->block_index[i]] = s->mb_intra;
4279             /* check if prediction blocks A and C are available */
4280             v->a_avail = v->c_avail = 0;
4281             if (i == 2 || i == 3 || !s->first_slice_line)
4282                 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4283             if (i == 1 || i == 3 || s->mb_x)
4284                 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4286             vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4287                                    (i & 4) ? v->codingset2 : v->codingset);
4288             if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4290             v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4292                 for (j = 0; j < 64; j++)
4293                     s->block[i][j] <<= 1;
4294             s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
     /* B frames are never reference frames, so no loop-filter maps are needed here */
4296             vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4297                                first_block, s->dest[dst_idx] + off,
4298                                (i & 4) ? s->uvlinesize : s->linesize,
4299                                (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4300             if (!v->ttmbf && ttmb < 8)
4307 /** Decode one B-frame MB (in interlaced field B picture)
4309 static void vc1_decode_b_mb_intfi(VC1Context *v)
4311 MpegEncContext *s = &v->s;
4312 GetBitContext *gb = &s->gb;
4314 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4315 int cbp = 0; /* cbp decoding stuff */
4316 int mqdiff, mquant; /* MB quantization */
4317 int ttmb = v->ttfrm; /* MB Transform type */
4318 int mb_has_coeffs = 0; /* last_flag */
4319 int val; /* temp value */
4320 int first_block = 1;
4323 int dmv_x[2], dmv_y[2], pred_flag[2];
4324 int bmvtype = BMV_TYPE_BACKWARD;
4325 int idx_mbmode, interpmvp;
4327 mquant = v->pq; /* Loosy initialization */
4330 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4331 if (idx_mbmode <= 1) { // intra MB
4332 s->mb_intra = v->is_intra[s->mb_x] = 1;
4333 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4334 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4335 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4337 s->current_picture.f.qscale_table[mb_pos] = mquant;
4338 /* Set DC scale - y and c use the same (not sure if necessary here) */
4339 s->y_dc_scale = s->y_dc_scale_table[mquant];
4340 s->c_dc_scale = s->c_dc_scale_table[mquant];
4341 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4342 mb_has_coeffs = idx_mbmode & 1;
4344 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4346 for (i = 0; i < 6; i++) {
4347 s->dc_val[0][s->block_index[i]] = 0;
4349 val = ((cbp >> (5 - i)) & 1);
4350 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4351 v->a_avail = v->c_avail = 0;
4352 if (i == 2 || i == 3 || !s->first_slice_line)
4353 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4354 if (i == 1 || i == 3 || s->mb_x)
4355 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4357 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4358 (i & 4) ? v->codingset2 : v->codingset);
4359 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4361 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4363 for (j = 0; j < 64; j++)
4364 s->block[i][j] <<= 1;
4365 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4366 off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4367 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4368 // TODO: yet to perform loop filter
4371 s->mb_intra = v->is_intra[s->mb_x] = 0;
4372 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4373 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4375 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4377 fwd = v->forward_mb_plane[mb_pos];
4378 if (idx_mbmode <= 5) { // 1-MV
4379 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4380 pred_flag[0] = pred_flag[1] = 0;
4382 bmvtype = BMV_TYPE_FORWARD;
4384 bmvtype = decode012(gb);
4387 bmvtype = BMV_TYPE_BACKWARD;
4390 bmvtype = BMV_TYPE_DIRECT;
4393 bmvtype = BMV_TYPE_INTERPOLATED;
4394 interpmvp = get_bits1(gb);
4397 v->bmvtype = bmvtype;
4398 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4399 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4401 if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4402 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4404 if (bmvtype == BMV_TYPE_DIRECT) {
4405 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4406 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4408 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4409 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4410 mb_has_coeffs = !(idx_mbmode & 2);
4413 bmvtype = BMV_TYPE_FORWARD;
4414 v->bmvtype = bmvtype;
4415 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4416 for (i = 0; i < 6; i++) {
4418 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4419 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4420 val = ((v->fourmvbp >> (3 - i)) & 1);
4422 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4423 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4424 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4426 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4427 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4429 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4431 mb_has_coeffs = idx_mbmode & 1;
4434 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4438 s->current_picture.f.qscale_table[mb_pos] = mquant;
4439 if (!v->ttmbf && cbp) {
4440 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4443 for (i = 0; i < 6; i++) {
4444 s->dc_val[0][s->block_index[i]] = 0;
4446 val = ((cbp >> (5 - i)) & 1);
4447 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4448 if (v->cur_field_type)
4449 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4451 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4452 first_block, s->dest[dst_idx] + off,
4453 (i & 4) ? s->uvlinesize : s->linesize,
4454 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4455 if (!v->ttmbf && ttmb < 8)
4463 /** Decode blocks of I-frame
/* Decodes all macroblocks of a simple/main-profile I-frame: reads CBP and
 * AC-prediction flags per MB, reconstructs all six 8x8 blocks, applies the
 * overlap smoothing filter when enabled, and reports slices to the error
 * resilience layer. */
4465 static void vc1_decode_i_blocks(VC1Context *v)
4468 MpegEncContext *s = &v->s;
/* Choose the AC coefficient coding sets: luma (codingset) from the Y table
 * index, chroma (codingset2) from the C table index. Low pqindex selects
 * the high-rate tables. */
4473 /* select codingmode used for VLC tables selection */
4474 switch (v->y_ac_table_index) {
4476 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4479 v->codingset = CS_HIGH_MOT_INTRA;
4482 v->codingset = CS_MID_RATE_INTRA;
4486 switch (v->c_ac_table_index) {
4488 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4491 v->codingset2 = CS_HIGH_MOT_INTER;
4494 v->codingset2 = CS_MID_RATE_INTER;
4498 /* Set DC scale - y and c use the same */
4499 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4500 s->c_dc_scale = s->c_dc_scale_table[v->pq];
/* Walk every MB row of the whole picture (no slice support here, unlike
 * the advanced-profile variant which uses start_mb_y/end_mb_y). */
4503 s->mb_x = s->mb_y = 0;
4505 s->first_slice_line = 1;
4506 for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4508 ff_init_block_index(s);
4509 for (; s->mb_x < s->mb_width; s->mb_x++) {
4511 ff_update_block_index(s);
/* dst[0..3] are the four 8x8 luma quadrants of the MB, dst[4..5] the
 * chroma blocks. */
4512 dst[0] = s->dest[0];
4513 dst[1] = dst[0] + 8;
4514 dst[2] = s->dest[0] + s->linesize * 8;
4515 dst[3] = dst[2] + 8;
4516 dst[4] = s->dest[1];
4517 dst[5] = s->dest[2];
4518 s->dsp.clear_blocks(s->block[0]);
4519 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4520 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4521 s->current_picture.f.qscale_table[mb_pos] = v->pq;
/* Zero the backward MV slot so B-frame prediction reading this picture
 * sees a null vector for intra MBs. */
4522 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4523 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4525 // do actual MB decoding and displaying
4526 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4527 v->s.ac_pred = get_bits1(&v->s.gb);
4529 for (k = 0; k < 6; k++) {
/* Per-block coded flag comes from the MB-level CBP, bit 5 first. */
4530 val = ((cbp >> (5 - k)) & 1);
4533 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4537 cbp |= val << (5 - k);
4539 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
/* Skip reconstructing chroma blocks (k > 3) in grayscale mode. */
4541 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4543 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4544 if (v->pq >= 9 && v->overlap) {
/* Overlap path: scale coefficients up by 2 and output as signed
 * residual; the overlap filter below operates on this form. */
4546 for (j = 0; j < 64; j++)
4547 s->block[k][j] <<= 1;
4548 s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
/* Non-overlap path: recenter around 0 by subtracting 64 before
 * doubling, then output as unsigned pixels. */
4551 for (j = 0; j < 64; j++)
4552 s->block[k][j] = (s->block[k][j] - 64) << 1;
4553 s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
/* Overlap smoothing across 8x8 block edges (pq >= 9 only): horizontal
 * filtering against the left neighbour MB and within this MB, then
 * vertical filtering against the MB above (skipped on the first line). */
4557 if (v->pq >= 9 && v->overlap) {
4559 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4560 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4561 if (!(s->flags & CODEC_FLAG_GRAY)) {
4562 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4563 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4566 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4567 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4568 if (!s->first_slice_line) {
4569 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4570 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4571 if (!(s->flags & CODEC_FLAG_GRAY)) {
4572 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4573 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4576 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4577 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4579 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* Bitstream sanity check: if we consumed more bits than the picture
 * holds, mark the remainder damaged and bail out of decoding. */
4581 if (get_bits_count(&s->gb) > v->bits) {
4582 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4583 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4584 get_bits_count(&s->gb), v->bits);
/* Without the loop filter the current row is final; with it, the row
 * above becomes final one iteration later. */
4588 if (!v->s.loop_filter)
4589 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4591 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4593 s->first_slice_line = 0;
4595 if (v->s.loop_filter)
4596 ff_draw_horiz_band(s, (s->mb_height - 1) * 16, 16);
4597 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
4600 /** Decode blocks of I-frame for advanced profile
/* Advanced-profile I-frame MB loop. Differences from the simple/main
 * variant: per-MB quantizer (mquant), bitplane-coded AC-pred/overlap/fieldtx
 * flags, slice support via start_mb_y/end_mb_y, and deferred output through
 * vc1_put_signed_blocks_clamped() so the smoothing/loop filters can run on
 * a delayed block row. */
4602 static void vc1_decode_i_blocks_adv(VC1Context *v)
4605 MpegEncContext *s = &v->s;
4611 GetBitContext *gb = &s->gb;
/* Coding-set selection, same scheme as vc1_decode_i_blocks(). */
4613 /* select codingmode used for VLC tables selection */
4614 switch (v->y_ac_table_index) {
4616 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4619 v->codingset = CS_HIGH_MOT_INTRA;
4622 v->codingset = CS_MID_RATE_INTRA;
4626 switch (v->c_ac_table_index) {
4628 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4631 v->codingset2 = CS_HIGH_MOT_INTER;
4634 v->codingset2 = CS_MID_RATE_INTER;
4639 s->mb_x = s->mb_y = 0;
4641 s->first_slice_line = 1;
4642 s->mb_y = s->start_mb_y;
/* When starting mid-picture (slice), clear the coded-block predictors of
 * the row above so DC/CB prediction does not leak across the slice edge. */
4643 if (s->start_mb_y) {
4645 ff_init_block_index(s);
4646 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4647 (1 + s->b8_stride) * sizeof(*s->coded_block));
4649 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4651 ff_init_block_index(s);
4652 for (;s->mb_x < s->mb_width; s->mb_x++) {
/* Blocks are decoded into a rotating buffer (cur_blk_idx) rather than
 * straight into the frame, to allow delayed filtering. */
4653 DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
4654 ff_update_block_index(s);
4655 s->dsp.clear_blocks(block[0]);
4656 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4657 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4658 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4659 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4661 // do actual MB decoding and displaying
/* Bitplane-coded flags may be "raw", i.e. transmitted per-MB here
 * instead of as a picture-level plane. */
4662 if (v->fieldtx_is_raw)
4663 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4664 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4665 if ( v->acpred_is_raw)
4666 v->s.ac_pred = get_bits1(&v->s.gb);
4668 v->s.ac_pred = v->acpred_plane[mb_pos];
4670 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4671 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
/* mquant is the per-MB quantizer; DC scale follows it (Y and C share
 * the same table lookup). */
4675 s->current_picture.f.qscale_table[mb_pos] = mquant;
4676 /* Set DC scale - y and c use the same */
4677 s->y_dc_scale = s->y_dc_scale_table[mquant];
4678 s->c_dc_scale = s->c_dc_scale_table[mquant];
4680 for (k = 0; k < 6; k++) {
4681 val = ((cbp >> (5 - k)) & 1);
4684 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4688 cbp |= val << (5 - k);
/* Neighbour availability for AC/DC prediction: blocks 2/3 always have
 * a block above within this MB; blocks 1/3 always have one to the left. */
4690 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4691 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4693 vc1_decode_i_block_adv(v, block[k], k, val,
4694 (k < 4) ? v->codingset : v->codingset2, mquant);
4696 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4698 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
/* Output path is delayed: smooth overlap filter, then clamp+store,
 * then (optionally) the in-loop deblocking filter. */
4701 vc1_smooth_overlap_filter_iblk(v);
4702 vc1_put_signed_blocks_clamped(v);
4703 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4705 if (get_bits_count(&s->gb) > v->bits) {
4706 // TODO: may need modification to handle slice coding
4707 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4708 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4709 get_bits_count(&s->gb), v->bits);
4713 if (!v->s.loop_filter)
4714 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4716 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4717 s->first_slice_line = 0;
/* Flush the last delayed MB row that the main loop left buffered. */
4720 /* raw bottom MB row */
4722 ff_init_block_index(s);
4723 for (;s->mb_x < s->mb_width; s->mb_x++) {
4724 ff_update_block_index(s);
4725 vc1_put_signed_blocks_clamped(v);
4726 if (v->s.loop_filter)
4727 vc1_loop_filter_iblk_delayed(v, v->pq);
4729 if (v->s.loop_filter)
4730 ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
/* field_mode doubles the MB-row coordinates reported to the ER layer,
 * since each field covers half the frame height. */
4731 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4732 (s->end_mb_y << v->field_mode) - 1, (AC_END|DC_END|MV_END));
/* Decode all macroblocks of a P-frame (or P-field): dispatches per-MB
 * decoding by frame coding mode (fcm), applies the in-loop deblocking
 * filter one row behind, and maintains the one-row history of CBP/ttblk/
 * intra/luma-MV context used by the filter and by MB prediction. */
4735 static void vc1_decode_p_blocks(VC1Context *v)
4737 MpegEncContext *s = &v->s;
4738 int apply_loop_filter;
/* Both coding sets are chosen from the chroma AC table index here; the
 * luma index is only used for I-pictures. */
4740 /* select codingmode used for VLC tables selection */
4741 switch (v->c_ac_table_index) {
4743 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4746 v->codingset = CS_HIGH_MOT_INTRA;
4749 v->codingset = CS_MID_RATE_INTRA;
4753 switch (v->c_ac_table_index) {
4755 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4758 v->codingset2 = CS_HIGH_MOT_INTER;
4761 v->codingset2 = CS_MID_RATE_INTER;
/* Loop filter is skipped entirely when the user requested skipping it
 * for non-key frames. */
4765 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4766 s->first_slice_line = 1;
/* Clear both rows (current + history) of the CBP context. */
4767 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4768 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4770 ff_init_block_index(s);
4771 for (; s->mb_x < s->mb_width; s->mb_x++) {
4772 ff_update_block_index(s);
/* Dispatch on fcm: interlaced-field MBs, interlaced-frame MBs
 * (fcm == 1), else progressive. */
4775 vc1_decode_p_mb_intfi(v);
4776 else if (v->fcm == 1)
4777 vc1_decode_p_mb_intfr(v);
4778 else vc1_decode_p_mb(v);
/* Filter runs one MB row behind decoding, and only for progressive
 * content (fcm == 0). */
4779 if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == 0)
4780 vc1_apply_p_loop_filter(v);
/* Overconsumption check also guards against the bit counter wrapping
 * negative on badly corrupted input. */
4781 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4782 // TODO: may need modification to handle slice coding
4783 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4784 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4785 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
/* Shift the per-row context buffers: current row becomes the "above
 * row" history for the next iteration. */
4789 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4790 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4791 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4792 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4793 if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4794 s->first_slice_line = 0;
/* Filter the final MB row, which the in-loop pass above left pending. */
4796 if (apply_loop_filter) {
4798 ff_init_block_index(s);
4799 for (; s->mb_x < s->mb_width; s->mb_x++) {
4800 ff_update_block_index(s);
4801 vc1_apply_p_loop_filter(v);
4804 if (s->end_mb_y >= s->start_mb_y)
4805 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4806 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4807 (s->end_mb_y << v->field_mode) - 1, (AC_END|DC_END|MV_END));
/* Decode all macroblocks of a B-frame (or B-field). Structure mirrors
 * vc1_decode_p_blocks() but without the one-row delayed deblocking pass;
 * per-MB decoding is dispatched by coding mode (only the interlaced-field
 * call is visible in this excerpt). */
4810 static void vc1_decode_b_blocks(VC1Context *v)
4812 MpegEncContext *s = &v->s;
/* As for P-pictures, both coding sets derive from the chroma AC table
 * index. */
4814 /* select codingmode used for VLC tables selection */
4815 switch (v->c_ac_table_index) {
4817 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4820 v->codingset = CS_HIGH_MOT_INTRA;
4823 v->codingset = CS_MID_RATE_INTRA;
4827 switch (v->c_ac_table_index) {
4829 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4832 v->codingset2 = CS_HIGH_MOT_INTER;
4835 v->codingset2 = CS_MID_RATE_INTER;
4839 s->first_slice_line = 1;
4840 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4842 ff_init_block_index(s);
4843 for (; s->mb_x < s->mb_width; s->mb_x++) {
4844 ff_update_block_index(s);
4847 vc1_decode_b_mb_intfi(v);
/* Guard against both overconsumption and a wrapped (negative) bit
 * counter on corrupt streams. */
4850 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4851 // TODO: may need modification to handle slice coding
4852 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4853 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4854 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4857 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* Row completion reporting: immediate without loop filter, one row
 * delayed with it. */
4859 if (!v->s.loop_filter)
4860 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4862 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4863 s->first_slice_line = 0;
4865 if (v->s.loop_filter)
4866 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4867 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4868 (s->end_mb_y << v->field_mode) - 1, (AC_END|DC_END|MV_END));
/* Handle a skipped P-frame: the whole picture is a verbatim copy of the
 * previous (last) picture, row by row, with every row reported complete
 * immediately. */
4871 static void vc1_decode_skip_blocks(VC1Context *v)
4873 MpegEncContext *s = &v->s;
4875 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, (AC_END|DC_END|MV_END));
4876 s->first_slice_line = 1;
4877 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4879 ff_init_block_index(s);
4880 ff_update_block_index(s);
/* Copy one 16-pixel-high luma stripe and the matching 8-pixel chroma
 * stripes from the reference picture. */
4881 memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4882 memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4883 memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4884 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4885 s->first_slice_line = 0;
/* The output is identical to the reference, so present it as a P-picture. */
4887 s->pict_type = AV_PICTURE_TYPE_P;
/* Top-level block-layer dispatcher: picks the per-picture decoding routine
 * from the picture type, the profile (advanced vs. simple/main) and the
 * skipped-frame flag. B-intra pictures fall back to the I-frame decoders. */
4890 static void vc1_decode_blocks(VC1Context *v)
4893 v->s.esc3_level_length = 0;
/* X8 intra frames (WMV2-style) are handled entirely by the intrax8
 * decoder; quantizer arguments follow its 2*pq+halfpq convention. */
4895 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
/* Reset the rotating block-buffer indices used by the delayed output
 * path of the advanced-profile decoders. */
4898 v->left_blk_idx = -1;
4899 v->topleft_blk_idx = 1;
4901 switch (v->s.pict_type) {
4902 case AV_PICTURE_TYPE_I:
4903 if (v->profile == PROFILE_ADVANCED)
4904 vc1_decode_i_blocks_adv(v);
4906 vc1_decode_i_blocks(v);
4908 case AV_PICTURE_TYPE_P:
/* A skipped P-frame copies the reference instead of decoding MBs. */
4909 if (v->p_frame_skipped)
4910 vc1_decode_skip_blocks(v);
4912 vc1_decode_p_blocks(v);
4914 case AV_PICTURE_TYPE_B:
/* Presumably the BI (B-intra) case, decoded with the I-frame path —
 * the guarding condition is not visible in this excerpt. */
4916 if (v->profile == PROFILE_ADVANCED)
4917 vc1_decode_i_blocks_adv(v);
4919 vc1_decode_i_blocks(v);
4921 vc1_decode_b_blocks(v);
4927 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
4931 * Transform coefficients for both sprites in 16.16 fixed point format,
4932 * in the order they appear in the bitstream:
4934 * rotation 1 (unused)
4936 * rotation 2 (unused)
4943 int effect_type, effect_flag;
4944 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
4945 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
/* Read one 16.16 fixed-point sprite coefficient: a 30-bit unsigned field,
 * recentered around 1<<29 to make it signed, then scaled by 2 to reach
 * 16.16 precision.
 * NOTE(review): left-shifting a negative value is undefined behavior in C;
 * `* 2` would express the same scaling without the UB — confirm before
 * changing, as the surrounding code relies on this exact range. */
4948 static inline int get_fp_val(GetBitContext* gb)
4950 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
/* Parse one sprite affine-transform record into c[0..6] (16.16 fixed point).
 * The 2-bit selector chooses how many coefficients are explicitly coded:
 * translation only, uniform zoom, separate x/y zoom, or the full set
 * including the (unused) rotation terms c[1]/c[3]. c[5] is the vertical
 * offset and c[6] the blend/opacity coefficient; the branches that read
 * them conditionally are not fully visible in this excerpt. */
4953 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4957 switch (get_bits(gb, 2)) {
/* Translation-only case: just the horizontal offset c[2]. */
4960 c[2] = get_fp_val(gb);
/* Uniform zoom: one scale shared by x (c[0]) and y (c[4]). */
4964 c[0] = c[4] = get_fp_val(gb);
4965 c[2] = get_fp_val(gb);
/* Independent x zoom, x offset, y zoom. */
4968 c[0] = get_fp_val(gb);
4969 c[2] = get_fp_val(gb);
4970 c[4] = get_fp_val(gb);
/* Full form: scales, offsets and the rotation terms c[1]/c[3]. */
4973 c[0] = get_fp_val(gb);
4974 c[1] = get_fp_val(gb);
4975 c[2] = get_fp_val(gb);
4976 c[3] = get_fp_val(gb);
4977 c[4] = get_fp_val(gb);
4980 c[5] = get_fp_val(gb);
4982 c[6] = get_fp_val(gb);
/* Parse the sprite (WMImage) header data into *sd: one transform per
 * sprite, then the optional post-processing "effect" records, then the
 * effect flag. Also logs the parsed values and validates overall buffer
 * consumption. */
4987 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4989 AVCodecContext *avctx = v->s.avctx;
4992 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4993 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
/* Rotation (coefs[1]/coefs[3]) is not implemented; ask for a sample
 * instead of silently producing wrong output. */
4994 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4995 av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4996 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
/* Debug-print each 16.16 coefficient as integer.milli parts. */
4997 for (i = 0; i < 7; i++)
4998 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4999 sd->coefs[sprite][i] / (1<<16),
5000 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
5001 av_log(avctx, AV_LOG_DEBUG, "\n");
/* NOTE(review): assignment inside the condition is intentional here (the
 * effect type doubles as the "effect present" flag), but deserves
 * parentheses to silence compiler warnings. */
5005 if (sd->effect_type = get_bits_long(gb, 30)) {
5006 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
/* Parameter counts of 7 and 14 are whole transform records; anything
 * else is a flat list of fixed-point values. */
5008 vc1_sprite_parse_transform(gb, sd->effect_params1);
5011 vc1_sprite_parse_transform(gb, sd->effect_params1);
5012 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5015 for (i = 0; i < sd->effect_pcount1; i++)
5016 sd->effect_params1[i] = get_fp_val(gb);
5018 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5019 // effect 13 is simple alpha blending and matches the opacity above
5020 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5021 for (i = 0; i < sd->effect_pcount1; i++)
5022 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5023 sd->effect_params1[i] / (1 << 16),
5024 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5025 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Second parameter list is bounded by the effect_params2[10] storage. */
5028 sd->effect_pcount2 = get_bits(gb, 16);
5029 if (sd->effect_pcount2 > 10) {
5030 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5032 } else if (sd->effect_pcount2) {
5034 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5035 while (++i < sd->effect_pcount2) {
5036 sd->effect_params2[i] = get_fp_val(gb);
5037 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5038 sd->effect_params2[i] / (1 << 16),
5039 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5041 av_log(avctx, AV_LOG_DEBUG, "\n");
/* NOTE(review): same deliberate assignment-in-condition pattern as above. */
5044 if (sd->effect_flag = get_bits1(gb))
5045 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
/* WMV3IMAGE streams carry 64 bits of trailing slack; beyond that the
 * parser has read past the buffer. */
5047 if (get_bits_count(gb) >= gb->size_in_bits +
5048 (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
5049 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5050 if (get_bits_count(gb) < gb->size_in_bits - 8)
5051 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
/* Render the sprite output frame: for every plane and row, horizontally
 * resample each source sprite (with a two-row cache of resampled lines),
 * then vertically interpolate and, with two sprites, alpha-blend them into
 * the destination. All positions/steps are 16.16 fixed point. */
5054 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5056 int i, plane, row, sprite;
/* sr_cache[sprite][0/1] remembers which source lines currently sit in
 * v->sr_rows; -1 means empty. */
5057 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5058 uint8_t* src_h[2][2];
5059 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5061 MpegEncContext *s = &v->s;
5063 for (i = 0; i < 2; i++) {
/* Note on precedence: `-` binds tighter than `<<`, so this clamps to
 * (sprite_width - 1) << 16, i.e. the last valid 16.16 column. */
5064 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5065 xadv[i] = sd->coefs[i][0];
/* Clamp the horizontal step unless it is an exact 1:1 copy that ends
 * flush with the sprite edge. */
5066 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5067 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5069 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5070 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
/* Blend factor between the two sprites, 16.16 in [0, 1). */
5072 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5074 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
/* Chroma planes are half-sized in both dimensions. */
5075 int width = v->output_width>>!!plane;
5077 for (row = 0; row < v->output_height>>!!plane; row++) {
5078 uint8_t *dst = v->sprite_output_frame.data[plane] +
5079 v->sprite_output_frame.linesize[plane] * row;
5081 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5082 uint8_t *iplane = s->current_picture.f.data[plane];
5083 int iline = s->current_picture.f.linesize[plane];
/* Split the source y coordinate into integer line and 16.16
 * fractional part for vertical interpolation. */
5084 int ycoord = yoff[sprite] + yadv[sprite] * row;
5085 int yline = ycoord >> 16;
5086 ysub[sprite] = ycoord & 0xFFFF;
/* Presumably the second sprite reads from the previous picture —
 * the guarding condition is not visible in this excerpt. */
5088 iplane = s->last_picture.f.data[plane];
5089 iline = s->last_picture.f.linesize[plane];
/* Fast path: integer x offset and 1:1 step means the source rows
 * can be used in place, no horizontal resampling needed. */
5091 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5092 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5094 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + (yline + 1) * iline;
/* Slow path: horizontally resample into sr_rows, reusing cached
 * rows where possible (a row can often be shifted from slot 1 to
 * slot 0 as the scan moves down). */
5096 if (sr_cache[sprite][0] != yline) {
5097 if (sr_cache[sprite][1] == yline) {
5098 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5099 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5101 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5102 sr_cache[sprite][0] = yline;
/* The second row is only needed when vertical interpolation will
 * actually blend it in (non-zero fractional part). */
5105 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5106 v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane + (yline + 1) * iline, xoff[sprite], xadv[sprite], width);
5107 sr_cache[sprite][1] = yline + 1;
5109 src_h[sprite][0] = v->sr_rows[sprite][0];
5110 src_h[sprite][1] = v->sr_rows[sprite][1];
/* Vertical stage: single sprite → interpolate or plain copy; two
 * sprites → pick the blend kernel matching which of the two needs
 * vertical interpolation. */
5114 if (!v->two_sprites) {
5116 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5118 memcpy(dst, src_h[0][0], width);
5121 if (ysub[0] && ysub[1]) {
5122 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5123 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5124 } else if (ysub[0]) {
5125 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5126 src_h[1][0], alpha, width);
5127 } else if (ysub[1]) {
/* Operands are swapped, so the alpha must be inverted. */
5128 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5129 src_h[0][0], (1<<16)-1-alpha, width);
5131 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5137 for (i = 0; i < 2; i++) {
/* Decode one sprite picture: parse the sprite headers, validate that the
 * needed reference pictures exist, (re)allocate the dedicated output frame,
 * and render into it. Returns a negative value on error (error paths are
 * partially outside this excerpt). */
5147 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5149 MpegEncContext *s = &v->s;
5150 AVCodecContext *avctx = s->avctx;
5153 vc1_parse_sprites(v, gb, &sd);
/* The sprite sources are the current (and, for two sprites, the last)
 * decoded pictures — without them there is nothing to draw. */
5155 if (!s->current_picture.f.data[0]) {
5156 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5160 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5161 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
/* Release any previous output frame before requesting a fresh one. */
5165 if (v->sprite_output_frame.data[0])
5166 avctx->release_buffer(avctx, &v->sprite_output_frame);
5168 v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
5169 v->sprite_output_frame.reference = 0;
5170 if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
5171 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5175 vc1_draw_sprites(v, &sd);
/* Flush callback for the WMImage codecs: clears the pending sprite source
 * picture to black (Y=0, chroma=128) so that a missing second keyframe
 * after a seek produces a visually sane, if incorrect, result. */
5180 static void vc1_sprite_flush(AVCodecContext *avctx)
5182 VC1Context *v = avctx->priv_data;
5183 MpegEncContext *s = &v->s;
5184 AVFrame *f = &s->current_picture.f;
5187 /* Windows Media Image codecs have a convergence interval of two keyframes.
5188 Since we can't enforce it, clear to black the missing sprite. This is
5189 wrong but it looks better than doing nothing. */
5192 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5193 for (i = 0; i < v->sprite_height>>!!plane; i++)
5194 memset(f->data[plane] + i * f->linesize[plane],
5195 plane ? 128 : 0, f->linesize[plane]);
/* Allocate all per-picture side tables: bitplanes, block buffers, per-row
 * CBP/ttblk/intra/luma-MV context, block-type maps, and the interlaced MV
 * field tables. Returns non-zero on allocation failure (the check at the
 * end covers several, but visibly not all, of the allocations —
 * NOTE(review): fieldtx_plane, mb_type_base and the mv_f* tables are not in
 * the visible failure check; verify against the full file). */
5200 static av_cold int vc1_decode_init_alloc_tables(VC1Context *v)
5202 MpegEncContext *s = &v->s;
5205 /* Allocate mb bitplanes */
5206 v->mv_type_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5207 v->direct_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5208 v->forward_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5209 v->fieldtx_plane = av_mallocz(s->mb_stride * s->mb_height);
5210 v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5211 v->over_flags_plane = av_malloc (s->mb_stride * s->mb_height);
/* Two extra MB columns for the delayed-output block buffer. */
5213 v->n_allocated_blks = s->mb_width + 2;
5214 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
/* Each of these holds two MB rows: the base row is the history ("above")
 * row, and the second (v->cbp etc.) is the current row. */
5215 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5216 v->cbp = v->cbp_base + s->mb_stride;
5217 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5218 v->ttblk = v->ttblk_base + s->mb_stride;
5219 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5220 v->is_intra = v->is_intra_base + s->mb_stride;
5221 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5222 v->luma_mv = v->luma_mv_base + s->mb_stride;
/* The +1 row and +1 column offsets give every block a valid top/left
 * neighbour entry when indexed through s->block_index[]. */
5224 /* allocate block type info in that way so it could be used with s->block_index[] */
5225 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5226 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5227 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5228 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
/* Interlaced decoding needs per-block MV type and field-select tables for
 * the current, last and next pictures (two planes each). */
5230 /* allocate memory to store block level MV info */
5231 v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5232 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5233 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5234 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5235 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5236 v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5237 v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5238 v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5239 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5240 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5241 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5243 /* Init coded blocks info */
5244 if (v->profile == PROFILE_ADVANCED) {
5245 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5247 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5251 ff_intrax8_common_init(&v->x8,s);
/* The image (sprite) codecs additionally need two horizontally-resampled
 * row buffers per sprite. */
5253 if (s->avctx->codec_id == CODEC_ID_WMV3IMAGE || s->avctx->codec_id == CODEC_ID_VC1IMAGE) {
5254 for (i = 0; i < 4; i++)
5255 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
5258 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5259 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5266 /** Initialize a VC1/WMV3 decoder
5267 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5268 * @todo TODO: Decypher remaining bits in extra_data
/* Codec init: choose pixel format/hwaccel, parse the extradata (a raw
 * sequence header for WMV3, or marker-delimited escaped sequence header +
 * entry point for VC-1/WVC1/WVP2), set up scan tables, and configure the
 * sprite geometry for the WMImage codecs. */
5270 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5272 VC1Context *v = avctx->priv_data;
5273 MpegEncContext *s = &v->s;
/* Remember the container's display size: WMImage renders sprites at this
 * size while coded_width/height describe the sprite source. */
5277 /* save the container output size for WMImage */
5278 v->output_width = avctx->width;
5279 v->output_height = avctx->height;
5281 if (!avctx->extradata_size || !avctx->extradata)
5283 if (!(avctx->flags & CODEC_FLAG_GRAY))
5284 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5286 avctx->pix_fmt = PIX_FMT_GRAY8;
5287 avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
/* The decoder does not pad picture edges itself. */
5289 avctx->flags |= CODEC_FLAG_EMU_EDGE;
5290 v->s.flags |= CODEC_FLAG_EMU_EDGE;
5292 if (avctx->idct_algo == FF_IDCT_AUTO) {
5293 avctx->idct_algo = FF_IDCT_WMV2;
5296 if (vc1_init_common(v) < 0)
5298 ff_vc1dsp_init(&v->vc1dsp);
5300 if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE) {
5303 // looks like WMV3 has a sequence header stored in the extradata
5304 // advanced sequence header may be before the first frame
5305 // the last byte of the extradata is a version number, 1 for the
5306 // samples we can decode
5308 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5310 if (vc1_decode_sequence_header(avctx, v, &gb) < 0)
/* Report (but tolerate) unparsed trailing bits or a short extradata. */
5313 count = avctx->extradata_size*8 - get_bits_count(&gb);
5315 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5316 count, get_bits(&gb, count));
5317 } else if (count < 0) {
5318 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5320 } else { // VC1/WVC1/WVP2
/* Advanced-profile extradata: scan marker-delimited units, unescape
 * each one, and require both a sequence header and an entry point. */
5321 const uint8_t *start = avctx->extradata;
5322 uint8_t *end = avctx->extradata + avctx->extradata_size;
5323 const uint8_t *next;
5324 int size, buf2_size;
5325 uint8_t *buf2 = NULL;
5326 int seq_initialized = 0, ep_initialized = 0;
5328 if (avctx->extradata_size < 16) {
5329 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5333 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5334 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5336 for (; next < end; start = next) {
5337 next = find_next_marker(start + 4, end);
5338 size = next - start - 4;
5341 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5342 init_get_bits(&gb, buf2, buf2_size * 8);
5343 switch (AV_RB32(start)) {
5344 case VC1_CODE_SEQHDR:
5345 if (vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5349 seq_initialized = 1;
5351 case VC1_CODE_ENTRYPOINT:
5352 if (vc1_decode_entry_point(avctx, v, &gb) < 0) {
5361 if (!seq_initialized || !ep_initialized) {
5362 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
/* WVP2 is the "image v2" variant, flagged via the codec tag. */
5365 v->res_sprite = (avctx->codec_tag == MKTAG('W','V','P','2'));
5368 avctx->profile = v->profile;
5369 if (v->profile == PROFILE_ADVANCED)
5370 avctx->level = v->level;
5372 avctx->has_b_frames = !!(avctx->max_b_frames);
5374 s->mb_width = (avctx->coded_width + 15) >> 4;
5375 s->mb_height = (avctx->coded_height + 15) >> 4;
/* Advanced profile (and res_fasttx streams) use transposed WMV1 scan
 * tables plus the interlaced zigzag; otherwise the plain WMV1 tables. */
5377 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5378 for (i = 0; i < 64; i++) {
5379 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5380 v->zz_8x8[0][i] = transpose(wmv1_scantable[0][i]);
5381 v->zz_8x8[1][i] = transpose(wmv1_scantable[1][i]);
5382 v->zz_8x8[2][i] = transpose(wmv1_scantable[2][i]);
5383 v->zz_8x8[3][i] = transpose(wmv1_scantable[3][i]);
5384 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5389 memcpy(v->zz_8x8, wmv1_scantable, 4*64);
/* WMImage: the coded size is the sprite size; the exposed picture size
 * is the container's output size saved above. */
5394 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5395 v->sprite_width = avctx->coded_width;
5396 v->sprite_height = avctx->coded_height;
5398 avctx->coded_width = avctx->width = v->output_width;
5399 avctx->coded_height = avctx->height = v->output_height;
/* Dimensions are used in 16.16 fixed point; 2^14 keeps products from
 * overflowing 32 bits. */
5401 // prevent 16.16 overflows
5402 if (v->sprite_width > 1 << 14 ||
5403 v->sprite_height > 1 << 14 ||
5404 v->output_width > 1 << 14 ||
5405 v->output_height > 1 << 14) return -1;
5410 /** Close a VC1/WMV3 decoder
5411 * @warning Initial try at using MpegEncContext stuff
/* Free everything vc1_decode_init()/vc1_decode_init_alloc_tables() set up:
 * the sprite output frame and row buffers, HRD parameters, the shared
 * MpegEncContext state, and all side tables (av_freep is NULL-safe and
 * nulls the pointers). */
5413 static av_cold int vc1_decode_end(AVCodecContext *avctx)
5415 VC1Context *v = avctx->priv_data;
/* Only the image codecs own a separate sprite output frame. */
5418 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5419 && v->sprite_output_frame.data[0])
5420 avctx->release_buffer(avctx, &v->sprite_output_frame);
5421 for (i = 0; i < 4; i++)
5422 av_freep(&v->sr_rows[i >> 1][i & 1]);
5423 av_freep(&v->hrd_rate);
5424 av_freep(&v->hrd_buffer);
5425 MPV_common_end(&v->s);
5426 av_freep(&v->mv_type_mb_plane);
5427 av_freep(&v->direct_mb_plane);
5428 av_freep(&v->forward_mb_plane);
5429 av_freep(&v->fieldtx_plane);
5430 av_freep(&v->acpred_plane);
5431 av_freep(&v->over_flags_plane);
5432 av_freep(&v->mb_type_base);
5433 av_freep(&v->blk_mv_type_base);
5434 av_freep(&v->mv_f_base);
5435 av_freep(&v->mv_f_last_base);
5436 av_freep(&v->mv_f_next_base);
5437 av_freep(&v->block);
5438 av_freep(&v->cbp_base);
5439 av_freep(&v->ttblk_base);
5440 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5441 av_freep(&v->luma_mv_base);
5442 ff_intrax8_common_end(&v->x8);
/**
 * Decode one VC-1/WMV3 access unit into an AVFrame.
 *
 * Covers: flushing the delayed picture on end-of-sequence, start-code
 * parsing and unescaping for Advanced profile, splitting the packet
 * into per-slice / per-field bitstreams, frame-header parsing, the
 * VDPAU and generic hwaccel fast paths, field-picture linesize
 * doubling, sprite composition for the *IMAGE codecs, and selection of
 * the picture to return.
 *
 * NOTE(review): this chunk elides several declarations (e.g.
 * 'buf_size2', 'buf_size3', 'size', 'slices', 'tmp', and the
 * initialization 'next = buf') as well as various braces, 'break's and
 * 'return's; the visible statements are unmodified.
 */
5447 /** Decode a VC1/WMV3 frame
5448 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5450 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5451 int *data_size, AVPacket *avpkt)
5453 const uint8_t *buf = avpkt->data;
5454 int buf_size = avpkt->size, n_slices = 0, i;
5455 VC1Context *v = avctx->priv_data;
5456 MpegEncContext *s = &v->s;
5457 AVFrame *pict = data;
/* Unescaped copy of the payload (Advanced profile only). */
5458 uint8_t *buf2 = NULL;
/* Unescaped second field for WVC1 interlaced streams. */
5459 uint8_t *buf_field2 = NULL;
5460 const uint8_t *buf_start = buf;
5461 int mb_height, n_slices1;
5468 /* no supplementary picture */
5469 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5470 /* special case for last picture */
5471 if (s->low_delay == 0 && s->next_picture_ptr) {
5472 *pict = *(AVFrame*)s->next_picture_ptr;
5473 s->next_picture_ptr = NULL;
5475 *data_size = sizeof(AVFrame);
/* Pick the VDPAU pixel format matching the stream profile. */
5481 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5482 if (v->profile < PROFILE_ADVANCED)
5483 avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
5485 avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
5488 //for advanced profile we may need to parse and unescape data
5489 if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5491 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5493 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5494 const uint8_t *start, *end, *next;
/* Walk the packet one start-code-delimited unit at a time.
 * NOTE(review): the 'next = buf;' initialization before this loop is
 * elided in this chunk -- without it the first 'next < end' test would
 * read an uninitialized pointer. */
5498 for (start = buf, end = buf + buf_size; next < end; start = next) {
5499 next = find_next_marker(start + 4, end);
5500 size = next - start - 4;
5501 if (size <= 0) continue;
5502 switch (AV_RB32(start)) {
5503 case VC1_CODE_FRAME:
/* Hardware decoders consume the still-escaped input directly; only the
 * software path needs the unescaped copy in buf2. */
5504 if (avctx->hwaccel ||
5505 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5507 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5509 case VC1_CODE_FIELD: {
/* Record the second field as an extra slice entry; its start row is
 * assumed to be the vertical middle of the frame (see comment below). */
5511 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5514 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5515 if (!slices[n_slices].buf)
5517 buf_size3 = vc1_unescape_buffer(start + 4, size,
5518 slices[n_slices].buf);
5519 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5521 /* assuming that the field marker is at the exact middle,
5522 hope it's correct */
5523 slices[n_slices].mby_start = s->mb_height >> 1;
5524 n_slices1 = n_slices - 1; // index of the last slice of the first field
5526 // not necessary, ad hoc until I find a way to handle WVC1i
5527 buf_field2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5528 vc1_unescape_buffer(start + 4, size, buf_field2);
5531 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5532 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5533 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5534 vc1_decode_entry_point(avctx, v, &s->gb);
5536 case VC1_CODE_SLICE: {
5538 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5541 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5542 if (!slices[n_slices].buf)
5544 buf_size3 = vc1_unescape_buffer(start + 4, size,
5545 slices[n_slices].buf);
5546 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
/* Slice header carries its starting macroblock row in 9 bits. */
5548 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5554 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5555 const uint8_t *divider;
5557 divider = find_next_marker(buf, buf + buf_size);
5558 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5559 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5561 } else { // found field marker, unescape second field
5562 buf_field2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5563 vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, buf_field2);
5565 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5567 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5569 init_get_bits(&s->gb, buf2, buf_size2*8);
/* Simple/Main profile (WMV3): the payload is not escaped, read it
 * directly from the packet. */
5571 init_get_bits(&s->gb, buf, buf_size*8);
/* res_sprite streams prepend two sprite-control bits to every frame. */
5573 if (v->res_sprite) {
5574 v->new_sprite = !get_bits1(&s->gb);
5575 v->two_sprites = get_bits1(&s->gb);
5576 /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
5577 we're using the sprite compositor. These are intentionally kept separate
5578 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5579 the vc1 one for WVP2 */
5580 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5581 if (v->new_sprite) {
5582 // switch AVCodecContext parameters to those of the sprites
5583 avctx->width = avctx->coded_width = v->sprite_width;
5584 avctx->height = avctx->coded_height = v->sprite_height;
/* Dimensions changed mid-stream: tear the context down so it gets
 * re-initialized with the new size below. */
5591 if (s->context_initialized &&
5592 (s->width != avctx->coded_width ||
5593 s->height != avctx->coded_height)) {
5594 vc1_decode_end(avctx);
5597 if (!s->context_initialized) {
5598 if (ff_msmpeg4_decode_init(avctx) < 0 || vc1_decode_init_alloc_tables(v) < 0)
5601 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5603 if (v->profile == PROFILE_ADVANCED) {
5604 s->h_edge_pos = avctx->coded_width;
5605 s->v_edge_pos = avctx->coded_height;
5609 /* We need to set current_picture_ptr before reading the header,
5610 * otherwise we cannot store anything in there. */
5611 if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5612 int i = ff_find_unused_picture(s, 0);
5613 s->current_picture_ptr = &s->picture[i];
5616 // do parse frame header
5617 v->pic_header_flag = 0;
5618 if (v->profile < PROFILE_ADVANCED) {
5619 if (vc1_parse_frame_header(v, &s->gb) == -1) {
5623 if (vc1_parse_frame_header_adv(v, &s->gb) == -1) {
/* Sprite streams may only contain I-pictures. */
5628 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5629 && s->pict_type != AV_PICTURE_TYPE_I) {
5630 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5634 // process pulldown flags
5635 s->current_picture_ptr->f.repeat_pict = 0;
5636 // Pulldown flags are only valid when 'broadcast' has been set.
5637 // So ticks_per_frame will be 2
/* NOTE(review): the guarding condition for this branch (presumably the
 * repeat-first-field flag) is elided in this chunk. */
5640 s->current_picture_ptr->f.repeat_pict = 1;
5641 } else if (v->rptfrm) {
5643 s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5646 // for skipping the frame
5647 s->current_picture.f.pict_type = s->pict_type;
5648 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5650 /* skip B-frames if we don't have reference frames */
5651 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) {
/* Honor the caller's frame-skipping policy. */
5654 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5655 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5656 avctx->skip_frame >= AVDISCARD_ALL) {
5660 if (s->next_p_frame_damaged) {
5661 if (s->pict_type == AV_PICTURE_TYPE_B)
5664 s->next_p_frame_damaged = 0;
5667 if (MPV_frame_start(s, avctx) < 0) {
5671 s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
5672 s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
/* Hardware-accelerated paths hand the whole picture off and skip the
 * software slice loop below. */
5674 if ((CONFIG_VC1_VDPAU_DECODER)
5675 &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5676 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5677 else if (avctx->hwaccel) {
5678 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5680 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5682 if (avctx->hwaccel->end_frame(avctx) < 0)
5685 ff_er_frame_start(s);
5687 v->bits = buf_size * 8;
/* Field pictures: double the linesizes so each field is addressed as a
 * half-height picture, and rotate the mv_f buffer set
 * (last <- next <- current <- old last). */
5688 if (v->field_mode) {
5690 s->current_picture.f.linesize[0] <<= 1;
5691 s->current_picture.f.linesize[1] <<= 1;
5692 s->current_picture.f.linesize[2] <<= 1;
5694 s->uvlinesize <<= 1;
5695 tmp[0] = v->mv_f_last[0];
5696 tmp[1] = v->mv_f_last[1];
5697 v->mv_f_last[0] = v->mv_f_next[0];
5698 v->mv_f_last[1] = v->mv_f_next[1];
5699 v->mv_f_next[0] = v->mv_f[0];
5700 v->mv_f_next[1] = v->mv_f[1];
5701 v->mv_f[0] = tmp[0];
5702 v->mv_f[1] = tmp[1];
/* Decode all slices; in field mode mb_height is per-field. */
5704 mb_height = s->mb_height >> v->field_mode;
5705 for (i = 0; i <= n_slices; i++) {
/* A slice starting at or past mb_height belongs to the second field. */
5706 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5707 v->second_field = 1;
5708 v->blocks_off = s->mb_width * s->mb_height << 1;
5709 v->mb_off = s->mb_stride * s->mb_height >> 1;
5711 v->second_field = 0;
5716 v->pic_header_flag = 0;
/* The first slice after the field split (n_slices1 + 2) always carries
 * a field header; other slices signal an optional header with one bit. */
5717 if (v->field_mode && i == n_slices1 + 2)
5718 vc1_parse_frame_header_adv(v, &s->gb);
5719 else if (get_bits1(&s->gb)) {
5720 v->pic_header_flag = 1;
5721 vc1_parse_frame_header_adv(v, &s->gb);
/* Clamp the decode range of this slice to [start_mb_y, end_mb_y). */
5724 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5725 if (!v->field_mode || v->second_field)
5726 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5728 s->end_mb_y = (i == n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5729 vc1_decode_blocks(v);
/* Continue with the next slice's own bitstream reader. */
5731 s->gb = slices[i].gb;
5733 if (v->field_mode) {
5734 av_free(buf_field2);
5735 v->second_field = 0;
/* Undo the field-mode linesize doubling and, for B-pictures, restore
 * the next-field motion vectors that were clobbered during decoding. */
5737 if (v->field_mode) {
5738 if (s->pict_type == AV_PICTURE_TYPE_B) {
5739 memcpy(v->mv_f_base, v->mv_f_next_base,
5740 2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5742 s->current_picture.f.linesize[0] >>= 1;
5743 s->current_picture.f.linesize[1] >>= 1;
5744 s->current_picture.f.linesize[2] >>= 1;
5746 s->uvlinesize >>= 1;
5748 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits);
5749 // if (get_bits_count(&s->gb) > buf_size * 8)
/* Image codecs: composite the decoded sprite(s) and return the
 * compositor's output frame instead of the raw picture. */
5756 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5758 avctx->width = avctx->coded_width = v->output_width;
5759 avctx->height = avctx->coded_height = v->output_height;
5760 if (avctx->skip_frame >= AVDISCARD_NONREF)
5762 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5763 if (vc1_decode_sprites(v, &s->gb))
5766 *pict = v->sprite_output_frame;
5767 *data_size = sizeof(AVFrame);
/* Normal path: emit the current picture immediately in low-delay / B
 * cases, otherwise the (reordered) previous reference picture. */
5769 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5770 *pict = *(AVFrame*)s->current_picture_ptr;
5771 } else if (s->last_picture_ptr != NULL) {
5772 *pict = *(AVFrame*)s->last_picture_ptr;
5774 if (s->last_picture_ptr || s->low_delay) {
5775 *data_size = sizeof(AVFrame);
5776 ff_print_debug_info(s, pict);
/* Success-path cleanup of the per-slice buffers. */
5782 for (i = 0; i < n_slices; i++)
5783 av_free(slices[i].buf);
/* Error-path cleanup -- presumably reached via a label; the label and
 * return statements are elided in this chunk. */
5789 for (i = 0; i < n_slices; i++)
5790 av_free(slices[i].buf);
5792 av_free(buf_field2);
/* Human-readable names for the VC-1 profiles reported through
 * avctx->profile; the FF_PROFILE_UNKNOWN entry is the list sentinel. */
5797 static const AVProfile profiles[] = {
5798 { FF_PROFILE_VC1_SIMPLE, "Simple" },
5799 { FF_PROFILE_VC1_MAIN, "Main" },
5800 { FF_PROFILE_VC1_COMPLEX, "Complex" },
5801 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5802 { FF_PROFILE_UNKNOWN },
/* Software SMPTE VC-1 decoder registration; shares the init/close/decode
 * entry points with the other VC-1-family decoders in this file.
 * CODEC_CAP_DELAY: may buffer one picture for B-frame reordering. */
5805 AVCodec ff_vc1_decoder = {
5807 .type = AVMEDIA_TYPE_VIDEO,
5809 .priv_data_size = sizeof(VC1Context),
5810 .init = vc1_decode_init,
5811 .close = vc1_decode_end,
5812 .decode = vc1_decode_frame,
5813 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5814 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5815 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5816 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5819 #if CONFIG_WMV3_DECODER
/* WMV3 (Windows Media Video 9) decoder: same entry points as the VC-1
 * decoder, distinguished only by codec id (Simple/Main profile headers). */
5820 AVCodec ff_wmv3_decoder = {
5822 .type = AVMEDIA_TYPE_VIDEO,
5823 .id = CODEC_ID_WMV3,
5824 .priv_data_size = sizeof(VC1Context),
5825 .init = vc1_decode_init,
5826 .close = vc1_decode_end,
5827 .decode = vc1_decode_frame,
5828 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5829 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5830 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5831 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5835 #if CONFIG_WMV3_VDPAU_DECODER
/* VDPAU-accelerated WMV3 variant: advertises CODEC_CAP_HWACCEL_VDPAU and
 * offers only the VDPAU-specific pixel format, which routes
 * vc1_decode_frame() through ff_vdpau_vc1_decode_picture(). */
5836 AVCodec ff_wmv3_vdpau_decoder = {
5837 .name = "wmv3_vdpau",
5838 .type = AVMEDIA_TYPE_VIDEO,
5839 .id = CODEC_ID_WMV3,
5840 .priv_data_size = sizeof(VC1Context),
5841 .init = vc1_decode_init,
5842 .close = vc1_decode_end,
5843 .decode = vc1_decode_frame,
5844 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5845 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5846 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE},
5847 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5851 #if CONFIG_VC1_VDPAU_DECODER
/* VDPAU-accelerated VC-1 variant (Advanced profile pixel format). */
5852 AVCodec ff_vc1_vdpau_decoder = {
5853 .name = "vc1_vdpau",
5854 .type = AVMEDIA_TYPE_VIDEO,
5856 .priv_data_size = sizeof(VC1Context),
5857 .init = vc1_decode_init,
5858 .close = vc1_decode_end,
5859 .decode = vc1_decode_frame,
5860 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5861 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5862 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE},
5863 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5867 #if CONFIG_WMV3IMAGE_DECODER
/* WMVP sprite (Windows Media Image) decoder: runs the sprite compositor
 * on top of the WMV3 code. No CODEC_CAP_DELAY (sprites are I-only) and
 * a flush callback to reset compositor state on seek. */
5868 AVCodec ff_wmv3image_decoder = {
5869 .name = "wmv3image",
5870 .type = AVMEDIA_TYPE_VIDEO,
5871 .id = CODEC_ID_WMV3IMAGE,
5872 .priv_data_size = sizeof(VC1Context),
5873 .init = vc1_decode_init,
5874 .close = vc1_decode_end,
5875 .decode = vc1_decode_frame,
5876 .capabilities = CODEC_CAP_DR1,
5877 .flush = vc1_sprite_flush,
5878 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5879 .pix_fmts = ff_pixfmt_list_420
5883 #if CONFIG_VC1IMAGE_DECODER
/* WVP2 sprite decoder: same compositor path as wmv3image but driven by
 * the VC-1 (escaped-bitstream) parsing branch of vc1_decode_frame(). */
5884 AVCodec ff_vc1image_decoder = {
5886 .type = AVMEDIA_TYPE_VIDEO,
5887 .id = CODEC_ID_VC1IMAGE,
5888 .priv_data_size = sizeof(VC1Context),
5889 .init = vc1_decode_init,
5890 .close = vc1_decode_end,
5891 .decode = vc1_decode_frame,
5892 .capabilities = CODEC_CAP_DR1,
5893 .flush = vc1_sprite_flush,
5894 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5895 .pix_fmts = ff_pixfmt_list_420