2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of Libav.
9 * Libav is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * Libav is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with Libav; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
36 #include "vc1acdata.h"
37 #include "msmpeg4data.h"
39 #include "simple_idct.h"
41 #include "vdpau_internal.h"
46 #define MB_INTRA_VLC_BITS 9
/* vlc_offs[]: cumulative start offsets (in VLC-table entries) of every
 * statically allocated VLC carved out of the single shared vlc_table[]
 * in vc1_init_common(); consecutive entries [k], [k+1] bound the slice
 * handed to one init_vlc() call, and the final value (32372) matches the
 * declared size of vlc_table[]. */
51 static const uint16_t vlc_offs[] = {
52 0, 520, 552, 616, 1128, 1160, 1224, 1740, 1772, 1836, 1900, 2436,
53 2986, 3050, 3610, 4154, 4218, 4746, 5326, 5390, 5902, 6554, 7658, 8342,
54 9304, 9988, 10630, 11234, 12174, 13006, 13560, 14232, 14786, 15432, 16350, 17522,
55 20372, 21818, 22330, 22394, 23166, 23678, 23742, 24820, 25332, 25396, 26460, 26980,
56 27048, 27592, 27600, 27608, 27616, 27624, 28224, 28258, 28290, 28802, 28834, 28866,
57 29378, 29412, 29444, 29960, 29994, 30026, 30538, 30572, 30604, 31120, 31154, 31186,
58 31714, 31746, 31778, 32306, 32340, 32372
61 // offset tables for interlaced picture MVDATA decoding
/* offset_table1/2: per-index base offsets added to decoded MV deltas;
 * table2 grows as 2^k - 1, table1 as 2^(k-1) — exact use is in the MVDATA
 * decode path, not visible in this chunk (NOTE(review): confirm there). */
62 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
63 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
66 * Init VC-1 specific tables and VC1Context members
67 * @param v The VC1Context to initialize
70 static int vc1_init_common(VC1Context *v)
/* Single static backing store shared by every sliced VLC below; it is
 * partitioned by vlc_offs[] and its size equals the last vlc_offs entry. */
74 static VLC_TYPE vlc_table[32372][2];
76 v->hrd_rate = v->hrd_buffer = NULL;
/* Self-contained VLCs with their own static storage. */
80 INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
81 ff_vc1_bfraction_bits, 1, 1,
82 ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS);
83 INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
84 ff_vc1_norm2_bits, 1, 1,
85 ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS);
86 INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
87 ff_vc1_norm6_bits, 1, 1,
88 ff_vc1_norm6_codes, 2, 2, 556);
89 INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
90 ff_vc1_imode_bits, 1, 1,
91 ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS);
/* Per-transform-type tables (TTMB / TTBLK / SUBBLKPAT): three slices of
 * vlc_table[] per iteration, bounded by consecutive vlc_offs[] entries. */
92 for (i = 0; i < 3; i++) {
93 ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 0]];
94 ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i * 3 + 1] - vlc_offs[i * 3 + 0];
95 init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
96 ff_vc1_ttmb_bits[i], 1, 1,
97 ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
98 ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 1]];
99 ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i * 3 + 2] - vlc_offs[i * 3 + 1];
100 init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
101 ff_vc1_ttblk_bits[i], 1, 1,
102 ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
103 ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 2]];
104 ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i * 3 + 3] - vlc_offs[i * 3 + 2];
105 init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
106 ff_vc1_subblkpat_bits[i], 1, 1,
107 ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* 4MV block pattern, P-frame CBPCY and MV-diff tables: slices start at
 * vlc_offs[9] and continue the same three-per-iteration layout. */
109 for (i = 0; i < 4; i++) {
110 ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 9]];
111 ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i * 3 + 10] - vlc_offs[i * 3 + 9];
112 init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
113 ff_vc1_4mv_block_pattern_bits[i], 1, 1,
114 ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
115 ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 10]];
116 ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i * 3 + 11] - vlc_offs[i * 3 + 10];
117 init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
118 ff_vc1_cbpcy_p_bits[i], 1, 1,
119 ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
120 ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 11]];
121 ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i * 3 + 12] - vlc_offs[i * 3 + 11];
122 init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
123 ff_vc1_mv_diff_bits[i], 1, 1,
124 ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
/* AC coefficient tables plus interlaced 2-ref MVDATA: two slices per
 * iteration, starting at vlc_offs[21]. */
126 for (i = 0; i < 8; i++) {
127 ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i * 2 + 21]];
128 ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i * 2 + 22] - vlc_offs[i * 2 + 21];
129 init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
130 &vc1_ac_tables[i][0][1], 8, 4,
131 &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC);
132 /* initialize interlaced MVDATA tables (2-Ref) */
133 ff_vc1_2ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 2 + 22]];
134 ff_vc1_2ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 2 + 23] - vlc_offs[i * 2 + 22];
135 init_vlc(&ff_vc1_2ref_mvdata_vlc[i], VC1_2REF_MVDATA_VLC_BITS, 126,
136 ff_vc1_2ref_mvdata_bits[i], 1, 1,
137 ff_vc1_2ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
139 for (i = 0; i < 4; i++) {
140 /* initialize 4MV MBMODE VLC tables for interlaced frame P picture */
141 ff_vc1_intfr_4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 37]];
142 ff_vc1_intfr_4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 38] - vlc_offs[i * 3 + 37];
143 init_vlc(&ff_vc1_intfr_4mv_mbmode_vlc[i], VC1_INTFR_4MV_MBMODE_VLC_BITS, 15,
144 ff_vc1_intfr_4mv_mbmode_bits[i], 1, 1,
145 ff_vc1_intfr_4mv_mbmode_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
146 /* initialize NON-4MV MBMODE VLC tables for the same */
147 ff_vc1_intfr_non4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 38]];
148 ff_vc1_intfr_non4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 39] - vlc_offs[i * 3 + 38];
149 init_vlc(&ff_vc1_intfr_non4mv_mbmode_vlc[i], VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 9,
150 ff_vc1_intfr_non4mv_mbmode_bits[i], 1, 1,
151 ff_vc1_intfr_non4mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
152 /* initialize interlaced MVDATA tables (1-Ref) */
153 ff_vc1_1ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 39]];
154 ff_vc1_1ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 3 + 40] - vlc_offs[i * 3 + 39];
155 init_vlc(&ff_vc1_1ref_mvdata_vlc[i], VC1_1REF_MVDATA_VLC_BITS, 72,
156 ff_vc1_1ref_mvdata_bits[i], 1, 1,
157 ff_vc1_1ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
159 for (i = 0; i < 4; i++) {
160 /* Initialize 2MV Block pattern VLC tables */
161 ff_vc1_2mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i + 49]];
162 ff_vc1_2mv_block_pattern_vlc[i].table_allocated = vlc_offs[i + 50] - vlc_offs[i + 49];
163 init_vlc(&ff_vc1_2mv_block_pattern_vlc[i], VC1_2MV_BLOCK_PATTERN_VLC_BITS, 4,
164 ff_vc1_2mv_block_pattern_bits[i], 1, 1,
165 ff_vc1_2mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
167 for (i = 0; i < 8; i++) {
168 /* Initialize interlaced CBPCY VLC tables (Table 124 - Table 131) */
169 ff_vc1_icbpcy_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 53]];
170 ff_vc1_icbpcy_vlc[i].table_allocated = vlc_offs[i * 3 + 54] - vlc_offs[i * 3 + 53];
171 init_vlc(&ff_vc1_icbpcy_vlc[i], VC1_ICBPCY_VLC_BITS, 63,
172 ff_vc1_icbpcy_p_bits[i], 1, 1,
173 ff_vc1_icbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
174 /* Initialize interlaced field picture MBMODE VLC tables */
175 ff_vc1_if_mmv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 54]];
176 ff_vc1_if_mmv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 55] - vlc_offs[i * 3 + 54];
177 init_vlc(&ff_vc1_if_mmv_mbmode_vlc[i], VC1_IF_MMV_MBMODE_VLC_BITS, 8,
178 ff_vc1_if_mmv_mbmode_bits[i], 1, 1,
179 ff_vc1_if_mmv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
180 ff_vc1_if_1mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 55]];
181 ff_vc1_if_1mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 56] - vlc_offs[i * 3 + 55];
182 init_vlc(&ff_vc1_if_1mv_mbmode_vlc[i], VC1_IF_1MV_MBMODE_VLC_BITS, 6,
183 ff_vc1_if_1mv_mbmode_bits[i], 1, 1,
184 ff_vc1_if_1mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
191 v->mvrange = 0; /* 7.1.1.18, p80 */
196 /***********************************************************************/
198 * @name VC-1 Bitplane decoding
216 /** @} */ //imode defines
219 /** @} */ //Bitplane group
/* Write the signed residual blocks of already-filtered macroblocks to the
 * output picture, clamped to the valid pixel range. Runs one MB row and one
 * MB column behind the decoding loop (see the comment block below). */
221 static void vc1_put_signed_blocks_clamped(VC1Context *v)
223 MpegEncContext *s = &v->s;
224 int topleft_mb_pos, top_mb_pos;
225 int stride_y, fieldtx;
228 /* The put pixels loop is always one MB row behind the decoding loop,
229 * because we can only put pixels when overlap filtering is done, and
230 * for filtering of the bottom edge of a MB, we need the next MB row
232 * Within the row, the put pixels loop is also one MB col behind the
233 * decoding loop. The reason for this is again, because for filtering
234 * of the right MB edge, we need the next MB present. */
235 if (!s->first_slice_line) {
237 topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
238 fieldtx = v->fieldtx_plane[topleft_mb_pos];
/* fieldtx set doubles the luma stride (fields are interleaved). */
239 stride_y = (s->linesize) << fieldtx;
/* v_dist: line offset of the lower luma block pair; 8 for frame tx
 * (fieldtx == 0), 15 for field tx. */
240 v_dist = (16 - fieldtx) >> (fieldtx == 0);
241 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
242 s->dest[0] - 16 * s->linesize - 16,
244 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
245 s->dest[0] - 16 * s->linesize - 8,
247 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
248 s->dest[0] - v_dist * s->linesize - 16,
250 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
251 s->dest[0] - v_dist * s->linesize - 8,
253 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
254 s->dest[1] - 8 * s->uvlinesize - 8,
256 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
257 s->dest[2] - 8 * s->uvlinesize - 8,
/* On the last MB of the row, also flush the MB directly above (there is
 * no further MB to the right to trigger it later). */
260 if (s->mb_x == s->mb_width - 1) {
261 top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
262 fieldtx = v->fieldtx_plane[top_mb_pos];
263 stride_y = s->linesize << fieldtx;
264 v_dist = fieldtx ? 15 : 8;
265 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
266 s->dest[0] - 16 * s->linesize,
268 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
269 s->dest[0] - 16 * s->linesize + 8,
271 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
272 s->dest[0] - v_dist * s->linesize,
274 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
275 s->dest[0] - v_dist * s->linesize + 8,
277 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
278 s->dest[1] - 8 * s->uvlinesize,
280 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
281 s->dest[2] - 8 * s->uvlinesize,
/* Rotate the four delayed-block indices (wrap at n_allocated_blks). */
286 #define inc_blk_idx(idx) do { \
288 if (idx >= v->n_allocated_blks) \
292 inc_blk_idx(v->topleft_blk_idx);
293 inc_blk_idx(v->top_blk_idx);
294 inc_blk_idx(v->left_blk_idx);
295 inc_blk_idx(v->cur_blk_idx);
/* In-loop deblocking for an intra block with quantizer pq: vertical edges
 * against the row above (when not on the first slice line), chroma edges,
 * and horizontal edges — including the picture's last row special case. */
298 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
300 MpegEncContext *s = &v->s;
302 if (!s->first_slice_line) {
303 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
305 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
306 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
/* chroma planes: s->dest[1] is U, s->dest[2] is V */
307 for (j = 0; j < 2; j++) {
308 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
310 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* internal horizontal luma edge at mid-MB */
313 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
/* last MB row: filter the MB's own vertical edges now, since no row
 * below will trigger them */
315 if (s->mb_y == s->end_mb_y - 1) {
317 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
318 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
319 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
321 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/* Delayed variant of the intra-block loop filter: trails the overlap filter
 * by one row/column (hence two rows/cols behind the decoding loop), so all
 * destination offsets are shifted up/left relative to vc1_loop_filter_iblk. */
325 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
327 MpegEncContext *s = &v->s;
330 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
331 * means it runs two rows/cols behind the decoding loop. */
332 if (!s->first_slice_line) {
334 if (s->mb_y >= s->start_mb_y + 2) {
335 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
338 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
339 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
340 for (j = 0; j < 2; j++) {
341 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
343 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
347 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
/* rightmost MB column: also filter the MB directly above (no MB to the
 * right will trigger it later) */
350 if (s->mb_x == s->mb_width - 1) {
351 if (s->mb_y >= s->start_mb_y + 2) {
352 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
355 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
356 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
357 for (j = 0; j < 2; j++) {
358 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
360 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
364 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
/* bottom of the filtered region: flush the remaining delayed edges */
367 if (s->mb_y == s->end_mb_y) {
370 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
371 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
373 for (j = 0; j < 2; j++) {
374 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
379 if (s->mb_x == s->mb_width - 1) {
381 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
382 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
384 for (j = 0; j < 2; j++) {
385 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Overlap smoothing for intra blocks (advanced profile). Filtering is
 * conditional on v->condover / pq >= 9 / the per-MB over_flags_plane bit,
 * and operates on the delayed block buffers (cur/left/top/topleft indices). */
393 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
395 MpegEncContext *s = &v->s;
398 if (v->condover == CONDOVER_NONE)
401 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
403 /* Within a MB, the horizontal overlap always runs before the vertical.
404 * To accomplish that, we run the H on left and internal borders of the
405 * currently decoded MB. Then, we wait for the next overlap iteration
406 * to do H overlap on the right edge of this MB, before moving over and
407 * running the V overlap. Therefore, the V overlap makes us trail by one
408 * MB col and the H overlap filter makes us trail by one MB row. This
409 * is reflected in the time at which we run the put_pixels loop. */
410 if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
/* H overlap: left neighbour's right edge against this MB's left edge
 * (luma block pairs 1/0 and 3/2, chroma 4/4 and 5/5) */
411 if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
412 v->over_flags_plane[mb_pos - 1])) {
413 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
414 v->block[v->cur_blk_idx][0]);
415 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
416 v->block[v->cur_blk_idx][2]);
417 if (!(s->flags & CODEC_FLAG_GRAY)) {
418 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
419 v->block[v->cur_blk_idx][4]);
420 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
421 v->block[v->cur_blk_idx][5]);
/* H overlap on the MB-internal luma edges (0|1 and 2|3) */
424 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
425 v->block[v->cur_blk_idx][1]);
426 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
427 v->block[v->cur_blk_idx][3]);
/* rightmost MB column: run the V overlap for this MB now, against the
 * MB above (delayed top_blk buffers) */
429 if (s->mb_x == s->mb_width - 1) {
430 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
431 v->over_flags_plane[mb_pos - s->mb_stride])) {
432 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
433 v->block[v->cur_blk_idx][0]);
434 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
435 v->block[v->cur_blk_idx][1]);
436 if (!(s->flags & CODEC_FLAG_GRAY)) {
437 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
438 v->block[v->cur_blk_idx][4]);
439 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
440 v->block[v->cur_blk_idx][5]);
443 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
444 v->block[v->cur_blk_idx][2]);
445 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
446 v->block[v->cur_blk_idx][3]);
/* usual case: V overlap for the MB to the left (one col behind) */
449 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
450 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
451 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
452 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
453 v->block[v->left_blk_idx][0]);
454 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
455 v->block[v->left_blk_idx][1]);
456 if (!(s->flags & CODEC_FLAG_GRAY)) {
457 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
458 v->block[v->left_blk_idx][4]);
459 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
460 v->block[v->left_blk_idx][5]);
463 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
464 v->block[v->left_blk_idx][2]);
465 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
466 v->block[v->left_blk_idx][3]);
470 /** Do motion compensation over 1 macroblock
471 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
473 static void vc1_mc_1mv(VC1Context *v, int dir)
475 MpegEncContext *s = &v->s;
476 DSPContext *dsp = &v->s.dsp;
477 uint8_t *srcY, *srcU, *srcV;
478 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
/* in field mode the vertical edge spans only one field */
480 int v_edge_pos = s->v_edge_pos >> v->field_mode;
481 if (!v->field_mode && !v->s.last_picture.f.data[0])
/* dir selects forward (0) / backward (1) prediction; MVs in quarter-pel */
484 mx = s->mv[dir][0][0];
485 my = s->mv[dir][0][1];
487 // store motion vectors for further use in B frames
488 if (s->pict_type == AV_PICTURE_TYPE_P) {
489 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
490 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
/* derive chroma MV as half the luma MV, biasing 3/4-pel positions up */
493 uvmx = (mx + ((mx & 3) == 3)) >> 1;
494 uvmy = (my + ((my & 3) == 3)) >> 1;
495 v->luma_mv[s->mb_x][0] = uvmx;
496 v->luma_mv[s->mb_x][1] = uvmy;
/* opposite-parity field reference: shift the vertical MV by +/-2 qpel */
499 v->cur_field_type != v->ref_field_type[dir]) {
500 my = my - 2 + 4 * v->cur_field_type;
501 uvmy = uvmy - 2 + 4 * v->cur_field_type;
/* NOTE(review): fcm == 1 appears to denote interlaced frame coding here
 * — confirm against the fcm enum definition */
504 if (v->fastuvmc && (v->fcm != 1)) { // fastuvmc shall be ignored for interlaced frame picture
505 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
506 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
/* pick reference planes; a same-frame opposite field may serve as the
 * reference, hence the current_picture cases below */
508 if (v->field_mode) { // interlaced field picture
510 if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) {
511 srcY = s->current_picture.f.data[0];
512 srcU = s->current_picture.f.data[1];
513 srcV = s->current_picture.f.data[2];
515 srcY = s->last_picture.f.data[0];
516 srcU = s->last_picture.f.data[1];
517 srcV = s->last_picture.f.data[2];
520 srcY = s->next_picture.f.data[0];
521 srcU = s->next_picture.f.data[1];
522 srcV = s->next_picture.f.data[2];
526 srcY = s->last_picture.f.data[0];
527 srcU = s->last_picture.f.data[1];
528 srcV = s->last_picture.f.data[2];
530 srcY = s->next_picture.f.data[0];
531 srcU = s->next_picture.f.data[1];
532 srcV = s->next_picture.f.data[2];
/* integer source position: MB origin plus integer part of the qpel MV */
536 src_x = s->mb_x * 16 + (mx >> 2);
537 src_y = s->mb_y * 16 + (my >> 2);
538 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
539 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
541 if (v->profile != PROFILE_ADVANCED) {
542 src_x = av_clip( src_x, -16, s->mb_width * 16);
543 src_y = av_clip( src_y, -16, s->mb_height * 16);
544 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
545 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
547 src_x = av_clip( src_x, -17, s->avctx->coded_width);
548 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
549 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
550 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
553 srcY += src_y * s->linesize + src_x;
554 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
555 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* bottom-field reference: skip one line into the interleaved picture */
557 if (v->field_mode && v->ref_field_type[dir]) {
558 srcY += s->current_picture_ptr->f.linesize[0];
559 srcU += s->current_picture_ptr->f.linesize[1];
560 srcV += s->current_picture_ptr->f.linesize[2];
563 /* for grayscale we should not try to read from unknown area */
564 if (s->flags & CODEC_FLAG_GRAY) {
565 srcU = s->edge_emu_buffer + 18 * s->linesize;
566 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* slow path: range reduction, intensity compensation, or out-of-picture
 * reads all need a private emulated-edge copy of the source block */
569 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
570 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
571 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my&3) - 16 - s->mspel * 3) {
572 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
574 srcY -= s->mspel * (1 + s->linesize);
575 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
576 17 + s->mspel * 2, 17 + s->mspel * 2,
577 src_x - s->mspel, src_y - s->mspel,
578 s->h_edge_pos, v_edge_pos);
579 srcY = s->edge_emu_buffer;
580 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
581 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
582 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
583 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
586 /* if we deal with range reduction we need to scale source blocks */
587 if (v->rangeredfrm) {
592 for (j = 0; j < 17 + s->mspel * 2; j++) {
593 for (i = 0; i < 17 + s->mspel * 2; i++)
594 src[i] = ((src[i] - 128) >> 1) + 128;
599 for (j = 0; j < 9; j++) {
600 for (i = 0; i < 9; i++) {
601 src[i] = ((src[i] - 128) >> 1) + 128;
602 src2[i] = ((src2[i] - 128) >> 1) + 128;
604 src += s->uvlinesize;
605 src2 += s->uvlinesize;
608 /* if we deal with intensity compensation we need to scale source blocks */
609 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
614 for (j = 0; j < 17 + s->mspel * 2; j++) {
615 for (i = 0; i < 17 + s->mspel * 2; i++)
616 src[i] = v->luty[src[i]];
621 for (j = 0; j < 9; j++) {
622 for (i = 0; i < 9; i++) {
623 src[i] = v->lutuv[src[i]];
624 src2[i] = v->lutuv[src2[i]];
626 src += s->uvlinesize;
627 src2 += s->uvlinesize;
630 srcY += s->mspel * (1 + s->linesize);
/* writing the bottom field: offset destinations by one picture line */
633 if (v->field_mode && v->cur_field_type) {
634 off = s->current_picture_ptr->f.linesize[0];
635 off_uv = s->current_picture_ptr->f.linesize[1];
/* luma MC: quarter-pel via mspel DSP when sub-pel bits present,
 * otherwise half-pel put (four 8x8 quadrants for the mspel case) */
641 dxy = ((my & 3) << 2) | (mx & 3);
642 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
643 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
644 srcY += s->linesize * 8;
645 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
646 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
647 } else { // hpel mc - always used for luma
648 dxy = (my & 2) | ((mx & 2) >> 1);
650 dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
652 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
655 if (s->flags & CODEC_FLAG_GRAY) return;
656 /* Chroma MC always uses qpel bilinear */
657 uvmx = (uvmx & 3) << 1;
658 uvmy = (uvmy & 3) << 1;
660 dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
661 dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
663 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
664 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Median of four integers: the mean of the two middle values,
 * i.e. (max of the pairwise minima + min of the pairwise maxima) / 2,
 * with C integer division (truncation toward zero).
 */
static inline int median4(int a, int b, int c, int d)
{
    int lo_ab, hi_ab, lo_cd, hi_cd;

    if (a < b) { lo_ab = a; hi_ab = b; } else { lo_ab = b; hi_ab = a; }
    if (c < d) { lo_cd = c; hi_cd = d; } else { lo_cd = d; hi_cd = c; }

    /* drop the overall minimum and maximum; average the two survivors */
    return ((lo_ab > lo_cd ? lo_ab : lo_cd) +
            (hi_ab < hi_cd ? hi_ab : hi_cd)) / 2;
}
679 /** Do motion compensation for 4-MV macroblock - luminance block
681 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
683 MpegEncContext *s = &v->s;
684 DSPContext *dsp = &v->s.dsp;
686 int dxy, mx, my, src_x, src_y;
/* per-block field MV flag only applies to interlaced frame pictures
 * (NOTE(review): fcm == 1 taken as interlaced frame — confirm) */
688 int fieldmv = (v->fcm == 1) ? v->blk_mv_type[s->block_index[n]] : 0;
689 int v_edge_pos = s->v_edge_pos >> v->field_mode;
691 if (!v->field_mode && !v->s.last_picture.f.data[0])
/* n selects which of the four 8x8 luma blocks (0..3) to compensate */
694 mx = s->mv[dir][n][0];
695 my = s->mv[dir][n][1];
699 if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
700 srcY = s->current_picture.f.data[0];
702 srcY = s->last_picture.f.data[0];
704 srcY = s->last_picture.f.data[0];
706 srcY = s->next_picture.f.data[0];
709 if (v->cur_field_type != v->ref_field_type[dir])
710 my = my - 2 + 4 * v->cur_field_type;
/* last block of a field P MB: derive the MV stored for B-frame use from
 * the dominant field's MVs (median of 4, median of 3, or mean of 2) */
713 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
714 int same_count = 0, opp_count = 0, k;
715 int chosen_mv[2][4][2], f;
717 for (k = 0; k < 4; k++) {
718 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
719 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
720 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
724 f = opp_count > same_count;
725 switch (f ? opp_count : same_count) {
727 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
728 chosen_mv[f][2][0], chosen_mv[f][3][0]);
729 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
730 chosen_mv[f][2][1], chosen_mv[f][3][1]);
733 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
734 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
737 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
738 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
741 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
742 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
743 for (k = 0; k < 4; k++)
744 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* interlaced frame picture: pull the MV back inside the legal range
 * (width in qpel units horizontally, half-height vertically) */
747 if (v->fcm == 1) { // not sure if needed for other types of picture
749 int width = s->avctx->coded_width;
750 int height = s->avctx->coded_height >> 1;
751 qx = (s->mb_x * 16) + (mx >> 2);
752 qy = (s->mb_y * 8) + (my >> 3);
757 mx -= 4 * (qx - width);
760 else if (qy > height + 1)
761 my -= 8 * (qy - height - 1);
/* destination offset of the 8x8 block inside the MB; for field MVs the
 * two bottom blocks land on the second field line instead of row 8 */
764 if ((v->fcm == 1) && fieldmv)
765 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
767 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
768 if (v->field_mode && v->cur_field_type)
769 off += s->current_picture_ptr->f.linesize[0];
771 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
773 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
775 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
777 if (v->profile != PROFILE_ADVANCED) {
778 src_x = av_clip(src_x, -16, s->mb_width * 16);
779 src_y = av_clip(src_y, -16, s->mb_height * 16);
781 src_x = av_clip(src_x, -17, s->avctx->coded_width);
784 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
786 src_y = av_clip(src_y, -18, s->avctx->coded_height);
788 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
792 srcY += src_y * s->linesize + src_x;
793 if (v->field_mode && v->ref_field_type[dir])
794 srcY += s->current_picture_ptr->f.linesize[0];
796 if (fieldmv && !(src_y & 1))
798 if (fieldmv && (src_y & 1) && src_y < 4)
/* slow path: emulated edge copy needed for range reduction, intensity
 * compensation, or out-of-picture source positions */
800 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
801 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
802 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
803 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
804 /* check emulate edge stride and offset */
805 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
806 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
807 src_x - s->mspel, src_y - (s->mspel << fieldmv),
808 s->h_edge_pos, v_edge_pos);
809 srcY = s->edge_emu_buffer;
810 /* if we deal with range reduction we need to scale source blocks */
811 if (v->rangeredfrm) {
816 for (j = 0; j < 9 + s->mspel * 2; j++) {
817 for (i = 0; i < 9 + s->mspel * 2; i++)
818 src[i] = ((src[i] - 128) >> 1) + 128;
819 src += s->linesize << fieldmv;
822 /* if we deal with intensity compensation we need to scale source blocks */
823 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
828 for (j = 0; j < 9 + s->mspel * 2; j++) {
829 for (i = 0; i < 9 + s->mspel * 2; i++)
830 src[i] = v->luty[src[i]];
831 src += s->linesize << fieldmv;
834 srcY += s->mspel * (1 + (s->linesize << fieldmv));
/* 8x8 luma MC: quarter-pel mspel put or half-pel put, as in vc1_mc_1mv */
838 dxy = ((my & 3) << 2) | (mx & 3);
839 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
840 } else { // hpel mc - always used for luma
841 dxy = (my & 2) | ((mx & 2) >> 1);
843 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
845 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Derive the chroma MV (*tx, *ty) from the four luma MVs mvx[]/mvy[].
 * a[k] is compared against `flag` (intra flag or field flag depending on
 * caller); idx marks the mismatching blocks. With 0 mismatches take the
 * component-wise median of all four, with 1 the median of the remaining
 * three, with 2 the mean of the two matching ones. Returns a count of
 * usable MVs (tail of the function not visible in this chunk). */
849 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
/* count[] = popcount of the 4-bit mismatch index */
852 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
854 idx = ((a[3] != flag) << 3)
855 | ((a[2] != flag) << 2)
856 | ((a[1] != flag) << 1)
859 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
860 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
862 } else if (count[idx] == 1) {
/* exactly one mismatching block: median of the other three */
865 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
866 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
869 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
870 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
873 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
874 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
877 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
878 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
881 } else if (count[idx] == 2) {
/* two mismatches: average the two matching blocks (t1, t2 scan for
 * the cleared bits of idx) */
883 for (i = 0; i < 3; i++)
888 for (i = t1 + 1; i < 4; i++)
893 *tx = (mvx[t1] + mvx[t2]) / 2;
894 *ty = (mvy[t1] + mvy[t2]) / 2;
902 /** Do motion compensation for 4-MV macroblock - both chroma blocks
904 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
906 MpegEncContext *s = &v->s;
907 DSPContext *dsp = &v->s.dsp;
908 uint8_t *srcU, *srcV;
909 int uvmx, uvmy, uvsrc_x, uvsrc_y;
910 int k, tx = 0, ty = 0;
911 int mvx[4], mvy[4], intra[4], mv_f[4];
913 int chroma_ref_type = v->cur_field_type, off = 0;
914 int v_edge_pos = s->v_edge_pos >> v->field_mode;
916 if (!v->field_mode && !v->s.last_picture.f.data[0])
918 if (s->flags & CODEC_FLAG_GRAY)
921 for (k = 0; k < 4; k++) {
922 mvx[k] = s->mv[dir][k][0];
923 mvy[k] = s->mv[dir][k][1];
924 intra[k] = v->mb_type[0][s->block_index[k]];
926 mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
929 /* calculate chroma MV vector from four luma MVs */
930 if (!v->field_mode || (v->field_mode && !v->numref)) {
931 valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
933 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
934 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
935 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
936 return; //no need to do MC for intra blocks
940 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
942 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
944 chroma_ref_type = !v->cur_field_type;
946 s->current_picture.f.motion_val[1][s->block_index[0]][0] = tx;
947 s->current_picture.f.motion_val[1][s->block_index[0]][1] = ty;
948 uvmx = (tx + ((tx & 3) == 3)) >> 1;
949 uvmy = (ty + ((ty & 3) == 3)) >> 1;
951 v->luma_mv[s->mb_x][0] = uvmx;
952 v->luma_mv[s->mb_x][1] = uvmy;
955 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
956 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
958 // Field conversion bias
959 if (v->cur_field_type != chroma_ref_type)
960 uvmy += 2 - 4 * chroma_ref_type;
962 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
963 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
965 if (v->profile != PROFILE_ADVANCED) {
966 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
967 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
969 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
970 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
975 if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
976 srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
977 srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
979 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
980 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
983 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
984 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
987 srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
988 srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
992 if (chroma_ref_type) {
993 srcU += s->current_picture_ptr->f.linesize[1];
994 srcV += s->current_picture_ptr->f.linesize[2];
996 off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
999 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1000 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1001 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
1002 s->dsp.emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize,
1003 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1004 s->h_edge_pos >> 1, v_edge_pos >> 1);
1005 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1006 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1007 s->h_edge_pos >> 1, v_edge_pos >> 1);
1008 srcU = s->edge_emu_buffer;
1009 srcV = s->edge_emu_buffer + 16;
1011 /* if we deal with range reduction we need to scale source blocks */
1012 if (v->rangeredfrm) {
1014 uint8_t *src, *src2;
1018 for (j = 0; j < 9; j++) {
1019 for (i = 0; i < 9; i++) {
1020 src[i] = ((src[i] - 128) >> 1) + 128;
1021 src2[i] = ((src2[i] - 128) >> 1) + 128;
1023 src += s->uvlinesize;
1024 src2 += s->uvlinesize;
1027 /* if we deal with intensity compensation we need to scale source blocks */
1028 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1030 uint8_t *src, *src2;
1034 for (j = 0; j < 9; j++) {
1035 for (i = 0; i < 9; i++) {
1036 src[i] = v->lutuv[src[i]];
1037 src2[i] = v->lutuv[src2[i]];
1039 src += s->uvlinesize;
1040 src2 += s->uvlinesize;
1045 /* Chroma MC always uses qpel bilinear */
1046 uvmx = (uvmx & 3) << 1;
1047 uvmy = (uvmy & 3) << 1;
1049 dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1050 dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1052 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1053 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1057 /** Do motion compensation for 4-MV field chroma macroblock (both U and V)
1059 static void vc1_mc_4mv_chroma4(VC1Context *v)
1061 MpegEncContext *s = &v->s;
1062 DSPContext *dsp = &v->s.dsp;
1063 uint8_t *srcU, *srcV;
1064 int uvsrc_x, uvsrc_y;
1065 int uvmx_field[4], uvmy_field[4];
/* non-zero when the current block uses field (interleaved-line) MVs;
 * selects doubled strides and halved sub-block heights below */
1067 int fieldmv = v->blk_mv_type[s->block_index[0]];
/* rounding table applied when deriving a chroma MV from a field luma MV */
1068 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
1069 int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
1070 int v_edge_pos = s->v_edge_pos >> 1;
/* nothing to predict from without a reference picture */
1072 if (!v->s.last_picture.f.data[0])
/* grayscale output: chroma planes are not decoded */
1074 if (s->flags & CODEC_FLAG_GRAY)
/* pass 1: derive the four chroma MVs from the four forward luma MVs */
1077 for (i = 0; i < 4; i++) {
1078 tx = s->mv[0][i][0];
1079 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1080 ty = s->mv[0][i][1];
1082 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1084 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
/* pass 2: fetch, optionally edge-emulate/scale, and filter each 4x4 sub-block */
1087 for (i = 0; i < 4; i++) {
1088 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1089 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1090 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1091 // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1092 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1093 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1094 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1095 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
/* keep only the fractional (qpel) part for the bilinear chroma filter */
1096 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1097 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
1099 if (fieldmv && !(uvsrc_y & 1))
1101 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
/* fall back to edge_emu_buffer when intensity compensation is active (the
 * LUT pass below must not write into the reference frame) or the 5x5 source
 * patch would read beyond the padded picture edges */
1103 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
1104 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1105 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1106 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
1107 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1108 s->h_edge_pos >> 1, v_edge_pos);
1109 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1110 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1111 s->h_edge_pos >> 1, v_edge_pos);
1112 srcU = s->edge_emu_buffer;
1113 srcV = s->edge_emu_buffer + 16;
1115 /* if we deal with intensity compensation we need to scale source blocks */
1116 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1118 uint8_t *src, *src2;
1122 for (j = 0; j < 5; j++) {
1123 for (i = 0; i < 5; i++) {
1124 src[i] = v->lutuv[src[i]];
1125 src2[i] = v->lutuv[src2[i]];
1127 src += s->uvlinesize << 1;
1128 src2 += s->uvlinesize << 1;
/* final bilinear interpolation; stride is doubled for field sub-blocks */
1133 dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1134 dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1136 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1137 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1142 /***********************************************************************/
1144 * @name VC-1 Block-level functions
1145 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1151 * @brief Get macroblock-level quantizer scale
/* Decode the macroblock-level quantizer: sets `mquant` from the bitstream
 * according to the active DQUANT profile. Relies on `v`, `s`, `gb`, `mquant`,
 * `mqdiff` and `edges` being in scope at the expansion site. */
1153 #define GET_MQUANT() \
1154 if (v->dquantfrm) { \
1156 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1157 if (v->dqbilevel) { \
1158 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1160 mqdiff = get_bits(gb, 3); \
1162 mquant = v->pq + mqdiff; \
1164 mquant = get_bits(gb, 5); \
1167 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1168 edges = 1 << v->dqsbedge; \
1169 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1170 edges = (3 << v->dqsbedge) % 15; \
1171 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
/* edge bits: 1 = left column, 2 = top row, 4 = right column, 8 = bottom row */ \
1173 if ((edges&1) && !s->mb_x) \
1174 mquant = v->altpq; \
1175 if ((edges&2) && s->first_slice_line) \
1176 mquant = v->altpq; \
1177 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1178 mquant = v->altpq; \
1179 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1180 mquant = v->altpq; \
1184 * @def GET_MVDATA(_dmv_x, _dmv_y)
1185 * @brief Get MV differentials
1186 * @see MVDATA decoding from 8.3.5.2, p(1)20
1187 * @param _dmv_x Horizontal differential for decoded MV
1188 * @param _dmv_y Vertical differential for decoded MV
1190 #define GET_MVDATA(_dmv_x, _dmv_y) \
1191 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1192 VC1_MV_DIFF_VLC_BITS, 2); \
1194 mb_has_coeffs = 1; \
1197 mb_has_coeffs = 0; \
1200 _dmv_x = _dmv_y = 0; \
/* index 35: escape — raw differentials follow in the bitstream */ \
1201 } else if (index == 35) { \
1202 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1203 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1204 } else if (index == 36) { \
/* otherwise split the VLC index into horizontal/vertical components */ \
1209 index1 = index % 6; \
1210 if (!s->quarter_sample && index1 == 5) val = 1; \
1212 if (size_table[index1] - val > 0) \
1213 val = get_bits(gb, size_table[index1] - val); \
/* sign-fold: low bit of val selects the sign of the decoded magnitude */ \
1215 sign = 0 - (val&1); \
1216 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1218 index1 = index / 6; \
1219 if (!s->quarter_sample && index1 == 5) val = 1; \
1221 if (size_table[index1] - val > 0) \
1222 val = get_bits(gb, size_table[index1] - val); \
1224 sign = 0 - (val & 1); \
1225 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
/* Decode one interlaced-picture MV differential (MVDATA) into *dmv_x/*dmv_y;
 * for two-reference fields also extracts the reference selector into
 * *pred_flag. Uses offset_table1/offset_table2 depending on v->dmvrange. */
1228 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1229 int *dmv_y, int *pred_flag)
1232 int extend_x = 0, extend_y = 0;
1233 GetBitContext *gb = &v->s.gb;
1236 const int* offs_tab;
/* two-reference field picture: VLC table includes the pred_flag bit */
1239 bits = VC1_2REF_MVDATA_VLC_BITS;
1242 bits = VC1_1REF_MVDATA_VLC_BITS;
/* DMVRANGE extends the magnitude range horizontally and/or vertically */
1245 switch (v->dmvrange) {
1253 extend_x = extend_y = 1;
1256 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
/* escape: raw k_x/k_y-bit differentials follow */
1258 *dmv_x = get_bits(gb, v->k_x);
1259 *dmv_y = get_bits(gb, v->k_y);
/* low bit of the raw vertical value carries the reference-field flag */
1261 *pred_flag = *dmv_y & 1;
1262 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1267 offs_tab = offset_table2;
1269 offs_tab = offset_table1;
/* horizontal component from the VLC index */
1270 index1 = (index + 1) % 9;
1272 val = get_bits(gb, index1 + extend_x);
1273 sign = 0 -(val & 1);
1274 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1278 offs_tab = offset_table2;
1280 offs_tab = offset_table1;
/* vertical component; with numref the low index bit is the pred_flag */
1281 index1 = (index + 1) / 9;
1282 if (index1 > v->numref) {
1283 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1284 sign = 0 - (val & 1);
1285 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1289 *pred_flag = index1 & 1;
/* Scale a horizontal MV predictor taken from the opposite field so it can be
 * used for a same-field prediction (field pictures, 10.3.5.4.3.3). The scale
 * factors come from the refdist-indexed table; result is clipped to range_x. */
1293 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1295 int scaledvalue, refdist;
1296 int scalesame1, scalesame2;
1297 int scalezone1_x, zone1offset_x;
1298 int table_index = dir ^ v->second_field;
1300 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1301 refdist = v->refdist;
1303 refdist = dir ? v->brfd : v->frfd;
1306 scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1307 scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1308 scalezone1_x = vc1_field_mvpred_scales[table_index][3][refdist];
1309 zone1offset_x = vc1_field_mvpred_scales[table_index][5][refdist];
/* small MVs use scalesame1; larger ones use scalesame2 plus a zone offset */
1314 if (FFABS(n) < scalezone1_x)
1315 scaledvalue = (n * scalesame1) >> 8;
1318 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1320 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1323 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/* Vertical counterpart of scaleforsame_x(); uses the y-specific zone/offset
 * table rows and applies an asymmetric clip when the current field predicts
 * from the other parity. */
1326 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1328 int scaledvalue, refdist;
1329 int scalesame1, scalesame2;
1330 int scalezone1_y, zone1offset_y;
1331 int table_index = dir ^ v->second_field;
1333 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1334 refdist = v->refdist;
1336 refdist = dir ? v->brfd : v->frfd;
1339 scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1340 scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1341 scalezone1_y = vc1_field_mvpred_scales[table_index][4][refdist];
1342 zone1offset_y = vc1_field_mvpred_scales[table_index][6][refdist];
1347 if (FFABS(n) < scalezone1_y)
1348 scaledvalue = (n * scalesame1) >> 8;
1351 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1353 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
/* bottom field predicting from top field gets the shifted clip window */
1357 if (v->cur_field_type && !v->ref_field_type[dir])
1358 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1360 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/* Scale a horizontal same-field MV predictor for use as an opposite-field
 * prediction in B field pictures; factors are indexed by the (clamped)
 * backward reference distance. */
1363 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1365 int scalezone1_x, zone1offset_x;
1366 int scaleopp1, scaleopp2, brfd;
1369 brfd = FFMIN(v->brfd, 3);
1370 scalezone1_x = vc1_b_field_mvpred_scales[3][brfd];
1371 zone1offset_x = vc1_b_field_mvpred_scales[5][brfd];
1372 scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1373 scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
1378 if (FFABS(n) < scalezone1_x)
1379 scaledvalue = (n * scaleopp1) >> 8;
1382 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1384 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1387 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/* Vertical counterpart of scaleforopp_x(); like scaleforsame_y() the clip
 * window shifts by one when the bottom field references the top field. */
1390 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1392 int scalezone1_y, zone1offset_y;
1393 int scaleopp1, scaleopp2, brfd;
1396 brfd = FFMIN(v->brfd, 3);
1397 scalezone1_y = vc1_b_field_mvpred_scales[4][brfd];
1398 zone1offset_y = vc1_b_field_mvpred_scales[6][brfd];
1399 scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1400 scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
1405 if (FFABS(n) < scalezone1_y)
1406 scaledvalue = (n * scaleopp1) >> 8;
1409 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1411 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1414 if (v->cur_field_type && !v->ref_field_type[dir]) {
1415 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1417 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/* Dispatch same-field predictor scaling: dim selects x vs y scaling for
 * P/second-field/forward cases, otherwise applies the flat B-picture
 * scalesame factor. hpel compensates for half-pel MV precision. */
1421 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1424 int brfd, scalesame;
1425 int hpel = 1 - v->s.quarter_sample;
/* work in quarter-pel internally; shift back by hpel at the end */
1428 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1430 n = scaleforsame_y(v, i, n, dir) << hpel;
1432 n = scaleforsame_x(v, n, dir) << hpel;
1435 brfd = FFMIN(v->brfd, 3);
1436 scalesame = vc1_b_field_mvpred_scales[0][brfd];
1438 n = (n * scalesame >> 8) << hpel;
/* Dispatch opposite-field predictor scaling: zone-based x/y scaling for the
 * backward direction of B first fields, flat refdist-based scaleopp factor
 * otherwise. Mirrors scaleforsame() above. */
1442 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1445 int refdist, scaleopp;
1446 int hpel = 1 - v->s.quarter_sample;
1449 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1451 n = scaleforopp_y(v, n, dir) << hpel;
1453 n = scaleforopp_x(v, n) << hpel;
1456 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1457 refdist = FFMIN(v->refdist, 3);
1459 refdist = dir ? v->brfd : v->frfd;
1460 scaleopp = vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1462 n = (n * scaleopp >> 8) << hpel;
1466 /** Predict and set motion vector
/* Computes the median/hybrid MV prediction for block n from neighbours
 * A (above), B (above-off) and C (left), adds the decoded differential,
 * and stores the result (signed-modulo range) into s->mv and motion_val.
 * Fix: the intra zeroing of motion_val[1][xy + wrap] was missing
 * `+ v->blocks_off`, so in field mode (blocks_off != 0) the wrong entry was
 * cleared and the intended one left stale — now consistent with the sibling
 * assignments. */
1468 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1469 int mv1, int r_x, int r_y, uint8_t* is_intra,
1470 int pred_flag, int dir)
1472 MpegEncContext *s = &v->s;
1473 int xy, wrap, off = 0;
1477 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1478 int opposit, a_f, b_f, c_f;
1479 int16_t field_predA[2];
1480 int16_t field_predB[2];
1481 int16_t field_predC[2];
1482 int a_valid, b_valid, c_valid;
1483 int hybridmv_thresh, y_bias = 0;
1485 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1486 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1490 /* scale MV difference to be quad-pel */
1491 dmv_x <<= 1 - s->quarter_sample;
1492 dmv_y <<= 1 - s->quarter_sample;
1494 wrap = s->b8_stride;
1495 xy = s->block_index[n];
/* intra block: zero out both direction MV fields for this block */
1498 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
1499 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
1500 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
1501 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
1502 if (mv1) { /* duplicate motion data for 1-MV block */
1503 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1504 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1505 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1506 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1507 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1508 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1509 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1510 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1511 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
/* was: motion_val[1][xy + wrap][0] — missing blocks_off (see header note) */
1512 s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][0] = 0;
1513 s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1514 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1515 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
/* gather candidate predictors A (above) and C (left) */
1520 C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
1521 A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
1523 if (v->field_mode && mixedmv_pic)
1524 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1526 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1528 //in 4-MV mode different blocks have different B predictor position
1531 off = (s->mb_x > 0) ? -1 : 1;
1534 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1543 B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
/* availability of each predictor given picture/slice boundaries */
1545 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1546 b_valid = a_valid && (s->mb_width > 1);
1547 c_valid = s->mb_x || (n == 1 || n == 3);
1548 if (v->field_mode) {
1549 a_valid = a_valid && !is_intra[xy - wrap];
1550 b_valid = b_valid && !is_intra[xy - wrap + off];
1551 c_valid = c_valid && !is_intra[xy - 1];
/* count how many valid predictors point at the same vs the opposite field */
1555 a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1556 num_oppfield += a_f;
1557 num_samefield += 1 - a_f;
1558 field_predA[0] = A[0];
1559 field_predA[1] = A[1];
1561 field_predA[0] = field_predA[1] = 0;
1565 c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1566 num_oppfield += c_f;
1567 num_samefield += 1 - c_f;
1568 field_predC[0] = C[0];
1569 field_predC[1] = C[1];
1571 field_predC[0] = field_predC[1] = 0;
1575 b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1576 num_oppfield += b_f;
1577 num_samefield += 1 - b_f;
1578 field_predB[0] = B[0];
1579 field_predB[1] = B[1];
1581 field_predB[0] = field_predB[1] = 0;
/* field pictures: choose the dominant field and rescale minority predictors */
1585 if (v->field_mode) {
1586 if (num_samefield <= num_oppfield)
1587 opposit = 1 - pred_flag;
1589 opposit = pred_flag;
1593 if (a_valid && !a_f) {
1594 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1595 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1597 if (b_valid && !b_f) {
1598 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1599 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1601 if (c_valid && !c_f) {
1602 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1603 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1605 v->mv_f[dir][xy + v->blocks_off] = 1;
1606 v->ref_field_type[dir] = !v->cur_field_type;
1608 if (a_valid && a_f) {
1609 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1610 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1612 if (b_valid && b_f) {
1613 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1614 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1616 if (c_valid && c_f) {
1617 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1618 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1620 v->mv_f[dir][xy + v->blocks_off] = 0;
1621 v->ref_field_type[dir] = v->cur_field_type;
/* single-predictor fallbacks, priority A > C > B */
1625 px = field_predA[0];
1626 py = field_predA[1];
1627 } else if (c_valid) {
1628 px = field_predC[0];
1629 py = field_predC[1];
1630 } else if (b_valid) {
1631 px = field_predB[0];
1632 py = field_predB[1];
/* two or more predictors: component-wise median */
1638 if (num_samefield + num_oppfield > 1) {
1639 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1640 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1643 /* Pullback MV as specified in 8.3.5.3.4 */
1644 if (!v->field_mode) {
1646 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1647 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1648 X = (s->mb_width << 6) - 4;
1649 Y = (s->mb_height << 6) - 4;
1651 if (qx + px < -60) px = -60 - qx;
1652 if (qy + py < -60) py = -60 - qy;
1654 if (qx + px < -28) px = -28 - qx;
1655 if (qy + py < -28) py = -28 - qy;
1657 if (qx + px > X) px = X - qx;
1658 if (qy + py > Y) py = Y - qy;
1661 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1662 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1663 hybridmv_thresh = 32;
1664 if (a_valid && c_valid) {
1665 if (is_intra[xy - wrap])
1666 sum = FFABS(px) + FFABS(py);
1668 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1669 if (sum > hybridmv_thresh) {
1670 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1671 px = field_predA[0];
1672 py = field_predA[1];
1674 px = field_predC[0];
1675 py = field_predC[1];
1678 if (is_intra[xy - 1])
1679 sum = FFABS(px) + FFABS(py);
1681 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1682 if (sum > hybridmv_thresh) {
1683 if (get_bits1(&s->gb)) {
1684 px = field_predA[0];
1685 py = field_predA[1];
1687 px = field_predC[0];
1688 py = field_predC[1];
1695 if (v->field_mode && !s->quarter_sample) {
1699 if (v->field_mode && v->numref)
/* cross-parity reference needs a vertical bias (see field conversion) */
1701 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1703 /* store MV using signed modulus of MV range defined in 4.11 */
1704 s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1705 s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1706 if (mv1) { /* duplicate motion data for 1-MV block */
1707 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1708 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1709 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1710 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1711 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1712 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1713 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1714 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1718 /** Predict and set motion vector for interlaced frame picture MBs
/* mvn: 1 = 1-MV macroblock (result copied to all four blocks),
 *      2 = 2-field-MV block (result copied to the horizontal pair).
 * Predictors A/B/C are averaged across field lines when the candidate and
 * the current block use different MV types (frame vs field). */
1720 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1721 int mvn, int r_x, int r_y, uint8_t* is_intra)
1723 MpegEncContext *s = &v->s;
1724 int xy, wrap, off = 0;
1725 int A[2], B[2], C[2];
1727 int a_valid = 0, b_valid = 0, c_valid = 0;
1728 int field_a, field_b, field_c; // 0: same, 1: opposit
1729 int total_valid, num_samefield, num_oppfield;
1730 int pos_c, pos_b, n_adj;
1732 wrap = s->b8_stride;
1733 xy = s->block_index[n];
/* intra: clear MVs for this block (and its copies for 1-MV MBs) */
1736 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
1737 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
1738 s->current_picture.f.motion_val[1][xy][0] = 0;
1739 s->current_picture.f.motion_val[1][xy][1] = 0;
1740 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1741 s->current_picture.f.motion_val[0][xy + 1][0] = 0;
1742 s->current_picture.f.motion_val[0][xy + 1][1] = 0;
1743 s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
1744 s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
1745 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
1746 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
1747 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1748 s->current_picture.f.motion_val[1][xy + 1][0] = 0;
1749 s->current_picture.f.motion_val[1][xy + 1][1] = 0;
1750 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1751 s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
1752 s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
1753 s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
/* Predict A (left neighbour) */
1758 off = ((n == 0) || (n == 1)) ? 1 : -1;
1760 if (s->mb_x || (n == 1) || (n == 3)) {
1761 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1762 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1763 A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
1764 A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
1766 } else { // current block has frame mv and cand. has field MV (so average)
1767 A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
1768 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
1769 A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
1770 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
1773 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1779 /* Predict B and C */
1780 B[0] = B[1] = C[0] = C[1] = 0;
1781 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1782 if (!s->first_slice_line) {
1783 if (!v->is_intra[s->mb_x - s->mb_stride]) {
1786 pos_b = s->block_index[n_adj] - 2 * wrap;
1787 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1788 n_adj = (n & 2) | (n & 1);
1790 B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
1791 B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
1792 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1793 B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1794 B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
/* C comes from above-right, or above-left on the last MB column */
1797 if (s->mb_width > 1) {
1798 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1801 pos_c = s->block_index[2] - 2 * wrap + 2;
1802 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1805 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
1806 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
1807 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1808 C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1809 C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1811 if (s->mb_x == s->mb_width - 1) {
1812 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1815 pos_c = s->block_index[3] - 2 * wrap - 2;
1816 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1819 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
1820 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
1821 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1822 C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1823 C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
/* lower blocks of a frame-MV MB: B/C come from the upper blocks of this MB */
1832 pos_b = s->block_index[1];
1834 B[0] = s->current_picture.f.motion_val[0][pos_b][0];
1835 B[1] = s->current_picture.f.motion_val[0][pos_b][1];
1836 pos_c = s->block_index[0];
1838 C[0] = s->current_picture.f.motion_val[0][pos_c][0];
1839 C[1] = s->current_picture.f.motion_val[0][pos_c][1];
1842 total_valid = a_valid + b_valid + c_valid;
1843 // check if predictor A is out of bounds
1844 if (!s->mb_x && !(n == 1 || n == 3)) {
1847 // check if predictor B is out of bounds
1848 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1849 B[0] = B[1] = C[0] = C[1] = 0;
/* frame-MV blocks: simple median / fallback selection */
1851 if (!v->blk_mv_type[xy]) {
1852 if (s->mb_width == 1) {
1856 if (total_valid >= 2) {
1857 px = mid_pred(A[0], B[0], C[0]);
1858 py = mid_pred(A[1], B[1], C[1]);
1859 } else if (total_valid) {
1860 if (a_valid) { px = A[0]; py = A[1]; }
1861 if (b_valid) { px = B[0]; py = B[1]; }
1862 if (c_valid) { px = C[0]; py = C[1]; }
/* field-MV blocks: classify each predictor by field parity (bit 2 of y) */
1868 field_a = (A[1] & 4) ? 1 : 0;
1872 field_b = (B[1] & 4) ? 1 : 0;
1876 field_c = (C[1] & 4) ? 1 : 0;
1880 num_oppfield = field_a + field_b + field_c;
1881 num_samefield = total_valid - num_oppfield;
1882 if (total_valid == 3) {
1883 if ((num_samefield == 3) || (num_oppfield == 3)) {
1884 px = mid_pred(A[0], B[0], C[0]);
1885 py = mid_pred(A[1], B[1], C[1]);
1886 } else if (num_samefield >= num_oppfield) {
1887 /* take one MV from same field set depending on priority
1888 the check for B may not be necessary */
1889 px = !field_a ? A[0] : B[0];
1890 py = !field_a ? A[1] : B[1];
1892 px = field_a ? A[0] : B[0];
1893 py = field_a ? A[1] : B[1];
1895 } else if (total_valid == 2) {
1896 if (num_samefield >= num_oppfield) {
1897 if (!field_a && a_valid) {
1900 } else if (!field_b && b_valid) {
1903 } else if (c_valid) {
1908 if (field_a && a_valid) {
1911 } else if (field_b && b_valid) {
1914 } else if (c_valid) {
1919 } else if (total_valid == 1) {
1920 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1921 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1926 /* store MV using signed modulus of MV range defined in 4.11 */
1927 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1928 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1929 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1930 s->current_picture.f.motion_val[0][xy + 1 ][0] = s->current_picture.f.motion_val[0][xy][0];
1931 s->current_picture.f.motion_val[0][xy + 1 ][1] = s->current_picture.f.motion_val[0][xy][1];
1932 s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
1933 s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
1934 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1935 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1936 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1937 s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1938 s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1939 s->mv[0][n + 1][0] = s->mv[0][n][0];
1940 s->mv[0][n + 1][1] = s->mv[0][n][1];
1944 /** Motion compensation for direct or interpolated blocks in B-frames
1946 static void vc1_interp_mc(VC1Context *v)
1948 MpegEncContext *s = &v->s;
1949 DSPContext *dsp = &v->s.dsp;
1950 uint8_t *srcY, *srcU, *srcV;
1951 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1953 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1955 if (!v->field_mode && !v->s.next_picture.f.data[0])
1958 mx = s->mv[1][0][0];
1959 my = s->mv[1][0][1];
1960 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1961 uvmy = (my + ((my & 3) == 3)) >> 1;
1962 if (v->field_mode) {
1963 if (v->cur_field_type != v->ref_field_type[1])
1964 my = my - 2 + 4 * v->cur_field_type;
1965 uvmy = uvmy - 2 + 4 * v->cur_field_type;
1968 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1969 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1971 srcY = s->next_picture.f.data[0];
1972 srcU = s->next_picture.f.data[1];
1973 srcV = s->next_picture.f.data[2];
1975 src_x = s->mb_x * 16 + (mx >> 2);
1976 src_y = s->mb_y * 16 + (my >> 2);
1977 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1978 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1980 if (v->profile != PROFILE_ADVANCED) {
1981 src_x = av_clip( src_x, -16, s->mb_width * 16);
1982 src_y = av_clip( src_y, -16, s->mb_height * 16);
1983 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1984 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1986 src_x = av_clip( src_x, -17, s->avctx->coded_width);
1987 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1988 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1989 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1992 srcY += src_y * s->linesize + src_x;
1993 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1994 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1996 if (v->field_mode && v->ref_field_type[1]) {
1997 srcY += s->current_picture_ptr->f.linesize[0];
1998 srcU += s->current_picture_ptr->f.linesize[1];
1999 srcV += s->current_picture_ptr->f.linesize[2];
2002 /* for grayscale we should not try to read from unknown area */
2003 if (s->flags & CODEC_FLAG_GRAY) {
2004 srcU = s->edge_emu_buffer + 18 * s->linesize;
2005 srcV = s->edge_emu_buffer + 18 * s->linesize;
2009 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 16 - s->mspel * 3
2010 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my & 3) - 16 - s->mspel * 3) {
2011 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
2013 srcY -= s->mspel * (1 + s->linesize);
2014 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
2015 17 + s->mspel * 2, 17 + s->mspel * 2,
2016 src_x - s->mspel, src_y - s->mspel,
2017 s->h_edge_pos, v_edge_pos);
2018 srcY = s->edge_emu_buffer;
2019 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
2020 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2021 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
2022 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2025 /* if we deal with range reduction we need to scale source blocks */
2026 if (v->rangeredfrm) {
2028 uint8_t *src, *src2;
2031 for (j = 0; j < 17 + s->mspel * 2; j++) {
2032 for (i = 0; i < 17 + s->mspel * 2; i++)
2033 src[i] = ((src[i] - 128) >> 1) + 128;
2038 for (j = 0; j < 9; j++) {
2039 for (i = 0; i < 9; i++) {
2040 src[i] = ((src[i] - 128) >> 1) + 128;
2041 src2[i] = ((src2[i] - 128) >> 1) + 128;
2043 src += s->uvlinesize;
2044 src2 += s->uvlinesize;
2047 srcY += s->mspel * (1 + s->linesize);
2050 if (v->field_mode && v->cur_field_type) {
2051 off = s->current_picture_ptr->f.linesize[0];
2052 off_uv = s->current_picture_ptr->f.linesize[1];
2059 dxy = ((my & 3) << 2) | (mx & 3);
2060 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2061 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2062 srcY += s->linesize * 8;
2063 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2064 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2066 dxy = (my & 2) | ((mx & 2) >> 1);
2069 dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2071 dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2074 if (s->flags & CODEC_FLAG_GRAY) return;
2075 /* Chroma MC always uses qpel bilinear */
2076 uvmx = (uvmx & 3) << 1;
2077 uvmy = (uvmy & 3) << 1;
2079 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2080 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2082 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2083 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Scale a co-located MV component by the B-frame fraction (bfrac out of
 * B_FRACTION_DEN); inv selects the complementary (backward) fraction,
 * qs selects quarter-sample rounding.
 * NOTE(review): intermediate lines (the declaration of n and the
 * inv/qs branch structure) are not visible in this excerpt. */
2087 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2091 #if B_FRACTION_DEN==256
/* fast path: denominator 256 allows shifts instead of divisions;
 * the half-pel result is forced even by the 2* and round-up bias */
2095 return 2 * ((value * n + 255) >> 9);
2096 return (value * n + 128) >> 8;
2099 n -= B_FRACTION_DEN;
/* generic denominator: same rounding behavior via integer division */
2101 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2102 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
/* scale_mv() variant used for interlaced field pictures; qs_last is the
 * quarter-sample flag of the reference picture.
 * NOTE(review): the branch selecting between these two returns is not
 * visible in this excerpt. */
2106 static av_always_inline int scale_mv_intfi(int value, int bfrac, int inv,
2107 int qs, int qs_last)
2115 return (value * n + 255) >> 9;
2117 return (value * n + 128) >> 8;
2120 /** Reconstruct motion vector for B-frame and do motion compensation
/* Reconstruct MVs for a B-frame macroblock and run motion compensation.
 * mv_mode is saved in mv_mode2 and temporarily forced to
 * MV_PMODE_INTENSITY_COMP, then restored around each MC call. */
2122 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2123 int direct, int mode)
2126 v->mv_mode2 = v->mv_mode;
2127 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2133 v->mv_mode = v->mv_mode2;
2136 if (mode == BMV_TYPE_INTERPOLATED) {
2140 v->mv_mode = v->mv_mode2;
/* backward prediction: restore the original mode before the single-MV MC
 * when intensity compensation is in use */
2144 if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2145 v->mv_mode = v->mv_mode2;
2146 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2148 v->mv_mode = v->mv_mode2;
/* Predict forward (s->mv[0][0]) and backward (s->mv[1][0]) MVs for a
 * B-frame macroblock, either directly from the co-located next-picture
 * MV (direct mode) or via median prediction from neighbors A/B/C, with
 * the pullback and hybrid-prediction steps of the VC-1 spec. */
2151 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2152 int direct, int mvtype)
2154 MpegEncContext *s = &v->s;
2155 int xy, wrap, off = 0;
2160 const uint8_t *is_intra = v->mb_type[0];
2164 /* scale MV difference to be quad-pel */
2165 dmv_x[0] <<= 1 - s->quarter_sample;
2166 dmv_y[0] <<= 1 - s->quarter_sample;
2167 dmv_x[1] <<= 1 - s->quarter_sample;
2168 dmv_y[1] <<= 1 - s->quarter_sample;
2170 wrap = s->b8_stride;
2171 xy = s->block_index[0];
/* clear both direction MVs for the current block position */
2174 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
2175 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
2176 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
2177 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
2180 if (!v->field_mode) {
/* direct mode: derive both MVs by scaling the co-located MV of the
 * next (backward reference) picture by the B fraction */
2181 s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2182 s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2183 s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2184 s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2186 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2187 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2188 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2189 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2190 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2193 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2194 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2195 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2196 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
/* non-direct: median-predict the forward MV from neighbors A (above),
 * B (above-right, or above-left at the right edge) and C (left) */
2200 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2201 C = s->current_picture.f.motion_val[0][xy - 2];
2202 A = s->current_picture.f.motion_val[0][xy - wrap * 2];
2203 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2204 B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
2206 if (!s->mb_x) C[0] = C[1] = 0;
2207 if (!s->first_slice_line) { // predictor A is not out of bounds
2208 if (s->mb_width == 1) {
2212 px = mid_pred(A[0], B[0], C[0]);
2213 py = mid_pred(A[1], B[1], C[1]);
2215 } else if (s->mb_x) { // predictor C is not out of bounds
2221 /* Pullback MV as specified in 8.3.5.3.4 */
2224 if (v->profile < PROFILE_ADVANCED) {
/* simple/main profile: half-pel units (<< 5) */
2225 qx = (s->mb_x << 5);
2226 qy = (s->mb_y << 5);
2227 X = (s->mb_width << 5) - 4;
2228 Y = (s->mb_height << 5) - 4;
2229 if (qx + px < -28) px = -28 - qx;
2230 if (qy + py < -28) py = -28 - qy;
2231 if (qx + px > X) px = X - qx;
2232 if (qy + py > Y) py = Y - qy;
/* advanced profile: quarter-pel units (<< 6) */
2234 qx = (s->mb_x << 6);
2235 qy = (s->mb_y << 6);
2236 X = (s->mb_width << 6) - 4;
2237 Y = (s->mb_height << 6) - 4;
2238 if (qx + px < -60) px = -60 - qx;
2239 if (qy + py < -60) py = -60 - qy;
2240 if (qx + px > X) px = X - qx;
2241 if (qy + py > Y) py = Y - qy;
2244 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): the `0 &&` disables this hybrid-prediction branch;
 * presumably deliberate for B frames — confirm against the spec */
2245 if (0 && !s->first_slice_line && s->mb_x) {
2246 if (is_intra[xy - wrap])
2247 sum = FFABS(px) + FFABS(py);
2249 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2251 if (get_bits1(&s->gb)) {
2259 if (is_intra[xy - 2])
2260 sum = FFABS(px) + FFABS(py);
2262 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2264 if (get_bits1(&s->gb)) {
2274 /* store MV using signed modulus of MV range defined in 4.11 */
2275 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2276 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* same procedure for the backward MV (reference list 1) */
2278 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2279 C = s->current_picture.f.motion_val[1][xy - 2];
2280 A = s->current_picture.f.motion_val[1][xy - wrap * 2];
2281 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2282 B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
2286 if (!s->first_slice_line) { // predictor A is not out of bounds
2287 if (s->mb_width == 1) {
2291 px = mid_pred(A[0], B[0], C[0]);
2292 py = mid_pred(A[1], B[1], C[1]);
2294 } else if (s->mb_x) { // predictor C is not out of bounds
2300 /* Pullback MV as specified in 8.3.5.3.4 */
2303 if (v->profile < PROFILE_ADVANCED) {
2304 qx = (s->mb_x << 5);
2305 qy = (s->mb_y << 5);
2306 X = (s->mb_width << 5) - 4;
2307 Y = (s->mb_height << 5) - 4;
2308 if (qx + px < -28) px = -28 - qx;
2309 if (qy + py < -28) py = -28 - qy;
2310 if (qx + px > X) px = X - qx;
2311 if (qy + py > Y) py = Y - qy;
2313 qx = (s->mb_x << 6);
2314 qy = (s->mb_y << 6);
2315 X = (s->mb_width << 6) - 4;
2316 Y = (s->mb_height << 6) - 4;
2317 if (qx + px < -60) px = -60 - qx;
2318 if (qy + py < -60) py = -60 - qy;
2319 if (qx + px > X) px = X - qx;
2320 if (qy + py > Y) py = Y - qy;
2323 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2324 if (0 && !s->first_slice_line && s->mb_x) {
2325 if (is_intra[xy - wrap])
2326 sum = FFABS(px) + FFABS(py);
2328 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2330 if (get_bits1(&s->gb)) {
2338 if (is_intra[xy - 2])
2339 sum = FFABS(px) + FFABS(py);
2341 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2343 if (get_bits1(&s->gb)) {
2353 /* store MV using signed modulus of MV range defined in 4.11 */
2355 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2356 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
/* publish both final MVs into the current picture's MV field */
2358 s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
2359 s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
2360 s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
2361 s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
/* Predict B-frame MVs for interlaced field pictures.  Direct mode scales
 * the co-located next-picture MV with scale_mv_intfi() and derives the
 * reference-field polarity f by majority vote over the four blocks;
 * other modes defer to vc1_pred_mv() per direction. */
2364 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2366 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2367 MpegEncContext *s = &v->s;
2368 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2370 if (v->bmvtype == BMV_TYPE_DIRECT) {
2371 int total_opp, k, f;
2372 if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2373 s->mv[0][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2374 v->bfraction, 0, s->quarter_sample, v->qs_last);
2375 s->mv[0][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2376 v->bfraction, 0, s->quarter_sample, v->qs_last);
2377 s->mv[1][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2378 v->bfraction, 1, s->quarter_sample, v->qs_last);
2379 s->mv[1][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2380 v->bfraction, 1, s->quarter_sample, v->qs_last);
/* count opposite-field flags of the 4 co-located blocks; f is the
 * majority polarity */
2382 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2383 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2384 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2385 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2386 f = (total_opp > 2) ? 1 : 0;
/* co-located MB was intra: zero MVs */
2388 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2389 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2392 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2393 for (k = 0; k < 4; k++) {
2394 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2395 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2396 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2397 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2398 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2399 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2403 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
/* interpolated: predict both directions for the whole MB */
2404 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2405 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2408 if (dir) { // backward
2409 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2410 if (n == 3 || mv1) {
2411 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2414 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2415 if (n == 3 || mv1) {
2416 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2421 /** Get predicted DC value for I-frames only
2422 * prediction dir: left=0, top=1
2423 * @param s MpegEncContext
2424 * @param overlap flag indicating that overlap filtering is used
2425 * @param pq integer part of picture quantizer
2426 * @param[in] n block index in the current MB
2427 * @param dc_val_ptr Pointer to DC predictor
2428 * @param dir_ptr Prediction direction for use in AC prediction
2430 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2431 int16_t **dc_val_ptr, int *dir_ptr)
2433 int a, b, c, wrap, pred, scale;
/* default DC values indexed by dc_scale; dcpred[scale] substitutes for
 * unavailable neighbors (index 0 is unused, hence the -1) */
2435 static const uint16_t dcpred[32] = {
2436 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2437 114, 102, 93, 85, 79, 73, 68, 64,
2438 60, 57, 54, 51, 49, 47, 45, 43,
2439 41, 39, 38, 37, 35, 34, 33
2442 /* find prediction - wmv3_dc_scale always used here in fact */
2443 if (n < 4) scale = s->y_dc_scale;
2444 else scale = s->c_dc_scale;
2446 wrap = s->block_wrap[n];
2447 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbor, a = top neighbor (c = left, set elsewhere) */
2453 b = dc_val[ - 1 - wrap];
2454 a = dc_val[ - wrap];
2456 if (pq < 9 || !overlap) {
2457 /* Set outer values */
2458 if (s->first_slice_line && (n != 2 && n != 3))
2459 b = a = dcpred[scale];
2460 if (s->mb_x == 0 && (n != 1 && n != 3))
2461 b = c = dcpred[scale];
2463 /* Set outer values */
2464 if (s->first_slice_line && (n != 2 && n != 3))
2466 if (s->mb_x == 0 && (n != 1 && n != 3))
/* choose direction by neighbor gradients: prefer left when the
 * horizontal gradient |a-b| does not exceed the vertical |b-c| */
2470 if (abs(a - b) <= abs(b - c)) {
2472 *dir_ptr = 1; // left
2475 *dir_ptr = 0; // top
2478 /* update predictor */
2479 *dc_val_ptr = &dc_val[0];
2484 /** Get predicted DC value
2485 * prediction dir: left=0, top=1
2486 * @param s MpegEncContext
2487 * @param overlap flag indicating that overlap filtering is used
2488 * @param pq integer part of picture quantizer
2489 * @param[in] n block index in the current MB
2490 * @param a_avail flag indicating top block availability
2491 * @param c_avail flag indicating left block availability
2492 * @param dc_val_ptr Pointer to DC predictor
2493 * @param dir_ptr Prediction direction for use in AC prediction
2495 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2496 int a_avail, int c_avail,
2497 int16_t **dc_val_ptr, int *dir_ptr)
2499 int a, b, c, wrap, pred;
2501 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2504 wrap = s->block_wrap[n];
2505 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbor, a = top neighbor */
2511 b = dc_val[ - 1 - wrap];
2512 a = dc_val[ - wrap];
2513 /* scale predictors if needed */
2514 q1 = s->current_picture.f.qscale_table[mb_pos];
/* rescale each neighbor's DC when its MB used a different quantizer:
 * multiply by the neighbor's dc_scale, divide by ours via the
 * fixed-point reciprocal table ff_vc1_dqscale (18-bit precision) */
2515 if (c_avail && (n != 1 && n != 3)) {
2516 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2518 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2520 if (a_avail && (n != 2 && n != 3)) {
2521 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2523 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2525 if (a_avail && c_avail && (n != 3)) {
2530 off -= s->mb_stride;
2531 q2 = s->current_picture.f.qscale_table[off];
2533 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* pick predictor and direction based on which neighbors exist */
2536 if (a_avail && c_avail) {
2537 if (abs(a - b) <= abs(b - c)) {
2539 *dir_ptr = 1; // left
2542 *dir_ptr = 0; // top
2544 } else if (a_avail) {
2546 *dir_ptr = 0; // top
2547 } else if (c_avail) {
2549 *dir_ptr = 1; // left
2552 *dir_ptr = 1; // left
2555 /* update predictor */
2556 *dc_val_ptr = &dc_val[0];
2560 /** @} */ // Block group
2563 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2564 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
/* Predict the coded-block flag for block n from the left (a), top-left
 * (b) and top (c) neighbors in s->coded_block, and hand back a pointer
 * to the current entry so the caller can store the decoded flag. */
2568 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2569 uint8_t **coded_block_ptr)
2571 int xy, wrap, pred, a, b, c;
2573 xy = s->block_index[n];
2574 wrap = s->b8_stride;
2579 a = s->coded_block[xy - 1 ];
2580 b = s->coded_block[xy - 1 - wrap];
2581 c = s->coded_block[xy - wrap];
/* update predictor location for the caller */
2590 *coded_block_ptr = &s->coded_block[xy];
2596 * Decode one AC coefficient
2597 * @param v The VC1 context
2598 * @param last Last coefficient
2599 * @param skip How many zero coefficients to skip
2600 * @param value Decoded AC coefficient value
2601 * @param codingset set of VLC to decode data
2604 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2605 int *value, int codingset)
2607 GetBitContext *gb = &v->s.gb;
2608 int index, escape, run = 0, level = 0, lst = 0;
/* regular (non-escape) VLC index: look up run/level/last directly */
2610 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2611 if (index != vc1_ac_sizes[codingset] - 1) {
2612 run = vc1_index_decode_table[codingset][index][0];
2613 level = vc1_index_decode_table[codingset][index][1];
/* force "last" on bitstream exhaustion so the caller's loop terminates */
2614 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
/* escape: 1 = add delta-level, 01 = add delta-run, 00 = explicit mode 3 */
2618 escape = decode210(gb);
2620 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2621 run = vc1_index_decode_table[codingset][index][0];
2622 level = vc1_index_decode_table[codingset][index][1];
2623 lst = index >= vc1_last_decode_table[codingset];
2626 level += vc1_last_delta_level_table[codingset][run];
2628 level += vc1_delta_level_table[codingset][run];
2631 run += vc1_last_delta_run_table[codingset][level] + 1;
2633 run += vc1_delta_run_table[codingset][level] + 1;
/* escape mode 3: explicit run/level; field widths are decoded once per
 * frame and cached in esc3_run_length / esc3_level_length */
2639 lst = get_bits1(gb);
2640 if (v->s.esc3_level_length == 0) {
2641 if (v->pq < 8 || v->dquantfrm) { // table 59
2642 v->s.esc3_level_length = get_bits(gb, 3);
2643 if (!v->s.esc3_level_length)
2644 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2645 } else { // table 60
2646 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2648 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2650 run = get_bits(gb, v->s.esc3_run_length);
2651 sign = get_bits1(gb);
2652 level = get_bits(gb, v->s.esc3_level_length);
2663 /** Decode intra block in intra frames - should be faster than decode_intra_block
2664 * @param v VC1Context
2665 * @param block block to decode
2666 * @param[in] n subblock index
2667 * @param coded are AC coeffs present or not
2668 * @param codingset set of VLC to decode data
2670 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n,
2671 int coded, int codingset)
2673 GetBitContext *gb = &v->s.gb;
2674 MpegEncContext *s = &v->s;
2675 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2678 int16_t *ac_val, *ac_val2;
2681 /* Get DC differential */
2683 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2685 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2688 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2692 if (dcdiff == 119 /* ESC index value */) {
2693 /* TODO: Optimize */
/* escape: raw DC differential, width depends on picture quantizer */
2694 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2695 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2696 else dcdiff = get_bits(gb, 8);
/* low quantizers carry extra DC precision bits */
2699 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2700 else if (v->pq == 2)
2701 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2708 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2711 /* Store the quantized DC coeff, used for prediction */
2713 block[0] = dcdiff * s->y_dc_scale;
2715 block[0] = dcdiff * s->c_dc_scale;
2726 int last = 0, skip, value;
2727 const uint8_t *zz_table;
2731 scale = v->pq * 2 + v->halfpq;
/* zigzag scan pattern depends on the DC prediction direction */
2735 zz_table = v->zz_8x8[2];
2737 zz_table = v->zz_8x8[3];
2739 zz_table = v->zz_8x8[1];
2741 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2743 if (dc_pred_dir) // left
2746 ac_val -= 16 * s->block_wrap[n];
/* decode run/level pairs until "last" */
2749 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2753 block[zz_table[i++]] = value;
2756 /* apply AC prediction if needed */
2758 if (dc_pred_dir) { // left
2759 for (k = 1; k < 8; k++)
2760 block[k << v->left_blk_sh] += ac_val[k];
2762 for (k = 1; k < 8; k++)
2763 block[k << v->top_blk_sh] += ac_val[k + 8];
2766 /* save AC coeffs for further prediction */
2767 for (k = 1; k < 8; k++) {
2768 ac_val2[k] = block[k << v->left_blk_sh];
2769 ac_val2[k + 8] = block[k << v->top_blk_sh];
2772 /* scale AC coeffs */
2773 for (k = 1; k < 64; k++)
2777 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2780 if (s->ac_pred) i = 63;
/* no coded AC coefficients: still propagate/save prediction state */
2786 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2790 scale = v->pq * 2 + v->halfpq;
2791 memset(ac_val2, 0, 16 * 2);
2792 if (dc_pred_dir) { // left
2795 memcpy(ac_val2, ac_val, 8 * 2);
2797 ac_val -= 16 * s->block_wrap[n];
2799 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2802 /* apply AC prediction if needed */
2804 if (dc_pred_dir) { //left
2805 for (k = 1; k < 8; k++) {
2806 block[k << v->left_blk_sh] = ac_val[k] * scale;
2807 if (!v->pquantizer && block[k << v->left_blk_sh])
2808 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2811 for (k = 1; k < 8; k++) {
2812 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2813 if (!v->pquantizer && block[k << v->top_blk_sh])
2814 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2820 s->block_last_index[n] = i;
2825 /** Decode intra block in intra frames - should be faster than decode_intra_block
2826 * @param v VC1Context
2827 * @param block block to decode
2828 * @param[in] n subblock number
2829 * @param coded are AC coeffs present or not
2830 * @param codingset set of VLC to decode data
2831 * @param mquant quantizer value for this macroblock
2833 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
2834 int coded, int codingset, int mquant)
2836 GetBitContext *gb = &v->s.gb;
2837 MpegEncContext *s = &v->s;
2838 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2841 int16_t *ac_val, *ac_val2;
2843 int a_avail = v->a_avail, c_avail = v->c_avail;
2844 int use_pred = s->ac_pred;
2847 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2849 /* Get DC differential */
2851 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2853 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2856 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2860 if (dcdiff == 119 /* ESC index value */) {
2861 /* TODO: Optimize */
/* escape: raw DC differential, width depends on the MB quantizer */
2862 if (mquant == 1) dcdiff = get_bits(gb, 10);
2863 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2864 else dcdiff = get_bits(gb, 8);
2867 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2868 else if (mquant == 2)
2869 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2876 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2879 /* Store the quantized DC coeff, used for prediction */
2881 block[0] = dcdiff * s->y_dc_scale;
2883 block[0] = dcdiff * s->c_dc_scale;
2889 /* check if AC is needed at all */
2890 if (!a_avail && !c_avail)
2892 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2895 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2897 if (dc_pred_dir) // left
2900 ac_val -= 16 * s->block_wrap[n];
/* fetch neighbor quantizer q2 for AC-predictor rescaling below */
2902 q1 = s->current_picture.f.qscale_table[mb_pos];
2903 if ( dc_pred_dir && c_avail && mb_pos)
2904 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2905 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2906 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2907 if ( dc_pred_dir && n == 1)
2909 if (!dc_pred_dir && n == 2)
2915 int last = 0, skip, value;
2916 const uint8_t *zz_table;
/* pick scan: interlaced zigzag when fcm==1 without AC prediction,
 * otherwise direction-dependent progressive scans */
2920 if (!use_pred && v->fcm == 1) {
2921 zz_table = v->zzi_8x8;
2923 if (!dc_pred_dir) // top
2924 zz_table = v->zz_8x8[2];
2926 zz_table = v->zz_8x8[3];
2930 zz_table = v->zz_8x8[1];
2932 zz_table = v->zzi_8x8;
2936 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2940 block[zz_table[i++]] = value;
2943 /* apply AC prediction if needed */
2945 /* scale predictors if needed*/
2946 if (q2 && q1 != q2) {
2947 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2948 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
/* neighbor used a different quantizer: rescale its saved AC row/column
 * via the fixed-point reciprocal table (18-bit precision) */
2950 if (dc_pred_dir) { // left
2951 for (k = 1; k < 8; k++)
2952 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2954 for (k = 1; k < 8; k++)
2955 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2958 if (dc_pred_dir) { //left
2959 for (k = 1; k < 8; k++)
2960 block[k << v->left_blk_sh] += ac_val[k];
2962 for (k = 1; k < 8; k++)
2963 block[k << v->top_blk_sh] += ac_val[k + 8];
2967 /* save AC coeffs for further prediction */
2968 for (k = 1; k < 8; k++) {
2969 ac_val2[k ] = block[k << v->left_blk_sh];
2970 ac_val2[k + 8] = block[k << v->top_blk_sh];
2973 /* scale AC coeffs */
2974 for (k = 1; k < 64; k++)
2978 block[k] += (block[k] < 0) ? -mquant : mquant;
2981 if (use_pred) i = 63;
2982 } else { // no AC coeffs
2985 memset(ac_val2, 0, 16 * 2);
2986 if (dc_pred_dir) { // left
2988 memcpy(ac_val2, ac_val, 8 * 2);
2989 if (q2 && q1 != q2) {
2990 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2991 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2992 for (k = 1; k < 8; k++)
2993 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2998 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2999 if (q2 && q1 != q2) {
3000 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3001 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3002 for (k = 1; k < 8; k++)
3003 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3008 /* apply AC prediction if needed */
3010 if (dc_pred_dir) { // left
3011 for (k = 1; k < 8; k++) {
3012 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3013 if (!v->pquantizer && block[k << v->left_blk_sh])
3014 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3017 for (k = 1; k < 8; k++) {
3018 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3019 if (!v->pquantizer && block[k << v->top_blk_sh])
3020 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3026 s->block_last_index[n] = i;
3031 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3032 * @param v VC1Context
3033 * @param block block to decode
3034 * @param[in] n subblock index
3035 * @param coded are AC coeffs present or not
3036 * @param mquant block quantizer
3037 * @param codingset set of VLC to decode data
3039 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
3040 int coded, int mquant, int codingset)
3042 GetBitContext *gb = &v->s.gb;
3043 MpegEncContext *s = &v->s;
3044 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3047 int16_t *ac_val, *ac_val2;
3049 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3050 int a_avail = v->a_avail, c_avail = v->c_avail;
3051 int use_pred = s->ac_pred;
3055 s->dsp.clear_block(block);
3057 /* XXX: Guard against dumb values of mquant */
3058 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3060 /* Set DC scale - y and c use the same */
3061 s->y_dc_scale = s->y_dc_scale_table[mquant];
3062 s->c_dc_scale = s->c_dc_scale_table[mquant];
3064 /* Get DC differential */
3066 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3068 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3071 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3075 if (dcdiff == 119 /* ESC index value */) {
3076 /* TODO: Optimize */
3077 if (mquant == 1) dcdiff = get_bits(gb, 10);
3078 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3079 else dcdiff = get_bits(gb, 8);
3082 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3083 else if (mquant == 2)
3084 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3091 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3094 /* Store the quantized DC coeff, used for prediction */
3097 block[0] = dcdiff * s->y_dc_scale;
3099 block[0] = dcdiff * s->c_dc_scale;
3105 /* check if AC is needed at all and adjust direction if needed */
3106 if (!a_avail) dc_pred_dir = 1;
3107 if (!c_avail) dc_pred_dir = 0;
3108 if (!a_avail && !c_avail) use_pred = 0;
3109 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3112 scale = mquant * 2 + v->halfpq;
3114 if (dc_pred_dir) //left
3117 ac_val -= 16 * s->block_wrap[n];
/* fetch neighbor quantizer q2 for AC-predictor rescaling below */
3119 q1 = s->current_picture.f.qscale_table[mb_pos];
3120 if (dc_pred_dir && c_avail && mb_pos)
3121 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3122 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3123 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
3124 if ( dc_pred_dir && n == 1)
3126 if (!dc_pred_dir && n == 2)
3128 if (n == 3) q2 = q1;
3131 int last = 0, skip, value;
3135 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
/* scan selection: default progressive zigzag, direction-dependent
 * scans when predicting in an interlaced (fcm==1) frame, interlaced
 * zigzag otherwise */
3140 block[v->zz_8x8[0][i++]] = value;
3142 if (use_pred && (v->fcm == 1)) {
3143 if (!dc_pred_dir) // top
3144 block[v->zz_8x8[2][i++]] = value;
3146 block[v->zz_8x8[3][i++]] = value;
3148 block[v->zzi_8x8[i++]] = value;
3153 /* apply AC prediction if needed */
3155 /* scale predictors if needed*/
3156 if (q2 && q1 != q2) {
3157 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3158 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3160 if (dc_pred_dir) { // left
3161 for (k = 1; k < 8; k++)
3162 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3164 for (k = 1; k < 8; k++)
3165 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3168 if (dc_pred_dir) { // left
3169 for (k = 1; k < 8; k++)
3170 block[k << v->left_blk_sh] += ac_val[k];
3172 for (k = 1; k < 8; k++)
3173 block[k << v->top_blk_sh] += ac_val[k + 8];
3177 /* save AC coeffs for further prediction */
3178 for (k = 1; k < 8; k++) {
3179 ac_val2[k ] = block[k << v->left_blk_sh];
3180 ac_val2[k + 8] = block[k << v->top_blk_sh];
3183 /* scale AC coeffs */
3184 for (k = 1; k < 64; k++)
3188 block[k] += (block[k] < 0) ? -mquant : mquant;
3191 if (use_pred) i = 63;
3192 } else { // no AC coeffs
3195 memset(ac_val2, 0, 16 * 2);
3196 if (dc_pred_dir) { // left
3198 memcpy(ac_val2, ac_val, 8 * 2);
3199 if (q2 && q1 != q2) {
3200 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3201 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3202 for (k = 1; k < 8; k++)
3203 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3208 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3209 if (q2 && q1 != q2) {
3210 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3211 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3212 for (k = 1; k < 8; k++)
3213 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3218 /* apply AC prediction if needed */
3220 if (dc_pred_dir) { // left
3221 for (k = 1; k < 8; k++) {
3222 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3223 if (!v->pquantizer && block[k << v->left_blk_sh])
3224 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3227 for (k = 1; k < 8; k++) {
3228 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3229 if (!v->pquantizer && block[k << v->top_blk_sh])
3230 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3236 s->block_last_index[n] = i;
/* Decode one inter (P) block: pick the block transform type (8x8, two
 * 8x4, two 4x8 or four 4x4), decode run/level pairs per sub-block,
 * inverse-transform and add into dst; reports the coded-subblock
 * pattern of this block via ttmb_out. */
3243 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n,
3244 int mquant, int ttmb, int first_block,
3245 uint8_t *dst, int linesize, int skip_block,
3248 MpegEncContext *s = &v->s;
3249 GetBitContext *gb = &s->gb;
3252 int scale, off, idx, last, skip, value;
3253 int ttblk = ttmb & 7;
3256 s->dsp.clear_block(block);
3259 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3261 if (ttblk == TT_4X4) {
3262 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3264 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3265 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3266 || (!v->res_rtm_flag && !first_block))) {
3267 subblkpat = decode012(gb);
3269 subblkpat ^= 3; // swap decoded pattern bits
3270 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3272 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3275 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3277 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3278 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3279 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3282 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3283 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* TT_8X8: a single full-block transform */
3292 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3297 idx = v->zz_8x8[0][i++];
3299 idx = v->zzi_8x8[i++];
3300 block[idx] = value * scale;
3302 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3306 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3308 v->vc1dsp.vc1_inv_trans_8x8(block);
3309 s->dsp.add_pixels_clamped(block, dst, linesize);
/* TT_4X4: up to four 4x4 sub-transforms, presence given by subblkpat */
3314 pat = ~subblkpat & 0xF;
3315 for (j = 0; j < 4; j++) {
3316 last = subblkpat & (1 << (3 - j));
3318 off = (j & 1) * 4 + (j & 2) * 16;
3320 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3325 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3327 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3328 block[idx + off] = value * scale;
3330 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3332 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3334 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3336 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* TT_8X4: two horizontal 8x4 halves */
3341 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3342 for (j = 0; j < 2; j++) {
3343 last = subblkpat & (1 << (1 - j));
3347 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3352 idx = v->zz_8x4[i++] + off;
3354 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3355 block[idx] = value * scale;
3357 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3359 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3361 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3363 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* TT_4X8: two vertical 4x8 halves */
3368 pat = ~(subblkpat * 5) & 0xF;
3369 for (j = 0; j < 2; j++) {
3370 last = subblkpat & (1 << (1 - j));
3374 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3379 idx = v->zz_4x8[i++] + off;
3381 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3382 block[idx] = value * scale;
3384 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3386 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3388 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3390 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* record this block's transform type in the per-MB pattern */
3396 *ttmb_out |= ttblk << (n * 4);
3400 /** @} */ // Macroblock group
/* MV differential decoding tables: per-index code sizes and value offsets.
 * NOTE(review): consumers are not visible in this excerpt — presumably the
 * GET_MVDATA()-style macros used below; confirm against the full file. */
3402 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3403 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/**
 * Apply the vertical in-loop deblocking filter for one block (0-3 luma,
 * 4-5 chroma) of the macroblock in the previous MB row (cbp/is_intra are
 * read at s->mb_x - s->mb_stride).  Filters the horizontal edge against the
 * block below: full 8-sample filter when either side is intra or the MVs
 * differ, otherwise 4-sample halves selected by the coded-block-pattern
 * bits; an extra internal edge is filtered for 8x4/4x4 transformed blocks.
 * NOTE(review): some lines (braces, else branches) are elided in this excerpt.
 */
3405 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3407 MpegEncContext *s = &v->s;
3408 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3409 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3410 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3411 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3412 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3415 if (block_num > 3) {
3416 dst = s->dest[block_num - 3];
3418 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
/* Skip the MB-boundary edge on the very last row (except top luma blocks). */
3420 if (s->mb_y != s->end_mb_y || block_num < 2) {
3424 if (block_num > 3) {
/* Chroma: neighbour below is the co-located block of the current MB row. */
3425 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3426 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3427 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3428 mv_stride = s->mb_stride;
/* Luma: blocks 0/1 border blocks 2/3 of the same MB, 2/3 border the MB below. */
3430 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3431 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3432 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3433 : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3434 mv_stride = s->b8_stride;
3435 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* Intra on either side, or differing MVs => filter the full 8-sample edge. */
3438 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3439 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3440 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3442 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3444 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3447 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3449 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* Internal horizontal edge when the block used an 8x4 or 4x4 transform. */
3454 dst -= 4 * linesize;
3455 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3456 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3457 idx = (block_cbp | (block_cbp >> 2)) & 3;
3459 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3462 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3464 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/**
 * Apply the horizontal in-loop deblocking filter for one block of the
 * macroblock one row up and one column left (cbp/is_intra are read at
 * s->mb_x - 1 - s->mb_stride).  Mirrors vc1_apply_p_v_loop_filter() for
 * vertical edges: full 8-sample filter when either side is intra or MVs
 * differ, 4-sample halves by cbp otherwise, plus the internal vertical
 * edge for 4x8/4x4 transformed blocks.
 * NOTE(review): some lines (braces, else branches) are elided in this excerpt.
 */
3469 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3471 MpegEncContext *s = &v->s;
3472 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3473 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3474 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3475 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3476 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3479 if (block_num > 3) {
3480 dst = s->dest[block_num - 3] - 8 * linesize;
3482 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
/* Skip the MB-boundary edge past the last column (except left-column blocks). */
3485 if (s->mb_x != s->mb_width || !(block_num & 5)) {
3488 if (block_num > 3) {
/* Chroma: right neighbour is the co-located block of the MB to the right. */
3489 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3490 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3491 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
/* Luma: odd blocks border the next MB, even blocks border the block to their right. */
3493 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3494 : (mb_cbp >> ((block_num + 1) * 4));
3495 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3496 : (mb_is_intra >> ((block_num + 1) * 4));
3497 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3499 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3500 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3502 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3504 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3507 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3509 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* Internal vertical edge when the block used a 4x8 or 4x4 transform. */
3515 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3516 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3517 idx = (block_cbp | (block_cbp >> 1)) & 5;
3519 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3522 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3524 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/**
 * Run the P-frame in-loop deblocking filter for the current macroblock:
 * vertical edges for all 6 blocks, then horizontal edges one MB behind
 * (the horizontal pass needs the vertically-filtered result), catching up
 * at the end of each MB row.
 */
3529 static void vc1_apply_p_loop_filter(VC1Context *v)
3531 MpegEncContext *s = &v->s;
3534 for (i = 0; i < 6; i++) {
3535 vc1_apply_p_v_loop_filter(v, i);
3538 /* V always precedes H, therefore we run H one MB before V;
3539 * at the end of a row, we catch up to complete the row */
3541 for (i = 0; i < 6; i++) {
3542 vc1_apply_p_h_loop_filter(v, i);
3544 if (s->mb_x == s->mb_width - 1) {
3546 ff_update_block_index(s);
3547 for (i = 0; i < 6; i++) {
3548 vc1_apply_p_h_loop_filter(v, i);
/*
 * Decode one macroblock of a progressive P frame.
 * Handles the four MB classes: 1MV coded, 1MV skipped, 4MV coded and 4MV
 * skipped.  For each of the 6 blocks (4 luma + 2 chroma) it either decodes
 * an intra block (vc1_decode_intra_block + inverse 8x8 transform + optional
 * overlap smoothing) or an inter residual (vc1_decode_p_block) after motion
 * compensation.  Per-MB cbp, transform-type and intra masks are stored in
 * v->cbp / v->ttblk / v->is_intra for the loop filter.
 * NOTE(review): several lines (braces, else branches, return) are elided in
 * this excerpt.
 */
3554 /** Decode one P-frame MB
3556 static int vc1_decode_p_mb(VC1Context *v)
3558 MpegEncContext *s = &v->s;
3559 GetBitContext *gb = &s->gb;
3561 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3562 int cbp; /* cbp decoding stuff */
3563 int mqdiff, mquant; /* MB quantization */
3564 int ttmb = v->ttfrm; /* MB Transform type */
3566 int mb_has_coeffs = 1; /* last_flag */
3567 int dmv_x, dmv_y; /* Differential MV components */
3568 int index, index1; /* LUT indexes */
3569 int val, sign; /* temp values */
3570 int first_block = 1;
3572 int skipped, fourmv;
3573 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3575 mquant = v->pq; /* Lossy initialization */
/* MV-type (1MV vs 4MV) and skip flags: raw bits or bitplane, as signalled. */
3577 if (v->mv_type_is_raw)
3578 fourmv = get_bits1(gb);
3580 fourmv = v->mv_type_mb_plane[mb_pos];
3582 skipped = get_bits1(gb);
3584 skipped = v->s.mbskip_table[mb_pos];
3586 if (!fourmv) { /* 1MV mode */
3588 GET_MVDATA(dmv_x, dmv_y);
3591 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3592 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3594 s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3595 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3597 /* FIXME Set DC val for inter block ? */
3598 if (s->mb_intra && !mb_has_coeffs) {
3600 s->ac_pred = get_bits1(gb);
3602 } else if (mb_has_coeffs) {
3604 s->ac_pred = get_bits1(gb);
3605 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3611 s->current_picture.f.qscale_table[mb_pos] = mquant;
3613 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3614 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3615 VC1_TTMB_VLC_BITS, 2);
3616 if (!s->mb_intra) vc1_mc_1mv(v, 0);
/* Decode all 6 blocks (4 luma, 2 chroma). */
3618 for (i = 0; i < 6; i++) {
3619 s->dc_val[0][s->block_index[i]] = 0;
3621 val = ((cbp >> (5 - i)) & 1);
3622 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3623 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3625 /* check if prediction blocks A and C are available */
3626 v->a_avail = v->c_avail = 0;
3627 if (i == 2 || i == 3 || !s->first_slice_line)
3628 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3629 if (i == 1 || i == 3 || s->mb_x)
3630 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3632 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3633 (i & 4) ? v->codingset2 : v->codingset);
3634 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3636 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3638 for (j = 0; j < 64; j++)
3639 s->block[i][j] <<= 1;
3640 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* Overlap smoothing across intra block edges for high quantizers. */
3641 if (v->pq >= 9 && v->overlap) {
3643 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3645 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3647 block_cbp |= 0xF << (i << 2);
3648 block_intra |= 1 << i;
3650 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3651 s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3652 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3653 block_cbp |= pat << (i << 2);
3654 if (!v->ttmbf && ttmb < 8)
/* 1MV skipped MB: zero MVs/state and motion-compensate only. */
3661 for (i = 0; i < 6; i++) {
3662 v->mb_type[0][s->block_index[i]] = 0;
3663 s->dc_val[0][s->block_index[i]] = 0;
3665 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3666 s->current_picture.f.qscale_table[mb_pos] = 0;
3667 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3670 } else { // 4MV mode
3671 if (!skipped /* unskipped MB */) {
3672 int intra_count = 0, coded_inter = 0;
3673 int is_intra[6], is_coded[6];
/* First pass over luma blocks: per-block MV data, intra/coded flags, MC. */
3675 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3676 for (i = 0; i < 6; i++) {
3677 val = ((cbp >> (5 - i)) & 1);
3678 s->dc_val[0][s->block_index[i]] = 0;
3685 GET_MVDATA(dmv_x, dmv_y);
3687 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3689 vc1_mc_4mv_luma(v, i, 0);
3690 intra_count += s->mb_intra;
3691 is_intra[i] = s->mb_intra;
3692 is_coded[i] = mb_has_coeffs;
/* Chroma blocks inherit intra when at least 3 luma blocks are intra. */
3695 is_intra[i] = (intra_count >= 3);
3699 vc1_mc_4mv_chroma(v, 0);
3700 v->mb_type[0][s->block_index[i]] = is_intra[i];
3702 coded_inter = !is_intra[i] & is_coded[i];
3704 // if there are no coded blocks then don't do anything more
3706 if (!intra_count && !coded_inter)
3709 s->current_picture.f.qscale_table[mb_pos] = mquant;
3710 /* test if block is intra and has pred */
3713 for (i = 0; i < 6; i++)
3715 if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3716 || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3722 s->ac_pred = get_bits1(gb);
3726 if (!v->ttmbf && coded_inter)
3727 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Second pass: decode residuals/intra data for all 6 blocks. */
3728 for (i = 0; i < 6; i++) {
3730 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3731 s->mb_intra = is_intra[i];
3733 /* check if prediction blocks A and C are available */
3734 v->a_avail = v->c_avail = 0;
3735 if (i == 2 || i == 3 || !s->first_slice_line)
3736 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3737 if (i == 1 || i == 3 || s->mb_x)
3738 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3740 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3741 (i & 4) ? v->codingset2 : v->codingset);
3742 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3744 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3746 for (j = 0; j < 64; j++)
3747 s->block[i][j] <<= 1;
3748 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3749 (i & 4) ? s->uvlinesize : s->linesize);
3750 if (v->pq >= 9 && v->overlap) {
3752 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3754 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3756 block_cbp |= 0xF << (i << 2);
3757 block_intra |= 1 << i;
3758 } else if (is_coded[i]) {
3759 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3760 first_block, s->dest[dst_idx] + off,
3761 (i & 4) ? s->uvlinesize : s->linesize,
3762 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3764 block_cbp |= pat << (i << 2);
3765 if (!v->ttmbf && ttmb < 8)
3770 } else { // skipped MB
/* 4MV skipped MB: zero state, predict+compensate all four luma MVs. */
3772 s->current_picture.f.qscale_table[mb_pos] = 0;
3773 for (i = 0; i < 6; i++) {
3774 v->mb_type[0][s->block_index[i]] = 0;
3775 s->dc_val[0][s->block_index[i]] = 0;
3777 for (i = 0; i < 4; i++) {
3778 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3779 vc1_mc_4mv_luma(v, i, 0);
3781 vc1_mc_4mv_chroma(v, 0);
3782 s->current_picture.f.qscale_table[mb_pos] = 0;
/* Publish per-MB masks for the loop filter. */
3786 v->cbp[s->mb_x] = block_cbp;
3787 v->ttblk[s->mb_x] = block_tt;
3788 v->is_intra[s->mb_x] = block_intra;
/* Decode one macroblock in an interlaced frame p picture.
 * Reads the MB mode (intra / 1MV / 2MV-field / 4MV / 4MV-field), records the
 * per-block MV type in v->blk_mv_type, then decodes either an intra MB
 * (6 intra blocks, fieldtx-aware destination offsets) or an inter MB
 * (MVDATA per signalled MV, field/frame motion compensation, residuals via
 * vc1_decode_p_block).  At the end of a row v->is_intra is mirrored into
 * v->is_intra_base.
 * NOTE(review): several lines (braces, else branches, return) are elided in
 * this excerpt. */
3795 static int vc1_decode_p_mb_intfr(VC1Context *v)
3797 MpegEncContext *s = &v->s;
3798 GetBitContext *gb = &s->gb;
3800 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3801 int cbp = 0; /* cbp decoding stuff */
3802 int mqdiff, mquant; /* MB quantization */
3803 int ttmb = v->ttfrm; /* MB Transform type */
3805 int mb_has_coeffs = 1; /* last_flag */
3806 int dmv_x, dmv_y; /* Differential MV components */
3807 int val; /* temp value */
3808 int first_block = 1;
3810 int skipped, fourmv = 0, twomv = 0;
3811 int block_cbp = 0, pat, block_tt = 0;
3812 int idx_mbmode = 0, mvbp;
3813 int stride_y, fieldtx;
3815 mquant = v->pq; /* Lossy initialization */
3818 skipped = get_bits1(gb);
3820 skipped = v->s.mbskip_table[mb_pos];
/* MB mode VLC; the table depends on whether 4MV is switched on. */
3822 if (v->fourmvswitch)
3823 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3825 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3826 switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3827 /* store the motion vector type in a flag (useful later) */
3828 case MV_PMODE_INTFR_4MV:
3830 v->blk_mv_type[s->block_index[0]] = 0;
3831 v->blk_mv_type[s->block_index[1]] = 0;
3832 v->blk_mv_type[s->block_index[2]] = 0;
3833 v->blk_mv_type[s->block_index[3]] = 0;
3835 case MV_PMODE_INTFR_4MV_FIELD:
3837 v->blk_mv_type[s->block_index[0]] = 1;
3838 v->blk_mv_type[s->block_index[1]] = 1;
3839 v->blk_mv_type[s->block_index[2]] = 1;
3840 v->blk_mv_type[s->block_index[3]] = 1;
3842 case MV_PMODE_INTFR_2MV_FIELD:
3844 v->blk_mv_type[s->block_index[0]] = 1;
3845 v->blk_mv_type[s->block_index[1]] = 1;
3846 v->blk_mv_type[s->block_index[2]] = 1;
3847 v->blk_mv_type[s->block_index[3]] = 1;
3849 case MV_PMODE_INTFR_1MV:
3850 v->blk_mv_type[s->block_index[0]] = 0;
3851 v->blk_mv_type[s->block_index[1]] = 0;
3852 v->blk_mv_type[s->block_index[2]] = 0;
3853 v->blk_mv_type[s->block_index[3]] = 0;
3856 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3857 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3858 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3859 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3860 s->mb_intra = v->is_intra[s->mb_x] = 1;
3861 for (i = 0; i < 6; i++)
3862 v->mb_type[0][s->block_index[i]] = 1;
3863 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3864 mb_has_coeffs = get_bits1(gb);
3866 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3867 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3869 s->current_picture.f.qscale_table[mb_pos] = mquant;
3870 /* Set DC scale - y and c use the same (not sure if necessary here) */
3871 s->y_dc_scale = s->y_dc_scale_table[mquant];
3872 s->c_dc_scale = s->c_dc_scale_table[mquant];
3874 for (i = 0; i < 6; i++) {
3875 s->dc_val[0][s->block_index[i]] = 0;
3877 val = ((cbp >> (5 - i)) & 1);
3878 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3879 v->a_avail = v->c_avail = 0;
3880 if (i == 2 || i == 3 || !s->first_slice_line)
3881 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3882 if (i == 1 || i == 3 || s->mb_x)
3883 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3885 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3886 (i & 4) ? v->codingset2 : v->codingset);
3887 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3888 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* Field transform doubles the luma stride and interleaves block rows. */
3890 stride_y = s->linesize << fieldtx;
3891 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3893 stride_y = s->uvlinesize;
3896 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3900 } else { // inter MB
3901 mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3903 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* MV block patterns: 2MV-field uses a 2-bit pattern, 4MV modes a 4-bit one. */
3904 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3905 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3907 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3908 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3909 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3912 s->mb_intra = v->is_intra[s->mb_x] = 0;
3913 for (i = 0; i < 6; i++)
3914 v->mb_type[0][s->block_index[i]] = 0;
3915 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3916 /* for all motion vector read MVDATA and motion compensate each block */
3920 for (i = 0; i < 6; i++) {
3923 val = ((mvbp >> (3 - i)) & 1);
3925 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3927 vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3928 vc1_mc_4mv_luma(v, i, 0);
3929 } else if (i == 4) {
3930 vc1_mc_4mv_chroma4(v);
/* 2MV-field: one MV per field pair (blocks 0/1 and 2/3). */
3937 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3939 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3940 vc1_mc_4mv_luma(v, 0, 0);
3941 vc1_mc_4mv_luma(v, 1, 0);
3944 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3946 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3947 vc1_mc_4mv_luma(v, 2, 0);
3948 vc1_mc_4mv_luma(v, 3, 0);
3949 vc1_mc_4mv_chroma4(v);
/* 1MV: single MV for the whole MB; presence taken from the mode table. */
3951 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3953 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3955 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3959 GET_MQUANT(); // p. 227
3960 s->current_picture.f.qscale_table[mb_pos] = mquant;
3961 if (!v->ttmbf && cbp)
3962 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3963 for (i = 0; i < 6; i++) {
3964 s->dc_val[0][s->block_index[i]] = 0;
3966 val = ((cbp >> (5 - i)) & 1);
3968 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3970 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3972 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3973 first_block, s->dest[dst_idx] + off,
3974 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3975 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3976 block_cbp |= pat << (i << 2);
3977 if (!v->ttmbf && ttmb < 8)
/* Skipped MB: clear state and motion-compensate with predicted MV only. */
3984 s->mb_intra = v->is_intra[s->mb_x] = 0;
3985 for (i = 0; i < 6; i++) {
3986 v->mb_type[0][s->block_index[i]] = 0;
3987 s->dc_val[0][s->block_index[i]] = 0;
3989 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3990 s->current_picture.f.qscale_table[mb_pos] = 0;
3991 v->blk_mv_type[s->block_index[0]] = 0;
3992 v->blk_mv_type[s->block_index[1]] = 0;
3993 v->blk_mv_type[s->block_index[2]] = 0;
3994 v->blk_mv_type[s->block_index[3]] = 0;
3995 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
3998 if (s->mb_x == s->mb_width - 1)
3999 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/**
 * Decode one macroblock in an interlaced field P picture.
 * idx_mbmode <= 1 selects an intra MB (6 intra blocks with AC prediction
 * flag and ICBPCY); otherwise idx_mbmode <= 5 is 1-MV and higher values are
 * 4-MV, each followed by residual decoding via vc1_decode_p_block.
 * Destination offsets are shifted by one picture line when decoding the
 * second (bottom) field.  Mirrors v->is_intra into v->is_intra_base at the
 * end of each row.
 * NOTE(review): several lines (braces, else branches, return) are elided in
 * this excerpt.
 */
4003 static int vc1_decode_p_mb_intfi(VC1Context *v)
4005 MpegEncContext *s = &v->s;
4006 GetBitContext *gb = &s->gb;
4008 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4009 int cbp = 0; /* cbp decoding stuff */
4010 int mqdiff, mquant; /* MB quantization */
4011 int ttmb = v->ttfrm; /* MB Transform type */
4013 int mb_has_coeffs = 1; /* last_flag */
4014 int dmv_x, dmv_y; /* Differential MV components */
4015 int val; /* temp values */
4016 int first_block = 1;
4019 int block_cbp = 0, pat, block_tt = 0;
4022 mquant = v->pq; /* Lossy initialization */
4024 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4025 if (idx_mbmode <= 1) { // intra MB
4026 s->mb_intra = v->is_intra[s->mb_x] = 1;
4027 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4028 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4029 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4031 s->current_picture.f.qscale_table[mb_pos] = mquant;
4032 /* Set DC scale - y and c use the same (not sure if necessary here) */
4033 s->y_dc_scale = s->y_dc_scale_table[mquant];
4034 s->c_dc_scale = s->c_dc_scale_table[mquant];
4035 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4036 mb_has_coeffs = idx_mbmode & 1;
4038 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4040 for (i = 0; i < 6; i++) {
4041 s->dc_val[0][s->block_index[i]] = 0;
4042 v->mb_type[0][s->block_index[i]] = 1;
4044 val = ((cbp >> (5 - i)) & 1);
4045 v->a_avail = v->c_avail = 0;
4046 if (i == 2 || i == 3 || !s->first_slice_line)
4047 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4048 if (i == 1 || i == 3 || s->mb_x)
4049 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4051 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4052 (i & 4) ? v->codingset2 : v->codingset);
4053 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4055 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4056 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
/* Bottom field: shift destination down by one full picture line. */
4057 off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4058 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4059 // TODO: loop filter
4062 s->mb_intra = v->is_intra[s->mb_x] = 0;
4063 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4064 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4065 if (idx_mbmode <= 5) { // 1-MV
4067 if (idx_mbmode & 1) {
4068 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4070 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4072 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: per-block pattern selects which luma blocks carry MV data. */
4074 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4075 for (i = 0; i < 6; i++) {
4077 dmv_x = dmv_y = pred_flag = 0;
4078 val = ((v->fourmvbp >> (3 - i)) & 1);
4080 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4082 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4083 vc1_mc_4mv_luma(v, i, 0);
4085 vc1_mc_4mv_chroma(v, 0);
4087 mb_has_coeffs = idx_mbmode & 1;
4090 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4094 s->current_picture.f.qscale_table[mb_pos] = mquant;
4095 if (!v->ttmbf && cbp) {
4096 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4099 for (i = 0; i < 6; i++) {
4100 s->dc_val[0][s->block_index[i]] = 0;
4102 val = ((cbp >> (5 - i)) & 1);
4103 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4104 if (v->cur_field_type)
4105 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4107 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4108 first_block, s->dest[dst_idx] + off,
4109 (i & 4) ? s->uvlinesize : s->linesize,
4110 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4112 block_cbp |= pat << (i << 2);
4113 if (!v->ttmbf && ttmb < 8) ttmb = -1;
4118 if (s->mb_x == s->mb_width - 1)
4119 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
/*
 * Decode one macroblock of a progressive B frame (Main profile).
 * Reads direct and skip flags (raw bits or bitplanes), selects the B MV
 * type (forward / backward / interpolated / direct), performs bidirectional
 * MV prediction and compensation via vc1_pred_b_mv()/vc1_b_mc(), then
 * decodes intra blocks or inter residuals for the 6 blocks as in the P-MB
 * path (no per-MB cbp/ttblk bookkeeping — B frames are not loop-filter
 * reference state here).
 * NOTE(review): several lines (braces, else branches) are elided in this
 * excerpt.
 */
4123 /** Decode one B-frame MB (in Main profile)
4125 static void vc1_decode_b_mb(VC1Context *v)
4127 MpegEncContext *s = &v->s;
4128 GetBitContext *gb = &s->gb;
4130 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4131 int cbp = 0; /* cbp decoding stuff */
4132 int mqdiff, mquant; /* MB quantization */
4133 int ttmb = v->ttfrm; /* MB Transform type */
4134 int mb_has_coeffs = 0; /* last_flag */
4135 int index, index1; /* LUT indexes */
4136 int val, sign; /* temp values */
4137 int first_block = 1;
4139 int skipped, direct;
4140 int dmv_x[2], dmv_y[2];
4141 int bmvtype = BMV_TYPE_BACKWARD;
4143 mquant = v->pq; /* Lossy initialization */
4147 direct = get_bits1(gb);
4149 direct = v->direct_mb_plane[mb_pos];
4151 skipped = get_bits1(gb);
4153 skipped = v->s.mbskip_table[mb_pos];
4155 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4156 for (i = 0; i < 6; i++) {
4157 v->mb_type[0][s->block_index[i]] = 0;
4158 s->dc_val[0][s->block_index[i]] = 0;
4160 s->current_picture.f.qscale_table[mb_pos] = 0;
/* Non-direct MB: read one MV and the B MV-type (fwd/bwd/interpolated). */
4164 GET_MVDATA(dmv_x[0], dmv_y[0]);
4165 dmv_x[1] = dmv_x[0];
4166 dmv_y[1] = dmv_y[0];
4168 if (skipped || !s->mb_intra) {
4169 bmvtype = decode012(gb);
/* BFRACTION decides which default direction the 1-bit codes map to. */
4172 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4175 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4178 bmvtype = BMV_TYPE_INTERPOLATED;
4179 dmv_x[0] = dmv_y[0] = 0;
4183 for (i = 0; i < 6; i++)
4184 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* Skipped MB: predict + compensate, nothing more to decode. */
4188 bmvtype = BMV_TYPE_INTERPOLATED;
4189 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4190 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4194 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4197 s->current_picture.f.qscale_table[mb_pos] = mquant;
4199 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4200 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4201 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4202 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4204 if (!mb_has_coeffs && !s->mb_intra) {
4205 /* no coded blocks - effectively skipped */
4206 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4207 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4210 if (s->mb_intra && !mb_has_coeffs) {
4212 s->current_picture.f.qscale_table[mb_pos] = mquant;
4213 s->ac_pred = get_bits1(gb);
4215 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4217 if (bmvtype == BMV_TYPE_INTERPOLATED) {
/* Interpolated: a second MV follows the first. */
4218 GET_MVDATA(dmv_x[0], dmv_y[0]);
4219 if (!mb_has_coeffs) {
4220 /* interpolated skipped block */
4221 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4222 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4226 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4228 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4231 s->ac_pred = get_bits1(gb);
4232 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4234 s->current_picture.f.qscale_table[mb_pos] = mquant;
4235 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4236 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Decode all 6 blocks, same intra/inter split as the P-MB path. */
4240 for (i = 0; i < 6; i++) {
4241 s->dc_val[0][s->block_index[i]] = 0;
4243 val = ((cbp >> (5 - i)) & 1);
4244 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4245 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4247 /* check if prediction blocks A and C are available */
4248 v->a_avail = v->c_avail = 0;
4249 if (i == 2 || i == 3 || !s->first_slice_line)
4250 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4251 if (i == 1 || i == 3 || s->mb_x)
4252 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4254 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4255 (i & 4) ? v->codingset2 : v->codingset);
4256 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4258 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4260 for (j = 0; j < 64; j++)
4261 s->block[i][j] <<= 1;
4262 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4264 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4265 first_block, s->dest[dst_idx] + off,
4266 (i & 4) ? s->uvlinesize : s->linesize,
4267 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4268 if (!v->ttmbf && ttmb < 8)
/*
 * Decode one macroblock of an interlaced field B picture.
 * idx_mbmode <= 1 is an intra MB; otherwise the forward/backward plane (or
 * a raw bit) plus a decode012() code selects the B MV type (forward,
 * backward, direct or interpolated with optional second MV).  1-MV and
 * 4-MV variants predict via vc1_pred_b_mv_intfi() and compensate with
 * vc1_b_mc() / vc1_mc_4mv_*; residuals go through vc1_decode_p_block.
 * NOTE(review): several lines (braces, else branches) are elided in this
 * excerpt.
 */
4275 /** Decode one B-frame MB (in interlaced field B picture)
4277 static void vc1_decode_b_mb_intfi(VC1Context *v)
4279 MpegEncContext *s = &v->s;
4280 GetBitContext *gb = &s->gb;
4282 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4283 int cbp = 0; /* cbp decoding stuff */
4284 int mqdiff, mquant; /* MB quantization */
4285 int ttmb = v->ttfrm; /* MB Transform type */
4286 int mb_has_coeffs = 0; /* last_flag */
4287 int val; /* temp value */
4288 int first_block = 1;
4291 int dmv_x[2], dmv_y[2], pred_flag[2];
4292 int bmvtype = BMV_TYPE_BACKWARD;
4293 int idx_mbmode, interpmvp;
4295 mquant = v->pq; /* Lossy initialization */
4298 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4299 if (idx_mbmode <= 1) { // intra MB
4300 s->mb_intra = v->is_intra[s->mb_x] = 1;
4301 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4302 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4303 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4305 s->current_picture.f.qscale_table[mb_pos] = mquant;
4306 /* Set DC scale - y and c use the same (not sure if necessary here) */
4307 s->y_dc_scale = s->y_dc_scale_table[mquant];
4308 s->c_dc_scale = s->c_dc_scale_table[mquant];
4309 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4310 mb_has_coeffs = idx_mbmode & 1;
4312 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4314 for (i = 0; i < 6; i++) {
4315 s->dc_val[0][s->block_index[i]] = 0;
4317 val = ((cbp >> (5 - i)) & 1);
4318 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4319 v->a_avail = v->c_avail = 0;
4320 if (i == 2 || i == 3 || !s->first_slice_line)
4321 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4322 if (i == 1 || i == 3 || s->mb_x)
4323 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4325 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4326 (i & 4) ? v->codingset2 : v->codingset);
4327 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4329 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4331 for (j = 0; j < 64; j++)
4332 s->block[i][j] <<= 1;
4333 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
/* Bottom field: shift destination down by one full picture line. */
4334 off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4335 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4336 // TODO: yet to perform loop filter
4339 s->mb_intra = v->is_intra[s->mb_x] = 0;
4340 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4341 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4343 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4345 fwd = v->forward_mb_plane[mb_pos];
4346 if (idx_mbmode <= 5) { // 1-MV
4347 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4348 pred_flag[0] = pred_flag[1] = 0;
4350 bmvtype = BMV_TYPE_FORWARD;
4352 bmvtype = decode012(gb);
4355 bmvtype = BMV_TYPE_BACKWARD;
4358 bmvtype = BMV_TYPE_DIRECT;
4361 bmvtype = BMV_TYPE_INTERPOLATED;
4362 interpmvp = get_bits1(gb);
4365 v->bmvtype = bmvtype;
/* MV differentials are stored per direction: index 0 = forward, 1 = backward. */
4366 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4367 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4369 if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4370 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4372 if (bmvtype == BMV_TYPE_DIRECT) {
4373 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4374 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4376 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4377 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4378 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: only forward direction, per-block pattern gates MV data. */
4381 bmvtype = BMV_TYPE_FORWARD;
4382 v->bmvtype = bmvtype;
4383 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4384 for (i = 0; i < 6; i++) {
4386 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4387 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4388 val = ((v->fourmvbp >> (3 - i)) & 1);
4390 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4391 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4392 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4394 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4395 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4397 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4399 mb_has_coeffs = idx_mbmode & 1;
4402 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4406 s->current_picture.f.qscale_table[mb_pos] = mquant;
4407 if (!v->ttmbf && cbp) {
4408 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4411 for (i = 0; i < 6; i++) {
4412 s->dc_val[0][s->block_index[i]] = 0;
4414 val = ((cbp >> (5 - i)) & 1);
4415 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4416 if (v->cur_field_type)
4417 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4419 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4420 first_block, s->dest[dst_idx] + off,
4421 (i & 4) ? s->uvlinesize : s->linesize,
4422 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4423 if (!v->ttmbf && ttmb < 8)
4431 /** Decode blocks of I-frame
4433 static void vc1_decode_i_blocks(VC1Context *v)
4436 MpegEncContext *s = &v->s;
4441 /* select codingmode used for VLC tables selection */
4442 switch (v->y_ac_table_index) {
4444 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4447 v->codingset = CS_HIGH_MOT_INTRA;
4450 v->codingset = CS_MID_RATE_INTRA;
4454 switch (v->c_ac_table_index) {
4456 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4459 v->codingset2 = CS_HIGH_MOT_INTER;
4462 v->codingset2 = CS_MID_RATE_INTER;
4466 /* Set DC scale - y and c use the same */
4467 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4468 s->c_dc_scale = s->c_dc_scale_table[v->pq];
4471 s->mb_x = s->mb_y = 0;
4473 s->first_slice_line = 1;
4474 for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4476 ff_init_block_index(s);
4477 for (; s->mb_x < s->mb_width; s->mb_x++) {
4479 ff_update_block_index(s);
4480 dst[0] = s->dest[0];
4481 dst[1] = dst[0] + 8;
4482 dst[2] = s->dest[0] + s->linesize * 8;
4483 dst[3] = dst[2] + 8;
4484 dst[4] = s->dest[1];
4485 dst[5] = s->dest[2];
4486 s->dsp.clear_blocks(s->block[0]);
4487 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4488 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4489 s->current_picture.f.qscale_table[mb_pos] = v->pq;
4490 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4491 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4493 // do actual MB decoding and displaying
4494 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4495 v->s.ac_pred = get_bits1(&v->s.gb);
4497 for (k = 0; k < 6; k++) {
4498 val = ((cbp >> (5 - k)) & 1);
4501 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4505 cbp |= val << (5 - k);
4507 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4509 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4511 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4512 if (v->pq >= 9 && v->overlap) {
4514 for (j = 0; j < 64; j++)
4515 s->block[k][j] <<= 1;
4516 s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4519 for (j = 0; j < 64; j++)
4520 s->block[k][j] = (s->block[k][j] - 64) << 1;
4521 s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4525 if (v->pq >= 9 && v->overlap) {
4527 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4528 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4529 if (!(s->flags & CODEC_FLAG_GRAY)) {
4530 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4531 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4534 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4535 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4536 if (!s->first_slice_line) {
4537 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4538 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4539 if (!(s->flags & CODEC_FLAG_GRAY)) {
4540 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4541 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4544 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4545 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4547 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4549 if (get_bits_count(&s->gb) > v->bits) {
4550 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4551 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4552 get_bits_count(&s->gb), v->bits);
4556 if (!v->s.loop_filter)
4557 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4559 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4561 s->first_slice_line = 0;
4563 if (v->s.loop_filter)
4564 ff_draw_horiz_band(s, (s->mb_height - 1) * 16, 16);
4565 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
4568 /** Decode blocks of I-frame for advanced profile
// Advanced-profile I-frame loop. Differs from the simple/main path by:
// per-MB quantizer (mquant), raw bitplanes (fieldtx/acpred/overflags)
// possibly read inline from the bitstream, delayed block output through
// v->block[] ring (vc1_put_signed_blocks_clamped), and slice support via
// s->start_mb_y / s->end_mb_y.
// NOTE(review): this listing has lines elided (case labels, braces,
// declarations); code below is byte-identical to the excerpt.
4570 static void vc1_decode_i_blocks_adv(VC1Context *v)
4573 MpegEncContext *s = &v->s;
4579 GetBitContext *gb = &s->gb;
4581 /* select codingmode used for VLC tables selection */
4582 switch (v->y_ac_table_index) {
4584 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4587 v->codingset = CS_HIGH_MOT_INTRA;
4590 v->codingset = CS_MID_RATE_INTRA;
4594 switch (v->c_ac_table_index) {
4596 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4599 v->codingset2 = CS_HIGH_MOT_INTER;
4602 v->codingset2 = CS_MID_RATE_INTER;
4607 s->mb_x = s->mb_y = 0;
4609 s->first_slice_line = 1;
4610 s->mb_y = s->start_mb_y;
// For a non-first slice, clear the coded-block predictors of the row
// above so DC/AC prediction does not leak across the slice boundary.
4611 if (s->start_mb_y) {
4613 ff_init_block_index(s);
4614 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4615 (1 + s->b8_stride) * sizeof(*s->coded_block));
4617 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4619 ff_init_block_index(s);
4620 for (;s->mb_x < s->mb_width; s->mb_x++) {
4621 DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
4622 ff_update_block_index(s);
4623 s->dsp.clear_blocks(block[0]);
4624 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4625 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4626 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4627 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4629 // do actual MB decoding and displaying
// Raw (non-bitplane-coded) flags are read one bit per MB right here.
4630 if (v->fieldtx_is_raw)
4631 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4632 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4633 if ( v->acpred_is_raw)
4634 v->s.ac_pred = get_bits1(&v->s.gb);
4636 v->s.ac_pred = v->acpred_plane[mb_pos];
4638 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4639 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4643 s->current_picture.f.qscale_table[mb_pos] = mquant;
4644 /* Set DC scale - y and c use the same */
4645 s->y_dc_scale = s->y_dc_scale_table[mquant];
4646 s->c_dc_scale = s->c_dc_scale_table[mquant];
4648 for (k = 0; k < 6; k++) {
4649 val = ((cbp >> (5 - k)) & 1);
4652 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4656 cbp |= val << (5 - k);
// Neighbor availability for AC prediction: above (a) and left (c).
4658 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4659 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4661 vc1_decode_i_block_adv(v, block[k], k, val,
4662 (k < 4) ? v->codingset : v->codingset2, mquant);
4664 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4666 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
// Output is delayed: smooth overlap, then emit the previous MB's blocks.
4669 vc1_smooth_overlap_filter_iblk(v);
4670 vc1_put_signed_blocks_clamped(v);
4671 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4673 if (get_bits_count(&s->gb) > v->bits) {
4674 // TODO: may need modification to handle slice coding
4675 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4676 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4677 get_bits_count(&s->gb), v->bits);
4681 if (!v->s.loop_filter)
4682 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4684 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4685 s->first_slice_line = 0;
4688 /* raw bottom MB row */
// Flush the still-pending last row of delayed blocks.
4690 ff_init_block_index(s);
4691 for (;s->mb_x < s->mb_width; s->mb_x++) {
4692 ff_update_block_index(s);
4693 vc1_put_signed_blocks_clamped(v);
4694 if (v->s.loop_filter)
4695 vc1_loop_filter_iblk_delayed(v, v->pq);
4697 if (v->s.loop_filter)
4698 ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
4699 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4700 (s->end_mb_y << v->field_mode) - 1, (AC_END|DC_END|MV_END));
// Decode all macroblocks of a P frame. Dispatches per-MB on v->fcm
// (frame coding mode): interlaced field, interlaced frame, or progressive,
// and applies the P-frame loop filter one row behind decode. The cbp/ttblk/
// is_intra/luma_mv row caches are shifted down after each MB row.
// NOTE(review): both switches below key off c_ac_table_index as written in
// this excerpt — verify against the intended Y/C table split upstream.
// NOTE(review): this listing has lines elided; code kept byte-identical.
4703 static void vc1_decode_p_blocks(VC1Context *v)
4705 MpegEncContext *s = &v->s;
4706 int apply_loop_filter;
4708 /* select codingmode used for VLC tables selection */
4709 switch (v->c_ac_table_index) {
4711 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4714 v->codingset = CS_HIGH_MOT_INTRA;
4717 v->codingset = CS_MID_RATE_INTRA;
4721 switch (v->c_ac_table_index) {
4723 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4726 v->codingset2 = CS_HIGH_MOT_INTER;
4729 v->codingset2 = CS_MID_RATE_INTER;
4733 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4734 s->first_slice_line = 1;
4735 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4736 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4738 ff_init_block_index(s);
4739 for (; s->mb_x < s->mb_width; s->mb_x++) {
4740 ff_update_block_index(s);
// fcm dispatch: interlaced field / interlaced frame / progressive MB.
4743 vc1_decode_p_mb_intfi(v);
4744 else if (v->fcm == 1)
4745 vc1_decode_p_mb_intfr(v);
4746 else vc1_decode_p_mb(v);
// Loop filter runs one MB row behind, progressive frames only here.
4747 if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == 0)
4748 vc1_apply_p_loop_filter(v);
4749 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4750 // TODO: may need modification to handle slice coding
4751 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4752 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4753 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
// Shift the per-row caches so the just-decoded row becomes "previous".
4757 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4758 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4759 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4760 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4761 if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4762 s->first_slice_line = 0;
// Filter the final (still-unfiltered) MB row after the main loop.
4764 if (apply_loop_filter) {
4766 ff_init_block_index(s);
4767 for (; s->mb_x < s->mb_width; s->mb_x++) {
4768 ff_update_block_index(s);
4769 vc1_apply_p_loop_filter(v);
4772 if (s->end_mb_y >= s->start_mb_y)
4773 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4774 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4775 (s->end_mb_y << v->field_mode) - 1, (AC_END|DC_END|MV_END));
// Decode all macroblocks of a B frame: select coding sets from the chroma
// AC table index, walk the slice's MB rows, decode each MB (interlaced
// field path visible here), and hand completed rows to draw/ER reporting.
// NOTE(review): this listing has lines elided (case labels, the
// progressive-MB dispatch branch, braces); code kept byte-identical.
4778 static void vc1_decode_b_blocks(VC1Context *v)
4780 MpegEncContext *s = &v->s;
4782 /* select codingmode used for VLC tables selection */
4783 switch (v->c_ac_table_index) {
4785 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4788 v->codingset = CS_HIGH_MOT_INTRA;
4791 v->codingset = CS_MID_RATE_INTRA;
4795 switch (v->c_ac_table_index) {
4797 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4800 v->codingset2 = CS_HIGH_MOT_INTER;
4803 v->codingset2 = CS_MID_RATE_INTER;
4807 s->first_slice_line = 1;
4808 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4810 ff_init_block_index(s);
4811 for (; s->mb_x < s->mb_width; s->mb_x++) {
4812 ff_update_block_index(s);
4815 vc1_decode_b_mb_intfi(v);
// Abort the slice if the reader overran (or underflowed) the frame data.
4818 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4819 // TODO: may need modification to handle slice coding
4820 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4821 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4822 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4825 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4827 if (!v->s.loop_filter)
4828 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4830 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4831 s->first_slice_line = 0;
4833 if (v->s.loop_filter)
4834 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4835 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4836 (s->end_mb_y << v->field_mode) - 1, (AC_END|DC_END|MV_END));
// Handle a skipped P frame: no bitstream to parse — copy the previous
// decoded picture row by row into the current destination and mark the
// whole slice as correctly decoded for the error resilience code.
// NOTE(review): this listing has lines elided (braces); code kept
// byte-identical to the excerpt.
4839 static void vc1_decode_skip_blocks(VC1Context *v)
4841 MpegEncContext *s = &v->s;
4843 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, (AC_END|DC_END|MV_END));
4844 s->first_slice_line = 1;
4845 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4847 ff_init_block_index(s);
4848 ff_update_block_index(s);
// One 16-pixel-high luma band and the matching 8-high chroma bands.
4849 memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4850 memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4851 memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4852 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4853 s->first_slice_line = 0;
// The skipped frame is emitted as a P picture.
4855 s->pict_type = AV_PICTURE_TYPE_P;
// Top-level per-picture dispatcher: routes to the I/P/B block decoders
// based on picture type and profile. The X8 intra path (WMV2-style) is
// handled by ff_intrax8_decode_picture. For B-intra the I decoders are
// reused before the B path runs.
// NOTE(review): this listing has lines elided (the x8 condition, break
// statements, braces); code kept byte-identical to the excerpt.
4858 static void vc1_decode_blocks(VC1Context *v)
4861 v->s.esc3_level_length = 0;
4863 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
// Reset the delayed-block ring indices used by the advanced I path.
4866 v->left_blk_idx = -1;
4867 v->topleft_blk_idx = 1;
4869 switch (v->s.pict_type) {
4870 case AV_PICTURE_TYPE_I:
4871 if (v->profile == PROFILE_ADVANCED)
4872 vc1_decode_i_blocks_adv(v);
4874 vc1_decode_i_blocks(v);
4876 case AV_PICTURE_TYPE_P:
4877 if (v->p_frame_skipped)
4878 vc1_decode_skip_blocks(v);
4880 vc1_decode_p_blocks(v);
4882 case AV_PICTURE_TYPE_B:
4884 if (v->profile == PROFILE_ADVANCED)
4885 vc1_decode_i_blocks_adv(v);
4887 vc1_decode_i_blocks(v);
4889 vc1_decode_b_blocks(v);
4895 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
4899 * Transform coefficients for both sprites in 16.16 fixed point format,
4900 * in the order they appear in the bitstream:
4902 * rotation 1 (unused)
4904 * rotation 2 (unused)
4911 int effect_type, effect_flag;
4912 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
4913 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
// Read a sprite transform coefficient: 30 bits, biased by 2^29 to make it
// signed, then scaled to 16.16 fixed point.
// NOTE(review): left-shifting a negative value is formally UB in C;
// behavior relied upon here is the common arithmetic `* 2` — verify.
4916 static inline int get_fp_val(GetBitContext* gb)
4918 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
// Parse one sprite affine-transform record into c[0..6] (16.16 fixed
// point). A 2-bit selector chooses how many of the scale/rotation terms
// are explicitly coded; c[2]/c[5] are the X/Y translation and c[6] is
// read conditionally (opacity).
// NOTE(review): case labels, defaults, and braces are elided in this
// listing; code kept byte-identical to the excerpt.
4921 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4925 switch (get_bits(gb, 2)) {
4928 c[2] = get_fp_val(gb);
4932 c[0] = c[4] = get_fp_val(gb);
4933 c[2] = get_fp_val(gb);
4936 c[0] = get_fp_val(gb);
4937 c[2] = get_fp_val(gb);
4938 c[4] = get_fp_val(gb);
4941 c[0] = get_fp_val(gb);
4942 c[1] = get_fp_val(gb);
4943 c[2] = get_fp_val(gb);
4944 c[3] = get_fp_val(gb);
4945 c[4] = get_fp_val(gb);
4948 c[5] = get_fp_val(gb);
4950 c[6] = get_fp_val(gb);
// Parse the WMV Image sprite header into sd: one transform per sprite
// (two if v->two_sprites), followed by optional effect blocks and an
// effect flag. Logs the parsed coefficients at DEBUG level and warns on
// buffer over/underrun.
// NOTE(review): this listing has lines elided (braces, some case labels);
// code kept byte-identical to the excerpt.
4955 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4957 AVCodecContext *avctx = v->s.avctx;
4960 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4961 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
// Rotation terms (c[1], c[3]) are parsed but not implemented downstream.
4962 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4963 av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4964 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
4965 for (i = 0; i < 7; i++)
4966 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4967 sd->coefs[sprite][i] / (1<<16),
4968 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4969 av_log(avctx, AV_LOG_DEBUG, "\n");
// Assignment inside the condition is intentional: parse-and-test.
4973 if (sd->effect_type = get_bits_long(gb, 30)) {
4974 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
4976 vc1_sprite_parse_transform(gb, sd->effect_params1);
4979 vc1_sprite_parse_transform(gb, sd->effect_params1);
4980 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
4983 for (i = 0; i < sd->effect_pcount1; i++)
4984 sd->effect_params1[i] = get_fp_val(gb);
4986 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
4987 // effect 13 is simple alpha blending and matches the opacity above
4988 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
4989 for (i = 0; i < sd->effect_pcount1; i++)
4990 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
4991 sd->effect_params1[i] / (1 << 16),
4992 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
4993 av_log(avctx, AV_LOG_DEBUG, "\n");
// Second effect-parameter block; capped at the effect_params2[10] size.
4996 sd->effect_pcount2 = get_bits(gb, 16);
4997 if (sd->effect_pcount2 > 10) {
4998 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5000 } else if (sd->effect_pcount2) {
5002 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5003 while (++i < sd->effect_pcount2) {
5004 sd->effect_params2[i] = get_fp_val(gb);
5005 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5006 sd->effect_params2[i] / (1 << 16),
5007 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5009 av_log(avctx, AV_LOG_DEBUG, "\n");
// Assignment inside the condition is intentional: parse-and-test.
5012 if (sd->effect_flag = get_bits1(gb))
5013 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
// WMV3IMAGE carries 64 trailing padding bits, hence the offset.
5015 if (get_bits_count(gb) >= gb->size_in_bits +
5016 (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
5017 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5018 if (get_bits_count(gb) < gb->size_in_bits - 8)
5019 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
// Composite one or two sprites into v->sprite_output_frame using the
// parsed 16.16 transforms: per plane, per output row, horizontally scale
// the needed source line(s) (cached in v->sr_rows), then vertically
// interpolate and, for two sprites, alpha-blend them.
// NOTE(review): this listing has lines elided (declarations such as ysub,
// several braces, and the function tail); code kept byte-identical.
5022 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5024 int i, plane, row, sprite;
// sr_cache[sprite][j] remembers which source line sr_rows[sprite][j]
// currently holds; -1 means empty.
5025 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5026 uint8_t* src_h[2][2];
5027 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5029 MpegEncContext *s = &v->s;
// Clamp offsets/advances so sampling never leaves the sprite source.
5031 for (i = 0; i < 2; i++) {
5032 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5033 xadv[i] = sd->coefs[i][0];
5034 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5035 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5037 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5038 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
5040 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
// Luma only under CODEC_FLAG_GRAY; chroma planes are half-size (>>1).
5042 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5043 int width = v->output_width>>!!plane;
5045 for (row = 0; row < v->output_height>>!!plane; row++) {
5046 uint8_t *dst = v->sprite_output_frame.data[plane] +
5047 v->sprite_output_frame.linesize[plane] * row;
5049 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5050 uint8_t *iplane = s->current_picture.f.data[plane];
5051 int iline = s->current_picture.f.linesize[plane];
5052 int ycoord = yoff[sprite] + yadv[sprite] * row;
5053 int yline = ycoord >> 16;
5054 ysub[sprite] = ycoord & 0xFFFF;
// NOTE(review): the condition selecting last_picture (second sprite)
// appears to be elided from this listing.
5056 iplane = s->last_picture.f.data[plane];
5057 iline = s->last_picture.f.linesize[plane];
// Fast path: integer offset and unit advance — read source directly.
5059 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5060 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5062 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + (yline + 1) * iline;
// Slow path: horizontally rescale into sr_rows, reusing cached lines.
5064 if (sr_cache[sprite][0] != yline) {
5065 if (sr_cache[sprite][1] == yline) {
5066 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5067 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5069 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5070 sr_cache[sprite][0] = yline;
5073 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5074 v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane + (yline + 1) * iline, xoff[sprite], xadv[sprite], width);
5075 sr_cache[sprite][1] = yline + 1;
5077 src_h[sprite][0] = v->sr_rows[sprite][0];
5078 src_h[sprite][1] = v->sr_rows[sprite][1];
// Vertical stage: single sprite interpolates/copies; two sprites blend
// with alpha, choosing the DSP variant by which rows need scaling.
5082 if (!v->two_sprites) {
5084 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5086 memcpy(dst, src_h[0][0], width);
5089 if (ysub[0] && ysub[1]) {
5090 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5091 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5092 } else if (ysub[0]) {
5093 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5094 src_h[1][0], alpha, width);
5095 } else if (ysub[1]) {
5096 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5097 src_h[0][0], (1<<16)-1-alpha, width);
5099 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5105 for (i = 0; i < 2; i++) {
// Entry point for a WMV Image sprite frame: parse the sprite header,
// validate that the needed reference picture(s) exist, (re)acquire the
// sprite output buffer, and composite via vc1_draw_sprites.
// NOTE(review): error-return statements and braces are elided in this
// listing; code kept byte-identical to the excerpt.
5115 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5117 MpegEncContext *s = &v->s;
5118 AVCodecContext *avctx = s->avctx;
5121 vc1_parse_sprites(v, gb, &sd);
5123 if (!s->current_picture.f.data[0]) {
5124 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5128 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5129 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
// Release any previously held output frame before requesting a new one.
5133 if (v->sprite_output_frame.data[0])
5134 avctx->release_buffer(avctx, &v->sprite_output_frame);
5136 v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
5137 v->sprite_output_frame.reference = 0;
5138 if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
5139 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5143 vc1_draw_sprites(v, &sd);
// Flush callback for the sprite (WMV Image) decoders: clear the current
// picture to black (luma 0, chroma 128) so a missing sprite after a seek
// shows as black rather than stale data.
// NOTE(review): braces and a guard condition appear elided in this
// listing; code kept byte-identical to the excerpt.
5148 static void vc1_sprite_flush(AVCodecContext *avctx)
5150 VC1Context *v = avctx->priv_data;
5151 MpegEncContext *s = &v->s;
5152 AVFrame *f = &s->current_picture.f;
5155 /* Windows Media Image codecs have a convergence interval of two keyframes.
5156 Since we can't enforce it, clear to black the missing sprite. This is
5157 wrong but it looks better than doing nothing. */
5160 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5161 for (i = 0; i < v->sprite_height>>!!plane; i++)
5162 memset(f->data[plane] + i * f->linesize[plane],
5163 plane ? 128 : 0, f->linesize[plane]);
// Allocate all per-sequence side tables: MB bitplanes, CBP/TTBLK/intra/
// luma-MV row caches, block-type maps laid out to be indexable through
// s->block_index[], and the field-mode MV type/parity arrays. Returns -1
// on allocation failure (checked at the end for most pointers).
// NOTE(review): braces and the final return paths are elided in this
// listing; code kept byte-identical to the excerpt.
5168 static av_cold int vc1_decode_init_alloc_tables(VC1Context *v)
5170 MpegEncContext *s = &v->s;
5173 /* Allocate mb bitplanes */
5174 v->mv_type_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5175 v->direct_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5176 v->forward_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5177 v->fieldtx_plane = av_mallocz(s->mb_stride * s->mb_height);
5178 v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5179 v->over_flags_plane = av_malloc (s->mb_stride * s->mb_height);
// One spare block column on each side for the delayed-output ring.
5181 v->n_allocated_blks = s->mb_width + 2;
5182 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
// *_base holds two MB rows; the working pointer starts at the second row.
5183 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5184 v->cbp = v->cbp_base + s->mb_stride;
5185 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5186 v->ttblk = v->ttblk_base + s->mb_stride;
5187 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5188 v->is_intra = v->is_intra_base + s->mb_stride;
5189 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5190 v->luma_mv = v->luma_mv_base + s->mb_stride;
5192 /* allocate block type info in that way so it could be used with s->block_index[] */
5193 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5194 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5195 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5196 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5198 /* allocate memory to store block level MV info */
5199 v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5200 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5201 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5202 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5203 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5204 v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5205 v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5206 v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5207 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5208 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5209 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5211 /* Init coded blocks info */
5212 if (v->profile == PROFILE_ADVANCED) {
5213 //     if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5215 //     if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5219 ff_intrax8_common_init(&v->x8,s);
// Horizontal rescale scratch rows for the sprite (image) decoders.
5221 if (s->avctx->codec_id == CODEC_ID_WMV3IMAGE || s->avctx->codec_id == CODEC_ID_VC1IMAGE) {
5222 for (i = 0; i < 4; i++)
5223 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
// Collective NULL check for the allocations above.
5226 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5227 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5234 /** Initialize a VC1/WMV3 decoder
5235  * @todo TODO: Handle VC-1 IDUs (Transport level?)
5236  * @todo TODO: Decypher remaining bits in extra_data
// Decoder init: chooses pixel format/hwaccel, parses the sequence header
// (and, for VC-1, the entry point) out of extradata, sets up scan tables,
// and records sprite dimensions for the image codecs.
// NOTE(review): braces, error returns, and some declarations are elided
// in this listing; code kept byte-identical to the excerpt.
5238 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5240 VC1Context *v = avctx->priv_data;
5241 MpegEncContext *s = &v->s;
5245 /* save the container output size for WMImage */
5246 v->output_width = avctx->width;
5247 v->output_height = avctx->height;
5249 if (!avctx->extradata_size || !avctx->extradata)
5251 if (!(avctx->flags & CODEC_FLAG_GRAY))
5252 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5254 avctx->pix_fmt = PIX_FMT_GRAY8;
5255 avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
5257 avctx->flags |= CODEC_FLAG_EMU_EDGE;
5258 v->s.flags |= CODEC_FLAG_EMU_EDGE;
5260 if (avctx->idct_algo == FF_IDCT_AUTO) {
5261 avctx->idct_algo = FF_IDCT_WMV2;
5264 if (vc1_init_common(v) < 0)
5266 ff_vc1dsp_init(&v->vc1dsp);
5268 if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE) {
5271 // looks like WMV3 has a sequence header stored in the extradata
5272 // advanced sequence header may be before the first frame
5273 // the last byte of the extradata is a version number, 1 for the
5274 // samples we can decode
5276 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5278 if (vc1_decode_sequence_header(avctx, v, &gb) < 0)
5281 count = avctx->extradata_size*8 - get_bits_count(&gb);
5283 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5284 count, get_bits(&gb, count));
5285 } else if (count < 0) {
5286 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5288 } else { // VC1/WVC1/WVP2
// Advanced profile: scan the start-code-delimited extradata for the
// sequence header and entry point, unescaping each chunk first.
5289 const uint8_t *start = avctx->extradata;
5290 uint8_t *end = avctx->extradata + avctx->extradata_size;
5291 const uint8_t *next;
5292 int size, buf2_size;
5293 uint8_t *buf2 = NULL;
5294 int seq_initialized = 0, ep_initialized = 0;
5296 if (avctx->extradata_size < 16) {
5297 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5301 buf2  = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5302 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5304 for (; next < end; start = next) {
5305 next = find_next_marker(start + 4, end);
5306 size = next - start - 4;
5309 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5310 init_get_bits(&gb, buf2, buf2_size * 8);
5311 switch (AV_RB32(start)) {
5312 case VC1_CODE_SEQHDR:
5313 if (vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5317 seq_initialized = 1;
5319 case VC1_CODE_ENTRYPOINT:
5320 if (vc1_decode_entry_point(avctx, v, &gb) < 0) {
5329 if (!seq_initialized || !ep_initialized) {
5330 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5333 v->res_sprite = (avctx->codec_tag == MKTAG('W','V','P','2'));
5336 avctx->profile = v->profile;
5337 if (v->profile == PROFILE_ADVANCED)
5338 avctx->level = v->level;
5340 avctx->has_b_frames = !!(avctx->max_b_frames);
5342 s->mb_width  = (avctx->coded_width  + 15) >> 4;
5343 s->mb_height = (avctx->coded_height + 15) >> 4;
// Advanced/fasttx variants need the WMV1 scan tables transposed.
5345 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5346 for (i = 0; i < 64; i++) {
5347 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5348 v->zz_8x8[0][i] = transpose(wmv1_scantable[0][i]);
5349 v->zz_8x8[1][i] = transpose(wmv1_scantable[1][i]);
5350 v->zz_8x8[2][i] = transpose(wmv1_scantable[2][i]);
5351 v->zz_8x8[3][i] = transpose(wmv1_scantable[3][i]);
5352 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5357 memcpy(v->zz_8x8, wmv1_scantable, 4*64);
// Image codecs decode at sprite size but output at container size.
5362 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5363 v->sprite_width  = avctx->coded_width;
5364 v->sprite_height = avctx->coded_height;
5366 avctx->coded_width  = avctx->width  = v->output_width;
5367 avctx->coded_height = avctx->height = v->output_height;
5369 // prevent 16.16 overflows
5370 if (v->sprite_width  > 1 << 14 ||
5371 v->sprite_height > 1 << 14 ||
5372 v->output_width  > 1 << 14 ||
5373 v->output_height > 1 << 14) return -1;
5378 /** Close a VC1/WMV3 decoder
5379  * @warning Initial try at using MpegEncContext stuff
// Frees every table allocated in vc1_decode_init_alloc_tables, releases
// the sprite output frame and scratch rows, and tears down the shared
// MpegEncContext and X8 state. Mirrors the allocations above one-to-one.
// NOTE(review): closing brace/return elided in this listing; code kept
// byte-identical to the excerpt.
5381 static av_cold int vc1_decode_end(AVCodecContext *avctx)
5383 VC1Context *v = avctx->priv_data;
5386 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5387 && v->sprite_output_frame.data[0])
5388 avctx->release_buffer(avctx, &v->sprite_output_frame);
5389 for (i = 0; i < 4; i++)
5390 av_freep(&v->sr_rows[i >> 1][i & 1]);
5391 av_freep(&v->hrd_rate);
5392 av_freep(&v->hrd_buffer);
5393 MPV_common_end(&v->s);
5394 av_freep(&v->mv_type_mb_plane);
5395 av_freep(&v->direct_mb_plane);
5396 av_freep(&v->forward_mb_plane);
5397 av_freep(&v->fieldtx_plane);
5398 av_freep(&v->acpred_plane);
5399 av_freep(&v->over_flags_plane);
5400 av_freep(&v->mb_type_base);
5401 av_freep(&v->blk_mv_type_base);
5402 av_freep(&v->mv_f_base);
5403 av_freep(&v->mv_f_last_base);
5404 av_freep(&v->mv_f_next_base);
5405 av_freep(&v->block);
5406 av_freep(&v->cbp_base);
5407 av_freep(&v->ttblk_base);
5408 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5409 av_freep(&v->luma_mv_base);
5410 ff_intrax8_common_end(&v->x8);
5415 /** Decode a VC1/WMV3 frame
5416 * @todo TODO: Handle VC-1 IDUs (Transport level?)
/*
 * Top-level decode callback: splits and unescapes the (advanced-profile)
 * bitstream into frame/field/slice chunks, parses the picture header,
 * decodes every slice, and hands the finished picture back via *pict.
 * NOTE(review): many lines are elided in this chunk (missing braces,
 * gotos, and the declarations of 'slices', 'buf_size2'/'buf_size3',
 * 'tmp', 'size'); comments describe only the visible statements.
 */
5418 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5419 int *data_size, AVPacket *avpkt)
5421 const uint8_t *buf = avpkt->data;
5422 int buf_size = avpkt->size, n_slices = 0, i;
5423 VC1Context *v = avctx->priv_data;
5424 MpegEncContext *s = &v->s;
5425 AVFrame *pict = data;
5426 uint8_t *buf2 = NULL;
5427 uint8_t *buf_field2 = NULL;
5428 const uint8_t *buf_start = buf;
5429 int mb_height, n_slices1;
5436 /* no supplementary picture */
5437 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5438 /* special case for last picture */
/* Flush the delayed reference frame at end of stream. */
5439 if (s->low_delay == 0 && s->next_picture_ptr) {
5440 *pict = *(AVFrame*)s->next_picture_ptr;
5441 s->next_picture_ptr = NULL;
5443 *data_size = sizeof(AVFrame);
/* Pick the VDPAU pixel format matching the stream profile when the
 * hardware-accelerated codec variant is in use. */
5449 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5450 if (v->profile < PROFILE_ADVANCED)
5451 avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
5453 avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
5456 //for advanced profile we may need to parse and unescape data
5457 if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5459 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5461 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5462 const uint8_t *start, *end, *next;
/* Walk the packet one start-code-delimited unit at a time. */
5466 for (start = buf, end = buf + buf_size; next < end; start = next) {
5467 next = find_next_marker(start + 4, end);
5468 size = next - start - 4;
5469 if (size <= 0) continue;
5470 switch (AV_RB32(start)) {
5471 case VC1_CODE_FRAME:
/* Hardware decoders consume the raw (escaped) bitstream. */
5472 if (avctx->hwaccel ||
5473 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5475 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
/* A FIELD unit marks the start of the second field of an interlaced
 * frame; it is queued as an extra slice entry. */
5477 case VC1_CODE_FIELD: {
5479 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5482 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5483 if (!slices[n_slices].buf)
5485 buf_size3 = vc1_unescape_buffer(start + 4, size,
5486 slices[n_slices].buf);
5487 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5489 /* assuming that the field marker is at the exact middle,
5490 hope it's correct */
5491 slices[n_slices].mby_start = s->mb_height >> 1;
5492 n_slices1 = n_slices - 1; // index of the last slice of the first field
5494 // not necessary, ad hoc until I find a way to handle WVC1i
5495 buf_field2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5496 vc1_unescape_buffer(start + 4, size, buf_field2);
5499 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5500 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5501 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5502 vc1_decode_entry_point(avctx, v, &s->gb);
/* Regular slice: unescape it and record its starting MB row, which the
 * slice header stores as a 9-bit field. */
5504 case VC1_CODE_SLICE: {
5506 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5509 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5510 if (!slices[n_slices].buf)
5512 buf_size3 = vc1_unescape_buffer(start + 4, size,
5513 slices[n_slices].buf);
5514 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5516 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5522 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5523 const uint8_t *divider;
5525 divider = find_next_marker(buf, buf + buf_size);
5526 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5527 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5529 } else { // found field marker, unescape second field
5530 buf_field2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5531 vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, buf_field2);
5533 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
/* No marker at the start: the whole packet is one escaped frame. */
5535 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5537 init_get_bits(&s->gb, buf2, buf_size2*8);
/* Simple/main profile: read straight from the packet, no unescaping. */
5539 init_get_bits(&s->gb, buf, buf_size*8);
/* WMVP/WVP2 sprite streams prepend two control bits to the picture. */
5541 if (v->res_sprite) {
5542 v->new_sprite = !get_bits1(&s->gb);
5543 v->two_sprites = get_bits1(&s->gb);
5544 /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
5545 we're using the sprite compositor. These are intentionally kept separate
5546 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5547 the vc1 one for WVP2 */
5548 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5549 if (v->new_sprite) {
5550 // switch AVCodecContext parameters to those of the sprites
5551 avctx->width = avctx->coded_width = v->sprite_width;
5552 avctx->height = avctx->coded_height = v->sprite_height;
/* Coded dimensions changed (e.g. new sprite size): close and re-init
 * the whole decoding context before continuing. */
5559 if (s->context_initialized &&
5560 (s->width != avctx->coded_width ||
5561 s->height != avctx->coded_height)) {
5562 vc1_decode_end(avctx);
5565 if (!s->context_initialized) {
5566 if (ff_msmpeg4_decode_init(avctx) < 0 || vc1_decode_init_alloc_tables(v) < 0)
5569 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5571 if (v->profile == PROFILE_ADVANCED) {
5572 s->h_edge_pos = avctx->coded_width;
5573 s->v_edge_pos = avctx->coded_height;
5577 /* We need to set current_picture_ptr before reading the header,
5578 * otherwise we cannot store anything in there. */
5579 if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5580 int i = ff_find_unused_picture(s, 0);
5581 s->current_picture_ptr = &s->picture[i];
5584 // do parse frame header
5585 v->pic_header_flag = 0;
5586 if (v->profile < PROFILE_ADVANCED) {
5587 if (vc1_parse_frame_header(v, &s->gb) == -1) {
5591 if (vc1_parse_frame_header_adv(v, &s->gb) == -1) {
/* The sprite compositor can only start from an intra frame. */
5596 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5597 && s->pict_type != AV_PICTURE_TYPE_I) {
5598 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5602 // process pulldown flags
5603 s->current_picture_ptr->f.repeat_pict = 0;
5604 // Pulldown flags are only valid when 'broadcast' has been set.
5605 // So ticks_per_frame will be 2
5608 s->current_picture_ptr->f.repeat_pict = 1;
5609 } else if (v->rptfrm) {
5611 s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5614 // for skipping the frame
5615 s->current_picture.f.pict_type = s->pict_type;
5616 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5618 /* skip B-frames if we don't have reference frames */
5619 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) {
/* Honor the caller's frame-skipping policy. */
5622 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5623 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5624 avctx->skip_frame >= AVDISCARD_ALL) {
5628 if (s->next_p_frame_damaged) {
5629 if (s->pict_type == AV_PICTURE_TYPE_B)
5632 s->next_p_frame_damaged = 0;
5635 if (MPV_frame_start(s, avctx) < 0) {
5639 s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
5640 s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
/* Hardware paths hand the whole (escaped) frame to VDPAU/hwaccel and
 * bypass the software slice loop below. */
5642 if ((CONFIG_VC1_VDPAU_DECODER)
5643 &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5644 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5645 else if (avctx->hwaccel) {
5646 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5648 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5650 if (avctx->hwaccel->end_frame(avctx) < 0)
5653 ff_er_frame_start(s);
5655 v->bits = buf_size * 8;
/* Field pictures: double the line sizes so each field is decoded with
 * an interleaved stride, and rotate the per-field MV history buffers
 * (mv_f <- mv_f_last <- mv_f_next <- mv_f). */
5656 if (v->field_mode) {
5658 s->current_picture.f.linesize[0] <<= 1;
5659 s->current_picture.f.linesize[1] <<= 1;
5660 s->current_picture.f.linesize[2] <<= 1;
5662 s->uvlinesize <<= 1;
5663 tmp[0] = v->mv_f_last[0];
5664 tmp[1] = v->mv_f_last[1];
5665 v->mv_f_last[0] = v->mv_f_next[0];
5666 v->mv_f_last[1] = v->mv_f_next[1];
5667 v->mv_f_next[0] = v->mv_f[0];
5668 v->mv_f_next[1] = v->mv_f[1];
5669 v->mv_f[0] = tmp[0];
5670 v->mv_f[1] = tmp[1];
/* Decode each queued slice (and each field of a field picture). */
5672 mb_height = s->mb_height >> v->field_mode;
5673 for (i = 0; i <= n_slices; i++) {
/* Entries past the first field's MB rows belong to the second field:
 * switch the block/MB offsets to address its half of the frame. */
5674 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5675 v->second_field = 1;
5676 v->blocks_off = s->mb_width * s->mb_height << 1;
5677 v->mb_off = s->mb_stride * s->mb_height >> 1;
5679 v->second_field = 0;
5684 v->pic_header_flag = 0;
/* The second field carries a full picture header; other slices may
 * carry an optional repeated header signalled by one bit. */
5685 if (v->field_mode && i == n_slices1 + 2)
5686 vc1_parse_frame_header_adv(v, &s->gb);
5687 else if (get_bits1(&s->gb)) {
5688 v->pic_header_flag = 1;
5689 vc1_parse_frame_header_adv(v, &s->gb);
/* Clamp the MB row range of this slice into [start_mb_y, end_mb_y). */
5692 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5693 if (!v->field_mode || v->second_field)
5694 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5696 s->end_mb_y = (i == n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5697 vc1_decode_blocks(v);
/* Continue reading from the next slice's private bit reader. */
5699 s->gb = slices[i].gb;
5701 if (v->field_mode) {
5702 av_free(buf_field2);
5703 v->second_field = 0;
/* Undo the field-mode stride doubling; for B-fields also copy the
 * next-field MV plane back so reference state stays consistent. */
5705 if (v->field_mode) {
5706 if (s->pict_type == AV_PICTURE_TYPE_B) {
5707 memcpy(v->mv_f_base, v->mv_f_next_base,
5708 2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5710 s->current_picture.f.linesize[0] >>= 1;
5711 s->current_picture.f.linesize[1] >>= 1;
5712 s->current_picture.f.linesize[2] >>= 1;
5714 s->uvlinesize >>= 1;
5716 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits);
5717 // if (get_bits_count(&s->gb) > buf_size * 8)
/* Image codecs: restore the output dimensions and run the sprite
 * compositor, returning the composited frame instead of the raw one. */
5724 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5726 avctx->width = avctx->coded_width = v->output_width;
5727 avctx->height = avctx->coded_height = v->output_height;
5728 if (avctx->skip_frame >= AVDISCARD_NONREF)
5730 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5731 if (vc1_decode_sprites(v, &s->gb))
5734 *pict = v->sprite_output_frame;
5735 *data_size = sizeof(AVFrame);
/* Normal output ordering: B-frames / low-delay streams emit the
 * current picture, otherwise the previously decoded reference. */
5737 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5738 *pict = *(AVFrame*)s->current_picture_ptr;
5739 } else if (s->last_picture_ptr != NULL) {
5740 *pict = *(AVFrame*)s->last_picture_ptr;
5742 if (s->last_picture_ptr || s->low_delay) {
5743 *data_size = sizeof(AVFrame);
5744 ff_print_debug_info(s, pict);
/* Free the per-slice unescape buffers on the success path... */
5750 for (i = 0; i < n_slices; i++)
5751 av_free(slices[i].buf);
/* ...and again on the error path (the label introducing it is
 * presumably among the elided lines — NOTE(review)). */
5757 for (i = 0; i < n_slices; i++)
5758 av_free(slices[i].buf);
5760 av_free(buf_field2);
/* Human-readable VC-1 profile names exposed through the AVCodec
 * .profiles field; the FF_PROFILE_UNKNOWN entry is the sentinel
 * terminator. NOTE(review): the closing "};" appears elided here. */
5765 static const AVProfile profiles[] = {
5766 { FF_PROFILE_VC1_SIMPLE, "Simple" },
5767 { FF_PROFILE_VC1_MAIN, "Main" },
5768 { FF_PROFILE_VC1_COMPLEX, "Complex" },
5769 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5770 { FF_PROFILE_UNKNOWN },
/* Software SMPTE VC-1 decoder registration. DR1 = supports direct
 * rendering into caller buffers; DELAY = may hold back reordered
 * (B-)frames. NOTE(review): the .name/.id initializers and closing
 * "};" appear elided from this chunk. */
5773 AVCodec ff_vc1_decoder = {
5775 .type = AVMEDIA_TYPE_VIDEO,
5777 .priv_data_size = sizeof(VC1Context),
5778 .init = vc1_decode_init,
5779 .close = vc1_decode_end,
5780 .decode = vc1_decode_frame,
5781 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5782 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5783 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5784 .profiles = NULL_IF_CONFIG_SMALL(profiles)
/* WMV3 (Windows Media Video 9) decoder: identical callbacks to the
 * VC-1 decoder, registered under CODEC_ID_WMV3. NOTE(review): the
 * .name initializer and closing "};"/"#endif" appear elided. */
5787 #if CONFIG_WMV3_DECODER
5788 AVCodec ff_wmv3_decoder = {
5790 .type = AVMEDIA_TYPE_VIDEO,
5791 .id = CODEC_ID_WMV3,
5792 .priv_data_size = sizeof(VC1Context),
5793 .init = vc1_decode_init,
5794 .close = vc1_decode_end,
5795 .decode = vc1_decode_frame,
5796 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5797 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5798 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5799 .profiles = NULL_IF_CONFIG_SMALL(profiles)
/* VDPAU hardware-accelerated WMV3 variant: advertises only the
 * PIX_FMT_VDPAU_WMV3 format and the HWACCEL_VDPAU capability so
 * vc1_decode_frame() takes the ff_vdpau_vc1_decode_picture() path.
 * NOTE(review): closing "};"/"#endif" appear elided. */
5803 #if CONFIG_WMV3_VDPAU_DECODER
5804 AVCodec ff_wmv3_vdpau_decoder = {
5805 .name = "wmv3_vdpau",
5806 .type = AVMEDIA_TYPE_VIDEO,
5807 .id = CODEC_ID_WMV3,
5808 .priv_data_size = sizeof(VC1Context),
5809 .init = vc1_decode_init,
5810 .close = vc1_decode_end,
5811 .decode = vc1_decode_frame,
5812 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5813 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5814 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE},
5815 .profiles = NULL_IF_CONFIG_SMALL(profiles)
/* VDPAU hardware-accelerated VC-1 variant (PIX_FMT_VDPAU_VC1 only).
 * NOTE(review): the .id initializer and closing "};"/"#endif" appear
 * elided from this chunk. */
5819 #if CONFIG_VC1_VDPAU_DECODER
5820 AVCodec ff_vc1_vdpau_decoder = {
5821 .name = "vc1_vdpau",
5822 .type = AVMEDIA_TYPE_VIDEO,
5824 .priv_data_size = sizeof(VC1Context),
5825 .init = vc1_decode_init,
5826 .close = vc1_decode_end,
5827 .decode = vc1_decode_frame,
5828 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5829 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5830 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE},
5831 .profiles = NULL_IF_CONFIG_SMALL(profiles)
/* WMV3 "image" (sprite compositor) decoder for WMVP streams: no DELAY
 * capability (intra-only sprites), and a flush callback to reset the
 * compositor state on seek. NOTE(review): closing "};"/"#endif"
 * appear elided. */
5835 #if CONFIG_WMV3IMAGE_DECODER
5836 AVCodec ff_wmv3image_decoder = {
5837 .name = "wmv3image",
5838 .type = AVMEDIA_TYPE_VIDEO,
5839 .id = CODEC_ID_WMV3IMAGE,
5840 .priv_data_size = sizeof(VC1Context),
5841 .init = vc1_decode_init,
5842 .close = vc1_decode_end,
5843 .decode = vc1_decode_frame,
5844 .capabilities = CODEC_CAP_DR1,
5845 .flush = vc1_sprite_flush,
5846 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5847 .pix_fmts = ff_pixfmt_list_420
5851 #if CONFIG_VC1IMAGE_DECODER
5852 AVCodec ff_vc1image_decoder = {
5854 .type = AVMEDIA_TYPE_VIDEO,
5855 .id = CODEC_ID_VC1IMAGE,
5856 .priv_data_size = sizeof(VC1Context),
5857 .init = vc1_decode_init,
5858 .close = vc1_decode_end,
5859 .decode = vc1_decode_frame,
5860 .capabilities = CODEC_CAP_DR1,
5861 .flush = vc1_sprite_flush,
5862 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5863 .pix_fmts = ff_pixfmt_list_420