2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of FFmpeg.
9 * FFmpeg is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * FFmpeg is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with FFmpeg; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
36 #include "vc1acdata.h"
37 #include "msmpeg4data.h"
39 #include "simple_idct.h"
41 #include "vdpau_internal.h"
46 #define MB_INTRA_VLC_BITS 9
/* Start offsets of every statically-initialized VLC table inside the single
 * shared vlc_table[] backing store used by vc1_init_common().  Consecutive
 * entries also define each table's capacity (vlc_offs[k+1] - vlc_offs[k]),
 * so the order of init_vlc() calls must match this table exactly.
 * NOTE(review): the closing "};" of this array is not visible in this chunk. */
51 static const uint16_t vlc_offs[] = {
52 0, 520, 552, 616, 1128, 1160, 1224, 1740, 1772, 1836, 1900, 2436,
53 2986, 3050, 3610, 4154, 4218, 4746, 5326, 5390, 5902, 6554, 7658, 8342,
54 9304, 9988, 10630, 11234, 12174, 13006, 13560, 14232, 14786, 15432, 16350, 17522,
55 20372, 21818, 22330, 22394, 23166, 23678, 23742, 24820, 25332, 25396, 26460, 26980,
56 27048, 27592, 27600, 27608, 27616, 27624, 28224, 28258, 28290, 28802, 28834, 28866,
57 29378, 29412, 29444, 29960, 29994, 30026, 30538, 30572, 30604, 31120, 31154, 31186,
58 31714, 31746, 31778, 32306, 32340, 32372
61 // offset tables for interlaced picture MVDATA decoding
/* table1 holds 2^(k-1)-style offsets, table2 holds 2^k - 1 offsets; they are
 * indexed by the escape-size class read from the MVDATA VLC.
 * NOTE(review): exact consumer not visible in this chunk — confirm usage. */
62 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
63 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
66 * Init VC-1 specific tables and VC1Context members
67 * @param v The VC1Context to initialize
/* Builds all static VLC tables used by the decoder.  The INIT_VLC_STATIC
 * tables own their storage; every init_vlc(..., INIT_VLC_USE_NEW_STATIC)
 * call below instead points .table into a slice of the shared vlc_table[]
 * array, with slice boundaries taken from vlc_offs[]. */
70 static int vc1_init_common(VC1Context *v)
/* One shared backing store for all NEW_STATIC tables; 32372 is the last
 * entry of vlc_offs[], i.e. the total size of all slices. */
74 static VLC_TYPE vlc_table[32372][2];
76 v->hrd_rate = v->hrd_buffer = NULL;
/* Fixed, variant-free tables: B-fraction, 2/6-symbol norm and bitplane imode */
80 INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
81 ff_vc1_bfraction_bits, 1, 1,
82 ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS);
83 INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
84 ff_vc1_norm2_bits, 1, 1,
85 ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS);
86 INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
87 ff_vc1_norm6_bits, 1, 1,
88 ff_vc1_norm6_codes, 2, 2, 556);
89 INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
90 ff_vc1_imode_bits, 1, 1,
91 ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS);
/* 3 variants each of the TTMB / TTBLK / SUBBLKPAT transform-type tables,
 * occupying vlc_offs[0..9] in interleaved order */
92 for (i = 0; i < 3; i++) {
93 ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 0]];
94 ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i * 3 + 1] - vlc_offs[i * 3 + 0];
95 init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
96 ff_vc1_ttmb_bits[i], 1, 1,
97 ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
98 ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 1]];
99 ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i * 3 + 2] - vlc_offs[i * 3 + 1];
100 init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
101 ff_vc1_ttblk_bits[i], 1, 1,
102 ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
103 ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 2]];
104 ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i * 3 + 3] - vlc_offs[i * 3 + 2];
105 init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
106 ff_vc1_subblkpat_bits[i], 1, 1,
107 ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* 4 variants each of 4MV block pattern / P-frame CBPCY / MV-diff tables,
 * vlc_offs[9..21] */
109 for (i = 0; i < 4; i++) {
110 ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 9]];
111 ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i * 3 + 10] - vlc_offs[i * 3 + 9];
112 init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
113 ff_vc1_4mv_block_pattern_bits[i], 1, 1,
114 ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
115 ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 10]];
116 ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i * 3 + 11] - vlc_offs[i * 3 + 10];
117 init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
118 ff_vc1_cbpcy_p_bits[i], 1, 1,
119 ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
120 ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 11]];
121 ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i * 3 + 12] - vlc_offs[i * 3 + 11];
122 init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
123 ff_vc1_mv_diff_bits[i], 1, 1,
124 ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
/* 8 AC coefficient tables plus 8 two-reference interlaced MVDATA tables,
 * vlc_offs[21..37] */
126 for (i = 0; i < 8; i++) {
127 ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i * 2 + 21]];
128 ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i * 2 + 22] - vlc_offs[i * 2 + 21];
129 init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
/* codes/bits interleaved in vc1_ac_tables: [.][.][1] = bits, [.][.][0] = codes */
130 &vc1_ac_tables[i][0][1], 8, 4,
131 &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC);
132 /* initialize interlaced MVDATA tables (2-Ref) */
133 ff_vc1_2ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 2 + 22]];
134 ff_vc1_2ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 2 + 23] - vlc_offs[i * 2 + 22];
135 init_vlc(&ff_vc1_2ref_mvdata_vlc[i], VC1_2REF_MVDATA_VLC_BITS, 126,
136 ff_vc1_2ref_mvdata_bits[i], 1, 1,
137 ff_vc1_2ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
/* Interlaced frame P-picture MBMODE (4MV and non-4MV) and 1-ref MVDATA,
 * vlc_offs[37..49] */
139 for (i = 0; i < 4; i++) {
140 /* initialize 4MV MBMODE VLC tables for interlaced frame P picture */
141 ff_vc1_intfr_4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 37]];
142 ff_vc1_intfr_4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 38] - vlc_offs[i * 3 + 37];
143 init_vlc(&ff_vc1_intfr_4mv_mbmode_vlc[i], VC1_INTFR_4MV_MBMODE_VLC_BITS, 15,
144 ff_vc1_intfr_4mv_mbmode_bits[i], 1, 1,
145 ff_vc1_intfr_4mv_mbmode_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
146 /* initialize NON-4MV MBMODE VLC tables for the same */
147 ff_vc1_intfr_non4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 38]];
148 ff_vc1_intfr_non4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 39] - vlc_offs[i * 3 + 38];
149 init_vlc(&ff_vc1_intfr_non4mv_mbmode_vlc[i], VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 9,
150 ff_vc1_intfr_non4mv_mbmode_bits[i], 1, 1,
151 ff_vc1_intfr_non4mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
152 /* initialize interlaced MVDATA tables (1-Ref) */
153 ff_vc1_1ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 39]];
154 ff_vc1_1ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 3 + 40] - vlc_offs[i * 3 + 39];
155 init_vlc(&ff_vc1_1ref_mvdata_vlc[i], VC1_1REF_MVDATA_VLC_BITS, 72,
156 ff_vc1_1ref_mvdata_bits[i], 1, 1,
157 ff_vc1_1ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
/* 2MV block pattern tables, vlc_offs[49..53] */
159 for (i = 0; i < 4; i++) {
160 /* Initialize 2MV Block pattern VLC tables */
161 ff_vc1_2mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i + 49]];
162 ff_vc1_2mv_block_pattern_vlc[i].table_allocated = vlc_offs[i + 50] - vlc_offs[i + 49];
163 init_vlc(&ff_vc1_2mv_block_pattern_vlc[i], VC1_2MV_BLOCK_PATTERN_VLC_BITS, 4,
164 ff_vc1_2mv_block_pattern_bits[i], 1, 1,
165 ff_vc1_2mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* Interlaced CBPCY and field-picture MBMODE tables, vlc_offs[53..] */
167 for (i = 0; i < 8; i++) {
168 /* Initialize interlaced CBPCY VLC tables (Table 124 - Table 131) */
169 ff_vc1_icbpcy_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 53]];
170 ff_vc1_icbpcy_vlc[i].table_allocated = vlc_offs[i * 3 + 54] - vlc_offs[i * 3 + 53];
171 init_vlc(&ff_vc1_icbpcy_vlc[i], VC1_ICBPCY_VLC_BITS, 63,
172 ff_vc1_icbpcy_p_bits[i], 1, 1,
173 ff_vc1_icbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
174 /* Initialize interlaced field picture MBMODE VLC tables */
175 ff_vc1_if_mmv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 54]];
176 ff_vc1_if_mmv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 55] - vlc_offs[i * 3 + 54];
177 init_vlc(&ff_vc1_if_mmv_mbmode_vlc[i], VC1_IF_MMV_MBMODE_VLC_BITS, 8,
178 ff_vc1_if_mmv_mbmode_bits[i], 1, 1,
179 ff_vc1_if_mmv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
180 ff_vc1_if_1mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 55]];
181 ff_vc1_if_1mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 56] - vlc_offs[i * 3 + 55];
182 init_vlc(&ff_vc1_if_1mv_mbmode_vlc[i], VC1_IF_1MV_MBMODE_VLC_BITS, 6,
183 ff_vc1_if_1mv_mbmode_bits[i], 1, 1,
184 ff_vc1_if_1mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* Reset MV range to default; annotation cites spec section 7.1.1.18 */
191 v->mvrange = 0; /* 7.1.1.18, p80 */
196 /***********************************************************************/
198 * @name VC-1 Bitplane decoding
216 /** @} */ //imode defines
219 /** @} */ //Bitplane group
/* Flush decoded residual blocks to the output picture, clamped to [0,255].
 * Runs one MB row and one MB column behind decoding (see comment below),
 * using the top-left / top / left delayed block buffers rotated at the end. */
221 static void vc1_put_signed_blocks_clamped(VC1Context *v)
223 MpegEncContext *s = &v->s;
224 int topleft_mb_pos, top_mb_pos;
225 int stride_y, fieldtx;
228 /* The put pixels loop is always one MB row behind the decoding loop,
229 * because we can only put pixels when overlap filtering is done, and
230 * for filtering of the bottom edge of a MB, we need the next MB row
232 * Within the row, the put pixels loop is also one MB col behind the
233 * decoding loop. The reason for this is again, because for filtering
234 * of the right MB edge, we need the next MB present. */
235 if (!s->first_slice_line) {
237 topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
238 fieldtx = v->fieldtx_plane[topleft_mb_pos];
/* Field-transformed MBs interleave the two fields, so luma rows are written
 * at double stride and the "lower" blocks start one line down, not 8 */
239 stride_y = s->linesize << fieldtx;
240 v_dist = (16 - fieldtx) >> (fieldtx == 0);
241 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
242 s->dest[0] - 16 * s->linesize - 16,
244 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
245 s->dest[0] - 16 * s->linesize - 8,
247 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
248 s->dest[0] - v_dist * s->linesize - 16,
250 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
251 s->dest[0] - v_dist * s->linesize - 8,
253 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
254 s->dest[1] - 8 * s->uvlinesize - 8,
256 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
257 s->dest[2] - 8 * s->uvlinesize - 8,
/* Last MB of the row: also flush the block directly above (no right
 * neighbour will ever trigger it) */
260 if (s->mb_x == s->mb_width - 1) {
261 top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
262 fieldtx = v->fieldtx_plane[top_mb_pos];
263 stride_y = s->linesize << fieldtx;
264 v_dist = fieldtx ? 15 : 8;
265 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
266 s->dest[0] - 16 * s->linesize,
268 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
269 s->dest[0] - 16 * s->linesize + 8,
271 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
272 s->dest[0] - v_dist * s->linesize,
274 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
275 s->dest[0] - v_dist * s->linesize + 8,
277 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
278 s->dest[1] - 8 * s->uvlinesize,
280 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
281 s->dest[2] - 8 * s->uvlinesize,
/* Rotate the delayed-block ring indices (wraps at n_allocated_blks) */
286 #define inc_blk_idx(idx) do { \
288 if (idx >= v->n_allocated_blks) \
292 inc_blk_idx(v->topleft_blk_idx);
293 inc_blk_idx(v->top_blk_idx);
294 inc_blk_idx(v->left_blk_idx);
295 inc_blk_idx(v->cur_blk_idx);
/* In-loop deblocking for intra blocks (non-delayed variant): filters the
 * edges of the current MB against already-decoded neighbours with the
 * picture quantizer pq controlling filter strength. */
298 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
300 MpegEncContext *s = &v->s;
/* Top MB row has no upper neighbour, so skip the cross-MB edges there */
302 if (!s->first_slice_line) {
303 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
305 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
306 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
307 for (j = 0; j < 2; j++) {
308 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
310 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Internal horizontal luma edge between the 8x8 block rows of this MB */
313 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
/* Last MB row: the bottom MB's own vertical edges are filtered here since
 * no following row will do it */
315 if (s->mb_y == s->end_mb_y - 1) {
317 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
318 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
319 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
/* Internal vertical luma edge between the 8x8 block columns */
321 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/* Delayed in-loop deblocking for intra blocks: same edges as
 * vc1_loop_filter_iblk() but shifted by one MB row/col because the overlap
 * filter itself runs one MB behind decoding — hence the -16/-32 offsets. */
325 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
327 MpegEncContext *s = &v->s;
330 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
331 * means it runs two rows/cols behind the decoding loop. */
332 if (!s->first_slice_line) {
/* Only once two full rows exist is the row-above-previous safe to filter */
334 if (s->mb_y >= s->start_mb_y + 2) {
335 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
338 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
339 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
340 for (j = 0; j < 2; j++) {
341 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
343 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
347 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
/* End of row: filter the column that has no right neighbour */
350 if (s->mb_x == s->mb_width - 1) {
351 if (s->mb_y >= s->start_mb_y + 2) {
352 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
355 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
356 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
357 for (j = 0; j < 2; j++) {
358 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
360 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
364 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
/* Final pass after the last decoded row (note: == end_mb_y, not -1, because
 * this function trails the decode loop by one row) */
367 if (s->mb_y == s->end_mb_y) {
370 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
371 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
373 for (j = 0; j < 2; j++) {
374 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
379 if (s->mb_x == s->mb_width - 1) {
381 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
382 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
384 for (j = 0; j < 2; j++) {
385 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Conditional overlap smoothing for intra blocks (advanced profile).
 * Filtering is gated per-MB by condover / pq >= 9 / the OVERFLAGS bitplane,
 * and operates on the delayed block buffers, not the picture itself. */
393 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
395 MpegEncContext *s = &v->s;
398 if (v->condover == CONDOVER_NONE)
401 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
403 /* Within a MB, the horizontal overlap always runs before the vertical.
404 * To accomplish that, we run the H on left and internal borders of the
405 * currently decoded MB. Then, we wait for the next overlap iteration
406 * to do H overlap on the right edge of this MB, before moving over and
407 * running the V overlap. Therefore, the V overlap makes us trail by one
408 * MB col and the H overlap filter makes us trail by one MB row. This
409 * is reflected in the time at which we run the put_pixels loop. */
410 if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
/* H overlap across the left MB boundary — both this MB and the left
 * neighbour must be flagged for filtering */
411 if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
412 v->over_flags_plane[mb_pos - 1])) {
413 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
414 v->block[v->cur_blk_idx][0]);
415 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
416 v->block[v->cur_blk_idx][2]);
417 if (!(s->flags & CODEC_FLAG_GRAY)) {
418 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
419 v->block[v->cur_blk_idx][4]);
420 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
421 v->block[v->cur_blk_idx][5]);
/* H overlap between the two luma block columns inside this MB */
424 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
425 v->block[v->cur_blk_idx][1]);
426 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
427 v->block[v->cur_blk_idx][3]);
/* Last MB of the row: run the V overlap for this (rightmost) column now,
 * since no further iteration will reach it */
429 if (s->mb_x == s->mb_width - 1) {
430 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
431 v->over_flags_plane[mb_pos - s->mb_stride])) {
432 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
433 v->block[v->cur_blk_idx][0]);
434 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
435 v->block[v->cur_blk_idx][1]);
436 if (!(s->flags & CODEC_FLAG_GRAY)) {
437 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
438 v->block[v->cur_blk_idx][4]);
439 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
440 v->block[v->cur_blk_idx][5]);
443 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
444 v->block[v->cur_blk_idx][2]);
445 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
446 v->block[v->cur_blk_idx][3]);
/* NOTE(review): unlike the parallel gates above, this condition omits the
 * "v->pq >= 9" test — verify against the spec/upstream before relying on it */
449 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
450 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
451 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
452 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
453 v->block[v->left_blk_idx][0]);
454 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
455 v->block[v->left_blk_idx][1]);
456 if (!(s->flags & CODEC_FLAG_GRAY)) {
457 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
458 v->block[v->left_blk_idx][4]);
459 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
460 v->block[v->left_blk_idx][5]);
463 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
464 v->block[v->left_blk_idx][2]);
465 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
466 v->block[v->left_blk_idx][3]);
470 /** Do motion compensation over 1 macroblock
471 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
* @param v   decoder context
* @param dir 0 = forward (last picture), 1 = backward (next picture)
*/
473 static void vc1_mc_1mv(VC1Context *v, int dir)
475 MpegEncContext *s = &v->s;
476 DSPContext *dsp = &v->s.dsp;
477 uint8_t *srcY, *srcU, *srcV;
478 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
/* In field mode only half the picture height is addressable */
480 int v_edge_pos = s->v_edge_pos >> v->field_mode;
481 if (!v->field_mode && !v->s.last_picture.f.data[0])
484 mx = s->mv[dir][0][0];
485 my = s->mv[dir][0][1];
487 // store motion vectors for further use in B frames
488 if (s->pict_type == AV_PICTURE_TYPE_P) {
489 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
490 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
/* Derive the chroma MV from the luma MV: halve, rounding 3/4-pel up */
493 uvmx = (mx + ((mx & 3) == 3)) >> 1;
494 uvmy = (my + ((my & 3) == 3)) >> 1;
495 v->luma_mv[s->mb_x][0] = uvmx;
496 v->luma_mv[s->mb_x][1] = uvmy;
/* Opposite-parity field reference: bias the vertical MV by +/-2 qpel */
499 v->cur_field_type != v->ref_field_type[dir]) {
500 my = my - 2 + 4 * v->cur_field_type;
501 uvmy = uvmy - 2 + 4 * v->cur_field_type;
504 // fastuvmc shall be ignored for interlaced frame picture
505 if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
/* Round chroma MVs toward zero to the nearest even qpel position */
506 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
507 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
509 if (v->field_mode) { // interlaced field picture
/* Second field referencing the opposite parity reads from the picture
 * currently being reconstructed (its first field is already decoded) */
511 if ((v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
512 srcY = s->current_picture.f.data[0];
513 srcU = s->current_picture.f.data[1];
514 srcV = s->current_picture.f.data[2];
516 srcY = s->last_picture.f.data[0];
517 srcU = s->last_picture.f.data[1];
518 srcV = s->last_picture.f.data[2];
521 srcY = s->next_picture.f.data[0];
522 srcU = s->next_picture.f.data[1];
523 srcV = s->next_picture.f.data[2];
527 srcY = s->last_picture.f.data[0];
528 srcU = s->last_picture.f.data[1];
529 srcV = s->last_picture.f.data[2];
531 srcY = s->next_picture.f.data[0];
532 srcU = s->next_picture.f.data[1];
533 srcV = s->next_picture.f.data[2];
/* Integer-pel source position; MVs are quarter-pel */
537 src_x = s->mb_x * 16 + (mx >> 2);
538 src_y = s->mb_y * 16 + (my >> 2);
539 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
540 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
542 if (v->profile != PROFILE_ADVANCED) {
543 src_x = av_clip( src_x, -16, s->mb_width * 16);
544 src_y = av_clip( src_y, -16, s->mb_height * 16);
545 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
546 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
548 src_x = av_clip( src_x, -17, s->avctx->coded_width);
549 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
550 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
551 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
554 srcY += src_y * s->linesize + src_x;
555 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
556 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* Bottom-field reference: step down one (frame) line into the bottom field */
558 if (v->field_mode && v->ref_field_type[dir]) {
559 srcY += s->current_picture_ptr->f.linesize[0];
560 srcU += s->current_picture_ptr->f.linesize[1];
561 srcV += s->current_picture_ptr->f.linesize[2];
564 /* for grayscale we should not try to read from unknown area */
565 if (s->flags & CODEC_FLAG_GRAY) {
566 srcU = s->edge_emu_buffer + 18 * s->linesize;
567 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* Edge emulation: needed when the block reads outside the padded picture,
 * or when the pixels must be modified (range reduction / intensity comp) */
570 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
571 || s->h_edge_pos < 22 || v_edge_pos < 22
572 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
573 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my&3) - 16 - s->mspel * 3) {
574 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
576 srcY -= s->mspel * (1 + s->linesize);
577 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
578 17 + s->mspel * 2, 17 + s->mspel * 2,
579 src_x - s->mspel, src_y - s->mspel,
580 s->h_edge_pos, v_edge_pos);
581 srcY = s->edge_emu_buffer;
582 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
583 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
584 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
585 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
588 /* if we deal with range reduction we need to scale source blocks */
589 if (v->rangeredfrm) {
/* Halve the deviation from 128 on the emulated-edge copy (luma ...) */
594 for (j = 0; j < 17 + s->mspel * 2; j++) {
595 for (i = 0; i < 17 + s->mspel * 2; i++)
596 src[i] = ((src[i] - 128) >> 1) + 128;
/* ... and both chroma planes */
601 for (j = 0; j < 9; j++) {
602 for (i = 0; i < 9; i++) {
603 src[i] = ((src[i] - 128) >> 1) + 128;
604 src2[i] = ((src2[i] - 128) >> 1) + 128;
606 src += s->uvlinesize;
607 src2 += s->uvlinesize;
610 /* if we deal with intensity compensation we need to scale source blocks */
611 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
/* Remap samples through the transmitted LUTs (luty for luma, lutuv chroma) */
616 for (j = 0; j < 17 + s->mspel * 2; j++) {
617 for (i = 0; i < 17 + s->mspel * 2; i++)
618 src[i] = v->luty[src[i]];
623 for (j = 0; j < 9; j++) {
624 for (i = 0; i < 9; i++) {
625 src[i] = v->lutuv[src[i]];
626 src2[i] = v->lutuv[src2[i]];
628 src += s->uvlinesize;
629 src2 += s->uvlinesize;
/* Undo the pre-filter offset applied before emulated_edge_mc above */
632 srcY += s->mspel * (1 + s->linesize);
/* Second field is written one line below the first in the frame buffer */
635 if (v->field_mode && v->second_field) {
636 off = s->current_picture_ptr->f.linesize[0];
637 off_uv = s->current_picture_ptr->f.linesize[1];
/* Quarter-pel path: 4 8x8 mspel interpolations cover the 16x16 luma MB */
643 dxy = ((my & 3) << 2) | (mx & 3);
644 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
645 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
646 srcY += s->linesize * 8;
647 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
648 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
649 } else { // hpel mc - always used for luma
650 dxy = (my & 2) | ((mx & 2) >> 1);
652 dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
654 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
657 if (s->flags & CODEC_FLAG_GRAY) return;
658 /* Chroma MC always uses qpel bilinear */
659 uvmx = (uvmx & 3) << 1;
660 uvmy = (uvmy & 3) << 1;
662 dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
663 dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
665 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
666 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Median of four integers, defined as the truncating average of the two
 * middle values (i.e. discard the overall minimum and maximum, average
 * the rest with C integer division).
 *
 * As transmitted, both if/else pairs returned unconditionally, leaving the
 * second pair unreachable; the missing "a < b" discriminator selecting
 * between them (and the enclosing braces) is restored here.  Written with
 * plain ternaries instead of FFMIN/FFMAX so the function is self-contained;
 * the expressions are exact equivalents.
 */
static inline int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d)
            return ((b < d ? b : d) + (a > c ? a : c)) / 2;
        return ((b < c ? b : c) + (a > d ? a : d)) / 2;
    }
    if (c < d)
        return ((a < d ? a : d) + (b > c ? b : c)) / 2;
    return ((a < c ? a : c) + (b > d ? b : d)) / 2;
}
681 /** Do motion compensation for 4-MV macroblock - luminance block
* @param v   decoder context
* @param n   luma block index within the MB (0..3)
* @param dir 0 = forward reference, 1 = backward reference
*/
683 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
685 MpegEncContext *s = &v->s;
686 DSPContext *dsp = &v->s.dsp;
688 int dxy, mx, my, src_x, src_y;
/* Interlaced frame pictures may use per-block field MVs */
690 int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
691 int v_edge_pos = s->v_edge_pos >> v->field_mode;
693 if (!v->field_mode && !v->s.last_picture.f.data[0])
696 mx = s->mv[dir][n][0];
697 my = s->mv[dir][n][1];
/* Select the reference plane, same parity logic as vc1_mc_1mv() */
701 if ((v->cur_field_type != v->ref_field_type[dir]) && v->second_field)
702 srcY = s->current_picture.f.data[0];
704 srcY = s->last_picture.f.data[0];
706 srcY = s->last_picture.f.data[0];
708 srcY = s->next_picture.f.data[0];
711 if (v->cur_field_type != v->ref_field_type[dir])
712 my = my - 2 + 4 * v->cur_field_type;
/* After the 4th luma MV of a field-mode P MB, derive and store the MB-level
 * MV used for prediction: majority field parity wins, then median/mid/avg
 * depending on how many MVs share that parity */
715 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
716 int same_count = 0, opp_count = 0, k;
717 int chosen_mv[2][4][2], f;
719 for (k = 0; k < 4; k++) {
720 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
721 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
722 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
726 f = opp_count > same_count;
727 switch (f ? opp_count : same_count) {
729 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
730 chosen_mv[f][2][0], chosen_mv[f][3][0]);
731 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
732 chosen_mv[f][2][1], chosen_mv[f][3][1]);
735 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
736 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
739 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
740 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
743 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
744 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
745 for (k = 0; k < 4; k++)
746 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
749 if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
/* Pull MVs that point too far outside the coded area back in */
751 int width = s->avctx->coded_width;
752 int height = s->avctx->coded_height >> 1;
753 qx = (s->mb_x * 16) + (mx >> 2);
754 qy = (s->mb_y * 8) + (my >> 3);
759 mx -= 4 * (qx - width);
762 else if (qy > height + 1)
763 my -= 8 * (qy - height - 1);
/* Destination offset of this 8x8 block; with field MVs blocks 2/3 sit on
 * the second field line, not 8 lines down */
766 if ((v->fcm == ILACE_FRAME) && fieldmv)
767 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
769 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
770 if (v->field_mode && v->second_field)
771 off += s->current_picture_ptr->f.linesize[0];
773 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
775 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
777 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
779 if (v->profile != PROFILE_ADVANCED) {
780 src_x = av_clip(src_x, -16, s->mb_width * 16);
781 src_y = av_clip(src_y, -16, s->mb_height * 16);
783 src_x = av_clip(src_x, -17, s->avctx->coded_width);
784 if (v->fcm == ILACE_FRAME) {
786 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
788 src_y = av_clip(src_y, -18, s->avctx->coded_height);
790 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
794 srcY += src_y * s->linesize + src_x;
795 if (v->field_mode && v->ref_field_type[dir])
796 srcY += s->current_picture_ptr->f.linesize[0];
/* Field-MV parity fixups near the top edge (bodies not visible here) */
798 if (fieldmv && !(src_y & 1))
800 if (fieldmv && (src_y & 1) && src_y < 4)
/* Same edge-emulation criteria as vc1_mc_1mv(), for an 8x8 block */
802 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
803 || s->h_edge_pos < 13 || v_edge_pos < 23
804 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
805 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
806 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
807 /* check emulate edge stride and offset */
808 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
809 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
810 src_x - s->mspel, src_y - (s->mspel << fieldmv),
811 s->h_edge_pos, v_edge_pos);
812 srcY = s->edge_emu_buffer;
813 /* if we deal with range reduction we need to scale source blocks */
814 if (v->rangeredfrm) {
819 for (j = 0; j < 9 + s->mspel * 2; j++) {
820 for (i = 0; i < 9 + s->mspel * 2; i++)
821 src[i] = ((src[i] - 128) >> 1) + 128;
822 src += s->linesize << fieldmv;
825 /* if we deal with intensity compensation we need to scale source blocks */
826 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
831 for (j = 0; j < 9 + s->mspel * 2; j++) {
832 for (i = 0; i < 9 + s->mspel * 2; i++)
833 src[i] = v->luty[src[i]];
834 src += s->linesize << fieldmv;
837 srcY += s->mspel * (1 + (s->linesize << fieldmv));
/* Quarter-pel (mspel) or half-pel interpolation, as in vc1_mc_1mv() */
841 dxy = ((my & 3) << 2) | (mx & 3);
842 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
843 } else { // hpel mc - always used for luma
844 dxy = (my & 2) | ((mx & 2) >> 1);
846 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
848 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Derive the chroma MV (*tx, *ty) for a 4-MV macroblock from the luma MVs
 * whose marker a[k] equals flag (non-matching blocks are excluded):
 * 4 valid -> median4, 3 valid -> mid_pred, 2 valid -> average.
 * Returns the number of contributing MVs (count[idx]) — the return
 * statements themselves are in lines not visible in this chunk. */
852 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
/* count[idx] = number of zero bits in idx = number of blocks matching flag */
855 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
/* Bit k of idx set <=> block k does NOT match flag (is excluded) */
857 idx = ((a[3] != flag) << 3)
858 | ((a[2] != flag) << 2)
859 | ((a[1] != flag) << 1)
/* All four blocks valid: component-wise median of four */
862 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
863 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
/* Exactly one block excluded: mid_pred over the remaining three,
 * dispatched (in non-visible lines) on which bit of idx is set */
865 } else if (count[idx] == 1) {
868 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
869 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
872 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
873 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
876 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
877 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
880 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
881 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
/* Two valid blocks: locate them (t1, t2) and average */
884 } else if (count[idx] == 2) {
886 for (i = 0; i < 3; i++)
891 for (i = t1 + 1; i < 4; i++)
896 *tx = (mvx[t1] + mvx[t2]) / 2;
897 *ty = (mvy[t1] + mvy[t2]) / 2;
905 /** Do motion compensation for 4-MV macroblock - both chroma blocks
907 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
909 MpegEncContext *s = &v->s;
910 DSPContext *dsp = &v->s.dsp;
911 uint8_t *srcU, *srcV;
912 int uvmx, uvmy, uvsrc_x, uvsrc_y;
913 int k, tx = 0, ty = 0;
914 int mvx[4], mvy[4], intra[4], mv_f[4];
916 int chroma_ref_type = v->cur_field_type, off = 0;
917 int v_edge_pos = s->v_edge_pos >> v->field_mode;
919 if (!v->field_mode && !v->s.last_picture.f.data[0])
921 if (s->flags & CODEC_FLAG_GRAY)
924 for (k = 0; k < 4; k++) {
925 mvx[k] = s->mv[dir][k][0];
926 mvy[k] = s->mv[dir][k][1];
927 intra[k] = v->mb_type[0][s->block_index[k]];
929 mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
932 /* calculate chroma MV vector from four luma MVs */
933 if (!v->field_mode || (v->field_mode && !v->numref)) {
934 valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
936 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
937 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
938 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
939 return; //no need to do MC for intra blocks
943 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
945 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
947 chroma_ref_type = !v->cur_field_type;
949 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
950 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
951 uvmx = (tx + ((tx & 3) == 3)) >> 1;
952 uvmy = (ty + ((ty & 3) == 3)) >> 1;
954 v->luma_mv[s->mb_x][0] = uvmx;
955 v->luma_mv[s->mb_x][1] = uvmy;
958 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
959 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
961 // Field conversion bias
962 if (v->cur_field_type != chroma_ref_type)
963 uvmy += 2 - 4 * chroma_ref_type;
965 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
966 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
968 if (v->profile != PROFILE_ADVANCED) {
969 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
970 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
972 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
973 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
978 if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
979 srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
980 srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
982 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
983 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
986 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
987 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
990 srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
991 srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
995 if (chroma_ref_type) {
996 srcU += s->current_picture_ptr->f.linesize[1];
997 srcV += s->current_picture_ptr->f.linesize[2];
999 off = v->second_field ? s->current_picture_ptr->f.linesize[1] : 0;
1002 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1003 || s->h_edge_pos < 18 || v_edge_pos < 18
1004 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1005 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
1006 s->dsp.emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize,
1007 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1008 s->h_edge_pos >> 1, v_edge_pos >> 1);
1009 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1010 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1011 s->h_edge_pos >> 1, v_edge_pos >> 1);
1012 srcU = s->edge_emu_buffer;
1013 srcV = s->edge_emu_buffer + 16;
1015 /* if we deal with range reduction we need to scale source blocks */
1016 if (v->rangeredfrm) {
1018 uint8_t *src, *src2;
1022 for (j = 0; j < 9; j++) {
1023 for (i = 0; i < 9; i++) {
1024 src[i] = ((src[i] - 128) >> 1) + 128;
1025 src2[i] = ((src2[i] - 128) >> 1) + 128;
1027 src += s->uvlinesize;
1028 src2 += s->uvlinesize;
1031 /* if we deal with intensity compensation we need to scale source blocks */
1032 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1034 uint8_t *src, *src2;
1038 for (j = 0; j < 9; j++) {
1039 for (i = 0; i < 9; i++) {
1040 src[i] = v->lutuv[src[i]];
1041 src2[i] = v->lutuv[src2[i]];
1043 src += s->uvlinesize;
1044 src2 += s->uvlinesize;
1049 /* Chroma MC always uses qpel bilinear */
1050 uvmx = (uvmx & 3) << 1;
1051 uvmy = (uvmy & 3) << 1;
1053 dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1054 dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1056 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1057 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Do motion compensation for 4-MV field chroma macroblock (both U and V).
 * Each of the four luma sub-block MVs gets its own 4x4 chroma prediction
 * fetched from the last picture with qpel-bilinear chroma filtering.
 * NOTE(review): this extract elides source lines (the embedded original line
 * numbers jump), so some else-branches/closing braces are not visible here.
 */
1063 static void vc1_mc_4mv_chroma4(VC1Context *v)
1065 MpegEncContext *s = &v->s;
1066 DSPContext *dsp = &v->s.dsp;
1067 uint8_t *srcU, *srcV;
1068 int uvsrc_x, uvsrc_y;
1069 int uvmx_field[4], uvmy_field[4];
/* fieldmv: nonzero when the current block uses field (not frame) MVs */
1071 int fieldmv = v->blk_mv_type[s->block_index[0]];
/* rounding table used when converting a field luma MV to a chroma MV */
1072 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
1073 int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
1074 int v_edge_pos = s->v_edge_pos >> 1;
/* no reference available -> nothing to predict from (early return elided) */
1076 if (!v->s.last_picture.f.data[0])
1078 if (s->flags & CODEC_FLAG_GRAY)
/* derive one chroma MV per luma sub-block; the (tx & 3) == 3 term implements
 * the VC-1 luma->chroma MV rounding */
1081 for (i = 0; i < 4; i++) {
1082 tx = s->mv[0][i][0];
1083 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1084 ty = s->mv[0][i][1];
/* field-MV path uses the rounding table above (guard condition elided) */
1086 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1088 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
/* one 4x4 chroma MC per sub-block */
1091 for (i = 0; i < 4; i++) {
1092 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1093 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1094 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1095 // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1096 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1097 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1098 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1099 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
/* keep only the fractional (qpel) part; scaled to the chroma filter's range */
1100 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1101 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
/* field parity adjustments near the top edge (bodies elided) */
1103 if (fieldmv && !(uvsrc_y & 1))
1105 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
/* fall back to edge emulation when the 5x5 (or 5x10 interleaved) read
 * window could leave the padded reference area, or when intensity
 * compensation requires a private modifiable copy */
1107 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
1108 || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1109 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1110 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1111 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
1112 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1113 s->h_edge_pos >> 1, v_edge_pos);
1114 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1115 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1116 s->h_edge_pos >> 1, v_edge_pos);
1117 srcU = s->edge_emu_buffer;
1118 srcV = s->edge_emu_buffer + 16;
1120 /* if we deal with intensity compensation we need to scale source blocks */
1121 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
/* NOTE(review): the loop indices used below are declared in an elided
 * line; presumably they shadow the outer i/j — confirm in full source */
1123 uint8_t *src, *src2;
1127 for (j = 0; j < 5; j++) {
1128 for (i = 0; i < 5; i++) {
1129 src[i] = v->lutuv[src[i]];
1130 src2[i] = v->lutuv[src2[i]];
/* stride doubled: rows of one field are interleaved in the frame buffer */
1132 src += s->uvlinesize << 1;
1133 src2 += s->uvlinesize << 1;
/* rounding-dependent 4x4 chroma MC (selection condition elided) */
1138 dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1139 dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1141 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1142 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1147 /***********************************************************************/
1149 * @name VC-1 Block-level functions
1150 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
/**
 * @brief Get the macroblock-level quantizer scale into 'mquant'.
 * Reads DQUANT syntax from the bitstream (gb) according to v->dqprofile:
 * either per-MB (ALL_MBS, bi-level or explicit/differential), or edge-based
 * (SINGLE/DOUBLE/FOUR edges) where boundary MBs use v->altpq.
 * Expects 'mquant', 'mqdiff' and 'edges' to be declared by the caller.
 * NOTE(review): some continuation lines of this macro are elided in this
 * extract (original line numbers jump); do not reflow without full source.
 */
1158 #define GET_MQUANT() \
1159 if (v->dquantfrm) { \
1161 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1162 if (v->dqbilevel) { \
1163 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1165 mqdiff = get_bits(gb, 3); \
1167 mquant = v->pq + mqdiff; \
1169 mquant = get_bits(gb, 5); \
1172 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1173 edges = 1 << v->dqsbedge; \
1174 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1175 edges = (3 << v->dqsbedge) % 15; \
1176 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1178 if ((edges&1) && !s->mb_x) \
1179 mquant = v->altpq; \
1180 if ((edges&2) && s->first_slice_line) \
1181 mquant = v->altpq; \
1182 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1183 mquant = v->altpq; \
1184 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1185 mquant = v->altpq; \
/**
 * @def GET_MVDATA(_dmv_x, _dmv_y)
 * @brief Get MV differentials
 * @see MVDATA decoding from 8.3.5.2, p(1)20
 * @param _dmv_x Horizontal differential for decoded MV
 * @param _dmv_y Vertical differential for decoded MV
 * Decodes an MVDATA VLC: index 0..34 maps through size_table/offset_table
 * (split as index%6 / index/6 for x/y); 35 is the escape with raw k_x/k_y
 * bits; 36 marks an intra MB. Also sets mb_has_coeffs.
 * Expects 'index', 'index1', 'val', 'sign' and 'mb_has_coeffs' in scope.
 * NOTE(review): some continuation lines are elided in this extract.
 */
1195 #define GET_MVDATA(_dmv_x, _dmv_y) \
1196 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1197 VC1_MV_DIFF_VLC_BITS, 2); \
1199 mb_has_coeffs = 1; \
1202 mb_has_coeffs = 0; \
1205 _dmv_x = _dmv_y = 0; \
1206 } else if (index == 35) { \
1207 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1208 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1209 } else if (index == 36) { \
1214 index1 = index % 6; \
1215 if (!s->quarter_sample && index1 == 5) val = 1; \
1217 if (size_table[index1] - val > 0) \
1218 val = get_bits(gb, size_table[index1] - val); \
1220 sign = 0 - (val&1); \
1221 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1223 index1 = index / 6; \
1224 if (!s->quarter_sample && index1 == 5) val = 1; \
1226 if (size_table[index1] - val > 0) \
1227 val = get_bits(gb, size_table[index1] - val); \
1229 sign = 0 - (val & 1); \
1230 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
/**
 * Decode interlaced-picture MVDATA: fills *dmv_x / *dmv_y and, for two-ref
 * fields, *pred_flag (reference-field selector). Uses offset_table1/2 and
 * v->dmvrange-controlled extension bits.
 * NOTE(review): several lines (switch cases, else branches, declarations)
 * are elided in this extract — the structure below is partial.
 */
1233 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1234 int *dmv_y, int *pred_flag)
1237 int extend_x = 0, extend_y = 0;
1238 GetBitContext *gb = &v->s.gb;
1241 const int* offs_tab;
/* two-reference fields use a different VLC table depth */
1244 bits = VC1_2REF_MVDATA_VLC_BITS;
1247 bits = VC1_1REF_MVDATA_VLC_BITS;
/* DMVRANGE selects extended horizontal and/or vertical MV range */
1250 switch (v->dmvrange) {
1258 extend_x = extend_y = 1;
1261 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
/* escape path: raw k_x/k_y-bit MV; low bit of dmv_y doubles as pred_flag */
1263 *dmv_x = get_bits(gb, v->k_x);
1264 *dmv_y = get_bits(gb, v->k_y);
1266 *pred_flag = *dmv_y & 1;
1267 *dmv_y = (*dmv_y + *pred_flag) >> 1;
/* horizontal component: table choice elided, then sign-folded decode */
1272 offs_tab = offset_table2;
1274 offs_tab = offset_table1;
1275 index1 = (index + 1) % 9;
1277 val = get_bits(gb, index1 + extend_x);
1278 sign = 0 -(val & 1);
1279 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
/* vertical component; for numref the low bit of index1 is the pred_flag */
1283 offs_tab = offset_table2;
1285 offs_tab = offset_table1;
1286 index1 = (index + 1) / 9;
1287 if (index1 > v->numref) {
1288 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1289 sign = 0 - (val & 1);
1290 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1294 *pred_flag = index1 & 1;
/**
 * Scale a horizontal MV predictor taken from the same-polarity field so it
 * applies to the current field (field-picture MV prediction).
 * Table row selection: [1]=scalesame1, [2]=scalesame2, [3]=scalezone1_x,
 * [5]=zone1offset_x, indexed by refdist. Result clipped to the MV range.
 * NOTE(review): else-branches between the visible lines are elided.
 */
1298 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1300 int scaledvalue, refdist;
1301 int scalesame1, scalesame2;
1302 int scalezone1_x, zone1offset_x;
1303 int table_index = dir ^ v->second_field;
/* refdist source differs for B pictures (brfd/frfd) vs others (refdist) */
1305 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1306 refdist = v->refdist;
1308 refdist = dir ? v->brfd : v->frfd;
1311 scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1312 scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1313 scalezone1_x = vc1_field_mvpred_scales[table_index][3][refdist];
1314 zone1offset_x = vc1_field_mvpred_scales[table_index][5][refdist];
/* zone 1 (small MV) uses scalesame1; larger MVs use scalesame2 +- offset */
1319 if (FFABS(n) < scalezone1_x)
1320 scaledvalue = (n * scalesame1) >> 8;
1323 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1325 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1328 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/**
 * Vertical counterpart of scaleforsame_x(): scales a same-polarity vertical
 * MV predictor, using table rows [4]=scalezone1_y and [6]=zone1offset_y.
 * The final clip range is biased by one when current and reference fields
 * have opposite parity.
 * NOTE(review): else-branches between the visible lines are elided.
 */
1331 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1333 int scaledvalue, refdist;
1334 int scalesame1, scalesame2;
1335 int scalezone1_y, zone1offset_y;
1336 int table_index = dir ^ v->second_field;
1338 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1339 refdist = v->refdist;
1341 refdist = dir ? v->brfd : v->frfd;
1344 scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1345 scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1346 scalezone1_y = vc1_field_mvpred_scales[table_index][4][refdist];
1347 zone1offset_y = vc1_field_mvpred_scales[table_index][6][refdist];
1352 if (FFABS(n) < scalezone1_y)
1353 scaledvalue = (n * scalesame1) >> 8;
1356 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1358 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
/* asymmetric clip when predicting across field parities */
1362 if (v->cur_field_type && !v->ref_field_type[dir])
1363 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1365 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/**
 * Scale a horizontal MV predictor taken from the opposite-polarity field
 * (B-field pictures), using vc1_b_field_mvpred_scales indexed by the
 * backward reference distance clamped to 3. Clipped to the MV range.
 * NOTE(review): else-branches between the visible lines are elided.
 */
1368 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1370 int scalezone1_x, zone1offset_x;
1371 int scaleopp1, scaleopp2, brfd;
1374 brfd = FFMIN(v->brfd, 3);
1375 scalezone1_x = vc1_b_field_mvpred_scales[3][brfd];
1376 zone1offset_x = vc1_b_field_mvpred_scales[5][brfd];
1377 scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1378 scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
/* small MVs scale by scaleopp1; larger ones by scaleopp2 +- zone offset */
1383 if (FFABS(n) < scalezone1_x)
1384 scaledvalue = (n * scaleopp1) >> 8;
1387 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1389 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1392 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/**
 * Vertical counterpart of scaleforopp_x(): scales an opposite-polarity
 * vertical MV predictor using table rows [4]/[6]; the clip range is biased
 * by one when current and reference fields have opposite parity.
 * NOTE(review): else-branches between the visible lines are elided.
 */
1395 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1397 int scalezone1_y, zone1offset_y;
1398 int scaleopp1, scaleopp2, brfd;
1401 brfd = FFMIN(v->brfd, 3);
1402 scalezone1_y = vc1_b_field_mvpred_scales[4][brfd];
1403 zone1offset_y = vc1_b_field_mvpred_scales[6][brfd];
1404 scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1405 scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
1410 if (FFABS(n) < scalezone1_y)
1411 scaledvalue = (n * scaleopp1) >> 8;
1414 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1416 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1419 if (v->cur_field_type && !v->ref_field_type[dir]) {
1420 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1422 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/**
 * Dispatch same-field MV predictor scaling: for non-B pictures, second
 * fields, or forward direction it defers to scaleforsame_x/_y (component
 * selection condition elided); otherwise it applies the single B-field
 * scalesame factor. hpel compensates for half-pel vs quarter-pel MV units.
 * NOTE(review): interior lines (signature tail, branches) are elided.
 */
1426 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1429 int brfd, scalesame;
1430 int hpel = 1 - v->s.quarter_sample;
1433 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1435 n = scaleforsame_y(v, i, n, dir) << hpel;
1437 n = scaleforsame_x(v, n, dir) << hpel;
1440 brfd = FFMIN(v->brfd, 3);
1441 scalesame = vc1_b_field_mvpred_scales[0][brfd];
1443 n = (n * scalesame >> 8) << hpel;
/**
 * Dispatch opposite-field MV predictor scaling: first fields of B pictures
 * in the backward direction use scaleforopp_x/_y; everything else applies
 * the scaleopp factor from vc1_field_mvpred_scales row 0.
 * NOTE(review): interior lines (signature tail, branches) are elided.
 */
1447 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1450 int refdist, scaleopp;
1451 int hpel = 1 - v->s.quarter_sample;
1454 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1456 n = scaleforopp_y(v, n, dir) << hpel;
1458 n = scaleforopp_x(v, n) << hpel;
1461 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1462 refdist = FFMIN(v->refdist, 3);
1464 refdist = dir ? v->brfd : v->frfd;
1465 scaleopp = vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1467 n = (n * scaleopp >> 8) << hpel;
/**
 * Predict and set the motion vector for luma block n (progressive and
 * field pictures): gathers the A (above), B (above-corner) and C (left)
 * candidate predictors, scales them across field polarities in field mode,
 * takes the component-wise median, applies the MV pull-back of 8.3.5.3.4
 * and the hybrid-prediction override of 8.3.5.3.5, then stores the result
 * with signed-modulus wrapping into s->mv and motion_val (duplicated over
 * all four 8x8 positions for 1-MV macroblocks).
 * NOTE(review): many lines are elided from this extract (else-branches,
 * closing braces, declarations); read alongside the full source.
 */
1473 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1474 int mv1, int r_x, int r_y, uint8_t* is_intra,
1475 int pred_flag, int dir)
1477 MpegEncContext *s = &v->s;
1478 int xy, wrap, off = 0;
1482 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1483 int opposit, a_f, b_f, c_f;
1484 int16_t field_predA[2];
1485 int16_t field_predB[2];
1486 int16_t field_predC[2];
1487 int a_valid, b_valid, c_valid;
1488 int hybridmv_thresh, y_bias = 0;
1490 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1491 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1495 /* scale MV difference to be quad-pel */
1496 dmv_x <<= 1 - s->quarter_sample;
1497 dmv_y <<= 1 - s->quarter_sample;
1499 wrap = s->b8_stride;
1500 xy = s->block_index[n];
/* intra block: zero all stored MVs (guard condition elided) */
1503 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
1504 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
1505 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
1506 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
1507 if (mv1) { /* duplicate motion data for 1-MV block */
1508 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1509 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1510 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1511 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1512 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1513 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1514 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1515 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1516 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
/* NOTE(review): the next line indexes [xy + wrap] without "+ v->blocks_off",
 * unlike every neighbouring store — looks like a field-mode bug; confirm
 * against the spec/upstream before relying on it */
1517 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1518 s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1519 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1520 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
/* candidate predictors: C = left, A = above */
1525 C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
1526 A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
1528 if (v->field_mode && mixedmv_pic)
1529 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1531 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1533 //in 4-MV mode different blocks have different B predictor position
1536 off = (s->mb_x > 0) ? -1 : 1;
1539 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1548 B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
/* candidate availability by picture geometry, then by intra status */
1550 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1551 b_valid = a_valid && (s->mb_width > 1);
1552 c_valid = s->mb_x || (n == 1 || n == 3);
1553 if (v->field_mode) {
1554 a_valid = a_valid && !is_intra[xy - wrap];
1555 b_valid = b_valid && !is_intra[xy - wrap + off];
1556 c_valid = c_valid && !is_intra[xy - 1];
/* tally same/opposite field polarity per valid candidate */
1560 a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1561 num_oppfield += a_f;
1562 num_samefield += 1 - a_f;
1563 field_predA[0] = A[0];
1564 field_predA[1] = A[1];
1566 field_predA[0] = field_predA[1] = 0;
1570 b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1571 num_oppfield += b_f;
1572 num_samefield += 1 - b_f;
1573 field_predB[0] = B[0];
1574 field_predB[1] = B[1];
1576 field_predB[0] = field_predB[1] = 0;
1580 c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1581 num_oppfield += c_f;
1582 num_samefield += 1 - c_f;
1583 field_predC[0] = C[0];
1584 field_predC[1] = C[1];
1586 field_predC[0] = field_predC[1] = 0;
/* field mode: pick the reference field and scale mismatched candidates */
1590 if (v->field_mode) {
1591 if (num_samefield <= num_oppfield)
1592 opposit = 1 - pred_flag;
1594 opposit = pred_flag;
/* opposite-field reference: rescale candidates stored as same-field */
1598 if (a_valid && !a_f) {
1599 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1600 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1602 if (b_valid && !b_f) {
1603 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1604 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1606 if (c_valid && !c_f) {
1607 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1608 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1610 v->mv_f[dir][xy + v->blocks_off] = 1;
1611 v->ref_field_type[dir] = !v->cur_field_type;
/* same-field reference: rescale candidates stored as opposite-field */
1613 if (a_valid && a_f) {
1614 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1615 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1617 if (b_valid && b_f) {
1618 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1619 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1621 if (c_valid && c_f) {
1622 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1623 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1625 v->mv_f[dir][xy + v->blocks_off] = 0;
1626 v->ref_field_type[dir] = v->cur_field_type;
/* single-candidate fallbacks (priority A > C > B; guard elided) */
1630 px = field_predA[0];
1631 py = field_predA[1];
1632 } else if (c_valid) {
1633 px = field_predC[0];
1634 py = field_predC[1];
1635 } else if (b_valid) {
1636 px = field_predB[0];
1637 py = field_predB[1];
/* >=2 candidates: component-wise median */
1643 if (num_samefield + num_oppfield > 1) {
1644 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1645 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1648 /* Pullback MV as specified in 8.3.5.3.4 */
1649 if (!v->field_mode) {
1651 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1652 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1653 X = (s->mb_width << 6) - 4;
1654 Y = (s->mb_height << 6) - 4;
1656 if (qx + px < -60) px = -60 - qx;
1657 if (qy + py < -60) py = -60 - qy;
1659 if (qx + px < -28) px = -28 - qx;
1660 if (qy + py < -28) py = -28 - qy;
1662 if (qx + px > X) px = X - qx;
1663 if (qy + py > Y) py = Y - qy;
1666 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1667 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1668 hybridmv_thresh = 32;
1669 if (a_valid && c_valid) {
1670 if (is_intra[xy - wrap])
1671 sum = FFABS(px) + FFABS(py);
1673 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1674 if (sum > hybridmv_thresh) {
1675 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1676 px = field_predA[0];
1677 py = field_predA[1];
1679 px = field_predC[0];
1680 py = field_predC[1];
1683 if (is_intra[xy - 1])
1684 sum = FFABS(px) + FFABS(py);
1686 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1687 if (sum > hybridmv_thresh) {
1688 if (get_bits1(&s->gb)) {
1689 px = field_predA[0];
1690 py = field_predA[1];
1692 px = field_predC[0];
1693 py = field_predC[1];
/* field-mode adjustments (bodies elided): hpel rounding, range, y bias */
1700 if (v->field_mode && !s->quarter_sample) {
1704 if (v->field_mode && v->numref)
1706 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1708 /* store MV using signed modulus of MV range defined in 4.11 */
1709 s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1710 s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1711 if (mv1) { /* duplicate motion data for 1-MV block */
1712 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1713 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1714 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1715 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1716 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1717 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1718 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1719 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
/**
 * Predict and set the motion vector for luma block n in interlaced FRAME
 * picture macroblocks. Builds A/B/C candidate predictors, averaging a
 * neighbour's two field MVs when the current block uses frame MVs and the
 * neighbour uses field MVs (and vice versa picking by parity). Combines
 * candidates by median or by same/opposite-field majority, then stores the
 * result with signed-modulus wrapping, duplicating over 8x8 positions for
 * 1-MV (mvn == 1) and 2-field-MV (mvn == 2) macroblocks.
 * NOTE(review): many lines are elided from this extract (validity updates,
 * else branches, closing braces); read alongside the full source.
 */
1725 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1726 int mvn, int r_x, int r_y, uint8_t* is_intra)
1728 MpegEncContext *s = &v->s;
1729 int xy, wrap, off = 0;
1730 int A[2], B[2], C[2];
1732 int a_valid = 0, b_valid = 0, c_valid = 0;
1733 int field_a, field_b, field_c; // 0: same, 1: opposit
1734 int total_valid, num_samefield, num_oppfield;
1735 int pos_c, pos_b, n_adj;
1737 wrap = s->b8_stride;
1738 xy = s->block_index[n];
/* intra macroblock: zero all stored MVs (guard condition elided) */
1741 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
1742 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
1743 s->current_picture.f.motion_val[1][xy][0] = 0;
1744 s->current_picture.f.motion_val[1][xy][1] = 0;
1745 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1746 s->current_picture.f.motion_val[0][xy + 1][0] = 0;
1747 s->current_picture.f.motion_val[0][xy + 1][1] = 0;
1748 s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
1749 s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
1750 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
1751 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
1752 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1753 s->current_picture.f.motion_val[1][xy + 1][0] = 0;
1754 s->current_picture.f.motion_val[1][xy + 1][1] = 0;
1755 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1756 s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
1757 s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
1758 s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
/* Predict A (left neighbour); off points to the other field row */
1763 off = ((n == 0) || (n == 1)) ? 1 : -1;
1765 if (s->mb_x || (n == 1) || (n == 3)) {
1766 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1767 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1768 A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
1769 A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
1771 } else { // current block has frame mv and cand. has field MV (so average)
1772 A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
1773 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
1774 A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
1775 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
/* left MB intra invalidates A for left-column blocks (body elided) */
1778 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1784 /* Predict B and C */
1785 B[0] = B[1] = C[0] = C[1] = 0;
1786 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1787 if (!s->first_slice_line) {
1788 if (!v->is_intra[s->mb_x - s->mb_stride]) {
/* B: block above; parity-matched when both use field MVs, averaged when
 * the neighbour has field MVs and the current block has frame MVs */
1791 pos_b = s->block_index[n_adj] - 2 * wrap;
1792 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1793 n_adj = (n & 2) | (n & 1);
1795 B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
1796 B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
1797 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1798 B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1799 B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
/* C: block above-right; falls back to above-left at the last MB column */
1802 if (s->mb_width > 1) {
1803 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1806 pos_c = s->block_index[2] - 2 * wrap + 2;
1807 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1810 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
1811 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
1812 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1813 C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1814 C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1816 if (s->mb_x == s->mb_width - 1) {
1817 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1820 pos_c = s->block_index[3] - 2 * wrap - 2;
1821 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1824 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
1825 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
1826 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1827 C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1828 C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
/* bottom half of a frame-MV MB: B/C come from this MB's own top blocks */
1837 pos_b = s->block_index[1];
1839 B[0] = s->current_picture.f.motion_val[0][pos_b][0];
1840 B[1] = s->current_picture.f.motion_val[0][pos_b][1];
1841 pos_c = s->block_index[0];
1843 C[0] = s->current_picture.f.motion_val[0][pos_c][0];
1844 C[1] = s->current_picture.f.motion_val[0][pos_c][1];
1847 total_valid = a_valid + b_valid + c_valid;
1848 // check if predictor A is out of bounds
1849 if (!s->mb_x && !(n == 1 || n == 3)) {
1852 // check if predictor B is out of bounds
1853 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1854 B[0] = B[1] = C[0] = C[1] = 0;
/* frame-MV blocks: plain median / single-candidate selection */
1856 if (!v->blk_mv_type[xy]) {
1857 if (s->mb_width == 1) {
1861 if (total_valid >= 2) {
1862 px = mid_pred(A[0], B[0], C[0]);
1863 py = mid_pred(A[1], B[1], C[1]);
1864 } else if (total_valid) {
1865 if (a_valid) { px = A[0]; py = A[1]; }
1866 if (b_valid) { px = B[0]; py = B[1]; }
1867 if (c_valid) { px = C[0]; py = C[1]; }
/* field-MV blocks: classify candidates by parity (bit 2 of the y MV) */
1873 field_a = (A[1] & 4) ? 1 : 0;
1877 field_b = (B[1] & 4) ? 1 : 0;
1881 field_c = (C[1] & 4) ? 1 : 0;
1885 num_oppfield = field_a + field_b + field_c;
1886 num_samefield = total_valid - num_oppfield;
1887 if (total_valid == 3) {
1888 if ((num_samefield == 3) || (num_oppfield == 3)) {
1889 px = mid_pred(A[0], B[0], C[0]);
1890 py = mid_pred(A[1], B[1], C[1]);
1891 } else if (num_samefield >= num_oppfield) {
1892 /* take one MV from same field set depending on priority
1893 the check for B may not be necessary */
1894 px = !field_a ? A[0] : B[0];
1895 py = !field_a ? A[1] : B[1];
1897 px = field_a ? A[0] : B[0];
1898 py = field_a ? A[1] : B[1];
1900 } else if (total_valid == 2) {
1901 if (num_samefield >= num_oppfield) {
1902 if (!field_a && a_valid) {
1905 } else if (!field_b && b_valid) {
1908 } else if (c_valid) {
1913 if (field_a && a_valid) {
1916 } else if (field_b && b_valid) {
1919 } else if (c_valid) {
1924 } else if (total_valid == 1) {
1925 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1926 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1931 /* store MV using signed modulus of MV range defined in 4.11 */
1932 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1933 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1934 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1935 s->current_picture.f.motion_val[0][xy + 1 ][0] = s->current_picture.f.motion_val[0][xy][0];
1936 s->current_picture.f.motion_val[0][xy + 1 ][1] = s->current_picture.f.motion_val[0][xy][1];
1937 s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
1938 s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
1939 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1940 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1941 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1942 s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1943 s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1944 s->mv[0][n + 1][0] = s->mv[0][n][0];
1945 s->mv[0][n + 1][1] = s->mv[0][n][1];
1949 /** Motion compensation for direct or interpolated blocks in B-frames
1951 static void vc1_interp_mc(VC1Context *v)
1953 MpegEncContext *s = &v->s;
1954 DSPContext *dsp = &v->s.dsp;
1955 uint8_t *srcY, *srcU, *srcV;
1956 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1958 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1960 if (!v->field_mode && !v->s.next_picture.f.data[0])
1963 mx = s->mv[1][0][0];
1964 my = s->mv[1][0][1];
1965 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1966 uvmy = (my + ((my & 3) == 3)) >> 1;
1967 if (v->field_mode) {
1968 if (v->cur_field_type != v->ref_field_type[1])
1969 my = my - 2 + 4 * v->cur_field_type;
1970 uvmy = uvmy - 2 + 4 * v->cur_field_type;
1973 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1974 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1976 srcY = s->next_picture.f.data[0];
1977 srcU = s->next_picture.f.data[1];
1978 srcV = s->next_picture.f.data[2];
1980 src_x = s->mb_x * 16 + (mx >> 2);
1981 src_y = s->mb_y * 16 + (my >> 2);
1982 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1983 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1985 if (v->profile != PROFILE_ADVANCED) {
1986 src_x = av_clip( src_x, -16, s->mb_width * 16);
1987 src_y = av_clip( src_y, -16, s->mb_height * 16);
1988 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1989 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1991 src_x = av_clip( src_x, -17, s->avctx->coded_width);
1992 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1993 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1994 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1997 srcY += src_y * s->linesize + src_x;
1998 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1999 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2001 if (v->field_mode && v->ref_field_type[1]) {
2002 srcY += s->current_picture_ptr->f.linesize[0];
2003 srcU += s->current_picture_ptr->f.linesize[1];
2004 srcV += s->current_picture_ptr->f.linesize[2];
2007 /* for grayscale we should not try to read from unknown area */
2008 if (s->flags & CODEC_FLAG_GRAY) {
2009 srcU = s->edge_emu_buffer + 18 * s->linesize;
2010 srcV = s->edge_emu_buffer + 18 * s->linesize;
2013 if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22
2014 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 16 - s->mspel * 3
2015 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my & 3) - 16 - s->mspel * 3) {
2016 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
2018 srcY -= s->mspel * (1 + s->linesize);
2019 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
2020 17 + s->mspel * 2, 17 + s->mspel * 2,
2021 src_x - s->mspel, src_y - s->mspel,
2022 s->h_edge_pos, v_edge_pos);
2023 srcY = s->edge_emu_buffer;
2024 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
2025 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2026 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
2027 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2030 /* if we deal with range reduction we need to scale source blocks */
2031 if (v->rangeredfrm) {
2033 uint8_t *src, *src2;
2036 for (j = 0; j < 17 + s->mspel * 2; j++) {
2037 for (i = 0; i < 17 + s->mspel * 2; i++)
2038 src[i] = ((src[i] - 128) >> 1) + 128;
2043 for (j = 0; j < 9; j++) {
2044 for (i = 0; i < 9; i++) {
2045 src[i] = ((src[i] - 128) >> 1) + 128;
2046 src2[i] = ((src2[i] - 128) >> 1) + 128;
2048 src += s->uvlinesize;
2049 src2 += s->uvlinesize;
2052 srcY += s->mspel * (1 + s->linesize);
2055 if (v->field_mode && v->second_field) {
2056 off = s->current_picture_ptr->f.linesize[0];
2057 off_uv = s->current_picture_ptr->f.linesize[1];
2064 dxy = ((my & 3) << 2) | (mx & 3);
2065 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2066 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2067 srcY += s->linesize * 8;
2068 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2069 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2071 dxy = (my & 2) | ((mx & 2) >> 1);
2074 dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2076 dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2079 if (s->flags & CODEC_FLAG_GRAY) return;
2080 /* Chroma MC always uses qpel bilinear */
2081 uvmx = (uvmx & 3) << 1;
2082 uvmy = (uvmy & 3) << 1;
2084 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2085 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2087 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2088 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2092 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2096 #if B_FRACTION_DEN==256
2100 return 2 * ((value * n + 255) >> 9);
2101 return (value * n + 128) >> 8;
2104 n -= B_FRACTION_DEN;
2106 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2107 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
/**
 * Scale a co-located MV for interlaced field B-pictures.
 * Like scale_mv() but additionally takes qs_last (quarter-sample flag of the
 * reference picture). Only the shift-based rounding returns are visible in
 * this elided excerpt; the surrounding branch lines are missing.
 */
2111 static av_always_inline int scale_mv_intfi(int value, int bfrac, int inv,
2112 int qs, int qs_last)
2120 return (value * n + 255) >> 9;
2122 return (value * n + 128) >> 8;
2125 /** Reconstruct motion vector for B-frame and do motion compensation
 * @param v      VC1Context
 * @param dmv_x  forward/backward MV x differentials
 * @param dmv_y  forward/backward MV y differentials
 * @param direct direct-mode flag
 * @param mode   BMV_TYPE_* (forward / backward / interpolated)
 */
2127 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2128 int direct, int mode)
/* Intensity compensation is signalled through mv_mode: the real mode is
 * saved in mv_mode2, MV_PMODE_INTENSITY_COMP is swapped in around the MC
 * call, and mv_mode is restored afterwards.
 * NOTE(review): elided excerpt — the conditions guarding several of these
 * assignments are not visible; code kept byte-identical. */
2131 v->mv_mode2 = v->mv_mode;
2132 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2138 v->mv_mode = v->mv_mode2;
2141 if (mode == BMV_TYPE_INTERPOLATED) {
2145 v->mv_mode = v->mv_mode2;
2149 if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2150 v->mv_mode = v->mv_mode2;
/* single-MV MC; second argument selects the backward reference */
2151 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2153 v->mv_mode = v->mv_mode2;
/**
 * Predict forward and backward motion vectors for a B-frame macroblock,
 * apply the pullback of 8.3.5.3.4 / 8.4.5.4 and store the final MVs using
 * the signed-modulus MV range of 4.11.
 * @param v      VC1Context
 * @param dmv_x  forward/backward MV x differentials
 * @param dmv_y  forward/backward MV y differentials
 * @param direct direct-mode flag (direct MVs are scaled from the co-located
 *               next-picture MVs via scale_mv())
 * @param mvtype BMV_TYPE_* selecting which predictions to compute
 * NOTE(review): elided excerpt — else-branches and closing braces between
 * the visible statements are missing; code kept byte-identical.
 */
2156 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2157 int direct, int mvtype)
2159 MpegEncContext *s = &v->s;
2160 int xy, wrap, off = 0;
2165 const uint8_t *is_intra = v->mb_type[0];
2169 /* scale MV difference to be quad-pel */
2170 dmv_x[0] <<= 1 - s->quarter_sample;
2171 dmv_y[0] <<= 1 - s->quarter_sample;
2172 dmv_x[1] <<= 1 - s->quarter_sample;
2173 dmv_y[1] <<= 1 - s->quarter_sample;
2175 wrap = s->b8_stride;
2176 xy = s->block_index[0];
/* clear both direction MVs for this block position */
2179 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
2180 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
2181 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
2182 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
2185 if (!v->field_mode) {
/* direct mode: derive both MVs from the co-located MV of the next picture */
2186 s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2187 s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2188 s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2189 s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2191 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2192 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2193 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2194 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2195 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2198 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2199 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2200 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2201 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
/* forward prediction: A = above, B = above-right (or above-left at the
 * right picture edge), C = left neighbour */
2205 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2206 C = s->current_picture.f.motion_val[0][xy - 2];
2207 A = s->current_picture.f.motion_val[0][xy - wrap * 2];
2208 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2209 B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
2211 if (!s->mb_x) C[0] = C[1] = 0;
2212 if (!s->first_slice_line) { // predictor A is not out of bounds
2213 if (s->mb_width == 1) {
2217 px = mid_pred(A[0], B[0], C[0]);
2218 py = mid_pred(A[1], B[1], C[1]);
2220 } else if (s->mb_x) { // predictor C is not out of bounds
2226 /* Pullback MV as specified in 8.3.5.3.4 */
2229 if (v->profile < PROFILE_ADVANCED) {
2230 qx = (s->mb_x << 5);
2231 qy = (s->mb_y << 5);
2232 X = (s->mb_width << 5) - 4;
2233 Y = (s->mb_height << 5) - 4;
2234 if (qx + px < -28) px = -28 - qx;
2235 if (qy + py < -28) py = -28 - qy;
2236 if (qx + px > X) px = X - qx;
2237 if (qy + py > Y) py = Y - qy;
2239 qx = (s->mb_x << 6);
2240 qy = (s->mb_y << 6);
2241 X = (s->mb_width << 6) - 4;
2242 Y = (s->mb_height << 6) - 4;
2243 if (qx + px < -60) px = -60 - qx;
2244 if (qy + py < -60) py = -60 - qy;
2245 if (qx + px > X) px = X - qx;
2246 if (qy + py > Y) py = Y - qy;
2249 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): dead branch — the "0 &&" deliberately disables hybrid
 * prediction here; kept as-is. */
2250 if (0 && !s->first_slice_line && s->mb_x) {
2251 if (is_intra[xy - wrap])
2252 sum = FFABS(px) + FFABS(py);
2254 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2256 if (get_bits1(&s->gb)) {
2264 if (is_intra[xy - 2])
2265 sum = FFABS(px) + FFABS(py);
2267 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2269 if (get_bits1(&s->gb)) {
2279 /* store MV using signed modulus of MV range defined in 4.11 */
2280 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2281 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* backward prediction: same procedure on motion_val[1] */
2283 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2284 C = s->current_picture.f.motion_val[1][xy - 2];
2285 A = s->current_picture.f.motion_val[1][xy - wrap * 2];
2286 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2287 B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
2291 if (!s->first_slice_line) { // predictor A is not out of bounds
2292 if (s->mb_width == 1) {
2296 px = mid_pred(A[0], B[0], C[0]);
2297 py = mid_pred(A[1], B[1], C[1]);
2299 } else if (s->mb_x) { // predictor C is not out of bounds
2305 /* Pullback MV as specified in 8.3.5.3.4 */
2308 if (v->profile < PROFILE_ADVANCED) {
2309 qx = (s->mb_x << 5);
2310 qy = (s->mb_y << 5);
2311 X = (s->mb_width << 5) - 4;
2312 Y = (s->mb_height << 5) - 4;
2313 if (qx + px < -28) px = -28 - qx;
2314 if (qy + py < -28) py = -28 - qy;
2315 if (qx + px > X) px = X - qx;
2316 if (qy + py > Y) py = Y - qy;
2318 qx = (s->mb_x << 6);
2319 qy = (s->mb_y << 6);
2320 X = (s->mb_width << 6) - 4;
2321 Y = (s->mb_height << 6) - 4;
2322 if (qx + px < -60) px = -60 - qx;
2323 if (qy + py < -60) py = -60 - qy;
2324 if (qx + px > X) px = X - qx;
2325 if (qy + py > Y) py = Y - qy;
2328 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): dead branch — same "0 &&" disable as the forward case. */
2329 if (0 && !s->first_slice_line && s->mb_x) {
2330 if (is_intra[xy - wrap])
2331 sum = FFABS(px) + FFABS(py);
2333 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2335 if (get_bits1(&s->gb)) {
2343 if (is_intra[xy - 2])
2344 sum = FFABS(px) + FFABS(py);
2346 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2348 if (get_bits1(&s->gb)) {
2358 /* store MV using signed modulus of MV range defined in 4.11 */
2360 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2361 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2363 s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
2364 s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
2365 s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
2366 s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
/**
 * Predict B-frame motion vectors for interlaced field pictures.
 * Direct mode scales the co-located next-picture MVs with scale_mv_intfi()
 * and derives the reference field from the majority of the co-located
 * field flags (mv_f_next); other modes delegate to vc1_pred_mv() per
 * direction. Interface: n = block index, mv1 = 1-MV flag, pred_flag =
 * per-direction predictor-selection flags.
 * NOTE(review): elided excerpt — some branch/brace lines are missing;
 * code kept byte-identical.
 */
2369 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2371 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2372 MpegEncContext *s = &v->s;
2373 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2375 if (v->bmvtype == BMV_TYPE_DIRECT) {
2376 int total_opp, k, f;
2377 if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2378 s->mv[0][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2379 v->bfraction, 0, s->quarter_sample, v->qs_last);
2380 s->mv[0][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2381 v->bfraction, 0, s->quarter_sample, v->qs_last);
2382 s->mv[1][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2383 v->bfraction, 1, s->quarter_sample, v->qs_last);
2384 s->mv[1][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2385 v->bfraction, 1, s->quarter_sample, v->qs_last);
/* majority vote over the four co-located 8x8 field flags */
2387 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2388 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2389 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2390 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2391 f = (total_opp > 2) ? 1 : 0;
2393 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2394 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2397 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2398 for (k = 0; k < 4; k++) {
2399 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2400 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2401 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2402 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2403 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2404 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2408 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
/* interpolated: predict both directions for the whole MB */
2409 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2410 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2413 if (dir) { // backward
2414 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2415 if (n == 3 || mv1) {
2416 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2419 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2420 if (n == 3 || mv1) {
2421 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2426 /** Get predicted DC value for I-frames only
2427  * prediction dir: left=0, top=1
2428  * @param s MpegEncContext
2429  * @param overlap flag indicating that overlap filtering is used
2430  * @param pq integer part of picture quantizer
2431  * @param[in] n block index in the current MB
2432  * @param dc_val_ptr Pointer to DC predictor
2433  * @param dir_ptr Prediction direction for use in AC prediction
 * @return the selected DC predictor (a or c, chosen by the gradient test
 *         below); also writes back the predictor slot and direction
2435 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2436 int16_t **dc_val_ptr, int *dir_ptr)
2438 int a, b, c, wrap, pred, scale;
/* default DC predictor per scale value; entry 0 is written as -1 and wraps
 * in uint16_t — presumably index 0 is never a valid scale here; TODO confirm */
2440 static const uint16_t dcpred[32] = {
2441 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2442 114, 102, 93, 85, 79, 73, 68, 64,
2443 60, 57, 54, 51, 49, 47, 45, 43,
2444 41, 39, 38, 37, 35, 34, 33
2447 /* find prediction - wmv3_dc_scale always used here in fact */
2448 if (n < 4) scale = s->y_dc_scale;
2449 else scale = s->c_dc_scale;
2451 wrap = s->block_wrap[n];
2452 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left, a = top neighbour DC */
2458 b = dc_val[ - 1 - wrap];
2459 a = dc_val[ - wrap];
2461 if (pq < 9 || !overlap) {
2462 /* Set outer values */
2463 if (s->first_slice_line && (n != 2 && n != 3))
2464 b = a = dcpred[scale];
2465 if (s->mb_x == 0 && (n != 1 && n != 3))
2466 b = c = dcpred[scale];
2468 /* Set outer values */
2469 if (s->first_slice_line && (n != 2 && n != 3))
2471 if (s->mb_x == 0 && (n != 1 && n != 3))
/* gradient test: pick the neighbour whose gradient toward b is smaller */
2475 if (abs(a - b) <= abs(b - c)) {
2477 *dir_ptr = 1; // left
2480 *dir_ptr = 0; // top
2483 /* update predictor */
2484 *dc_val_ptr = &dc_val[0];
2489 /** Get predicted DC value
2490  * prediction dir: left=0, top=1
2491  * @param s MpegEncContext
2492  * @param overlap flag indicating that overlap filtering is used
2493  * @param pq integer part of picture quantizer
2494  * @param[in] n block index in the current MB
2495  * @param a_avail flag indicating top block availability
2496  * @param c_avail flag indicating left block availability
2497  * @param dc_val_ptr Pointer to DC predictor
2498  * @param dir_ptr Prediction direction for use in AC prediction
 * @return selected DC predictor; neighbour DCs are rescaled to this MB's
 *         quantizer before the gradient test when the MBs use different q
2500 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2501 int a_avail, int c_avail,
2502 int16_t **dc_val_ptr, int *dir_ptr)
2504 int a, b, c, wrap, pred;
2506 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2509 wrap = s->block_wrap[n];
2510 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left, a = top neighbour DC */
2516 b = dc_val[ - 1 - wrap];
2517 a = dc_val[ - wrap];
2518 /* scale predictors if needed */
2519 q1 = s->current_picture.f.qscale_table[mb_pos];
2520 if (c_avail && (n != 1 && n != 3)) {
/* left neighbour lives in the previous MB: rescale c from q2 to q1 */
2521 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2523 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2525 if (a_avail && (n != 2 && n != 3)) {
2526 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2528 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2530 if (a_avail && c_avail && (n != 3)) {
2535 off -= s->mb_stride;
2536 q2 = s->current_picture.f.qscale_table[off];
2538 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2541 if (a_avail && c_avail) {
/* both neighbours available: gradient test picks direction */
2542 if (abs(a - b) <= abs(b - c)) {
2544 *dir_ptr = 1; // left
2547 *dir_ptr = 0; // top
2549 } else if (a_avail) {
2551 *dir_ptr = 0; // top
2552 } else if (c_avail) {
2554 *dir_ptr = 1; // left
2557 *dir_ptr = 1; // left
2560 /* update predictor */
2561 *dc_val_ptr = &dc_val[0];
2565 /** @} */ // Block group
2568 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2569 * @see 7.1.4, p91 and 8.1.1.7, p104
/**
 * Predict the coded-block flag from the left (a), top-left (b) and top (c)
 * neighbours, and return a pointer to this block's coded_block slot so the
 * caller can store the decoded flag.
 * NOTE(review): elided excerpt — the prediction combination of a/b/c is not
 * visible here; code kept byte-identical.
 */
2573 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2574 uint8_t **coded_block_ptr)
2576 int xy, wrap, pred, a, b, c;
2578 xy = s->block_index[n];
2579 wrap = s->b8_stride;
2584 a = s->coded_block[xy - 1 ];
2585 b = s->coded_block[xy - 1 - wrap];
2586 c = s->coded_block[xy - wrap];
2595 *coded_block_ptr = &s->coded_block[xy];
2601 * Decode one AC coefficient
2602 * @param v The VC1 context
2603 * @param last Last coefficient
2604 * @param skip How many zero coefficients to skip
2605 * @param value Decoded AC coefficient value
2606 * @param codingset set of VLC to decode data
2609 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2610 int *value, int codingset)
2612 GetBitContext *gb = &v->s.gb;
2613 int index, escape, run = 0, level = 0, lst = 0;
/* non-escape path: (run, level, last) straight from the index tables */
2615 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2616 if (index != vc1_ac_sizes[codingset] - 1) {
2617 run = vc1_index_decode_table[codingset][index][0];
2618 level = vc1_index_decode_table[codingset][index][1];
/* get_bits_left < 0 forces "last" so a truncated stream terminates the block */
2619 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
/* escape modes 0/1/2 selected by decode210() */
2623 escape = decode210(gb);
2625 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2626 run = vc1_index_decode_table[codingset][index][0];
2627 level = vc1_index_decode_table[codingset][index][1];
2628 lst = index >= vc1_last_decode_table[codingset];
2631 level += vc1_last_delta_level_table[codingset][run];
2633 level += vc1_delta_level_table[codingset][run];
2636 run += vc1_last_delta_run_table[codingset][level] + 1;
2638 run += vc1_delta_run_table[codingset][level] + 1;
/* fully-escaped mode: explicit run/level field widths, cached in the
 * context (esc3_*) after the first occurrence */
2644 lst = get_bits1(gb);
2645 if (v->s.esc3_level_length == 0) {
2646 if (v->pq < 8 || v->dquantfrm) { // table 59
2647 v->s.esc3_level_length = get_bits(gb, 3);
2648 if (!v->s.esc3_level_length)
2649 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2650 } else { // table 60
2651 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2653 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2655 run = get_bits(gb, v->s.esc3_run_length);
2656 sign = get_bits1(gb);
2657 level = get_bits(gb, v->s.esc3_level_length);
2668 /** Decode intra block in intra frames - should be faster than decode_intra_block
2669 * @param v VC1Context
2670 * @param block block to decode
2671 * @param[in] n subblock index
2672 * @param coded are AC coeffs present or not
2673 * @param codingset set of VLC to decode data
 * @return 0 on success, negative on invalid DC VLC (error path elided here)
 * NOTE(review): elided excerpt — branch/brace lines between the visible
 * statements are missing; code kept byte-identical.
2675 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n,
2676 int coded, int codingset)
2678 GetBitContext *gb = &v->s.gb;
2679 MpegEncContext *s = &v->s;
2680 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2683 int16_t *ac_val, *ac_val2;
2686 /* Get DC differential */
2688 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2690 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2693 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2697 if (dcdiff == 119 /* ESC index value */) {
2698 /* TODO: Optimize */
2699 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2700 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2701 else dcdiff = get_bits(gb, 8);
/* pq 1 and 2 carry extra precision bits for the DC differential */
2704 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2705 else if (v->pq == 2)
2706 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2713 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2716 /* Store the quantized DC coeff, used for prediction */
2718 block[0] = dcdiff * s->y_dc_scale;
2720 block[0] = dcdiff * s->c_dc_scale;
/* coded path: decode AC run/level pairs into zigzag positions */
2731 int last = 0, skip, value;
2732 const uint8_t *zz_table;
2736 scale = v->pq * 2 + v->halfpq;
/* zigzag table choice depends on AC prediction direction */
2740 zz_table = v->zz_8x8[2];
2742 zz_table = v->zz_8x8[3];
2744 zz_table = v->zz_8x8[1];
2746 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2748 if (dc_pred_dir) // left
2751 ac_val -= 16 * s->block_wrap[n];
2754 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2758 block[zz_table[i++]] = value;
2761 /* apply AC prediction if needed */
2763 if (dc_pred_dir) { // left
2764 for (k = 1; k < 8; k++)
2765 block[k << v->left_blk_sh] += ac_val[k];
2767 for (k = 1; k < 8; k++)
2768 block[k << v->top_blk_sh] += ac_val[k + 8];
2771 /* save AC coeffs for further prediction */
2772 for (k = 1; k < 8; k++) {
2773 ac_val2[k] = block[k << v->left_blk_sh];
2774 ac_val2[k + 8] = block[k << v->top_blk_sh];
2777 /* scale AC coeffs */
2778 for (k = 1; k < 64; k++)
2782 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2785 if (s->ac_pred) i = 63;
/* not-coded path: still propagate predicted ACs and save predictor state */
2791 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2795 scale = v->pq * 2 + v->halfpq;
2796 memset(ac_val2, 0, 16 * 2);
2797 if (dc_pred_dir) { // left
2800 memcpy(ac_val2, ac_val, 8 * 2);
2802 ac_val -= 16 * s->block_wrap[n];
2804 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2807 /* apply AC prediction if needed */
2809 if (dc_pred_dir) { //left
2810 for (k = 1; k < 8; k++) {
2811 block[k << v->left_blk_sh] = ac_val[k] * scale;
2812 if (!v->pquantizer && block[k << v->left_blk_sh])
2813 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2816 for (k = 1; k < 8; k++) {
2817 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2818 if (!v->pquantizer && block[k << v->top_blk_sh])
2819 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2825 s->block_last_index[n] = i;
2830 /** Decode intra block in intra frames (advanced profile variant:
 * per-macroblock quantizer mquant, availability flags, interlaced zigzag)
2831 * @param v VC1Context
2832 * @param block block to decode
2833 * @param[in] n subblock number
2834 * @param coded are AC coeffs present or not
2835 * @param codingset set of VLC to decode data
2836 * @param mquant quantizer value for this macroblock
 * NOTE(review): elided excerpt — branch/brace lines between the visible
 * statements are missing; code kept byte-identical.
2838 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
2839 int coded, int codingset, int mquant)
2841 GetBitContext *gb = &v->s.gb;
2842 MpegEncContext *s = &v->s;
2843 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2846 int16_t *ac_val, *ac_val2;
2848 int a_avail = v->a_avail, c_avail = v->c_avail;
2849 int use_pred = s->ac_pred;
2852 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2854 /* Get DC differential */
2856 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2858 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2861 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2865 if (dcdiff == 119 /* ESC index value */) {
2866 /* TODO: Optimize */
2867 if (mquant == 1) dcdiff = get_bits(gb, 10);
2868 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2869 else dcdiff = get_bits(gb, 8);
/* mquant 1 and 2 carry extra precision bits for the DC differential */
2872 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2873 else if (mquant == 2)
2874 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2881 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2884 /* Store the quantized DC coeff, used for prediction */
2886 block[0] = dcdiff * s->y_dc_scale;
2888 block[0] = dcdiff * s->c_dc_scale;
2894 /* check if AC is needed at all */
2895 if (!a_avail && !c_avail)
2897 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2900 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2902 if (dc_pred_dir) // left
2905 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer, q2 = quantizer of the MB the AC predictor
 * comes from (left or top), used below to rescale saved AC coefficients */
2907 q1 = s->current_picture.f.qscale_table[mb_pos];
2908 if ( dc_pred_dir && c_avail && mb_pos)
2909 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2910 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2911 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2912 if ( dc_pred_dir && n == 1)
2914 if (!dc_pred_dir && n == 2)
2920 int last = 0, skip, value;
2921 const uint8_t *zz_table;
/* zigzag table: interlaced frames use zzi, otherwise direction-dependent */
2925 if (!use_pred && v->fcm == ILACE_FRAME) {
2926 zz_table = v->zzi_8x8;
2928 if (!dc_pred_dir) // top
2929 zz_table = v->zz_8x8[2];
2931 zz_table = v->zz_8x8[3];
2934 if (v->fcm != ILACE_FRAME)
2935 zz_table = v->zz_8x8[1];
2937 zz_table = v->zzi_8x8;
2941 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2945 block[zz_table[i++]] = value;
2948 /* apply AC prediction if needed */
2950 /* scale predictors if needed*/
2951 if (q2 && q1 != q2) {
2952 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2953 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2955 if (dc_pred_dir) { // left
2956 for (k = 1; k < 8; k++)
2957 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2959 for (k = 1; k < 8; k++)
2960 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2963 if (dc_pred_dir) { //left
2964 for (k = 1; k < 8; k++)
2965 block[k << v->left_blk_sh] += ac_val[k];
2967 for (k = 1; k < 8; k++)
2968 block[k << v->top_blk_sh] += ac_val[k + 8];
2972 /* save AC coeffs for further prediction */
2973 for (k = 1; k < 8; k++) {
2974 ac_val2[k ] = block[k << v->left_blk_sh];
2975 ac_val2[k + 8] = block[k << v->top_blk_sh];
2978 /* scale AC coeffs */
2979 for (k = 1; k < 64; k++)
2983 block[k] += (block[k] < 0) ? -mquant : mquant;
2986 if (use_pred) i = 63;
2987 } else { // no AC coeffs
2990 memset(ac_val2, 0, 16 * 2);
2991 if (dc_pred_dir) { // left
2993 memcpy(ac_val2, ac_val, 8 * 2);
2994 if (q2 && q1 != q2) {
2995 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2996 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2997 for (k = 1; k < 8; k++)
2998 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3003 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3004 if (q2 && q1 != q2) {
3005 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3006 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3007 for (k = 1; k < 8; k++)
3008 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3013 /* apply AC prediction if needed */
3015 if (dc_pred_dir) { // left
3016 for (k = 1; k < 8; k++) {
3017 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3018 if (!v->pquantizer && block[k << v->left_blk_sh])
3019 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3022 for (k = 1; k < 8; k++) {
3023 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3024 if (!v->pquantizer && block[k << v->top_blk_sh])
3025 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3031 s->block_last_index[n] = i;
3036 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3037 * @param v VC1Context
3038 * @param block block to decode
3039 * @param[in] n subblock index
3040 * @param coded are AC coeffs present or not
3041 * @param mquant block quantizer
3042 * @param codingset set of VLC to decode data
 * Unlike the I-frame variants, this clears the block, clamps mquant and
 * sets the DC scales itself, and adjusts the prediction direction based
 * on neighbour availability.
 * NOTE(review): elided excerpt — branch/brace lines between the visible
 * statements are missing; code kept byte-identical.
3044 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
3045 int coded, int mquant, int codingset)
3047 GetBitContext *gb = &v->s.gb;
3048 MpegEncContext *s = &v->s;
3049 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3052 int16_t *ac_val, *ac_val2;
3054 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3055 int a_avail = v->a_avail, c_avail = v->c_avail;
3056 int use_pred = s->ac_pred;
3060 s->dsp.clear_block(block);
3062 /* XXX: Guard against dumb values of mquant */
3063 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3065 /* Set DC scale - y and c use the same */
3066 s->y_dc_scale = s->y_dc_scale_table[mquant];
3067 s->c_dc_scale = s->c_dc_scale_table[mquant];
3069 /* Get DC differential */
3071 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3073 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3076 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3080 if (dcdiff == 119 /* ESC index value */) {
3081 /* TODO: Optimize */
3082 if (mquant == 1) dcdiff = get_bits(gb, 10);
3083 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3084 else dcdiff = get_bits(gb, 8);
3087 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3088 else if (mquant == 2)
3089 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3096 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3099 /* Store the quantized DC coeff, used for prediction */
3102 block[0] = dcdiff * s->y_dc_scale;
3104 block[0] = dcdiff * s->c_dc_scale;
3110 /* check if AC is needed at all and adjust direction if needed */
3111 if (!a_avail) dc_pred_dir = 1;
3112 if (!c_avail) dc_pred_dir = 0;
3113 if (!a_avail && !c_avail) use_pred = 0;
3114 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3117 scale = mquant * 2 + v->halfpq;
3119 if (dc_pred_dir) //left
3122 ac_val -= 16 * s->block_wrap[n];
/* q1/q2 as in vc1_decode_i_block_adv: quantizers of this MB and of the
 * MB the AC predictor comes from */
3124 q1 = s->current_picture.f.qscale_table[mb_pos];
3125 if (dc_pred_dir && c_avail && mb_pos)
3126 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3127 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3128 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
3129 if ( dc_pred_dir && n == 1)
3131 if (!dc_pred_dir && n == 2)
3133 if (n == 3) q2 = q1;
3136 int last = 0, skip, value;
3140 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
/* zigzag selection: progressive vs interlaced, direction-dependent when
 * AC prediction is in use */
3144 if (v->fcm == PROGRESSIVE)
3145 block[v->zz_8x8[0][i++]] = value;
3147 if (use_pred && (v->fcm == ILACE_FRAME)) {
3148 if (!dc_pred_dir) // top
3149 block[v->zz_8x8[2][i++]] = value;
3151 block[v->zz_8x8[3][i++]] = value;
3153 block[v->zzi_8x8[i++]] = value;
3158 /* apply AC prediction if needed */
3160 /* scale predictors if needed*/
3161 if (q2 && q1 != q2) {
3162 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3163 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3165 if (dc_pred_dir) { // left
3166 for (k = 1; k < 8; k++)
3167 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3169 for (k = 1; k < 8; k++)
3170 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3173 if (dc_pred_dir) { // left
3174 for (k = 1; k < 8; k++)
3175 block[k << v->left_blk_sh] += ac_val[k];
3177 for (k = 1; k < 8; k++)
3178 block[k << v->top_blk_sh] += ac_val[k + 8];
3182 /* save AC coeffs for further prediction */
3183 for (k = 1; k < 8; k++) {
3184 ac_val2[k ] = block[k << v->left_blk_sh];
3185 ac_val2[k + 8] = block[k << v->top_blk_sh];
3188 /* scale AC coeffs */
3189 for (k = 1; k < 64; k++)
3193 block[k] += (block[k] < 0) ? -mquant : mquant;
3196 if (use_pred) i = 63;
3197 } else { // no AC coeffs
3200 memset(ac_val2, 0, 16 * 2);
3201 if (dc_pred_dir) { // left
3203 memcpy(ac_val2, ac_val, 8 * 2);
3204 if (q2 && q1 != q2) {
3205 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3206 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3207 for (k = 1; k < 8; k++)
3208 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3213 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3214 if (q2 && q1 != q2) {
3215 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3216 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3217 for (k = 1; k < 8; k++)
3218 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3223 /* apply AC prediction if needed */
3225 if (dc_pred_dir) { // left
3226 for (k = 1; k < 8; k++) {
3227 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3228 if (!v->pquantizer && block[k << v->left_blk_sh])
3229 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3232 for (k = 1; k < 8; k++) {
3233 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3234 if (!v->pquantizer && block[k << v->top_blk_sh])
3235 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3241 s->block_last_index[n] = i;
/* Decode one inter-coded residual block of a P picture.
 * Reads the per-block transform type (TTBLK) and sub-block pattern
 * (SUBBLKPAT) VLCs where the bitstream signals them, decodes and
 * dequantizes the AC run/level pairs, then applies the matching inverse
 * transform (8x8, two 8x4, two 4x8 or four 4x4) and adds the result to
 * dst. The coded sub-block pattern is accumulated into *ttmb_out
 * (parameter elided in this excerpt). NOTE(review): several interior
 * lines are elided in this excerpt — confirm against the full file. */
3248 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n,
3249 int mquant, int ttmb, int first_block,
3250 uint8_t *dst, int linesize, int skip_block,
3253 MpegEncContext *s = &v->s;
3254 GetBitContext *gb = &s->gb;
3257 int scale, off, idx, last, skip, value;
3258 int ttblk = ttmb & 7;
3261 s->dsp.clear_block(block);
/* Block-level transform type signalled in the bitstream: map the VLC
 * index through the ttblk->TT lookup for the current table set. */
3264 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3266 if (ttblk == TT_4X4) {
3267 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
/* For the two-subblock transforms the pattern is coded with decode012
 * only under these MB-level/first-block conditions. */
3269 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3270 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3271 || (!v->res_rtm_flag && !first_block))) {
3272 subblkpat = decode012(gb);
3274 subblkpat ^= 3; // swap decoded pattern bits
3275 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3277 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
/* Dequantizer scale: doubled quantizer plus half-step when this block
 * uses the picture quantizer. */
3280 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3282 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3283 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3284 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3287 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3288 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* 8x8 transform: single scan over the whole block. */
3297 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
/* Progressive vs. interlaced zigzag selection — presumably keyed on the
 * (elided) fcm/interlace check; confirm against the full file. */
3302 idx = v->zz_8x8[0][i++];
3304 idx = v->zzi_8x8[i++];
3305 block[idx] = value * scale;
3307 block[idx] += (block[idx] < 0) ? -mquant : mquant;
/* DC-only fast path vs. full inverse transform. */
3311 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3313 v->vc1dsp.vc1_inv_trans_8x8(block);
3314 s->dsp.add_pixels_clamped(block, dst, linesize);
/* 4x4 transform: four sub-blocks, each guarded by one subblkpat bit. */
3319 pat = ~subblkpat & 0xF;
3320 for (j = 0; j < 4; j++) {
3321 last = subblkpat & (1 << (3 - j));
3323 off = (j & 1) * 4 + (j & 2) * 16;
3325 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3330 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3332 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3333 block[idx + off] = value * scale;
3335 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3337 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3339 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3341 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* 8x4 transform: top/bottom halves, one subblkpat bit each. */
3346 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3347 for (j = 0; j < 2; j++) {
3348 last = subblkpat & (1 << (1 - j));
3352 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3357 idx = v->zz_8x4[i++] + off;
3359 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3360 block[idx] = value * scale;
3362 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3364 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3366 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3368 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* 4x8 transform: left/right halves, one subblkpat bit each. */
3373 pat = ~(subblkpat * 5) & 0xF;
3374 for (j = 0; j < 2; j++) {
3375 last = subblkpat & (1 << (1 - j));
3379 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3384 idx = v->zz_4x8[i++] + off;
3386 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3387 block[idx] = value * scale;
3389 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3391 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3393 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3395 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* Record this block's transform type for the loop filter. */
3401 *ttmb_out |= ttblk << (n * 4);
3405 /** @} */ // Macroblock group
/* MV differential decoding tables: size_table[i] is the number of extra
 * bits read for escape index i, offset_table[i] the corresponding value
 * offset — presumably consumed by the GET_MVDATA macro used in
 * vc1_decode_p_mb below; confirm against the macro definition. */
3407 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3408 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/* Apply the in-loop deblocking filter across the VERTICAL (horizontal
 * edge) boundaries of one block (block_num 0-3 = luma, 4-5 = chroma) of
 * the macroblock one row above the current one. Uses the cbp / is_intra
 * / ttblk state cached by vc1_decode_p_mb and the stored motion vectors
 * to decide between the 8-sample and 4-sample filters.
 * NOTE(review): interior lines are elided in this excerpt. */
3410 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3412 MpegEncContext *s = &v->s;
/* CBP/intra flags of the MB above (mb_x - mb_stride); 4 bits per block. */
3413 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3414 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3415 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3416 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3417 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
/* Locate the destination pixels of this block (chroma planes for 4/5). */
3420 if (block_num > 3) {
3421 dst = s->dest[block_num - 3];
3423 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
/* Filter the MB-bottom edge unless we are past the last MB row. */
3425 if (s->mb_y != s->end_mb_y || block_num < 2) {
3429 if (block_num > 3) {
3430 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3431 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3432 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3433 mv_stride = s->mb_stride;
/* Luma: the "bottom" neighbour is either the lower block of the same MB
 * (block_num < 2) or the top block of the MB below. */
3435 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3436 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3437 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3438 : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3439 mv_stride = s->b8_stride;
3440 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* Full 8-pixel filter when either side is intra or the MVs differ;
 * otherwise filter only the coded 4-pixel halves. */
3443 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3444 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3445 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3447 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3449 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3452 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3454 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* Internal block edge: only present for transform types with a
 * horizontal split (4x4 / 8x4). */
3459 dst -= 4 * linesize;
3460 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3461 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3462 idx = (block_cbp | (block_cbp >> 2)) & 3;
3464 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3467 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3469 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* Apply the in-loop deblocking filter across the HORIZONTAL (vertical
 * edge) boundaries of one block of the macroblock one row above and one
 * column to the left (mb_x - 1 - mb_stride). Mirrors
 * vc1_apply_p_v_loop_filter with the h-filter DSP entry points.
 * NOTE(review): interior lines are elided in this excerpt. */
3474 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3476 MpegEncContext *s = &v->s;
3477 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3478 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3479 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3480 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3481 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3484 if (block_num > 3) {
3485 dst = s->dest[block_num - 3] - 8 * linesize;
3487 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
/* Filter the MB-right edge unless at the right picture border. */
3490 if (s->mb_x != s->mb_width || !(block_num & 5)) {
3493 if (block_num > 3) {
3494 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3495 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3496 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
/* Luma: the "right" neighbour is either the right block of the same MB
 * (even block_num) or the left block of the MB to the right. */
3498 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3499 : (mb_cbp >> ((block_num + 1) * 4));
3500 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3501 : (mb_is_intra >> ((block_num + 1) * 4));
3502 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
/* Full 8-pixel filter on intra or MV mismatch, else the coded halves. */
3504 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3505 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3507 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3509 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3512 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3514 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* Internal edge: only for transform types with a vertical split. */
3520 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3521 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3522 idx = (block_cbp | (block_cbp >> 1)) & 5;
3524 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3527 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3529 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* Run the P-picture in-loop deblocking for the current macroblock:
 * all six blocks get the vertical filter, then the horizontal filter,
 * with an extra catch-up pass at the end of each MB row (see the
 * ordering comment below). NOTE(review): interior lines elided. */
3534 static void vc1_apply_p_loop_filter(VC1Context *v)
3536 MpegEncContext *s = &v->s;
3539 for (i = 0; i < 6; i++) {
3540 vc1_apply_p_v_loop_filter(v, i);
3543 /* V always precedes H, therefore we run H one MB before V;
3544 * at the end of a row, we catch up to complete the row */
3546 for (i = 0; i < 6; i++) {
3547 vc1_apply_p_h_loop_filter(v, i);
3549 if (s->mb_x == s->mb_width - 1) {
3551 ff_update_block_index(s);
3552 for (i = 0; i < 6; i++) {
3553 vc1_apply_p_h_loop_filter(v, i);
3559 /** Decode one P-frame MB
/* Decode one macroblock of a progressive P picture (1MV or 4MV mode).
 * Reads skip/MV-type flags (raw or from bitplanes), motion vector data,
 * CBPCY and quantizer, performs motion compensation, then decodes each
 * of the six blocks as intra or inter residual. The per-MB cbp /
 * transform-type / intra masks are stored for the loop filter at the
 * end. NOTE(review): many interior lines are elided in this excerpt. */
3561 static int vc1_decode_p_mb(VC1Context *v)
3563 MpegEncContext *s = &v->s;
3564 GetBitContext *gb = &s->gb;
3566 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3567 int cbp; /* cbp decoding stuff */
3568 int mqdiff, mquant; /* MB quantization */
3569 int ttmb = v->ttfrm; /* MB Transform type */
3571 int mb_has_coeffs = 1; /* last_flag */
3572 int dmv_x, dmv_y; /* Differential MV components */
3573 int index, index1; /* LUT indexes */
3574 int val, sign; /* temp values */
3575 int first_block = 1;
3577 int skipped, fourmv;
3578 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3580 mquant = v->pq; /* lossy initialization */
/* MV-type and skip flags: coded per-MB or taken from decoded bitplanes. */
3582 if (v->mv_type_is_raw)
3583 fourmv = get_bits1(gb);
3585 fourmv = v->mv_type_mb_plane[mb_pos];
3587 skipped = get_bits1(gb);
3589 skipped = v->s.mbskip_table[mb_pos];
3591 if (!fourmv) { /* 1MV mode */
3593 GET_MVDATA(dmv_x, dmv_y);
3596 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3597 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3599 s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3600 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3602 /* FIXME Set DC val for inter block ? */
3603 if (s->mb_intra && !mb_has_coeffs) {
3605 s->ac_pred = get_bits1(gb);
3607 } else if (mb_has_coeffs) {
3609 s->ac_pred = get_bits1(gb);
3610 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3616 s->current_picture.f.qscale_table[mb_pos] = mquant;
3618 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3619 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3620 VC1_TTMB_VLC_BITS, 2);
3621 if (!s->mb_intra) vc1_mc_1mv(v, 0);
/* Per-block decode: intra blocks go through vc1_decode_intra_block +
 * inverse 8x8 transform, inter blocks through vc1_decode_p_block. */
3623 for (i = 0; i < 6; i++) {
3624 s->dc_val[0][s->block_index[i]] = 0;
3626 val = ((cbp >> (5 - i)) & 1);
3627 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3628 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3630 /* check if prediction blocks A and C are available */
3631 v->a_avail = v->c_avail = 0;
3632 if (i == 2 || i == 3 || !s->first_slice_line)
3633 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3634 if (i == 1 || i == 3 || s->mb_x)
3635 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3637 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3638 (i & 4) ? v->codingset2 : v->codingset);
3639 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3641 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3643 for (j = 0; j < 64; j++)
3644 s->block[i][j] <<= 1;
3645 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* Overlap smoothing across block edges for high quantizers. */
3646 if (v->pq >= 9 && v->overlap) {
3648 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3650 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3652 block_cbp |= 0xF << (i << 2);
3653 block_intra |= 1 << i;
3655 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3656 s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3657 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3658 block_cbp |= pat << (i << 2);
3659 if (!v->ttmbf && ttmb < 8)
/* Skipped 1MV MB: clear block state and still predict/compensate. */
3666 for (i = 0; i < 6; i++) {
3667 v->mb_type[0][s->block_index[i]] = 0;
3668 s->dc_val[0][s->block_index[i]] = 0;
3670 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3671 s->current_picture.f.qscale_table[mb_pos] = 0;
3672 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3675 } else { // 4MV mode
3676 if (!skipped /* unskipped MB */) {
3677 int intra_count = 0, coded_inter = 0;
3678 int is_intra[6], is_coded[6];
/* First pass: per-luma-block MV data, intra/coded flags, luma MC. */
3680 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3681 for (i = 0; i < 6; i++) {
3682 val = ((cbp >> (5 - i)) & 1);
3683 s->dc_val[0][s->block_index[i]] = 0;
3690 GET_MVDATA(dmv_x, dmv_y);
3692 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3694 vc1_mc_4mv_luma(v, i, 0);
3695 intra_count += s->mb_intra;
3696 is_intra[i] = s->mb_intra;
3697 is_coded[i] = mb_has_coeffs;
/* Chroma blocks are intra when at least 3 luma blocks are intra. */
3700 is_intra[i] = (intra_count >= 3);
3704 vc1_mc_4mv_chroma(v, 0);
3705 v->mb_type[0][s->block_index[i]] = is_intra[i];
3707 coded_inter = !is_intra[i] & is_coded[i];
3709 // if there are no coded blocks then don't do anything more
3711 if (!intra_count && !coded_inter)
3714 s->current_picture.f.qscale_table[mb_pos] = mquant;
3715 /* test if block is intra and has pred */
3718 for (i = 0; i < 6; i++)
3720 if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3721 || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3727 s->ac_pred = get_bits1(gb);
3731 if (!v->ttmbf && coded_inter)
3732 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Second pass: decode residuals per block (intra or inter). */
3733 for (i = 0; i < 6; i++) {
3735 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3736 s->mb_intra = is_intra[i];
3738 /* check if prediction blocks A and C are available */
3739 v->a_avail = v->c_avail = 0;
3740 if (i == 2 || i == 3 || !s->first_slice_line)
3741 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3742 if (i == 1 || i == 3 || s->mb_x)
3743 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3745 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3746 (i & 4) ? v->codingset2 : v->codingset);
3747 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3749 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3751 for (j = 0; j < 64; j++)
3752 s->block[i][j] <<= 1;
3753 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3754 (i & 4) ? s->uvlinesize : s->linesize);
3755 if (v->pq >= 9 && v->overlap) {
3757 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3759 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3761 block_cbp |= 0xF << (i << 2);
3762 block_intra |= 1 << i;
3763 } else if (is_coded[i]) {
3764 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3765 first_block, s->dest[dst_idx] + off,
3766 (i & 4) ? s->uvlinesize : s->linesize,
3767 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3769 block_cbp |= pat << (i << 2);
3770 if (!v->ttmbf && ttmb < 8)
3775 } else { // skipped MB
/* Skipped 4MV MB: zero residual, but still predict + MC all 4 luma
 * blocks and chroma. */
3777 s->current_picture.f.qscale_table[mb_pos] = 0;
3778 for (i = 0; i < 6; i++) {
3779 v->mb_type[0][s->block_index[i]] = 0;
3780 s->dc_val[0][s->block_index[i]] = 0;
3782 for (i = 0; i < 4; i++) {
3783 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3784 vc1_mc_4mv_luma(v, i, 0);
3786 vc1_mc_4mv_chroma(v, 0);
3787 s->current_picture.f.qscale_table[mb_pos] = 0;
/* Cache per-MB state consumed by vc1_apply_p_loop_filter. */
3791 v->cbp[s->mb_x] = block_cbp;
3792 v->ttblk[s->mb_x] = block_tt;
3793 v->is_intra[s->mb_x] = block_intra;
3798 /* Decode one macroblock in an interlaced frame p picture */
/* Decode one macroblock of an interlaced-frame P picture.
 * The MB mode VLC selects among intra / 1MV / 2MV-field / 4MV /
 * 4MV-field; blk_mv_type records per-block whether the MV is a field MV.
 * NOTE(review): many interior lines are elided in this excerpt. */
3800 static int vc1_decode_p_mb_intfr(VC1Context *v)
3802 MpegEncContext *s = &v->s;
3803 GetBitContext *gb = &s->gb;
3805 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3806 int cbp = 0; /* cbp decoding stuff */
3807 int mqdiff, mquant; /* MB quantization */
3808 int ttmb = v->ttfrm; /* MB Transform type */
3810 int mb_has_coeffs = 1; /* last_flag */
3811 int dmv_x, dmv_y; /* Differential MV components */
3812 int val; /* temp value */
3813 int first_block = 1;
3815 int skipped, fourmv = 0, twomv = 0;
3816 int block_cbp = 0, pat, block_tt = 0;
3817 int idx_mbmode = 0, mvbp;
3818 int stride_y, fieldtx;
3820 mquant = v->pq; /* Loosy initialization */
3823 skipped = get_bits1(gb);
3825 skipped = v->s.mbskip_table[mb_pos];
/* MB mode table depends on whether 4MV is switched on for this picture. */
3827 if (v->fourmvswitch)
3828 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3830 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3831 switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3832 /* store the motion vector type in a flag (useful later) */
3833 case MV_PMODE_INTFR_4MV:
3835 v->blk_mv_type[s->block_index[0]] = 0;
3836 v->blk_mv_type[s->block_index[1]] = 0;
3837 v->blk_mv_type[s->block_index[2]] = 0;
3838 v->blk_mv_type[s->block_index[3]] = 0;
3840 case MV_PMODE_INTFR_4MV_FIELD:
3842 v->blk_mv_type[s->block_index[0]] = 1;
3843 v->blk_mv_type[s->block_index[1]] = 1;
3844 v->blk_mv_type[s->block_index[2]] = 1;
3845 v->blk_mv_type[s->block_index[3]] = 1;
3847 case MV_PMODE_INTFR_2MV_FIELD:
3849 v->blk_mv_type[s->block_index[0]] = 1;
3850 v->blk_mv_type[s->block_index[1]] = 1;
3851 v->blk_mv_type[s->block_index[2]] = 1;
3852 v->blk_mv_type[s->block_index[3]] = 1;
3854 case MV_PMODE_INTFR_1MV:
3855 v->blk_mv_type[s->block_index[0]] = 0;
3856 v->blk_mv_type[s->block_index[1]] = 0;
3857 v->blk_mv_type[s->block_index[2]] = 0;
3858 v->blk_mv_type[s->block_index[3]] = 0;
3861 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3862 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3863 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3864 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3865 s->mb_intra = v->is_intra[s->mb_x] = 1;
3866 for (i = 0; i < 6; i++)
3867 v->mb_type[0][s->block_index[i]] = 1;
/* FIELDTX: whether the intra MB's lines are stored field-interleaved. */
3868 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3869 mb_has_coeffs = get_bits1(gb);
3871 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3872 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3874 s->current_picture.f.qscale_table[mb_pos] = mquant;
3875 /* Set DC scale - y and c use the same (not sure if necessary here) */
3876 s->y_dc_scale = s->y_dc_scale_table[mquant];
3877 s->c_dc_scale = s->c_dc_scale_table[mquant];
3879 for (i = 0; i < 6; i++) {
3880 s->dc_val[0][s->block_index[i]] = 0;
3882 val = ((cbp >> (5 - i)) & 1);
3883 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3884 v->a_avail = v->c_avail = 0;
3885 if (i == 2 || i == 3 || !s->first_slice_line)
3886 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3887 if (i == 1 || i == 3 || s->mb_x)
3888 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3890 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3891 (i & 4) ? v->codingset2 : v->codingset);
3892 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3893 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* Luma destination stride/offset depend on FIELDTX (field-interleaved
 * lines double the stride). */
3895 stride_y = s->linesize << fieldtx;
3896 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3898 stride_y = s->uvlinesize;
3901 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3905 } else { // inter MB
3906 mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3908 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* MV block pattern: which of the 2 or 4 MVs are explicitly coded. */
3909 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3910 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3912 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3913 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3914 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3917 s->mb_intra = v->is_intra[s->mb_x] = 0;
3918 for (i = 0; i < 6; i++)
3919 v->mb_type[0][s->block_index[i]] = 0;
3920 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3921 /* for all motion vector read MVDATA and motion compensate each block */
/* 4MV path: one MV per luma block, chroma handled at i == 4. */
3925 for (i = 0; i < 6; i++) {
3928 val = ((mvbp >> (3 - i)) & 1);
3930 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3932 vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3933 vc1_mc_4mv_luma(v, i, 0);
3934 } else if (i == 4) {
3935 vc1_mc_4mv_chroma4(v);
/* 2MV-field path: one MV per field, each compensating two luma blocks. */
3942 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3944 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3945 vc1_mc_4mv_luma(v, 0, 0);
3946 vc1_mc_4mv_luma(v, 1, 0);
3949 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3951 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3952 vc1_mc_4mv_luma(v, 2, 0);
3953 vc1_mc_4mv_luma(v, 3, 0);
3954 vc1_mc_4mv_chroma4(v);
/* 1MV path: single MV for the whole MB. */
3956 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3958 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3960 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3964 GET_MQUANT(); // p. 227
3965 s->current_picture.f.qscale_table[mb_pos] = mquant;
3966 if (!v->ttmbf && cbp)
3967 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3968 for (i = 0; i < 6; i++) {
3969 s->dc_val[0][s->block_index[i]] = 0;
3971 val = ((cbp >> (5 - i)) & 1);
3973 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3975 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3977 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3978 first_block, s->dest[dst_idx] + off,
3979 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3980 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3981 block_cbp |= pat << (i << 2);
3982 if (!v->ttmbf && ttmb < 8)
/* Skipped MB: zero state, predict with zero differential, no residual. */
3989 s->mb_intra = v->is_intra[s->mb_x] = 0;
3990 for (i = 0; i < 6; i++) {
3991 v->mb_type[0][s->block_index[i]] = 0;
3992 s->dc_val[0][s->block_index[i]] = 0;
3994 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3995 s->current_picture.f.qscale_table[mb_pos] = 0;
3996 v->blk_mv_type[s->block_index[0]] = 0;
3997 v->blk_mv_type[s->block_index[1]] = 0;
3998 v->blk_mv_type[s->block_index[2]] = 0;
3999 v->blk_mv_type[s->block_index[3]] = 0;
4000 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
/* At end of row, snapshot the intra flags for the next row's prediction. */
4003 if (s->mb_x == s->mb_width - 1)
4004 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/* Decode one macroblock of an interlaced-field P picture.
 * idx_mbmode <= 1 selects intra; <= 5 selects 1MV; otherwise 4MV.
 * In the second field, destinations are offset by one frame line.
 * NOTE(review): interior lines are elided in this excerpt. */
4008 static int vc1_decode_p_mb_intfi(VC1Context *v)
4010 MpegEncContext *s = &v->s;
4011 GetBitContext *gb = &s->gb;
4013 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4014 int cbp = 0; /* cbp decoding stuff */
4015 int mqdiff, mquant; /* MB quantization */
4016 int ttmb = v->ttfrm; /* MB Transform type */
4018 int mb_has_coeffs = 1; /* last_flag */
4019 int dmv_x, dmv_y; /* Differential MV components */
4020 int val; /* temp values */
4021 int first_block = 1;
4024 int block_cbp = 0, pat, block_tt = 0;
4027 mquant = v->pq; /* Loosy initialization */
4029 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4030 if (idx_mbmode <= 1) { // intra MB
4031 s->mb_intra = v->is_intra[s->mb_x] = 1;
4032 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4033 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4034 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4036 s->current_picture.f.qscale_table[mb_pos] = mquant;
4037 /* Set DC scale - y and c use the same (not sure if necessary here) */
4038 s->y_dc_scale = s->y_dc_scale_table[mquant];
4039 s->c_dc_scale = s->c_dc_scale_table[mquant];
4040 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4041 mb_has_coeffs = idx_mbmode & 1;
4043 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4045 for (i = 0; i < 6; i++) {
4046 s->dc_val[0][s->block_index[i]] = 0;
4047 v->mb_type[0][s->block_index[i]] = 1;
4049 val = ((cbp >> (5 - i)) & 1);
4050 v->a_avail = v->c_avail = 0;
4051 if (i == 2 || i == 3 || !s->first_slice_line)
4052 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4053 if (i == 1 || i == 3 || s->mb_x)
4054 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4056 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4057 (i & 4) ? v->codingset2 : v->codingset);
4058 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4060 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* Second field writes one frame line below the first field's samples. */
4061 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4062 off += v->second_field ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4063 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4064 // TODO: loop filter
4067 s->mb_intra = v->is_intra[s->mb_x] = 0;
4068 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4069 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4070 if (idx_mbmode <= 5) { // 1-MV
4072 if (idx_mbmode & 1) {
4073 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4075 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4077 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: block pattern selects which luma MVs are explicitly coded. */
4079 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4080 for (i = 0; i < 6; i++) {
4082 dmv_x = dmv_y = pred_flag = 0;
4083 val = ((v->fourmvbp >> (3 - i)) & 1);
4085 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4087 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4088 vc1_mc_4mv_luma(v, i, 0);
4090 vc1_mc_4mv_chroma(v, 0);
4092 mb_has_coeffs = idx_mbmode & 1;
4095 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4099 s->current_picture.f.qscale_table[mb_pos] = mquant;
4100 if (!v->ttmbf && cbp) {
4101 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4104 for (i = 0; i < 6; i++) {
4105 s->dc_val[0][s->block_index[i]] = 0;
4107 val = ((cbp >> (5 - i)) & 1);
4108 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4109 if (v->second_field)
4110 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4112 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4113 first_block, s->dest[dst_idx] + off,
4114 (i & 4) ? s->uvlinesize : s->linesize,
4115 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4117 block_cbp |= pat << (i << 2);
4118 if (!v->ttmbf && ttmb < 8) ttmb = -1;
/* At end of row, snapshot the intra flags for the next row's prediction. */
4123 if (s->mb_x == s->mb_width - 1)
4124 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4128 /** Decode one B-frame MB (in Main profile)
/* Decode one macroblock of a progressive B picture (Main profile).
 * Handles direct and non-direct MBs (forward / backward / interpolated
 * prediction chosen by bmvtype), skipped MBs, and intra MBs; residual
 * blocks are decoded via vc1_decode_intra_block / vc1_decode_p_block.
 * NOTE(review): many interior lines are elided in this excerpt. */
4130 static void vc1_decode_b_mb(VC1Context *v)
4132 MpegEncContext *s = &v->s;
4133 GetBitContext *gb = &s->gb;
4135 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4136 int cbp = 0; /* cbp decoding stuff */
4137 int mqdiff, mquant; /* MB quantization */
4138 int ttmb = v->ttfrm; /* MB Transform type */
4139 int mb_has_coeffs = 0; /* last_flag */
4140 int index, index1; /* LUT indexes */
4141 int val, sign; /* temp values */
4142 int first_block = 1;
4144 int skipped, direct;
4145 int dmv_x[2], dmv_y[2];
4146 int bmvtype = BMV_TYPE_BACKWARD;
4148 mquant = v->pq; /* lossy initialization */
/* direct/skip flags: raw per-MB bits or decoded bitplanes. */
4152 direct = get_bits1(gb);
4154 direct = v->direct_mb_plane[mb_pos];
4156 skipped = get_bits1(gb);
4158 skipped = v->s.mbskip_table[mb_pos];
4160 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4161 for (i = 0; i < 6; i++) {
4162 v->mb_type[0][s->block_index[i]] = 0;
4163 s->dc_val[0][s->block_index[i]] = 0;
4165 s->current_picture.f.qscale_table[mb_pos] = 0;
/* Non-direct MB: read MV data and the B MV type (fwd/bwd/interp). */
4169 GET_MVDATA(dmv_x[0], dmv_y[0]);
4170 dmv_x[1] = dmv_x[0];
4171 dmv_y[1] = dmv_y[0];
4173 if (skipped || !s->mb_intra) {
4174 bmvtype = decode012(gb);
/* BFRACTION decides which direction the single coded MV refers to. */
4177 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4180 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4183 bmvtype = BMV_TYPE_INTERPOLATED;
4184 dmv_x[0] = dmv_y[0] = 0;
4188 for (i = 0; i < 6; i++)
4189 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* Skipped MB: predict and motion-compensate, no residual. */
4193 bmvtype = BMV_TYPE_INTERPOLATED;
4194 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4195 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4199 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4202 s->current_picture.f.qscale_table[mb_pos] = mquant;
4204 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Direct MB: no MV differentials; derive MVs and compensate. */
4205 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4206 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4207 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4209 if (!mb_has_coeffs && !s->mb_intra) {
4210 /* no coded blocks - effectively skipped */
4211 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4212 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4215 if (s->mb_intra && !mb_has_coeffs) {
4217 s->current_picture.f.qscale_table[mb_pos] = mquant;
4218 s->ac_pred = get_bits1(gb);
4220 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* Interpolated mode carries a second MV differential. */
4222 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4223 GET_MVDATA(dmv_x[0], dmv_y[0]);
4224 if (!mb_has_coeffs) {
4225 /* interpolated skipped block */
4226 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4227 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4231 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4233 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4236 s->ac_pred = get_bits1(gb);
4237 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4239 s->current_picture.f.qscale_table[mb_pos] = mquant;
4240 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4241 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Per-block residual decode, same intra/inter split as in P MBs. */
4245 for (i = 0; i < 6; i++) {
4246 s->dc_val[0][s->block_index[i]] = 0;
4248 val = ((cbp >> (5 - i)) & 1);
4249 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4250 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4252 /* check if prediction blocks A and C are available */
4253 v->a_avail = v->c_avail = 0;
4254 if (i == 2 || i == 3 || !s->first_slice_line)
4255 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4256 if (i == 1 || i == 3 || s->mb_x)
4257 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4259 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4260 (i & 4) ? v->codingset2 : v->codingset);
4261 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4263 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4265 for (j = 0; j < 64; j++)
4266 s->block[i][j] <<= 1;
4267 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4269 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4270 first_block, s->dest[dst_idx] + off,
4271 (i & 4) ? s->uvlinesize : s->linesize,
4272 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4273 if (!v->ttmbf && ttmb < 8)
4280 /** Decode one B-frame MB (in interlaced field B picture)
4282 static void vc1_decode_b_mb_intfi(VC1Context *v)
4284 MpegEncContext *s = &v->s;
4285 GetBitContext *gb = &s->gb;
4287 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4288 int cbp = 0; /* cbp decoding stuff */
4289 int mqdiff, mquant; /* MB quantization */
4290 int ttmb = v->ttfrm; /* MB Transform type */
4291 int mb_has_coeffs = 0; /* last_flag */
4292 int val; /* temp value */
4293 int first_block = 1;
4296 int dmv_x[2], dmv_y[2], pred_flag[2];
4297 int bmvtype = BMV_TYPE_BACKWARD;
4298 int idx_mbmode, interpmvp;
4300 mquant = v->pq; /* Loosy initialization */
4303 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4304 if (idx_mbmode <= 1) { // intra MB
4305 s->mb_intra = v->is_intra[s->mb_x] = 1;
4306 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4307 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4308 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4310 s->current_picture.f.qscale_table[mb_pos] = mquant;
4311 /* Set DC scale - y and c use the same (not sure if necessary here) */
4312 s->y_dc_scale = s->y_dc_scale_table[mquant];
4313 s->c_dc_scale = s->c_dc_scale_table[mquant];
4314 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4315 mb_has_coeffs = idx_mbmode & 1;
4317 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4319 for (i = 0; i < 6; i++) {
4320 s->dc_val[0][s->block_index[i]] = 0;
4322 val = ((cbp >> (5 - i)) & 1);
4323 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4324 v->a_avail = v->c_avail = 0;
4325 if (i == 2 || i == 3 || !s->first_slice_line)
4326 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4327 if (i == 1 || i == 3 || s->mb_x)
4328 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4330 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4331 (i & 4) ? v->codingset2 : v->codingset);
4332 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4334 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4336 for (j = 0; j < 64; j++)
4337 s->block[i][j] <<= 1;
4338 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4339 off += v->second_field ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4340 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4341 // TODO: yet to perform loop filter
4344 s->mb_intra = v->is_intra[s->mb_x] = 0;
4345 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4346 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4348 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4350 fwd = v->forward_mb_plane[mb_pos];
4351 if (idx_mbmode <= 5) { // 1-MV
4352 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4353 pred_flag[0] = pred_flag[1] = 0;
4355 bmvtype = BMV_TYPE_FORWARD;
4357 bmvtype = decode012(gb);
4360 bmvtype = BMV_TYPE_BACKWARD;
4363 bmvtype = BMV_TYPE_DIRECT;
4366 bmvtype = BMV_TYPE_INTERPOLATED;
4367 interpmvp = get_bits1(gb);
4370 v->bmvtype = bmvtype;
4371 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4372 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4374 if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4375 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4377 if (bmvtype == BMV_TYPE_DIRECT) {
4378 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4379 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4381 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4382 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4383 mb_has_coeffs = !(idx_mbmode & 2);
4386 bmvtype = BMV_TYPE_FORWARD;
4387 v->bmvtype = bmvtype;
4388 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4389 for (i = 0; i < 6; i++) {
4391 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4392 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4393 val = ((v->fourmvbp >> (3 - i)) & 1);
4395 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4396 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4397 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4399 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4400 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4402 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4404 mb_has_coeffs = idx_mbmode & 1;
4407 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4411 s->current_picture.f.qscale_table[mb_pos] = mquant;
4412 if (!v->ttmbf && cbp) {
4413 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4416 for (i = 0; i < 6; i++) {
4417 s->dc_val[0][s->block_index[i]] = 0;
4419 val = ((cbp >> (5 - i)) & 1);
4420 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4421 if (v->second_field)
4422 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4424 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4425 first_block, s->dest[dst_idx] + off,
4426 (i & 4) ? s->uvlinesize : s->linesize,
4427 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4428 if (!v->ttmbf && ttmb < 8)
4436 /** Decode blocks of I-frame
 * Simple/Main-profile I-frame macroblock loop: selects AC coding sets,
 * decodes each intra MB, inverse-transforms, reconstructs and applies the
 * overlap/loop filters. NOTE(review): closing comment delimiter restored;
 * the extracted listing dropped it.
 */
4438 static void vc1_decode_i_blocks(VC1Context *v)
4441 MpegEncContext *s = &v->s;
/* Choose luma/chroma AC coefficient VLC coding sets from picture-level indices. */
4446 /* select codingmode used for VLC tables selection */
4447 switch (v->y_ac_table_index) {
4449 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4452 v->codingset = CS_HIGH_MOT_INTRA;
4455 v->codingset = CS_MID_RATE_INTRA;
4459 switch (v->c_ac_table_index) {
4461 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4464 v->codingset2 = CS_HIGH_MOT_INTER;
4467 v->codingset2 = CS_MID_RATE_INTER;
4471 /* Set DC scale - y and c use the same */
4472 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4473 s->c_dc_scale = s->c_dc_scale_table[v->pq];
4476 s->mb_x = s->mb_y = 0;
4478 s->first_slice_line = 1;
/* Macroblock raster scan over the whole frame. */
4479 for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4481 ff_init_block_index(s);
4482 for (; s->mb_x < s->mb_width; s->mb_x++) {
4484 ff_update_block_index(s);
/* Destination pointers: four 8x8 luma blocks then the two chroma blocks. */
4485 dst[0] = s->dest[0];
4486 dst[1] = dst[0] + 8;
4487 dst[2] = s->dest[0] + s->linesize * 8;
4488 dst[3] = dst[2] + 8;
4489 dst[4] = s->dest[1];
4490 dst[5] = s->dest[2];
4491 s->dsp.clear_blocks(s->block[0]);
4492 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4493 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4494 s->current_picture.f.qscale_table[mb_pos] = v->pq;
4495 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4496 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4498 // do actual MB decoding and displaying
4499 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4500 v->s.ac_pred = get_bits1(&v->s.gb);
4502 for (k = 0; k < 6; k++) {
4503 val = ((cbp >> (5 - k)) & 1);
/* Luma blocks (k < 4) predict their coded flag from neighbours via
 * vc1_coded_block_pred; the possibly-corrected bit is folded back into cbp. */
4506 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4510 cbp |= val << (5 - k);
4512 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
/* Skip chroma reconstruction entirely in grayscale-only mode. */
4514 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4516 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
/* Overlap path (pq >= 9): signed output; otherwise re-bias and clamp. */
4517 if (v->pq >= 9 && v->overlap) {
4519 for (j = 0; j < 64; j++)
4520 s->block[k][j] <<= 1;
4521 s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4524 for (j = 0; j < 64; j++)
4525 s->block[k][j] = (s->block[k][j] - 64) << 1;
4526 s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
/* In-loop overlap smoothing across 8x8 edges; vertical pass only after
 * the first slice line (needs the row above). */
4530 if (v->pq >= 9 && v->overlap) {
4532 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4533 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4534 if (!(s->flags & CODEC_FLAG_GRAY)) {
4535 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4536 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4539 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4540 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4541 if (!s->first_slice_line) {
4542 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4543 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4544 if (!(s->flags & CODEC_FLAG_GRAY)) {
4545 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4546 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4549 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4550 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4552 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* Bitstream sanity: report over-consumption to the error-resilience layer. */
4554 if (get_bits_count(&s->gb) > v->bits) {
4555 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4556 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4557 get_bits_count(&s->gb), v->bits);
/* Without a loop filter rows can be drawn immediately; with one, draw lags
 * a row so the filter has finished with it. */
4561 if (!v->s.loop_filter)
4562 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4564 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4566 s->first_slice_line = 0;
4568 if (v->s.loop_filter)
4569 ff_draw_horiz_band(s, (s->mb_height - 1) * 16, 16);
4570 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4573 /** Decode blocks of I-frame for advanced profile
 * Like vc1_decode_i_blocks() but slice-aware (start_mb_y/end_mb_y), with
 * raw bitplane reads (fieldtx/acpred/overflags), per-MB quantizer, and
 * delayed smoothing/output via the v->block ring. NOTE(review): closing
 * comment delimiter restored; the extracted listing dropped it.
 */
4575 static void vc1_decode_i_blocks_adv(VC1Context *v)
4578 MpegEncContext *s = &v->s;
4584 GetBitContext *gb = &s->gb;
4586 /* select codingmode used for VLC tables selection */
4587 switch (v->y_ac_table_index) {
4589 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4592 v->codingset = CS_HIGH_MOT_INTRA;
4595 v->codingset = CS_MID_RATE_INTRA;
4599 switch (v->c_ac_table_index) {
4601 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4604 v->codingset2 = CS_HIGH_MOT_INTER;
4607 v->codingset2 = CS_MID_RATE_INTER;
4612 s->mb_x = s->mb_y = 0;
4614 s->first_slice_line = 1;
4615 s->mb_y = s->start_mb_y;
/* Mid-frame slice start: clear coded-block predictors of the row above so
 * DC/CBP prediction does not leak across the slice boundary. */
4616 if (s->start_mb_y) {
4618 ff_init_block_index(s);
4619 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4620 (1 + s->b8_stride) * sizeof(*s->coded_block));
4622 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4624 ff_init_block_index(s);
4625 for (;s->mb_x < s->mb_width; s->mb_x++) {
/* Decode into the current slot of the delayed-output block ring. */
4626 DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
4627 ff_update_block_index(s);
4628 s->dsp.clear_blocks(block[0]);
4629 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4630 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4631 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4632 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4634 // do actual MB decoding and displaying
/* Raw (non-bitplane-coded) flags are read inline from the MB layer. */
4635 if (v->fieldtx_is_raw)
4636 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4637 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4638 if ( v->acpred_is_raw)
4639 v->s.ac_pred = get_bits1(&v->s.gb);
4641 v->s.ac_pred = v->acpred_plane[mb_pos];
4643 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4644 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4648 s->current_picture.f.qscale_table[mb_pos] = mquant;
4649 /* Set DC scale - y and c use the same */
4650 s->y_dc_scale = s->y_dc_scale_table[mquant];
4651 s->c_dc_scale = s->c_dc_scale_table[mquant];
4653 for (k = 0; k < 6; k++) {
4654 val = ((cbp >> (5 - k)) & 1);
4657 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4661 cbp |= val << (5 - k);
/* Neighbour availability for AC prediction: above except on the first
 * slice line (inner luma blocks 2/3 always have one), left except in
 * column 0 (inner luma blocks 1/3 always have one). */
4663 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4664 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4666 vc1_decode_i_block_adv(v, block[k], k, val,
4667 (k < 4) ? v->codingset : v->codingset2, mquant);
4669 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4671 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
/* Output is delayed: smoothing and clamped writes act on earlier ring slots. */
4674 vc1_smooth_overlap_filter_iblk(v);
4675 vc1_put_signed_blocks_clamped(v);
4676 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4678 if (get_bits_count(&s->gb) > v->bits) {
4679 // TODO: may need modification to handle slice coding
4680 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4681 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4682 get_bits_count(&s->gb), v->bits);
4686 if (!v->s.loop_filter)
4687 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4689 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4690 s->first_slice_line = 0;
4693 /* raw bottom MB row */
/* Flush the delayed-output pipeline for the final MB row. */
4695 ff_init_block_index(s);
4696 for (;s->mb_x < s->mb_width; s->mb_x++) {
4697 ff_update_block_index(s);
4698 vc1_put_signed_blocks_clamped(v);
4699 if (v->s.loop_filter)
4700 vc1_loop_filter_iblk_delayed(v, v->pq);
4702 if (v->s.loop_filter)
4703 ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
4704 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4705 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/**
 * Decode all macroblocks of a P-frame (progressive, interlaced-frame or
 * interlaced-field), applying the in-loop deblocking filter one MB row
 * behind decoding when enabled.
 */
4708 static void vc1_decode_p_blocks(VC1Context *v)
4710 MpegEncContext *s = &v->s;
4711 int apply_loop_filter;
4713 /* select codingmode used for VLC tables selection */
/* NOTE(review): both switches key off c_ac_table_index (not y_ac_table_index
 * for the first one) — verify against the reference decoder before changing. */
4714 switch (v->c_ac_table_index) {
4716 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4719 v->codingset = CS_HIGH_MOT_INTRA;
4722 v->codingset = CS_MID_RATE_INTRA;
4726 switch (v->c_ac_table_index) {
4728 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4731 v->codingset2 = CS_HIGH_MOT_INTER;
4734 v->codingset2 = CS_MID_RATE_INTER;
/* Loop filter is skipped entirely when the user discards non-key filtering. */
4738 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4739 s->first_slice_line = 1;
4740 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4741 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4743 ff_init_block_index(s);
4744 for (; s->mb_x < s->mb_width; s->mb_x++) {
4745 ff_update_block_index(s);
/* Dispatch on frame coding mode: field / frame interlaced / progressive. */
4747 if (v->fcm == ILACE_FIELD)
4748 vc1_decode_p_mb_intfi(v);
4749 else if (v->fcm == ILACE_FRAME)
4750 vc1_decode_p_mb_intfr(v);
4751 else vc1_decode_p_mb(v);
/* Filtering runs one row late so the row below is fully decoded. */
4752 if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
4753 vc1_apply_p_loop_filter(v);
4754 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4755 // TODO: may need modification to handle slice coding
4756 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4757 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4758 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
/* Shift per-row prediction context (cbp/ttblk/intra/luma MV) up one row. */
4762 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4763 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4764 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4765 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4766 if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4767 s->first_slice_line = 0;
/* Final pass: filter the last decoded MB row that the lagged filter missed. */
4769 if (apply_loop_filter) {
4771 ff_init_block_index(s);
4772 for (; s->mb_x < s->mb_width; s->mb_x++) {
4773 ff_update_block_index(s);
4774 vc1_apply_p_loop_filter(v);
4777 if (s->end_mb_y >= s->start_mb_y)
4778 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4779 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4780 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/**
 * Decode all macroblocks of a B-frame; dispatches to the interlaced-field
 * MB decoder when fcm == ILACE_FIELD (other paths are outside this span
 * in the extracted listing).
 */
4783 static void vc1_decode_b_blocks(VC1Context *v)
4785 MpegEncContext *s = &v->s;
4787 /* select codingmode used for VLC tables selection */
4788 switch (v->c_ac_table_index) {
4790 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4793 v->codingset = CS_HIGH_MOT_INTRA;
4796 v->codingset = CS_MID_RATE_INTRA;
4800 switch (v->c_ac_table_index) {
4802 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4805 v->codingset2 = CS_HIGH_MOT_INTER;
4808 v->codingset2 = CS_MID_RATE_INTER;
4812 s->first_slice_line = 1;
4813 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4815 ff_init_block_index(s);
4816 for (; s->mb_x < s->mb_width; s->mb_x++) {
4817 ff_update_block_index(s);
4819 if (v->fcm == ILACE_FIELD)
4820 vc1_decode_b_mb_intfi(v);
/* Bitstream sanity: abort the slice on over- or under-run. */
4823 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4824 // TODO: may need modification to handle slice coding
4825 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4826 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4827 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4830 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* Draw immediately without a loop filter; otherwise lag by one MB row. */
4832 if (!v->s.loop_filter)
4833 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4835 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4836 s->first_slice_line = 0;
4838 if (v->s.loop_filter)
4839 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4840 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4841 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/**
 * Handle a skipped P-frame: copy the previous picture row by row into the
 * current destination and mark the whole range as successfully decoded.
 */
4844 static void vc1_decode_skip_blocks(VC1Context *v)
4846 MpegEncContext *s = &v->s;
4848 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
4849 s->first_slice_line = 1;
4850 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4852 ff_init_block_index(s);
4853 ff_update_block_index(s);
/* One 16-line luma band and two 8-line chroma bands per MB row. */
4854 memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4855 memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4856 memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4857 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4858 s->first_slice_line = 0;
/* Skipped frames are emitted as P-type pictures. */
4860 s->pict_type = AV_PICTURE_TYPE_P;
/**
 * Top-level block-layer dispatcher: picks the I/P/B decoding routine based
 * on picture type and profile (advanced vs. simple/main). BI pictures reuse
 * the I paths; skipped P-frames are copied from the reference.
 */
4863 static void vc1_decode_blocks(VC1Context *v)
4866 v->s.esc3_level_length = 0;
/* X8-coded intra frames are decoded entirely by the IntraX8 module. */
4868 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
/* Reset the delayed-output block ring indices for this picture. */
4871 v->left_blk_idx = -1;
4872 v->topleft_blk_idx = 1;
4874 switch (v->s.pict_type) {
4875 case AV_PICTURE_TYPE_I:
4876 if (v->profile == PROFILE_ADVANCED)
4877 vc1_decode_i_blocks_adv(v);
4879 vc1_decode_i_blocks(v);
4881 case AV_PICTURE_TYPE_P:
4882 if (v->p_frame_skipped)
4883 vc1_decode_skip_blocks(v);
4885 vc1_decode_p_blocks(v);
4887 case AV_PICTURE_TYPE_B:
/* BI pictures (bi_type set elsewhere) are decoded as intra frames. */
4889 if (v->profile == PROFILE_ADVANCED)
4890 vc1_decode_i_blocks_adv(v);
4892 vc1_decode_i_blocks(v);
4894 vc1_decode_b_blocks(v);
4900 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
4904 * Transform coefficients for both sprites in 16.16 fixed point format,
4905 * in the order they appear in the bitstream:
4907 * rotation 1 (unused)
4909 * rotation 2 (unused)
4916 int effect_type, effect_flag;
4917 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
4918 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
4921 static inline int get_fp_val(GetBitContext* gb)
4923 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
4926 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4930 switch (get_bits(gb, 2)) {
4933 c[2] = get_fp_val(gb);
4937 c[0] = c[4] = get_fp_val(gb);
4938 c[2] = get_fp_val(gb);
4941 c[0] = get_fp_val(gb);
4942 c[2] = get_fp_val(gb);
4943 c[4] = get_fp_val(gb);
4946 c[0] = get_fp_val(gb);
4947 c[1] = get_fp_val(gb);
4948 c[2] = get_fp_val(gb);
4949 c[3] = get_fp_val(gb);
4950 c[4] = get_fp_val(gb);
4953 c[5] = get_fp_val(gb);
4955 c[6] = get_fp_val(gb);
/**
 * Parse the sprite headers of a WMV Image frame: one or two affine
 * transforms, optional effect parameters, and a trailing effect flag.
 * Emits debug logs of every parsed value and validates buffer consumption.
 */
4960 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4962 AVCodecContext *avctx = v->s.avctx;
/* One transform per sprite (two when two_sprites is set). */
4965 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4966 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
/* Rotation terms c[1]/c[3] are unsupported; ask for a sample if seen. */
4967 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4968 av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4969 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
4970 for (i = 0; i < 7; i++)
4971 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4972 sd->coefs[sprite][i] / (1<<16),
4973 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4974 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Intentional assignment-in-condition: effect_type == 0 means no effect. */
4978 if (sd->effect_type = get_bits_long(gb, 30)) {
4979 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
4981 vc1_sprite_parse_transform(gb, sd->effect_params1);
4984 vc1_sprite_parse_transform(gb, sd->effect_params1);
4985 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
4988 for (i = 0; i < sd->effect_pcount1; i++)
4989 sd->effect_params1[i] = get_fp_val(gb);
4991 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
4992 // effect 13 is simple alpha blending and matches the opacity above
4993 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
4994 for (i = 0; i < sd->effect_pcount1; i++)
4995 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
4996 sd->effect_params1[i] / (1 << 16),
4997 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
4998 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Second parameter list is bounded by the effect_params2[10] array size. */
5001 sd->effect_pcount2 = get_bits(gb, 16);
5002 if (sd->effect_pcount2 > 10) {
5003 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5005 } else if (sd->effect_pcount2) {
5007 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5008 while (++i < sd->effect_pcount2) {
5009 sd->effect_params2[i] = get_fp_val(gb);
5010 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5011 sd->effect_params2[i] / (1 << 16),
5012 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5014 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Intentional assignment-in-condition, as above. */
5017 if (sd->effect_flag = get_bits1(gb))
5018 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
/* WMV3IMAGE is allowed 64 bits of slack before reporting an overrun. */
5020 if (get_bits_count(gb) >= gb->size_in_bits +
5021 (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
5022 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5023 if (get_bits_count(gb) < gb->size_in_bits - 8)
5024 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
/**
 * Render one or two sprites into sprite_output_frame using the parsed
 * affine transforms: horizontal scaling into cached row buffers, then
 * vertical interpolation and (for two sprites) alpha blending.
 */
5027 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5029 int i, plane, row, sprite;
/* Per-sprite cache of which source lines currently sit in sr_rows[][0/1]. */
5030 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5031 uint8_t* src_h[2][2];
5032 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5034 MpegEncContext *s = &v->s;
/* Clamp offsets/advances (16.16 fixed point) so sampling stays inside the
 * sprite planes. */
5036 for (i = 0; i < 2; i++) {
5037 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5038 xadv[i] = sd->coefs[i][0];
5039 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5040 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5042 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5043 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
/* Blend factor between the two sprites comes from sprite 2's opacity. */
5045 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5047 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5048 int width = v->output_width>>!!plane;
5050 for (row = 0; row < v->output_height>>!!plane; row++) {
5051 uint8_t *dst = v->sprite_output_frame.data[plane] +
5052 v->sprite_output_frame.linesize[plane] * row;
5054 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5055 uint8_t *iplane = s->current_picture.f.data[plane];
5056 int iline = s->current_picture.f.linesize[plane];
/* Source vertical position split into integer line and 16-bit fraction. */
5057 int ycoord = yoff[sprite] + yadv[sprite] * row;
5058 int yline = ycoord >> 16;
5059 ysub[sprite] = ycoord & 0xFFFF;
5061 iplane = s->last_picture.f.data[plane];
5062 iline = s->last_picture.f.linesize[plane];
/* Fast path: unity horizontal scale at integer offset reads the plane
 * directly; otherwise scale into the cached sr_rows buffers. */
5064 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5065 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5067 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + (yline + 1) * iline;
5069 if (sr_cache[sprite][0] != yline) {
/* Reuse the other cached row by swapping instead of rescaling. */
5070 if (sr_cache[sprite][1] == yline) {
5071 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5072 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5074 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5075 sr_cache[sprite][0] = yline;
/* Second row only needed when vertically interpolating (ysub != 0). */
5078 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5079 v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane + (yline + 1) * iline, xoff[sprite], xadv[sprite], width);
5080 sr_cache[sprite][1] = yline + 1;
5082 src_h[sprite][0] = v->sr_rows[sprite][0];
5083 src_h[sprite][1] = v->sr_rows[sprite][1];
/* Vertical pass: single sprite interpolates or copies; two sprites pick
 * the blend kernel by which of them need vertical interpolation. */
5087 if (!v->two_sprites) {
5089 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5091 memcpy(dst, src_h[0][0], width);
5094 if (ysub[0] && ysub[1]) {
5095 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5096 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5097 } else if (ysub[0]) {
5098 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5099 src_h[1][0], alpha, width);
5100 } else if (ysub[1]) {
5101 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5102 src_h[0][0], (1<<16)-1-alpha, width);
5104 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5110 for (i = 0; i < 2; i++) {
/**
 * Decode a sprite (WMV Image) frame: parse sprite headers, validate that
 * the required reference pictures exist, (re)acquire the output buffer and
 * render the sprites into it.
 */
5120 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5122 MpegEncContext *s = &v->s;
5123 AVCodecContext *avctx = s->avctx;
5126 vc1_parse_sprites(v, gb, &sd);
/* At least one decoded picture must exist to act as sprite source. */
5128 if (!s->current_picture.f.data[0]) {
5129 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
/* Two-sprite mode additionally needs the previous picture. */
5133 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5134 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
/* Release any previous output frame before requesting a fresh one. */
5138 if (v->sprite_output_frame.data[0])
5139 avctx->release_buffer(avctx, &v->sprite_output_frame);
5141 v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
5142 v->sprite_output_frame.reference = 0;
5143 if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
5144 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5148 vc1_draw_sprites(v, &sd);
/**
 * Flush callback for the WMV Image codecs: blank the current picture
 * (luma to 0, chroma to 128) so a missing sprite shows as black rather
 * than stale data after a seek.
 */
5153 static void vc1_sprite_flush(AVCodecContext *avctx)
5155 VC1Context *v = avctx->priv_data;
5156 MpegEncContext *s = &v->s;
5157 AVFrame *f = &s->current_picture.f;
5160 /* Windows Media Image codecs have a convergence interval of two keyframes.
5161 Since we can't enforce it, clear to black the missing sprite. This is
5162 wrong but it looks better than doing nothing. */
5165 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5166 for (i = 0; i < v->sprite_height>>!!plane; i++)
5167 memset(f->data[plane] + i * f->linesize[plane],
5168 plane ? 128 : 0, f->linesize[plane]);
/**
 * Allocate all per-picture side tables: MB bitplanes, the delayed-output
 * block ring, per-row prediction context, block-type arrays and field-MV
 * bookkeeping. Allocation failures are checked collectively at the end.
 */
5173 static av_cold int vc1_decode_init_alloc_tables(VC1Context *v)
5175 MpegEncContext *s = &v->s;
5178 /* Allocate mb bitplanes */
5179 v->mv_type_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5180 v->direct_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5181 v->forward_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5182 v->fieldtx_plane = av_mallocz(s->mb_stride * s->mb_height);
5183 v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5184 v->over_flags_plane = av_malloc (s->mb_stride * s->mb_height);
/* Block ring: one MB per column plus two slots of look-behind. */
5186 v->n_allocated_blks = s->mb_width + 2;
5187 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
/* Two rows each (current + previous) with *_base pointing at the pair and
 * the working pointer one stride in. */
5188 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5189 v->cbp = v->cbp_base + s->mb_stride;
5190 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5191 v->ttblk = v->ttblk_base + s->mb_stride;
5192 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5193 v->is_intra = v->is_intra_base + s->mb_stride;
5194 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5195 v->luma_mv = v->luma_mv_base + s->mb_stride;
5197 /* allocate block type info in that way so it could be used with s->block_index[] */
5198 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5199 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5200 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5201 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5203 /* allocate memory to store block level MV info */
5204 v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5205 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5206 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5207 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5208 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5209 v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5210 v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5211 v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5212 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5213 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5214 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5216 /* Init coded blocks info */
5217 if (v->profile == PROFILE_ADVANCED) {
5218 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5220 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5224 ff_intrax8_common_init(&v->x8,s);
/* Sprite codecs need four horizontal-scale row buffers (2 sprites x 2 rows). */
5226 if (s->avctx->codec_id == CODEC_ID_WMV3IMAGE || s->avctx->codec_id == CODEC_ID_VC1IMAGE) {
5227 for (i = 0; i < 4; i++)
5228 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
/* Collective NULL check of every unchecked allocation above. */
5231 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5232 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5239 /** Initialize a VC1/WMV3 decoder
5240 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5241 * @todo TODO: Decypher remaining bits in extra_data
 * NOTE(review): closing comment delimiter restored; the extracted listing
 * dropped it.
 */
5243 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5245 VC1Context *v = avctx->priv_data;
5246 MpegEncContext *s = &v->s;
5250 /* save the container output size for WMImage */
5251 v->output_width = avctx->width;
5252 v->output_height = avctx->height;
/* A sequence header in extradata is mandatory for all supported variants. */
5254 if (!avctx->extradata_size || !avctx->extradata)
5256 if (!(avctx->flags & CODEC_FLAG_GRAY))
5257 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5259 avctx->pix_fmt = PIX_FMT_GRAY8;
5260 avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
5262 avctx->flags |= CODEC_FLAG_EMU_EDGE;
5263 v->s.flags |= CODEC_FLAG_EMU_EDGE;
5265 if (avctx->idct_algo == FF_IDCT_AUTO) {
5266 avctx->idct_algo = FF_IDCT_WMV2;
5269 if (vc1_init_common(v) < 0)
5271 ff_vc1dsp_init(&v->vc1dsp);
/* WMV3 variants: the extradata is a bare sequence header. */
5273 if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE) {
5276 // looks like WMV3 has a sequence header stored in the extradata
5277 // advanced sequence header may be before the first frame
5278 // the last byte of the extradata is a version number, 1 for the
5279 // samples we can decode
5281 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5283 if (vc1_decode_sequence_header(avctx, v, &gb) < 0)
5286 count = avctx->extradata_size*8 - get_bits_count(&gb);
5288 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5289 count, get_bits(&gb, count));
5290 } else if (count < 0) {
5291 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
/* Advanced-profile variants: extradata holds marker-delimited, escaped
 * sequence-header and entry-point units that must be unescaped first. */
5293 } else { // VC1/WVC1/WVP2
5294 const uint8_t *start = avctx->extradata;
5295 uint8_t *end = avctx->extradata + avctx->extradata_size;
5296 const uint8_t *next;
5297 int size, buf2_size;
5298 uint8_t *buf2 = NULL;
5299 int seq_initialized = 0, ep_initialized = 0;
5301 if (avctx->extradata_size < 16) {
5302 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5306 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5307 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5309 for (; next < end; start = next) {
5310 next = find_next_marker(start + 4, end);
5311 size = next - start - 4;
5314 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5315 init_get_bits(&gb, buf2, buf2_size * 8);
5316 switch (AV_RB32(start)) {
5317 case VC1_CODE_SEQHDR:
5318 if (vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5322 seq_initialized = 1;
5324 case VC1_CODE_ENTRYPOINT:
5325 if (vc1_decode_entry_point(avctx, v, &gb) < 0) {
/* Both a sequence header and an entry point are required. */
5334 if (!seq_initialized || !ep_initialized) {
5335 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5338 v->res_sprite = (avctx->codec_tag == MKTAG('W','V','P','2'));
5341 avctx->profile = v->profile;
5342 if (v->profile == PROFILE_ADVANCED)
5343 avctx->level = v->level;
5345 avctx->has_b_frames = !!avctx->max_b_frames;
5347 s->mb_width = (avctx->coded_width + 15) >> 4;
5348 s->mb_height = (avctx->coded_height + 15) >> 4;
/* Advanced/fasttx decoders use transposed scan tables. */
5350 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5351 for (i = 0; i < 64; i++) {
5352 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5353 v->zz_8x8[0][i] = transpose(wmv1_scantable[0][i]);
5354 v->zz_8x8[1][i] = transpose(wmv1_scantable[1][i]);
5355 v->zz_8x8[2][i] = transpose(wmv1_scantable[2][i]);
5356 v->zz_8x8[3][i] = transpose(wmv1_scantable[3][i]);
5357 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5362 memcpy(v->zz_8x8, wmv1_scantable, 4*64);
/* Sprite codecs decode at the coded size but output at the container size. */
5367 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5368 v->sprite_width = avctx->coded_width;
5369 v->sprite_height = avctx->coded_height;
5371 avctx->coded_width = avctx->width = v->output_width;
5372 avctx->coded_height = avctx->height = v->output_height;
5374 // prevent 16.16 overflows
5375 if (v->sprite_width > 1 << 14 ||
5376 v->sprite_height > 1 << 14 ||
5377 v->output_width > 1 << 14 ||
5378 v->output_height > 1 << 14) return -1;
5383 /** Close a VC1/WMV3 decoder
5384 * @warning Initial try at using MpegEncContext stuff
 * Releases the sprite output frame, all side tables allocated in
 * vc1_decode_init_alloc_tables() and the shared MPEG context.
 * NOTE(review): closing comment delimiter restored; the extracted listing
 * dropped it.
 */
5386 static av_cold int vc1_decode_end(AVCodecContext *avctx)
5388 VC1Context *v = avctx->priv_data;
5391 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5392 && v->sprite_output_frame.data[0])
5393 avctx->release_buffer(avctx, &v->sprite_output_frame);
/* Sprite horizontal-scale row buffers (av_freep is NULL-safe). */
5394 for (i = 0; i < 4; i++)
5395 av_freep(&v->sr_rows[i >> 1][i & 1]);
5396 av_freep(&v->hrd_rate);
5397 av_freep(&v->hrd_buffer);
5398 MPV_common_end(&v->s);
5399 av_freep(&v->mv_type_mb_plane);
5400 av_freep(&v->direct_mb_plane);
5401 av_freep(&v->forward_mb_plane);
5402 av_freep(&v->fieldtx_plane);
5403 av_freep(&v->acpred_plane);
5404 av_freep(&v->over_flags_plane);
5405 av_freep(&v->mb_type_base);
5406 av_freep(&v->blk_mv_type_base);
5407 av_freep(&v->mv_f_base);
5408 av_freep(&v->mv_f_last_base);
5409 av_freep(&v->mv_f_next_base);
5410 av_freep(&v->block);
5411 av_freep(&v->cbp_base);
5412 av_freep(&v->ttblk_base);
5413 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5414 av_freep(&v->luma_mv_base);
5415 ff_intrax8_common_end(&v->x8);
5420 /** Decode a VC1/WMV3 frame
5421 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5423 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5424 int *data_size, AVPacket *avpkt)
5426 const uint8_t *buf = avpkt->data;
5427 int buf_size = avpkt->size, n_slices = 0, i;
5428 VC1Context *v = avctx->priv_data;
5429 MpegEncContext *s = &v->s;
5430 AVFrame *pict = data;
5431 uint8_t *buf2 = NULL;
5432 const uint8_t *buf_start = buf;
5433 int mb_height, n_slices1=-1;
/* Per-slice record: an unescaped bitstream copy, a GetBitContext over it,
 * and the starting macroblock row (mby_start) of the slice. */
5438 } *slices = NULL, *tmp;
5440 if(s->flags & CODEC_FLAG_LOW_DELAY)
5443 /* no supplementary picture */
5444 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5445 /* special case for last picture: on EOS/flush, emit the pending
5446  * delayed picture (next_picture_ptr) if reordering is in effect */
5446 if (s->low_delay == 0 && s->next_picture_ptr) {
5447 *pict = *(AVFrame*)s->next_picture_ptr;
5448 s->next_picture_ptr = NULL;
5450 *data_size = sizeof(AVFrame);
/* VDPAU hardware decoding needs a dedicated pix_fmt per profile family. */
5456 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5457 if (v->profile < PROFILE_ADVANCED)
5458 avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
5460 avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
5463 //for advanced profile we may need to parse and unescape data
5464 if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
/* buf2 receives the unescaped (startcode-emulation-free) payload. */
5466 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5468 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5469 const uint8_t *start, *end, *next;
/* Walk the packet marker by marker; each iteration covers one bitstream
 * data unit delimited by 4-byte startcodes. */
5473 for (start = buf, end = buf + buf_size; next < end; start = next) {
5474 next = find_next_marker(start + 4, end);
5475 size = next - start - 4;
5476 if (size <= 0) continue;
5477 switch (AV_RB32(start)) {
5478 case VC1_CODE_FRAME:
/* For hardware decoding the raw (escaped) frame data is passed on
 * as-is, so skip the software unescape for this unit. */
5479 if (avctx->hwaccel ||
5480 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5482 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5484 case VC1_CODE_FIELD: {
/* Second field of an interlaced frame: stored as an extra slice entry. */
5486 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5489 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5490 if (!slices[n_slices].buf)
5492 buf_size3 = vc1_unescape_buffer(start + 4, size,
5493 slices[n_slices].buf);
5494 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5496 /* assuming that the field marker is at the exact middle,
5497 hope it's correct */
5498 slices[n_slices].mby_start = s->mb_height >> 1;
5499 n_slices1 = n_slices - 1; // index of the last slice of the first field
5503 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5504 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5505 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5506 vc1_decode_entry_point(avctx, v, &s->gb);
5508 case VC1_CODE_SLICE: {
/* Regular slice: its starting MB row is coded in 9 bits at the
 * beginning of the slice header. */
5510 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5513 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5514 if (!slices[n_slices].buf)
5516 buf_size3 = vc1_unescape_buffer(start + 4, size,
5517 slices[n_slices].buf);
5518 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5520 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5526 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5527 const uint8_t *divider;
/* Locate the VC1_CODE_FIELD marker that separates the two fields. */
5530 divider = find_next_marker(buf, buf + buf_size);
5531 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5532 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5534 } else { // found field marker, unescape second field
5535 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5539 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5540 if (!slices[n_slices].buf)
5542 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5543 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5545 slices[n_slices].mby_start = s->mb_height >> 1;
5546 n_slices1 = n_slices - 1;
/* First field (everything before the divider) is unescaped into buf2. */
5549 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5551 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5553 init_get_bits(&s->gb, buf2, buf_size2*8);
/* Simple/main profile: no escaping; read the packet data directly. */
5555 init_get_bits(&s->gb, buf, buf_size*8);
/* res_sprite == WMVP/WVP2 stream: two extra header bits select whether a
 * new sprite starts and whether two sprites are blended. */
5557 if (v->res_sprite) {
5558 v->new_sprite = !get_bits1(&s->gb);
5559 v->two_sprites = get_bits1(&s->gb);
5560 /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
5561 we're using the sprite compositor. These are intentionally kept separate
5562 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5563 the vc1 one for WVP2 */
5564 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5565 if (v->new_sprite) {
5566 // switch AVCodecContext parameters to those of the sprites
5567 avctx->width = avctx->coded_width = v->sprite_width;
5568 avctx->height = avctx->coded_height = v->sprite_height;
/* Dimensions changed (e.g. sprite vs. output size): tear down and
 * re-create the MpegEncContext-based decoding state. */
5575 if (s->context_initialized &&
5576 (s->width != avctx->coded_width ||
5577 s->height != avctx->coded_height)) {
5578 vc1_decode_end(avctx);
5581 if (!s->context_initialized) {
5582 if (ff_msmpeg4_decode_init(avctx) < 0 || vc1_decode_init_alloc_tables(v) < 0)
5585 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5587 if (v->profile == PROFILE_ADVANCED) {
5588 s->h_edge_pos = avctx->coded_width;
5589 s->v_edge_pos = avctx->coded_height;
5593 /* We need to set current_picture_ptr before reading the header,
5594 * otherwise we cannot store anything in there. */
5595 if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5596 int i = ff_find_unused_picture(s, 0);
5599 s->current_picture_ptr = &s->picture[i];
5602 // do parse frame header
5603 v->pic_header_flag = 0;
5604 if (v->profile < PROFILE_ADVANCED) {
5605 if (vc1_parse_frame_header(v, &s->gb) == -1) {
5609 if (vc1_parse_frame_header_adv(v, &s->gb) == -1) {
/* The sprite compositor only accepts intra pictures. */
5614 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5615 && s->pict_type != AV_PICTURE_TYPE_I) {
5616 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5620 // process pulldown flags
5621 s->current_picture_ptr->f.repeat_pict = 0;
5622 // Pulldown flags are only valid when 'broadcast' has been set.
5623 // So ticks_per_frame will be 2
5626 s->current_picture_ptr->f.repeat_pict = 1;
5627 } else if (v->rptfrm) {
/* rptfrm counts whole extra frames; repeat_pict is in field units. */
5629 s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5632 // for skipping the frame
5633 s->current_picture.f.pict_type = s->pict_type;
5634 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5636 /* skip B-frames if we don't have reference frames */
5637 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) {
/* Honor the user's frame-skipping policy (AVDISCARD_*). */
5640 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5641 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5642 avctx->skip_frame >= AVDISCARD_ALL) {
5646 if (s->next_p_frame_damaged) {
5647 if (s->pict_type == AV_PICTURE_TYPE_B)
5650 s->next_p_frame_damaged = 0;
5653 if (MPV_frame_start(s, avctx) < 0) {
5657 s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
5658 s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
/* Hardware paths: VDPAU gets the whole (still escaped) picture at once;
 * generic hwaccel uses start_frame/decode_slice/end_frame. Software
 * decoding follows after ff_er_frame_start(). */
5660 if ((CONFIG_VC1_VDPAU_DECODER)
5661 &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5662 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5663 else if (avctx->hwaccel) {
5664 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5666 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5668 if (avctx->hwaccel->end_frame(avctx) < 0)
5671 ff_er_frame_start(s);
5673 v->bits = buf_size * 8;
/* Field pictures: double the linesizes so each field is addressed as a
 * half-height picture, and rotate the mv_f cur/last/next buffers. */
5674 if (v->field_mode) {
5676 s->current_picture.f.linesize[0] <<= 1;
5677 s->current_picture.f.linesize[1] <<= 1;
5678 s->current_picture.f.linesize[2] <<= 1;
5680 s->uvlinesize <<= 1;
5681 tmp[0] = v->mv_f_last[0];
5682 tmp[1] = v->mv_f_last[1];
5683 v->mv_f_last[0] = v->mv_f_next[0];
5684 v->mv_f_last[1] = v->mv_f_next[1];
5685 v->mv_f_next[0] = v->mv_f[0];
5686 v->mv_f_next[1] = v->mv_f[1];
5687 v->mv_f[0] = tmp[0];
5688 v->mv_f[1] = tmp[1];
/* Decode slice by slice; entry i == n_slices covers the trailing region. */
5690 mb_height = s->mb_height >> v->field_mode;
5691 for (i = 0; i <= n_slices; i++) {
/* Slices starting at/after mb_height belong to the second field. */
5692 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5693 v->second_field = 1;
5694 v->blocks_off = s->mb_width * s->mb_height << 1;
5695 v->mb_off = s->mb_stride * s->mb_height >> 1;
5697 v->second_field = 0;
5702 v->pic_header_flag = 0;
/* The second field carries a full picture header; ordinary slices may
 * optionally repeat it (signalled by one bit). */
5703 if (v->field_mode && i == n_slices1 + 2)
5704 vc1_parse_frame_header_adv(v, &s->gb);
5705 else if (get_bits1(&s->gb)) {
5706 v->pic_header_flag = 1;
5707 vc1_parse_frame_header_adv(v, &s->gb);
5710 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5711 if (!v->field_mode || v->second_field)
5712 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5714 s->end_mb_y = (i == n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5715 vc1_decode_blocks(v);
/* Switch the main GetBitContext to the next slice's private buffer. */
5717 s->gb = slices[i].gb;
/* Undo the field-mode linesize doubling; for B-fields, restore the
 * motion-vector table from the next-field copy. */
5719 if (v->field_mode) {
5720 v->second_field = 0;
5721 if (s->pict_type == AV_PICTURE_TYPE_B) {
5722 memcpy(v->mv_f_base, v->mv_f_next_base,
5723 2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5725 s->current_picture.f.linesize[0] >>= 1;
5726 s->current_picture.f.linesize[1] >>= 1;
5727 s->current_picture.f.linesize[2] >>= 1;
5729 s->uvlinesize >>= 1;
5731 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits);
5732 // if (get_bits_count(&s->gb) > buf_size * 8)
5734 if(s->error_occurred && s->pict_type == AV_PICTURE_TYPE_B)
/* Image codecs: run the sprite compositor and output its frame instead of
 * the raw decoded sprite; restore the user-visible output dimensions. */
5741 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5743 avctx->width = avctx->coded_width = v->output_width;
5744 avctx->height = avctx->coded_height = v->output_height;
5745 if (avctx->skip_frame >= AVDISCARD_NONREF)
5747 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5748 if (vc1_decode_sprites(v, &s->gb))
5751 *pict = v->sprite_output_frame;
5752 *data_size = sizeof(AVFrame);
/* Normal output: B-frames and low-delay streams return the current
 * picture; otherwise the (reordered) previous reference is returned. */
5754 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5755 *pict = *(AVFrame*)s->current_picture_ptr;
5756 } else if (s->last_picture_ptr != NULL) {
5757 *pict = *(AVFrame*)s->last_picture_ptr;
5759 if (s->last_picture_ptr || s->low_delay) {
5760 *data_size = sizeof(AVFrame);
5761 ff_print_debug_info(s, pict);
/* Success path cleanup: free the per-slice unescape buffers. */
5767 for (i = 0; i < n_slices; i++)
5768 av_free(slices[i].buf);
/* Error path cleanup mirrors the success path. */
5774 for (i = 0; i < n_slices; i++)
5775 av_free(slices[i].buf);
/* Human-readable names for the VC-1 profiles, exported through the
 * .profiles field of every AVCodec below; the FF_PROFILE_UNKNOWN entry
 * acts as the list terminator. */
5781 static const AVProfile profiles[] = {
5782 { FF_PROFILE_VC1_SIMPLE, "Simple" },
5783 { FF_PROFILE_VC1_MAIN, "Main" },
5784 { FF_PROFILE_VC1_COMPLEX, "Complex" },
5785 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5786 { FF_PROFILE_UNKNOWN },
/* Software decoder registration for SMPTE VC-1 elementary streams.
 * CODEC_CAP_DELAY: the decoder buffers reordered (B) pictures and must be
 * flushed with empty packets at EOF. */
5789 AVCodec ff_vc1_decoder = {
5791 .type = AVMEDIA_TYPE_VIDEO,
5793 .priv_data_size = sizeof(VC1Context),
5794 .init = vc1_decode_init,
5795 .close = vc1_decode_end,
5796 .decode = vc1_decode_frame,
5797 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5798 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5799 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5800 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_WMV3_DECODER
/* Software decoder registration for WMV3 (Windows Media Video 9), which
 * shares this file's decode entry points with the VC-1 decoder. */
5804 AVCodec ff_wmv3_decoder = {
5806 .type = AVMEDIA_TYPE_VIDEO,
5807 .id = CODEC_ID_WMV3,
5808 .priv_data_size = sizeof(VC1Context),
5809 .init = vc1_decode_init,
5810 .close = vc1_decode_end,
5811 .decode = vc1_decode_frame,
5812 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5813 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5814 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5815 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_WMV3_VDPAU_DECODER
/* VDPAU hardware-accelerated variant of the WMV3 decoder: same entry
 * points, but CODEC_CAP_HWACCEL_VDPAU routes picture data to
 * ff_vdpau_vc1_decode_picture() and restricts pix_fmts to the VDPAU one. */
5820 AVCodec ff_wmv3_vdpau_decoder = {
5821 .name = "wmv3_vdpau",
5822 .type = AVMEDIA_TYPE_VIDEO,
5823 .id = CODEC_ID_WMV3,
5824 .priv_data_size = sizeof(VC1Context),
5825 .init = vc1_decode_init,
5826 .close = vc1_decode_end,
5827 .decode = vc1_decode_frame,
5828 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5829 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5830 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE},
5831 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_VC1_VDPAU_DECODER
/* VDPAU hardware-accelerated variant of the VC-1 decoder; mirrors
 * ff_wmv3_vdpau_decoder but advertises the VC-1 VDPAU pixel format. */
5836 AVCodec ff_vc1_vdpau_decoder = {
5837 .name = "vc1_vdpau",
5838 .type = AVMEDIA_TYPE_VIDEO,
5840 .priv_data_size = sizeof(VC1Context),
5841 .init = vc1_decode_init,
5842 .close = vc1_decode_end,
5843 .decode = vc1_decode_frame,
5844 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5845 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5846 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE},
5847 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_WMV3IMAGE_DECODER
/* Windows Media Image (WMVP) decoder: runs the sprite compositor on top
 * of the WMV3 decoding path. Intra-only, so no CODEC_CAP_DELAY; the
 * flush callback resets sprite state on seek. */
5852 AVCodec ff_wmv3image_decoder = {
5853 .name = "wmv3image",
5854 .type = AVMEDIA_TYPE_VIDEO,
5855 .id = CODEC_ID_WMV3IMAGE,
5856 .priv_data_size = sizeof(VC1Context),
5857 .init = vc1_decode_init,
5858 .close = vc1_decode_end,
5859 .decode = vc1_decode_frame,
5860 .capabilities = CODEC_CAP_DR1,
5861 .flush = vc1_sprite_flush,
5862 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5863 .pix_fmts = ff_pixfmt_list_420
5867 #if CONFIG_VC1IMAGE_DECODER
5868 AVCodec ff_vc1image_decoder = {
5870 .type = AVMEDIA_TYPE_VIDEO,
5871 .id = CODEC_ID_VC1IMAGE,
5872 .priv_data_size = sizeof(VC1Context),
5873 .init = vc1_decode_init,
5874 .close = vc1_decode_end,
5875 .decode = vc1_decode_frame,
5876 .capabilities = CODEC_CAP_DR1,
5877 .flush = vc1_sprite_flush,
5878 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5879 .pix_fmts = ff_pixfmt_list_420