2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of FFmpeg.
9 * FFmpeg is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * FFmpeg is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with FFmpeg; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
36 #include "vc1acdata.h"
37 #include "msmpeg4data.h"
39 #include "simple_idct.h"
41 #include "vdpau_internal.h"
/* lookup depth, in bits, of the macroblock-intra VLC table */
#define MB_INTRA_VLC_BITS 9
/* Cumulative start offsets (in table entries) of each static VLC sub-table
 * inside the shared vlc_table[] backing store defined in vc1_init_common().
 * Consecutive differences give each sub-table's allocated size; the final
 * value (32372) matches the declared size of vlc_table[]. */
static const uint16_t vlc_offs[] = {
        0,   520,   552,   616,  1128,  1160,  1224,  1740,  1772,  1836,  1900,  2436,
     2986,  3050,  3610,  4154,  4218,  4746,  5326,  5390,  5902,  6554,  7658,  8342,
     9304,  9988, 10630, 11234, 12174, 13006, 13560, 14232, 14786, 15432, 16350, 17522,
    20372, 21818, 22330, 22394, 23166, 23678, 23742, 24820, 25332, 25396, 26460, 26980,
    27048, 27592, 27600, 27608, 27616, 27624, 28224, 28258, 28290, 28802, 28834, 28866,
    29378, 29412, 29444, 29960, 29994, 30026, 30538, 30572, 30604, 31120, 31154, 31186,
    31714, 31746, 31778, 32306, 32340, 32372
// offset tables for interlaced picture MVDATA decoding
// offset_table1[k] = 1 << (k - 1) for k >= 1, 0 for k == 0
static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
// offset_table2[k] = (1 << k) - 1
static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
66 * Init VC-1 specific tables and VC1Context members
67 * @param v The VC1Context to initialize
/**
 * Init VC-1 specific tables and VC1Context members.
 * All VLC tables built here are static, i.e. shared by every decoder
 * instance; most of them are carved out of one common backing array.
 * @param v The VC1Context to initialize
 */
static int vc1_init_common(VC1Context *v)
    /* single shared backing store for the init_vlc() calls below; vlc_offs[]
     * holds the starting entry of each sub-table (its last entry, 32372,
     * equals this array's size) */
    static VLC_TYPE vlc_table[32372][2];

    v->hrd_rate = v->hrd_buffer = NULL;

    /* VLCs with their own static storage */
    INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
                    ff_vc1_bfraction_bits, 1, 1,
                    ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS);
    INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
                    ff_vc1_norm2_bits, 1, 1,
                    ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS);
    INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
                    ff_vc1_norm6_bits, 1, 1,
                    ff_vc1_norm6_codes, 2, 2, 556);
    INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
                    ff_vc1_imode_bits, 1, 1,
                    ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS);
    /* transform-type MB / transform-type block / sub-block pattern VLCs */
    for (i = 0; i < 3; i++) {
        ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 0]];
        ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i * 3 + 1] - vlc_offs[i * 3 + 0];
        init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
                 ff_vc1_ttmb_bits[i], 1, 1,
                 ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
        ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 1]];
        ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i * 3 + 2] - vlc_offs[i * 3 + 1];
        init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
                 ff_vc1_ttblk_bits[i], 1, 1,
                 ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
        ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 2]];
        ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i * 3 + 3] - vlc_offs[i * 3 + 2];
        init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
                 ff_vc1_subblkpat_bits[i], 1, 1,
                 ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
    /* 4MV block pattern, P-picture CBPCY and MV-differential VLCs */
    for (i = 0; i < 4; i++) {
        ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 9]];
        ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i * 3 + 10] - vlc_offs[i * 3 + 9];
        init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
                 ff_vc1_4mv_block_pattern_bits[i], 1, 1,
                 ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
        ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 10]];
        ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i * 3 + 11] - vlc_offs[i * 3 + 10];
        init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
                 ff_vc1_cbpcy_p_bits[i], 1, 1,
                 ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
        ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 11]];
        ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i * 3 + 12] - vlc_offs[i * 3 + 11];
        init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
                 ff_vc1_mv_diff_bits[i], 1, 1,
                 ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
    /* AC coefficient VLCs (codes interleaved in vc1_ac_tables) */
    for (i = 0; i < 8; i++) {
        ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i * 2 + 21]];
        ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i * 2 + 22] - vlc_offs[i * 2 + 21];
        init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
                 &vc1_ac_tables[i][0][1], 8, 4,
                 &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC);
        /* initialize interlaced MVDATA tables (2-Ref) */
        ff_vc1_2ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 2 + 22]];
        ff_vc1_2ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 2 + 23] - vlc_offs[i * 2 + 22];
        init_vlc(&ff_vc1_2ref_mvdata_vlc[i], VC1_2REF_MVDATA_VLC_BITS, 126,
                 ff_vc1_2ref_mvdata_bits[i], 1, 1,
                 ff_vc1_2ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
    for (i = 0; i < 4; i++) {
        /* initialize 4MV MBMODE VLC tables for interlaced frame P picture */
        ff_vc1_intfr_4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 37]];
        ff_vc1_intfr_4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 38] - vlc_offs[i * 3 + 37];
        init_vlc(&ff_vc1_intfr_4mv_mbmode_vlc[i], VC1_INTFR_4MV_MBMODE_VLC_BITS, 15,
                 ff_vc1_intfr_4mv_mbmode_bits[i], 1, 1,
                 ff_vc1_intfr_4mv_mbmode_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
        /* initialize NON-4MV MBMODE VLC tables for the same */
        ff_vc1_intfr_non4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 38]];
        ff_vc1_intfr_non4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 39] - vlc_offs[i * 3 + 38];
        init_vlc(&ff_vc1_intfr_non4mv_mbmode_vlc[i], VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 9,
                 ff_vc1_intfr_non4mv_mbmode_bits[i], 1, 1,
                 ff_vc1_intfr_non4mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
        /* initialize interlaced MVDATA tables (1-Ref) */
        ff_vc1_1ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 39]];
        ff_vc1_1ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 3 + 40] - vlc_offs[i * 3 + 39];
        init_vlc(&ff_vc1_1ref_mvdata_vlc[i], VC1_1REF_MVDATA_VLC_BITS, 72,
                 ff_vc1_1ref_mvdata_bits[i], 1, 1,
                 ff_vc1_1ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
    for (i = 0; i < 4; i++) {
        /* Initialize 2MV Block pattern VLC tables */
        ff_vc1_2mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i + 49]];
        ff_vc1_2mv_block_pattern_vlc[i].table_allocated = vlc_offs[i + 50] - vlc_offs[i + 49];
        init_vlc(&ff_vc1_2mv_block_pattern_vlc[i], VC1_2MV_BLOCK_PATTERN_VLC_BITS, 4,
                 ff_vc1_2mv_block_pattern_bits[i], 1, 1,
                 ff_vc1_2mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
    for (i = 0; i < 8; i++) {
        /* Initialize interlaced CBPCY VLC tables (Table 124 - Table 131) */
        ff_vc1_icbpcy_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 53]];
        ff_vc1_icbpcy_vlc[i].table_allocated = vlc_offs[i * 3 + 54] - vlc_offs[i * 3 + 53];
        init_vlc(&ff_vc1_icbpcy_vlc[i], VC1_ICBPCY_VLC_BITS, 63,
                 ff_vc1_icbpcy_p_bits[i], 1, 1,
                 ff_vc1_icbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
        /* Initialize interlaced field picture MBMODE VLC tables */
        ff_vc1_if_mmv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 54]];
        ff_vc1_if_mmv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 55] - vlc_offs[i * 3 + 54];
        init_vlc(&ff_vc1_if_mmv_mbmode_vlc[i], VC1_IF_MMV_MBMODE_VLC_BITS, 8,
                 ff_vc1_if_mmv_mbmode_bits[i], 1, 1,
                 ff_vc1_if_mmv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
        ff_vc1_if_1mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 55]];
        ff_vc1_if_1mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 56] - vlc_offs[i * 3 + 55];
        init_vlc(&ff_vc1_if_1mv_mbmode_vlc[i], VC1_IF_1MV_MBMODE_VLC_BITS, 6,
                 ff_vc1_if_1mv_mbmode_bits[i], 1, 1,
                 ff_vc1_if_1mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);

    v->mvrange = 0; /* 7.1.1.18, p80 */
196 /***********************************************************************/
198 * @name VC-1 Bitplane decoding
216 /** @} */ //imode defines
219 /** @} */ //Bitplane group
/* Flush delayed IDCT output blocks to the picture planes with
 * put_signed_pixels_clamped(); handles the top-left neighbour MB and,
 * at the end of a row, the top MB, then rotates the block-buffer indices. */
static void vc1_put_signed_blocks_clamped(VC1Context *v)
    MpegEncContext *s = &v->s;
    int topleft_mb_pos, top_mb_pos;
    int stride_y, fieldtx;

    /* The put pixels loop is always one MB row behind the decoding loop,
     * because we can only put pixels when overlap filtering is done, and
     * for filtering of the bottom edge of a MB, we need the next MB row
     * Within the row, the put pixels loop is also one MB col behind the
     * decoding loop. The reason for this is again, because for filtering
     * of the right MB edge, we need the next MB present. */
    if (!s->first_slice_line) {
        topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
        fieldtx = v->fieldtx_plane[topleft_mb_pos];
        /* field-coded transform doubles the luma stride */
        stride_y = (s->linesize) << fieldtx;
        v_dist = (16 - fieldtx) >> (fieldtx == 0);
        s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
                                         s->dest[0] - 16 * s->linesize - 16,
        s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
                                         s->dest[0] - 16 * s->linesize - 8,
        s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
                                         s->dest[0] - v_dist * s->linesize - 16,
        s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
                                         s->dest[0] - v_dist * s->linesize - 8,
        s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
                                         s->dest[1] - 8 * s->uvlinesize - 8,
        s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
                                         s->dest[2] - 8 * s->uvlinesize - 8,
        /* last MB of the row: also flush the block directly above */
        if (s->mb_x == s->mb_width - 1) {
            top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
            fieldtx = v->fieldtx_plane[top_mb_pos];
            stride_y = s->linesize << fieldtx;
            v_dist = fieldtx ? 15 : 8;
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
                                             s->dest[0] - 16 * s->linesize,
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
                                             s->dest[0] - 16 * s->linesize + 8,
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
                                             s->dest[0] - v_dist * s->linesize,
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
                                             s->dest[0] - v_dist * s->linesize + 8,
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
                                             s->dest[1] - 8 * s->uvlinesize,
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
                                             s->dest[2] - 8 * s->uvlinesize,

/* advance a block-buffer index, wrapping at v->n_allocated_blks */
#define inc_blk_idx(idx) do { \
        if (idx >= v->n_allocated_blks) \

    inc_blk_idx(v->topleft_blk_idx);
    inc_blk_idx(v->top_blk_idx);
    inc_blk_idx(v->left_blk_idx);
    inc_blk_idx(v->cur_blk_idx);
/* In-loop deblocking pass for intra blocks: vertical/horizontal 16-pel luma
 * and 8-pel chroma filters on the edges of the MB above and, on the last
 * row, of the current MB.  pq is the picture quantizer passed to the DSP. */
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
    MpegEncContext *s = &v->s;

    if (!s->first_slice_line) {
        v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
        for (j = 0; j < 2; j++) {  /* both chroma planes */
            v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
            v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
    v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);

    /* on the last MB row also filter the current MB's own edges */
    if (s->mb_y == s->end_mb_y - 1) {
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
        v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
        v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/* Delayed variant of the intra-block loop filter: trails the decoding loop
 * by two MB rows/columns so that overlap filtering has already completed
 * on every edge being deblocked. */
static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
    MpegEncContext *s = &v->s;

    /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
     * means it runs two rows/cols behind the decoding loop. */
    if (!s->first_slice_line) {
        if (s->mb_y >= s->start_mb_y + 2) {
            v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
            for (j = 0; j < 2; j++) {  /* both chroma planes */
                v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
                v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
        v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);

        /* end of the row: also catch up on the column directly above */
        if (s->mb_x == s->mb_width - 1) {
            if (s->mb_y >= s->start_mb_y + 2) {
                v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
                for (j = 0; j < 2; j++) {
                    v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
                    v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
            v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);

    /* last MB row: flush the remaining trailing edges */
    if (s->mb_y == s->end_mb_y) {
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
        for (j = 0; j < 2; j++) {
            v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
        if (s->mb_x == s->mb_width - 1) {
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
            for (j = 0; j < 2; j++) {
                v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Conditional overlap-smoothing filter for intra blocks (advanced profile):
 * applied when condover/pq/over_flags_plane enable it for the MB pair.
 * Horizontal smoothing runs on the current MB; vertical smoothing is
 * deferred by one MB column/row (see comment below). */
static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
    MpegEncContext *s = &v->s;

    if (v->condover == CONDOVER_NONE)

    mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    /* Within a MB, the horizontal overlap always runs before the vertical.
     * To accomplish that, we run the H on left and internal borders of the
     * currently decoded MB. Then, we wait for the next overlap iteration
     * to do H overlap on the right edge of this MB, before moving over and
     * running the V overlap. Therefore, the V overlap makes us trail by one
     * MB col and the H overlap filter makes us trail by one MB row. This
     * is reflected in the time at which we run the put_pixels loop. */
    if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
        /* H overlap: left neighbour's right edge against our left edge */
        if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                        v->over_flags_plane[mb_pos - 1])) {
            v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
                                      v->block[v->cur_blk_idx][0]);
            v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
                                      v->block[v->cur_blk_idx][2]);
            if (!(s->flags & CODEC_FLAG_GRAY)) {
                v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
                                          v->block[v->cur_blk_idx][4]);
                v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
                                          v->block[v->cur_blk_idx][5]);
        /* H overlap on the MB-internal luma block edges */
        v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
                                  v->block[v->cur_blk_idx][1]);
        v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
                                  v->block[v->cur_blk_idx][3]);

    /* last column: run the (otherwise deferred) V overlap on this MB now */
    if (s->mb_x == s->mb_width - 1) {
        if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                                     v->over_flags_plane[mb_pos - s->mb_stride])) {
            v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
                                      v->block[v->cur_blk_idx][0]);
            v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
                                      v->block[v->cur_blk_idx][1]);
            if (!(s->flags & CODEC_FLAG_GRAY)) {
                v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
                                          v->block[v->cur_blk_idx][4]);
                v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
                                          v->block[v->cur_blk_idx][5]);
        v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
                                  v->block[v->cur_blk_idx][2]);
        v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
                                  v->block[v->cur_blk_idx][3]);
    /* V overlap for the left-neighbour MB (one column behind) */
    if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
        if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                                     v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
            v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
                                      v->block[v->left_blk_idx][0]);
            v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
                                      v->block[v->left_blk_idx][1]);
            if (!(s->flags & CODEC_FLAG_GRAY)) {
                v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
                                          v->block[v->left_blk_idx][4]);
                v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
                                          v->block[v->left_blk_idx][5]);
        v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
                                  v->block[v->left_blk_idx][2]);
        v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
                                  v->block[v->left_blk_idx][3]);
/** Do motion compensation over 1 macroblock
 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
 * @param v   decoder context
 * @param dir 0 = forward (last picture), 1 = backward (next picture)
 */
static void vc1_mc_1mv(VC1Context *v, int dir)
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    /* in field mode only half the vertical edge area is available */
    int v_edge_pos = s->v_edge_pos >> v->field_mode;

    if (!v->field_mode && !v->s.last_picture.f.data[0])

    mx = s->mv[dir][0][0];
    my = s->mv[dir][0][1];

    // store motion vectors for further use in B frames
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;

    /* chroma MV: halve the luma MV, rounding 3/4-pel positions up */
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;
        v->cur_field_type != v->ref_field_type[dir]) {
        /* referencing the opposite field: shift vertical MVs by half a line */
        my = my - 2 + 4 * v->cur_field_type;
        uvmy = uvmy - 2 + 4 * v->cur_field_type;

    if (v->fastuvmc && (v->fcm != 1)) { // fastuvmc shall be ignored for interlaced frame picture
        /* round chroma MV toward zero to the nearest even position */
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    if (v->field_mode) { // interlaced field picture
        /* same-frame reference when the opposite field of the current
         * picture is the reference */
        if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) {
            srcY = s->current_picture.f.data[0];
            srcU = s->current_picture.f.data[1];
            srcV = s->current_picture.f.data[2];
            srcY = s->last_picture.f.data[0];
            srcU = s->last_picture.f.data[1];
            srcV = s->last_picture.f.data[2];
            srcY = s->next_picture.f.data[0];
            srcU = s->next_picture.f.data[1];
            srcV = s->next_picture.f.data[2];
            srcY = s->last_picture.f.data[0];
            srcU = s->last_picture.f.data[1];
            srcV = s->last_picture.f.data[2];
            srcY = s->next_picture.f.data[0];
            srcU = s->next_picture.f.data[1];
            srcV = s->next_picture.f.data[2];

    /* integer source position (MVs are in quarter-pel units) */
    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip( src_x, -16, s->mb_width * 16);
        src_y = av_clip( src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
        src_x = av_clip( src_x, -17, s->avctx->coded_width);
        src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    /* bottom-field reference: skip one line into the frame buffer */
    if (v->field_mode && v->ref_field_type[dir]) {
        srcY += s->current_picture_ptr->f.linesize[0];
        srcU += s->current_picture_ptr->f.linesize[1];
        srcV += s->current_picture_ptr->f.linesize[2];

    /* for grayscale we should not try to read from unknown area */
    if (s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;

    /* slow path: source needs scaling (range reduction / intensity
     * compensation) or lies near/over the picture edge */
    if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
        || (unsigned)(src_y - s->mspel) > v_edge_pos - (my&3) - 16 - s->mspel * 3) {
        uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;

        srcY -= s->mspel * (1 + s->linesize);
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
                                17 + s->mspel * 2, 17 + s->mspel * 2,
                                src_x - s->mspel, src_y - s->mspel,
                                s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
                                uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
                                uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);

        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                src += s->uvlinesize;
                src2 += s->uvlinesize;
        /* if we deal with intensity compensation we need to scale source blocks */
        if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = v->luty[src[i]];
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = v->lutuv[src[i]];
                    src2[i] = v->lutuv[src2[i]];
                src += s->uvlinesize;
                src2 += s->uvlinesize;
        srcY += s->mspel * (1 + s->linesize);

    /* writing the bottom field: offset destination by one line */
    if (v->field_mode && v->cur_field_type) {
        off = s->current_picture_ptr->f.linesize[0];
        off_uv = s->current_picture_ptr->f.linesize[1];
        /* quarter-pel luma MC via the VC-1 mspel 8x8 functions */
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
            dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
            dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);

    if (s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
        dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Median of four integers: discard the minimum and maximum and return the
 * mean of the two middle values (C integer division, truncated toward zero).
 * The visible span had lost the branch scaffolding and braces; the structure
 * is restored here with the four return expressions unchanged in value.
 */
static inline int median4(int a, int b, int c, int d)
{
    if (a < b) {
        /* a is a low candidate, b a high candidate */
        if (c < d) return ((b < d ? b : d) + (a > c ? a : c)) / 2;
        else       return ((b < c ? b : c) + (a > d ? a : d)) / 2;
    } else {
        /* b is a low candidate, a a high candidate */
        if (c < d) return ((a < d ? a : d) + (b > c ? b : c)) / 2;
        else       return ((a < c ? a : c) + (b > d ? b : d)) / 2;
    }
}
/** Do motion compensation for 4-MV macroblock - luminance block
 * @param v   decoder context
 * @param n   luma block number (0..3) within the macroblock
 * @param dir 0 = forward reference, 1 = backward reference
 */
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    int dxy, mx, my, src_x, src_y;
    /* per-block field MV flag, only meaningful for interlaced frame coding */
    int fieldmv = (v->fcm == 1) ? v->blk_mv_type[s->block_index[n]] : 0;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;

    if (!v->field_mode && !v->s.last_picture.f.data[0])

    mx = s->mv[dir][n][0];
    my = s->mv[dir][n][1];
        if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
            srcY = s->current_picture.f.data[0];
            srcY = s->last_picture.f.data[0];
            srcY = s->last_picture.f.data[0];
            srcY = s->next_picture.f.data[0];
        /* opposite-field reference: shift the vertical MV by half a line */
        if (v->cur_field_type != v->ref_field_type[dir])
            my = my - 2 + 4 * v->cur_field_type;

    /* after the 4th luma MV of a field P MB, derive the MV stored for
     * B-frame prediction from the majority field's block MVs */
    if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
        int same_count = 0, opp_count = 0, k;
        int chosen_mv[2][4][2], f;
        for (k = 0; k < 4; k++) {
            f = v->mv_f[0][s->block_index[k] + v->blocks_off];
            chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
            chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
        f = opp_count > same_count;
        switch (f ? opp_count : same_count) {
            tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
                         chosen_mv[f][2][0], chosen_mv[f][3][0]);
            ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
                         chosen_mv[f][2][1], chosen_mv[f][3][1]);
            tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
            ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
            tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
            ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
        for (k = 0; k < 4; k++)
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;

    if (v->fcm == 1) { // not sure if needed for other types of picture
        int width = s->avctx->coded_width;
        int height = s->avctx->coded_height >> 1;
        qx = (s->mb_x * 16) + (mx >> 2);
        qy = (s->mb_y * 8) + (my >> 3);
            mx -= 4 * (qx - width);
        else if (qy > height + 1)
            my -= 8 * (qy - height - 1);

    /* destination offset of luma block n within the MB */
    if ((v->fcm == 1) && fieldmv)
        off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
        off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
    if (v->field_mode && v->cur_field_type)
        off += s->current_picture_ptr->f.linesize[0];

    src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
        src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
        src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip(src_x, -16, s->mb_width * 16);
        src_y = av_clip(src_y, -16, s->mb_height * 16);
        src_x = av_clip(src_x, -17, s->avctx->coded_width);
            src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
            src_y = av_clip(src_y, -18, s->avctx->coded_height);
        src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);

    srcY += src_y * s->linesize + src_x;
    if (v->field_mode && v->ref_field_type[dir])
        srcY += s->current_picture_ptr->f.linesize[0];

    if (fieldmv && !(src_y & 1))
    if (fieldmv && (src_y & 1) && src_y < 4)
    /* slow path: scaling needed or source near/over the picture edge */
    if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
        || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
        srcY -= s->mspel * (1 + (s->linesize << fieldmv));
        /* check emulate edge stride and offset */
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
                                9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
                                src_x - s->mspel, src_y - (s->mspel << fieldmv),
                                s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            for (j = 0; j < 9 + s->mspel * 2; j++) {
                for (i = 0; i < 9 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize << fieldmv;
        /* if we deal with intensity compensation we need to scale source blocks */
        if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            for (j = 0; j < 9 + s->mspel * 2; j++) {
                for (i = 0; i < 9 + s->mspel * 2; i++)
                    src[i] = v->luty[src[i]];
                src += s->linesize << fieldmv;
        srcY += s->mspel * (1 + (s->linesize << fieldmv));

        /* quarter-pel MC via the VC-1 mspel 8x8 functions */
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
            dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
            dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/**
 * Derive the chroma motion vector (*tx, *ty) from up to four luma MVs.
 * A block k participates when a[k] == flag; count[idx] gives how many
 * blocks participate for a given selection bitmask idx.  The MV is the
 * median of 4, the median of 3, the mean of 2, or (presumably, in the
 * single-block case not fully visible here) the lone participating MV.
 * @return number of contributing blocks (valid_count at the call sites)
 */
static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
    /* population count of the 4-bit selection mask */
    static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};

    idx = ((a[3] != flag) << 3)
        | ((a[2] != flag) << 2)
        | ((a[1] != flag) << 1)
        *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
        *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
    } else if (count[idx] == 1) {
        /* three contributors: median of the three remaining MVs,
         * selected by which single block was excluded */
        *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
        *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
        *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
        *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
        *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
        *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
        *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
        *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
    } else if (count[idx] == 2) {
        /* two contributors t1, t2: average their MVs */
        for (i = 0; i < 3; i++)
        for (i = t1 + 1; i < 4; i++)
        *tx = (mvx[t1] + mvx[t2]) / 2;
        *ty = (mvy[t1] + mvy[t2]) / 2;
/** Do motion compensation for 4-MV macroblock - both chroma blocks
 * Derives one chroma MV from the four luma MVs, then performs the
 * quarter-pel bilinear chroma MC.
 * @param v   decoder context
 * @param dir 0 = forward reference, 1 = backward reference
 */
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcU, *srcV;
    int uvmx, uvmy, uvsrc_x, uvsrc_y;
    int k, tx = 0, ty = 0;
    int mvx[4], mvy[4], intra[4], mv_f[4];
    int chroma_ref_type = v->cur_field_type, off = 0;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;

    if (!v->field_mode && !v->s.last_picture.f.data[0])
    if (s->flags & CODEC_FLAG_GRAY)

    /* gather the four luma MVs plus intra/field flags */
    for (k = 0; k < 4; k++) {
        mvx[k] = s->mv[dir][k][0];
        mvy[k] = s->mv[dir][k][1];
        intra[k] = v->mb_type[0][s->block_index[k]];
            mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];

    /* calculate chroma MV vector from four luma MVs */
    if (!v->field_mode || (v->field_mode && !v->numref)) {
        valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            return; //no need to do MC for intra blocks
        /* 2-ref field picture: the dominant field among the block MVs
         * selects which MVs contribute and which field is referenced */
        if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
        valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
            chroma_ref_type = !v->cur_field_type;
    /* halve the derived MV, rounding 3/4-pel positions up */
    uvmx = (tx + ((tx & 3) == 3)) >> 1;
    uvmy = (ty + ((ty & 3) == 3)) >> 1;

    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;
        /* fastuvmc: round chroma MV toward zero to an even position */
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    // Field conversion bias
    if (v->cur_field_type != chroma_ref_type)
        uvmy += 2 - 4 * chroma_ref_type;

    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);

        /* pick source plane: current picture when referencing the opposite
         * field of the frame being decoded, otherwise last/next picture */
        if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
            srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
            srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
            srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
            srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
        srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
        srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
        srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
        srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
        if (chroma_ref_type) {
            srcU += s->current_picture_ptr->f.linesize[1];
            srcV += s->current_picture_ptr->f.linesize[2];
        off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;

    /* slow path: scaling needed or source near/over the picture edge */
    if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
        || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
        || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize,
                                8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
                                8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = s->edge_emu_buffer;
        srcV = s->edge_emu_buffer + 16;

        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            uint8_t *src, *src2;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                src += s->uvlinesize;
                src2 += s->uvlinesize;
        /* if we deal with intensity compensation we need to scale source blocks */
        if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            uint8_t *src, *src2;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = v->lutuv[src[i]];
                    src2[i] = v->lutuv[src2[i]];
                src += s->uvlinesize;
                src2 += s->uvlinesize;

    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
        dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
        dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1049 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1053 /** Do motion compensation for 4-MV field chroma macroblock (both U and V)
/**
 * Do motion compensation for a 4-MV field chroma macroblock: predicts the
 * four 4x4 sub-blocks of both chroma planes (U and V) from last_picture,
 * using either the H.264-style chroma MC or the VC-1 no-rounding variant.
 * NOTE(review): this listing elides some original lines (early returns,
 * else branches, closing braces), so control flow shown here is incomplete.
 */
1055 static void vc1_mc_4mv_chroma4(VC1Context *v)
1057     MpegEncContext *s = &v->s;
1058     DSPContext *dsp = &v->s.dsp;
1059     uint8_t *srcU, *srcV;
1060     int uvsrc_x, uvsrc_y;
1061     int uvmx_field[4], uvmy_field[4];
/* non-zero when the current block uses field (not frame) motion vectors */
1063     int fieldmv = v->blk_mv_type[s->block_index[0]];
/* rounding table for luma->chroma MV conversion in field-MV mode */
1064     static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
1065     int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
1066     int v_edge_pos = s->v_edge_pos >> 1;
/* no reference picture available -> nothing to predict from (return elided) */
1068 if (!v->s.last_picture.f.data[0])
/* grayscale decoding: chroma MC is skipped (return elided) */
1070 if (s->flags & CODEC_FLAG_GRAY)
/* derive a chroma MV for each sub-block from the corresponding luma MV */
1073 for (i = 0; i < 4; i++) {
1074 tx = s->mv[0][i][0];
1075 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1076 ty = s->mv[0][i][1];
/* field-MV path uses the rounding table; frame-MV path (below) halves */
1078 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1080 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
/* per-sub-block motion compensation */
1083 for (i = 0; i < 4; i++) {
1084 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1085 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1086 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1087 // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1088 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1089 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1090 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1091 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
/* keep only the fractional (quarter-pel) part for the MC filter below */
1092 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1093 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
/* field parity adjustments (adjustment statements elided in this listing) */
1095 if (fieldmv && !(uvsrc_y & 1))
1097 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
/* fall back to the edge emulation buffer when intensity compensation is on
 * or the source block reaches outside the padded picture edges */
1099 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
1100 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1101 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1102 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
1103 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1104 s->h_edge_pos >> 1, v_edge_pos);
1105 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1106 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1107 s->h_edge_pos >> 1, v_edge_pos);
1108 srcU = s->edge_emu_buffer;
1109 srcV = s->edge_emu_buffer + 16;
1111 /* if we deal with intensity compensation we need to scale source blocks */
1112 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1114 uint8_t *src, *src2;
/* remap a 5x5 source area of each chroma plane through the IC lookup table */
1118 for (j = 0; j < 5; j++) {
1119 for (i = 0; i < 5; i++) {
1120 src[i] = v->lutuv[src[i]];
1121 src2[i] = v->lutuv[src2[i]];
1123 src += s->uvlinesize << 1;
1124 src2 += s->uvlinesize << 1;
/* 4x4 chroma MC; doubled line stride interleaves fields when fieldmv is set */
1129 dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1130 dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1132 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1133 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1138 /***********************************************************************/
1140 * @name VC-1 Block-level functions
1141 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1147 * @brief Get macroblock-level quantizer scale
/*
 * Computes the macroblock quantizer scale into the local 'mquant'.
 * Expands in a context that provides: v (VC1Context *), s, gb and the
 * locals mquant, mqdiff, edges. For DQPROFILE_ALL_MBS the value comes
 * from the bitstream (bilevel flag, 3-bit diff, or escape 5-bit value);
 * for the edge-based profiles, mquant is overridden with v->altpq on the
 * picture edges selected by 'edges'.
 * NOTE(review): some continuation lines (else branches, closing braces)
 * are elided in this listing; the macro body is incomplete as shown.
 */
1149 #define GET_MQUANT() \
1150 if (v->dquantfrm) { \
1152 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1153 if (v->dqbilevel) { \
1154 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1156 mqdiff = get_bits(gb, 3); \
1158 mquant = v->pq + mqdiff; \
1160 mquant = get_bits(gb, 5); \
1163 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1164 edges = 1 << v->dqsbedge; \
1165 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1166 edges = (3 << v->dqsbedge) % 15; \
1167 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1169 if ((edges&1) && !s->mb_x) \
1170 mquant = v->altpq; \
1171 if ((edges&2) && s->first_slice_line) \
1172 mquant = v->altpq; \
1173 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1174 mquant = v->altpq; \
1175 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1176 mquant = v->altpq; \
1180 * @def GET_MVDATA(_dmv_x, _dmv_y)
1181 * @brief Get MV differentials
1182 * @see MVDATA decoding from 8.3.5.2, p(1)20
1183 * @param _dmv_x Horizontal differential for decoded MV
1184 * @param _dmv_y Vertical differential for decoded MV
/*
 * Decodes one MVDATA element into the MV differentials (_dmv_x, _dmv_y)
 * and sets mb_has_coeffs. Expands in a context providing: gb, s, v and
 * the locals index, index1, val, sign, size_table, offset_table.
 * index 35 is the escape (raw bits) case; index 36 signals an intra MB
 * (branch body elided in this listing). Otherwise the differential is
 * reconstructed from a table-driven magnitude plus a sign bit folded in
 * via the (sign ^ x) - sign idiom.
 * NOTE(review): several continuation lines are elided; the macro body is
 * incomplete as shown.
 */
1186 #define GET_MVDATA(_dmv_x, _dmv_y) \
1187 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1188 VC1_MV_DIFF_VLC_BITS, 2); \
1190 mb_has_coeffs = 1; \
1193 mb_has_coeffs = 0; \
1196 _dmv_x = _dmv_y = 0; \
1197 } else if (index == 35) { \
1198 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1199 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1200 } else if (index == 36) { \
1205 index1 = index % 6; \
1206 if (!s->quarter_sample && index1 == 5) val = 1; \
1208 if (size_table[index1] - val > 0) \
1209 val = get_bits(gb, size_table[index1] - val); \
1211 sign = 0 - (val&1); \
1212 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1214 index1 = index / 6; \
1215 if (!s->quarter_sample && index1 == 5) val = 1; \
1217 if (size_table[index1] - val > 0) \
1218 val = get_bits(gb, size_table[index1] - val); \
1220 sign = 0 - (val & 1); \
1221 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
/**
 * Decode one interlaced-picture MVDATA element.
 * Reads a VLC-coded index from the bitstream and reconstructs the MV
 * differentials into *dmv_x / *dmv_y; for two-reference fields the
 * reference selection bit is returned through *pred_flag.
 * The VLC table and bit count depend on v->numref; v->dmvrange extends
 * the coded magnitude range (switch arms elided in this listing).
 * NOTE(review): several original lines (else branches, switch cases,
 * closing braces) are elided; control flow is incomplete as shown.
 */
1224 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1225 int *dmv_y, int *pred_flag)
1228 int extend_x = 0, extend_y = 0;
1229 GetBitContext *gb = &v->s.gb;
1232 const int* offs_tab;
/* two-reference field pictures use a different VLC than one-reference */
1235 bits = VC1_2REF_MVDATA_VLC_BITS;
1238 bits = VC1_1REF_MVDATA_VLC_BITS;
1241 switch (v->dmvrange) {
1249 extend_x = extend_y = 1;
1252 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
/* escape case: raw k_x/k_y-bit differentials; low bit of Y doubles as
 * the prediction (reference field) flag */
1254 *dmv_x = get_bits(gb, v->k_x);
1255 *dmv_y = get_bits(gb, v->k_y);
1257 *pred_flag = *dmv_y & 1;
1258 *dmv_y = (*dmv_y + *pred_flag) >> 1;
/* horizontal component: table-driven magnitude + folded sign bit */
1263 offs_tab = offset_table2;
1265 offs_tab = offset_table1;
1266 index1 = (index + 1) % 9;
1268 val = get_bits(gb, index1 + extend_x);
1269 sign = 0 -(val & 1);
1270 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
/* vertical component, analogous; with numref set, the low bit of the
 * index selects the reference field */
1274 offs_tab = offset_table2;
1276 offs_tab = offset_table1;
1277 index1 = (index + 1) / 9;
1278 if (index1 > v->numref) {
1279 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1280 sign = 0 - (val & 1);
1281 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1285 *pred_flag = index1 & 1;
/**
 * Scale the horizontal component of a same-field MV predictor for field
 * pictures, using the refdist-indexed scaling tables, and clip the result
 * to the horizontal MV range.
 * NOTE(review): else branches / braces are elided in this listing.
 */
1289 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1291 int scaledvalue, refdist;
1292 int scalesame1, scalesame2;
1293 int scalezone1_x, zone1offset_x;
/* table row depends on prediction direction and which field is current */
1294 int table_index = dir ^ v->second_field;
1296 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1297 refdist = v->refdist;
1299 refdist = dir ? v->brfd : v->frfd;
1302 scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1303 scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1304 scalezone1_x = vc1_field_mvpred_scales[table_index][3][refdist];
1305 zone1offset_x = vc1_field_mvpred_scales[table_index][5][refdist];
/* two-zone scaling: small MVs use scalesame1, larger ones scalesame2
 * with a zone offset applied away from zero */
1310 if (FFABS(n) < scalezone1_x)
1311 scaledvalue = (n * scalesame1) >> 8;
1314 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1316 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1319 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/**
 * Scale the vertical component of a same-field MV predictor for field
 * pictures; same two-zone scheme as scaleforsame_x but with the vertical
 * table columns, and an asymmetric clip range that depends on the
 * current/reference field parity.
 * NOTE(review): else branches / braces are elided in this listing.
 */
1322 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1324 int scaledvalue, refdist;
1325 int scalesame1, scalesame2;
1326 int scalezone1_y, zone1offset_y;
1327 int table_index = dir ^ v->second_field;
1329 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1330 refdist = v->refdist;
1332 refdist = dir ? v->brfd : v->frfd;
1335 scalesame1 = vc1_field_mvpred_scales[table_index][1][refdist];
1336 scalesame2 = vc1_field_mvpred_scales[table_index][2][refdist];
1337 scalezone1_y = vc1_field_mvpred_scales[table_index][4][refdist];
1338 zone1offset_y = vc1_field_mvpred_scales[table_index][6][refdist];
1343 if (FFABS(n) < scalezone1_y)
1344 scaledvalue = (n * scalesame1) >> 8;
1347 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1349 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
/* vertical range is shifted by one when predicting the bottom field
 * from a top-field reference */
1353 if (v->cur_field_type && !v->ref_field_type[dir])
1354 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1356 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/**
 * Scale the horizontal component of an opposite-field MV predictor in
 * B field pictures, using the backward-refdist-indexed B-field tables,
 * then clip to the horizontal MV range.
 * NOTE(review): else branches / braces are elided in this listing.
 */
1359 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1361 int scalezone1_x, zone1offset_x;
1362 int scaleopp1, scaleopp2, brfd;
1365 brfd = FFMIN(v->brfd, 3);
1366 scalezone1_x = vc1_b_field_mvpred_scales[3][brfd];
1367 zone1offset_x = vc1_b_field_mvpred_scales[5][brfd];
1368 scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1369 scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
/* two-zone scaling, same structure as scaleforsame_x */
1374 if (FFABS(n) < scalezone1_x)
1375 scaledvalue = (n * scaleopp1) >> 8;
1378 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1380 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1383 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/**
 * Scale the vertical component of an opposite-field MV predictor in
 * B field pictures; vertical columns of the B-field tables, with the
 * same parity-dependent asymmetric clip as scaleforsame_y.
 * NOTE(review): else branches / braces are elided in this listing.
 */
1386 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1388 int scalezone1_y, zone1offset_y;
1389 int scaleopp1, scaleopp2, brfd;
1392 brfd = FFMIN(v->brfd, 3);
1393 scalezone1_y = vc1_b_field_mvpred_scales[4][brfd];
1394 zone1offset_y = vc1_b_field_mvpred_scales[6][brfd];
1395 scaleopp1 = vc1_b_field_mvpred_scales[1][brfd];
1396 scaleopp2 = vc1_b_field_mvpred_scales[2][brfd];
1401 if (FFABS(n) < scalezone1_y)
1402 scaledvalue = (n * scaleopp1) >> 8;
1405 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1407 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1410 if (v->cur_field_type && !v->ref_field_type[dir]) {
1411 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1413 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/**
 * Dispatch same-field MV predictor scaling: non-B (or second-field /
 * forward) cases go through the per-component scaleforsame_x/_y helpers
 * (the component selector lines are elided in this listing); the
 * remaining B-picture case applies a single flat scalesame factor.
 * @param i  selector forwarded to scaleforsame_y
 * @param n  MV component value to scale
 */
1417 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1420 int brfd, scalesame;
1422 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1424 return scaleforsame_y(v, i, n, dir);
1426 return scaleforsame_x(v, n, dir);
1428 brfd = FFMIN(v->brfd, 3);
1429 scalesame = vc1_b_field_mvpred_scales[0][brfd];
1431 return n * scalesame >> 8;
/**
 * Dispatch opposite-field MV predictor scaling: backward prediction in
 * first-field B pictures uses the zoned scaleforopp_x/_y helpers (the
 * component selector lines are elided in this listing); all other cases
 * apply a flat scaleopp factor indexed by refdist and direction.
 * @param n  MV component value to scale
 */
1434 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1437 int refdist, scaleopp;
1439 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1441 return scaleforopp_y(v, n, dir);
1443 return scaleforopp_x(v, n);
1445 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1446 refdist = FFMIN(v->refdist, 3);
1448 refdist = dir ? v->brfd : v->frfd;
1449 scaleopp = vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1451 return n * scaleopp >> 8;
1454 /** Predict and set motion vector
/**
 * Predict and set the motion vector for luma block n (see 8.3.5.3).
 * Gathers the A (above), B (above-corner) and C (left) candidate
 * predictors, median-filters them, applies the spec's MV pullback and
 * hybrid-prediction rules, then stores the final MV (predictor + decoded
 * differential, wrapped into the signed MV range) into s->mv and the
 * current picture's motion_val arrays.
 * @param mv1       non-zero for a 1-MV macroblock (MV duplicated to all
 *                  four block positions)
 * @param r_x,r_y   MV range for the signed-modulus wrap
 * @param is_intra  per-block intra flags used to invalidate predictors
 * @param pred_flag reference-field selector for two-reference fields
 * @param dir       prediction direction (0 forward, 1 backward)
 * NOTE(review): this listing elides many original lines (else branches,
 * early returns, closing braces); control flow is incomplete as shown.
 */
1456 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1457 int mv1, int r_x, int r_y, uint8_t* is_intra,
1458 int pred_flag, int dir)
1460 MpegEncContext *s = &v->s;
1461 int xy, wrap, off = 0;
1465 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
/* same-field and opposite-field versions of each candidate predictor */
1467 int16_t samefield_pred[2], oppfield_pred[2];
1468 int16_t samefield_predA[2], oppfield_predA[2];
1469 int16_t samefield_predB[2], oppfield_predB[2];
1470 int16_t samefield_predC[2], oppfield_predC[2];
1471 int16_t *predA, *predC;
1472 int a_valid, b_valid, c_valid;
1473 int hybridmv_thresh, y_bias = 0;
1475 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1476 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1480 /* scale MV difference to be quad-pel */
1481 dmv_x <<= 1 - s->quarter_sample;
1482 dmv_y <<= 1 - s->quarter_sample;
1484 wrap = s->b8_stride;
1485 xy = s->block_index[n];
/* intra block: zero all stored MVs (the enclosing condition is elided) */
1488 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
1489 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
1490 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
1491 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
1492 if (mv1) { /* duplicate motion data for 1-MV block */
1493 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1494 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1495 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1496 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1497 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1498 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1499 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1500 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1501 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
/* NOTE(review): this pair is inconsistent with its neighbours — [0] lacks
 * "+ v->blocks_off" while [1] has it; confirm against the upstream source */
1502 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1503 s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1504 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1505 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
/* candidate predictors: C = left neighbour, A = above neighbour */
1510 C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
1511 A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
/* B predictor offset for 1-MV macroblocks */
1513 if (v->field_mode && mixedmv_pic)
1514 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1516 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1518 //in 4-MV mode different blocks have different B predictor position
1521 off = (s->mb_x > 0) ? -1 : 1;
1524 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1533 B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
/* predictor availability at picture/slice edges */
1535 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1536 b_valid = a_valid && (s->mb_width > 1);
1537 c_valid = s->mb_x || (n == 1 || n == 3);
1538 if (v->field_mode) {
1539 a_valid = a_valid && !is_intra[xy - wrap];
1540 b_valid = b_valid && !is_intra[xy - wrap + off];
1541 c_valid = c_valid && !is_intra[xy - 1];
/* predictor A: build same-field and opposite-field variants, counting
 * how many valid predictors refer to each field */
1545 f = v->mv_f[dir][xy - wrap + v->blocks_off];
1547 num_samefield += 1 - f;
1549 oppfield_predA[0] = A[0];
1550 oppfield_predA[1] = A[1];
1551 samefield_predA[0] = scaleforsame(v, 0, A[0], 0, dir);
1552 samefield_predA[1] = scaleforsame(v, n, A[1], 1, dir);
1554 samefield_predA[0] = A[0];
1555 samefield_predA[1] = A[1];
1557 oppfield_predA[0] = scaleforopp(v, A[0], 0, dir);
1559 oppfield_predA[1] = scaleforopp(v, A[1], 1, dir);
1562 samefield_predA[0] = samefield_predA[1] = 0;
1563 oppfield_predA[0] = oppfield_predA[1] = 0;
/* predictor C, same scheme */
1566 f = v->mv_f[dir][xy - 1 + v->blocks_off];
1568 num_samefield += 1 - f;
1570 oppfield_predC[0] = C[0];
1571 oppfield_predC[1] = C[1];
1572 samefield_predC[0] = scaleforsame(v, 0, C[0], 0, dir);
1573 samefield_predC[1] = scaleforsame(v, n, C[1], 1, dir);
1575 samefield_predC[0] = C[0];
1576 samefield_predC[1] = C[1];
1578 oppfield_predC[0] = scaleforopp(v, C[0], 0, dir);
1580 oppfield_predC[1] = scaleforopp(v, C[1], 1, dir);
1583 samefield_predC[0] = samefield_predC[1] = 0;
1584 oppfield_predC[0] = oppfield_predC[1] = 0;
/* predictor B, same scheme */
1587 f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1589 num_samefield += 1 - f;
1591 oppfield_predB[0] = B[0];
1592 oppfield_predB[1] = B[1];
1593 samefield_predB[0] = scaleforsame(v, 0, B[0], 0, dir);
1594 samefield_predB[1] = scaleforsame(v, n, B[1], 1, dir);
1596 samefield_predB[0] = B[0];
1597 samefield_predB[1] = B[1];
1599 oppfield_predB[0] = scaleforopp(v, B[0], 0, dir);
1601 oppfield_predB[1] = scaleforopp(v, B[1], 1, dir);
1604 samefield_predB[0] = samefield_predB[1] = 0;
1605 oppfield_predB[0] = oppfield_predB[1] = 0;
/* single valid predictor: take it directly, in A > C > B priority */
1609 samefield_pred[0] = samefield_predA[0];
1610 samefield_pred[1] = samefield_predA[1];
1611 oppfield_pred[0] = oppfield_predA[0];
1612 oppfield_pred[1] = oppfield_predA[1];
1613 } else if (c_valid) {
1614 samefield_pred[0] = samefield_predC[0];
1615 samefield_pred[1] = samefield_predC[1];
1616 oppfield_pred[0] = oppfield_predC[0];
1617 oppfield_pred[1] = oppfield_predC[1];
1618 } else if (b_valid) {
1619 samefield_pred[0] = samefield_predB[0];
1620 samefield_pred[1] = samefield_predB[1];
1621 oppfield_pred[0] = oppfield_predB[0];
1622 oppfield_pred[1] = oppfield_predB[1];
1624 samefield_pred[0] = samefield_pred[1] = 0;
1625 oppfield_pred[0] = oppfield_pred[1] = 0;
/* two or more valid predictors: component-wise median */
1628 if (num_samefield + num_oppfield > 1) {
1629 samefield_pred[0] = mid_pred(samefield_predA[0], samefield_predB[0], samefield_predC[0]);
1630 samefield_pred[1] = mid_pred(samefield_predA[1], samefield_predB[1], samefield_predC[1]);
1632 oppfield_pred[0] = mid_pred(oppfield_predA[0], oppfield_predB[0], oppfield_predC[0]);
1634 oppfield_pred[1] = mid_pred(oppfield_predA[1], oppfield_predB[1], oppfield_predC[1]);
/* choose between same-field and opposite-field prediction */
1637 if (v->field_mode) {
1638 if (num_samefield <= num_oppfield)
1639 opposit = 1 - pred_flag;
1641 opposit = pred_flag;
1645 px = oppfield_pred[0];
1646 py = oppfield_pred[1];
1647 predA = oppfield_predA;
1648 predC = oppfield_predC;
1649 v->mv_f[dir][xy + v->blocks_off] = f = 1;
1650 v->ref_field_type[dir] = !v->cur_field_type;
1652 px = samefield_pred[0];
1653 py = samefield_pred[1];
1654 predA = samefield_predA;
1655 predC = samefield_predC;
1656 v->mv_f[dir][xy + v->blocks_off] = f = 0;
1657 v->ref_field_type[dir] = v->cur_field_type;
1660 /* Pullback MV as specified in 8.3.5.3.4 */
1661 if (!v->field_mode) {
1663 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1664 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1665 X = (s->mb_width << 6) - 4;
1666 Y = (s->mb_height << 6) - 4;
1668 if (qx + px < -60) px = -60 - qx;
1669 if (qy + py < -60) py = -60 - qy;
1671 if (qx + px < -28) px = -28 - qx;
1672 if (qy + py < -28) py = -28 - qy;
1674 if (qx + px > X) px = X - qx;
1675 if (qy + py > Y) py = Y - qy;
1678 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1679 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1680 if (v->field_mode && !s->quarter_sample)
1681 hybridmv_thresh = 16;
1683 hybridmv_thresh = 32;
1684 if (a_valid && c_valid) {
1685 if (is_intra[xy - wrap])
1686 sum = FFABS(px) + FFABS(py);
1688 sum = FFABS(px - predA[0]) + FFABS(py - predA[1]);
1689 if (sum > hybridmv_thresh) {
1690 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1698 if (is_intra[xy - 1])
1699 sum = FFABS(px) + FFABS(py);
1701 sum = FFABS(px - predC[0]) + FFABS(py - predC[1]);
1702 if (sum > hybridmv_thresh) {
1703 if (get_bits1(&s->gb)) {
/* field-mode adjustments before the final store (bodies elided) */
1715 if (v->field_mode && !s->quarter_sample) {
1719 if (v->field_mode && v->numref)
1721 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1723 /* store MV using signed modulus of MV range defined in 4.11 */
1724 s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1725 s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1726 if (mv1) { /* duplicate motion data for 1-MV block */
1727 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1728 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1729 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1730 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1731 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1732 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1733 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1734 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1738 /** Predict and set motion vector for interlaced frame picture MBs
/**
 * Predict and set the motion vector for block n of an interlaced-frame
 * picture macroblock. Collects the A (left), B (above) and C
 * (above-right, or above-left at the right edge) candidates — averaging
 * field/frame-MV mismatches — median-filters or selects among them using
 * the same-field / opposite-field counts, then stores the wrapped MV.
 * @param mvn       1 for a 1-MV MB (MV duplicated to all four blocks),
 *                  2 for a 2-field-MV block (duplicated to the pair)
 * @param r_x,r_y   MV range for the signed-modulus wrap
 * @param is_intra  per-block intra flags (parameter; per-MB flags are
 *                  read from v->is_intra below)
 * NOTE(review): this listing elides many original lines (else branches,
 * early returns, closing braces); control flow is incomplete as shown.
 */
1740 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1741 int mvn, int r_x, int r_y, uint8_t* is_intra)
1743 MpegEncContext *s = &v->s;
1744 int xy, wrap, off = 0;
1745 int A[2], B[2], C[2];
1747 int a_valid = 0, b_valid = 0, c_valid = 0;
1748 int field_a, field_b, field_c; // 0: same field, 1: opposite field
1749 int total_valid, num_samefield, num_oppfield;
1750 int pos_c, pos_b, n_adj;
1752 wrap = s->b8_stride;
1753 xy = s->block_index[n];
/* intra block: zero all stored MVs (the enclosing condition is elided) */
1756 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
1757 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
1758 s->current_picture.f.motion_val[1][xy][0] = 0;
1759 s->current_picture.f.motion_val[1][xy][1] = 0;
1760 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1761 s->current_picture.f.motion_val[0][xy + 1][0] = 0;
1762 s->current_picture.f.motion_val[0][xy + 1][1] = 0;
1763 s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
1764 s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
1765 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
1766 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
1767 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1768 s->current_picture.f.motion_val[1][xy + 1][0] = 0;
1769 s->current_picture.f.motion_val[1][xy + 1][1] = 0;
1770 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1771 s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
1772 s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
1773 s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
/* predictor A (left neighbour); sign of 'off' selects the paired field row */
1778 off = ((n == 0) || (n == 1)) ? 1 : -1;
1780 if (s->mb_x || (n == 1) || (n == 3)) {
1781 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1782 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1783 A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
1784 A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
1786 } else { // current block has frame mv and cand. has field MV (so average)
1787 A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
1788 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
1789 A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
1790 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
/* left MB intra -> invalidate A (body elided) */
1793 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1799 /* Predict B and C */
1800 B[0] = B[1] = C[0] = C[1] = 0;
1801 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1802 if (!s->first_slice_line) {
1803 if (!v->is_intra[s->mb_x - s->mb_stride]) {
1806 pos_b = s->block_index[n_adj] - 2 * wrap;
1807 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1808 n_adj = (n & 2) | (n & 1);
1810 B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
1811 B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
/* above MB has field MVs but current is frame-MV: average the two fields */
1812 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1813 B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1814 B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
/* predictor C from the above-right MB (when not at the right edge) */
1817 if (s->mb_width > 1) {
1818 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1821 pos_c = s->block_index[2] - 2 * wrap + 2;
1822 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1825 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
1826 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
1827 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1828 C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1829 C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
/* right edge: take C from the above-left MB instead */
1831 if (s->mb_x == s->mb_width - 1) {
1832 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1835 pos_c = s->block_index[3] - 2 * wrap - 2;
1836 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1839 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
1840 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
1841 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1842 C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1843 C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
/* bottom blocks of a field-MV MB: B/C come from the MB's own top blocks */
1852 pos_b = s->block_index[1];
1854 B[0] = s->current_picture.f.motion_val[0][pos_b][0];
1855 B[1] = s->current_picture.f.motion_val[0][pos_b][1];
1856 pos_c = s->block_index[0];
1858 C[0] = s->current_picture.f.motion_val[0][pos_c][0];
1859 C[1] = s->current_picture.f.motion_val[0][pos_c][1];
1862 total_valid = a_valid + b_valid + c_valid;
1863 // check if predictor A is out of bounds
1864 if (!s->mb_x && !(n == 1 || n == 3)) {
1867 // check if predictor B is out of bounds
1868 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1869 B[0] = B[1] = C[0] = C[1] = 0;
/* frame-MV blocks: simple median / single-candidate selection */
1871 if (!v->blk_mv_type[xy]) {
1872 if (s->mb_width == 1) {
1876 if (total_valid >= 2) {
1877 px = mid_pred(A[0], B[0], C[0]);
1878 py = mid_pred(A[1], B[1], C[1]);
1879 } else if (total_valid) {
1880 if (a_valid) { px = A[0]; py = A[1]; }
1881 if (b_valid) { px = B[0]; py = B[1]; }
1882 if (c_valid) { px = C[0]; py = C[1]; }
/* field-MV blocks: classify each candidate by field parity (bit 2 of the
 * vertical component distinguishes same vs. opposite field) */
1888 field_a = (A[1] & 4) ? 1 : 0;
1892 field_b = (B[1] & 4) ? 1 : 0;
1896 field_c = (C[1] & 4) ? 1 : 0;
1900 num_oppfield = field_a + field_b + field_c;
1901 num_samefield = total_valid - num_oppfield;
1902 if (total_valid == 3) {
1903 if ((num_samefield == 3) || (num_oppfield == 3)) {
1904 px = mid_pred(A[0], B[0], C[0]);
1905 py = mid_pred(A[1], B[1], C[1]);
1906 } else if (num_samefield >= num_oppfield) {
1907 /* take one MV from same field set depending on priority
1908 the check for B may not be necessary */
1909 px = !field_a ? A[0] : B[0];
1910 py = !field_a ? A[1] : B[1];
1912 px = field_a ? A[0] : B[0];
1913 py = field_a ? A[1] : B[1];
1915 } else if (total_valid == 2) {
1916 if (num_samefield >= num_oppfield) {
1917 if (!field_a && a_valid) {
1920 } else if (!field_b && b_valid) {
1923 } else if (c_valid) {
1928 if (field_a && a_valid) {
1931 } else if (field_b && b_valid) {
1934 } else if (c_valid) {
1939 } else if (total_valid == 1) {
1940 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1941 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1946 /* store MV using signed modulus of MV range defined in 4.11 */
1947 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1948 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1949 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1950 s->current_picture.f.motion_val[0][xy + 1 ][0] = s->current_picture.f.motion_val[0][xy][0];
1951 s->current_picture.f.motion_val[0][xy + 1 ][1] = s->current_picture.f.motion_val[0][xy][1];
1952 s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
1953 s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
1954 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1955 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1956 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1957 s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1958 s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1959 s->mv[0][n + 1][0] = s->mv[0][n][0];
1960 s->mv[0][n + 1][1] = s->mv[0][n][1];
1964 /** Motion compensation for direct or interpolated blocks in B-frames
1966 static void vc1_interp_mc(VC1Context *v)
1968 MpegEncContext *s = &v->s;
1969 DSPContext *dsp = &v->s.dsp;
1970 uint8_t *srcY, *srcU, *srcV;
1971 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1973 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1975 if (!v->field_mode && !v->s.next_picture.f.data[0])
1978 mx = s->mv[1][0][0];
1979 my = s->mv[1][0][1];
1980 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1981 uvmy = (my + ((my & 3) == 3)) >> 1;
1982 if (v->field_mode) {
1983 if (v->cur_field_type != v->ref_field_type[1])
1984 my = my - 2 + 4 * v->cur_field_type;
1985 uvmy = uvmy - 2 + 4 * v->cur_field_type;
1988 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1989 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1991 srcY = s->next_picture.f.data[0];
1992 srcU = s->next_picture.f.data[1];
1993 srcV = s->next_picture.f.data[2];
1995 src_x = s->mb_x * 16 + (mx >> 2);
1996 src_y = s->mb_y * 16 + (my >> 2);
1997 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1998 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
2000 if (v->profile != PROFILE_ADVANCED) {
2001 src_x = av_clip( src_x, -16, s->mb_width * 16);
2002 src_y = av_clip( src_y, -16, s->mb_height * 16);
2003 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
2004 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
2006 src_x = av_clip( src_x, -17, s->avctx->coded_width);
2007 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
2008 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
2009 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
2012 srcY += src_y * s->linesize + src_x;
2013 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2014 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2016 if (v->field_mode && v->ref_field_type[1]) {
2017 srcY += s->current_picture_ptr->f.linesize[0];
2018 srcU += s->current_picture_ptr->f.linesize[1];
2019 srcV += s->current_picture_ptr->f.linesize[2];
2022 /* for grayscale we should not try to read from unknown area */
2023 if (s->flags & CODEC_FLAG_GRAY) {
2024 srcU = s->edge_emu_buffer + 18 * s->linesize;
2025 srcV = s->edge_emu_buffer + 18 * s->linesize;
2029 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 16 - s->mspel * 3
2030 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my & 3) - 16 - s->mspel * 3) {
2031 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
2033 srcY -= s->mspel * (1 + s->linesize);
2034 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
2035 17 + s->mspel * 2, 17 + s->mspel * 2,
2036 src_x - s->mspel, src_y - s->mspel,
2037 s->h_edge_pos, v_edge_pos);
2038 srcY = s->edge_emu_buffer;
2039 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
2040 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2041 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
2042 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2045 /* if we deal with range reduction we need to scale source blocks */
2046 if (v->rangeredfrm) {
2048 uint8_t *src, *src2;
2051 for (j = 0; j < 17 + s->mspel * 2; j++) {
2052 for (i = 0; i < 17 + s->mspel * 2; i++)
2053 src[i] = ((src[i] - 128) >> 1) + 128;
2058 for (j = 0; j < 9; j++) {
2059 for (i = 0; i < 9; i++) {
2060 src[i] = ((src[i] - 128) >> 1) + 128;
2061 src2[i] = ((src2[i] - 128) >> 1) + 128;
2063 src += s->uvlinesize;
2064 src2 += s->uvlinesize;
2067 srcY += s->mspel * (1 + s->linesize);
2070 if (v->field_mode && v->cur_field_type) {
2071 off = s->current_picture_ptr->f.linesize[0];
2072 off_uv = s->current_picture_ptr->f.linesize[1];
2079 dxy = ((my & 3) << 2) | (mx & 3);
2080 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2081 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2082 srcY += s->linesize * 8;
2083 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2084 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2086 dxy = (my & 2) | ((mx & 2) >> 1);
2089 dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2091 dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2094 if (s->flags & CODEC_FLAG_GRAY) return;
2095 /* Chroma MC always uses qpel bilinear */
2096 uvmx = (uvmx & 3) << 1;
2097 uvmy = (uvmy & 3) << 1;
2099 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2100 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2102 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2103 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Scale a co-located anchor-frame MV component by the B-frame temporal
 * position (bfrac); 'inv' selects the backward-direction scaling and 'qs'
 * selects half-pel vs quarter-pel precision.
 * NOTE(review): this excerpt elides lines — the local 'n' is initialized
 * from bfrac (and adjusted by 'inv') on lines not shown here. */
2107 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
/* Fast path: when the fraction denominator is 256 the divisions reduce to shifts. */
2111 #if B_FRACTION_DEN==256
/* half-pel: round, then keep the result on the half-pel (even) grid */
2115 return 2 * ((value * n + 255) >> 9);
/* quarter-pel: round to nearest */
2116 return (value * n + 128) >> 8;
/* generic-denominator path: backward direction uses n - B_FRACTION_DEN */
2119 n -= B_FRACTION_DEN;
2121 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2122 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
/* MV scaling variant for interlaced-field B pictures: like scale_mv() but
 * takes the quarter-sample flag of the last (reference) picture as well.
 * NOTE(review): lines elided — 'n' is derived from bfrac/inv/qs/qs_last on
 * lines not visible in this excerpt. */
2126 static av_always_inline int scale_mv_intfi(int value, int bfrac, int inv,
2127 int qs, int qs_last)
/* round-up variant (divide by 512) */
2135 return (value * n + 255) >> 9;
/* round-to-nearest variant (divide by 256) */
2137 return (value * n + 128) >> 8;
2140 /** Reconstruct motion vector for B-frame and do motion compensation
/* Perform motion compensation for a B-frame macroblock.
 * Temporarily swaps v->mv_mode to MV_PMODE_INTENSITY_COMP around the MC
 * calls and restores it (saved in v->mv_mode2) afterwards.
 * NOTE(review): excerpt elides lines — the direct/forward branches and some
 * vc1_mc_* calls are on lines not shown here. */
2142 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2143 int direct, int mode)
/* remember the current prediction mode so it can be restored below */
2146 v->mv_mode2 = v->mv_mode;
2147 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2153 v->mv_mode = v->mv_mode2;
2156 if (mode == BMV_TYPE_INTERPOLATED) {
2160 v->mv_mode = v->mv_mode2;
/* with intensity compensation, backward MC must not use the IC mode */
2164 if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2165 v->mv_mode = v->mv_mode2;
2166 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2168 v->mv_mode = v->mv_mode2;
/* Predict forward and backward motion vectors for a B-frame macroblock
 * (direct mode scales the co-located next-picture MV; otherwise median
 * prediction from neighbours A/B/C per the VC-1 spec), then store the
 * final MVs in s->mv and the picture's motion_val arrays.
 * NOTE(review): this excerpt elides lines — several declarations (A, B, C,
 * px, py, r_x, r_y, sum) and branch bodies are on lines not shown. */
2171 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2172 int direct, int mvtype)
2174 MpegEncContext *s = &v->s;
2175 int xy, wrap, off = 0;
2180 const uint8_t *is_intra = v->mb_type[0];
2184 /* scale MV difference to be quad-pel */
2185 dmv_x[0] <<= 1 - s->quarter_sample;
2186 dmv_y[0] <<= 1 - s->quarter_sample;
2187 dmv_x[1] <<= 1 - s->quarter_sample;
2188 dmv_y[1] <<= 1 - s->quarter_sample;
2190 wrap = s->b8_stride;
2191 xy = s->block_index[0];
/* zero both direction MVs for this block position (intra / reset case) */
2194 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
2195 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
2196 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
2197 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
/* direct mode (progressive): scale the co-located MV of the next picture
 * into forward (inv=0) and backward (inv=1) components */
2200 if (!v->field_mode) {
2201 s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2202 s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2203 s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2204 s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2206 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2207 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2208 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2209 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2210 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2213 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2214 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2215 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2216 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
/* non-direct: median-predict the FORWARD MV from left (C), top (A) and
 * top-right/top-left (B) neighbours */
2220 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2221 C = s->current_picture.f.motion_val[0][xy - 2];
2222 A = s->current_picture.f.motion_val[0][xy - wrap * 2];
/* at the last MB of a row predictor B comes from the top-LEFT instead */
2223 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2224 B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
2226 if (!s->mb_x) C[0] = C[1] = 0;
2227 if (!s->first_slice_line) { // predictor A is not out of bounds
2228 if (s->mb_width == 1) {
2232 px = mid_pred(A[0], B[0], C[0]);
2233 py = mid_pred(A[1], B[1], C[1]);
2235 } else if (s->mb_x) { // predictor C is not out of bounds
2241 /* Pullback MV as specified in 8.3.5.3.4 */
2244 if (v->profile < PROFILE_ADVANCED) {
/* simple/main profile: half-pel units (<< 5) */
2245 qx = (s->mb_x << 5);
2246 qy = (s->mb_y << 5);
2247 X = (s->mb_width << 5) - 4;
2248 Y = (s->mb_height << 5) - 4;
2249 if (qx + px < -28) px = -28 - qx;
2250 if (qy + py < -28) py = -28 - qy;
2251 if (qx + px > X) px = X - qx;
2252 if (qy + py > Y) py = Y - qy;
/* advanced profile: quarter-pel units (<< 6) */
2254 qx = (s->mb_x << 6);
2255 qy = (s->mb_y << 6);
2256 X = (s->mb_width << 6) - 4;
2257 Y = (s->mb_height << 6) - 4;
2258 if (qx + px < -60) px = -60 - qx;
2259 if (qy + py < -60) py = -60 - qy;
2260 if (qx + px > X) px = X - qx;
2261 if (qy + py > Y) py = Y - qy;
2264 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): the leading '0 &&' makes this branch dead code, i.e.
 * hybrid MV prediction is disabled for B-frames here — confirm this is
 * intentional before removing it */
2265 if (0 && !s->first_slice_line && s->mb_x) {
2266 if (is_intra[xy - wrap])
2267 sum = FFABS(px) + FFABS(py);
2269 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2271 if (get_bits1(&s->gb)) {
2279 if (is_intra[xy - 2])
2280 sum = FFABS(px) + FFABS(py);
2282 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2284 if (get_bits1(&s->gb)) {
2294 /* store MV using signed modulus of MV range defined in 4.11 */
2295 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2296 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* same prediction procedure for the BACKWARD MV (reference list 1) */
2298 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2299 C = s->current_picture.f.motion_val[1][xy - 2];
2300 A = s->current_picture.f.motion_val[1][xy - wrap * 2];
2301 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2302 B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
2306 if (!s->first_slice_line) { // predictor A is not out of bounds
2307 if (s->mb_width == 1) {
2311 px = mid_pred(A[0], B[0], C[0]);
2312 py = mid_pred(A[1], B[1], C[1]);
2314 } else if (s->mb_x) { // predictor C is not out of bounds
2320 /* Pullback MV as specified in 8.3.5.3.4 */
2323 if (v->profile < PROFILE_ADVANCED) {
2324 qx = (s->mb_x << 5);
2325 qy = (s->mb_y << 5);
2326 X = (s->mb_width << 5) - 4;
2327 Y = (s->mb_height << 5) - 4;
2328 if (qx + px < -28) px = -28 - qx;
2329 if (qy + py < -28) py = -28 - qy;
2330 if (qx + px > X) px = X - qx;
2331 if (qy + py > Y) py = Y - qy;
2333 qx = (s->mb_x << 6);
2334 qy = (s->mb_y << 6);
2335 X = (s->mb_width << 6) - 4;
2336 Y = (s->mb_height << 6) - 4;
2337 if (qx + px < -60) px = -60 - qx;
2338 if (qy + py < -60) py = -60 - qy;
2339 if (qx + px > X) px = X - qx;
2340 if (qy + py > Y) py = Y - qy;
2343 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): dead via '0 &&' — same deliberate-disable question as
 * the forward-direction copy above */
2344 if (0 && !s->first_slice_line && s->mb_x) {
2345 if (is_intra[xy - wrap])
2346 sum = FFABS(px) + FFABS(py);
2348 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2350 if (get_bits1(&s->gb)) {
2358 if (is_intra[xy - 2])
2359 sum = FFABS(px) + FFABS(py);
2361 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2363 if (get_bits1(&s->gb)) {
2373 /* store MV using signed modulus of MV range defined in 4.11 */
2375 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2376 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
/* publish the final MVs so neighbouring MBs can use them as predictors */
2378 s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
2379 s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
2380 s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
2381 s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
/* Predict B-frame motion vectors for interlaced-field pictures.
 * Direct mode scales the co-located next-picture MV (scale_mv_intfi) and
 * picks the reference field by majority vote of the four co-located
 * block field flags; otherwise it delegates to vc1_pred_mv() for the
 * appropriate direction(s).
 * NOTE(review): excerpt elides lines — some branch bodies are not shown. */
2384 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2386 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2387 MpegEncContext *s = &v->s;
2388 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2390 if (v->bmvtype == BMV_TYPE_DIRECT) {
2391 int total_opp, k, f;
/* co-located MB is inter: derive both directions from its backward MV */
2392 if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2393 s->mv[0][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2394 v->bfraction, 0, s->quarter_sample, v->qs_last);
2395 s->mv[0][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2396 v->bfraction, 0, s->quarter_sample, v->qs_last);
2397 s->mv[1][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2398 v->bfraction, 1, s->quarter_sample, v->qs_last);
2399 s->mv[1][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2400 v->bfraction, 1, s->quarter_sample, v->qs_last);
/* majority vote over the 4 co-located blocks' opposite-field flags */
2402 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2403 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2404 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2405 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2406 f = (total_opp > 2) ? 1 : 0;
/* co-located MB is intra: zero MVs */
2408 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2409 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2412 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
/* replicate the direct MVs and field flag to all 4 luma blocks */
2413 for (k = 0; k < 4; k++) {
2414 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2415 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2416 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2417 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2418 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2419 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* interpolated: predict both forward (dir 0) and backward (dir 1) MVs */
2423 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2424 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2425 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2428 if (dir) { // backward
2429 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
/* also refresh the opposite direction once per MB (last block or 1MV) */
2430 if (n == 3 || mv1) {
2431 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2434 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2435 if (n == 3 || mv1) {
2436 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2441 /** Get predicted DC value for I-frames only
2442 * prediction dir: left=0, top=1
2443 * @param s MpegEncContext
2444 * @param overlap flag indicating that overlap filtering is used
2445 * @param pq integer part of picture quantizer
2446 * @param[in] n block index in the current MB
2447 * @param dc_val_ptr Pointer to DC predictor
2448 * @param dir_ptr Prediction direction for use in AC prediction
/* See the Doxygen block above: DC prediction for I-frames. Chooses the
 * left or top neighbour DC as predictor (direction also reported for AC
 * prediction) and returns the predicted DC value.
 * NOTE(review): excerpt elides lines — the dc_val declaration, the 'c'
 * neighbour load and the final return are on lines not shown. */
2450 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2451 int16_t **dc_val_ptr, int *dir_ptr)
2453 int a, b, c, wrap, pred, scale;
/* default DC ≈ 1024/scale, indexed by the quantizer-derived scale;
 * NOTE(review): entry 0 is -1 stored in a uint16_t (wraps to 65535) —
 * presumably never indexed since scale >= 1; confirm */
2455 static const uint16_t dcpred[32] = {
2456 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2457 114, 102, 93, 85, 79, 73, 68, 64,
2458 60, 57, 54, 51, 49, 47, 45, 43,
2459 41, 39, 38, 37, 35, 34, 33
2462 /* find prediction - wmv3_dc_scale always used here in fact */
2463 if (n < 4) scale = s->y_dc_scale;
2464 else scale = s->c_dc_scale;
2466 wrap = s->block_wrap[n];
2467 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour, a = top neighbour (c = left, loaded on an
 * elided line) */
2473 b = dc_val[ - 1 - wrap];
2474 a = dc_val[ - wrap];
2476 if (pq < 9 || !overlap) {
2477 /* Set outer values */
2478 if (s->first_slice_line && (n != 2 && n != 3))
2479 b = a = dcpred[scale];
2480 if (s->mb_x == 0 && (n != 1 && n != 3))
2481 b = c = dcpred[scale];
2483 /* Set outer values */
2484 if (s->first_slice_line && (n != 2 && n != 3))
2486 if (s->mb_x == 0 && (n != 1 && n != 3))
/* pick the direction whose gradient is smaller (spec's DC direction rule) */
2490 if (abs(a - b) <= abs(b - c)) {
2492 *dir_ptr = 1; // left
2495 *dir_ptr = 0; // top
2498 /* update predictor */
2499 *dc_val_ptr = &dc_val[0];
2504 /** Get predicted DC value
2505 * prediction dir: left=0, top=1
2506 * @param s MpegEncContext
2507 * @param overlap flag indicating that overlap filtering is used
2508 * @param pq integer part of picture quantizer
2509 * @param[in] n block index in the current MB
2510 * @param a_avail flag indicating top block availability
2511 * @param c_avail flag indicating left block availability
2512 * @param dc_val_ptr Pointer to DC predictor
2513 * @param dir_ptr Prediction direction for use in AC prediction
/* See the Doxygen block above: general DC prediction with explicit
 * availability flags. Neighbour DC values are rescaled when the
 * neighbouring MB used a different quantizer, then the left/top
 * predictor is chosen by gradient comparison.
 * NOTE(review): excerpt elides lines — dc_val/q1/q2/off declarations, the
 * 'c' load and the return statement are not shown. */
2515 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2516 int a_avail, int c_avail,
2517 int16_t **dc_val_ptr, int *dir_ptr)
2519 int a, b, c, wrap, pred;
2521 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2524 wrap = s->block_wrap[n];
2525 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left, a = top neighbour DC */
2531 b = dc_val[ - 1 - wrap];
2532 a = dc_val[ - wrap];
2533 /* scale predictors if needed */
2534 q1 = s->current_picture.f.qscale_table[mb_pos];
/* left neighbour: rescale c from its MB quantizer q2 to q1
 * (fixed-point multiply by q2/q1, 18-bit fraction) */
2535 if (c_avail && (n != 1 && n != 3)) {
2536 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2538 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* top neighbour: same rescaling */
2540 if (a_avail && (n != 2 && n != 3)) {
2541 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2543 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* top-left neighbour: same rescaling (off computed on elided lines) */
2545 if (a_avail && c_avail && (n != 3)) {
2550 off -= s->mb_stride;
2551 q2 = s->current_picture.f.qscale_table[off];
2553 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* choose the prediction direction from whichever neighbours exist */
2556 if (a_avail && c_avail) {
2557 if (abs(a - b) <= abs(b - c)) {
2559 *dir_ptr = 1; // left
2562 *dir_ptr = 0; // top
2564 } else if (a_avail) {
2566 *dir_ptr = 0; // top
2567 } else if (c_avail) {
2569 *dir_ptr = 1; // left
/* neither neighbour available: defaults to left direction */
2572 *dir_ptr = 1; // left
2575 /* update predictor */
2576 *dc_val_ptr = &dc_val[0];
2580 /** @} */ // Block group
2583 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2584 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
/* Predict the coded-block flag for block n from the left (a), top-left (b)
 * and top (c) neighbours, and hand back a pointer so the caller can store
 * the actual flag for future predictions.
 * NOTE(review): excerpt elides lines — the prediction formula computed
 * from a/b/c and the return are on lines not shown. */
2588 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2589 uint8_t **coded_block_ptr)
2591 int xy, wrap, pred, a, b, c;
2593 xy = s->block_index[n];
2594 wrap = s->b8_stride;
/* neighbour coded flags: left, top-left, top */
2599 a = s->coded_block[xy - 1 ];
2600 b = s->coded_block[xy - 1 - wrap];
2601 c = s->coded_block[xy - wrap];
/* caller writes the decoded flag through this pointer */
2610 *coded_block_ptr = &s->coded_block[xy];
2616 * Decode one AC coefficient
2617 * @param v The VC1 context
2618 * @param last Last coefficient
2619 * @param skip How many zero coefficients to skip
2620 * @param value Decoded AC coefficient value
2621 * @param codingset set of VLC to decode data
/* See the Doxygen block above: decode one (run, level, last) AC triple.
 * Regular codes come straight from the VLC table; escape modes 1/2 add a
 * delta from the run/level tables, escape mode 3 reads explicit run/level
 * fields whose bit lengths are set up on first use.
 * NOTE(review): excerpt elides lines — sign handling, the mode-1/2
 * branch structure and the final *last/*skip/*value stores are not shown. */
2624 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2625 int *value, int codingset)
2627 GetBitContext *gb = &v->s.gb;
2628 int index, escape, run = 0, level = 0, lst = 0;
2630 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
/* regular (non-escape) code: run/level straight from the tables */
2631 if (index != vc1_ac_sizes[codingset] - 1) {
2632 run = vc1_index_decode_table[codingset][index][0];
2633 level = vc1_index_decode_table[codingset][index][1];
/* force 'last' on bitstream exhaustion so the caller terminates */
2634 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
/* escape: 0 / 10 / 11 selects escape mode 1, 2 or 3 */
2638 escape = decode210(gb);
2640 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2641 run = vc1_index_decode_table[codingset][index][0];
2642 level = vc1_index_decode_table[codingset][index][1];
2643 lst = index >= vc1_last_decode_table[codingset];
/* escape mode 1: bump the level by the run-dependent delta */
2646 level += vc1_last_delta_level_table[codingset][run];
2648 level += vc1_delta_level_table[codingset][run];
/* escape mode 2: bump the run by the level-dependent delta */
2651 run += vc1_last_delta_run_table[codingset][level] + 1;
2653 run += vc1_delta_run_table[codingset][level] + 1;
/* escape mode 3: explicit fields; field widths are decoded once and
 * cached in esc3_level_length / esc3_run_length */
2659 lst = get_bits1(gb);
2660 if (v->s.esc3_level_length == 0) {
2661 if (v->pq < 8 || v->dquantfrm) { // table 59
2662 v->s.esc3_level_length = get_bits(gb, 3);
2663 if (!v->s.esc3_level_length)
2664 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2665 } else { // table 60
2666 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2668 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2670 run = get_bits(gb, v->s.esc3_run_length);
2671 sign = get_bits1(gb);
2672 level = get_bits(gb, v->s.esc3_level_length);
2683 /** Decode intra block in intra frames - should be faster than decode_intra_block
2684 * @param v VC1Context
2685 * @param block block to decode
2686 * @param[in] n subblock index
2687 * @param coded are AC coeffs present or not
2688 * @param codingset set of VLC to decode data
/* See the Doxygen block above: decode one intra block of an I-frame —
 * DC VLC + prediction, then AC run/level decode through the selected
 * zigzag table with optional AC prediction from the neighbour.
 * NOTE(review): excerpt elides lines — dcdiff/dc_val/i/k/scale
 * declarations, sign handling, several if/else frames and the return
 * are on lines not shown. */
2690 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n,
2691 int coded, int codingset)
2693 GetBitContext *gb = &v->s.gb;
2694 MpegEncContext *s = &v->s;
2695 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2698 int16_t *ac_val, *ac_val2;
2701 /* Get DC differential */
/* luma blocks (n < 4) and chroma use separate DC VLC tables */
2703 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2705 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2708 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2712 if (dcdiff == 119 /* ESC index value */) {
2713 /* TODO: Optimize */
/* escape: fixed-length DC differential, width depends on PQUANT */
2714 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2715 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2716 else dcdiff = get_bits(gb, 8);
/* low-quant refinement bits extend the DC differential */
2719 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2720 else if (v->pq == 2)
2721 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* add the predicted DC (I-frame predictor) */
2728 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2731 /* Store the quantized DC coeff, used for prediction */
2733 block[0] = dcdiff * s->y_dc_scale;
2735 block[0] = dcdiff * s->c_dc_scale;
/* coded path: decode AC run/level pairs */
2746 int last = 0, skip, value;
2747 const uint8_t *zz_table;
2751 scale = v->pq * 2 + v->halfpq;
/* zigzag table depends on AC prediction direction */
2755 zz_table = v->zz_8x8[2];
2757 zz_table = v->zz_8x8[3];
2759 zz_table = v->zz_8x8[1];
2761 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2763 if (dc_pred_dir) // left
2766 ac_val -= 16 * s->block_wrap[n];
2769 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2773 block[zz_table[i++]] = value;
2776 /* apply AC prediction if needed */
2778 if (dc_pred_dir) { // left
2779 for (k = 1; k < 8; k++)
2780 block[k << v->left_blk_sh] += ac_val[k];
2782 for (k = 1; k < 8; k++)
2783 block[k << v->top_blk_sh] += ac_val[k + 8];
2786 /* save AC coeffs for further prediction */
2787 for (k = 1; k < 8; k++) {
2788 ac_val2[k] = block[k << v->left_blk_sh];
2789 ac_val2[k + 8] = block[k << v->top_blk_sh];
2792 /* scale AC coeffs */
2793 for (k = 1; k < 64; k++)
2797 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2800 if (s->ac_pred) i = 63;
/* uncoded path: only the predicted AC column/row may be non-zero */
2806 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2810 scale = v->pq * 2 + v->halfpq;
2811 memset(ac_val2, 0, 16 * 2);
2812 if (dc_pred_dir) { // left
2815 memcpy(ac_val2, ac_val, 8 * 2);
2817 ac_val -= 16 * s->block_wrap[n];
2819 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2822 /* apply AC prediction if needed */
2824 if (dc_pred_dir) { //left
2825 for (k = 1; k < 8; k++) {
2826 block[k << v->left_blk_sh] = ac_val[k] * scale;
2827 if (!v->pquantizer && block[k << v->left_blk_sh])
2828 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2831 for (k = 1; k < 8; k++) {
2832 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2833 if (!v->pquantizer && block[k << v->top_blk_sh])
2834 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2840 s->block_last_index[n] = i;
2845 /** Decode intra block in intra frames - should be faster than decode_intra_block
2846 * @param v VC1Context
2847 * @param block block to decode
2848 * @param[in] n subblock number
2849 * @param coded are AC coeffs present or not
2850 * @param codingset set of VLC to decode data
2851 * @param mquant quantizer value for this macroblock
/* See the Doxygen block above: advanced-profile variant of
 * vc1_decode_i_block — uses the per-MB quantizer (mquant), availability
 * flags, and rescales neighbour AC predictors when the neighbour MB used
 * a different quantizer.
 * NOTE(review): excerpt elides lines — dcdiff/dc_val/i/k/scale/q1/q2
 * declarations, sign handling, several if/else frames and the return
 * are not shown. */
2853 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
2854 int coded, int codingset, int mquant)
2856 GetBitContext *gb = &v->s.gb;
2857 MpegEncContext *s = &v->s;
2858 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2861 int16_t *ac_val, *ac_val2;
2863 int a_avail = v->a_avail, c_avail = v->c_avail;
2864 int use_pred = s->ac_pred;
2867 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2869 /* Get DC differential */
2871 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2873 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2876 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2880 if (dcdiff == 119 /* ESC index value */) {
2881 /* TODO: Optimize */
/* escape: fixed-length DC differential, width depends on mquant */
2882 if (mquant == 1) dcdiff = get_bits(gb, 10);
2883 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2884 else dcdiff = get_bits(gb, 8);
2887 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2888 else if (mquant == 2)
2889 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* add the predicted DC (generic predictor with availability flags) */
2896 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2899 /* Store the quantized DC coeff, used for prediction */
2901 block[0] = dcdiff * s->y_dc_scale;
2903 block[0] = dcdiff * s->c_dc_scale;
2909 /* check if AC is needed at all */
2910 if (!a_avail && !c_avail)
2912 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
/* halfpq only applies when the MB quantizer equals the picture quantizer */
2915 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2917 if (dc_pred_dir) // left
2920 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer, q2 = the predictor MB's quantizer */
2922 q1 = s->current_picture.f.qscale_table[mb_pos];
2923 if ( dc_pred_dir && c_avail && mb_pos)
2924 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2925 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2926 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
/* blocks 1/2: the predictor is inside the same MB */
2927 if ( dc_pred_dir && n == 1)
2929 if (!dc_pred_dir && n == 2)
2935 int last = 0, skip, value;
2936 const uint8_t *zz_table;
/* interlaced frame coding (fcm == 1) uses the interlaced zigzag */
2940 if (!use_pred && v->fcm == 1) {
2941 zz_table = v->zzi_8x8;
2943 if (!dc_pred_dir) // top
2944 zz_table = v->zz_8x8[2];
2946 zz_table = v->zz_8x8[3];
2950 zz_table = v->zz_8x8[1];
2952 zz_table = v->zzi_8x8;
2956 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2960 block[zz_table[i++]] = value;
2963 /* apply AC prediction if needed */
2965 /* scale predictors if needed*/
/* neighbour used a different quantizer: rescale its AC values
 * (fixed-point multiply by q2/q1 via ff_vc1_dqscale, 18-bit fraction) */
2966 if (q2 && q1 != q2) {
2967 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2968 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2970 if (dc_pred_dir) { // left
2971 for (k = 1; k < 8; k++)
2972 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2974 for (k = 1; k < 8; k++)
2975 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2978 if (dc_pred_dir) { //left
2979 for (k = 1; k < 8; k++)
2980 block[k << v->left_blk_sh] += ac_val[k];
2982 for (k = 1; k < 8; k++)
2983 block[k << v->top_blk_sh] += ac_val[k + 8];
2987 /* save AC coeffs for further prediction */
2988 for (k = 1; k < 8; k++) {
2989 ac_val2[k ] = block[k << v->left_blk_sh];
2990 ac_val2[k + 8] = block[k << v->top_blk_sh];
2993 /* scale AC coeffs */
2994 for (k = 1; k < 64; k++)
2998 block[k] += (block[k] < 0) ? -mquant : mquant;
3001 if (use_pred) i = 63;
3002 } else { // no AC coeffs
3005 memset(ac_val2, 0, 16 * 2);
3006 if (dc_pred_dir) { // left
3008 memcpy(ac_val2, ac_val, 8 * 2);
/* rescale the copied predictors when quantizers differ */
3009 if (q2 && q1 != q2) {
3010 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3011 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3012 for (k = 1; k < 8; k++)
3013 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3018 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3019 if (q2 && q1 != q2) {
3020 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3021 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3022 for (k = 1; k < 8; k++)
3023 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3028 /* apply AC prediction if needed */
3030 if (dc_pred_dir) { // left
3031 for (k = 1; k < 8; k++) {
3032 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3033 if (!v->pquantizer && block[k << v->left_blk_sh])
3034 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3037 for (k = 1; k < 8; k++) {
3038 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3039 if (!v->pquantizer && block[k << v->top_blk_sh])
3040 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3046 s->block_last_index[n] = i;
3051 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3052 * @param v VC1Context
3053 * @param block block to decode
3054 * @param[in] n subblock index
3055 * @param coded are AC coeffs present or not
3056 * @param mquant block quantizer
3057 * @param codingset set of VLC to decode data
/* See the Doxygen block above: decode an intra block inside an inter
 * frame. More general than vc1_decode_i_block: clamps mquant, fixes up
 * the prediction direction from availability flags, and supports the
 * interlaced zigzag tables.
 * NOTE(review): excerpt elides lines — dcdiff/dc_val/i/k/scale/q1/q2
 * declarations, sign handling, several if/else frames and the return
 * are not shown. */
3059 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
3060 int coded, int mquant, int codingset)
3062 GetBitContext *gb = &v->s.gb;
3063 MpegEncContext *s = &v->s;
3064 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3067 int16_t *ac_val, *ac_val2;
3069 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3070 int a_avail = v->a_avail, c_avail = v->c_avail;
3071 int use_pred = s->ac_pred;
3075 s->dsp.clear_block(block);
3077 /* XXX: Guard against dumb values of mquant */
3078 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3080 /* Set DC scale - y and c use the same */
3081 s->y_dc_scale = s->y_dc_scale_table[mquant];
3082 s->c_dc_scale = s->c_dc_scale_table[mquant];
3084 /* Get DC differential */
3086 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3088 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3091 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3095 if (dcdiff == 119 /* ESC index value */) {
3096 /* TODO: Optimize */
3097 if (mquant == 1) dcdiff = get_bits(gb, 10);
3098 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3099 else dcdiff = get_bits(gb, 8);
3102 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3103 else if (mquant == 2)
3104 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* add the predicted DC */
3111 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3114 /* Store the quantized DC coeff, used for prediction */
3117 block[0] = dcdiff * s->y_dc_scale;
3119 block[0] = dcdiff * s->c_dc_scale;
3125 /* check if AC is needed at all and adjust direction if needed */
3126 if (!a_avail) dc_pred_dir = 1;
3127 if (!c_avail) dc_pred_dir = 0;
3128 if (!a_avail && !c_avail) use_pred = 0;
3129 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3132 scale = mquant * 2 + v->halfpq;
3134 if (dc_pred_dir) //left
3137 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer, q2 = predictor MB's quantizer */
3139 q1 = s->current_picture.f.qscale_table[mb_pos];
3140 if (dc_pred_dir && c_avail && mb_pos)
3141 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3142 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3143 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
/* blocks 1/2/3: the predictor lives inside the same MB */
3144 if ( dc_pred_dir && n == 1)
3146 if (!dc_pred_dir && n == 2)
3148 if (n == 3) q2 = q1;
3151 int last = 0, skip, value;
3155 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
/* progressive path: normal zigzag */
3160 block[v->zz_8x8[0][i++]] = value;
/* interlaced frame coding: direction-dependent or interlaced zigzag */
3162 if (use_pred && (v->fcm == 1)) {
3163 if (!dc_pred_dir) // top
3164 block[v->zz_8x8[2][i++]] = value;
3166 block[v->zz_8x8[3][i++]] = value;
3168 block[v->zzi_8x8[i++]] = value;
3173 /* apply AC prediction if needed */
3175 /* scale predictors if needed*/
/* predictor MB used a different quantizer: rescale its AC values */
3176 if (q2 && q1 != q2) {
3177 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3178 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3180 if (dc_pred_dir) { // left
3181 for (k = 1; k < 8; k++)
3182 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3184 for (k = 1; k < 8; k++)
3185 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3188 if (dc_pred_dir) { // left
3189 for (k = 1; k < 8; k++)
3190 block[k << v->left_blk_sh] += ac_val[k];
3192 for (k = 1; k < 8; k++)
3193 block[k << v->top_blk_sh] += ac_val[k + 8];
3197 /* save AC coeffs for further prediction */
3198 for (k = 1; k < 8; k++) {
3199 ac_val2[k ] = block[k << v->left_blk_sh];
3200 ac_val2[k + 8] = block[k << v->top_blk_sh];
3203 /* scale AC coeffs */
3204 for (k = 1; k < 64; k++)
3208 block[k] += (block[k] < 0) ? -mquant : mquant;
3211 if (use_pred) i = 63;
3212 } else { // no AC coeffs
3215 memset(ac_val2, 0, 16 * 2);
3216 if (dc_pred_dir) { // left
3218 memcpy(ac_val2, ac_val, 8 * 2);
3219 if (q2 && q1 != q2) {
3220 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3221 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3222 for (k = 1; k < 8; k++)
3223 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3228 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3229 if (q2 && q1 != q2) {
3230 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3231 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3232 for (k = 1; k < 8; k++)
3233 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3238 /* apply AC prediction if needed */
3240 if (dc_pred_dir) { // left
3241 for (k = 1; k < 8; k++) {
3242 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3243 if (!v->pquantizer && block[k << v->left_blk_sh])
3244 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3247 for (k = 1; k < 8; k++) {
3248 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3249 if (!v->pquantizer && block[k << v->top_blk_sh])
3250 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3256 s->block_last_index[n] = i;
/**
 * Decode one inter (P) residual block.
 * Reads the block transform type (TT) and subblock pattern from the
 * bitstream where signalled, decodes and dequantizes the AC run/level
 * coefficients along the zigzag order matching the transform, then
 * applies the inverse transform and adds the result to @p dst
 * (skipped when @p skip_block is set, e.g. for gray-only decoding).
 * The chosen transform type is merged into *ttmb_out for use by the
 * in-loop filter of later macroblocks.
 */
3263 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n,
3264 int mquant, int ttmb, int first_block,
3265 uint8_t *dst, int linesize, int skip_block,
3268 MpegEncContext *s = &v->s;
3269 GetBitContext *gb = &s->gb;
3272 int scale, off, idx, last, skip, value;
3273 int ttblk = ttmb & 7;
3276 s->dsp.clear_block(block);
/* Transform type signalled per block: read it from the bitstream. */
3279 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3281 if (ttblk == TT_4X4) {
3282 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3284 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3285 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3286 || (!v->res_rtm_flag && !first_block))) {
3287 subblkpat = decode012(gb);
3289 subblkpat ^= 3; // swap decoded pattern bits
3290 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3292 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
/* Dequant scale: doubled quantizer, plus the half-step when applicable. */
3295 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3297 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3298 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3299 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3302 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3303 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* TT_8X8 path: one 8x8 transform over the whole block. */
3312 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3317 idx = v->zz_8x8[0][i++];
3319 idx = v->zzi_8x8[i++];
3320 block[idx] = value * scale;
3322 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3326 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3328 v->vc1dsp.vc1_inv_trans_8x8(block);
3329 s->dsp.add_pixels_clamped(block, dst, linesize);
/* TT_4X4 path: four independent 4x4 subblocks, coded per subblkpat. */
3334 pat = ~subblkpat & 0xF;
3335 for (j = 0; j < 4; j++) {
3336 last = subblkpat & (1 << (3 - j));
3338 off = (j & 1) * 4 + (j & 2) * 16;
3340 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3345 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3347 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3348 block[idx + off] = value * scale;
3350 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3352 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3354 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3356 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* TT_8X4 path: two horizontal 8x4 halves. */
3361 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3362 for (j = 0; j < 2; j++) {
3363 last = subblkpat & (1 << (1 - j));
3367 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3372 idx = v->zz_8x4[i++] + off;
3374 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3375 block[idx] = value * scale;
3377 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3379 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3381 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3383 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* TT_4X8 path: two vertical 4x8 halves. */
3388 pat = ~(subblkpat * 5) & 0xF;
3389 for (j = 0; j < 2; j++) {
3390 last = subblkpat & (1 << (1 - j));
3394 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3399 idx = v->zz_4x8[i++] + off;
3401 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3402 block[idx] = value * scale;
3404 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3406 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3408 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3410 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* Record this block's transform type for the loop filter (4 bits/block). */
3416 *ttmb_out |= ttblk << (n * 4);
3420 /** @} */ // Macroblock group
/* Lookup tables for motion-vector differential decoding:
 * size_table gives the number of extra bits per index,
 * offset_table the corresponding base offset (2^n - 1 progression). */
3422 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3423 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/**
 * Apply the vertical in-loop deblocking filter for one 8x8 block of a
 * P-frame macroblock.  Works on the MB one row above the current one
 * (indices use s->mb_x - s->mb_stride), using the stored per-block cbp,
 * intra flags, motion vectors and transform types to decide whether to
 * filter the full 8-sample edge or 4-sample halves.
 * @param block_num  0-3 = luma blocks, 4-5 = chroma (Cb/Cr)
 */
3425 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3427 MpegEncContext *s = &v->s;
3428 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3429 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3430 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3431 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3432 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3435 if (block_num > 3) {
3436 dst = s->dest[block_num - 3];
3438 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
/* Filter the bottom edge of this block against the block below,
 * except at the bottom picture border. */
3440 if (s->mb_y != s->end_mb_y || block_num < 2) {
3444 if (block_num > 3) {
3445 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3446 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3447 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3448 mv_stride = s->mb_stride;
3450 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3451 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3452 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3453 : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3454 mv_stride = s->b8_stride;
3455 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* Always filter the full 8-sample edge when either side is intra or
 * the motion vectors across the edge differ. */
3458 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3459 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3460 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3462 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3464 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3467 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3469 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* Internal horizontal edge of the block: only present for transform
 * types that split the block vertically (4x4 or 8x4). */
3474 dst -= 4 * linesize;
3475 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3476 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3477 idx = (block_cbp | (block_cbp >> 2)) & 3;
3479 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3482 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3484 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/**
 * Apply the horizontal in-loop deblocking filter for one 8x8 block of a
 * P-frame macroblock.  Mirror of vc1_apply_p_v_loop_filter(), but works
 * on the MB up-left of the current one (s->mb_x - 1 - s->mb_stride) and
 * filters the right edge against the neighbouring block, plus the
 * internal vertical edge for 4x4/4x8 transform types.
 * @param block_num  0-3 = luma blocks, 4-5 = chroma (Cb/Cr)
 */
3489 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3491 MpegEncContext *s = &v->s;
3492 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3493 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3494 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3495 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3496 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3499 if (block_num > 3) {
3500 dst = s->dest[block_num - 3] - 8 * linesize;
3502 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
/* Filter the right edge against the neighbouring block, except at the
 * right picture border. */
3505 if (s->mb_x != s->mb_width || !(block_num & 5)) {
3508 if (block_num > 3) {
3509 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3510 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3511 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3513 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3514 : (mb_cbp >> ((block_num + 1) * 4));
3515 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3516 : (mb_is_intra >> ((block_num + 1) * 4));
3517 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3519 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3520 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3522 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3524 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3527 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3529 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* Internal vertical edge: only for transforms split horizontally
 * (4x4 or 4x8). */
3535 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3536 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3537 idx = (block_cbp | (block_cbp >> 1)) & 5;
3539 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3542 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3544 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/**
 * Run the P-frame in-loop deblocking filter for the current macroblock:
 * vertical edges for all six blocks, then horizontal edges (which lag
 * one MB behind, since V filtering must complete first).
 */
3549 static void vc1_apply_p_loop_filter(VC1Context *v)
3551 MpegEncContext *s = &v->s;
3554 for (i = 0; i < 6; i++) {
3555 vc1_apply_p_v_loop_filter(v, i);
3558 /* V always precedes H, therefore we run H one MB before V;
3559 * at the end of a row, we catch up to complete the row */
3561 for (i = 0; i < 6; i++) {
3562 vc1_apply_p_h_loop_filter(v, i);
/* At the end of the row, advance once more and finish the pending
 * H filtering for the last MB of the row. */
3564 if (s->mb_x == s->mb_width - 1) {
3566 ff_update_block_index(s);
3567 for (i = 0; i < 6; i++) {
3568 vc1_apply_p_h_loop_filter(v, i);
3574 /** Decode one P-frame MB
/**
 * Decode one macroblock of a progressive P frame.
 * Handles the four MB flavours: 1MV coded, 1MV skipped, 4MV coded and
 * 4MV skipped.  For each it reads MV data, cbp and quantizer updates
 * from the bitstream, performs motion compensation, and decodes either
 * intra blocks (with optional overlap smoothing) or inter residual
 * blocks via vc1_decode_p_block().  Per-block cbp, transform-type and
 * intra flags are stored in v->cbp / v->ttblk / v->is_intra for the
 * in-loop filter.
 */
3576 static int vc1_decode_p_mb(VC1Context *v)
3578 MpegEncContext *s = &v->s;
3579 GetBitContext *gb = &s->gb;
3581 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3582 int cbp; /* cbp decoding stuff */
3583 int mqdiff, mquant; /* MB quantization */
3584 int ttmb = v->ttfrm; /* MB Transform type */
3586 int mb_has_coeffs = 1; /* last_flag */
3587 int dmv_x, dmv_y; /* Differential MV components */
3588 int index, index1; /* LUT indexes */
3589 int val, sign; /* temp values */
3590 int first_block = 1;
3592 int skipped, fourmv;
3593 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3595 mquant = v->pq; /* Lossy initialization */
/* MV-type and skip flags: raw-coded in the bitstream or from the
 * bitplane decoded in the picture header. */
3597 if (v->mv_type_is_raw)
3598 fourmv = get_bits1(gb);
3600 fourmv = v->mv_type_mb_plane[mb_pos];
3602 skipped = get_bits1(gb);
3604 skipped = v->s.mbskip_table[mb_pos];
3606 if (!fourmv) { /* 1MV mode */
3608 GET_MVDATA(dmv_x, dmv_y);
3611 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3612 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3614 s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3615 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3617 /* FIXME Set DC val for inter block ? */
3618 if (s->mb_intra && !mb_has_coeffs) {
3620 s->ac_pred = get_bits1(gb);
3622 } else if (mb_has_coeffs) {
3624 s->ac_pred = get_bits1(gb);
3625 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3631 s->current_picture.f.qscale_table[mb_pos] = mquant;
3633 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3634 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3635 VC1_TTMB_VLC_BITS, 2);
3636 if (!s->mb_intra) vc1_mc_1mv(v, 0);
3638 for (i = 0; i < 6; i++) {
3639 s->dc_val[0][s->block_index[i]] = 0;
3641 val = ((cbp >> (5 - i)) & 1);
3642 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3643 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3645 /* check if prediction blocks A and C are available */
3646 v->a_avail = v->c_avail = 0;
3647 if (i == 2 || i == 3 || !s->first_slice_line)
3648 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3649 if (i == 1 || i == 3 || s->mb_x)
3650 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3652 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3653 (i & 4) ? v->codingset2 : v->codingset);
3654 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3656 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3658 for (j = 0; j < 64; j++)
3659 s->block[i][j] <<= 1;
3660 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* Overlap smoothing across intra block edges (pq >= 9 only). */
3661 if (v->pq >= 9 && v->overlap) {
3663 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3665 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3667 block_cbp |= 0xF << (i << 2);
3668 block_intra |= 1 << i;
3670 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3671 s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3672 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3673 block_cbp |= pat << (i << 2);
3674 if (!v->ttmbf && ttmb < 8)
/* Skipped 1MV MB: predict the MV and motion-compensate, no residual. */
3681 for (i = 0; i < 6; i++) {
3682 v->mb_type[0][s->block_index[i]] = 0;
3683 s->dc_val[0][s->block_index[i]] = 0;
3685 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3686 s->current_picture.f.qscale_table[mb_pos] = 0;
3687 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3690 } else { // 4MV mode
3691 if (!skipped /* unskipped MB */) {
3692 int intra_count = 0, coded_inter = 0;
3693 int is_intra[6], is_coded[6];
/* First pass over the four luma blocks: MV data, prediction and
 * luma MC; record per-block intra/coded status. */
3695 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3696 for (i = 0; i < 6; i++) {
3697 val = ((cbp >> (5 - i)) & 1);
3698 s->dc_val[0][s->block_index[i]] = 0;
3705 GET_MVDATA(dmv_x, dmv_y);
3707 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3709 vc1_mc_4mv_luma(v, i, 0);
3710 intra_count += s->mb_intra;
3711 is_intra[i] = s->mb_intra;
3712 is_coded[i] = mb_has_coeffs;
/* Chroma blocks are intra when at least 3 of 4 luma blocks are. */
3715 is_intra[i] = (intra_count >= 3);
3719 vc1_mc_4mv_chroma(v, 0);
3720 v->mb_type[0][s->block_index[i]] = is_intra[i];
3722 coded_inter = !is_intra[i] & is_coded[i];
3724 // if there are no coded blocks then don't do anything more
3726 if (!intra_count && !coded_inter)
3729 s->current_picture.f.qscale_table[mb_pos] = mquant;
3730 /* test if block is intra and has pred */
3733 for (i = 0; i < 6; i++)
3735 if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3736 || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3742 s->ac_pred = get_bits1(gb);
3746 if (!v->ttmbf && coded_inter)
3747 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Second pass: decode residuals block by block. */
3748 for (i = 0; i < 6; i++) {
3750 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3751 s->mb_intra = is_intra[i];
3753 /* check if prediction blocks A and C are available */
3754 v->a_avail = v->c_avail = 0;
3755 if (i == 2 || i == 3 || !s->first_slice_line)
3756 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3757 if (i == 1 || i == 3 || s->mb_x)
3758 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3760 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3761 (i & 4) ? v->codingset2 : v->codingset);
3762 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3764 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3766 for (j = 0; j < 64; j++)
3767 s->block[i][j] <<= 1;
3768 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3769 (i & 4) ? s->uvlinesize : s->linesize);
3770 if (v->pq >= 9 && v->overlap) {
3772 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3774 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3776 block_cbp |= 0xF << (i << 2);
3777 block_intra |= 1 << i;
3778 } else if (is_coded[i]) {
3779 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3780 first_block, s->dest[dst_idx] + off,
3781 (i & 4) ? s->uvlinesize : s->linesize,
3782 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3784 block_cbp |= pat << (i << 2);
3785 if (!v->ttmbf && ttmb < 8)
3790 } else { // skipped MB
3792 s->current_picture.f.qscale_table[mb_pos] = 0;
3793 for (i = 0; i < 6; i++) {
3794 v->mb_type[0][s->block_index[i]] = 0;
3795 s->dc_val[0][s->block_index[i]] = 0;
/* Skipped 4MV MB: predict each luma MV with zero differential and MC. */
3797 for (i = 0; i < 4; i++) {
3798 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3799 vc1_mc_4mv_luma(v, i, 0);
3801 vc1_mc_4mv_chroma(v, 0);
3802 s->current_picture.f.qscale_table[mb_pos] = 0;
/* Save per-block state for the in-loop filter. */
3806 v->cbp[s->mb_x] = block_cbp;
3807 v->ttblk[s->mb_x] = block_tt;
3808 v->is_intra[s->mb_x] = block_intra;
3813 /* Decode one macroblock in an interlaced frame p picture */
/**
 * Decode one macroblock of an interlaced-frame P picture.
 * The MB mode VLC selects between intra, 1MV, 2MV-field and 4MV(-field)
 * coding; blk_mv_type records per-block whether field MVs are used.
 * Intra MBs optionally use field transform (fieldtx), which changes the
 * destination stride/offset for luma.  Inter MBs read the MV block
 * pattern, perform per-block MC, then decode residuals via
 * vc1_decode_p_block().
 */
3815 static int vc1_decode_p_mb_intfr(VC1Context *v)
3817 MpegEncContext *s = &v->s;
3818 GetBitContext *gb = &s->gb;
3820 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3821 int cbp = 0; /* cbp decoding stuff */
3822 int mqdiff, mquant; /* MB quantization */
3823 int ttmb = v->ttfrm; /* MB Transform type */
3825 int mb_has_coeffs = 1; /* last_flag */
3826 int dmv_x, dmv_y; /* Differential MV components */
3827 int val; /* temp value */
3828 int first_block = 1;
3830 int skipped, fourmv = 0, twomv = 0;
3831 int block_cbp = 0, pat, block_tt = 0;
3832 int idx_mbmode = 0, mvbp;
3833 int stride_y, fieldtx;
3835 mquant = v->pq; /* Lossy initialization */
3838 skipped = get_bits1(gb);
3840 skipped = v->s.mbskip_table[mb_pos];
3842 if (v->fourmvswitch)
3843 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3845 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3846 switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3847 /* store the motion vector type in a flag (useful later) */
3848 case MV_PMODE_INTFR_4MV:
3850 v->blk_mv_type[s->block_index[0]] = 0;
3851 v->blk_mv_type[s->block_index[1]] = 0;
3852 v->blk_mv_type[s->block_index[2]] = 0;
3853 v->blk_mv_type[s->block_index[3]] = 0;
3855 case MV_PMODE_INTFR_4MV_FIELD:
3857 v->blk_mv_type[s->block_index[0]] = 1;
3858 v->blk_mv_type[s->block_index[1]] = 1;
3859 v->blk_mv_type[s->block_index[2]] = 1;
3860 v->blk_mv_type[s->block_index[3]] = 1;
3862 case MV_PMODE_INTFR_2MV_FIELD:
3864 v->blk_mv_type[s->block_index[0]] = 1;
3865 v->blk_mv_type[s->block_index[1]] = 1;
3866 v->blk_mv_type[s->block_index[2]] = 1;
3867 v->blk_mv_type[s->block_index[3]] = 1;
3869 case MV_PMODE_INTFR_1MV:
3870 v->blk_mv_type[s->block_index[0]] = 0;
3871 v->blk_mv_type[s->block_index[1]] = 0;
3872 v->blk_mv_type[s->block_index[2]] = 0;
3873 v->blk_mv_type[s->block_index[3]] = 0;
3876 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3877 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3878 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3879 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3880 s->mb_intra = v->is_intra[s->mb_x] = 1;
3881 for (i = 0; i < 6; i++)
3882 v->mb_type[0][s->block_index[i]] = 1;
3883 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3884 mb_has_coeffs = get_bits1(gb);
3886 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3887 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3889 s->current_picture.f.qscale_table[mb_pos] = mquant;
3890 /* Set DC scale - y and c use the same (not sure if necessary here) */
3891 s->y_dc_scale = s->y_dc_scale_table[mquant];
3892 s->c_dc_scale = s->c_dc_scale_table[mquant];
3894 for (i = 0; i < 6; i++) {
3895 s->dc_val[0][s->block_index[i]] = 0;
3897 val = ((cbp >> (5 - i)) & 1);
3898 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3899 v->a_avail = v->c_avail = 0;
3900 if (i == 2 || i == 3 || !s->first_slice_line)
3901 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3902 if (i == 1 || i == 3 || s->mb_x)
3903 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3905 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3906 (i & 4) ? v->codingset2 : v->codingset);
3907 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3908 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* Field transform interleaves the two fields: double stride and
 * field-based offset for luma blocks. */
3910 stride_y = s->linesize << fieldtx;
3911 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3913 stride_y = s->uvlinesize;
3916 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3920 } else { // inter MB
3921 mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3923 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3924 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3925 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3927 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3928 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3929 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3932 s->mb_intra = v->is_intra[s->mb_x] = 0;
3933 for (i = 0; i < 6; i++)
3934 v->mb_type[0][s->block_index[i]] = 0;
3935 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3936 /* for all motion vector read MVDATA and motion compensate each block */
3940 for (i = 0; i < 6; i++) {
3943 val = ((mvbp >> (3 - i)) & 1);
3945 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3947 vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3948 vc1_mc_4mv_luma(v, i, 0);
3949 } else if (i == 4) {
3950 vc1_mc_4mv_chroma4(v);
/* 2MV-field: one MV per field, each covering two luma blocks. */
3957 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3959 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3960 vc1_mc_4mv_luma(v, 0, 0);
3961 vc1_mc_4mv_luma(v, 1, 0);
3964 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3966 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3967 vc1_mc_4mv_luma(v, 2, 0);
3968 vc1_mc_4mv_luma(v, 3, 0);
3969 vc1_mc_4mv_chroma4(v);
3971 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3973 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3975 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3979 GET_MQUANT(); // p. 227
3980 s->current_picture.f.qscale_table[mb_pos] = mquant;
3981 if (!v->ttmbf && cbp)
3982 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3983 for (i = 0; i < 6; i++) {
3984 s->dc_val[0][s->block_index[i]] = 0;
3986 val = ((cbp >> (5 - i)) & 1);
3988 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3990 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3992 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3993 first_block, s->dest[dst_idx] + off,
3994 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3995 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3996 block_cbp |= pat << (i << 2);
3997 if (!v->ttmbf && ttmb < 8)
/* Skipped MB: zero MV type, predict with zero differential. */
4004 s->mb_intra = v->is_intra[s->mb_x] = 0;
4005 for (i = 0; i < 6; i++) {
4006 v->mb_type[0][s->block_index[i]] = 0;
4007 s->dc_val[0][s->block_index[i]] = 0;
4009 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
4010 s->current_picture.f.qscale_table[mb_pos] = 0;
4011 v->blk_mv_type[s->block_index[0]] = 0;
4012 v->blk_mv_type[s->block_index[1]] = 0;
4013 v->blk_mv_type[s->block_index[2]] = 0;
4014 v->blk_mv_type[s->block_index[3]] = 0;
4015 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
4018 if (s->mb_x == s->mb_width - 1)
4019 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/**
 * Decode one macroblock of an interlaced-field P picture.
 * The MB mode VLC (VC1_IF_MBMODE) selects intra (idx <= 1), 1MV
 * (idx <= 5) or 4MV coding.  Destination offsets are shifted by one
 * field line when decoding the bottom field (v->cur_field_type).
 */
4023 static int vc1_decode_p_mb_intfi(VC1Context *v)
4025 MpegEncContext *s = &v->s;
4026 GetBitContext *gb = &s->gb;
4028 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4029 int cbp = 0; /* cbp decoding stuff */
4030 int mqdiff, mquant; /* MB quantization */
4031 int ttmb = v->ttfrm; /* MB Transform type */
4033 int mb_has_coeffs = 1; /* last_flag */
4034 int dmv_x, dmv_y; /* Differential MV components */
4035 int val; /* temp values */
4036 int first_block = 1;
4039 int block_cbp = 0, pat, block_tt = 0;
4042 mquant = v->pq; /* Lossy initialization */
4044 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4045 if (idx_mbmode <= 1) { // intra MB
4046 s->mb_intra = v->is_intra[s->mb_x] = 1;
4047 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4048 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4049 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4051 s->current_picture.f.qscale_table[mb_pos] = mquant;
4052 /* Set DC scale - y and c use the same (not sure if necessary here) */
4053 s->y_dc_scale = s->y_dc_scale_table[mquant];
4054 s->c_dc_scale = s->c_dc_scale_table[mquant];
4055 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4056 mb_has_coeffs = idx_mbmode & 1;
4058 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4060 for (i = 0; i < 6; i++) {
4061 s->dc_val[0][s->block_index[i]] = 0;
4062 v->mb_type[0][s->block_index[i]] = 1;
4064 val = ((cbp >> (5 - i)) & 1);
4065 v->a_avail = v->c_avail = 0;
4066 if (i == 2 || i == 3 || !s->first_slice_line)
4067 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4068 if (i == 1 || i == 3 || s->mb_x)
4069 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4071 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4072 (i & 4) ? v->codingset2 : v->codingset);
4073 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4075 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4076 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
/* Bottom field is offset by one frame line. */
4077 off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4078 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4079 // TODO: loop filter
4082 s->mb_intra = v->is_intra[s->mb_x] = 0;
4083 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4084 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4085 if (idx_mbmode <= 5) { // 1-MV
4087 if (idx_mbmode & 1) {
4088 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4090 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4092 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: per-block MV pattern, MC each luma block then chroma. */
4094 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4095 for (i = 0; i < 6; i++) {
4097 dmv_x = dmv_y = pred_flag = 0;
4098 val = ((v->fourmvbp >> (3 - i)) & 1);
4100 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4102 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4103 vc1_mc_4mv_luma(v, i, 0);
4105 vc1_mc_4mv_chroma(v, 0);
4107 mb_has_coeffs = idx_mbmode & 1;
4110 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4114 s->current_picture.f.qscale_table[mb_pos] = mquant;
4115 if (!v->ttmbf && cbp) {
4116 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4119 for (i = 0; i < 6; i++) {
4120 s->dc_val[0][s->block_index[i]] = 0;
4122 val = ((cbp >> (5 - i)) & 1);
4123 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4124 if (v->cur_field_type)
4125 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4127 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4128 first_block, s->dest[dst_idx] + off,
4129 (i & 4) ? s->uvlinesize : s->linesize,
4130 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4132 block_cbp |= pat << (i << 2);
4133 if (!v->ttmbf && ttmb < 8) ttmb = -1;
4138 if (s->mb_x == s->mb_width - 1)
4139 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4143 /** Decode one B-frame MB (in Main profile)
/**
 * Decode one macroblock of a progressive B frame (Main profile).
 * Reads direct/skip flags (raw or from the picture-header bitplanes),
 * determines the B MV type (forward/backward/interpolated/direct),
 * predicts and motion-compensates with vc1_pred_b_mv()/vc1_b_mc(),
 * then decodes intra or inter residual blocks.
 */
4145 static void vc1_decode_b_mb(VC1Context *v)
4147 MpegEncContext *s = &v->s;
4148 GetBitContext *gb = &s->gb;
4150 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4151 int cbp = 0; /* cbp decoding stuff */
4152 int mqdiff, mquant; /* MB quantization */
4153 int ttmb = v->ttfrm; /* MB Transform type */
4154 int mb_has_coeffs = 0; /* last_flag */
4155 int index, index1; /* LUT indexes */
4156 int val, sign; /* temp values */
4157 int first_block = 1;
4159 int skipped, direct;
4160 int dmv_x[2], dmv_y[2];
4161 int bmvtype = BMV_TYPE_BACKWARD;
4163 mquant = v->pq; /* Lossy initialization */
/* direct and skip flags: raw-coded or from the decoded bitplanes. */
4167 direct = get_bits1(gb);
4169 direct = v->direct_mb_plane[mb_pos];
4171 skipped = get_bits1(gb);
4173 skipped = v->s.mbskip_table[mb_pos];
4175 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4176 for (i = 0; i < 6; i++) {
4177 v->mb_type[0][s->block_index[i]] = 0;
4178 s->dc_val[0][s->block_index[i]] = 0;
4180 s->current_picture.f.qscale_table[mb_pos] = 0;
4184 GET_MVDATA(dmv_x[0], dmv_y[0]);
4185 dmv_x[1] = dmv_x[0];
4186 dmv_y[1] = dmv_y[0];
/* Non-direct: BMV type coded with decode012; the bfraction sign
 * selects which direction a single-MV MB refers to. */
4188 if (skipped || !s->mb_intra) {
4189 bmvtype = decode012(gb);
4192 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4195 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4198 bmvtype = BMV_TYPE_INTERPOLATED;
4199 dmv_x[0] = dmv_y[0] = 0;
4203 for (i = 0; i < 6; i++)
4204 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4208 bmvtype = BMV_TYPE_INTERPOLATED;
4209 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4210 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4214 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4217 s->current_picture.f.qscale_table[mb_pos] = mquant;
4219 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4220 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4221 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4222 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4224 if (!mb_has_coeffs && !s->mb_intra) {
4225 /* no coded blocks - effectively skipped */
4226 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4227 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4230 if (s->mb_intra && !mb_has_coeffs) {
4232 s->current_picture.f.qscale_table[mb_pos] = mquant;
4233 s->ac_pred = get_bits1(gb);
4235 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4237 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4238 GET_MVDATA(dmv_x[0], dmv_y[0]);
4239 if (!mb_has_coeffs) {
4240 /* interpolated skipped block */
4241 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4242 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4246 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4248 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4251 s->ac_pred = get_bits1(gb);
4252 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4254 s->current_picture.f.qscale_table[mb_pos] = mquant;
4255 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4256 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Residual decoding, block by block. */
4260 for (i = 0; i < 6; i++) {
4261 s->dc_val[0][s->block_index[i]] = 0;
4263 val = ((cbp >> (5 - i)) & 1);
4264 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4265 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4267 /* check if prediction blocks A and C are available */
4268 v->a_avail = v->c_avail = 0;
4269 if (i == 2 || i == 3 || !s->first_slice_line)
4270 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4271 if (i == 1 || i == 3 || s->mb_x)
4272 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4274 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4275 (i & 4) ? v->codingset2 : v->codingset);
4276 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4278 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4280 for (j = 0; j < 64; j++)
4281 s->block[i][j] <<= 1;
4282 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4284 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4285 first_block, s->dest[dst_idx] + off,
4286 (i & 4) ? s->uvlinesize : s->linesize,
4287 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4288 if (!v->ttmbf && ttmb < 8)
4295 /** Decode one B-frame MB (in interlaced field B picture)
4297 static void vc1_decode_b_mb_intfi(VC1Context *v)
4299 MpegEncContext *s = &v->s;
4300 GetBitContext *gb = &s->gb;
4302 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4303 int cbp = 0; /* cbp decoding stuff */
4304 int mqdiff, mquant; /* MB quantization */
4305 int ttmb = v->ttfrm; /* MB Transform type */
4306 int mb_has_coeffs = 0; /* last_flag */
4307 int val; /* temp value */
4308 int first_block = 1;
4311 int dmv_x[2], dmv_y[2], pred_flag[2];
4312 int bmvtype = BMV_TYPE_BACKWARD;
4313 int idx_mbmode, interpmvp;
4315 mquant = v->pq; /* Loosy initialization */
4318 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4319 if (idx_mbmode <= 1) { // intra MB
4320 s->mb_intra = v->is_intra[s->mb_x] = 1;
4321 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4322 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4323 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4325 s->current_picture.f.qscale_table[mb_pos] = mquant;
4326 /* Set DC scale - y and c use the same (not sure if necessary here) */
4327 s->y_dc_scale = s->y_dc_scale_table[mquant];
4328 s->c_dc_scale = s->c_dc_scale_table[mquant];
4329 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4330 mb_has_coeffs = idx_mbmode & 1;
4332 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4334 for (i = 0; i < 6; i++) {
4335 s->dc_val[0][s->block_index[i]] = 0;
4337 val = ((cbp >> (5 - i)) & 1);
4338 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4339 v->a_avail = v->c_avail = 0;
4340 if (i == 2 || i == 3 || !s->first_slice_line)
4341 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4342 if (i == 1 || i == 3 || s->mb_x)
4343 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4345 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4346 (i & 4) ? v->codingset2 : v->codingset);
4347 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4349 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4351 for (j = 0; j < 64; j++)
4352 s->block[i][j] <<= 1;
4353 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4354 off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4355 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4356 // TODO: yet to perform loop filter
4359 s->mb_intra = v->is_intra[s->mb_x] = 0;
4360 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4361 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4363 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4365 fwd = v->forward_mb_plane[mb_pos];
4366 if (idx_mbmode <= 5) { // 1-MV
4367 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4368 pred_flag[0] = pred_flag[1] = 0;
4370 bmvtype = BMV_TYPE_FORWARD;
4372 bmvtype = decode012(gb);
4375 bmvtype = BMV_TYPE_BACKWARD;
4378 bmvtype = BMV_TYPE_DIRECT;
4381 bmvtype = BMV_TYPE_INTERPOLATED;
4382 interpmvp = get_bits1(gb);
4385 v->bmvtype = bmvtype;
4386 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4387 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4389 if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4390 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4392 if (bmvtype == BMV_TYPE_DIRECT) {
4393 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4394 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4396 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4397 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4398 mb_has_coeffs = !(idx_mbmode & 2);
4401 bmvtype = BMV_TYPE_FORWARD;
4402 v->bmvtype = bmvtype;
4403 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4404 for (i = 0; i < 6; i++) {
4406 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4407 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4408 val = ((v->fourmvbp >> (3 - i)) & 1);
4410 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4411 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4412 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4414 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4415 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4417 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4419 mb_has_coeffs = idx_mbmode & 1;
4422 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4426 s->current_picture.f.qscale_table[mb_pos] = mquant;
4427 if (!v->ttmbf && cbp) {
4428 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4431 for (i = 0; i < 6; i++) {
4432 s->dc_val[0][s->block_index[i]] = 0;
4434 val = ((cbp >> (5 - i)) & 1);
4435 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4436 if (v->cur_field_type)
4437 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4439 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4440 first_block, s->dest[dst_idx] + off,
4441 (i & 4) ? s->uvlinesize : s->linesize,
4442 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4443 if (!v->ttmbf && ttmb < 8)
4451 /** Decode blocks of I-frame
 * Simple/Main-profile I-picture macroblock loop: picks the AC VLC coding
 * sets from the picture-level table indices, then for each MB decodes the
 * CBP, the AC-prediction flag and six 8x8 intra blocks, runs the inverse
 * transform, optional overlap smoothing (pq >= 9) and the in-loop filter.
 * NOTE(review): this excerpt is missing some original lines (case labels,
 * braces, local declarations); comments describe only the visible code.
 */
4453 static void vc1_decode_i_blocks(VC1Context *v)
4456 MpegEncContext *s = &v->s;
 /* luma AC coding set: table index 0 further depends on pqindex */
4461 /* select codingmode used for VLC tables selection */
4462 switch (v->y_ac_table_index) {
4464 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4467 v->codingset = CS_HIGH_MOT_INTRA;
4470 v->codingset = CS_MID_RATE_INTRA;
 /* chroma AC coding set, chosen independently of luma */
4474 switch (v->c_ac_table_index) {
4476 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4479 v->codingset2 = CS_HIGH_MOT_INTER;
4482 v->codingset2 = CS_MID_RATE_INTER;
4486 /* Set DC scale - y and c use the same */
4487 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4488 s->c_dc_scale = s->c_dc_scale_table[v->pq];
4491 s->mb_x = s->mb_y = 0;
4493 s->first_slice_line = 1;
4494 for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4496 ff_init_block_index(s);
4497 for (; s->mb_x < s->mb_width; s->mb_x++) {
4499 ff_update_block_index(s);
 /* dst[0..3]: the four 8x8 luma quadrants; dst[4..5]: chroma planes */
4500 dst[0] = s->dest[0];
4501 dst[1] = dst[0] + 8;
4502 dst[2] = s->dest[0] + s->linesize * 8;
4503 dst[3] = dst[2] + 8;
4504 dst[4] = s->dest[1];
4505 dst[5] = s->dest[2];
4506 s->dsp.clear_blocks(s->block[0]);
4507 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4508 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4509 s->current_picture.f.qscale_table[mb_pos] = v->pq;
4510 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4511 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4513 // do actual MB decoding and displaying
4514 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4515 v->s.ac_pred = get_bits1(&v->s.gb);
4517 for (k = 0; k < 6; k++) {
 /* per-block coded flag from the MSB-first CBP */
4518 val = ((cbp >> (5 - k)) & 1);
4521 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4525 cbp |= val << (5 - k);
4527 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
 /* skip chroma blocks (k > 3) when decoding luma-only */
4529 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4531 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4532 if (v->pq >= 9 && v->overlap) {
 /* overlap path: scale coefficients up, output as signed samples */
4534 for (j = 0; j < 64; j++)
4535 s->block[k][j] <<= 1;
4536 s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4539 for (j = 0; j < 64; j++)
4540 s->block[k][j] = (s->block[k][j] - 64) << 1;
4541 s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
 /* overlap smoothing across 8x8 block edges (horizontal then vertical;
  * the vertical pass is skipped on the first slice line for the top row) */
4545 if (v->pq >= 9 && v->overlap) {
4547 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4548 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4549 if (!(s->flags & CODEC_FLAG_GRAY)) {
4550 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4551 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4554 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4555 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4556 if (!s->first_slice_line) {
4557 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4558 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4559 if (!(s->flags & CODEC_FLAG_GRAY)) {
4560 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4561 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4564 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4565 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4567 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
 /* bitstream sanity check: flag error-concealment region on overread */
4569 if (get_bits_count(&s->gb) > v->bits) {
4570 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4571 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4572 get_bits_count(&s->gb), v->bits);
 /* with the loop filter active the row below must be complete first,
  * so drawing is delayed by one row */
4576 if (!v->s.loop_filter)
4577 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4579 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4581 s->first_slice_line = 0;
4583 if (v->s.loop_filter)
4584 ff_draw_horiz_band(s, (s->mb_height - 1) * 16, 16);
4585 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
4588 /** Decode blocks of I-frame for advanced profile
 * Like vc1_decode_i_blocks() but for the advanced profile: supports slices
 * (start_mb_y/end_mb_y), per-MB quantizer (mquant), raw/bitplane-coded
 * FIELDTX / ACPRED / OVERFLAGS, and defers output via the delayed block
 * buffer (v->block/cur_blk_idx) so smoothing and loop filtering can look
 * at neighbouring MBs before pixels are committed.
 * NOTE(review): excerpt is missing some original lines (braces, case
 * labels, declarations); comments describe only the visible code.
 */
4590 static void vc1_decode_i_blocks_adv(VC1Context *v)
4593 MpegEncContext *s = &v->s;
4599 GetBitContext *gb = &s->gb;
4601 /* select codingmode used for VLC tables selection */
4602 switch (v->y_ac_table_index) {
4604 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4607 v->codingset = CS_HIGH_MOT_INTRA;
4610 v->codingset = CS_MID_RATE_INTRA;
4614 switch (v->c_ac_table_index) {
4616 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4619 v->codingset2 = CS_HIGH_MOT_INTER;
4622 v->codingset2 = CS_MID_RATE_INTER;
4627 s->mb_x = s->mb_y = 0;
4629 s->first_slice_line = 1;
4630 s->mb_y = s->start_mb_y;
 /* non-first slice: clear the coded-block predictor row above the slice
  * so DC/CBP prediction does not leak across the slice boundary */
4631 if (s->start_mb_y) {
4633 ff_init_block_index(s);
4634 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4635 (1 + s->b8_stride) * sizeof(*s->coded_block));
4637 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4639 ff_init_block_index(s);
4640 for (;s->mb_x < s->mb_width; s->mb_x++) {
 /* decode into the delayed block ring buffer, not straight to pixels */
4641 DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
4642 ff_update_block_index(s);
4643 s->dsp.clear_blocks(block[0]);
4644 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4645 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4646 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4647 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4649 // do actual MB decoding and displaying
 /* FIELDTX / ACPRED / OVERFLAGS: read per-MB when coded raw, otherwise
  * taken from the bitplane decoded at picture-header time */
4650 if (v->fieldtx_is_raw)
4651 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4652 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4653 if ( v->acpred_is_raw)
4654 v->s.ac_pred = get_bits1(&v->s.gb);
4656 v->s.ac_pred = v->acpred_plane[mb_pos];
4658 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4659 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4663 s->current_picture.f.qscale_table[mb_pos] = mquant;
4664 /* Set DC scale - y and c use the same */
4665 s->y_dc_scale = s->y_dc_scale_table[mquant];
4666 s->c_dc_scale = s->c_dc_scale_table[mquant];
4668 for (k = 0; k < 6; k++) {
4669 val = ((cbp >> (5 - k)) & 1);
4672 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4676 cbp |= val << (5 - k);
 /* neighbour availability for AC/DC prediction: above (a) and left (c) */
4678 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4679 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4681 vc1_decode_i_block_adv(v, block[k], k, val,
4682 (k < 4) ? v->codingset : v->codingset2, mquant);
4684 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4686 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
 /* delayed output: smooth, clamp-store, then loop-filter prior MBs */
4689 vc1_smooth_overlap_filter_iblk(v);
4690 vc1_put_signed_blocks_clamped(v);
4691 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4693 if (get_bits_count(&s->gb) > v->bits) {
4694 // TODO: may need modification to handle slice coding
4695 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4696 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4697 get_bits_count(&s->gb), v->bits);
4701 if (!v->s.loop_filter)
4702 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4704 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4705 s->first_slice_line = 0;
 /* flush the last delayed MB row that the main loop left pending */
4708 /* raw bottom MB row */
4710 ff_init_block_index(s);
4711 for (;s->mb_x < s->mb_width; s->mb_x++) {
4712 ff_update_block_index(s);
4713 vc1_put_signed_blocks_clamped(v);
4714 if (v->s.loop_filter)
4715 vc1_loop_filter_iblk_delayed(v, v->pq);
4717 if (v->s.loop_filter)
4718 ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
4719 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4720 (s->end_mb_y << v->field_mode) - 1, (AC_END|DC_END|MV_END));
/** Decode the macroblocks of a P-frame.
 * Selects intra/inter AC coding sets (both from c_ac_table_index for
 * P-pictures), then dispatches each MB to the progressive, interlaced-frame
 * (fcm == 1) or interlaced-field decoder and applies the P loop filter one
 * row behind the decode position (its context arrays are double-buffered
 * via the *_base/mb_stride memmoves at the end of each row).
 * NOTE(review): excerpt is missing some original lines (braces, case
 * labels, the fcm dispatch condition); comments describe visible code only.
 */
4723 static void vc1_decode_p_blocks(VC1Context *v)
4725 MpegEncContext *s = &v->s;
4726 int apply_loop_filter;
4728 /* select codingmode used for VLC tables selection */
4729 switch (v->c_ac_table_index) {
4731 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4734 v->codingset = CS_HIGH_MOT_INTRA;
4737 v->codingset = CS_MID_RATE_INTRA;
4741 switch (v->c_ac_table_index) {
4743 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4746 v->codingset2 = CS_HIGH_MOT_INTER;
4749 v->codingset2 = CS_MID_RATE_INTER;
4753 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4754 s->first_slice_line = 1;
 /* clear both rows (current + previous) of the per-MB CBP context */
4755 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4756 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4758 ff_init_block_index(s);
4759 for (; s->mb_x < s->mb_width; s->mb_x++) {
4760 ff_update_block_index(s);
 /* dispatch on frame coding mode: field / interlaced frame / progressive */
4763 vc1_decode_p_mb_intfi(v);
4764 else if (v->fcm == 1)
4765 vc1_decode_p_mb_intfr(v);
4766 else vc1_decode_p_mb(v);
 /* loop filter runs one MB row behind; progressive (fcm == 0) only */
4767 if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == 0)
4768 vc1_apply_p_loop_filter(v);
4769 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4770 // TODO: may need modification to handle slice coding
4771 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4772 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4773 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
 /* shift current-row context into the previous-row slots for the next row */
4777 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4778 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4779 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4780 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4781 if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4782 s->first_slice_line = 0;
 /* filter the last row, which the in-loop pass above never reached */
4784 if (apply_loop_filter) {
4786 ff_init_block_index(s);
4787 for (; s->mb_x < s->mb_width; s->mb_x++) {
4788 ff_update_block_index(s);
4789 vc1_apply_p_loop_filter(v);
4792 if (s->end_mb_y >= s->start_mb_y)
4793 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4794 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4795 (s->end_mb_y << v->field_mode) - 1, (AC_END|DC_END|MV_END));
/** Decode the macroblocks of a B-frame.
 * Same structure as vc1_decode_p_blocks(): select coding sets (from
 * c_ac_table_index), iterate MBs over the slice range, conceal on bit
 * overconsumption, optional I-block loop filter, draw rows as completed.
 * NOTE(review): excerpt is missing the MB-dispatch condition and several
 * braces/else lines; only vc1_decode_b_mb_intfi() is visible here.
 */
4798 static void vc1_decode_b_blocks(VC1Context *v)
4800 MpegEncContext *s = &v->s;
4802 /* select codingmode used for VLC tables selection */
4803 switch (v->c_ac_table_index) {
4805 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4808 v->codingset = CS_HIGH_MOT_INTRA;
4811 v->codingset = CS_MID_RATE_INTRA;
4815 switch (v->c_ac_table_index) {
4817 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4820 v->codingset2 = CS_HIGH_MOT_INTER;
4823 v->codingset2 = CS_MID_RATE_INTER;
4827 s->first_slice_line = 1;
4828 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4830 ff_init_block_index(s);
4831 for (; s->mb_x < s->mb_width; s->mb_x++) {
4832 ff_update_block_index(s);
4835 vc1_decode_b_mb_intfi(v);
4838 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4839 // TODO: may need modification to handle slice coding
4840 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
4841 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4842 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4845 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
 /* draw delayed by one row when the loop filter is active */
4847 if (!v->s.loop_filter)
4848 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4850 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4851 s->first_slice_line = 0;
4853 if (v->s.loop_filter)
4854 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4855 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4856 (s->end_mb_y << v->field_mode) - 1, (AC_END|DC_END|MV_END));
/** Decode a skipped P-frame: copy the previous picture verbatim.
 * Each 16-pixel-high row of luma (and 8-high chroma) is memcpy'd from
 * last_picture into the current destination, then drawn. The picture type
 * is forced to P at the end so reference handling stays consistent.
 */
4859 static void vc1_decode_skip_blocks(VC1Context *v)
4861 MpegEncContext *s = &v->s;
4863 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, (AC_END|DC_END|MV_END));
4864 s->first_slice_line = 1;
4865 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4867 ff_init_block_index(s);
4868 ff_update_block_index(s);
 /* one MB row: 16 luma lines, 8 lines per chroma plane (4:2:0) */
4869 memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4870 memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4871 memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4872 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4873 s->first_slice_line = 0;
4875 s->pict_type = AV_PICTURE_TYPE_P;
/** Top-level block-decoding dispatcher: route to the per-picture-type
 * decoder (I/P/B), honoring profile (advanced vs. simple/main), skipped
 * P-frames, and the X8 intra coder path.
 * NOTE(review): excerpt is missing surrounding condition/break/else lines
 * (e.g. the guard before ff_intrax8_decode_picture and the BI handling),
 * so the control flow shown here is incomplete.
 */
4878 static void vc1_decode_blocks(VC1Context *v)
4881 v->s.esc3_level_length = 0;
 /* X8 intra frame: decoded entirely by the IntraX8 module */
4883 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
 /* reset the delayed-block ring buffer indices for the new picture */
4886 v->left_blk_idx = -1;
4887 v->topleft_blk_idx = 1;
4889 switch (v->s.pict_type) {
4890 case AV_PICTURE_TYPE_I:
4891 if (v->profile == PROFILE_ADVANCED)
4892 vc1_decode_i_blocks_adv(v);
4894 vc1_decode_i_blocks(v);
4896 case AV_PICTURE_TYPE_P:
 /* a skipped P-frame copies the reference instead of decoding MBs */
4897 if (v->p_frame_skipped)
4898 vc1_decode_skip_blocks(v);
4900 vc1_decode_p_blocks(v);
4902 case AV_PICTURE_TYPE_B:
 /* BI pictures (intra-coded B) reuse the I-frame decoders */
4904 if (v->profile == PROFILE_ADVANCED)
4905 vc1_decode_i_blocks_adv(v);
4907 vc1_decode_i_blocks(v);
4909 vc1_decode_b_blocks(v);
4915 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
4919 * Transform coefficients for both sprites in 16.16 fixed point format,
4920 * in the order they appear in the bitstream:
4922 * rotation 1 (unused)
4924 * rotation 2 (unused)
4931 int effect_type, effect_flag;
4932 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
4933 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
4936 static inline int get_fp_val(GetBitContext* gb)
4938 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
/** Parse one sprite affine-transform coefficient set into c[0..6].
 * A 2-bit selector chooses how many of the scale/rotation coefficients
 * (c[0], c[1], c[3], c[4]) are explicitly coded; the translation terms
 * c[2] (x) and c[5] (y) and the opacity c[6] follow. All values are
 * 16.16 fixed point via get_fp_val().
 * NOTE(review): the case labels, break statements and the default
 * initializations between the branches are missing from this excerpt.
 */
4941 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4945 switch (get_bits(gb, 2)) {
 /* translation only */
4948 c[2] = get_fp_val(gb);
 /* uniform scale + translation (x and y scale share one value) */
4952 c[0] = c[4] = get_fp_val(gb);
4953 c[2] = get_fp_val(gb);
 /* independent x/y scale + translation */
4956 c[0] = get_fp_val(gb);
4957 c[2] = get_fp_val(gb);
4958 c[4] = get_fp_val(gb);
 /* full affine: scale and rotation terms all coded */
4961 c[0] = get_fp_val(gb);
4962 c[1] = get_fp_val(gb);
4963 c[2] = get_fp_val(gb);
4964 c[3] = get_fp_val(gb);
4965 c[4] = get_fp_val(gb);
 /* vertical translation and opacity */
4968 c[5] = get_fp_val(gb);
4970 c[6] = get_fp_val(gb);
/** Parse the sprite (WMV Image) header data into sd.
 * Reads one or two sprite transforms, then the optional effect block
 * (type, two parameter lists, flag), logging everything at DEBUG level
 * and validating the remaining bit budget at the end.
 * NOTE(review): excerpt is missing some lines (declarations, case labels,
 * braces); comments describe only the visible code.
 */
4975 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4977 AVCodecContext *avctx = v->s.avctx;
 /* one transform per sprite; two_sprites selects single/double mode */
4980 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4981 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
 /* rotation (c[1]/c[3]) is parsed but not implemented by the renderer */
4982 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4983 av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4984 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
4985 for (i = 0; i < 7; i++)
4986 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4987 sd->coefs[sprite][i] / (1<<16),
4988 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4989 av_log(avctx, AV_LOG_DEBUG, "\n");
 /* intentional assignment-in-condition: store effect type, skip block if 0 */
4993 if (sd->effect_type = get_bits_long(gb, 30)) {
4994 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
 /* 7 or 14 parameters: reuse the transform parser for the list(s) */
4996 vc1_sprite_parse_transform(gb, sd->effect_params1);
4999 vc1_sprite_parse_transform(gb, sd->effect_params1);
5000 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5003 for (i = 0; i < sd->effect_pcount1; i++)
5004 sd->effect_params1[i] = get_fp_val(gb);
5006 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5007 // effect 13 is simple alpha blending and matches the opacity above
5008 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5009 for (i = 0; i < sd->effect_pcount1; i++)
5010 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5011 sd->effect_params1[i] / (1 << 16),
5012 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5013 av_log(avctx, AV_LOG_DEBUG, "\n");
 /* second parameter list is bounded by the effect_params2[10] array */
5016 sd->effect_pcount2 = get_bits(gb, 16);
5017 if (sd->effect_pcount2 > 10) {
5018 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5020 } else if (sd->effect_pcount2) {
5022 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5023 while (++i < sd->effect_pcount2) {
5024 sd->effect_params2[i] = get_fp_val(gb);
5025 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5026 sd->effect_params2[i] / (1 << 16),
5027 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5029 av_log(avctx, AV_LOG_DEBUG, "\n");
 /* intentional assignment-in-condition, as above */
5032 if (sd->effect_flag = get_bits1(gb))
5033 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
 /* WMV3IMAGE is allowed up to 64 bits of slack before it counts as overrun */
5035 if (get_bits_count(gb) >= gb->size_in_bits +
5036 (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
5037 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5038 if (get_bits_count(gb) < gb->size_in_bits - 8)
5039 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
/** Render the output frame from one or two sprites.
 * For each plane and output row: horizontally scale the needed sprite
 * source row(s) (cached in v->sr_rows, keyed by source line number in
 * sr_cache), then vertically interpolate and, in two-sprite mode, alpha
 * blend via the vc1dsp sprite_v_* kernels. All offsets/advances are in
 * 16.16 fixed point.
 * NOTE(review): excerpt is missing some lines (declarations, else
 * branches, braces); comments describe only the visible code.
 */
5042 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5044 int i, plane, row, sprite;
 /* sr_cache[sprite][j] holds the source line cached in sr_rows[sprite][j];
  * -1 means empty */
5045 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5046 uint8_t* src_h[2][2];
5047 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5049 MpegEncContext *s = &v->s;
 /* clamp per-sprite offsets/advances so sampling stays inside the sprite */
5051 for (i = 0; i < 2; i++) {
5052 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5053 xadv[i] = sd->coefs[i][0];
5054 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5055 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5057 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5058 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
 /* blend factor of sprite 2 over sprite 1, from its opacity coefficient */
5060 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5062 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5063 int width = v->output_width>>!!plane;
5065 for (row = 0; row < v->output_height>>!!plane; row++) {
5066 uint8_t *dst = v->sprite_output_frame.data[plane] +
5067 v->sprite_output_frame.linesize[plane] * row;
5069 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5070 uint8_t *iplane = s->current_picture.f.data[plane];
5071 int iline = s->current_picture.f.linesize[plane];
 /* integer source line plus 16-bit sub-line fraction for this row */
5072 int ycoord = yoff[sprite] + yadv[sprite] * row;
5073 int yline = ycoord >> 16;
5074 ysub[sprite] = ycoord & 0xFFFF;
 /* presumably the second sprite comes from the previous picture —
  * the guarding condition is missing from this excerpt */
5076 iplane = s->last_picture.f.data[plane];
5077 iline = s->last_picture.f.linesize[plane];
 /* fast path: integral x offset and unity x scale, read rows in place */
5079 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5080 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5082 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + (yline + 1) * iline;
 /* slow path: horizontally scale into the cached row buffers */
5084 if (sr_cache[sprite][0] != yline) {
5085 if (sr_cache[sprite][1] == yline) {
5086 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5087 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5089 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5090 sr_cache[sprite][0] = yline;
 /* second row only needed when vertically interpolating (ysub != 0) */
5093 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5094 v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane + (yline + 1) * iline, xoff[sprite], xadv[sprite], width);
5095 sr_cache[sprite][1] = yline + 1;
5097 src_h[sprite][0] = v->sr_rows[sprite][0];
5098 src_h[sprite][1] = v->sr_rows[sprite][1];
 /* vertical pass: interpolate/copy (single sprite) or blend (two sprites) */
5102 if (!v->two_sprites) {
5104 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5106 memcpy(dst, src_h[0][0], width);
5109 if (ysub[0] && ysub[1]) {
5110 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5111 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5112 } else if (ysub[0]) {
5113 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5114 src_h[1][0], alpha, width);
5115 } else if (ysub[1]) {
 /* swap operands and invert alpha so the scaled sprite is first */
5116 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5117 src_h[0][0], (1<<16)-1-alpha, width);
5119 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5125 for (i = 0; i < 2; i++) {
/** Decode a sprite (WMV Image) frame: parse the sprite headers, validate
 * that the required reference pictures exist, (re)acquire the output
 * buffer and render into it via vc1_draw_sprites().
 * NOTE(review): excerpt is missing some lines (the SpriteData declaration,
 * return statements, braces); comments describe only the visible code.
 */
5137 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5138 MpegEncContext *s = &v->s;
5139 AVCodecContext *avctx = s->avctx;
5141 vc1_parse_sprites(v, gb, &sd);
 /* sprite 1 must have been decoded into current_picture already */
5143 if (!s->current_picture.f.data[0]) {
5144 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
 /* two-sprite mode additionally needs the previous picture as sprite 2 */
5148 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5149 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
 /* release any previously held output frame before getting a new one */
5153 if (v->sprite_output_frame.data[0])
5154 avctx->release_buffer(avctx, &v->sprite_output_frame);
5156 v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
5157 v->sprite_output_frame.reference = 0;
5158 if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
5159 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5163 vc1_draw_sprites(v, &sd);
/** Flush callback for the sprite (WMV Image) decoders: blank the stored
 * sprite so stale image data is not blended after a seek.
 */
5168 static void vc1_sprite_flush(AVCodecContext *avctx)
5170 VC1Context *v = avctx->priv_data;
5171 MpegEncContext *s = &v->s;
5172 AVFrame *f = &s->current_picture.f;
5175 /* Windows Media Image codecs have a convergence interval of two keyframes.
5176 Since we can't enforce it, clear to black the missing sprite. This is
5177 wrong but it looks better than doing nothing. */
 /* black is 0 for luma, 128 (neutral) for the chroma planes */
5180 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5181 for (i = 0; i < v->sprite_height>>!!plane; i++)
5182 memset(f->data[plane] + i * f->linesize[plane],
5183 plane ? 128 : 0, f->linesize[plane]);
/** Allocate the per-sequence VC-1 decoder tables.
 * Sized from the MpegEncContext macroblock geometry: bitplanes, the
 * delayed-block buffer, double-buffered per-row context (cbp/ttblk/
 * is_intra/luma_mv), block-type maps laid out for s->block_index[], and
 * the field-MV arrays. Returns -1 on allocation failure (checked in bulk
 * at the end), 0 on success.
 * NOTE(review): excerpt is missing some lines (return statements, closing
 * braces); several allocations (e.g. mb_type_base, mv_f_*) are not in the
 * visible final NULL check — confirm against the complete source.
 */
5188 static av_cold int vc1_decode_init_alloc_tables(VC1Context *v)
5190 MpegEncContext *s = &v->s;
5193 /* Allocate mb bitplanes */
5194 v->mv_type_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5195 v->direct_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5196 v->forward_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5197 v->fieldtx_plane = av_mallocz(s->mb_stride * s->mb_height);
5198 v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5199 v->over_flags_plane = av_malloc (s->mb_stride * s->mb_height);
 /* delayed-output block ring: one MB row plus two extra MBs */
5201 v->n_allocated_blks = s->mb_width + 2;
5202 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
 /* each *_base holds two rows; the working pointer starts at row 1 so the
  * previous row is addressable at negative stride */
5203 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5204 v->cbp = v->cbp_base + s->mb_stride;
5205 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5206 v->ttblk = v->ttblk_base + s->mb_stride;
5207 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5208 v->is_intra = v->is_intra_base + s->mb_stride;
5209 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5210 v->luma_mv = v->luma_mv_base + s->mb_stride;
5212 /* allocate block type info in that way so it could be used with s->block_index[] */
5213 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5214 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5215 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5216 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5218 /* allocate memory to store block level MV info */
5219 v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5220 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
 /* mv_f / mv_f_last / mv_f_next: two field-MV planes each, same layout */
5221 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5222 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5223 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5224 v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5225 v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5226 v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5227 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5228 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5229 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5231 /* Init coded blocks info */
5232 if (v->profile == PROFILE_ADVANCED) {
5233 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5235 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5239 ff_intrax8_common_init(&v->x8,s);
 /* sprite decoders need four horizontally-scaled row buffers */
5241 if (s->avctx->codec_id == CODEC_ID_WMV3IMAGE || s->avctx->codec_id == CODEC_ID_VC1IMAGE) {
5242 for (i = 0; i < 4; i++)
5243 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
 /* bulk NULL check of the allocations above */
5246 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5247 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5254 /** Initialize a VC1/WMV3 decoder
5255 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5256 * @todo TODO: Decypher remaining bits in extra_data
 *
 * Sets up pixel format/hwaccel, parses the sequence header from extradata
 * (raw bitstream for WMV3, marker-delimited and escaped SEQHDR/ENTRYPOINT
 * chunks for VC-1), derives MB dimensions and zigzag tables, and applies
 * the sprite (WMV Image) output-size overrides.
 * NOTE(review): excerpt is missing some lines (declarations, returns,
 * error paths, braces); comments describe only the visible code.
 */
5258 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5260 VC1Context *v = avctx->priv_data;
5261 MpegEncContext *s = &v->s;
5265 /* save the container output size for WMImage */
5266 v->output_width = avctx->width;
5267 v->output_height = avctx->height;
 /* a sequence header in extradata is mandatory for all supported codecs */
5269 if (!avctx->extradata_size || !avctx->extradata)
5271 if (!(avctx->flags & CODEC_FLAG_GRAY))
5272 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5274 avctx->pix_fmt = PIX_FMT_GRAY8;
5275 avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
5277 avctx->flags |= CODEC_FLAG_EMU_EDGE;
5278 v->s.flags |= CODEC_FLAG_EMU_EDGE;
5280 if (avctx->idct_algo == FF_IDCT_AUTO) {
5281 avctx->idct_algo = FF_IDCT_WMV2;
5284 if (vc1_init_common(v) < 0)
5286 ff_vc1dsp_init(&v->vc1dsp);
5288 if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE) {
5291 // looks like WMV3 has a sequence header stored in the extradata
5292 // advanced sequence header may be before the first frame
5293 // the last byte of the extradata is a version number, 1 for the
5294 // samples we can decode
5296 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5298 if (vc1_decode_sequence_header(avctx, v, &gb) < 0)
 /* report leftover or overread extradata bits for diagnostics */
5301 count = avctx->extradata_size*8 - get_bits_count(&gb);
5303 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5304 count, get_bits(&gb, count));
5305 } else if (count < 0) {
5306 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5308 } else { // VC1/WVC1/WVP2
5309 const uint8_t *start = avctx->extradata;
5310 uint8_t *end = avctx->extradata + avctx->extradata_size;
5311 const uint8_t *next;
5312 int size, buf2_size;
5313 uint8_t *buf2 = NULL;
5314 int seq_initialized = 0, ep_initialized = 0;
5316 if (avctx->extradata_size < 16) {
5317 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
 /* scratch buffer for unescaping the marker-delimited chunks */
5321 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5322 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5324 for (; next < end; start = next) {
5325 next = find_next_marker(start + 4, end);
5326 size = next - start - 4;
5329 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5330 init_get_bits(&gb, buf2, buf2_size * 8);
 /* both a sequence header and an entry point are required (checked below) */
5331 switch (AV_RB32(start)) {
5332 case VC1_CODE_SEQHDR:
5333 if (vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5337 seq_initialized = 1;
5339 case VC1_CODE_ENTRYPOINT:
5340 if (vc1_decode_entry_point(avctx, v, &gb) < 0) {
5349 if (!seq_initialized || !ep_initialized) {
5350 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5353 v->res_sprite = (avctx->codec_tag == MKTAG('W','V','P','2'));
5356 avctx->profile = v->profile;
5357 if (v->profile == PROFILE_ADVANCED)
5358 avctx->level = v->level;
5360 avctx->has_b_frames = !!(avctx->max_b_frames);
5362 s->mb_width = (avctx->coded_width + 15) >> 4;
5363 s->mb_height = (avctx->coded_height + 15) >> 4;
 /* advanced profile / fasttx use transposed WMV1 scantables; otherwise the
  * plain tables are copied as-is */
5365 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5366 for (i = 0; i < 64; i++) {
5367 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5368 v->zz_8x8[0][i] = transpose(wmv1_scantable[0][i]);
5369 v->zz_8x8[1][i] = transpose(wmv1_scantable[1][i]);
5370 v->zz_8x8[2][i] = transpose(wmv1_scantable[2][i]);
5371 v->zz_8x8[3][i] = transpose(wmv1_scantable[3][i]);
5372 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5377 memcpy(v->zz_8x8, wmv1_scantable, 4*64);
 /* WMV Image: coded size is the sprite; the visible frame is the container
  * size saved above */
5382 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5383 v->sprite_width = avctx->coded_width;
5384 v->sprite_height = avctx->coded_height;
5386 avctx->coded_width = avctx->width = v->output_width;
5387 avctx->coded_height = avctx->height = v->output_height;
5389 // prevent 16.16 overflows
5390 if (v->sprite_width > 1 << 14 ||
5391 v->sprite_height > 1 << 14 ||
5392 v->output_width > 1 << 14 ||
5393 v->output_height > 1 << 14) return -1;
5398 /** Close a VC1/WMV3 decoder
5399 * @warning Initial try at using MpegEncContext stuff
 *
 * Frees everything vc1_decode_init()/vc1_decode_init_alloc_tables()
 * allocated: the sprite output frame and row buffers, HRD arrays, the
 * MpegEncContext, all bitplanes and per-row context tables, and the
 * IntraX8 state. av_freep() tolerates NULL, so partial init is safe.
 */
5401 static av_cold int vc1_decode_end(AVCodecContext *avctx)
5403 VC1Context *v = avctx->priv_data;
 /* sprite decoders may still hold an output frame reference */
5406 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5407 && v->sprite_output_frame.data[0])
5408 avctx->release_buffer(avctx, &v->sprite_output_frame);
5409 for (i = 0; i < 4; i++)
5410 av_freep(&v->sr_rows[i >> 1][i & 1]);
5411 av_freep(&v->hrd_rate);
5412 av_freep(&v->hrd_buffer);
5413 MPV_common_end(&v->s);
5414 av_freep(&v->mv_type_mb_plane);
5415 av_freep(&v->direct_mb_plane);
5416 av_freep(&v->forward_mb_plane);
5417 av_freep(&v->fieldtx_plane);
5418 av_freep(&v->acpred_plane);
5419 av_freep(&v->over_flags_plane);
5420 av_freep(&v->mb_type_base);
5421 av_freep(&v->blk_mv_type_base);
5422 av_freep(&v->mv_f_base);
5423 av_freep(&v->mv_f_last_base);
5424 av_freep(&v->mv_f_next_base);
5425 av_freep(&v->block);
5426 av_freep(&v->cbp_base);
5427 av_freep(&v->ttblk_base);
5428 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5429 av_freep(&v->luma_mv_base);
5430 ff_intrax8_common_end(&v->x8);
5435 /** Decode a VC1/WMV3 frame
5436 * @todo TODO: Handle VC-1 IDUs (Transport level?)
 */
5438 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5439 int *data_size, AVPacket *avpkt)
5441 const uint8_t *buf = avpkt->data;
5442 int buf_size = avpkt->size, n_slices = 0, i;
5443 VC1Context *v = avctx->priv_data;
5444 MpegEncContext *s = &v->s;
5445 AVFrame *pict = data;
5446 uint8_t *buf2 = NULL;
5447 uint8_t *buf_field2 = NULL;
5448 const uint8_t *buf_start = buf;
5449 int mb_height, n_slices1=-1;
5456 /* no supplementary picture */
5457 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5458 /* special case for last picture */
5459 if (s->low_delay == 0 && s->next_picture_ptr) {
5460 *pict = *(AVFrame*)s->next_picture_ptr;
5461 s->next_picture_ptr = NULL;
5463 *data_size = sizeof(AVFrame);
5469 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5470 if (v->profile < PROFILE_ADVANCED)
5471 avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
5473 avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
5476 //for advanced profile we may need to parse and unescape data
5477 if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5479 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5481 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5482 const uint8_t *start, *end, *next;
5486 for (start = buf, end = buf + buf_size; next < end; start = next) {
5487 next = find_next_marker(start + 4, end);
5488 size = next - start - 4;
5489 if (size <= 0) continue;
5490 switch (AV_RB32(start)) {
5491 case VC1_CODE_FRAME:
5492 if (avctx->hwaccel ||
5493 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5495 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5497 case VC1_CODE_FIELD: {
5499 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5502 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5503 if (!slices[n_slices].buf)
5505 buf_size3 = vc1_unescape_buffer(start + 4, size,
5506 slices[n_slices].buf);
5507 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5509 /* assuming that the field marker is at the exact middle,
5510 hope it's correct */
5511 slices[n_slices].mby_start = s->mb_height >> 1;
5512 n_slices1 = n_slices - 1; // index of the last slice of the first field
5514 // not necessary, ad hoc until I find a way to handle WVC1i
5515 buf_field2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5516 vc1_unescape_buffer(start + 4, size, buf_field2);
5519 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5520 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5521 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5522 vc1_decode_entry_point(avctx, v, &s->gb);
5524 case VC1_CODE_SLICE: {
5526 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5529 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5530 if (!slices[n_slices].buf)
5532 buf_size3 = vc1_unescape_buffer(start + 4, size,
5533 slices[n_slices].buf);
5534 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5536 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5542 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5543 const uint8_t *divider;
5545 divider = find_next_marker(buf, buf + buf_size);
5546 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5547 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5549 } else { // found field marker, unescape second field
5550 buf_field2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5551 vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, buf_field2);
5553 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5555 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5557 init_get_bits(&s->gb, buf2, buf_size2*8);
5559 init_get_bits(&s->gb, buf, buf_size*8);
5561 if (v->res_sprite) {
5562 v->new_sprite = !get_bits1(&s->gb);
5563 v->two_sprites = get_bits1(&s->gb);
5564 /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
5565 we're using the sprite compositor. These are intentionally kept separate
5566 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5567 the vc1 one for WVP2 */
5568 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5569 if (v->new_sprite) {
5570 // switch AVCodecContext parameters to those of the sprites
5571 avctx->width = avctx->coded_width = v->sprite_width;
5572 avctx->height = avctx->coded_height = v->sprite_height;
5579 if (s->context_initialized &&
5580 (s->width != avctx->coded_width ||
5581 s->height != avctx->coded_height)) {
5582 vc1_decode_end(avctx);
5585 if (!s->context_initialized) {
5586 if (ff_msmpeg4_decode_init(avctx) < 0 || vc1_decode_init_alloc_tables(v) < 0)
5589 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5591 if (v->profile == PROFILE_ADVANCED) {
5592 s->h_edge_pos = avctx->coded_width;
5593 s->v_edge_pos = avctx->coded_height;
5597 /* We need to set current_picture_ptr before reading the header,
5598 * otherwise we cannot store anything in there. */
5599 if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5600 int i = ff_find_unused_picture(s, 0);
5601 s->current_picture_ptr = &s->picture[i];
5604 // do parse frame header
5605 v->pic_header_flag = 0;
5606 if (v->profile < PROFILE_ADVANCED) {
5607 if (vc1_parse_frame_header(v, &s->gb) == -1) {
5611 if (vc1_parse_frame_header_adv(v, &s->gb) == -1) {
5616 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5617 && s->pict_type != AV_PICTURE_TYPE_I) {
5618 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5622 // process pulldown flags
5623 s->current_picture_ptr->f.repeat_pict = 0;
5624 // Pulldown flags are only valid when 'broadcast' has been set.
5625 // So ticks_per_frame will be 2
5628 s->current_picture_ptr->f.repeat_pict = 1;
5629 } else if (v->rptfrm) {
5631 s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5634 // for skipping the frame
5635 s->current_picture.f.pict_type = s->pict_type;
5636 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5638 /* skip B-frames if we don't have reference frames */
5639 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) {
5642 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5643 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5644 avctx->skip_frame >= AVDISCARD_ALL) {
5648 if (s->next_p_frame_damaged) {
5649 if (s->pict_type == AV_PICTURE_TYPE_B)
5652 s->next_p_frame_damaged = 0;
5655 if (MPV_frame_start(s, avctx) < 0) {
5659 s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
5660 s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
5662 if ((CONFIG_VC1_VDPAU_DECODER)
5663 &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5664 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5665 else if (avctx->hwaccel) {
5666 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5668 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5670 if (avctx->hwaccel->end_frame(avctx) < 0)
5673 ff_er_frame_start(s);
5675 v->bits = buf_size * 8;
5676 if (v->field_mode) {
5678 s->current_picture.f.linesize[0] <<= 1;
5679 s->current_picture.f.linesize[1] <<= 1;
5680 s->current_picture.f.linesize[2] <<= 1;
5682 s->uvlinesize <<= 1;
5683 tmp[0] = v->mv_f_last[0];
5684 tmp[1] = v->mv_f_last[1];
5685 v->mv_f_last[0] = v->mv_f_next[0];
5686 v->mv_f_last[1] = v->mv_f_next[1];
5687 v->mv_f_next[0] = v->mv_f[0];
5688 v->mv_f_next[1] = v->mv_f[1];
5689 v->mv_f[0] = tmp[0];
5690 v->mv_f[1] = tmp[1];
5692 mb_height = s->mb_height >> v->field_mode;
5693 for (i = 0; i <= n_slices; i++) {
5694 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5695 v->second_field = 1;
5696 v->blocks_off = s->mb_width * s->mb_height << 1;
5697 v->mb_off = s->mb_stride * s->mb_height >> 1;
5699 v->second_field = 0;
5704 v->pic_header_flag = 0;
5705 if (v->field_mode && i == n_slices1 + 2)
5706 vc1_parse_frame_header_adv(v, &s->gb);
5707 else if (get_bits1(&s->gb)) {
5708 v->pic_header_flag = 1;
5709 vc1_parse_frame_header_adv(v, &s->gb);
5712 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5713 if (!v->field_mode || v->second_field)
5714 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5716 s->end_mb_y = (i == n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5717 vc1_decode_blocks(v);
5719 s->gb = slices[i].gb;
5721 if (v->field_mode) {
5722 av_free(buf_field2);
5723 v->second_field = 0;
5725 if (v->field_mode) {
5726 if (s->pict_type == AV_PICTURE_TYPE_B) {
5727 memcpy(v->mv_f_base, v->mv_f_next_base,
5728 2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5730 s->current_picture.f.linesize[0] >>= 1;
5731 s->current_picture.f.linesize[1] >>= 1;
5732 s->current_picture.f.linesize[2] >>= 1;
5734 s->uvlinesize >>= 1;
5736 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits);
5737 // if (get_bits_count(&s->gb) > buf_size * 8)
5744 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5746 avctx->width = avctx->coded_width = v->output_width;
5747 avctx->height = avctx->coded_height = v->output_height;
5748 if (avctx->skip_frame >= AVDISCARD_NONREF)
5750 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5751 if (vc1_decode_sprites(v, &s->gb))
5754 *pict = v->sprite_output_frame;
5755 *data_size = sizeof(AVFrame);
5757 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5758 *pict = *(AVFrame*)s->current_picture_ptr;
5759 } else if (s->last_picture_ptr != NULL) {
5760 *pict = *(AVFrame*)s->last_picture_ptr;
5762 if (s->last_picture_ptr || s->low_delay) {
5763 *data_size = sizeof(AVFrame);
5764 ff_print_debug_info(s, pict);
5770 for (i = 0; i < n_slices; i++)
5771 av_free(slices[i].buf);
5777 for (i = 0; i < n_slices; i++)
5778 av_free(slices[i].buf);
5780 av_free(buf_field2);
/* Mapping from FF_PROFILE_VC1_* values to human-readable profile names,
 * exported through each AVCodec's .profiles field below. The list is
 * terminated by the FF_PROFILE_UNKNOWN sentinel entry. */
5785 static const AVProfile profiles[] = {
5786 { FF_PROFILE_VC1_SIMPLE, "Simple" },
5787 { FF_PROFILE_VC1_MAIN, "Main" },
5788 { FF_PROFILE_VC1_COMPLEX, "Complex" },
5789 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5790 { FF_PROFILE_UNKNOWN },
/* Software SMPTE VC-1 decoder registration. Shares init/close/decode
 * callbacks with all the variants below; DR1 + DELAY because output can
 * lag one reference frame behind input (see vc1_decode_frame). */
5793 AVCodec ff_vc1_decoder = {
5795 .type = AVMEDIA_TYPE_VIDEO,
5797 .priv_data_size = sizeof(VC1Context),
5798 .init = vc1_decode_init,
5799 .close = vc1_decode_end,
5800 .decode = vc1_decode_frame,
5801 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5802 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5803 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5804 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_WMV3_DECODER
/* Software WMV3 (Windows Media Video 9) decoder: same callbacks as the
 * VC-1 decoder, differing only in codec id and naming. */
5808 AVCodec ff_wmv3_decoder = {
5810 .type = AVMEDIA_TYPE_VIDEO,
5811 .id = CODEC_ID_WMV3,
5812 .priv_data_size = sizeof(VC1Context),
5813 .init = vc1_decode_init,
5814 .close = vc1_decode_end,
5815 .decode = vc1_decode_frame,
5816 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5817 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5818 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5819 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_WMV3_VDPAU_DECODER
/* VDPAU-accelerated WMV3 decoder: advertises HWACCEL_VDPAU so
 * vc1_decode_frame routes picture data to ff_vdpau_vc1_decode_picture
 * and outputs the PIX_FMT_VDPAU_WMV3 hardware surface format. */
5824 AVCodec ff_wmv3_vdpau_decoder = {
5825 .name = "wmv3_vdpau",
5826 .type = AVMEDIA_TYPE_VIDEO,
5827 .id = CODEC_ID_WMV3,
5828 .priv_data_size = sizeof(VC1Context),
5829 .init = vc1_decode_init,
5830 .close = vc1_decode_end,
5831 .decode = vc1_decode_frame,
5832 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5833 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5834 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE},
5835 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_VC1_VDPAU_DECODER
/* VDPAU-accelerated VC-1 decoder: same decode path as above but exposes
 * the PIX_FMT_VDPAU_VC1 surface format for advanced-profile streams. */
5840 AVCodec ff_vc1_vdpau_decoder = {
5841 .name = "vc1_vdpau",
5842 .type = AVMEDIA_TYPE_VIDEO,
5844 .priv_data_size = sizeof(VC1Context),
5845 .init = vc1_decode_init,
5846 .close = vc1_decode_end,
5847 .decode = vc1_decode_frame,
5848 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5849 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5850 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE},
5851 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_WMV3IMAGE_DECODER
/* Windows Media Image (WMVP) decoder: runs the sprite compositor on
 * top of the WMV3 decode path (see the CODEC_ID_WMV3IMAGE branches in
 * vc1_decode_frame). No DELAY capability: sprites are I-frame only. */
5856 AVCodec ff_wmv3image_decoder = {
5857 .name = "wmv3image",
5858 .type = AVMEDIA_TYPE_VIDEO,
5859 .id = CODEC_ID_WMV3IMAGE,
5860 .priv_data_size = sizeof(VC1Context),
5861 .init = vc1_decode_init,
5862 .close = vc1_decode_end,
5863 .decode = vc1_decode_frame,
5864 .capabilities = CODEC_CAP_DR1,
5865 .flush = vc1_sprite_flush,
5866 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5867 .pix_fmts = ff_pixfmt_list_420
#if CONFIG_VC1IMAGE_DECODER
/* Windows Media Image v2 (WVP2) decoder: sprite compositor on top of
 * the VC-1 decode path (CODEC_ID_VC1IMAGE branches in vc1_decode_frame). */
5872 AVCodec ff_vc1image_decoder = {
5874 .type = AVMEDIA_TYPE_VIDEO,
5875 .id = CODEC_ID_VC1IMAGE,
5876 .priv_data_size = sizeof(VC1Context),
5877 .init = vc1_decode_init,
5878 .close = vc1_decode_end,
5879 .decode = vc1_decode_frame,
5880 .capabilities = CODEC_CAP_DR1,
5881 .flush = vc1_sprite_flush,
5882 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5883 .pix_fmts = ff_pixfmt_list_420