2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of Libav.
9 * Libav is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * Libav is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with Libav; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
36 #include "vc1acdata.h"
37 #include "msmpeg4data.h"
39 #include "simple_idct.h"
41 #include "vdpau_internal.h"
46 #define MB_INTRA_VLC_BITS 9
/* Cumulative offsets (in VLC_TYPE[2] entries) into the single static
 * vlc_table[] declared in ff_vc1_init_common(): each init_vlc() call with
 * INIT_VLC_USE_NEW_STATIC is handed the slice [vlc_offs[k], vlc_offs[k+1]).
 * The final entry (32372) must equal the declared size of vlc_table. */
51 static const uint16_t vlc_offs[] = {
52 0, 520, 552, 616, 1128, 1160, 1224, 1740, 1772, 1836, 1900, 2436,
53 2986, 3050, 3610, 4154, 4218, 4746, 5326, 5390, 5902, 6554, 7658, 8342,
54 9304, 9988, 10630, 11234, 12174, 13006, 13560, 14232, 14786, 15432, 16350, 17522,
55 20372, 21818, 22330, 22394, 23166, 23678, 23742, 24820, 25332, 25396, 26460, 26980,
56 27048, 27592, 27600, 27608, 27616, 27624, 28224, 28258, 28290, 28802, 28834, 28866,
57 29378, 29412, 29444, 29960, 29994, 30026, 30538, 30572, 30604, 31120, 31154, 31186,
58 31714, 31746, 31778, 32306, 32340, 32372
61 // offset tables for interlaced picture MVDATA decoding
/* offset_table1[k] = 0 for k == 0, otherwise 2^(k-1);
 * offset_table2[k] = 2^k - 1.
 * NOTE(review): presumably base offsets added to an escape-coded MV residual
 * per size class — confirm at the use site (not visible in this chunk). */
62 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
63 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
66 * Init VC-1 specific tables and VC1Context members
67 * @param v The VC1Context to initialize
70 int ff_vc1_init_common(VC1Context *v)
/* Single static backing store shared by every table initialized below with
 * INIT_VLC_USE_NEW_STATIC; vlc_offs[] partitions it.  Its size (32372)
 * must match the last vlc_offs[] entry. */
74 static VLC_TYPE vlc_table[32372][2];
/* HRD rate/buffer arrays are allocated later, when header parsing knows
 * how many leaky-bucket entries exist. */
76 v->hrd_rate = v->hrd_buffer = NULL;
/* Tables with a single variant: bfraction, norm2/norm6 bitplane codes,
 * and the bitplane imode selector. */
80 INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
81 ff_vc1_bfraction_bits, 1, 1,
82 ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS);
83 INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
84 ff_vc1_norm2_bits, 1, 1,
85 ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS);
86 INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
87 ff_vc1_norm6_bits, 1, 1,
88 ff_vc1_norm6_codes, 2, 2, 556);
89 INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
90 ff_vc1_imode_bits, 1, 1,
91 ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS);
/* Three per-index variants of the TTMB / TTBLK / SUBBLKPAT
 * transform-type tables; each consumes vlc_offs slots i*3+0 .. i*3+3. */
92 for (i = 0; i < 3; i++) {
93 ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 0]];
94 ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i * 3 + 1] - vlc_offs[i * 3 + 0];
95 init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
96 ff_vc1_ttmb_bits[i], 1, 1,
97 ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
98 ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 1]];
99 ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i * 3 + 2] - vlc_offs[i * 3 + 1];
100 init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
101 ff_vc1_ttblk_bits[i], 1, 1,
102 ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
103 ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 2]];
104 ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i * 3 + 3] - vlc_offs[i * 3 + 2];
105 init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
106 ff_vc1_subblkpat_bits[i], 1, 1,
107 ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* Four variants each of 4MV block pattern, P-picture CBPCY and MV
 * differential tables; slots start at vlc_offs[9]. */
109 for (i = 0; i < 4; i++) {
110 ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 9]];
111 ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i * 3 + 10] - vlc_offs[i * 3 + 9];
112 init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
113 ff_vc1_4mv_block_pattern_bits[i], 1, 1,
114 ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
115 ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 10]];
116 ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i * 3 + 11] - vlc_offs[i * 3 + 10];
117 init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
118 ff_vc1_cbpcy_p_bits[i], 1, 1,
119 ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
120 ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 11]];
121 ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i * 3 + 12] - vlc_offs[i * 3 + 11];
122 init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
123 ff_vc1_mv_diff_bits[i], 1, 1,
124 ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
/* Eight AC coefficient tables (run/level pairs packed as
 * vc1_ac_tables[i][sym][{code,bits}]) interleaved with the eight
 * interlaced 2-reference MVDATA tables; slots start at vlc_offs[21]. */
126 for (i = 0; i < 8; i++) {
127 ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i * 2 + 21]];
128 ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i * 2 + 22] - vlc_offs[i * 2 + 21];
129 init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
130 &vc1_ac_tables[i][0][1], 8, 4,
131 &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC);
132 /* initialize interlaced MVDATA tables (2-Ref) */
133 ff_vc1_2ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 2 + 22]];
134 ff_vc1_2ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 2 + 23] - vlc_offs[i * 2 + 22];
135 init_vlc(&ff_vc1_2ref_mvdata_vlc[i], VC1_2REF_MVDATA_VLC_BITS, 126,
136 ff_vc1_2ref_mvdata_bits[i], 1, 1,
137 ff_vc1_2ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
/* Interlaced-frame P-picture MBMODE tables plus 1-reference MVDATA
 * tables; slots start at vlc_offs[37]. */
139 for (i = 0; i < 4; i++) {
140 /* initialize 4MV MBMODE VLC tables for interlaced frame P picture */
141 ff_vc1_intfr_4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 37]];
142 ff_vc1_intfr_4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 38] - vlc_offs[i * 3 + 37];
143 init_vlc(&ff_vc1_intfr_4mv_mbmode_vlc[i], VC1_INTFR_4MV_MBMODE_VLC_BITS, 15,
144 ff_vc1_intfr_4mv_mbmode_bits[i], 1, 1,
145 ff_vc1_intfr_4mv_mbmode_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
146 /* initialize NON-4MV MBMODE VLC tables for the same */
147 ff_vc1_intfr_non4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 38]];
148 ff_vc1_intfr_non4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 39] - vlc_offs[i * 3 + 38];
149 init_vlc(&ff_vc1_intfr_non4mv_mbmode_vlc[i], VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 9,
150 ff_vc1_intfr_non4mv_mbmode_bits[i], 1, 1,
151 ff_vc1_intfr_non4mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
152 /* initialize interlaced MVDATA tables (1-Ref) */
153 ff_vc1_1ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 39]];
154 ff_vc1_1ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 3 + 40] - vlc_offs[i * 3 + 39];
155 init_vlc(&ff_vc1_1ref_mvdata_vlc[i], VC1_1REF_MVDATA_VLC_BITS, 72,
156 ff_vc1_1ref_mvdata_bits[i], 1, 1,
157 ff_vc1_1ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
/* 2MV block pattern tables use single (non *3/*2) vlc_offs slots 49..53. */
159 for (i = 0; i < 4; i++) {
160 /* Initialize 2MV Block pattern VLC tables */
161 ff_vc1_2mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i + 49]];
162 ff_vc1_2mv_block_pattern_vlc[i].table_allocated = vlc_offs[i + 50] - vlc_offs[i + 49];
163 init_vlc(&ff_vc1_2mv_block_pattern_vlc[i], VC1_2MV_BLOCK_PATTERN_VLC_BITS, 4,
164 ff_vc1_2mv_block_pattern_bits[i], 1, 1,
165 ff_vc1_2mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* Interlaced CBPCY and field-picture MBMODE tables; slots start at
 * vlc_offs[53] and the last slice ends at vlc_offs[77] == 32372. */
167 for (i = 0; i < 8; i++) {
168 /* Initialize interlaced CBPCY VLC tables (Table 124 - Table 131) */
169 ff_vc1_icbpcy_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 53]];
170 ff_vc1_icbpcy_vlc[i].table_allocated = vlc_offs[i * 3 + 54] - vlc_offs[i * 3 + 53];
171 init_vlc(&ff_vc1_icbpcy_vlc[i], VC1_ICBPCY_VLC_BITS, 63,
172 ff_vc1_icbpcy_p_bits[i], 1, 1,
173 ff_vc1_icbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
174 /* Initialize interlaced field picture MBMODE VLC tables */
175 ff_vc1_if_mmv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 54]];
176 ff_vc1_if_mmv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 55] - vlc_offs[i * 3 + 54];
177 init_vlc(&ff_vc1_if_mmv_mbmode_vlc[i], VC1_IF_MMV_MBMODE_VLC_BITS, 8,
178 ff_vc1_if_mmv_mbmode_bits[i], 1, 1,
179 ff_vc1_if_mmv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
180 ff_vc1_if_1mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 55]];
181 ff_vc1_if_1mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 56] - vlc_offs[i * 3 + 55];
182 init_vlc(&ff_vc1_if_1mv_mbmode_vlc[i], VC1_IF_1MV_MBMODE_VLC_BITS, 6,
183 ff_vc1_if_1mv_mbmode_bits[i], 1, 1,
184 ff_vc1_if_1mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* default: no extended motion-vector range (VC-1 spec 7.1.1.18) */
191 v->mvrange = 0; /* 7.1.1.18, p80 */
196 /***********************************************************************/
198 * @name VC-1 Bitplane decoding
216 /** @} */ //imode defines
219 /** @} */ //Bitplane group
/* Write the delayed, already overlap-filtered IDCT blocks of the
 * top-left neighbour MB (and, at the right picture edge, the top
 * neighbour MB) to the output frame, clamping the signed residuals to
 * the 0..255 pixel range.  The delay relative to the decoding loop is
 * explained in the comment inside the function. */
221 static void vc1_put_signed_blocks_clamped(VC1Context *v)
223 MpegEncContext *s = &v->s;
224 int topleft_mb_pos, top_mb_pos;
225 int stride_y, fieldtx;
228 /* The put pixels loop is always one MB row behind the decoding loop,
229 * because we can only put pixels when overlap filtering is done, and
230 * for filtering of the bottom edge of a MB, we need the next MB row
232 * Within the row, the put pixels loop is also one MB col behind the
233 * decoding loop. The reason for this is again, because for filtering
234 * of the right MB edge, we need the next MB present. */
235 if (!s->first_slice_line) {
237 topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
238 fieldtx = v->fieldtx_plane[topleft_mb_pos];
/* fieldtx doubles the luma stride (field-interleaved MB) */
239 stride_y = s->linesize << fieldtx;
/* v_dist = 15 when field-transformed, 8 otherwise: row offset of the
 * lower pair of 8x8 luma blocks */
240 v_dist = (16 - fieldtx) >> (fieldtx == 0);
241 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
242 s->dest[0] - 16 * s->linesize - 16,
244 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
245 s->dest[0] - 16 * s->linesize - 8,
247 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
248 s->dest[0] - v_dist * s->linesize - 16,
250 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
251 s->dest[0] - v_dist * s->linesize - 8,
253 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
254 s->dest[1] - 8 * s->uvlinesize - 8,
256 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
257 s->dest[2] - 8 * s->uvlinesize - 8,
/* rightmost MB column: also flush the block directly above, since no
 * further MB will arrive on this row */
260 if (s->mb_x == s->mb_width - 1) {
261 top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
262 fieldtx = v->fieldtx_plane[top_mb_pos];
263 stride_y = s->linesize << fieldtx;
264 v_dist = fieldtx ? 15 : 8;
265 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
266 s->dest[0] - 16 * s->linesize,
268 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
269 s->dest[0] - 16 * s->linesize + 8,
271 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
272 s->dest[0] - v_dist * s->linesize,
274 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
275 s->dest[0] - v_dist * s->linesize + 8,
277 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
278 s->dest[1] - 8 * s->uvlinesize,
280 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
281 s->dest[2] - 8 * s->uvlinesize,
/* advance the ring-buffer indices of the delayed block buffers;
 * the macro presumably wraps the index back to 0 at n_allocated_blks —
 * wrap statement not visible in this chunk, confirm upstream */
286 #define inc_blk_idx(idx) do { \
288 if (idx >= v->n_allocated_blks) \
292 inc_blk_idx(v->topleft_blk_idx);
293 inc_blk_idx(v->top_blk_idx);
294 inc_blk_idx(v->left_blk_idx);
295 inc_blk_idx(v->cur_blk_idx);
/* In-loop deblocking around the current intra MB (non-delayed variant).
 * pq is the picture quantizer that selects the filter strength.
 * Edges touching rows above the current one are filtered only once the
 * row above exists (!first_slice_line); the last MB row additionally
 * filters its own edges since no later row will do it. */
298 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
300 MpegEncContext *s = &v->s;
302 if (!s->first_slice_line) {
/* filter across the horizontal edge between this MB and the one above,
 * then the vertical edges of the MB above */
303 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
305 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
306 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
307 for (j = 0; j < 2; j++) {
308 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
310 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* internal horizontal luma edge at mid-MB height */
313 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
/* bottom MB row: filter this row's own vertical edges now */
315 if (s->mb_y == s->end_mb_y - 1) {
317 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
318 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
319 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
321 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/* Delayed in-loop deblocking variant: used when the overlap filter is
 * active, so the loop filter must trail it.  All destinations are offset
 * one MB row (16 luma lines) and one MB column (16 luma pixels) back
 * relative to vc1_loop_filter_iblk().  pq selects filter strength. */
325 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
327 MpegEncContext *s = &v->s;
330 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
331 * means it runs two rows/cols behind the decoding loop. */
332 if (!s->first_slice_line) {
/* need at least two decoded rows before the two-rows-back edges exist */
334 if (s->mb_y >= s->start_mb_y + 2) {
335 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
338 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
339 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
340 for (j = 0; j < 2; j++) {
341 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
343 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
347 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
/* rightmost MB column: also handle the column directly above/behind */
350 if (s->mb_x == s->mb_width - 1) {
351 if (s->mb_y >= s->start_mb_y + 2) {
352 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
355 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
356 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
357 for (j = 0; j < 2; j++) {
358 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
360 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
364 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
/* past the last MB row: drain the remaining trailing-row edges */
367 if (s->mb_y == s->end_mb_y) {
370 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
371 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
373 for (j = 0; j < 2; j++) {
374 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
379 if (s->mb_x == s->mb_width - 1) {
381 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
382 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
384 for (j = 0; j < 2; j++) {
385 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Overlap-smoothing filter for intra blocks (advanced profile
 * conditional overlap).  Smoothing is applied when CONDOVER_ALL is set,
 * when pq >= 9, or when the per-MB over_flags bitplane marks both
 * neighbouring MBs.  Operates on the delayed block ring buffers
 * (cur/left/top/topleft_blk_idx), i.e. before put_signed_blocks. */
393 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
395 MpegEncContext *s = &v->s;
398 if (v->condover == CONDOVER_NONE)
401 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
403 /* Within a MB, the horizontal overlap always runs before the vertical.
404 * To accomplish that, we run the H on left and internal borders of the
405 * currently decoded MB. Then, we wait for the next overlap iteration
406 * to do H overlap on the right edge of this MB, before moving over and
407 * running the V overlap. Therefore, the V overlap makes us trail by one
408 * MB col and the H overlap filter makes us trail by one MB row. This
409 * is reflected in the time at which we run the put_pixels loop. */
410 if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
411 if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
412 v->over_flags_plane[mb_pos - 1])) {
/* horizontal smoothing across the left MB boundary (luma pairs 1|0 and
 * 3|2, chroma 4|4 and 5|5 of left vs current block set) */
413 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
414 v->block[v->cur_blk_idx][0]);
415 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
416 v->block[v->cur_blk_idx][2]);
417 if (!(s->flags & CODEC_FLAG_GRAY)) {
418 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
419 v->block[v->cur_blk_idx][4]);
420 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
421 v->block[v->cur_blk_idx][5]);
/* horizontal smoothing across the internal luma block boundary */
424 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
425 v->block[v->cur_blk_idx][1]);
426 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
427 v->block[v->cur_blk_idx][3]);
/* rightmost MB column: run the vertical smoothing for the current MB
 * now, since no further MB on this row will trigger it */
429 if (s->mb_x == s->mb_width - 1) {
430 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
431 v->over_flags_plane[mb_pos - s->mb_stride])) {
432 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
433 v->block[v->cur_blk_idx][0]);
434 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
435 v->block[v->cur_blk_idx][1]);
436 if (!(s->flags & CODEC_FLAG_GRAY)) {
437 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
438 v->block[v->cur_blk_idx][4]);
439 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
440 v->block[v->cur_blk_idx][5]);
443 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
444 v->block[v->cur_blk_idx][2]);
445 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
446 v->block[v->cur_blk_idx][3]);
/* NOTE(review): unlike the sibling conditions above, this one omits the
 * "v->pq >= 9" clause (overlap smoothing is mandatory for PQUANT >= 9 in
 * the VC-1 spec) — verify against upstream before relying on it */
449 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
450 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
451 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
452 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
453 v->block[v->left_blk_idx][0]);
454 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
455 v->block[v->left_blk_idx][1]);
456 if (!(s->flags & CODEC_FLAG_GRAY)) {
457 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
458 v->block[v->left_blk_idx][4]);
459 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
460 v->block[v->left_blk_idx][5]);
463 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
464 v->block[v->left_blk_idx][2]);
465 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
466 v->block[v->left_blk_idx][3]);
470 /** Do motion compensation over 1 macroblock
471 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
* @param v   decoder context
* @param dir 0 = forward prediction (last picture), nonzero = backward
*            (next picture); also indexes s->mv[dir] and ref_field_type
/**
 * Return the median of four motion-vector components, defined as the
 * integer mean of the two middle values.
 *
 * Equivalent to the classic branchy form
 *   (FFMIN(..) + FFMAX(..)) / 2
 * because the sum of the two middle values equals the total sum minus
 * the minimum and the maximum; the division is C truncating division,
 * matching the original expressions.  Inputs are MV components, so the
 * four-way sum cannot overflow in practice.
 *
 * @param a,b,c,d the four values
 * @return median (mean of the two middle values, truncated toward zero)
 */
static inline int median4(int a, int b, int c, int d)
{
    int mn = a, mx = a;

    if (b < mn) mn = b;
    if (b > mx) mx = b;
    if (c < mn) mn = c;
    if (c > mx) mx = c;
    if (d < mn) mn = d;
    if (d > mx) mx = d;

    /* drop one instance each of the extremes, average the rest */
    return (a + b + c + d - mn - mx) / 2;
}
684 /** Do motion compensation for 4-MV macroblock - luminance block
* @param v   decoder context
* @param n   luma block index within the MB (0..3; bit0 = right, bit1 = bottom)
* @param dir 0 = forward reference, nonzero = backward reference
686 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
688 MpegEncContext *s = &v->s;
689 DSPContext *dsp = &v->s.dsp;
691 int dxy, mx, my, src_x, src_y;
/* field MV in interlaced frame pictures changes block layout and strides */
693 int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
694 int v_edge_pos = s->v_edge_pos >> v->field_mode;
696 if ((!v->field_mode ||
697 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
698 !v->s.last_picture.f.data[0])
701 mx = s->mv[dir][n][0];
702 my = s->mv[dir][n][1];
/* reference plane selection; same-frame first field is read from the
 * current (partially reconstructed) picture */
706 if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
707 srcY = s->current_picture.f.data[0];
709 srcY = s->last_picture.f.data[0];
711 srcY = s->last_picture.f.data[0];
713 srcY = s->next_picture.f.data[0];
716 if (v->cur_field_type != v->ref_field_type[dir])
717 my = my - 2 + 4 * v->cur_field_type;
/* after the 4th block of a field P MB, derive and store the MV that
 * B frames will use: pick the dominant field polarity and combine its
 * MVs (median of 4, mid of 3, or mean of 2) */
720 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
721 int same_count = 0, opp_count = 0, k;
722 int chosen_mv[2][4][2], f;
724 for (k = 0; k < 4; k++) {
725 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
726 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
727 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
731 f = opp_count > same_count;
732 switch (f ? opp_count : same_count) {
734 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
735 chosen_mv[f][2][0], chosen_mv[f][3][0]);
736 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
737 chosen_mv[f][2][1], chosen_mv[f][3][1]);
740 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
741 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
744 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
745 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
748 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
749 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
750 for (k = 0; k < 4; k++)
751 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* interlaced frame: pull overly large MVs back inside the picture */
754 if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
756 int width = s->avctx->coded_width;
757 int height = s->avctx->coded_height >> 1;
758 qx = (s->mb_x * 16) + (mx >> 2);
759 qy = (s->mb_y * 8) + (my >> 3);
764 mx -= 4 * (qx - width);
767 else if (qy > height + 1)
768 my -= 8 * (qy - height - 1);
/* destination offset of 8x8 block n inside the 16x16 MB; with field MVs
 * the two bottom blocks start on the second interleaved line */
771 if ((v->fcm == ILACE_FRAME) && fieldmv)
772 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
774 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
775 if (v->field_mode && v->cur_field_type)
776 off += s->current_picture_ptr->f.linesize[0];
778 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
780 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
782 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
784 if (v->profile != PROFILE_ADVANCED) {
785 src_x = av_clip(src_x, -16, s->mb_width * 16);
786 src_y = av_clip(src_y, -16, s->mb_height * 16);
788 src_x = av_clip(src_x, -17, s->avctx->coded_width);
789 if (v->fcm == ILACE_FRAME) {
791 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
793 src_y = av_clip(src_y, -18, s->avctx->coded_height);
795 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
799 srcY += src_y * s->linesize + src_x;
800 if (v->field_mode && v->ref_field_type[dir])
801 srcY += s->current_picture_ptr->f.linesize[0];
803 if (fieldmv && !(src_y & 1))
805 if (fieldmv && (src_y & 1) && src_y < 4)
/* slow path via edge-emulation buffer (edge crossing, range reduction
 * or intensity compensation), mirroring vc1_mc_1mv() */
807 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
808 || s->h_edge_pos < 13 || v_edge_pos < 23
809 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
810 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
811 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
812 /* check emulate edge stride and offset */
813 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
814 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
815 src_x - s->mspel, src_y - (s->mspel << fieldmv),
816 s->h_edge_pos, v_edge_pos);
817 srcY = s->edge_emu_buffer;
818 /* if we deal with range reduction we need to scale source blocks */
819 if (v->rangeredfrm) {
824 for (j = 0; j < 9 + s->mspel * 2; j++) {
825 for (i = 0; i < 9 + s->mspel * 2; i++)
826 src[i] = ((src[i] - 128) >> 1) + 128;
827 src += s->linesize << fieldmv;
830 /* if we deal with intensity compensation we need to scale source blocks */
831 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
836 for (j = 0; j < 9 + s->mspel * 2; j++) {
837 for (i = 0; i < 9 + s->mspel * 2; i++)
838 src[i] = v->luty[src[i]];
839 src += s->linesize << fieldmv;
842 srcY += s->mspel * (1 + (s->linesize << fieldmv));
/* quarter-pel (mspel) vs half-pel interpolation, as in vc1_mc_1mv() */
846 dxy = ((my & 3) << 2) | (mx & 3);
847 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
848 } else { // hpel mc - always used for luma
849 dxy = (my & 2) | ((mx & 2) >> 1);
851 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
853 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Derive the chroma MV (*tx, *ty) of a 4MV macroblock from the four
 * luma MVs (mvx[], mvy[]).  a[] holds a per-block attribute (intra flag
 * or field polarity) and `flag` the value considered usable: blocks
 * whose a[k] != flag are excluded.  Combination rule: all four usable →
 * median4; three → mid_pred of those three; two → their mean.
 * NOTE(review): the return value is not fully visible in this chunk;
 * presumably the number of usable blocks — confirm upstream. */
857 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
/* popcount of a 4-bit mask */
860 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
/* bit k of idx is set when block k is excluded (a[k] != flag) */
862 idx = ((a[3] != flag) << 3)
863 | ((a[2] != flag) << 2)
864 | ((a[1] != flag) << 1)
867 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
868 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
/* exactly one block excluded: mid_pred over the remaining three */
870 } else if (count[idx] == 1) {
873 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
874 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
877 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
878 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
881 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
882 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
885 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
886 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
/* two excluded: locate the two remaining blocks (t1, t2), average them */
889 } else if (count[idx] == 2) {
891 for (i = 0; i < 3; i++)
896 for (i = t1 + 1; i < 4; i++)
901 *tx = (mvx[t1] + mvx[t2]) / 2;
902 *ty = (mvy[t1] + mvy[t2]) / 2;
910 /** Do motion compensation for 4-MV macroblock - both chroma blocks
/* NOTE(review): this excerpt is decimated - each line carries its original
 * file line number and intermediate lines (braces, else-branches, decls such
 * as valid_count/dominant) are elided. Only comments are added here; the
 * surviving code text is left byte-identical. */
912 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
914 MpegEncContext *s = &v->s;
915 DSPContext *dsp = &v->s.dsp;
916 uint8_t *srcU, *srcV;
917 int uvmx, uvmy, uvsrc_x, uvsrc_y;
918 int k, tx = 0, ty = 0;
919 int mvx[4], mvy[4], intra[4], mv_f[4];
921 int chroma_ref_type = v->cur_field_type, off = 0;
922 int v_edge_pos = s->v_edge_pos >> v->field_mode;
/* no reference frame available yet: nothing to compensate from */
924 if (!v->field_mode && !v->s.last_picture.f.data[0])
/* grayscale decoding: chroma planes are not produced */
926 if (s->flags & CODEC_FLAG_GRAY)
/* gather the four luma MVs plus per-block intra flags and field selectors */
929 for (k = 0; k < 4; k++) {
930 mvx[k] = s->mv[dir][k][0];
931 mvy[k] = s->mv[dir][k][1];
932 intra[k] = v->mb_type[0][s->block_index[k]];
934 mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
937 /* calculate chroma MV vector from four luma MVs */
938 if (!v->field_mode || (v->field_mode && !v->numref)) {
939 valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
941 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
942 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
943 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
944 return; //no need to do MC for intra blocks
/* two-reference field picture: derive the dominant field from the 4 MVs */
948 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
950 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty)/* presumably sets chroma MV from dominant-field luma MVs - confirm against elided context */;
952 chroma_ref_type = !v->cur_field_type;
954 if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
956 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
957 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
/* luma->chroma MV scaling: (x + ((x & 3) == 3)) >> 1 halves the MV while
 * rounding the 3/4-pel phase up, per VC-1 chroma derivation */
958 uvmx = (tx + ((tx & 3) == 3)) >> 1;
959 uvmy = (ty + ((ty & 3) == 3)) >> 1;
961 v->luma_mv[s->mb_x][0] = uvmx;
962 v->luma_mv[s->mb_x][1] = uvmy;
/* pull half-pel chroma positions toward full-pel (FASTUVMC-style rounding) */
965 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
966 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
968 // Field conversion bias
969 if (v->cur_field_type != chroma_ref_type)
970 uvmy += 2 - 4 * chroma_ref_type;
972 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
973 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
/* clamp source position into the (padded) chroma plane */
975 if (v->profile != PROFILE_ADVANCED) {
976 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
977 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
979 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
980 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
/* pick the reference plane: current picture (2nd field referencing the
 * 1st), last picture, or next picture (backward reference) */
985 if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
986 srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
987 srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
989 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
990 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
993 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
994 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
997 srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
998 srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1001 if (v->field_mode) {
/* bottom-field lines sit one linesize below the interleaved top field */
1002 if (chroma_ref_type) {
1003 srcU += s->current_picture_ptr->f.linesize[1];
1004 srcV += s->current_picture_ptr->f.linesize[2];
1006 off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
/* edge emulation: needed when the 9x9 read window crosses the picture
 * border, or when the source must be rewritten in place below
 * (range reduction / intensity compensation) */
1009 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1010 || s->h_edge_pos < 18 || v_edge_pos < 18
1011 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1012 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
1013 s->dsp.emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize,
1014 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1015 s->h_edge_pos >> 1, v_edge_pos >> 1);
1016 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1017 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1018 s->h_edge_pos >> 1, v_edge_pos >> 1);
1019 srcU = s->edge_emu_buffer;
1020 srcV = s->edge_emu_buffer + 16;
1022 /* if we deal with range reduction we need to scale source blocks */
1023 if (v->rangeredfrm) {
1025 uint8_t *src, *src2;
1029 for (j = 0; j < 9; j++) {
1030 for (i = 0; i < 9; i++) {
/* halve the deviation from mid-grey (range-reduction descaling) */
1031 src[i] = ((src[i] - 128) >> 1) + 128;
1032 src2[i] = ((src2[i] - 128) >> 1) + 128;
1034 src += s->uvlinesize;
1035 src2 += s->uvlinesize;
1038 /* if we deal with intensity compensation we need to scale source blocks */
1039 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1041 uint8_t *src, *src2;
1045 for (j = 0; j < 9; j++) {
1046 for (i = 0; i < 9; i++) {
/* remap chroma samples through the intensity-compensation LUT */
1047 src[i] = v->lutuv[src[i]];
1048 src2[i] = v->lutuv[src2[i]];
1050 src += s->uvlinesize;
1051 src2 += s->uvlinesize;
1056 /* Chroma MC always uses qpel bilinear */
1057 uvmx = (uvmx & 3) << 1;
1058 uvmy = (uvmy & 3) << 1;
/* NOTE(review): the branch condition selecting between the rounded
 * (h264 chroma) and no-rnd variants is among the elided lines -
 * presumably keyed off v->rnd; confirm against upstream. */
1060 dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1061 dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1063 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1064 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1068 /** Do motion compensation for 4-MV field chroma macroblock (both U and V)
/* NOTE(review): decimated excerpt - only comments are added; code text is
 * byte-identical. Performs four 4x4 chroma MC operations (one per luma
 * block) for interlaced-frame 4-MV macroblocks, always from last_picture. */
1070 static void vc1_mc_4mv_chroma4(VC1Context *v)
1072 MpegEncContext *s = &v->s;
1073 DSPContext *dsp = &v->s.dsp;
1074 uint8_t *srcU, *srcV;
1075 int uvsrc_x, uvsrc_y;
1076 int uvmx_field[4], uvmy_field[4];
1078 int fieldmv = v->blk_mv_type[s->block_index[0]];
/* rounding table used when deriving field-MV chroma vertical components */
1079 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
1080 int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
1081 int v_edge_pos = s->v_edge_pos >> 1;
1083 if (!v->s.last_picture.f.data[0])
1085 if (s->flags & CODEC_FLAG_GRAY)
/* derive one chroma MV per 4x4 sub-block from the four luma MVs */
1088 for (i = 0; i < 4; i++) {
1089 tx = s->mv[0][i][0];
1090 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1091 ty = s->mv[0][i][1];
/* field MV path uses the rounding table; frame MV path (below) uses the
 * same halving rule as progressive chroma derivation */
1093 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1095 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
1098 for (i = 0; i < 4; i++) {
/* destination offset of this 4x4 sub-block within the 8x8 chroma MB */
1099 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1100 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1101 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1102 // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1103 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1104 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1105 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1106 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
/* keep only the sub-pel phase for the bilinear interpolator */
1107 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1108 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
/* field-MV parity adjustments near the top edge; the adjusted statements
 * are among the elided lines */
1110 if (fieldmv && !(uvsrc_y & 1))
1112 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
/* edge emulation when the 5x5 (or field-doubled 5x10) read window crosses
 * the border, or when intensity compensation must rewrite the source */
1114 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
1115 || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1116 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1117 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1118 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
1119 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1120 s->h_edge_pos >> 1, v_edge_pos);
1121 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1122 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1123 s->h_edge_pos >> 1, v_edge_pos);
1124 srcU = s->edge_emu_buffer;
1125 srcV = s->edge_emu_buffer + 16;
1127 /* if we deal with intensity compensation we need to scale source blocks */
1128 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1130 uint8_t *src, *src2;
1134 for (j = 0; j < 5; j++) {
1135 for (i = 0; i < 5; i++) {
/* NOTE(review): inner loop reuses 'i', shadowing the outer sub-block
 * index - works only because of the elided scoping; confirm upstream */
1136 src[i] = v->lutuv[src[i]];
1137 src2[i] = v->lutuv[src2[i]];
1139 src += s->uvlinesize << 1;
1140 src2 += s->uvlinesize << 1;
/* 4x4 chroma MC; stride is doubled for field MVs so lines of one field
 * are read contiguously */
1145 dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1146 dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1148 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1149 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1154 /***********************************************************************/
1156 * @name VC-1 Block-level functions
1157 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1163 * @brief Get macroblock-level quantizer scale
/* Expands in-place inside MB decode loops; reads 'gb', 'v' and 's' from the
 * caller's scope and writes 'mquant' (plus 'mqdiff'/'edges' temporaries).
 * DQPROFILE_ALL_MBS reads a per-MB quantizer from the bitstream; the edge
 * profiles switch to v->altpq on the selected picture edges.
 * NOTE(review): no comment lines are inserted between the '\'-continued
 * lines below - an un-continued line would terminate the macro early. */
1165 #define GET_MQUANT() \
1166 if (v->dquantfrm) { \
1168 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1169 if (v->dqbilevel) { \
1170 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1172 mqdiff = get_bits(gb, 3); \
1174 mquant = v->pq + mqdiff; \
1176 mquant = get_bits(gb, 5); \
1179 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1180 edges = 1 << v->dqsbedge; \
1181 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1182 edges = (3 << v->dqsbedge) % 15; \
1183 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1185 if ((edges&1) && !s->mb_x) \
1186 mquant = v->altpq; \
1187 if ((edges&2) && s->first_slice_line) \
1188 mquant = v->altpq; \
1189 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1190 mquant = v->altpq; \
1191 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1192 mquant = v->altpq; \
1196 * @def GET_MVDATA(_dmv_x, _dmv_y)
1197 * @brief Get MV differentials
1198 * @see MVDATA decoding from 8.3.5.2, p(1)20
1199 * @param _dmv_x Horizontal differential for decoded MV
1200 * @param _dmv_y Vertical differential for decoded MV
/* Expands in-place; reads 'gb', 'v', 's' and the size/offset tables from the
 * caller's scope, sets mb_has_coeffs, and decodes the x/y differentials.
 * Index 35 is the escape (raw k_x/k_y-bit values); index 36 presumably marks
 * an intra/skip case handled in the elided branch - confirm upstream.
 * NOTE(review): comments stay outside the macro body; an un-continued
 * comment line between the '\'-continued lines would truncate the macro. */
1202 #define GET_MVDATA(_dmv_x, _dmv_y) \
1203 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1204 VC1_MV_DIFF_VLC_BITS, 2); \
1206 mb_has_coeffs = 1; \
1209 mb_has_coeffs = 0; \
1212 _dmv_x = _dmv_y = 0; \
1213 } else if (index == 35) { \
1214 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1215 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1216 } else if (index == 36) { \
1221 index1 = index % 6; \
1222 if (!s->quarter_sample && index1 == 5) val = 1; \
1224 if (size_table[index1] - val > 0) \
1225 val = get_bits(gb, size_table[index1] - val); \
1227 sign = 0 - (val&1); \
1228 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1230 index1 = index / 6; \
1231 if (!s->quarter_sample && index1 == 5) val = 1; \
1233 if (size_table[index1] - val > 0) \
1234 val = get_bits(gb, size_table[index1] - val); \
1236 sign = 0 - (val & 1); \
1237 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
/* Decode MVDATA for interlaced field pictures: picks the 1-ref or 2-ref VLC
 * table, applies the DMVRANGE extension, and produces the x/y differentials
 * plus (for 2-ref pictures) the prediction-field flag derived from dmv_y's
 * LSB. Uses offset_table1/2 defined at the top of this file. */
1240 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1241 int *dmv_y, int *pred_flag)
1244 int extend_x = 0, extend_y = 0;
1245 GetBitContext *gb = &v->s.gb;
1248 const int* offs_tab;
/* two-reference field pictures use a distinct MVDATA VLC */
1251 bits = VC1_2REF_MVDATA_VLC_BITS;
1254 bits = VC1_1REF_MVDATA_VLC_BITS;
/* DMVRANGE extends the coded magnitude in x, y, or both (elided cases) */
1257 switch (v->dmvrange) {
1265 extend_x = extend_y = 1;
1268 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
/* escape path: raw fixed-length components */
1270 *dmv_x = get_bits(gb, v->k_x);
1271 *dmv_y = get_bits(gb, v->k_y);
/* 2-ref: LSB of dmv_y selects the reference field */
1273 *pred_flag = *dmv_y & 1;
1274 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1279 offs_tab = offset_table2;
1281 offs_tab = offset_table1;
1282 index1 = (index + 1) % 9;
1284 val = get_bits(gb, index1 + extend_x);
/* sign-fold: (sign ^ magnitude) - sign negates when the LSB was set */
1285 sign = 0 -(val & 1);
1286 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1290 offs_tab = offset_table2;
1292 offs_tab = offset_table1;
1293 index1 = (index + 1) / 9;
1294 if (index1 > v->numref) {
1295 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1296 sign = 0 - (val & 1);
1297 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1301 *pred_flag = index1 & 1;
/* Scale a same-field horizontal MV predictor for field pictures using the
 * refdist-indexed tables (ff_vc1_field_mvpred_scales rows: 1=scalesame1,
 * 2=scalesame2, 3=scalezone1_x, 5=zone1offset_x). Result is clamped to the
 * picture's horizontal MV range. */
1305 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1307 int scaledvalue, refdist;
1308 int scalesame1, scalesame2;
1309 int scalezone1_x, zone1offset_x;
/* table row depends on field parity relative to prediction direction */
1310 int table_index = dir ^ v->second_field;
1312 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1313 refdist = v->refdist;
1315 refdist = dir ? v->brfd : v->frfd;
1318 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1319 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1320 scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1321 zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
/* zone 1: plain scale; outside zone 1: scale plus a signed offset
 * (the sign-selecting conditions are among the elided lines) */
1326 if (FFABS(n) < scalezone1_x)
1327 scaledvalue = (n * scalesame1) >> 8;
1330 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1332 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1335 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/* Vertical counterpart of scaleforsame_x (table rows 4=scalezone1_y,
 * 6=zone1offset_y). The final clamp range is shifted by one when the current
 * field predicts from the opposite-parity reference field. */
1338 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1340 int scaledvalue, refdist;
1341 int scalesame1, scalesame2;
1342 int scalezone1_y, zone1offset_y;
1343 int table_index = dir ^ v->second_field;
1345 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1346 refdist = v->refdist;
1348 refdist = dir ? v->brfd : v->frfd;
1351 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1352 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1353 scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1354 zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
/* sign-selecting conditions for the +/- offset are among the elided lines */
1359 if (FFABS(n) < scalezone1_y)
1360 scaledvalue = (n * scalesame1) >> 8;
1363 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1365 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
/* asymmetric clamp when bottom field references the top field */
1369 if (v->cur_field_type && !v->ref_field_type[dir])
1370 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1372 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/* Scale an opposite-field horizontal MV predictor for B field pictures,
 * using the brfd-indexed B-picture tables (rows: 1=scaleopp1, 2=scaleopp2,
 * 3=scalezone1_x, 5=zone1offset_x). Clamped to the horizontal MV range. */
1375 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1377 int scalezone1_x, zone1offset_x;
1378 int scaleopp1, scaleopp2, brfd;
1381 brfd = FFMIN(v->brfd, 3);
1382 scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1383 zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1384 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1385 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
/* sign-selecting conditions for the +/- offset are among the elided lines */
1390 if (FFABS(n) < scalezone1_x)
1391 scaledvalue = (n * scaleopp1) >> 8;
1394 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1396 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1399 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/* Vertical counterpart of scaleforopp_x (B-picture table rows 4 and 6);
 * clamp range shifts by one when predicting across field parity. */
1402 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1404 int scalezone1_y, zone1offset_y;
1405 int scaleopp1, scaleopp2, brfd;
1408 brfd = FFMIN(v->brfd, 3);
1409 scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1410 zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1411 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1412 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
/* sign-selecting conditions for the +/- offset are among the elided lines */
1417 if (FFABS(n) < scalezone1_y)
1418 scaledvalue = (n * scaleopp1) >> 8;
1421 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1423 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1426 if (v->cur_field_type && !v->ref_field_type[dir]) {
1427 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1429 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/* Dispatch wrapper: scale a same-field predictor component. For non-B (or
 * second-field / forward) prediction it defers to the x/y zone-based
 * helpers; otherwise it applies the flat B-picture scalesame factor.
 * 'hpel' restores half-pel units when the picture is not quarter-sample. */
1433 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1436 int brfd, scalesame;
1437 int hpel = 1 - v->s.quarter_sample;
/* component selection (x vs y) is done by the elided condition on 'i' */
1440 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1442 n = scaleforsame_y(v, i, n, dir) << hpel;
1444 n = scaleforsame_x(v, n, dir) << hpel;
1447 brfd = FFMIN(v->brfd, 3);
1448 scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1450 n = (n * scalesame >> 8) << hpel;
/* Dispatch wrapper: scale an opposite-field predictor component. B-picture
 * backward prediction in the first field uses the zone-based x/y helpers;
 * every other case applies the flat scaleopp factor from the field tables. */
1454 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1457 int refdist, scaleopp;
1458 int hpel = 1 - v->s.quarter_sample;
/* component selection (x vs y) is done by an elided condition */
1461 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1463 n = scaleforopp_y(v, n, dir) << hpel;
1465 n = scaleforopp_x(v, n) << hpel;
1468 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1469 refdist = FFMIN(v->refdist, 3);
1471 refdist = dir ? v->brfd : v->frfd;
1472 scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1474 n = (n * scaleopp >> 8) << hpel;
1478 /** Predict and set motion vector
/* Median-predicts the MV for luma block n (mv1 != 0 => whole-MB 1-MV) from
 * neighbours A (above), B (above-diagonal) and C (left), applies the VC-1
 * pullback and hybrid-prediction rules, adds the decoded differential, and
 * stores the result into s->mv and current_picture motion_val. Field-picture
 * paths additionally rescale predictors across field parity and track
 * v->mv_f / v->ref_field_type. NOTE(review): decimated excerpt - only
 * comments added, code text byte-identical. */
1480 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1481 int mv1, int r_x, int r_y, uint8_t* is_intra,
1482 int pred_flag, int dir)
1484 MpegEncContext *s = &v->s;
1485 int xy, wrap, off = 0;
1489 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1490 int opposit, a_f, b_f, c_f;
1491 int16_t field_predA[2];
1492 int16_t field_predB[2];
1493 int16_t field_predC[2];
1494 int a_valid, b_valid, c_valid;
1495 int hybridmv_thresh, y_bias = 0;
1497 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1498 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1502 /* scale MV difference to be quad-pel */
1503 dmv_x <<= 1 - s->quarter_sample;
1504 dmv_y <<= 1 - s->quarter_sample;
1506 wrap = s->b8_stride;
1507 xy = s->block_index[n];
/* intra block: zero all stored MVs for this position (elided condition) */
1510 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
1511 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
1512 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
1513 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
1514 if (mv1) { /* duplicate motion data for 1-MV block */
1515 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1516 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1517 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1518 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1519 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1520 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1521 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1522 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1523 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
/* NOTE(review): '[xy + wrap][0]' below lacks '+ v->blocks_off', unlike its
 * [1] partner on the next line and every motion_val[0] counterpart above -
 * looks like a genuine index bug; confirm against upstream history. */
1524 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1525 s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1526 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1527 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
/* neighbour predictors: C = left, A = above, B = above at offset 'off' */
1532 C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
1533 A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
1535 if (v->field_mode && mixedmv_pic)
1536 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1538 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1540 //in 4-MV mode different blocks have different B predictor position
1543 off = (s->mb_x > 0) ? -1 : 1;
1546 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1555 B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
/* neighbour availability given slice/picture boundaries and block index */
1557 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1558 b_valid = a_valid && (s->mb_width > 1);
1559 c_valid = s->mb_x || (n == 1 || n == 3);
1560 if (v->field_mode) {
1561 a_valid = a_valid && !is_intra[xy - wrap];
1562 b_valid = b_valid && !is_intra[xy - wrap + off];
1563 c_valid = c_valid && !is_intra[xy - 1];
/* tally same-field vs opposite-field neighbours and fetch their MVs */
1567 a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1568 num_oppfield += a_f;
1569 num_samefield += 1 - a_f;
1570 field_predA[0] = A[0];
1571 field_predA[1] = A[1];
1573 field_predA[0] = field_predA[1] = 0;
1577 b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1578 num_oppfield += b_f;
1579 num_samefield += 1 - b_f;
1580 field_predB[0] = B[0];
1581 field_predB[1] = B[1];
1583 field_predB[0] = field_predB[1] = 0;
1587 c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1588 num_oppfield += c_f;
1589 num_samefield += 1 - c_f;
1590 field_predC[0] = C[0];
1591 field_predC[1] = C[1];
1593 field_predC[0] = field_predC[1] = 0;
/* decide which reference field this MV points at ('opposit' is the
 * original spelling of 'opposite') */
1597 if (v->field_mode) {
1598 if (num_samefield <= num_oppfield)
1599 opposit = 1 - pred_flag;
1601 opposit = pred_flag;
/* opposite-field prediction: rescale any same-field neighbour MVs */
1605 if (a_valid && !a_f) {
1606 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1607 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1609 if (b_valid && !b_f) {
1610 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1611 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1613 if (c_valid && !c_f) {
1614 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1615 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1617 v->mv_f[dir][xy + v->blocks_off] = 1;
1618 v->ref_field_type[dir] = !v->cur_field_type;
/* same-field prediction: rescale any opposite-field neighbour MVs */
1620 if (a_valid && a_f) {
1621 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1622 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1624 if (b_valid && b_f) {
1625 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1626 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1628 if (c_valid && c_f) {
1629 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1630 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1632 v->mv_f[dir][xy + v->blocks_off] = 0;
1633 v->ref_field_type[dir] = v->cur_field_type;
/* single-neighbour fallbacks (priority A, then C, then B) */
1637 px = field_predA[0];
1638 py = field_predA[1];
1639 } else if (c_valid) {
1640 px = field_predC[0];
1641 py = field_predC[1];
1642 } else if (b_valid) {
1643 px = field_predB[0];
1644 py = field_predB[1];
/* >= 2 usable neighbours: median prediction */
1650 if (num_samefield + num_oppfield > 1) {
1651 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1652 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1655 /* Pullback MV as specified in 8.3.5.3.4 */
1656 if (!v->field_mode) {
1658 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1659 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1660 X = (s->mb_width << 6) - 4;
1661 Y = (s->mb_height << 6) - 4;
/* -60 / -28 lower bounds presumably select between mspel on/off cases -
 * the branch condition is among the elided lines; confirm upstream */
1663 if (qx + px < -60) px = -60 - qx;
1664 if (qy + py < -60) py = -60 - qy;
1666 if (qx + px < -28) px = -28 - qx;
1667 if (qy + py < -28) py = -28 - qy;
1669 if (qx + px > X) px = X - qx;
1670 if (qy + py > Y) py = Y - qy;
1673 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1674 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1675 hybridmv_thresh = 32;
1676 if (a_valid && c_valid) {
1677 if (is_intra[xy - wrap])
1678 sum = FFABS(px) + FFABS(py);
1680 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1681 if (sum > hybridmv_thresh) {
1682 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1683 px = field_predA[0];
1684 py = field_predA[1];
1686 px = field_predC[0];
1687 py = field_predC[1];
1690 if (is_intra[xy - 1])
1691 sum = FFABS(px) + FFABS(py);
1693 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1694 if (sum > hybridmv_thresh) {
1695 if (get_bits1(&s->gb)) {
1696 px = field_predA[0];
1697 py = field_predA[1];
1699 px = field_predC[0];
1700 py = field_predC[1];
/* field-mode adjustments to r_y / y_bias (bodies elided) */
1707 if (v->field_mode && !s->quarter_sample) {
1711 if (v->field_mode && v->numref)
1713 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1715 /* store MV using signed modulus of MV range defined in 4.11 */
1716 s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1717 s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1718 if (mv1) { /* duplicate motion data for 1-MV block */
1719 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1720 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1721 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1722 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1723 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1724 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1725 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1726 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1730 /** Predict and set motion vector for interlaced frame picture MBs
/* Interlaced-frame MV prediction: builds A/B/C candidates (averaging field
 * MV pairs when frame/field MV types of current block and neighbour differ),
 * counts same- vs opposite-field candidates via bit 2 of the y component,
 * median- or priority-selects the predictor, adds the differential, and
 * stores/duplicates the result per mvn (1 = 1-MV MB, 2 = 2-field-MV).
 * NOTE(review): decimated excerpt - only comments added. */
1732 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1733 int mvn, int r_x, int r_y, uint8_t* is_intra)
1735 MpegEncContext *s = &v->s;
1736 int xy, wrap, off = 0;
1737 int A[2], B[2], C[2];
1739 int a_valid = 0, b_valid = 0, c_valid = 0;
1740 int field_a, field_b, field_c; // 0: same, 1: opposit
1741 int total_valid, num_samefield, num_oppfield;
1742 int pos_c, pos_b, n_adj;
1744 wrap = s->b8_stride;
1745 xy = s->block_index[n];
/* intra block: zero all stored MVs for this position (elided condition) */
1748 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
1749 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
1750 s->current_picture.f.motion_val[1][xy][0] = 0;
1751 s->current_picture.f.motion_val[1][xy][1] = 0;
1752 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1753 s->current_picture.f.motion_val[0][xy + 1][0] = 0;
1754 s->current_picture.f.motion_val[0][xy + 1][1] = 0;
1755 s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
1756 s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
1757 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
1758 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
1759 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1760 s->current_picture.f.motion_val[1][xy + 1][0] = 0;
1761 s->current_picture.f.motion_val[1][xy + 1][1] = 0;
1762 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1763 s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
1764 s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
1765 s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
/* predictor A (left): sign of 'off' picks the other field row to average */
1770 off = ((n == 0) || (n == 1)) ? 1 : -1;
1772 if (s->mb_x || (n == 1) || (n == 3)) {
1773 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1774 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1775 A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
1776 A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
1778 } else { // current block has frame mv and cand. has field MV (so average)
1779 A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
1780 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
1781 A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
1782 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
1785 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1791 /* Predict B and C */
1792 B[0] = B[1] = C[0] = C[1] = 0;
1793 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1794 if (!s->first_slice_line) {
1795 if (!v->is_intra[s->mb_x - s->mb_stride]) {
1798 pos_b = s->block_index[n_adj] - 2 * wrap;
1799 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1800 n_adj = (n & 2) | (n & 1);
1802 B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
1803 B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
/* neighbour has field MV but current is frame MV: average the pair */
1804 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1805 B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1806 B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
/* predictor C: above-right normally, above-left on the last MB column */
1809 if (s->mb_width > 1) {
1810 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1813 pos_c = s->block_index[2] - 2 * wrap + 2;
1814 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1817 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
1818 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
1819 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1820 C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1821 C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1823 if (s->mb_x == s->mb_width - 1) {
1824 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1827 pos_c = s->block_index[3] - 2 * wrap - 2;
1828 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1831 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
1832 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
1833 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1834 C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1835 C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
/* bottom blocks of a field-MV MB take B/C from inside the same MB */
1844 pos_b = s->block_index[1];
1846 B[0] = s->current_picture.f.motion_val[0][pos_b][0];
1847 B[1] = s->current_picture.f.motion_val[0][pos_b][1];
1848 pos_c = s->block_index[0];
1850 C[0] = s->current_picture.f.motion_val[0][pos_c][0];
1851 C[1] = s->current_picture.f.motion_val[0][pos_c][1];
1854 total_valid = a_valid + b_valid + c_valid;
1855 // check if predictor A is out of bounds
1856 if (!s->mb_x && !(n == 1 || n == 3)) {
1859 // check if predictor B is out of bounds
1860 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1861 B[0] = B[1] = C[0] = C[1] = 0;
/* frame-MV block: straightforward median / single-candidate selection */
1863 if (!v->blk_mv_type[xy]) {
1864 if (s->mb_width == 1) {
1868 if (total_valid >= 2) {
1869 px = mid_pred(A[0], B[0], C[0]);
1870 py = mid_pred(A[1], B[1], C[1]);
1871 } else if (total_valid) {
1872 if (a_valid) { px = A[0]; py = A[1]; }
1873 if (b_valid) { px = B[0]; py = B[1]; }
1874 if (c_valid) { px = C[0]; py = C[1]; }
/* field-MV block: classify candidates by field parity (bit 2 of the
 * y component distinguishes same- vs opposite-field MVs) */
1880 field_a = (A[1] & 4) ? 1 : 0;
1884 field_b = (B[1] & 4) ? 1 : 0;
1888 field_c = (C[1] & 4) ? 1 : 0;
1892 num_oppfield = field_a + field_b + field_c;
1893 num_samefield = total_valid - num_oppfield;
1894 if (total_valid == 3) {
1895 if ((num_samefield == 3) || (num_oppfield == 3)) {
1896 px = mid_pred(A[0], B[0], C[0]);
1897 py = mid_pred(A[1], B[1], C[1]);
1898 } else if (num_samefield >= num_oppfield) {
1899 /* take one MV from same field set depending on priority
1900 the check for B may not be necessary */
1901 px = !field_a ? A[0] : B[0];
1902 py = !field_a ? A[1] : B[1];
1904 px = field_a ? A[0] : B[0];
1905 py = field_a ? A[1] : B[1];
1907 } else if (total_valid == 2) {
1908 if (num_samefield >= num_oppfield) {
1909 if (!field_a && a_valid) {
1912 } else if (!field_b && b_valid) {
1915 } else if (c_valid) {
1920 if (field_a && a_valid) {
1923 } else if (field_b && b_valid) {
1926 } else if (c_valid) {
1931 } else if (total_valid == 1) {
1932 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1933 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1938 /* store MV using signed modulus of MV range defined in 4.11 */
1939 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1940 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1941 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1942 s->current_picture.f.motion_val[0][xy + 1 ][0] = s->current_picture.f.motion_val[0][xy][0];
1943 s->current_picture.f.motion_val[0][xy + 1 ][1] = s->current_picture.f.motion_val[0][xy][1];
1944 s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
1945 s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
1946 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1947 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1948 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1949 s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1950 s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1951 s->mv[0][n + 1][0] = s->mv[0][n][0];
1952 s->mv[0][n + 1][1] = s->mv[0][n][1];
1956 /** Motion compensation for direct or interpolated blocks in B-frames
1958 static void vc1_interp_mc(VC1Context *v)
1960 MpegEncContext *s = &v->s;
1961 DSPContext *dsp = &v->s.dsp;
1962 uint8_t *srcY, *srcU, *srcV;
1963 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1965 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1967 if (!v->field_mode && !v->s.next_picture.f.data[0])
1970 mx = s->mv[1][0][0];
1971 my = s->mv[1][0][1];
1972 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1973 uvmy = (my + ((my & 3) == 3)) >> 1;
1974 if (v->field_mode) {
1975 if (v->cur_field_type != v->ref_field_type[1])
1976 my = my - 2 + 4 * v->cur_field_type;
1977 uvmy = uvmy - 2 + 4 * v->cur_field_type;
1980 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1981 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1983 srcY = s->next_picture.f.data[0];
1984 srcU = s->next_picture.f.data[1];
1985 srcV = s->next_picture.f.data[2];
1987 src_x = s->mb_x * 16 + (mx >> 2);
1988 src_y = s->mb_y * 16 + (my >> 2);
1989 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1990 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1992 if (v->profile != PROFILE_ADVANCED) {
1993 src_x = av_clip( src_x, -16, s->mb_width * 16);
1994 src_y = av_clip( src_y, -16, s->mb_height * 16);
1995 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1996 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1998 src_x = av_clip( src_x, -17, s->avctx->coded_width);
1999 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
2000 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
2001 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
2004 srcY += src_y * s->linesize + src_x;
2005 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2006 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2008 if (v->field_mode && v->ref_field_type[1]) {
2009 srcY += s->current_picture_ptr->f.linesize[0];
2010 srcU += s->current_picture_ptr->f.linesize[1];
2011 srcV += s->current_picture_ptr->f.linesize[2];
2014 /* for grayscale we should not try to read from unknown area */
2015 if (s->flags & CODEC_FLAG_GRAY) {
2016 srcU = s->edge_emu_buffer + 18 * s->linesize;
2017 srcV = s->edge_emu_buffer + 18 * s->linesize;
2020 if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22
2021 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 16 - s->mspel * 3
2022 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my & 3) - 16 - s->mspel * 3) {
2023 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
2025 srcY -= s->mspel * (1 + s->linesize);
2026 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
2027 17 + s->mspel * 2, 17 + s->mspel * 2,
2028 src_x - s->mspel, src_y - s->mspel,
2029 s->h_edge_pos, v_edge_pos);
2030 srcY = s->edge_emu_buffer;
2031 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
2032 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2033 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
2034 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2037 /* if we deal with range reduction we need to scale source blocks */
2038 if (v->rangeredfrm) {
2040 uint8_t *src, *src2;
2043 for (j = 0; j < 17 + s->mspel * 2; j++) {
2044 for (i = 0; i < 17 + s->mspel * 2; i++)
2045 src[i] = ((src[i] - 128) >> 1) + 128;
2050 for (j = 0; j < 9; j++) {
2051 for (i = 0; i < 9; i++) {
2052 src[i] = ((src[i] - 128) >> 1) + 128;
2053 src2[i] = ((src2[i] - 128) >> 1) + 128;
2055 src += s->uvlinesize;
2056 src2 += s->uvlinesize;
2059 srcY += s->mspel * (1 + s->linesize);
2062 if (v->field_mode && v->cur_field_type) {
2063 off = s->current_picture_ptr->f.linesize[0];
2064 off_uv = s->current_picture_ptr->f.linesize[1];
2071 dxy = ((my & 3) << 2) | (mx & 3);
2072 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2073 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2074 srcY += s->linesize * 8;
2075 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2076 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2078 dxy = (my & 2) | ((mx & 2) >> 1);
2081 dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2083 dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2086 if (s->flags & CODEC_FLAG_GRAY) return;
2087 /* Chroma MC always uses qpel bilinear */
2088 uvmx = (uvmx & 3) << 1;
2089 uvmy = (uvmy & 3) << 1;
2091 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2092 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2094 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2095 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/** Scale a co-located MV component by the B-frame fraction (direct mode).
 * @param value MV component taken from the co-located next-picture block
 * @param bfrac B fraction, in units of B_FRACTION_DEN
 * @param inv   scale toward the backward reference (uses n - B_FRACTION_DEN)
 * @param qs    quarter-sample flag; the "2 *" paths keep quarter-pel precision
 * NOTE(review): this listing is elided (branch/brace lines are missing), so
 * the pairing of the return statements with their selecting conditions must
 * be confirmed against the complete source.
 */
2099 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2103 #if B_FRACTION_DEN==256
/* fast path when the denominator is 256: shifts instead of divisions */
2107 return 2 * ((value * n + 255) >> 9);
2108 return (value * n + 128) >> 8;
2111 n -= B_FRACTION_DEN;
2113 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2114 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
/** Scale a co-located MV component by the B-frame fraction for
 * interlaced field pictures.
 * @param qs_last quarter-sample flag of the reference (next) picture
 * NOTE(review): elided listing — the condition that selects between the two
 * rounding paths below is not visible here; confirm against the full source.
 */
2118 static av_always_inline int scale_mv_intfi(int value, int bfrac, int inv,
2119 int qs, int qs_last)
2127         return (value * n + 255) >> 9;
2129         return (value * n + 128) >> 8;
2132 /** Reconstruct motion vector for B-frame and do motion compensation
 * for the given prediction mode (forward / backward / interpolated).
 * When intensity compensation is active, v->mv_mode is temporarily forced
 * to MV_PMODE_INTENSITY_COMP around the MC calls and then restored from
 * the saved copy in v->mv_mode2.
 * NOTE(review): elided listing — the MC calls for several branches and the
 * surrounding brace structure are missing from this excerpt.
 */
2134 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2135 int direct, int mode)
2138 v->mv_mode2 = v->mv_mode; /* save the mode so it can be restored below */
2139 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2145 v->mv_mode = v->mv_mode2;
2148 if (mode == BMV_TYPE_INTERPOLATED) {
2152 v->mv_mode = v->mv_mode2;
2156 if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2157 v->mv_mode = v->mv_mode2;
2158 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD)); /* 1-MV MC; arg selects backward ref */
2160 v->mv_mode = v->mv_mode2;
/** Predict forward and/or backward motion vectors for a B-frame macroblock.
 * Direct mode scales the co-located MV of the next picture by bfraction
 * (scale_mv) and pulls it back per 8.4.5.4; the explicit modes median-predict
 * from neighbours A (above), B (above, offset) and C (left), pull the
 * prediction back into the frame (8.3.5.3.4) and store the final MV using
 * the signed modulus of the MV range (4.11).
 * NOTE(review): elided listing — else-branches, brace lines and parts of the
 * neighbour-selection logic are missing from this excerpt; the structure
 * below must be read against the complete source.
 */
2163 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2164 int direct, int mvtype)
2166 MpegEncContext *s = &v->s;
2167 int xy, wrap, off = 0;
2172 const uint8_t *is_intra = v->mb_type[0];
2176 /* scale MV difference to be quad-pel */
2177 dmv_x[0] <<= 1 - s->quarter_sample;
2178 dmv_y[0] <<= 1 - s->quarter_sample;
2179 dmv_x[1] <<= 1 - s->quarter_sample;
2180 dmv_y[1] <<= 1 - s->quarter_sample;
2182 wrap = s->b8_stride;
2183 xy = s->block_index[0];
/* reset both direction MVs before deriving new ones */
2186 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
2187 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
2188 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
2189 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
2192 if (!v->field_mode) {
2193 s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2194 s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2195 s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2196 s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2198 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2199 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2200 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2201 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2202 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2205 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2206 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2207 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2208 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
/* forward prediction: derive s->mv[0] from spatial neighbours */
2212 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2213 C = s->current_picture.f.motion_val[0][xy - 2];
2214 A = s->current_picture.f.motion_val[0][xy - wrap * 2];
2215 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2216 B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
2218 if (!s->mb_x) C[0] = C[1] = 0;
2219 if (!s->first_slice_line) { // predictor A is not out of bounds
2220 if (s->mb_width == 1) {
2224 px = mid_pred(A[0], B[0], C[0]);
2225 py = mid_pred(A[1], B[1], C[1]);
2227 } else if (s->mb_x) { // predictor C is not out of bounds
2233 /* Pullback MV as specified in 8.3.5.3.4 */
2236 if (v->profile < PROFILE_ADVANCED) {
2237 qx = (s->mb_x << 5);
2238 qy = (s->mb_y << 5);
2239 X = (s->mb_width << 5) - 4;
2240 Y = (s->mb_height << 5) - 4;
2241 if (qx + px < -28) px = -28 - qx;
2242 if (qy + py < -28) py = -28 - qy;
2243 if (qx + px > X) px = X - qx;
2244 if (qy + py > Y) py = Y - qy;
2246 qx = (s->mb_x << 6);
2247 qy = (s->mb_y << 6);
2248 X = (s->mb_width << 6) - 4;
2249 Y = (s->mb_height << 6) - 4;
2250 if (qx + px < -60) px = -60 - qx;
2251 if (qy + py < -60) py = -60 - qy;
2252 if (qx + px > X) px = X - qx;
2253 if (qy + py > Y) py = Y - qy;
2256 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2257 if (0 && !s->first_slice_line && s->mb_x) { // NB: "0 &&" makes this hybrid-prediction path dead code
2258 if (is_intra[xy - wrap])
2259 sum = FFABS(px) + FFABS(py);
2261 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2263 if (get_bits1(&s->gb)) {
2271 if (is_intra[xy - 2])
2272 sum = FFABS(px) + FFABS(py);
2274 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2276 if (get_bits1(&s->gb)) {
2286 /* store MV using signed modulus of MV range defined in 4.11 */
2287 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2288 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* backward prediction: same procedure applied to s->mv[1] */
2290 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2291 C = s->current_picture.f.motion_val[1][xy - 2];
2292 A = s->current_picture.f.motion_val[1][xy - wrap * 2];
2293 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2294 B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
2298 if (!s->first_slice_line) { // predictor A is not out of bounds
2299 if (s->mb_width == 1) {
2303 px = mid_pred(A[0], B[0], C[0]);
2304 py = mid_pred(A[1], B[1], C[1]);
2306 } else if (s->mb_x) { // predictor C is not out of bounds
2312 /* Pullback MV as specified in 8.3.5.3.4 */
2315 if (v->profile < PROFILE_ADVANCED) {
2316 qx = (s->mb_x << 5);
2317 qy = (s->mb_y << 5);
2318 X = (s->mb_width << 5) - 4;
2319 Y = (s->mb_height << 5) - 4;
2320 if (qx + px < -28) px = -28 - qx;
2321 if (qy + py < -28) py = -28 - qy;
2322 if (qx + px > X) px = X - qx;
2323 if (qy + py > Y) py = Y - qy;
2325 qx = (s->mb_x << 6);
2326 qy = (s->mb_y << 6);
2327 X = (s->mb_width << 6) - 4;
2328 Y = (s->mb_height << 6) - 4;
2329 if (qx + px < -60) px = -60 - qx;
2330 if (qy + py < -60) py = -60 - qy;
2331 if (qx + px > X) px = X - qx;
2332 if (qy + py > Y) py = Y - qy;
2335 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2336 if (0 && !s->first_slice_line && s->mb_x) { // NB: "0 &&" makes this hybrid-prediction path dead code
2337 if (is_intra[xy - wrap])
2338 sum = FFABS(px) + FFABS(py);
2340 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2342 if (get_bits1(&s->gb)) {
2350 if (is_intra[xy - 2])
2351 sum = FFABS(px) + FFABS(py);
2353 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2355 if (get_bits1(&s->gb)) {
2365 /* store MV using signed modulus of MV range defined in 4.11 */
2367 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2368 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
/* publish the final MVs for use as neighbours by later macroblocks */
2370 s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
2371 s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
2372 s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
2373 s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
/** Predict B-frame motion vectors for interlaced-field pictures.
 * Direct mode scales the co-located next-picture MV (scale_mv_intfi) when
 * the co-located MB is not intra, and derives the reference field type from
 * the majority of the four mv_f_next flags; other modes delegate to
 * vc1_pred_mv per direction.
 * @param n         block index within the MB
 * @param mv1       1 = one MV per MB (4 blocks share it)
 * @param pred_flag per-direction predictor-selection flags
 * NOTE(review): elided listing — some else-branches and brace lines are
 * missing from this excerpt.
 */
2376 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2378 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2379 MpegEncContext *s = &v->s;
2380 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2382 if (v->bmvtype == BMV_TYPE_DIRECT) {
2383 int total_opp, k, f;
2384 if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2385 s->mv[0][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2386 v->bfraction, 0, s->quarter_sample, v->qs_last);
2387 s->mv[0][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2388 v->bfraction, 0, s->quarter_sample, v->qs_last);
2389 s->mv[1][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2390 v->bfraction, 1, s->quarter_sample, v->qs_last);
2391 s->mv[1][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2392 v->bfraction, 1, s->quarter_sample, v->qs_last);
/* majority vote over the 4 blocks' field flags of the co-located MB */
2394 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2395 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2396 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2397 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2398 f = (total_opp > 2) ? 1 : 0;
2400 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2401 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2404 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2405 for (k = 0; k < 4; k++) {
2406 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2407 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2408 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2409 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2410 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2411 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2415 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2416 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2417 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2420 if (dir) { // backward
2421 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2422 if (n == 3 || mv1) {
2423 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2426 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2427 if (n == 3 || mv1) {
2428 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2433 /** Get predicted DC value for I-frames only
2434 * prediction dir: left=0, top=1
2435 * @param s MpegEncContext
2436 * @param overlap flag indicating that overlap filtering is used
2437 * @param pq integer part of picture quantizer
2438 * @param[in] n block index in the current MB
2439 * @param dc_val_ptr Pointer to DC predictor
2440 * @param dir_ptr Prediction direction for use in AC prediction
 * @return the selected DC predictor (presumably via a return elided from
 *         this excerpt — confirm against the full source)
 */
2442 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2443 int16_t **dc_val_ptr, int *dir_ptr)
2445 int a, b, c, wrap, pred, scale;
/* default DC prediction values indexed by the DC scale (index 0 unused) */
2447 static const uint16_t dcpred[32] = {
2448 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2449 114, 102, 93, 85, 79, 73, 68, 64,
2450 60, 57, 54, 51, 49, 47, 45, 43,
2451 41, 39, 38, 37, 35, 34, 33
2454 /* find prediction - wmv3_dc_scale always used here in fact */
2455 if (n < 4) scale = s->y_dc_scale;
2456 else scale = s->c_dc_scale;
2458 wrap = s->block_wrap[n];
2459 dc_val = s->dc_val[0] + s->block_index[n];
/* a = above, b = above-left, c = left (c's read is elided here) */
2465 b = dc_val[ - 1 - wrap];
2466 a = dc_val[ - wrap];
2468 if (pq < 9 || !overlap) {
2469 /* Set outer values */
2470 if (s->first_slice_line && (n != 2 && n != 3))
2471 b = a = dcpred[scale];
2472 if (s->mb_x == 0 && (n != 1 && n != 3))
2473 b = c = dcpred[scale];
2475 /* Set outer values */
2476 if (s->first_slice_line && (n != 2 && n != 3))
2478 if (s->mb_x == 0 && (n != 1 && n != 3))
/* pick direction by which neighbour gradient is smaller */
2482 if (abs(a - b) <= abs(b - c)) {
2484 *dir_ptr = 1; // left
2487 *dir_ptr = 0; // top
2490 /* update predictor */
2491 *dc_val_ptr = &dc_val[0];
2496 /** Get predicted DC value
2497 * prediction dir: left=0, top=1
2498 * @param s MpegEncContext
2499 * @param overlap flag indicating that overlap filtering is used
2500 * @param pq integer part of picture quantizer
2501 * @param[in] n block index in the current MB
2502 * @param a_avail flag indicating top block availability
2503 * @param c_avail flag indicating left block availability
2504 * @param dc_val_ptr Pointer to DC predictor
2505 * @param dir_ptr Prediction direction for use in AC prediction
 * Neighbour predictors are rescaled when their MB used a different
 * quantizer (q2) than the current one (q1).
 */
2507 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2508 int a_avail, int c_avail,
2509 int16_t **dc_val_ptr, int *dir_ptr)
2511 int a, b, c, wrap, pred;
2513 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2516 wrap = s->block_wrap[n];
2517 dc_val = s->dc_val[0] + s->block_index[n];
/* a = above, b = above-left; the left (c) read is elided from this excerpt */
2523 b = dc_val[ - 1 - wrap];
2524 a = dc_val[ - wrap];
2525 /* scale predictors if needed */
2526 q1 = s->current_picture.f.qscale_table[mb_pos];
2527 if (c_avail && (n != 1 && n != 3)) {
2528 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2530 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2532 if (a_avail && (n != 2 && n != 3)) {
2533 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2535 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2537 if (a_avail && c_avail && (n != 3)) {
2542 off -= s->mb_stride;
2543 q2 = s->current_picture.f.qscale_table[off];
2545 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* choose prediction direction from whichever neighbours are available */
2548 if (a_avail && c_avail) {
2549 if (abs(a - b) <= abs(b - c)) {
2551 *dir_ptr = 1; // left
2554 *dir_ptr = 0; // top
2556 } else if (a_avail) {
2558 *dir_ptr = 0; // top
2559 } else if (c_avail) {
2561 *dir_ptr = 1; // left
2564 *dir_ptr = 1; // left
2567 /* update predictor */
2568 *dc_val_ptr = &dc_val[0];
2572 /** @} */ // Block group
2575 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2576 * @see 7.1.4, p91 and 8.1.1.7, p104
/** Predict the coded-block flag for block n from its decoded neighbours.
 * Reads the left (a), above-left (b) and above (c) entries of
 * s->coded_block and returns a prediction; also hands back a pointer to
 * this block's slot so the caller can store the actual flag.
 * NOTE(review): elided listing — the prediction formula and return are
 * missing from this excerpt; confirm against the full source.
 */
2580 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2581 uint8_t **coded_block_ptr)
2583 int xy, wrap, pred, a, b, c;
2585 xy = s->block_index[n];
2586 wrap = s->b8_stride;
2591 a = s->coded_block[xy - 1 ];
2592 b = s->coded_block[xy - 1 - wrap];
2593 c = s->coded_block[xy - wrap];
2602 *coded_block_ptr = &s->coded_block[xy];
/**
2608 * Decode one AC coefficient
2609 * @param v The VC1 context
2610 * @param last Last coefficient
2611 * @param skip How much zero coefficients to skip
2612 * @param value Decoded AC coefficient value
2613 * @param codingset set of VLC to decode data
 * Handles the regular VLC index, the two delta-table escape modes and the
 * fixed-length escape-3 mode (with lazily-initialized run/level lengths).
 */
2616 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2617 int *value, int codingset)
2619 GetBitContext *gb = &v->s.gb;
2620 int index, escape, run = 0, level = 0, lst = 0;
2622 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2623 if (index != vc1_ac_sizes[codingset] - 1) {
2624 run = vc1_index_decode_table[codingset][index][0];
2625 level = vc1_index_decode_table[codingset][index][1];
/* bitstream exhaustion also forces "last" to terminate the block */
2626 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
2630 escape = decode210(gb);
2632 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2633 run = vc1_index_decode_table[codingset][index][0];
2634 level = vc1_index_decode_table[codingset][index][1];
2635 lst = index >= vc1_last_decode_table[codingset];
2638 level += vc1_last_delta_level_table[codingset][run];
2640 level += vc1_delta_level_table[codingset][run];
2643 run += vc1_last_delta_run_table[codingset][level] + 1;
2645 run += vc1_delta_run_table[codingset][level] + 1;
/* escape mode 3: explicit run/level with sizes read once per picture */
2651 lst = get_bits1(gb);
2652 if (v->s.esc3_level_length == 0) {
2653 if (v->pq < 8 || v->dquantfrm) { // table 59
2654 v->s.esc3_level_length = get_bits(gb, 3);
2655 if (!v->s.esc3_level_length)
2656 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2657 } else { // table 60
2658 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2660 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2662 run = get_bits(gb, v->s.esc3_run_length);
2663 sign = get_bits1(gb);
2664 level = get_bits(gb, v->s.esc3_level_length);
2675 /** Decode intra block in intra frames - should be faster than decode_intra_block
2676 * @param v VC1Context
2677 * @param block block to decode
2678 * @param[in] n subblock index
2679 * @param coded are AC coeffs present or not
2680 * @param codingset set of VLC to decode data
 * Decodes the DC differential (with escape handling), adds the DC
 * prediction, then decodes/predicts the AC coefficients.
 * NOTE(review): elided listing — branch/brace lines (including the coded
 * vs. not-coded split) are missing from this excerpt.
 */
2682 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n,
2683 int coded, int codingset)
2685 GetBitContext *gb = &v->s.gb;
2686 MpegEncContext *s = &v->s;
2687 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2690 int16_t *ac_val, *ac_val2;
2693 /* Get DC differential */
2695 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2697 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2700 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2704 if (dcdiff == 119 /* ESC index value */) {
2705 /* TODO: Optimize */
2706 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2707 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2708 else dcdiff = get_bits(gb, 8);
2711 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2712 else if (v->pq == 2)
2713 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2720 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2723 /* Store the quantized DC coeff, used for prediction */
2725 block[0] = dcdiff * s->y_dc_scale;
2727 block[0] = dcdiff * s->c_dc_scale;
/* AC decoding path (block has coded AC coefficients) */
2738 int last = 0, skip, value;
2739 const uint8_t *zz_table;
2743 scale = v->pq * 2 + v->halfpq;
/* zigzag table depends on AC-prediction direction */
2747 zz_table = v->zz_8x8[2];
2749 zz_table = v->zz_8x8[3];
2751 zz_table = v->zz_8x8[1];
2753 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2755 if (dc_pred_dir) // left
2758 ac_val -= 16 * s->block_wrap[n];
2761 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2765 block[zz_table[i++]] = value;
2768 /* apply AC prediction if needed */
2770 if (dc_pred_dir) { // left
2771 for (k = 1; k < 8; k++)
2772 block[k << v->left_blk_sh] += ac_val[k];
2774 for (k = 1; k < 8; k++)
2775 block[k << v->top_blk_sh] += ac_val[k + 8];
2778 /* save AC coeffs for further prediction */
2779 for (k = 1; k < 8; k++) {
2780 ac_val2[k] = block[k << v->left_blk_sh];
2781 ac_val2[k + 8] = block[k << v->top_blk_sh];
2784 /* scale AC coeffs */
2785 for (k = 1; k < 64; k++)
2789 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2792 if (s->ac_pred) i = 63;
/* no coded ACs: still propagate predicted ACs when ac_pred is set */
2798 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2802 scale = v->pq * 2 + v->halfpq;
2803 memset(ac_val2, 0, 16 * 2);
2804 if (dc_pred_dir) { // left
2807 memcpy(ac_val2, ac_val, 8 * 2);
2809 ac_val -= 16 * s->block_wrap[n];
2811 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2814 /* apply AC prediction if needed */
2816 if (dc_pred_dir) { //left
2817 for (k = 1; k < 8; k++) {
2818 block[k << v->left_blk_sh] = ac_val[k] * scale;
2819 if (!v->pquantizer && block[k << v->left_blk_sh])
2820 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2823 for (k = 1; k < 8; k++) {
2824 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2825 if (!v->pquantizer && block[k << v->top_blk_sh])
2826 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2832 s->block_last_index[n] = i;
2837 /** Decode intra block in intra frames - should be faster than decode_intra_block
2838 * @param v VC1Context
2839 * @param block block to decode
2840 * @param[in] n subblock number
2841 * @param coded are AC coeffs present or not
2842 * @param codingset set of VLC to decode data
2843 * @param mquant quantizer value for this macroblock
 * Advanced-profile variant: per-MB quantizer (mquant), availability-aware
 * AC prediction, neighbour-quantizer rescaling (q1/q2) and interlaced
 * zigzag tables.
 * NOTE(review): elided listing — branch/brace lines are missing from this
 * excerpt.
 */
2845 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
2846 int coded, int codingset, int mquant)
2848 GetBitContext *gb = &v->s.gb;
2849 MpegEncContext *s = &v->s;
2850 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2853 int16_t *ac_val, *ac_val2;
2855 int a_avail = v->a_avail, c_avail = v->c_avail;
2856 int use_pred = s->ac_pred;
2859 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2861 /* Get DC differential */
2863 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2865 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2868 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2872 if (dcdiff == 119 /* ESC index value */) {
2873 /* TODO: Optimize */
2874 if (mquant == 1) dcdiff = get_bits(gb, 10);
2875 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2876 else dcdiff = get_bits(gb, 8);
2879 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2880 else if (mquant == 2)
2881 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2888 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2891 /* Store the quantized DC coeff, used for prediction */
2893 block[0] = dcdiff * s->y_dc_scale;
2895 block[0] = dcdiff * s->c_dc_scale;
2901 /* check if AC is needed at all */
2902 if (!a_avail && !c_avail)
2904 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2907 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2909 if (dc_pred_dir) // left
2912 ac_val -= 16 * s->block_wrap[n];
/* q1 = current MB quantizer, q2 = neighbour's (for AC rescaling below) */
2914 q1 = s->current_picture.f.qscale_table[mb_pos];
2915 if ( dc_pred_dir && c_avail && mb_pos)
2916 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2917 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2918 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2919 if ( dc_pred_dir && n == 1)
2921 if (!dc_pred_dir && n == 2)
/* AC decoding path (block has coded AC coefficients) */
2927 int last = 0, skip, value;
2928 const uint8_t *zz_table;
2932 if (!use_pred && v->fcm == ILACE_FRAME) {
2933 zz_table = v->zzi_8x8;
2935 if (!dc_pred_dir) // top
2936 zz_table = v->zz_8x8[2];
2938 zz_table = v->zz_8x8[3];
2941 if (v->fcm != ILACE_FRAME)
2942 zz_table = v->zz_8x8[1];
2944 zz_table = v->zzi_8x8;
2948 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2952 block[zz_table[i++]] = value;
2955 /* apply AC prediction if needed */
2957 /* scale predictors if needed*/
2958 if (q2 && q1 != q2) {
2959 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2960 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2962 if (dc_pred_dir) { // left
2963 for (k = 1; k < 8; k++)
2964 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2966 for (k = 1; k < 8; k++)
2967 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2970 if (dc_pred_dir) { //left
2971 for (k = 1; k < 8; k++)
2972 block[k << v->left_blk_sh] += ac_val[k];
2974 for (k = 1; k < 8; k++)
2975 block[k << v->top_blk_sh] += ac_val[k + 8];
2979 /* save AC coeffs for further prediction */
2980 for (k = 1; k < 8; k++) {
2981 ac_val2[k ] = block[k << v->left_blk_sh];
2982 ac_val2[k + 8] = block[k << v->top_blk_sh];
2985 /* scale AC coeffs */
2986 for (k = 1; k < 64; k++)
2990 block[k] += (block[k] < 0) ? -mquant : mquant;
2993 if (use_pred) i = 63;
2994 } else { // no AC coeffs
2997 memset(ac_val2, 0, 16 * 2);
2998 if (dc_pred_dir) { // left
3000 memcpy(ac_val2, ac_val, 8 * 2);
3001 if (q2 && q1 != q2) {
3002 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3003 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3004 for (k = 1; k < 8; k++)
3005 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3010 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3011 if (q2 && q1 != q2) {
3012 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3013 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3014 for (k = 1; k < 8; k++)
3015 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3020 /* apply AC prediction if needed */
3022 if (dc_pred_dir) { // left
3023 for (k = 1; k < 8; k++) {
3024 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3025 if (!v->pquantizer && block[k << v->left_blk_sh])
3026 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3029 for (k = 1; k < 8; k++) {
3030 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3031 if (!v->pquantizer && block[k << v->top_blk_sh])
3032 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3038 s->block_last_index[n] = i;
3043 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3044 * @param v VC1Context
3045 * @param block block to decode
3046 * @param[in] n subblock index
3047 * @param coded are AC coeffs present or not
3048 * @param mquant block quantizer
3049 * @param codingset set of VLC to decode data
 * Clears the block, clamps mquant, sets the DC scales, then decodes DC/AC
 * with availability-driven prediction direction and neighbour-quantizer
 * rescaling; zigzag table selection depends on v->fcm (frame coding mode).
 * NOTE(review): elided listing — branch/brace lines are missing from this
 * excerpt.
 */
3051 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
3052 int coded, int mquant, int codingset)
3054 GetBitContext *gb = &v->s.gb;
3055 MpegEncContext *s = &v->s;
3056 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3059 int16_t *ac_val, *ac_val2;
3061 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3062 int a_avail = v->a_avail, c_avail = v->c_avail;
3063 int use_pred = s->ac_pred;
3067 s->dsp.clear_block(block);
3069 /* XXX: Guard against dumb values of mquant */
3070 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3072 /* Set DC scale - y and c use the same */
3073 s->y_dc_scale = s->y_dc_scale_table[mquant];
3074 s->c_dc_scale = s->c_dc_scale_table[mquant];
3076 /* Get DC differential */
3078 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3080 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3083 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3087 if (dcdiff == 119 /* ESC index value */) {
3088 /* TODO: Optimize */
3089 if (mquant == 1) dcdiff = get_bits(gb, 10);
3090 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3091 else dcdiff = get_bits(gb, 8);
3094 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3095 else if (mquant == 2)
3096 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3103 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3106 /* Store the quantized DC coeff, used for prediction */
3109 block[0] = dcdiff * s->y_dc_scale;
3111 block[0] = dcdiff * s->c_dc_scale;
3117 /* check if AC is needed at all and adjust direction if needed */
3118 if (!a_avail) dc_pred_dir = 1;
3119 if (!c_avail) dc_pred_dir = 0;
3120 if (!a_avail && !c_avail) use_pred = 0;
3121 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3124 scale = mquant * 2 + v->halfpq;
3126 if (dc_pred_dir) //left
3129 ac_val -= 16 * s->block_wrap[n];
/* q1 = current MB quantizer, q2 = neighbour's (for AC rescaling below) */
3131 q1 = s->current_picture.f.qscale_table[mb_pos];
3132 if (dc_pred_dir && c_avail && mb_pos)
3133 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3134 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3135 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
3136 if ( dc_pred_dir && n == 1)
3138 if (!dc_pred_dir && n == 2)
3140 if (n == 3) q2 = q1;
/* AC decoding path (block has coded AC coefficients) */
3143 int last = 0, skip, value;
3147 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3151 if (v->fcm == PROGRESSIVE)
3152 block[v->zz_8x8[0][i++]] = value;
3154 if (use_pred && (v->fcm == ILACE_FRAME)) {
3155 if (!dc_pred_dir) // top
3156 block[v->zz_8x8[2][i++]] = value;
3158 block[v->zz_8x8[3][i++]] = value;
3160 block[v->zzi_8x8[i++]] = value;
3165 /* apply AC prediction if needed */
3167 /* scale predictors if needed*/
3168 if (q2 && q1 != q2) {
3169 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3170 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3172 if (dc_pred_dir) { // left
3173 for (k = 1; k < 8; k++)
3174 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3176 for (k = 1; k < 8; k++)
3177 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3180 if (dc_pred_dir) { // left
3181 for (k = 1; k < 8; k++)
3182 block[k << v->left_blk_sh] += ac_val[k];
3184 for (k = 1; k < 8; k++)
3185 block[k << v->top_blk_sh] += ac_val[k + 8];
3189 /* save AC coeffs for further prediction */
3190 for (k = 1; k < 8; k++) {
3191 ac_val2[k ] = block[k << v->left_blk_sh];
3192 ac_val2[k + 8] = block[k << v->top_blk_sh];
3195 /* scale AC coeffs */
3196 for (k = 1; k < 64; k++)
3200 block[k] += (block[k] < 0) ? -mquant : mquant;
3203 if (use_pred) i = 63;
3204 } else { // no AC coeffs
3207 memset(ac_val2, 0, 16 * 2);
3208 if (dc_pred_dir) { // left
3210 memcpy(ac_val2, ac_val, 8 * 2);
3211 if (q2 && q1 != q2) {
3212 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3213 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3214 for (k = 1; k < 8; k++)
3215 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3220 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3221 if (q2 && q1 != q2) {
3222 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3223 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3224 for (k = 1; k < 8; k++)
3225 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3230 /* apply AC prediction if needed */
3232 if (dc_pred_dir) { // left
3233 for (k = 1; k < 8; k++) {
3234 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3235 if (!v->pquantizer && block[k << v->left_blk_sh])
3236 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3239 for (k = 1; k < 8; k++) {
3240 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3241 if (!v->pquantizer && block[k << v->top_blk_sh])
3242 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3248 s->block_last_index[n] = i;
/*
 * Decode one inter-coded (P) residual block.
 * Reads the per-block transform type (8x8, 8x4, 4x8 or 4x4 sub-blocks) and a
 * sub-block coded pattern, decodes AC run/level pairs, dequantizes them with
 * 'scale' (plus a +/-mquant offset for the non-uniform quantizer), and runs
 * the matching inverse transform into dst.  The visible tail OR-s the chosen
 * transform type into *ttmb_out.
 * NOTE(review): the embedded listing numbers are non-contiguous, so several
 * original lines (branches, closing braces) are missing from this excerpt;
 * comments below describe only the visible code.
 */
3255 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n,
3256 int mquant, int ttmb, int first_block,
3257 uint8_t *dst, int linesize, int skip_block,
3260 MpegEncContext *s = &v->s;
3261 GetBitContext *gb = &s->gb;
3264 int scale, off, idx, last, skip, value;
/* low 3 bits of ttmb carry the MB-level transform type */
3265 int ttblk = ttmb & 7;
3268 s->dsp.clear_block(block);
/* per-block transform type signalled in the bitstream */
3271 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3273 if (ttblk == TT_4X4) {
3274 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3276 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3277 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3278 || (!v->res_rtm_flag && !first_block))) {
3279 subblkpat = decode012(gb);
3281 subblkpat ^= 3; // swap decoded pattern bits
3282 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3284 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
/* uniform-quantizer scale; halfpq only applies at picture-level pq */
3287 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3289 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3290 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3291 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3294 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3295 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* --- 8x8 transform: single coefficient run over the whole block --- */
3304 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
/* progressive vs. interlaced zigzag scan selection */
3309 idx = v->zz_8x8[0][i++];
3311 idx = v->zzi_8x8[i++];
3312 block[idx] = value * scale;
/* non-uniform quantizer: add mquant with the coefficient's sign */
3314 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3318 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3320 v->vc1dsp.vc1_inv_trans_8x8(block);
3321 s->dsp.add_pixels_clamped(block, dst, linesize);
/* --- 4x4 transform: up to four 4x4 sub-blocks --- */
3326 pat = ~subblkpat & 0xF;
3327 for (j = 0; j < 4; j++) {
3328 last = subblkpat & (1 << (3 - j));
3330 off = (j & 1) * 4 + (j & 2) * 16;
3332 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3337 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3339 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3340 block[idx + off] = value * scale;
3342 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3344 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3346 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3348 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* --- 8x4 transform: top/bottom halves --- */
3353 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3354 for (j = 0; j < 2; j++) {
3355 last = subblkpat & (1 << (1 - j));
3359 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3364 idx = v->zz_8x4[i++] + off;
3366 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3367 block[idx] = value * scale;
3369 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3371 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3373 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3375 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* --- 4x8 transform: left/right halves --- */
3380 pat = ~(subblkpat * 5) & 0xF;
3381 for (j = 0; j < 2; j++) {
3382 last = subblkpat & (1 << (1 - j));
3386 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3391 idx = v->zz_4x8[i++] + off;
3393 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3394 block[idx] = value * scale;
3396 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3398 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3400 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3402 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* record the transform type used, 4 bits per block index n */
3408 *ttmb_out |= ttblk << (n * 4);
3412 /** @} */ // Macroblock group
/*
 * Lookup tables indexed by the decoded MV differential index:
 * size_table gives the number of extra bits to read for each index,
 * offset_table gives the value offset added to those bits.
 */
static const int size_table[6] = {
    0, 2, 3, 4, 5, 8
};
static const int offset_table[6] = {
    0, 1, 3, 7, 15, 31
};
/*
 * Apply the in-loop deblocking filter on the horizontal (vertical-direction)
 * edges of one block of the macroblock one row above the current position.
 * block_num 0-3 are luma 8x8 blocks, 4-5 are chroma; cbp/is_intra/ttblk of
 * the neighbouring MBs decide between an 8-sample and 4-sample filter.
 * NOTE(review): embedded listing numbers are non-contiguous; some original
 * lines are missing from this excerpt.
 */
3417 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3419 MpegEncContext *s = &v->s;
/* cbp/intra info of the MB above (mb_x - mb_stride), 4 bits per block */
3420 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3421 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3422 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3423 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
/* chroma blocks (num > 3) use the chroma line size */
3424 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3427 if (block_num > 3) {
3428 dst = s->dest[block_num - 3];
3430 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
/* skip bottom MB row except for the two top luma blocks */
3432 if (s->mb_y != s->end_mb_y || block_num < 2) {
/* gather cbp/intra/MV of the block below the edge */
3436 if (block_num > 3) {
3437 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3438 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3439 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3440 mv_stride = s->mb_stride;
3442 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3443 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3444 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3445 : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3446 mv_stride = s->b8_stride;
3447 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* intra on either side, or differing MVs, forces the full 8-pel filter */
3450 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3451 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3452 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3454 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3456 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3459 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3461 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* internal edge of the block above: only for 8x4/4x4 transforms */
3466 dst -= 4 * linesize;
3467 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3468 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3469 idx = (block_cbp | (block_cbp >> 2)) & 3;
3471 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3474 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3476 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/*
 * Apply the in-loop deblocking filter on the vertical (horizontal-direction)
 * edges of one block of the macroblock one row above and one column to the
 * left of the current position.  Mirrors vc1_apply_p_v_loop_filter but for
 * left/right neighbours.
 * NOTE(review): embedded listing numbers are non-contiguous; some original
 * lines are missing from this excerpt.
 */
3481 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3483 MpegEncContext *s = &v->s;
/* cbp/intra of the MB up-left (mb_x - 1 - mb_stride) */
3484 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3485 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3486 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3487 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3488 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3491 if (block_num > 3) {
3492 dst = s->dest[block_num - 3] - 8 * linesize;
3494 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
/* filter the MB-boundary edge unless at the right picture edge */
3497 if (s->mb_x != s->mb_width || !(block_num & 5)) {
3500 if (block_num > 3) {
3501 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3502 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3503 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3505 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3506 : (mb_cbp >> ((block_num + 1) * 4));
3507 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3508 : (mb_is_intra >> ((block_num + 1) * 4));
3509 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
/* intra neighbour or differing MVs -> full 8-sample filter */
3511 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3512 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3514 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3516 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3519 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3521 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* internal vertical edge: only for 4x4/4x8 transforms */
3527 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3528 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3529 idx = (block_cbp | (block_cbp >> 1)) & 5;
3531 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3534 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3536 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/*
 * Run the P-frame in-loop deblocking filter for the current macroblock:
 * vertical-edge pass for all six blocks, then the horizontal-edge pass
 * (which lags one MB behind, see comment below), catching up at row end.
 * NOTE(review): embedded listing numbers are non-contiguous; some original
 * lines are missing from this excerpt.
 */
3541 static void vc1_apply_p_loop_filter(VC1Context *v)
3543 MpegEncContext *s = &v->s;
3546 for (i = 0; i < 6; i++) {
3547 vc1_apply_p_v_loop_filter(v, i);
3550 /* V always precedes H, therefore we run H one MB before V;
3551 * at the end of a row, we catch up to complete the row */
3553 for (i = 0; i < 6; i++) {
3554 vc1_apply_p_h_loop_filter(v, i);
3556 if (s->mb_x == s->mb_width - 1) {
3558 ff_update_block_index(s);
3559 for (i = 0; i < 6; i++) {
3560 vc1_apply_p_h_loop_filter(v, i);
3566 /** Decode one P-frame MB
/*
 * Decode one progressive P-frame macroblock.
 * Handles the four bitstream cases visible here: 1MV coded, 1MV skipped,
 * 4MV coded and 4MV skipped.  For each of the six blocks it either decodes
 * an intra block (with optional overlap smoothing) or an inter residual via
 * vc1_decode_p_block, and records per-MB cbp/transform/intra masks used by
 * the loop filter.
 * NOTE(review): embedded listing numbers are non-contiguous; some original
 * lines (else branches, closing braces, GET_MQUANT calls) are missing from
 * this excerpt, so comments describe only the visible code.
 */
3568 static int vc1_decode_p_mb(VC1Context *v)
3570 MpegEncContext *s = &v->s;
3571 GetBitContext *gb = &s->gb;
3573 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3574 int cbp; /* cbp decoding stuff */
3575 int mqdiff, mquant; /* MB quantization */
3576 int ttmb = v->ttfrm; /* MB Transform type */
3578 int mb_has_coeffs = 1; /* last_flag */
3579 int dmv_x, dmv_y; /* Differential MV components */
3580 int index, index1; /* LUT indexes */
3581 int val, sign; /* temp values */
3582 int first_block = 1;
3584 int skipped, fourmv;
/* per-MB masks accumulated for the loop filter (4 bits per block) */
3585 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3587 mquant = v->pq; /* lossy initialization */
/* MVTYPEMB and SKIPMB are either raw bits or bitplane-coded */
3589 if (v->mv_type_is_raw)
3590 fourmv = get_bits1(gb);
3592 fourmv = v->mv_type_mb_plane[mb_pos];
3594 skipped = get_bits1(gb);
3596 skipped = v->s.mbskip_table[mb_pos];
3598 if (!fourmv) { /* 1MV mode */
3600 GET_MVDATA(dmv_x, dmv_y);
3603 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3604 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3606 s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3607 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3609 /* FIXME Set DC val for inter block ? */
3610 if (s->mb_intra && !mb_has_coeffs) {
3612 s->ac_pred = get_bits1(gb);
3614 } else if (mb_has_coeffs) {
3616 s->ac_pred = get_bits1(gb);
3617 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3623 s->current_picture.f.qscale_table[mb_pos] = mquant;
3625 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3626 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3627 VC1_TTMB_VLC_BITS, 2);
3628 if (!s->mb_intra) vc1_mc_1mv(v, 0);
/* per-block decode loop: blocks 0-3 luma, 4-5 chroma */
3630 for (i = 0; i < 6; i++) {
3631 s->dc_val[0][s->block_index[i]] = 0;
3633 val = ((cbp >> (5 - i)) & 1);
3634 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3635 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3637 /* check if prediction blocks A and C are available */
3638 v->a_avail = v->c_avail = 0;
3639 if (i == 2 || i == 3 || !s->first_slice_line)
3640 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3641 if (i == 1 || i == 3 || s->mb_x)
3642 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3644 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3645 (i & 4) ? v->codingset2 : v->codingset);
3646 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3648 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3650 for (j = 0; j < 64; j++)
3651 s->block[i][j] <<= 1;
3652 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* overlap smoothing for high-quant intra blocks */
3653 if (v->pq >= 9 && v->overlap) {
3655 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3657 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3659 block_cbp |= 0xF << (i << 2);
3660 block_intra |= 1 << i;
3662 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3663 s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3664 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3665 block_cbp |= pat << (i << 2);
3666 if (!v->ttmbf && ttmb < 8)
/* 1MV skipped MB: zero MVs predicted, no residual */
3673 for (i = 0; i < 6; i++) {
3674 v->mb_type[0][s->block_index[i]] = 0;
3675 s->dc_val[0][s->block_index[i]] = 0;
3677 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3678 s->current_picture.f.qscale_table[mb_pos] = 0;
3679 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3682 } else { // 4MV mode
3683 if (!skipped /* unskipped MB */) {
3684 int intra_count = 0, coded_inter = 0;
3685 int is_intra[6], is_coded[6];
3687 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* first pass: decode the four luma MVs and classify blocks */
3688 for (i = 0; i < 6; i++) {
3689 val = ((cbp >> (5 - i)) & 1);
3690 s->dc_val[0][s->block_index[i]] = 0;
3697 GET_MVDATA(dmv_x, dmv_y);
3699 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3701 vc1_mc_4mv_luma(v, i, 0);
3702 intra_count += s->mb_intra;
3703 is_intra[i] = s->mb_intra;
3704 is_coded[i] = mb_has_coeffs;
/* chroma blocks are intra when >= 3 luma blocks are intra */
3707 is_intra[i] = (intra_count >= 3);
3711 vc1_mc_4mv_chroma(v, 0);
3712 v->mb_type[0][s->block_index[i]] = is_intra[i];
3714 coded_inter = !is_intra[i] & is_coded[i];
3716 // if there are no coded blocks then don't do anything more
3718 if (!intra_count && !coded_inter)
3721 s->current_picture.f.qscale_table[mb_pos] = mquant;
3722 /* test if block is intra and has pred */
3725 for (i = 0; i < 6; i++)
3727 if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3728 || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3734 s->ac_pred = get_bits1(gb);
3738 if (!v->ttmbf && coded_inter)
3739 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* second pass: decode residuals per block */
3740 for (i = 0; i < 6; i++) {
3742 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3743 s->mb_intra = is_intra[i];
3745 /* check if prediction blocks A and C are available */
3746 v->a_avail = v->c_avail = 0;
3747 if (i == 2 || i == 3 || !s->first_slice_line)
3748 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3749 if (i == 1 || i == 3 || s->mb_x)
3750 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3752 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3753 (i & 4) ? v->codingset2 : v->codingset);
3754 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3756 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3758 for (j = 0; j < 64; j++)
3759 s->block[i][j] <<= 1;
3760 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3761 (i & 4) ? s->uvlinesize : s->linesize);
3762 if (v->pq >= 9 && v->overlap) {
3764 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3766 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3768 block_cbp |= 0xF << (i << 2);
3769 block_intra |= 1 << i;
3770 } else if (is_coded[i]) {
3771 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3772 first_block, s->dest[dst_idx] + off,
3773 (i & 4) ? s->uvlinesize : s->linesize,
3774 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3776 block_cbp |= pat << (i << 2);
3777 if (!v->ttmbf && ttmb < 8)
3782 } else { // skipped MB
3784 s->current_picture.f.qscale_table[mb_pos] = 0;
3785 for (i = 0; i < 6; i++) {
3786 v->mb_type[0][s->block_index[i]] = 0;
3787 s->dc_val[0][s->block_index[i]] = 0;
/* 4MV skip: zero differentials, still predict and compensate */
3789 for (i = 0; i < 4; i++) {
3790 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3791 vc1_mc_4mv_luma(v, i, 0);
3793 vc1_mc_4mv_chroma(v, 0);
3794 s->current_picture.f.qscale_table[mb_pos] = 0;
/* store per-MB state consumed by the deferred loop filter */
3798 v->cbp[s->mb_x] = block_cbp;
3799 v->ttblk[s->mb_x] = block_tt;
3800 v->is_intra[s->mb_x] = block_intra;
3805 /* Decode one macroblock in an interlaced frame p picture */
/*
 * Decode one macroblock of an interlaced-frame P picture.
 * Reads the MB mode (1MV / 2MV field / 4MV / 4MV field / intra), records the
 * per-block MV type in blk_mv_type, then decodes either an intra MB (with
 * FIELDTX-dependent destination strides) or an inter MB (MVs + residuals).
 * NOTE(review): embedded listing numbers are non-contiguous; some original
 * lines (break statements, else branches, closing braces) are missing from
 * this excerpt, so comments describe only the visible code.
 */
3807 static int vc1_decode_p_mb_intfr(VC1Context *v)
3809 MpegEncContext *s = &v->s;
3810 GetBitContext *gb = &s->gb;
3812 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3813 int cbp = 0; /* cbp decoding stuff */
3814 int mqdiff, mquant; /* MB quantization */
3815 int ttmb = v->ttfrm; /* MB Transform type */
3817 int mb_has_coeffs = 1; /* last_flag */
3818 int dmv_x, dmv_y; /* Differential MV components */
3819 int val; /* temp value */
3820 int first_block = 1;
3822 int skipped, fourmv = 0, twomv = 0;
3823 int block_cbp = 0, pat, block_tt = 0;
3824 int idx_mbmode = 0, mvbp;
3825 int stride_y, fieldtx;
3827 mquant = v->pq; /* Loosy initialization */
3830 skipped = get_bits1(gb);
3832 skipped = v->s.mbskip_table[mb_pos];
/* MBMODE table depends on the 4MVSWITCH flag */
3834 if (v->fourmvswitch)
3835 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3837 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3838 switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3839 /* store the motion vector type in a flag (useful later) */
3840 case MV_PMODE_INTFR_4MV:
3842 v->blk_mv_type[s->block_index[0]] = 0;
3843 v->blk_mv_type[s->block_index[1]] = 0;
3844 v->blk_mv_type[s->block_index[2]] = 0;
3845 v->blk_mv_type[s->block_index[3]] = 0;
3847 case MV_PMODE_INTFR_4MV_FIELD:
3849 v->blk_mv_type[s->block_index[0]] = 1;
3850 v->blk_mv_type[s->block_index[1]] = 1;
3851 v->blk_mv_type[s->block_index[2]] = 1;
3852 v->blk_mv_type[s->block_index[3]] = 1;
3854 case MV_PMODE_INTFR_2MV_FIELD:
3856 v->blk_mv_type[s->block_index[0]] = 1;
3857 v->blk_mv_type[s->block_index[1]] = 1;
3858 v->blk_mv_type[s->block_index[2]] = 1;
3859 v->blk_mv_type[s->block_index[3]] = 1;
3861 case MV_PMODE_INTFR_1MV:
3862 v->blk_mv_type[s->block_index[0]] = 0;
3863 v->blk_mv_type[s->block_index[1]] = 0;
3864 v->blk_mv_type[s->block_index[2]] = 0;
3865 v->blk_mv_type[s->block_index[3]] = 0;
3868 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3869 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3870 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3871 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3872 s->mb_intra = v->is_intra[s->mb_x] = 1;
3873 for (i = 0; i < 6; i++)
3874 v->mb_type[0][s->block_index[i]] = 1;
/* FIELDTX: intra MB rows may be field-interleaved */
3875 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3876 mb_has_coeffs = get_bits1(gb);
3878 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3879 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3881 s->current_picture.f.qscale_table[mb_pos] = mquant;
3882 /* Set DC scale - y and c use the same (not sure if necessary here) */
3883 s->y_dc_scale = s->y_dc_scale_table[mquant];
3884 s->c_dc_scale = s->c_dc_scale_table[mquant];
3886 for (i = 0; i < 6; i++) {
3887 s->dc_val[0][s->block_index[i]] = 0;
3889 val = ((cbp >> (5 - i)) & 1);
3890 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3891 v->a_avail = v->c_avail = 0;
3892 if (i == 2 || i == 3 || !s->first_slice_line)
3893 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3894 if (i == 1 || i == 3 || s->mb_x)
3895 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3897 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3898 (i & 4) ? v->codingset2 : v->codingset);
3899 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3900 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* luma stride doubles (and offsets change) when FIELDTX is set */
3902 stride_y = s->linesize << fieldtx;
3903 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3905 stride_y = s->uvlinesize;
3908 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3912 } else { // inter MB
3913 mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3915 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* per-mode MV block pattern (which of the 2/4 MVs are coded) */
3916 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3917 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3919 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3920 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3921 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3924 s->mb_intra = v->is_intra[s->mb_x] = 0;
3925 for (i = 0; i < 6; i++)
3926 v->mb_type[0][s->block_index[i]] = 0;
3927 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3928 /* for all motion vector read MVDATA and motion compensate each block */
3932 for (i = 0; i < 6; i++) {
3935 val = ((mvbp >> (3 - i)) & 1);
3937 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3939 vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3940 vc1_mc_4mv_luma(v, i, 0);
3941 } else if (i == 4) {
3942 vc1_mc_4mv_chroma4(v);
/* 2MV field mode: one MV per field pair (blocks 0/1 and 2/3) */
3949 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3951 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3952 vc1_mc_4mv_luma(v, 0, 0);
3953 vc1_mc_4mv_luma(v, 1, 0);
3956 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3958 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3959 vc1_mc_4mv_luma(v, 2, 0);
3960 vc1_mc_4mv_luma(v, 3, 0);
3961 vc1_mc_4mv_chroma4(v);
/* 1MV mode: single MV for the whole MB */
3963 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3966 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3968 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3972 GET_MQUANT(); // p. 227
3973 s->current_picture.f.qscale_table[mb_pos] = mquant;
3974 if (!v->ttmbf && cbp)
3975 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* residual decode for the six blocks */
3976 for (i = 0; i < 6; i++) {
3977 s->dc_val[0][s->block_index[i]] = 0;
3979 val = ((cbp >> (5 - i)) & 1);
3981 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3983 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3985 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3986 first_block, s->dest[dst_idx] + off,
3987 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3988 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3989 block_cbp |= pat << (i << 2);
3990 if (!v->ttmbf && ttmb < 8)
/* skipped MB: zero MVs/state, predict-only */
3997 s->mb_intra = v->is_intra[s->mb_x] = 0;
3998 for (i = 0; i < 6; i++) {
3999 v->mb_type[0][s->block_index[i]] = 0;
4000 s->dc_val[0][s->block_index[i]] = 0;
4002 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
4003 s->current_picture.f.qscale_table[mb_pos] = 0;
4004 v->blk_mv_type[s->block_index[0]] = 0;
4005 v->blk_mv_type[s->block_index[1]] = 0;
4006 v->blk_mv_type[s->block_index[2]] = 0;
4007 v->blk_mv_type[s->block_index[3]] = 0;
4008 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
/* roll the intra map into the base row at the end of each MB row */
4011 if (s->mb_x == s->mb_width - 1)
4012 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/*
 * Decode one macroblock of an interlaced-field P picture.
 * idx_mbmode <= 1 selects an intra MB; otherwise 1MV (idx <= 5) or 4MV
 * inter decoding with interlaced MV differentials and field-offset
 * destination addressing (cur_field_type adds one line of offset).
 * NOTE(review): embedded listing numbers are non-contiguous; some original
 * lines are missing from this excerpt, so comments describe only the
 * visible code.
 */
4016 static int vc1_decode_p_mb_intfi(VC1Context *v)
4018 MpegEncContext *s = &v->s;
4019 GetBitContext *gb = &s->gb;
4021 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4022 int cbp = 0; /* cbp decoding stuff */
4023 int mqdiff, mquant; /* MB quantization */
4024 int ttmb = v->ttfrm; /* MB Transform type */
4026 int mb_has_coeffs = 1; /* last_flag */
4027 int dmv_x, dmv_y; /* Differential MV components */
4028 int val; /* temp values */
4029 int first_block = 1;
4032 int block_cbp = 0, pat, block_tt = 0;
4035 mquant = v->pq; /* Loosy initialization */
4037 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4038 if (idx_mbmode <= 1) { // intra MB
4039 s->mb_intra = v->is_intra[s->mb_x] = 1;
4040 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4041 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4042 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4044 s->current_picture.f.qscale_table[mb_pos] = mquant;
4045 /* Set DC scale - y and c use the same (not sure if necessary here) */
4046 s->y_dc_scale = s->y_dc_scale_table[mquant];
4047 s->c_dc_scale = s->c_dc_scale_table[mquant];
4048 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4049 mb_has_coeffs = idx_mbmode & 1;
4051 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4053 for (i = 0; i < 6; i++) {
4054 s->dc_val[0][s->block_index[i]] = 0;
4055 v->mb_type[0][s->block_index[i]] = 1;
4057 val = ((cbp >> (5 - i)) & 1);
4058 v->a_avail = v->c_avail = 0;
4059 if (i == 2 || i == 3 || !s->first_slice_line)
4060 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4061 if (i == 1 || i == 3 || s->mb_x)
4062 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4064 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4065 (i & 4) ? v->codingset2 : v->codingset);
4066 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4068 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4069 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
/* bottom field starts one picture line into the frame buffer */
4070 off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4071 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4072 // TODO: loop filter
4075 s->mb_intra = v->is_intra[s->mb_x] = 0;
4076 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4077 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4078 if (idx_mbmode <= 5) { // 1-MV
4080 if (idx_mbmode & 1) {
4081 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4083 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4085 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: a block pattern marks which luma MVs are present */
4087 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4088 for (i = 0; i < 6; i++) {
4090 dmv_x = dmv_y = pred_flag = 0;
4091 val = ((v->fourmvbp >> (3 - i)) & 1);
4093 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4095 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4096 vc1_mc_4mv_luma(v, i, 0);
4098 vc1_mc_4mv_chroma(v, 0);
4100 mb_has_coeffs = idx_mbmode & 1;
4103 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4107 s->current_picture.f.qscale_table[mb_pos] = mquant;
4108 if (!v->ttmbf && cbp) {
4109 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4112 for (i = 0; i < 6; i++) {
4113 s->dc_val[0][s->block_index[i]] = 0;
4115 val = ((cbp >> (5 - i)) & 1);
4116 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4117 if (v->cur_field_type)
4118 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4120 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4121 first_block, s->dest[dst_idx] + off,
4122 (i & 4) ? s->uvlinesize : s->linesize,
4123 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4125 block_cbp |= pat << (i << 2);
4126 if (!v->ttmbf && ttmb < 8) ttmb = -1;
/* roll intra map into the base row at end of MB row */
4131 if (s->mb_x == s->mb_width - 1)
4132 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4136 /** Decode one B-frame MB (in Main profile)
/*
 * Decode one progressive B-frame macroblock (Main profile).
 * Handles direct vs. explicit MV mode, the BMV type (forward / backward /
 * interpolated, chosen with help of BFRACTION for skipped direct MBs),
 * then per-block intra or inter residual decoding as in the P-frame path.
 * NOTE(review): embedded listing numbers are non-contiguous; some original
 * lines (else branches, closing braces) are missing from this excerpt, so
 * comments describe only the visible code.
 */
4138 static void vc1_decode_b_mb(VC1Context *v)
4140 MpegEncContext *s = &v->s;
4141 GetBitContext *gb = &s->gb;
4143 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4144 int cbp = 0; /* cbp decoding stuff */
4145 int mqdiff, mquant; /* MB quantization */
4146 int ttmb = v->ttfrm; /* MB Transform type */
4147 int mb_has_coeffs = 0; /* last_flag */
4148 int index, index1; /* LUT indexes */
4149 int val, sign; /* temp values */
4150 int first_block = 1;
4152 int skipped, direct;
/* dmv_[xy][0] = forward, [1] = backward differential MV */
4153 int dmv_x[2], dmv_y[2];
4154 int bmvtype = BMV_TYPE_BACKWARD;
4156 mquant = v->pq; /* lossy initialization */
/* DIRECTMB and SKIPMB: raw bits or bitplane-coded */
4160 direct = get_bits1(gb);
4162 direct = v->direct_mb_plane[mb_pos];
4164 skipped = get_bits1(gb);
4166 skipped = v->s.mbskip_table[mb_pos];
4168 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4169 for (i = 0; i < 6; i++) {
4170 v->mb_type[0][s->block_index[i]] = 0;
4171 s->dc_val[0][s->block_index[i]] = 0;
4173 s->current_picture.f.qscale_table[mb_pos] = 0;
4177 GET_MVDATA(dmv_x[0], dmv_y[0]);
4178 dmv_x[1] = dmv_x[0];
4179 dmv_y[1] = dmv_y[0];
/* non-direct MBs carry an explicit BMV type */
4181 if (skipped || !s->mb_intra) {
4182 bmvtype = decode012(gb);
/* for skipped MBs BFRACTION picks the dominant direction */
4185 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4188 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4191 bmvtype = BMV_TYPE_INTERPOLATED;
4192 dmv_x[0] = dmv_y[0] = 0;
4196 for (i = 0; i < 6; i++)
4197 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4201 bmvtype = BMV_TYPE_INTERPOLATED;
4202 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4203 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4207 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4210 s->current_picture.f.qscale_table[mb_pos] = mquant;
4212 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* direct mode: MVs come from scaling, differentials are zero */
4213 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4214 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4215 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4217 if (!mb_has_coeffs && !s->mb_intra) {
4218 /* no coded blocks - effectively skipped */
4219 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4220 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4223 if (s->mb_intra && !mb_has_coeffs) {
4225 s->current_picture.f.qscale_table[mb_pos] = mquant;
4226 s->ac_pred = get_bits1(gb);
4228 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* interpolated mode reads a second (backward) MV differential */
4230 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4231 GET_MVDATA(dmv_x[0], dmv_y[0]);
4232 if (!mb_has_coeffs) {
4233 /* interpolated skipped block */
4234 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4235 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4239 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4241 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4244 s->ac_pred = get_bits1(gb);
4245 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4247 s->current_picture.f.qscale_table[mb_pos] = mquant;
4248 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4249 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* per-block intra/inter residual decoding, as in the P-frame path */
4253 for (i = 0; i < 6; i++) {
4254 s->dc_val[0][s->block_index[i]] = 0;
4256 val = ((cbp >> (5 - i)) & 1);
4257 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4258 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4260 /* check if prediction blocks A and C are available */
4261 v->a_avail = v->c_avail = 0;
4262 if (i == 2 || i == 3 || !s->first_slice_line)
4263 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4264 if (i == 1 || i == 3 || s->mb_x)
4265 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4267 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4268 (i & 4) ? v->codingset2 : v->codingset);
4269 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4271 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4273 for (j = 0; j < 64; j++)
4274 s->block[i][j] <<= 1;
4275 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* B frames pass NULL for ttmb_out: no loop-filter bookkeeping */
4277 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4278 first_block, s->dest[dst_idx] + off,
4279 (i & 4) ? s->uvlinesize : s->linesize,
4280 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4281 if (!v->ttmbf && ttmb < 8)
4288 /** Decode one B-frame MB (in interlaced field B picture)
4290 static void vc1_decode_b_mb_intfi(VC1Context *v)
4292 MpegEncContext *s = &v->s;
4293 GetBitContext *gb = &s->gb;
4295 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4296 int cbp = 0; /* cbp decoding stuff */
4297 int mqdiff, mquant; /* MB quantization */
4298 int ttmb = v->ttfrm; /* MB Transform type */
4299 int mb_has_coeffs = 0; /* last_flag */
4300 int val; /* temp value */
4301 int first_block = 1;
4304 int dmv_x[2], dmv_y[2], pred_flag[2];
4305 int bmvtype = BMV_TYPE_BACKWARD;
4306 int idx_mbmode, interpmvp;
4308 mquant = v->pq; /* Loosy initialization */
4311 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4312 if (idx_mbmode <= 1) { // intra MB
4313 s->mb_intra = v->is_intra[s->mb_x] = 1;
4314 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4315 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4316 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4318 s->current_picture.f.qscale_table[mb_pos] = mquant;
4319 /* Set DC scale - y and c use the same (not sure if necessary here) */
4320 s->y_dc_scale = s->y_dc_scale_table[mquant];
4321 s->c_dc_scale = s->c_dc_scale_table[mquant];
4322 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4323 mb_has_coeffs = idx_mbmode & 1;
4325 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4327 for (i = 0; i < 6; i++) {
4328 s->dc_val[0][s->block_index[i]] = 0;
4330 val = ((cbp >> (5 - i)) & 1);
4331 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4332 v->a_avail = v->c_avail = 0;
4333 if (i == 2 || i == 3 || !s->first_slice_line)
4334 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4335 if (i == 1 || i == 3 || s->mb_x)
4336 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4338 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4339 (i & 4) ? v->codingset2 : v->codingset);
4340 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4342 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4344 for (j = 0; j < 64; j++)
4345 s->block[i][j] <<= 1;
4346 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4347 off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4348 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4349 // TODO: yet to perform loop filter
4352 s->mb_intra = v->is_intra[s->mb_x] = 0;
4353 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4354 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4356 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4358 fwd = v->forward_mb_plane[mb_pos];
4359 if (idx_mbmode <= 5) { // 1-MV
4360 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4361 pred_flag[0] = pred_flag[1] = 0;
4363 bmvtype = BMV_TYPE_FORWARD;
4365 bmvtype = decode012(gb);
4368 bmvtype = BMV_TYPE_BACKWARD;
4371 bmvtype = BMV_TYPE_DIRECT;
4374 bmvtype = BMV_TYPE_INTERPOLATED;
4375 interpmvp = get_bits1(gb);
4378 v->bmvtype = bmvtype;
4379 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4380 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4382 if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4383 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4385 if (bmvtype == BMV_TYPE_DIRECT) {
4386 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4387 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4389 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4390 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4391 mb_has_coeffs = !(idx_mbmode & 2);
4394 bmvtype = BMV_TYPE_FORWARD;
4395 v->bmvtype = bmvtype;
4396 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4397 for (i = 0; i < 6; i++) {
4399 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4400 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4401 val = ((v->fourmvbp >> (3 - i)) & 1);
4403 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4404 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4405 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4407 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4408 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4410 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4412 mb_has_coeffs = idx_mbmode & 1;
4415 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4419 s->current_picture.f.qscale_table[mb_pos] = mquant;
4420 if (!v->ttmbf && cbp) {
4421 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4424 for (i = 0; i < 6; i++) {
4425 s->dc_val[0][s->block_index[i]] = 0;
4427 val = ((cbp >> (5 - i)) & 1);
4428 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4429 if (v->cur_field_type)
4430 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4432 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4433 first_block, s->dest[dst_idx] + off,
4434 (i & 4) ? s->uvlinesize : s->linesize,
4435 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4436 if (!v->ttmbf && ttmb < 8)
4444 /** Decode blocks of I-frame
/* NOTE(review): this excerpt has interior lines elided (unbalanced braces,
 * missing 'case' labels/breaks); comments below describe only what is visible.
 * Decodes all macroblocks of a simple/main-profile I-frame: per-MB CBP and
 * AC-prediction flag, 6 intra blocks each, optional overlap filtering and
 * in-loop deblocking, with error-resilience bookkeeping. */
4446 static void vc1_decode_i_blocks(VC1Context *v)
4449 MpegEncContext *s = &v->s;
/* Pick intra (luma) AC coding set from y_ac_table_index and pqindex. */
4454 /* select codingmode used for VLC tables selection */
4455 switch (v->y_ac_table_index) {
4457 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4460 v->codingset = CS_HIGH_MOT_INTRA;
4463 v->codingset = CS_MID_RATE_INTRA;
/* Pick chroma/inter coding set from c_ac_table_index. */
4467 switch (v->c_ac_table_index) {
4469 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4472 v->codingset2 = CS_HIGH_MOT_INTER;
4475 v->codingset2 = CS_MID_RATE_INTER;
4479 /* Set DC scale - y and c use the same */
4480 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4481 s->c_dc_scale = s->c_dc_scale_table[v->pq];
/* Walk the whole picture one MB row at a time. */
4484 s->mb_x = s->mb_y = 0;
4486 s->first_slice_line = 1;
4487 for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4489 ff_init_block_index(s);
4490 for (; s->mb_x < s->mb_width; s->mb_x++) {
4492 ff_update_block_index(s);
/* Destination pointers for the 4 luma 8x8 blocks and 2 chroma blocks. */
4493 dst[0] = s->dest[0];
4494 dst[1] = dst[0] + 8;
4495 dst[2] = s->dest[0] + s->linesize * 8;
4496 dst[3] = dst[2] + 8;
4497 dst[4] = s->dest[1];
4498 dst[5] = s->dest[2];
4499 s->dsp.clear_blocks(s->block[0]);
4500 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4501 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4502 s->current_picture.f.qscale_table[mb_pos] = v->pq;
4503 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4504 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4506 // do actual MB decoding and displaying
4507 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4508 v->s.ac_pred = get_bits1(&v->s.gb);
/* Decode the six 8x8 blocks of this intra MB. */
4510 for (k = 0; k < 6; k++) {
4511 val = ((cbp >> (5 - k)) & 1);
4514 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4518 cbp |= val << (5 - k);
4520 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
/* k > 3 are the chroma blocks; skipped in grayscale-only mode. */
4522 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4524 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
/* Overlap smoothing path: coefficients are doubled before clamping. */
4525 if (v->pq >= 9 && v->overlap) {
4527 for (j = 0; j < 64; j++)
4528 s->block[k][j] <<= 1;
4529 s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4532 for (j = 0; j < 64; j++)
4533 s->block[k][j] = (s->block[k][j] - 64) << 1;
4534 s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
/* Overlap (smoothing) filter across 8x8 block edges, horizontal then
 * vertical; vertical edges only once past the first slice line. */
4538 if (v->pq >= 9 && v->overlap) {
4540 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4541 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4542 if (!(s->flags & CODEC_FLAG_GRAY)) {
4543 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4544 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4547 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4548 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4549 if (!s->first_slice_line) {
4550 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4551 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4552 if (!(s->flags & CODEC_FLAG_GRAY)) {
4553 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4554 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4557 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4558 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4560 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* Bitstream sanity check: abort the row loop on overconsumption. */
4562 if (get_bits_count(&s->gb) > v->bits) {
4563 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4564 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4565 get_bits_count(&s->gb), v->bits);
/* Row finished: report the decoded band (delayed by one row when the
 * in-loop filter is active, since that row is still being modified). */
4569 if (!v->s.loop_filter)
4570 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4572 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4574 s->first_slice_line = 0;
4576 if (v->s.loop_filter)
4577 ff_draw_horiz_band(s, (s->mb_height - 1) * 16, 16);
4578 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4581 /** Decode blocks of I-frame for advanced profile
/* NOTE(review): interior lines are elided in this excerpt; comments describe
 * only the visible logic. Advanced-profile variant: supports slices
 * (start_mb_y/end_mb_y), raw bitplanes (fieldtx/acpred/overflags), per-MB
 * quantizer, and delayed overlap/loop filtering via v->block ring. */
4583 static void vc1_decode_i_blocks_adv(VC1Context *v)
4586 MpegEncContext *s = &v->s;
4592 GetBitContext *gb = &s->gb;
/* Luma AC coding set from y_ac_table_index; chroma set from c_ac_table_index. */
4594 /* select codingmode used for VLC tables selection */
4595 switch (v->y_ac_table_index) {
4597 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4600 v->codingset = CS_HIGH_MOT_INTRA;
4603 v->codingset = CS_MID_RATE_INTRA;
4607 switch (v->c_ac_table_index) {
4609 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4612 v->codingset2 = CS_HIGH_MOT_INTER;
4615 v->codingset2 = CS_MID_RATE_INTER;
4620 s->mb_x = s->mb_y = 0;
4622 s->first_slice_line = 1;
4623 s->mb_y = s->start_mb_y;
/* Mid-picture slice start: clear the coded-block predictors of the row
 * above so DC/CBP prediction does not leak across the slice boundary. */
4624 if (s->start_mb_y) {
4626 ff_init_block_index(s);
4627 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4628 (1 + s->b8_stride) * sizeof(*s->coded_block));
4630 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4632 ff_init_block_index(s);
4633 for (;s->mb_x < s->mb_width; s->mb_x++) {
/* Blocks are decoded into a small ring (v->block) so overlap filtering
 * and output can be delayed by one MB. */
4634 DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
4635 ff_update_block_index(s);
4636 s->dsp.clear_blocks(block[0]);
4637 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4638 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4639 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4640 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4642 // do actual MB decoding and displaying
/* Raw (per-MB in the bitstream) bitplanes, otherwise use decoded planes. */
4643 if (v->fieldtx_is_raw)
4644 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4645 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4646 if ( v->acpred_is_raw)
4647 v->s.ac_pred = get_bits1(&v->s.gb);
4649 v->s.ac_pred = v->acpred_plane[mb_pos];
4651 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4652 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
/* Per-MB quantizer (mquant); DC scale follows it for both components. */
4656 s->current_picture.f.qscale_table[mb_pos] = mquant;
4657 /* Set DC scale - y and c use the same */
4658 s->y_dc_scale = s->y_dc_scale_table[mquant];
4659 s->c_dc_scale = s->c_dc_scale_table[mquant];
4661 for (k = 0; k < 6; k++) {
4662 val = ((cbp >> (5 - k)) & 1);
4665 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4669 cbp |= val << (5 - k);
/* Neighbor availability for AC prediction: blocks 2/3 always have a
 * top neighbor inside the MB; blocks 1/3 always have a left one. */
4671 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4672 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4674 vc1_decode_i_block_adv(v, block[k], k, val,
4675 (k < 4) ? v->codingset : v->codingset2, mquant);
4677 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4679 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
/* Delayed smoothing/output pipeline over the MB ring. */
4682 vc1_smooth_overlap_filter_iblk(v);
4683 vc1_put_signed_blocks_clamped(v);
4684 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4686 if (get_bits_count(&s->gb) > v->bits) {
4687 // TODO: may need modification to handle slice coding
4688 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4689 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4690 get_bits_count(&s->gb), v->bits);
4694 if (!v->s.loop_filter)
4695 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4697 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4698 s->first_slice_line = 0;
/* Flush the last delayed MB row to the picture. */
4701 /* raw bottom MB row */
4703 ff_init_block_index(s);
4704 for (;s->mb_x < s->mb_width; s->mb_x++) {
4705 ff_update_block_index(s);
4706 vc1_put_signed_blocks_clamped(v);
4707 if (v->s.loop_filter)
4708 vc1_loop_filter_iblk_delayed(v, v->pq);
4710 if (v->s.loop_filter)
4711 ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
4712 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4713 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/* Decode all macroblocks of a P-frame (or P-field/P-frame-interlaced),
 * dispatching per-MB work by frame coding mode (fcm) and applying the
 * in-loop deblocking filter with a one-row delay for progressive content.
 * NOTE(review): interior lines are elided in this excerpt. */
4716 static void vc1_decode_p_blocks(VC1Context *v)
4718 MpegEncContext *s = &v->s;
4719 int apply_loop_filter;
4721 /* select codingmode used for VLC tables selection */
/* Both coding sets are derived from c_ac_table_index for P-frames
 * (unlike I-frames, which use y_ac_table_index for the first set). */
4722 switch (v->c_ac_table_index) {
4724 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4727 v->codingset = CS_HIGH_MOT_INTRA;
4730 v->codingset = CS_MID_RATE_INTRA;
4734 switch (v->c_ac_table_index) {
4736 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4739 v->codingset2 = CS_HIGH_MOT_INTER;
4742 v->codingset2 = CS_MID_RATE_INTER;
/* Honor avctx->skip_loop_filter: only deblock when decoding keeps it. */
4746 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4747 s->first_slice_line = 1;
/* cbp_base holds two rows (current + previous) used by the delayed filter. */
4748 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4749 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4751 ff_init_block_index(s);
4752 for (; s->mb_x < s->mb_width; s->mb_x++) {
4753 ff_update_block_index(s);
/* Dispatch on coding mode: field-interlaced, frame-interlaced, progressive. */
4755 if (v->fcm == ILACE_FIELD)
4756 vc1_decode_p_mb_intfi(v);
4757 else if (v->fcm == ILACE_FRAME)
4758 vc1_decode_p_mb_intfr(v);
4759 else vc1_decode_p_mb(v);
/* Deblocking lags one MB row so the row above is complete. */
4760 if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
4761 vc1_apply_p_loop_filter(v);
4762 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4763 // TODO: may need modification to handle slice coding
4764 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4765 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4766 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
/* Shift the per-row side info (cbp/ttblk/is_intra/luma_mv) so the row just
 * decoded becomes the "previous row" for the delayed filter pass. */
4770 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4771 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4772 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4773 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4774 if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4775 s->first_slice_line = 0;
/* Filter the last (still-pending) MB row. */
4777 if (apply_loop_filter) {
4779 ff_init_block_index(s);
4780 for (; s->mb_x < s->mb_width; s->mb_x++) {
4781 ff_update_block_index(s);
4782 vc1_apply_p_loop_filter(v);
4785 if (s->end_mb_y >= s->start_mb_y)
4786 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4787 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4788 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/* Decode all macroblocks of a B-frame. Structure mirrors
 * vc1_decode_p_blocks: coding-set selection, MB loop dispatched on fcm,
 * bit-overrun guard, and band reporting with a one-row delay when the
 * in-loop filter is on. NOTE(review): interior lines (other fcm branches,
 * case labels) are elided in this excerpt. */
4791 static void vc1_decode_b_blocks(VC1Context *v)
4793 MpegEncContext *s = &v->s;
4795 /* select codingmode used for VLC tables selection */
/* Both coding sets come from c_ac_table_index for B-frames. */
4796 switch (v->c_ac_table_index) {
4798 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4801 v->codingset = CS_HIGH_MOT_INTRA;
4804 v->codingset = CS_MID_RATE_INTRA;
4808 switch (v->c_ac_table_index) {
4810 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4813 v->codingset2 = CS_HIGH_MOT_INTER;
4816 v->codingset2 = CS_MID_RATE_INTER;
4820 s->first_slice_line = 1;
4821 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4823 ff_init_block_index(s);
4824 for (; s->mb_x < s->mb_width; s->mb_x++) {
4825 ff_update_block_index(s);
/* Field-interlaced B-MB path; other fcm branches not visible here. */
4827 if (v->fcm == ILACE_FIELD)
4828 vc1_decode_b_mb_intfi(v);
4831 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4832 // TODO: may need modification to handle slice coding
4833 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4834 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4835 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4838 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* Band reporting: immediate without loop filter, one row behind with it. */
4840 if (!v->s.loop_filter)
4841 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4843 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4844 s->first_slice_line = 0;
4846 if (v->s.loop_filter)
4847 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4848 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4849 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/* Handle a skipped P-frame: copy the previous (reference) picture row by
 * row into the current destination — 16 luma lines and 8 chroma lines per
 * MB row — and report each band immediately. No bitstream is consumed. */
4852 static void vc1_decode_skip_blocks(VC1Context *v)
4854 MpegEncContext *s = &v->s;
4856 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
4857 s->first_slice_line = 1;
4858 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4860 ff_init_block_index(s);
4861 ff_update_block_index(s);
/* Whole-row copies from the last picture (Y, then U and V at half height). */
4862 memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4863 memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4864 memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4865 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4866 s->first_slice_line = 0;
/* Present the skipped frame as a P-picture to the caller. */
4868 s->pict_type = AV_PICTURE_TYPE_P;
/* Top-level block-layer dispatcher: route to the I/P/B decoding loop for
 * the current picture type and profile. NOTE(review): 'break' statements
 * and some branches ('else', X8-intra condition) are elided in this
 * excerpt — the switch relies on them in the full source. */
4871 static void vc1_decode_blocks(VC1Context *v)
4874 v->s.esc3_level_length = 0;
/* X8-coded intra frames take a separate decoding path. */
4876 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
/* Reset the delayed-output block ring indices before the MB loops. */
4879 v->left_blk_idx = -1;
4880 v->topleft_blk_idx = 1;
4882 switch (v->s.pict_type) {
4883 case AV_PICTURE_TYPE_I:
4884 if (v->profile == PROFILE_ADVANCED)
4885 vc1_decode_i_blocks_adv(v);
4887 vc1_decode_i_blocks(v);
4889 case AV_PICTURE_TYPE_P:
/* A skipped P-frame is reconstructed by copying the reference. */
4890 if (v->p_frame_skipped)
4891 vc1_decode_skip_blocks(v);
4893 vc1_decode_p_blocks(v);
4895 case AV_PICTURE_TYPE_B:
/* BI-frames (intra-coded B) reuse the I-frame decoders. */
4897 if (v->profile == PROFILE_ADVANCED)
4898 vc1_decode_i_blocks_adv(v);
4900 vc1_decode_i_blocks(v);
4902 vc1_decode_b_blocks(v);
4908 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
/* NOTE(review): the struct's opening (typedef/struct line and the coefs[]
 * member) is elided in this excerpt; only part of the SpriteData member
 * list and its doc comment is visible below. */
4912 * Transform coefficients for both sprites in 16.16 fixed point format,
4913 * in the order they appear in the bitstream:
4915 * rotation 1 (unused)
4917 * rotation 2 (unused)
4924 int effect_type, effect_flag;
4925 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
4926 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
4929 static inline int get_fp_val(GetBitContext* gb)
4931 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
/* Parse one sprite affine-transform coefficient set c[7] from the
 * bitstream: a 2-bit mode selects how many of the scale/offset terms are
 * explicitly coded (mode 3 additionally codes the rotation terms c[1] and
 * c[3]), followed by the y offset c[5] and, optionally, the alpha c[6].
 * NOTE(review): the 'case N:'/'break;' lines and the default-value
 * assignments are elided in this excerpt; only the coefficient reads for
 * each mode are visible. */
4934 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4938 switch (get_bits(gb, 2)) {
/* mode 0: only the x offset is coded */
4941 c[2] = get_fp_val(gb);
/* mode 1: uniform scale (x and y share one value) + x offset */
4945 c[0] = c[4] = get_fp_val(gb);
4946 c[2] = get_fp_val(gb);
/* mode 2: independent x scale, x offset, y scale */
4949 c[0] = get_fp_val(gb);
4950 c[2] = get_fp_val(gb);
4951 c[4] = get_fp_val(gb);
/* mode 3: full set including the (unused) rotation terms c[1], c[3] */
4954 c[0] = get_fp_val(gb);
4955 c[1] = get_fp_val(gb);
4956 c[2] = get_fp_val(gb);
4957 c[3] = get_fp_val(gb);
4958 c[4] = get_fp_val(gb);
/* common tail: y offset, then optional alpha */
4961 c[5] = get_fp_val(gb);
4963 c[6] = get_fp_val(gb);
/* Parse the sprite header of a WMV3Image/VC1Image frame into *sd: one or
 * two affine transforms, an optional effect block (type, two parameter
 * lists, flag), with extensive AV_LOG_DEBUG tracing of the 16.16 values.
 * Finishes with over/under-consumption checks against the buffer size.
 * NOTE(review): interior lines are elided in this excerpt. */
4968 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4970 AVCodecContext *avctx = v->s.avctx;
/* One transform per sprite (two when v->two_sprites is set). */
4973 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4974 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
/* Rotation is not implemented anywhere in this decoder. */
4975 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4976 av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4977 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
4978 for (i = 0; i < 7; i++)
/* Print 16.16 fixed point as integer part plus 3 decimal digits. */
4979 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4980 sd->coefs[sprite][i] / (1<<16),
4981 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4982 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Intentional assignment-in-condition: a zero effect type means none. */
4986 if (sd->effect_type = get_bits_long(gb, 30)) {
4987 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
/* Parameter counts of 7 / 14 are themselves transform sets. */
4989 vc1_sprite_parse_transform(gb, sd->effect_params1);
4992 vc1_sprite_parse_transform(gb, sd->effect_params1);
4993 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
4996 for (i = 0; i < sd->effect_pcount1; i++)
4997 sd->effect_params1[i] = get_fp_val(gb);
4999 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5000 // effect 13 is simple alpha blending and matches the opacity above
5001 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5002 for (i = 0; i < sd->effect_pcount1; i++)
5003 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5004 sd->effect_params1[i] / (1 << 16),
5005 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5006 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Second parameter list is bounded by the effect_params2[10] array. */
5009 sd->effect_pcount2 = get_bits(gb, 16);
5010 if (sd->effect_pcount2 > 10) {
5011 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5013 } else if (sd->effect_pcount2) {
5015 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5016 while (++i < sd->effect_pcount2) {
5017 sd->effect_params2[i] = get_fp_val(gb);
5018 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5019 sd->effect_params2[i] / (1 << 16),
5020 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5022 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Intentional assignment-in-condition, as above. */
5025 if (sd->effect_flag = get_bits1(gb))
5026 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
/* WMV3IMAGE tolerates up to 64 bits of overread before it is an error. */
5028 if (get_bits_count(gb) >= gb->size_in_bits +
5029 (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
5030 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5031 if (get_bits_count(gb) < gb->size_in_bits - 8)
5032 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
/* Render one or two sprites into v->sprite_output_frame using the parsed
 * transforms in *sd: per-plane, per-row horizontal scaling (with a two-row
 * cache of horizontally scaled lines) followed by vertical interpolation
 * and, for two sprites, alpha blending. All positions/advances are 16.16
 * fixed point. NOTE(review): interior lines are elided in this excerpt. */
5035 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5037 int i, plane, row, sprite;
/* sr_cache[sprite][j] remembers which source line sr_rows[sprite][j]
 * currently holds; -1 means empty. */
5038 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5039 uint8_t* src_h[2][2];
5040 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5042 MpegEncContext *s = &v->s;
/* Derive clipped offsets/advances for both sprites from the transforms.
 * ('-1 << 16' binds as '(v->sprite_width - 1) << 16' — '-' before '<<'.) */
5044 for (i = 0; i < 2; i++) {
5045 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5046 xadv[i] = sd->coefs[i][0];
5047 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5048 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5050 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5051 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
/* Blend factor between the two sprites, from sprite 2's alpha term. */
5053 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
/* Luma only in grayscale mode, otherwise all three planes. */
5055 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5056 int width = v->output_width>>!!plane;
5058 for (row = 0; row < v->output_height>>!!plane; row++) {
5059 uint8_t *dst = v->sprite_output_frame.data[plane] +
5060 v->sprite_output_frame.linesize[plane] * row;
5062 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5063 uint8_t *iplane = s->current_picture.f.data[plane];
5064 int iline = s->current_picture.f.linesize[plane];
/* Source y in 16.16; integer line plus sub-pixel remainder. */
5065 int ycoord = yoff[sprite] + yadv[sprite] * row;
5066 int yline = ycoord >> 16;
5067 ysub[sprite] = ycoord & 0xFFFF;
/* (When this branch is taken) sample from the previous picture. */
5069 iplane = s->last_picture.f.data[plane];
5070 iline = s->last_picture.f.linesize[plane];
/* Fast path: integer x offset and unity x scale — read directly from
 * the source plane, no horizontal resampling needed. */
5072 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5073 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5075 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + (yline + 1) * iline;
/* Slow path: horizontally scale the needed line(s) into sr_rows,
 * reusing/recycling the two-row cache where possible. */
5077 if (sr_cache[sprite][0] != yline) {
5078 if (sr_cache[sprite][1] == yline) {
5079 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5080 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5082 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5083 sr_cache[sprite][0] = yline;
/* Second line only needed when vertically interpolating. */
5086 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5087 v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane + (yline + 1) * iline, xoff[sprite], xadv[sprite], width);
5088 sr_cache[sprite][1] = yline + 1;
5090 src_h[sprite][0] = v->sr_rows[sprite][0];
5091 src_h[sprite][1] = v->sr_rows[sprite][1];
/* Composite the output row: single sprite (with or without vertical
 * interpolation) or two sprites blended with 'alpha'. */
5095 if (!v->two_sprites) {
5097 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5099 memcpy(dst, src_h[0][0], width);
5102 if (ysub[0] && ysub[1]) {
5103 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5104 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5105 } else if (ysub[0]) {
5106 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5107 src_h[1][0], alpha, width);
5108 } else if (ysub[1]) {
/* Sprite order swapped, so the blend factor is inverted. */
5109 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5110 src_h[0][0], (1<<16)-1-alpha, width);
5112 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5118 for (i = 0; i < 2; i++) {
/* Decode a sprite (WMImage) frame: parse the sprite header, validate that
 * the required reference picture(s) exist, (re)acquire the output frame
 * buffer, then composite via vc1_draw_sprites(). Returns int; the error
 * return statements are elided in this excerpt.
 * NOTE(review): interior lines are missing from this view. */
5128 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5130 MpegEncContext *s = &v->s;
5131 AVCodecContext *avctx = s->avctx;
5134 vc1_parse_sprites(v, gb, &sd);
/* Sprite 1 comes from the current picture — it must exist. */
5136 if (!s->current_picture.f.data[0]) {
5137 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
/* Sprite 2 comes from the last picture — warn when it is missing. */
5141 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5142 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
/* Release any previous output frame before requesting a fresh buffer. */
5146 if (v->sprite_output_frame.data[0])
5147 avctx->release_buffer(avctx, &v->sprite_output_frame);
5149 v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
5150 v->sprite_output_frame.reference = 0;
5151 if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
5152 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5156 vc1_draw_sprites(v, &sd);
/* Flush callback for the image (sprite) codecs: blank the current picture
 * so a missing second sprite shows as black instead of stale data.
 * Chroma planes are cleared to 128 (neutral), luma to 0. */
5161 static void vc1_sprite_flush(AVCodecContext *avctx)
5163 VC1Context *v = avctx->priv_data;
5164 MpegEncContext *s = &v->s;
5165 AVFrame *f = &s->current_picture.f;
5168 /* Windows Media Image codecs have a convergence interval of two keyframes.
5169 Since we can't enforce it, clear to black the missing sprite. This is
5170 wrong but it looks better than doing nothing. */
/* Luma only in grayscale mode; chroma planes are half-height (>>1). */
5173 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5174 for (i = 0; i < v->sprite_height>>!!plane; i++)
5175 memset(f->data[plane] + i * f->linesize[plane],
5176 plane ? 128 : 0, f->linesize[plane]);
/* Allocate all per-picture side-info tables sized from the MpegEncContext
 * geometry: MB bitplanes, the delayed-block ring, two-row cbp/ttblk/
 * is_intra/luma_mv buffers, block-type and block-level MV arrays, and the
 * sprite row buffers for the image codecs. Returns 0/-1; the combined
 * allocation check starts at the bottom (its tail is elided here).
 * NOTE(review): interior lines are missing from this excerpt. */
5181 static av_cold int vc1_decode_init_alloc_tables(VC1Context *v)
5183 MpegEncContext *s = &v->s;
5186 /* Allocate mb bitplanes */
5187 v->mv_type_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5188 v->direct_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5189 v->forward_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5190 v->fieldtx_plane = av_mallocz(s->mb_stride * s->mb_height);
5191 v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5192 v->over_flags_plane = av_malloc (s->mb_stride * s->mb_height);
/* Delayed-output block ring: one MB per column plus two spares. */
5194 v->n_allocated_blks = s->mb_width + 2;
5195 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
/* Two MB rows each (current + previous) for the delayed loop filter;
 * the working pointer starts at the second row. */
5196 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5197 v->cbp = v->cbp_base + s->mb_stride;
5198 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5199 v->ttblk = v->ttblk_base + s->mb_stride;
5200 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5201 v->is_intra = v->is_intra_base + s->mb_stride;
5202 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5203 v->luma_mv = v->luma_mv_base + s->mb_stride;
5205 /* allocate block type info in that way so it could be used with s->block_index[] */
5206 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5207 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5208 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5209 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5211 /* allocate memory to store block level MV info */
/* Same layout trick: base + s->b8_stride + 1 so block_index[] offsets
 * (which can be negative relative to the MB origin) stay in bounds. */
5212 v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5213 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5214 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5215 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5216 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5217 v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5218 v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5219 v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5220 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5221 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5222 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5224 /* Init coded blocks info */
5225 if (v->profile == PROFILE_ADVANCED) {
5226 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5228 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5232 ff_intrax8_common_init(&v->x8,s);
/* Per-sprite horizontal scaling row buffers, image codecs only. */
5234 if (s->avctx->codec_id == CODEC_ID_WMV3IMAGE || s->avctx->codec_id == CODEC_ID_VC1IMAGE) {
5235 for (i = 0; i < 4; i++)
5236 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
/* Combined NULL check of the unchecked allocations above (tail elided). */
5239 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5240 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5247 /** Initialize a VC1/WMV3 decoder
5248 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5249 * @todo TODO: Decypher remaining bits in extra_data
/* NOTE(review): interior lines (error returns, closing braces, some
 * branches) are elided in this excerpt; comments describe visible logic.
 * Sets up pixel format / hwaccel / IDCT, parses the codec-specific
 * extradata (WMV3 raw sequence header vs. VC-1 marker-delimited headers),
 * derives MB geometry and zigzag tables, and configures the sprite
 * dimensions for the image codecs. */
5251 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5253 VC1Context *v = avctx->priv_data;
5254 MpegEncContext *s = &v->s;
5258 /* save the container output size for WMImage */
5259 v->output_width = avctx->width;
5260 v->output_height = avctx->height;
/* Extradata (sequence header) is mandatory for all of these codecs. */
5262 if (!avctx->extradata_size || !avctx->extradata)
5264 if (!(avctx->flags & CODEC_FLAG_GRAY))
5265 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5267 avctx->pix_fmt = PIX_FMT_GRAY8;
5268 avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
5270 avctx->flags |= CODEC_FLAG_EMU_EDGE;
5271 v->s.flags |= CODEC_FLAG_EMU_EDGE;
5273 if (avctx->idct_algo == FF_IDCT_AUTO) {
5274 avctx->idct_algo = FF_IDCT_WMV2;
5277 if (ff_vc1_init_common(v) < 0)
5279 ff_vc1dsp_init(&v->vc1dsp);
5281 if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE) {
5284 // looks like WMV3 has a sequence header stored in the extradata
5285 // advanced sequence header may be before the first frame
5286 // the last byte of the extradata is a version number, 1 for the
5287 // samples we can decode
5289 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5291 if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
/* Report leftover/overread extradata bits for diagnostics only. */
5294 count = avctx->extradata_size*8 - get_bits_count(&gb);
5296 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5297 count, get_bits(&gb, count));
5298 } else if (count < 0) {
5299 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5301 } else { // VC1/WVC1/WVP2
/* Advanced profile: extradata is a series of start-code-delimited,
 * escaped headers (sequence header + entry point). */
5302 const uint8_t *start = avctx->extradata;
5303 uint8_t *end = avctx->extradata + avctx->extradata_size;
5304 const uint8_t *next;
5305 int size, buf2_size;
5306 uint8_t *buf2 = NULL;
5307 int seq_initialized = 0, ep_initialized = 0;
5309 if (avctx->extradata_size < 16) {
5310 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5314 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5315 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5317 for (; next < end; start = next) {
5318 next = find_next_marker(start + 4, end);
5319 size = next - start - 4;
/* Unescape each header payload into buf2 before bit-parsing it. */
5322 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5323 init_get_bits(&gb, buf2, buf2_size * 8);
5324 switch (AV_RB32(start)) {
5325 case VC1_CODE_SEQHDR:
5326 if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5330 seq_initialized = 1;
5332 case VC1_CODE_ENTRYPOINT:
5333 if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
/* Both headers must have been seen for a usable advanced stream. */
5342 if (!seq_initialized || !ep_initialized) {
5343 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5346 v->res_sprite = (avctx->codec_tag == MKTAG('W','V','P','2'));
5349 avctx->profile = v->profile;
5350 if (v->profile == PROFILE_ADVANCED)
5351 avctx->level = v->level;
5353 avctx->has_b_frames = !!avctx->max_b_frames;
/* MB geometry from the coded size, rounded up to whole 16x16 MBs. */
5355 s->mb_width = (avctx->coded_width + 15) >> 4;
5356 s->mb_height = (avctx->coded_height + 15) >> 4;
/* Advanced/fasttx streams use transposed scan tables; others use the
 * WMV1 tables directly. */
5358 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5359 for (i = 0; i < 64; i++) {
5360 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5361 v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5362 v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5363 v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5364 v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5365 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5370 memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
/* Image codecs decode a sprite plane at the coded size but output at the
 * container size saved above. */
5375 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5376 v->sprite_width = avctx->coded_width;
5377 v->sprite_height = avctx->coded_height;
5379 avctx->coded_width = avctx->width = v->output_width;
5380 avctx->coded_height = avctx->height = v->output_height;
5382 // prevent 16.16 overflows
5383 if (v->sprite_width > 1 << 14 ||
5384 v->sprite_height > 1 << 14 ||
5385 v->output_width > 1 << 14 ||
5386 v->output_height > 1 << 14) return -1;
5391 /** Close a VC1/WMV3 decoder
5392 * @warning Initial try at using MpegEncContext stuff
/* Releases the sprite output frame (image codecs), frees every table
 * allocated in vc1_decode_init_alloc_tables() plus the HRD buffers, and
 * tears down the MPV and intra-X8 contexts. av_freep() NULLs the pointers,
 * so a partial allocation failure path is safe to pass through here. */
5394 static av_cold int vc1_decode_end(AVCodecContext *avctx)
5396 VC1Context *v = avctx->priv_data;
5399 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5400 && v->sprite_output_frame.data[0])
5401 avctx->release_buffer(avctx, &v->sprite_output_frame);
/* The four sprite scaling rows: [2 sprites][2 rows]. */
5402 for (i = 0; i < 4; i++)
5403 av_freep(&v->sr_rows[i >> 1][i & 1]);
5404 av_freep(&v->hrd_rate);
5405 av_freep(&v->hrd_buffer);
5406 ff_MPV_common_end(&v->s);
5407 av_freep(&v->mv_type_mb_plane);
5408 av_freep(&v->direct_mb_plane);
5409 av_freep(&v->forward_mb_plane);
5410 av_freep(&v->fieldtx_plane);
5411 av_freep(&v->acpred_plane);
5412 av_freep(&v->over_flags_plane);
5413 av_freep(&v->mb_type_base);
5414 av_freep(&v->blk_mv_type_base);
5415 av_freep(&v->mv_f_base);
5416 av_freep(&v->mv_f_last_base);
5417 av_freep(&v->mv_f_next_base);
5418 av_freep(&v->block);
5419 av_freep(&v->cbp_base);
5420 av_freep(&v->ttblk_base);
5421 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5422 av_freep(&v->luma_mv_base);
5423 ff_intrax8_common_end(&v->x8);
5428 /** Decode a VC1/WMV3 frame
5429 * @todo TODO: Handle VC-1 IDUs (Transport level?)
 * Top-level AVCodec.decode callback: selects the hardware pixel format if
 * needed, splits and unescapes advanced-profile start-code units into
 * per-slice buffers, parses the frame header, decodes all slices/fields,
 * and returns a picture through *data / *data_size.
5431 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5432 int *data_size, AVPacket *avpkt)
5434 const uint8_t *buf = avpkt->data;
5435 int buf_size = avpkt->size, n_slices = 0, i;
5436 VC1Context *v = avctx->priv_data;
5437 MpegEncContext *s = &v->s;
5438 AVFrame *pict = data;
5439 uint8_t *buf2 = NULL;
5440 const uint8_t *buf_start = buf;
5441 int mb_height, n_slices1;
5446 } *slices = NULL, *tmp;
5448 /* no supplementary picture */
5449 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5450 /* special case for last picture */
5451 if (s->low_delay == 0 && s->next_picture_ptr) {
5452 *pict = s->next_picture_ptr->f;
5453 s->next_picture_ptr = NULL;
5455 *data_size = sizeof(AVFrame);
/* VDPAU hwaccel variants: pick the pixel format matching the profile. */
5461 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5462 if (v->profile < PROFILE_ADVANCED)
5463 avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
5465 avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
5468 //for advanced profile we may need to parse and unescape data
5469 if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5471 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5473 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5474 const uint8_t *start, *end, *next;
/* Walk the bitstream data units, dispatching on each 32-bit start code. */
5478 for (start = buf, end = buf + buf_size; next < end; start = next) {
5479 next = find_next_marker(start + 4, end);
5480 size = next - start - 4;
5481 if (size <= 0) continue;
5482 switch (AV_RB32(start)) {
5483 case VC1_CODE_FRAME:
5484 if (avctx->hwaccel ||
5485 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5487 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
/* Field start code: record the second field as an extra slice entry. */
5489 case VC1_CODE_FIELD: {
5491 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5494 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5495 if (!slices[n_slices].buf)
5497 buf_size3 = vc1_unescape_buffer(start + 4, size,
5498 slices[n_slices].buf);
5499 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5501 /* assuming that the field marker is at the exact middle,
5502 hope it's correct */
5503 slices[n_slices].mby_start = s->mb_height >> 1;
5504 n_slices1 = n_slices - 1; // index of the last slice of the first field
5508 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5509 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5510 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5511 ff_vc1_decode_entry_point(avctx, v, &s->gb);
/* Slice unit: the first 9 bits of its payload give the starting MB row. */
5513 case VC1_CODE_SLICE: {
5515 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5518 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5519 if (!slices[n_slices].buf)
5521 buf_size3 = vc1_unescape_buffer(start + 4, size,
5522 slices[n_slices].buf);
5523 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5525 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5531 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5532 const uint8_t *divider;
5535 divider = find_next_marker(buf, buf + buf_size);
5536 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5537 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5539 } else { // found field marker, unescape second field
5540 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5544 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5545 if (!slices[n_slices].buf)
5547 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5548 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5550 slices[n_slices].mby_start = s->mb_height >> 1;
5551 n_slices1 = n_slices - 1;
/* Unescape the first field only (up to the divider marker). */
5554 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5556 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5558 init_get_bits(&s->gb, buf2, buf_size2*8);
5560 init_get_bits(&s->gb, buf, buf_size*8);
/* WMVP/WVP2 (res_sprite) streams prepend two sprite-control bits. */
5562 if (v->res_sprite) {
5563 v->new_sprite = !get_bits1(&s->gb);
5564 v->two_sprites = get_bits1(&s->gb);
5565 /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
5566 we're using the sprite compositor. These are intentionally kept separate
5567 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5568 the vc1 one for WVP2 */
5569 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5570 if (v->new_sprite) {
5571 // switch AVCodecContext parameters to those of the sprites
5572 avctx->width = avctx->coded_width = v->sprite_width;
5573 avctx->height = avctx->coded_height = v->sprite_height;
/* Coded dimensions changed (e.g. new sprite size): tear down and
 * reinitialize the decoding context below. */
5580 if (s->context_initialized &&
5581 (s->width != avctx->coded_width ||
5582 s->height != avctx->coded_height)) {
5583 vc1_decode_end(avctx);
5586 if (!s->context_initialized) {
5587 if (ff_msmpeg4_decode_init(avctx) < 0 || vc1_decode_init_alloc_tables(v) < 0)
5590 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5592 if (v->profile == PROFILE_ADVANCED) {
5593 s->h_edge_pos = avctx->coded_width;
5594 s->v_edge_pos = avctx->coded_height;
5598 /* We need to set current_picture_ptr before reading the header,
5599 * otherwise we cannot store anything in there. */
5600 if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5601 int i = ff_find_unused_picture(s, 0);
5604 s->current_picture_ptr = &s->picture[i];
5607 // do parse frame header
5608 v->pic_header_flag = 0;
5609 if (v->profile < PROFILE_ADVANCED) {
5610 if (ff_vc1_parse_frame_header(v, &s->gb) == -1) {
5614 if (ff_vc1_parse_frame_header_adv(v, &s->gb) == -1) {
/* The sprite compositor only accepts I-frames (see error message). */
5619 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5620 && s->pict_type != AV_PICTURE_TYPE_I) {
5621 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5625 // process pulldown flags
5626 s->current_picture_ptr->f.repeat_pict = 0;
5627 // Pulldown flags are only valid when 'broadcast' has been set.
5628 // So ticks_per_frame will be 2
5631 s->current_picture_ptr->f.repeat_pict = 1;
5632 } else if (v->rptfrm) {
5634 s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5637 // for skipping the frame
5638 s->current_picture.f.pict_type = s->pict_type;
5639 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5641 /* skip B-frames if we don't have reference frames */
5642 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) {
5645 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5646 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5647 avctx->skip_frame >= AVDISCARD_ALL) {
5651 if (s->next_p_frame_damaged) {
5652 if (s->pict_type == AV_PICTURE_TYPE_B)
5655 s->next_p_frame_damaged = 0;
5658 if (ff_MPV_frame_start(s, avctx) < 0) {
5662 s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
5663 s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
/* Hardware paths: hand the whole (escaped) input range to VDPAU or the
 * generic hwaccel; otherwise fall through to the software decode below. */
5665 if ((CONFIG_VC1_VDPAU_DECODER)
5666 &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5667 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5668 else if (avctx->hwaccel) {
5669 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5671 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5673 if (avctx->hwaccel->end_frame(avctx) < 0)
5676 ff_er_frame_start(s);
5678 v->bits = buf_size * 8;
/* Field pictures: double all line sizes so each field is decoded as a
 * half-height picture, and rotate the per-field motion-vector planes
 * (mv_f <- mv_f_last <- mv_f_next <- mv_f). The line sizes are halved
 * back after decoding below. */
5679 if (v->field_mode) {
5681 s->current_picture.f.linesize[0] <<= 1;
5682 s->current_picture.f.linesize[1] <<= 1;
5683 s->current_picture.f.linesize[2] <<= 1;
5685 s->uvlinesize <<= 1;
5686 tmp[0] = v->mv_f_last[0];
5687 tmp[1] = v->mv_f_last[1];
5688 v->mv_f_last[0] = v->mv_f_next[0];
5689 v->mv_f_last[1] = v->mv_f_next[1];
5690 v->mv_f_next[0] = v->mv_f[0];
5691 v->mv_f_next[1] = v->mv_f[1];
5692 v->mv_f[0] = tmp[0];
5693 v->mv_f[1] = tmp[1];
/* Decode slice by slice; the iteration with i == n_slices covers the tail
 * after the last recorded slice start. */
5695 mb_height = s->mb_height >> v->field_mode;
5696 for (i = 0; i <= n_slices; i++) {
5697 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5698 v->second_field = 1;
5699 v->blocks_off = s->mb_width * s->mb_height << 1;
5700 v->mb_off = s->mb_stride * s->mb_height >> 1;
5702 v->second_field = 0;
5707 v->pic_header_flag = 0;
/* The slice right after the first field (n_slices1 + 2) always carries a
 * new field header; otherwise a one-bit flag signals an optional one. */
5708 if (v->field_mode && i == n_slices1 + 2)
5709 ff_vc1_parse_frame_header_adv(v, &s->gb);
5710 else if (get_bits1(&s->gb)) {
5711 v->pic_header_flag = 1;
5712 ff_vc1_parse_frame_header_adv(v, &s->gb);
5715 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5716 if (!v->field_mode || v->second_field)
5717 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5719 s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5720 vc1_decode_blocks(v);
5722 s->gb = slices[i].gb;
/* Undo the field-mode linesize doubling done before decoding; B-frames
 * additionally copy the next-field MV plane back into mv_f. */
5724 if (v->field_mode) {
5725 v->second_field = 0;
5726 if (s->pict_type == AV_PICTURE_TYPE_B) {
5727 memcpy(v->mv_f_base, v->mv_f_next_base,
5728 2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5730 s->current_picture.f.linesize[0] >>= 1;
5731 s->current_picture.f.linesize[1] >>= 1;
5732 s->current_picture.f.linesize[2] >>= 1;
5734 s->uvlinesize >>= 1;
5736 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits);
5737 // if (get_bits_count(&s->gb) > buf_size * 8)
5742 ff_MPV_frame_end(s);
/* Sprite codecs: restore the output dimensions, run the sprite
 * compositor and return the composited frame. */
5744 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5746 avctx->width = avctx->coded_width = v->output_width;
5747 avctx->height = avctx->coded_height = v->output_height;
5748 if (avctx->skip_frame >= AVDISCARD_NONREF)
5750 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5751 if (vc1_decode_sprites(v, &s->gb))
5754 *pict = v->sprite_output_frame;
5755 *data_size = sizeof(AVFrame);
/* Normal output: B-frames and low-delay streams return the current
 * picture, otherwise the previous (reference) picture is returned. */
5757 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5758 *pict = s->current_picture_ptr->f;
5759 } else if (s->last_picture_ptr != NULL) {
5760 *pict = s->last_picture_ptr->f;
5762 if (s->last_picture_ptr || s->low_delay) {
5763 *data_size = sizeof(AVFrame);
5764 ff_print_debug_info(s, pict);
/* Release the per-slice unescape buffers on the way out. */
5770 for (i = 0; i < n_slices; i++)
5771 av_free(slices[i].buf);
/* NOTE(review): duplicate cleanup loop — presumably the error-exit path;
 * the label separating the two paths is not visible here, confirm. */
5777 for (i = 0; i < n_slices; i++)
5778 av_free(slices[i].buf);
/* Profile names reported to callers via AVCodec.profiles; the list is
 * FF_PROFILE_UNKNOWN-terminated. */
5784 static const AVProfile profiles[] = {
5785 { FF_PROFILE_VC1_SIMPLE, "Simple" },
5786 { FF_PROFILE_VC1_MAIN, "Main" },
5787 { FF_PROFILE_VC1_COMPLEX, "Complex" },
5788 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5789 { FF_PROFILE_UNKNOWN },
/* Software decoder entry for SMPTE VC-1 bitstreams; shares init/close/
 * decode callbacks with the WMV3 and image variants below. */
5792 AVCodec ff_vc1_decoder = {
5794 .type = AVMEDIA_TYPE_VIDEO,
5796 .priv_data_size = sizeof(VC1Context),
5797 .init = vc1_decode_init,
5798 .close = vc1_decode_end,
5799 .decode = vc1_decode_frame,
5800 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5801 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5802 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5803 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5806 #if CONFIG_WMV3_DECODER
/* Software decoder entry for Windows Media Video 9 (WMV3) — same
 * callbacks as the VC-1 decoder, different codec id. */
5807 AVCodec ff_wmv3_decoder = {
5809 .type = AVMEDIA_TYPE_VIDEO,
5810 .id = CODEC_ID_WMV3,
5811 .priv_data_size = sizeof(VC1Context),
5812 .init = vc1_decode_init,
5813 .close = vc1_decode_end,
5814 .decode = vc1_decode_frame,
5815 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5816 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5817 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5818 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5822 #if CONFIG_WMV3_VDPAU_DECODER
/* VDPAU hardware-accelerated WMV3 decoder; advertises only the VDPAU
 * pixel format and the HWACCEL_VDPAU capability. */
5823 AVCodec ff_wmv3_vdpau_decoder = {
5824 .name = "wmv3_vdpau",
5825 .type = AVMEDIA_TYPE_VIDEO,
5826 .id = CODEC_ID_WMV3,
5827 .priv_data_size = sizeof(VC1Context),
5828 .init = vc1_decode_init,
5829 .close = vc1_decode_end,
5830 .decode = vc1_decode_frame,
5831 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5832 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5833 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE},
5834 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5838 #if CONFIG_VC1_VDPAU_DECODER
/* VDPAU hardware-accelerated VC-1 decoder; mirror of the wmv3_vdpau
 * entry above with the VC-1 VDPAU pixel format. */
5839 AVCodec ff_vc1_vdpau_decoder = {
5840 .name = "vc1_vdpau",
5841 .type = AVMEDIA_TYPE_VIDEO,
5843 .priv_data_size = sizeof(VC1Context),
5844 .init = vc1_decode_init,
5845 .close = vc1_decode_end,
5846 .decode = vc1_decode_frame,
5847 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5848 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5849 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE},
5850 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5854 #if CONFIG_WMV3IMAGE_DECODER
/* WMVP sprite decoder (Windows Media Video 9 Image): uses the sprite
 * compositor path in vc1_decode_frame and a dedicated flush callback. */
5855 AVCodec ff_wmv3image_decoder = {
5856 .name = "wmv3image",
5857 .type = AVMEDIA_TYPE_VIDEO,
5858 .id = CODEC_ID_WMV3IMAGE,
5859 .priv_data_size = sizeof(VC1Context),
5860 .init = vc1_decode_init,
5861 .close = vc1_decode_end,
5862 .decode = vc1_decode_frame,
5863 .capabilities = CODEC_CAP_DR1,
5864 .flush = vc1_sprite_flush,
5865 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5866 .pix_fmts = ff_pixfmt_list_420
5870 #if CONFIG_VC1IMAGE_DECODER
5871 AVCodec ff_vc1image_decoder = {
5873 .type = AVMEDIA_TYPE_VIDEO,
5874 .id = CODEC_ID_VC1IMAGE,
5875 .priv_data_size = sizeof(VC1Context),
5876 .init = vc1_decode_init,
5877 .close = vc1_decode_end,
5878 .decode = vc1_decode_frame,
5879 .capabilities = CODEC_CAP_DR1,
5880 .flush = vc1_sprite_flush,
5881 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5882 .pix_fmts = ff_pixfmt_list_420