 * VC-1 and WMV3 decoder
 * Copyright (c) 2011 Mashiat Sarker Shakkhar
 * Copyright (c) 2006-2007 Konstantin Shishkov
 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 * VC-1 and WMV3 decoder
32 #include "mpegvideo.h"
36 #include "vc1acdata.h"
37 #include "msmpeg4data.h"
39 #include "simple_idct.h"
41 #include "vdpau_internal.h"
46 #define MB_INTRA_VLC_BITS 9
/**
 * Offsets (in entries) of each statically allocated VLC sub-table inside
 * the single shared vlc_table[] backing store used by ff_vc1_init_common().
 * Consecutive entries i and i+1 bound the slice handed to one init_vlc()
 * call; the final value (32372) is the total size of the backing store.
 * (Reconstructed from a garbled dump: stray line-number artifacts removed
 * and the missing "};" terminator restored — the 78 values are unchanged.)
 */
static const uint16_t vlc_offs[] = {
        0,   520,   552,   616,  1128,  1160,  1224,  1740,  1772,  1836,  1900,  2436,
     2986,  3050,  3610,  4154,  4218,  4746,  5326,  5390,  5902,  6554,  7658,  8342,
     9304,  9988, 10630, 11234, 12174, 13006, 13560, 14232, 14786, 15432, 16350, 17522,
    20372, 21818, 22330, 22394, 23166, 23678, 23742, 24820, 25332, 25396, 26460, 26980,
    27048, 27592, 27600, 27608, 27616, 27624, 28224, 28258, 28290, 28802, 28834, 28866,
    29378, 29412, 29444, 29960, 29994, 30026, 30538, 30572, 30604, 31120, 31154, 31186,
    31714, 31746, 31778, 32306, 32340, 32372
};
// offset tables for interlaced picture MVDATA decoding
// offset_table1[i] is 0 then powers of two; offset_table2[i] is 2^i - 1.
// (Reconstructed from a garbled dump: stray line-number artifacts removed;
// the nine values of each table are unchanged.)
static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
66 * Init VC-1 specific tables and VC1Context members
67 * @param v The VC1Context to initialize
/**
 * Init VC-1 specific tables and VC1Context members.
 * All per-table-set VLCs are carved out of one static vlc_table[] backing
 * store; vlc_offs[] (above) gives each sub-table's slice boundaries, and
 * every init_vlc() call uses INIT_VLC_USE_NEW_STATIC accordingly.
 * NOTE(review): this dump is missing interleaved source lines (the loop
 * index declaration, closing braces, the usual static "done" guard and the
 * return) and the surviving lines carry stray line-number artifacts; the
 * text is annotated as-is rather than reconstructed.
 * @param v The VC1Context to initialize
 */
70 int ff_vc1_init_common(VC1Context *v)
74 static VLC_TYPE vlc_table[32372][2];
76 v->hrd_rate = v->hrd_buffer = NULL;
/* Fixed VLCs (own static storage): B-fraction, bitplane NORM-2/NORM-6, IMODE. */
80 INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
81 ff_vc1_bfraction_bits, 1, 1,
82 ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS);
83 INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
84 ff_vc1_norm2_bits, 1, 1,
85 ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS);
86 INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
87 ff_vc1_norm6_bits, 1, 1,
88 ff_vc1_norm6_codes, 2, 2, 556);
89 INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
90 ff_vc1_imode_bits, 1, 1,
91 ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS);
/* Three table sets: TTMB, TTBLK and SUBBLKPAT share vlc_offs[0..9]. */
92 for (i = 0; i < 3; i++) {
93 ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 0]];
94 ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i * 3 + 1] - vlc_offs[i * 3 + 0];
95 init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
96 ff_vc1_ttmb_bits[i], 1, 1,
97 ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
98 ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 1]];
99 ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i * 3 + 2] - vlc_offs[i * 3 + 1];
100 init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
101 ff_vc1_ttblk_bits[i], 1, 1,
102 ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
103 ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 2]];
104 ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i * 3 + 3] - vlc_offs[i * 3 + 2];
105 init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
106 ff_vc1_subblkpat_bits[i], 1, 1,
107 ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* Four table sets: 4MV block pattern, P-frame CBPCY, MV diff (vlc_offs[9..21]). */
109 for (i = 0; i < 4; i++) {
110 ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 9]];
111 ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i * 3 + 10] - vlc_offs[i * 3 + 9];
112 init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
113 ff_vc1_4mv_block_pattern_bits[i], 1, 1,
114 ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
115 ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 10]];
116 ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i * 3 + 11] - vlc_offs[i * 3 + 10];
117 init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
118 ff_vc1_cbpcy_p_bits[i], 1, 1,
119 ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
120 ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 11]];
121 ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i * 3 + 12] - vlc_offs[i * 3 + 11];
122 init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
123 ff_vc1_mv_diff_bits[i], 1, 1,
124 ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
/* Eight table sets: AC coefficients and 2-ref interlaced MVDATA (vlc_offs[21..37]). */
126 for (i = 0; i < 8; i++) {
127 ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i * 2 + 21]];
128 ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i * 2 + 22] - vlc_offs[i * 2 + 21];
129 init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
130 &vc1_ac_tables[i][0][1], 8, 4,
131 &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC);
132 /* initialize interlaced MVDATA tables (2-Ref) */
133 ff_vc1_2ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 2 + 22]];
134 ff_vc1_2ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 2 + 23] - vlc_offs[i * 2 + 22];
135 init_vlc(&ff_vc1_2ref_mvdata_vlc[i], VC1_2REF_MVDATA_VLC_BITS, 126,
136 ff_vc1_2ref_mvdata_bits[i], 1, 1,
137 ff_vc1_2ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
/* Four table sets: interlaced-frame MBMODE and 1-ref MVDATA (vlc_offs[37..49]). */
139 for (i = 0; i < 4; i++) {
140 /* initialize 4MV MBMODE VLC tables for interlaced frame P picture */
141 ff_vc1_intfr_4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 37]];
142 ff_vc1_intfr_4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 38] - vlc_offs[i * 3 + 37];
143 init_vlc(&ff_vc1_intfr_4mv_mbmode_vlc[i], VC1_INTFR_4MV_MBMODE_VLC_BITS, 15,
144 ff_vc1_intfr_4mv_mbmode_bits[i], 1, 1,
145 ff_vc1_intfr_4mv_mbmode_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
146 /* initialize NON-4MV MBMODE VLC tables for the same */
147 ff_vc1_intfr_non4mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 38]];
148 ff_vc1_intfr_non4mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 39] - vlc_offs[i * 3 + 38];
149 init_vlc(&ff_vc1_intfr_non4mv_mbmode_vlc[i], VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 9,
150 ff_vc1_intfr_non4mv_mbmode_bits[i], 1, 1,
151 ff_vc1_intfr_non4mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
152 /* initialize interlaced MVDATA tables (1-Ref) */
153 ff_vc1_1ref_mvdata_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 39]];
154 ff_vc1_1ref_mvdata_vlc[i].table_allocated = vlc_offs[i * 3 + 40] - vlc_offs[i * 3 + 39];
155 init_vlc(&ff_vc1_1ref_mvdata_vlc[i], VC1_1REF_MVDATA_VLC_BITS, 72,
156 ff_vc1_1ref_mvdata_bits[i], 1, 1,
157 ff_vc1_1ref_mvdata_codes[i], 4, 4, INIT_VLC_USE_NEW_STATIC);
/* Four table sets: 2MV block pattern (vlc_offs[49..53], one slice each). */
159 for (i = 0; i < 4; i++) {
160 /* Initialize 2MV Block pattern VLC tables */
161 ff_vc1_2mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i + 49]];
162 ff_vc1_2mv_block_pattern_vlc[i].table_allocated = vlc_offs[i + 50] - vlc_offs[i + 49];
163 init_vlc(&ff_vc1_2mv_block_pattern_vlc[i], VC1_2MV_BLOCK_PATTERN_VLC_BITS, 4,
164 ff_vc1_2mv_block_pattern_bits[i], 1, 1,
165 ff_vc1_2mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* Eight table sets: interlaced CBPCY and field-picture MBMODE (vlc_offs[53..77]). */
167 for (i = 0; i < 8; i++) {
168 /* Initialize interlaced CBPCY VLC tables (Table 124 - Table 131) */
169 ff_vc1_icbpcy_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 53]];
170 ff_vc1_icbpcy_vlc[i].table_allocated = vlc_offs[i * 3 + 54] - vlc_offs[i * 3 + 53];
171 init_vlc(&ff_vc1_icbpcy_vlc[i], VC1_ICBPCY_VLC_BITS, 63,
172 ff_vc1_icbpcy_p_bits[i], 1, 1,
173 ff_vc1_icbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
174 /* Initialize interlaced field picture MBMODE VLC tables */
175 ff_vc1_if_mmv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 54]];
176 ff_vc1_if_mmv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 55] - vlc_offs[i * 3 + 54];
177 init_vlc(&ff_vc1_if_mmv_mbmode_vlc[i], VC1_IF_MMV_MBMODE_VLC_BITS, 8,
178 ff_vc1_if_mmv_mbmode_bits[i], 1, 1,
179 ff_vc1_if_mmv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
180 ff_vc1_if_1mv_mbmode_vlc[i].table = &vlc_table[vlc_offs[i * 3 + 55]];
181 ff_vc1_if_1mv_mbmode_vlc[i].table_allocated = vlc_offs[i * 3 + 56] - vlc_offs[i * 3 + 55];
182 init_vlc(&ff_vc1_if_1mv_mbmode_vlc[i], VC1_IF_1MV_MBMODE_VLC_BITS, 6,
183 ff_vc1_if_1mv_mbmode_bits[i], 1, 1,
184 ff_vc1_if_1mv_mbmode_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
/* Per-context state reset (independent of the static VLC setup above). */
191 v->mvrange = 0; /* 7.1.1.18, p80 */
196 /***********************************************************************/
198 * @name VC-1 Bitplane decoding
216 /** @} */ //imode defines
219 /** @} */ //Bitplane group
/**
 * Copy the delayed (already overlap-filtered) topleft/top macroblocks'
 * residual blocks into the output picture, clamped to [0,255], then rotate
 * the block-index ring (topleft <- top <- left <- cur).
 * NOTE(review): dump is missing interleaved lines (the v_dist declaration,
 * several put_signed_pixels_clamped stride arguments, closing braces and
 * part of the inc_blk_idx macro body); text annotated as-is.
 */
221 static void vc1_put_signed_blocks_clamped(VC1Context *v)
223 MpegEncContext *s = &v->s;
224 int topleft_mb_pos, top_mb_pos;
225 int stride_y, fieldtx;
228 /* The put pixels loop is always one MB row behind the decoding loop,
229 * because we can only put pixels when overlap filtering is done, and
230 * for filtering of the bottom edge of a MB, we need the next MB row
232 * Within the row, the put pixels loop is also one MB col behind the
233 * decoding loop. The reason for this is again, because for filtering
234 * of the right MB edge, we need the next MB present. */
235 if (!s->first_slice_line) {
/* Emit the top-left neighbour MB; fieldtx doubles the luma stride. */
237 topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
238 fieldtx = v->fieldtx_plane[topleft_mb_pos];
239 stride_y = s->linesize << fieldtx;
240 v_dist = (16 - fieldtx) >> (fieldtx == 0);
241 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
242 s->dest[0] - 16 * s->linesize - 16,
244 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
245 s->dest[0] - 16 * s->linesize - 8,
247 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
248 s->dest[0] - v_dist * s->linesize - 16,
250 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
251 s->dest[0] - v_dist * s->linesize - 8,
253 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
254 s->dest[1] - 8 * s->uvlinesize - 8,
256 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
257 s->dest[2] - 8 * s->uvlinesize - 8,
/* At the right picture edge also flush the top neighbour MB. */
260 if (s->mb_x == s->mb_width - 1) {
261 top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
262 fieldtx = v->fieldtx_plane[top_mb_pos];
263 stride_y = s->linesize << fieldtx;
264 v_dist = fieldtx ? 15 : 8;
265 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
266 s->dest[0] - 16 * s->linesize,
268 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
269 s->dest[0] - 16 * s->linesize + 8,
271 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
272 s->dest[0] - v_dist * s->linesize,
274 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
275 s->dest[0] - v_dist * s->linesize + 8,
277 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
278 s->dest[1] - 8 * s->uvlinesize,
280 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
281 s->dest[2] - 8 * s->uvlinesize,
/* Advance a ring index, wrapping at n_allocated_blks (increment/wrap
 * lines are missing from this dump — TODO confirm against full source). */
286 #define inc_blk_idx(idx) do { \
288 if (idx >= v->n_allocated_blks) \
292 inc_blk_idx(v->topleft_blk_idx);
293 inc_blk_idx(v->top_blk_idx);
294 inc_blk_idx(v->left_blk_idx);
295 inc_blk_idx(v->cur_blk_idx);
/**
 * In-loop deblocking for an intra block: vertical filter on the current MB's
 * top edge (when not on the first slice line), horizontal filters on the MB
 * above, plus extra edges on the last MB row.
 * NOTE(review): dump is missing guard/brace lines between some statements;
 * text annotated as-is.
 * @param pq picture quantizer, passed through to the vc1dsp filter kernels
 */
298 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
300 MpegEncContext *s = &v->s;
302 if (!s->first_slice_line) {
303 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
305 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
306 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
307 for (j = 0; j < 2; j++) {
308 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
310 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Internal horizontal luma edge at y = 8 inside the MB. */
313 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
/* Bottom MB row: filter the current MB's own vertical edges. */
315 if (s->mb_y == s->end_mb_y - 1) {
317 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
318 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
319 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
321 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/**
 * Delayed variant of the intra-block loop filter: all edge addresses are
 * shifted one MB row/col back relative to vc1_loop_filter_iblk(), because
 * here the loop filter trails the overlap filter by one MB in each
 * direction (two MBs behind the decoding loop).
 * NOTE(review): dump is missing guard/brace lines between sections;
 * text annotated as-is.
 * @param pq picture quantizer, passed through to the vc1dsp filter kernels
 */
325 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
327 MpegEncContext *s = &v->s;
330 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
331 * means it runs two rows/cols behind the decoding loop. */
332 if (!s->first_slice_line) {
334 if (s->mb_y >= s->start_mb_y + 2) {
335 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
338 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
339 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
340 for (j = 0; j < 2; j++) {
341 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
343 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
347 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
/* Right picture edge: also process the MB directly above. */
350 if (s->mb_x == s->mb_width - 1) {
351 if (s->mb_y >= s->start_mb_y + 2) {
352 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
355 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
356 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
357 for (j = 0; j < 2; j++) {
358 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
360 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
364 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
/* Last MB row: flush the remaining horizontal edges one row up. */
367 if (s->mb_y == s->end_mb_y) {
370 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
371 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
373 for (j = 0; j < 2; j++) {
374 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
379 if (s->mb_x == s->mb_width - 1) {
381 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
382 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
384 for (j = 0; j < 2; j++) {
385 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/**
 * Conditional overlap smoothing for intra blocks (advanced profile).
 * Horizontal overlap runs on the current MB's left/internal edges first;
 * vertical overlap is then applied one MB column behind (and one row
 * behind), which is why the top/topleft delayed block indices are used.
 * A filter fires when condover is CONDOVER_ALL, pq >= 9, or the per-MB
 * over_flags_plane bit is set for both MBs sharing the edge.
 * NOTE(review): dump is missing some guard/brace lines; text annotated as-is.
 */
393 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
395 MpegEncContext *s = &v->s;
398 if (v->condover == CONDOVER_NONE)
401 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
403 /* Within a MB, the horizontal overlap always runs before the vertical.
404 * To accomplish that, we run the H on left and internal borders of the
405 * currently decoded MB. Then, we wait for the next overlap iteration
406 * to do H overlap on the right edge of this MB, before moving over and
407 * running the V overlap. Therefore, the V overlap makes us trail by one
408 * MB col and the H overlap filter makes us trail by one MB row. This
409 * is reflected in the time at which we run the put_pixels loop. */
410 if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
411 if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
412 v->over_flags_plane[mb_pos - 1])) {
/* H overlap across the left MB boundary (left MB's right blocks vs cur). */
413 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
414 v->block[v->cur_blk_idx][0]);
415 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
416 v->block[v->cur_blk_idx][2]);
417 if (!(s->flags & CODEC_FLAG_GRAY)) {
418 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
419 v->block[v->cur_blk_idx][4]);
420 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
421 v->block[v->cur_blk_idx][5]);
/* H overlap on the current MB's internal vertical edge. */
424 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
425 v->block[v->cur_blk_idx][1]);
426 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
427 v->block[v->cur_blk_idx][3]);
/* Right picture edge: run the V overlap for the current column now. */
429 if (s->mb_x == s->mb_width - 1) {
430 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
431 v->over_flags_plane[mb_pos - s->mb_stride])) {
432 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
433 v->block[v->cur_blk_idx][0]);
434 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
435 v->block[v->cur_blk_idx][1]);
436 if (!(s->flags & CODEC_FLAG_GRAY)) {
437 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
438 v->block[v->cur_blk_idx][4]);
439 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
440 v->block[v->cur_blk_idx][5]);
443 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
444 v->block[v->cur_blk_idx][2]);
445 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
446 v->block[v->cur_blk_idx][3]);
/* Trailing column: V overlap for the left (previous) MB column. */
449 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
450 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
451 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
452 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
453 v->block[v->left_blk_idx][0]);
454 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
455 v->block[v->left_blk_idx][1]);
456 if (!(s->flags & CODEC_FLAG_GRAY)) {
457 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
458 v->block[v->left_blk_idx][4]);
459 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
460 v->block[v->left_blk_idx][5]);
463 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
464 v->block[v->left_blk_idx][2]);
465 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
466 v->block[v->left_blk_idx][3]);
470 /** Do motion compensation over 1 macroblock
471 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
/**
 * Do motion compensation over 1 macroblock (1-MV mode).
 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c.
 * Selects the reference plane (last/next/current field), derives the chroma
 * MV from the luma MV, performs edge emulation plus range-reduction /
 * intensity-compensation scaling of the source when needed, then calls the
 * quarter-pel (mspel) or half-pel luma kernels and the chroma kernels.
 * NOTE(review): dump is missing interleaved lines (several declarations,
 * else/brace lines, the early return); text annotated as-is.
 * @param dir 0 = forward prediction, 1 = backward (s->mv[dir] is read)
 */
473 static void vc1_mc_1mv(VC1Context *v, int dir)
475 MpegEncContext *s = &v->s;
476 DSPContext *dsp = &v->s.dsp;
477 uint8_t *srcY, *srcU, *srcV;
478 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
480 int v_edge_pos = s->v_edge_pos >> v->field_mode;
/* Bail out when the needed reference picture is not available. */
482 if ((!v->field_mode ||
483 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
484 !v->s.last_picture.f.data[0])
487 mx = s->mv[dir][0][0];
488 my = s->mv[dir][0][1];
490 // store motion vectors for further use in B frames
491 if (s->pict_type == AV_PICTURE_TYPE_P) {
492 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
493 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
/* Chroma MV = luma MV halved, rounding 3/4-pel positions up. */
496 uvmx = (mx + ((mx & 3) == 3)) >> 1;
497 uvmy = (my + ((my & 3) == 3)) >> 1;
498 v->luma_mv[s->mb_x][0] = uvmx;
499 v->luma_mv[s->mb_x][1] = uvmy;
/* Opposite-field reference: bias the vertical MVs by +/-2. */
502 v->cur_field_type != v->ref_field_type[dir]) {
503 my = my - 2 + 4 * v->cur_field_type;
504 uvmy = uvmy - 2 + 4 * v->cur_field_type;
507 // fastuvmc shall be ignored for interlaced frame picture
508 if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
509 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
510 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
512 if (v->field_mode) { // interlaced field picture
/* Second field referencing the first field of the same frame reads
 * from the (partially decoded) current picture. */
514 if ((v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
515 srcY = s->current_picture.f.data[0];
516 srcU = s->current_picture.f.data[1];
517 srcV = s->current_picture.f.data[2];
519 srcY = s->last_picture.f.data[0];
520 srcU = s->last_picture.f.data[1];
521 srcV = s->last_picture.f.data[2];
524 srcY = s->next_picture.f.data[0];
525 srcU = s->next_picture.f.data[1];
526 srcV = s->next_picture.f.data[2];
530 srcY = s->last_picture.f.data[0];
531 srcU = s->last_picture.f.data[1];
532 srcV = s->last_picture.f.data[2];
534 srcY = s->next_picture.f.data[0];
535 srcU = s->next_picture.f.data[1];
536 srcV = s->next_picture.f.data[2];
/* Integer-pel source position (MVs are in quarter-pel units). */
540 src_x = s->mb_x * 16 + (mx >> 2);
541 src_y = s->mb_y * 16 + (my >> 2);
542 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
543 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
545 if (v->profile != PROFILE_ADVANCED) {
546 src_x = av_clip( src_x, -16, s->mb_width * 16);
547 src_y = av_clip( src_y, -16, s->mb_height * 16);
548 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
549 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
551 src_x = av_clip( src_x, -17, s->avctx->coded_width);
552 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
553 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
554 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
557 srcY += src_y * s->linesize + src_x;
558 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
559 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* Bottom-field reference: step down one (frame) line into the field. */
561 if (v->field_mode && v->ref_field_type[dir]) {
562 srcY += s->current_picture_ptr->f.linesize[0];
563 srcU += s->current_picture_ptr->f.linesize[1];
564 srcV += s->current_picture_ptr->f.linesize[2];
567 /* for grayscale we should not try to read from unknown area */
568 if (s->flags & CODEC_FLAG_GRAY) {
569 srcU = s->edge_emu_buffer + 18 * s->linesize;
570 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* Edge emulation: needed for range reduction / intensity compensation
 * (the source must be copied before being scaled) or out-of-frame MVs. */
573 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
574 || s->h_edge_pos < 22 || v_edge_pos < 22
575 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
576 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my&3) - 16 - s->mspel * 3) {
577 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
579 srcY -= s->mspel * (1 + s->linesize);
580 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
581 17 + s->mspel * 2, 17 + s->mspel * 2,
582 src_x - s->mspel, src_y - s->mspel,
583 s->h_edge_pos, v_edge_pos);
584 srcY = s->edge_emu_buffer;
585 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
586 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
587 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
588 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
591 /* if we deal with range reduction we need to scale source blocks */
592 if (v->rangeredfrm) {
597 for (j = 0; j < 17 + s->mspel * 2; j++) {
598 for (i = 0; i < 17 + s->mspel * 2; i++)
599 src[i] = ((src[i] - 128) >> 1) + 128;
604 for (j = 0; j < 9; j++) {
605 for (i = 0; i < 9; i++) {
606 src[i] = ((src[i] - 128) >> 1) + 128;
607 src2[i] = ((src2[i] - 128) >> 1) + 128;
609 src += s->uvlinesize;
610 src2 += s->uvlinesize;
613 /* if we deal with intensity compensation we need to scale source blocks */
614 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
619 for (j = 0; j < 17 + s->mspel * 2; j++) {
620 for (i = 0; i < 17 + s->mspel * 2; i++)
621 src[i] = v->luty[src[i]];
626 for (j = 0; j < 9; j++) {
627 for (i = 0; i < 9; i++) {
628 src[i] = v->lutuv[src[i]];
629 src2[i] = v->lutuv[src2[i]];
631 src += s->uvlinesize;
632 src2 += s->uvlinesize;
635 srcY += s->mspel * (1 + s->linesize);
/* Second field writes into the bottom field of the output frame. */
638 if (v->field_mode && v->second_field) {
639 off = s->current_picture_ptr->f.linesize[0];
640 off_uv = s->current_picture_ptr->f.linesize[1];
/* Quarter-pel (mspel) luma MC: four 8x8 calls cover the 16x16 MB. */
646 dxy = ((my & 3) << 2) | (mx & 3);
647 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
648 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
649 srcY += s->linesize * 8;
650 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
651 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
652 } else { // hpel mc - always used for luma
653 dxy = (my & 2) | ((mx & 2) >> 1);
655 dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
657 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
660 if (s->flags & CODEC_FLAG_GRAY) return;
661 /* Chroma MC always uses qpel bilinear */
662 uvmx = (uvmx & 3) << 1;
663 uvmy = (uvmy & 3) << 1;
665 dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
666 dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
668 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
669 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Fallback definitions so this block is self-contained; in the full file
 * these macros come from libavutil (via the existing includes). */
#ifndef FFMIN
#define FFMIN(a, b) ((a) > (b) ? (b) : (a))
#endif
#ifndef FFMAX
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))
#endif

/**
 * Median of four integers, computed as the mean of the two middle values:
 * drop the overall minimum and maximum, average the rest (truncating
 * integer division). The outer comparison orders (a, b); the inner one
 * orders (c, d); each branch then averages the smaller of the two "large"
 * candidates with the larger of the two "small" candidates.
 * (The outer "if (a < b)" selector and braces were lost in this dump and
 * have been restored — the four visible return expressions determine it.)
 */
static inline int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else       return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else       return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}
684 /** Do motion compensation for 4-MV macroblock - luminance block
/**
 * Do motion compensation for a 4-MV macroblock - one 8x8 luminance block.
 * Handles interlaced-frame field MVs (fieldmv doubles the source stride),
 * derives the stored chroma-prediction MV for block 3 of field P pictures,
 * and performs edge emulation plus range-reduction / intensity-compensation
 * scaling before the mspel or hpel 8x8 luma kernels.
 * NOTE(review): dump is missing interleaved lines (declarations of srcY/off/
 * tx/ty, several else/brace lines, break statements); text annotated as-is.
 * @param n   luma block index within the MB (0..3)
 * @param dir 0 = forward prediction, 1 = backward
 */
686 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
688 MpegEncContext *s = &v->s;
689 DSPContext *dsp = &v->s.dsp;
691 int dxy, mx, my, src_x, src_y;
693 int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
694 int v_edge_pos = s->v_edge_pos >> v->field_mode;
696 if ((!v->field_mode ||
697 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
698 !v->s.last_picture.f.data[0])
701 mx = s->mv[dir][n][0];
702 my = s->mv[dir][n][1];
/* Pick the reference luma plane (same logic as vc1_mc_1mv). */
706 if ((v->cur_field_type != v->ref_field_type[dir]) && v->second_field)
707 srcY = s->current_picture.f.data[0];
709 srcY = s->last_picture.f.data[0];
711 srcY = s->last_picture.f.data[0];
713 srcY = s->next_picture.f.data[0];
716 if (v->cur_field_type != v->ref_field_type[dir])
717 my = my - 2 + 4 * v->cur_field_type;
/* On the last luma block of a field P MB, derive the MV stored for
 * B-frame prediction from the dominant (same/opposite) field MVs. */
720 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
721 int same_count = 0, opp_count = 0, k;
722 int chosen_mv[2][4][2], f;
724 for (k = 0; k < 4; k++) {
725 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
726 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
727 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
731 f = opp_count > same_count;
732 switch (f ? opp_count : same_count) {
734 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
735 chosen_mv[f][2][0], chosen_mv[f][3][0]);
736 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
737 chosen_mv[f][2][1], chosen_mv[f][3][1]);
740 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
741 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
744 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
745 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
748 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
749 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
750 for (k = 0; k < 4; k++)
751 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* Interlaced frame: pull the MV back inside the legal reference area. */
754 if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
756 int width = s->avctx->coded_width;
757 int height = s->avctx->coded_height >> 1;
758 qx = (s->mb_x * 16) + (mx >> 2);
759 qy = (s->mb_y * 8) + (my >> 3);
764 mx -= 4 * (qx - width);
767 else if (qy > height + 1)
768 my -= 8 * (qy - height - 1);
/* Destination offset of this 8x8 block; field MVs interleave rows. */
771 if ((v->fcm == ILACE_FRAME) && fieldmv)
772 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
774 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
775 if (v->field_mode && v->second_field)
776 off += s->current_picture_ptr->f.linesize[0];
778 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
780 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
782 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
784 if (v->profile != PROFILE_ADVANCED) {
785 src_x = av_clip(src_x, -16, s->mb_width * 16);
786 src_y = av_clip(src_y, -16, s->mb_height * 16);
788 src_x = av_clip(src_x, -17, s->avctx->coded_width);
789 if (v->fcm == ILACE_FRAME) {
791 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
793 src_y = av_clip(src_y, -18, s->avctx->coded_height);
795 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
799 srcY += src_y * s->linesize + src_x;
800 if (v->field_mode && v->ref_field_type[dir])
801 srcY += s->current_picture_ptr->f.linesize[0];
/* Field-MV parity adjustments (adjustment lines missing from dump). */
803 if (fieldmv && !(src_y & 1))
805 if (fieldmv && (src_y & 1) && src_y < 4)
807 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
808 || s->h_edge_pos < 13 || v_edge_pos < 23
809 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
810 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
811 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
812 /* check emulate edge stride and offset */
813 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
814 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
815 src_x - s->mspel, src_y - (s->mspel << fieldmv),
816 s->h_edge_pos, v_edge_pos);
817 srcY = s->edge_emu_buffer;
818 /* if we deal with range reduction we need to scale source blocks */
819 if (v->rangeredfrm) {
824 for (j = 0; j < 9 + s->mspel * 2; j++) {
825 for (i = 0; i < 9 + s->mspel * 2; i++)
826 src[i] = ((src[i] - 128) >> 1) + 128;
827 src += s->linesize << fieldmv;
830 /* if we deal with intensity compensation we need to scale source blocks */
831 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
836 for (j = 0; j < 9 + s->mspel * 2; j++) {
837 for (i = 0; i < 9 + s->mspel * 2; i++)
838 src[i] = v->luty[src[i]];
839 src += s->linesize << fieldmv;
842 srcY += s->mspel * (1 + (s->linesize << fieldmv));
/* Quarter-pel or half-pel 8x8 luma kernel. */
846 dxy = ((my & 3) << 2) | (mx & 3);
847 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
848 } else { // hpel mc - always used for luma
849 dxy = (my & 2) | ((mx & 2) >> 1);
851 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
853 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/**
 * Derive the chroma MV (*tx, *ty) from the four 4-MV luma MVs.
 * idx has one bit set per luma block whose a[] entry differs from flag
 * (i.e. per block that must be excluded); count[idx] is the popcount of
 * idx. With 0 excluded blocks the median of all four is used; with 1
 * excluded, the mid_pred of the remaining three; with 2 excluded, the
 * average of the remaining two.
 * NOTE(review): dump is missing interleaved lines (declarations, case
 * labels of the count==1 switch, the selection conditions inside the two
 * count==2 loops, all return statements); text annotated as-is — the
 * return value is presumably the number of MVs used, verify against
 * callers (vc1_mc_4mv_chroma stores it as valid_count).
 * @param a    per-block classifier (intra flags or field flags)
 * @param flag value of a[] marking a block as usable
 * @return number of contributing MVs (return lines missing from dump)
 */
857 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
860 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
862 idx = ((a[3] != flag) << 3)
863 | ((a[2] != flag) << 2)
864 | ((a[1] != flag) << 1)
/* All four blocks usable: component-wise median of 4. */
867 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
868 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
870 } else if (count[idx] == 1) {
/* Exactly one block excluded: mid_pred of the other three
 * (the switch/case labels selecting by idx are missing from dump). */
873 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
874 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
877 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
878 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
881 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
882 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
885 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
886 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
889 } else if (count[idx] == 2) {
/* Two excluded: find the two usable block indices t1 < t2 and average
 * their MVs (the loop-body selection conditions are missing from dump). */
891 for (i = 0; i < 3; i++)
896 for (i = t1 + 1; i < 4; i++)
901 *tx = (mvx[t1] + mvx[t2]) / 2;
902 *ty = (mvy[t1] + mvy[t2]) / 2;
910 /** Do motion compensation for 4-MV macroblock - both chroma blocks
/**
 * Do motion compensation for a 4-MV macroblock - both chroma blocks.
 * Derives one chroma MV from the four luma MVs via get_chroma_mv()
 * (using intra flags for progressive, field flags and a dominant-field
 * vote for field mode), then fetches the 8x8 U and V blocks with edge
 * emulation and optional range-reduction scaling.
 * NOTE(review): dump is missing interleaved lines and the function
 * visibly continues past the end of this excerpt (the chroma kernel
 * calls are not shown); the visible span is annotated as-is.
 * @param dir 0 = forward prediction, 1 = backward
 */
912 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
914 MpegEncContext *s = &v->s;
915 DSPContext *dsp = &v->s.dsp;
916 uint8_t *srcU, *srcV;
917 int uvmx, uvmy, uvsrc_x, uvsrc_y;
918 int k, tx = 0, ty = 0;
919 int mvx[4], mvy[4], intra[4], mv_f[4];
921 int chroma_ref_type = v->cur_field_type, off = 0;
922 int v_edge_pos = s->v_edge_pos >> v->field_mode;
924 if (!v->field_mode && !v->s.last_picture.f.data[0])
926 if (s->flags & CODEC_FLAG_GRAY)
/* Gather the four luma MVs and their per-block classifiers. */
929 for (k = 0; k < 4; k++) {
930 mvx[k] = s->mv[dir][k][0];
931 mvy[k] = s->mv[dir][k][1];
932 intra[k] = v->mb_type[0][s->block_index[k]];
934 mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
937 /* calculate chroma MV vector from four luma MVs */
938 if (!v->field_mode || (v->field_mode && !v->numref)) {
939 valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
941 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
942 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
943 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
944 return; //no need to do MC for intra blocks
/* 2-ref field mode: majority vote picks the dominant reference field. */
948 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
950 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
952 chroma_ref_type = !v->cur_field_type;
954 if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
956 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
957 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
/* Halve to chroma resolution, rounding 3/4-pel positions up. */
958 uvmx = (tx + ((tx & 3) == 3)) >> 1;
959 uvmy = (ty + ((ty & 3) == 3)) >> 1;
961 v->luma_mv[s->mb_x][0] = uvmx;
962 v->luma_mv[s->mb_x][1] = uvmy;
965 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
966 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
968 // Field conversion bias
969 if (v->cur_field_type != chroma_ref_type)
970 uvmy += 2 - 4 * chroma_ref_type;
972 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
973 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
975 if (v->profile != PROFILE_ADVANCED) {
976 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
977 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
979 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
980 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
/* Select reference chroma planes (same logic as vc1_mc_1mv). */
985 if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
986 srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
987 srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
989 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
990 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
993 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
994 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
997 srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
998 srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1001 if (v->field_mode) {
1002 if (chroma_ref_type) {
1003 srcU += s->current_picture_ptr->f.linesize[1];
1004 srcV += s->current_picture_ptr->f.linesize[2];
1006 off = v->second_field ? s->current_picture_ptr->f.linesize[1] : 0;
1009 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1010 || s->h_edge_pos < 18 || v_edge_pos < 18
1011 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
1012 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
1013 s->dsp.emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize,
1014 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1015 s->h_edge_pos >> 1, v_edge_pos >> 1);
1016 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1017 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1018 s->h_edge_pos >> 1, v_edge_pos >> 1);
1019 srcU = s->edge_emu_buffer;
1020 srcV = s->edge_emu_buffer + 16;
1022 /* if we deal with range reduction we need to scale source blocks */
1023 if (v->rangeredfrm) {
1025 uint8_t *src, *src2;
1029 for (j = 0; j < 9; j++) {
1030 for (i = 0; i < 9; i++) {
1031 src[i] = ((src[i] - 128) >> 1) + 128;
1032 src2[i] = ((src2[i] - 128) >> 1) + 128;
1034 src += s->uvlinesize;
1035 src2 += s->uvlinesize;
1038 /* if we deal with intensity compensation we need to scale source blocks */
1039 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1041 uint8_t *src, *src2;
1045 for (j = 0; j < 9; j++) {
1046 for (i = 0; i < 9; i++) {
1047 src[i] = v->lutuv[src[i]];
1048 src2[i] = v->lutuv[src2[i]];
1050 src += s->uvlinesize;
1051 src2 += s->uvlinesize;
1056 /* Chroma MC always uses qpel bilinear */
1057 uvmx = (uvmx & 3) << 1;
1058 uvmy = (uvmy & 3) << 1;
1060 dsp->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1061 dsp->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1063 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
1064 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
1068 /** Do motion compensation for 4-MV field chroma macroblock (both U and V)
/* Each of the four 4x4 chroma sub-blocks gets its own MV derived from the
 * corresponding luma MV; only the forward (last) reference is used. */
1070 static void vc1_mc_4mv_chroma4(VC1Context *v)
1072 MpegEncContext *s = &v->s;
1073 DSPContext *dsp = &v->s.dsp;
1074 uint8_t *srcU, *srcV;
1075 int uvsrc_x, uvsrc_y;
1076 int uvmx_field[4], uvmy_field[4];
/* fieldmv != 0: sub-blocks carry field MVs -> fields accessed interleaved */
1078 int fieldmv = v->blk_mv_type[s->block_index[0]];
/* rounding table for luma->chroma vertical MV scaling in field-MV mode */
1079 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
1080 int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
1081 int v_edge_pos = s->v_edge_pos >> 1;
1083 if (!v->s.last_picture.f.data[0])
1085 if (s->flags & CODEC_FLAG_GRAY)
/* derive one chroma half-pel MV per 4x4 sub-block from the luma MVs */
1088 for (i = 0; i < 4; i++) {
1089 tx = s->mv[0][i][0];
1090 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1091 ty = s->mv[0][i][1];
/* field MV: scale vertical component through the field rounding table */
1093 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1095 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
1098 for (i = 0; i < 4; i++) {
/* destination offset of sub-block i inside the 8x8 chroma MB */
1099 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1100 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1101 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1102 // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1103 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1104 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1105 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1106 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
/* keep only the sub-pel fraction for the interpolation call below */
1107 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1108 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
/* field parity fix-ups near the top of the picture */
1110 if (fieldmv && !(uvsrc_y & 1))
1112 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
/* edge emulation when the 5-wide read area crosses the picture border */
1114 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
1115 || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1116 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1117 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1118 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
1119 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1120 s->h_edge_pos >> 1, v_edge_pos);
1121 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1122 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1123 s->h_edge_pos >> 1, v_edge_pos);
1124 srcU = s->edge_emu_buffer;
1125 srcV = s->edge_emu_buffer + 16;
1127 /* if we deal with intensity compensation we need to scale source blocks */
1128 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1130 uint8_t *src, *src2;
1134 for (j = 0; j < 5; j++) {
1135 for (i = 0; i < 5; i++) {
1136 src[i] = v->lutuv[src[i]];
1137 src2[i] = v->lutuv[src2[i]];
/* doubled stride: rows of one field are interleaved with the other */
1139 src += s->uvlinesize << 1;
1140 src2 += s->uvlinesize << 1;
/* 4x4 chroma interpolation; stride doubled when using field MVs */
1145 dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1146 dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
/* no-rounding interpolation variant for the alternate rounding mode */
1148 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1149 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1154 /***********************************************************************/
1156 * @name VC-1 Block-level functions
1157 * @see 7.1.4, p91 and 8.1.1.7, p104
1163 * @brief Get macroblock-level quantizer scale
/* Read the macroblock-level quantizer scale (MQUANT) into `mquant`.
 * Behaviour depends on v->dqprofile:
 *  - DQPROFILE_ALL_MBS: per-MB bilevel flag selecting altpq/pq, or an
 *    explicit 3-bit delta / 5-bit absolute value (zero is an error);
 *  - SINGLE/DOUBLE/FOUR edges: compute an edge mask from v->dqsbedge and
 *    switch to v->altpq on the matching picture edges.
 * Expects locals mquant, mqdiff, edges and GetBitContext *gb in scope. */
1165 #define GET_MQUANT() \
1166 if (v->dquantfrm) { \
1168 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1169 if (v->dqbilevel) { \
1170 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1172 mqdiff = get_bits(gb, 3); \
1174 mquant = v->pq + mqdiff; \
1176 mquant = get_bits(gb, 5); \
1178 av_log(v->s.avctx,AV_LOG_ERROR, "zero mquant\n"); \
1183 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1184 edges = 1 << v->dqsbedge; \
1185 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1186 edges = (3 << v->dqsbedge) % 15; \
1187 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1189 if ((edges&1) && !s->mb_x) \
1190 mquant = v->altpq; \
1191 if ((edges&2) && s->first_slice_line) \
1192 mquant = v->altpq; \
1193 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1194 mquant = v->altpq; \
1195 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1196 mquant = v->altpq; \
1200 * @def GET_MVDATA(_dmv_x, _dmv_y)
1201 * @brief Get MV differentials
1202 * @see MVDATA decoding from 8.3.5.2, p120
1203 * @param _dmv_x Horizontal differential for decoded MV
1204 * @param _dmv_y Vertical differential for decoded MV
/* Decode one MVDATA element into the MV differentials (_dmv_x, _dmv_y).
 * index 35 is the escape code (raw k_x-1 / k_y-1 bit values); otherwise
 * the index is split (mod/div 6) into per-component table entries whose
 * magnitudes come from size_table/offset_table plus a trailing sign bit.
 * Also sets mb_has_coeffs.  Expects locals index, index1, val, sign and
 * GetBitContext *gb in scope. */
1206 #define GET_MVDATA(_dmv_x, _dmv_y) \
1207 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1208 VC1_MV_DIFF_VLC_BITS, 2); \
1210 mb_has_coeffs = 1; \
1213 mb_has_coeffs = 0; \
1216 _dmv_x = _dmv_y = 0; \
1217 } else if (index == 35) { \
1218 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1219 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1220 } else if (index == 36) { \
1225 index1 = index % 6; \
1226 if (!s->quarter_sample && index1 == 5) val = 1; \
1228 if (size_table[index1] - val > 0) \
1229 val = get_bits(gb, size_table[index1] - val); \
1231 sign = 0 - (val&1); \
1232 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1234 index1 = index / 6; \
1235 if (!s->quarter_sample && index1 == 5) val = 1; \
1237 if (size_table[index1] - val > 0) \
1238 val = get_bits(gb, size_table[index1] - val); \
1240 sign = 0 - (val & 1); \
1241 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
/* Decode MV differentials for interlaced field pictures.
 * Chooses the 1-ref or 2-ref VLC table (based on v->numref), applies the
 * DMVRANGE extension flags, and decodes each component from offset_table1/2.
 * In two-reference mode the reference-field selector is returned through
 * *pred_flag (LSB folded out of the raw dmv_y in the escape path, or taken
 * from index1 in the table path). */
1244 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1245 int *dmv_y, int *pred_flag)
1248 int extend_x = 0, extend_y = 0;
1249 GetBitContext *gb = &v->s.gb;
1252 const int* offs_tab;
1255 bits = VC1_2REF_MVDATA_VLC_BITS;
1258 bits = VC1_1REF_MVDATA_VLC_BITS;
/* DMVRANGE selects which components get an extra magnitude bit */
1261 switch (v->dmvrange) {
1269 extend_x = extend_y = 1;
1272 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
/* escape code: raw fixed-length components */
1274 *dmv_x = get_bits(gb, v->k_x);
1275 *dmv_y = get_bits(gb, v->k_y);
/* 2-ref mode: LSB of raw dmv_y is the reference-field flag */
1277 *pred_flag = *dmv_y & 1;
1278 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1283 offs_tab = offset_table2;
1285 offs_tab = offset_table1;
/* horizontal component: magnitude bits + sign folded via XOR */
1286 index1 = (index + 1) % 9;
1288 val = get_bits(gb, index1 + extend_x);
1289 sign = 0 -(val & 1);
1290 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1294 offs_tab = offset_table2;
1296 offs_tab = offset_table1;
/* vertical component (index scaled by numref in 2-ref mode) */
1297 index1 = (index + 1) / 9;
1298 if (index1 > v->numref) {
1299 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1300 sign = 0 - (val & 1);
1301 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1305 *pred_flag = index1 & 1;
/* Scale the horizontal component of a same-field MV predictor taken from
 * the opposite field, using the refdist-indexed spec scaling tables
 * (ff_vc1_field_mvpred_scales rows 1,2: factors; 3: zone boundary;
 * 5: zone-1 offset).  Result is clipped to the horizontal MV range. */
1309 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1311 int scaledvalue, refdist;
1312 int scalesame1, scalesame2;
1313 int scalezone1_x, zone1offset_x;
/* table row pair depends on direction vs. current field parity */
1314 int table_index = dir ^ v->second_field;
1316 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1317 refdist = v->refdist;
1319 refdist = dir ? v->brfd : v->frfd;
1322 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1323 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1324 scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1325 zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
/* small MVs use factor 1; larger ones factor 2 plus a zone offset */
1330 if (FFABS(n) < scalezone1_x)
1331 scaledvalue = (n * scalesame1) >> 8;
1334 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1336 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1339 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/* Scale the vertical component of a same-field MV predictor taken from the
 * opposite field (table rows 4 and 6 hold the vertical zone boundary and
 * offset).  The clip range is shifted by one when a bottom field predicts
 * from a top-field reference. */
1342 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1344 int scaledvalue, refdist;
1345 int scalesame1, scalesame2;
1346 int scalezone1_y, zone1offset_y;
1347 int table_index = dir ^ v->second_field;
1349 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1350 refdist = v->refdist;
1352 refdist = dir ? v->brfd : v->frfd;
1355 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1356 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1357 scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1358 zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1363 if (FFABS(n) < scalezone1_y)
1364 scaledvalue = (n * scalesame1) >> 8;
1367 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1369 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
/* asymmetric vertical clip when field parities differ */
1373 if (v->cur_field_type && !v->ref_field_type[dir])
1374 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1376 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/* Scale the horizontal component of an opposite-field MV predictor for
 * B-field pictures, using the brfd-indexed B-field scaling tables
 * (ff_vc1_b_field_mvpred_scales). */
1379 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1381 int scalezone1_x, zone1offset_x;
1382 int scaleopp1, scaleopp2, brfd;
/* table index is the backward reference frame distance, capped at 3 */
1385 brfd = FFMIN(v->brfd, 3);
1386 scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1387 zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1388 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1389 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1394 if (FFABS(n) < scalezone1_x)
1395 scaledvalue = (n * scaleopp1) >> 8;
1398 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1400 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1403 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/* Scale the vertical component of an opposite-field MV predictor for
 * B-field pictures (rows 4/6 of the B-field tables hold the vertical zone
 * boundary and offset); clip range shifts with field parity as in
 * scaleforsame_y(). */
1406 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1408 int scalezone1_y, zone1offset_y;
1409 int scaleopp1, scaleopp2, brfd;
1412 brfd = FFMIN(v->brfd, 3);
1413 scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1414 zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1415 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1416 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1421 if (FFABS(n) < scalezone1_y)
1422 scaledvalue = (n * scaleopp1) >> 8;
1425 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1427 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1430 if (v->cur_field_type && !v->ref_field_type[dir]) {
1431 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1433 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/* Dispatch same-field predictor scaling: non-B (or second-field/forward)
 * pictures use the per-component scaleforsame_x/y helpers; otherwise a
 * single B-field factor (table row 0) is applied.  hpel doubles the result
 * for half-pel pictures. */
1437 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1440 int brfd, scalesame;
1441 int hpel = 1 - v->s.quarter_sample;
1444 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1446 n = scaleforsame_y(v, i, n, dir) << hpel;
1448 n = scaleforsame_x(v, n, dir) << hpel;
1451 brfd = FFMIN(v->brfd, 3);
1452 scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1454 n = (n * scalesame >> 8) << hpel;
/* Dispatch opposite-field predictor scaling: first-field backward B
 * prediction uses the per-component scaleforopp_x/y helpers; otherwise a
 * single refdist-indexed factor (field table row 0) is applied, doubled
 * for half-pel pictures. */
1458 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1461 int refdist, scaleopp;
1462 int hpel = 1 - v->s.quarter_sample;
1465 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1467 n = scaleforopp_y(v, n, dir) << hpel;
1469 n = scaleforopp_x(v, n) << hpel;
1472 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1473 refdist = FFMIN(v->refdist, 3);
1475 refdist = dir ? v->brfd : v->frfd;
1476 scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1478 n = (n * scaleopp >> 8) << hpel;
1482 /** Predict and set motion vector
/* Predict and set the motion vector for luma block n.
 * Gathers the A (above), B (above-corner) and C (left) candidate
 * predictors, scales them between fields when in field mode, takes the
 * component-wise median, pulls the MV back into the picture (8.3.5.3.4),
 * optionally applies hybrid prediction (8.3.5.3.5), adds the decoded
 * differential and stores the result with signed-modulus wrapping.
 * @param n     luma block index (0..3); with mv1 the MV is duplicated
 *              to all four blocks of the MB
 * @param mv1   nonzero for 1-MV macroblocks
 * @param r_x, r_y  MV range for the signed-modulus store
 * @param pred_flag reference-field selector (field mode)
 * @param dir   0 = forward, 1 = backward prediction */
1484 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1485 int mv1, int r_x, int r_y, uint8_t* is_intra,
1486 int pred_flag, int dir)
1488 MpegEncContext *s = &v->s;
1489 int xy, wrap, off = 0;
1493 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1494 int opposit, a_f, b_f, c_f;
1495 int16_t field_predA[2];
1496 int16_t field_predB[2];
1497 int16_t field_predC[2];
1498 int a_valid, b_valid, c_valid;
1499 int hybridmv_thresh, y_bias = 0;
1501 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1502 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1506 /* scale MV difference to be quad-pel */
1507 dmv_x <<= 1 - s->quarter_sample;
1508 dmv_y <<= 1 - s->quarter_sample;
1510 wrap = s->b8_stride;
1511 xy = s->block_index[n];
/* intra block: zero all stored MVs and return */
1514 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
1515 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
1516 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
1517 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
1518 if (mv1) { /* duplicate motion data for 1-MV block */
1519 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1520 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1521 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1522 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1523 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1524 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1525 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1526 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1527 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
/* NOTE(review): the next line indexes [xy + wrap] without "+ v->blocks_off",
 * unlike every sibling assignment here — looks like a missing offset; verify
 * against the reference implementation before changing */
1528 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1529 s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1530 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1531 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
/* candidate predictors: C = left neighbour, A = above neighbour */
1536 C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
1537 A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
1539 if (v->field_mode && mixedmv_pic)
1540 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1542 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1544 //in 4-MV mode different blocks have different B predictor position
1547 off = (s->mb_x > 0) ? -1 : 1;
1550 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1559 B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
/* validity: A/B need a row above, C needs a block to the left */
1561 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1562 b_valid = a_valid && (s->mb_width > 1);
1563 c_valid = s->mb_x || (n == 1 || n == 3);
1564 if (v->field_mode) {
1565 a_valid = a_valid && !is_intra[xy - wrap];
1566 b_valid = b_valid && !is_intra[xy - wrap + off];
1567 c_valid = c_valid && !is_intra[xy - 1];
/* count how many valid predictors reference same vs. opposite field */
1571 a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1572 num_oppfield += a_f;
1573 num_samefield += 1 - a_f;
1574 field_predA[0] = A[0];
1575 field_predA[1] = A[1];
1577 field_predA[0] = field_predA[1] = 0;
1581 b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1582 num_oppfield += b_f;
1583 num_samefield += 1 - b_f;
1584 field_predB[0] = B[0];
1585 field_predB[1] = B[1];
1587 field_predB[0] = field_predB[1] = 0;
1591 c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1592 num_oppfield += c_f;
1593 num_samefield += 1 - c_f;
1594 field_predC[0] = C[0];
1595 field_predC[1] = C[1];
1597 field_predC[0] = field_predC[1] = 0;
/* decide which field the prediction references (majority + pred_flag) */
1601 if (v->field_mode) {
1602 if (num_samefield <= num_oppfield)
1603 opposit = 1 - pred_flag;
1605 opposit = pred_flag;
/* opposite-field prediction: rescale same-field candidates */
1609 if (a_valid && !a_f) {
1610 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1611 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1613 if (b_valid && !b_f) {
1614 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1615 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1617 if (c_valid && !c_f) {
1618 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1619 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1621 v->mv_f[dir][xy + v->blocks_off] = 1;
1622 v->ref_field_type[dir] = !v->cur_field_type;
/* same-field prediction: rescale opposite-field candidates */
1624 if (a_valid && a_f) {
1625 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1626 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1628 if (b_valid && b_f) {
1629 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1630 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1632 if (c_valid && c_f) {
1633 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1634 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1636 v->mv_f[dir][xy + v->blocks_off] = 0;
1637 v->ref_field_type[dir] = v->cur_field_type;
/* single-candidate fallbacks, in A > C > B priority */
1641 px = field_predA[0];
1642 py = field_predA[1];
1643 } else if (c_valid) {
1644 px = field_predC[0];
1645 py = field_predC[1];
1646 } else if (b_valid) {
1647 px = field_predB[0];
1648 py = field_predB[1];
/* two or more candidates: component-wise median */
1654 if (num_samefield + num_oppfield > 1) {
1655 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1656 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1659 /* Pullback MV as specified in 8.3.5.3.4 */
1660 if (!v->field_mode) {
1662 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1663 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1664 X = (s->mb_width << 6) - 4;
1665 Y = (s->mb_height << 6) - 4;
1667 if (qx + px < -60) px = -60 - qx;
1668 if (qy + py < -60) py = -60 - qy;
1670 if (qx + px < -28) px = -28 - qx;
1671 if (qy + py < -28) py = -28 - qy;
1673 if (qx + px > X) px = X - qx;
1674 if (qy + py > Y) py = Y - qy;
1677 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1678 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1679 hybridmv_thresh = 32;
1680 if (a_valid && c_valid) {
1681 if (is_intra[xy - wrap])
1682 sum = FFABS(px) + FFABS(py);
1684 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
/* predictors disagree: one explicit bit selects A or C */
1685 if (sum > hybridmv_thresh) {
1686 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1687 px = field_predA[0];
1688 py = field_predA[1];
1690 px = field_predC[0];
1691 py = field_predC[1];
1694 if (is_intra[xy - 1])
1695 sum = FFABS(px) + FFABS(py);
1697 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1698 if (sum > hybridmv_thresh) {
1699 if (get_bits1(&s->gb)) {
1700 px = field_predA[0];
1701 py = field_predA[1];
1703 px = field_predC[0];
1704 py = field_predC[1];
1711 if (v->field_mode && !s->quarter_sample) {
1715 if (v->field_mode && v->numref)
1717 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1719 /* store MV using signed modulus of MV range defined in 4.11 */
1720 s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1721 s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1722 if (mv1) { /* duplicate motion data for 1-MV block */
1723 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1724 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1725 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1726 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1727 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1728 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1729 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1730 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1734 /** Predict and set motion vector for interlaced frame picture MBs
/* Predict and set the motion vector for block n of an interlaced FRAME
 * picture macroblock.  Candidate predictors A (left), B (above) and C
 * (above-right, or above-left at the right picture edge) are gathered —
 * averaging the two field MVs of a neighbour when the current block uses a
 * frame MV and the neighbour uses field MVs — then combined by median or
 * same/opposite-field majority, and stored with signed-modulus wrapping.
 * @param mvn  1: duplicate MV to the whole MB; 2: duplicate per field pair */
1736 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1737 int mvn, int r_x, int r_y, uint8_t* is_intra)
1739 MpegEncContext *s = &v->s;
1740 int xy, wrap, off = 0;
1741 int A[2], B[2], C[2];
1743 int a_valid = 0, b_valid = 0, c_valid = 0;
1744 int field_a, field_b, field_c; // 0: same, 1: opposite
1745 int total_valid, num_samefield, num_oppfield;
1746 int pos_c, pos_b, n_adj;
1748 wrap = s->b8_stride;
1749 xy = s->block_index[n];
/* intra block: zero both directions' stored MVs and return */
1752 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
1753 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
1754 s->current_picture.f.motion_val[1][xy][0] = 0;
1755 s->current_picture.f.motion_val[1][xy][1] = 0;
1756 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1757 s->current_picture.f.motion_val[0][xy + 1][0] = 0;
1758 s->current_picture.f.motion_val[0][xy + 1][1] = 0;
1759 s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
1760 s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
1761 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
1762 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
1763 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1764 s->current_picture.f.motion_val[1][xy + 1][0] = 0;
1765 s->current_picture.f.motion_val[1][xy + 1][1] = 0;
1766 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1767 s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
1768 s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
1769 s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
/* Predict A (left neighbour); off points at the other-field row */
1774 off = ((n == 0) || (n == 1)) ? 1 : -1;
1776 if (s->mb_x || (n == 1) || (n == 3)) {
1777 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1778 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1779 A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
1780 A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
1782 } else { // current block has frame mv and cand. has field MV (so average)
1783 A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
1784 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
1785 A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
1786 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
1789 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1795 /* Predict B and C */
1796 B[0] = B[1] = C[0] = C[1] = 0;
1797 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1798 if (!s->first_slice_line) {
1799 if (!v->is_intra[s->mb_x - s->mb_stride]) {
1802 pos_b = s->block_index[n_adj] - 2 * wrap;
1803 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1804 n_adj = (n & 2) | (n & 1);
1806 B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
1807 B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
/* frame-MV current block above field-MV neighbour: average both fields */
1808 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1809 B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1810 B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
/* C: above-right neighbour when available */
1813 if (s->mb_width > 1) {
1814 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1817 pos_c = s->block_index[2] - 2 * wrap + 2;
1818 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1821 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
1822 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
1823 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1824 C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1825 C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
/* right picture edge: take the above-left neighbour instead */
1827 if (s->mb_x == s->mb_width - 1) {
1828 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1831 pos_c = s->block_index[3] - 2 * wrap - 2;
1832 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1835 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
1836 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
1837 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1838 C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1839 C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
/* bottom blocks of a field-MV MB take B/C from the MB's own top blocks */
1848 pos_b = s->block_index[1];
1850 B[0] = s->current_picture.f.motion_val[0][pos_b][0];
1851 B[1] = s->current_picture.f.motion_val[0][pos_b][1];
1852 pos_c = s->block_index[0];
1854 C[0] = s->current_picture.f.motion_val[0][pos_c][0];
1855 C[1] = s->current_picture.f.motion_val[0][pos_c][1];
1858 total_valid = a_valid + b_valid + c_valid;
1859 // check if predictor A is out of bounds
1860 if (!s->mb_x && !(n == 1 || n == 3)) {
1863 // check if predictor B is out of bounds
1864 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1865 B[0] = B[1] = C[0] = C[1] = 0;
1867 if (!v->blk_mv_type[xy]) {
1868 if (s->mb_width == 1) {
/* frame-MV block: plain median / single-candidate selection */
1872 if (total_valid >= 2) {
1873 px = mid_pred(A[0], B[0], C[0]);
1874 py = mid_pred(A[1], B[1], C[1]);
1875 } else if (total_valid) {
1876 if (a_valid) { px = A[0]; py = A[1]; }
1877 if (b_valid) { px = B[0]; py = B[1]; }
1878 if (c_valid) { px = C[0]; py = C[1]; }
/* field-MV block: classify candidates by field parity (bit 2 of the
 * vertical component distinguishes same/opposite field) */
1884 field_a = (A[1] & 4) ? 1 : 0;
1888 field_b = (B[1] & 4) ? 1 : 0;
1892 field_c = (C[1] & 4) ? 1 : 0;
1896 num_oppfield = field_a + field_b + field_c;
1897 num_samefield = total_valid - num_oppfield;
1898 if (total_valid == 3) {
1899 if ((num_samefield == 3) || (num_oppfield == 3)) {
1900 px = mid_pred(A[0], B[0], C[0]);
1901 py = mid_pred(A[1], B[1], C[1]);
1902 } else if (num_samefield >= num_oppfield) {
1903 /* take one MV from same field set depending on priority
1904 the check for B may not be necessary */
1905 px = !field_a ? A[0] : B[0];
1906 py = !field_a ? A[1] : B[1];
1908 px = field_a ? A[0] : B[0];
1909 py = field_a ? A[1] : B[1];
1911 } else if (total_valid == 2) {
/* prefer the majority-field candidate, in A > B > C priority */
1912 if (num_samefield >= num_oppfield) {
1913 if (!field_a && a_valid) {
1916 } else if (!field_b && b_valid) {
1919 } else if (c_valid) {
1924 if (field_a && a_valid) {
1927 } else if (field_b && b_valid) {
1930 } else if (c_valid) {
1935 } else if (total_valid == 1) {
1936 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1937 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1942 /* store MV using signed modulus of MV range defined in 4.11 */
1943 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1944 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1945 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1946 s->current_picture.f.motion_val[0][xy + 1 ][0] = s->current_picture.f.motion_val[0][xy][0];
1947 s->current_picture.f.motion_val[0][xy + 1 ][1] = s->current_picture.f.motion_val[0][xy][1];
1948 s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
1949 s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
1950 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1951 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1952 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1953 s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1954 s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1955 s->mv[0][n + 1][0] = s->mv[0][n][0];
1956 s->mv[0][n + 1][1] = s->mv[0][n][1];
1960 /** Motion compensation for direct or interpolated blocks in B-frames
1962 static void vc1_interp_mc(VC1Context *v)
1964 MpegEncContext *s = &v->s;
1965 DSPContext *dsp = &v->s.dsp;
1966 uint8_t *srcY, *srcU, *srcV;
1967 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1969 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1971 if (!v->field_mode && !v->s.next_picture.f.data[0])
1974 mx = s->mv[1][0][0];
1975 my = s->mv[1][0][1];
1976 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1977 uvmy = (my + ((my & 3) == 3)) >> 1;
1978 if (v->field_mode) {
1979 if (v->cur_field_type != v->ref_field_type[1])
1980 my = my - 2 + 4 * v->cur_field_type;
1981 uvmy = uvmy - 2 + 4 * v->cur_field_type;
1984 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1985 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1987 srcY = s->next_picture.f.data[0];
1988 srcU = s->next_picture.f.data[1];
1989 srcV = s->next_picture.f.data[2];
1991 src_x = s->mb_x * 16 + (mx >> 2);
1992 src_y = s->mb_y * 16 + (my >> 2);
1993 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1994 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1996 if (v->profile != PROFILE_ADVANCED) {
1997 src_x = av_clip( src_x, -16, s->mb_width * 16);
1998 src_y = av_clip( src_y, -16, s->mb_height * 16);
1999 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
2000 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
2002 src_x = av_clip( src_x, -17, s->avctx->coded_width);
2003 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
2004 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
2005 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
2008 srcY += src_y * s->linesize + src_x;
2009 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
2010 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
2012 if (v->field_mode && v->ref_field_type[1]) {
2013 srcY += s->current_picture_ptr->f.linesize[0];
2014 srcU += s->current_picture_ptr->f.linesize[1];
2015 srcV += s->current_picture_ptr->f.linesize[2];
2018 /* for grayscale we should not try to read from unknown area */
2019 if (s->flags & CODEC_FLAG_GRAY) {
2020 srcU = s->edge_emu_buffer + 18 * s->linesize;
2021 srcV = s->edge_emu_buffer + 18 * s->linesize;
2024 if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22
2025 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 16 - s->mspel * 3
2026 || (unsigned)(src_y - s->mspel) > v_edge_pos - (my & 3) - 16 - s->mspel * 3) {
2027 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
2029 srcY -= s->mspel * (1 + s->linesize);
2030 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
2031 17 + s->mspel * 2, 17 + s->mspel * 2,
2032 src_x - s->mspel, src_y - s->mspel,
2033 s->h_edge_pos, v_edge_pos);
2034 srcY = s->edge_emu_buffer;
2035 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
2036 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2037 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
2038 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
2041 /* if we deal with range reduction we need to scale source blocks */
2042 if (v->rangeredfrm) {
2044 uint8_t *src, *src2;
2047 for (j = 0; j < 17 + s->mspel * 2; j++) {
2048 for (i = 0; i < 17 + s->mspel * 2; i++)
2049 src[i] = ((src[i] - 128) >> 1) + 128;
2054 for (j = 0; j < 9; j++) {
2055 for (i = 0; i < 9; i++) {
2056 src[i] = ((src[i] - 128) >> 1) + 128;
2057 src2[i] = ((src2[i] - 128) >> 1) + 128;
2059 src += s->uvlinesize;
2060 src2 += s->uvlinesize;
2063 srcY += s->mspel * (1 + s->linesize);
2066 if (v->field_mode && v->second_field) {
2067 off = s->current_picture_ptr->f.linesize[0];
2068 off_uv = s->current_picture_ptr->f.linesize[1];
2075 dxy = ((my & 3) << 2) | (mx & 3);
2076 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2077 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2078 srcY += s->linesize * 8;
2079 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2080 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2082 dxy = (my & 2) | ((mx & 2) >> 1);
2085 dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2087 dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2090 if (s->flags & CODEC_FLAG_GRAY) return;
2091 /* Chroma MC always uses qpel blilinear */
2092 uvmx = (uvmx & 3) << 1;
2093 uvmy = (uvmy & 3) << 1;
2095 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2096 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2098 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2099 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Scale a co-located next-picture MV by the B-frame fraction (direct-mode
 * MV derivation).  'inv' apparently selects forward vs backward scaling and
 * 'qs' quarter-sample precision — TODO confirm, the branch structure is
 * partly elided here.
 * NOTE(review): the source text has gaps (embedded original line numbers
 * jump), so parts of the #if/#else bodies are not visible; code below is
 * kept byte-identical. */
2103 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
/* Fast path when the fraction denominator is 256: shifts replace divides. */
2107 #if B_FRACTION_DEN==256
2111 return 2 * ((value * n + 255) >> 9);
2112 return (value * n + 128) >> 8;
2115 n -= B_FRACTION_DEN;
/* Generic path: rounded division by B_FRACTION_DEN (ceil for the 2x form,
 * round-to-nearest for the plain form). */
2117 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2118 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
/* Variant of scale_mv() used for interlaced-field pictures; takes the
 * current and the last quarter-sample flags ('qs', 'qs_last').
 * NOTE(review): the selection logic between the two returns is elided in
 * this extract — only the B_FRACTION_DEN==256 style arithmetic is visible. */
2122 static av_always_inline int scale_mv_intfi(int value, int bfrac, int inv,
2123 int qs, int qs_last)
2131 return (value * n + 255) >> 9;
2133 return (value * n + 128) >> 8;
2136 /** Reconstruct motion vector for B-frame and do motion compensation
 * @param v      VC1 decoder context
 * @param dmv_x  per-direction differential MV, x components
 * @param dmv_y  per-direction differential MV, y components
 * @param direct direct-mode flag
 * @param mode   BMV_TYPE_* motion type
 *
 * NOTE(review): intensity compensation handling: mv_mode is saved into
 * mv_mode2 and temporarily forced to MV_PMODE_INTENSITY_COMP, then restored
 * after each MC call.  Several branch headers are elided in this extract.
 */
2138 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2139 int direct, int mode)
2142 v->mv_mode2 = v->mv_mode;
2143 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2149 v->mv_mode = v->mv_mode2;
2152 if (mode == BMV_TYPE_INTERPOLATED) {
2156 v->mv_mode = v->mv_mode2;
/* For backward prediction with intensity compensation, restore the real
 * mv_mode before the single-MV motion compensation call. */
2160 if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2161 v->mv_mode = v->mv_mode2;
2162 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2164 v->mv_mode = v->mv_mode2;
/* Predict forward and backward motion vectors for a B-frame macroblock.
 * Direct mode scales the co-located next-picture MVs by the B fraction;
 * otherwise a median predictor from neighbours A (top), B (top-right/left)
 * and C (left) is formed per direction, pulled back into the picture, and
 * combined with the decoded MV differential under signed-modulus MV range.
 * NOTE(review): this extract has elided lines (embedded line numbers jump),
 * so some branch headers and closing braces are not visible; code is kept
 * byte-identical. */
2167 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2168 int direct, int mvtype)
2170 MpegEncContext *s = &v->s;
2171 int xy, wrap, off = 0;
2176 const uint8_t *is_intra = v->mb_type[0];
2180 /* scale MV difference to be quad-pel */
2181 dmv_x[0] <<= 1 - s->quarter_sample;
2182 dmv_y[0] <<= 1 - s->quarter_sample;
2183 dmv_x[1] <<= 1 - s->quarter_sample;
2184 dmv_y[1] <<= 1 - s->quarter_sample;
2186 wrap = s->b8_stride;
2187 xy = s->block_index[0];
/* Zero both direction MVs in the picture-level motion_val arrays first. */
2190 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
2191 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
2192 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
2193 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
2196 if (!v->field_mode) {
/* Direct mode: derive both MVs by scaling the co-located backward MV of
 * the next picture with the B fraction (forward: inv=0, backward: inv=1). */
2197 s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2198 s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2199 s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2200 s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2202 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2203 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2204 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2205 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2206 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2209 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2210 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2211 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2212 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
/* ---- forward (list 0) prediction ---- */
2216 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2217 C = s->current_picture.f.motion_val[0][xy - 2];
2218 A = s->current_picture.f.motion_val[0][xy - wrap * 2];
/* B is top-right normally, top-left in the last macroblock column. */
2219 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2220 B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
2222 if (!s->mb_x) C[0] = C[1] = 0;
2223 if (!s->first_slice_line) { // predictor A is not out of bounds
2224 if (s->mb_width == 1) {
2228 px = mid_pred(A[0], B[0], C[0]);
2229 py = mid_pred(A[1], B[1], C[1]);
2231 } else if (s->mb_x) { // predictor C is not out of bounds
2237 /* Pullback MV as specified in 8.3.5.3.4 */
2240 if (v->profile < PROFILE_ADVANCED) {
2241 qx = (s->mb_x << 5);
2242 qy = (s->mb_y << 5);
2243 X = (s->mb_width << 5) - 4;
2244 Y = (s->mb_height << 5) - 4;
2245 if (qx + px < -28) px = -28 - qx;
2246 if (qy + py < -28) py = -28 - qy;
2247 if (qx + px > X) px = X - qx;
2248 if (qy + py > Y) py = Y - qy;
2250 qx = (s->mb_x << 6);
2251 qy = (s->mb_y << 6);
2252 X = (s->mb_width << 6) - 4;
2253 Y = (s->mb_height << 6) - 4;
2254 if (qx + px < -60) px = -60 - qx;
2255 if (qy + py < -60) py = -60 - qy;
2256 if (qx + px > X) px = X - qx;
2257 if (qy + py > Y) py = Y - qy;
2260 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): the `0 &&` deliberately disables the hybrid-prediction
 * branch; the dead code is kept for reference. */
2261 if (0 && !s->first_slice_line && s->mb_x) {
2262 if (is_intra[xy - wrap])
2263 sum = FFABS(px) + FFABS(py);
2265 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2267 if (get_bits1(&s->gb)) {
2275 if (is_intra[xy - 2])
2276 sum = FFABS(px) + FFABS(py);
2278 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2280 if (get_bits1(&s->gb)) {
2290 /* store MV using signed modulus of MV range defined in 4.11 */
2291 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2292 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* ---- backward (list 1) prediction, mirrors the forward path ---- */
2294 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2295 C = s->current_picture.f.motion_val[1][xy - 2];
2296 A = s->current_picture.f.motion_val[1][xy - wrap * 2];
2297 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2298 B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
2302 if (!s->first_slice_line) { // predictor A is not out of bounds
2303 if (s->mb_width == 1) {
2307 px = mid_pred(A[0], B[0], C[0]);
2308 py = mid_pred(A[1], B[1], C[1]);
2310 } else if (s->mb_x) { // predictor C is not out of bounds
2316 /* Pullback MV as specified in 8.3.5.3.4 */
2319 if (v->profile < PROFILE_ADVANCED) {
2320 qx = (s->mb_x << 5);
2321 qy = (s->mb_y << 5);
2322 X = (s->mb_width << 5) - 4;
2323 Y = (s->mb_height << 5) - 4;
2324 if (qx + px < -28) px = -28 - qx;
2325 if (qy + py < -28) py = -28 - qy;
2326 if (qx + px > X) px = X - qx;
2327 if (qy + py > Y) py = Y - qy;
2329 qx = (s->mb_x << 6);
2330 qy = (s->mb_y << 6);
2331 X = (s->mb_width << 6) - 4;
2332 Y = (s->mb_height << 6) - 4;
2333 if (qx + px < -60) px = -60 - qx;
2334 if (qy + py < -60) py = -60 - qy;
2335 if (qx + px > X) px = X - qx;
2336 if (qy + py > Y) py = Y - qy;
2339 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): same intentionally disabled (`0 &&`) hybrid branch as above. */
2340 if (0 && !s->first_slice_line && s->mb_x) {
2341 if (is_intra[xy - wrap])
2342 sum = FFABS(px) + FFABS(py);
2344 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2346 if (get_bits1(&s->gb)) {
2354 if (is_intra[xy - 2])
2355 sum = FFABS(px) + FFABS(py);
2357 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2359 if (get_bits1(&s->gb)) {
2369 /* store MV using signed modulus of MV range defined in 4.11 */
2371 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2372 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
/* Publish the final MVs for both directions. */
2374 s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
2375 s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
2376 s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
2377 s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
/* Predict B-frame MVs for interlaced-field pictures.  Direct mode scales
 * co-located MVs with scale_mv_intfi() and derives the reference field
 * parity 'f' by majority vote over the four 8x8 blocks; other modes defer
 * to vc1_pred_mv() per direction.
 * NOTE(review): extract has elided lines; code kept byte-identical. */
2380 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2382 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2383 MpegEncContext *s = &v->s;
2384 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2386 if (v->bmvtype == BMV_TYPE_DIRECT) {
2387 int total_opp, k, f;
2388 if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2389 s->mv[0][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2390 v->bfraction, 0, s->quarter_sample, v->qs_last);
2391 s->mv[0][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2392 v->bfraction, 0, s->quarter_sample, v->qs_last);
2393 s->mv[1][0][0] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2394 v->bfraction, 1, s->quarter_sample, v->qs_last);
2395 s->mv[1][0][1] = scale_mv_intfi(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2396 v->bfraction, 1, s->quarter_sample, v->qs_last);
/* Majority vote of the four blocks' field flags in the next picture. */
2398 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2399 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2400 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2401 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2402 f = (total_opp > 2) ? 1 : 0;
/* Co-located MB was intra: fall back to zero MVs. */
2404 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2405 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2408 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2409 for (k = 0; k < 4; k++) {
2410 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2411 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2412 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2413 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2414 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2415 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2419 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2420 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2421 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2424 if (dir) { // backward
2425 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
/* Also refresh the opposite direction once per MB (block 3 or 1MV mode). */
2426 if (n == 3 || mv1) {
2427 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2430 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2431 if (n == 3 || mv1) {
2432 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2437 /** Get predicted DC value for I-frames only
2438 * prediction dir: left=0, top=1
2439 * @param s MpegEncContext
2440 * @param overlap flag indicating that overlap filtering is used
2441 * @param pq integer part of picture quantizer
2442 * @param[in] n block index in the current MB
2443 * @param dc_val_ptr Pointer to DC predictor
2444 * @param dir_ptr Prediction direction for use in AC prediction
 * @return the predicted DC value (return statement elided in this extract)
 *
 * NOTE(review): chooses between left (c) and top (a) DC predictors based on
 * which differs least from the top-left (b); out-of-picture neighbours are
 * replaced by dcpred[scale] when pq<9 or overlap is off.
 */
2446 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2447 int16_t **dc_val_ptr, int *dir_ptr)
2449 int a, b, c, wrap, pred, scale;
/* dcpred[q] approximates 1024/q; index 0 is unused (marked -1). */
2451 static const uint16_t dcpred[32] = {
2452 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2453 114, 102, 93, 85, 79, 73, 68, 64,
2454 60, 57, 54, 51, 49, 47, 45, 43,
2455 41, 39, 38, 37, 35, 34, 33
2458 /* find prediction - wmv3_dc_scale always used here in fact */
2459 if (n < 4) scale = s->y_dc_scale;
2460 else scale = s->c_dc_scale;
2462 wrap = s->block_wrap[n];
2463 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour DC, a = top neighbour DC. */
2469 b = dc_val[ - 1 - wrap];
2470 a = dc_val[ - wrap];
2472 if (pq < 9 || !overlap) {
2473 /* Set outer values */
2474 if (s->first_slice_line && (n != 2 && n != 3))
2475 b = a = dcpred[scale];
2476 if (s->mb_x == 0 && (n != 1 && n != 3))
2477 b = c = dcpred[scale];
2479 /* Set outer values */
2480 if (s->first_slice_line && (n != 2 && n != 3))
2482 if (s->mb_x == 0 && (n != 1 && n != 3))
2486 if (abs(a - b) <= abs(b - c)) {
2488 *dir_ptr = 1; // left
2491 *dir_ptr = 0; // top
2494 /* update predictor */
2495 *dc_val_ptr = &dc_val[0];
2500 /** Get predicted DC value
2501 * prediction dir: left=0, top=1
2502 * @param s MpegEncContext
2503 * @param overlap flag indicating that overlap filtering is used
2504 * @param pq integer part of picture quantizer
2505 * @param[in] n block index in the current MB
2506 * @param a_avail flag indicating top block availability
2507 * @param c_avail flag indicating left block availability
2508 * @param dc_val_ptr Pointer to DC predictor
2509 * @param dir_ptr Prediction direction for use in AC prediction
 * @return the predicted DC value (return statement elided in this extract)
 *
 * NOTE(review): like vc1_i_pred_dc() but for inter pictures: neighbour DCs
 * are rescaled when the neighbouring MB used a different quantizer (q2).
 */
2511 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2512 int a_avail, int c_avail,
2513 int16_t **dc_val_ptr, int *dir_ptr)
2515 int a, b, c, wrap, pred;
2517 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2520 wrap = s->block_wrap[n];
2521 dc_val = s->dc_val[0] + s->block_index[n];
2527 b = dc_val[ - 1 - wrap];
2528 a = dc_val[ - wrap];
2529 /* scale predictors if needed */
2530 q1 = s->current_picture.f.qscale_table[mb_pos];
/* Left neighbour (c): rescale from its quantizer q2 to the current q1
 * using the fixed-point 1/x table ff_vc1_dqscale (18-bit fraction). */
2531 if (c_avail && (n != 1 && n != 3)) {
2532 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2534 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2536 if (a_avail && (n != 2 && n != 3)) {
2537 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2539 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2541 if (a_avail && c_avail && (n != 3)) {
2546 off -= s->mb_stride;
2547 q2 = s->current_picture.f.qscale_table[off];
2549 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
/* Pick the predictor: gradient rule when both neighbours exist, else
 * whichever is available; default to left direction. */
2552 if (a_avail && c_avail) {
2553 if (abs(a - b) <= abs(b - c)) {
2555 *dir_ptr = 1; // left
2558 *dir_ptr = 0; // top
2560 } else if (a_avail) {
2562 *dir_ptr = 0; // top
2563 } else if (c_avail) {
2565 *dir_ptr = 1; // left
2568 *dir_ptr = 1; // left
2571 /* update predictor */
2572 *dc_val_ptr = &dc_val[0];
2576 /** @} */ // Block group
2579 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2580 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
/* Predict the coded-block flag for block n from its left (a), top-left (b)
 * and top (c) neighbours in coded_block[], and return the predictor while
 * exposing the slot to update via coded_block_ptr.
 * NOTE(review): the pred computation and return are elided in this extract. */
2584 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2585 uint8_t **coded_block_ptr)
2587 int xy, wrap, pred, a, b, c;
2589 xy = s->block_index[n];
2590 wrap = s->b8_stride;
2595 a = s->coded_block[xy - 1 ];
2596 b = s->coded_block[xy - 1 - wrap];
2597 c = s->coded_block[xy - wrap];
2606 *coded_block_ptr = &s->coded_block[xy];
2612 * Decode one AC coefficient
2613 * @param v The VC1 context
2614 * @param last Last coefficient
2615 * @param skip How much zero coefficients to skip
2616 * @param value Decoded AC coefficient value
2617 * @param codingset set of VLC to decode data
 *
 * NOTE(review): three-level decode: direct table hit, escape modes 0/1
 * (delta run/level tables), and escape mode 2 (explicit run/level fields
 * whose bit lengths are latched in esc3_run_length/esc3_level_length).
 */
2620 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2621 int *value, int codingset)
2623 GetBitContext *gb = &v->s.gb;
2624 int index, escape, run = 0, level = 0, lst = 0;
2626 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2627 if (index != vc1_ac_sizes[codingset] - 1) {
2628 run = vc1_index_decode_table[codingset][index][0];
2629 level = vc1_index_decode_table[codingset][index][1];
/* Force 'last' on bitstream exhaustion so the caller terminates cleanly. */
2630 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
2634 escape = decode210(gb);
2636 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2637 run = vc1_index_decode_table[codingset][index][0];
2638 level = vc1_index_decode_table[codingset][index][1];
2639 lst = index >= vc1_last_decode_table[codingset];
2642 level += vc1_last_delta_level_table[codingset][run];
2644 level += vc1_delta_level_table[codingset][run];
2647 run += vc1_last_delta_run_table[codingset][level] + 1;
2649 run += vc1_delta_run_table[codingset][level] + 1;
/* Escape mode 2: the first escape in a frame also decodes the field
 * widths (tables 59/60 of the spec, per the comments below). */
2655 lst = get_bits1(gb);
2656 if (v->s.esc3_level_length == 0) {
2657 if (v->pq < 8 || v->dquantfrm) { // table 59
2658 v->s.esc3_level_length = get_bits(gb, 3);
2659 if (!v->s.esc3_level_length)
2660 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2661 } else { // table 60
2662 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2664 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2666 run = get_bits(gb, v->s.esc3_run_length);
2667 sign = get_bits1(gb);
2668 level = get_bits(gb, v->s.esc3_level_length);
2679 /** Decode intra block in intra frames - should be faster than decode_intra_block
2680 * @param v VC1Context
2681 * @param block block to decode
2682 * @param[in] n subblock index
2683 * @param coded are AC coeffs present or not
2684 * @param codingset set of VLC to decode data
 * @return 0 on success, negative on bitstream error (returns elided here)
 *
 * NOTE(review): DC differential is VLC-decoded (with pq-dependent escape
 * widths), added to the prediction, then AC coefficients are decoded along
 * a direction-dependent zigzag and combined with AC prediction.
 */
2686 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n,
2687 int coded, int codingset)
2689 GetBitContext *gb = &v->s.gb;
2690 MpegEncContext *s = &v->s;
2691 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2694 int16_t *ac_val, *ac_val2;
2697 /* Get DC differential */
2699 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2701 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2704 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2708 if (dcdiff == 119 /* ESC index value */) {
2709 /* TODO: Optimize */
2710 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2711 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2712 else dcdiff = get_bits(gb, 8);
2715 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2716 else if (v->pq == 2)
2717 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2724 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2727 /* Store the quantized DC coeff, used for prediction */
2729 block[0] = dcdiff * s->y_dc_scale;
2731 block[0] = dcdiff * s->c_dc_scale;
2742 int last = 0, skip, value;
2743 const uint8_t *zz_table;
2747 scale = v->pq * 2 + v->halfpq;
/* Zigzag table selection depends on the DC prediction direction. */
2751 zz_table = v->zz_8x8[2];
2753 zz_table = v->zz_8x8[3];
2755 zz_table = v->zz_8x8[1];
2757 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2759 if (dc_pred_dir) // left
2762 ac_val -= 16 * s->block_wrap[n];
2765 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2769 block[zz_table[i++]] = value;
2772 /* apply AC prediction if needed */
2774 if (dc_pred_dir) { // left
2775 for (k = 1; k < 8; k++)
2776 block[k << v->left_blk_sh] += ac_val[k];
2778 for (k = 1; k < 8; k++)
2779 block[k << v->top_blk_sh] += ac_val[k + 8];
2782 /* save AC coeffs for further prediction */
2783 for (k = 1; k < 8; k++) {
2784 ac_val2[k] = block[k << v->left_blk_sh];
2785 ac_val2[k + 8] = block[k << v->top_blk_sh];
2788 /* scale AC coeffs */
2789 for (k = 1; k < 64; k++)
2793 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2796 if (s->ac_pred) i = 63;
/* ---- no coded AC coefficients: predict them entirely from neighbours ---- */
2802 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2806 scale = v->pq * 2 + v->halfpq;
2807 memset(ac_val2, 0, 16 * 2);
2808 if (dc_pred_dir) { // left
2811 memcpy(ac_val2, ac_val, 8 * 2);
2813 ac_val -= 16 * s->block_wrap[n];
2815 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2818 /* apply AC prediction if needed */
2820 if (dc_pred_dir) { //left
2821 for (k = 1; k < 8; k++) {
2822 block[k << v->left_blk_sh] = ac_val[k] * scale;
2823 if (!v->pquantizer && block[k << v->left_blk_sh])
2824 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2827 for (k = 1; k < 8; k++) {
2828 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2829 if (!v->pquantizer && block[k << v->top_blk_sh])
2830 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2836 s->block_last_index[n] = i;
2841 /** Decode intra block in intra frames - should be faster than decode_intra_block
2842 * @param v VC1Context
2843 * @param block block to decode
2844 * @param[in] n subblock number
2845 * @param coded are AC coeffs present or not
2846 * @param codingset set of VLC to decode data
2847 * @param mquant quantizer value for this macroblock
 * @return 0 on success, negative on bitstream error (returns elided here)
 *
 * NOTE(review): Advanced-profile variant of vc1_decode_i_block(): uses the
 * per-MB quantizer (mquant), per-neighbour quantizer rescaling of AC
 * predictors, and interlaced-frame zigzag tables.
 */
2849 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
2850 int coded, int codingset, int mquant)
2852 GetBitContext *gb = &v->s.gb;
2853 MpegEncContext *s = &v->s;
2854 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2857 int16_t *ac_val, *ac_val2;
2859 int a_avail = v->a_avail, c_avail = v->c_avail;
2860 int use_pred = s->ac_pred;
2863 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2865 /* Get DC differential */
2867 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2869 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2872 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2876 if (dcdiff == 119 /* ESC index value */) {
2877 /* TODO: Optimize */
2878 if (mquant == 1) dcdiff = get_bits(gb, 10);
2879 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2880 else dcdiff = get_bits(gb, 8);
2883 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2884 else if (mquant == 2)
2885 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2892 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2895 /* Store the quantized DC coeff, used for prediction */
2897 block[0] = dcdiff * s->y_dc_scale;
2899 block[0] = dcdiff * s->c_dc_scale;
2905 /* check if AC is needed at all */
2906 if (!a_avail && !c_avail)
2908 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2911 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2913 if (dc_pred_dir) // left
2916 ac_val -= 16 * s->block_wrap[n];
/* q1/q2: current vs predictor-MB quantizers, used for AC rescaling below. */
2918 q1 = s->current_picture.f.qscale_table[mb_pos];
2919 if ( dc_pred_dir && c_avail && mb_pos)
2920 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2921 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2922 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2923 if ( dc_pred_dir && n == 1)
2925 if (!dc_pred_dir && n == 2)
2931 int last = 0, skip, value;
2932 const uint8_t *zz_table;
/* Interlaced frames use the dedicated zzi_8x8 scan unless AC prediction
 * selects a direction-specific progressive scan. */
2936 if (!use_pred && v->fcm == ILACE_FRAME) {
2937 zz_table = v->zzi_8x8;
2939 if (!dc_pred_dir) // top
2940 zz_table = v->zz_8x8[2];
2942 zz_table = v->zz_8x8[3];
2945 if (v->fcm != ILACE_FRAME)
2946 zz_table = v->zz_8x8[1];
2948 zz_table = v->zzi_8x8;
2952 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2956 block[zz_table[i++]] = value;
2959 /* apply AC prediction if needed */
2961 /* scale predictors if needed*/
2962 if (q2 && q1 != q2) {
2963 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2964 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2966 if (dc_pred_dir) { // left
2967 for (k = 1; k < 8; k++)
2968 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2970 for (k = 1; k < 8; k++)
2971 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2974 if (dc_pred_dir) { //left
2975 for (k = 1; k < 8; k++)
2976 block[k << v->left_blk_sh] += ac_val[k];
2978 for (k = 1; k < 8; k++)
2979 block[k << v->top_blk_sh] += ac_val[k + 8];
2983 /* save AC coeffs for further prediction */
2984 for (k = 1; k < 8; k++) {
2985 ac_val2[k ] = block[k << v->left_blk_sh];
2986 ac_val2[k + 8] = block[k << v->top_blk_sh];
2989 /* scale AC coeffs */
2990 for (k = 1; k < 64; k++)
2994 block[k] += (block[k] < 0) ? -mquant : mquant;
2997 if (use_pred) i = 63;
2998 } else { // no AC coeffs
3001 memset(ac_val2, 0, 16 * 2);
3002 if (dc_pred_dir) { // left
3004 memcpy(ac_val2, ac_val, 8 * 2);
3005 if (q2 && q1 != q2) {
3006 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3007 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3008 for (k = 1; k < 8; k++)
3009 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3014 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3015 if (q2 && q1 != q2) {
3016 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3017 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3018 for (k = 1; k < 8; k++)
3019 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3024 /* apply AC prediction if needed */
3026 if (dc_pred_dir) { // left
3027 for (k = 1; k < 8; k++) {
3028 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3029 if (!v->pquantizer && block[k << v->left_blk_sh])
3030 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3033 for (k = 1; k < 8; k++) {
3034 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3035 if (!v->pquantizer && block[k << v->top_blk_sh])
3036 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3042 s->block_last_index[n] = i;
3047 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3048 * @param v VC1Context
3049 * @param block block to decode
3050 * @param[in] n subblock index
3051 * @param coded are AC coeffs present or not
3052 * @param mquant block quantizer
3053 * @param codingset set of VLC to decode data
 * @return 0 on success, negative on bitstream error (returns elided here)
 *
 * NOTE(review): clamps mquant, clears the block, then follows the same
 * DC-VLC + AC-prediction pipeline as vc1_decode_i_block_adv(), with
 * availability-driven prediction-direction override.
 */
3055 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
3056 int coded, int mquant, int codingset)
3058 GetBitContext *gb = &v->s.gb;
3059 MpegEncContext *s = &v->s;
3060 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3063 int16_t *ac_val, *ac_val2;
3065 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3066 int a_avail = v->a_avail, c_avail = v->c_avail;
3067 int use_pred = s->ac_pred;
3071 s->dsp.clear_block(block);
3073 /* XXX: Guard against dumb values of mquant */
3074 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3076 /* Set DC scale - y and c use the same */
3077 s->y_dc_scale = s->y_dc_scale_table[mquant];
3078 s->c_dc_scale = s->c_dc_scale_table[mquant];
3080 /* Get DC differential */
3082 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3084 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3087 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3091 if (dcdiff == 119 /* ESC index value */) {
3092 /* TODO: Optimize */
3093 if (mquant == 1) dcdiff = get_bits(gb, 10);
3094 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3095 else dcdiff = get_bits(gb, 8);
3098 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3099 else if (mquant == 2)
3100 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3107 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3110 /* Store the quantized DC coeff, used for prediction */
3113 block[0] = dcdiff * s->y_dc_scale;
3115 block[0] = dcdiff * s->c_dc_scale;
3121 /* check if AC is needed at all and adjust direction if needed */
3122 if (!a_avail) dc_pred_dir = 1;
3123 if (!c_avail) dc_pred_dir = 0;
3124 if (!a_avail && !c_avail) use_pred = 0;
3125 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3128 scale = mquant * 2 + v->halfpq;
3130 if (dc_pred_dir) //left
3133 ac_val -= 16 * s->block_wrap[n];
/* q1/q2: current vs predictor-MB quantizers for AC rescaling. */
3135 q1 = s->current_picture.f.qscale_table[mb_pos];
3136 if (dc_pred_dir && c_avail && mb_pos)
3137 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3138 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3139 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
3140 if ( dc_pred_dir && n == 1)
3142 if (!dc_pred_dir && n == 2)
3144 if (n == 3) q2 = q1;
3147 int last = 0, skip, value;
3151 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
/* Scan-order selection: progressive, direction-specific, or interlaced. */
3155 if (v->fcm == PROGRESSIVE)
3156 block[v->zz_8x8[0][i++]] = value;
3158 if (use_pred && (v->fcm == ILACE_FRAME)) {
3159 if (!dc_pred_dir) // top
3160 block[v->zz_8x8[2][i++]] = value;
3162 block[v->zz_8x8[3][i++]] = value;
3164 block[v->zzi_8x8[i++]] = value;
3169 /* apply AC prediction if needed */
3171 /* scale predictors if needed*/
3172 if (q2 && q1 != q2) {
3173 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3174 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3176 if (dc_pred_dir) { // left
3177 for (k = 1; k < 8; k++)
3178 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3180 for (k = 1; k < 8; k++)
3181 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3184 if (dc_pred_dir) { // left
3185 for (k = 1; k < 8; k++)
3186 block[k << v->left_blk_sh] += ac_val[k];
3188 for (k = 1; k < 8; k++)
3189 block[k << v->top_blk_sh] += ac_val[k + 8];
3193 /* save AC coeffs for further prediction */
3194 for (k = 1; k < 8; k++) {
3195 ac_val2[k ] = block[k << v->left_blk_sh];
3196 ac_val2[k + 8] = block[k << v->top_blk_sh];
3199 /* scale AC coeffs */
3200 for (k = 1; k < 64; k++)
3204 block[k] += (block[k] < 0) ? -mquant : mquant;
3207 if (use_pred) i = 63;
3208 } else { // no AC coeffs
3211 memset(ac_val2, 0, 16 * 2);
3212 if (dc_pred_dir) { // left
3214 memcpy(ac_val2, ac_val, 8 * 2);
3215 if (q2 && q1 != q2) {
3216 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3217 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3218 for (k = 1; k < 8; k++)
3219 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3224 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3225 if (q2 && q1 != q2) {
3226 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3227 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3228 for (k = 1; k < 8; k++)
3229 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3234 /* apply AC prediction if needed */
3236 if (dc_pred_dir) { // left
3237 for (k = 1; k < 8; k++) {
3238 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3239 if (!v->pquantizer && block[k << v->left_blk_sh])
3240 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3243 for (k = 1; k < 8; k++) {
3244 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3245 if (!v->pquantizer && block[k << v->top_blk_sh])
3246 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3252 s->block_last_index[n] = i;
/**
 * Decode one coded inter (residual) block of a P/B macroblock.
 * Selects the block transform type (8x8 / two 8x4 / two 4x8 / four 4x4)
 * either from the MB-level ttmb or from a per-block VLC, dequantizes the
 * AC run-level coefficients with "scale" (and an mquant-based rounding
 * term), applies the matching inverse transform and adds the result to dst.
 * The chosen transform type is also reported through *ttmb_out.
 * NOTE(review): this chunk is a sampled extract — many source lines
 * (conditions, loop headers, the return statement) are missing, so the
 * comments below only describe the visible code.
 */
3259 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n,
3260 int mquant, int ttmb, int first_block,
3261 uint8_t *dst, int linesize, int skip_block,
3264 MpegEncContext *s = &v->s;
3265 GetBitContext *gb = &s->gb;
3268 int scale, off, idx, last, skip, value;
3269 int ttblk = ttmb & 7;
3272 s->dsp.clear_block(block);
/* per-block transform type signalled in the bitstream */
3275 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3277 if (ttblk == TT_4X4) {
3278 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3280 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3281 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3282 || (!v->res_rtm_flag && !first_block))) {
3283 subblkpat = decode012(gb);
3285 subblkpat ^= 3; // swap decoded pattern bits
3286 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3288 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
/* dequantization scale: 2*MQUANT (+ halfstep when MB uses the picture quantizer) */
3291 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3293 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3294 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3295 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3298 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3299 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* --- 8x8 transform: one scan over the whole block --- */
3308 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3313 idx = v->zz_8x8[0][i++];
3315 idx = v->zzi_8x8[i++];
3316 block[idx] = value * scale;
3318 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3322 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3324 v->vc1dsp.vc1_inv_trans_8x8(block);
3325 s->dsp.add_pixels_clamped(block, dst, linesize);
/* --- 4x4 transform: up to four coded sub-blocks (subblkpat) --- */
3330 pat = ~subblkpat & 0xF;
3331 for (j = 0; j < 4; j++) {
3332 last = subblkpat & (1 << (3 - j));
3334 off = (j & 1) * 4 + (j & 2) * 16;
3336 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3341 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3343 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3344 block[idx + off] = value * scale;
3346 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3348 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3350 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3352 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* --- 8x4 transform: top and/or bottom half --- */
3357 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3358 for (j = 0; j < 2; j++) {
3359 last = subblkpat & (1 << (1 - j));
3363 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3368 idx = v->zz_8x4[i++] + off;
3370 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3371 block[idx] = value * scale;
3373 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3375 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3377 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3379 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* --- 4x8 transform: left and/or right half --- */
3384 pat = ~(subblkpat * 5) & 0xF;
3385 for (j = 0; j < 2; j++) {
3386 last = subblkpat & (1 << (1 - j));
3390 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3395 idx = v->zz_4x8[i++] + off;
3397 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3398 block[idx] = value * scale;
3400 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3402 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3404 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3406 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* report the transform used for this block (4 bits per block index n) */
3412 *ttmb_out |= ttblk << (n * 4);
3416 /** @} */ // Macroblock group
/* Escape-mode differential-MV decoding tables: per-index extra bit counts
 * and corresponding value offsets.
 * NOTE(review): the consuming code (presumably the GET_MVDATA macro) is not
 * visible in this chunk — confirm against its definition. */
3418 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3419 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/**
 * Apply the in-loop deblocking filter to the VERTICAL (horizontal-edge)
 * boundaries of one 8x8 block of the macroblock one row above the current
 * one (all state is read from v->cbp/is_intra/ttblk at mb_x - mb_stride).
 * block_num: 0-3 luma, 4-5 chroma; chroma uses uvlinesize and s->dest[1/2].
 * Edges are filtered 8 pixels wide, or in two 4-pixel halves depending on
 * the coded sub-block pattern; internal edges are filtered only when that
 * block used a 4x4 or 8x4 transform.
 * NOTE(review): sampled extract — several lines (declarations of dst/mv,
 * some else branches and closing braces) are missing from view.
 */
3421 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3423 MpegEncContext *s = &v->s;
3424 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3425 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3426 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3427 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3428 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3431 if (block_num > 3) {
3432 dst = s->dest[block_num - 3];
3434 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
/* bottom MB edge: skipped on the last row except for the top luma blocks */
3436 if (s->mb_y != s->end_mb_y || block_num < 2) {
3440 if (block_num > 3) {
3441 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3442 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3443 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3444 mv_stride = s->mb_stride;
3446 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3447 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3448 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3449 : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3450 mv_stride = s->b8_stride;
3451 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* filter the full 8-wide edge if either side is intra or MVs differ */
3454 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3455 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3456 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3458 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3460 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3463 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3465 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* internal horizontal edge (only for 4x4/8x4 transformed blocks) */
3470 dst -= 4 * linesize;
3471 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3472 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3473 idx = (block_cbp | (block_cbp >> 2)) & 3;
3475 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3478 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3480 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/**
 * Apply the in-loop deblocking filter to the HORIZONTAL (vertical-edge)
 * boundaries of one 8x8 block of the macroblock one row above AND one
 * column left of the current position (state read at mb_x - 1 - mb_stride).
 * Mirrors vc1_apply_p_v_loop_filter but for left/right edges; internal
 * edges are filtered only for 4x4/4x8 transformed blocks.
 * NOTE(review): sampled extract — declarations of dst/mv, some else
 * branches and closing braces are missing from view.
 */
3487 MpegEncContext *s = &v->s;
3488 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3489 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3490 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3491 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3492 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3495 if (block_num > 3) {
3496 dst = s->dest[block_num - 3] - 8 * linesize;
3498 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
/* right MB edge: skipped in the last column except for left-side blocks */
3501 if (s->mb_x != s->mb_width || !(block_num & 5)) {
3504 if (block_num > 3) {
3505 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3506 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3507 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3509 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3510 : (mb_cbp >> ((block_num + 1) * 4));
3511 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3512 : (mb_is_intra >> ((block_num + 1) * 4));
3513 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
/* full 8-tall edge when either side is intra or MVs differ */
3515 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3516 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3518 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3520 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3523 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3525 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* internal vertical edge (only for 4x4/4x8 transformed blocks) */
3531 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3532 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3533 idx = (block_cbp | (block_cbp >> 1)) & 5;
3535 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3538 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3540 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/**
 * Run the P-frame in-loop deblocking for the current MB position:
 * vertical edges first, then horizontal edges one MB behind (H must run
 * after V has been applied on both neighbours). At the end of a row the
 * block index is advanced and the pending H pass is completed.
 * NOTE(review): sampled extract — loop braces/guards are partly missing.
 */
3545 static void vc1_apply_p_loop_filter(VC1Context *v)
3547 MpegEncContext *s = &v->s;
3550 for (i = 0; i < 6; i++) {
3551 vc1_apply_p_v_loop_filter(v, i);
3554 /* V always precedes H, therefore we run H one MB before V;
3555 * at the end of a row, we catch up to complete the row */
3557 for (i = 0; i < 6; i++) {
3558 vc1_apply_p_h_loop_filter(v, i);
3560 if (s->mb_x == s->mb_width - 1) {
3562 ff_update_block_index(s);
3563 for (i = 0; i < 6; i++) {
3564 vc1_apply_p_h_loop_filter(v, i);
3570 /** Decode one P-frame MB */
/**
 * Decode one progressive P-frame macroblock.
 * Handles the four cases: 1MV coded, 1MV skipped, 4MV coded, 4MV skipped.
 * Intra blocks go through vc1_decode_intra_block + inverse 8x8 transform
 * (with optional overlap smoothing); inter blocks through
 * vc1_decode_p_block. Per-block cbp / transform-type / intra flags are
 * accumulated and stored in v->cbp / v->ttblk / v->is_intra for the
 * deblocking pass.
 * NOTE(review): sampled extract — many lines (GET_MQUANT invocations,
 * else branches, closing braces, the return) are missing from view;
 * comments describe only the visible code.
 */
3572 static int vc1_decode_p_mb(VC1Context *v)
3574 MpegEncContext *s = &v->s;
3575 GetBitContext *gb = &s->gb;
3577 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3578 int cbp; /* cbp decoding stuff */
3579 int mqdiff, mquant; /* MB quantization */
3580 int ttmb = v->ttfrm; /* MB Transform type */
3582 int mb_has_coeffs = 1; /* last_flag */
3583 int dmv_x, dmv_y; /* Differential MV components */
3584 int index, index1; /* LUT indexes */
3585 int val, sign; /* temp values */
3586 int first_block = 1;
3588 int skipped, fourmv;
3589 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3591 mquant = v->pq; /* lossy initialization */
/* MV-type and skip flags: raw from the bitstream or from decoded bitplanes */
3593 if (v->mv_type_is_raw)
3594 fourmv = get_bits1(gb);
3596 fourmv = v->mv_type_mb_plane[mb_pos];
3598 skipped = get_bits1(gb);
3600 skipped = v->s.mbskip_table[mb_pos];
3602 if (!fourmv) { /* 1MV mode */
3604 GET_MVDATA(dmv_x, dmv_y);
3607 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3608 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3610 s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3611 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3613 /* FIXME Set DC val for inter block ? */
3614 if (s->mb_intra && !mb_has_coeffs) {
3616 s->ac_pred = get_bits1(gb);
3618 } else if (mb_has_coeffs) {
3620 s->ac_pred = get_bits1(gb);
3621 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3627 s->current_picture.f.qscale_table[mb_pos] = mquant;
3629 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3630 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3631 VC1_TTMB_VLC_BITS, 2);
3632 if (!s->mb_intra) vc1_mc_1mv(v, 0);
/* decode the 6 blocks (4 luma + 2 chroma) */
3634 for (i = 0; i < 6; i++) {
3635 s->dc_val[0][s->block_index[i]] = 0;
3637 val = ((cbp >> (5 - i)) & 1);
3638 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3639 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3641 /* check if prediction blocks A and C are available */
3642 v->a_avail = v->c_avail = 0;
3643 if (i == 2 || i == 3 || !s->first_slice_line)
3644 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3645 if (i == 1 || i == 3 || s->mb_x)
3646 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3648 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3649 (i & 4) ? v->codingset2 : v->codingset);
3650 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3652 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3654 for (j = 0; j < 64; j++)
3655 s->block[i][j] <<= 1;
3656 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3657 if (v->pq >= 9 && v->overlap) {
3659 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3661 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3663 block_cbp |= 0xF << (i << 2);
3664 block_intra |= 1 << i;
3666 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3667 s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3668 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3669 block_cbp |= pat << (i << 2);
3670 if (!v->ttmbf && ttmb < 8)
/* skipped 1MV MB: just predict the MV and motion-compensate */
3677 for (i = 0; i < 6; i++) {
3678 v->mb_type[0][s->block_index[i]] = 0;
3679 s->dc_val[0][s->block_index[i]] = 0;
3681 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3682 s->current_picture.f.qscale_table[mb_pos] = 0;
3683 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3686 } else { // 4MV mode
3687 if (!skipped /* unskipped MB */) {
3688 int intra_count = 0, coded_inter = 0;
3689 int is_intra[6], is_coded[6];
3691 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* first pass: per-block MV decode + luma MC, collect intra/coded flags */
3692 for (i = 0; i < 6; i++) {
3693 val = ((cbp >> (5 - i)) & 1);
3694 s->dc_val[0][s->block_index[i]] = 0;
3701 GET_MVDATA(dmv_x, dmv_y);
3703 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3705 vc1_mc_4mv_luma(v, i, 0);
3706 intra_count += s->mb_intra;
3707 is_intra[i] = s->mb_intra;
3708 is_coded[i] = mb_has_coeffs;
/* chroma blocks are intra iff a majority (>=3) of luma blocks are */
3711 is_intra[i] = (intra_count >= 3);
3715 vc1_mc_4mv_chroma(v, 0);
3716 v->mb_type[0][s->block_index[i]] = is_intra[i];
3718 coded_inter = !is_intra[i] & is_coded[i];
3720 // if there are no coded blocks then don't do anything more
3722 if (!intra_count && !coded_inter)
3725 s->current_picture.f.qscale_table[mb_pos] = mquant;
3726 /* test if block is intra and has pred */
3729 for (i = 0; i < 6; i++)
3731 if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3732 || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3738 s->ac_pred = get_bits1(gb);
3742 if (!v->ttmbf && coded_inter)
3743 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* second pass: decode the residuals for all 6 blocks */
3744 for (i = 0; i < 6; i++) {
3746 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3747 s->mb_intra = is_intra[i];
3749 /* check if prediction blocks A and C are available */
3750 v->a_avail = v->c_avail = 0;
3751 if (i == 2 || i == 3 || !s->first_slice_line)
3752 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3753 if (i == 1 || i == 3 || s->mb_x)
3754 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3756 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3757 (i & 4) ? v->codingset2 : v->codingset);
3758 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3760 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3762 for (j = 0; j < 64; j++)
3763 s->block[i][j] <<= 1;
3764 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3765 (i & 4) ? s->uvlinesize : s->linesize);
3766 if (v->pq >= 9 && v->overlap) {
3768 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3770 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3772 block_cbp |= 0xF << (i << 2);
3773 block_intra |= 1 << i;
3774 } else if (is_coded[i]) {
3775 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3776 first_block, s->dest[dst_idx] + off,
3777 (i & 4) ? s->uvlinesize : s->linesize,
3778 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3780 block_cbp |= pat << (i << 2);
3781 if (!v->ttmbf && ttmb < 8)
3786 } else { // skipped MB
3788 s->current_picture.f.qscale_table[mb_pos] = 0;
3789 for (i = 0; i < 6; i++) {
3790 v->mb_type[0][s->block_index[i]] = 0;
3791 s->dc_val[0][s->block_index[i]] = 0;
3793 for (i = 0; i < 4; i++) {
3794 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3795 vc1_mc_4mv_luma(v, i, 0);
3797 vc1_mc_4mv_chroma(v, 0);
3798 s->current_picture.f.qscale_table[mb_pos] = 0;
/* save per-MB state for the loop filter pass */
3802 v->cbp[s->mb_x] = block_cbp;
3803 v->ttblk[s->mb_x] = block_tt;
3804 v->is_intra[s->mb_x] = block_intra;
3809 /* Decode one macroblock in an interlaced frame p picture */
/**
 * Decode one macroblock of an interlaced-frame P picture.
 * The MB mode VLC (4MV or non-4MV table, per fourmvswitch) selects among
 * intra, 1MV, 2MV-field and 4MV(-field) coding; v->blk_mv_type records
 * frame (0) vs field (1) MV type per luma block. Intra MBs additionally
 * read a field-transform (fieldtx) flag that doubles the luma stride.
 * NOTE(review): sampled extract — GET_MQUANT sites, several branches and
 * closing braces are missing from view; comments cover visible code only.
 */
3811 static int vc1_decode_p_mb_intfr(VC1Context *v)
3813 MpegEncContext *s = &v->s;
3814 GetBitContext *gb = &s->gb;
3816 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3817 int cbp = 0; /* cbp decoding stuff */
3818 int mqdiff, mquant; /* MB quantization */
3819 int ttmb = v->ttfrm; /* MB Transform type */
3821 int mb_has_coeffs = 1; /* last_flag */
3822 int dmv_x, dmv_y; /* Differential MV components */
3823 int val; /* temp value */
3824 int first_block = 1;
3826 int skipped, fourmv = 0, twomv = 0;
3827 int block_cbp = 0, pat, block_tt = 0;
3828 int idx_mbmode = 0, mvbp;
3829 int stride_y, fieldtx;
3831 mquant = v->pq; /* Loosy initialization */
3834 skipped = get_bits1(gb);
3836 skipped = v->s.mbskip_table[mb_pos];
3838 if (v->fourmvswitch)
3839 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3841 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3842 switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3843 /* store the motion vector type in a flag (useful later) */
3844 case MV_PMODE_INTFR_4MV:
3846 v->blk_mv_type[s->block_index[0]] = 0;
3847 v->blk_mv_type[s->block_index[1]] = 0;
3848 v->blk_mv_type[s->block_index[2]] = 0;
3849 v->blk_mv_type[s->block_index[3]] = 0;
3851 case MV_PMODE_INTFR_4MV_FIELD:
3853 v->blk_mv_type[s->block_index[0]] = 1;
3854 v->blk_mv_type[s->block_index[1]] = 1;
3855 v->blk_mv_type[s->block_index[2]] = 1;
3856 v->blk_mv_type[s->block_index[3]] = 1;
3858 case MV_PMODE_INTFR_2MV_FIELD:
3860 v->blk_mv_type[s->block_index[0]] = 1;
3861 v->blk_mv_type[s->block_index[1]] = 1;
3862 v->blk_mv_type[s->block_index[2]] = 1;
3863 v->blk_mv_type[s->block_index[3]] = 1;
3865 case MV_PMODE_INTFR_1MV:
3866 v->blk_mv_type[s->block_index[0]] = 0;
3867 v->blk_mv_type[s->block_index[1]] = 0;
3868 v->blk_mv_type[s->block_index[2]] = 0;
3869 v->blk_mv_type[s->block_index[3]] = 0;
3872 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3873 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3874 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3875 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3876 s->mb_intra = v->is_intra[s->mb_x] = 1;
3877 for (i = 0; i < 6; i++)
3878 v->mb_type[0][s->block_index[i]] = 1;
3879 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3880 mb_has_coeffs = get_bits1(gb);
3882 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3883 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3885 s->current_picture.f.qscale_table[mb_pos] = mquant;
3886 /* Set DC scale - y and c use the same (not sure if necessary here) */
3887 s->y_dc_scale = s->y_dc_scale_table[mquant];
3888 s->c_dc_scale = s->c_dc_scale_table[mquant];
3890 for (i = 0; i < 6; i++) {
3891 s->dc_val[0][s->block_index[i]] = 0;
3893 val = ((cbp >> (5 - i)) & 1);
3894 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3895 v->a_avail = v->c_avail = 0;
3896 if (i == 2 || i == 3 || !s->first_slice_line)
3897 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3898 if (i == 1 || i == 3 || s->mb_x)
3899 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3901 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3902 (i & 4) ? v->codingset2 : v->codingset);
3903 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3904 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* fieldtx doubles the luma stride and changes the block offsets */
3906 stride_y = s->linesize << fieldtx;
3907 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3909 stride_y = s->uvlinesize;
3912 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3916 } else { // inter MB
3917 mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3919 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3920 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3921 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3923 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3924 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3925 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3928 s->mb_intra = v->is_intra[s->mb_x] = 0;
3929 for (i = 0; i < 6; i++)
3930 v->mb_type[0][s->block_index[i]] = 0;
3931 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3932 /* for all motion vector read MVDATA and motion compensate each block */
3936 for (i = 0; i < 6; i++) {
3939 val = ((mvbp >> (3 - i)) & 1);
3941 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3943 vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3944 vc1_mc_4mv_luma(v, i, 0);
3945 } else if (i == 4) {
3946 vc1_mc_4mv_chroma4(v);
/* 2MV-field mode: one MV per field pair of luma blocks */
3953 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3955 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3956 vc1_mc_4mv_luma(v, 0, 0);
3957 vc1_mc_4mv_luma(v, 1, 0);
3960 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3962 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3963 vc1_mc_4mv_luma(v, 2, 0);
3964 vc1_mc_4mv_luma(v, 3, 0);
3965 vc1_mc_4mv_chroma4(v);
3967 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3970 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3972 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3976 GET_MQUANT(); // p. 227
3977 s->current_picture.f.qscale_table[mb_pos] = mquant;
3978 if (!v->ttmbf && cbp)
3979 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3980 for (i = 0; i < 6; i++) {
3981 s->dc_val[0][s->block_index[i]] = 0;
3983 val = ((cbp >> (5 - i)) & 1);
3985 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3987 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3989 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3990 first_block, s->dest[dst_idx] + off,
3991 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3992 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3993 block_cbp |= pat << (i << 2);
3994 if (!v->ttmbf && ttmb < 8)
/* skipped MB: frame-MV predict with zero residual */
4001 s->mb_intra = v->is_intra[s->mb_x] = 0;
4002 for (i = 0; i < 6; i++) {
4003 v->mb_type[0][s->block_index[i]] = 0;
4004 s->dc_val[0][s->block_index[i]] = 0;
4006 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
4007 s->current_picture.f.qscale_table[mb_pos] = 0;
4008 v->blk_mv_type[s->block_index[0]] = 0;
4009 v->blk_mv_type[s->block_index[1]] = 0;
4010 v->blk_mv_type[s->block_index[2]] = 0;
4011 v->blk_mv_type[s->block_index[3]] = 0;
4012 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
/* at the row end, carry the is_intra flags over for the next row */
4015 if (s->mb_x == s->mb_width - 1)
4016 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/**
 * Decode one macroblock of an interlaced-field P picture.
 * The MB mode VLC selects intra (idx <= 1), 1-MV (idx <= 5) or 4-MV.
 * When decoding the second field, destination offsets are shifted by one
 * picture line so both fields interleave into the same frame buffer.
 * NOTE(review): sampled extract — GET_MQUANT sites, some declarations
 * (idx_mbmode, pred_flag) and closing braces are missing from view.
 */
4020 static int vc1_decode_p_mb_intfi(VC1Context *v)
4022 MpegEncContext *s = &v->s;
4023 GetBitContext *gb = &s->gb;
4025 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4026 int cbp = 0; /* cbp decoding stuff */
4027 int mqdiff, mquant; /* MB quantization */
4028 int ttmb = v->ttfrm; /* MB Transform type */
4030 int mb_has_coeffs = 1; /* last_flag */
4031 int dmv_x, dmv_y; /* Differential MV components */
4032 int val; /* temp values */
4033 int first_block = 1;
4036 int block_cbp = 0, pat, block_tt = 0;
4039 mquant = v->pq; /* Loosy initialization */
4041 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4042 if (idx_mbmode <= 1) { // intra MB
4043 s->mb_intra = v->is_intra[s->mb_x] = 1;
4044 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4045 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4046 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4048 s->current_picture.f.qscale_table[mb_pos] = mquant;
4049 /* Set DC scale - y and c use the same (not sure if necessary here) */
4050 s->y_dc_scale = s->y_dc_scale_table[mquant];
4051 s->c_dc_scale = s->c_dc_scale_table[mquant];
4052 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4053 mb_has_coeffs = idx_mbmode & 1;
4055 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4057 for (i = 0; i < 6; i++) {
4058 s->dc_val[0][s->block_index[i]] = 0;
4059 v->mb_type[0][s->block_index[i]] = 1;
4061 val = ((cbp >> (5 - i)) & 1);
4062 v->a_avail = v->c_avail = 0;
4063 if (i == 2 || i == 3 || !s->first_slice_line)
4064 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4065 if (i == 1 || i == 3 || s->mb_x)
4066 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4068 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4069 (i & 4) ? v->codingset2 : v->codingset);
4070 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4072 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* shift by one picture line when writing the second field */
4073 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4074 off += v->second_field ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4075 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4076 // TODO: loop filter
4079 s->mb_intra = v->is_intra[s->mb_x] = 0;
4080 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4081 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4082 if (idx_mbmode <= 5) { // 1-MV
4084 if (idx_mbmode & 1) {
4085 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4087 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4089 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: per-block MV pattern, then per-block MV decode + luma MC */
4091 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4092 for (i = 0; i < 6; i++) {
4094 dmv_x = dmv_y = pred_flag = 0;
4095 val = ((v->fourmvbp >> (3 - i)) & 1);
4097 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4099 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4100 vc1_mc_4mv_luma(v, i, 0);
4102 vc1_mc_4mv_chroma(v, 0);
4104 mb_has_coeffs = idx_mbmode & 1;
4107 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4111 s->current_picture.f.qscale_table[mb_pos] = mquant;
4112 if (!v->ttmbf && cbp) {
4113 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4116 for (i = 0; i < 6; i++) {
4117 s->dc_val[0][s->block_index[i]] = 0;
4119 val = ((cbp >> (5 - i)) & 1);
4120 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4121 if (v->second_field)
4122 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4124 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4125 first_block, s->dest[dst_idx] + off,
4126 (i & 4) ? s->uvlinesize : s->linesize,
4127 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4129 block_cbp |= pat << (i << 2);
4130 if (!v->ttmbf && ttmb < 8) ttmb = -1;
4135 if (s->mb_x == s->mb_width - 1)
4136 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4140 /** Decode one B-frame MB (in Main profile) */
/**
 * Decode one progressive B-frame macroblock (Main profile).
 * Distinguishes direct vs explicit prediction (bitplane or raw bit), and
 * forward / backward / interpolated MV types (bmvtype); skipped MBs use
 * BFRACTION to pick the default direction. Residuals are decoded with
 * vc1_decode_intra_block / vc1_decode_p_block just as for P frames.
 * NOTE(review): sampled extract — GET_MQUANT/GET_MVDATA context lines,
 * several branches and closing braces are missing from view.
 */
4142 static void vc1_decode_b_mb(VC1Context *v)
4144 MpegEncContext *s = &v->s;
4145 GetBitContext *gb = &s->gb;
4147 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4148 int cbp = 0; /* cbp decoding stuff */
4149 int mqdiff, mquant; /* MB quantization */
4150 int ttmb = v->ttfrm; /* MB Transform type */
4151 int mb_has_coeffs = 0; /* last_flag */
4152 int index, index1; /* LUT indexes */
4153 int val, sign; /* temp values */
4154 int first_block = 1;
4156 int skipped, direct;
4157 int dmv_x[2], dmv_y[2];
4158 int bmvtype = BMV_TYPE_BACKWARD;
4160 mquant = v->pq; /* lossy initialization */
/* direct and skip flags: raw from the bitstream or from decoded bitplanes */
4164 direct = get_bits1(gb);
4166 direct = v->direct_mb_plane[mb_pos];
4168 skipped = get_bits1(gb);
4170 skipped = v->s.mbskip_table[mb_pos];
4172 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4173 for (i = 0; i < 6; i++) {
4174 v->mb_type[0][s->block_index[i]] = 0;
4175 s->dc_val[0][s->block_index[i]] = 0;
4177 s->current_picture.f.qscale_table[mb_pos] = 0;
4181 GET_MVDATA(dmv_x[0], dmv_y[0]);
4182 dmv_x[1] = dmv_x[0];
4183 dmv_y[1] = dmv_y[0];
4185 if (skipped || !s->mb_intra) {
4186 bmvtype = decode012(gb);
/* skipped-MB default direction depends on BFRACTION (nearer reference) */
4189 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4192 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4195 bmvtype = BMV_TYPE_INTERPOLATED;
4196 dmv_x[0] = dmv_y[0] = 0;
4200 for (i = 0; i < 6; i++)
4201 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4205 bmvtype = BMV_TYPE_INTERPOLATED;
4206 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4207 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4211 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4214 s->current_picture.f.qscale_table[mb_pos] = mquant;
4216 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4217 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4218 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4219 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4221 if (!mb_has_coeffs && !s->mb_intra) {
4222 /* no coded blocks - effectively skipped */
4223 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4224 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4227 if (s->mb_intra && !mb_has_coeffs) {
4229 s->current_picture.f.qscale_table[mb_pos] = mquant;
4230 s->ac_pred = get_bits1(gb);
4232 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4234 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4235 GET_MVDATA(dmv_x[0], dmv_y[0]);
4236 if (!mb_has_coeffs) {
4237 /* interpolated skipped block */
4238 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4239 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4243 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4245 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4248 s->ac_pred = get_bits1(gb);
4249 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4251 s->current_picture.f.qscale_table[mb_pos] = mquant;
4252 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4253 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* decode residuals for all 6 blocks */
4257 for (i = 0; i < 6; i++) {
4258 s->dc_val[0][s->block_index[i]] = 0;
4260 val = ((cbp >> (5 - i)) & 1);
4261 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4262 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4264 /* check if prediction blocks A and C are available */
4265 v->a_avail = v->c_avail = 0;
4266 if (i == 2 || i == 3 || !s->first_slice_line)
4267 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4268 if (i == 1 || i == 3 || s->mb_x)
4269 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4271 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4272 (i & 4) ? v->codingset2 : v->codingset);
4273 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4275 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4277 for (j = 0; j < 64; j++)
4278 s->block[i][j] <<= 1;
4279 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4281 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4282 first_block, s->dest[dst_idx] + off,
4283 (i & 4) ? s->uvlinesize : s->linesize,
4284 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4285 if (!v->ttmbf && ttmb < 8)
4292 /** Decode one B-frame MB (in interlaced field B picture) */
4294 static void vc1_decode_b_mb_intfi(VC1Context *v)
4296 MpegEncContext *s = &v->s;
4297 GetBitContext *gb = &s->gb;
4299 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4300 int cbp = 0; /* cbp decoding stuff */
4301 int mqdiff, mquant; /* MB quantization */
4302 int ttmb = v->ttfrm; /* MB Transform type */
4303 int mb_has_coeffs = 0; /* last_flag */
4304 int val; /* temp value */
4305 int first_block = 1;
4308 int dmv_x[2], dmv_y[2], pred_flag[2];
4309 int bmvtype = BMV_TYPE_BACKWARD;
4310 int idx_mbmode, interpmvp;
4312 mquant = v->pq; /* Loosy initialization */
4315 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4316 if (idx_mbmode <= 1) { // intra MB
4317 s->mb_intra = v->is_intra[s->mb_x] = 1;
4318 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4319 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4320 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4322 s->current_picture.f.qscale_table[mb_pos] = mquant;
4323 /* Set DC scale - y and c use the same (not sure if necessary here) */
4324 s->y_dc_scale = s->y_dc_scale_table[mquant];
4325 s->c_dc_scale = s->c_dc_scale_table[mquant];
4326 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4327 mb_has_coeffs = idx_mbmode & 1;
4329 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4331 for (i = 0; i < 6; i++) {
4332 s->dc_val[0][s->block_index[i]] = 0;
4334 val = ((cbp >> (5 - i)) & 1);
4335 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4336 v->a_avail = v->c_avail = 0;
4337 if (i == 2 || i == 3 || !s->first_slice_line)
4338 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4339 if (i == 1 || i == 3 || s->mb_x)
4340 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4342 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4343 (i & 4) ? v->codingset2 : v->codingset);
4344 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4346 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4348 for (j = 0; j < 64; j++)
4349 s->block[i][j] <<= 1;
4350 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4351 off += v->second_field ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4352 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4353 // TODO: yet to perform loop filter
4356 s->mb_intra = v->is_intra[s->mb_x] = 0;
4357 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4358 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4360 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4362 fwd = v->forward_mb_plane[mb_pos];
4363 if (idx_mbmode <= 5) { // 1-MV
4364 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4365 pred_flag[0] = pred_flag[1] = 0;
4367 bmvtype = BMV_TYPE_FORWARD;
4369 bmvtype = decode012(gb);
4372 bmvtype = BMV_TYPE_BACKWARD;
4375 bmvtype = BMV_TYPE_DIRECT;
4378 bmvtype = BMV_TYPE_INTERPOLATED;
4379 interpmvp = get_bits1(gb);
4382 v->bmvtype = bmvtype;
4383 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4384 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4386 if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4387 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4389 if (bmvtype == BMV_TYPE_DIRECT) {
4390 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4391 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4393 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4394 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4395 mb_has_coeffs = !(idx_mbmode & 2);
4398 bmvtype = BMV_TYPE_FORWARD;
4399 v->bmvtype = bmvtype;
4400 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4401 for (i = 0; i < 6; i++) {
4403 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4404 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4405 val = ((v->fourmvbp >> (3 - i)) & 1);
4407 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4408 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4409 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4411 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4412 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4414 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4416 mb_has_coeffs = idx_mbmode & 1;
4419 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4423 s->current_picture.f.qscale_table[mb_pos] = mquant;
4424 if (!v->ttmbf && cbp) {
4425 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4428 for (i = 0; i < 6; i++) {
4429 s->dc_val[0][s->block_index[i]] = 0;
4431 val = ((cbp >> (5 - i)) & 1);
4432 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4433 if (v->second_field)
4434 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4436 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4437 first_block, s->dest[dst_idx] + off,
4438 (i & 4) ? s->uvlinesize : s->linesize,
4439 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4440 if (!v->ttmbf && ttmb < 8)
4448 /** Decode blocks of I-frame
 * Progressive I-frame macroblock loop (simple/main profile).
 * Per MB: reads CBP via ff_msmp4_mb_i_vlc and the AC-prediction flag,
 * decodes the 6 intra blocks, runs the 8x8 inverse transform, stores
 * pixels, then applies overlap smoothing (pq >= 9 && v->overlap) and the
 * optional in-loop deblocking filter.
 * NOTE(review): excerpt is lossy — some interleaving lines (braces,
 * case labels, declarations of dst/cbp/val/k/j/mb_pos) are not visible.
 */
4450 static void vc1_decode_i_blocks(VC1Context *v)
4453 MpegEncContext *s = &v->s;
4458 /* select codingmode used for VLC tables selection */
4459 switch (v->y_ac_table_index) {
4461 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4464 v->codingset = CS_HIGH_MOT_INTRA;
4467 v->codingset = CS_MID_RATE_INTRA;
/* chroma AC table selects the second coding set */
4471 switch (v->c_ac_table_index) {
4473 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4476 v->codingset2 = CS_HIGH_MOT_INTER;
4479 v->codingset2 = CS_MID_RATE_INTER;
4483 /* Set DC scale - y and c use the same */
4484 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4485 s->c_dc_scale = s->c_dc_scale_table[v->pq];
4488 s->mb_x = s->mb_y = 0;
4490 s->first_slice_line = 1;
4491 for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
4493 ff_init_block_index(s);
4494 for (; s->mb_x < s->mb_width; s->mb_x++) {
4496 ff_update_block_index(s);
/* destination pointers for the 4 luma 8x8 blocks and 2 chroma blocks */
4497 dst[0] = s->dest[0];
4498 dst[1] = dst[0] + 8;
4499 dst[2] = s->dest[0] + s->linesize * 8;
4500 dst[3] = dst[2] + 8;
4501 dst[4] = s->dest[1];
4502 dst[5] = s->dest[2];
4503 s->dsp.clear_blocks(s->block[0]);
4504 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4505 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4506 s->current_picture.f.qscale_table[mb_pos] = v->pq;
4507 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4508 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4510 // do actual MB decoding and displaying
4511 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4512 v->s.ac_pred = get_bits1(&v->s.gb);
4514 for (k = 0; k < 6; k++) {
4515 val = ((cbp >> (5 - k)) & 1);
/* predict "coded" status from neighbours; XOR folded back into cbp */
4518 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4522 cbp |= val << (5 - k);
4524 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
/* skip chroma reconstruction in grayscale-only mode */
4526 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4528 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4529 if (v->pq >= 9 && v->overlap) {
/* overlap path: coefficients doubled, signed store (bias applied later by filter) */
4531 for (j = 0; j < 64; j++)
4532 s->block[k][j] <<= 1;
4533 s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
/* non-overlap path: remove the +64 bias, then unsigned store */
4536 for (j = 0; j < 64; j++)
4537 s->block[k][j] = (s->block[k][j] - 64) << 1;
4538 s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4542 if (v->pq >= 9 && v->overlap) {
/* horizontal overlap smoothing against the left neighbour */
4544 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4545 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4546 if (!(s->flags & CODEC_FLAG_GRAY)) {
4547 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4548 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4551 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4552 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
/* vertical smoothing needs a row above, hence the first_slice_line guard */
4553 if (!s->first_slice_line) {
4554 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4555 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4556 if (!(s->flags & CODEC_FLAG_GRAY)) {
4557 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4558 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4561 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4562 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4564 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* bitstream sanity: bail out of the MB loop on overconsumption */
4566 if (get_bits_count(&s->gb) > v->bits) {
4567 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4568 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4569 get_bits_count(&s->gb), v->bits);
/* with the loop filter active, rows are displayed one MB row late */
4573 if (!v->s.loop_filter)
4574 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4576 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4578 s->first_slice_line = 0;
4580 if (v->s.loop_filter)
4581 ff_draw_horiz_band(s, (s->mb_height - 1) * 16, 16);
4582 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4585 /** Decode blocks of I-frame for advanced profile
 * Slice-aware MB loop (start_mb_y..end_mb_y). Differences from the
 * simple/main path: FIELDTX/ACPRED/OVERFLAGS bitplanes may be raw-coded
 * per MB, per-MB quantizer (mquant), availability flags for DC/AC
 * prediction, and reconstruction is delayed one MB via v->block[] with
 * vc1_put_signed_blocks_clamped() / vc1_loop_filter_iblk_delayed().
 * NOTE(review): excerpt is lossy — case labels, braces and several
 * declarations (cbp, val, k, mb_pos, mquant) are not visible here.
 */
4587 static void vc1_decode_i_blocks_adv(VC1Context *v)
4590 MpegEncContext *s = &v->s;
4596 GetBitContext *gb = &s->gb;
4598 /* select codingmode used for VLC tables selection */
4599 switch (v->y_ac_table_index) {
4601 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4604 v->codingset = CS_HIGH_MOT_INTRA;
4607 v->codingset = CS_MID_RATE_INTRA;
4611 switch (v->c_ac_table_index) {
4613 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4616 v->codingset2 = CS_HIGH_MOT_INTER;
4619 v->codingset2 = CS_MID_RATE_INTER;
4624 s->mb_x = s->mb_y = 0;
4626 s->first_slice_line = 1;
4627 s->mb_y = s->start_mb_y;
4628 if (s->start_mb_y) {
/* mid-frame slice: clear the coded-block predictors of the row above */
4630 ff_init_block_index(s);
4631 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4632 (1 + s->b8_stride) * sizeof(*s->coded_block));
4634 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4636 ff_init_block_index(s);
4637 for (;s->mb_x < s->mb_width; s->mb_x++) {
4638 DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
4639 ff_update_block_index(s);
4640 s->dsp.clear_blocks(block[0]);
4641 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4642 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4643 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4644 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4646 // do actual MB decoding and displaying
/* raw (non-bitplane-coded) per-MB flags are read inline from the bitstream */
4647 if (v->fieldtx_is_raw)
4648 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4649 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4650 if ( v->acpred_is_raw)
4651 v->s.ac_pred = get_bits1(&v->s.gb);
4653 v->s.ac_pred = v->acpred_plane[mb_pos];
4655 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4656 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4660 s->current_picture.f.qscale_table[mb_pos] = mquant;
4661 /* Set DC scale - y and c use the same */
4662 s->y_dc_scale = s->y_dc_scale_table[mquant];
4663 s->c_dc_scale = s->c_dc_scale_table[mquant];
4665 for (k = 0; k < 6; k++) {
4666 val = ((cbp >> (5 - k)) & 1);
4669 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4673 cbp |= val << (5 - k);
/* neighbour availability for DC/AC prediction: above / left */
4675 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4676 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4678 vc1_decode_i_block_adv(v, block[k], k, val,
4679 (k < 4) ? v->codingset : v->codingset2, mquant);
4681 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4683 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
/* output is one MB behind: smooth, store, then (optionally) deblock */
4686 vc1_smooth_overlap_filter_iblk(v);
4687 vc1_put_signed_blocks_clamped(v);
4688 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4690 if (get_bits_count(&s->gb) > v->bits) {
4691 // TODO: may need modification to handle slice coding
4692 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4693 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4694 get_bits_count(&s->gb), v->bits);
4698 if (!v->s.loop_filter)
4699 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4701 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4702 s->first_slice_line = 0;
4705 /* raw bottom MB row */
/* flush the delayed reconstruction of the final MB row */
4707 ff_init_block_index(s);
4708 for (;s->mb_x < s->mb_width; s->mb_x++) {
4709 ff_update_block_index(s);
4710 vc1_put_signed_blocks_clamped(v);
4711 if (v->s.loop_filter)
4712 vc1_loop_filter_iblk_delayed(v, v->pq);
4714 if (v->s.loop_filter)
4715 ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
/* field pictures double the MB-row coordinates for error resilience */
4716 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4717 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/* Decode all macroblocks of a P picture. Dispatches per MB on the frame
 * coding mode (field-interlaced / frame-interlaced / progressive), then
 * applies the in-loop deblocking filter one MB row late. The cbp/ttblk/
 * is_intra/luma_mv line buffers hold two rows and are shifted down once
 * per row so the filter can see the previous row's state.
 * NOTE(review): excerpt is lossy — case labels/braces are missing. */
4720 static void vc1_decode_p_blocks(VC1Context *v)
4722 MpegEncContext *s = &v->s;
4723 int apply_loop_filter;
4725 /* select codingmode used for VLC tables selection */
/* NOTE(review): both switches test c_ac_table_index here (the I-frame
 * path keys the first one off y_ac_table_index) — confirm against the
 * VC-1 spec / upstream before "fixing". */
4726 switch (v->c_ac_table_index) {
4728 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4731 v->codingset = CS_HIGH_MOT_INTRA;
4734 v->codingset = CS_MID_RATE_INTRA;
4738 switch (v->c_ac_table_index) {
4740 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4743 v->codingset2 = CS_HIGH_MOT_INTER;
4746 v->codingset2 = CS_MID_RATE_INTER;
/* honour skip_loop_filter for non-key frames */
4750 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4751 s->first_slice_line = 1;
4752 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4753 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4755 ff_init_block_index(s);
4756 for (; s->mb_x < s->mb_width; s->mb_x++) {
4757 ff_update_block_index(s);
/* per-MB dispatch on frame coding mode */
4759 if (v->fcm == ILACE_FIELD)
4760 vc1_decode_p_mb_intfi(v);
4761 else if (v->fcm == ILACE_FRAME)
4762 vc1_decode_p_mb_intfr(v);
4763 else vc1_decode_p_mb(v);
/* loop filter runs one row behind, progressive only */
4764 if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
4765 vc1_apply_p_loop_filter(v);
4766 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4767 // TODO: may need modification to handle slice coding
4768 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4769 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4770 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
/* shift the two-row state buffers: current row becomes previous row */
4774 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4775 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4776 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4777 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4778 if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4779 s->first_slice_line = 0;
4781 if (apply_loop_filter) {
/* filter the final (still-unfiltered) MB row */
4783 ff_init_block_index(s);
4784 for (; s->mb_x < s->mb_width; s->mb_x++) {
4785 ff_update_block_index(s);
4786 vc1_apply_p_loop_filter(v);
4789 if (s->end_mb_y >= s->start_mb_y)
4790 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4791 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4792 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/* Decode all macroblocks of a B picture. Same codingset selection as the
 * P path; per-MB dispatch on the frame coding mode (only the
 * field-interlaced call is visible in this excerpt — the other branches
 * fall on dropped lines).
 * NOTE(review): excerpt is lossy — case labels/braces are missing. */
4795 static void vc1_decode_b_blocks(VC1Context *v)
4797 MpegEncContext *s = &v->s;
4799 /* select codingmode used for VLC tables selection */
4800 switch (v->c_ac_table_index) {
4802 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4805 v->codingset = CS_HIGH_MOT_INTRA;
4808 v->codingset = CS_MID_RATE_INTRA;
4812 switch (v->c_ac_table_index) {
4814 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4817 v->codingset2 = CS_HIGH_MOT_INTER;
4820 v->codingset2 = CS_MID_RATE_INTER;
4824 s->first_slice_line = 1;
4825 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4827 ff_init_block_index(s);
4828 for (; s->mb_x < s->mb_width; s->mb_x++) {
4829 ff_update_block_index(s);
4831 if (v->fcm == ILACE_FIELD)
4832 vc1_decode_b_mb_intfi(v);
4835 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4836 // TODO: may need modification to handle slice coding
4837 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4838 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4839 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4842 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* with loop filter active, display each row one MB row late */
4844 if (!v->s.loop_filter)
4845 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4847 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4848 s->first_slice_line = 0;
4850 if (v->s.loop_filter)
4851 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4852 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4853 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/* Handle a skipped P frame: copy every MB row of the previous picture
 * (luma 16 lines, chroma 8 lines) into the current output and mark the
 * whole frame as decoded for the error-resilience code. */
4856 static void vc1_decode_skip_blocks(VC1Context *v)
4858 MpegEncContext *s = &v->s;
4860 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
4861 s->first_slice_line = 1;
4862 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4864 ff_init_block_index(s);
4865 ff_update_block_index(s);
/* verbatim row copy from the reference (last) picture */
4866 memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4867 memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4868 memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4869 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4870 s->first_slice_line = 0;
/* the copied frame is presented as a P picture */
4872 s->pict_type = AV_PICTURE_TYPE_P;
/* Top-level block-decoding dispatcher: picks the per-picture-type MB
 * loop. The X8 intra path (ff_intrax8_decode_picture) handles the
 * WMV3 "res_x8" style of intra frames; B pictures that are really
 * BI pictures fall through to the I-frame decoders.
 * NOTE(review): excerpt is lossy — the x8/BI condition lines and break
 * statements are not visible here. */
4875 static void vc1_decode_blocks(VC1Context *v)
4878 v->s.esc3_level_length = 0;
4880 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
/* reset the rotating block-index bookkeeping used by the adv I path */
4883 v->left_blk_idx = -1;
4884 v->topleft_blk_idx = 1;
4886 switch (v->s.pict_type) {
4887 case AV_PICTURE_TYPE_I:
4888 if (v->profile == PROFILE_ADVANCED)
4889 vc1_decode_i_blocks_adv(v);
4891 vc1_decode_i_blocks(v);
4893 case AV_PICTURE_TYPE_P:
4894 if (v->p_frame_skipped)
4895 vc1_decode_skip_blocks(v);
4897 vc1_decode_p_blocks(v);
4899 case AV_PICTURE_TYPE_B:
/* BI pictures are decoded with the intra machinery */
4901 if (v->profile == PROFILE_ADVANCED)
4902 vc1_decode_i_blocks_adv(v);
4904 vc1_decode_i_blocks(v);
4906 vc1_decode_b_blocks(v);
4912 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
4916 * Transform coefficients for both sprites in 16.16 fixed point format,
4917 * in the order they appear in the bitstream:
4919 * rotation 1 (unused)
4921 * rotation 2 (unused)
4928 int effect_type, effect_flag;
4929 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
4930 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
/* Read a 30-bit sprite coefficient and convert it to signed 16.16 fixed
 * point: subtract the 2^29 bias and scale by two.
 * NOTE(review): `<< 1` on a negative int is formally UB — works as *2 on
 * the targeted compilers, but worth confirming/cleaning up upstream. */
4933 static inline int get_fp_val(GetBitContext* gb)
4935 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
/* Parse one sprite affine-transform record into c[0..6] (16.16 fixed
 * point). A 2-bit mode selects how many of the scale/rotation terms
 * c[0],c[1],c[3],c[4] are explicitly coded; c[2]/c[5] are the x/y
 * translations and c[6] the blend/opacity term.
 * NOTE(review): excerpt is lossy — the case labels, default
 * initialisations and the conditional around c[6] are not visible. */
4938 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4942 switch (get_bits(gb, 2)) {
4945 c[2] = get_fp_val(gb);
4949 c[0] = c[4] = get_fp_val(gb);
4950 c[2] = get_fp_val(gb);
4953 c[0] = get_fp_val(gb);
4954 c[2] = get_fp_val(gb);
4955 c[4] = get_fp_val(gb);
4958 c[0] = get_fp_val(gb);
4959 c[1] = get_fp_val(gb);
4960 c[2] = get_fp_val(gb);
4961 c[3] = get_fp_val(gb);
4962 c[4] = get_fp_val(gb);
4965 c[5] = get_fp_val(gb);
4967 c[6] = get_fp_val(gb);
/* Parse the WMV3IMAGE/VC1IMAGE sprite header: one or two affine
 * transforms (rotation terms must be zero — only a sample request is
 * logged), then an optional "effect" record with up to two parameter
 * lists. Everything is logged at DEBUG level; parse errors only warn.
 * NOTE(review): excerpt is lossy — case labels, braces and some loop
 * headers are missing from view. */
4972 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4974 AVCodecContext *avctx = v->s.avctx;
4977 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4978 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
/* c[1]/c[3] are rotation terms; non-zero is unsupported */
4979 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4980 av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4981 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
4982 for (i = 0; i < 7; i++)
4983 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4984 sd->coefs[sprite][i] / (1<<16),
4985 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4986 av_log(avctx, AV_LOG_DEBUG, "\n");
/* deliberate assignment-in-condition (reads effect type, 0 = none) */
4990 if (sd->effect_type = get_bits_long(gb, 30)) {
4991 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
4993 vc1_sprite_parse_transform(gb, sd->effect_params1);
4996 vc1_sprite_parse_transform(gb, sd->effect_params1);
4997 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5000 for (i = 0; i < sd->effect_pcount1; i++)
5001 sd->effect_params1[i] = get_fp_val(gb);
5003 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5004 // effect 13 is simple alpha blending and matches the opacity above
5005 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5006 for (i = 0; i < sd->effect_pcount1; i++)
5007 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5008 sd->effect_params1[i] / (1 << 16),
5009 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5010 av_log(avctx, AV_LOG_DEBUG, "\n");
5013 sd->effect_pcount2 = get_bits(gb, 16);
/* effect_params2 has room for 10 entries — reject larger counts */
5014 if (sd->effect_pcount2 > 10) {
5015 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5017 } else if (sd->effect_pcount2) {
5019 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5020 while (++i < sd->effect_pcount2) {
5021 sd->effect_params2[i] = get_fp_val(gb);
5022 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5023 sd->effect_params2[i] / (1 << 16),
5024 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5026 av_log(avctx, AV_LOG_DEBUG, "\n");
/* deliberate assignment-in-condition */
5029 if (sd->effect_flag = get_bits1(gb))
5030 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
/* sanity-check bit consumption; WMV3IMAGE tolerates 64 trailing bits */
5032 if (get_bits_count(gb) >= gb->size_in_bits +
5033 (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
5034 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5035 if (get_bits_count(gb) < gb->size_in_bits - 8)
5036 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
/* Render one or two sprites into v->sprite_output_frame. Per output row:
 * horizontally scale each sprite row into the sr_rows caches (skipped
 * when no scaling/sub-pel offset is needed), then vertically blend with
 * the sprite_v_* DSP kernels. sr_cache[sprite][0/1] remembers which
 * source lines currently sit in the two cache rows.
 * NOTE(review): excerpt is lossy — the ysub[] declaration, several
 * braces and the trailing cache-invalidation loop body are missing. */
5039 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5041 int i, plane, row, sprite;
5042 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5043 uint8_t* src_h[2][2];
5044 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5046 MpegEncContext *s = &v->s;
/* clamp offsets/advances (16.16) so sampling stays inside the sprite */
5048 for (i = 0; i < 2; i++) {
5049 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5050 xadv[i] = sd->coefs[i][0];
5051 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5052 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5054 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5055 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
/* c[6] of the second sprite is the blend factor between the two */
5057 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5059 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5060 int width = v->output_width>>!!plane;
5062 for (row = 0; row < v->output_height>>!!plane; row++) {
5063 uint8_t *dst = v->sprite_output_frame.data[plane] +
5064 v->sprite_output_frame.linesize[plane] * row;
5066 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5067 uint8_t *iplane = s->current_picture.f.data[plane];
5068 int iline = s->current_picture.f.linesize[plane];
5069 int ycoord = yoff[sprite] + yadv[sprite] * row;
5070 int yline = ycoord >> 16;
5071 ysub[sprite] = ycoord & 0xFFFF;
/* second sprite is sourced from the previous (last) picture */
5073 iplane = s->last_picture.f.data[plane];
5074 iline = s->last_picture.f.linesize[plane];
/* fast path: integer x offset and 1:1 x scale — sample in place */
5076 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5077 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5079 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + FFMIN(yline + 1, (v->sprite_height>>!!plane)-1) * iline;
/* slow path: horizontally rescale into the cached rows */
5081 if (sr_cache[sprite][0] != yline) {
5082 if (sr_cache[sprite][1] == yline) {
/* desired line already in the other cache slot — swap */
5083 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5084 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5086 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5087 sr_cache[sprite][0] = yline;
/* second cache row only needed when vertically interpolating */
5090 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5091 v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane + FFMIN(yline + 1, (v->sprite_height>>!!plane)-1) * iline, xoff[sprite], xadv[sprite], width);
5092 sr_cache[sprite][1] = yline + 1;
5094 src_h[sprite][0] = v->sr_rows[sprite][0];
5095 src_h[sprite][1] = v->sr_rows[sprite][1];
/* vertical blend: pick the kernel by which sprites need sub-pel y */
5099 if (!v->two_sprites) {
5101 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5103 memcpy(dst, src_h[0][0], width);
5106 if (ysub[0] && ysub[1]) {
5107 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5108 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5109 } else if (ysub[0]) {
5110 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5111 src_h[1][0], alpha, width);
5112 } else if (ysub[1]) {
5113 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5114 src_h[0][0], (1<<16)-1-alpha, width);
5116 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
/* invalidate the row caches before the next plane */
5122 for (i = 0; i < 2; i++) {
/* Decode a sprite (WMImage) frame: parse the sprite header, validate
 * that the needed reference pictures exist, (re)acquire the dedicated
 * output frame and render into it. Returns an error code on failure
 * (return statements fall on lines dropped from this excerpt). */
5132 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5134 MpegEncContext *s = &v->s;
5135 AVCodecContext *avctx = s->avctx;
5138 vc1_parse_sprites(v, gb, &sd);
5140 if (!s->current_picture.f.data[0]) {
5141 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
/* two-sprite mode additionally needs a valid previous picture */
5145 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5146 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
/* release any previously-held output buffer before re-allocating */
5150 if (v->sprite_output_frame.data[0])
5151 avctx->release_buffer(avctx, &v->sprite_output_frame);
5153 v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
5154 v->sprite_output_frame.reference = 0;
5155 if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
5156 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5160 vc1_draw_sprites(v, &sd);
/* Flush callback for the WMImage codecs: blank the current picture so a
 * missing second sprite shows black instead of stale data. */
5165 static void vc1_sprite_flush(AVCodecContext *avctx)
5167 VC1Context *v = avctx->priv_data;
5168 MpegEncContext *s = &v->s;
5169 AVFrame *f = &s->current_picture.f;
5172 /* Windows Media Image codecs have a convergence interval of two keyframes.
5173 Since we can't enforce it, clear to black the missing sprite. This is
5174 wrong but it looks better than doing nothing. */
/* luma cleared to 0, chroma to neutral 128 */
5177 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5178 for (i = 0; i < v->sprite_height>>!!plane; i++)
5179 memset(f->data[plane] + i * f->linesize[plane],
5180 plane ? 128 : 0, f->linesize[plane]);
/* Allocate all per-context tables: MB bitplanes, per-row block/cbp/
 * ttblk/is_intra/luma_mv buffers (two rows each, the second-row pointer
 * aliases base + mb_stride), block-type and MV-field planes laid out so
 * they can be indexed via s->block_index[], plus the sprite line caches
 * for the WMImage codecs. Returns non-zero on allocation failure.
 * NOTE(review): only a subset of the allocations is checked in the final
 * if — the mv_f*/blk_mv_type/mb_type_base results appear unchecked in
 * the visible lines; confirm against the dropped lines. */
5186 static av_cold int vc1_decode_init_alloc_tables(VC1Context *v)
5187 MpegEncContext *s = &v->s;
5190 /* Allocate mb bitplanes */
5191 v->mv_type_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5192 v->direct_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5193 v->forward_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5194 v->fieldtx_plane = av_mallocz(s->mb_stride * s->mb_height);
5195 v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5196 v->over_flags_plane = av_malloc (s->mb_stride * s->mb_height);
/* one delayed block row, plus slack (see vc1_decode_i_blocks_adv) */
5198 v->n_allocated_blks = s->mb_width + 2;
5199 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
/* two-row buffers; the working pointer is the second row */
5200 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5201 v->cbp = v->cbp_base + s->mb_stride;
5202 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5203 v->ttblk = v->ttblk_base + s->mb_stride;
5204 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5205 v->is_intra = v->is_intra_base + s->mb_stride;
5206 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5207 v->luma_mv = v->luma_mv_base + s->mb_stride;
5209 /* allocate block type info in that way so it could be used with s->block_index[] */
5210 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5211 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5212 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5213 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5215 /* allocate memory to store block level MV info */
5216 v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5217 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5218 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5219 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5220 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5221 v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5222 v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5223 v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5224 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5225 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5226 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5228 /* Init coded blocks info */
5229 if (v->profile == PROFILE_ADVANCED) {
5230 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5232 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5236 ff_intrax8_common_init(&v->x8,s);
/* sprite codecs need four horizontal line buffers (2 sprites x 2 rows) */
5238 if (s->avctx->codec_id == CODEC_ID_WMV3IMAGE || s->avctx->codec_id == CODEC_ID_VC1IMAGE) {
5239 for (i = 0; i < 4; i++)
5240 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
5243 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5244 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5251 /** Initialize a VC1/WMV3 decoder
5252 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5253 * @todo TODO: Decypher remaining bits in extra_data
 *
 * Parses the extradata (inline sequence header for WMV3; marker-framed
 * sequence header + entry point for VC-1/WVC1/WVP2), selects pixel
 * format / hwaccel / IDCT, builds the transposed zigzag tables and, for
 * the WMImage codecs, swaps the sprite dimensions for the container's
 * display size. Returns non-zero on error (error returns fall on lines
 * dropped from this excerpt).
 */
5255 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5257 VC1Context *v = avctx->priv_data;
5258 MpegEncContext *s = &v->s;
5262 /* save the container output size for WMImage */
5263 v->output_width = avctx->width;
5264 v->output_height = avctx->height;
5266 if (!avctx->extradata_size || !avctx->extradata)
5268 if (!(avctx->flags & CODEC_FLAG_GRAY))
5269 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5271 avctx->pix_fmt = PIX_FMT_GRAY8;
5272 avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
/* VC-1 handles its own edges; force emulated-edge mode */
5274 avctx->flags |= CODEC_FLAG_EMU_EDGE;
5275 v->s.flags |= CODEC_FLAG_EMU_EDGE;
5277 if (avctx->idct_algo == FF_IDCT_AUTO) {
5278 avctx->idct_algo = FF_IDCT_WMV2;
5281 if (ff_vc1_init_common(v) < 0)
5283 ff_vc1dsp_init(&v->vc1dsp);
5285 if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE) {
5288 // looks like WMV3 has a sequence header stored in the extradata
5289 // advanced sequence header may be before the first frame
5290 // the last byte of the extradata is a version number, 1 for the
5291 // samples we can decode
5293 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5295 if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
5298 count = avctx->extradata_size*8 - get_bits_count(&gb);
5300 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5301 count, get_bits(&gb, count));
5302 } else if (count < 0) {
5303 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5305 } else { // VC1/WVC1/WVP2
/* advanced profile: extradata is a sequence of marker-prefixed IDUs */
5306 const uint8_t *start = avctx->extradata;
5307 uint8_t *end = avctx->extradata + avctx->extradata_size;
5308 const uint8_t *next;
5309 int size, buf2_size;
5310 uint8_t *buf2 = NULL;
5311 int seq_initialized = 0, ep_initialized = 0;
5313 if (avctx->extradata_size < 16) {
5314 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5318 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5319 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5321 for (; next < end; start = next) {
5322 next = find_next_marker(start + 4, end);
5323 size = next - start - 4;
/* un-escape the IDU payload before parsing it */
5326 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5327 init_get_bits(&gb, buf2, buf2_size * 8);
5328 switch (AV_RB32(start)) {
5329 case VC1_CODE_SEQHDR:
5330 if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5334 seq_initialized = 1;
5336 case VC1_CODE_ENTRYPOINT:
5337 if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
5346 if (!seq_initialized || !ep_initialized) {
5347 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5350 v->res_sprite = (avctx->codec_tag == MKTAG('W','V','P','2'));
5353 avctx->profile = v->profile;
5354 if (v->profile == PROFILE_ADVANCED)
5355 avctx->level = v->level;
5357 avctx->has_b_frames = !!avctx->max_b_frames;
5359 s->mb_width = (avctx->coded_width + 15) >> 4;
5360 s->mb_height = (avctx->coded_height + 15) >> 4;
5362 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
/* build row<->column transposed zigzag scan tables */
5363 for (i = 0; i < 64; i++) {
5364 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5365 v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5366 v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5367 v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5368 v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5369 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5374 memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
5379 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
/* coded size is the sprite size; display size comes from the container */
5380 v->sprite_width = avctx->coded_width;
5381 v->sprite_height = avctx->coded_height;
5383 avctx->coded_width = avctx->width = v->output_width;
5384 avctx->coded_height = avctx->height = v->output_height;
5386 // prevent 16.16 overflows
5387 if (v->sprite_width > 1 << 14 ||
5388 v->sprite_height > 1 << 14 ||
5389 v->output_width > 1 << 14 ||
5390 v->output_height > 1 << 14) return -1;
5395 /** Close a VC1/WMV3 decoder
5396 * @warning Initial try at using MpegEncContext stuff
 *
 * Releases the sprite output frame (WMImage codecs), frees every table
 * allocated in vc1_decode_init_alloc_tables(), and tears down the
 * MpegEncContext and the intrax8 helper. av_freep() tolerates NULL, so
 * partial-init failure paths are safe here.
 */
5398 static av_cold int vc1_decode_end(AVCodecContext *avctx)
5400 VC1Context *v = avctx->priv_data;
5403 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5404 && v->sprite_output_frame.data[0])
5405 avctx->release_buffer(avctx, &v->sprite_output_frame);
5406 for (i = 0; i < 4; i++)
5407 av_freep(&v->sr_rows[i >> 1][i & 1]);
5408 av_freep(&v->hrd_rate);
5409 av_freep(&v->hrd_buffer);
5410 ff_MPV_common_end(&v->s);
5411 av_freep(&v->mv_type_mb_plane);
5412 av_freep(&v->direct_mb_plane);
5413 av_freep(&v->forward_mb_plane);
5414 av_freep(&v->fieldtx_plane);
5415 av_freep(&v->acpred_plane);
5416 av_freep(&v->over_flags_plane);
5417 av_freep(&v->mb_type_base);
5418 av_freep(&v->blk_mv_type_base);
5419 av_freep(&v->mv_f_base);
5420 av_freep(&v->mv_f_last_base);
5421 av_freep(&v->mv_f_next_base);
5422 av_freep(&v->block);
5423 av_freep(&v->cbp_base);
5424 av_freep(&v->ttblk_base);
5425 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5426 av_freep(&v->luma_mv_base);
5427 ff_intrax8_common_end(&v->x8);
5432 /** Decode a VC1/WMV3 frame
5433 * @todo TODO: Handle VC-1 IDUs (Transport level?)
/* Top-level per-packet decode entry point: splits the packet into
 * start-code-delimited units (advanced profile), unescapes the payload,
 * parses the frame header, decodes all fields/slices and returns one
 * AVFrame through 'data'.  NOTE(review): this excerpt elides many
 * original lines (error paths, closing braces, the slice struct body);
 * comments below describe only what is visible. */
5435 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5436 int *data_size, AVPacket *avpkt)
5438 const uint8_t *buf = avpkt->data;
5439 int buf_size = avpkt->size, n_slices = 0, i;
5440 VC1Context *v = avctx->priv_data;
5441 MpegEncContext *s = &v->s;
5442 AVFrame *pict = data;
5443 uint8_t *buf2 = NULL;
5444 const uint8_t *buf_start = buf;
5445 int mb_height, n_slices1=-1;
/* Per-slice bookkeeping array: each entry holds an unescaped buffer, a
 * GetBitContext and a starting macroblock row ('mby_start'); the struct
 * field declarations are on lines elided from this excerpt. */
5450 } *slices = NULL, *tmp;
5452 if(s->flags & CODEC_FLAG_LOW_DELAY)
5455 /* no supplementary picture */
5456 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5457 /* special case for last picture */
/* End of stream: flush the delayed reference frame, if any. */
5458 if (s->low_delay == 0 && s->next_picture_ptr) {
5459 *pict = s->next_picture_ptr->f;
5460 s->next_picture_ptr = NULL;
5462 *data_size = sizeof(AVFrame);
/* Pick a VDPAU pixel format when the codec is a VDPAU-capable variant;
 * the profile decides WMV3 vs. full VC-1 surfaces. */
5468 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5469 if (v->profile < PROFILE_ADVANCED)
5470 avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
5472 avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
5475 //for advanced profile we may need to parse and unescape data
5476 if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5478 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5480 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5481 const uint8_t *start, *end, *next;
/* Walk all start-code-delimited bitstream data units in the packet. */
5485 for (start = buf, end = buf + buf_size; next < end; start = next) {
5486 next = find_next_marker(start + 4, end);
5487 size = next - start - 4;
5488 if (size <= 0) continue;
5489 switch (AV_RB32(start)) {
5490 case VC1_CODE_FRAME:
/* Hardware paths consume the escaped bitstream as-is; software
 * decoding unescapes the frame payload into buf2. */
5491 if (avctx->hwaccel ||
5492 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5494 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5496 case VC1_CODE_FIELD: {
/* Second field of an interlaced frame: stored as an extra slice
 * that starts at the vertical middle of the macroblock grid. */
5498 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5501 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5502 if (!slices[n_slices].buf)
5504 buf_size3 = vc1_unescape_buffer(start + 4, size,
5505 slices[n_slices].buf);
5506 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5508 /* assuming that the field marker is at the exact middle,
5509 hope it's correct */
5510 slices[n_slices].mby_start = s->mb_height >> 1;
5511 n_slices1 = n_slices - 1; // index of the last slice of the first field
5515 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5516 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5517 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5518 ff_vc1_decode_entry_point(avctx, v, &s->gb);
5520 case VC1_CODE_SLICE: {
/* Regular slice: its starting MB row is read from the 9-bit
 * SLICE_ADDR field of the slice header. */
5522 slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5525 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5526 if (!slices[n_slices].buf)
5528 buf_size3 = vc1_unescape_buffer(start + 4, size,
5529 slices[n_slices].buf);
5530 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5532 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5538 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5539 const uint8_t *divider;
5542 divider = find_next_marker(buf, buf + buf_size);
5543 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5544 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5546 } else { // found field marker, unescape second field
/* Unlike the branches above, av_realloc goes into 'tmp' first so the
 * old 'slices' pointer is not lost if the reallocation fails. */
5547 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5551 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5552 if (!slices[n_slices].buf)
5554 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5555 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5557 slices[n_slices].mby_start = s->mb_height >> 1;
5558 n_slices1 = n_slices - 1;
/* First field is everything before the divider marker. */
5561 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5563 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5565 init_get_bits(&s->gb, buf2, buf_size2*8);
/* Simple/main profile: no escaping, read the packet directly. */
5567 init_get_bits(&s->gb, buf, buf_size*8);
/* res_sprite (WMVP/WVP2) streams carry sprite control bits up front. */
5569 if (v->res_sprite) {
5570 v->new_sprite = !get_bits1(&s->gb);
5571 v->two_sprites = get_bits1(&s->gb);
5572 /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
5573 we're using the sprite compositor. These are intentionally kept separate
5574 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5575 the vc1 one for WVP2 */
5576 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5577 if (v->new_sprite) {
5578 // switch AVCodecContext parameters to those of the sprites
5579 avctx->width = avctx->coded_width = v->sprite_width;
5580 avctx->height = avctx->coded_height = v->sprite_height;
/* If the coded dimensions changed (e.g. sprite <-> output switch above),
 * tear the context down so it is rebuilt at the new size. */
5587 if (s->context_initialized &&
5588 (s->width != avctx->coded_width ||
5589 s->height != avctx->coded_height)) {
5590 vc1_decode_end(avctx);
5593 if (!s->context_initialized) {
5594 if (ff_msmpeg4_decode_init(avctx) < 0 || vc1_decode_init_alloc_tables(v) < 0)
5597 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5599 if (v->profile == PROFILE_ADVANCED) {
5600 s->h_edge_pos = avctx->coded_width;
5601 s->v_edge_pos = avctx->coded_height;
5605 /* We need to set current_picture_ptr before reading the header,
5606 * otherwise we cannot store anything in there. */
5607 if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5608 int i = ff_find_unused_picture(s, 0);
5611 s->current_picture_ptr = &s->picture[i];
5614 // do parse frame header
5615 v->pic_header_flag = 0;
5616 if (v->profile < PROFILE_ADVANCED) {
5617 if (ff_vc1_parse_frame_header(v, &s->gb) == -1) {
5621 if (ff_vc1_parse_frame_header_adv(v, &s->gb) == -1) {
/* Sprite (image) streams must contain I-frames only. */
5626 if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
5627 && s->pict_type != AV_PICTURE_TYPE_I) {
5628 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5632 // process pulldown flags
5633 s->current_picture_ptr->f.repeat_pict = 0;
5634 // Pulldown flags are only valid when 'broadcast' has been set.
5635 // So ticks_per_frame will be 2
5638 s->current_picture_ptr->f.repeat_pict = 1;
5639 } else if (v->rptfrm) {
/* RPTFRM counts whole repeated frames; repeat_pict is in fields. */
5641 s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5644 // for skipping the frame
5645 s->current_picture.f.pict_type = s->pict_type;
5646 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5648 /* skip B-frames if we don't have reference frames */
5649 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) {
/* Honor the user-requested discard level (avctx->skip_frame). */
5652 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5653 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5654 avctx->skip_frame >= AVDISCARD_ALL) {
5658 if (s->next_p_frame_damaged) {
5659 if (s->pict_type == AV_PICTURE_TYPE_B)
5662 s->next_p_frame_damaged = 0;
5665 if (ff_MPV_frame_start(s, avctx) < 0) {
5669 s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
5670 s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
/* Hardware-accelerated paths: hand the original escaped bitstream off to
 * VDPAU or the generic hwaccel hooks instead of decoding in software. */
5672 if ((CONFIG_VC1_VDPAU_DECODER)
5673 &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5674 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5675 else if (avctx->hwaccel) {
5676 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5678 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5680 if (avctx->hwaccel->end_frame(avctx) < 0)
5683 ff_er_frame_start(s);
5685 v->bits = buf_size * 8;
/* Field pictures: double the line strides so each field is addressed as
 * a half-height picture, then rotate the per-field motion vector planes
 * (mv_f <- mv_f_next <- mv_f_last) through a local 'tmp' whose
 * declaration is on an elided line — NOTE(review): this is not the
 * slice-array 'tmp' declared above. */
5686 if (v->field_mode) {
5688 s->current_picture.f.linesize[0] <<= 1;
5689 s->current_picture.f.linesize[1] <<= 1;
5690 s->current_picture.f.linesize[2] <<= 1;
5692 s->uvlinesize <<= 1;
5693 tmp[0] = v->mv_f_last[0];
5694 tmp[1] = v->mv_f_last[1];
5695 v->mv_f_last[0] = v->mv_f_next[0];
5696 v->mv_f_last[1] = v->mv_f_next[1];
5697 v->mv_f_next[0] = v->mv_f[0];
5698 v->mv_f_next[1] = v->mv_f[1];
5699 v->mv_f[0] = tmp[0];
5700 v->mv_f[1] = tmp[1];
/* Decode each slice/field range; in field mode the MB grid is split in
 * half vertically. */
5702 mb_height = s->mb_height >> v->field_mode;
5703 for (i = 0; i <= n_slices; i++) {
/* Entering the second field: offset block and MB addressing so the
 * second field lands in its half of the frame buffers. */
5704 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5705 v->second_field = 1;
5706 v->blocks_off = s->mb_width * s->mb_height << 1;
5707 v->mb_off = s->mb_stride * s->mb_height >> 1;
5709 v->second_field = 0;
/* The second field always re-parses a full picture header; other
 * slices may carry an optional one signalled by a single bit. */
5714 v->pic_header_flag = 0;
5715 if (v->field_mode && i == n_slices1 + 2)
5716 ff_vc1_parse_frame_header_adv(v, &s->gb);
5717 else if (get_bits1(&s->gb)) {
5718 v->pic_header_flag = 1;
5719 ff_vc1_parse_frame_header_adv(v, &s->gb);
/* Compute the MB row range [start_mb_y, end_mb_y) for this slice. */
5722 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5723 if (!v->field_mode || v->second_field)
5724 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5726 s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5727 vc1_decode_blocks(v);
/* Switch the bit reader over to the next slice's own buffer. */
5729 s->gb = slices[i].gb;
/* Undo the field-mode stride doubling; for B-fields also restore the
 * next-field MV plane that decoding overwrote. */
5731 if (v->field_mode) {
5732 v->second_field = 0;
5733 if (s->pict_type == AV_PICTURE_TYPE_B) {
5734 memcpy(v->mv_f_base, v->mv_f_next_base,
5735 2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5737 s->current_picture.f.linesize[0] >>= 1;
5738 s->current_picture.f.linesize[1] >>= 1;
5739 s->current_picture.f.linesize[2] >>= 1;
5741 s->uvlinesize >>= 1;
5743 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits);
5744 // if (get_bits_count(&s->gb) > buf_size * 8)
5746 if(s->error_occurred && s->pict_type == AV_PICTURE_TYPE_B)
5751 ff_MPV_frame_end(s);
/* Image (sprite) codecs: restore the output dimensions and composite the
 * decoded sprites into sprite_output_frame. */
5753 if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
5755 avctx->width = avctx->coded_width = v->output_width;
5756 avctx->height = avctx->coded_height = v->output_height;
5757 if (avctx->skip_frame >= AVDISCARD_NONREF)
5759 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5760 if (vc1_decode_sprites(v, &s->gb))
5763 *pict = v->sprite_output_frame;
5764 *data_size = sizeof(AVFrame);
/* Normal output ordering: B-frames and low-delay streams emit the
 * current picture, otherwise the delayed last picture is returned. */
5766 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5767 *pict = s->current_picture_ptr->f;
5768 } else if (s->last_picture_ptr != NULL) {
5769 *pict = s->last_picture_ptr->f;
5771 if (s->last_picture_ptr || s->low_delay) {
5772 *data_size = sizeof(AVFrame);
5773 ff_print_debug_info(s, pict);
/* Cleanup (both the success and the error exit run a copy of this loop);
 * freeing of buf2 and the slices array itself is on elided lines. */
5779 for (i = 0; i < n_slices; i++)
5780 av_free(slices[i].buf);
5786 for (i = 0; i < n_slices; i++)
5787 av_free(slices[i].buf);
/* Profile name table shared by all VC-1 decoder variants below; the
 * FF_PROFILE_UNKNOWN entry terminates the list. */
5793 static const AVProfile profiles[] = {
5794 { FF_PROFILE_VC1_SIMPLE, "Simple" },
5795 { FF_PROFILE_VC1_MAIN, "Main" },
5796 { FF_PROFILE_VC1_COMPLEX, "Complex" },
5797 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5798 { FF_PROFILE_UNKNOWN },
/* Software decoder for full SMPTE VC-1 (all profiles).  NOTE(review):
 * the .name and .id designators are on lines elided from this excerpt. */
5801 AVCodec ff_vc1_decoder = {
5803 .type = AVMEDIA_TYPE_VIDEO,
5805 .priv_data_size = sizeof(VC1Context),
5806 .init = vc1_decode_init,
5807 .close = vc1_decode_end,
5808 .decode = vc1_decode_frame,
5809 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5810 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5811 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5812 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5815 #if CONFIG_WMV3_DECODER
/* Software decoder for WMV3 (Windows Media Video 9, pre-advanced
 * profiles); shares init/close/decode with the VC-1 decoder above. */
5816 AVCodec ff_wmv3_decoder = {
5818 .type = AVMEDIA_TYPE_VIDEO,
5819 .id = CODEC_ID_WMV3,
5820 .priv_data_size = sizeof(VC1Context),
5821 .init = vc1_decode_init,
5822 .close = vc1_decode_end,
5823 .decode = vc1_decode_frame,
5824 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5825 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5826 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5827 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5831 #if CONFIG_WMV3_VDPAU_DECODER
/* VDPAU hardware-accelerated WMV3 variant: same callbacks, but flags
 * CODEC_CAP_HWACCEL_VDPAU and restricts output to the VDPAU surface
 * format. */
5832 AVCodec ff_wmv3_vdpau_decoder = {
5833 .name = "wmv3_vdpau",
5834 .type = AVMEDIA_TYPE_VIDEO,
5835 .id = CODEC_ID_WMV3,
5836 .priv_data_size = sizeof(VC1Context),
5837 .init = vc1_decode_init,
5838 .close = vc1_decode_end,
5839 .decode = vc1_decode_frame,
5840 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5841 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5842 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE},
5843 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5847 #if CONFIG_VC1_VDPAU_DECODER
/* VDPAU hardware-accelerated VC-1 variant.  NOTE(review): the .id
 * designator is on a line elided from this excerpt. */
5848 AVCodec ff_vc1_vdpau_decoder = {
5849 .name = "vc1_vdpau",
5850 .type = AVMEDIA_TYPE_VIDEO,
5852 .priv_data_size = sizeof(VC1Context),
5853 .init = vc1_decode_init,
5854 .close = vc1_decode_end,
5855 .decode = vc1_decode_frame,
5856 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5857 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5858 .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE},
5859 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5863 #if CONFIG_WMV3IMAGE_DECODER
/* WMVP (Windows Media Image) variant: runs the sprite compositor, so it
 * registers a flush callback and has no CODEC_CAP_DELAY / no profiles
 * list. */
5864 AVCodec ff_wmv3image_decoder = {
5865 .name = "wmv3image",
5866 .type = AVMEDIA_TYPE_VIDEO,
5867 .id = CODEC_ID_WMV3IMAGE,
5868 .priv_data_size = sizeof(VC1Context),
5869 .init = vc1_decode_init,
5870 .close = vc1_decode_end,
5871 .decode = vc1_decode_frame,
5872 .capabilities = CODEC_CAP_DR1,
5873 .flush = vc1_sprite_flush,
5874 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5875 .pix_fmts = ff_pixfmt_list_420
5879 #if CONFIG_VC1IMAGE_DECODER
5880 AVCodec ff_vc1image_decoder = {
5882 .type = AVMEDIA_TYPE_VIDEO,
5883 .id = CODEC_ID_VC1IMAGE,
5884 .priv_data_size = sizeof(VC1Context),
5885 .init = vc1_decode_init,
5886 .close = vc1_decode_end,
5887 .decode = vc1_decode_frame,
5888 .capabilities = CODEC_CAP_DR1,
5889 .flush = vc1_sprite_flush,
5890 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5891 .pix_fmts = ff_pixfmt_list_420