2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of FFmpeg.
9 * FFmpeg is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * FFmpeg is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with FFmpeg; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
31 #include "error_resilience.h"
32 #include "mpegvideo.h"
34 #include "h264chroma.h"
37 #include "vc1acdata.h"
38 #include "msmpeg4data.h"
41 #include "vdpau_internal.h"
42 #include "libavutil/avassert.h"
/* NOTE(review): this chunk appears to be FFmpeg's vc1dec.c run through an
 * extraction that baked the original line numbers into each line and dropped
 * many lines; all code below is kept byte-identical, comments only added. */
/* Bit depth used when reading the intra-MB VLC table (presumably passed to
 * the VLC init/get_vlc2 elsewhere in the file -- TODO confirm). */
47 #define MB_INTRA_VLC_BITS 9
51 // offset tables for interlaced picture MVDATA decoding
/* Base offsets, indexed by MV-differential size class, added to the extra
 * bits read for interlaced MVDATA; table2 holds the cumulative (2^k - 1)
 * variant of table1's power-of-two steps. */
52 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
53 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
55 /***********************************************************************/
57 * @name VC-1 Bitplane decoding
75 /** @} */ //imode defines
/* Set up s->dest[] destination-plane pointers for the current MB position
 * via ff_init_block_index(). In field mode, when second_field and tff agree
 * (the !(second_field ^ tff) test), each plane pointer is advanced by one
 * picture line so writes land on the other field's lines.
 * NOTE(review): braces/lines are missing from this extraction; visible code
 * kept byte-identical. */
77 static void init_block_index(VC1Context *v)
79 MpegEncContext *s = &v->s;
80 ff_init_block_index(s);
81 if (v->field_mode && !(v->second_field ^ v->tff)) {
82 s->dest[0] += s->current_picture_ptr->f.linesize[0];
83 s->dest[1] += s->current_picture_ptr->f.linesize[1];
84 s->dest[2] += s->current_picture_ptr->f.linesize[2];
88 /** @} */ //Bitplane group
/* Output the decoded, overlap-filtered residual blocks to the picture
 * planes with put_signed_pixels_clamped(). As the in-code comment explains,
 * this trails the decode loop by one MB row and one MB column, so it emits
 * the top-left neighbour's blocks (and, at the right picture edge, the top
 * neighbour's). fieldtx doubles the luma stride and changes the vertical
 * offset (v_dist) for interlaced-frame MBs with field transform.
 * NOTE(review): many lines (braces, dsp-call argument lines) are missing
 * from this extraction; visible code kept byte-identical. */
90 static void vc1_put_signed_blocks_clamped(VC1Context *v)
92 MpegEncContext *s = &v->s;
93 int topleft_mb_pos, top_mb_pos;
94 int stride_y, fieldtx = 0;
97 /* The put pixels loop is always one MB row behind the decoding loop,
98 * because we can only put pixels when overlap filtering is done, and
99 * for filtering of the bottom edge of a MB, we need the next MB row
101 * Within the row, the put pixels loop is also one MB col behind the
102 * decoding loop. The reason for this is again, because for filtering
103 * of the right MB edge, we need the next MB present. */
104 if (!s->first_slice_line) {
106 topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
107 if (v->fcm == ILACE_FRAME)
108 fieldtx = v->fieldtx_plane[topleft_mb_pos];
/* fieldtx == 1: interleave the two fields -> double luma stride. */
109 stride_y = s->linesize << fieldtx;
110 v_dist = (16 - fieldtx) >> (fieldtx == 0);
111 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
112 s->dest[0] - 16 * s->linesize - 16,
114 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
115 s->dest[0] - 16 * s->linesize - 8,
117 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
118 s->dest[0] - v_dist * s->linesize - 16,
120 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
121 s->dest[0] - v_dist * s->linesize - 8,
123 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
124 s->dest[1] - 8 * s->uvlinesize - 8,
126 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
127 s->dest[2] - 8 * s->uvlinesize - 8,
/* Right picture edge: also flush the MB directly above (no right
 * neighbour will ever arrive for it). */
130 if (s->mb_x == s->mb_width - 1) {
131 top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
132 if (v->fcm == ILACE_FRAME)
133 fieldtx = v->fieldtx_plane[top_mb_pos];
134 stride_y = s->linesize << fieldtx;
135 v_dist = fieldtx ? 15 : 8;
136 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
137 s->dest[0] - 16 * s->linesize,
139 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
140 s->dest[0] - 16 * s->linesize + 8,
142 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
143 s->dest[0] - v_dist * s->linesize,
145 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
146 s->dest[0] - v_dist * s->linesize + 8,
148 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
149 s->dest[1] - 8 * s->uvlinesize,
151 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
152 s->dest[2] - 8 * s->uvlinesize,
/* Rotate the ring of block-buffer indices (wraps at n_allocated_blks)
 * so topleft/top/left/cur track the sliding MB window. */
157 #define inc_blk_idx(idx) do { \
159 if (idx >= v->n_allocated_blks) \
163 inc_blk_idx(v->topleft_blk_idx);
164 inc_blk_idx(v->top_blk_idx);
165 inc_blk_idx(v->left_blk_idx);
166 inc_blk_idx(v->cur_blk_idx);
/* Apply the VC-1 in-loop deblocking filters (strength pq) around the
 * current intra MB: 16-wide luma and 8-wide chroma edge filters from
 * v->vc1dsp, skipping edges that do not exist on the first slice line and
 * adding the bottom-row edges on the last MB row of the slice.
 * NOTE(review): several lines (braces, loop bodies) are missing from this
 * extraction; visible code kept byte-identical. */
169 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
171 MpegEncContext *s = &v->s;
173 if (!s->first_slice_line) {
174 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
176 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
177 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
/* j = 0/1 selects the U and V chroma planes (dest[1], dest[2]). */
178 for (j = 0; j < 2; j++) {
179 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
181 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
184 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
/* Last MB row of this slice: filter the edges that would otherwise be
 * handled when processing the (non-existent) next row. */
186 if (s->mb_y == s->end_mb_y - 1) {
188 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
189 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
190 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
192 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/* Delayed variant of vc1_loop_filter_iblk(): as the in-code comment says,
 * the loop filter here trails the overlap filter by one row/column, i.e.
 * two rows/columns behind the decode loop, hence the -16/-32 line offsets.
 * The s->mb_y == s->end_mb_y tail flushes the final pending rows.
 * NOTE(review): several lines (braces, conditions) are missing from this
 * extraction; visible code kept byte-identical. */
196 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
198 MpegEncContext *s = &v->s;
201 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
202 * means it runs two rows/cols behind the decoding loop. */
203 if (!s->first_slice_line) {
205 if (s->mb_y >= s->start_mb_y + 2) {
206 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
209 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
210 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
211 for (j = 0; j < 2; j++) {
212 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
214 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
218 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
/* Right picture edge: also filter the column directly above/behind. */
221 if (s->mb_x == s->mb_width - 1) {
222 if (s->mb_y >= s->start_mb_y + 2) {
223 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
226 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
227 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
228 for (j = 0; j < 2; j++) {
229 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
231 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
235 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
/* End of slice: drain the last delayed row. */
238 if (s->mb_y == s->end_mb_y) {
241 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
242 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
244 for (j = 0; j < 2; j++) {
245 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
250 if (s->mb_x == s->mb_width - 1) {
252 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
253 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
255 for (j = 0; j < 2; j++) {
256 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Conditional overlap smoothing for intra blocks (VC-1 "condover"): applies
 * vc1dsp h/v overlap filters on block boundaries of the current, left and
 * top MBs, gated by condover mode, pq >= 9, or the per-MB over_flags_plane
 * bit. The in-code comment below documents why it trails the decode loop.
 * Chroma blocks (indices 4/5) are skipped in grayscale mode.
 * NOTE(review): early-return/brace lines are missing from this extraction;
 * visible code kept byte-identical. */
264 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
266 MpegEncContext *s = &v->s;
269 if (v->condover == CONDOVER_NONE)
272 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
274 /* Within a MB, the horizontal overlap always runs before the vertical.
275 * To accomplish that, we run the H on left and internal borders of the
276 * currently decoded MB. Then, we wait for the next overlap iteration
277 * to do H overlap on the right edge of this MB, before moving over and
278 * running the V overlap. Therefore, the V overlap makes us trail by one
279 * MB col and the H overlap filter makes us trail by one MB row. This
280 * is reflected in the time at which we run the put_pixels loop. */
281 if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
/* H overlap across the left MB boundary (needs the left MB eligible). */
282 if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
283 v->over_flags_plane[mb_pos - 1])) {
284 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
285 v->block[v->cur_blk_idx][0]);
286 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
287 v->block[v->cur_blk_idx][2]);
288 if (!(s->flags & CODEC_FLAG_GRAY)) {
289 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
290 v->block[v->cur_blk_idx][4]);
291 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
292 v->block[v->cur_blk_idx][5]);
/* H overlap across the internal (middle) luma boundary of this MB. */
295 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
296 v->block[v->cur_blk_idx][1]);
297 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
298 v->block[v->cur_blk_idx][3]);
/* Right picture edge: run the V overlap for this MB now, since there
 * will be no further iteration to trigger it. */
300 if (s->mb_x == s->mb_width - 1) {
301 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
302 v->over_flags_plane[mb_pos - s->mb_stride])) {
303 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
304 v->block[v->cur_blk_idx][0]);
305 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
306 v->block[v->cur_blk_idx][1]);
307 if (!(s->flags & CODEC_FLAG_GRAY)) {
308 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
309 v->block[v->cur_blk_idx][4]);
310 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
311 v->block[v->cur_blk_idx][5]);
314 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
315 v->block[v->cur_blk_idx][2]);
316 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
317 v->block[v->cur_blk_idx][3]);
/* V overlap for the left (one-column-delayed) MB. */
320 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
321 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
322 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
323 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
324 v->block[v->left_blk_idx][0]);
325 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
326 v->block[v->left_blk_idx][1]);
327 if (!(s->flags & CODEC_FLAG_GRAY)) {
328 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
329 v->block[v->left_blk_idx][4]);
330 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
331 v->block[v->left_blk_idx][5]);
334 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
335 v->block[v->left_blk_idx][2]);
336 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
337 v->block[v->left_blk_idx][3]);
341 /** Do motion compensation over 1 macroblock
342 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
/* 1-MV motion compensation for a whole macroblock (luma + chroma).
 * dir selects forward (0, last_picture) or backward (1, next_picture)
 * prediction; in field mode with opposite-parity reference during the
 * second field, the just-decoded current picture is used instead.
 * Handles: MV storage for later B-frame use, chroma MV derivation (with
 * fastuvmc rounding), advanced-vs-simple/main profile clamping, edge
 * emulation, range-reduction scaling, intensity compensation via LUTs,
 * and qpel (mspel) or hpel luma MC plus bilinear chroma MC.
 * NOTE(review): many lines (returns, braces, some declarations such as
 * use_ic/luty assignments) are missing from this extraction; visible code
 * kept byte-identical. */
344 static void vc1_mc_1mv(VC1Context *v, int dir)
346 MpegEncContext *s = &v->s;
347 H264ChromaContext *h264chroma = &v->h264chroma;
348 uint8_t *srcY, *srcU, *srcV;
349 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
350 int v_edge_pos = s->v_edge_pos >> v->field_mode;
352 uint8_t (*luty)[256], (*lutuv)[256];
/* Bail out when the needed reference picture does not exist. */
355 if ((!v->field_mode ||
356 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
357 !v->s.last_picture.f.data[0])
360 mx = s->mv[dir][0][0];
361 my = s->mv[dir][0][1];
363 // store motion vectors for further use in B frames
364 if (s->pict_type == AV_PICTURE_TYPE_P) {
365 for (i = 0; i < 4; i++) {
366 s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
367 s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
/* Chroma MV = luma MV halved, rounding 3/4-pel positions up. */
371 uvmx = (mx + ((mx & 3) == 3)) >> 1;
372 uvmy = (my + ((my & 3) == 3)) >> 1;
373 v->luma_mv[s->mb_x][0] = uvmx;
374 v->luma_mv[s->mb_x][1] = uvmy;
/* Field-parity bias when referencing the opposite field. */
377 v->cur_field_type != v->ref_field_type[dir]) {
378 my = my - 2 + 4 * v->cur_field_type;
379 uvmy = uvmy - 2 + 4 * v->cur_field_type;
382 // fastuvmc shall be ignored for interlaced frame picture
383 if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
384 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
385 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
/* Select reference planes and intensity-compensation LUTs. */
388 if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
389 srcY = s->current_picture.f.data[0];
390 srcU = s->current_picture.f.data[1];
391 srcV = s->current_picture.f.data[2];
393 lutuv = v->curr_lutuv;
394 use_ic = v->curr_use_ic;
396 srcY = s->last_picture.f.data[0];
397 srcU = s->last_picture.f.data[1];
398 srcV = s->last_picture.f.data[2];
400 lutuv = v->last_lutuv;
401 use_ic = v->last_use_ic;
404 srcY = s->next_picture.f.data[0];
405 srcU = s->next_picture.f.data[1];
406 srcV = s->next_picture.f.data[2];
408 lutuv = v->next_lutuv;
409 use_ic = v->next_use_ic;
412 if (!srcY || !srcU) {
413 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
/* Integer source position (MVs are in quarter-pel units). */
417 src_x = s->mb_x * 16 + (mx >> 2);
418 src_y = s->mb_y * 16 + (my >> 2);
419 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
420 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
422 if (v->profile != PROFILE_ADVANCED) {
423 src_x = av_clip( src_x, -16, s->mb_width * 16);
424 src_y = av_clip( src_y, -16, s->mb_height * 16);
425 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
426 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
428 src_x = av_clip( src_x, -17, s->avctx->coded_width);
429 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
430 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
431 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
434 srcY += src_y * s->linesize + src_x;
435 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
436 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* Referencing the bottom field: skip one line into the frame buffer. */
438 if (v->field_mode && v->ref_field_type[dir]) {
439 srcY += s->current_picture_ptr->f.linesize[0];
440 srcU += s->current_picture_ptr->f.linesize[1];
441 srcV += s->current_picture_ptr->f.linesize[2];
444 /* for grayscale we should not try to read from unknown area */
445 if (s->flags & CODEC_FLAG_GRAY) {
446 srcU = s->edge_emu_buffer + 18 * s->linesize;
447 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* Edge emulation needed: MV points (partly) outside the padded picture,
 * or the pixels must be modified (range reduction / intensity comp). */
450 if (v->rangeredfrm || use_ic
451 || s->h_edge_pos < 22 || v_edge_pos < 22
452 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
453 || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
454 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
456 srcY -= s->mspel * (1 + s->linesize);
457 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
458 s->linesize, s->linesize,
459 17 + s->mspel * 2, 17 + s->mspel * 2,
460 src_x - s->mspel, src_y - s->mspel,
461 s->h_edge_pos, v_edge_pos);
462 srcY = s->edge_emu_buffer;
463 s->vdsp.emulated_edge_mc(uvbuf, srcU,
464 s->uvlinesize, s->uvlinesize,
467 s->h_edge_pos >> 1, v_edge_pos >> 1);
468 s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
469 s->uvlinesize, s->uvlinesize,
472 s->h_edge_pos >> 1, v_edge_pos >> 1);
475 /* if we deal with range reduction we need to scale source blocks */
476 if (v->rangeredfrm) {
481 for (j = 0; j < 17 + s->mspel * 2; j++) {
482 for (i = 0; i < 17 + s->mspel * 2; i++)
483 src[i] = ((src[i] - 128) >> 1) + 128;
488 for (j = 0; j < 9; j++) {
489 for (i = 0; i < 9; i++) {
490 src[i] = ((src[i] - 128) >> 1) + 128;
491 src2[i] = ((src2[i] - 128) >> 1) + 128;
493 src += s->uvlinesize;
494 src2 += s->uvlinesize;
497 /* if we deal with intensity compensation we need to scale source blocks */
/* f selects the per-field LUT: the reference field in field mode,
 * otherwise the parity of the source line. */
503 for (j = 0; j < 17 + s->mspel * 2; j++) {
504 int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
505 for (i = 0; i < 17 + s->mspel * 2; i++)
506 src[i] = luty[f][src[i]];
511 for (j = 0; j < 9; j++) {
512 int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
513 for (i = 0; i < 9; i++) {
514 src[i] = lutuv[f][src[i]];
515 src2[i] = lutuv[f][src2[i]];
517 src += s->uvlinesize;
518 src2 += s->uvlinesize;
521 srcY += s->mspel * (1 + s->linesize);
/* Quarter-pel (mspel) luma MC, one call per 8x8 quadrant. */
525 dxy = ((my & 3) << 2) | (mx & 3);
526 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
527 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
528 srcY += s->linesize * 8;
529 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
530 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
531 } else { // hpel mc - always used for luma
532 dxy = (my & 2) | ((mx & 2) >> 1);
534 s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
536 s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
539 if (s->flags & CODEC_FLAG_GRAY) return;
540 /* Chroma MC always uses qpel bilinear */
541 uvmx = (uvmx & 3) << 1;
542 uvmy = (uvmy & 3) << 1;
544 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
545 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
547 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
548 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Return the median of four integers, i.e. the average of the two middle
 * values (with C's truncating integer division).
 *
 * Reconstructed: the extracted fragment had lost the outer if (a < b)/else
 * structure and braces; the four return expressions fully determine the
 * semantics. FFMIN/FFMAX are expanded to plain ternaries so this helper is
 * self-contained and macro-free.
 *
 * @param a,b,c,d  values to take the median of
 * @return         (second smallest + second largest) / 2
 */
static inline int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d) return ((b < d ? b : d) + (a > c ? a : c)) / 2;
        else       return ((b < c ? b : c) + (a > d ? a : d)) / 2;
    } else {
        if (c < d) return ((a < d ? a : d) + (b > c ? b : c)) / 2;
        else       return ((a < c ? a : c) + (b > d ? b : d)) / 2;
    }
}
563 /** Do motion compensation for 4-MV macroblock - luminance block
/* Motion compensation for one 8x8 luma block (n = 0..3) of a 4-MV MB.
 * dir selects forward/backward reference, avg selects averaging (B-frame
 * second prediction) vs plain put. fieldmv marks interlaced-frame blocks
 * whose MV addresses a single field (doubled stride). On the last block
 * (n == 3) of a field-mode P MB, a representative MV for the whole MB is
 * derived from the dominant field's MVs (median4/mid_pred/average) and
 * stored for B-frame use.
 * NOTE(review): many lines (returns, braces, case labels, declarations such
 * as off/tx/ty/use_ic) are missing from this extraction; visible code kept
 * byte-identical. */
565 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
567 MpegEncContext *s = &v->s;
569 int dxy, mx, my, src_x, src_y;
571 int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
572 int v_edge_pos = s->v_edge_pos >> v->field_mode;
573 uint8_t (*luty)[256];
/* Bail out when the needed reference picture does not exist. */
576 if ((!v->field_mode ||
577 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
578 !v->s.last_picture.f.data[0])
581 mx = s->mv[dir][n][0];
582 my = s->mv[dir][n][1];
/* Select reference luma plane and intensity-compensation LUT. */
585 if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
586 srcY = s->current_picture.f.data[0];
588 use_ic = v->curr_use_ic;
590 srcY = s->last_picture.f.data[0];
592 use_ic = v->last_use_ic;
595 srcY = s->next_picture.f.data[0];
597 use_ic = v->next_use_ic;
601 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
/* Field-parity bias when referencing the opposite field. */
606 if (v->cur_field_type != v->ref_field_type[dir])
607 my = my - 2 + 4 * v->cur_field_type;
/* Last block of a field-mode P MB: derive and store one MV for the MB
 * from whichever field (same/opposite) contributed more block MVs. */
610 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
611 int same_count = 0, opp_count = 0, k;
612 int chosen_mv[2][4][2], f;
614 for (k = 0; k < 4; k++) {
615 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
616 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
617 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
621 f = opp_count > same_count;
622 switch (f ? opp_count : same_count) {
624 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
625 chosen_mv[f][2][0], chosen_mv[f][3][0]);
626 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
627 chosen_mv[f][2][1], chosen_mv[f][3][1]);
630 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
631 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
634 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
635 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
640 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
641 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
642 for (k = 0; k < 4; k++)
643 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* Interlaced frame: MV pull-back into the (half-height) field area. */
646 if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
648 int width = s->avctx->coded_width;
649 int height = s->avctx->coded_height >> 1;
650 if (s->pict_type == AV_PICTURE_TYPE_P) {
651 s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
652 s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
654 qx = (s->mb_x * 16) + (mx >> 2);
655 qy = (s->mb_y * 8) + (my >> 3);
660 mx -= 4 * (qx - width);
663 else if (qy > height + 1)
664 my -= 8 * (qy - height - 1);
/* Destination offset of this 8x8 block within the 16x16 MB. */
667 if ((v->fcm == ILACE_FRAME) && fieldmv)
668 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
670 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
672 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
674 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
676 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
678 if (v->profile != PROFILE_ADVANCED) {
679 src_x = av_clip(src_x, -16, s->mb_width * 16);
680 src_y = av_clip(src_y, -16, s->mb_height * 16);
682 src_x = av_clip(src_x, -17, s->avctx->coded_width);
683 if (v->fcm == ILACE_FRAME) {
685 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
687 src_y = av_clip(src_y, -18, s->avctx->coded_height);
689 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
693 srcY += src_y * s->linesize + src_x;
694 if (v->field_mode && v->ref_field_type[dir])
695 srcY += s->current_picture_ptr->f.linesize[0];
/* Adjust usable vertical extent for single-field block MVs. */
697 if (fieldmv && !(src_y & 1))
699 if (fieldmv && (src_y & 1) && src_y < 4)
/* Edge emulation when the block reads outside the padded picture or the
 * pixels must be modified (range reduction / intensity comp). */
701 if (v->rangeredfrm || use_ic
702 || s->h_edge_pos < 13 || v_edge_pos < 23
703 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
704 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
705 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
706 /* check emulate edge stride and offset */
707 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
708 s->linesize, s->linesize,
709 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
710 src_x - s->mspel, src_y - (s->mspel << fieldmv),
711 s->h_edge_pos, v_edge_pos);
712 srcY = s->edge_emu_buffer;
713 /* if we deal with range reduction we need to scale source blocks */
714 if (v->rangeredfrm) {
719 for (j = 0; j < 9 + s->mspel * 2; j++) {
720 for (i = 0; i < 9 + s->mspel * 2; i++)
721 src[i] = ((src[i] - 128) >> 1) + 128;
722 src += s->linesize << fieldmv;
725 /* if we deal with intensity compensation we need to scale source blocks */
731 for (j = 0; j < 9 + s->mspel * 2; j++) {
732 int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
733 for (i = 0; i < 9 + s->mspel * 2; i++)
734 src[i] = luty[f][src[i]];
735 src += s->linesize << fieldmv;
738 srcY += s->mspel * (1 + (s->linesize << fieldmv));
/* Quarter-pel (mspel) MC, averaged or plain depending on avg. */
742 dxy = ((my & 3) << 2) | (mx & 3);
744 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
746 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
747 } else { // hpel mc - always used for luma
748 dxy = (my & 2) | ((mx & 2) >> 1);
750 s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
752 s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Derive the chroma MV (*tx, *ty) from up to four luma MVs.
 * a[k] != flag marks block k as unusable (e.g. intra, or wrong field);
 * idx collects those marks as a 4-bit mask and count[] is its popcount.
 * 0 unusable -> median4 of all four; 3 unusable -> copy the single valid
 * MV; 2 unusable -> average the two valid ones; 1 unusable -> mid_pred of
 * the remaining three (the mask-specific cases, partly missing here).
 * Returns the number of valid MVs (return statements missing from this
 * extraction -- presumably 4 - count[idx]; TODO confirm).
 * NOTE(review): braces, case labels and several lines are missing; visible
 * code kept byte-identical. */
756 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
759 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
761 idx = ((a[3] != flag) << 3)
762 | ((a[2] != flag) << 2)
763 | ((a[1] != flag) << 1)
766 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
767 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
769 } else if (count[idx] == 1) {
772 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
773 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
776 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
777 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
780 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
781 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
784 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
785 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
/* Exactly two valid MVs: find their indices t1, t2 and average. */
788 } else if (count[idx] == 2) {
790 for (i = 0; i < 3; i++)
795 for (i = t1 + 1; i < 4; i++)
800 *tx = (mvx[t1] + mvx[t2]) / 2;
801 *ty = (mvy[t1] + mvy[t2]) / 2;
809 /** Do motion compensation for 4-MV macroblock - both chroma blocks
/* Chroma motion compensation (both U and V) for a 4-MV macroblock.
 * The chroma MV is derived from the four luma MVs via get_chroma_mv(),
 * excluding intra blocks (non-field mode) or picking the dominant field's
 * MVs (field mode with two references). Then: fastuvmc rounding, field
 * bias, clamping, edge emulation, range-reduction / intensity-comp pixel
 * scaling, and 8x8 bilinear chroma MC.
 * NOTE(review): many lines (returns, braces, declarations such as
 * valid_count/dominant/use_ic, emulation trigger) are missing from this
 * extraction; visible code kept byte-identical. */
811 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
813 MpegEncContext *s = &v->s;
814 H264ChromaContext *h264chroma = &v->h264chroma;
815 uint8_t *srcU, *srcV;
816 int uvmx, uvmy, uvsrc_x, uvsrc_y;
817 int k, tx = 0, ty = 0;
818 int mvx[4], mvy[4], intra[4], mv_f[4];
820 int chroma_ref_type = v->cur_field_type;
821 int v_edge_pos = s->v_edge_pos >> v->field_mode;
822 uint8_t (*lutuv)[256];
825 if (!v->field_mode && !v->s.last_picture.f.data[0])
827 if (s->flags & CODEC_FLAG_GRAY)
/* Gather the four luma MVs plus intra/field flags per block. */
830 for (k = 0; k < 4; k++) {
831 mvx[k] = s->mv[dir][k][0];
832 mvy[k] = s->mv[dir][k][1];
833 intra[k] = v->mb_type[0][s->block_index[k]];
835 mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
838 /* calculate chroma MV vector from four luma MVs */
839 if (!v->field_mode || (v->field_mode && !v->numref)) {
840 valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
841 chroma_ref_type = v->reffield;
/* All-intra MB: store zero MV and skip MC entirely. */
843 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
844 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
845 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
846 return; //no need to do MC for intra blocks
/* Two-reference field mode: use whichever field owns >2 of the MVs. */
850 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
852 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
854 chroma_ref_type = !v->cur_field_type;
856 if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
858 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
859 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
/* Chroma MV = derived MV halved, rounding 3/4-pel positions up. */
860 uvmx = (tx + ((tx & 3) == 3)) >> 1;
861 uvmy = (ty + ((ty & 3) == 3)) >> 1;
863 v->luma_mv[s->mb_x][0] = uvmx;
864 v->luma_mv[s->mb_x][1] = uvmy;
/* fastuvmc rounding toward even positions. */
867 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
868 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
870 // Field conversion bias
871 if (v->cur_field_type != chroma_ref_type)
872 uvmy += 2 - 4 * chroma_ref_type;
874 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
875 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
877 if (v->profile != PROFILE_ADVANCED) {
878 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
879 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
881 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
882 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
/* Select reference chroma planes and intensity-compensation LUT. */
886 if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
887 srcU = s->current_picture.f.data[1];
888 srcV = s->current_picture.f.data[2];
889 lutuv = v->curr_lutuv;
890 use_ic = v->curr_use_ic;
892 srcU = s->last_picture.f.data[1];
893 srcV = s->last_picture.f.data[2];
894 lutuv = v->last_lutuv;
895 use_ic = v->last_use_ic;
898 srcU = s->next_picture.f.data[1];
899 srcV = s->next_picture.f.data[2];
900 lutuv = v->next_lutuv;
901 use_ic = v->next_use_ic;
905 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
909 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
910 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* Referencing the bottom field: skip one line into the frame buffer. */
913 if (chroma_ref_type) {
914 srcU += s->current_picture_ptr->f.linesize[1];
915 srcV += s->current_picture_ptr->f.linesize[2];
/* Edge emulation when reading outside the padded picture or when the
 * pixels must be modified (range reduction / intensity comp). */
919 if (v->rangeredfrm || use_ic
920 || s->h_edge_pos < 18 || v_edge_pos < 18
921 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
922 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
923 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
924 s->uvlinesize, s->uvlinesize,
925 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
926 s->h_edge_pos >> 1, v_edge_pos >> 1);
927 s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
928 s->uvlinesize, s->uvlinesize,
929 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
930 s->h_edge_pos >> 1, v_edge_pos >> 1);
931 srcU = s->edge_emu_buffer;
932 srcV = s->edge_emu_buffer + 16;
934 /* if we deal with range reduction we need to scale source blocks */
935 if (v->rangeredfrm) {
941 for (j = 0; j < 9; j++) {
942 for (i = 0; i < 9; i++) {
943 src[i] = ((src[i] - 128) >> 1) + 128;
944 src2[i] = ((src2[i] - 128) >> 1) + 128;
946 src += s->uvlinesize;
947 src2 += s->uvlinesize;
950 /* if we deal with intensity compensation we need to scale source blocks */
957 for (j = 0; j < 9; j++) {
958 int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
959 for (i = 0; i < 9; i++) {
960 src[i] = lutuv[f][src[i]];
961 src2[i] = lutuv[f][src2[i]];
963 src += s->uvlinesize;
964 src2 += s->uvlinesize;
969 /* Chroma MC always uses qpel bilinear */
970 uvmx = (uvmx & 3) << 1;
971 uvmy = (uvmy & 3) << 1;
973 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
974 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
976 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
977 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
981 /** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
/* Chroma MC for 4-MV interlaced-frame MBs (both U and V): four independent
 * 4x4 chroma sub-blocks, each driven by its own luma MV. dir/dir2 select
 * the reference (last vs next picture) for the top and bottom block pairs;
 * avg selects averaging MC for B-frame second predictions. fieldmv halves
 * the vertical layout (single-field sub-blocks, doubled stride), with
 * s_rndtblfield providing the field-MV rounding of the vertical component.
 * NOTE(review): returns, braces, the emulation trigger condition and some
 * declarations (off/tx/ty/use_ic/i) are missing from this extraction;
 * visible code kept byte-identical. */
983 static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
985 MpegEncContext *s = &v->s;
986 H264ChromaContext *h264chroma = &v->h264chroma;
987 uint8_t *srcU, *srcV;
988 int uvsrc_x, uvsrc_y;
989 int uvmx_field[4], uvmy_field[4];
991 int fieldmv = v->blk_mv_type[s->block_index[0]];
992 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
993 int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
994 int v_edge_pos = s->v_edge_pos >> 1;
996 uint8_t (*lutuv)[256];
998 if (s->flags & CODEC_FLAG_GRAY)
/* Derive the per-sub-block chroma MVs from the four luma MVs. */
1001 for (i = 0; i < 4; i++) {
1002 int d = i < 2 ? dir: dir2;
1003 tx = s->mv[d][i][0];
1004 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1005 ty = s->mv[d][i][1];
1007 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1009 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
/* One MC pass per 4x4 chroma sub-block. */
1012 for (i = 0; i < 4; i++) {
1013 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1014 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1015 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1016 // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1017 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1018 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1019 if (i < 2 ? dir : dir2) {
1020 srcU = s->next_picture.f.data[1];
1021 srcV = s->next_picture.f.data[2];
1022 lutuv = v->next_lutuv;
1023 use_ic = v->next_use_ic;
1025 srcU = s->last_picture.f.data[1];
1026 srcV = s->last_picture.f.data[2];
1027 lutuv = v->last_lutuv;
1028 use_ic = v->last_use_ic;
1032 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1033 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* Keep only the sub-pel fraction for the bilinear MC call. */
1034 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1035 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
/* Adjust usable vertical extent for single-field sub-block MVs. */
1037 if (fieldmv && !(uvsrc_y & 1))
1038 v_edge_pos = (s->v_edge_pos >> 1) - 1;
1040 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
1043 || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1044 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1045 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1046 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
1047 s->uvlinesize, s->uvlinesize,
1048 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1049 s->h_edge_pos >> 1, v_edge_pos);
1050 s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
1051 s->uvlinesize, s->uvlinesize,
1052 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1053 s->h_edge_pos >> 1, v_edge_pos);
1054 srcU = s->edge_emu_buffer;
1055 srcV = s->edge_emu_buffer + 16;
1057 /* if we deal with intensity compensation we need to scale source blocks */
1060 uint8_t *src, *src2;
1064 for (j = 0; j < 5; j++) {
1065 int f = (uvsrc_y + (j << fieldmv)) & 1;
1066 for (i = 0; i < 5; i++) {
1067 src[i] = lutuv[f][src[i]];
1068 src2[i] = lutuv[f][src2[i]];
1070 src += s->uvlinesize << fieldmv;
1071 src2 += s->uvlinesize << fieldmv;
/* avg path (B-frame second prediction) vs plain put, each with the
 * rounding-control-dependent table choice. */
1077 h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1078 h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1080 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1081 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1085 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1086 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1088 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1089 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1095 /***********************************************************************/
1097 * @name VC-1 Block-level functions
1098 * @see 7.1.4, p91 and 8.1.1.7, p104
1104 * @brief Get macroblock-level quantizer scale
1106 #define GET_MQUANT() \
1107 if (v->dquantfrm) { \
1109 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1110 if (v->dqbilevel) { \
1111 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1113 mqdiff = get_bits(gb, 3); \
1115 mquant = v->pq + mqdiff; \
1117 mquant = get_bits(gb, 5); \
1120 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1121 edges = 1 << v->dqsbedge; \
1122 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1123 edges = (3 << v->dqsbedge) % 15; \
1124 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1126 if ((edges&1) && !s->mb_x) \
1127 mquant = v->altpq; \
1128 if ((edges&2) && s->first_slice_line) \
1129 mquant = v->altpq; \
1130 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1131 mquant = v->altpq; \
1132 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1133 mquant = v->altpq; \
1134 if (!mquant || mquant > 31) { \
1135 av_log(v->s.avctx, AV_LOG_ERROR, \
1136 "Overriding invalid mquant %d\n", mquant); \
1142 * @def GET_MVDATA(_dmv_x, _dmv_y)
1143 * @brief Get MV differentials
1144 * @see MVDATA decoding from 8.3.5.2, p(1)20
1145 * @param _dmv_x Horizontal differential for decoded MV
1146 * @param _dmv_y Vertical differential for decoded MV
1148 #define GET_MVDATA(_dmv_x, _dmv_y) \
1149 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1150 VC1_MV_DIFF_VLC_BITS, 2); \
1152 mb_has_coeffs = 1; \
1155 mb_has_coeffs = 0; \
1158 _dmv_x = _dmv_y = 0; \
1159 } else if (index == 35) { \
1160 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1161 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1162 } else if (index == 36) { \
1167 index1 = index % 6; \
1168 if (!s->quarter_sample && index1 == 5) val = 1; \
1170 if (size_table[index1] - val > 0) \
1171 val = get_bits(gb, size_table[index1] - val); \
1173 sign = 0 - (val&1); \
1174 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1176 index1 = index / 6; \
1177 if (!s->quarter_sample && index1 == 5) val = 1; \
1179 if (size_table[index1] - val > 0) \
1180 val = get_bits(gb, size_table[index1] - val); \
1182 sign = 0 - (val & 1); \
1183 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
1186 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1187 int *dmv_y, int *pred_flag)
1190 int extend_x = 0, extend_y = 0;
1191 GetBitContext *gb = &v->s.gb;
1194 const int* offs_tab;
1197 bits = VC1_2REF_MVDATA_VLC_BITS;
1200 bits = VC1_1REF_MVDATA_VLC_BITS;
1203 switch (v->dmvrange) {
1211 extend_x = extend_y = 1;
1214 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1216 *dmv_x = get_bits(gb, v->k_x);
1217 *dmv_y = get_bits(gb, v->k_y);
1220 *pred_flag = *dmv_y & 1;
1221 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1223 *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1228 av_assert0(index < esc);
1230 offs_tab = offset_table2;
1232 offs_tab = offset_table1;
1233 index1 = (index + 1) % 9;
1235 val = get_bits(gb, index1 + extend_x);
1236 sign = 0 -(val & 1);
1237 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1241 offs_tab = offset_table2;
1243 offs_tab = offset_table1;
1244 index1 = (index + 1) / 9;
1245 if (index1 > v->numref) {
1246 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1247 sign = 0 - (val & 1);
1248 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1251 if (v->numref && pred_flag)
1252 *pred_flag = index1 & 1;
1256 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1258 int scaledvalue, refdist;
1259 int scalesame1, scalesame2;
1260 int scalezone1_x, zone1offset_x;
1261 int table_index = dir ^ v->second_field;
1263 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1264 refdist = v->refdist;
1266 refdist = dir ? v->brfd : v->frfd;
1269 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1270 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1271 scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1272 zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1277 if (FFABS(n) < scalezone1_x)
1278 scaledvalue = (n * scalesame1) >> 8;
1281 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1283 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1286 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1289 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1291 int scaledvalue, refdist;
1292 int scalesame1, scalesame2;
1293 int scalezone1_y, zone1offset_y;
1294 int table_index = dir ^ v->second_field;
1296 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1297 refdist = v->refdist;
1299 refdist = dir ? v->brfd : v->frfd;
1302 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1303 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1304 scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1305 zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1310 if (FFABS(n) < scalezone1_y)
1311 scaledvalue = (n * scalesame1) >> 8;
1314 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1316 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1320 if (v->cur_field_type && !v->ref_field_type[dir])
1321 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1323 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1326 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1328 int scalezone1_x, zone1offset_x;
1329 int scaleopp1, scaleopp2, brfd;
1332 brfd = FFMIN(v->brfd, 3);
1333 scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1334 zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1335 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1336 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1341 if (FFABS(n) < scalezone1_x)
1342 scaledvalue = (n * scaleopp1) >> 8;
1345 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1347 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1350 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1353 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1355 int scalezone1_y, zone1offset_y;
1356 int scaleopp1, scaleopp2, brfd;
1359 brfd = FFMIN(v->brfd, 3);
1360 scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1361 zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1362 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1363 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1368 if (FFABS(n) < scalezone1_y)
1369 scaledvalue = (n * scaleopp1) >> 8;
1372 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1374 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1377 if (v->cur_field_type && !v->ref_field_type[dir]) {
1378 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1380 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1384 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1387 int brfd, scalesame;
1388 int hpel = 1 - v->s.quarter_sample;
1391 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1393 n = scaleforsame_y(v, i, n, dir) << hpel;
1395 n = scaleforsame_x(v, n, dir) << hpel;
1398 brfd = FFMIN(v->brfd, 3);
1399 scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1401 n = (n * scalesame >> 8) << hpel;
1405 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1408 int refdist, scaleopp;
1409 int hpel = 1 - v->s.quarter_sample;
1412 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1414 n = scaleforopp_y(v, n, dir) << hpel;
1416 n = scaleforopp_x(v, n) << hpel;
1419 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1420 refdist = FFMIN(v->refdist, 3);
1422 refdist = dir ? v->brfd : v->frfd;
1423 scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1425 n = (n * scaleopp >> 8) << hpel;
1429 /** Predict and set motion vector
1431 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1432 int mv1, int r_x, int r_y, uint8_t* is_intra,
1433 int pred_flag, int dir)
1435 MpegEncContext *s = &v->s;
1436 int xy, wrap, off = 0;
1440 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1441 int opposite, a_f, b_f, c_f;
1442 int16_t field_predA[2];
1443 int16_t field_predB[2];
1444 int16_t field_predC[2];
1445 int a_valid, b_valid, c_valid;
1446 int hybridmv_thresh, y_bias = 0;
1448 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1449 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1453 /* scale MV difference to be quad-pel */
1454 dmv_x <<= 1 - s->quarter_sample;
1455 dmv_y <<= 1 - s->quarter_sample;
1457 wrap = s->b8_stride;
1458 xy = s->block_index[n];
1461 s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1462 s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1463 s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1464 s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1465 if (mv1) { /* duplicate motion data for 1-MV block */
1466 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1467 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1468 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1469 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1470 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1471 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1472 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1473 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1474 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1475 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1476 s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1477 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1478 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1483 C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1484 A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1486 if (v->field_mode && mixedmv_pic)
1487 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1489 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1491 //in 4-MV mode different blocks have different B predictor position
1494 off = (s->mb_x > 0) ? -1 : 1;
1497 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1506 B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
1508 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1509 b_valid = a_valid && (s->mb_width > 1);
1510 c_valid = s->mb_x || (n == 1 || n == 3);
1511 if (v->field_mode) {
1512 a_valid = a_valid && !is_intra[xy - wrap];
1513 b_valid = b_valid && !is_intra[xy - wrap + off];
1514 c_valid = c_valid && !is_intra[xy - 1];
1518 a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1519 num_oppfield += a_f;
1520 num_samefield += 1 - a_f;
1521 field_predA[0] = A[0];
1522 field_predA[1] = A[1];
1524 field_predA[0] = field_predA[1] = 0;
1528 b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1529 num_oppfield += b_f;
1530 num_samefield += 1 - b_f;
1531 field_predB[0] = B[0];
1532 field_predB[1] = B[1];
1534 field_predB[0] = field_predB[1] = 0;
1538 c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1539 num_oppfield += c_f;
1540 num_samefield += 1 - c_f;
1541 field_predC[0] = C[0];
1542 field_predC[1] = C[1];
1544 field_predC[0] = field_predC[1] = 0;
1548 if (v->field_mode) {
1550 // REFFIELD determines if the last field or the second-last field is
1551 // to be used as reference
1552 opposite = 1 - v->reffield;
1554 if (num_samefield <= num_oppfield)
1555 opposite = 1 - pred_flag;
1557 opposite = pred_flag;
1562 if (a_valid && !a_f) {
1563 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1564 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1566 if (b_valid && !b_f) {
1567 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1568 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1570 if (c_valid && !c_f) {
1571 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1572 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1574 v->mv_f[dir][xy + v->blocks_off] = 1;
1575 v->ref_field_type[dir] = !v->cur_field_type;
1577 if (a_valid && a_f) {
1578 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1579 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1581 if (b_valid && b_f) {
1582 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1583 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1585 if (c_valid && c_f) {
1586 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1587 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1589 v->mv_f[dir][xy + v->blocks_off] = 0;
1590 v->ref_field_type[dir] = v->cur_field_type;
1594 px = field_predA[0];
1595 py = field_predA[1];
1596 } else if (c_valid) {
1597 px = field_predC[0];
1598 py = field_predC[1];
1599 } else if (b_valid) {
1600 px = field_predB[0];
1601 py = field_predB[1];
1607 if (num_samefield + num_oppfield > 1) {
1608 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1609 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1612 /* Pullback MV as specified in 8.3.5.3.4 */
1613 if (!v->field_mode) {
1615 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1616 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1617 X = (s->mb_width << 6) - 4;
1618 Y = (s->mb_height << 6) - 4;
1620 if (qx + px < -60) px = -60 - qx;
1621 if (qy + py < -60) py = -60 - qy;
1623 if (qx + px < -28) px = -28 - qx;
1624 if (qy + py < -28) py = -28 - qy;
1626 if (qx + px > X) px = X - qx;
1627 if (qy + py > Y) py = Y - qy;
1630 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1631 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1632 hybridmv_thresh = 32;
1633 if (a_valid && c_valid) {
1634 if (is_intra[xy - wrap])
1635 sum = FFABS(px) + FFABS(py);
1637 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1638 if (sum > hybridmv_thresh) {
1639 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1640 px = field_predA[0];
1641 py = field_predA[1];
1643 px = field_predC[0];
1644 py = field_predC[1];
1647 if (is_intra[xy - 1])
1648 sum = FFABS(px) + FFABS(py);
1650 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1651 if (sum > hybridmv_thresh) {
1652 if (get_bits1(&s->gb)) {
1653 px = field_predA[0];
1654 py = field_predA[1];
1656 px = field_predC[0];
1657 py = field_predC[1];
1664 if (v->field_mode && v->numref)
1666 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1668 /* store MV using signed modulus of MV range defined in 4.11 */
1669 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1670 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1671 if (mv1) { /* duplicate motion data for 1-MV block */
1672 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1673 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1674 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1675 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1676 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1677 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1678 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1679 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1683 /** Predict and set motion vector for interlaced frame picture MBs
1685 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1686 int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
1688 MpegEncContext *s = &v->s;
1689 int xy, wrap, off = 0;
1690 int A[2], B[2], C[2];
1692 int a_valid = 0, b_valid = 0, c_valid = 0;
1693 int field_a, field_b, field_c; // 0: same, 1: opposit
1694 int total_valid, num_samefield, num_oppfield;
1695 int pos_c, pos_b, n_adj;
1697 wrap = s->b8_stride;
1698 xy = s->block_index[n];
1701 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1702 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1703 s->current_picture.motion_val[1][xy][0] = 0;
1704 s->current_picture.motion_val[1][xy][1] = 0;
1705 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1706 s->current_picture.motion_val[0][xy + 1][0] = 0;
1707 s->current_picture.motion_val[0][xy + 1][1] = 0;
1708 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1709 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1710 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1711 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
1712 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1713 s->current_picture.motion_val[1][xy + 1][0] = 0;
1714 s->current_picture.motion_val[1][xy + 1][1] = 0;
1715 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1716 s->current_picture.motion_val[1][xy + wrap][1] = 0;
1717 s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
1718 s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
1723 off = ((n == 0) || (n == 1)) ? 1 : -1;
1725 if (s->mb_x || (n == 1) || (n == 3)) {
1726 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1727 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1728 A[0] = s->current_picture.motion_val[dir][xy - 1][0];
1729 A[1] = s->current_picture.motion_val[dir][xy - 1][1];
1731 } else { // current block has frame mv and cand. has field MV (so average)
1732 A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
1733 + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
1734 A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
1735 + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
1738 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1744 /* Predict B and C */
1745 B[0] = B[1] = C[0] = C[1] = 0;
1746 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1747 if (!s->first_slice_line) {
1748 if (!v->is_intra[s->mb_x - s->mb_stride]) {
1751 pos_b = s->block_index[n_adj] - 2 * wrap;
1752 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1753 n_adj = (n & 2) | (n & 1);
1755 B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
1756 B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
1757 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1758 B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1759 B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
1762 if (s->mb_width > 1) {
1763 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1766 pos_c = s->block_index[2] - 2 * wrap + 2;
1767 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1770 C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
1771 C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
1772 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1773 C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1774 C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1776 if (s->mb_x == s->mb_width - 1) {
1777 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1780 pos_c = s->block_index[3] - 2 * wrap - 2;
1781 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1784 C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
1785 C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
1786 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1787 C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1788 C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
1797 pos_b = s->block_index[1];
1799 B[0] = s->current_picture.motion_val[dir][pos_b][0];
1800 B[1] = s->current_picture.motion_val[dir][pos_b][1];
1801 pos_c = s->block_index[0];
1803 C[0] = s->current_picture.motion_val[dir][pos_c][0];
1804 C[1] = s->current_picture.motion_val[dir][pos_c][1];
1807 total_valid = a_valid + b_valid + c_valid;
1808 // check if predictor A is out of bounds
1809 if (!s->mb_x && !(n == 1 || n == 3)) {
1812 // check if predictor B is out of bounds
1813 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1814 B[0] = B[1] = C[0] = C[1] = 0;
1816 if (!v->blk_mv_type[xy]) {
1817 if (s->mb_width == 1) {
1821 if (total_valid >= 2) {
1822 px = mid_pred(A[0], B[0], C[0]);
1823 py = mid_pred(A[1], B[1], C[1]);
1824 } else if (total_valid) {
1825 if (a_valid) { px = A[0]; py = A[1]; }
1826 else if (b_valid) { px = B[0]; py = B[1]; }
1827 else { px = C[0]; py = C[1]; }
1832 field_a = (A[1] & 4) ? 1 : 0;
1836 field_b = (B[1] & 4) ? 1 : 0;
1840 field_c = (C[1] & 4) ? 1 : 0;
1844 num_oppfield = field_a + field_b + field_c;
1845 num_samefield = total_valid - num_oppfield;
1846 if (total_valid == 3) {
1847 if ((num_samefield == 3) || (num_oppfield == 3)) {
1848 px = mid_pred(A[0], B[0], C[0]);
1849 py = mid_pred(A[1], B[1], C[1]);
1850 } else if (num_samefield >= num_oppfield) {
1851 /* take one MV from same field set depending on priority
1852 the check for B may not be necessary */
1853 px = !field_a ? A[0] : B[0];
1854 py = !field_a ? A[1] : B[1];
1856 px = field_a ? A[0] : B[0];
1857 py = field_a ? A[1] : B[1];
1859 } else if (total_valid == 2) {
1860 if (num_samefield >= num_oppfield) {
1861 if (!field_a && a_valid) {
1864 } else if (!field_b && b_valid) {
1867 } else /*if (c_valid)*/ {
1868 av_assert1(c_valid);
1871 } /*else px = py = 0;*/
1873 if (field_a && a_valid) {
1876 } else /*if (field_b && b_valid)*/ {
1877 av_assert1(field_b && b_valid);
1880 } /*else if (c_valid) {
1885 } else if (total_valid == 1) {
1886 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1887 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1891 /* store MV using signed modulus of MV range defined in 4.11 */
1892 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1893 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1894 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1895 s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
1896 s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
1897 s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
1898 s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
1899 s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
1900 s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
1901 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1902 s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
1903 s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
1904 s->mv[dir][n + 1][0] = s->mv[dir][n][0];
1905 s->mv[dir][n + 1][1] = s->mv[dir][n][1];
1909 /** Motion compensation for direct or interpolated blocks in B-frames
1911 static void vc1_interp_mc(VC1Context *v)
1913 MpegEncContext *s = &v->s;
1914 H264ChromaContext *h264chroma = &v->h264chroma;
1915 uint8_t *srcY, *srcU, *srcV;
1916 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1918 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1919 int use_ic = v->next_use_ic;
1921 if (!v->field_mode && !v->s.next_picture.f.data[0])
1924 mx = s->mv[1][0][0];
1925 my = s->mv[1][0][1];
1926 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1927 uvmy = (my + ((my & 3) == 3)) >> 1;
1928 if (v->field_mode) {
1929 if (v->cur_field_type != v->ref_field_type[1])
1930 my = my - 2 + 4 * v->cur_field_type;
1931 uvmy = uvmy - 2 + 4 * v->cur_field_type;
1934 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1935 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1937 srcY = s->next_picture.f.data[0];
1938 srcU = s->next_picture.f.data[1];
1939 srcV = s->next_picture.f.data[2];
1941 src_x = s->mb_x * 16 + (mx >> 2);
1942 src_y = s->mb_y * 16 + (my >> 2);
1943 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1944 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1946 if (v->profile != PROFILE_ADVANCED) {
1947 src_x = av_clip( src_x, -16, s->mb_width * 16);
1948 src_y = av_clip( src_y, -16, s->mb_height * 16);
1949 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1950 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1952 src_x = av_clip( src_x, -17, s->avctx->coded_width);
1953 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1954 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1955 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1958 srcY += src_y * s->linesize + src_x;
1959 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1960 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1962 if (v->field_mode && v->ref_field_type[1]) {
1963 srcY += s->current_picture_ptr->f.linesize[0];
1964 srcU += s->current_picture_ptr->f.linesize[1];
1965 srcV += s->current_picture_ptr->f.linesize[2];
1968 /* for grayscale we should not try to read from unknown area */
1969 if (s->flags & CODEC_FLAG_GRAY) {
1970 srcU = s->edge_emu_buffer + 18 * s->linesize;
1971 srcV = s->edge_emu_buffer + 18 * s->linesize;
1974 if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
1975 || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1976 || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1977 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1979 srcY -= s->mspel * (1 + s->linesize);
1980 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
1981 s->linesize, s->linesize,
1982 17 + s->mspel * 2, 17 + s->mspel * 2,
1983 src_x - s->mspel, src_y - s->mspel,
1984 s->h_edge_pos, v_edge_pos);
1985 srcY = s->edge_emu_buffer;
1986 s->vdsp.emulated_edge_mc(uvbuf, srcU,
1987 s->uvlinesize, s->uvlinesize,
1990 s->h_edge_pos >> 1, v_edge_pos >> 1);
1991 s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
1992 s->uvlinesize, s->uvlinesize,
1995 s->h_edge_pos >> 1, v_edge_pos >> 1);
1998 /* if we deal with range reduction we need to scale source blocks */
1999 if (v->rangeredfrm) {
2001 uint8_t *src, *src2;
2004 for (j = 0; j < 17 + s->mspel * 2; j++) {
2005 for (i = 0; i < 17 + s->mspel * 2; i++)
2006 src[i] = ((src[i] - 128) >> 1) + 128;
2011 for (j = 0; j < 9; j++) {
2012 for (i = 0; i < 9; i++) {
2013 src[i] = ((src[i] - 128) >> 1) + 128;
2014 src2[i] = ((src2[i] - 128) >> 1) + 128;
2016 src += s->uvlinesize;
2017 src2 += s->uvlinesize;
2022 uint8_t (*luty )[256] = v->next_luty;
2023 uint8_t (*lutuv)[256] = v->next_lutuv;
2025 uint8_t *src, *src2;
2028 for (j = 0; j < 17 + s->mspel * 2; j++) {
2029 int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
2030 for (i = 0; i < 17 + s->mspel * 2; i++)
2031 src[i] = luty[f][src[i]];
2036 for (j = 0; j < 9; j++) {
2037 int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
2038 for (i = 0; i < 9; i++) {
2039 src[i] = lutuv[f][src[i]];
2040 src2[i] = lutuv[f][src2[i]];
2042 src += s->uvlinesize;
2043 src2 += s->uvlinesize;
2046 srcY += s->mspel * (1 + s->linesize);
2053 dxy = ((my & 3) << 2) | (mx & 3);
2054 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2055 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2056 srcY += s->linesize * 8;
2057 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2058 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2060 dxy = (my & 2) | ((mx & 2) >> 1);
2063 s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2065 s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
2068 if (s->flags & CODEC_FLAG_GRAY) return;
2069 /* Chroma MC always uses qpel blilinear */
2070 uvmx = (uvmx & 3) << 1;
2071 uvmy = (uvmy & 3) << 1;
2073 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2074 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2076 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2077 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2081 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2085 #if B_FRACTION_DEN==256
2089 return 2 * ((value * n + 255) >> 9);
2090 return (value * n + 128) >> 8;
2093 n -= B_FRACTION_DEN;
2095 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2096 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2100 /** Reconstruct motion vector for B-frame and do motion compensation
2102 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2103 int direct, int mode)
2110 if (mode == BMV_TYPE_INTERPOLATED) {
2116 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
/* Predict B-frame motion vectors (forward and backward) from spatial
 * neighbours A/B/C, apply MV pullback and store the result.
 * NOTE(review): this excerpt is a sampled listing — many intermediate lines
 * (else-branches, closing braces) are missing; visible text kept verbatim. */
2119 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2120                                  int direct, int mvtype)
2122     MpegEncContext *s = &v->s;
2123     int xy, wrap, off = 0;
2128     const uint8_t *is_intra = v->mb_type[0];
2132     /* scale MV difference to be quad-pel */
2133     dmv_x[0] <<= 1 - s->quarter_sample;
2134     dmv_y[0] <<= 1 - s->quarter_sample;
2135     dmv_x[1] <<= 1 - s->quarter_sample;
2136     dmv_y[1] <<= 1 - s->quarter_sample;
2138     wrap = s->b8_stride;
2139     xy = s->block_index[0];
/* Intra MB: zero out both direction MV fields for this block. */
2142         s->current_picture.motion_val[0][xy + v->blocks_off][0] =
2143         s->current_picture.motion_val[0][xy + v->blocks_off][1] =
2144         s->current_picture.motion_val[1][xy + v->blocks_off][0] =
2145         s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
/* Direct mode: derive both MVs by scaling the co-located MV of the next
 * (future reference) picture by bfraction. */
2148         if (!v->field_mode) {
2149             s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2150             s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2151             s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2152             s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2154         /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2155         s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
2156         s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2157         s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
2158         s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2161         s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2162         s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2163         s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2164         s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
/* ---- forward (list 0) prediction ---- */
2168     if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2169         C = s->current_picture.motion_val[0][xy - 2];
2170         A = s->current_picture.motion_val[0][xy - wrap * 2];
/* For the last MB in a row, take B from the left instead of the right. */
2171         off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2172         B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
2174         if (!s->mb_x) C[0] = C[1] = 0;
2175         if (!s->first_slice_line) { // predictor A is not out of bounds
2176             if (s->mb_width == 1) {
2180                 px = mid_pred(A[0], B[0], C[0]);
2181                 py = mid_pred(A[1], B[1], C[1]);
2183         } else if (s->mb_x) { // predictor C is not out of bounds
2189         /* Pullback MV as specified in 8.3.5.3.4 */
2192             if (v->profile < PROFILE_ADVANCED) {
/* Simple/Main: half-pel grid (<<5), clamp window [-28, dim-4]. */
2193                 qx = (s->mb_x << 5);
2194                 qy = (s->mb_y << 5);
2195                 X  = (s->mb_width  << 5) - 4;
2196                 Y  = (s->mb_height << 5) - 4;
2197                 if (qx + px < -28) px = -28 - qx;
2198                 if (qy + py < -28) py = -28 - qy;
2199                 if (qx + px > X) px = X - qx;
2200                 if (qy + py > Y) py = Y - qy;
/* Advanced: quarter-pel grid (<<6), clamp window [-60, dim-4]. */
2202                 qx = (s->mb_x << 6);
2203                 qy = (s->mb_y << 6);
2204                 X  = (s->mb_width  << 6) - 4;
2205                 Y  = (s->mb_height << 6) - 4;
2206                 if (qx + px < -60) px = -60 - qx;
2207                 if (qy + py < -60) py = -60 - qy;
2208                 if (qx + px > X) px = X - qx;
2209                 if (qy + py > Y) py = Y - qy;
2212         /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): hybrid prediction is hard-disabled via `0 &&` — presumably
 * deliberate (broken/unneeded for B-frames); confirm against spec 8.3.5.3.5
 * before re-enabling. */
2213         if (0 && !s->first_slice_line && s->mb_x) {
2214             if (is_intra[xy - wrap])
2215                 sum = FFABS(px) + FFABS(py);
2217                 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2219                 if (get_bits1(&s->gb)) {
2227                 if (is_intra[xy - 2])
2228                     sum = FFABS(px) + FFABS(py);
2230                     sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2232                     if (get_bits1(&s->gb)) {
2242         /* store MV using signed modulus of MV range defined in 4.11 */
2243         s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2244         s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* ---- backward (list 1) prediction: mirrors the forward path above ---- */
2246     if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2247         C = s->current_picture.motion_val[1][xy - 2];
2248         A = s->current_picture.motion_val[1][xy - wrap * 2];
2249         off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2250         B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
2254         if (!s->first_slice_line) { // predictor A is not out of bounds
2255             if (s->mb_width == 1) {
2259                 px = mid_pred(A[0], B[0], C[0]);
2260                 py = mid_pred(A[1], B[1], C[1]);
2262         } else if (s->mb_x) { // predictor C is not out of bounds
2268         /* Pullback MV as specified in 8.3.5.3.4 */
2271             if (v->profile < PROFILE_ADVANCED) {
2272                 qx = (s->mb_x << 5);
2273                 qy = (s->mb_y << 5);
2274                 X  = (s->mb_width  << 5) - 4;
2275                 Y  = (s->mb_height << 5) - 4;
2276                 if (qx + px < -28) px = -28 - qx;
2277                 if (qy + py < -28) py = -28 - qy;
2278                 if (qx + px > X) px = X - qx;
2279                 if (qy + py > Y) py = Y - qy;
2281                 qx = (s->mb_x << 6);
2282                 qy = (s->mb_y << 6);
2283                 X  = (s->mb_width  << 6) - 4;
2284                 Y  = (s->mb_height << 6) - 4;
2285                 if (qx + px < -60) px = -60 - qx;
2286                 if (qy + py < -60) py = -60 - qy;
2287                 if (qx + px > X) px = X - qx;
2288                 if (qy + py > Y) py = Y - qy;
2291         /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): same deliberate `0 &&` disable as in the forward path. */
2292         if (0 && !s->first_slice_line && s->mb_x) {
2293             if (is_intra[xy - wrap])
2294                 sum = FFABS(px) + FFABS(py);
2296                 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2298                 if (get_bits1(&s->gb)) {
2306                 if (is_intra[xy - 2])
2307                     sum = FFABS(px) + FFABS(py);
2309                     sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2311                     if (get_bits1(&s->gb)) {
2321         /* store MV using signed modulus of MV range defined in 4.11 */
2323         s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2324         s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
/* Write the final MVs back into the picture-level MV arrays. */
2326     s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2327     s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2328     s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2329     s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
/* Predict B-frame MVs for interlaced-field pictures.
 * Direct mode scales the co-located next-picture MV by bfraction and picks
 * the reference field by majority vote of the four sub-block field flags;
 * otherwise it delegates to vc1_pred_mv() per direction.
 * NOTE(review): sampled listing — some lines/braces are missing; text kept
 * verbatim. */
2332 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2334     int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2335     MpegEncContext *s = &v->s;
2336     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2338     if (v->bmvtype == BMV_TYPE_DIRECT) {
2339         int total_opp, k, f;
2340         if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2341             s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2342                                       v->bfraction, 0, s->quarter_sample);
2343             s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2344                                       v->bfraction, 0, s->quarter_sample);
2345             s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2346                                       v->bfraction, 1, s->quarter_sample);
2347             s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2348                                       v->bfraction, 1, s->quarter_sample);
/* Majority vote over the 4 sub-blocks: >2 opposite-field flags selects the
 * opposite reference field. */
2350             total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2351                       + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2352                       + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2353                       + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2354             f = (total_opp > 2) ? 1 : 0;
/* Co-located MB was intra: fall back to zero MVs. */
2356             s->mv[0][0][0] = s->mv[0][0][1] = 0;
2357             s->mv[1][0][0] = s->mv[1][0][1] = 0;
2360         v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2361         for (k = 0; k < 4; k++) {
2362             s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2363             s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2364             s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2365             s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2366             v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2367             v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2371     if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2372         vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2373         vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2376     if (dir) { // backward
2377         vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2378         if (n == 3 || mv1) {
/* Also refresh the unused (forward) direction with a zero-diff prediction. */
2379             vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2382         vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2383         if (n == 3 || mv1) {
2384             vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2389 /** Get predicted DC value for I-frames only
2390  * prediction dir: left=0, top=1
2391  * @param s MpegEncContext
2392  * @param overlap flag indicating that overlap filtering is used
2393  * @param pq integer part of picture quantizer
2394  * @param[in] n block index in the current MB
2395  * @param dc_val_ptr Pointer to DC predictor
2396  * @param dir_ptr Prediction direction for use in AC prediction
2397  * @return predicted DC value (stored via dc_val_ptr for the caller)
2398  * NOTE(review): sampled listing — some lines are missing; text verbatim. */
2398 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2399                                 int16_t **dc_val_ptr, int *dir_ptr)
2401     int a, b, c, wrap, pred, scale;
/* Default DC predictors per dc_scale.
 * NOTE(review): index 0 holds -1 in a uint16_t (wraps to 0xFFFF) — looks like
 * an intentional "unused" sentinel since scale is always >= 1; confirm. */
2403     static const uint16_t dcpred[32] = {
2404         -1, 1024,  512,  341,  256,  205, 171,  146,  128,
2405          114,  102,   93,   85,   79,   73,  68,   64,
2406           60,   57,   54,   51,   49,   47,  45,   43,
2407           41,   39,   38,   37,   35,   34,  33
2410     /* find prediction - wmv3_dc_scale always used here in fact */
2411     if (n < 4) scale = s->y_dc_scale;
2412     else       scale = s->c_dc_scale;
2414     wrap = s->block_wrap[n];
2415     dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour, a = top neighbour (c = left, fetched elsewhere). */
2421     b = dc_val[ - 1 - wrap];
2422     a = dc_val[ - wrap];
2424     if (pq < 9 || !overlap) {
2425         /* Set outer values */
2426         if (s->first_slice_line && (n != 2 && n != 3))
2427             b = a = dcpred[scale];
2428         if (s->mb_x == 0 && (n != 1 && n != 3))
2429             b = c = dcpred[scale];
2431         /* Set outer values */
2432         if (s->first_slice_line && (n != 2 && n != 3))
2434         if (s->mb_x == 0 && (n != 1 && n != 3))
/* Direction decision: pick left when the vertical gradient is no larger. */
2438     if (abs(a - b) <= abs(b - c)) {
2440         *dir_ptr = 1; // left
2443         *dir_ptr = 0; // top
2446     /* update predictor */
2447     *dc_val_ptr = &dc_val[0];
2452 /** Get predicted DC value
2453  * prediction dir: left=0, top=1
2454  * @param s MpegEncContext
2455  * @param overlap flag indicating that overlap filtering is used
2456  * @param pq integer part of picture quantizer
2457  * @param[in] n block index in the current MB
2458  * @param a_avail flag indicating top block availability
2459  * @param c_avail flag indicating left block availability
2460  * @param dc_val_ptr Pointer to DC predictor
2461  * @param dir_ptr Prediction direction for use in AC prediction
2462  * NOTE(review): sampled listing — some lines are missing; text verbatim. */
2463 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2464                               int a_avail, int c_avail,
2465                               int16_t **dc_val_ptr, int *dir_ptr)
2467     int a, b, c, wrap, pred;
2469     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2473     wrap = s->block_wrap[n];
2474     dc_val = s->dc_val[0] + s->block_index[n];
2480     b = dc_val[ - 1 - wrap];
2481     a = dc_val[ - wrap];
2482     /* scale predictors if needed */
2483     q1 = s->current_picture.qscale_table[mb_pos];
2484     dqscale_index = s->y_dc_scale_table[q1] - 1;
2485     if (dqscale_index < 0)
/* Rescale each neighbour's DC from its own MB quantizer to the current one;
 * the (x * scale * dqscale + 0x20000) >> 18 form is fixed-point division. */
2487     if (c_avail && (n != 1 && n != 3)) {
2488         q2 = s->current_picture.qscale_table[mb_pos - 1];
2490             c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2492     if (a_avail && (n != 2 && n != 3)) {
2493         q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2495             a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2497     if (a_avail && c_avail && (n != 3)) {
2502             off -= s->mb_stride;
2503         q2 = s->current_picture.qscale_table[off];
2505             b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
/* Choose prediction source/direction from availability, falling back to
 * left when neither neighbour is available. */
2508     if (a_avail && c_avail) {
2509         if (abs(a - b) <= abs(b - c)) {
2511             *dir_ptr = 1; // left
2514             *dir_ptr = 0; // top
2516     } else if (a_avail) {
2518         *dir_ptr = 0; // top
2519     } else if (c_avail) {
2521         *dir_ptr = 1; // left
2524         *dir_ptr = 1; // left
2527     /* update predictor */
2528     *dc_val_ptr = &dc_val[0];
2532 /** @} */ // Block group
2535 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2536 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
/* Predict the coded-block flag from the left (a), top-left (b) and top (c)
 * neighbours and return a pointer to this block's slot for the caller to
 * update. NOTE(review): sampled listing — the prediction computation and
 * return are missing from view; text verbatim. */
2540 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2541                                        uint8_t **coded_block_ptr)
2543     int xy, wrap, pred, a, b, c;
2545     xy   = s->block_index[n];
2546     wrap = s->b8_stride;
2551     a = s->coded_block[xy - 1       ];
2552     b = s->coded_block[xy - 1 - wrap];
2553     c = s->coded_block[xy     - wrap];
2562     *coded_block_ptr = &s->coded_block[xy];
2568  * Decode one AC coefficient
2569  * @param v The VC1 context
2570  * @param last Last coefficient
2571  * @param skip How much zero coefficients to skip
2572  * @param value Decoded AC coefficient value
2573  * @param codingset set of VLC to decode data
2574  * NOTE(review): sampled listing — escape-mode branches are partially
2575  * missing; visible text kept verbatim. */
2576 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2577                                 int *value, int codingset)
2579     GetBitContext *gb = &v->s.gb;
2580     int index, escape, run = 0, level = 0, lst = 0;
2582     index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2583     if (index != ff_vc1_ac_sizes[codingset] - 1) {
/* Regular (non-escape) code: run/level straight from the tables.  The
 * get_bits_left() < 0 check forces "last" on bitstream overread. */
2584         run   = vc1_index_decode_table[codingset][index][0];
2585         level = vc1_index_decode_table[codingset][index][1];
2586         lst   = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
2590         escape = decode210(gb);
/* Escape modes 0/1: re-decode and add a delta to level or run. */
2592             index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2593             run   = vc1_index_decode_table[codingset][index][0];
2594             level = vc1_index_decode_table[codingset][index][1];
2595             lst   = index >= vc1_last_decode_table[codingset];
2598                     level += vc1_last_delta_level_table[codingset][run];
2600                     level += vc1_delta_level_table[codingset][run];
2603                     run += vc1_last_delta_run_table[codingset][level] + 1;
2605                     run += vc1_delta_run_table[codingset][level] + 1;
/* Escape mode 3: explicit run/level sizes, negotiated once per picture. */
2611             lst = get_bits1(gb);
2612             if (v->s.esc3_level_length == 0) {
2613                 if (v->pq < 8 || v->dquantfrm) { // table 59
2614                     v->s.esc3_level_length = get_bits(gb, 3);
2615                     if (!v->s.esc3_level_length)
2616                         v->s.esc3_level_length = get_bits(gb, 2) + 8;
2617                 } else { // table 60
2618                     v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2620                 v->s.esc3_run_length   = 3 + get_bits(gb, 2);
2622             run   = get_bits(gb, v->s.esc3_run_length);
2623             sign  = get_bits1(gb);
2624             level = get_bits(gb, v->s.esc3_level_length);
2635 /** Decode intra block in intra frames - should be faster than decode_intra_block
2636  * @param v VC1Context
2637  * @param block block to decode
2638  * @param[in] n subblock index
2639  * @param coded are AC coeffs present or not
2640  * @param codingset set of VLC to decode data
2641  * NOTE(review): sampled listing — several lines (error branches, closing
2641  * braces) are missing; visible text kept verbatim. */
2642 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2643                               int coded, int codingset)
2645     GetBitContext *gb = &v->s.gb;
2646     MpegEncContext *s = &v->s;
2647     int dc_pred_dir = 0; /* Direction of the DC prediction used */
2650     int16_t *ac_val, *ac_val2;
2653     /* Get DC differential */
/* Luma blocks (n < 4, presumably) use the luma DC VLC, chroma the other. */
2655         dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2657         dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2660         av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2664     if (dcdiff == 119 /* ESC index value */) {
2665         /* TODO: Optimize */
2666         if (v->pq == 1)      dcdiff = get_bits(gb, 10);
2667         else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2668         else                 dcdiff = get_bits(gb, 8);
/* Low-pq refinement bits extend the DC differential resolution. */
2671             dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2672         else if (v->pq == 2)
2673             dcdiff = (dcdiff << 1) + get_bits1(gb)   - 1;
2680     dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2683     /* Store the quantized DC coeff, used for prediction */
2685         block[0] = dcdiff * s->y_dc_scale;
2687         block[0] = dcdiff * s->c_dc_scale;
2698         int last = 0, skip, value;
2699         const uint8_t *zz_table;
2703         scale = v->pq * 2 + v->halfpq;
/* Zigzag table selection depends on the DC prediction direction. */
2707                 zz_table = v->zz_8x8[2];
2709                 zz_table = v->zz_8x8[3];
2711             zz_table = v->zz_8x8[1];
2713         ac_val  = s->ac_val[0][0] + s->block_index[n] * 16;
2715         if (dc_pred_dir) // left
2718             ac_val -= 16 * s->block_wrap[n];
2721             vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2725             block[zz_table[i++]] = value;
2728         /* apply AC prediction if needed */
2730             if (dc_pred_dir) { // left
2731                 for (k = 1; k < 8; k++)
2732                     block[k << v->left_blk_sh] += ac_val[k];
2734                 for (k = 1; k < 8; k++)
2735                     block[k << v->top_blk_sh] += ac_val[k + 8];
2738         /* save AC coeffs for further prediction */
2739         for (k = 1; k < 8; k++) {
2740             ac_val2[k]     = block[k << v->left_blk_sh];
2741             ac_val2[k + 8] = block[k << v->top_blk_sh];
2744         /* scale AC coeffs */
2745         for (k = 1; k < 64; k++)
2749                 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2752         if (s->ac_pred) i = 63;
/* "Not coded" path: synthesize predicted AC coefficients only. */
2758         ac_val  = s->ac_val[0][0] + s->block_index[n] * 16;
2762         scale = v->pq * 2 + v->halfpq;
2763         memset(ac_val2, 0, 16 * 2);
2764         if (dc_pred_dir) { // left
2767                 memcpy(ac_val2, ac_val, 8 * 2);
2769             ac_val -= 16 * s->block_wrap[n];
2771                 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2774         /* apply AC prediction if needed */
2776             if (dc_pred_dir) { //left
2777                 for (k = 1; k < 8; k++) {
2778                     block[k << v->left_blk_sh] = ac_val[k] * scale;
2779                     if (!v->pquantizer && block[k << v->left_blk_sh])
2780                         block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2783                 for (k = 1; k < 8; k++) {
2784                     block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2785                     if (!v->pquantizer && block[k << v->top_blk_sh])
2786                         block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2792     s->block_last_index[n] = i;
2797 /** Decode intra block in intra frames - should be faster than decode_intra_block
2798  * @param v VC1Context
2799  * @param block block to decode
2800  * @param[in] n subblock number
2801  * @param coded are AC coeffs present or not
2802  * @param codingset set of VLC to decode data
2803  * @param mquant quantizer value for this macroblock
2804  * NOTE(review): Advanced-profile variant; sampled listing with missing
2804  * lines; visible text kept verbatim. */
2805 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2806                                   int coded, int codingset, int mquant)
2808     GetBitContext *gb = &v->s.gb;
2809     MpegEncContext *s = &v->s;
2810     int dc_pred_dir = 0; /* Direction of the DC prediction used */
2812     int16_t *dc_val = NULL;
2813     int16_t *ac_val, *ac_val2;
2815     int a_avail = v->a_avail, c_avail = v->c_avail;
2816     int use_pred = s->ac_pred;
2819     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2821     /* Get DC differential */
2823         dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2825         dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2828         av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2832     if (dcdiff == 119 /* ESC index value */) {
2833         /* TODO: Optimize */
/* Uses per-MB mquant (not picture pq) for the escape size, unlike
 * vc1_decode_i_block(). */
2834         if (mquant == 1)      dcdiff = get_bits(gb, 10);
2835         else if (mquant == 2) dcdiff = get_bits(gb, 9);
2836         else                  dcdiff = get_bits(gb, 8);
2839             dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2840         else if (mquant == 2)
2841             dcdiff = (dcdiff << 1) + get_bits1(gb)   - 1;
2848     dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2851     /* Store the quantized DC coeff, used for prediction */
2853         block[0] = dcdiff * s->y_dc_scale;
2855         block[0] = dcdiff * s->c_dc_scale;
2861     /* check if AC is needed at all */
2862     if (!a_avail && !c_avail)
2864     ac_val  = s->ac_val[0][0] + s->block_index[n] * 16;
2867     scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2869     if (dc_pred_dir) // left
2872         ac_val -= 16 * s->block_wrap[n];
/* q1 = current MB quantizer; q2 = quantizer of the MB the AC predictors came
 * from (left or top), used below to rescale the predictors. */
2874     q1 = s->current_picture.qscale_table[mb_pos];
2875     if ( dc_pred_dir && c_avail && mb_pos)
2876         q2 = s->current_picture.qscale_table[mb_pos - 1];
2877     if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2878         q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2879     if ( dc_pred_dir && n == 1)
2881     if (!dc_pred_dir && n == 2)
2887         int last = 0, skip, value;
2888         const uint8_t *zz_table;
/* Interlaced frames use the interlaced zigzag unless AC prediction picked a
 * direction-specific table. */
2892         if (!use_pred && v->fcm == ILACE_FRAME) {
2893             zz_table = v->zzi_8x8;
2895                 if (!dc_pred_dir) // top
2896                     zz_table = v->zz_8x8[2];
2898                     zz_table = v->zz_8x8[3];
2901                 if (v->fcm != ILACE_FRAME)
2902                     zz_table = v->zz_8x8[1];
2904                     zz_table = v->zzi_8x8;
2908             vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2912             block[zz_table[i++]] = value;
2915         /* apply AC prediction if needed */
2917             /* scale predictors if needed*/
2918             if (q2 && q1 != q2) {
2919                 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2920                 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2923                     return AVERROR_INVALIDDATA;
2924                 if (dc_pred_dir) { // left
2925                     for (k = 1; k < 8; k++)
2926                         block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2928                     for (k = 1; k < 8; k++)
2929                         block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2932                 if (dc_pred_dir) { //left
2933                     for (k = 1; k < 8; k++)
2934                         block[k << v->left_blk_sh] += ac_val[k];
2936                     for (k = 1; k < 8; k++)
2937                         block[k << v->top_blk_sh] += ac_val[k + 8];
2941         /* save AC coeffs for further prediction */
2942         for (k = 1; k < 8; k++) {
2943             ac_val2[k    ] = block[k << v->left_blk_sh];
2944             ac_val2[k + 8] = block[k << v->top_blk_sh];
2947         /* scale AC coeffs */
2948         for (k = 1; k < 64; k++)
2952                 block[k] += (block[k] < 0) ? -mquant : mquant;
2955         if (use_pred) i = 63;
2956     } else { // no AC coeffs
2959         memset(ac_val2, 0, 16 * 2);
2960         if (dc_pred_dir) { // left
2962                 memcpy(ac_val2, ac_val, 8 * 2);
2963                 if (q2 && q1 != q2) {
2964                     q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2965                     q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2967                         return AVERROR_INVALIDDATA;
2968                     for (k = 1; k < 8; k++)
2969                         ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2974                 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2975                 if (q2 && q1 != q2) {
2976                     q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2977                     q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2979                         return AVERROR_INVALIDDATA;
2980                     for (k = 1; k < 8; k++)
2981                         ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2986         /* apply AC prediction if needed */
2988             if (dc_pred_dir) { // left
2989                 for (k = 1; k < 8; k++) {
2990                     block[k << v->left_blk_sh] = ac_val2[k] * scale;
2991                     if (!v->pquantizer && block[k << v->left_blk_sh])
2992                         block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2995                 for (k = 1; k < 8; k++) {
2996                     block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2997                     if (!v->pquantizer && block[k << v->top_blk_sh])
2998                         block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3004     s->block_last_index[n] = i;
3009 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3010  * @param v VC1Context
3011  * @param block block to decode
3012  * @param[in] n subblock index
3013  * @param coded are AC coeffs present or not
3014  * @param mquant block quantizer
3015  * @param codingset set of VLC to decode data
3016  * NOTE(review): sampled listing — missing lines; visible text verbatim. */
3017 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
3018                                   int coded, int mquant, int codingset)
3020     GetBitContext *gb = &v->s.gb;
3021     MpegEncContext *s = &v->s;
3022     int dc_pred_dir = 0; /* Direction of the DC prediction used */
3024     int16_t *dc_val = NULL;
3025     int16_t *ac_val, *ac_val2;
3027     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3028     int a_avail = v->a_avail, c_avail = v->c_avail;
3029     int use_pred = s->ac_pred;
3033     s->dsp.clear_block(block);
3035     /* XXX: Guard against dumb values of mquant */
3036     mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3038     /* Set DC scale - y and c use the same */
3039     s->y_dc_scale = s->y_dc_scale_table[mquant];
3040     s->c_dc_scale = s->c_dc_scale_table[mquant];
3042     /* Get DC differential */
3044         dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3046         dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3049         av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3053     if (dcdiff == 119 /* ESC index value */) {
3054         /* TODO: Optimize */
3055         if (mquant == 1)      dcdiff = get_bits(gb, 10);
3056         else if (mquant == 2) dcdiff = get_bits(gb, 9);
3057         else                  dcdiff = get_bits(gb, 8);
3060             dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3061         else if (mquant == 2)
3062             dcdiff = (dcdiff << 1) + get_bits1(gb)   - 1;
3069     dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3072     /* Store the quantized DC coeff, used for prediction */
3075         block[0] = dcdiff * s->y_dc_scale;
3077         block[0] = dcdiff * s->c_dc_scale;
3083     /* check if AC is needed at all and adjust direction if needed */
/* Unlike vc1_decode_i_block_adv(), availability overrides the direction. */
3084     if (!a_avail) dc_pred_dir = 1;
3085     if (!c_avail) dc_pred_dir = 0;
3086     if (!a_avail && !c_avail) use_pred = 0;
3087     ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3090     scale = mquant * 2 + v->halfpq;
3092     if (dc_pred_dir) //left
3095         ac_val -= 16 * s->block_wrap[n];
3097     q1 = s->current_picture.qscale_table[mb_pos];
3098     if (dc_pred_dir && c_avail && mb_pos)
3099         q2 = s->current_picture.qscale_table[mb_pos - 1];
3100     if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3101         q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3102     if ( dc_pred_dir && n == 1)
3104     if (!dc_pred_dir && n == 2)
3106     if (n == 3) q2 = q1;
3109         int last = 0, skip, value;
3113             vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
/* Scan-order selection: progressive / AC-prediction-specific / interlaced. */
3117             if (v->fcm == PROGRESSIVE)
3118                 block[v->zz_8x8[0][i++]] = value;
3120                 if (use_pred && (v->fcm == ILACE_FRAME)) {
3121                     if (!dc_pred_dir) // top
3122                         block[v->zz_8x8[2][i++]] = value;
3124                         block[v->zz_8x8[3][i++]] = value;
3126                     block[v->zzi_8x8[i++]] = value;
3131         /* apply AC prediction if needed */
3133             /* scale predictors if needed*/
3134             if (q2 && q1 != q2) {
3135                 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3136                 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3139                     return AVERROR_INVALIDDATA;
3140                 if (dc_pred_dir) { // left
3141                     for (k = 1; k < 8; k++)
3142                         block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3144                     for (k = 1; k < 8; k++)
3145                         block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3148                 if (dc_pred_dir) { // left
3149                     for (k = 1; k < 8; k++)
3150                         block[k << v->left_blk_sh] += ac_val[k];
3152                     for (k = 1; k < 8; k++)
3153                         block[k << v->top_blk_sh] += ac_val[k + 8];
3157         /* save AC coeffs for further prediction */
3158         for (k = 1; k < 8; k++) {
3159             ac_val2[k    ] = block[k << v->left_blk_sh];
3160             ac_val2[k + 8] = block[k << v->top_blk_sh];
3163         /* scale AC coeffs */
3164         for (k = 1; k < 64; k++)
3168                 block[k] += (block[k] < 0) ? -mquant : mquant;
3171         if (use_pred) i = 63;
3172     } else { // no AC coeffs
3175         memset(ac_val2, 0, 16 * 2);
3176         if (dc_pred_dir) { // left
3178                 memcpy(ac_val2, ac_val, 8 * 2);
3179                 if (q2 && q1 != q2) {
3180                     q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3181                     q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3183                         return AVERROR_INVALIDDATA;
3184                     for (k = 1; k < 8; k++)
3185                         ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3190                 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3191                 if (q2 && q1 != q2) {
3192                     q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3193                     q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3195                         return AVERROR_INVALIDDATA;
3196                     for (k = 1; k < 8; k++)
3197                         ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3202         /* apply AC prediction if needed */
3204             if (dc_pred_dir) { // left
3205                 for (k = 1; k < 8; k++) {
3206                     block[k << v->left_blk_sh] = ac_val2[k] * scale;
3207                     if (!v->pquantizer && block[k << v->left_blk_sh])
3208                         block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3211                 for (k = 1; k < 8; k++) {
3212                     block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3213                     if (!v->pquantizer && block[k << v->top_blk_sh])
3214                         block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3220     s->block_last_index[n] = i;
/* Decode a residual block of a P macroblock: choose the transform type
 * (8x8 / 8x4 / 4x8 / 4x4), decode the coded sub-blocks' AC coefficients,
 * dequantize, inverse-transform and add to dst.
 * NOTE(review): sampled listing — missing lines; visible text verbatim. */
3227 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3228                               int mquant, int ttmb, int first_block,
3229                               uint8_t *dst, int linesize, int skip_block,
3232     MpegEncContext *s = &v->s;
3233     GetBitContext *gb = &s->gb;
3236     int scale, off, idx, last, skip, value;
3237     int ttblk = ttmb & 7;
3240     s->dsp.clear_block(block);
3243         ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3245     if (ttblk == TT_4X4) {
3246         subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3248     if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3249         && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3250             || (!v->res_rtm_flag && !first_block))) {
3251         subblkpat = decode012(gb);
3253             subblkpat ^= 3; // swap decoded pattern bits
3254         if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3256         if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3259     scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3261     // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3262     if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3263         subblkpat = 2 - (ttblk == TT_8X4_TOP);
3266     if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3267         subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* ---- 8x8 transform ---- */
3276             vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3281                 idx = v->zz_8x8[0][i++];
3283                 idx = v->zzi_8x8[i++];
3284             block[idx] = value * scale;
3286                 block[idx] += (block[idx] < 0) ? -mquant : mquant;
/* DC-only fast path vs full inverse transform + clamped add. */
3290                 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3292                 v->vc1dsp.vc1_inv_trans_8x8(block);
3293                 s->dsp.add_pixels_clamped(block, dst, linesize);
/* ---- 4x4 transform: four sub-blocks, pattern bits select coded ones ---- */
3298         pat = ~subblkpat & 0xF;
3299         for (j = 0; j < 4; j++) {
3300             last = subblkpat & (1 << (3 - j));
3302             off  = (j & 1) * 4 + (j & 2) * 16;
3304                 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3309                     idx = ff_vc1_simple_progressive_4x4_zz[i++];
3311                     idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3312                 block[idx + off] = value * scale;
3314                     block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3316             if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3318                     v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3320                     v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* ---- 8x4 transform: two horizontal sub-blocks ---- */
3325         pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3326         for (j = 0; j < 2; j++) {
3327             last = subblkpat & (1 << (1 - j));
3331                 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3336                     idx = v->zz_8x4[i++] + off;
3338                     idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3339                 block[idx] = value * scale;
3341                     block[idx] += (block[idx] < 0) ? -mquant : mquant;
3343             if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3345                     v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3347                     v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* ---- 4x8 transform: two vertical sub-blocks ---- */
3352         pat = ~(subblkpat * 5) & 0xF;
3353         for (j = 0; j < 2; j++) {
3354             last = subblkpat & (1 << (1 - j));
3358                 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3363                     idx = v->zz_4x8[i++] + off;
3365                     idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3366                 block[idx] = value * scale;
3368                     block[idx] += (block[idx] < 0) ? -mquant : mquant;
3370             if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3372                     v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3374                     v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* Record the transform type of this block for the loop filter. */
3380         *ttmb_out |= ttblk << (n * 4);
3384 /** @} */ // Macroblock group
/* MV differential decoding helpers: bit-lengths and cumulative offsets
 * (cf. offset_table1/offset_table2 for the interlaced MVDATA case above). */
3386 static const int size_table  [6] = { 0, 2, 3, 4,  5,  8 };
3387 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/* Apply the vertical in-loop deblocking filter for one block of the MB one
 * row above the current one, using its CBP/intra/transform-type state and
 * the MVs across the edge. NOTE(review): sampled listing — missing lines;
 * visible text verbatim. */
3389 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3391     MpegEncContext *s  = &v->s;
3392     int mb_cbp         = v->cbp[s->mb_x - s->mb_stride],
3393         block_cbp      = mb_cbp >> (block_num * 4), bottom_cbp,
3394         mb_is_intra    = v->is_intra[s->mb_x - s->mb_stride],
3395         block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3396     int idx, linesize  = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3399     if (block_num > 3) {
3400         dst      = s->dest[block_num - 3];
3402         dst      = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3404     if (s->mb_y != s->end_mb_y || block_num < 2) {
3408         if (block_num > 3) {
/* Chroma blocks: neighbour below is the co-located chroma block. */
3409             bottom_cbp      = v->cbp[s->mb_x]      >> (block_num * 4);
3410             bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3411             mv              = &v->luma_mv[s->mb_x - s->mb_stride];
3412             mv_stride       = s->mb_stride;
3414             bottom_cbp      = (block_num < 2) ? (mb_cbp               >> ((block_num + 2) * 4))
3415                                               : (v->cbp[s->mb_x]      >> ((block_num - 2) * 4));
3416             bottom_is_intra = (block_num < 2) ? (mb_is_intra          >> ((block_num + 2) * 4))
3417                                               : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3418             mv_stride       = s->b8_stride;
3419             mv              = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* Filter the full 8-wide edge when either side is intra or the MVs differ;
 * otherwise filter per-4-sample halves based on the combined CBP bits. */
3422         if (bottom_is_intra & 1 || block_is_intra & 1 ||
3423             mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3424             v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3426             idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3428                 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3431                     v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3433                     v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* Internal horizontal edge (4 lines up) for 4x4/8x4 transformed blocks. */
3438     dst -= 4 * linesize;
3439     ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3440     if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3441         idx = (block_cbp | (block_cbp >> 2)) & 3;
3443             v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3446                 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3448                 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* Horizontal counterpart of vc1_apply_p_v_loop_filter(): filters the vertical
 * edges of one block of the MB up-and-left of the current position.
 * NOTE(review): sampled listing — missing lines; visible text verbatim. */
3453 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3455     MpegEncContext *s  = &v->s;
3456     int mb_cbp         = v->cbp[s->mb_x - 1 - s->mb_stride],
3457         block_cbp      = mb_cbp >> (block_num * 4), right_cbp,
3458         mb_is_intra    = v->is_intra[s->mb_x - 1 - s->mb_stride],
3459         block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3460     int idx, linesize  = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3463     if (block_num > 3) {
3464         dst = s->dest[block_num - 3] - 8 * linesize;
3466         dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3469     if (s->mb_x != s->mb_width || !(block_num & 5)) {
3472         if (block_num > 3) {
3473             right_cbp      = v->cbp[s->mb_x - s->mb_stride]      >> (block_num * 4);
3474             right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3475             mv             = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3477             right_cbp      = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride]      >> ((block_num - 1) * 4))
3478                                              : (mb_cbp                              >> ((block_num + 1) * 4));
3479             right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3480                                              : (mb_is_intra                         >> ((block_num + 1) * 4));
3481             mv             = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3483         if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3484             v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3486             idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3488                 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3491                     v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3493                     v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* Internal vertical edge for 4x4/4x8 transformed blocks. */
3499     ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3500     if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3501         idx = (block_cbp | (block_cbp >> 1)) & 5;
3503             v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3506                 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3508                 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* Drive the P-frame in-loop deblocking for the current macroblock position:
 * run the vertical filter for all six blocks, then the horizontal filter.
 * Because H filtering reads state produced by V filtering, H lags one MB
 * behind V and an extra H pass at the right edge finishes the row. */
3513 static void vc1_apply_p_loop_filter(VC1Context *v)
3515     MpegEncContext *s = &v->s;
3518     for (i = 0; i < 6; i++) {
3519         vc1_apply_p_v_loop_filter(v, i);
3522     /* V always precedes H, therefore we run H one MB before V;
3523      * at the end of a row, we catch up to complete the row */
3525         for (i = 0; i < 6; i++) {
3526             vc1_apply_p_h_loop_filter(v, i);
3528     if (s->mb_x == s->mb_width - 1) {
/* advance block indices so the catch-up H pass addresses the last MB */
3530         ff_update_block_index(s);
3531         for (i = 0; i < 6; i++) {
3532             vc1_apply_p_h_loop_filter(v, i);
3538 /** Decode one P-frame MB
 *
 * Decodes one macroblock of a progressive P picture: reads skip/MV-type
 * flags (from the bitstream when raw, otherwise from the decoded bitplanes),
 * then handles the four combinations of {1MV, 4MV} x {coded, skipped}.
 * Per-block cbp / transform-type / intra masks are accumulated and stored
 * in v->cbp / v->ttblk / v->is_intra for the in-loop filter.
 * NOTE(review): extraction has elided lines in this chunk (original line
 * numbers skip), so some statements and braces are not visible here.
 */
3540 static int vc1_decode_p_mb(VC1Context *v)
3542     MpegEncContext *s = &v->s;
3543     GetBitContext *gb = &s->gb;
3545     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3546     int cbp; /* cbp decoding stuff */
3547     int mqdiff, mquant; /* MB quantization */
3548     int ttmb = v->ttfrm; /* MB Transform type */
3550     int mb_has_coeffs = 1; /* last_flag */
3551     int dmv_x, dmv_y; /* Differential MV components */
3552     int index, index1; /* LUT indexes */
3553     int val, sign; /* temp values */
3554     int first_block = 1;
3556     int skipped, fourmv;
/* accumulated per-block cbp, transform-type and intra masks (4 bits / 1 bit per block) */
3557     int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3559     mquant = v->pq; /* lossy initialization */
/* MV-type and skip flags: read raw from the bitstream or take from the
 * already-decoded bitplanes */
3561     if (v->mv_type_is_raw)
3562         fourmv = get_bits1(gb);
3564         fourmv = v->mv_type_mb_plane[mb_pos];
3566         skipped = get_bits1(gb);
3568         skipped = v->s.mbskip_table[mb_pos];
3570     if (!fourmv) { /* 1MV mode */
3572             GET_MVDATA(dmv_x, dmv_y);
3575                 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3576                 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3578             s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3579             vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3581             /* FIXME Set DC val for inter block ? */
3582             if (s->mb_intra && !mb_has_coeffs) {
3584                 s->ac_pred = get_bits1(gb);
3586             } else if (mb_has_coeffs) {
3588                     s->ac_pred = get_bits1(gb);
3589                 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3595             s->current_picture.qscale_table[mb_pos] = mquant;
/* per-MB transform type only when not forced frame-wide (ttmbf) */
3597             if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3598                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3599                                 VC1_TTMB_VLC_BITS, 2);
3600             if (!s->mb_intra) vc1_mc_1mv(v, 0);
3602             for (i = 0; i < 6; i++) {
3603                 s->dc_val[0][s->block_index[i]] = 0;
3605                 val = ((cbp >> (5 - i)) & 1);
3606                 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3607                 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3609                     /* check if prediction blocks A and C are available */
3610                     v->a_avail = v->c_avail = 0;
3611                     if (i == 2 || i == 3 || !s->first_slice_line)
3612                         v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3613                     if (i == 1 || i == 3 || s->mb_x)
3614                         v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3616                     vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3617                                            (i & 4) ? v->codingset2 : v->codingset);
/* in grayscale-only mode, skip reconstruction of the chroma blocks */
3618                     if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3620                     v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* rangeredfrm expansion: double the residual — presumably guarded by
 * v->rangeredfrm in the elided line above; TODO confirm */
3622                         for (j = 0; j < 64; j++)
3623                             s->block[i][j] <<= 1;
3624                     s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3625                     if (v->pq >= 9 && v->overlap) {
3627                             v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3629                             v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* intra block: mark all four cbp bits and the intra flag for the loop filter */
3631                     block_cbp |= 0xF << (i << 2);
3632                     block_intra |= 1 << i;
3634                     pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3635                                              s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3636                                              (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3637                     block_cbp |= pat << (i << 2);
3638                     if (!v->ttmbf && ttmb < 8)
/* skipped 1MV MB: clear per-block state, predict MV and motion-compensate */
3645             for (i = 0; i < 6; i++) {
3646                 v->mb_type[0][s->block_index[i]] = 0;
3647                 s->dc_val[0][s->block_index[i]] = 0;
3649             s->current_picture.mb_type[mb_pos]      = MB_TYPE_SKIP;
3650             s->current_picture.qscale_table[mb_pos] = 0;
3651             vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3654     } else { // 4MV mode
3655         if (!skipped /* unskipped MB */) {
3656             int intra_count = 0, coded_inter = 0;
3657             int is_intra[6], is_coded[6];
3659             cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3660             for (i = 0; i < 6; i++) {
3661                 val = ((cbp >> (5 - i)) & 1);
3662                 s->dc_val[0][s->block_index[i]] = 0;
3669                     GET_MVDATA(dmv_x, dmv_y);
/* each of the four luma blocks carries its own MV */
3671                     vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3673                         vc1_mc_4mv_luma(v, i, 0, 0);
3674                     intra_count += s->mb_intra;
3675                     is_intra[i] = s->mb_intra;
3676                     is_coded[i] = mb_has_coeffs;
/* chroma blocks are intra when a majority (>=3) of luma blocks are intra */
3679                     is_intra[i] = (intra_count >= 3);
3683                         vc1_mc_4mv_chroma(v, 0);
3684                 v->mb_type[0][s->block_index[i]] = is_intra[i];
3686                     coded_inter = !is_intra[i] & is_coded[i];
3688             // if there are no coded blocks then don't do anything more
3690             if (!intra_count && !coded_inter)
3693             s->current_picture.qscale_table[mb_pos] = mquant;
3694             /* test if block is intra and has pred */
3697                 for (i = 0; i < 6; i++)
3699                     if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3700                         || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3706                 s->ac_pred = get_bits1(gb);
3710             if (!v->ttmbf && coded_inter)
3711                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3712             for (i = 0; i < 6; i++) {
3714                 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3715                 s->mb_intra = is_intra[i];
3717                     /* check if prediction blocks A and C are available */
3718                     v->a_avail = v->c_avail = 0;
3719                     if (i == 2 || i == 3 || !s->first_slice_line)
3720                         v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3721                     if (i == 1 || i == 3 || s->mb_x)
3722                         v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3724                     vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3725                                            (i & 4) ? v->codingset2 : v->codingset);
3726                     if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3728                     v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3730                         for (j = 0; j < 64; j++)
3731                             s->block[i][j] <<= 1;
3732                     s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3733                                                      (i & 4) ? s->uvlinesize : s->linesize);
3734                     if (v->pq >= 9 && v->overlap) {
3736                             v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3738                             v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3740                     block_cbp |= 0xF << (i << 2);
3741                     block_intra |= 1 << i;
3742                 } else if (is_coded[i]) {
3743                     pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3744                                              first_block, s->dest[dst_idx] + off,
3745                                              (i & 4) ? s->uvlinesize : s->linesize,
3746                                              (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3748                     block_cbp |= pat << (i << 2);
3749                     if (!v->ttmbf && ttmb < 8)
3754         } else { // skipped MB
3756             s->current_picture.qscale_table[mb_pos] = 0;
3757             for (i = 0; i < 6; i++) {
3758                 v->mb_type[0][s->block_index[i]] = 0;
3759                 s->dc_val[0][s->block_index[i]] = 0;
3761             for (i = 0; i < 4; i++) {
3762                 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3763                 vc1_mc_4mv_luma(v, i, 0, 0);
3765             vc1_mc_4mv_chroma(v, 0);
3766             s->current_picture.qscale_table[mb_pos] = 0;
/* publish this MB's masks for the deferred in-loop filter pass */
3770     v->cbp[s->mb_x]      = block_cbp;
3771     v->ttblk[s->mb_x]    = block_tt;
3772     v->is_intra[s->mb_x] = block_intra;
3777 /* Decode one macroblock in an interlaced frame p picture */
/* Reads the MB-mode VLC (4MV vs non-4MV tables chosen by fourmvswitch),
 * records the per-block MV type (frame vs field MVs) in v->blk_mv_type,
 * then decodes either an intra MB (with optional field transform "fieldtx")
 * or an inter MB with 1/2/4 motion vectors.  Intra masks are propagated to
 * v->is_intra_base at the end of each MB row.
 * NOTE(review): extraction has elided lines in this chunk (original line
 * numbers skip), so some statements and braces are not visible here. */
3779 static int vc1_decode_p_mb_intfr(VC1Context *v)
3781     MpegEncContext *s = &v->s;
3782     GetBitContext *gb = &s->gb;
3784     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3785     int cbp = 0; /* cbp decoding stuff */
3786     int mqdiff, mquant; /* MB quantization */
3787     int ttmb = v->ttfrm; /* MB Transform type */
3789     int mb_has_coeffs = 1; /* last_flag */
3790     int dmv_x, dmv_y; /* Differential MV components */
3791     int val; /* temp value */
3792     int first_block = 1;
3794     int skipped, fourmv = 0, twomv = 0;
3795     int block_cbp = 0, pat, block_tt = 0;
3796     int idx_mbmode = 0, mvbp;
3797     int stride_y, fieldtx;
3799     mquant = v->pq; /* Lossy initialization */
3802         skipped = get_bits1(gb);
3804         skipped = v->s.mbskip_table[mb_pos];
3806     if (v->fourmvswitch)
3807         idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3809         idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3810     switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3811     /* store the motion vector type in a flag (useful later) */
3812     case MV_PMODE_INTFR_4MV:
3814         v->blk_mv_type[s->block_index[0]] = 0;
3815         v->blk_mv_type[s->block_index[1]] = 0;
3816         v->blk_mv_type[s->block_index[2]] = 0;
3817         v->blk_mv_type[s->block_index[3]] = 0;
3819     case MV_PMODE_INTFR_4MV_FIELD:
/* field-MV modes: mark all four luma blocks as using field motion vectors */
3821         v->blk_mv_type[s->block_index[0]] = 1;
3822         v->blk_mv_type[s->block_index[1]] = 1;
3823         v->blk_mv_type[s->block_index[2]] = 1;
3824         v->blk_mv_type[s->block_index[3]] = 1;
3826     case MV_PMODE_INTFR_2MV_FIELD:
3828         v->blk_mv_type[s->block_index[0]] = 1;
3829         v->blk_mv_type[s->block_index[1]] = 1;
3830         v->blk_mv_type[s->block_index[2]] = 1;
3831         v->blk_mv_type[s->block_index[3]] = 1;
3833     case MV_PMODE_INTFR_1MV:
3834         v->blk_mv_type[s->block_index[0]] = 0;
3835         v->blk_mv_type[s->block_index[1]] = 0;
3836         v->blk_mv_type[s->block_index[2]] = 0;
3837         v->blk_mv_type[s->block_index[3]] = 0;
3840     if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3841         for (i = 0; i < 4; i++) {
3842             s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3843             s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3845         s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3846         s->mb_intra = v->is_intra[s->mb_x] = 1;
3847         for (i = 0; i < 6; i++)
3848             v->mb_type[0][s->block_index[i]] = 1;
/* fieldtx: whether the residual transform operates on fields (affects the
 * luma destination offsets and stride below) */
3849         fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3850         mb_has_coeffs = get_bits1(gb);
3852             cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3853         v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3855         s->current_picture.qscale_table[mb_pos] = mquant;
3856         /* Set DC scale - y and c use the same (not sure if necessary here) */
3857         s->y_dc_scale = s->y_dc_scale_table[mquant];
3858         s->c_dc_scale = s->c_dc_scale_table[mquant];
3860         for (i = 0; i < 6; i++) {
3861             s->dc_val[0][s->block_index[i]] = 0;
3863             val = ((cbp >> (5 - i)) & 1);
3864             v->mb_type[0][s->block_index[i]] = s->mb_intra;
3865             v->a_avail = v->c_avail = 0;
3866             if (i == 2 || i == 3 || !s->first_slice_line)
3867                 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3868             if (i == 1 || i == 3 || s->mb_x)
3869                 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3871             vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3872                                    (i & 4) ? v->codingset2 : v->codingset);
3873             if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3874             v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* luma: doubled stride and interleaved-row offsets when fieldtx is set */
3876                 stride_y = s->linesize << fieldtx;
3877                 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3879                 stride_y = s->uvlinesize;
3882             s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3886     } else { // inter MB
3887         mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3889             cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3890         if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3891             v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3893             if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3894                 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3895                 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3898         s->mb_intra = v->is_intra[s->mb_x] = 0;
3899         for (i = 0; i < 6; i++)
3900             v->mb_type[0][s->block_index[i]] = 0;
3901         fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3902         /* for all motion vector read MVDATA and motion compensate each block */
/* 4MV path: one MV per coded luma block, chroma handled at i == 4 */
3906             for (i = 0; i < 6; i++) {
3909                     val = ((mvbp >> (3 - i)) & 1);
3911                         get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3913                     vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3914                     vc1_mc_4mv_luma(v, i, 0, 0);
3915                 } else if (i == 4) {
3916                     vc1_mc_4mv_chroma4(v, 0, 0, 0);
/* 2MV-field path: one MV per field, each applied to a pair of luma blocks */
3923                 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3925             vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3926             vc1_mc_4mv_luma(v, 0, 0, 0);
3927             vc1_mc_4mv_luma(v, 1, 0, 0);
3930                 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3932             vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3933             vc1_mc_4mv_luma(v, 2, 0, 0);
3934             vc1_mc_4mv_luma(v, 3, 0, 0);
3935             vc1_mc_4mv_chroma4(v, 0, 0, 0);
/* 1MV path: block pattern comes from the mbmode table, not the bitstream */
3937             mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3940                 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3942             vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3946             GET_MQUANT();  // p. 227
3947             s->current_picture.qscale_table[mb_pos] = mquant;
3948             if (!v->ttmbf && cbp)
3949                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3950             for (i = 0; i < 6; i++) {
3951                 s->dc_val[0][s->block_index[i]] = 0;
3953                 val = ((cbp >> (5 - i)) & 1);
3955                     off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
/* fieldtx residual layout uses single-row offsets for the bottom blocks */
3957                     off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3959                     pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3960                                              first_block, s->dest[dst_idx] + off,
3961                                              (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3962                                              (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3963                     block_cbp |= pat << (i << 2);
3964                     if (!v->ttmbf && ttmb < 8)
/* skipped MB: zero all state, predict a zero 1MV and motion-compensate */
3971         s->mb_intra = v->is_intra[s->mb_x] = 0;
3972         for (i = 0; i < 6; i++) {
3973             v->mb_type[0][s->block_index[i]] = 0;
3974             s->dc_val[0][s->block_index[i]] = 0;
3976         s->current_picture.mb_type[mb_pos]      = MB_TYPE_SKIP;
3977         s->current_picture.qscale_table[mb_pos] = 0;
3978         v->blk_mv_type[s->block_index[0]] = 0;
3979         v->blk_mv_type[s->block_index[1]] = 0;
3980         v->blk_mv_type[s->block_index[2]] = 0;
3981         v->blk_mv_type[s->block_index[3]] = 0;
3982         vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
/* at the row end, save the intra row so the next row can reference it */
3985     if (s->mb_x == s->mb_width - 1)
3986         memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/* Decode one macroblock of an interlaced field P picture.
 * The MB mode index selects intra (<= 1), 1MV (<= 5) or 4MV decoding;
 * motion data is read with get_mvdata_interlaced() and field-aware offsets
 * (v->blocks_off / v->mb_off) are applied when storing MVs and MB types.
 * NOTE(review): extraction has elided lines in this chunk (original line
 * numbers skip), so some statements and braces are not visible here. */
3990 static int vc1_decode_p_mb_intfi(VC1Context *v)
3992     MpegEncContext *s = &v->s;
3993     GetBitContext *gb = &s->gb;
3995     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3996     int cbp = 0; /* cbp decoding stuff */
3997     int mqdiff, mquant; /* MB quantization */
3998     int ttmb = v->ttfrm; /* MB Transform type */
4000     int mb_has_coeffs = 1; /* last_flag */
4001     int dmv_x, dmv_y; /* Differential MV components */
4002     int val; /* temp values */
4003     int first_block = 1;
4006     int block_cbp = 0, pat, block_tt = 0;
4009     mquant = v->pq; /* Lossy initialization */
4011     idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4012     if (idx_mbmode <= 1) { // intra MB
4013         s->mb_intra = v->is_intra[s->mb_x] = 1;
4014         s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4015         s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4016         s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4018         s->current_picture.qscale_table[mb_pos] = mquant;
4019         /* Set DC scale - y and c use the same (not sure if necessary here) */
4020         s->y_dc_scale = s->y_dc_scale_table[mquant];
4021         s->c_dc_scale = s->c_dc_scale_table[mquant];
4022         v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
/* LSB of the mode index doubles as the coded-coefficients flag */
4023         mb_has_coeffs = idx_mbmode & 1;
4025             cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4027         for (i = 0; i < 6; i++) {
4028             s->dc_val[0][s->block_index[i]]  = 0;
4029             v->mb_type[0][s->block_index[i]] = 1;
4031             val = ((cbp >> (5 - i)) & 1);
4032             v->a_avail = v->c_avail = 0;
4033             if (i == 2 || i == 3 || !s->first_slice_line)
4034                 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4035             if (i == 1 || i == 3 || s->mb_x)
4036                 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4038             vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4039                                    (i & 4) ? v->codingset2 : v->codingset);
4040             if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4042             v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4043             off  = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4044             s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4045             // TODO: loop filter
4048         s->mb_intra = v->is_intra[s->mb_x] = 0;
4049         s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4050         for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4051         if (idx_mbmode <= 5) { // 1-MV
4052             dmv_x = dmv_y = pred_flag = 0;
4053             if (idx_mbmode & 1) {
4054                 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4056             vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4058             mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: per-block pattern tells which luma blocks carry MV data */
4060             v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4061             for (i = 0; i < 6; i++) {
4063                     dmv_x = dmv_y = pred_flag = 0;
4064                     val = ((v->fourmvbp >> (3 - i)) & 1);
4066                         get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4068                     vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4069                     vc1_mc_4mv_luma(v, i, 0, 0);
4071                     vc1_mc_4mv_chroma(v, 0);
4073             mb_has_coeffs = idx_mbmode & 1;
4076             cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4080             s->current_picture.qscale_table[mb_pos] = mquant;
4081             if (!v->ttmbf && cbp) {
4082                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4085         for (i = 0; i < 6; i++) {
4086             s->dc_val[0][s->block_index[i]] = 0;
4088             val = ((cbp >> (5 - i)) & 1);
4089             off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4091                 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4092                                          first_block, s->dest[dst_idx] + off,
4093                                          (i & 4) ? s->uvlinesize : s->linesize,
4094                                          (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4096                 block_cbp |= pat << (i << 2);
4097                 if (!v->ttmbf && ttmb < 8) ttmb = -1;
/* at the row end, save the intra row so the next row can reference it */
4102     if (s->mb_x == s->mb_width - 1)
4103         memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4107 /** Decode one B-frame MB (in Main profile)
 *
 * Decodes one macroblock of a progressive B picture: direct/skip flags come
 * from the bitstream or the decoded bitplanes, the B-MV type (forward,
 * backward, interpolated) is derived via decode012()/bfraction, and the
 * residual path mirrors the P-MB code (intra blocks through
 * vc1_decode_intra_block(), inter blocks through vc1_decode_p_block()).
 * NOTE(review): extraction has elided lines in this chunk (original line
 * numbers skip), so some statements and braces are not visible here.
 */
4109 static void vc1_decode_b_mb(VC1Context *v)
4111     MpegEncContext *s = &v->s;
4112     GetBitContext *gb = &s->gb;
4114     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4115     int cbp = 0; /* cbp decoding stuff */
4116     int mqdiff, mquant; /* MB quantization */
4117     int ttmb = v->ttfrm; /* MB Transform type */
4118     int mb_has_coeffs = 0; /* last_flag */
4119     int index, index1; /* LUT indexes */
4120     int val, sign; /* temp values */
4121     int first_block = 1;
4123     int skipped, direct;
/* two MV pairs: [0] forward, [1] backward */
4124     int dmv_x[2], dmv_y[2];
4125     int bmvtype = BMV_TYPE_BACKWARD;
4127     mquant = v->pq; /* lossy initialization */
4131         direct = get_bits1(gb);
4133         direct = v->direct_mb_plane[mb_pos];
4135         skipped = get_bits1(gb);
4137         skipped = v->s.mbskip_table[mb_pos];
4139     dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4140     for (i = 0; i < 6; i++) {
4141         v->mb_type[0][s->block_index[i]] = 0;
4142         s->dc_val[0][s->block_index[i]] = 0;
4144     s->current_picture.qscale_table[mb_pos] = 0;
4148             GET_MVDATA(dmv_x[0], dmv_y[0]);
4149             dmv_x[1] = dmv_x[0];
4150             dmv_y[1] = dmv_y[0];
4152         if (skipped || !s->mb_intra) {
4153             bmvtype = decode012(gb);
/* decode012 result 1/2 selects direction by which reference is temporally
 * closer (bfraction vs half) */
4156                 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4159                 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4162                 bmvtype  = BMV_TYPE_INTERPOLATED;
4163                 dmv_x[0] = dmv_y[0] = 0;
4167         for (i = 0; i < 6; i++)
4168             v->mb_type[0][s->block_index[i]] = s->mb_intra;
4172             bmvtype = BMV_TYPE_INTERPOLATED;
4173         vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4174         vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4178         cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4181         s->current_picture.qscale_table[mb_pos] = mquant;
4183             ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4184         dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4185         vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4186         vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4188         if (!mb_has_coeffs && !s->mb_intra) {
4189             /* no coded blocks - effectively skipped */
4190             vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4191             vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4194         if (s->mb_intra && !mb_has_coeffs) {
4196             s->current_picture.qscale_table[mb_pos] = mquant;
4197             s->ac_pred = get_bits1(gb);
4199             vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4201             if (bmvtype == BMV_TYPE_INTERPOLATED) {
/* interpolated mode reads the second (backward-side) MV delta as well */
4202                 GET_MVDATA(dmv_x[0], dmv_y[0]);
4203                 if (!mb_has_coeffs) {
4204                     /* interpolated skipped block */
4205                     vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4206                     vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4210             vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4212                 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4215             s->ac_pred = get_bits1(gb);
4216             cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4218             s->current_picture.qscale_table[mb_pos] = mquant;
4219             if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4220                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* residual reconstruction, same structure as the P-MB block loop */
4224     for (i = 0; i < 6; i++) {
4225         s->dc_val[0][s->block_index[i]] = 0;
4227         val = ((cbp >> (5 - i)) & 1);
4228         off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4229         v->mb_type[0][s->block_index[i]] = s->mb_intra;
4231             /* check if prediction blocks A and C are available */
4232             v->a_avail = v->c_avail = 0;
4233             if (i == 2 || i == 3 || !s->first_slice_line)
4234                 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4235             if (i == 1 || i == 3 || s->mb_x)
4236                 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4238             vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4239                                    (i & 4) ? v->codingset2 : v->codingset);
4240             if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4242             v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4244                 for (j = 0; j < 64; j++)
4245                     s->block[i][j] <<= 1;
4246             s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4248             vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4249                                first_block, s->dest[dst_idx] + off,
4250                                (i & 4) ? s->uvlinesize : s->linesize,
4251                                (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4252             if (!v->ttmbf && ttmb < 8)
4259 /** Decode one B-frame MB (in interlaced field B picture)
 *
 * Decodes one macroblock of an interlaced field B picture.  The MB mode
 * index selects intra (<= 1), 1MV (<= 5) or 4MV decoding; the B-MV type
 * (forward / backward / direct / interpolated) is refined from the forward
 * bitplane and decode012(), and per-direction MV deltas plus prediction
 * flags are read with get_mvdata_interlaced().
 * NOTE(review): extraction has elided lines in this chunk (original line
 * numbers skip), so some statements and braces are not visible here.
 */
4261 static void vc1_decode_b_mb_intfi(VC1Context *v)
4263     MpegEncContext *s = &v->s;
4264     GetBitContext *gb = &s->gb;
4266     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4267     int cbp = 0; /* cbp decoding stuff */
4268     int mqdiff, mquant; /* MB quantization */
4269     int ttmb = v->ttfrm; /* MB Transform type */
4270     int mb_has_coeffs = 0; /* last_flag */
4271     int val; /* temp value */
4272     int first_block = 1;
/* per-direction MV deltas and hybrid-prediction flags: [0] forward, [1] backward */
4275     int dmv_x[2], dmv_y[2], pred_flag[2];
4276     int bmvtype = BMV_TYPE_BACKWARD;
4279     mquant = v->pq; /* Lossy initialization */
4282     idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4283     if (idx_mbmode <= 1) { // intra MB
4284         s->mb_intra = v->is_intra[s->mb_x] = 1;
4285         s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4286         s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4287         s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4289         s->current_picture.qscale_table[mb_pos] = mquant;
4290         /* Set DC scale - y and c use the same (not sure if necessary here) */
4291         s->y_dc_scale = s->y_dc_scale_table[mquant];
4292         s->c_dc_scale = s->c_dc_scale_table[mquant];
4293         v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4294         mb_has_coeffs = idx_mbmode & 1;
4296             cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4298         for (i = 0; i < 6; i++) {
4299             s->dc_val[0][s->block_index[i]] = 0;
4301             val = ((cbp >> (5 - i)) & 1);
4302             v->mb_type[0][s->block_index[i]] = s->mb_intra;
4303             v->a_avail = v->c_avail = 0;
4304             if (i == 2 || i == 3 || !s->first_slice_line)
4305                 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4306             if (i == 1 || i == 3 || s->mb_x)
4307                 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4309             vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4310                                    (i & 4) ? v->codingset2 : v->codingset);
4311             if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4313             v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4315                 for (j = 0; j < 64; j++)
4316                     s->block[i][j] <<= 1;
4317             off  = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4318             s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4319             // TODO: yet to perform loop filter
4322         s->mb_intra = v->is_intra[s->mb_x] = 0;
4323         s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4324         for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
/* forward flag: raw bit or decoded forward-MB bitplane */
4326             fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4328             fwd = v->forward_mb_plane[mb_pos];
4329         if (idx_mbmode <= 5) { // 1-MV
4331             dmv_x[0]     = dmv_x[1]     = dmv_y[0]     = dmv_y[1]     = 0;
4332             pred_flag[0] = pred_flag[1] = 0;
4334                 bmvtype = BMV_TYPE_FORWARD;
4336                 bmvtype = decode012(gb);
4339                     bmvtype = BMV_TYPE_BACKWARD;
4342                     bmvtype = BMV_TYPE_DIRECT;
4345                     bmvtype = BMV_TYPE_INTERPOLATED;
4346                     interpmvp = get_bits1(gb);
4349             v->bmvtype = bmvtype;
/* index into the MV arrays by direction: backward -> slot 1 */
4350             if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4351                 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4354                 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4356             if (bmvtype == BMV_TYPE_DIRECT) {
4357                 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4358                 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4360             vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4361             vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4362             mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV mode is forward-only; block pattern selects coded luma MVs */
4365                 bmvtype = BMV_TYPE_FORWARD;
4366             v->bmvtype  = bmvtype;
4367             v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4368             for (i = 0; i < 6; i++) {
4370                     dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4371                     dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4372                     val = ((v->fourmvbp >> (3 - i)) & 1);
4374                         get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4375                                               &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4376                                               &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4378                     vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4379                     vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
4381                     vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4383             mb_has_coeffs = idx_mbmode & 1;
4386             cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4390             s->current_picture.qscale_table[mb_pos] = mquant;
4391             if (!v->ttmbf && cbp) {
4392                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4395         for (i = 0; i < 6; i++) {
4396             s->dc_val[0][s->block_index[i]] = 0;
4398             val = ((cbp >> (5 - i)) & 1);
4399             off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4401                 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4402                                    first_block, s->dest[dst_idx] + off,
4403                                    (i & 4) ? s->uvlinesize : s->linesize,
4404                                    (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4405                 if (!v->ttmbf && ttmb < 8)
4413 /** Decode one B-frame MB (in interlaced frame B picture)
4415 static int vc1_decode_b_mb_intfr(VC1Context *v)
4417 MpegEncContext *s = &v->s;
4418 GetBitContext *gb = &s->gb;
4420 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4421 int cbp = 0; /* cbp decoding stuff */
4422 int mqdiff, mquant; /* MB quantization */
4423 int ttmb = v->ttfrm; /* MB Transform type */
4424 int mvsw = 0; /* motion vector switch */
4425 int mb_has_coeffs = 1; /* last_flag */
4426 int dmv_x, dmv_y; /* Differential MV components */
4427 int val; /* temp value */
4428 int first_block = 1;
4430 int skipped, direct, twomv = 0;
4431 int block_cbp = 0, pat, block_tt = 0;
4432 int idx_mbmode = 0, mvbp;
4433 int stride_y, fieldtx;
4434 int bmvtype = BMV_TYPE_BACKWARD;
4437 mquant = v->pq; /* Lossy initialization */
4440 skipped = get_bits1(gb);
4442 skipped = v->s.mbskip_table[mb_pos];
4445 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
4446 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
4448 v->blk_mv_type[s->block_index[0]] = 1;
4449 v->blk_mv_type[s->block_index[1]] = 1;
4450 v->blk_mv_type[s->block_index[2]] = 1;
4451 v->blk_mv_type[s->block_index[3]] = 1;
4453 v->blk_mv_type[s->block_index[0]] = 0;
4454 v->blk_mv_type[s->block_index[1]] = 0;
4455 v->blk_mv_type[s->block_index[2]] = 0;
4456 v->blk_mv_type[s->block_index[3]] = 0;
4461 direct = get_bits1(gb);
4463 direct = v->direct_mb_plane[mb_pos];
4466 s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
4467 s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
4468 s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
4469 s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
4472 s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
4473 s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
4474 s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
4475 s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
4477 for (i = 1; i < 4; i += 2) {
4478 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
4479 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
4480 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
4481 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
4484 for (i = 1; i < 4; i++) {
4485 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
4486 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
4487 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
4488 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
4493 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
4494 for (i = 0; i < 4; i++) {
4495 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
4496 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
4497 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
4498 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
4500 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4501 s->mb_intra = v->is_intra[s->mb_x] = 1;
4502 for (i = 0; i < 6; i++)
4503 v->mb_type[0][s->block_index[i]] = 1;
4504 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
4505 mb_has_coeffs = get_bits1(gb);
4507 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4508 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4510 s->current_picture.qscale_table[mb_pos] = mquant;
4511 /* Set DC scale - y and c use the same (not sure if necessary here) */
4512 s->y_dc_scale = s->y_dc_scale_table[mquant];
4513 s->c_dc_scale = s->c_dc_scale_table[mquant];
4515 for (i = 0; i < 6; i++) {
4516 s->dc_val[0][s->block_index[i]] = 0;
4518 val = ((cbp >> (5 - i)) & 1);
4519 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4520 v->a_avail = v->c_avail = 0;
4521 if (i == 2 || i == 3 || !s->first_slice_line)
4522 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4523 if (i == 1 || i == 3 || s->mb_x)
4524 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4526 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4527 (i & 4) ? v->codingset2 : v->codingset);
4528 if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
4530 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4532 stride_y = s->linesize << fieldtx;
4533 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4535 stride_y = s->uvlinesize;
4538 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
4541 s->mb_intra = v->is_intra[s->mb_x] = 0;
4543 if (skipped || !s->mb_intra) {
4544 bmvtype = decode012(gb);
4547 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4550 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4553 bmvtype = BMV_TYPE_INTERPOLATED;
4557 if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
4558 mvsw = get_bits1(gb);
4561 if (!skipped) { // inter MB
4562 mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
4564 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4566 if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
4567 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4568 } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
4569 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
4573 for (i = 0; i < 6; i++)
4574 v->mb_type[0][s->block_index[i]] = 0;
4575 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
4576 /* for all motion vector read MVDATA and motion compensate each block */
4580 for (i = 0; i < 4; i++) {
4581 vc1_mc_4mv_luma(v, i, 0, 0);
4582 vc1_mc_4mv_luma(v, i, 1, 1);
4584 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4585 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4590 } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
4592 for (i = 0; i < 4; i++) {
4595 val = ((mvbp >> (3 - i)) & 1);
4597 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4599 vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4600 vc1_mc_4mv_luma(v, j, dir, dir);
4601 vc1_mc_4mv_luma(v, j+1, dir, dir);
4604 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4605 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4606 } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
4610 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4612 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4617 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4619 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4622 dir = bmvtype == BMV_TYPE_BACKWARD;
4629 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4630 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4634 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4635 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
4638 for (i = 0; i < 2; i++) {
4639 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4640 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4641 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4642 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4645 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4646 vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4649 vc1_mc_4mv_luma(v, 0, dir, 0);
4650 vc1_mc_4mv_luma(v, 1, dir, 0);
4651 vc1_mc_4mv_luma(v, 2, dir2, 0);
4652 vc1_mc_4mv_luma(v, 3, dir2, 0);
4653 vc1_mc_4mv_chroma4(v, dir, dir2, 0);
4655 dir = bmvtype == BMV_TYPE_BACKWARD;
4657 mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
4660 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4662 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4663 v->blk_mv_type[s->block_index[0]] = 1;
4664 v->blk_mv_type[s->block_index[1]] = 1;
4665 v->blk_mv_type[s->block_index[2]] = 1;
4666 v->blk_mv_type[s->block_index[3]] = 1;
4667 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4668 for (i = 0; i < 2; i++) {
4669 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4670 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4676 GET_MQUANT(); // p. 227
4677 s->current_picture.qscale_table[mb_pos] = mquant;
4678 if (!v->ttmbf && cbp)
4679 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4680 for (i = 0; i < 6; i++) {
4681 s->dc_val[0][s->block_index[i]] = 0;
4683 val = ((cbp >> (5 - i)) & 1);
4685 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4687 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4689 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4690 first_block, s->dest[dst_idx] + off,
4691 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4692 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4693 block_cbp |= pat << (i << 2);
4694 if (!v->ttmbf && ttmb < 8)
4702 for (i = 0; i < 6; i++) {
4703 v->mb_type[0][s->block_index[i]] = 0;
4704 s->dc_val[0][s->block_index[i]] = 0;
4706 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
4707 s->current_picture.qscale_table[mb_pos] = 0;
4708 v->blk_mv_type[s->block_index[0]] = 0;
4709 v->blk_mv_type[s->block_index[1]] = 0;
4710 v->blk_mv_type[s->block_index[2]] = 0;
4711 v->blk_mv_type[s->block_index[3]] = 0;
4714 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4715 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4716 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4718 dir = bmvtype == BMV_TYPE_BACKWARD;
4719 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4724 for (i = 0; i < 2; i++) {
4725 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4726 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4727 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4728 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4731 v->blk_mv_type[s->block_index[0]] = 1;
4732 v->blk_mv_type[s->block_index[1]] = 1;
4733 v->blk_mv_type[s->block_index[2]] = 1;
4734 v->blk_mv_type[s->block_index[3]] = 1;
4735 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4736 for (i = 0; i < 2; i++) {
4737 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4738 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4745 if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
4750 if (s->mb_x == s->mb_width - 1)
4751 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4752 v->cbp[s->mb_x] = block_cbp;
4753 v->ttblk[s->mb_x] = block_tt;
/** Decode blocks of I-frame (simple/main profile).
 *  Walks every macroblock of the picture, decodes the six 8x8 blocks
 *  (4 luma + 2 chroma), applies the inverse transform, writes pixels,
 *  and runs the overlap smoothing / loop filter where enabled.
 */
static void vc1_decode_i_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;

    /* select codingmode used for VLC tables selection */
    /* Luma AC coding set: picked from y_ac_table_index; for index 0 the
     * choice additionally depends on pqindex (low vs high rate). */
    switch (v->y_ac_table_index) {
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        v->codingset = CS_HIGH_MOT_INTRA;
        v->codingset = CS_MID_RATE_INTRA;

    /* Chroma AC coding set, same scheme driven by c_ac_table_index. */
    switch (v->c_ac_table_index) {
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        v->codingset2 = CS_HIGH_MOT_INTER;
        v->codingset2 = CS_MID_RATE_INTER;

    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[v->pq];
    s->c_dc_scale = s->c_dc_scale_table[v->pq];

    s->mb_x = s->mb_y = 0;
    s->first_slice_line = 1;
    for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
        init_block_index(v);
        for (; s->mb_x < v->end_mb_x; s->mb_x++) {
            ff_update_block_index(s);
            /* Destination pointers for the 4 luma 8x8 blocks (2x2 layout)
             * and the 2 chroma blocks. */
            dst[0] = s->dest[0];
            dst[1] = dst[0] + 8;
            dst[2] = s->dest[0] + s->linesize * 8;
            dst[3] = dst[2] + 8;
            dst[4] = s->dest[1];
            dst[5] = s->dest[2];
            s->dsp.clear_blocks(s->block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_width;
            s->current_picture.mb_type[mb_pos]                     = MB_TYPE_INTRA;
            s->current_picture.qscale_table[mb_pos]                = v->pq;
            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;

            // do actual MB decoding and displaying
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            v->s.ac_pred = get_bits1(&v->s.gb);

            for (k = 0; k < 6; k++) {
                /* Coded-block flag from the CBPCY code (MSB = block 0). */
                val = ((cbp >> (5 - k)) & 1);

                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    cbp |= val << (5 - k);

                vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
                /* Skip chroma reconstruction in grayscale-only mode. */
                if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
                v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
                if (v->pq >= 9 && v->overlap) {
                    /* Overlap smoothing path: coefficients are doubled and
                     * written as signed samples (smoothing runs afterwards). */
                    for (j = 0; j < 64; j++)
                        s->block[k][j] <<= 1;
                    s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
                    /* Non-overlap path: remove the +64 rounding bias, double,
                     * and write as unsigned clamped samples. */
                    for (j = 0; j < 64; j++)
                        s->block[k][j] = (s->block[k][j] - 64) << 1;
                    s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);

            if (v->pq >= 9 && v->overlap) {
                /* Horizontal overlap with the MB to the left (luma top/bottom
                 * halves, then chroma unless grayscale). */
                    v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
                    v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
                        v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
                /* Overlap between the two luma block columns inside this MB. */
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
                if (!s->first_slice_line) {
                    /* Vertical overlap with the MB row above. */
                    v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
                    v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
                        v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
                /* Vertical overlap between the two luma block rows of this MB. */
                v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
            if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);

            /* Bitstream sanity check: bail out of the slice if we have
             * consumed more bits than the picture header announced. */
            if (get_bits_count(&s->gb) > v->bits) {
                ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
        /* With the in-loop filter, row output is delayed by one MB row. */
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);

    /* This is intentionally mb_height and not end_mb_y - unlike in advanced
     * profile, these only differ when decoding MSS2 rectangles. */
    ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
/** Decode blocks of I-frame for advanced profile.
 *  Unlike the simple/main path, MB decode and pixel output are decoupled:
 *  blocks are decoded into a sliding window (v->block), smoothed, and
 *  written one MB later so the overlap/loop filters can see neighbors.
 */
static void vc1_decode_i_blocks_adv(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;

    /* select codingmode used for VLC tables selection */
    /* Luma AC coding set from y_ac_table_index (index 0 also depends on
     * pqindex, low vs high rate). */
    switch (v->y_ac_table_index) {
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        v->codingset = CS_HIGH_MOT_INTRA;
        v->codingset = CS_MID_RATE_INTRA;

    /* Chroma AC coding set from c_ac_table_index. */
    switch (v->c_ac_table_index) {
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        v->codingset2 = CS_HIGH_MOT_INTER;
        v->codingset2 = CS_MID_RATE_INTER;

    s->mb_x             = s->mb_y = 0;
    s->first_slice_line = 1;
    s->mb_y             = s->start_mb_y;
    if (s->start_mb_y) {
        /* Slice does not start at the top: clear the coded-block predictor
         * row above so DC/CBP prediction does not leak across slices. */
        init_block_index(v);
        memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
               (1 + s->b8_stride) * sizeof(*s->coded_block));
    for (; s->mb_y < s->end_mb_y; s->mb_y++) {
        init_block_index(v);
        for (;s->mb_x < s->mb_width; s->mb_x++) {
            /* Decode into the current slot of the delayed-output window. */
            int16_t (*block)[64] = v->block[v->cur_blk_idx];
            ff_update_block_index(s);
            s->dsp.clear_blocks(block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_stride;
            s->current_picture.mb_type[mb_pos + v->mb_off]                         = MB_TYPE_INTRA;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;

            // do actual MB decoding and displaying
            /* Raw (non-bitplane-coded) per-MB flags are read inline here. */
            if (v->fieldtx_is_raw)
                v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            if ( v->acpred_is_raw)
                v->s.ac_pred = get_bits1(&v->s.gb);
                v->s.ac_pred = v->acpred_plane[mb_pos];

            if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
                v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);

            s->current_picture.qscale_table[mb_pos] = mquant;
            /* Set DC scale - y and c use the same */
            s->y_dc_scale = s->y_dc_scale_table[mquant];
            s->c_dc_scale = s->c_dc_scale_table[mquant];

            for (k = 0; k < 6; k++) {
                /* Coded flag for block k from the CBPCY code (MSB = block 0). */
                val = ((cbp >> (5 - k)) & 1);
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    cbp |= val << (5 - k);

                /* Neighbor availability for AC/DC prediction: above is valid
                 * off the first slice line or for the bottom luma blocks,
                 * left is valid off column 0 or for the right luma blocks. */
                v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
                v->c_avail = !!s->mb_x || (k == 1 || k == 3);

                vc1_decode_i_block_adv(v, block[k], k, val,
                                       (k < 4) ? v->codingset : v->codingset2, mquant);

                /* Skip chroma reconstruction in grayscale-only mode. */
                if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
                v->vc1dsp.vc1_inv_trans_8x8(block[k]);

            /* Output is one MB behind: smooth, clamp-write, then (delayed)
             * loop-filter the previously decoded macroblock. */
            vc1_smooth_overlap_filter_iblk(v);
            vc1_put_signed_blocks_clamped(v);
            if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);

            if (get_bits_count(&s->gb) > v->bits) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
        /* With the loop filter, row output lags one MB row behind decode. */
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
            ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
        s->first_slice_line = 0;

    /* raw bottom MB row */
    /* Flush the delayed-output pipeline for the last decoded MB row. */
        init_block_index(v);
        for (;s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);
            vc1_put_signed_blocks_clamped(v);
            if (v->s.loop_filter)
                vc1_loop_filter_iblk_delayed(v, v->pq);
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/** Decode all macroblocks of a P-frame slice.
 *  Dispatches per-MB decoding by frame coding mode (progressive,
 *  interlaced field, interlaced frame) and applies the in-loop
 *  deblocking filter one MB row behind decode.
 */
static void vc1_decode_p_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int apply_loop_filter;

    /* select codingmode used for VLC tables selection */
    /* NOTE(review): both switches key on c_ac_table_index here (the luma
     * set does not use y_ac_table_index as the I-frame path does) —
     * presumably intentional for P-pictures; verify against the spec. */
    switch (v->c_ac_table_index) {
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        v->codingset = CS_HIGH_MOT_INTRA;
        v->codingset = CS_MID_RATE_INTRA;

    switch (v->c_ac_table_index) {
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        v->codingset2 = CS_HIGH_MOT_INTER;
        v->codingset2 = CS_MID_RATE_INTER;

    /* Loop filtering is only done here for progressive pictures and when
     * the user has not asked to skip it. */
    apply_loop_filter   = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
                          v->fcm == PROGRESSIVE;
    s->first_slice_line = 1;
    memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);

            /* Dispatch on the frame coding mode. */
            if (v->fcm == ILACE_FIELD)
                vc1_decode_p_mb_intfi(v);
            else if (v->fcm == ILACE_FRAME)
                vc1_decode_p_mb_intfr(v);
            else vc1_decode_p_mb(v);
            /* Filter lags by one row so both sides of each edge exist. */
            if (s->mb_y != s->start_mb_y && apply_loop_filter)
                vc1_apply_p_loop_filter(v);
            if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                       get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
        /* Roll the per-row side-info buffers: current row becomes the
         * "previous row" for the next iteration's prediction/filtering. */
        memmove(v->cbp_base,      v->cbp,      sizeof(v->cbp_base[0])      * s->mb_stride);
        memmove(v->ttblk_base,    v->ttblk,    sizeof(v->ttblk_base[0])    * s->mb_stride);
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
        memmove(v->luma_mv_base,  v->luma_mv,  sizeof(v->luma_mv_base[0])  * s->mb_stride);
        if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    if (apply_loop_filter) {
        /* Filter the final (still unfiltered) MB row. */
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);
            vc1_apply_p_loop_filter(v);
    if (s->end_mb_y >= s->start_mb_y)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/** Decode all macroblocks of a B-frame slice.
 *  Dispatches per-MB decoding by frame coding mode; like the I-frame
 *  path, row output is delayed by one row when the loop filter runs.
 */
static void vc1_decode_b_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;

    /* select codingmode used for VLC tables selection */
    /* Both coding sets key on c_ac_table_index, matching the P path. */
    switch (v->c_ac_table_index) {
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        v->codingset = CS_HIGH_MOT_INTRA;
        v->codingset = CS_MID_RATE_INTRA;

    switch (v->c_ac_table_index) {
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        v->codingset2 = CS_HIGH_MOT_INTER;
        v->codingset2 = CS_MID_RATE_INTER;

    s->first_slice_line = 1;
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);

            /* Dispatch on the frame coding mode. */
            if (v->fcm == ILACE_FIELD)
                vc1_decode_b_mb_intfi(v);
            else if (v->fcm == ILACE_FRAME)
                vc1_decode_b_mb_intfr(v);
            if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                       get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
            if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
        /* With the loop filter, row output lags one MB row behind decode. */
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/** Handle a fully skipped P-frame: copy the previous picture verbatim.
 *  Requires a valid reference; each MB row of luma (16 lines) and chroma
 *  (8 lines each) is memcpy'd from last_picture into the current output.
 */
static void vc1_decode_skip_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;

    /* No reference picture available — nothing to copy from. */
    if (!v->s.last_picture.f.data[0])

    ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
    s->first_slice_line = 1;
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        init_block_index(v);
        ff_update_block_index(s);
        /* Copy one full MB row per plane straight from the reference. */
        memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize,   s->linesize   * 16);
        memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y *  8 * s->uvlinesize, s->uvlinesize *  8);
        memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y *  8 * s->uvlinesize, s->uvlinesize *  8);
        ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        s->first_slice_line = 0;
    /* The copied frame stands in for a decoded P-picture. */
    s->pict_type = AV_PICTURE_TYPE_P;
/** Top-level block-decoding entry point: dispatch by picture type.
 *  I/BI pictures go through the (advanced or simple/main) intra path,
 *  P pictures through the inter path or the skipped-frame copy, and
 *  B pictures through the bidirectional path.
 */
void ff_vc1_decode_blocks(VC1Context *v)
{

    v->s.esc3_level_length = 0;
        /* X8-coded intra picture: handled entirely by the IntraX8 decoder. */
        ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
        /* Reset the delayed-output block window used by the adv. I path. */
        v->left_blk_idx    = -1;
        v->topleft_blk_idx = 1;
        switch (v->s.pict_type) {
        case AV_PICTURE_TYPE_I:
            if (v->profile == PROFILE_ADVANCED)
                vc1_decode_i_blocks_adv(v);
                vc1_decode_i_blocks(v);
        case AV_PICTURE_TYPE_P:
            if (v->p_frame_skipped)
                vc1_decode_skip_blocks(v);
                vc1_decode_p_blocks(v);
        case AV_PICTURE_TYPE_B:
            /* BI pictures reuse the intra path; true B pictures use
             * vc1_decode_b_blocks(). */
            if (v->profile == PROFILE_ADVANCED)
                vc1_decode_i_blocks_adv(v);
                vc1_decode_i_blocks(v);
                vc1_decode_b_blocks(v);
5231 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5235 * Transform coefficients for both sprites in 16.16 fixed point format,
5236 * in the order they appear in the bitstream:
5238 * rotation 1 (unused)
5240 * rotation 2 (unused)
5247 int effect_type, effect_flag;
5248 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
5249 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
5252 static inline int get_fp_val(GetBitContext* gb)
5254 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
/** Parse one sprite affine-transform coefficient set from the bitstream.
 *  A 2-bit selector chooses how many of the scale/translation terms
 *  (c[0], c[2], c[4], and for the full form c[1], c[3]) are explicitly
 *  coded; c[5] and c[6] (second translation / opacity terms) follow.
 *  All values are 16.16 fixed point via get_fp_val().
 */
static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
{

    switch (get_bits(gb, 2)) {
        /* translation-only form */
        c[2] = get_fp_val(gb);
        /* uniform scale + translation: one scale shared by both axes */
        c[0] = c[4] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        /* independent per-axis scale + translation */
        c[0] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        c[4] = get_fp_val(gb);
        /* full form including rotation terms c[1]/c[3] (unused downstream) */
        c[0] = get_fp_val(gb);
        c[1] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        c[3] = get_fp_val(gb);
        c[4] = get_fp_val(gb);
    /* vertical translation and opacity, coded for every form */
        c[5] = get_fp_val(gb);
        c[6] = get_fp_val(gb);
/** Parse the sprite headers for a WMV3Image/VC1Image picture.
 *  Reads one or two sprite transforms plus optional "effect" parameter
 *  sets into *sd, logging everything at DEBUG level, and warns when the
 *  buffer is over- or under-consumed.
 */
static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
{
    AVCodecContext *avctx = v->s.avctx;

    for (sprite = 0; sprite <= v->two_sprites; sprite++) {
        vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
        /* Rotation (c[1]/c[3]) is parsed but not implemented downstream. */
        if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
            avpriv_request_sample(avctx, "Non-zero rotation coefficients");
        av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
        /* Print each 16.16 coefficient as integer.milli parts. */
        for (i = 0; i < 7; i++)
            av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
                   sd->coefs[sprite][i] / (1<<16),
                   (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
        av_log(avctx, AV_LOG_DEBUG, "\n");

    /* NOTE: assignment inside the condition is intentional — a non-zero
     * effect type enables the effect-parameter parsing below. */
    if (sd->effect_type = get_bits_long(gb, 30)) {
        switch (sd->effect_pcount1 = get_bits(gb, 4)) {
            /* 7 or 14 parameters: one or two whole transform sets */
            vc1_sprite_parse_transform(gb, sd->effect_params1);
            vc1_sprite_parse_transform(gb, sd->effect_params1);
            vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
            /* otherwise: a plain list of fixed-point values */
            for (i = 0; i < sd->effect_pcount1; i++)
                sd->effect_params1[i] = get_fp_val(gb);
        if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
            // effect 13 is simple alpha blending and matches the opacity above
            av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
            for (i = 0; i < sd->effect_pcount1; i++)
                av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
                       sd->effect_params1[i] / (1 << 16),
                       (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
            av_log(avctx, AV_LOG_DEBUG, "\n");

        sd->effect_pcount2 = get_bits(gb, 16);
        /* effect_params2 has room for 10 entries — reject larger counts. */
        if (sd->effect_pcount2 > 10) {
            av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
        } else if (sd->effect_pcount2) {
            av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
            while (++i < sd->effect_pcount2) {
                sd->effect_params2[i] = get_fp_val(gb);
                av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
                       sd->effect_params2[i] / (1 << 16),
                       (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
            av_log(avctx, AV_LOG_DEBUG, "\n");
    /* NOTE: assignment inside the condition is intentional. */
    if (sd->effect_flag = get_bits1(gb))
        av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");

    /* Consumption sanity checks; WMV3IMAGE carries 64 trailing pad bits. */
    if (get_bits_count(gb) >= gb->size_in_bits +
       (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
        av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
    if (get_bits_count(gb) < gb->size_in_bits - 8)
        av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
/** Render one or two sprites into the output frame.
 *  For each plane and output row, horizontally scales the needed source
 *  row(s) (with a two-row cache in v->sr_rows), then vertically
 *  interpolates and — for two sprites — alpha-blends them into dst.
 */
static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
{
    int i, plane, row, sprite;
    /* Which source line each cached scaled row holds; -1 = empty. */
    int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
    uint8_t* src_h[2][2];
    int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
    MpegEncContext *s = &v->s;

    for (i = 0; i < 2; i++) {
        /* Clamp offsets into the sprite and bound the per-pixel advance so
         * sampling never walks past the sprite's right/bottom edge. */
        xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
        xadv[i] = sd->coefs[i][0];
        if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
            xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
        yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
        yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
    /* Blend factor for the second sprite, from its opacity coefficient. */
    alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);

    for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
        int width = v->output_width>>!!plane;

        for (row = 0; row < v->output_height>>!!plane; row++) {
            uint8_t *dst = v->sprite_output_frame->data[plane] +
                           v->sprite_output_frame->linesize[plane] * row;

            for (sprite = 0; sprite <= v->two_sprites; sprite++) {
                uint8_t *iplane = s->current_picture.f.data[plane];
                int      iline  = s->current_picture.f.linesize[plane];
                /* 16.16 source y coordinate; integer line + fractional part. */
                int      ycoord = yoff[sprite] + yadv[sprite] * row;
                int      yline  = ycoord >> 16;
                ysub[sprite] = ycoord & 0xFFFF;
                    /* Sprite 1 comes from the previous (reference) picture. */
                    iplane = s->last_picture.f.data[plane];
                    iline  = s->last_picture.f.linesize[plane];
                next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
                if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
                    /* 1:1 horizontal mapping — read source rows directly. */
                        src_h[sprite][0] = iplane + (xoff[sprite] >> 16) +         yline * iline;
                        src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
                    /* Horizontal scaling needed: (re)fill the two-row cache,
                     * reusing/swapping rows already scaled for prior output. */
                    if (sr_cache[sprite][0] != yline) {
                        if (sr_cache[sprite][1] == yline) {
                            FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
                            FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
                            v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
                            sr_cache[sprite][0] = yline;
                    /* Second row only needed when vertically interpolating. */
                    if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
                        v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
                                           iplane + next_line, xoff[sprite],
                                           xadv[sprite], width);
                        sr_cache[sprite][1] = yline + 1;
                    src_h[sprite][0] = v->sr_rows[sprite][0];
                    src_h[sprite][1] = v->sr_rows[sprite][1];

            if (!v->two_sprites) {
                /* Single sprite: vertical interpolation or straight copy. */
                    v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
                    memcpy(dst, src_h[0][0], width);
                /* Two sprites: pick the blend kernel matching which of the
                 * two needs vertical interpolation. */
                if (ysub[0] && ysub[1]) {
                    v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
                                                       src_h[1][0], src_h[1][1], ysub[1], alpha, width);
                } else if (ysub[0]) {
                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
                                                       src_h[1][0], alpha, width);
                } else if (ysub[1]) {
                    /* Sprite order swapped, so invert the blend factor. */
                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
                                                       src_h[0][0], (1<<16)-1-alpha, width);
                    v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);

    for (i = 0; i < 2; i++) {
/** Decode a sprite picture: parse sprite headers, validate the needed
 *  reference pictures, allocate the output frame, and render.
 *  Returns 0 on success or a negative error code.
 */
static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
{
    MpegEncContext *s     = &v->s;
    AVCodecContext *avctx = s->avctx;

    vc1_parse_sprites(v, gb, &sd);

    /* The current picture must carry sprite data. */
    if (!s->current_picture.f.data[0]) {
        av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");

    /* Two-sprite mode additionally needs the previous picture. */
    if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
        av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");

    av_frame_unref(v->sprite_output_frame);
    if ((ret = ff_get_buffer(avctx, v->sprite_output_frame, 0)) < 0)

    vc1_draw_sprites(v, &sd);
/** Flush callback for the sprite (image) codecs: blank the pending frame. */
static void vc1_sprite_flush(AVCodecContext *avctx)
{
    VC1Context *v     = avctx->priv_data;
    MpegEncContext *s = &v->s;
    AVFrame *f = &s->current_picture.f;

    /* Windows Media Image codecs have a convergence interval of two keyframes.
       Since we can't enforce it, clear to black the missing sprite. This is
       wrong but it looks better than doing nothing. */
        for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
            for (i = 0; i < v->sprite_height>>!!plane; i++)
                /* luma → 0 (black), chroma → 128 (neutral) */
                memset(f->data[plane] + i * f->linesize[plane],
                       plane ? 128 : 0, f->linesize[plane]);
/* Allocate the per-picture bitplanes and block-level side-data tables used
 * by the VC-1 decoder.  Must be called once the macroblock geometry
 * (s->mb_stride / s->mb_height / s->b8_stride) is known.
 * Returns 0 on success, or AVERROR(ENOMEM) after freeing what was allocated. */
5503 av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
5505 MpegEncContext *s = &v->s;
5508 /* Allocate mb bitplanes */
5509 v->mv_type_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5510 v->direct_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5511 v->forward_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5512 v->fieldtx_plane = av_mallocz(s->mb_stride * s->mb_height);
5513 v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5514 v->over_flags_plane = av_malloc (s->mb_stride * s->mb_height);
/* Per-MB-row working buffers; the *_base pointers own the memory and the
 * working pointers (cbp, ttblk, is_intra, luma_mv) are offset by one
 * mb_stride so the previous row can be addressed with negative indices. */
5516 v->n_allocated_blks = s->mb_width + 2;
5517 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
5518 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5519 v->cbp = v->cbp_base + s->mb_stride;
5520 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5521 v->ttblk = v->ttblk_base + s->mb_stride;
5522 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5523 v->is_intra = v->is_intra_base + s->mb_stride;
5524 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5525 v->luma_mv = v->luma_mv_base + s->mb_stride;
5527 /* allocate block type info in that way so it could be used with s->block_index[] */
5528 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5529 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5530 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5531 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5533 /* allocate memory to store block level MV info */
/* mv_f / mv_f_next hold two planes each (fields [0] and [1]) packed into a
 * single zero-initialized allocation; the working pointers skip the
 * one-row/one-column guard border, mirroring the mb_type layout above. */
5534 v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5535 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5536 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5537 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5538 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5539 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5540 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5541 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5543 /* Init coded blocks info */
5544 if (v->profile == PROFILE_ADVANCED) {
5545 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5547 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5551 ff_intrax8_common_init(&v->x8,s);
/* The sprite decoders need one scratch row per plane/field at the
 * container output width for sprite scaling. */
5553 if (s->avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || s->avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5554 for (i = 0; i < 4; i++)
5555 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width)))
5556 return AVERROR(ENOMEM);
/* Single aggregate NULL check: if any required allocation above failed,
 * release everything that is freed here and report ENOMEM. */
5559 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5560 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5562 av_freep(&v->mv_type_mb_plane);
5563 av_freep(&v->direct_mb_plane);
5564 av_freep(&v->acpred_plane);
5565 av_freep(&v->over_flags_plane);
5566 av_freep(&v->block);
5567 av_freep(&v->cbp_base);
5568 av_freep(&v->ttblk_base);
5569 av_freep(&v->is_intra_base);
5570 av_freep(&v->luma_mv_base);
5571 av_freep(&v->mb_type_base);
5572 return AVERROR(ENOMEM);
/* Build the transposed zigzag scan tables used by VC-1: each entry of the
 * WMV1 scan tables (and the advanced-profile interlaced 8x8 table) has its
 * row and column indices swapped via the 3-bit transpose below. */
5578 av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
5581 for (i = 0; i < 64; i++) {
/* swap the two 3-bit halves of a 0..63 scan position: (row,col) -> (col,row) */
5582 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5583 v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5584 v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5585 v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5586 v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5587 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5593 /** Initialize a VC1/WMV3 decoder
5594 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5595 * @todo TODO: Decipher remaining bits in extra_data
/* AVCodec.init for all VC-1 family decoders.  Parses the sequence (and, for
 * advanced profile, entry-point) headers from extradata, selects the pixel
 * format / hwaccel, and sets up the sprite frame for the *IMAGE variants. */
5597 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5599 VC1Context *v = avctx->priv_data;
5600 MpegEncContext *s = &v->s;
5604 /* save the container output size for WMImage */
5605 v->output_width = avctx->width;
5606 v->output_height = avctx->height;
/* extradata is mandatory: the sequence header lives there */
5608 if (!avctx->extradata_size || !avctx->extradata)
5610 if (!(avctx->flags & CODEC_FLAG_GRAY))
5611 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5613 avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5614 avctx->hwaccel = ff_find_hwaccel(avctx);
5616 avctx->flags |= CODEC_FLAG_EMU_EDGE;
5617 v->s.flags |= CODEC_FLAG_EMU_EDGE;
5619 if ((ret = ff_vc1_init_common(v)) < 0)
5621 // ensure static VLC tables are initialized
5622 if ((ret = ff_msmpeg4_decode_init(avctx)) < 0)
5624 if ((ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
5626 // Hack to ensure the above functions will be called
5627 // again once we know all necessary settings.
5628 // That this is necessary might indicate a bug.
5629 ff_vc1_decode_end(avctx);
5631 ff_h264chroma_init(&v->h264chroma, 8);
5632 ff_vc1dsp_init(&v->vc1dsp);
5634 if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5637 // looks like WMV3 has a sequence header stored in the extradata
5638 // advanced sequence header may be before the first frame
5639 // the last byte of the extradata is a version number, 1 for the
5640 // samples we can decode
5642 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5644 if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0)
/* report leftover / over-read extradata bits for debugging */
5647 count = avctx->extradata_size*8 - get_bits_count(&gb);
5649 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5650 count, get_bits(&gb, count));
5651 } else if (count < 0) {
5652 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5654 } else { // VC1/WVC1/WVP2
/* advanced profile: extradata is a sequence of start-code-delimited,
 * escaped units; both a sequence header and an entry point must be found */
5655 const uint8_t *start = avctx->extradata;
5656 uint8_t *end = avctx->extradata + avctx->extradata_size;
5657 const uint8_t *next;
5658 int size, buf2_size;
5659 uint8_t *buf2 = NULL;
5660 int seq_initialized = 0, ep_initialized = 0;
5662 if (avctx->extradata_size < 16) {
5663 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5667 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5668 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5670 for (; next < end; start = next) {
5671 next = find_next_marker(start + 4, end);
5672 size = next - start - 4;
/* un-escape the unit into buf2, then parse it by start code */
5675 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5676 init_get_bits(&gb, buf2, buf2_size * 8);
5677 switch (AV_RB32(start)) {
5678 case VC1_CODE_SEQHDR:
5679 if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0) {
5683 seq_initialized = 1;
5685 case VC1_CODE_ENTRYPOINT:
5686 if ((ret = ff_vc1_decode_entry_point(avctx, v, &gb)) < 0) {
5695 if (!seq_initialized || !ep_initialized) {
5696 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
/* res_sprite distinguishes WVP2 (VC1IMAGE) sprite streams */
5699 v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5702 v->sprite_output_frame = av_frame_alloc();
5703 if (!v->sprite_output_frame)
5704 return AVERROR(ENOMEM);
5706 avctx->profile = v->profile;
5707 if (v->profile == PROFILE_ADVANCED)
5708 avctx->level = v->level;
5710 avctx->has_b_frames = !!avctx->max_b_frames;
5712 s->mb_width = (avctx->coded_width + 15) >> 4;
5713 s->mb_height = (avctx->coded_height + 15) >> 4;
/* advanced profile and fast-transform streams use transposed scan tables */
5715 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5716 ff_vc1_init_transposed_scantables(v);
5718 memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
/* sprite decoders: decode at the sprite size but present the container size */
5723 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5724 v->sprite_width = avctx->coded_width;
5725 v->sprite_height = avctx->coded_height;
5727 avctx->coded_width = avctx->width = v->output_width;
5728 avctx->coded_height = avctx->height = v->output_height;
5730 // prevent 16.16 overflows
5731 if (v->sprite_width > 1 << 14 ||
5732 v->sprite_height > 1 << 14 ||
5733 v->output_width > 1 << 14 ||
5734 v->output_height > 1 << 14) return -1;
5736 if ((v->sprite_width&1) || (v->sprite_height&1)) {
5737 avpriv_request_sample(avctx, "odd sprites support");
5738 return AVERROR_PATCHWELCOME;
5744 /** Close a VC1/WMV3 decoder
5745 * @warning Initial try at using MpegEncContext stuff
/* AVCodec.close for all VC-1 family decoders: frees every table allocated
 * by ff_vc1_decode_init_alloc_tables(), the sprite state, and tears down
 * the shared MpegEncContext.  Safe to call on a partially initialized
 * context (av_freep/av_frame_free tolerate NULL). */
5747 av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
5749 VC1Context *v = avctx->priv_data;
5752 av_frame_free(&v->sprite_output_frame);
/* sprite scratch rows (only allocated for the *IMAGE decoders) */
5754 for (i = 0; i < 4; i++)
5755 av_freep(&v->sr_rows[i >> 1][i & 1]);
5756 av_freep(&v->hrd_rate);
5757 av_freep(&v->hrd_buffer);
5758 ff_MPV_common_end(&v->s);
5759 av_freep(&v->mv_type_mb_plane);
5760 av_freep(&v->direct_mb_plane);
5761 av_freep(&v->forward_mb_plane);
5762 av_freep(&v->fieldtx_plane);
5763 av_freep(&v->acpred_plane);
5764 av_freep(&v->over_flags_plane);
5765 av_freep(&v->mb_type_base);
5766 av_freep(&v->blk_mv_type_base);
5767 av_freep(&v->mv_f_base);
5768 av_freep(&v->mv_f_next_base);
5769 av_freep(&v->block);
5770 av_freep(&v->cbp_base);
5771 av_freep(&v->ttblk_base);
5772 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5773 av_freep(&v->luma_mv_base);
5774 ff_intrax8_common_end(&v->x8);
5779 /** Decode a VC1/WMV3 frame
5780 * @todo TODO: Handle VC-1 IDUs (Transport level?)
/* AVCodec.decode for all VC-1 family decoders.
 * Splits the packet into start-code-delimited units (advanced profile),
 * un-escapes them, parses the frame/field headers, dispatches to VDPAU /
 * hwaccel / software block decoding, and outputs a frame via *pict /
 * *got_frame.  `slices` accumulates per-slice (and second-field) bitstream
 * copies; `n_slices1` marks the last slice index of the first field. */
5782 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5783 int *got_frame, AVPacket *avpkt)
5785 const uint8_t *buf = avpkt->data;
5786 int buf_size = avpkt->size, n_slices = 0, i, ret;
5787 VC1Context *v = avctx->priv_data;
5788 MpegEncContext *s = &v->s;
5789 AVFrame *pict = data;
5790 uint8_t *buf2 = NULL;
5791 const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
5792 int mb_height, n_slices1=-1;
5797 } *slices = NULL, *tmp;
5799 v->second_field = 0;
5801 if(s->flags & CODEC_FLAG_LOW_DELAY)
5804 /* no supplementary picture */
5805 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5806 /* special case for last picture */
5807 if (s->low_delay == 0 && s->next_picture_ptr) {
5808 if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
5810 s->next_picture_ptr = NULL;
/* VDPAU decoding uses dedicated pixel formats per profile */
5818 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5819 if (v->profile < PROFILE_ADVANCED)
5820 avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
5822 avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
5825 //for advanced profile we may need to parse and unescape data
5826 if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5828 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5830 return AVERROR(ENOMEM);
5832 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5833 const uint8_t *start, *end, *next;
/* walk the packet unit by unit; each unit starts with a 4-byte start code */
5837 for (start = buf, end = buf + buf_size; next < end; start = next) {
5838 next = find_next_marker(start + 4, end);
5839 size = next - start - 4;
5840 if (size <= 0) continue;
5841 switch (AV_RB32(start)) {
5842 case VC1_CODE_FRAME:
5843 if (avctx->hwaccel ||
5844 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5846 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5848 case VC1_CODE_FIELD: {
/* second field of an interlaced frame: stored like a slice starting
 * at the vertical middle of the picture */
5850 if (avctx->hwaccel ||
5851 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5852 buf_start_second_field = start;
5853 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5857 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5858 if (!slices[n_slices].buf)
5860 buf_size3 = vc1_unescape_buffer(start + 4, size,
5861 slices[n_slices].buf);
5862 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5864 /* assuming that the field marker is at the exact middle,
5865 hope it's correct */
5866 slices[n_slices].mby_start = s->mb_height >> 1;
5867 n_slices1 = n_slices - 1; // index of the last slice of the first field
5871 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5872 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5873 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5874 ff_vc1_decode_entry_point(avctx, v, &s->gb);
5876 case VC1_CODE_SLICE: {
/* regular slice: first 9 bits of its header carry the starting MB row */
5878 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5882 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5883 if (!slices[n_slices].buf)
5885 buf_size3 = vc1_unescape_buffer(start + 4, size,
5886 slices[n_slices].buf);
5887 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5889 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5895 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5896 const uint8_t *divider;
5899 divider = find_next_marker(buf, buf + buf_size);
5900 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5901 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5903 } else { // found field marker, unescape second field
5904 if (avctx->hwaccel ||
5905 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5906 buf_start_second_field = divider;
5907 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5911 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5912 if (!slices[n_slices].buf)
5914 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5915 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5917 slices[n_slices].mby_start = s->mb_height >> 1;
5918 n_slices1 = n_slices - 1;
5921 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5923 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5925 init_get_bits(&s->gb, buf2, buf_size2*8);
5927 init_get_bits(&s->gb, buf, buf_size*8);
5929 if (v->res_sprite) {
5930 v->new_sprite = !get_bits1(&s->gb);
5931 v->two_sprites = get_bits1(&s->gb);
5932 /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5933 we're using the sprite compositor. These are intentionally kept separate
5934 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5935 the vc1 one for WVP2 */
5936 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5937 if (v->new_sprite) {
5938 // switch AVCodecContext parameters to those of the sprites
5939 avctx->width = avctx->coded_width = v->sprite_width;
5940 avctx->height = avctx->coded_height = v->sprite_height;
/* re-init the codec context when the coded dimensions changed */
5947 if (s->context_initialized &&
5948 (s->width != avctx->coded_width ||
5949 s->height != avctx->coded_height)) {
5950 ff_vc1_decode_end(avctx);
5953 if (!s->context_initialized) {
5954 if (ff_msmpeg4_decode_init(avctx) < 0)
5956 if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5957 ff_MPV_common_end(s);
5961 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5963 if (v->profile == PROFILE_ADVANCED) {
5964 if(avctx->coded_width<=1 || avctx->coded_height<=1)
5966 s->h_edge_pos = avctx->coded_width;
5967 s->v_edge_pos = avctx->coded_height;
5971 // do parse frame header
5972 v->pic_header_flag = 0;
5973 v->first_pic_header_flag = 1;
5974 if (v->profile < PROFILE_ADVANCED) {
5975 if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5979 if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5983 v->first_pic_header_flag = 0;
5985 if (avctx->debug & FF_DEBUG_PICT_INFO)
5986 av_log(v->s.avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type));
5988 if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5989 && s->pict_type != AV_PICTURE_TYPE_I) {
5990 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5994 if ((s->mb_height >> v->field_mode) == 0) {
5995 av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
5999 // for skipping the frame
6000 s->current_picture.f.pict_type = s->pict_type;
6001 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
6003 /* skip B-frames if we don't have reference frames */
6004 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
/* honor the user's skip_frame discard policy */
6007 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
6008 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
6009 avctx->skip_frame >= AVDISCARD_ALL) {
6013 if (s->next_p_frame_damaged) {
6014 if (s->pict_type == AV_PICTURE_TYPE_B)
6017 s->next_p_frame_damaged = 0;
6020 if (ff_MPV_frame_start(s, avctx) < 0) {
6024 v->s.current_picture_ptr->f.interlaced_frame = (v->fcm != PROGRESSIVE);
6025 v->s.current_picture_ptr->f.top_field_first = v->tff;
6027 // process pulldown flags
6028 s->current_picture_ptr->f.repeat_pict = 0;
6029 // Pulldown flags are only valid when 'broadcast' has been set.
6030 // So ticks_per_frame will be 2
6033 s->current_picture_ptr->f.repeat_pict = 1;
6034 } else if (v->rptfrm) {
6036 s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
6039 s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
6040 s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
/* --- hardware decode paths: VDPAU first, then generic hwaccel --- */
6042 if ((CONFIG_VC1_VDPAU_DECODER)
6043 &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
6044 if (v->field_mode && buf_start_second_field) {
6045 ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start);
6046 ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field);
6048 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
6050 } else if (avctx->hwaccel) {
6051 if (v->field_mode && buf_start_second_field) {
6052 // decode first field
6053 s->picture_structure = PICT_BOTTOM_FIELD - v->tff;
6054 if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
6056 if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
6058 if (avctx->hwaccel->end_frame(avctx) < 0)
6061 // decode second field
6062 s->gb = slices[n_slices1 + 1].gb;
6063 s->picture_structure = PICT_TOP_FIELD + v->tff;
6064 v->second_field = 1;
6065 v->pic_header_flag = 0;
6066 if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
6067 av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed");
6070 v->s.current_picture_ptr->f.pict_type = v->s.pict_type;
6072 if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
6074 if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
6076 if (avctx->hwaccel->end_frame(avctx) < 0)
6079 s->picture_structure = PICT_FRAME;
6080 if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6082 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6084 if (avctx->hwaccel->end_frame(avctx) < 0)
/* --- software decode path --- */
6090 ff_mpeg_er_frame_start(s);
6092 v->bits = buf_size * 8;
6093 v->end_mb_x = s->mb_width;
/* in field mode the picture is decoded field-by-field at doubled
 * linesizes (one field per "frame" of half the height) */
6094 if (v->field_mode) {
6095 s->current_picture.f.linesize[0] <<= 1;
6096 s->current_picture.f.linesize[1] <<= 1;
6097 s->current_picture.f.linesize[2] <<= 1;
6099 s->uvlinesize <<= 1;
6101 mb_height = s->mb_height >> v->field_mode;
6103 av_assert0 (mb_height > 0);
/* i iterates one past the slice count: iteration i decodes from slice
 * i-1's start row up to slice i's start row (or the picture bottom) */
6105 for (i = 0; i <= n_slices; i++) {
6106 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6107 if (v->field_mode <= 0) {
6108 av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6109 "picture boundary (%d >= %d)\n", i,
6110 slices[i - 1].mby_start, mb_height);
/* crossing into the second field: switch the block/MB offsets */
6113 v->second_field = 1;
6114 v->blocks_off = s->b8_stride * (s->mb_height&~1);
6115 v->mb_off = s->mb_stride * s->mb_height >> 1;
6117 v->second_field = 0;
6122 v->pic_header_flag = 0;
6123 if (v->field_mode && i == n_slices1 + 2) {
6124 if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6125 av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6126 if (avctx->err_recognition & AV_EF_EXPLODE)
6130 } else if (get_bits1(&s->gb)) {
6131 v->pic_header_flag = 1;
6132 if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6133 av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6134 if (avctx->err_recognition & AV_EF_EXPLODE)
6142 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6143 if (!v->field_mode || v->second_field)
6144 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6146 if (i >= n_slices) {
6147 av_log(v->s.avctx, AV_LOG_ERROR, "first field slice count too large\n");
6150 s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6152 if (s->end_mb_y <= s->start_mb_y) {
6153 av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y);
6156 if (!v->p_frame_skipped && s->pict_type != AV_PICTURE_TYPE_I && !v->cbpcy_vlc) {
6157 av_log(v->s.avctx, AV_LOG_ERROR, "missing cbpcy_vlc\n");
6160 ff_vc1_decode_blocks(v);
6162 s->gb = slices[i].gb;
/* undo the field-mode linesize doubling and rotate the per-field
 * motion-vector planes for the next reference frame */
6164 if (v->field_mode) {
6165 v->second_field = 0;
6166 s->current_picture.f.linesize[0] >>= 1;
6167 s->current_picture.f.linesize[1] >>= 1;
6168 s->current_picture.f.linesize[2] >>= 1;
6170 s->uvlinesize >>= 1;
6171 if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) {
6172 FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
6173 FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
6176 av_dlog(s->avctx, "Consumed %i/%i bits\n",
6177 get_bits_count(&s->gb), s->gb.size_in_bits);
6178 // if (get_bits_count(&s->gb) > buf_size * 8)
6180 if(s->er.error_occurred && s->pict_type == AV_PICTURE_TYPE_B)
6183 ff_er_frame_end(&s->er);
6186 ff_MPV_frame_end(s);
/* sprite decoders: restore container dimensions and run the compositor */
6188 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6190 avctx->width = avctx->coded_width = v->output_width;
6191 avctx->height = avctx->coded_height = v->output_height;
6192 if (avctx->skip_frame >= AVDISCARD_NONREF)
6194 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6195 if (vc1_decode_sprites(v, &s->gb))
6198 if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
/* normal output: B-frames/low-delay return the current picture,
 * otherwise the (delayed) previous reference is returned */
6202 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6203 if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
6205 ff_print_debug_info(s, s->current_picture_ptr, pict);
6207 } else if (s->last_picture_ptr != NULL) {
6208 if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
6210 ff_print_debug_info(s, s->last_picture_ptr, pict);
/* success path cleanup: release the per-slice bitstream copies */
6217 for (i = 0; i < n_slices; i++)
6218 av_free(slices[i].buf);
/* error path cleanup mirrors the success path */
6224 for (i = 0; i < n_slices; i++)
6225 av_free(slices[i].buf);
/* Profile name table reported through AVCodec.profiles for every VC-1
 * family decoder below; terminated by FF_PROFILE_UNKNOWN. */
6231 static const AVProfile profiles[] = {
6232 { FF_PROFILE_VC1_SIMPLE, "Simple" },
6233 { FF_PROFILE_VC1_MAIN, "Main" },
6234 { FF_PROFILE_VC1_COMPLEX, "Complex" },
6235 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6236 { FF_PROFILE_UNKNOWN },
/* Pixel formats offered by the software VC-1/WMV3 decoders; hardware
 * acceleration surface formats are listed so get_format() can select them. */
6239 static const enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[] = {
6241 AV_PIX_FMT_DXVA2_VLD,
6244 AV_PIX_FMT_VAAPI_VLD,
/* Software SMPTE VC-1 decoder registration. */
6253 AVCodec ff_vc1_decoder = {
6255 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6256 .type = AVMEDIA_TYPE_VIDEO,
6257 .id = AV_CODEC_ID_VC1,
6258 .priv_data_size = sizeof(VC1Context),
6259 .init = vc1_decode_init,
6260 .close = ff_vc1_decode_end,
6261 .decode = vc1_decode_frame,
6262 .flush = ff_mpeg_flush,
6263 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6264 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6265 .profiles = NULL_IF_CONFIG_SMALL(profiles)
/* Software WMV3 (Windows Media Video 9) decoder registration; shares all
 * callbacks with the VC-1 decoder above. */
6268 #if CONFIG_WMV3_DECODER
6269 AVCodec ff_wmv3_decoder = {
6271 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
6272 .type = AVMEDIA_TYPE_VIDEO,
6273 .id = AV_CODEC_ID_WMV3,
6274 .priv_data_size = sizeof(VC1Context),
6275 .init = vc1_decode_init,
6276 .close = ff_vc1_decode_end,
6277 .decode = vc1_decode_frame,
6278 .flush = ff_mpeg_flush,
6279 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6280 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6281 .profiles = NULL_IF_CONFIG_SMALL(profiles)
/* VDPAU-accelerated WMV3 decoder registration; outputs AV_PIX_FMT_VDPAU_WMV3
 * surfaces and decodes via the VDPAU path in vc1_decode_frame(). */
6285 #if CONFIG_WMV3_VDPAU_DECODER
6286 AVCodec ff_wmv3_vdpau_decoder = {
6287 .name = "wmv3_vdpau",
6288 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
6289 .type = AVMEDIA_TYPE_VIDEO,
6290 .id = AV_CODEC_ID_WMV3,
6291 .priv_data_size = sizeof(VC1Context),
6292 .init = vc1_decode_init,
6293 .close = ff_vc1_decode_end,
6294 .decode = vc1_decode_frame,
6295 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
6296 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
6297 .profiles = NULL_IF_CONFIG_SMALL(profiles)
/* VDPAU-accelerated VC-1 decoder registration; outputs AV_PIX_FMT_VDPAU_VC1
 * surfaces and decodes via the VDPAU path in vc1_decode_frame(). */
6301 #if CONFIG_VC1_VDPAU_DECODER
6302 AVCodec ff_vc1_vdpau_decoder = {
6303 .name = "vc1_vdpau",
6304 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
6305 .type = AVMEDIA_TYPE_VIDEO,
6306 .id = AV_CODEC_ID_VC1,
6307 .priv_data_size = sizeof(VC1Context),
6308 .init = vc1_decode_init,
6309 .close = ff_vc1_decode_end,
6310 .decode = vc1_decode_frame,
6311 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
6312 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
6313 .profiles = NULL_IF_CONFIG_SMALL(profiles)
/* WMV3 Image (WMVP) sprite decoder registration; uses the sprite compositor
 * and the blanking flush callback instead of ff_mpeg_flush. */
6317 #if CONFIG_WMV3IMAGE_DECODER
6318 AVCodec ff_wmv3image_decoder = {
6319 .name = "wmv3image",
6320 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
6321 .type = AVMEDIA_TYPE_VIDEO,
6322 .id = AV_CODEC_ID_WMV3IMAGE,
6323 .priv_data_size = sizeof(VC1Context),
6324 .init = vc1_decode_init,
6325 .close = ff_vc1_decode_end,
6326 .decode = vc1_decode_frame,
6327 .capabilities = CODEC_CAP_DR1,
6328 .flush = vc1_sprite_flush,
6329 .pix_fmts = ff_pixfmt_list_420
6333 #if CONFIG_VC1IMAGE_DECODER
6334 AVCodec ff_vc1image_decoder = {
6336 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
6337 .type = AVMEDIA_TYPE_VIDEO,
6338 .id = AV_CODEC_ID_VC1IMAGE,
6339 .priv_data_size = sizeof(VC1Context),
6340 .init = vc1_decode_init,
6341 .close = ff_vc1_decode_end,
6342 .decode = vc1_decode_frame,
6343 .capabilities = CODEC_CAP_DR1,
6344 .flush = vc1_sprite_flush,
6345 .pix_fmts = ff_pixfmt_list_420