2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of Libav.
9 * Libav is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * Libav is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with Libav; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
31 #include "error_resilience.h"
32 #include "mpegvideo.h"
34 #include "h264chroma.h"
37 #include "vc1acdata.h"
38 #include "msmpeg4data.h"
45 #define MB_INTRA_VLC_BITS 9
// offset tables for interlaced picture MVDATA decoding
// table1[k] = 0 for k == 0, otherwise 2^(k-1); table2[k] = 2^k - 1
static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
53 /***********************************************************************/
55 * @name VC-1 Bitplane decoding
73 /** @} */ //imode defines
/**
 * Set up the s->dest[] plane pointers for the current MB via
 * ff_init_block_index(), then, in field mode, bias each plane pointer
 * by one line when !(second_field ^ tff) selects the other field.
 * NOTE(review): which physical field this picks depends on tff
 * semantics not visible here — verify against the picture layer code.
 */
static void init_block_index(VC1Context *v)
    MpegEncContext *s = &v->s;
    ff_init_block_index(s);
    if (v->field_mode && !(v->second_field ^ v->tff)) {
        /* step one luma/chroma line down so writes hit the interleaved
         * lines of the field being decoded */
        s->dest[0] += s->current_picture_ptr->f.linesize[0];
        s->dest[1] += s->current_picture_ptr->f.linesize[1];
        s->dest[2] += s->current_picture_ptr->f.linesize[2];
86 /** @} */ //Bitplane group
/**
 * Flush reconstructed (signed, clamped) coefficient blocks to the
 * output planes.  Writes the *top-left* neighbour MB's six blocks
 * (4 luma + 2 chroma), and, at the end of a row, also the *top*
 * neighbour's — i.e. it trails the decoding loop by one MB row and
 * one MB col (see comment below).  For interlaced frames the luma
 * stride and the vertical offset of the lower luma blocks depend on
 * the per-MB fieldtx flag.  Finally rotates the four block-buffer
 * indices (topleft/top/left/cur) through the allocated ring.
 */
static void vc1_put_signed_blocks_clamped(VC1Context *v)
    MpegEncContext *s = &v->s;
    int topleft_mb_pos, top_mb_pos;
    int stride_y, fieldtx = 0;
    /* The put pixels loop is always one MB row behind the decoding loop,
     * because we can only put pixels when overlap filtering is done, and
     * for filtering of the bottom edge of a MB, we need the next MB row
     * Within the row, the put pixels loop is also one MB col behind the
     * decoding loop. The reason for this is again, because for filtering
     * of the right MB edge, we need the next MB present. */
    if (!s->first_slice_line) {
        topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
        if (v->fcm == ILACE_FRAME)
            fieldtx = v->fieldtx_plane[topleft_mb_pos];
        /* fieldtx doubles the luma stride (blocks hold one field each) */
        stride_y = s->linesize << fieldtx;
        /* 8 for frame-coded MBs, 15 when field transform is used */
        v_dist = (16 - fieldtx) >> (fieldtx == 0);
        s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
                                         s->dest[0] - 16 * s->linesize - 16,
        s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
                                         s->dest[0] - 16 * s->linesize - 8,
        s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
                                         s->dest[0] - v_dist * s->linesize - 16,
        s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
                                         s->dest[0] - v_dist * s->linesize - 8,
        s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
                                         s->dest[1] - 8 * s->uvlinesize - 8,
        s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
                                         s->dest[2] - 8 * s->uvlinesize - 8,
        /* last MB of the row: no right neighbour will come, so flush the
         * top neighbour now as well */
        if (s->mb_x == s->mb_width - 1) {
            top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
            if (v->fcm == ILACE_FRAME)
                fieldtx = v->fieldtx_plane[top_mb_pos];
            stride_y = s->linesize << fieldtx;
            v_dist = fieldtx ? 15 : 8;
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
                                             s->dest[0] - 16 * s->linesize,
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
                                             s->dest[0] - 16 * s->linesize + 8,
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
                                             s->dest[0] - v_dist * s->linesize,
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
                                             s->dest[0] - v_dist * s->linesize + 8,
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
                                             s->dest[1] - 8 * s->uvlinesize,
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
                                             s->dest[2] - 8 * s->uvlinesize,
/* advance a ring-buffer index, wrapping at n_allocated_blks */
#define inc_blk_idx(idx) do { \
        if (idx >= v->n_allocated_blks) \
    inc_blk_idx(v->topleft_blk_idx);
    inc_blk_idx(v->top_blk_idx);
    inc_blk_idx(v->left_blk_idx);
    inc_blk_idx(v->cur_blk_idx);
/**
 * In-loop deblocking for intra blocks (immediate variant).
 * Filters the edges shared with the MB row above (once past the first
 * slice line) and, on the last MB row, the remaining bottom-row edges.
 * @param pq picture quantizer step, selects filter strength
 */
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
    MpegEncContext *s = &v->s;
    if (!s->first_slice_line) {
        /* horizontal edge between this MB and the one above */
        v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
        /* vertical edges of the MB above */
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
        for (j = 0; j < 2; j++) {
            v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
            v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
    /* internal horizontal luma edge of the current MB */
    v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
    if (s->mb_y == s->end_mb_y - 1) {
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
        v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
        v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/**
 * In-loop deblocking for intra blocks, delayed variant: runs one MB
 * row/col behind the overlap filter (hence two behind decoding), so
 * all destination offsets point at already-overlap-filtered MBs.
 * The trailing s->mb_y == s->end_mb_y branch flushes the edges of the
 * final row that the delayed scheme would otherwise never reach.
 * @param pq picture quantizer step, selects filter strength
 */
static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
    MpegEncContext *s = &v->s;
    /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
     * means it runs two rows/cols behind the decoding loop. */
    if (!s->first_slice_line) {
        if (s->mb_y >= s->start_mb_y + 2) {
            /* top-left neighbour's edges */
            v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
            for (j = 0; j < 2; j++) {
                v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
                v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
        v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
        /* last column: also catch up on the top neighbour */
        if (s->mb_x == s->mb_width - 1) {
            if (s->mb_y >= s->start_mb_y + 2) {
                v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
                for (j = 0; j < 2; j++) {
                    v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
                    v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
            v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
    /* past the last decoded row: flush the remaining bottom edges */
    if (s->mb_y == s->end_mb_y) {
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
        for (j = 0; j < 2; j++) {
            v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
        if (s->mb_x == s->mb_width - 1) {
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
            for (j = 0; j < 2; j++) {
                v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/**
 * Conditional overlap smoothing for intra blocks (advanced profile).
 * Smoothing is applied when condover is CONDOVER_ALL, when pq >= 9,
 * or when the per-MB over_flags_plane bit is set for both MBs sharing
 * the edge.  H (vertical-edge) smoothing runs on the current MB's
 * left/internal edges; V smoothing trails by one MB col (see comment
 * below), which is why the put_pixels loop also trails.
 */
static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
    MpegEncContext *s = &v->s;
    if (v->condover == CONDOVER_NONE)
    mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    /* Within a MB, the horizontal overlap always runs before the vertical.
     * To accomplish that, we run the H on left and internal borders of the
     * currently decoded MB. Then, we wait for the next overlap iteration
     * to do H overlap on the right edge of this MB, before moving over and
     * running the V overlap. Therefore, the V overlap makes us trail by one
     * MB col and the H overlap filter makes us trail by one MB row. This
     * is reflected in the time at which we run the put_pixels loop. */
    if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
        if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                        v->over_flags_plane[mb_pos - 1])) {
            /* H smoothing across the left MB edge */
            v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
                                      v->block[v->cur_blk_idx][0]);
            v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
                                      v->block[v->cur_blk_idx][2]);
            if (!(s->flags & CODEC_FLAG_GRAY)) {
                v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
                                          v->block[v->cur_blk_idx][4]);
                v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
                                          v->block[v->cur_blk_idx][5]);
        /* H smoothing on the MB-internal vertical edge */
        v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
                                  v->block[v->cur_blk_idx][1]);
        v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
                                  v->block[v->cur_blk_idx][3]);
        /* last MB of the row: run the trailing V smoothing now */
        if (s->mb_x == s->mb_width - 1) {
            if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                                         v->over_flags_plane[mb_pos - s->mb_stride])) {
                v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
                                          v->block[v->cur_blk_idx][0]);
                v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
                                          v->block[v->cur_blk_idx][1]);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
                                              v->block[v->cur_blk_idx][4]);
                    v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
                                              v->block[v->cur_blk_idx][5]);
            v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
                                      v->block[v->cur_blk_idx][2]);
            v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
                                      v->block[v->cur_blk_idx][3]);
    /* V smoothing for the left-neighbour MB (one col behind).
     * NOTE(review): unlike the parallel conditions above, this one omits
     * the v->pq >= 9 shortcut — verify against SMPTE 421M whether that
     * asymmetry is intentional. */
    if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
        if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                                     v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
            v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
                                      v->block[v->left_blk_idx][0]);
            v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
                                      v->block[v->left_blk_idx][1]);
            if (!(s->flags & CODEC_FLAG_GRAY)) {
                v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
                                          v->block[v->left_blk_idx][4]);
                v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
                                          v->block[v->left_blk_idx][5]);
        v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
                                  v->block[v->left_blk_idx][2]);
        v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
                                  v->block[v->left_blk_idx][3]);
339 /** Do motion compensation over 1 macroblock
340 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
/** Do motion compensation over 1 macroblock
 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
 *
 * Pipeline: pick reference planes (last/next/current field) -> derive
 * chroma MV from the luma MV -> clip source coords -> optionally go
 * through edge emulation (also used when range reduction or intensity
 * compensation must rewrite source pixels) -> quarter-pel (mspel) or
 * half-pel luma MC, then qpel-bilinear chroma MC.
 * @param dir 0 = forward (last picture), 1 = backward (next picture)
 */
static void vc1_mc_1mv(VC1Context *v, int dir)
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    uint8_t (*luty)[256], (*lutuv)[256];
    /* bail out when the needed reference picture is not available */
    if ((!v->field_mode ||
         (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f.data[0])
    mx = s->mv[dir][0][0];
    my = s->mv[dir][0][1];
    // store motion vectors for further use in B frames
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        for (i = 0; i < 4; i++) {
            s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
            s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
    /* derive chroma MV: halve the luma MV, rounding 3/4-pel up */
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;
        v->cur_field_type != v->ref_field_type[dir]) {
        /* opposite-parity reference: shift MV by half a field line */
        my = my - 2 + 4 * v->cur_field_type;
        uvmy = uvmy - 2 + 4 * v->cur_field_type;
    // fastuvmc shall be ignored for interlaced frame picture
    if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
        /* round chroma MV toward zero to half-pel */
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    /* select reference planes and intensity-compensation LUTs */
    if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
        srcY = s->current_picture.f.data[0];
        srcU = s->current_picture.f.data[1];
        srcV = s->current_picture.f.data[2];
        lutuv = v->curr_lutuv;
        use_ic = v->curr_use_ic;
        srcY = s->last_picture.f.data[0];
        srcU = s->last_picture.f.data[1];
        srcV = s->last_picture.f.data[2];
        lutuv = v->last_lutuv;
        use_ic = v->last_use_ic;
        srcY = s->next_picture.f.data[0];
        srcU = s->next_picture.f.data[1];
        srcV = s->next_picture.f.data[2];
        lutuv = v->next_lutuv;
        use_ic = v->next_use_ic;
    if (!srcY || !srcU) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip( src_x, -16, s->mb_width * 16);
        src_y = av_clip( src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
        src_x = av_clip( src_x, -17, s->avctx->coded_width);
        src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
    if (v->field_mode && v->ref_field_type[dir]) {
        /* bottom reference field: start one line down */
        srcY += s->current_picture_ptr->f.linesize[0];
        srcU += s->current_picture_ptr->f.linesize[1];
        srcV += s->current_picture_ptr->f.linesize[2];
    /* for grayscale we should not try to read from unknown area */
    if (s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    /* edge emulation path; also taken whenever source pixels must be
     * rewritten (range reduction / intensity compensation) */
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 22 || v_edge_pos < 22
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
        || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
        uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
        srcY -= s->mspel * (1 + s->linesize);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 17 + s->mspel * 2, 17 + s->mspel * 2,
                                 src_x - s->mspel, src_y - s->mspel,
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        s->vdsp.emulated_edge_mc(uvbuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            /* map pixel p to ((p - 128) >> 1) + 128, luma then chroma */
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                src += s->uvlinesize;
                src2 += s->uvlinesize;
        /* if we deal with intensity compensation we need to scale source blocks */
            /* LUT index f selects the per-field compensation table */
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
            for (j = 0; j < 9; j++) {
                int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i] = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                src += s->uvlinesize;
                src2 += s->uvlinesize;
        srcY += s->mspel * (1 + s->linesize);
        /* quarter-pel luma MC: four 8x8 calls covering the 16x16 MB */
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0]    , srcY    , s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize    , srcY    , s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
        s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
    if (s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
    h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
    v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Median of four integers: drop the smallest and the largest of
 * {a, b, c, d} and average the two middle values (C integer division,
 * truncating toward zero).
 */
static inline int median4(int a, int b, int c, int d)
{
    int lo_ab = a < b ? a : b, hi_ab = a < b ? b : a;
    int lo_cd = c < d ? c : d, hi_cd = c < d ? d : c;
    /* larger of the two pair-minima and smaller of the two pair-maxima
     * are exactly the two middle values */
    int mid_low  = lo_ab > lo_cd ? lo_ab : lo_cd;
    int mid_high = hi_ab < hi_cd ? hi_ab : hi_cd;
    return (mid_low + mid_high) / 2;
}
559 /** Do motion compensation for 4-MV macroblock - luminance block
/** Do motion compensation for 4-MV macroblock - luminance block
 *
 * Handles one 8x8 luma block (n = 0..3) with its own MV; supports
 * interlaced-frame field MVs (fieldmv doubles the effective stride),
 * MV pull-back for interlaced frames, edge emulation, range reduction
 * and intensity compensation, then quarter-pel or half-pel MC.
 * @param n   luma block index within the MB (0..3)
 * @param dir prediction direction (0 = forward, 1 = backward)
 * @param avg use averaging MC (B-frame second reference)
 */
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
    MpegEncContext *s = &v->s;
    int dxy, mx, my, src_x, src_y;
    int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    uint8_t (*luty)[256];
    /* bail out when the needed reference picture is not available */
    if ((!v->field_mode ||
         (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f.data[0])
    mx = s->mv[dir][n][0];
    my = s->mv[dir][n][1];
    /* select reference plane and intensity-compensation LUT */
    if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
        srcY = s->current_picture.f.data[0];
        use_ic = v->curr_use_ic;
        srcY = s->last_picture.f.data[0];
        use_ic = v->last_use_ic;
        srcY = s->next_picture.f.data[0];
        use_ic = v->next_use_ic;
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        if (v->cur_field_type != v->ref_field_type[dir])
            my = my - 2 + 4 * v->cur_field_type;
    /* for the 4th block of a field P-MB, derive and store the MV that
     * B frames will use: median/mid/average of the dominant field's MVs */
    if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
        int same_count = 0, opp_count = 0, k;
        int chosen_mv[2][4][2], f;
        for (k = 0; k < 4; k++) {
            f = v->mv_f[0][s->block_index[k] + v->blocks_off];
            chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
            chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
        f = opp_count > same_count;
        switch (f ? opp_count : same_count) {
            tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
                         chosen_mv[f][2][0], chosen_mv[f][3][0]);
            ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
                         chosen_mv[f][2][1], chosen_mv[f][3][1]);
            tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
            ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
            tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
            ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
        for (k = 0; k < 4; k++)
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
    /* MV pull-back so the reference block stays inside the frame */
    if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
        int width = s->avctx->coded_width;
        int height = s->avctx->coded_height >> 1;
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
            s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
        qx = (s->mb_x * 16) + (mx >> 2);
        qy = (s->mb_y *  8) + (my >> 3);
            mx -= 4 * (qx - width);
        else if (qy > height + 1)
            my -= 8 * (qy - height - 1);
    /* dest offset of this 8x8 block; field MVs interleave the two
     * bottom blocks one line down instead of 8 lines down */
    if ((v->fcm == ILACE_FRAME) && fieldmv)
        off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
        off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
    src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
        src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
        src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip(src_x, -16, s->mb_width * 16);
        src_y = av_clip(src_y, -16, s->mb_height * 16);
        src_x = av_clip(src_x, -17, s->avctx->coded_width);
        if (v->fcm == ILACE_FRAME) {
            src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
            src_y = av_clip(src_y, -18, s->avctx->coded_height);
            src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
    srcY += src_y * s->linesize + src_x;
    if (v->field_mode && v->ref_field_type[dir])
        srcY += s->current_picture_ptr->f.linesize[0];
    if (fieldmv && !(src_y & 1))
    if (fieldmv && (src_y & 1) && src_y < 4)
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 13 || v_edge_pos < 23
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
        || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
        srcY -= s->mspel * (1 + (s->linesize << fieldmv));
        /* check emulate edge stride and offset */
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
                                 src_x - s->mspel, src_y - (s->mspel << fieldmv),
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            for (j = 0; j < 9 + s->mspel * 2; j++) {
                for (i = 0; i < 9 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize << fieldmv;
        /* if we deal with intensity compensation we need to scale source blocks */
            for (j = 0; j < 9 + s->mspel * 2; j++) {
                int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
                for (i = 0; i < 9 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
                src += s->linesize << fieldmv;
        srcY += s->mspel * (1 + (s->linesize << fieldmv));
        /* quarter-pel MC on one 8x8 block */
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
        s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/**
 * Derive the chroma MV from up to four luma MVs.
 * a[k] == flag marks block k as usable; with 4 usable blocks the
 * component-wise median4 is taken, with 3 the middle value (mid_pred),
 * with 2 the average, with 1 that MV is copied (branches for counts
 * 1 and the final copy are partly outside this sampled view).
 * @return number of usable blocks (count[idx])
 */
static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
    /* popcount of the 4-bit "unusable" mask */
    static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
    idx = ((a[3] != flag) << 3)
        | ((a[2] != flag) << 2)
        | ((a[1] != flag) << 1)
        *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
        *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
    } else if (count[idx] == 1) {
        /* exactly one block unusable: mid_pred over the other three */
        *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
        *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
        *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
        *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
        *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
        *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
        *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
        *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
    } else if (count[idx] == 2) {
        /* find the two usable blocks t1, t2 and average them */
        for (i = 0; i < 3; i++)
        for (i = t1 + 1; i < 4; i++)
        *tx = (mvx[t1] + mvx[t2]) / 2;
        *ty = (mvy[t1] + mvy[t2]) / 2;
803 /** Do motion compensation for 4-MV macroblock - both chroma blocks
/** Do motion compensation for 4-MV macroblock - both chroma blocks
 *
 * Derives a single chroma MV from the four luma MVs (get_chroma_mv),
 * picks the reference field/picture, applies fastuvmc rounding and the
 * field-conversion bias, and runs 8x8 qpel-bilinear chroma MC with
 * optional edge emulation / range reduction / intensity compensation.
 * @param dir prediction direction (0 = forward, 1 = backward)
 */
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcU, *srcV;
    int uvmx, uvmy, uvsrc_x, uvsrc_y;
    int k, tx = 0, ty = 0;
    int mvx[4], mvy[4], intra[4], mv_f[4];
    int chroma_ref_type = v->cur_field_type;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    uint8_t (*lutuv)[256];
    if (!v->field_mode && !v->s.last_picture.f.data[0])
    if (s->flags & CODEC_FLAG_GRAY)
    for (k = 0; k < 4; k++) {
        mvx[k] = s->mv[dir][k][0];
        mvy[k] = s->mv[dir][k][1];
        intra[k] = v->mb_type[0][s->block_index[k]];
        mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
    /* calculate chroma MV vector from four luma MVs */
    if (!v->field_mode || (v->field_mode && !v->numref)) {
        valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
        chroma_ref_type = v->reffield;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            return; //no need to do MC for intra blocks
        /* two-reference field picture: pick the dominant field */
        if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
        valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
            chroma_ref_type = !v->cur_field_type;
    if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
    s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
    s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
    /* halve the derived MV, rounding 3/4-pel up */
    uvmx = (tx + ((tx & 3) == 3)) >> 1;
    uvmy = (ty + ((ty & 3) == 3)) >> 1;
    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;
        /* fastuvmc: round chroma MV toward zero to half-pel */
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    // Field conversion bias
    if (v->cur_field_type != chroma_ref_type)
        uvmy += 2 - 4 * chroma_ref_type;
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
    if (v->profile != PROFILE_ADVANCED) {
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    /* select reference chroma planes and IC LUT */
    if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
        srcU = s->current_picture.f.data[1];
        srcV = s->current_picture.f.data[2];
        lutuv = v->curr_lutuv;
        use_ic = v->curr_use_ic;
        srcU = s->last_picture.f.data[1];
        srcV = s->last_picture.f.data[2];
        lutuv = v->last_lutuv;
        use_ic = v->last_use_ic;
        srcU = s->next_picture.f.data[1];
        srcV = s->next_picture.f.data[2];
        lutuv = v->next_lutuv;
        use_ic = v->next_use_ic;
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
    if (chroma_ref_type) {
        /* bottom reference field: start one chroma line down */
        srcU += s->current_picture_ptr->f.linesize[1];
        srcV += s->current_picture_ptr->f.linesize[2];
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 18 || v_edge_pos < 18
        || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
        || (unsigned)uvsrc_y > (v_edge_pos    >> 1) - 9) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = s->edge_emu_buffer;
        srcV = s->edge_emu_buffer + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                src += s->uvlinesize;
                src2 += s->uvlinesize;
        /* if we deal with intensity compensation we need to scale source blocks */
            for (j = 0; j < 9; j++) {
                int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i] = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                src += s->uvlinesize;
                src2 += s->uvlinesize;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
    h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
    v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
975 /** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
/** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
 *
 * Each of the four 4x4 chroma sub-blocks gets its own MV derived from
 * the corresponding luma MV; field MVs (fieldmv) double the effective
 * stride and use the s_rndtblfield rounding table for the vertical
 * component.  Supports put/avg and rnd/no_rnd variants.
 * @param dir  direction for sub-blocks 0-1, dir2 for sub-blocks 2-3
 * @param avg  use averaging MC
 */
static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcU, *srcV;
    int uvsrc_x, uvsrc_y;
    int uvmx_field[4], uvmy_field[4];
    int fieldmv = v->blk_mv_type[s->block_index[0]];
    /* rounding table for field-MV vertical chroma scaling */
    static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
    int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
    int v_edge_pos = s->v_edge_pos >> 1;
    uint8_t (*lutuv)[256];
    if (s->flags & CODEC_FLAG_GRAY)
    /* derive the four chroma sub-block MVs from the luma MVs */
    for (i = 0; i < 4; i++) {
        int d = i < 2 ? dir: dir2;
        uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
            uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
            uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
    for (i = 0; i < 4; i++) {
        /* dest offset of this 4x4 sub-block */
        off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
        uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
        uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
        // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
        if (i < 2 ? dir : dir2) {
            srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
            srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
            lutuv = v->next_lutuv;
            use_ic = v->next_use_ic;
            srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
            srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
            lutuv = v->last_lutuv;
            use_ic = v->last_use_ic;
        uvmx_field[i] = (uvmx_field[i] & 3) << 1;
        uvmy_field[i] = (uvmy_field[i] & 3) << 1;
        if (fieldmv && !(uvsrc_y & 1))
        if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
            || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
            || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
            || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
                                     s->uvlinesize, s->uvlinesize,
                                     5, (5 << fieldmv), uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, v_edge_pos);
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
                                     s->uvlinesize, s->uvlinesize,
                                     5, (5 << fieldmv), uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, v_edge_pos);
            srcU = s->edge_emu_buffer;
            srcV = s->edge_emu_buffer + 16;
            /* if we deal with intensity compensation we need to scale source blocks */
                uint8_t *src, *src2;
                for (j = 0; j < 5; j++) {
                    int f = (uvsrc_y + (j << fieldmv)) & 1;
                    for (i = 0; i < 5; i++) {
                        src[i] = lutuv[f][src[i]];
                        src2[i] = lutuv[f][src2[i]];
                    src += s->uvlinesize << fieldmv;
                    src2 += s->uvlinesize << fieldmv;
            h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1084 /***********************************************************************/
1086 * @name VC-1 Block-level functions
1087 * @see 7.1.4, p91 and 8.1.1.7, p104
1093 * @brief Get macroblock-level quantizer scale
1095 #define GET_MQUANT() \
1096 if (v->dquantfrm) { \
1098 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1099 if (v->dqbilevel) { \
1100 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1102 mqdiff = get_bits(gb, 3); \
1104 mquant = v->pq + mqdiff; \
1106 mquant = get_bits(gb, 5); \
1109 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1110 edges = 1 << v->dqsbedge; \
1111 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1112 edges = (3 << v->dqsbedge) % 15; \
1113 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1115 if ((edges&1) && !s->mb_x) \
1116 mquant = v->altpq; \
1117 if ((edges&2) && s->first_slice_line) \
1118 mquant = v->altpq; \
1119 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1120 mquant = v->altpq; \
1121 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1122 mquant = v->altpq; \
1123 if (!mquant || mquant > 31) { \
1124 av_log(v->s.avctx, AV_LOG_ERROR, \
1125 "Overriding invalid mquant %d\n", mquant); \
1131 * @def GET_MVDATA(_dmv_x, _dmv_y)
1132 * @brief Get MV differentials
1133 * @see MVDATA decoding from 8.3.5.2, p120
1134 * @param _dmv_x Horizontal differential for decoded MV
1135 * @param _dmv_y Vertical differential for decoded MV
1137 #define GET_MVDATA(_dmv_x, _dmv_y) \
1138 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1139 VC1_MV_DIFF_VLC_BITS, 2); \
1141 mb_has_coeffs = 1; \
1144 mb_has_coeffs = 0; \
1147 _dmv_x = _dmv_y = 0; \
1148 } else if (index == 35) { \
1149 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1150 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1151 } else if (index == 36) { \
1156 index1 = index % 6; \
1157 if (!s->quarter_sample && index1 == 5) val = 1; \
1159 if (size_table[index1] - val > 0) \
1160 val = get_bits(gb, size_table[index1] - val); \
1162 sign = 0 - (val&1); \
1163 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1165 index1 = index / 6; \
1166 if (!s->quarter_sample && index1 == 5) val = 1; \
1168 if (size_table[index1] - val > 0) \
1169 val = get_bits(gb, size_table[index1] - val); \
1171 sign = 0 - (val & 1); \
1172 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
1175 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1176 int *dmv_y, int *pred_flag)
1179 int extend_x = 0, extend_y = 0;
1180 GetBitContext *gb = &v->s.gb;
1183 const int* offs_tab;
1186 bits = VC1_2REF_MVDATA_VLC_BITS;
1189 bits = VC1_1REF_MVDATA_VLC_BITS;
1192 switch (v->dmvrange) {
1200 extend_x = extend_y = 1;
1203 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1205 *dmv_x = get_bits(gb, v->k_x);
1206 *dmv_y = get_bits(gb, v->k_y);
1209 *pred_flag = *dmv_y & 1;
1210 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1212 *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1218 offs_tab = offset_table2;
1220 offs_tab = offset_table1;
1221 index1 = (index + 1) % 9;
1223 val = get_bits(gb, index1 + extend_x);
1224 sign = 0 -(val & 1);
1225 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1229 offs_tab = offset_table2;
1231 offs_tab = offset_table1;
1232 index1 = (index + 1) / 9;
1233 if (index1 > v->numref) {
1234 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1235 sign = 0 - (val & 1);
1236 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1239 if (v->numref && pred_flag)
1240 *pred_flag = index1 & 1;
1244 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1246 int scaledvalue, refdist;
1247 int scalesame1, scalesame2;
1248 int scalezone1_x, zone1offset_x;
1249 int table_index = dir ^ v->second_field;
1251 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1252 refdist = v->refdist;
1254 refdist = dir ? v->brfd : v->frfd;
1257 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1258 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1259 scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1260 zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1265 if (FFABS(n) < scalezone1_x)
1266 scaledvalue = (n * scalesame1) >> 8;
1269 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1271 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1274 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1277 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1279 int scaledvalue, refdist;
1280 int scalesame1, scalesame2;
1281 int scalezone1_y, zone1offset_y;
1282 int table_index = dir ^ v->second_field;
1284 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1285 refdist = v->refdist;
1287 refdist = dir ? v->brfd : v->frfd;
1290 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1291 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1292 scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1293 zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1298 if (FFABS(n) < scalezone1_y)
1299 scaledvalue = (n * scalesame1) >> 8;
1302 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1304 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1308 if (v->cur_field_type && !v->ref_field_type[dir])
1309 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1311 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1314 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1316 int scalezone1_x, zone1offset_x;
1317 int scaleopp1, scaleopp2, brfd;
1320 brfd = FFMIN(v->brfd, 3);
1321 scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1322 zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1323 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1324 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1329 if (FFABS(n) < scalezone1_x)
1330 scaledvalue = (n * scaleopp1) >> 8;
1333 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1335 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1338 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1341 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1343 int scalezone1_y, zone1offset_y;
1344 int scaleopp1, scaleopp2, brfd;
1347 brfd = FFMIN(v->brfd, 3);
1348 scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1349 zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1350 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1351 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1356 if (FFABS(n) < scalezone1_y)
1357 scaledvalue = (n * scaleopp1) >> 8;
1360 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1362 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1365 if (v->cur_field_type && !v->ref_field_type[dir]) {
1366 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1368 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1372 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1375 int brfd, scalesame;
1376 int hpel = 1 - v->s.quarter_sample;
1379 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1381 n = scaleforsame_y(v, i, n, dir) << hpel;
1383 n = scaleforsame_x(v, n, dir) << hpel;
1386 brfd = FFMIN(v->brfd, 3);
1387 scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1389 n = (n * scalesame >> 8) << hpel;
1393 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1396 int refdist, scaleopp;
1397 int hpel = 1 - v->s.quarter_sample;
1400 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1402 n = scaleforopp_y(v, n, dir) << hpel;
1404 n = scaleforopp_x(v, n) << hpel;
1407 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1408 refdist = FFMIN(v->refdist, 3);
1410 refdist = dir ? v->brfd : v->frfd;
1411 scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1413 n = (n * scaleopp >> 8) << hpel;
1417 /** Predict and set motion vector
1419 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1420 int mv1, int r_x, int r_y, uint8_t* is_intra,
1421 int pred_flag, int dir)
1423 MpegEncContext *s = &v->s;
1424 int xy, wrap, off = 0;
1428 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1429 int opposite, a_f, b_f, c_f;
1430 int16_t field_predA[2];
1431 int16_t field_predB[2];
1432 int16_t field_predC[2];
1433 int a_valid, b_valid, c_valid;
1434 int hybridmv_thresh, y_bias = 0;
1436 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1437 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1441 /* scale MV difference to be quad-pel */
1442 dmv_x <<= 1 - s->quarter_sample;
1443 dmv_y <<= 1 - s->quarter_sample;
1445 wrap = s->b8_stride;
1446 xy = s->block_index[n];
1449 s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1450 s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1451 s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1452 s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1453 if (mv1) { /* duplicate motion data for 1-MV block */
1454 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1455 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1456 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1457 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1458 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1459 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1460 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1461 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1462 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1463 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1464 s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1465 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1466 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1471 C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1472 A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1474 if (v->field_mode && mixedmv_pic)
1475 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1477 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1479 //in 4-MV mode different blocks have different B predictor position
1482 off = (s->mb_x > 0) ? -1 : 1;
1485 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1494 B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
1496 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1497 b_valid = a_valid && (s->mb_width > 1);
1498 c_valid = s->mb_x || (n == 1 || n == 3);
1499 if (v->field_mode) {
1500 a_valid = a_valid && !is_intra[xy - wrap];
1501 b_valid = b_valid && !is_intra[xy - wrap + off];
1502 c_valid = c_valid && !is_intra[xy - 1];
1506 a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1507 num_oppfield += a_f;
1508 num_samefield += 1 - a_f;
1509 field_predA[0] = A[0];
1510 field_predA[1] = A[1];
1512 field_predA[0] = field_predA[1] = 0;
1516 b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1517 num_oppfield += b_f;
1518 num_samefield += 1 - b_f;
1519 field_predB[0] = B[0];
1520 field_predB[1] = B[1];
1522 field_predB[0] = field_predB[1] = 0;
1526 c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1527 num_oppfield += c_f;
1528 num_samefield += 1 - c_f;
1529 field_predC[0] = C[0];
1530 field_predC[1] = C[1];
1532 field_predC[0] = field_predC[1] = 0;
1536 if (v->field_mode) {
1538 // REFFIELD determines if the last field or the second-last field is
1539 // to be used as reference
1540 opposite = 1 - v->reffield;
1542 if (num_samefield <= num_oppfield)
1543 opposite = 1 - pred_flag;
1545 opposite = pred_flag;
1550 if (a_valid && !a_f) {
1551 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1552 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1554 if (b_valid && !b_f) {
1555 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1556 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1558 if (c_valid && !c_f) {
1559 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1560 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1562 v->mv_f[dir][xy + v->blocks_off] = 1;
1563 v->ref_field_type[dir] = !v->cur_field_type;
1565 if (a_valid && a_f) {
1566 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1567 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1569 if (b_valid && b_f) {
1570 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1571 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1573 if (c_valid && c_f) {
1574 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1575 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1577 v->mv_f[dir][xy + v->blocks_off] = 0;
1578 v->ref_field_type[dir] = v->cur_field_type;
1582 px = field_predA[0];
1583 py = field_predA[1];
1584 } else if (c_valid) {
1585 px = field_predC[0];
1586 py = field_predC[1];
1587 } else if (b_valid) {
1588 px = field_predB[0];
1589 py = field_predB[1];
1595 if (num_samefield + num_oppfield > 1) {
1596 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1597 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1600 /* Pullback MV as specified in 8.3.5.3.4 */
1601 if (!v->field_mode) {
1603 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1604 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1605 X = (s->mb_width << 6) - 4;
1606 Y = (s->mb_height << 6) - 4;
1608 if (qx + px < -60) px = -60 - qx;
1609 if (qy + py < -60) py = -60 - qy;
1611 if (qx + px < -28) px = -28 - qx;
1612 if (qy + py < -28) py = -28 - qy;
1614 if (qx + px > X) px = X - qx;
1615 if (qy + py > Y) py = Y - qy;
1618 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1619 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1620 hybridmv_thresh = 32;
1621 if (a_valid && c_valid) {
1622 if (is_intra[xy - wrap])
1623 sum = FFABS(px) + FFABS(py);
1625 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1626 if (sum > hybridmv_thresh) {
1627 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1628 px = field_predA[0];
1629 py = field_predA[1];
1631 px = field_predC[0];
1632 py = field_predC[1];
1635 if (is_intra[xy - 1])
1636 sum = FFABS(px) + FFABS(py);
1638 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1639 if (sum > hybridmv_thresh) {
1640 if (get_bits1(&s->gb)) {
1641 px = field_predA[0];
1642 py = field_predA[1];
1644 px = field_predC[0];
1645 py = field_predC[1];
1652 if (v->field_mode && v->numref)
1654 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1656 /* store MV using signed modulus of MV range defined in 4.11 */
1657 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1658 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1659 if (mv1) { /* duplicate motion data for 1-MV block */
1660 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1661 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1662 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1663 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1664 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1665 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1666 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1667 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1671 /** Predict and set motion vector for interlaced frame picture MBs
1673 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1674 int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
1676 MpegEncContext *s = &v->s;
1677 int xy, wrap, off = 0;
1678 int A[2], B[2], C[2];
1680 int a_valid = 0, b_valid = 0, c_valid = 0;
1681 int field_a, field_b, field_c; // 0: same, 1: opposit
1682 int total_valid, num_samefield, num_oppfield;
1683 int pos_c, pos_b, n_adj;
1685 wrap = s->b8_stride;
1686 xy = s->block_index[n];
1689 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1690 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1691 s->current_picture.motion_val[1][xy][0] = 0;
1692 s->current_picture.motion_val[1][xy][1] = 0;
1693 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1694 s->current_picture.motion_val[0][xy + 1][0] = 0;
1695 s->current_picture.motion_val[0][xy + 1][1] = 0;
1696 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1697 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1698 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1699 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
1700 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1701 s->current_picture.motion_val[1][xy + 1][0] = 0;
1702 s->current_picture.motion_val[1][xy + 1][1] = 0;
1703 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1704 s->current_picture.motion_val[1][xy + wrap][1] = 0;
1705 s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
1706 s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
1711 off = ((n == 0) || (n == 1)) ? 1 : -1;
1713 if (s->mb_x || (n == 1) || (n == 3)) {
1714 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1715 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1716 A[0] = s->current_picture.motion_val[dir][xy - 1][0];
1717 A[1] = s->current_picture.motion_val[dir][xy - 1][1];
1719 } else { // current block has frame mv and cand. has field MV (so average)
1720 A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
1721 + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
1722 A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
1723 + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
1726 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1732 /* Predict B and C */
1733 B[0] = B[1] = C[0] = C[1] = 0;
1734 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1735 if (!s->first_slice_line) {
1736 if (!v->is_intra[s->mb_x - s->mb_stride]) {
1739 pos_b = s->block_index[n_adj] - 2 * wrap;
1740 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1741 n_adj = (n & 2) | (n & 1);
1743 B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
1744 B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
1745 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1746 B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1747 B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
1750 if (s->mb_width > 1) {
1751 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1754 pos_c = s->block_index[2] - 2 * wrap + 2;
1755 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1758 C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
1759 C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
1760 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1761 C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1762 C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1764 if (s->mb_x == s->mb_width - 1) {
1765 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1768 pos_c = s->block_index[3] - 2 * wrap - 2;
1769 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1772 C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
1773 C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
1774 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1775 C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1776 C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
1785 pos_b = s->block_index[1];
1787 B[0] = s->current_picture.motion_val[dir][pos_b][0];
1788 B[1] = s->current_picture.motion_val[dir][pos_b][1];
1789 pos_c = s->block_index[0];
1791 C[0] = s->current_picture.motion_val[dir][pos_c][0];
1792 C[1] = s->current_picture.motion_val[dir][pos_c][1];
1795 total_valid = a_valid + b_valid + c_valid;
1796 // check if predictor A is out of bounds
1797 if (!s->mb_x && !(n == 1 || n == 3)) {
1800 // check if predictor B is out of bounds
1801 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1802 B[0] = B[1] = C[0] = C[1] = 0;
1804 if (!v->blk_mv_type[xy]) {
1805 if (s->mb_width == 1) {
1809 if (total_valid >= 2) {
1810 px = mid_pred(A[0], B[0], C[0]);
1811 py = mid_pred(A[1], B[1], C[1]);
1812 } else if (total_valid) {
1813 if (a_valid) { px = A[0]; py = A[1]; }
1814 if (b_valid) { px = B[0]; py = B[1]; }
1815 if (c_valid) { px = C[0]; py = C[1]; }
1821 field_a = (A[1] & 4) ? 1 : 0;
1825 field_b = (B[1] & 4) ? 1 : 0;
1829 field_c = (C[1] & 4) ? 1 : 0;
1833 num_oppfield = field_a + field_b + field_c;
1834 num_samefield = total_valid - num_oppfield;
1835 if (total_valid == 3) {
1836 if ((num_samefield == 3) || (num_oppfield == 3)) {
1837 px = mid_pred(A[0], B[0], C[0]);
1838 py = mid_pred(A[1], B[1], C[1]);
1839 } else if (num_samefield >= num_oppfield) {
1840 /* take one MV from same field set depending on priority
1841 the check for B may not be necessary */
1842 px = !field_a ? A[0] : B[0];
1843 py = !field_a ? A[1] : B[1];
1845 px = field_a ? A[0] : B[0];
1846 py = field_a ? A[1] : B[1];
1848 } else if (total_valid == 2) {
1849 if (num_samefield >= num_oppfield) {
1850 if (!field_a && a_valid) {
1853 } else if (!field_b && b_valid) {
1856 } else if (c_valid) {
1861 if (field_a && a_valid) {
1864 } else if (field_b && b_valid) {
1867 } else if (c_valid) {
1872 } else if (total_valid == 1) {
1873 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1874 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1879 /* store MV using signed modulus of MV range defined in 4.11 */
1880 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1881 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1882 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1883 s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
1884 s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
1885 s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
1886 s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
1887 s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
1888 s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
1889 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1890 s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
1891 s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
1892 s->mv[dir][n + 1][0] = s->mv[dir][n][0];
1893 s->mv[dir][n + 1][1] = s->mv[dir][n][1];
1897 /** Motion compensation for direct or interpolated blocks in B-frames
1899 static void vc1_interp_mc(VC1Context *v)
1901 MpegEncContext *s = &v->s;
1902 H264ChromaContext *h264chroma = &v->h264chroma;
1903 uint8_t *srcY, *srcU, *srcV;
1904 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1906 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1907 int use_ic = v->next_use_ic;
1909 if (!v->field_mode && !v->s.next_picture.f.data[0])
1912 mx = s->mv[1][0][0];
1913 my = s->mv[1][0][1];
1914 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1915 uvmy = (my + ((my & 3) == 3)) >> 1;
1916 if (v->field_mode) {
1917 if (v->cur_field_type != v->ref_field_type[1])
1918 my = my - 2 + 4 * v->cur_field_type;
1919 uvmy = uvmy - 2 + 4 * v->cur_field_type;
1922 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1923 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1925 srcY = s->next_picture.f.data[0];
1926 srcU = s->next_picture.f.data[1];
1927 srcV = s->next_picture.f.data[2];
1929 src_x = s->mb_x * 16 + (mx >> 2);
1930 src_y = s->mb_y * 16 + (my >> 2);
1931 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1932 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1934 if (v->profile != PROFILE_ADVANCED) {
1935 src_x = av_clip( src_x, -16, s->mb_width * 16);
1936 src_y = av_clip( src_y, -16, s->mb_height * 16);
1937 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1938 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1940 src_x = av_clip( src_x, -17, s->avctx->coded_width);
1941 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1942 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1943 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1946 srcY += src_y * s->linesize + src_x;
1947 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1948 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1950 if (v->field_mode && v->ref_field_type[1]) {
1951 srcY += s->current_picture_ptr->f.linesize[0];
1952 srcU += s->current_picture_ptr->f.linesize[1];
1953 srcV += s->current_picture_ptr->f.linesize[2];
1956 /* for grayscale we should not try to read from unknown area */
1957 if (s->flags & CODEC_FLAG_GRAY) {
1958 srcU = s->edge_emu_buffer + 18 * s->linesize;
1959 srcV = s->edge_emu_buffer + 18 * s->linesize;
1962 if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
1963 || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1964 || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1965 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1967 srcY -= s->mspel * (1 + s->linesize);
1968 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
1969 s->linesize, s->linesize,
1970 17 + s->mspel * 2, 17 + s->mspel * 2,
1971 src_x - s->mspel, src_y - s->mspel,
1972 s->h_edge_pos, v_edge_pos);
1973 srcY = s->edge_emu_buffer;
1974 s->vdsp.emulated_edge_mc(uvbuf, srcU,
1975 s->uvlinesize, s->uvlinesize,
1977 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1978 s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
1979 s->uvlinesize, s->uvlinesize,
1981 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1984 /* if we deal with range reduction we need to scale source blocks */
1985 if (v->rangeredfrm) {
1987 uint8_t *src, *src2;
1990 for (j = 0; j < 17 + s->mspel * 2; j++) {
1991 for (i = 0; i < 17 + s->mspel * 2; i++)
1992 src[i] = ((src[i] - 128) >> 1) + 128;
1997 for (j = 0; j < 9; j++) {
1998 for (i = 0; i < 9; i++) {
1999 src[i] = ((src[i] - 128) >> 1) + 128;
2000 src2[i] = ((src2[i] - 128) >> 1) + 128;
2002 src += s->uvlinesize;
2003 src2 += s->uvlinesize;
2008 uint8_t (*luty )[256] = v->next_luty;
2009 uint8_t (*lutuv)[256] = v->next_lutuv;
2011 uint8_t *src, *src2;
2014 for (j = 0; j < 17 + s->mspel * 2; j++) {
2015 int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
2016 for (i = 0; i < 17 + s->mspel * 2; i++)
2017 src[i] = luty[f][src[i]];
2022 for (j = 0; j < 9; j++) {
2023 int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
2024 for (i = 0; i < 9; i++) {
2025 src[i] = lutuv[f][src[i]];
2026 src2[i] = lutuv[f][src2[i]];
2028 src += s->uvlinesize;
2029 src2 += s->uvlinesize;
2032 srcY += s->mspel * (1 + s->linesize);
2039 dxy = ((my & 3) << 2) | (mx & 3);
2040 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2041 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2042 srcY += s->linesize * 8;
2043 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2044 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2046 dxy = (my & 2) | ((mx & 2) >> 1);
2049 s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2051 s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
2054 if (s->flags & CODEC_FLAG_GRAY) return;
2055 /* Chroma MC always uses qpel blilinear */
2056 uvmx = (uvmx & 3) << 1;
2057 uvmy = (uvmy & 3) << 1;
2059 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2060 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2062 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2063 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2067 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2071 #if B_FRACTION_DEN==256
2075 return 2 * ((value * n + 255) >> 9);
2076 return (value * n + 128) >> 8;
2079 n -= B_FRACTION_DEN;
2081 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2082 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2086 /** Reconstruct motion vector for B-frame and do motion compensation
2088 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2089 int direct, int mode)
2096 if (mode == BMV_TYPE_INTERPOLATED) {
2102 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
/** Predict the forward/backward motion vectors for a B-frame macroblock.
 * Direct mode scales the co-located MV of the next picture by bfraction;
 * otherwise a median (A/B/C) predictor plus the decoded differential is
 * used, per the VC-1 spec sections cited in the inline comments.
 * NOTE(review): listing is elided — several branches and declarations
 * (px/py/qx/qy/X/Y/r_x/r_y, A/B/C declarations, else-arms) are missing. */
2105 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2106 int direct, int mvtype)
2108 MpegEncContext *s = &v->s;
2109 int xy, wrap, off = 0;
2114 const uint8_t *is_intra = v->mb_type[0];
2118 /* scale MV difference to be quad-pel */
2119 dmv_x[0] <<= 1 - s->quarter_sample;
2120 dmv_y[0] <<= 1 - s->quarter_sample;
2121 dmv_x[1] <<= 1 - s->quarter_sample;
2122 dmv_y[1] <<= 1 - s->quarter_sample;
2124 wrap = s->b8_stride;
2125 xy = s->block_index[0];
/* Clear both direction MVs for this block before prediction. */
2128 s->current_picture.motion_val[0][xy + v->blocks_off][0] =
2129 s->current_picture.motion_val[0][xy + v->blocks_off][1] =
2130 s->current_picture.motion_val[1][xy + v->blocks_off][0] =
2131 s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
/* Direct-mode (progressive): derive fwd/bwd MVs by scaling the co-located
 * MV of the next (anchor) picture with bfraction. */
2134 if (!v->field_mode) {
2135 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2136 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2137 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2138 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2140 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2141 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2142 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2143 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2144 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2147 s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2148 s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2149 s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2150 s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
/* Forward prediction: gather predictors C (left), A (top), B (top-right,
 * or top-left on the last MB column, via 'off'). */
2154 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2155 C = s->current_picture.motion_val[0][xy - 2];
2156 A = s->current_picture.motion_val[0][xy - wrap * 2];
2157 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2158 B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
2160 if (!s->mb_x) C[0] = C[1] = 0;
2161 if (!s->first_slice_line) { // predictor A is not out of bounds
2162 if (s->mb_width == 1) {
/* Median of the three neighbour predictors. */
2166 px = mid_pred(A[0], B[0], C[0]);
2167 py = mid_pred(A[1], B[1], C[1]);
2169 } else if (s->mb_x) { // predictor C is not out of bounds
2175 /* Pullback MV as specified in 8.3.5.3.4 */
/* Simple/Main profile uses half-pel units (<<5), Advanced quarter (<<6). */
2178 if (v->profile < PROFILE_ADVANCED) {
2179 qx = (s->mb_x << 5);
2180 qy = (s->mb_y << 5);
2181 X = (s->mb_width << 5) - 4;
2182 Y = (s->mb_height << 5) - 4;
2183 if (qx + px < -28) px = -28 - qx;
2184 if (qy + py < -28) py = -28 - qy;
2185 if (qx + px > X) px = X - qx;
2186 if (qy + py > Y) py = Y - qy;
2188 qx = (s->mb_x << 6);
2189 qy = (s->mb_y << 6);
2190 X = (s->mb_width << 6) - 4;
2191 Y = (s->mb_height << 6) - 4;
2192 if (qx + px < -60) px = -60 - qx;
2193 if (qy + py < -60) py = -60 - qy;
2194 if (qx + px > X) px = X - qx;
2195 if (qy + py > Y) py = Y - qy;
2198 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): the constant 0 makes this whole hybrid-prediction branch
 * dead code as written — the get_bits1() reads below never execute. */
2199 if (0 && !s->first_slice_line && s->mb_x) {
2200 if (is_intra[xy - wrap])
2201 sum = FFABS(px) + FFABS(py);
2203 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2205 if (get_bits1(&s->gb)) {
2213 if (is_intra[xy - 2])
2214 sum = FFABS(px) + FFABS(py);
2216 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2218 if (get_bits1(&s->gb)) {
2228 /* store MV using signed modulus of MV range defined in 4.11 */
2229 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2230 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* Backward prediction: mirror of the forward path using motion_val[1]. */
2232 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2233 C = s->current_picture.motion_val[1][xy - 2];
2234 A = s->current_picture.motion_val[1][xy - wrap * 2];
2235 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2236 B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
2240 if (!s->first_slice_line) { // predictor A is not out of bounds
2241 if (s->mb_width == 1) {
2245 px = mid_pred(A[0], B[0], C[0]);
2246 py = mid_pred(A[1], B[1], C[1]);
2248 } else if (s->mb_x) { // predictor C is not out of bounds
2254 /* Pullback MV as specified in 8.3.5.3.4 */
2257 if (v->profile < PROFILE_ADVANCED) {
2258 qx = (s->mb_x << 5);
2259 qy = (s->mb_y << 5);
2260 X = (s->mb_width << 5) - 4;
2261 Y = (s->mb_height << 5) - 4;
2262 if (qx + px < -28) px = -28 - qx;
2263 if (qy + py < -28) py = -28 - qy;
2264 if (qx + px > X) px = X - qx;
2265 if (qy + py > Y) py = Y - qy;
2267 qx = (s->mb_x << 6);
2268 qy = (s->mb_y << 6);
2269 X = (s->mb_width << 6) - 4;
2270 Y = (s->mb_height << 6) - 4;
2271 if (qx + px < -60) px = -60 - qx;
2272 if (qy + py < -60) py = -60 - qy;
2273 if (qx + px > X) px = X - qx;
2274 if (qy + py > Y) py = Y - qy;
2277 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): disabled by the constant 0, same as the forward path. */
2278 if (0 && !s->first_slice_line && s->mb_x) {
2279 if (is_intra[xy - wrap])
2280 sum = FFABS(px) + FFABS(py);
2282 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2284 if (get_bits1(&s->gb)) {
2292 if (is_intra[xy - 2])
2293 sum = FFABS(px) + FFABS(py);
2295 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2297 if (get_bits1(&s->gb)) {
2307 /* store MV using signed modulus of MV range defined in 4.11 */
2309 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2310 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
/* Publish the final MVs for use as neighbours by later macroblocks. */
2312 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2313 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2314 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2315 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
/** Predict B-frame motion vectors for interlaced-field pictures.
 * Direct mode scales the co-located next-picture MV by bfraction and picks
 * the reference field parity by majority vote; the other modes delegate to
 * vc1_pred_mv() per direction.
 * NOTE(review): listing is elided — intra-anchor else-arm and some closing
 * braces are not visible. */
2318 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2320 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2321 MpegEncContext *s = &v->s;
2322 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2324 if (v->bmvtype == BMV_TYPE_DIRECT) {
2325 int total_opp, k, f;
/* Co-located MB in the next picture is inter: scale its MV for both
 * the forward (0) and backward (1) directions. */
2326 if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2327 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2328 v->bfraction, 0, s->quarter_sample);
2329 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2330 v->bfraction, 0, s->quarter_sample);
2331 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2332 v->bfraction, 1, s->quarter_sample);
2333 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2334 v->bfraction, 1, s->quarter_sample);
/* Majority vote over the 4 co-located 8x8 blocks: f = 1 when more than
 * two of them reference the opposite field. */
2336 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2337 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2338 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2339 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2340 f = (total_opp > 2) ? 1 : 0;
/* Intra anchor: zero MVs (f's value for this path is elided here). */
2342 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2343 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2346 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
/* Store the same direct-mode MV and field flag into all four blocks. */
2347 for (k = 0; k < 4; k++) {
2348 s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2349 s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2350 s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2351 s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2352 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2353 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* Interpolated: predict both directions for the whole MB. */
2357 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2358 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2359 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
/* Single-direction modes: predict the coded direction, and also keep the
 * opposite direction's predictor chain consistent once per MB. */
2362 if (dir) { // backward
2363 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2364 if (n == 3 || mv1) {
2365 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2368 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2369 if (n == 3 || mv1) {
2370 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2375 /** Get predicted DC value for I-frames only
2376 * prediction dir: left=0, top=1
2377 * @param s MpegEncContext
2378 * @param overlap flag indicating that overlap filtering is used
2379 * @param pq integer part of picture quantizer
2380 * @param[in] n block index in the current MB
2381 * @param dc_val_ptr Pointer to DC predictor
2382 * @param dir_ptr Prediction direction for use in AC prediction
* NOTE(review): listing is elided — the 'c' (left) predictor load, the
* pred selection and the return statement are not visible here.
2384 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2385 int16_t **dc_val_ptr, int *dir_ptr)
2387 int a, b, c, wrap, pred, scale;
/* dcpred[scale] approximates 1024/scale — the default edge predictor.
 * Index 0 is unused (-1 sentinel). */
2389 static const uint16_t dcpred[32] = {
2390 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2391 114, 102, 93, 85, 79, 73, 68, 64,
2392 60, 57, 54, 51, 49, 47, 45, 43,
2393 41, 39, 38, 37, 35, 34, 33
2396 /* find prediction - wmv3_dc_scale always used here in fact */
/* Blocks 0-3 are luma, 4-5 chroma; pick the matching DC scale. */
2397 if (n < 4) scale = s->y_dc_scale;
2398 else scale = s->c_dc_scale;
2400 wrap = s->block_wrap[n];
2401 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour DC, a = top neighbour DC. */
2407 b = dc_val[ - 1 - wrap];
2408 a = dc_val[ - wrap];
2410 if (pq < 9 || !overlap) {
2411 /* Set outer values */
/* At the top picture edge (only sub-blocks 0/1/4/5) and at the left edge
 * (sub-blocks 0/2/4/5) substitute the default predictor. */
2412 if (s->first_slice_line && (n != 2 && n != 3))
2413 b = a = dcpred[scale];
2414 if (s->mb_x == 0 && (n != 1 && n != 3))
2415 b = c = dcpred[scale];
2417 /* Set outer values */
2418 if (s->first_slice_line && (n != 2 && n != 3))
2420 if (s->mb_x == 0 && (n != 1 && n != 3))
/* Direction choice: gradient test between the three neighbours; the
 * selected predictor assignment itself is elided from this listing. */
2424 if (abs(a - b) <= abs(b - c)) {
2426 *dir_ptr = 1; // left
2429 *dir_ptr = 0; // top
2432 /* update predictor */
2433 *dc_val_ptr = &dc_val[0];
2438 /** Get predicted DC value
2439 * prediction dir: left=0, top=1
2440 * @param s MpegEncContext
2441 * @param overlap flag indicating that overlap filtering is used
2442 * @param pq integer part of picture quantizer
2443 * @param[in] n block index in the current MB
2444 * @param a_avail flag indicating top block availability
2445 * @param c_avail flag indicating left block availability
2446 * @param dc_val_ptr Pointer to DC predictor
2447 * @param dir_ptr Prediction direction for use in AC prediction
* NOTE(review): listing is elided — q1/q2/dqscale_index/off declarations,
* the 'c' load, and the return are not visible here.
2449 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2450 int a_avail, int c_avail,
2451 int16_t **dc_val_ptr, int *dir_ptr)
2453 int a, b, c, wrap, pred;
2455 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2459 wrap = s->block_wrap[n];
2460 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour DC, a = top neighbour DC. */
2466 b = dc_val[ - 1 - wrap];
2467 a = dc_val[ - wrap];
2468 /* scale predictors if needed */
2469 q1 = s->current_picture.qscale_table[mb_pos];
2470 dqscale_index = s->y_dc_scale_table[q1] - 1;
2471 if (dqscale_index < 0)
/* Left predictor comes from the MB to the left for sub-blocks 0/2/4/5;
 * rescale it from its quantizer to ours (fixed-point: *dqscale, +0x20000
 * rounding, >>18). */
2473 if (c_avail && (n != 1 && n != 3)) {
2474 q2 = s->current_picture.qscale_table[mb_pos - 1];
2476 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
/* Top predictor crosses into the MB above for sub-blocks 0/1/4/5. */
2478 if (a_avail && (n != 2 && n != 3)) {
2479 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2481 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
/* Top-left predictor: rescale when it belongs to another MB. */
2483 if (a_avail && c_avail && (n != 3)) {
2488 off -= s->mb_stride;
2489 q2 = s->current_picture.qscale_table[off];
2491 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
/* Direction selection by availability; with both available, use the
 * gradient test (predictor assignments elided in this listing). */
2494 if (a_avail && c_avail) {
2495 if (abs(a - b) <= abs(b - c)) {
2497 *dir_ptr = 1; // left
2500 *dir_ptr = 0; // top
2502 } else if (a_avail) {
2504 *dir_ptr = 0; // top
2505 } else if (c_avail) {
2507 *dir_ptr = 1; // left
2510 *dir_ptr = 1; // left
2513 /* update predictor */
2514 *dc_val_ptr = &dc_val[0];
2518 /** @} */ // Block group
2521 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2522 * @see 7.1.4, p91 and 8.1.1.7, p104
/** Predict the coded-block flag for block n from its left (a), top-left (b)
 * and top (c) neighbours in the coded_block plane, and return a pointer to
 * this block's entry so the caller can update it.
 * NOTE(review): listing is elided — the pred computation from a/b/c and
 * the return are not visible here. */
2526 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2527 uint8_t **coded_block_ptr)
2529 int xy, wrap, pred, a, b, c;
2531 xy = s->block_index[n];
2532 wrap = s->b8_stride;
2537 a = s->coded_block[xy - 1 ];
2538 b = s->coded_block[xy - 1 - wrap];
2539 c = s->coded_block[xy - wrap];
2548 *coded_block_ptr = &s->coded_block[xy];
2554 * Decode one AC coefficient
2555 * @param v The VC1 context
2556 * @param last Last coefficient
2557 * @param skip How much zero coefficients to skip
2558 * @param value Decoded AC coefficient value
2559 * @param codingset set of VLC to decode data
* NOTE(review): listing is elided — the sign handling and the final
* stores to *last/*skip/*value are not visible here.
2562 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2563 int *value, int codingset)
2565 GetBitContext *gb = &v->s.gb;
2566 int index, escape, run = 0, level = 0, lst = 0;
/* Normal path: VLC gives an index into the (run, level) tables. */
2568 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2569 if (index != ff_vc1_ac_sizes[codingset] - 1) {
2570 run = vc1_index_decode_table[codingset][index][0];
2571 level = vc1_index_decode_table[codingset][index][1];
/* Overread guard: force "last" when the bit reader ran past the end. */
2572 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
/* Escape path: decode210 selects one of three escape modes. */
2576 escape = decode210(gb);
/* Escape modes 1/2: re-decode a VLC and extend level or run with the
 * delta tables (the mode dispatch lines are elided here). */
2578 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2579 run = vc1_index_decode_table[codingset][index][0];
2580 level = vc1_index_decode_table[codingset][index][1];
2581 lst = index >= vc1_last_decode_table[codingset];
2584 level += vc1_last_delta_level_table[codingset][run];
2586 level += vc1_delta_level_table[codingset][run];
2589 run += vc1_last_delta_run_table[codingset][level] + 1;
2591 run += vc1_delta_run_table[codingset][level] + 1;
/* Escape mode 3: explicit run/level with adaptive field widths, set up
 * once per slice (esc3_level_length == 0 means "not yet initialized"). */
2597 lst = get_bits1(gb);
2598 if (v->s.esc3_level_length == 0) {
2599 if (v->pq < 8 || v->dquantfrm) { // table 59
2600 v->s.esc3_level_length = get_bits(gb, 3);
2601 if (!v->s.esc3_level_length)
2602 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2603 } else { // table 60
2604 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2606 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2608 run = get_bits(gb, v->s.esc3_run_length);
2609 sign = get_bits1(gb);
2610 level = get_bits(gb, v->s.esc3_level_length);
2621 /** Decode intra block in intra frames - should be faster than decode_intra_block
2622 * @param v VC1Context
2623 * @param block block to decode
2624 * @param[in] n subblock index
2625 * @param coded are AC coeffs present or not
2626 * @param codingset set of VLC to decode data
* NOTE(review): listing is elided — error check on the DC VLC, the
* AC-decode loop body, sign handling, and the final return are missing.
2628 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2629 int coded, int codingset)
2631 GetBitContext *gb = &v->s.gb;
2632 MpegEncContext *s = &v->s;
2633 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2636 int16_t *ac_val, *ac_val2;
2639 /* Get DC differential */
/* Luma blocks (n < 4, by context) and chroma use different DC VLC tables. */
2641 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2643 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2646 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* DC escape: the raw differential size depends on the picture quantizer. */
2650 if (dcdiff == 119 /* ESC index value */) {
2651 /* TODO: Optimize */
2652 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2653 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2654 else dcdiff = get_bits(gb, 8);
/* Non-escape: refine the VLC value with extra bits at low quantizers. */
2657 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2658 else if (v->pq == 2)
2659 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* Add the DC prediction (I-frame-only variant). */
2666 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2669 /* Store the quantized DC coeff, used for prediction */
2671 block[0] = dcdiff * s->y_dc_scale;
2673 block[0] = dcdiff * s->c_dc_scale;
/* Coded path: read AC run/level pairs into the zigzag-ordered block. */
2684 int last = 0, skip, value;
2685 const uint8_t *zz_table;
2689 scale = v->pq * 2 + v->halfpq;
/* Zigzag table depends on the AC-prediction direction (the selecting
 * conditions are elided in this listing). */
2693 zz_table = v->zz_8x8[2];
2695 zz_table = v->zz_8x8[3];
2697 zz_table = v->zz_8x8[1];
/* Per-block AC predictor storage: 8 left coeffs then 8 top coeffs. */
2699 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2701 if (dc_pred_dir) // left
2704 ac_val -= 16 * s->block_wrap[n];
2707 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2711 block[zz_table[i++]] = value;
2714 /* apply AC prediction if needed */
/* Add the saved first row/column from the predicting neighbour. */
2716 if (dc_pred_dir) { // left
2717 for (k = 1; k < 8; k++)
2718 block[k << v->left_blk_sh] += ac_val[k];
2720 for (k = 1; k < 8; k++)
2721 block[k << v->top_blk_sh] += ac_val[k + 8];
2724 /* save AC coeffs for further prediction */
2725 for (k = 1; k < 8; k++) {
2726 ac_val2[k] = block[k << v->left_blk_sh];
2727 ac_val2[k + 8] = block[k << v->top_blk_sh];
2730 /* scale AC coeffs */
/* Non-uniform quantizer adds +-pq to nonzero levels (condition elided). */
2731 for (k = 1; k < 64; k++)
2735 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2738 if (s->ac_pred) i = 63;
/* Not-coded path: only the predicted coefficients populate the block. */
2744 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2748 scale = v->pq * 2 + v->halfpq;
2749 memset(ac_val2, 0, 16 * 2);
2750 if (dc_pred_dir) { // left
2753 memcpy(ac_val2, ac_val, 8 * 2);
2755 ac_val -= 16 * s->block_wrap[n];
2757 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2760 /* apply AC prediction if needed */
2762 if (dc_pred_dir) { //left
2763 for (k = 1; k < 8; k++) {
2764 block[k << v->left_blk_sh] = ac_val[k] * scale;
2765 if (!v->pquantizer && block[k << v->left_blk_sh])
2766 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2769 for (k = 1; k < 8; k++) {
2770 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2771 if (!v->pquantizer && block[k << v->top_blk_sh])
2772 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2778 s->block_last_index[n] = i;
2783 /** Decode intra block in intra frames - should be faster than decode_intra_block
2784 * @param v VC1Context
2785 * @param block block to decode
2786 * @param[in] n subblock number
2787 * @param coded are AC coeffs present or not
2788 * @param codingset set of VLC to decode data
2789 * @param mquant quantizer value for this macroblock
* Advanced-profile variant: per-MB quantizer (mquant), neighbour
* availability flags and cross-quantizer AC-predictor rescaling.
* NOTE(review): listing is elided — q1/q2 declarations, the q1 validity
* checks before the AVERROR_INVALIDDATA returns, and the final return
* are not visible here.
2791 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2792 int coded, int codingset, int mquant)
2794 GetBitContext *gb = &v->s.gb;
2795 MpegEncContext *s = &v->s;
2796 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2799 int16_t *ac_val, *ac_val2;
2801 int a_avail = v->a_avail, c_avail = v->c_avail;
2802 int use_pred = s->ac_pred;
2805 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2807 /* Get DC differential */
2809 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2811 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2814 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* DC escape: raw size depends on the MB quantizer here, not v->pq. */
2818 if (dcdiff == 119 /* ESC index value */) {
2819 /* TODO: Optimize */
2820 if (mquant == 1) dcdiff = get_bits(gb, 10);
2821 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2822 else dcdiff = get_bits(gb, 8);
2825 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2826 else if (mquant == 2)
2827 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2834 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2837 /* Store the quantized DC coeff, used for prediction */
2839 block[0] = dcdiff * s->y_dc_scale;
2841 block[0] = dcdiff * s->c_dc_scale;
2847 /* check if AC is needed at all */
2848 if (!a_avail && !c_avail)
2850 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
/* halfpq only applies when the MB quantizer equals the picture one. */
2853 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2855 if (dc_pred_dir) // left
2858 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer; q2 = the predicting neighbour's quantizer
 * (left MB when predicting left, MB above when predicting from top). */
2860 q1 = s->current_picture.qscale_table[mb_pos];
2861 if ( dc_pred_dir && c_avail && mb_pos)
2862 q2 = s->current_picture.qscale_table[mb_pos - 1];
2863 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2864 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
/* Blocks 1 (left pred) and 2 (top pred) predict from inside this MB. */
2865 if ( dc_pred_dir && n == 1)
2867 if (!dc_pred_dir && n == 2)
2873 int last = 0, skip, value;
2874 const uint8_t *zz_table;
/* Interlaced frames use the interlaced zigzag unless AC prediction
 * picks a direction-specific table. */
2878 if (!use_pred && v->fcm == ILACE_FRAME) {
2879 zz_table = v->zzi_8x8;
2881 if (!dc_pred_dir) // top
2882 zz_table = v->zz_8x8[2];
2884 zz_table = v->zz_8x8[3];
2887 if (v->fcm != ILACE_FRAME)
2888 zz_table = v->zz_8x8[1];
2890 zz_table = v->zzi_8x8;
2894 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2898 block[zz_table[i++]] = value;
2901 /* apply AC prediction if needed */
2903 /* scale predictors if needed*/
/* Neighbour was coded at a different quantizer: rescale its saved AC
 * coefficients (fixed-point *dqscale, +0x20000 rounding, >>18). */
2904 if (q2 && q1 != q2) {
2905 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2906 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2909 return AVERROR_INVALIDDATA;
2910 if (dc_pred_dir) { // left
2911 for (k = 1; k < 8; k++)
2912 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2914 for (k = 1; k < 8; k++)
2915 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2918 if (dc_pred_dir) { //left
2919 for (k = 1; k < 8; k++)
2920 block[k << v->left_blk_sh] += ac_val[k];
2922 for (k = 1; k < 8; k++)
2923 block[k << v->top_blk_sh] += ac_val[k + 8];
2927 /* save AC coeffs for further prediction */
2928 for (k = 1; k < 8; k++) {
2929 ac_val2[k ] = block[k << v->left_blk_sh];
2930 ac_val2[k + 8] = block[k << v->top_blk_sh];
2933 /* scale AC coeffs */
2934 for (k = 1; k < 64; k++)
2938 block[k] += (block[k] < 0) ? -mquant : mquant;
2941 if (use_pred) i = 63;
2942 } else { // no AC coeffs
/* Not-coded: build the predictor set, rescaled across quantizers. */
2945 memset(ac_val2, 0, 16 * 2);
2946 if (dc_pred_dir) { // left
2948 memcpy(ac_val2, ac_val, 8 * 2);
2949 if (q2 && q1 != q2) {
2950 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2951 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2953 return AVERROR_INVALIDDATA;
2954 for (k = 1; k < 8; k++)
2955 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2960 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2961 if (q2 && q1 != q2) {
2962 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2963 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2965 return AVERROR_INVALIDDATA;
2966 for (k = 1; k < 8; k++)
2967 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2972 /* apply AC prediction if needed */
2974 if (dc_pred_dir) { // left
2975 for (k = 1; k < 8; k++) {
2976 block[k << v->left_blk_sh] = ac_val2[k] * scale;
2977 if (!v->pquantizer && block[k << v->left_blk_sh])
2978 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2981 for (k = 1; k < 8; k++) {
2982 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2983 if (!v->pquantizer && block[k << v->top_blk_sh])
2984 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2990 s->block_last_index[n] = i;
2995 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2996 * @param v VC1Context
2997 * @param block block to decode
2998 * @param[in] n subblock index
2999 * @param coded are AC coeffs present or not
3000 * @param mquant block quantizer
3001 * @param codingset set of VLC to decode data
* NOTE(review): listing is elided — q1/q2 declarations, some validity
* checks, sign handling in the AC loop and the final return are missing.
3003 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
3004 int coded, int mquant, int codingset)
3006 GetBitContext *gb = &v->s.gb;
3007 MpegEncContext *s = &v->s;
3008 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3011 int16_t *ac_val, *ac_val2;
3013 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3014 int a_avail = v->a_avail, c_avail = v->c_avail;
3015 int use_pred = s->ac_pred;
3019 s->dsp.clear_block(block);
3021 /* XXX: Guard against dumb values of mquant */
3022 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3024 /* Set DC scale - y and c use the same */
3025 s->y_dc_scale = s->y_dc_scale_table[mquant];
3026 s->c_dc_scale = s->c_dc_scale_table[mquant];
3028 /* Get DC differential */
3030 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3032 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3035 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* DC escape value 119: read an explicit differential whose width
 * depends on the MB quantizer. */
3039 if (dcdiff == 119 /* ESC index value */) {
3040 /* TODO: Optimize */
3041 if (mquant == 1) dcdiff = get_bits(gb, 10);
3042 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3043 else dcdiff = get_bits(gb, 8);
3046 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3047 else if (mquant == 2)
3048 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3055 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3058 /* Store the quantized DC coeff, used for prediction */
3061 block[0] = dcdiff * s->y_dc_scale;
3063 block[0] = dcdiff * s->c_dc_scale;
3069 /* check if AC is needed at all and adjust direction if needed */
/* Unlike the _adv variant, the direction is forced toward an available
 * neighbour instead of leaving the DC choice untouched. */
3070 if (!a_avail) dc_pred_dir = 1;
3071 if (!c_avail) dc_pred_dir = 0;
3072 if (!a_avail && !c_avail) use_pred = 0;
3073 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3076 scale = mquant * 2 + v->halfpq;
3078 if (dc_pred_dir) //left
3081 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer, q2 = the predicting neighbour's. */
3083 q1 = s->current_picture.qscale_table[mb_pos];
3084 if (dc_pred_dir && c_avail && mb_pos)
3085 q2 = s->current_picture.qscale_table[mb_pos - 1];
3086 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3087 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3088 if ( dc_pred_dir && n == 1)
3090 if (!dc_pred_dir && n == 2)
3092 if (n == 3) q2 = q1;
3095 int last = 0, skip, value;
3099 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
/* Zigzag choice: progressive table, direction-specific table when AC
 * prediction is on in an interlaced frame, interlaced table otherwise. */
3103 if (v->fcm == PROGRESSIVE)
3104 block[v->zz_8x8[0][i++]] = value;
3106 if (use_pred && (v->fcm == ILACE_FRAME)) {
3107 if (!dc_pred_dir) // top
3108 block[v->zz_8x8[2][i++]] = value;
3110 block[v->zz_8x8[3][i++]] = value;
3112 block[v->zzi_8x8[i++]] = value;
3117 /* apply AC prediction if needed */
3119 /* scale predictors if needed*/
/* Cross-quantizer rescale of the neighbour's saved AC coefficients. */
3120 if (q2 && q1 != q2) {
3121 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3122 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3125 return AVERROR_INVALIDDATA;
3126 if (dc_pred_dir) { // left
3127 for (k = 1; k < 8; k++)
3128 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3130 for (k = 1; k < 8; k++)
3131 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3134 if (dc_pred_dir) { // left
3135 for (k = 1; k < 8; k++)
3136 block[k << v->left_blk_sh] += ac_val[k];
3138 for (k = 1; k < 8; k++)
3139 block[k << v->top_blk_sh] += ac_val[k + 8];
3143 /* save AC coeffs for further prediction */
3144 for (k = 1; k < 8; k++) {
3145 ac_val2[k ] = block[k << v->left_blk_sh];
3146 ac_val2[k + 8] = block[k << v->top_blk_sh];
3149 /* scale AC coeffs */
3150 for (k = 1; k < 64; k++)
3154 block[k] += (block[k] < 0) ? -mquant : mquant;
3157 if (use_pred) i = 63;
3158 } else { // no AC coeffs
/* Not-coded path mirrors vc1_decode_i_block_adv: build and rescale the
 * predictor set, then synthesize coefficients from it. */
3161 memset(ac_val2, 0, 16 * 2);
3162 if (dc_pred_dir) { // left
3164 memcpy(ac_val2, ac_val, 8 * 2);
3165 if (q2 && q1 != q2) {
3166 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3167 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3169 return AVERROR_INVALIDDATA;
3170 for (k = 1; k < 8; k++)
3171 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3176 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3177 if (q2 && q1 != q2) {
3178 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3179 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3181 return AVERROR_INVALIDDATA;
3182 for (k = 1; k < 8; k++)
3183 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3188 /* apply AC prediction if needed */
3190 if (dc_pred_dir) { // left
3191 for (k = 1; k < 8; k++) {
3192 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3193 if (!v->pquantizer && block[k << v->left_blk_sh])
3194 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3197 for (k = 1; k < 8; k++) {
3198 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3199 if (!v->pquantizer && block[k << v->top_blk_sh])
3200 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3206 s->block_last_index[n] = i;
/** Decode a residual (inter) block in a P/B frame: pick the block transform
 * type (8x8, two 8x4, two 4x8 or four 4x4), decode the coefficients of each
 * coded sub-block and run the matching inverse transform into dst.
 * Returns via ttmb_out the transform type actually used (OR-ed per block).
 * NOTE(review): listing is elided — i/j/pat declarations, the per-subblock
 * loops' closing braces, sign handling and the final return are missing. */
3213 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3214 int mquant, int ttmb, int first_block,
3215 uint8_t *dst, int linesize, int skip_block,
3218 MpegEncContext *s = &v->s;
3219 GetBitContext *gb = &s->gb;
3222 int scale, off, idx, last, skip, value;
/* Low 3 bits of ttmb carry the per-MB transform type, if signalled. */
3223 int ttblk = ttmb & 7;
3226 s->dsp.clear_block(block);
/* Per-block transform type from the bitstream (when not fixed per MB). */
3229 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3231 if (ttblk == TT_4X4) {
3232 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3234 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3235 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3236 || (!v->res_rtm_flag && !first_block))) {
3237 subblkpat = decode012(gb);
3239 subblkpat ^= 3; // swap decoded pattern bits
3240 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3242 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3245 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3247 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3248 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3249 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3252 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3253 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* TT_8X8 case: one run of AC coefficients for the whole block. */
3262 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
/* Progressive vs interlaced zigzag (selecting condition elided). */
3267 idx = v->zz_8x8[0][i++];
3269 idx = v->zzi_8x8[i++];
3270 block[idx] = value * scale;
3272 block[idx] += (block[idx] < 0) ? -mquant : mquant;
/* DC-only fast path vs full inverse transform + add to prediction. */
3276 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3278 v->vc1dsp.vc1_inv_trans_8x8(block);
3279 s->dsp.add_pixels_clamped(block, dst, linesize);
/* TT_4X4 case: four 4x4 sub-blocks; a set bit in subblkpat means the
 * sub-block is NOT coded (pat returns the coded ones to the caller). */
3284 pat = ~subblkpat & 0xF;
3285 for (j = 0; j < 4; j++) {
3286 last = subblkpat & (1 << (3 - j));
3288 off = (j & 1) * 4 + (j & 2) * 16;
3290 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3295 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3297 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3298 block[idx + off] = value * scale;
3300 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3302 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3304 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3306 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* TT_8X4 case: top and bottom 8x4 halves. */
3311 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3312 for (j = 0; j < 2; j++) {
3313 last = subblkpat & (1 << (1 - j));
3317 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3322 idx = v->zz_8x4[i++] + off;
3324 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3325 block[idx] = value * scale;
3327 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3329 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3331 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3333 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* TT_4X8 case: left and right 4x8 halves. */
3338 pat = ~(subblkpat * 5) & 0xF;
3339 for (j = 0; j < 2; j++) {
3340 last = subblkpat & (1 << (1 - j));
3344 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3349 idx = v->zz_4x8[i++] + off;
3351 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3352 block[idx] = value * scale;
3354 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3356 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3358 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3360 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* Record this block's transform type in the caller's per-MB word. */
3366 *ttmb_out |= ttblk << (n * 4);
3370 /** @} */ // Macroblock group
/* Lookup tables indexed by a decoded MV size code — presumably used by the
 * MVDATA differential decoding below; usage is not visible in this chunk
 * (TODO confirm against the callers). */
3372 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3373 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/** Apply the in-loop vertical deblocking filter for one 8x8 block of the
 * macroblock one row above the current position (its cbp/is_intra entries
 * are read at s->mb_x - s->mb_stride). Filters the bottom MB edge and, for
 * 4x4/8x4 transforms, the internal horizontal edge.
 * NOTE(review): listing is elided — the dst declaration, some else-arms
 * and closing braces are not visible here. */
3375 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3377 MpegEncContext *s = &v->s;
3378 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3379 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3380 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3381 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3382 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
/* Blocks 4/5 are the chroma planes; 0-3 are the four luma 8x8 blocks. */
3385 if (block_num > 3) {
3386 dst = s->dest[block_num - 3];
3388 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
/* Skip the bottom-edge filtering on the last row except for the top
 * luma blocks, whose lower neighbour is inside the same MB. */
3390 if (s->mb_y != s->end_mb_y || block_num < 2) {
/* Gather the cbp/intra flags and MVs of the block below the edge. */
3394 if (block_num > 3) {
3395 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3396 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3397 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3398 mv_stride = s->mb_stride;
3400 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3401 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3402 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3403 : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3404 mv_stride = s->b8_stride;
3405 mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* Filter the full 8-pel edge when either side is intra or their MVs
 * differ; otherwise filter only the 4-pel halves flagged by cbp. */
3408 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3409 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3410 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3412 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3414 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3417 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3419 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* Internal horizontal edge: only for transforms with a 4-row split. */
3424 dst -= 4 * linesize;
3425 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3426 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3427 idx = (block_cbp | (block_cbp >> 2)) & 3;
3429 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3432 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3434 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/** Apply the in-loop horizontal deblocking filter for one 8x8 block of the
 * macroblock one row up and one column left (cbp/is_intra read at
 * s->mb_x - 1 - s->mb_stride). Horizontal counterpart of
 * vc1_apply_p_v_loop_filter: right MB edge, then the internal vertical
 * edge for 4x4/4x8 transforms.
 * NOTE(review): listing is elided — the dst declaration, some else-arms
 * and closing braces are not visible here. */
3439 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3441 MpegEncContext *s = &v->s;
3442 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3443 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3444 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3445 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3446 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3449 if (block_num > 3) {
3450 dst = s->dest[block_num - 3] - 8 * linesize;
3452 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
/* Filter the right edge except on the last column for right-side blocks
 * (block_num & 5 distinguishes left/right luma columns and chroma). */
3455 if (s->mb_x != s->mb_width || !(block_num & 5)) {
/* Gather cbp/intra flags and MVs of the block to the right of the edge. */
3458 if (block_num > 3) {
3459 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3460 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3461 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3463 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3464 : (mb_cbp >> ((block_num + 1) * 4));
3465 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3466 : (mb_is_intra >> ((block_num + 1) * 4));
3467 mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
/* Full 8-pel edge when either side is intra or their MVs differ. */
3469 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3470 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3472 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3474 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3477 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3479 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* Internal vertical edge: only for transforms with a 4-column split. */
3485 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3486 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3487 idx = (block_cbp | (block_cbp >> 1)) & 5;
3489 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3492 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3494 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3499 static void vc1_apply_p_loop_filter(VC1Context *v)
/* Run the P-frame in-loop deblocking filter for the current MB position:
 * vertical edges of all six blocks first, then horizontal edges.
 * NOTE(review): interior lines of this chunk are elided. */
3501 MpegEncContext *s = &v->s;
3504 for (i = 0; i < 6; i++) {
3505 vc1_apply_p_v_loop_filter(v, i);
3508 /* V always precedes H, therefore we run H one MB before V;
3509 * at the end of a row, we catch up to complete the row */
3511 for (i = 0; i < 6; i++) {
3512 vc1_apply_p_h_loop_filter(v, i);
/* last MB of the row: advance and run H once more to finish the row */
3514 if (s->mb_x == s->mb_width - 1) {
3516 ff_update_block_index(s);
3517 for (i = 0; i < 6; i++) {
3518 vc1_apply_p_h_loop_filter(v, i);
3524 /** Decode one P-frame MB
/* Progressive P-frame macroblock decode: reads skip/MV-type flags, motion
 * vector data, CBP and per-block coefficients from the bitstream, performs
 * motion compensation (1MV or 4MV) and reconstruction, and caches CBP /
 * transform-type / intra masks in v->cbp, v->ttblk, v->is_intra for the
 * deblocking pass. NOTE(review): interior lines of this chunk are elided;
 * comments describe only the statements visible here. */
3526 static int vc1_decode_p_mb(VC1Context *v)
3528 MpegEncContext *s = &v->s;
3529 GetBitContext *gb = &s->gb;
3531 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3532 int cbp; /* cbp decoding stuff */
3533 int mqdiff, mquant; /* MB quantization */
3534 int ttmb = v->ttfrm; /* MB Transform type */
3536 int mb_has_coeffs = 1; /* last_flag */
3537 int dmv_x, dmv_y; /* Differential MV components */
3538 int index, index1; /* LUT indexes */
3539 int val, sign; /* temp values */
3540 int first_block = 1;
3542 int skipped, fourmv;
3543 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3545 mquant = v->pq; /* lossy initialization */
/* MV type (1MV vs 4MV) is either raw-coded per MB or taken from a bitplane */
3547 if (v->mv_type_is_raw)
3548 fourmv = get_bits1(gb);
3550 fourmv = v->mv_type_mb_plane[mb_pos];
3552 skipped = get_bits1(gb);
3554 skipped = v->s.mbskip_table[mb_pos];
3556 if (!fourmv) { /* 1MV mode */
3558 GET_MVDATA(dmv_x, dmv_y);
/* clear second (backward) MV slot — P frames use only forward prediction */
3561 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3562 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3564 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3565 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3567 /* FIXME Set DC val for inter block ? */
3568 if (s->mb_intra && !mb_has_coeffs) {
3570 s->ac_pred = get_bits1(gb);
3572 } else if (mb_has_coeffs) {
3574 s->ac_pred = get_bits1(gb);
3575 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3581 s->current_picture.qscale_table[mb_pos] = mquant;
/* per-MB transform type only when ttmbf==0 and the MB has coefficients */
3583 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3584 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3585 VC1_TTMB_VLC_BITS, 2);
3586 if (!s->mb_intra) vc1_mc_1mv(v, 0);
/* reconstruct all 6 blocks (4 luma + 2 chroma) */
3588 for (i = 0; i < 6; i++) {
3589 s->dc_val[0][s->block_index[i]] = 0;
3591 val = ((cbp >> (5 - i)) & 1);
3592 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3593 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3595 /* check if prediction blocks A and C are available */
3596 v->a_avail = v->c_avail = 0;
3597 if (i == 2 || i == 3 || !s->first_slice_line)
3598 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3599 if (i == 1 || i == 3 || s->mb_x)
3600 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3602 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3603 (i & 4) ? v->codingset2 : v->codingset);
/* skip chroma reconstruction in grayscale-only mode */
3604 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3606 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* presumably range-reduction doubling of residuals — TODO confirm
 * against the rangeredfrm handling elided from this chunk */
3608 for (j = 0; j < 64; j++)
3609 s->block[i][j] <<= 1;
3610 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* overlap smoothing only for high quantizers (pq >= 9) */
3611 if (v->pq >= 9 && v->overlap) {
3613 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3615 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* intra blocks count as fully coded for the loop-filter cache */
3617 block_cbp |= 0xF << (i << 2);
3618 block_intra |= 1 << i;
3620 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3621 s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3622 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3623 block_cbp |= pat << (i << 2);
3624 if (!v->ttmbf && ttmb < 8)
3631 for (i = 0; i < 6; i++) {
3632 v->mb_type[0][s->block_index[i]] = 0;
3633 s->dc_val[0][s->block_index[i]] = 0;
3635 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3636 s->current_picture.qscale_table[mb_pos] = 0;
/* skipped 1MV MB: predict MV from neighbours with zero differential */
3637 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3640 } else { // 4MV mode
3641 if (!skipped /* unskipped MB */) {
3642 int intra_count = 0, coded_inter = 0;
3643 int is_intra[6], is_coded[6];
3645 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* first pass over luma blocks: read MVs, motion compensate, record
 * per-block intra/coded flags */
3646 for (i = 0; i < 6; i++) {
3647 val = ((cbp >> (5 - i)) & 1);
3648 s->dc_val[0][s->block_index[i]] = 0;
3655 GET_MVDATA(dmv_x, dmv_y);
3657 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3659 vc1_mc_4mv_luma(v, i, 0, 0);
3660 intra_count += s->mb_intra;
3661 is_intra[i] = s->mb_intra;
3662 is_coded[i] = mb_has_coeffs;
/* chroma blocks are intra when at least 3 of 4 luma blocks are intra */
3665 is_intra[i] = (intra_count >= 3);
3669 vc1_mc_4mv_chroma(v, 0);
3670 v->mb_type[0][s->block_index[i]] = is_intra[i];
/* NOTE(review): bitwise & on boolean flags — equivalent to && here
 * since both operands are 0/1 */
3672 coded_inter = !is_intra[i] & is_coded[i];
3674 // if there are no coded blocks then don't do anything more
3676 if (!intra_count && !coded_inter)
3679 s->current_picture.qscale_table[mb_pos] = mquant;
3680 /* test if block is intra and has pred */
3683 for (i = 0; i < 6; i++)
3685 if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3686 || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3692 s->ac_pred = get_bits1(gb);
3696 if (!v->ttmbf && coded_inter)
3697 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* second pass: reconstruct each block */
3698 for (i = 0; i < 6; i++) {
3700 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3701 s->mb_intra = is_intra[i];
3703 /* check if prediction blocks A and C are available */
3704 v->a_avail = v->c_avail = 0;
3705 if (i == 2 || i == 3 || !s->first_slice_line)
3706 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3707 if (i == 1 || i == 3 || s->mb_x)
3708 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3710 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3711 (i & 4) ? v->codingset2 : v->codingset);
3712 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3714 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3716 for (j = 0; j < 64; j++)
3717 s->block[i][j] <<= 1;
3718 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3719 (i & 4) ? s->uvlinesize : s->linesize);
3720 if (v->pq >= 9 && v->overlap) {
3722 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3724 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3726 block_cbp |= 0xF << (i << 2);
3727 block_intra |= 1 << i;
3728 } else if (is_coded[i]) {
3729 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3730 first_block, s->dest[dst_idx] + off,
3731 (i & 4) ? s->uvlinesize : s->linesize,
3732 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3734 block_cbp |= pat << (i << 2);
3735 if (!v->ttmbf && ttmb < 8)
3740 } else { // skipped MB
3742 s->current_picture.qscale_table[mb_pos] = 0;
3743 for (i = 0; i < 6; i++) {
3744 v->mb_type[0][s->block_index[i]] = 0;
3745 s->dc_val[0][s->block_index[i]] = 0;
/* skipped 4MV MB: zero-differential MV prediction + MC per luma block */
3747 for (i = 0; i < 4; i++) {
3748 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3749 vc1_mc_4mv_luma(v, i, 0, 0);
3751 vc1_mc_4mv_chroma(v, 0);
/* NOTE(review): qscale_table[mb_pos] is already 0 (set above) — this
 * second store is redundant but harmless */
3752 s->current_picture.qscale_table[mb_pos] = 0;
/* cache per-MB masks for the deblocking pass */
3756 v->cbp[s->mb_x] = block_cbp;
3757 v->ttblk[s->mb_x] = block_tt;
3758 v->is_intra[s->mb_x] = block_intra;
3763 /* Decode one macroblock in an interlaced frame p picture */
/* Interlaced-frame P-MB decode: selects among 1MV / 2MV-field / 4MV /
 * 4MV-field / intra modes via the MBMODE VLC, handles field-transform
 * (fieldtx) addressing for luma, and records is_intra per MB column.
 * NOTE(review): interior lines of this chunk are elided; comments describe
 * only the statements visible here. */
3765 static int vc1_decode_p_mb_intfr(VC1Context *v)
3767 MpegEncContext *s = &v->s;
3768 GetBitContext *gb = &s->gb;
3770 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3771 int cbp = 0; /* cbp decoding stuff */
3772 int mqdiff, mquant; /* MB quantization */
3773 int ttmb = v->ttfrm; /* MB Transform type */
3775 int mb_has_coeffs = 1; /* last_flag */
3776 int dmv_x, dmv_y; /* Differential MV components */
3777 int val; /* temp value */
3778 int first_block = 1;
3780 int skipped, fourmv = 0, twomv = 0;
3781 int block_cbp = 0, pat, block_tt = 0;
3782 int idx_mbmode = 0, mvbp;
3783 int stride_y, fieldtx;
3785 mquant = v->pq; /* Loosy initialization */
3788 skipped = get_bits1(gb);
3790 skipped = v->s.mbskip_table[mb_pos];
/* MBMODE table depends on whether 4MV is enabled for this picture */
3792 if (v->fourmvswitch)
3793 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3795 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3796 switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3797 /* store the motion vector type in a flag (useful later) */
3798 case MV_PMODE_INTFR_4MV:
3800 v->blk_mv_type[s->block_index[0]] = 0;
3801 v->blk_mv_type[s->block_index[1]] = 0;
3802 v->blk_mv_type[s->block_index[2]] = 0;
3803 v->blk_mv_type[s->block_index[3]] = 0;
3805 case MV_PMODE_INTFR_4MV_FIELD:
/* blk_mv_type = 1 marks field (as opposed to frame) motion vectors */
3807 v->blk_mv_type[s->block_index[0]] = 1;
3808 v->blk_mv_type[s->block_index[1]] = 1;
3809 v->blk_mv_type[s->block_index[2]] = 1;
3810 v->blk_mv_type[s->block_index[3]] = 1;
3812 case MV_PMODE_INTFR_2MV_FIELD:
3814 v->blk_mv_type[s->block_index[0]] = 1;
3815 v->blk_mv_type[s->block_index[1]] = 1;
3816 v->blk_mv_type[s->block_index[2]] = 1;
3817 v->blk_mv_type[s->block_index[3]] = 1;
3819 case MV_PMODE_INTFR_1MV:
3820 v->blk_mv_type[s->block_index[0]] = 0;
3821 v->blk_mv_type[s->block_index[1]] = 0;
3822 v->blk_mv_type[s->block_index[2]] = 0;
3823 v->blk_mv_type[s->block_index[3]] = 0;
3826 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3827 for (i = 0; i < 4; i++) {
3828 s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3829 s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3831 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3832 s->mb_intra = v->is_intra[s->mb_x] = 1;
3833 for (i = 0; i < 6; i++)
3834 v->mb_type[0][s->block_index[i]] = 1;
/* fieldtx: luma rows of this intra MB are stored field-interleaved */
3835 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3836 mb_has_coeffs = get_bits1(gb);
3838 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3839 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3841 s->current_picture.qscale_table[mb_pos] = mquant;
3842 /* Set DC scale - y and c use the same (not sure if necessary here) */
3843 s->y_dc_scale = s->y_dc_scale_table[mquant];
3844 s->c_dc_scale = s->c_dc_scale_table[mquant];
3846 for (i = 0; i < 6; i++) {
3847 s->dc_val[0][s->block_index[i]] = 0;
3849 val = ((cbp >> (5 - i)) & 1);
3850 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3851 v->a_avail = v->c_avail = 0;
3852 if (i == 2 || i == 3 || !s->first_slice_line)
3853 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3854 if (i == 1 || i == 3 || s->mb_x)
3855 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3857 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3858 (i & 4) ? v->codingset2 : v->codingset);
3859 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3860 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* with fieldtx, luma lines of one field are 2*linesize apart and the
 * two 8x8 rows of the MB map to the two fields */
3862 stride_y = s->linesize << fieldtx;
3863 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3865 stride_y = s->uvlinesize;
3868 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3872 } else { // inter MB
3873 mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3875 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* read the per-block MV presence pattern for multi-MV modes */
3876 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3877 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3879 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3880 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3881 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3884 s->mb_intra = v->is_intra[s->mb_x] = 0;
3885 for (i = 0; i < 6; i++)
3886 v->mb_type[0][s->block_index[i]] = 0;
3887 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3888 /* for all motion vector read MVDATA and motion compensate each block */
/* 4MV path: one MV per luma block, chroma compensated from all four */
3892 for (i = 0; i < 6; i++) {
3895 val = ((mvbp >> (3 - i)) & 1);
3897 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3899 vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3900 vc1_mc_4mv_luma(v, i, 0, 0);
3901 } else if (i == 4) {
3902 vc1_mc_4mv_chroma4(v, 0, 0, 0);
/* 2MV-field path: one MV per field, each applied to a block pair */
3909 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3911 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3912 vc1_mc_4mv_luma(v, 0, 0, 0);
3913 vc1_mc_4mv_luma(v, 1, 0, 0);
3916 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3918 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3919 vc1_mc_4mv_luma(v, 2, 0, 0);
3920 vc1_mc_4mv_luma(v, 3, 0, 0);
3921 vc1_mc_4mv_chroma4(v, 0, 0, 0);
/* 1MV path: MV presence comes straight from the mode table */
3923 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3926 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3928 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3932 GET_MQUANT(); // p. 227
3933 s->current_picture.qscale_table[mb_pos] = mquant;
3934 if (!v->ttmbf && cbp)
3935 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3936 for (i = 0; i < 6; i++) {
3937 s->dc_val[0][s->block_index[i]] = 0;
3939 val = ((cbp >> (5 - i)) & 1);
/* block offset differs between frame-tx and field-tx luma layouts */
3941 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3943 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3945 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3946 first_block, s->dest[dst_idx] + off,
3947 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3948 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3949 block_cbp |= pat << (i << 2);
3950 if (!v->ttmbf && ttmb < 8)
/* skipped MB: zero everything and do zero-MV prediction/compensation */
3957 s->mb_intra = v->is_intra[s->mb_x] = 0;
3958 for (i = 0; i < 6; i++) {
3959 v->mb_type[0][s->block_index[i]] = 0;
3960 s->dc_val[0][s->block_index[i]] = 0;
3962 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3963 s->current_picture.qscale_table[mb_pos] = 0;
3964 v->blk_mv_type[s->block_index[0]] = 0;
3965 v->blk_mv_type[s->block_index[1]] = 0;
3966 v->blk_mv_type[s->block_index[2]] = 0;
3967 v->blk_mv_type[s->block_index[3]] = 0;
3968 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
/* end of row: save the is_intra line for the next row's predictions */
3971 if (s->mb_x == s->mb_width - 1)
3972 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/* Interlaced-field P-MB decode: the MBMODE VLC selects intra (<=1),
 * 1-MV (<=5) or 4-MV modes; field pictures use v->mb_off / v->blocks_off
 * to address the second field inside the shared picture buffers.
 * NOTE(review): interior lines of this chunk are elided; comments describe
 * only the statements visible here. */
3976 static int vc1_decode_p_mb_intfi(VC1Context *v)
3978 MpegEncContext *s = &v->s;
3979 GetBitContext *gb = &s->gb;
3981 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3982 int cbp = 0; /* cbp decoding stuff */
3983 int mqdiff, mquant; /* MB quantization */
3984 int ttmb = v->ttfrm; /* MB Transform type */
3986 int mb_has_coeffs = 1; /* last_flag */
3987 int dmv_x, dmv_y; /* Differential MV components */
3988 int val; /* temp values */
3989 int first_block = 1;
3992 int block_cbp = 0, pat, block_tt = 0;
3995 mquant = v->pq; /* Loosy initialization */
3997 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
3998 if (idx_mbmode <= 1) { // intra MB
3999 s->mb_intra = v->is_intra[s->mb_x] = 1;
4000 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4001 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4002 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4004 s->current_picture.qscale_table[mb_pos] = mquant;
4005 /* Set DC scale - y and c use the same (not sure if necessary here) */
4006 s->y_dc_scale = s->y_dc_scale_table[mquant];
4007 s->c_dc_scale = s->c_dc_scale_table[mquant];
4008 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
/* low bit of the mode index signals the presence of coefficients */
4009 mb_has_coeffs = idx_mbmode & 1;
4011 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4013 for (i = 0; i < 6; i++) {
4014 s->dc_val[0][s->block_index[i]] = 0;
4015 v->mb_type[0][s->block_index[i]] = 1;
4017 val = ((cbp >> (5 - i)) & 1);
4018 v->a_avail = v->c_avail = 0;
4019 if (i == 2 || i == 3 || !s->first_slice_line)
4020 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4021 if (i == 1 || i == 3 || s->mb_x)
4022 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4024 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4025 (i & 4) ? v->codingset2 : v->codingset);
4026 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4028 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4029 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4030 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4031 // TODO: loop filter
4034 s->mb_intra = v->is_intra[s->mb_x] = 0;
4035 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4036 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4037 if (idx_mbmode <= 5) { // 1-MV
4038 dmv_x = dmv_y = pred_flag = 0;
4039 if (idx_mbmode & 1) {
4040 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4042 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4044 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV mode: per-block MV presence pattern, then one MV per luma block */
4046 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4047 for (i = 0; i < 6; i++) {
4049 dmv_x = dmv_y = pred_flag = 0;
4050 val = ((v->fourmvbp >> (3 - i)) & 1);
4052 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4054 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4055 vc1_mc_4mv_luma(v, i, 0, 0);
4057 vc1_mc_4mv_chroma(v, 0);
4059 mb_has_coeffs = idx_mbmode & 1;
4062 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4066 s->current_picture.qscale_table[mb_pos] = mquant;
4067 if (!v->ttmbf && cbp) {
4068 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4071 for (i = 0; i < 6; i++) {
4072 s->dc_val[0][s->block_index[i]] = 0;
4074 val = ((cbp >> (5 - i)) & 1);
4075 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4077 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4078 first_block, s->dest[dst_idx] + off,
4079 (i & 4) ? s->uvlinesize : s->linesize,
4080 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4082 block_cbp |= pat << (i << 2);
4083 if (!v->ttmbf && ttmb < 8) ttmb = -1;
/* end of row: save is_intra line for the next row's predictions */
4088 if (s->mb_x == s->mb_width - 1)
4089 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4093 /** Decode one B-frame MB (in Main profile)
/* Progressive B-frame macroblock decode: handles direct / forward /
 * backward / interpolated prediction, the several skipped/uncoded
 * shortcut paths, and per-block reconstruction.
 * NOTE(review): interior lines of this chunk are elided; comments describe
 * only the statements visible here. */
4095 static void vc1_decode_b_mb(VC1Context *v)
4097 MpegEncContext *s = &v->s;
4098 GetBitContext *gb = &s->gb;
4100 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4101 int cbp = 0; /* cbp decoding stuff */
4102 int mqdiff, mquant; /* MB quantization */
4103 int ttmb = v->ttfrm; /* MB Transform type */
4104 int mb_has_coeffs = 0; /* last_flag */
4105 int index, index1; /* LUT indexes */
4106 int val, sign; /* temp values */
4107 int first_block = 1;
4109 int skipped, direct;
/* [0] = forward, [1] = backward differential MV */
4110 int dmv_x[2], dmv_y[2];
4111 int bmvtype = BMV_TYPE_BACKWARD;
4113 mquant = v->pq; /* lossy initialization */
/* direct/skip flags: raw-coded per MB or read from a decoded bitplane */
4117 direct = get_bits1(gb);
4119 direct = v->direct_mb_plane[mb_pos];
4121 skipped = get_bits1(gb);
4123 skipped = v->s.mbskip_table[mb_pos];
4125 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4126 for (i = 0; i < 6; i++) {
4127 v->mb_type[0][s->block_index[i]] = 0;
4128 s->dc_val[0][s->block_index[i]] = 0;
4130 s->current_picture.qscale_table[mb_pos] = 0;
4134 GET_MVDATA(dmv_x[0], dmv_y[0]);
4135 dmv_x[1] = dmv_x[0];
4136 dmv_y[1] = dmv_y[0];
4138 if (skipped || !s->mb_intra) {
/* 0 = backward, 10 = forward, 11 = interpolated — direction may be
 * swapped depending on bfraction (closer reference) */
4139 bmvtype = decode012(gb);
4142 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4145 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4148 bmvtype = BMV_TYPE_INTERPOLATED;
4149 dmv_x[0] = dmv_y[0] = 0;
4153 for (i = 0; i < 6; i++)
4154 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4158 bmvtype = BMV_TYPE_INTERPOLATED;
4159 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4160 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4164 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4167 s->current_picture.qscale_table[mb_pos] = mquant;
4169 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* skipped non-direct: zero differentials, predict + compensate */
4170 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4171 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4172 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4174 if (!mb_has_coeffs && !s->mb_intra) {
4175 /* no coded blocks - effectively skipped */
4176 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4177 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4180 if (s->mb_intra && !mb_has_coeffs) {
4182 s->current_picture.qscale_table[mb_pos] = mquant;
4183 s->ac_pred = get_bits1(gb);
4185 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4187 if (bmvtype == BMV_TYPE_INTERPOLATED) {
/* interpolated mode carries a second (forward) MV in the bitstream */
4188 GET_MVDATA(dmv_x[0], dmv_y[0]);
4189 if (!mb_has_coeffs) {
4190 /* interpolated skipped block */
4191 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4192 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4196 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4198 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4201 s->ac_pred = get_bits1(gb);
4202 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4204 s->current_picture.qscale_table[mb_pos] = mquant;
4205 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4206 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* reconstruct all 6 blocks */
4210 for (i = 0; i < 6; i++) {
4211 s->dc_val[0][s->block_index[i]] = 0;
4213 val = ((cbp >> (5 - i)) & 1);
4214 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4215 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4217 /* check if prediction blocks A and C are available */
4218 v->a_avail = v->c_avail = 0;
4219 if (i == 2 || i == 3 || !s->first_slice_line)
4220 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4221 if (i == 1 || i == 3 || s->mb_x)
4222 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4224 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4225 (i & 4) ? v->codingset2 : v->codingset);
4226 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4228 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* presumably range-reduction doubling of residuals — TODO confirm
 * against the rangeredfrm handling elided from this chunk */
4230 for (j = 0; j < 64; j++)
4231 s->block[i][j] <<= 1;
4232 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* B frames are not used as references, so CBP/ttblk are not cached
 * for the loop filter here (NULL last argument) */
4234 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4235 first_block, s->dest[dst_idx] + off,
4236 (i & 4) ? s->uvlinesize : s->linesize,
4237 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4238 if (!v->ttmbf && ttmb < 8)
4245 /** Decode one B-frame MB (in interlaced field B picture)
/* Interlaced-field B-MB decode: MBMODE VLC selects intra (<=1), 1-MV
 * (<=5, with forward/backward/direct/interpolated sub-type) or 4-MV;
 * a per-MB "forward" bitplane restricts 4-MV MBs to one direction.
 * NOTE(review): interior lines of this chunk are elided; comments describe
 * only the statements visible here. */
4247 static void vc1_decode_b_mb_intfi(VC1Context *v)
4249 MpegEncContext *s = &v->s;
4250 GetBitContext *gb = &s->gb;
4252 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4253 int cbp = 0; /* cbp decoding stuff */
4254 int mqdiff, mquant; /* MB quantization */
4255 int ttmb = v->ttfrm; /* MB Transform type */
4256 int mb_has_coeffs = 0; /* last_flag */
4257 int val; /* temp value */
4258 int first_block = 1;
/* [0] = forward, [1] = backward differential MV and hybrid-pred flag */
4261 int dmv_x[2], dmv_y[2], pred_flag[2];
4262 int bmvtype = BMV_TYPE_BACKWARD;
4263 int idx_mbmode, interpmvp;
4265 mquant = v->pq; /* Loosy initialization */
4268 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4269 if (idx_mbmode <= 1) { // intra MB
4270 s->mb_intra = v->is_intra[s->mb_x] = 1;
4271 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4272 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4273 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4275 s->current_picture.qscale_table[mb_pos] = mquant;
4276 /* Set DC scale - y and c use the same (not sure if necessary here) */
4277 s->y_dc_scale = s->y_dc_scale_table[mquant];
4278 s->c_dc_scale = s->c_dc_scale_table[mquant];
4279 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4280 mb_has_coeffs = idx_mbmode & 1;
4282 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4284 for (i = 0; i < 6; i++) {
4285 s->dc_val[0][s->block_index[i]] = 0;
4287 val = ((cbp >> (5 - i)) & 1);
4288 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4289 v->a_avail = v->c_avail = 0;
4290 if (i == 2 || i == 3 || !s->first_slice_line)
4291 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4292 if (i == 1 || i == 3 || s->mb_x)
4293 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4295 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4296 (i & 4) ? v->codingset2 : v->codingset);
4297 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4299 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* presumably range-reduction doubling of residuals — TODO confirm */
4301 for (j = 0; j < 64; j++)
4302 s->block[i][j] <<= 1;
4303 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4304 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4305 // TODO: yet to perform loop filter
4308 s->mb_intra = v->is_intra[s->mb_x] = 0;
4309 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4310 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
/* forward flag: raw-coded per MB or taken from the FORWARDMB bitplane */
4312 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4314 fwd = v->forward_mb_plane[mb_pos];
4315 if (idx_mbmode <= 5) { // 1-MV
4316 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4317 pred_flag[0] = pred_flag[1] = 0;
4319 bmvtype = BMV_TYPE_FORWARD;
/* 0 = backward, 10 = direct, 11 = interpolated */
4321 bmvtype = decode012(gb);
4324 bmvtype = BMV_TYPE_BACKWARD;
4327 bmvtype = BMV_TYPE_DIRECT;
4330 bmvtype = BMV_TYPE_INTERPOLATED;
4331 interpmvp = get_bits1(gb);
4334 v->bmvtype = bmvtype;
/* MV data present only when the mode index has its low bit set */
4335 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4336 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4338 if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4339 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4341 if (bmvtype == BMV_TYPE_DIRECT) {
4342 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
/* NOTE(review): pred_flag[0] is written twice; pred_flag[1] looks like
 * the intended target here — verify against the reference decoder */
4343 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4345 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4346 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4347 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV mode: restricted to one direction, chosen by the forward flag */
4350 bmvtype = BMV_TYPE_FORWARD;
4351 v->bmvtype = bmvtype;
4352 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4353 for (i = 0; i < 6; i++) {
4355 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4356 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4357 val = ((v->fourmvbp >> (3 - i)) & 1);
4359 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4360 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4361 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4363 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4364 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
4366 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4368 mb_has_coeffs = idx_mbmode & 1;
4371 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4375 s->current_picture.qscale_table[mb_pos] = mquant;
4376 if (!v->ttmbf && cbp) {
4377 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4380 for (i = 0; i < 6; i++) {
4381 s->dc_val[0][s->block_index[i]] = 0;
4383 val = ((cbp >> (5 - i)) & 1);
4384 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
/* B fields are not loop-filter sources: NULL ttblk output */
4386 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4387 first_block, s->dest[dst_idx] + off,
4388 (i & 4) ? s->uvlinesize : s->linesize,
4389 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4390 if (!v->ttmbf && ttmb < 8)
4398 /** Decode one B-frame MB (in interlaced frame B picture)
4400 static int vc1_decode_b_mb_intfr(VC1Context *v)
4402 MpegEncContext *s = &v->s;
4403 GetBitContext *gb = &s->gb;
4405 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4406 int cbp = 0; /* cbp decoding stuff */
4407 int mqdiff, mquant; /* MB quantization */
4408 int ttmb = v->ttfrm; /* MB Transform type */
4409 int mvsw = 0; /* motion vector switch */
4410 int mb_has_coeffs = 1; /* last_flag */
4411 int dmv_x, dmv_y; /* Differential MV components */
4412 int val; /* temp value */
4413 int first_block = 1;
4415 int skipped, direct, twomv = 0;
4416 int block_cbp = 0, pat, block_tt = 0;
4417 int idx_mbmode = 0, mvbp;
4418 int stride_y, fieldtx;
4419 int bmvtype = BMV_TYPE_BACKWARD;
4422 mquant = v->pq; /* Lossy initialization */
4425 skipped = get_bits1(gb);
4427 skipped = v->s.mbskip_table[mb_pos];
4430 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
4431 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
4433 v->blk_mv_type[s->block_index[0]] = 1;
4434 v->blk_mv_type[s->block_index[1]] = 1;
4435 v->blk_mv_type[s->block_index[2]] = 1;
4436 v->blk_mv_type[s->block_index[3]] = 1;
4438 v->blk_mv_type[s->block_index[0]] = 0;
4439 v->blk_mv_type[s->block_index[1]] = 0;
4440 v->blk_mv_type[s->block_index[2]] = 0;
4441 v->blk_mv_type[s->block_index[3]] = 0;
4446 direct = get_bits1(gb);
4448 direct = v->direct_mb_plane[mb_pos];
4451 s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
4452 s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
4453 s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
4454 s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
4457 s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
4458 s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
4459 s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
4460 s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
4462 for (i = 1; i < 4; i += 2) {
4463 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
4464 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
4465 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
4466 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
4469 for (i = 1; i < 4; i++) {
4470 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
4471 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
4472 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
4473 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
4478 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
4479 for (i = 0; i < 4; i++) {
4480 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
4481 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
4482 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
4483 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
4485 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4486 s->mb_intra = v->is_intra[s->mb_x] = 1;
4487 for (i = 0; i < 6; i++)
4488 v->mb_type[0][s->block_index[i]] = 1;
4489 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
4490 mb_has_coeffs = get_bits1(gb);
4492 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4493 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4495 s->current_picture.qscale_table[mb_pos] = mquant;
4496 /* Set DC scale - y and c use the same (not sure if necessary here) */
4497 s->y_dc_scale = s->y_dc_scale_table[mquant];
4498 s->c_dc_scale = s->c_dc_scale_table[mquant];
4500 for (i = 0; i < 6; i++) {
4501 s->dc_val[0][s->block_index[i]] = 0;
4503 val = ((cbp >> (5 - i)) & 1);
4504 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4505 v->a_avail = v->c_avail = 0;
4506 if (i == 2 || i == 3 || !s->first_slice_line)
4507 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4508 if (i == 1 || i == 3 || s->mb_x)
4509 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4511 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4512 (i & 4) ? v->codingset2 : v->codingset);
4513 if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
4515 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4517 stride_y = s->linesize << fieldtx;
4518 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4520 stride_y = s->uvlinesize;
4523 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
4526 s->mb_intra = v->is_intra[s->mb_x] = 0;
4528 if (skipped || !s->mb_intra) {
4529 bmvtype = decode012(gb);
4532 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4535 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4538 bmvtype = BMV_TYPE_INTERPOLATED;
4542 if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
4543 mvsw = get_bits1(gb);
4546 if (!skipped) { // inter MB
4547 mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
4549 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4551 if (bmvtype == BMV_TYPE_INTERPOLATED & twomv) {
4552 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4553 } else if (bmvtype == BMV_TYPE_INTERPOLATED | twomv) {
4554 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
4558 for (i = 0; i < 6; i++)
4559 v->mb_type[0][s->block_index[i]] = 0;
4560 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
4561 /* for all motion vector read MVDATA and motion compensate each block */
4565 for (i = 0; i < 4; i++) {
4566 vc1_mc_4mv_luma(v, i, 0, 0);
4567 vc1_mc_4mv_luma(v, i, 1, 1);
4569 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4570 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4575 } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
4577 for (i = 0; i < 4; i++) {
4580 val = ((mvbp >> (3 - i)) & 1);
4582 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4584 vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4585 vc1_mc_4mv_luma(v, j, dir, dir);
4586 vc1_mc_4mv_luma(v, j+1, dir, dir);
4589 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4590 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4591 } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
4595 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4597 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4602 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4604 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4607 dir = bmvtype == BMV_TYPE_BACKWARD;
4614 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4615 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4619 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4620 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
4623 for (i = 0; i < 2; i++) {
4624 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4625 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4626 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4627 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4630 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4631 vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4634 vc1_mc_4mv_luma(v, 0, dir, 0);
4635 vc1_mc_4mv_luma(v, 1, dir, 0);
4636 vc1_mc_4mv_luma(v, 2, dir2, 0);
4637 vc1_mc_4mv_luma(v, 3, dir2, 0);
4638 vc1_mc_4mv_chroma4(v, dir, dir2, 0);
4640 dir = bmvtype == BMV_TYPE_BACKWARD;
4642 mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
4645 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4647 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4648 v->blk_mv_type[s->block_index[0]] = 1;
4649 v->blk_mv_type[s->block_index[1]] = 1;
4650 v->blk_mv_type[s->block_index[2]] = 1;
4651 v->blk_mv_type[s->block_index[3]] = 1;
4652 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4653 for (i = 0; i < 2; i++) {
4654 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4655 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4661 GET_MQUANT(); // p. 227
4662 s->current_picture.qscale_table[mb_pos] = mquant;
4663 if (!v->ttmbf && cbp)
4664 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4665 for (i = 0; i < 6; i++) {
4666 s->dc_val[0][s->block_index[i]] = 0;
4668 val = ((cbp >> (5 - i)) & 1);
4670 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4672 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4674 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4675 first_block, s->dest[dst_idx] + off,
4676 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4677 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4678 block_cbp |= pat << (i << 2);
4679 if (!v->ttmbf && ttmb < 8)
4687 for (i = 0; i < 6; i++) {
4688 v->mb_type[0][s->block_index[i]] = 0;
4689 s->dc_val[0][s->block_index[i]] = 0;
4691 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
4692 s->current_picture.qscale_table[mb_pos] = 0;
4693 v->blk_mv_type[s->block_index[0]] = 0;
4694 v->blk_mv_type[s->block_index[1]] = 0;
4695 v->blk_mv_type[s->block_index[2]] = 0;
4696 v->blk_mv_type[s->block_index[3]] = 0;
4699 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4700 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4701 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4703 dir = bmvtype == BMV_TYPE_BACKWARD;
4704 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4709 for (i = 0; i < 2; i++) {
4710 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4711 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4712 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4713 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4716 v->blk_mv_type[s->block_index[0]] = 1;
4717 v->blk_mv_type[s->block_index[1]] = 1;
4718 v->blk_mv_type[s->block_index[2]] = 1;
4719 v->blk_mv_type[s->block_index[3]] = 1;
4720 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4721 for (i = 0; i < 2; i++) {
4722 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4723 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4730 if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
4735 if (s->mb_x == s->mb_width - 1)
4736 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4737 v->cbp[s->mb_x] = block_cbp;
4738 v->ttblk[s->mb_x] = block_tt;
4742 /** Decode blocks of I-frame
 * (simple/main profile): per-MB loop that reads the coded-block pattern
 * and AC-prediction flag, decodes the six 8x8 intra blocks, then applies
 * the overlap filter (pq >= 9) and optional in-loop deblocking.
 * NOTE(review): this listing is a lossy extraction — braces/else branches
 * between the numbered lines are absent; code lines are kept verbatim.
 */
4744 static void vc1_decode_i_blocks(VC1Context *v)
4747 MpegEncContext *s = &v->s;
4752 /* select codingmode used for VLC tables selection */
4753 switch (v->y_ac_table_index) {
4755 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4758 v->codingset = CS_HIGH_MOT_INTRA;
4761 v->codingset = CS_MID_RATE_INTRA;
4765 switch (v->c_ac_table_index) {
4767 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4770 v->codingset2 = CS_HIGH_MOT_INTER;
4773 v->codingset2 = CS_MID_RATE_INTER;
4777 /* Set DC scale - y and c use the same */
4778 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4779 s->c_dc_scale = s->c_dc_scale_table[v->pq];
4782 s->mb_x = s->mb_y = 0;
4784 s->first_slice_line = 1;
4785 for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
4787 init_block_index(v);
4788 for (; s->mb_x < v->end_mb_x; s->mb_x++) {
4790 ff_update_block_index(s);
/* dst[0..3]: the four 8x8 luma sub-blocks of this MB; dst[4], dst[5]: chroma U and V */
4791 dst[0] = s->dest[0];
4792 dst[1] = dst[0] + 8;
4793 dst[2] = s->dest[0] + s->linesize * 8;
4794 dst[3] = dst[2] + 8;
4795 dst[4] = s->dest[1];
4796 dst[5] = s->dest[2];
4797 s->dsp.clear_blocks(s->block[0]);
4798 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4799 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4800 s->current_picture.qscale_table[mb_pos] = v->pq;
4801 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4802 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4804 // do actual MB decoding and displaying
4805 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4806 v->s.ac_pred = get_bits1(&v->s.gb);
4808 for (k = 0; k < 6; k++) {
/* val: coded flag for block k, taken from the CBP bitmask (MSB = block 0) */
4809 val = ((cbp >> (5 - k)) & 1);
4812 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4816 cbp |= val << (5 - k);
4818 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
/* chroma blocks (k > 3) are skipped when decoding grayscale-only */
4820 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4822 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
/* pq >= 9 with overlap: write signed samples so the overlap filter below can run */
4823 if (v->pq >= 9 && v->overlap) {
4825 for (j = 0; j < 64; j++)
4826 s->block[k][j] <<= 1;
4827 s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4830 for (j = 0; j < 64; j++)
4831 s->block[k][j] = (s->block[k][j] - 64) << 1;
4832 s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
/* overlap smoothing across horizontal then vertical block edges (pq >= 9 only) */
4836 if (v->pq >= 9 && v->overlap) {
4838 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4839 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4840 if (!(s->flags & CODEC_FLAG_GRAY)) {
4841 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4842 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4845 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4846 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4847 if (!s->first_slice_line) {
4848 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4849 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4850 if (!(s->flags & CODEC_FLAG_GRAY)) {
4851 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4852 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4855 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4856 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4858 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* bitstream overrun: report the damaged slice to the error concealer */
4860 if (get_bits_count(&s->gb) > v->bits) {
4861 ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4862 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4863 get_bits_count(&s->gb), v->bits);
4867 if (!v->s.loop_filter)
4868 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4870 ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4872 s->first_slice_line = 0;
4874 if (v->s.loop_filter)
4875 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4877 /* This is intentionally mb_height and not end_mb_y - unlike in advanced
4878 * profile, these only differ when decoding MSS2 rectangles. */
4879 ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4882 /** Decode blocks of I-frame for advanced profile
 * Supports slices (start_mb_y/end_mb_y), bitplane-coded or raw
 * ACPRED/OVERFLAGS/FIELDTX flags, and a per-MB quantizer (mquant).
 * Output is deferred: blocks are smoothed, clamped and deblocked one MB
 * behind the current position, with a final pass for the bottom row.
 */
4884 static void vc1_decode_i_blocks_adv(VC1Context *v)
4887 MpegEncContext *s = &v->s;
4893 GetBitContext *gb = &s->gb;
4895 /* select codingmode used for VLC tables selection */
4896 switch (v->y_ac_table_index) {
4898 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4901 v->codingset = CS_HIGH_MOT_INTRA;
4904 v->codingset = CS_MID_RATE_INTRA;
4908 switch (v->c_ac_table_index) {
4910 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4913 v->codingset2 = CS_HIGH_MOT_INTER;
4916 v->codingset2 = CS_MID_RATE_INTER;
4921 s->mb_x = s->mb_y = 0;
4923 s->first_slice_line = 1;
4924 s->mb_y = s->start_mb_y;
/* mid-frame slice start: reset the coded-block predictor state for the row above */
4925 if (s->start_mb_y) {
4927 init_block_index(v);
4928 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4929 (1 + s->b8_stride) * sizeof(*s->coded_block));
4931 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4933 init_block_index(v);
4934 for (;s->mb_x < s->mb_width; s->mb_x++) {
4935 int16_t (*block)[64] = v->block[v->cur_blk_idx];
4936 ff_update_block_index(s);
4937 s->dsp.clear_blocks(block[0]);
4938 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4939 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4940 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4941 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4943 // do actual MB decoding and displaying
/* per-MB flags: read raw from the bitstream, or take the decoded bitplane value */
4944 if (v->fieldtx_is_raw)
4945 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4946 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4947 if ( v->acpred_is_raw)
4948 v->s.ac_pred = get_bits1(&v->s.gb);
4950 v->s.ac_pred = v->acpred_plane[mb_pos];
4952 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4953 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4957 s->current_picture.qscale_table[mb_pos] = mquant;
4958 /* Set DC scale - y and c use the same */
4959 s->y_dc_scale = s->y_dc_scale_table[mquant];
4960 s->c_dc_scale = s->c_dc_scale_table[mquant];
4962 for (k = 0; k < 6; k++) {
/* val: coded flag for block k from the CBP bitmask (MSB = block 0) */
4963 val = ((cbp >> (5 - k)) & 1);
4966 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4970 cbp |= val << (5 - k);
/* availability of above/left neighbours for DC/AC prediction */
4972 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4973 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4975 vc1_decode_i_block_adv(v, block[k], k, val,
4976 (k < 4) ? v->codingset : v->codingset2, mquant);
4978 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4980 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
/* deferred output: smooth, clamp/write, then optionally deblock */
4983 vc1_smooth_overlap_filter_iblk(v);
4984 vc1_put_signed_blocks_clamped(v);
4985 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4987 if (get_bits_count(&s->gb) > v->bits) {
4988 // TODO: may need modification to handle slice coding
4989 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4990 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4991 get_bits_count(&s->gb), v->bits);
4995 if (!v->s.loop_filter)
4996 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4998 ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4999 s->first_slice_line = 0;
5002 /* raw bottom MB row */
5004 init_block_index(v);
5006 for (;s->mb_x < s->mb_width; s->mb_x++) {
5007 ff_update_block_index(s);
5008 vc1_put_signed_blocks_clamped(v);
5009 if (v->s.loop_filter)
5010 vc1_loop_filter_iblk_delayed(v, v->pq);
5012 if (v->s.loop_filter)
5013 ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
5014 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5015 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/** Decode all macroblocks of a P frame.
 * Dispatches the per-MB decoder by frame coding mode (progressive /
 * interlaced field / interlaced frame) and applies the in-loop filter
 * one row behind for progressive frames.
 */
5018 static void vc1_decode_p_blocks(VC1Context *v)
5020 MpegEncContext *s = &v->s;
5021 int apply_loop_filter;
5023 /* select codingmode used for VLC tables selection */
/* NOTE(review): this switch keys off c_ac_table_index, whereas
 * vc1_decode_i_blocks uses y_ac_table_index for the luma coding set.
 * This matches upstream as-is -- verify against the spec before changing. */
5024 switch (v->c_ac_table_index) {
5026 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
5029 v->codingset = CS_HIGH_MOT_INTRA;
5032 v->codingset = CS_MID_RATE_INTRA;
5036 switch (v->c_ac_table_index) {
5038 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
5041 v->codingset2 = CS_HIGH_MOT_INTER;
5044 v->codingset2 = CS_MID_RATE_INTER;
/* deblocking only runs here for progressive frames and when not discarded */
5048 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
5049 v->fcm == PROGRESSIVE;
5050 s->first_slice_line = 1;
5051 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
5052 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5054 init_block_index(v);
5055 for (; s->mb_x < s->mb_width; s->mb_x++) {
5056 ff_update_block_index(s);
5058 if (v->fcm == ILACE_FIELD)
5059 vc1_decode_p_mb_intfi(v);
5060 else if (v->fcm == ILACE_FRAME)
5061 vc1_decode_p_mb_intfr(v);
5062 else vc1_decode_p_mb(v);
/* filter lags one row so the row below is fully decoded */
5063 if (s->mb_y != s->start_mb_y && apply_loop_filter)
5064 vc1_apply_p_loop_filter(v);
5065 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5066 // TODO: may need modification to handle slice coding
5067 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5068 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5069 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
/* roll the per-row CBP / transform-type / intra / luma-MV context down one MB row */
5073 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
5074 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
5075 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
5076 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
5077 if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5078 s->first_slice_line = 0;
/* final filter pass for the last decoded row */
5080 if (apply_loop_filter) {
5082 init_block_index(v);
5083 for (; s->mb_x < s->mb_width; s->mb_x++) {
5084 ff_update_block_index(s);
5085 vc1_apply_p_loop_filter(v);
5088 if (s->end_mb_y >= s->start_mb_y)
5089 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5090 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5091 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/** Decode all macroblocks of a B frame.
 * Dispatches the per-MB decoder by frame coding mode (progressive /
 * interlaced field / interlaced frame).
 */
5094 static void vc1_decode_b_blocks(VC1Context *v)
5096 MpegEncContext *s = &v->s;
5098 /* select codingmode used for VLC tables selection */
/* NOTE(review): both switches key off c_ac_table_index (as in
 * vc1_decode_p_blocks); vc1_decode_i_blocks uses y_ac_table_index for
 * the luma set. Matches upstream -- verify against the spec before changing. */
5099 switch (v->c_ac_table_index) {
5101 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
5104 v->codingset = CS_HIGH_MOT_INTRA;
5107 v->codingset = CS_MID_RATE_INTRA;
5111 switch (v->c_ac_table_index) {
5113 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
5116 v->codingset2 = CS_HIGH_MOT_INTER;
5119 v->codingset2 = CS_MID_RATE_INTER;
5123 s->first_slice_line = 1;
5124 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5126 init_block_index(v);
5127 for (; s->mb_x < s->mb_width; s->mb_x++) {
5128 ff_update_block_index(s);
5130 if (v->fcm == ILACE_FIELD)
5131 vc1_decode_b_mb_intfi(v);
5132 else if (v->fcm == ILACE_FRAME)
5133 vc1_decode_b_mb_intfr(v);
5136 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5137 // TODO: may need modification to handle slice coding
5138 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5139 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5140 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5143 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
5145 if (!v->s.loop_filter)
5146 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5148 ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5149 s->first_slice_line = 0;
5151 if (v->s.loop_filter)
5152 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5153 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5154 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/** Handle a skipped P frame: copy the previous picture verbatim,
 * one MB row at a time (16 luma lines, 8 chroma lines per row).
 * Requires a valid reference picture; pict_type is forced to P at the end.
 */
5157 static void vc1_decode_skip_blocks(VC1Context *v)
5159 MpegEncContext *s = &v->s;
/* no reference picture available -> nothing can be copied */
5161 if (!v->s.last_picture.f.data[0])
5164 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
5165 s->first_slice_line = 1;
5166 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5168 init_block_index(v);
5169 ff_update_block_index(s);
5170 memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
5171 memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5172 memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5173 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5174 s->first_slice_line = 0;
5176 s->pict_type = AV_PICTURE_TYPE_P;
/** Top-level block-decoding entry point.
 * Resets escape-coding state, hands X8-coded intra frames to the IntraX8
 * decoder, then dispatches by picture type; I/BI and skipped-P pictures
 * get their dedicated paths, and the advanced profile uses the _adv
 * I-block decoder.
 */
5179 void ff_vc1_decode_blocks(VC1Context *v)
5182 v->s.esc3_level_length = 0;
/* X8 intra coding: the whole picture is decoded by the IntraX8 module */
5184 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
/* reset the delayed-output block ring indices */
5187 v->left_blk_idx = -1;
5188 v->topleft_blk_idx = 1;
5190 switch (v->s.pict_type) {
5191 case AV_PICTURE_TYPE_I:
5192 if (v->profile == PROFILE_ADVANCED)
5193 vc1_decode_i_blocks_adv(v);
5195 vc1_decode_i_blocks(v);
5197 case AV_PICTURE_TYPE_P:
5198 if (v->p_frame_skipped)
5199 vc1_decode_skip_blocks(v);
5201 vc1_decode_p_blocks(v);
5203 case AV_PICTURE_TYPE_B:
/* BI frames are coded as intra pictures */
5205 if (v->profile == PROFILE_ADVANCED)
5206 vc1_decode_i_blocks_adv(v);
5208 vc1_decode_i_blocks(v);
5210 vc1_decode_b_blocks(v);
5216 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5220 * Transform coefficients for both sprites in 16.16 fixed point format,
5221 * in the order they appear in the bitstream:
5223 * rotation 1 (unused)
5225 * rotation 2 (unused)
5232 int effect_type, effect_flag;
5233 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
5234 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
5237 static inline int get_fp_val(GetBitContext* gb)
5239 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
/** Parse one sprite affine-transform coefficient set into c[0..6]
 * (16.16 fixed point). A 2-bit selector chooses how many of the
 * scale/translation coefficients c[0..4] are explicitly coded; the
 * vertical translation c[5] and the final coefficient c[6] are read
 * afterwards. Coefficients not read here keep their caller-set values.
 */
5242 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
5246 switch (get_bits(gb, 2)) {
5249 c[2] = get_fp_val(gb);
/* uniform scale: horizontal and vertical share one coded value */
5253 c[0] = c[4] = get_fp_val(gb);
5254 c[2] = get_fp_val(gb);
5257 c[0] = get_fp_val(gb);
5258 c[2] = get_fp_val(gb);
5259 c[4] = get_fp_val(gb);
/* full form: c[1]/c[3] are the (unused) rotation terms */
5262 c[0] = get_fp_val(gb);
5263 c[1] = get_fp_val(gb);
5264 c[2] = get_fp_val(gb);
5265 c[3] = get_fp_val(gb);
5266 c[4] = get_fp_val(gb);
5269 c[5] = get_fp_val(gb);
5271 c[6] = get_fp_val(gb);
/** Parse the sprite headers for a WMV3IMAGE/VC1IMAGE picture into sd:
 * per-sprite transform coefficients, then optional effect type and its
 * two parameter lists. Ends with bitstream-consistency diagnostics.
 */
5276 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
5278 AVCodecContext *avctx = v->s.avctx;
/* one or two sprites depending on v->two_sprites */
5281 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5282 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
/* rotation (coefs 1 and 3) is not implemented */
5283 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
5284 avpriv_request_sample(avctx, "Non-zero rotation coefficients");
5285 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
5286 for (i = 0; i < 7; i++)
5287 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
5288 sd->coefs[sprite][i] / (1<<16),
5289 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
5290 av_log(avctx, AV_LOG_DEBUG, "\n");
/* note: assignment inside the condition is intentional (value is stored and tested) */
5294 if (sd->effect_type = get_bits_long(gb, 30)) {
5295 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
5297 vc1_sprite_parse_transform(gb, sd->effect_params1);
5300 vc1_sprite_parse_transform(gb, sd->effect_params1);
5301 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5304 for (i = 0; i < sd->effect_pcount1; i++)
5305 sd->effect_params1[i] = get_fp_val(gb);
5307 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5308 // effect 13 is simple alpha blending and matches the opacity above
5309 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5310 for (i = 0; i < sd->effect_pcount1; i++)
5311 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5312 sd->effect_params1[i] / (1 << 16),
5313 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5314 av_log(avctx, AV_LOG_DEBUG, "\n");
5317 sd->effect_pcount2 = get_bits(gb, 16);
/* effect_params2 has room for 10 entries -- reject larger counts */
5318 if (sd->effect_pcount2 > 10) {
5319 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5321 } else if (sd->effect_pcount2) {
5323 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5324 while (++i < sd->effect_pcount2) {
5325 sd->effect_params2[i] = get_fp_val(gb);
5326 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5327 sd->effect_params2[i] / (1 << 16),
5328 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5330 av_log(avctx, AV_LOG_DEBUG, "\n");
/* assignment in condition intentional, as above */
5333 if (sd->effect_flag = get_bits1(gb))
5334 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
/* consistency diagnostics: overran the buffer, or left more than 8 bits unread */
5336 if (get_bits_count(gb) >= gb->size_in_bits +
5337 (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
5338 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5339 if (get_bits_count(gb) < gb->size_in_bits - 8)
5340 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
/** Render one or two sprites into the output frame.
 * Horizontal scaling goes through per-sprite cached rows (sr_rows,
 * validity tracked in sr_cache); vertical scaling and the two-sprite
 * alpha blend are done by the vc1dsp sprite_v_* routines, row by row.
 */
5343 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5345 int i, plane, row, sprite;
/* sr_cache[sprite][0/1]: source line currently held in sr_rows, -1 = empty */
5346 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5347 uint8_t* src_h[2][2];
5348 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5350 MpegEncContext *s = &v->s;
/* clamp per-sprite offsets/advances (16.16 fixed point).
 * Precedence note: `v->sprite_width-1 << 16` parses as
 * ((v->sprite_width - 1) << 16) since << binds looser than binary minus. */
5352 for (i = 0; i < 2; i++) {
5353 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5354 xadv[i] = sd->coefs[i][0];
5355 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5356 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5358 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5359 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
5361 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
/* luma only in grayscale mode; chroma planes are half-size (>> !!plane) */
5363 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5364 int width = v->output_width>>!!plane;
5366 for (row = 0; row < v->output_height>>!!plane; row++) {
5367 uint8_t *dst = v->sprite_output_frame->data[plane] +
5368 v->sprite_output_frame->linesize[plane] * row;
5370 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5371 uint8_t *iplane = s->current_picture.f.data[plane];
5372 int iline = s->current_picture.f.linesize[plane];
/* source line in 16.16: integer part selects the row, fraction drives the blend */
5373 int ycoord = yoff[sprite] + yadv[sprite] * row;
5374 int yline = ycoord >> 16;
5376 ysub[sprite] = ycoord & 0xFFFF;
5378 iplane = s->last_picture.f.data[plane];
5379 iline = s->last_picture.f.linesize[plane];
5381 next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
/* fast path: no horizontal scaling needed, read source rows directly */
5382 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5383 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5385 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
/* scaled path: (re)fill the cached rows, reusing row 1 as row 0 when possible */
5387 if (sr_cache[sprite][0] != yline) {
5388 if (sr_cache[sprite][1] == yline) {
5389 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5390 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5392 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5393 sr_cache[sprite][0] = yline;
5396 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5397 v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
5398 iplane + next_line, xoff[sprite],
5399 xadv[sprite], width);
5400 sr_cache[sprite][1] = yline + 1;
5402 src_h[sprite][0] = v->sr_rows[sprite][0];
5403 src_h[sprite][1] = v->sr_rows[sprite][1];
/* vertical interpolation / alpha blend, picking the cheapest DSP routine */
5407 if (!v->two_sprites) {
5409 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5411 memcpy(dst, src_h[0][0], width);
5414 if (ysub[0] && ysub[1]) {
5415 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5416 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5417 } else if (ysub[0]) {
5418 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5419 src_h[1][0], alpha, width);
5420 } else if (ysub[1]) {
5421 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5422 src_h[0][0], (1<<16)-1-alpha, width);
5424 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5430 for (i = 0; i < 2; i++) {
/** Decode a sprite picture: parse the sprite headers, validate that the
 * required reference sprite(s) exist, allocate the output frame and
 * render into it. Error paths log their cause (the corresponding return
 * statements are not visible in this listing).
 */
5440 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5442 MpegEncContext *s = &v->s;
5443 AVCodecContext *avctx = s->avctx;
5446 vc1_parse_sprites(v, gb, &sd);
/* no sprite decoded for the current picture at all */
5448 if (!s->current_picture.f.data[0]) {
5449 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
/* two-sprite blending needs a previous sprite as well */
5453 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5454 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5458 av_frame_unref(v->sprite_output_frame);
5459 if (ff_get_buffer(avctx, v->sprite_output_frame, 0) < 0) {
5460 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5464 vc1_draw_sprites(v, &sd);
/** Flush callback for the sprite (image) decoders: blanks the current
 * picture so a missing companion sprite shows up as black rather than
 * stale data (luma 0, chroma 128 = neutral grey).
 */
5469 static void vc1_sprite_flush(AVCodecContext *avctx)
5471 VC1Context *v = avctx->priv_data;
5472 MpegEncContext *s = &v->s;
5473 AVFrame *f = &s->current_picture.f;
5476 /* Windows Media Image codecs have a convergence interval of two keyframes.
5477 Since we can't enforce it, clear to black the missing sprite. This is
5478 wrong but it looks better than doing nothing. */
5481 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5482 for (i = 0; i < v->sprite_height>>!!plane; i++)
5483 memset(f->data[plane] + i * f->linesize[plane],
5484 plane ? 128 : 0, f->linesize[plane]);
/* Allocate all per-macroblock and per-block side-information tables used by
 * the VC-1 decoder: bitplanes (MV type, direct, forward, fieldtx, AC-pred,
 * overlap flags), the per-row CBP/TTBLK/intra/luma-MV arrays (each sized
 * 2 * mb_stride so the current and previous MB row coexist), MB-type arrays
 * laid out to be indexable through s->block_index[], and the field-mode MV
 * buffers. Returns 0 on success, AVERROR(ENOMEM) (after freeing what was
 * already allocated) or -1 on failure.
 * NOTE(review): several lines are elided in this chunk (opening brace,
 * 'int i;', some closing braces and the final 'return 0;'); the allocation
 * check at the bottom is also visibly cut mid-condition. */
5489 av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
5491 MpegEncContext *s = &v->s;
/* Height rounded up to an even MB count so field pictures (two half-height
   fields) can share the same tables. */
5493 int mb_height = FFALIGN(s->mb_height, 2);
5495 /* Allocate mb bitplanes */
5496 v->mv_type_mb_plane = av_malloc (s->mb_stride * mb_height);
5497 v->direct_mb_plane = av_malloc (s->mb_stride * mb_height);
5498 v->forward_mb_plane = av_malloc (s->mb_stride * mb_height);
5499 v->fieldtx_plane = av_mallocz(s->mb_stride * mb_height);
5500 v->acpred_plane = av_malloc (s->mb_stride * mb_height);
5501 v->over_flags_plane = av_malloc (s->mb_stride * mb_height);
/* One spare block on each side of a MB row of residual blocks. */
5503 v->n_allocated_blks = s->mb_width + 2;
5504 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
/* cbp/ttblk/is_intra/luma_mv: double-row buffers; the working pointer is
   offset by one stride so row -1 (the previous MB row) is addressable. */
5505 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5506 v->cbp = v->cbp_base + s->mb_stride;
5507 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5508 v->ttblk = v->ttblk_base + s->mb_stride;
5509 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5510 v->is_intra = v->is_intra_base + s->mb_stride;
5511 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5512 v->luma_mv = v->luma_mv_base + s->mb_stride;
5514 /* allocate block type info in that way so it could be used with s->block_index[] */
5515 v->mb_type_base = av_malloc(s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5516 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5517 v->mb_type[1] = v->mb_type_base + s->b8_stride * (mb_height * 2 + 1) + s->mb_stride + 1;
5518 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (mb_height + 1);
5520 /* allocate memory to store block level MV info */
5521 v->blk_mv_type_base = av_mallocz( s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5522 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
/* mv_f / mv_f_next: field-MV reference-selection bitmaps, two planes each
   (current and next reference), same layout as the mb_type tables. */
5523 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5524 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5525 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5526 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5527 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5528 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5530 /* Init coded blocks info */
5531 if (v->profile == PROFILE_ADVANCED) {
5532 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5534 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5538 ff_intrax8_common_init(&v->x8,s);
/* Image (sprite) codecs need per-row scratch buffers for sprite scaling:
   sr_rows[0][*] = luma rows, sr_rows[1][*] = chroma rows. */
5540 if (s->avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || s->avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5541 for (i = 0; i < 4; i++)
5542 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
/* Bulk NULL check of the mandatory allocations above; on any failure free
   everything and report ENOMEM. NOTE(review): condition list appears
   truncated here (mv_f/blk_mv_type checks not visible). */
5545 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5546 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5548 av_freep(&v->mv_type_mb_plane);
5549 av_freep(&v->direct_mb_plane);
5550 av_freep(&v->acpred_plane);
5551 av_freep(&v->over_flags_plane);
5552 av_freep(&v->block);
5553 av_freep(&v->cbp_base);
5554 av_freep(&v->ttblk_base);
5555 av_freep(&v->is_intra_base);
5556 av_freep(&v->luma_mv_base);
5557 av_freep(&v->mb_type_base);
5558 return AVERROR(ENOMEM);
/* Build the transposed (column-major) zigzag scan tables used by the VC-1
 * inverse transform path: v->zz_8x8[0..3] from the four WMV1 scantables and
 * v->zzi_8x8 from the advanced-profile interlaced 8x8 zigzag.
 * The transpose macro swaps the 3-bit row and column fields of a 0..63
 * scan index: (r,c) -> (c,r). */
5564 av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
5567 for (i = 0; i < 64; i++) {
5568 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5569 v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5570 v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5571 v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5572 v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5573 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5579 /** Initialize a VC1/WMV3 decoder
5580 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5581 * @todo TODO: Decypher remaining bits in extra_data
/* Parses the codec extradata (a raw sequence header for WMV3/WMV3IMAGE,
 * or a start-code-delimited sequence header + entry point for VC1/VC1IMAGE),
 * picks the pixel format / hwaccel, and records sprite dimensions for the
 * image codecs. Returns 0 on success, negative on error.
 * NOTE(review): many lines are elided in this chunk (braces, declarations of
 * 'gb'/'count'/'i', 'return -1' error paths, goto labels); comments below
 * describe only the visible statements. */
5583 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5585 VC1Context *v = avctx->priv_data;
5586 MpegEncContext *s = &v->s;
5589 /* save the container output size for WMImage */
5590 v->output_width = avctx->width;
5591 v->output_height = avctx->height;
/* Extradata is mandatory: the sequence header lives there. */
5593 if (!avctx->extradata_size || !avctx->extradata)
5595 if (!(avctx->flags & CODEC_FLAG_GRAY))
5596 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5598 avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5599 avctx->hwaccel = ff_find_hwaccel(avctx);
5602 if (ff_vc1_init_common(v) < 0)
5604 ff_h264chroma_init(&v->h264chroma, 8);
5605 ff_vc1dsp_init(&v->vc1dsp);
5607 if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5610 // looks like WMV3 has a sequence header stored in the extradata
5611 // advanced sequence header may be before the first frame
5612 // the last byte of the extradata is a version number, 1 for the
5613 // samples we can decode
5615 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5617 if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
/* Report leftover / overread bits purely for diagnostics. */
5620 count = avctx->extradata_size*8 - get_bits_count(&gb);
5622 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5623 count, get_bits(&gb, count));
5624 } else if (count < 0) {
5625 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5627 } else { // VC1/WVC1/WVP2
5628 const uint8_t *start = avctx->extradata;
5629 uint8_t *end = avctx->extradata + avctx->extradata_size;
5630 const uint8_t *next;
5631 int size, buf2_size;
5632 uint8_t *buf2 = NULL;
5633 int seq_initialized = 0, ep_initialized = 0;
5635 if (avctx->extradata_size < 16) {
5636 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
/* Scratch buffer for start-code unescaping of each extradata unit. */
5640 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5641 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
/* Walk the 0x000001xx-delimited units and dispatch on the start code. */
5643 for (; next < end; start = next) {
5644 next = find_next_marker(start + 4, end);
5645 size = next - start - 4;
5648 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5649 init_get_bits(&gb, buf2, buf2_size * 8);
5650 switch (AV_RB32(start)) {
5651 case VC1_CODE_SEQHDR:
5652 if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5656 seq_initialized = 1;
5658 case VC1_CODE_ENTRYPOINT:
5659 if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
/* Advanced-profile extradata must contain BOTH a sequence header and an
   entry point; anything less is unusable. */
5668 if (!seq_initialized || !ep_initialized) {
5669 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5672 v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5675 v->sprite_output_frame = av_frame_alloc();
5676 if (!v->sprite_output_frame)
5677 return AVERROR(ENOMEM);
5679 avctx->profile = v->profile;
5680 if (v->profile == PROFILE_ADVANCED)
5681 avctx->level = v->level;
5683 avctx->has_b_frames = !!avctx->max_b_frames;
5685 s->mb_width = (avctx->coded_width + 15) >> 4;
5686 s->mb_height = (avctx->coded_height + 15) >> 4;
/* res_fasttx streams and advanced profile use the transposed scans;
   otherwise the plain WMV1 scantables are copied as-is. */
5688 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5689 ff_vc1_init_transposed_scantables(v);
5691 memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
5696 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
/* The bitstream is coded at sprite size; the user-visible output keeps the
   container dimensions saved at the top of this function. */
5697 v->sprite_width = avctx->coded_width;
5698 v->sprite_height = avctx->coded_height;
5700 avctx->coded_width = avctx->width = v->output_width;
5701 avctx->coded_height = avctx->height = v->output_height;
5703 // prevent 16.16 overflows
5704 if (v->sprite_width > 1 << 14 ||
5705 v->sprite_height > 1 << 14 ||
5706 v->output_width > 1 << 14 ||
5707 v->output_height > 1 << 14) return -1;
5712 /** Close a VC1/WMV3 decoder
5713 * @warning Initial try at using MpegEncContext stuff
/* Frees everything ff_vc1_decode_init_alloc_tables() (and init) allocated:
 * the sprite output frame, sprite row buffers, HRD arrays, the shared
 * MpegEncContext, every bitplane/table, and the IntraX8 state. Safe to call
 * on a partially-initialized context since av_freep/av_frame_free tolerate
 * NULL. NOTE(review): 'int i;' and the final 'return 0;' are elided in this
 * chunk. */
5715 av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
5717 VC1Context *v = avctx->priv_data;
5720 av_frame_free(&v->sprite_output_frame);
5722 for (i = 0; i < 4; i++)
5723 av_freep(&v->sr_rows[i >> 1][i & 1]);
5724 av_freep(&v->hrd_rate);
5725 av_freep(&v->hrd_buffer);
5726 ff_MPV_common_end(&v->s);
5727 av_freep(&v->mv_type_mb_plane);
5728 av_freep(&v->direct_mb_plane);
5729 av_freep(&v->forward_mb_plane);
5730 av_freep(&v->fieldtx_plane);
5731 av_freep(&v->acpred_plane);
5732 av_freep(&v->over_flags_plane);
5733 av_freep(&v->mb_type_base);
5734 av_freep(&v->blk_mv_type_base);
5735 av_freep(&v->mv_f_base);
5736 av_freep(&v->mv_f_next_base);
5737 av_freep(&v->block);
5738 av_freep(&v->cbp_base);
5739 av_freep(&v->ttblk_base);
5740 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5741 av_freep(&v->luma_mv_base);
5742 ff_intrax8_common_end(&v->x8);
5747 /** Decode a VC1/WMV3 frame
5748 * @todo TODO: Handle VC-1 IDUs (Transport level?)
/* Main decode entry point (AVCodec.decode). High-level flow as visible here:
 *  1. handle the empty/end-of-sequence packet (emit the delayed picture);
 *  2. for advanced profile, split the packet at 0x000001xx markers and
 *     unescape frame/field/slice/entry-point units into buf2 / slices[];
 *  3. (re)initialize the MpegEncContext if dimensions changed;
 *  4. parse the frame header, honor skip_frame/droppable, start the frame;
 *  5. decode each slice (and the second field, with doubled linesizes);
 *  6. run error resilience, composite sprites for the IMAGE codecs, and
 *     ref the output frame into *pict.
 * NOTE(review): this chunk has many elided lines (braces, 'goto err' paths,
 * declarations such as 'size'/'buf_size2'/'header_ret', returns); comments
 * below only describe the statements that are visible. */
5750 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5751 int *got_frame, AVPacket *avpkt)
5753 const uint8_t *buf = avpkt->data;
5754 int buf_size = avpkt->size, n_slices = 0, i, ret;
5755 VC1Context *v = avctx->priv_data;
5756 MpegEncContext *s = &v->s;
5757 AVFrame *pict = data;
5758 uint8_t *buf2 = NULL;
5759 const uint8_t *buf_start = buf;
5760 int mb_height, n_slices1;
5765 } *slices = NULL, *tmp;
5767 /* no supplementary picture */
5768 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5769 /* special case for last picture */
5770 if (s->low_delay == 0 && s->next_picture_ptr) {
5771 if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
5773 s->next_picture_ptr = NULL;
5781 //for advanced profile we may need to parse and unescape data
5782 if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5784 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5786 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5787 const uint8_t *start, *end, *next;
/* Iterate over the marker-delimited units inside the packet. */
5791 for (start = buf, end = buf + buf_size; next < end; start = next) {
5792 next = find_next_marker(start + 4, end);
5793 size = next - start - 4;
5794 if (size <= 0) continue;
5795 switch (AV_RB32(start)) {
5796 case VC1_CODE_FRAME:
5799 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5801 case VC1_CODE_FIELD: {
/* Grow the slices array by one and unescape the second field into it. */
5803 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5807 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5808 if (!slices[n_slices].buf)
5810 buf_size3 = vc1_unescape_buffer(start + 4, size,
5811 slices[n_slices].buf);
5812 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5814 /* assuming that the field marker is at the exact middle,
5815 hope it's correct */
5816 slices[n_slices].mby_start = s->mb_height >> 1;
5817 n_slices1 = n_slices - 1; // index of the last slice of the first field
5821 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5822 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5823 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5824 ff_vc1_decode_entry_point(avctx, v, &s->gb);
5826 case VC1_CODE_SLICE: {
5828 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5832 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5833 if (!slices[n_slices].buf)
5835 buf_size3 = vc1_unescape_buffer(start + 4, size,
5836 slices[n_slices].buf);
5837 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
/* Slice header carries its starting MB row in 9 bits. */
5839 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5845 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5846 const uint8_t *divider;
5849 divider = find_next_marker(buf, buf + buf_size);
5850 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5851 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5853 } else { // found field marker, unescape second field
5854 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5858 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5859 if (!slices[n_slices].buf)
5861 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5862 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5864 slices[n_slices].mby_start = s->mb_height >> 1;
5865 n_slices1 = n_slices - 1;
5868 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5870 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5872 init_get_bits(&s->gb, buf2, buf_size2*8);
/* Simple/Main profile: the packet is read directly, no unescaping. */
5874 init_get_bits(&s->gb, buf, buf_size*8);
5876 if (v->res_sprite) {
5877 v->new_sprite = !get_bits1(&s->gb);
5878 v->two_sprites = get_bits1(&s->gb);
5879 /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5880 we're using the sprite compositor. These are intentionally kept separate
5881 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5882 the vc1 one for WVP2 */
5883 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5884 if (v->new_sprite) {
5885 // switch AVCodecContext parameters to those of the sprites
5886 avctx->width = avctx->coded_width = v->sprite_width;
5887 avctx->height = avctx->coded_height = v->sprite_height;
/* Dimension change (e.g. new entry point): tear down and re-init below. */
5894 if (s->context_initialized &&
5895 (s->width != avctx->coded_width ||
5896 s->height != avctx->coded_height)) {
5897 ff_vc1_decode_end(avctx);
5900 if (!s->context_initialized) {
5901 if (ff_msmpeg4_decode_init(avctx) < 0)
5903 if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5904 ff_MPV_common_end(s);
5908 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5910 if (v->profile == PROFILE_ADVANCED) {
5911 s->h_edge_pos = avctx->coded_width;
5912 s->v_edge_pos = avctx->coded_height;
5916 // do parse frame header
5917 v->pic_header_flag = 0;
5918 v->first_pic_header_flag = 1;
5919 if (v->profile < PROFILE_ADVANCED) {
5920 if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5924 if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5928 v->first_pic_header_flag = 0;
/* The sprite compositors only accept intra pictures. */
5930 if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5931 && s->pict_type != AV_PICTURE_TYPE_I) {
5932 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5936 // for skipping the frame
5937 s->current_picture.f.pict_type = s->pict_type;
5938 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5940 /* skip B-frames if we don't have reference frames */
5941 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
5944 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5945 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5946 avctx->skip_frame >= AVDISCARD_ALL) {
5950 if (s->next_p_frame_damaged) {
5951 if (s->pict_type == AV_PICTURE_TYPE_B)
5954 s->next_p_frame_damaged = 0;
5957 if (ff_MPV_frame_start(s, avctx) < 0) {
5961 // process pulldown flags
5962 s->current_picture_ptr->f.repeat_pict = 0;
5963 // Pulldown flags are only valid when 'broadcast' has been set.
5964 // So ticks_per_frame will be 2
5967 s->current_picture_ptr->f.repeat_pict = 1;
5968 } else if (v->rptfrm) {
5970 s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5973 s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
5974 s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
/* Hardware-accelerated path: hand the raw bitstream to the hwaccel and
   skip the software slice loop entirely. */
5976 if (avctx->hwaccel) {
5977 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5979 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5981 if (avctx->hwaccel->end_frame(avctx) < 0)
5986 ff_mpeg_er_frame_start(s);
5988 v->bits = buf_size * 8;
5989 v->end_mb_x = s->mb_width;
/* Field pictures: double linesizes so each field is decoded as a
   half-height picture interleaved in the frame buffer; undone below. */
5990 if (v->field_mode) {
5991 s->current_picture.f.linesize[0] <<= 1;
5992 s->current_picture.f.linesize[1] <<= 1;
5993 s->current_picture.f.linesize[2] <<= 1;
5995 s->uvlinesize <<= 1;
5997 mb_height = s->mb_height >> v->field_mode;
6000 av_log(v->s.avctx, AV_LOG_ERROR, "Invalid mb_height.\n");
/* Slice loop: i == 0 is the data already in s->gb; i in [1..n_slices]
   switches to slices[i-1]. */
6004 for (i = 0; i <= n_slices; i++) {
6005 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6006 if (v->field_mode <= 0) {
6007 av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6008 "picture boundary (%d >= %d)\n", i,
6009 slices[i - 1].mby_start, mb_height);
6012 v->second_field = 1;
6013 v->blocks_off = s->mb_width * s->mb_height << 1;
6014 v->mb_off = s->mb_stride * s->mb_height >> 1;
6016 v->second_field = 0;
6021 v->pic_header_flag = 0;
/* The slice right after the first field's slices carries the second
   field's picture header. */
6022 if (v->field_mode && i == n_slices1 + 2) {
6023 if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6024 av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6025 if (avctx->err_recognition & AV_EF_EXPLODE)
6029 } else if (get_bits1(&s->gb)) {
6030 v->pic_header_flag = 1;
6031 if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6032 av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6033 if (avctx->err_recognition & AV_EF_EXPLODE)
6041 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6042 if (!v->field_mode || v->second_field)
6043 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6045 s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6046 ff_vc1_decode_blocks(v);
6048 s->gb = slices[i].gb;
/* Restore frame-sized linesizes and rotate the field-MV buffers for
   reference pictures. */
6050 if (v->field_mode) {
6051 v->second_field = 0;
6052 s->current_picture.f.linesize[0] >>= 1;
6053 s->current_picture.f.linesize[1] >>= 1;
6054 s->current_picture.f.linesize[2] >>= 1;
6056 s->uvlinesize >>= 1;
6057 if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) {
6058 FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
6059 FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
6062 av_dlog(s->avctx, "Consumed %i/%i bits\n",
6063 get_bits_count(&s->gb), s->gb.size_in_bits);
6064 // if (get_bits_count(&s->gb) > buf_size * 8)
6067 ff_er_frame_end(&s->er);
6070 ff_MPV_frame_end(s);
/* IMAGE codecs: restore output dimensions and composite the sprite. */
6072 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6074 avctx->width = avctx->coded_width = v->output_width;
6075 avctx->height = avctx->coded_height = v->output_height;
6076 if (avctx->skip_frame >= AVDISCARD_NONREF)
6078 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6079 if (vc1_decode_sprites(v, &s->gb))
6082 if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
/* Normal output: B-frames / low-delay emit the current picture, otherwise
   the delayed previous picture is emitted. */
6086 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6087 if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
6089 ff_print_debug_info(s, s->current_picture_ptr);
6091 } else if (s->last_picture_ptr != NULL) {
6092 if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
6094 ff_print_debug_info(s, s->last_picture_ptr);
/* Cleanup (success path and, below, the error path free the slice bufs). */
6101 for (i = 0; i < n_slices; i++)
6102 av_free(slices[i].buf);
6108 for (i = 0; i < n_slices; i++)
6109 av_free(slices[i].buf);
/* Profile name table exposed through AVCodec.profiles; terminated by the
 * FF_PROFILE_UNKNOWN sentinel entry. */
6115 static const AVProfile profiles[] = {
6116 { FF_PROFILE_VC1_SIMPLE, "Simple" },
6117 { FF_PROFILE_VC1_MAIN, "Main" },
6118 { FF_PROFILE_VC1_COMPLEX, "Complex" },
6119 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6120 { FF_PROFILE_UNKNOWN },
/* Pixel formats offered to get_format(): hardware formats first (each one
 * is conditionally compiled; NOTE(review): the #if guards and the software
 * fallback / terminator entries are elided in this chunk). */
6123 static const enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[] = {
6125 AV_PIX_FMT_DXVA2_VLD,
6128 AV_PIX_FMT_VAAPI_VLD,
/* AVCodec registration for the full SMPTE VC-1 (advanced profile) decoder.
 * CODEC_CAP_DELAY: pictures are emitted one frame late (B-frame reordering),
 * so the caller must flush with an empty packet at EOF.
 * NOTE(review): the '.name' initializer line is elided in this chunk. */
6137 AVCodec ff_vc1_decoder = {
6139 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6140 .type = AVMEDIA_TYPE_VIDEO,
6141 .id = AV_CODEC_ID_VC1,
6142 .priv_data_size = sizeof(VC1Context),
6143 .init = vc1_decode_init,
6144 .close = ff_vc1_decode_end,
6145 .decode = vc1_decode_frame,
6146 .flush = ff_mpeg_flush,
6147 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6148 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6149 .profiles = NULL_IF_CONFIG_SMALL(profiles)
6152 #if CONFIG_WMV3_DECODER
/* AVCodec registration for WMV3 (Windows Media Video 9, VC-1 simple/main
 * profile with a raw extradata sequence header). Shares all callbacks with
 * the VC-1 decoder; only the codec id differs.
 * NOTE(review): the '.name' initializer line is elided in this chunk. */
6153 AVCodec ff_wmv3_decoder = {
6155 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
6156 .type = AVMEDIA_TYPE_VIDEO,
6157 .id = AV_CODEC_ID_WMV3,
6158 .priv_data_size = sizeof(VC1Context),
6159 .init = vc1_decode_init,
6160 .close = ff_vc1_decode_end,
6161 .decode = vc1_decode_frame,
6162 .flush = ff_mpeg_flush,
6163 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6164 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6165 .profiles = NULL_IF_CONFIG_SMALL(profiles)
6169 #if CONFIG_WMV3IMAGE_DECODER
/* AVCodec registration for WMV3 Image (WMVP): the sprite-compositing
 * variant. No CODEC_CAP_DELAY (intra-only output), software pixel formats
 * only, and flush clears the composited sprite via vc1_sprite_flush(). */
6170 AVCodec ff_wmv3image_decoder = {
6171 .name = "wmv3image",
6172 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
6173 .type = AVMEDIA_TYPE_VIDEO,
6174 .id = AV_CODEC_ID_WMV3IMAGE,
6175 .priv_data_size = sizeof(VC1Context),
6176 .init = vc1_decode_init,
6177 .close = ff_vc1_decode_end,
6178 .decode = vc1_decode_frame,
6179 .capabilities = CODEC_CAP_DR1,
6180 .flush = vc1_sprite_flush,
6181 .pix_fmts = ff_pixfmt_list_420
6185 #if CONFIG_VC1IMAGE_DECODER
6186 AVCodec ff_vc1image_decoder = {
6188 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
6189 .type = AVMEDIA_TYPE_VIDEO,
6190 .id = AV_CODEC_ID_VC1IMAGE,
6191 .priv_data_size = sizeof(VC1Context),
6192 .init = vc1_decode_init,
6193 .close = ff_vc1_decode_end,
6194 .decode = vc1_decode_frame,
6195 .capabilities = CODEC_CAP_DR1,
6196 .flush = vc1_sprite_flush,
6197 .pix_fmts = ff_pixfmt_list_420