2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of Libav.
9 * Libav is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * Libav is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with Libav; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
31 #include "error_resilience.h"
33 #include "mpegutils.h"
34 #include "mpegvideo.h"
36 #include "h264chroma.h"
40 #include "vc1acdata.h"
41 #include "msmpeg4data.h"
48 #define MB_INTRA_VLC_BITS 9
/* Escape-code offset tables for interlaced-picture MVDATA decoding:
 * table1 holds power-of-two offsets, table2 holds (2^k - 1) offsets.
 * NOTE(review): the consuming code is not visible in this excerpt —
 * presumably indexed by the decoded escape size; confirm at the use site. */
52 // offset tables for interlaced picture MVDATA decoding
53 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
54 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
56 /***********************************************************************/
58 * @name VC-1 Bitplane decoding
76 /** @} */ //imode defines
/* Initialize the per-MB block indices via ff_init_block_index(), then, in
 * field-picture mode when decoding the field selected by !(second_field ^ tff),
 * advance all three plane destination pointers by one picture line so writes
 * land on the correct field's lines.
 * NOTE(review): interior lines are missing from this excerpt (closing braces
 * not visible). */
78 static void init_block_index(VC1Context *v)
80 MpegEncContext *s = &v->s;
81 ff_init_block_index(s);
82 if (v->field_mode && !(v->second_field ^ v->tff)) {
83 s->dest[0] += s->current_picture_ptr->f->linesize[0];
84 s->dest[1] += s->current_picture_ptr->f->linesize[1];
85 s->dest[2] += s->current_picture_ptr->f->linesize[2];
89 /** @} */ //Bitplane group
/* Write the delayed, already overlap-filtered residual blocks of previously
 * decoded macroblocks to the output picture with signed clamping.  Runs one
 * MB row and one MB column behind the decoding loop (see the comment kept
 * below).  At the end, the four rotating block-buffer indices are advanced.
 * NOTE(review): many interior lines (remaining put calls, closing braces,
 * part of the inc_blk_idx macro) are missing from this excerpt. */
91 static void vc1_put_signed_blocks_clamped(VC1Context *v)
93 MpegEncContext *s = &v->s;
94 int topleft_mb_pos, top_mb_pos;
95 int stride_y, fieldtx = 0;
98 /* The put pixels loop is always one MB row behind the decoding loop,
99 * because we can only put pixels when overlap filtering is done, and
100 * for filtering of the bottom edge of a MB, we need the next MB row
102 * Within the row, the put pixels loop is also one MB col behind the
103 * decoding loop. The reason for this is again, because for filtering
104 * of the right MB edge, we need the next MB present. */
105 if (!s->first_slice_line) {
/* Flush the MB diagonally up-left of the current position. */
107 topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
108 if (v->fcm == ILACE_FRAME)
109 fieldtx = v->fieldtx_plane[topleft_mb_pos];
/* For field-transformed MBs, luma rows of one field are interleaved,
 * hence the doubled stride and adjusted vertical distance below. */
110 stride_y = s->linesize << fieldtx;
111 v_dist = (16 - fieldtx) >> (fieldtx == 0);
112 s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
113 s->dest[0] - 16 * s->linesize - 16,
115 s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
116 s->dest[0] - 16 * s->linesize - 8,
118 s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
119 s->dest[0] - v_dist * s->linesize - 16,
121 s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
122 s->dest[0] - v_dist * s->linesize - 8,
124 s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
125 s->dest[1] - 8 * s->uvlinesize - 8,
127 s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
128 s->dest[2] - 8 * s->uvlinesize - 8,
/* At the last MB of a row there is no further column iteration, so the
 * MB directly above must be flushed here as well. */
131 if (s->mb_x == s->mb_width - 1) {
132 top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
133 if (v->fcm == ILACE_FRAME)
134 fieldtx = v->fieldtx_plane[top_mb_pos];
135 stride_y = s->linesize << fieldtx;
136 v_dist = fieldtx ? 15 : 8;
137 s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
138 s->dest[0] - 16 * s->linesize,
140 s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
141 s->dest[0] - 16 * s->linesize + 8,
143 s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
144 s->dest[0] - v_dist * s->linesize,
146 s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
147 s->dest[0] - v_dist * s->linesize + 8,
149 s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
150 s->dest[1] - 8 * s->uvlinesize,
152 s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
153 s->dest[2] - 8 * s->uvlinesize,
/* Advance an index in the ring of allocated block buffers, wrapping at
 * n_allocated_blks (wrap statement not visible in this excerpt). */
158 #define inc_blk_idx(idx) do { \
160 if (idx >= v->n_allocated_blks) \
/* Rotate topleft <- top <- left <- cur block-buffer roles. */
164 inc_blk_idx(v->topleft_blk_idx);
165 inc_blk_idx(v->top_blk_idx);
166 inc_blk_idx(v->left_blk_idx);
167 inc_blk_idx(v->cur_blk_idx);
/* In-loop deblocking for an intra block row position: vertical filters on the
 * current MB's luma/chroma edges, horizontal filters on the MB above (one row
 * behind, since the right/bottom neighbours must already be decoded).
 * pq is the picture quantizer driving the filter strength.
 * NOTE(review): some lines (loop braces, conditions) are missing from this
 * excerpt. */
170 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
172 MpegEncContext *s = &v->s;
174 if (!s->first_slice_line) {
175 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
177 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
178 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
179 for (j = 0; j < 2; j++) {
180 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
182 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
185 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
/* Bottom MB row: also filter the current row's horizontal edges, since no
 * following row will do it. */
187 if (s->mb_y == s->end_mb_y - 1) {
189 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
190 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
191 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
193 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/* Delayed variant of the intra-block loop filter: runs one row and one column
 * behind the overlap filter, i.e. two rows/cols behind the decoding loop (see
 * retained comment below).  Handles the interior trail, the right-most MB
 * column, and the final flush at the bottom (s->mb_y == s->end_mb_y).
 * NOTE(review): braces/conditions between the visible lines are missing from
 * this excerpt. */
197 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
199 MpegEncContext *s = &v->s;
202 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
203 * means it runs two rows/cols behind the decoding loop. */
204 if (!s->first_slice_line) {
206 if (s->mb_y >= s->start_mb_y + 2) {
207 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
210 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
211 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
212 for (j = 0; j < 2; j++) {
213 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
215 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
219 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
/* Right-most MB column: no further column iteration, so also filter the
 * MB one column to the right of the usual trailing position. */
222 if (s->mb_x == s->mb_width - 1) {
223 if (s->mb_y >= s->start_mb_y + 2) {
224 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
227 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
228 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
229 for (j = 0; j < 2; j++) {
230 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
232 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
236 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
/* Past the last decoded row: flush the remaining horizontal edges. */
239 if (s->mb_y == s->end_mb_y) {
242 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
243 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
245 for (j = 0; j < 2; j++) {
246 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
251 if (s->mb_x == s->mb_width - 1) {
253 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
254 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
256 for (j = 0; j < 2; j++) {
257 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Conditional overlap smoothing for intra blocks, operating on the rotating
 * block buffers (cur/left/top/topleft) before they are written out.  Filtering
 * is enabled per-MB by v->condover / v->pq >= 9 / over_flags_plane; H overlap
 * runs before V overlap within a MB (see retained comment below).  Chroma
 * blocks 4 and 5 are skipped in grayscale (CODEC_FLAG_GRAY) mode.
 * NOTE(review): closing braces and a few statements are missing from this
 * excerpt. */
265 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
267 MpegEncContext *s = &v->s;
270 if (v->condover == CONDOVER_NONE)
273 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
275 /* Within a MB, the horizontal overlap always runs before the vertical.
276 * To accomplish that, we run the H on left and internal borders of the
277 * currently decoded MB. Then, we wait for the next overlap iteration
278 * to do H overlap on the right edge of this MB, before moving over and
279 * running the V overlap. Therefore, the V overlap makes us trail by one
280 * MB col and the H overlap filter makes us trail by one MB row. This
281 * is reflected in the time at which we run the put_pixels loop. */
282 if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
/* H overlap across the left MB boundary (left MB must also be flagged). */
283 if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
284 v->over_flags_plane[mb_pos - 1])) {
285 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
286 v->block[v->cur_blk_idx][0]);
287 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
288 v->block[v->cur_blk_idx][2]);
289 if (!(s->flags & CODEC_FLAG_GRAY)) {
290 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
291 v->block[v->cur_blk_idx][4]);
292 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
293 v->block[v->cur_blk_idx][5]);
/* H overlap on the current MB's internal vertical edge. */
296 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
297 v->block[v->cur_blk_idx][1]);
298 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
299 v->block[v->cur_blk_idx][3]);
/* Last column: V overlap cannot be deferred to a next iteration. */
301 if (s->mb_x == s->mb_width - 1) {
302 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
303 v->over_flags_plane[mb_pos - s->mb_stride])) {
304 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
305 v->block[v->cur_blk_idx][0]);
306 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
307 v->block[v->cur_blk_idx][1]);
308 if (!(s->flags & CODEC_FLAG_GRAY)) {
309 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
310 v->block[v->cur_blk_idx][4]);
311 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
312 v->block[v->cur_blk_idx][5]);
315 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
316 v->block[v->cur_blk_idx][2]);
317 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
318 v->block[v->cur_blk_idx][3]);
/* Trailing V overlap on the left MB (one column behind). */
321 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
322 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
323 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
324 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
325 v->block[v->left_blk_idx][0]);
326 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
327 v->block[v->left_blk_idx][1]);
328 if (!(s->flags & CODEC_FLAG_GRAY)) {
329 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
330 v->block[v->left_blk_idx][4]);
331 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
332 v->block[v->left_blk_idx][5]);
335 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
336 v->block[v->left_blk_idx][2]);
337 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
338 v->block[v->left_blk_idx][3]);
342 /** Do motion compensation over 1 macroblock
343 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
/* Motion compensation for a whole 16x16 macroblock with a single MV.
 * dir selects forward (0) or backward (1) prediction (s->mv[dir][0]).
 * Luma uses VC-1 quarter-pel mspel or half-pel DSP routines; chroma always
 * uses quarter-pel bilinear.  Handles field pictures, range reduction
 * (rangeredfrm) and intensity compensation (luty/lutuv LUTs) by pre-scaling
 * the source through the edge-emulation buffer.
 * NOTE(review): numerous interior lines (early returns, else branches,
 * closing braces, some declarations) are missing from this excerpt. */
345 static void vc1_mc_1mv(VC1Context *v, int dir)
347 MpegEncContext *s = &v->s;
348 H264ChromaContext *h264chroma = &v->h264chroma;
349 uint8_t *srcY, *srcU, *srcV;
350 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
351 int v_edge_pos = s->v_edge_pos >> v->field_mode;
353 uint8_t (*luty)[256], (*lutuv)[256];
/* Bail out when the needed reference picture is absent. */
356 if ((!v->field_mode ||
357 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
358 !v->s.last_picture.f->data[0])
361 mx = s->mv[dir][0][0];
362 my = s->mv[dir][0][1];
364 // store motion vectors for further use in B frames
365 if (s->pict_type == AV_PICTURE_TYPE_P) {
366 for (i = 0; i < 4; i++) {
367 s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
368 s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
/* Derive the chroma MV by halving, rounding 3/4-pel positions up. */
372 uvmx = (mx + ((mx & 3) == 3)) >> 1;
373 uvmy = (my + ((my & 3) == 3)) >> 1;
374 v->luma_mv[s->mb_x][0] = uvmx;
375 v->luma_mv[s->mb_x][1] = uvmy;
/* Opposite-parity field reference: bias the vertical component. */
378 v->cur_field_type != v->ref_field_type[dir]) {
379 my = my - 2 + 4 * v->cur_field_type;
380 uvmy = uvmy - 2 + 4 * v->cur_field_type;
383 // fastuvmc shall be ignored for interlaced frame picture
384 if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
385 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
386 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
/* Select reference planes and the matching IC lookup tables:
 * current (same frame, other field) / last / next picture. */
389 if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
390 srcY = s->current_picture.f->data[0];
391 srcU = s->current_picture.f->data[1];
392 srcV = s->current_picture.f->data[2];
394 lutuv = v->curr_lutuv;
395 use_ic = v->curr_use_ic;
397 srcY = s->last_picture.f->data[0];
398 srcU = s->last_picture.f->data[1];
399 srcV = s->last_picture.f->data[2];
401 lutuv = v->last_lutuv;
402 use_ic = v->last_use_ic;
405 srcY = s->next_picture.f->data[0];
406 srcU = s->next_picture.f->data[1];
407 srcV = s->next_picture.f->data[2];
409 lutuv = v->next_lutuv;
410 use_ic = v->next_use_ic;
413 if (!srcY || !srcU) {
414 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
/* Integer source position (MVs are in quarter-pel units). */
418 src_x = s->mb_x * 16 + (mx >> 2);
419 src_y = s->mb_y * 16 + (my >> 2);
420 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
421 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
/* MV pull-back: clip ranges differ between simple/main and advanced. */
423 if (v->profile != PROFILE_ADVANCED) {
424 src_x = av_clip( src_x, -16, s->mb_width * 16);
425 src_y = av_clip( src_y, -16, s->mb_height * 16);
426 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
427 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
429 src_x = av_clip( src_x, -17, s->avctx->coded_width);
430 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
431 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
432 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
435 srcY += src_y * s->linesize + src_x;
436 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
437 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* Bottom field reference: start one line down. */
439 if (v->field_mode && v->ref_field_type[dir]) {
440 srcY += s->current_picture_ptr->f->linesize[0];
441 srcU += s->current_picture_ptr->f->linesize[1];
442 srcV += s->current_picture_ptr->f->linesize[2];
445 /* for grayscale we should not try to read from unknown area */
446 if (s->flags & CODEC_FLAG_GRAY) {
447 srcU = s->edge_emu_buffer + 18 * s->linesize;
448 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* Take the emulated-edge path when the source overlaps the picture
 * border, or when range reduction / intensity compensation must
 * rewrite the source pixels first. */
451 if (v->rangeredfrm || use_ic
452 || s->h_edge_pos < 22 || v_edge_pos < 22
453 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
454 || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
455 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
457 srcY -= s->mspel * (1 + s->linesize);
458 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
459 s->linesize, s->linesize,
460 17 + s->mspel * 2, 17 + s->mspel * 2,
461 src_x - s->mspel, src_y - s->mspel,
462 s->h_edge_pos, v_edge_pos);
463 srcY = s->edge_emu_buffer;
464 s->vdsp.emulated_edge_mc(uvbuf, srcU,
465 s->uvlinesize, s->uvlinesize,
467 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
468 s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
469 s->uvlinesize, s->uvlinesize,
471 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
474 /* if we deal with range reduction we need to scale source blocks */
475 if (v->rangeredfrm) {
480 for (j = 0; j < 17 + s->mspel * 2; j++) {
481 for (i = 0; i < 17 + s->mspel * 2; i++)
482 src[i] = ((src[i] - 128) >> 1) + 128;
487 for (j = 0; j < 9; j++) {
488 for (i = 0; i < 9; i++) {
489 src[i] = ((src[i] - 128) >> 1) + 128;
490 src2[i] = ((src2[i] - 128) >> 1) + 128;
492 src += s->uvlinesize;
493 src2 += s->uvlinesize;
496 /* if we deal with intensity compensation we need to scale source blocks */
/* f selects the per-field LUT; in frame mode it alternates per line. */
502 for (j = 0; j < 17 + s->mspel * 2; j++) {
503 int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
504 for (i = 0; i < 17 + s->mspel * 2; i++)
505 src[i] = luty[f][src[i]];
510 for (j = 0; j < 9; j++) {
511 int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
512 for (i = 0; i < 9; i++) {
513 src[i] = lutuv[f][src[i]];
514 src2[i] = lutuv[f][src2[i]];
516 src += s->uvlinesize;
517 src2 += s->uvlinesize;
520 srcY += s->mspel * (1 + s->linesize);
/* Luma MC: quarter-pel mspel path (four 8x8 calls)... */
524 dxy = ((my & 3) << 2) | (mx & 3);
525 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
526 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
527 srcY += s->linesize * 8;
528 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
529 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
530 } else { // hpel mc - always used for luma
531 dxy = (my & 2) | ((mx & 2) >> 1);
533 s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
535 s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
538 if (s->flags & CODEC_FLAG_GRAY) return;
539 /* Chroma MC always uses qpel bilinear */
540 uvmx = (uvmx & 3) << 1;
541 uvmy = (uvmy & 3) << 1;
543 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
544 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
546 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
547 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Return the average of the two middle values of {a, b, c, d}, i.e. a
 * 4-input median used for MV prediction.  The four visible return lines
 * pair the min of the two larger candidates with the max of the two
 * smaller ones.  NOTE(review): the enclosing if/else on (a < b) and the
 * braces are missing from this excerpt. */
551 static inline int median4(int a, int b, int c, int d)
554 if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
555 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
557 if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
558 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
562 /** Do motion compensation for 4-MV macroblock - luminance block
/* Motion compensation for one 8x8 luma block (n = 0..3) of a 4-MV macroblock.
 * dir selects forward/backward prediction; avg selects averaging (B-frame
 * second pass) vs. plain put.  In interlaced frames (ILACE_FRAME), fieldmv
 * doubles the effective line stride so a field block reads every other line.
 * Also derives and stores the implied MV for block 3 in field P pictures
 * (same/opposite-field majority vote).
 * NOTE(review): several interior lines (returns, else branches, closing
 * braces, some declarations such as off/tx/ty) are missing from this
 * excerpt. */
564 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
566 MpegEncContext *s = &v->s;
568 int dxy, mx, my, src_x, src_y;
570 int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
571 int v_edge_pos = s->v_edge_pos >> v->field_mode;
572 uint8_t (*luty)[256];
575 if ((!v->field_mode ||
576 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
577 !v->s.last_picture.f->data[0])
580 mx = s->mv[dir][n][0];
581 my = s->mv[dir][n][1];
/* Pick the reference plane and intensity-compensation LUT. */
584 if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
585 srcY = s->current_picture.f->data[0];
587 use_ic = v->curr_use_ic;
589 srcY = s->last_picture.f->data[0];
591 use_ic = v->last_use_ic;
594 srcY = s->next_picture.f->data[0];
596 use_ic = v->next_use_ic;
600 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
605 if (v->cur_field_type != v->ref_field_type[dir])
606 my = my - 2 + 4 * v->cur_field_type;
/* Field P picture, last block: derive the MV stored for B-frame use by a
 * majority vote between same-field and opposite-field block MVs. */
609 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
610 int same_count = 0, opp_count = 0, k;
611 int chosen_mv[2][4][2], f;
613 for (k = 0; k < 4; k++) {
614 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
615 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
616 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
620 f = opp_count > same_count;
/* Combine the winning set: median of 4, middle of 3, or mean of 2. */
621 switch (f ? opp_count : same_count) {
623 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
624 chosen_mv[f][2][0], chosen_mv[f][3][0]);
625 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
626 chosen_mv[f][2][1], chosen_mv[f][3][1]);
629 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
630 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
633 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
634 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
637 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
638 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
639 for (k = 0; k < 4; k++)
640 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* Interlaced-frame MV pull-back toward the coded picture area. */
643 if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
645 int width = s->avctx->coded_width;
646 int height = s->avctx->coded_height >> 1;
647 if (s->pict_type == AV_PICTURE_TYPE_P) {
648 s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
649 s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
651 qx = (s->mb_x * 16) + (mx >> 2);
652 qy = (s->mb_y * 8) + (my >> 3);
657 mx -= 4 * (qx - width);
660 else if (qy > height + 1)
661 my -= 8 * (qy - height - 1);
/* Destination offset of this 8x8 block inside the 16x16 MB. */
664 if ((v->fcm == ILACE_FRAME) && fieldmv)
665 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
667 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
669 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
671 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
673 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
675 if (v->profile != PROFILE_ADVANCED) {
676 src_x = av_clip(src_x, -16, s->mb_width * 16);
677 src_y = av_clip(src_y, -16, s->mb_height * 16);
679 src_x = av_clip(src_x, -17, s->avctx->coded_width);
680 if (v->fcm == ILACE_FRAME) {
682 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
684 src_y = av_clip(src_y, -18, s->avctx->coded_height);
686 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
690 srcY += src_y * s->linesize + src_x;
691 if (v->field_mode && v->ref_field_type[dir])
692 srcY += s->current_picture_ptr->f->linesize[0];
694 if (fieldmv && !(src_y & 1))
696 if (fieldmv && (src_y & 1) && src_y < 4)
/* Emulated-edge path: border overlap, range reduction or IC. */
698 if (v->rangeredfrm || use_ic
699 || s->h_edge_pos < 13 || v_edge_pos < 23
700 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
701 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
702 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
703 /* check emulate edge stride and offset */
704 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
705 s->linesize, s->linesize,
706 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
707 src_x - s->mspel, src_y - (s->mspel << fieldmv),
708 s->h_edge_pos, v_edge_pos);
709 srcY = s->edge_emu_buffer;
710 /* if we deal with range reduction we need to scale source blocks */
711 if (v->rangeredfrm) {
716 for (j = 0; j < 9 + s->mspel * 2; j++) {
717 for (i = 0; i < 9 + s->mspel * 2; i++)
718 src[i] = ((src[i] - 128) >> 1) + 128;
719 src += s->linesize << fieldmv;
722 /* if we deal with intensity compensation we need to scale source blocks */
728 for (j = 0; j < 9 + s->mspel * 2; j++) {
729 int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
730 for (i = 0; i < 9 + s->mspel * 2; i++)
731 src[i] = luty[f][src[i]];
732 src += s->linesize << fieldmv;
735 srcY += s->mspel * (1 + (s->linesize << fieldmv));
/* 8x8 luma MC: quarter-pel mspel (avg or put) vs. half-pel fallback. */
739 dxy = ((my & 3) << 2) | (mx & 3);
741 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
743 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
744 } else { // hpel mc - always used for luma
745 dxy = (my & 2) | ((mx & 2) >> 1);
747 s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
749 s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Derive a single chroma MV (*tx, *ty) from the four luma block MVs
 * (mvx/mvy), considering only blocks whose a[i] matches flag.  idx encodes
 * per-block mismatch bits; count[idx] is the number of mismatching blocks.
 * 0 mismatches -> median4 of all four, 1 -> mid_pred of the other three,
 * 2 -> mean of the two matching ones.  Returns the number of valid blocks
 * (return statements not all visible in this excerpt).
 * NOTE(review): bit 0 of idx, braces, and some switch lines are missing
 * from this excerpt. */
753 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
756 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
758 idx = ((a[3] != flag) << 3)
759 | ((a[2] != flag) << 2)
760 | ((a[1] != flag) << 1)
763 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
764 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
766 } else if (count[idx] == 1) {
769 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
770 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
773 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
774 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
777 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
778 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
781 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
782 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
785 } else if (count[idx] == 2) {
/* Two valid blocks: locate them (t1, t2) and average their MVs. */
787 for (i = 0; i < 3; i++)
792 for (i = t1 + 1; i < 4; i++)
797 *tx = (mvx[t1] + mvx[t2]) / 2;
798 *ty = (mvy[t1] + mvy[t2]) / 2;
806 /** Do motion compensation for 4-MV macroblock - both chroma blocks
/* Chroma motion compensation for a 4-MV macroblock: derives one chroma MV
 * from the four luma MVs via get_chroma_mv() (intra blocks or wrong-field
 * MVs excluded), then does 8x8 qpel-bilinear MC on both U and V, handling
 * field references, range reduction and intensity compensation like
 * vc1_mc_1mv().  Skipped entirely for grayscale decoding.
 * NOTE(review): early returns, else branches, closing braces and some
 * statements between visible lines are missing from this excerpt. */
808 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
810 MpegEncContext *s = &v->s;
811 H264ChromaContext *h264chroma = &v->h264chroma;
812 uint8_t *srcU, *srcV;
813 int uvmx, uvmy, uvsrc_x, uvsrc_y;
814 int k, tx = 0, ty = 0;
815 int mvx[4], mvy[4], intra[4], mv_f[4];
817 int chroma_ref_type = v->cur_field_type;
818 int v_edge_pos = s->v_edge_pos >> v->field_mode;
819 uint8_t (*lutuv)[256];
822 if (!v->field_mode && !v->s.last_picture.f->data[0])
824 if (s->flags & CODEC_FLAG_GRAY)
/* Gather the four luma block MVs plus intra/field flags. */
827 for (k = 0; k < 4; k++) {
828 mvx[k] = s->mv[dir][k][0];
829 mvy[k] = s->mv[dir][k][1];
830 intra[k] = v->mb_type[0][s->block_index[k]];
832 mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
835 /* calculate chroma MV vector from four luma MVs */
836 if (!v->field_mode || (v->field_mode && !v->numref)) {
837 valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
838 chroma_ref_type = v->reffield;
/* Too few valid luma MVs: store zero MV and skip chroma MC. */
840 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
841 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
842 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
843 return; //no need to do MC for intra blocks
/* Two-reference field mode: follow the dominant field of the 4 MVs. */
847 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
849 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
851 chroma_ref_type = !v->cur_field_type;
853 if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
855 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
856 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
/* Halve to chroma resolution, rounding 3/4-pel positions up. */
857 uvmx = (tx + ((tx & 3) == 3)) >> 1;
858 uvmy = (ty + ((ty & 3) == 3)) >> 1;
860 v->luma_mv[s->mb_x][0] = uvmx;
861 v->luma_mv[s->mb_x][1] = uvmy;
/* fastuvmc rounding toward zero (condition line not visible here). */
864 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
865 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
867 // Field conversion bias
868 if (v->cur_field_type != chroma_ref_type)
869 uvmy += 2 - 4 * chroma_ref_type;
871 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
872 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
874 if (v->profile != PROFILE_ADVANCED) {
875 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
876 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
878 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
879 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
/* Select chroma reference planes and IC LUT (current/last/next). */
883 if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
884 srcU = s->current_picture.f->data[1];
885 srcV = s->current_picture.f->data[2];
886 lutuv = v->curr_lutuv;
887 use_ic = v->curr_use_ic;
889 srcU = s->last_picture.f->data[1];
890 srcV = s->last_picture.f->data[2];
891 lutuv = v->last_lutuv;
892 use_ic = v->last_use_ic;
895 srcU = s->next_picture.f->data[1];
896 srcV = s->next_picture.f->data[2];
897 lutuv = v->next_lutuv;
898 use_ic = v->next_use_ic;
902 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
906 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
907 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
910 if (chroma_ref_type) {
911 srcU += s->current_picture_ptr->f->linesize[1];
912 srcV += s->current_picture_ptr->f->linesize[2];
/* Emulated-edge path: border overlap, range reduction or IC. */
916 if (v->rangeredfrm || use_ic
917 || s->h_edge_pos < 18 || v_edge_pos < 18
918 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
919 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
920 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
921 s->uvlinesize, s->uvlinesize,
922 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
923 s->h_edge_pos >> 1, v_edge_pos >> 1);
924 s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
925 s->uvlinesize, s->uvlinesize,
926 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
927 s->h_edge_pos >> 1, v_edge_pos >> 1);
928 srcU = s->edge_emu_buffer;
929 srcV = s->edge_emu_buffer + 16;
931 /* if we deal with range reduction we need to scale source blocks */
932 if (v->rangeredfrm) {
938 for (j = 0; j < 9; j++) {
939 for (i = 0; i < 9; i++) {
940 src[i] = ((src[i] - 128) >> 1) + 128;
941 src2[i] = ((src2[i] - 128) >> 1) + 128;
943 src += s->uvlinesize;
944 src2 += s->uvlinesize;
947 /* if we deal with intensity compensation we need to scale source blocks */
954 for (j = 0; j < 9; j++) {
955 int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
956 for (i = 0; i < 9; i++) {
957 src[i] = lutuv[f][src[i]];
958 src2[i] = lutuv[f][src2[i]];
960 src += s->uvlinesize;
961 src2 += s->uvlinesize;
966 /* Chroma MC always uses qpel bilinear */
967 uvmx = (uvmx & 3) << 1;
968 uvmy = (uvmy & 3) << 1;
970 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
971 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
973 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
974 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
978 /** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
/* Chroma MC for 4-MV interlaced-frame macroblocks: both U and V are built
 * from four 4x4 sub-blocks, each with its own MV derived from the matching
 * luma MV.  dir/dir2 select the prediction direction for the top/bottom
 * sub-block pairs; avg selects averaging vs. put.  Field sub-block MVs
 * (fieldmv) use the s_rndtblfield rounding table and doubled chroma stride.
 * NOTE(review): else branches, closing braces and some declarations are
 * missing from this excerpt. */
980 static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
982 MpegEncContext *s = &v->s;
983 H264ChromaContext *h264chroma = &v->h264chroma;
984 uint8_t *srcU, *srcV;
985 int uvsrc_x, uvsrc_y;
986 int uvmx_field[4], uvmy_field[4];
988 int fieldmv = v->blk_mv_type[s->block_index[0]];
989 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
990 int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
991 int v_edge_pos = s->v_edge_pos >> 1;
993 uint8_t (*lutuv)[256];
995 if (s->flags & CODEC_FLAG_GRAY)
/* Derive per-sub-block chroma MVs from the luma MVs. */
998 for (i = 0; i < 4; i++) {
999 int d = i < 2 ? dir: dir2;
1000 tx = s->mv[d][i][0];
1001 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1002 ty = s->mv[d][i][1];
1004 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1006 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
1009 for (i = 0; i < 4; i++) {
1010 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1011 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1012 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1013 // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1014 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1015 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
/* Reference selection: next picture for backward, last for forward. */
1016 if (i < 2 ? dir : dir2) {
1017 srcU = s->next_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1018 srcV = s->next_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1019 lutuv = v->next_lutuv;
1020 use_ic = v->next_use_ic;
1022 srcU = s->last_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1023 srcV = s->last_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1024 lutuv = v->last_lutuv;
1025 use_ic = v->last_use_ic;
1027 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1028 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
1030 if (fieldmv && !(uvsrc_y & 1))
1032 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
/* Emulated-edge path for border overlap (and IC, per the lines below). */
1035 || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1036 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1037 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1038 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
1039 s->uvlinesize, s->uvlinesize,
1040 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1041 s->h_edge_pos >> 1, v_edge_pos);
1042 s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
1043 s->uvlinesize, s->uvlinesize,
1044 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1045 s->h_edge_pos >> 1, v_edge_pos);
1046 srcU = s->edge_emu_buffer;
1047 srcV = s->edge_emu_buffer + 16;
1049 /* if we deal with intensity compensation we need to scale source blocks */
1052 uint8_t *src, *src2;
1056 for (j = 0; j < 5; j++) {
1057 int f = (uvsrc_y + (j << fieldmv)) & 1;
1058 for (i = 0; i < 5; i++) {
1059 src[i] = lutuv[f][src[i]];
1060 src2[i] = lutuv[f][src2[i]];
1062 src += s->uvlinesize << fieldmv;
1063 src2 += s->uvlinesize << fieldmv;
/* 4x4 sub-block MC: avg vs. put, rounded vs. no-rnd variants. */
1069 h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1070 h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1072 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1073 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1077 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1078 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1080 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1081 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1087 /***********************************************************************/
1089 * @name VC-1 Block-level functions
1090 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1096 * @brief Get macroblock-level quantizer scale
#define GET_MQUANT()                                           \
    if (v->dquantfrm) { /* per-MB quantizer change signalled */ \
        if (v->dqprofile == DQPROFILE_ALL_MBS) {               \
            if (v->dqbilevel) {                                \
                /* 1 bit selects between the two picture-level quantizers */ \
                mquant = (get_bits1(gb)) ? v->altpq : v->pq;   \
                mqdiff = get_bits(gb, 3);                      \
                /* mqdiff < 7: delta against picture quantizer */ \
                mquant = v->pq + mqdiff;                       \
                /* mqdiff == 7: explicit 5-bit quantizer follows */ \
                mquant = get_bits(gb, 5);                      \
        /* edge profiles: altpq applies only to selected MB edges */ \
        if (v->dqprofile == DQPROFILE_SINGLE_EDGE)             \
            edges = 1 << v->dqsbedge;                          \
        else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES)       \
            edges = (3 << v->dqsbedge) % 15;                   \
        else if (v->dqprofile == DQPROFILE_FOUR_EDGES)         \
        if ((edges&1) && !s->mb_x)                             \
            mquant = v->altpq;                                 \
        if ((edges&2) && s->first_slice_line)                  \
            mquant = v->altpq;                                 \
        if ((edges&4) && s->mb_x == (s->mb_width - 1))         \
            mquant = v->altpq;                                 \
        if ((edges&8) && s->mb_y == (s->mb_height - 1))        \
            mquant = v->altpq;                                 \
        /* valid quantizers are 1..31; clamp bogus bitstream values */ \
        if (!mquant || mquant > 31) {                          \
            av_log(v->s.avctx, AV_LOG_ERROR,                   \
                   "Overriding invalid mquant %d\n", mquant);  \
1134 * @def GET_MVDATA(_dmv_x, _dmv_y)
1135 * @brief Get MV differentials
1136 * @see MVDATA decoding from 8.3.5.2, p(1)20
1137 * @param _dmv_x Horizontal differential for decoded MV
1138 * @param _dmv_y Vertical differential for decoded MV
#define GET_MVDATA(_dmv_x, _dmv_y)                                        \
    index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
                         VC1_MV_DIFF_VLC_BITS, 2);                        \
    mb_has_coeffs = 1;                                                    \
    mb_has_coeffs = 0;                                                    \
    _dmv_x = _dmv_y = 0;                                                  \
    } else if (index == 35) {                                             \
        /* escape code: raw fixed-length differentials follow */          \
        _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample);            \
        _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample);            \
    } else if (index == 36) {                                             \
        /* split VLC index into separate x/y magnitude classes */         \
        index1 = index % 6;                                               \
        if (!s->quarter_sample && index1 == 5) val = 1;                   \
        if (size_table[index1] - val > 0)                                 \
            val = get_bits(gb, size_table[index1] - val);                 \
        sign = 0 - (val&1); /* 0 or -1: branch-free sign from LSB */      \
        _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign;       \
        index1 = index / 6;                                               \
        if (!s->quarter_sample && index1 == 5) val = 1;                   \
        if (size_table[index1] - val > 0)                                 \
            val = get_bits(gb, size_table[index1] - val);                 \
        sign = 0 - (val & 1);                                             \
        _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign;     \
/**
 * Decode an interlaced-picture MVDATA element.
 * @param dmv_x    receives the horizontal MV differential
 * @param dmv_y    receives the vertical MV differential
 * @param pred_flag receives the reference-field selection bit when two
 *        reference fields are in use; may be NULL (checked before write)
 */
static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
                                                   int *dmv_y, int *pred_flag)
    int extend_x = 0, extend_y = 0;
    GetBitContext *gb = &v->s.gb;
    const int* offs_tab;
        /* two reference fields: y carries the predictor-selection bit */
        bits = VC1_2REF_MVDATA_VLC_BITS;
        bits = VC1_1REF_MVDATA_VLC_BITS;
    switch (v->dmvrange) {
        extend_x = extend_y = 1;
    index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
        /* escape: raw differentials */
        *dmv_x = get_bits(gb, v->k_x);
        *dmv_y = get_bits(gb, v->k_y);
            *pred_flag = *dmv_y & 1;
            *dmv_y = (*dmv_y + *pred_flag) >> 1;
            *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
            offs_tab = offset_table2;
            offs_tab = offset_table1;
        index1 = (index + 1) % 9;
            val = get_bits(gb, index1 + extend_x);
            sign = 0 -(val & 1); /* 0 or -1: branch-free sign from LSB */
            *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
            offs_tab = offset_table2;
            offs_tab = offset_table1;
        index1 = (index + 1) / 9;
        if (index1 > v->numref) {
            val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
            sign = 0 - (val & 1);
            *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
        if (v->numref && pred_flag)
            *pred_flag = index1 & 1;
/**
 * Scale the x component of an MV predictor taken from the same field
 * (field-interlaced pictures), using the standard field MV scaling tables.
 * @return scaled value clipped to the picture's horizontal MV range
 */
static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
    int scaledvalue, refdist;
    int scalesame1, scalesame2;
    int scalezone1_x, zone1offset_x;
    /* second field swaps which table row applies for this direction */
    int table_index = dir ^ v->second_field;
    if (v->s.pict_type != AV_PICTURE_TYPE_B)
        refdist = v->refdist;
        refdist = dir ? v->brfd : v->frfd;
    scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
    scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
    scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
    zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
        /* small MVs use scale 1; larger ones scale 2 plus a zone offset */
        if (FFABS(n) < scalezone1_x)
            scaledvalue = (n * scalesame1) >> 8;
                scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
                scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
    return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/**
 * Scale the y component of an MV predictor taken from the same field
 * (field-interlaced pictures), using the standard field MV scaling tables.
 * @return scaled value clipped to half the vertical MV range, with the
 *         clip window shifted by one when current and reference fields differ
 */
static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
    int scaledvalue, refdist;
    int scalesame1, scalesame2;
    int scalezone1_y, zone1offset_y;
    /* second field swaps which table row applies for this direction */
    int table_index = dir ^ v->second_field;
    if (v->s.pict_type != AV_PICTURE_TYPE_B)
        refdist = v->refdist;
        refdist = dir ? v->brfd : v->frfd;
    scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
    scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
    scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
    zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
        if (FFABS(n) < scalezone1_y)
            scaledvalue = (n * scalesame1) >> 8;
                scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
                scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
    /* asymmetric clip when predicting across fields of opposite parity */
    if (v->cur_field_type && !v->ref_field_type[dir])
        return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
        return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/**
 * Scale the x component of an MV predictor taken from the opposite field,
 * using the B-picture field MV scaling tables (indexed by backward
 * reference field distance).
 * @return scaled value clipped to the picture's horizontal MV range
 */
static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
    int scalezone1_x, zone1offset_x;
    int scaleopp1, scaleopp2, brfd;
    brfd = FFMIN(v->brfd, 3); /* tables only cover distances 0..3 */
    scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
    zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
    scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
    scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
        if (FFABS(n) < scalezone1_x)
            scaledvalue = (n * scaleopp1) >> 8;
                scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
                scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
    return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/**
 * Scale the y component of an MV predictor taken from the opposite field,
 * using the B-picture field MV scaling tables.
 * @return scaled value clipped to half the vertical MV range, with the
 *         clip window shifted by one when current and reference fields differ
 */
static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
    int scalezone1_y, zone1offset_y;
    int scaleopp1, scaleopp2, brfd;
    brfd = FFMIN(v->brfd, 3); /* tables only cover distances 0..3 */
    scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
    zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
    scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
    scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
        if (FFABS(n) < scalezone1_y)
            scaledvalue = (n * scaleopp1) >> 8;
                scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
                scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
    /* asymmetric clip when predicting across fields of opposite parity */
    if (v->cur_field_type && !v->ref_field_type[dir]) {
        return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
        return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/**
 * Dispatch same-field MV predictor scaling: per-component scaling for
 * non-B/second-field/forward cases, flat table scaling otherwise.
 * Results are shifted to half-pel resolution when not in quarter-sample mode.
 */
static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
    int brfd, scalesame;
    int hpel = 1 - v->s.quarter_sample; /* 1 when MVs are in half-pel units */
    if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
        n = scaleforsame_y(v, i, n, dir) << hpel;
        n = scaleforsame_x(v, n, dir) << hpel;
    brfd = FFMIN(v->brfd, 3); /* tables only cover distances 0..3 */
    scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
    n = (n * scalesame >> 8) << hpel;
/**
 * Dispatch opposite-field MV predictor scaling: per-component zone-based
 * scaling for backward prediction in first-field B pictures, flat table
 * scaling (by refdist) otherwise.
 * Results are shifted to half-pel resolution when not in quarter-sample mode.
 */
static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
    int refdist, scaleopp;
    int hpel = 1 - v->s.quarter_sample; /* 1 when MVs are in half-pel units */
    if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
        n = scaleforopp_y(v, n, dir) << hpel;
        n = scaleforopp_x(v, n) << hpel;
    if (v->s.pict_type != AV_PICTURE_TYPE_B)
        refdist = FFMIN(v->refdist, 3); /* tables only cover distances 0..3 */
        refdist = dir ? v->brfd : v->frfd;
    scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
    n = (n * scaleopp >> 8) << hpel;
1420 /** Predict and set motion vector
1422 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1423 int mv1, int r_x, int r_y, uint8_t* is_intra,
1424 int pred_flag, int dir)
1426 MpegEncContext *s = &v->s;
1427 int xy, wrap, off = 0;
1431 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1432 int opposite, a_f, b_f, c_f;
1433 int16_t field_predA[2];
1434 int16_t field_predB[2];
1435 int16_t field_predC[2];
1436 int a_valid, b_valid, c_valid;
1437 int hybridmv_thresh, y_bias = 0;
1439 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1440 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1444 /* scale MV difference to be quad-pel */
1445 dmv_x <<= 1 - s->quarter_sample;
1446 dmv_y <<= 1 - s->quarter_sample;
1448 wrap = s->b8_stride;
1449 xy = s->block_index[n];
1452 s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1453 s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1454 s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1455 s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1456 if (mv1) { /* duplicate motion data for 1-MV block */
1457 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1458 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1459 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1460 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1461 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1462 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1463 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1464 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1465 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1466 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1467 s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1468 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1469 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1474 C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1475 A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1477 if (v->field_mode && mixedmv_pic)
1478 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1480 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1482 //in 4-MV mode different blocks have different B predictor position
1485 off = (s->mb_x > 0) ? -1 : 1;
1488 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1497 B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
1499 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1500 b_valid = a_valid && (s->mb_width > 1);
1501 c_valid = s->mb_x || (n == 1 || n == 3);
1502 if (v->field_mode) {
1503 a_valid = a_valid && !is_intra[xy - wrap];
1504 b_valid = b_valid && !is_intra[xy - wrap + off];
1505 c_valid = c_valid && !is_intra[xy - 1];
1509 a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1510 num_oppfield += a_f;
1511 num_samefield += 1 - a_f;
1512 field_predA[0] = A[0];
1513 field_predA[1] = A[1];
1515 field_predA[0] = field_predA[1] = 0;
1519 b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1520 num_oppfield += b_f;
1521 num_samefield += 1 - b_f;
1522 field_predB[0] = B[0];
1523 field_predB[1] = B[1];
1525 field_predB[0] = field_predB[1] = 0;
1529 c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1530 num_oppfield += c_f;
1531 num_samefield += 1 - c_f;
1532 field_predC[0] = C[0];
1533 field_predC[1] = C[1];
1535 field_predC[0] = field_predC[1] = 0;
1539 if (v->field_mode) {
1541 // REFFIELD determines if the last field or the second-last field is
1542 // to be used as reference
1543 opposite = 1 - v->reffield;
1545 if (num_samefield <= num_oppfield)
1546 opposite = 1 - pred_flag;
1548 opposite = pred_flag;
1553 if (a_valid && !a_f) {
1554 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1555 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1557 if (b_valid && !b_f) {
1558 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1559 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1561 if (c_valid && !c_f) {
1562 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1563 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1565 v->mv_f[dir][xy + v->blocks_off] = 1;
1566 v->ref_field_type[dir] = !v->cur_field_type;
1568 if (a_valid && a_f) {
1569 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1570 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1572 if (b_valid && b_f) {
1573 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1574 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1576 if (c_valid && c_f) {
1577 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1578 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1580 v->mv_f[dir][xy + v->blocks_off] = 0;
1581 v->ref_field_type[dir] = v->cur_field_type;
1585 px = field_predA[0];
1586 py = field_predA[1];
1587 } else if (c_valid) {
1588 px = field_predC[0];
1589 py = field_predC[1];
1590 } else if (b_valid) {
1591 px = field_predB[0];
1592 py = field_predB[1];
1598 if (num_samefield + num_oppfield > 1) {
1599 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1600 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1603 /* Pullback MV as specified in 8.3.5.3.4 */
1604 if (!v->field_mode) {
1606 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1607 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1608 X = (s->mb_width << 6) - 4;
1609 Y = (s->mb_height << 6) - 4;
1611 if (qx + px < -60) px = -60 - qx;
1612 if (qy + py < -60) py = -60 - qy;
1614 if (qx + px < -28) px = -28 - qx;
1615 if (qy + py < -28) py = -28 - qy;
1617 if (qx + px > X) px = X - qx;
1618 if (qy + py > Y) py = Y - qy;
1621 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1622 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1623 hybridmv_thresh = 32;
1624 if (a_valid && c_valid) {
1625 if (is_intra[xy - wrap])
1626 sum = FFABS(px) + FFABS(py);
1628 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1629 if (sum > hybridmv_thresh) {
1630 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1631 px = field_predA[0];
1632 py = field_predA[1];
1634 px = field_predC[0];
1635 py = field_predC[1];
1638 if (is_intra[xy - 1])
1639 sum = FFABS(px) + FFABS(py);
1641 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1642 if (sum > hybridmv_thresh) {
1643 if (get_bits1(&s->gb)) {
1644 px = field_predA[0];
1645 py = field_predA[1];
1647 px = field_predC[0];
1648 py = field_predC[1];
1655 if (v->field_mode && v->numref)
1657 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1659 /* store MV using signed modulus of MV range defined in 4.11 */
1660 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1661 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1662 if (mv1) { /* duplicate motion data for 1-MV block */
1663 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1664 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1665 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1666 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1667 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1668 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1669 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1670 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1674 /** Predict and set motion vector for interlaced frame picture MBs
static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                                     int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2]; /* candidate predictors: left / above / above-right */
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposite
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;
    wrap = s->b8_stride;
    xy = s->block_index[n];
        /* intra block: zero out stored MVs for both directions */
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
        s->current_picture.motion_val[1][xy][0] = 0;
        s->current_picture.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1][0] = 0;
            s->current_picture.motion_val[0][xy + 1][1] = 0;
            s->current_picture.motion_val[0][xy + wrap][0] = 0;
            s->current_picture.motion_val[0][xy + wrap][1] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1][0] = 0;
            s->current_picture.motion_val[1][xy + 1][1] = 0;
            s->current_picture.motion_val[1][xy + wrap][0] = 0;
            s->current_picture.motion_val[1][xy + wrap][1] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
    /* top blocks look down-left for the other field, bottom blocks look up */
    off = ((n == 0) || (n == 1)) ? 1 : -1;
    /* Predict A (left neighbour) */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->current_picture.motion_val[dir][xy - 1][0];
            A[1] = s->current_picture.motion_val[dir][xy - 1][1];
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                pos_b = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    n_adj = (n & 2) | (n & 1);
                B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    /* frame-MV current, field-MV neighbour: average both fields */
                    B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
            if (s->mb_width > 1) {
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    pos_c = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                    C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                /* rightmost MB column: take C from above-left instead */
                if (s->mb_x == s->mb_width - 1) {
                    if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                        pos_c = s->block_index[3] - 2 * wrap - 2;
                        if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
                        C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
                        if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                            C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                            C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
        /* bottom blocks of a 4-MV MB predict from the top blocks of same MB */
        pos_b = s->block_index[1];
        B[0] = s->current_picture.motion_val[dir][pos_b][0];
        B[1] = s->current_picture.motion_val[dir][pos_b][1];
        pos_c = s->block_index[0];
        C[0] = s->current_picture.motion_val[dir][pos_c][0];
        C[1] = s->current_picture.motion_val[dir][pos_c][1];
    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    if (!v->blk_mv_type[xy]) {
        if (s->mb_width == 1) {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if (a_valid) { px = A[0]; py = A[1]; }
                if (b_valid) { px = B[0]; py = B[1]; }
                if (c_valid) { px = C[0]; py = C[1]; }
            /* bit 2 of the y component encodes the predictor's field parity */
            field_a = (A[1] & 4) ? 1 : 0;
            field_b = (B[1] & 4) ? 1 : 0;
            field_c = (C[1] & 4) ? 1 : 0;
        num_oppfield = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from same field set depending on priority
                   the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
        } else if (total_valid == 2) {
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                } else if (!field_b && b_valid) {
                } else if (c_valid) {
                if (field_a && a_valid) {
                } else if (field_b && b_valid) {
                } else if (c_valid) {
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
        s->mv[dir][n + 1][0] = s->mv[dir][n][0];
        s->mv[dir][n + 1][1] = s->mv[dir][n][1];
1901 /** Motion compensation for direct or interpolated blocks in B-frames
static void vc1_interp_mc(VC1Context *v)
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    int use_ic = v->next_use_ic; /* intensity compensation on next picture */
    /* interpolation needs the backward (next) reference to exist */
    if (!v->field_mode && !v->s.next_picture.f->data[0])
    mx = s->mv[1][0][0];
    my = s->mv[1][0][1];
    /* chroma MV derived from luma MV at half resolution */
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    if (v->field_mode) {
        if (v->cur_field_type != v->ref_field_type[1])
            my = my - 2 + 4 * v->cur_field_type;
            uvmy = uvmy - 2 + 4 * v->cur_field_type;
        /* fastuvmc: round chroma MVs away from zero to even values */
        uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
    srcY = s->next_picture.f->data[0];
    srcU = s->next_picture.f->data[1];
    srcV = s->next_picture.f->data[2];
    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip( src_x, -16, s->mb_width * 16);
        src_y = av_clip( src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
        src_x = av_clip( src_x, -17, s->avctx->coded_width);
        src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
    if (v->field_mode && v->ref_field_type[1]) {
        /* bottom reference field: skip one line into the frame */
        srcY += s->current_picture_ptr->f->linesize[0];
        srcU += s->current_picture_ptr->f->linesize[1];
        srcV += s->current_picture_ptr->f->linesize[2];
    /* for grayscale we should not try to read from unknown area */
    if (s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    /* pad via edge emulation when the MV reaches outside the picture,
     * or when the pixels must be rewritten (range reduction / IC) */
    if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
        || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
        || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
        uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
        srcY -= s->mspel * (1 + s->linesize);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 17 + s->mspel * 2, 17 + s->mspel * 2,
                                 src_x - s->mspel, src_y - s->mspel,
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        s->vdsp.emulated_edge_mc(uvbuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            uint8_t *src, *src2;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            /* intensity compensation: remap pixels through per-field LUTs */
            uint8_t (*luty )[256] = v->next_luty;
            uint8_t (*lutuv)[256] = v->next_lutuv;
            uint8_t *src, *src2;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
            for (j = 0; j < 9; j++) {
                int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i] = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                src += s->uvlinesize;
                src2 += s->uvlinesize;
        srcY += s->mspel * (1 + s->linesize);
        /* quarter-pel: 16x16 luma done as four 8x8 mspel averages */
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
        dxy = (my & 2) | ((mx & 2) >> 1); /* half-pel case */
            s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
            s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
    if (s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
        h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Scale an MV component by the B-frame temporal position (BFRACTION),
 * as used by direct-mode MV derivation. The fast path is the common
 * B_FRACTION_DEN == 256 case where the division becomes a shift.
 * NOTE(review): n is derived from bfrac/inv in elided lines above the
 * visible returns — confirm against the full source.
 */
static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
#if B_FRACTION_DEN==256
        /* half-pel MVs: scale at double precision, round up, re-double */
        return 2 * ((value * n + 255) >> 9);
        return (value * n + 128) >> 8;
        n -= B_FRACTION_DEN; /* inverse weight for backward scaling */
        return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
        return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2090 /** Reconstruct motion vector for B-frame and do motion compensation
static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
                            int direct, int mode)
    if (mode == BMV_TYPE_INTERPOLATED) {
    /* single-direction prediction: forward or backward 1-MV MC */
    vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
/** Predict both motion vectors of a B-frame macroblock and clamp them
 * to the picture area.
 * @param v VC-1 decoder context
 * @param dmv_x,dmv_y decoded MV differentials, one pair per direction
 * @param direct nonzero: derive both MVs from the co-located MV of the
 *               next (anchor) picture scaled by BFRACTION
 * @param mvtype BMV_TYPE_* selecting which direction(s) to predict
 */
2109 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2110 int direct, int mvtype)
2112 MpegEncContext *s = &v->s;
2113 int xy, wrap, off = 0;
2118 const uint8_t *is_intra = v->mb_type[0];
2122 /* scale MV difference to be quad-pel */
2123 dmv_x[0] <<= 1 - s->quarter_sample;
2124 dmv_y[0] <<= 1 - s->quarter_sample;
2125 dmv_x[1] <<= 1 - s->quarter_sample;
2126 dmv_y[1] <<= 1 - s->quarter_sample;
2128 wrap = s->b8_stride;
2129 xy = s->block_index[0];
/* zero-initialize stored MVs for both directions before prediction */
2132 s->current_picture.motion_val[0][xy + v->blocks_off][0] =
2133 s->current_picture.motion_val[0][xy + v->blocks_off][1] =
2134 s->current_picture.motion_val[1][xy + v->blocks_off][0] =
2135 s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
/* direct mode (progressive): scale co-located next-picture MV by
 * BFRACTION — forward (dir 0) and backward (dir 1) components */
2138 if (!v->field_mode) {
2139 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2140 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2141 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2142 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2144 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2145 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2146 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2147 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2148 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2151 s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2152 s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2153 s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2154 s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
/* forward prediction: median of left (C), top (A) and
 * top-right (or top-left at the last MB column) (B) neighbours */
2158 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2159 C = s->current_picture.motion_val[0][xy - 2];
2160 A = s->current_picture.motion_val[0][xy - wrap * 2];
2161 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2162 B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
2164 if (!s->mb_x) C[0] = C[1] = 0;
2165 if (!s->first_slice_line) { // predictor A is not out of bounds
2166 if (s->mb_width == 1) {
2170 px = mid_pred(A[0], B[0], C[0]);
2171 py = mid_pred(A[1], B[1], C[1]);
2173 } else if (s->mb_x) { // predictor C is not out of bounds
2179 /* Pullback MV as specified in 8.3.5.3.4 */
/* Simple/Main profile use half-pel units (<< 5); Advanced uses
 * quarter-pel units (<< 6) for the pullback window */
2182 if (v->profile < PROFILE_ADVANCED) {
2183 qx = (s->mb_x << 5);
2184 qy = (s->mb_y << 5);
2185 X = (s->mb_width << 5) - 4;
2186 Y = (s->mb_height << 5) - 4;
2187 if (qx + px < -28) px = -28 - qx;
2188 if (qy + py < -28) py = -28 - qy;
2189 if (qx + px > X) px = X - qx;
2190 if (qy + py > Y) py = Y - qy;
2192 qx = (s->mb_x << 6);
2193 qy = (s->mb_y << 6);
2194 X = (s->mb_width << 6) - 4;
2195 Y = (s->mb_height << 6) - 4;
2196 if (qx + px < -60) px = -60 - qx;
2197 if (qy + py < -60) py = -60 - qy;
2198 if (qx + px > X) px = X - qx;
2199 if (qy + py > Y) py = Y - qy;
2202 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): hybrid prediction is disabled via "0 &&" — the branch
 * reads a bit (get_bits1) so enabling it would change bitstream parsing;
 * presumably a deliberate disable — confirm against the reference
 * decoder before changing. */
2203 if (0 && !s->first_slice_line && s->mb_x) {
2204 if (is_intra[xy - wrap])
2205 sum = FFABS(px) + FFABS(py);
2207 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2209 if (get_bits1(&s->gb)) {
2217 if (is_intra[xy - 2])
2218 sum = FFABS(px) + FFABS(py);
2220 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2222 if (get_bits1(&s->gb)) {
2232 /* store MV using signed modulus of MV range defined in 4.11 */
2233 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2234 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* backward prediction: identical procedure on the second MV set,
 * using direction-1 neighbours and differentials */
2236 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2237 C = s->current_picture.motion_val[1][xy - 2];
2238 A = s->current_picture.motion_val[1][xy - wrap * 2];
2239 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2240 B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
2244 if (!s->first_slice_line) { // predictor A is not out of bounds
2245 if (s->mb_width == 1) {
2249 px = mid_pred(A[0], B[0], C[0]);
2250 py = mid_pred(A[1], B[1], C[1]);
2252 } else if (s->mb_x) { // predictor C is not out of bounds
2258 /* Pullback MV as specified in 8.3.5.3.4 */
2261 if (v->profile < PROFILE_ADVANCED) {
2262 qx = (s->mb_x << 5);
2263 qy = (s->mb_y << 5);
2264 X = (s->mb_width << 5) - 4;
2265 Y = (s->mb_height << 5) - 4;
2266 if (qx + px < -28) px = -28 - qx;
2267 if (qy + py < -28) py = -28 - qy;
2268 if (qx + px > X) px = X - qx;
2269 if (qy + py > Y) py = Y - qy;
2271 qx = (s->mb_x << 6);
2272 qy = (s->mb_y << 6);
2273 X = (s->mb_width << 6) - 4;
2274 Y = (s->mb_height << 6) - 4;
2275 if (qx + px < -60) px = -60 - qx;
2276 if (qy + py < -60) py = -60 - qy;
2277 if (qx + px > X) px = X - qx;
2278 if (qy + py > Y) py = Y - qy;
2281 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): same "0 &&" disable as the forward branch above. */
2282 if (0 && !s->first_slice_line && s->mb_x) {
2283 if (is_intra[xy - wrap])
2284 sum = FFABS(px) + FFABS(py);
2286 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2288 if (get_bits1(&s->gb)) {
2296 if (is_intra[xy - 2])
2297 sum = FFABS(px) + FFABS(py);
2299 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2301 if (get_bits1(&s->gb)) {
2311 /* store MV using signed modulus of MV range defined in 4.11 */
2313 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2314 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
/* publish the final MVs of both directions for later prediction */
2316 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2317 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2318 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2319 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
/** Predict B-frame motion vectors for interlaced-field pictures.
 * @param v VC-1 decoder context
 * @param n block index within the macroblock
 * @param dmv_x,dmv_y decoded MV differentials per direction
 * @param mv1 nonzero when the MB uses a single MV for all blocks
 * @param pred_flag per-direction field-prediction flags
 */
2322 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2324 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2325 MpegEncContext *s = &v->s;
2326 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2328 if (v->bmvtype == BMV_TYPE_DIRECT) {
2329 int total_opp, k, f;
/* direct mode: if the co-located MB in the next picture is not intra,
 * scale its MV by BFRACTION for both directions */
2330 if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2331 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2332 v->bfraction, 0, s->quarter_sample);
2333 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2334 v->bfraction, 0, s->quarter_sample);
2335 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2336 v->bfraction, 1, s->quarter_sample);
2337 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2338 v->bfraction, 1, s->quarter_sample);
/* f = majority vote of the opposite-field flags of the four
 * co-located 8x8 blocks in the next picture */
2340 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2341 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2342 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2343 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2344 f = (total_opp > 2) ? 1 : 0;
2346 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2347 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2350 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
/* store the same MV/field flag for all four luma blocks */
2351 for (k = 0; k < 4; k++) {
2352 s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2353 s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2354 s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2355 s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2356 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2357 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* non-direct: run the regular field-picture MV prediction for the
 * direction(s) this MB type needs */
2361 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2362 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2363 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2366 if (dir) { // backward
2367 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2368 if (n == 3 || mv1) {
2369 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2372 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2373 if (n == 3 || mv1) {
2374 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2379 /** Get predicted DC value for I-frames only
2380 * prediction dir: left=0, top=1
2381 * @param s MpegEncContext
2382 * @param overlap flag indicating that overlap filtering is used
2383 * @param pq integer part of picture quantizer
2384 * @param[in] n block index in the current MB
2385 * @param dc_val_ptr Pointer to DC predictor
2386 * @param dir_ptr Prediction direction for use in AC prediction
2388 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2389 int16_t **dc_val_ptr, int *dir_ptr)
2391 int a, b, c, wrap, pred, scale;
/* dcpred[scale] == round(1024 / scale): default predictor value used
 * for out-of-bounds neighbours; index 0 is unused (-1 sentinel) */
2393 static const uint16_t dcpred[32] = {
2394 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2395 114, 102, 93, 85, 79, 73, 68, 64,
2396 60, 57, 54, 51, 49, 47, 45, 43,
2397 41, 39, 38, 37, 35, 34, 33
2400 /* find prediction - wmv3_dc_scale always used here in fact */
/* blocks 0-3 are luma, 4-5 chroma — pick the matching DC scale */
2401 if (n < 4) scale = s->y_dc_scale;
2402 else scale = s->c_dc_scale;
2404 wrap = s->block_wrap[n];
2405 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour DC, a = top neighbour DC */
2411 b = dc_val[ - 1 - wrap];
2412 a = dc_val[ - wrap];
2414 if (pq < 9 || !overlap) {
2415 /* Set outer values */
/* substitute the default DC for neighbours outside the picture/slice */
2416 if (s->first_slice_line && (n != 2 && n != 3))
2417 b = a = dcpred[scale];
2418 if (s->mb_x == 0 && (n != 1 && n != 3))
2419 b = c = dcpred[scale];
2421 /* Set outer values */
2422 if (s->first_slice_line && (n != 2 && n != 3))
2424 if (s->mb_x == 0 && (n != 1 && n != 3))
/* choose direction by gradient: smaller |a-b| vs |b-c| picks left/top */
2428 if (abs(a - b) <= abs(b - c)) {
2430 *dir_ptr = 1; // left
2433 *dir_ptr = 0; // top
2436 /* update predictor */
2437 *dc_val_ptr = &dc_val[0];
2442 /** Get predicted DC value
2443 * prediction dir: left=0, top=1
2444 * @param s MpegEncContext
2445 * @param overlap flag indicating that overlap filtering is used
2446 * @param pq integer part of picture quantizer
2447 * @param[in] n block index in the current MB
2448 * @param a_avail flag indicating top block availability
2449 * @param c_avail flag indicating left block availability
2450 * @param dc_val_ptr Pointer to DC predictor
2451 * @param dir_ptr Prediction direction for use in AC prediction
2453 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2454 int a_avail, int c_avail,
2455 int16_t **dc_val_ptr, int *dir_ptr)
2457 int a, b, c, wrap, pred;
2459 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2463 wrap = s->block_wrap[n];
2464 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour DC, a = top neighbour DC */
2470 b = dc_val[ - 1 - wrap];
2471 a = dc_val[ - wrap];
2472 /* scale predictors if needed */
2473 q1 = s->current_picture.qscale_table[mb_pos];
2474 dqscale_index = s->y_dc_scale_table[q1] - 1;
2475 if (dqscale_index < 0)
/* rescale each neighbour's DC when its MB used a different quantizer:
 * multiply by its dc scale and the reciprocal table entry (Q18 fixed
 * point, +0x20000 rounds) */
2477 if (c_avail && (n != 1 && n != 3)) {
2478 q2 = s->current_picture.qscale_table[mb_pos - 1];
2480 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2482 if (a_avail && (n != 2 && n != 3)) {
2483 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2485 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2487 if (a_avail && c_avail && (n != 3)) {
2492 off -= s->mb_stride;
2493 q2 = s->current_picture.qscale_table[off];
2495 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
/* pick the predictor by availability, preferring the smaller gradient */
2498 if (a_avail && c_avail) {
2499 if (abs(a - b) <= abs(b - c)) {
2501 *dir_ptr = 1; // left
2504 *dir_ptr = 0; // top
2506 } else if (a_avail) {
2508 *dir_ptr = 0; // top
2509 } else if (c_avail) {
2511 *dir_ptr = 1; // left
2514 *dir_ptr = 1; // left
2517 /* update predictor */
2518 *dc_val_ptr = &dc_val[0];
2522 /** @} */ // Block group
2525 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2526 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
/** Predict the coded-block flag of block n from its decoded neighbours
 * and return a pointer to this block's slot so the caller can store
 * the actual flag.
 * @param s MpegEncContext
 * @param[in] n block index in the current MB
 * @param coded_block_ptr receives the address of this block's flag
 */
2530 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2531 uint8_t **coded_block_ptr)
2533 int xy, wrap, pred, a, b, c;
2535 xy = s->block_index[n];
2536 wrap = s->b8_stride;
2541 a = s->coded_block[xy - 1 ];
2542 b = s->coded_block[xy - 1 - wrap];
2543 c = s->coded_block[xy - wrap];
2552 *coded_block_ptr = &s->coded_block[xy];
/**
2558 * Decode one AC coefficient
2559 * @param v The VC1 context
2560 * @param last Last coefficient
2561 * @param skip How much zero coefficients to skip
2562 * @param value Decoded AC coefficient value
2563 * @param codingset set of VLC to decode data
 */
2566 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2567 int *value, int codingset)
2569 GetBitContext *gb = &v->s.gb;
2570 int index, escape, run = 0, level = 0, lst = 0;
2572 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
/* non-escape index: run/level come straight from the decode tables */
2573 if (index != ff_vc1_ac_sizes[codingset] - 1) {
2574 run = vc1_index_decode_table[codingset][index][0];
2575 level = vc1_index_decode_table[codingset][index][1];
/* also force "last" on bitstream exhaustion to terminate the block */
2576 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
/* escape coding: decode210 selects one of three escape modes */
2580 escape = decode210(gb);
/* escape modes 1/2: re-decode the VLC, then add a delta to level
 * or run depending on the mode (delta lines elided here) */
2582 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2583 run = vc1_index_decode_table[codingset][index][0];
2584 level = vc1_index_decode_table[codingset][index][1];
2585 lst = index >= vc1_last_decode_table[codingset];
2588 level += vc1_last_delta_level_table[codingset][run];
2590 level += vc1_delta_level_table[codingset][run];
2593 run += vc1_last_delta_run_table[codingset][level] + 1;
2595 run += vc1_delta_run_table[codingset][level] + 1;
/* escape mode 3: explicit run/level; field widths are decoded once
 * per picture into esc3_level_length / esc3_run_length */
2601 lst = get_bits1(gb);
2602 if (v->s.esc3_level_length == 0) {
2603 if (v->pq < 8 || v->dquantfrm) { // table 59
2604 v->s.esc3_level_length = get_bits(gb, 3);
2605 if (!v->s.esc3_level_length)
2606 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2607 } else { // table 60
2608 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2610 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2612 run = get_bits(gb, v->s.esc3_run_length);
2613 sign = get_bits1(gb);
2614 level = get_bits(gb, v->s.esc3_level_length);
2625 /** Decode intra block in intra frames - should be faster than decode_intra_block
2626 * @param v VC1Context
2627 * @param block block to decode
2628 * @param[in] n subblock index
2629 * @param coded are AC coeffs present or not
2630 * @param codingset set of VLC to decode data
 * @return 0 on success, negative on invalid DC VLC (error path elided)
 */
2632 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2633 int coded, int codingset)
2635 GetBitContext *gb = &v->s.gb;
2636 MpegEncContext *s = &v->s;
2637 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2640 int16_t *ac_val, *ac_val2;
2643 /* Get DC differential */
/* luma blocks (n < 4) and chroma blocks use separate DC VLC tables */
2645 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2647 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2650 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape: DC differential coded with a fixed number of bits
 * depending on the picture quantizer */
2654 if (dcdiff == 119 /* ESC index value */) {
2655 /* TODO: Optimize */
2656 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2657 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2658 else dcdiff = get_bits(gb, 8);
/* low quantizers carry extra precision bits for the differential */
2661 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2662 else if (v->pq == 2)
2663 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* add the predicted DC (I-frame predictor) to the differential */
2670 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2673 /* Store the quantized DC coeff, used for prediction */
2675 block[0] = dcdiff * s->y_dc_scale;
2677 block[0] = dcdiff * s->c_dc_scale;
2688 int last = 0, skip, value;
2689 const uint8_t *zz_table;
2693 scale = v->pq * 2 + v->halfpq;
/* scan pattern depends on the AC prediction direction */
2697 zz_table = v->zz_8x8[2];
2699 zz_table = v->zz_8x8[3];
2701 zz_table = v->zz_8x8[1];
2703 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2705 if (dc_pred_dir) // left
2708 ac_val -= 16 * s->block_wrap[n];
/* run/level decode loop: place each coefficient via the zigzag table */
2711 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2715 block[zz_table[i++]] = value;
2718 /* apply AC prediction if needed */
2720 if (dc_pred_dir) { // left
2721 for (k = 1; k < 8; k++)
2722 block[k << v->left_blk_sh] += ac_val[k];
2724 for (k = 1; k < 8; k++)
2725 block[k << v->top_blk_sh] += ac_val[k + 8];
2728 /* save AC coeffs for further prediction */
2729 for (k = 1; k < 8; k++) {
2730 ac_val2[k] = block[k << v->left_blk_sh];
2731 ac_val2[k + 8] = block[k << v->top_blk_sh];
2734 /* scale AC coeffs */
2735 for (k = 1; k < 64; k++)
/* non-uniform quantizer adds +-pq to nonzero coefficients */
2739 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2742 if (s->ac_pred) i = 63;
/* no coded AC coefficients: predicted ACs alone form the block */
2748 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2752 scale = v->pq * 2 + v->halfpq;
2753 memset(ac_val2, 0, 16 * 2);
2754 if (dc_pred_dir) { // left
2757 memcpy(ac_val2, ac_val, 8 * 2);
2759 ac_val -= 16 * s->block_wrap[n];
2761 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2764 /* apply AC prediction if needed */
2766 if (dc_pred_dir) { //left
2767 for (k = 1; k < 8; k++) {
2768 block[k << v->left_blk_sh] = ac_val[k] * scale;
2769 if (!v->pquantizer && block[k << v->left_blk_sh])
2770 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2773 for (k = 1; k < 8; k++) {
2774 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2775 if (!v->pquantizer && block[k << v->top_blk_sh])
2776 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2782 s->block_last_index[n] = i;
2787 /** Decode intra block in intra frames - should be faster than decode_intra_block
2788 * @param v VC1Context
2789 * @param block block to decode
2790 * @param[in] n subblock number
2791 * @param coded are AC coeffs present or not
2792 * @param codingset set of VLC to decode data
2793 * @param mquant quantizer value for this macroblock
 * @return 0 on success, AVERROR_INVALIDDATA on bad quantizer scaling
 */
2795 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2796 int coded, int codingset, int mquant)
2798 GetBitContext *gb = &v->s.gb;
2799 MpegEncContext *s = &v->s;
2800 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2803 int16_t *ac_val, *ac_val2;
2805 int a_avail = v->a_avail, c_avail = v->c_avail;
2806 int use_pred = s->ac_pred;
2809 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2811 /* Get DC differential */
/* separate luma/chroma DC VLC tables, as in vc1_decode_i_block() */
2813 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2815 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2818 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape DC differential, width keyed on the MB quantizer */
2822 if (dcdiff == 119 /* ESC index value */) {
2823 /* TODO: Optimize */
2824 if (mquant == 1) dcdiff = get_bits(gb, 10);
2825 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2826 else dcdiff = get_bits(gb, 8);
2829 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2830 else if (mquant == 2)
2831 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* add the predicted DC (advanced-profile predictor with availability) */
2838 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2841 /* Store the quantized DC coeff, used for prediction */
2843 block[0] = dcdiff * s->y_dc_scale;
2845 block[0] = dcdiff * s->c_dc_scale;
2851 /* check if AC is needed at all */
2852 if (!a_avail && !c_avail)
2854 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2857 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2859 if (dc_pred_dir) // left
2862 ac_val -= 16 * s->block_wrap[n];
/* q1 = this MB's quantizer, q2 = the predicting neighbour's quantizer;
 * q2 stays 0 when no valid neighbour exists in the chosen direction */
2864 q1 = s->current_picture.qscale_table[mb_pos];
2865 if ( dc_pred_dir && c_avail && mb_pos)
2866 q2 = s->current_picture.qscale_table[mb_pos - 1];
2867 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2868 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2869 if ( dc_pred_dir && n == 1)
2871 if (!dc_pred_dir && n == 2)
2877 int last = 0, skip, value;
2878 const uint8_t *zz_table;
/* interlaced frames without AC prediction use the interlaced scan */
2882 if (!use_pred && v->fcm == ILACE_FRAME) {
2883 zz_table = v->zzi_8x8;
2885 if (!dc_pred_dir) // top
2886 zz_table = v->zz_8x8[2];
2888 zz_table = v->zz_8x8[3];
2891 if (v->fcm != ILACE_FRAME)
2892 zz_table = v->zz_8x8[1];
2894 zz_table = v->zzi_8x8;
2898 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2902 block[zz_table[i++]] = value;
2905 /* apply AC prediction if needed */
2907 /* scale predictors if needed*/
/* rescale the neighbour's ACs from its step (q2) to ours (q1);
 * ff_vc1_dqscale is a Q18 reciprocal table, +0x20000 rounds */
2908 if (q2 && q1 != q2) {
2909 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2910 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2913 return AVERROR_INVALIDDATA;
2914 if (dc_pred_dir) { // left
2915 for (k = 1; k < 8; k++)
2916 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2918 for (k = 1; k < 8; k++)
2919 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2922 if (dc_pred_dir) { //left
2923 for (k = 1; k < 8; k++)
2924 block[k << v->left_blk_sh] += ac_val[k];
2926 for (k = 1; k < 8; k++)
2927 block[k << v->top_blk_sh] += ac_val[k + 8];
2931 /* save AC coeffs for further prediction */
2932 for (k = 1; k < 8; k++) {
2933 ac_val2[k ] = block[k << v->left_blk_sh];
2934 ac_val2[k + 8] = block[k << v->top_blk_sh];
2937 /* scale AC coeffs */
2938 for (k = 1; k < 64; k++)
2942 block[k] += (block[k] < 0) ? -mquant : mquant;
2945 if (use_pred) i = 63;
2946 } else { // no AC coeffs
2949 memset(ac_val2, 0, 16 * 2);
2950 if (dc_pred_dir) { // left
2952 memcpy(ac_val2, ac_val, 8 * 2);
/* same quantizer rescale as above, applied to the saved predictors */
2953 if (q2 && q1 != q2) {
2954 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2955 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2957 return AVERROR_INVALIDDATA;
2958 for (k = 1; k < 8; k++)
2959 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2964 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2965 if (q2 && q1 != q2) {
2966 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2967 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2969 return AVERROR_INVALIDDATA;
2970 for (k = 1; k < 8; k++)
2971 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2976 /* apply AC prediction if needed */
2978 if (dc_pred_dir) { // left
2979 for (k = 1; k < 8; k++) {
2980 block[k << v->left_blk_sh] = ac_val2[k] * scale;
2981 if (!v->pquantizer && block[k << v->left_blk_sh])
2982 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2985 for (k = 1; k < 8; k++) {
2986 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2987 if (!v->pquantizer && block[k << v->top_blk_sh])
2988 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2994 s->block_last_index[n] = i;
2999 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3000 * @param v VC1Context
3001 * @param block block to decode
3002 * @param[in] n subblock index
3003 * @param coded are AC coeffs present or not
3004 * @param mquant block quantizer
3005 * @param codingset set of VLC to decode data
 * @return 0 on success, AVERROR_INVALIDDATA on bad quantizer scaling
 */
3007 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
3008 int coded, int mquant, int codingset)
3010 GetBitContext *gb = &v->s.gb;
3011 MpegEncContext *s = &v->s;
3012 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3015 int16_t *ac_val, *ac_val2;
3017 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3018 int a_avail = v->a_avail, c_avail = v->c_avail;
3019 int use_pred = s->ac_pred;
3023 s->bdsp.clear_block(block);
3025 /* XXX: Guard against dumb values of mquant */
3026 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3028 /* Set DC scale - y and c use the same */
3029 s->y_dc_scale = s->y_dc_scale_table[mquant];
3030 s->c_dc_scale = s->c_dc_scale_table[mquant];
3032 /* Get DC differential */
3034 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3036 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3039 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape DC differential, width keyed on the block quantizer */
3043 if (dcdiff == 119 /* ESC index value */) {
3044 /* TODO: Optimize */
3045 if (mquant == 1) dcdiff = get_bits(gb, 10);
3046 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3047 else dcdiff = get_bits(gb, 8);
3050 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3051 else if (mquant == 2)
3052 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3059 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3062 /* Store the quantized DC coeff, used for prediction */
3065 block[0] = dcdiff * s->y_dc_scale;
3067 block[0] = dcdiff * s->c_dc_scale;
3073 /* check if AC is needed at all and adjust direction if needed */
3074 if (!a_avail) dc_pred_dir = 1;
3075 if (!c_avail) dc_pred_dir = 0;
3076 if (!a_avail && !c_avail) use_pred = 0;
3077 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3080 scale = mquant * 2 + v->halfpq;
3082 if (dc_pred_dir) //left
3085 ac_val -= 16 * s->block_wrap[n];
/* q1 = this block's quantizer, q2 = the predicting neighbour's */
3087 q1 = s->current_picture.qscale_table[mb_pos];
3088 if (dc_pred_dir && c_avail && mb_pos)
3089 q2 = s->current_picture.qscale_table[mb_pos - 1];
3090 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3091 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3092 if ( dc_pred_dir && n == 1)
3094 if (!dc_pred_dir && n == 2)
3096 if (n == 3) q2 = q1;
3099 int last = 0, skip, value;
3103 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
/* pick the scan: progressive, direction-dependent (with AC pred on
 * interlaced frames), or plain interlaced */
3107 if (v->fcm == PROGRESSIVE)
3108 block[v->zz_8x8[0][i++]] = value;
3110 if (use_pred && (v->fcm == ILACE_FRAME)) {
3111 if (!dc_pred_dir) // top
3112 block[v->zz_8x8[2][i++]] = value;
3114 block[v->zz_8x8[3][i++]] = value;
3116 block[v->zzi_8x8[i++]] = value;
3121 /* apply AC prediction if needed */
3123 /* scale predictors if needed*/
/* Q18 fixed-point rescale of neighbour ACs between quantizer steps */
3124 if (q2 && q1 != q2) {
3125 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3126 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3129 return AVERROR_INVALIDDATA;
3130 if (dc_pred_dir) { // left
3131 for (k = 1; k < 8; k++)
3132 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3134 for (k = 1; k < 8; k++)
3135 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3138 if (dc_pred_dir) { // left
3139 for (k = 1; k < 8; k++)
3140 block[k << v->left_blk_sh] += ac_val[k];
3142 for (k = 1; k < 8; k++)
3143 block[k << v->top_blk_sh] += ac_val[k + 8];
3147 /* save AC coeffs for further prediction */
3148 for (k = 1; k < 8; k++) {
3149 ac_val2[k ] = block[k << v->left_blk_sh];
3150 ac_val2[k + 8] = block[k << v->top_blk_sh];
3153 /* scale AC coeffs */
3154 for (k = 1; k < 64; k++)
3158 block[k] += (block[k] < 0) ? -mquant : mquant;
3161 if (use_pred) i = 63;
3162 } else { // no AC coeffs
3165 memset(ac_val2, 0, 16 * 2);
3166 if (dc_pred_dir) { // left
3168 memcpy(ac_val2, ac_val, 8 * 2);
3169 if (q2 && q1 != q2) {
3170 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3171 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3173 return AVERROR_INVALIDDATA;
3174 for (k = 1; k < 8; k++)
3175 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3180 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3181 if (q2 && q1 != q2) {
3182 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3183 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3185 return AVERROR_INVALIDDATA;
3186 for (k = 1; k < 8; k++)
3187 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3192 /* apply AC prediction if needed */
3194 if (dc_pred_dir) { // left
3195 for (k = 1; k < 8; k++) {
3196 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3197 if (!v->pquantizer && block[k << v->left_blk_sh])
3198 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3201 for (k = 1; k < 8; k++) {
3202 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3203 if (!v->pquantizer && block[k << v->top_blk_sh])
3204 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3210 s->block_last_index[n] = i;
/** Decode one inter-coded block of a P frame: read the block transform
 * type (8x8 / two 8x4 / two 4x8 / four 4x4), decode the coefficients of
 * each coded subblock, inverse-transform and add into dst.
 * @param v VC1 context
 * @param block coefficient buffer for this block
 * @param n block index within the MB
 * @param mquant quantizer for this block
 * @param ttmb macroblock-level transform type (-1 when per-block)
 * @param first_block whether this is the first coded block of the MB
 * @param dst output pixels; linesize its stride
 * @param skip_block nonzero: decode but do not reconstruct
 * @param ttmb_out accumulates the chosen transform type per block
 */
3217 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3218 int mquant, int ttmb, int first_block,
3219 uint8_t *dst, int linesize, int skip_block,
3222 MpegEncContext *s = &v->s;
3223 GetBitContext *gb = &s->gb;
3226 int scale, off, idx, last, skip, value;
3227 int ttblk = ttmb & 7;
3230 s->bdsp.clear_block(block);
/* per-block transform type signalled in the bitstream */
3233 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3235 if (ttblk == TT_4X4) {
3236 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3238 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3239 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3240 || (!v->res_rtm_flag && !first_block))) {
3241 subblkpat = decode012(gb);
3243 subblkpat ^= 3; // swap decoded pattern bits
3244 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3246 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3249 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3251 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3252 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3253 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3256 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3257 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* TT_8X8 path: single full-block transform */
3266 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3271 idx = v->zz_8x8[0][i++];
3273 idx = v->zzi_8x8[i++];
3274 block[idx] = value * scale;
3276 block[idx] += (block[idx] < 0) ? -mquant : mquant;
/* DC-only blocks take the fast DC inverse transform */
3280 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3282 v->vc1dsp.vc1_inv_trans_8x8(block);
3283 s->idsp.add_pixels_clamped(block, dst, linesize);
/* TT_4X4 path: up to four 4x4 subblocks selected by subblkpat */
3288 pat = ~subblkpat & 0xF;
3289 for (j = 0; j < 4; j++) {
3290 last = subblkpat & (1 << (3 - j));
3292 off = (j & 1) * 4 + (j & 2) * 16;
3294 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3299 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3301 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3302 block[idx + off] = value * scale;
3304 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3306 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3308 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3310 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* TT_8X4 path: top and/or bottom 8x4 halves */
3315 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3316 for (j = 0; j < 2; j++) {
3317 last = subblkpat & (1 << (1 - j));
3321 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3326 idx = v->zz_8x4[i++] + off;
3328 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3329 block[idx] = value * scale;
3331 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3333 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3335 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3337 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* TT_4X8 path: left and/or right 4x8 halves */
3342 pat = ~(subblkpat * 5) & 0xF;
3343 for (j = 0; j < 2; j++) {
3344 last = subblkpat & (1 << (1 - j));
3348 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3353 idx = v->zz_4x8[i++] + off;
3355 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3356 block[idx] = value * scale;
3358 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3360 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3362 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3364 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* record the transform type used, for the loop filter */
3370 *ttmb_out |= ttblk << (n * 4);
3374 /** @} */ // Macroblock group
/* MV differential size/offset lookup tables used when decoding MVDATA. */
3376 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3377 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/** Apply the in-loop vertical (horizontal-edge) deblocking filter for one
 * 8x8 block of the macroblock one row above the current position; filters
 * the full 8-pixel edge or 4-pixel halves depending on the CBP/TT state.
 * @param block_num 0-3 luma blocks, 4-5 chroma planes
 */
3379 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3381 MpegEncContext *s = &v->s;
3382 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3383 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3384 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3385 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3386 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3389 if (block_num > 3) {
3390 dst = s->dest[block_num - 3];
3392 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3394 if (s->mb_y != s->end_mb_y || block_num < 2) {
/* gather the CBP/intra state and MV of the block below the edge */
3398 if (block_num > 3) {
3399 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3400 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3401 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3402 mv_stride = s->mb_stride;
3404 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3405 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3406 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3407 : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3408 mv_stride = s->b8_stride;
3409 mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* filter the whole edge when either side is intra or MVs differ */
3412 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3413 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3414 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3416 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3418 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
/* otherwise filter only the coded 4-pixel halves */
3421 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3423 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* internal edge of the block: only for 4x4 / 8x4 transforms */
3428 dst -= 4 * linesize;
3429 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3430 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3431 idx = (block_cbp | (block_cbp >> 2)) & 3;
3433 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3436 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3438 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/** Apply the in-loop horizontal (vertical-edge) deblocking filter for one
 * 8x8 block of the macroblock one row and one column behind the current
 * position; mirror of vc1_apply_p_v_loop_filter() for vertical edges.
 * @param block_num 0-3 luma blocks, 4-5 chroma planes
 */
3443 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3445 MpegEncContext *s = &v->s;
3446 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3447 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3448 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3449 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3450 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3453 if (block_num > 3) {
3454 dst = s->dest[block_num - 3] - 8 * linesize;
3456 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3459 if (s->mb_x != s->mb_width || !(block_num & 5)) {
/* gather the CBP/intra state and MV of the block right of the edge */
3462 if (block_num > 3) {
3463 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3464 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3465 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3467 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3468 : (mb_cbp >> ((block_num + 1) * 4));
3469 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3470 : (mb_is_intra >> ((block_num + 1) * 4));
3471 mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
/* filter the whole edge when either side is intra or MVs differ */
3473 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3474 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3476 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3478 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3481 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3483 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* internal edge of the block: only for 4x4 / 4x8 transforms */
3489 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3490 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3491 idx = (block_cbp | (block_cbp >> 1)) & 5;
3493 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3496 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3498 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/**
 * Run the P-frame in-loop deblocking filters for all 6 blocks
 * (4 luma + 2 chroma) at the current macroblock position.
 */
3503 static void vc1_apply_p_loop_filter(VC1Context *v)
3505 MpegEncContext *s = &v->s;
3508 for (i = 0; i < 6; i++) {
3509 vc1_apply_p_v_loop_filter(v, i);
3512 /* V always precedes H, therefore we run H one MB before V;
3513 * at the end of a row, we catch up to complete the row */
3515 for (i = 0; i < 6; i++) {
3516 vc1_apply_p_h_loop_filter(v, i);
/* Last MB of the row: advance the block index once more and run the
 * horizontal filter again so the row is fully deblocked. */
3518 if (s->mb_x == s->mb_width - 1) {
3520 ff_update_block_index(s);
3521 for (i = 0; i < 6; i++) {
3522 vc1_apply_p_h_loop_filter(v, i);
3528 /** Decode one P-frame macroblock (progressive).
 * Handles both 1MV and 4MV macroblock modes, coded as well as skipped MBs,
 * and records per-MB cbp / transform-type / intra masks (v->cbp, v->ttblk,
 * v->is_intra) that the in-loop deblocking pass reads later.
 */
3530 static int vc1_decode_p_mb(VC1Context *v)
3532 MpegEncContext *s = &v->s;
3533 GetBitContext *gb = &s->gb;
3535 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3536 int cbp; /* coded block pattern for the 6 blocks of the MB */
3537 int mqdiff, mquant; /* MB quantization */
3538 int ttmb = v->ttfrm; /* MB Transform type */
3540 int mb_has_coeffs = 1; /* last_flag */
3541 int dmv_x, dmv_y; /* Differential MV components */
3542 int index, index1; /* LUT indexes */
3543 int val, sign; /* temp values */
3544 int first_block = 1;
3546 int skipped, fourmv;
3547 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3549 mquant = v->pq; /* lossy initialization */
/* MVTYPEMB and SKIPMB may be raw bitplanes (read per-MB here) or were
 * already decoded into the per-frame planes. */
3551 if (v->mv_type_is_raw)
3552 fourmv = get_bits1(gb);
3554 fourmv = v->mv_type_mb_plane[mb_pos];
3556 skipped = get_bits1(gb);
3558 skipped = v->s.mbskip_table[mb_pos];
3560 if (!fourmv) { /* 1MV mode */
3562 GET_MVDATA(dmv_x, dmv_y);
/* Zero the backward MV slot: a P frame has no backward prediction. */
3565 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3566 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3568 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3569 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3571 /* FIXME Set DC val for inter block ? */
3572 if (s->mb_intra && !mb_has_coeffs) {
3574 s->ac_pred = get_bits1(gb);
3576 } else if (mb_has_coeffs) {
3578 s->ac_pred = get_bits1(gb);
3579 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3585 s->current_picture.qscale_table[mb_pos] = mquant;
3587 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3588 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3589 VC1_TTMB_VLC_BITS, 2);
3590 if (!s->mb_intra) vc1_mc_1mv(v, 0);
/* Per-block decode: cbp bit i (MSB first) selects coded/uncoded. */
3592 for (i = 0; i < 6; i++) {
3593 s->dc_val[0][s->block_index[i]] = 0;
3595 val = ((cbp >> (5 - i)) & 1);
3596 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3597 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3599 /* check if prediction blocks A and C are available */
3600 v->a_avail = v->c_avail = 0;
3601 if (i == 2 || i == 3 || !s->first_slice_line)
3602 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3603 if (i == 1 || i == 3 || s->mb_x)
3604 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3606 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3607 (i & 4) ? v->codingset2 : v->codingset);
3608 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3610 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* NOTE(review): the <<= 1 upscale is conditional on a guard not visible
 * here — presumably range reduction; confirm against full source. */
3612 for (j = 0; j < 64; j++)
3613 s->block[i][j] <<= 1;
3614 s->idsp.put_signed_pixels_clamped(s->block[i],
3615 s->dest[dst_idx] + off,
3616 i & 4 ? s->uvlinesize
3618 if (v->pq >= 9 && v->overlap) {
3620 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3622 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3624 block_cbp |= 0xF << (i << 2);
3625 block_intra |= 1 << i;
3627 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3628 s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3629 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3630 block_cbp |= pat << (i << 2);
3631 if (!v->ttmbf && ttmb < 8)
/* Skipped 1MV MB: clear block state, mark as skipped, still run MV
 * prediction (and presumably MC on hidden lines) with zero deltas. */
3638 for (i = 0; i < 6; i++) {
3639 v->mb_type[0][s->block_index[i]] = 0;
3640 s->dc_val[0][s->block_index[i]] = 0;
3642 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3643 s->current_picture.qscale_table[mb_pos] = 0;
3644 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3647 } else { // 4MV mode
3648 if (!skipped /* unskipped MB */) {
3649 int intra_count = 0, coded_inter = 0;
3650 int is_intra[6], is_coded[6];
/* First pass over the 4 luma blocks: read per-block MV data, predict
 * and motion-compensate, and collect intra/coded flags. */
3652 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3653 for (i = 0; i < 6; i++) {
3654 val = ((cbp >> (5 - i)) & 1);
3655 s->dc_val[0][s->block_index[i]] = 0;
3662 GET_MVDATA(dmv_x, dmv_y);
3664 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3666 vc1_mc_4mv_luma(v, i, 0, 0);
3667 intra_count += s->mb_intra;
3668 is_intra[i] = s->mb_intra;
3669 is_coded[i] = mb_has_coeffs;
/* Chroma blocks are intra when at least 3 of the 4 luma blocks are. */
3672 is_intra[i] = (intra_count >= 3);
3676 vc1_mc_4mv_chroma(v, 0);
3677 v->mb_type[0][s->block_index[i]] = is_intra[i];
3679 coded_inter = !is_intra[i] & is_coded[i];
3681 // if there are no coded blocks then don't do anything more
3683 if (!intra_count && !coded_inter)
3686 s->current_picture.qscale_table[mb_pos] = mquant;
3687 /* test if block is intra and has pred */
3690 for (i = 0; i < 6; i++)
3692 if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3693 || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3699 s->ac_pred = get_bits1(gb);
3703 if (!v->ttmbf && coded_inter)
3704 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Second pass: actually decode coefficients / reconstruct each block. */
3705 for (i = 0; i < 6; i++) {
3707 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3708 s->mb_intra = is_intra[i];
3710 /* check if prediction blocks A and C are available */
3711 v->a_avail = v->c_avail = 0;
3712 if (i == 2 || i == 3 || !s->first_slice_line)
3713 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3714 if (i == 1 || i == 3 || s->mb_x)
3715 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3717 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3718 (i & 4) ? v->codingset2 : v->codingset);
3719 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3721 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3723 for (j = 0; j < 64; j++)
3724 s->block[i][j] <<= 1;
3725 s->idsp.put_signed_pixels_clamped(s->block[i],
3726 s->dest[dst_idx] + off,
3727 (i & 4) ? s->uvlinesize
3729 if (v->pq >= 9 && v->overlap) {
3731 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3733 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3735 block_cbp |= 0xF << (i << 2);
3736 block_intra |= 1 << i;
3737 } else if (is_coded[i]) {
3738 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3739 first_block, s->dest[dst_idx] + off,
3740 (i & 4) ? s->uvlinesize : s->linesize,
3741 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3743 block_cbp |= pat << (i << 2);
3744 if (!v->ttmbf && ttmb < 8)
3749 } else { // skipped MB
3751 s->current_picture.qscale_table[mb_pos] = 0;
3752 for (i = 0; i < 6; i++) {
3753 v->mb_type[0][s->block_index[i]] = 0;
3754 s->dc_val[0][s->block_index[i]] = 0;
/* Skipped 4MV MB: predicted MVs with zero deltas, full MC per block. */
3756 for (i = 0; i < 4; i++) {
3757 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3758 vc1_mc_4mv_luma(v, i, 0, 0);
3760 vc1_mc_4mv_chroma(v, 0);
3761 s->current_picture.qscale_table[mb_pos] = 0;
/* Save the per-MB masks consumed by vc1_apply_p_*_loop_filter(). */
3765 v->cbp[s->mb_x] = block_cbp;
3766 v->ttblk[s->mb_x] = block_tt;
3767 v->is_intra[s->mb_x] = block_intra;
3772 /** Decode one macroblock of an interlaced-frame P picture.
 * Selects between 1MV / 2MV-field / 4MV / 4MV-field / intra modes via the
 * MBMODE VLC and handles field-transform (fieldtx) addressing for luma.
 */
3774 static int vc1_decode_p_mb_intfr(VC1Context *v)
3776 MpegEncContext *s = &v->s;
3777 GetBitContext *gb = &s->gb;
3779 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3780 int cbp = 0; /* coded block pattern */
3781 int mqdiff, mquant; /* MB quantization */
3782 int ttmb = v->ttfrm; /* MB Transform type */
3784 int mb_has_coeffs = 1; /* last_flag */
3785 int dmv_x, dmv_y; /* Differential MV components */
3786 int val; /* temp value */
3787 int first_block = 1;
3789 int skipped, fourmv = 0, twomv = 0;
3790 int block_cbp = 0, pat, block_tt = 0;
3791 int idx_mbmode = 0, mvbp;
3792 int stride_y, fieldtx;
3794 mquant = v->pq; /* Lossy initialization */
3797 skipped = get_bits1(gb);
3799 skipped = v->s.mbskip_table[mb_pos];
/* Two MBMODE tables exist, depending on the frame-level 4MVSWITCH flag. */
3801 if (v->fourmvswitch)
3802 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3804 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3805 switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3806 /* store the motion vector type in a flag (useful later) */
3807 case MV_PMODE_INTFR_4MV:
3809 v->blk_mv_type[s->block_index[0]] = 0;
3810 v->blk_mv_type[s->block_index[1]] = 0;
3811 v->blk_mv_type[s->block_index[2]] = 0;
3812 v->blk_mv_type[s->block_index[3]] = 0;
3814 case MV_PMODE_INTFR_4MV_FIELD:
3816 v->blk_mv_type[s->block_index[0]] = 1;
3817 v->blk_mv_type[s->block_index[1]] = 1;
3818 v->blk_mv_type[s->block_index[2]] = 1;
3819 v->blk_mv_type[s->block_index[3]] = 1;
3821 case MV_PMODE_INTFR_2MV_FIELD:
3823 v->blk_mv_type[s->block_index[0]] = 1;
3824 v->blk_mv_type[s->block_index[1]] = 1;
3825 v->blk_mv_type[s->block_index[2]] = 1;
3826 v->blk_mv_type[s->block_index[3]] = 1;
3828 case MV_PMODE_INTFR_1MV:
3829 v->blk_mv_type[s->block_index[0]] = 0;
3830 v->blk_mv_type[s->block_index[1]] = 0;
3831 v->blk_mv_type[s->block_index[2]] = 0;
3832 v->blk_mv_type[s->block_index[3]] = 0;
3835 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3836 for (i = 0; i < 4; i++) {
3837 s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3838 s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3840 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3841 s->mb_intra = v->is_intra[s->mb_x] = 1;
3842 for (i = 0; i < 6; i++)
3843 v->mb_type[0][s->block_index[i]] = 1;
3844 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3845 mb_has_coeffs = get_bits1(gb);
3847 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3848 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3850 s->current_picture.qscale_table[mb_pos] = mquant;
3851 /* Set DC scale - y and c use the same (not sure if necessary here) */
3852 s->y_dc_scale = s->y_dc_scale_table[mquant];
3853 s->c_dc_scale = s->c_dc_scale_table[mquant];
3855 for (i = 0; i < 6; i++) {
3856 s->dc_val[0][s->block_index[i]] = 0;
3858 val = ((cbp >> (5 - i)) & 1);
3859 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3860 v->a_avail = v->c_avail = 0;
3861 if (i == 2 || i == 3 || !s->first_slice_line)
3862 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3863 if (i == 1 || i == 3 || s->mb_x)
3864 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3866 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3867 (i & 4) ? v->codingset2 : v->codingset);
3868 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3869 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* With fieldtx the two luma fields are interleaved: double the stride
 * and adjust the block offsets accordingly. */
3871 stride_y = s->linesize << fieldtx;
3872 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3874 stride_y = s->uvlinesize;
3877 s->idsp.put_signed_pixels_clamped(s->block[i],
3878 s->dest[dst_idx] + off,
3883 } else { // inter MB
3884 mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3886 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* Per-mode MV block patterns: 2MVBP for 2MV-field, 4MVBP for 4MV modes. */
3887 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3888 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3890 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3891 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3892 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3895 s->mb_intra = v->is_intra[s->mb_x] = 0;
3896 for (i = 0; i < 6; i++)
3897 v->mb_type[0][s->block_index[i]] = 0;
3898 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3899 /* for all motion vector read MVDATA and motion compensate each block */
3903 for (i = 0; i < 6; i++) {
3906 val = ((mvbp >> (3 - i)) & 1);
3908 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3910 vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3911 vc1_mc_4mv_luma(v, i, 0, 0);
3912 } else if (i == 4) {
3913 vc1_mc_4mv_chroma4(v, 0, 0, 0);
/* 2MV-field mode: one MV for the top-field pair (blocks 0/1), one for
 * the bottom-field pair (blocks 2/3). */
3920 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3922 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3923 vc1_mc_4mv_luma(v, 0, 0, 0);
3924 vc1_mc_4mv_luma(v, 1, 0, 0);
3927 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3929 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3930 vc1_mc_4mv_luma(v, 2, 0, 0);
3931 vc1_mc_4mv_luma(v, 3, 0, 0);
3932 vc1_mc_4mv_chroma4(v, 0, 0, 0);
3934 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3937 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3939 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3943 GET_MQUANT(); // p. 227
3944 s->current_picture.qscale_table[mb_pos] = mquant;
3945 if (!v->ttmbf && cbp)
3946 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3947 for (i = 0; i < 6; i++) {
3948 s->dc_val[0][s->block_index[i]] = 0;
3950 val = ((cbp >> (5 - i)) & 1);
3952 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3954 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3956 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3957 first_block, s->dest[dst_idx] + off,
3958 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3959 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3960 block_cbp |= pat << (i << 2);
3961 if (!v->ttmbf && ttmb < 8)
/* Skipped MB: reset per-block state, mark skipped, and predict with a
 * zero-delta 1MV (frame MV type). */
3968 s->mb_intra = v->is_intra[s->mb_x] = 0;
3969 for (i = 0; i < 6; i++) {
3970 v->mb_type[0][s->block_index[i]] = 0;
3971 s->dc_val[0][s->block_index[i]] = 0;
3973 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3974 s->current_picture.qscale_table[mb_pos] = 0;
3975 v->blk_mv_type[s->block_index[0]] = 0;
3976 v->blk_mv_type[s->block_index[1]] = 0;
3977 v->blk_mv_type[s->block_index[2]] = 0;
3978 v->blk_mv_type[s->block_index[3]] = 0;
3979 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
/* End of MB row: preserve the intra mask of this row for the next one. */
3982 if (s->mb_x == s->mb_width - 1)
3983 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/**
 * Decode one macroblock of an interlaced-field P picture.
 * idx_mbmode <= 1 selects intra; otherwise idx_mbmode <= 5 is 1-MV and the
 * remaining modes are 4-MV with an explicit 4MVBP block pattern.
 */
3987 static int vc1_decode_p_mb_intfi(VC1Context *v)
3989 MpegEncContext *s = &v->s;
3990 GetBitContext *gb = &s->gb;
3992 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3993 int cbp = 0; /* coded block pattern */
3994 int mqdiff, mquant; /* MB quantization */
3995 int ttmb = v->ttfrm; /* MB Transform type */
3997 int mb_has_coeffs = 1; /* last_flag */
3998 int dmv_x, dmv_y; /* Differential MV components */
3999 int val; /* temp values */
4000 int first_block = 1;
4003 int block_cbp = 0, pat, block_tt = 0;
4006 mquant = v->pq; /* Lossy initialization */
4008 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4009 if (idx_mbmode <= 1) { // intra MB
4010 s->mb_intra = v->is_intra[s->mb_x] = 1;
/* v->blocks_off / v->mb_off shift indexing into the current field. */
4011 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4012 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4013 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4015 s->current_picture.qscale_table[mb_pos] = mquant;
4016 /* Set DC scale - y and c use the same (not sure if necessary here) */
4017 s->y_dc_scale = s->y_dc_scale_table[mquant];
4018 s->c_dc_scale = s->c_dc_scale_table[mquant];
4019 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4020 mb_has_coeffs = idx_mbmode & 1;
4022 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4024 for (i = 0; i < 6; i++) {
4025 s->dc_val[0][s->block_index[i]] = 0;
4026 v->mb_type[0][s->block_index[i]] = 1;
4028 val = ((cbp >> (5 - i)) & 1);
4029 v->a_avail = v->c_avail = 0;
4030 if (i == 2 || i == 3 || !s->first_slice_line)
4031 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4032 if (i == 1 || i == 3 || s->mb_x)
4033 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4035 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4036 (i & 4) ? v->codingset2 : v->codingset);
4037 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4039 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4040 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4041 s->idsp.put_signed_pixels_clamped(s->block[i],
4042 s->dest[dst_idx] + off,
4043 (i & 4) ? s->uvlinesize
4045 // TODO: loop filter
4048 s->mb_intra = v->is_intra[s->mb_x] = 0;
4049 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4050 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4051 if (idx_mbmode <= 5) { // 1-MV
4052 dmv_x = dmv_y = pred_flag = 0;
4053 if (idx_mbmode & 1) {
4054 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4056 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4058 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: 4MVBP bit (MSB first) says which luma blocks carry MV data. */
4060 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4061 for (i = 0; i < 6; i++) {
4063 dmv_x = dmv_y = pred_flag = 0;
4064 val = ((v->fourmvbp >> (3 - i)) & 1);
4066 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4068 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4069 vc1_mc_4mv_luma(v, i, 0, 0);
4071 vc1_mc_4mv_chroma(v, 0);
4073 mb_has_coeffs = idx_mbmode & 1;
4076 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4080 s->current_picture.qscale_table[mb_pos] = mquant;
4081 if (!v->ttmbf && cbp) {
4082 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4085 for (i = 0; i < 6; i++) {
4086 s->dc_val[0][s->block_index[i]] = 0;
4088 val = ((cbp >> (5 - i)) & 1);
4089 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4091 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4092 first_block, s->dest[dst_idx] + off,
4093 (i & 4) ? s->uvlinesize : s->linesize,
4094 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4096 block_cbp |= pat << (i << 2);
4097 if (!v->ttmbf && ttmb < 8) ttmb = -1;
/* End of MB row: preserve the intra mask of this row for the next one. */
4102 if (s->mb_x == s->mb_width - 1)
4103 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4107 /** Decode one B-frame macroblock (Main profile, progressive).
 * Handles direct and non-direct prediction, the forward/backward/
 * interpolated BMV types, and skipped MBs.
 */
4109 static void vc1_decode_b_mb(VC1Context *v)
4111 MpegEncContext *s = &v->s;
4112 GetBitContext *gb = &s->gb;
4114 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4115 int cbp = 0; /* coded block pattern */
4116 int mqdiff, mquant; /* MB quantization */
4117 int ttmb = v->ttfrm; /* MB Transform type */
4118 int mb_has_coeffs = 0; /* last_flag */
4119 int index, index1; /* LUT indexes */
4120 int val, sign; /* temp values */
4121 int first_block = 1;
4123 int skipped, direct;
4124 int dmv_x[2], dmv_y[2]; /* [0] forward, [1] backward MV deltas */
4125 int bmvtype = BMV_TYPE_BACKWARD;
4127 mquant = v->pq; /* lossy initialization */
/* DIRECTMB and SKIPMB may be raw bitplanes or pre-decoded planes. */
4131 direct = get_bits1(gb);
4133 direct = v->direct_mb_plane[mb_pos];
4135 skipped = get_bits1(gb);
4137 skipped = v->s.mbskip_table[mb_pos];
4139 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4140 for (i = 0; i < 6; i++) {
4141 v->mb_type[0][s->block_index[i]] = 0;
4142 s->dc_val[0][s->block_index[i]] = 0;
4144 s->current_picture.qscale_table[mb_pos] = 0;
4148 GET_MVDATA(dmv_x[0], dmv_y[0]);
4149 dmv_x[1] = dmv_x[0];
4150 dmv_y[1] = dmv_y[0];
4152 if (skipped || !s->mb_intra) {
/* BMVTYPE: 0/10/11 code, ordering depends on BFRACTION vs 1/2. */
4153 bmvtype = decode012(gb);
4156 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4159 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4162 bmvtype = BMV_TYPE_INTERPOLATED;
4163 dmv_x[0] = dmv_y[0] = 0;
4167 for (i = 0; i < 6; i++)
4168 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* Skipped path: treat as interpolated if direct, then predict + MC. */
4172 bmvtype = BMV_TYPE_INTERPOLATED;
4173 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4174 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4178 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4181 s->current_picture.qscale_table[mb_pos] = mquant;
4183 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4184 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4185 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4186 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4188 if (!mb_has_coeffs && !s->mb_intra) {
4189 /* no coded blocks - effectively skipped */
4190 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4191 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4194 if (s->mb_intra && !mb_has_coeffs) {
4196 s->current_picture.qscale_table[mb_pos] = mquant;
4197 s->ac_pred = get_bits1(gb);
4199 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* Interpolated MBs carry a second MVDATA for the backward vector. */
4201 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4202 GET_MVDATA(dmv_x[0], dmv_y[0]);
4203 if (!mb_has_coeffs) {
4204 /* interpolated skipped block */
4205 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4206 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4210 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4212 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4215 s->ac_pred = get_bits1(gb);
4216 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4218 s->current_picture.qscale_table[mb_pos] = mquant;
4219 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4220 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Per-block coefficient decoding and reconstruction. */
4224 for (i = 0; i < 6; i++) {
4225 s->dc_val[0][s->block_index[i]] = 0;
4227 val = ((cbp >> (5 - i)) & 1);
4228 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4229 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4231 /* check if prediction blocks A and C are available */
4232 v->a_avail = v->c_avail = 0;
4233 if (i == 2 || i == 3 || !s->first_slice_line)
4234 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4235 if (i == 1 || i == 3 || s->mb_x)
4236 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4238 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4239 (i & 4) ? v->codingset2 : v->codingset);
4240 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4242 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* NOTE(review): the <<= 1 upscale is conditional on a guard not visible
 * here — presumably range reduction; confirm against full source. */
4244 for (j = 0; j < 64; j++)
4245 s->block[i][j] <<= 1;
4246 s->idsp.put_signed_pixels_clamped(s->block[i],
4247 s->dest[dst_idx] + off,
4248 i & 4 ? s->uvlinesize
4251 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4252 first_block, s->dest[dst_idx] + off,
4253 (i & 4) ? s->uvlinesize : s->linesize,
4254 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4255 if (!v->ttmbf && ttmb < 8)
4262 /** Decode one B-frame macroblock of an interlaced-field B picture.
 * Intra (idx_mbmode <= 1), 1-MV (idx_mbmode <= 5) and 4-MV modes; the
 * forward/backward direction per MB comes from the FORWARDMB plane.
 */
4264 static void vc1_decode_b_mb_intfi(VC1Context *v)
4266 MpegEncContext *s = &v->s;
4267 GetBitContext *gb = &s->gb;
4269 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4270 int cbp = 0; /* coded block pattern */
4271 int mqdiff, mquant; /* MB quantization */
4272 int ttmb = v->ttfrm; /* MB Transform type */
4273 int mb_has_coeffs = 0; /* last_flag */
4274 int val; /* temp value */
4275 int first_block = 1;
4278 int dmv_x[2], dmv_y[2], pred_flag[2]; /* [0] forward, [1] backward */
4279 int bmvtype = BMV_TYPE_BACKWARD;
4280 int idx_mbmode, interpmvp;
4282 mquant = v->pq; /* Lossy initialization */
4285 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4286 if (idx_mbmode <= 1) { // intra MB
4287 s->mb_intra = v->is_intra[s->mb_x] = 1;
4288 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4289 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4290 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4292 s->current_picture.qscale_table[mb_pos] = mquant;
4293 /* Set DC scale - y and c use the same (not sure if necessary here) */
4294 s->y_dc_scale = s->y_dc_scale_table[mquant];
4295 s->c_dc_scale = s->c_dc_scale_table[mquant];
4296 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4297 mb_has_coeffs = idx_mbmode & 1;
4299 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4301 for (i = 0; i < 6; i++) {
4302 s->dc_val[0][s->block_index[i]] = 0;
4304 val = ((cbp >> (5 - i)) & 1);
4305 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4306 v->a_avail = v->c_avail = 0;
4307 if (i == 2 || i == 3 || !s->first_slice_line)
4308 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4309 if (i == 1 || i == 3 || s->mb_x)
4310 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4312 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4313 (i & 4) ? v->codingset2 : v->codingset);
4314 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4316 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* NOTE(review): the <<= 1 upscale is conditional on a guard not visible
 * here — presumably range reduction; confirm against full source. */
4318 for (j = 0; j < 64; j++)
4319 s->block[i][j] <<= 1;
4320 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4321 s->idsp.put_signed_pixels_clamped(s->block[i],
4322 s->dest[dst_idx] + off,
4323 (i & 4) ? s->uvlinesize
4325 // TODO: yet to perform loop filter
4328 s->mb_intra = v->is_intra[s->mb_x] = 0;
4329 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4330 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
/* FORWARDMB: raw bit or pre-decoded plane, selects forward prediction. */
4332 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4334 fwd = v->forward_mb_plane[mb_pos];
4335 if (idx_mbmode <= 5) { // 1-MV
4336 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4337 pred_flag[0] = pred_flag[1] = 0;
4339 bmvtype = BMV_TYPE_FORWARD;
4341 bmvtype = decode012(gb);
4344 bmvtype = BMV_TYPE_BACKWARD;
4347 bmvtype = BMV_TYPE_DIRECT;
4350 bmvtype = BMV_TYPE_INTERPOLATED;
4351 interpmvp = get_bits1(gb);
4354 v->bmvtype = bmvtype;
/* Slot [1] is used for backward MV data, slot [0] otherwise. */
4355 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4356 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4358 if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4359 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4361 if (bmvtype == BMV_TYPE_DIRECT) {
4362 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
/* NOTE(review): pred_flag[0] is cleared twice; pred_flag[1] looks like
 * the intended target — verify against the reference before changing. */
4363 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4365 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4366 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4367 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV mode: direction fixed by fwd, per-block MV data via 4MVBP. */
4370 bmvtype = BMV_TYPE_FORWARD;
4371 v->bmvtype = bmvtype;
4372 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4373 for (i = 0; i < 6; i++) {
4375 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4376 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4377 val = ((v->fourmvbp >> (3 - i)) & 1);
4379 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4380 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4381 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4383 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4384 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
4386 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4388 mb_has_coeffs = idx_mbmode & 1;
4391 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4395 s->current_picture.qscale_table[mb_pos] = mquant;
4396 if (!v->ttmbf && cbp) {
4397 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4400 for (i = 0; i < 6; i++) {
4401 s->dc_val[0][s->block_index[i]] = 0;
4403 val = ((cbp >> (5 - i)) & 1);
4404 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4406 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4407 first_block, s->dest[dst_idx] + off,
4408 (i & 4) ? s->uvlinesize : s->linesize,
4409 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4410 if (!v->ttmbf && ttmb < 8)
4418 /** Decode one B-frame MB (in interlaced frame B picture)
4420 static int vc1_decode_b_mb_intfr(VC1Context *v)
4422 MpegEncContext *s = &v->s;
4423 GetBitContext *gb = &s->gb;
4425 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4426 int cbp = 0; /* cbp decoding stuff */
4427 int mqdiff, mquant; /* MB quantization */
4428 int ttmb = v->ttfrm; /* MB Transform type */
4429 int mvsw = 0; /* motion vector switch */
4430 int mb_has_coeffs = 1; /* last_flag */
4431 int dmv_x, dmv_y; /* Differential MV components */
4432 int val; /* temp value */
4433 int first_block = 1;
4435 int skipped, direct, twomv = 0;
4436 int block_cbp = 0, pat, block_tt = 0;
4437 int idx_mbmode = 0, mvbp;
4438 int stride_y, fieldtx;
4439 int bmvtype = BMV_TYPE_BACKWARD;
4442 mquant = v->pq; /* Lossy initialization */
4445 skipped = get_bits1(gb);
4447 skipped = v->s.mbskip_table[mb_pos];
4450 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
4451 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
4453 v->blk_mv_type[s->block_index[0]] = 1;
4454 v->blk_mv_type[s->block_index[1]] = 1;
4455 v->blk_mv_type[s->block_index[2]] = 1;
4456 v->blk_mv_type[s->block_index[3]] = 1;
4458 v->blk_mv_type[s->block_index[0]] = 0;
4459 v->blk_mv_type[s->block_index[1]] = 0;
4460 v->blk_mv_type[s->block_index[2]] = 0;
4461 v->blk_mv_type[s->block_index[3]] = 0;
4466 direct = get_bits1(gb);
4468 direct = v->direct_mb_plane[mb_pos];
4471 s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
4472 s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
4473 s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
4474 s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
4477 s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
4478 s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
4479 s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
4480 s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
4482 for (i = 1; i < 4; i += 2) {
4483 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
4484 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
4485 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
4486 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
4489 for (i = 1; i < 4; i++) {
4490 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
4491 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
4492 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
4493 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
4498 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
4499 for (i = 0; i < 4; i++) {
4500 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
4501 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
4502 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
4503 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
4505 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4506 s->mb_intra = v->is_intra[s->mb_x] = 1;
4507 for (i = 0; i < 6; i++)
4508 v->mb_type[0][s->block_index[i]] = 1;
4509 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
4510 mb_has_coeffs = get_bits1(gb);
4512 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4513 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4515 s->current_picture.qscale_table[mb_pos] = mquant;
4516 /* Set DC scale - y and c use the same (not sure if necessary here) */
4517 s->y_dc_scale = s->y_dc_scale_table[mquant];
4518 s->c_dc_scale = s->c_dc_scale_table[mquant];
4520 for (i = 0; i < 6; i++) {
4521 s->dc_val[0][s->block_index[i]] = 0;
4523 val = ((cbp >> (5 - i)) & 1);
4524 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4525 v->a_avail = v->c_avail = 0;
4526 if (i == 2 || i == 3 || !s->first_slice_line)
4527 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4528 if (i == 1 || i == 3 || s->mb_x)
4529 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4531 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4532 (i & 4) ? v->codingset2 : v->codingset);
4533 if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
4535 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4537 stride_y = s->linesize << fieldtx;
4538 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4540 stride_y = s->uvlinesize;
4543 s->idsp.put_signed_pixels_clamped(s->block[i],
4544 s->dest[dst_idx] + off,
4548 s->mb_intra = v->is_intra[s->mb_x] = 0;
4550 if (skipped || !s->mb_intra) {
4551 bmvtype = decode012(gb);
4554 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4557 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4560 bmvtype = BMV_TYPE_INTERPOLATED;
4564 if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
4565 mvsw = get_bits1(gb);
4568 if (!skipped) { // inter MB
4569 mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
4571 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4573 if (bmvtype == BMV_TYPE_INTERPOLATED & twomv) {
4574 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4575 } else if (bmvtype == BMV_TYPE_INTERPOLATED | twomv) {
4576 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
4580 for (i = 0; i < 6; i++)
4581 v->mb_type[0][s->block_index[i]] = 0;
4582 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
4583 /* for all motion vector read MVDATA and motion compensate each block */
4587 for (i = 0; i < 4; i++) {
4588 vc1_mc_4mv_luma(v, i, 0, 0);
4589 vc1_mc_4mv_luma(v, i, 1, 1);
4591 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4592 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4597 } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
4599 for (i = 0; i < 4; i++) {
4602 val = ((mvbp >> (3 - i)) & 1);
4604 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4606 vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4607 vc1_mc_4mv_luma(v, j, dir, dir);
4608 vc1_mc_4mv_luma(v, j+1, dir, dir);
4611 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4612 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4613 } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
4617 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4619 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4624 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4626 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4629 dir = bmvtype == BMV_TYPE_BACKWARD;
4636 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4637 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4641 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4642 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
4645 for (i = 0; i < 2; i++) {
4646 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4647 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4648 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4649 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4652 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4653 vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4656 vc1_mc_4mv_luma(v, 0, dir, 0);
4657 vc1_mc_4mv_luma(v, 1, dir, 0);
4658 vc1_mc_4mv_luma(v, 2, dir2, 0);
4659 vc1_mc_4mv_luma(v, 3, dir2, 0);
4660 vc1_mc_4mv_chroma4(v, dir, dir2, 0);
4662 dir = bmvtype == BMV_TYPE_BACKWARD;
4664 mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
4667 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4669 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4670 v->blk_mv_type[s->block_index[0]] = 1;
4671 v->blk_mv_type[s->block_index[1]] = 1;
4672 v->blk_mv_type[s->block_index[2]] = 1;
4673 v->blk_mv_type[s->block_index[3]] = 1;
4674 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4675 for (i = 0; i < 2; i++) {
4676 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4677 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4683 GET_MQUANT(); // p. 227
4684 s->current_picture.qscale_table[mb_pos] = mquant;
4685 if (!v->ttmbf && cbp)
4686 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4687 for (i = 0; i < 6; i++) {
4688 s->dc_val[0][s->block_index[i]] = 0;
4690 val = ((cbp >> (5 - i)) & 1);
4692 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4694 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4696 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4697 first_block, s->dest[dst_idx] + off,
4698 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4699 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4700 block_cbp |= pat << (i << 2);
4701 if (!v->ttmbf && ttmb < 8)
4709 for (i = 0; i < 6; i++) {
4710 v->mb_type[0][s->block_index[i]] = 0;
4711 s->dc_val[0][s->block_index[i]] = 0;
4713 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
4714 s->current_picture.qscale_table[mb_pos] = 0;
4715 v->blk_mv_type[s->block_index[0]] = 0;
4716 v->blk_mv_type[s->block_index[1]] = 0;
4717 v->blk_mv_type[s->block_index[2]] = 0;
4718 v->blk_mv_type[s->block_index[3]] = 0;
4721 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4722 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4723 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4725 dir = bmvtype == BMV_TYPE_BACKWARD;
4726 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4731 for (i = 0; i < 2; i++) {
4732 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4733 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4734 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4735 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4738 v->blk_mv_type[s->block_index[0]] = 1;
4739 v->blk_mv_type[s->block_index[1]] = 1;
4740 v->blk_mv_type[s->block_index[2]] = 1;
4741 v->blk_mv_type[s->block_index[3]] = 1;
4742 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4743 for (i = 0; i < 2; i++) {
4744 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4745 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4752 if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
4757 if (s->mb_x == s->mb_width - 1)
4758 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4759 v->cbp[s->mb_x] = block_cbp;
4760 v->ttblk[s->mb_x] = block_tt;
4764 /** Decode blocks of I-frame
4766 static void vc1_decode_i_blocks(VC1Context *v)
4769 MpegEncContext *s = &v->s;
/* Select the AC coefficient coding sets: codingset for luma (blocks 0-3),
 * codingset2 for chroma (blocks 4-5); index 0 additionally depends on pqindex. */
4774 /* select codingmode used for VLC tables selection */
4775 switch (v->y_ac_table_index) {
4777 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4780 v->codingset = CS_HIGH_MOT_INTRA;
4783 v->codingset = CS_MID_RATE_INTRA;
4787 switch (v->c_ac_table_index) {
4789 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4792 v->codingset2 = CS_HIGH_MOT_INTER;
4795 v->codingset2 = CS_MID_RATE_INTER;
4799 /* Set DC scale - y and c use the same */
4800 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4801 s->c_dc_scale = s->c_dc_scale_table[v->pq];
4804 s->mb_x = s->mb_y = 0;
4806 s->first_slice_line = 1;
/* Macroblock loop: decode, reconstruct and overlap-filter each intra MB in turn. */
4807 for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
4809 init_block_index(v);
4810 for (; s->mb_x < v->end_mb_x; s->mb_x++) {
4812 ff_update_block_index(s);
/* Destinations of the six 8x8 blocks: four luma quadrants, then Cb, Cr. */
4813 dst[0] = s->dest[0];
4814 dst[1] = dst[0] + 8;
4815 dst[2] = s->dest[0] + s->linesize * 8;
4816 dst[3] = dst[2] + 8;
4817 dst[4] = s->dest[1];
4818 dst[5] = s->dest[2];
4819 s->bdsp.clear_blocks(s->block[0]);
4820 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4821 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4822 s->current_picture.qscale_table[mb_pos] = v->pq;
4823 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4824 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4826 // do actual MB decoding and displaying
4827 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4828 v->s.ac_pred = get_bits1(&v->s.gb);
4830 for (k = 0; k < 6; k++) {
4831 val = ((cbp >> (5 - k)) & 1);
/* Coded-block status is differentially coded against the neighbour
 * prediction; fold the resolved bit back into cbp. */
4834 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4838 cbp |= val << (5 - k);
4840 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4842 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4844 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
/* Overlap-smoothing path keeps the signed range (coeffs doubled before the
 * clamped signed write); the plain path removes the 64 bias first. */
4845 if (v->pq >= 9 && v->overlap) {
4847 for (j = 0; j < 64; j++)
4848 s->block[k][j] <<= 1;
4849 s->idsp.put_signed_pixels_clamped(s->block[k], dst[k],
4850 k & 4 ? s->uvlinesize
4854 for (j = 0; j < 64; j++)
4855 s->block[k][j] = (s->block[k][j] - 64) << 1;
4856 s->idsp.put_pixels_clamped(s->block[k], dst[k],
4857 k & 4 ? s->uvlinesize
/* Overlap smoothing across 8x8 block edges: horizontal filtering always,
 * vertical filtering only once the row above exists (not first slice line). */
4862 if (v->pq >= 9 && v->overlap) {
4864 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4865 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4866 if (!(s->flags & CODEC_FLAG_GRAY)) {
4867 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4868 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4871 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4872 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4873 if (!s->first_slice_line) {
4874 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4875 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4876 if (!(s->flags & CODEC_FLAG_GRAY)) {
4877 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4878 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4881 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4882 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4884 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* Bitstream sanity: reading past the announced picture size marks the slice
 * damaged for error resilience and aborts decoding. */
4886 if (get_bits_count(&s->gb) > v->bits) {
4887 ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4888 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4889 get_bits_count(&s->gb), v->bits);
/* With the loop filter enabled, output lags one MB row behind. */
4893 if (!v->s.loop_filter)
4894 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4896 ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4898 s->first_slice_line = 0;
4900 if (v->s.loop_filter)
4901 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4903 /* This is intentionally mb_height and not end_mb_y - unlike in advanced
4904 * profile, these only differ when decoding MSS2 rectangles. */
4905 ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4908 /** Decode blocks of I-frame for advanced profile
4910 static void vc1_decode_i_blocks_adv(VC1Context *v)
4913 MpegEncContext *s = &v->s;
4919 GetBitContext *gb = &s->gb;
/* Coding-set selection, same scheme as the simple/main profile path:
 * codingset for luma, codingset2 for chroma. */
4921 /* select codingmode used for VLC tables selection */
4922 switch (v->y_ac_table_index) {
4924 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4927 v->codingset = CS_HIGH_MOT_INTRA;
4930 v->codingset = CS_MID_RATE_INTRA;
4934 switch (v->c_ac_table_index) {
4936 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4939 v->codingset2 = CS_HIGH_MOT_INTER;
4942 v->codingset2 = CS_MID_RATE_INTER;
4947 s->mb_x = s->mb_y = 0;
4949 s->first_slice_line = 1;
4950 s->mb_y = s->start_mb_y;
/* When this slice does not start at the top of the picture, clear the
 * coded-block prediction context of the row above so DC/CBP prediction
 * does not leak across the slice boundary. */
4951 if (s->start_mb_y) {
4953 init_block_index(v);
4954 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4955 (1 + s->b8_stride) * sizeof(*s->coded_block));
4957 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4959 init_block_index(v);
4960 for (;s->mb_x < s->mb_width; s->mb_x++) {
/* Blocks are decoded into a rotating per-MB buffer (cur_blk_idx), so the
 * actual pixel write-out can be delayed for overlap/loop filtering. */
4961 int16_t (*block)[64] = v->block[v->cur_blk_idx];
4962 ff_update_block_index(s);
4963 s->bdsp.clear_blocks(block[0]);
4964 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4965 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4966 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4967 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4969 // do actual MB decoding and displaying
/* fieldtx/acpred/overflags may be raw-coded per MB instead of carried in a
 * picture-level bitplane; read from the bitstream only in the raw case. */
4970 if (v->fieldtx_is_raw)
4971 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4972 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4973 if ( v->acpred_is_raw)
4974 v->s.ac_pred = get_bits1(&v->s.gb);
4976 v->s.ac_pred = v->acpred_plane[mb_pos];
4978 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4979 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
/* Advanced profile carries a per-MB quantizer (mquant), unlike the fixed
 * v->pq used in vc1_decode_i_blocks(). */
4983 s->current_picture.qscale_table[mb_pos] = mquant;
4984 /* Set DC scale - y and c use the same */
4985 s->y_dc_scale = s->y_dc_scale_table[mquant];
4986 s->c_dc_scale = s->c_dc_scale_table[mquant];
4988 for (k = 0; k < 6; k++) {
4989 val = ((cbp >> (5 - k)) & 1);
4992 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4996 cbp |= val << (5 - k);
/* Neighbour availability for AC prediction: blocks 2/3 always have the MB's
 * own top row above them; blocks 1/3 always have a left neighbour. */
4998 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4999 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
5001 vc1_decode_i_block_adv(v, block[k], k, val,
5002 (k < 4) ? v->codingset : v->codingset2, mquant);
5004 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
5006 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
/* Deferred reconstruction: smooth, then write clamped pixels, then (if
 * enabled) run the delayed in-loop deblocking filter. */
5009 vc1_smooth_overlap_filter_iblk(v);
5010 vc1_put_signed_blocks_clamped(v);
5011 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
5013 if (get_bits_count(&s->gb) > v->bits) {
5014 // TODO: may need modification to handle slice coding
5015 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5016 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
5017 get_bits_count(&s->gb), v->bits);
5021 if (!v->s.loop_filter)
5022 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5024 ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
5025 s->first_slice_line = 0;
/* Flush the last, still-buffered MB row (reconstruction lags one MB). */
5028 /* raw bottom MB row */
5030 init_block_index(v);
5032 for (;s->mb_x < s->mb_width; s->mb_x++) {
5033 ff_update_block_index(s);
5034 vc1_put_signed_blocks_clamped(v);
5035 if (v->s.loop_filter)
5036 vc1_loop_filter_iblk_delayed(v, v->pq);
5038 if (v->s.loop_filter)
5039 ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
/* Field pictures double the effective MB row count for error resilience. */
5040 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5041 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/** Decode all macroblocks of a P-frame slice, dispatching on the frame
 * coding mode (progressive, interlaced frame, interlaced field). */
5044 static void vc1_decode_p_blocks(VC1Context *v)
5046 MpegEncContext *s = &v->s;
5047 int apply_loop_filter;
5049 /* select codingmode used for VLC tables selection */
/* NOTE(review): both switches key on c_ac_table_index (y_ac_table_index is
 * not consulted here, unlike in the I-frame paths). This matches upstream
 * libav/ffmpeg; verify against the spec before changing. */
5050 switch (v->c_ac_table_index) {
5052 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
5055 v->codingset = CS_HIGH_MOT_INTRA;
5058 v->codingset = CS_MID_RATE_INTRA;
5062 switch (v->c_ac_table_index) {
5064 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
5067 v->codingset2 = CS_HIGH_MOT_INTER;
5070 v->codingset2 = CS_MID_RATE_INTER;
/* In-loop deblocking is only applied here for progressive P-frames, and only
 * when the user has not asked to skip non-key loop filtering. */
5074 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
5075 v->fcm == PROGRESSIVE;
5076 s->first_slice_line = 1;
5077 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
5078 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5080 init_block_index(v);
5081 for (; s->mb_x < s->mb_width; s->mb_x++) {
5082 ff_update_block_index(s);
/* Per-MB dispatch on frame coding mode. */
5084 if (v->fcm == ILACE_FIELD)
5085 vc1_decode_p_mb_intfi(v);
5086 else if (v->fcm == ILACE_FRAME)
5087 vc1_decode_p_mb_intfr(v);
5088 else vc1_decode_p_mb(v);
/* Loop filter runs one row late: it needs the row below to be decoded. */
5089 if (s->mb_y != s->start_mb_y && apply_loop_filter)
5090 vc1_apply_p_loop_filter(v);
5091 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5092 // TODO: may need modification to handle slice coding
5093 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5094 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5095 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
/* Rotate the per-row side info: the just-decoded row becomes the "above"
 * context (base half of each double-row buffer) for the next row. */
5099 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
5100 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
5101 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
5102 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
5103 if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5104 s->first_slice_line = 0;
/* Filter the final row, which the lagged in-loop pass above never reached. */
5106 if (apply_loop_filter) {
5108 init_block_index(v);
5109 for (; s->mb_x < s->mb_width; s->mb_x++) {
5110 ff_update_block_index(s);
5111 vc1_apply_p_loop_filter(v);
5114 if (s->end_mb_y >= s->start_mb_y)
5115 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5116 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5117 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/** Decode all macroblocks of a B-frame slice, dispatching on the frame
 * coding mode (progressive, interlaced frame, interlaced field). */
5120 static void vc1_decode_b_blocks(VC1Context *v)
5122 MpegEncContext *s = &v->s;
5124 /* select codingmode used for VLC tables selection */
/* NOTE(review): as in vc1_decode_p_blocks(), both coding sets are selected
 * from c_ac_table_index — matches upstream; verify intentional. */
5125 switch (v->c_ac_table_index) {
5127 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
5130 v->codingset = CS_HIGH_MOT_INTRA;
5133 v->codingset = CS_MID_RATE_INTRA;
5137 switch (v->c_ac_table_index) {
5139 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
5142 v->codingset2 = CS_HIGH_MOT_INTER;
5145 v->codingset2 = CS_MID_RATE_INTER;
5149 s->first_slice_line = 1;
5150 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5152 init_block_index(v);
5153 for (; s->mb_x < s->mb_width; s->mb_x++) {
5154 ff_update_block_index(s);
/* Per-MB dispatch on frame coding mode (the progressive branch is elided
 * here but handled in the trailing else of this chain). */
5156 if (v->fcm == ILACE_FIELD)
5157 vc1_decode_b_mb_intfi(v);
5158 else if (v->fcm == ILACE_FRAME)
5159 vc1_decode_b_mb_intfr(v);
5162 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5163 // TODO: may need modification to handle slice coding
5164 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5165 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5166 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5169 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* With the loop filter enabled, output lags one MB row behind. */
5171 if (!v->s.loop_filter)
5172 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5174 ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5175 s->first_slice_line = 0;
5177 if (v->s.loop_filter)
5178 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5179 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5180 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/** Handle a skipped P-frame: reproduce the previous picture verbatim.
 * Copies every MB row of all three planes from the last picture and marks
 * the whole picture as successfully decoded for error resilience. */
5183 static void vc1_decode_skip_blocks(VC1Context *v)
5185 MpegEncContext *s = &v->s;
/* Nothing to copy from if no reference picture exists (early bail-out). */
5187 if (!v->s.last_picture.f->data[0])
5190 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
5191 s->first_slice_line = 1;
5192 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5194 init_block_index(v);
5195 ff_update_block_index(s);
/* Copy one 16-pixel-high luma row and the matching 8-high chroma rows. */
5196 memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
5197 memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5198 memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5199 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5200 s->first_slice_line = 0;
/* The skipped frame still counts as a P-frame for reference handling. */
5202 s->pict_type = AV_PICTURE_TYPE_P;
/** Entry point for block-layer decoding: picks the per-picture-type decoder.
 * An X8-coded intra frame (WMV2-style) bypasses the VC-1 block decoders. */
5205 void ff_vc1_decode_blocks(VC1Context *v)
5208 v->s.esc3_level_length = 0;
/* X8 intra path: quantizer is 2*pq+halfpq; dquant only if no pquantizer. */
5210 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
/* Reset the rotating block-buffer indices used by the advanced I path. */
5213 v->left_blk_idx = -1;
5214 v->topleft_blk_idx = 1;
5216 switch (v->s.pict_type) {
5217 case AV_PICTURE_TYPE_I:
5218 if (v->profile == PROFILE_ADVANCED)
5219 vc1_decode_i_blocks_adv(v);
5221 vc1_decode_i_blocks(v);
5223 case AV_PICTURE_TYPE_P:
/* Skipped P-frames carry no block data; copy the previous picture instead. */
5224 if (v->p_frame_skipped)
5225 vc1_decode_skip_blocks(v);
5227 vc1_decode_p_blocks(v);
5229 case AV_PICTURE_TYPE_B:
/* presumably guarded by v->bi_type (line elided): BI frames are decoded
 * with the intra decoders — TODO confirm against full source. */
5231 if (v->profile == PROFILE_ADVANCED)
5232 vc1_decode_i_blocks_adv(v);
5234 vc1_decode_i_blocks(v);
5236 vc1_decode_b_blocks(v);
5242 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5246 * Transform coefficients for both sprites in 16.16 fixed point format,
5247 * in the order they appear in the bitstream:
5249 * rotation 1 (unused)
5251 * rotation 2 (unused)
5258 int effect_type, effect_flag;
5259 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
5260 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
/** Read a 30-bit field and return it as a signed 16.16 fixed-point value:
 * the raw value is re-centred around 1<<29 and doubled. */
5263 static inline int get_fp_val(GetBitContext* gb)
5265 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
/** Parse one sprite transform into c[0..6] (16.16 fixed point).
 * A 2-bit selector decides how many coefficients are explicitly coded
 * (translation only, uniform zoom, separate x/y zoom, or full set); the
 * remaining coefficients keep whatever defaults were set by elided code.
 * c[5] and c[6] (y translation / opacity per the surrounding code) are
 * read unconditionally afterwards. */
5268 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
5272 switch (get_bits(gb, 2)) {
/* translation only */
5275 c[2] = get_fp_val(gb);
/* uniform zoom: same factor for x (c[0]) and y (c[4]) */
5279 c[0] = c[4] = get_fp_val(gb);
5280 c[2] = get_fp_val(gb);
/* independent x/y zoom */
5283 c[0] = get_fp_val(gb);
5284 c[2] = get_fp_val(gb);
5285 c[4] = get_fp_val(gb);
/* full coefficient set, including the rotation terms c[1]/c[3] */
5288 c[0] = get_fp_val(gb);
5289 c[1] = get_fp_val(gb);
5290 c[2] = get_fp_val(gb);
5291 c[3] = get_fp_val(gb);
5292 c[4] = get_fp_val(gb);
5295 c[5] = get_fp_val(gb);
5297 c[6] = get_fp_val(gb);
/** Parse the sprite headers for a WMV3IMAGE/VC1IMAGE picture into *sd:
 * per-sprite transform coefficients, then optional effect parameters.
 * Emits debug logs of everything parsed and warns on suspicious buffer
 * positions at the end. */
5302 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
5304 AVCodecContext *avctx = v->s.avctx;
/* One transform per sprite (two when two_sprites is set). */
5307 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5308 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
/* Rotation (coefs 1 and 3) is parsed but not implemented by the renderer. */
5309 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
5310 avpriv_request_sample(avctx, "Non-zero rotation coefficients");
5311 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
5312 for (i = 0; i < 7; i++)
5313 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
5314 sd->coefs[sprite][i] / (1<<16),
5315 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
5316 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Intentional assignment-in-condition: a zero effect type means none. */
5320 if (sd->effect_type = get_bits_long(gb, 30)) {
/* Parameter counts 7 and 14 are whole transforms; otherwise raw values. */
5321 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
5323 vc1_sprite_parse_transform(gb, sd->effect_params1);
5326 vc1_sprite_parse_transform(gb, sd->effect_params1);
5327 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5330 for (i = 0; i < sd->effect_pcount1; i++)
5331 sd->effect_params1[i] = get_fp_val(gb);
5333 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5334 // effect 13 is simple alpha blending and matches the opacity above
5335 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5336 for (i = 0; i < sd->effect_pcount1; i++)
5337 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5338 sd->effect_params1[i] / (1 << 16),
5339 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5340 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Second parameter set is bounded by the effect_params2[10] array size. */
5343 sd->effect_pcount2 = get_bits(gb, 16);
5344 if (sd->effect_pcount2 > 10) {
5345 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5347 } else if (sd->effect_pcount2) {
5349 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5350 while (++i < sd->effect_pcount2) {
5351 sd->effect_params2[i] = get_fp_val(gb);
5352 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5353 sd->effect_params2[i] / (1 << 16),
5354 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5356 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Intentional assignment-in-condition, as above. */
5359 if (sd->effect_flag = get_bits1(gb))
5360 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
/* Sanity checks on the final bit position: overrun is an error; a large
 * unread remainder (beyond the WMV3IMAGE 64-bit slack) is only a warning. */
5362 if (get_bits_count(gb) >= gb->size_in_bits +
5363 (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
5364 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5365 if (get_bits_count(gb) < gb->size_in_bits - 8)
5366 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
/** Render one or two sprites into v->sprite_output_frame using the
 * transforms in *sd: per-row horizontal scaling (cached in sr_rows), then
 * vertical interpolation and, with two sprites, alpha blending. */
5369 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5371 int i, plane, row, sprite;
/* sr_cache[sprite][j] remembers which source line currently sits in
 * v->sr_rows[sprite][j]; -1 means "nothing cached". */
5372 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5373 uint8_t* src_h[2][2];
5374 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5376 MpegEncContext *s = &v->s;
/* Derive clipped integer offsets/advances (16.16) from the coefficients:
 * c[0]/c[4] scale, c[2]/c[5] translate. */
5378 for (i = 0; i < 2; i++) {
5379 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5380 xadv[i] = sd->coefs[i][0];
5381 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5382 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5384 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5385 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
/* Blend factor from sprite 2's opacity coefficient. */
5387 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5389 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5390 int width = v->output_width>>!!plane;
5392 for (row = 0; row < v->output_height>>!!plane; row++) {
5393 uint8_t *dst = v->sprite_output_frame->data[plane] +
5394 v->sprite_output_frame->linesize[plane] * row;
5396 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
/* Sprite 1 comes from the current picture; the second sprite source is
 * switched to the last picture below (condition line elided). */
5397 uint8_t *iplane = s->current_picture.f->data[plane];
5398 int iline = s->current_picture.f->linesize[plane];
5399 int ycoord = yoff[sprite] + yadv[sprite] * row;
5400 int yline = ycoord >> 16;
5402 ysub[sprite] = ycoord & 0xFFFF;
5404 iplane = s->last_picture.f->data[plane];
5405 iline = s->last_picture.f->linesize[plane];
5407 next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
/* Fast path: integral x offset and unity x scale need no resampling,
 * source rows can be used in place. */
5408 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5409 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5411 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
/* Slow path: horizontally scale yline (and yline+1 when vertically
 * interpolating) into sr_rows, reusing cached rows where possible. */
5413 if (sr_cache[sprite][0] != yline) {
5414 if (sr_cache[sprite][1] == yline) {
5415 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5416 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5418 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5419 sr_cache[sprite][0] = yline;
5422 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5423 v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
5424 iplane + next_line, xoff[sprite],
5425 xadv[sprite], width);
5426 sr_cache[sprite][1] = yline + 1;
5428 src_h[sprite][0] = v->sr_rows[sprite][0];
5429 src_h[sprite][1] = v->sr_rows[sprite][1];
/* Vertical pass: single sprite interpolates or copies; two sprites pick
 * the blend kernel by which of them needs vertical interpolation. */
5433 if (!v->two_sprites) {
5435 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5437 memcpy(dst, src_h[0][0], width);
5440 if (ysub[0] && ysub[1]) {
5441 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5442 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5443 } else if (ysub[0]) {
5444 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5445 src_h[1][0], alpha, width);
5446 } else if (ysub[1]) {
/* Sprites swapped, so the alpha weight is inverted accordingly. */
5447 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5448 src_h[0][0], (1<<16)-1-alpha, width);
5450 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5456 for (i = 0; i < 2; i++) {
/** Decode a WMV3IMAGE/VC1IMAGE sprite picture: parse the sprite headers,
 * validate that the required reference pictures exist, allocate the output
 * frame and render into it. Returns an error on missing sprites/buffer. */
5466 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5468 MpegEncContext *s = &v->s;
5469 AVCodecContext *avctx = s->avctx;
5472 vc1_parse_sprites(v, gb, &sd);
/* Sprite 1 lives in the current picture; without it nothing can be drawn. */
5474 if (!s->current_picture.f->data[0]) {
5475 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
/* Sprite 2 comes from the last picture; degrade gracefully if absent. */
5479 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f->data[0])) {
5480 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5484 av_frame_unref(v->sprite_output_frame);
5485 if (ff_get_buffer(avctx, v->sprite_output_frame, 0) < 0) {
5486 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5490 vc1_draw_sprites(v, &sd);
/** Flush hook for the sprite decoders: blanks the current picture so a
 * missing second keyframe of the convergence interval shows black instead
 * of stale data. */
5495 static void vc1_sprite_flush(AVCodecContext *avctx)
5497 VC1Context *v = avctx->priv_data;
5498 MpegEncContext *s = &v->s;
5499 AVFrame *f = s->current_picture.f;
5502 /* Windows Media Image codecs have a convergence interval of two keyframes.
5503 Since we can't enforce it, clear to black the missing sprite. This is
5504 wrong but it looks better than doing nothing. */
5507 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5508 for (i = 0; i < v->sprite_height>>!!plane; i++)
/* Black is 0 for luma, 128 (neutral) for the chroma planes. */
5509 memset(f->data[plane] + i * f->linesize[plane],
5510 plane ? 128 : 0, f->linesize[plane]);
/* Allocate all per-macroblock / per-block side tables used by the VC-1
 * decoder: bitplanes, coded-block-pattern and transform-type rows, intra
 * flags, luma MVs, MB type arrays, and field-MV bookkeeping.
 * mb_height is rounded up to an even value (FFALIGN(.., 2)) — presumably so
 * field pictures can address half-height tables safely; TODO confirm.
 * Returns 0 on success, a negative error code on allocation failure. */
5515 av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
5517 MpegEncContext *s = &v->s;
5519 int mb_height = FFALIGN(s->mb_height, 2);
5521 /* Allocate mb bitplanes */
5522 v->mv_type_mb_plane = av_malloc (s->mb_stride * mb_height);
5523 v->direct_mb_plane = av_malloc (s->mb_stride * mb_height);
5524 v->forward_mb_plane = av_malloc (s->mb_stride * mb_height);
5525 v->fieldtx_plane = av_mallocz(s->mb_stride * mb_height);
5526 v->acpred_plane = av_malloc (s->mb_stride * mb_height);
5527 v->over_flags_plane = av_malloc (s->mb_stride * mb_height);
/* One row of block data plus two extra entries of slack. */
5529 v->n_allocated_blks = s->mb_width + 2;
5530 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
/* The *_base arrays hold two strides; the working pointer starts one
 * stride in, leaving a guard row before it. */
5531 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5532 v->cbp = v->cbp_base + s->mb_stride;
5533 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5534 v->ttblk = v->ttblk_base + s->mb_stride;
5535 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5536 v->is_intra = v->is_intra_base + s->mb_stride;
5537 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5538 v->luma_mv = v->luma_mv_base + s->mb_stride;
5540 /* allocate block type info in that way so it could be used with s->block_index[] */
5541 v->mb_type_base = av_malloc(s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5542 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5543 v->mb_type[1] = v->mb_type_base + s->b8_stride * (mb_height * 2 + 1) + s->mb_stride + 1;
5544 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (mb_height + 1);
5546 /* allocate memory to store block level MV info */
5547 v->blk_mv_type_base = av_mallocz( s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5548 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5549 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5550 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5551 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5552 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5553 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5554 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5556 /* Init coded blocks info */
5557 if (v->profile == PROFILE_ADVANCED) {
5558 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5560 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5564 ff_intrax8_common_init(&v->x8,s);
/* Image (sprite) codecs additionally need four scaler row buffers of the
 * container output width. */
5566 if (s->avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || s->avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5567 for (i = 0; i < 4; i++)
5568 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
/* NOTE(review): as visible here this failure check does not cover
 * forward_mb_plane, fieldtx_plane, blk_mv_type_base, mv_f_base or
 * mv_f_next_base; the continuation of the condition (trailing ||) was
 * lost in extraction — verify against the full file. */
5571 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5572 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5574 av_freep(&v->mv_type_mb_plane);
5575 av_freep(&v->direct_mb_plane);
5576 av_freep(&v->acpred_plane);
5577 av_freep(&v->over_flags_plane);
5578 av_freep(&v->block);
5579 av_freep(&v->cbp_base);
5580 av_freep(&v->ttblk_base);
5581 av_freep(&v->is_intra_base);
5582 av_freep(&v->luma_mv_base);
5583 av_freep(&v->mb_type_base);
5584 return AVERROR(ENOMEM);
/* Build the transposed 8x8 zigzag scan tables used by the VC-1 transform:
 * each 0..63 scan index has its 3-bit row and column swapped, converting
 * the WMV1 scantables (and the advanced-profile interlaced zigzag) from
 * row-major to column-major order. */
5590 av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
5593 for (i = 0; i < 64; i++) {
/* transpose(x): swap the high 3 bits (row) and low 3 bits (column). */
5594 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5595 v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5596 v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5597 v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5598 v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5599 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5605 /** Initialize a VC1/WMV3 decoder
5606 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5607 * @todo TODO: Decypher remaining bits in extra_data
/* Parses extradata (WMV3: raw sequence header; VC-1: start-code delimited
 * sequence header + entry point), initializes DSP contexts, and derives
 * macroblock dimensions and sprite sizes.
 * NOTE(review): several lines (braces, local declarations, early returns)
 * were lost in extraction of this excerpt. */
5609 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5611 VC1Context *v = avctx->priv_data;
5612 MpegEncContext *s = &v->s;
5615 /* save the container output size for WMImage */
5616 v->output_width = avctx->width;
5617 v->output_height = avctx->height;
5619 if (!avctx->extradata_size || !avctx->extradata)
/* Grayscale-only decode forces GRAY8, otherwise negotiate a (possibly
 * hwaccel) pixel format from the codec's list. */
5621 if (!(avctx->flags & CODEC_FLAG_GRAY))
5622 avctx->pix_fmt = ff_get_format(avctx, avctx->codec->pix_fmts);
5624 avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5627 if (ff_vc1_init_common(v) < 0)
5629 ff_blockdsp_init(&s->bdsp, avctx);
5630 ff_h264chroma_init(&v->h264chroma, 8);
5631 ff_qpeldsp_init(&s->qdsp);
5632 ff_vc1dsp_init(&v->vc1dsp);
5634 if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5637 // looks like WMV3 has a sequence header stored in the extradata
5638 // advanced sequence header may be before the first frame
5639 // the last byte of the extradata is a version number, 1 for the
5640 // samples we can decode
5642 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5644 if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
5647 count = avctx->extradata_size*8 - get_bits_count(&gb);
5649 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5650 count, get_bits(&gb, count))
5651 } else if (count < 0) {
5652 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5654 } else { // VC1/WVC1/WVP2
/* Advanced profile: extradata is a sequence of start-code delimited,
 * escaped BDUs; walk the markers and decode SEQHDR + ENTRYPOINT. */
5655 const uint8_t *start = avctx->extradata;
5656 uint8_t *end = avctx->extradata + avctx->extradata_size;
5657 const uint8_t *next;
5658 int size, buf2_size;
5659 uint8_t *buf2 = NULL;
5660 int seq_initialized = 0, ep_initialized = 0;
5662 if (avctx->extradata_size < 16) {
5663 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5667 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5668 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5670 for (; next < end; start = next) {
5671 next = find_next_marker(start + 4, end);
5672 size = next - start - 4;
5675 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5676 init_get_bits(&gb, buf2, buf2_size * 8);
5677 switch (AV_RB32(start)) {
5678 case VC1_CODE_SEQHDR:
5679 if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5683 seq_initialized = 1;
5685 case VC1_CODE_ENTRYPOINT:
5686 if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
5695 if (!seq_initialized || !ep_initialized) {
5696 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
/* res_sprite selects the Windows Media Image (sprite) bitstream variant. */
5699 v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5702 v->sprite_output_frame = av_frame_alloc();
5703 if (!v->sprite_output_frame)
5704 return AVERROR(ENOMEM);
5706 avctx->profile = v->profile;
5707 if (v->profile == PROFILE_ADVANCED)
5708 avctx->level = v->level;
5710 avctx->has_b_frames = !!avctx->max_b_frames;
/* Macroblock dimensions: coded size rounded up to 16-pixel units. */
5712 s->mb_width = (avctx->coded_width + 15) >> 4;
5713 s->mb_height = (avctx->coded_height + 15) >> 4;
5715 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5716 ff_vc1_init_transposed_scantables(v);
5718 memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
/* Image codecs decode at the coded (sprite) size but output at the
 * container size saved above. */
5723 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5724 v->sprite_width = avctx->coded_width;
5725 v->sprite_height = avctx->coded_height;
5727 avctx->coded_width = avctx->width = v->output_width;
5728 avctx->coded_height = avctx->height = v->output_height;
5730 // prevent 16.16 overflows
5731 if (v->sprite_width > 1 << 14 ||
5732 v->sprite_height > 1 << 14 ||
5733 v->output_width > 1 << 14 ||
5734 v->output_height > 1 << 14) return -1;
5739 /** Close a VC1/WMV3 decoder
5740 * @warning Initial try at using MpegEncContext stuff
/* Releases everything allocated in vc1_decode_init /
 * ff_vc1_decode_init_alloc_tables: the sprite output frame, scaler rows,
 * HRD buffers, the MpegEncContext, all per-MB side tables, and the
 * IntraX8 context. Mirrors the allocation function above. */
5742 av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
5744 VC1Context *v = avctx->priv_data;
5747 av_frame_free(&v->sprite_output_frame);
5749 for (i = 0; i < 4; i++)
5750 av_freep(&v->sr_rows[i >> 1][i & 1]);
5751 av_freep(&v->hrd_rate);
5752 av_freep(&v->hrd_buffer);
5753 ff_MPV_common_end(&v->s);
5754 av_freep(&v->mv_type_mb_plane);
5755 av_freep(&v->direct_mb_plane);
5756 av_freep(&v->forward_mb_plane);
5757 av_freep(&v->fieldtx_plane);
5758 av_freep(&v->acpred_plane);
5759 av_freep(&v->over_flags_plane);
5760 av_freep(&v->mb_type_base);
5761 av_freep(&v->blk_mv_type_base);
5762 av_freep(&v->mv_f_base);
5763 av_freep(&v->mv_f_next_base);
5764 av_freep(&v->block);
5765 av_freep(&v->cbp_base);
5766 av_freep(&v->ttblk_base);
5767 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5768 av_freep(&v->luma_mv_base);
5769 ff_intrax8_common_end(&v->x8);
5774 /** Decode a VC1/WMV3 frame
5775 * @todo TODO: Handle VC-1 IDUs (Transport level?)
/* Top-level decode callback: unescapes advanced-profile BDUs, collects
 * field/slice sub-bitstreams, parses the picture header, decodes all
 * slices, and selects the frame to output (delayed for B-frame reorder).
 * NOTE(review): error paths (gotos), closing braces and some declarations
 * were lost in the extraction of this excerpt; code is kept byte-identical. */
5777 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5778 int *got_frame, AVPacket *avpkt)
5780 const uint8_t *buf = avpkt->data;
5781 int buf_size = avpkt->size, n_slices = 0, i, ret;
5782 VC1Context *v = avctx->priv_data;
5783 MpegEncContext *s = &v->s;
5784 AVFrame *pict = data;
5785 uint8_t *buf2 = NULL;
5786 const uint8_t *buf_start = buf;
5787 int mb_height, n_slices1;
5792 } *slices = NULL, *tmp;
5794 /* no supplementary picture */
5795 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5796 /* special case for last picture */
5797 if (s->low_delay == 0 && s->next_picture_ptr) {
5798 if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
5800 s->next_picture_ptr = NULL;
5808 //for advanced profile we may need to parse and unescape data
5809 if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5811 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE)
5813 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5814 const uint8_t *start, *end, *next;
/* Walk the start-code delimited BDUs: the frame data goes to buf2,
 * FIELD and SLICE units are unescaped into the slices[] array. */
5818 for (start = buf, end = buf + buf_size; next < end; start = next) {
5819 next = find_next_marker(start + 4, end);
5820 size = next - start - 4;
5821 if (size <= 0) continue;
5822 switch (AV_RB32(start)) {
5823 case VC1_CODE_FRAME:
5826 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5828 case VC1_CODE_FIELD: {
5830 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5834 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5835 if (!slices[n_slices].buf)
5837 buf_size3 = vc1_unescape_buffer(start + 4, size,
5838 slices[n_slices].buf);
5839 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5841 /* assuming that the field marker is at the exact middle,
5842 hope it's correct */
5843 slices[n_slices].mby_start = s->mb_height >> 1;
5844 n_slices1 = n_slices - 1; // index of the last slice of the first field
5848 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5849 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5850 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5851 ff_vc1_decode_entry_point(avctx, v, &s->gb);
5853 case VC1_CODE_SLICE: {
5855 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5859 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5860 if (!slices[n_slices].buf)
5862 buf_size3 = vc1_unescape_buffer(start + 4, size,
5863 slices[n_slices].buf);
5864 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
/* Slice headers carry their starting MB row explicitly (9 bits). */
5866 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5872 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5873 const uint8_t *divider;
5876 divider = find_next_marker(buf, buf + buf_size);
5877 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5878 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5880 } else { // found field marker, unescape second field
5881 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5885 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5886 if (!slices[n_slices].buf)
5888 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5889 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5891 slices[n_slices].mby_start = s->mb_height >> 1;
5892 n_slices1 = n_slices - 1;
5895 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5897 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5899 init_get_bits(&s->gb, buf2, buf_size2*8);
5901 init_get_bits(&s->gb, buf, buf_size*8);
/* Sprite (WMVP/WVP2) streams carry two flag bits before the picture data. */
5903 if (v->res_sprite) {
5904 v->new_sprite = !get_bits1(&s->gb);
5905 v->two_sprites = get_bits1(&s->gb);
5906 /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5907 we're using the sprite compositor. These are intentionally kept separate
5908 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5909 the vc1 one for WVP2 */
5910 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5911 if (v->new_sprite) {
5912 // switch AVCodecContext parameters to those of the sprites
5913 avctx->width = avctx->coded_width = v->sprite_width;
5914 avctx->height = avctx->coded_height = v->sprite_height;
/* Tear down and reinitialize if the coded size changed mid-stream. */
5921 if (s->context_initialized &&
5922 (s->width != avctx->coded_width ||
5923 s->height != avctx->coded_height)) {
5924 ff_vc1_decode_end(avctx);
5927 if (!s->context_initialized) {
5928 if (ff_msmpeg4_decode_init(avctx) < 0)
5930 if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5931 ff_MPV_common_end(s);
5935 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5937 if (v->profile == PROFILE_ADVANCED) {
5938 s->h_edge_pos = avctx->coded_width;
5939 s->v_edge_pos = avctx->coded_height;
5943 // do parse frame header
5944 v->pic_header_flag = 0;
5945 v->first_pic_header_flag = 1;
5946 if (v->profile < PROFILE_ADVANCED) {
5947 if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5951 if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5955 v->first_pic_header_flag = 0;
5957 if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5958 && s->pict_type != AV_PICTURE_TYPE_I) {
5959 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5963 // for skipping the frame
5964 s->current_picture.f->pict_type = s->pict_type;
5965 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5967 /* skip B-frames if we don't have reference frames */
5968 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
5971 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5972 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5973 avctx->skip_frame >= AVDISCARD_ALL) {
5977 if (s->next_p_frame_damaged) {
5978 if (s->pict_type == AV_PICTURE_TYPE_B)
5981 s->next_p_frame_damaged = 0;
5984 if (ff_MPV_frame_start(s, avctx) < 0) {
5988 // process pulldown flags
5989 s->current_picture_ptr->f->repeat_pict = 0;
5990 // Pulldown flags are only valid when 'broadcast' has been set.
5991 // So ticks_per_frame will be 2
5994 s->current_picture_ptr->f->repeat_pict = 1;
5995 } else if (v->rptfrm) {
5997 s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2;
6000 s->me.qpel_put = s->qdsp.put_qpel_pixels_tab;
6001 s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;
/* Hardware-accelerated path: hand the whole frame to the hwaccel. */
6003 if (avctx->hwaccel) {
6004 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
6006 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6008 if (avctx->hwaccel->end_frame(avctx) < 0)
6013 ff_mpeg_er_frame_start(s);
6015 v->bits = buf_size * 8;
6016 v->end_mb_x = s->mb_width;
/* Field pictures: double the linesizes so a field is addressed as a
 * half-height frame; undone after slice decoding below. */
6017 if (v->field_mode) {
6018 s->current_picture.f->linesize[0] <<= 1;
6019 s->current_picture.f->linesize[1] <<= 1;
6020 s->current_picture.f->linesize[2] <<= 1;
6022 s->uvlinesize <<= 1;
6024 mb_height = s->mb_height >> v->field_mode;
6027 av_log(v->s.avctx, AV_LOG_ERROR, "Invalid mb_height.\n");
/* Decode each slice: slice i runs from slices[i-1].mby_start to
 * slices[i].mby_start (or picture end); entry n_slices1+1 begins the
 * second field. */
6031 for (i = 0; i <= n_slices; i++) {
6032 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6033 if (v->field_mode <= 0) {
6034 av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6035 "picture boundary (%d >= %d)\n", i,
6036 slices[i - 1].mby_start, mb_height);
6039 v->second_field = 1;
6040 v->blocks_off = s->mb_width * s->mb_height << 1;
6041 v->mb_off = s->mb_stride * s->mb_height >> 1;
6043 v->second_field = 0;
6048 v->pic_header_flag = 0;
6049 if (v->field_mode && i == n_slices1 + 2) {
6050 if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6051 av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6052 if (avctx->err_recognition & AV_EF_EXPLODE)
6056 } else if (get_bits1(&s->gb)) {
6057 v->pic_header_flag = 1;
6058 if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6059 av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6060 if (avctx->err_recognition & AV_EF_EXPLODE)
6068 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6069 if (!v->field_mode || v->second_field)
6070 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6072 s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6073 ff_vc1_decode_blocks(v);
6075 s->gb = slices[i].gb;
6077 if (v->field_mode) {
6078 v->second_field = 0;
6079 s->current_picture.f->linesize[0] >>= 1;
6080 s->current_picture.f->linesize[1] >>= 1;
6081 s->current_picture.f->linesize[2] >>= 1;
6083 s->uvlinesize >>= 1;
6084 if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) {
6085 FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
6086 FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
6089 av_dlog(s->avctx, "Consumed %i/%i bits\n",
6090 get_bits_count(&s->gb), s->gb.size_in_bits);
6091 // if (get_bits_count(&s->gb) > buf_size * 8)
6094 ff_er_frame_end(&s->er);
6097 ff_MPV_frame_end(s);
/* Image codecs: restore the container output size and run the sprite
 * compositor; its result is returned instead of the raw sprite. */
6099 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6101 avctx->width = avctx->coded_width = v->output_width;
6102 avctx->height = avctx->coded_height = v->output_height;
6103 if (avctx->skip_frame >= AVDISCARD_NONREF)
6105 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6106 if (vc1_decode_sprites(v, &s->gb))
6109 if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
/* Output selection: B-frames / low delay return the current picture,
 * otherwise the (delayed) previous reference is returned. */
6113 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6114 if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
6116 ff_print_debug_info(s, s->current_picture_ptr);
6118 } else if (s->last_picture_ptr != NULL) {
6119 if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
6121 ff_print_debug_info(s, s->last_picture_ptr);
6128 for (i = 0; i < n_slices; i++)
6129 av_free(slices[i].buf);
6135 for (i = 0; i < n_slices; i++)
6136 av_free(slices[i].buf);
/* Profile id -> human-readable name mapping exported through the AVCodec
 * .profiles field; the FF_PROFILE_UNKNOWN entry terminates the list. */
6142 static const AVProfile profiles[] = {
6143 { FF_PROFILE_VC1_SIMPLE, "Simple" },
6144 { FF_PROFILE_VC1_MAIN, "Main" },
6145 { FF_PROFILE_VC1_COMPLEX, "Complex" },
6146 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6147 { FF_PROFILE_UNKNOWN },
/* Pixel formats offered to ff_get_format(): hardware-accelerated formats
 * first (each gated on its hwaccel being configured in), software format
 * last. NOTE(review): the tail of this list (VDPAU entry, software format,
 * terminator) is not visible in this excerpt. */
6150 static const enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[] = {
6151 #if CONFIG_VC1_DXVA2_HWACCEL
6152 AV_PIX_FMT_DXVA2_VLD,
6154 #if CONFIG_VC1_VAAPI_HWACCEL
6155 AV_PIX_FMT_VAAPI_VLD,
6157 #if CONFIG_VC1_VDPAU_HWACCEL
/* AVCodec registration for the SMPTE VC-1 decoder (advanced profile with
 * start-code delimited bitstream). CAP_DELAY: output is reordered around
 * B-frames, see vc1_decode_frame. */
6164 AVCodec ff_vc1_decoder = {
6166 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6167 .type = AVMEDIA_TYPE_VIDEO,
6168 .id = AV_CODEC_ID_VC1,
6169 .priv_data_size = sizeof(VC1Context),
6170 .init = vc1_decode_init,
6171 .close = ff_vc1_decode_end,
6172 .decode = vc1_decode_frame,
6173 .flush = ff_mpeg_flush,
6174 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6175 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6176 .profiles = NULL_IF_CONFIG_SMALL(profiles)
6179 #if CONFIG_WMV3_DECODER
/* AVCodec registration for WMV3 (Windows Media Video 9): same callbacks as
 * the VC-1 decoder; the codec id selects the WMV3 extradata parsing path
 * in vc1_decode_init. */
6180 AVCodec ff_wmv3_decoder = {
6182 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
6183 .type = AVMEDIA_TYPE_VIDEO,
6184 .id = AV_CODEC_ID_WMV3,
6185 .priv_data_size = sizeof(VC1Context),
6186 .init = vc1_decode_init,
6187 .close = ff_vc1_decode_end,
6188 .decode = vc1_decode_frame,
6189 .flush = ff_mpeg_flush,
6190 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6191 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6192 .profiles = NULL_IF_CONFIG_SMALL(profiles)
6196 #if CONFIG_WMV3IMAGE_DECODER
/* AVCodec registration for WMV3 Image (WMVP): uses the sprite compositor,
 * so it flushes via vc1_sprite_flush and has no CAP_DELAY (no B-frame
 * reordering). NOTE(review): the pix_fmts initializer's entries and the
 * closing braces are not visible in this excerpt. */
6197 AVCodec ff_wmv3image_decoder = {
6198 .name = "wmv3image",
6199 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
6200 .type = AVMEDIA_TYPE_VIDEO,
6201 .id = AV_CODEC_ID_WMV3IMAGE,
6202 .priv_data_size = sizeof(VC1Context),
6203 .init = vc1_decode_init,
6204 .close = ff_vc1_decode_end,
6205 .decode = vc1_decode_frame,
6206 .capabilities = CODEC_CAP_DR1,
6207 .flush = vc1_sprite_flush,
6208 .pix_fmts = (const enum AVPixelFormat[]) {
6215 #if CONFIG_VC1IMAGE_DECODER
6216 AVCodec ff_vc1image_decoder = {
6218 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
6219 .type = AVMEDIA_TYPE_VIDEO,
6220 .id = AV_CODEC_ID_VC1IMAGE,
6221 .priv_data_size = sizeof(VC1Context),
6222 .init = vc1_decode_init,
6223 .close = ff_vc1_decode_end,
6224 .decode = vc1_decode_frame,
6225 .capabilities = CODEC_CAP_DR1,
6226 .flush = vc1_sprite_flush,
6227 .pix_fmts = (const enum AVPixelFormat[]) {