2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of FFmpeg.
9 * FFmpeg is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * FFmpeg is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with FFmpeg; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
31 #include "error_resilience.h"
33 #include "mpegutils.h"
34 #include "mpegvideo.h"
36 #include "h264chroma.h"
40 #include "vc1acdata.h"
41 #include "msmpeg4data.h"
44 #include "vdpau_internal.h"
45 #include "libavutil/avassert.h"
50 #define MB_INTRA_VLC_BITS 9
54 // offset tables for interlaced picture MVDATA decoding
/* offset_table1[k]: 0 followed by powers of two (2^(k-1));
 * offset_table2[k]: 0 followed by (2^k - 1). Which table applies is
 * selected elsewhere during interlaced MV decoding — not visible in
 * this chunk; see SMPTE 421M MVDATA semantics. */
55 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
56 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
58 /***********************************************************************/
60 * @name VC-1 Bitplane decoding
/* Set up s->dest[] block pointers for the current MB row via
 * ff_init_block_index(), then, for field pictures, advance each plane
 * pointer by one line so the second coded field lands on the correct
 * interleaved rows.
 * NOTE(review): this chunk is an elided view — braces/lines are missing
 * from the body below; do not treat it as compilable as-is. */
66 static void init_block_index(VC1Context *v)
68     MpegEncContext *s = &v->s;
69     ff_init_block_index(s);
/* second_field XOR tff selects which field owns the even lines;
 * when they match, shift dest down one line in all three planes */
70     if (v->field_mode && !(v->second_field ^ v->tff)) {
71         s->dest[0] += s->current_picture_ptr->f->linesize[0];
72         s->dest[1] += s->current_picture_ptr->f->linesize[1];
73         s->dest[2] += s->current_picture_ptr->f->linesize[2];
77 /** @} */ //Bitplane group
/* Write the delayed, overlap-filtered IDCT blocks (topleft / top MB) to the
 * picture planes with put_signed_pixels_clamped(), then rotate the four
 * delayed block-buffer indices. The put loop trails the decode loop by one
 * MB row and one MB column (see the in-body comment).
 * NOTE(review): elided view — several call-argument lines and closing
 * braces are missing below. */
79 static void vc1_put_signed_blocks_clamped(VC1Context *v)
81     MpegEncContext *s = &v->s;
82     int topleft_mb_pos, top_mb_pos;
83     int stride_y, fieldtx = 0;
86     /* The put pixels loop is always one MB row behind the decoding loop,
87      * because we can only put pixels when overlap filtering is done, and
88      * for filtering of the bottom edge of a MB, we need the next MB row
90      * Within the row, the put pixels loop is also one MB col behind the
91      * decoding loop. The reason for this is again, because for filtering
92      * of the right MB edge, we need the next MB present. */
93     if (!s->first_slice_line) {
95         topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
/* fieldtx doubles the luma stride for field-transformed MBs in
 * interlaced frame pictures */
96         if (v->fcm == ILACE_FRAME)
97             fieldtx = v->fieldtx_plane[topleft_mb_pos];
98         stride_y = s->linesize << fieldtx;
99         v_dist = (16 - fieldtx) >> (fieldtx == 0);
/* emit the six 8x8 blocks (4 luma + U + V) of the top-left MB */
100         s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
101                                           s->dest[0] - 16 * s->linesize - 16,
103         s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
104                                           s->dest[0] - 16 * s->linesize - 8,
106         s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
107                                           s->dest[0] - v_dist * s->linesize - 16,
109         s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
110                                           s->dest[0] - v_dist * s->linesize - 8,
112         s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
113                                           s->dest[1] - 8 * s->uvlinesize - 8,
115         s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
116                                           s->dest[2] - 8 * s->uvlinesize - 8,
/* at the right picture edge there is no next MB column, so also flush
 * the MB directly above */
119         if (s->mb_x == s->mb_width - 1) {
120             top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
121             if (v->fcm == ILACE_FRAME)
122                 fieldtx = v->fieldtx_plane[top_mb_pos];
123             stride_y = s->linesize << fieldtx;
124             v_dist = fieldtx ? 15 : 8;
125             s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
126                                               s->dest[0] - 16 * s->linesize,
128             s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
129                                               s->dest[0] - 16 * s->linesize + 8,
131             s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
132                                               s->dest[0] - v_dist * s->linesize,
134             s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
135                                               s->dest[0] - v_dist * s->linesize + 8,
137             s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
138                                               s->dest[1] - 8 * s->uvlinesize,
140             s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
141                                               s->dest[2] - 8 * s->uvlinesize,
/* advance an index with wraparound over the ring of allocated block
 * buffers (macro body partially elided in this view) */
146 #define inc_blk_idx(idx) do { \
148         if (idx >= v->n_allocated_blks) \
152     inc_blk_idx(v->topleft_blk_idx);
153     inc_blk_idx(v->top_blk_idx);
154     inc_blk_idx(v->left_blk_idx);
155     inc_blk_idx(v->cur_blk_idx);
/* In-loop deblocking for an intra MB: vertical filter on the top edge
 * (16-wide luma, 8-wide chroma) when not on the first slice line,
 * horizontal filters on the left edges, plus the internal luma edge and
 * the bottom-row special case. pq is the picture quantizer driving
 * filter strength.
 * NOTE(review): elided view — loop braces and some lines are missing. */
158 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
160     MpegEncContext *s = &v->s;
162     if (!s->first_slice_line) {
163         v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
165             v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
166             v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
/* j = 0,1 selects the U then V plane via dest[j + 1] */
167         for (j = 0; j < 2; j++) {
168             v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
170                 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* internal horizontal luma edge, 8 lines into the MB */
173     v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
175     if (s->mb_y == s->end_mb_y - 1) {
177             v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
178             v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
179             v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
181         v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/* Delayed variant of the intra-MB loop filter: runs one row and one
 * column behind the overlap filter (i.e. two behind the decode loop),
 * hence all the -16/-32 line offsets. Handles the right-edge column and
 * the final row (s->mb_y == s->end_mb_y) as special flush cases.
 * NOTE(review): elided view — braces and some lines are missing. */
185 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
187     MpegEncContext *s = &v->s;
190     /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
191      * means it runs two rows/cols behind the decoding loop. */
192     if (!s->first_slice_line) {
/* need at least two decoded rows before filtering the -16 row */
194             if (s->mb_y >= s->start_mb_y + 2) {
195                 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
198                     v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
199                     v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
200                 for (j = 0; j < 2; j++) {
201                     v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
203                         v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
207             v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
/* right picture edge: also filter the column directly above/beside,
 * since no further MB column will trigger it */
210         if (s->mb_x == s->mb_width - 1) {
211             if (s->mb_y >= s->start_mb_y + 2) {
212                 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
215                     v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
216                     v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
217                 for (j = 0; j < 2; j++) {
218                     v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
220                         v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
224             v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
/* last row: flush the remaining horizontal edges that the delayed loop
 * would otherwise never reach */
227     if (s->mb_y == s->end_mb_y) {
230                 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
231                 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
233                 for (j = 0; j < 2; j++) {
234                     v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
239         if (s->mb_x == s->mb_width - 1) {
241                 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
242                 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
244                 for (j = 0; j < 2; j++) {
245                     v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Conditional overlap smoothing for intra blocks (advanced profile):
 * applies h/v overlap-transform filters between 8x8 block pairs,
 * gated by v->condover / pq >= 9 / the per-MB over_flags_plane bitplane.
 * H overlap runs first within an MB; V overlap trails by one MB column,
 * H by one MB row (see in-body comment).
 * NOTE(review): elided view — early return, braces, and some lines are
 * missing below. */
253 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
255     MpegEncContext *s = &v->s;
/* CONDOVER_NONE: overlap smoothing disabled for this picture */
258     if (v->condover == CONDOVER_NONE)
261     mb_pos = s->mb_x + s->mb_y * s->mb_stride;
263     /* Within a MB, the horizontal overlap always runs before the vertical.
264      * To accomplish that, we run the H on left and internal borders of the
265      * currently decoded MB. Then, we wait for the next overlap iteration
266      * to do H overlap on the right edge of this MB, before moving over and
267      * running the V overlap. Therefore, the V overlap makes us trail by one
268      * MB col and the H overlap filter makes us trail by one MB row. This
269      * is reflected in the time at which we run the put_pixels loop. */
270     if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
/* H overlap across the left MB boundary (left MB's right blocks vs
 * current MB's left blocks), luma pairs then chroma unless grayscale */
271         if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
272                         v->over_flags_plane[mb_pos - 1])) {
273             v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
274                                       v->block[v->cur_blk_idx][0]);
275             v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
276                                       v->block[v->cur_blk_idx][2]);
277             if (!(s->flags & CODEC_FLAG_GRAY)) {
278                 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
279                                           v->block[v->cur_blk_idx][4]);
280                 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
281                                           v->block[v->cur_blk_idx][5]);
/* internal H overlap between the current MB's own block columns */
284         v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
285                                   v->block[v->cur_blk_idx][1]);
286         v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
287                                   v->block[v->cur_blk_idx][3]);
/* right picture edge: run the trailing V overlap for the current MB now */
289         if (s->mb_x == s->mb_width - 1) {
290             if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
291                                          v->over_flags_plane[mb_pos - s->mb_stride])) {
292                 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
293                                           v->block[v->cur_blk_idx][0]);
294                 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
295                                           v->block[v->cur_blk_idx][1]);
296                 if (!(s->flags & CODEC_FLAG_GRAY)) {
297                     v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
298                                               v->block[v->cur_blk_idx][4]);
299                     v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
300                                               v->block[v->cur_blk_idx][5]);
303             v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
304                                       v->block[v->cur_blk_idx][2]);
305             v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
306                                       v->block[v->cur_blk_idx][3]);
/* trailing V overlap for the MB to the left (one column behind).
 * NOTE(review): this gate omits the `v->pq >= 9` term that the other
 * three gates include — looks intentional in upstream but worth
 * confirming against the reference decoder. */
309     if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
310         if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
311                                      v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
312             v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
313                                       v->block[v->left_blk_idx][0]);
314             v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
315                                       v->block[v->left_blk_idx][1]);
316             if (!(s->flags & CODEC_FLAG_GRAY)) {
317                 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
318                                           v->block[v->left_blk_idx][4]);
319                 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
320                                           v->block[v->left_blk_idx][5]);
323         v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
324                                   v->block[v->left_blk_idx][2]);
325         v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
326                                   v->block[v->left_blk_idx][3]);
330 /** Do motion compensation over 1 macroblock
331 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
/* Motion compensation for a whole 16x16 macroblock with a single MV.
 * dir: 0 = forward (last picture), 1 = backward (next picture).
 * Derives chroma MVs from the luma MV, clips source coordinates,
 * falls back to emulated_edge_mc near picture borders, and applies
 * range-reduction scaling and intensity compensation (luty/lutuv LUTs)
 * when active.
 * NOTE(review): elided view — returns, else-branches, braces, and some
 * declarations are missing from the body below. */
333 static void vc1_mc_1mv(VC1Context *v, int dir)
335     MpegEncContext *s = &v->s;
336     H264ChromaContext *h264chroma = &v->h264chroma;
337     uint8_t *srcY, *srcU, *srcV;
338     int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
339     int v_edge_pos = s->v_edge_pos >> v->field_mode;
341     uint8_t (*luty)[256], (*lutuv)[256];
/* bail out when the reference picture that would be read is absent */
344     if ((!v->field_mode ||
345          (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
346         !v->s.last_picture.f->data[0])
349     mx = s->mv[dir][0][0];
350     my = s->mv[dir][0][1];
352     // store motion vectors for further use in B frames
353     if (s->pict_type == AV_PICTURE_TYPE_P) {
354         for (i = 0; i < 4; i++) {
355             s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
356             s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
/* derive chroma MV: halve the luma MV, rounding 3/4-pel up */
360     uvmx = (mx + ((mx & 3) == 3)) >> 1;
361     uvmy = (my + ((my & 3) == 3)) >> 1;
362     v->luma_mv[s->mb_x][0] = uvmx;
363     v->luma_mv[s->mb_x][1] = uvmy;
366         v->cur_field_type != v->ref_field_type[dir]) {
/* opposite-field reference: bias vertical MVs by +/-2 qpel units */
367         my   = my   - 2 + 4 * v->cur_field_type;
368         uvmy = uvmy - 2 + 4 * v->cur_field_type;
371     // fastuvmc shall be ignored for interlaced frame picture
372     if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
/* round chroma MV toward zero to the nearest even qpel position */
373         uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
374         uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
/* pick the reference planes and IC LUTs: current picture (same-frame
 * opposite field on the second field), else last, else next picture */
377     if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
378         srcY = s->current_picture.f->data[0];
379         srcU = s->current_picture.f->data[1];
380         srcV = s->current_picture.f->data[2];
382         lutuv  = v->curr_lutuv;
383         use_ic = *v->curr_use_ic;
385             srcY = s->last_picture.f->data[0];
386             srcU = s->last_picture.f->data[1];
387             srcV = s->last_picture.f->data[2];
389             lutuv  = v->last_lutuv;
390             use_ic = v->last_use_ic;
393             srcY = s->next_picture.f->data[0];
394             srcU = s->next_picture.f->data[1];
395             srcV = s->next_picture.f->data[2];
397             lutuv  = v->next_lutuv;
398             use_ic = v->next_use_ic;
401     if (!srcY || !srcU) {
402         av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
/* integer source position: MB origin plus qpel MV truncated to pixels */
406     src_x   = s->mb_x * 16 + (mx   >> 2);
407     src_y   = s->mb_y * 16 + (my   >> 2);
408     uvsrc_x = s->mb_x *  8 + (uvmx >> 2);
409     uvsrc_y = s->mb_y *  8 + (uvmy >> 2);
/* simple/main profile clip to MB grid; advanced clips to coded size */
411     if (v->profile != PROFILE_ADVANCED) {
412         src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
413         src_y   = av_clip(  src_y, -16, s->mb_height * 16);
414         uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
415         uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
417         src_x   = av_clip(  src_x, -17, s->avctx->coded_width);
418         src_y   = av_clip(  src_y, -18, s->avctx->coded_height + 1);
419         uvsrc_x = av_clip(uvsrc_x,  -8, s->avctx->coded_width  >> 1);
420         uvsrc_y = av_clip(uvsrc_y,  -8, s->avctx->coded_height >> 1);
423     srcY += src_y   * s->linesize   + src_x;
424     srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
425     srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* bottom reference field: step down one line in every plane */
427     if (v->field_mode && v->ref_field_type[dir]) {
428         srcY += s->current_picture_ptr->f->linesize[0];
429         srcU += s->current_picture_ptr->f->linesize[1];
430         srcV += s->current_picture_ptr->f->linesize[2];
433     /* for grayscale we should not try to read from unknown area */
434     if (s->flags & CODEC_FLAG_GRAY) {
435         srcU = s->edge_emu_buffer + 18 * s->linesize;
436         srcV = s->edge_emu_buffer + 18 * s->linesize;
/* slow path: range reduction, intensity compensation, or the MC window
 * crosses a picture edge — copy through edge_emu_buffer first */
439     if (v->rangeredfrm || use_ic
440         || s->h_edge_pos < 22 || v_edge_pos < 22
441         || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
442         || (unsigned)(src_y - 1)        > v_edge_pos    - (my&3) - 16 - 3) {
443         uint8_t *ubuf = s->edge_emu_buffer + 19 * s->linesize;
444         uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
446         srcY -= s->mspel * (1 + s->linesize);
447         s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
448                                  s->linesize, s->linesize,
449                                  17 + s->mspel * 2, 17 + s->mspel * 2,
450                                  src_x - s->mspel, src_y - s->mspel,
451                                  s->h_edge_pos, v_edge_pos);
452         srcY = s->edge_emu_buffer;
453         s->vdsp.emulated_edge_mc(ubuf, srcU,
454                                  s->uvlinesize, s->uvlinesize,
457                                  s->h_edge_pos >> 1, v_edge_pos >> 1);
458         s->vdsp.emulated_edge_mc(vbuf, srcV,
459                                  s->uvlinesize, s->uvlinesize,
462                                  s->h_edge_pos >> 1, v_edge_pos >> 1);
465         /* if we deal with range reduction we need to scale source blocks */
466         if (v->rangeredfrm) {
471             for (j = 0; j < 17 + s->mspel * 2; j++) {
472                 for (i = 0; i < 17 + s->mspel * 2; i++)
473                     src[i] = ((src[i] - 128) >> 1) + 128;
478             for (j = 0; j < 9; j++) {
479                 for (i = 0; i < 9; i++) {
480                     src[i]  = ((src[i]  - 128) >> 1) + 128;
481                     src2[i] = ((src2[i] - 128) >> 1) + 128;
483                 src  += s->uvlinesize;
484                 src2 += s->uvlinesize;
487         /* if we deal with intensity compensation we need to scale source blocks */
/* f selects the per-field LUT: fixed to the reference field in field
 * mode, otherwise alternates per source line parity */
493             for (j = 0; j < 17 + s->mspel * 2; j++) {
494                 int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
495                 for (i = 0; i < 17 + s->mspel * 2; i++)
496                     src[i] = luty[f][src[i]];
501             for (j = 0; j < 9; j++) {
502                 int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
503                 for (i = 0; i < 9; i++) {
504                     src[i]  = lutuv[f][src[i]];
505                     src2[i] = lutuv[f][src2[i]];
507                 src  += s->uvlinesize;
508                 src2 += s->uvlinesize;
511         srcY += s->mspel * (1 + s->linesize);
/* luma MC: qpel (mspel) path vs. half-pel path */
515         dxy = ((my & 3) << 2) | (mx & 3);
516         v->vc1dsp.put_vc1_mspel_pixels_tab[0][dxy](s->dest[0]    , srcY    , s->linesize, v->rnd);
517     } else { // hpel mc - always used for luma
518         dxy = (my & 2) | ((mx & 2) >> 1);
520             s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
522             s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
525     if (s->flags & CODEC_FLAG_GRAY) return;
526     /* Chroma MC always uses qpel bilinear */
527     uvmx = (uvmx & 3) << 1;
528     uvmy = (uvmy & 3) << 1;
530         h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
531         h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
533         v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
534         v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Median of four ints: average of the two middle values, computed as
 * (min of the larger pair + max of the smaller pair) / 2.
 * NOTE(review): elided view — the outer `if (a < b)` branch line that
 * selects between these two pairs is missing here. */
538 static inline int median4(int a, int b, int c, int d)
541         if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
542         else       return (FFMIN(b, c) + FFMAX(a, d)) / 2;
544         if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
545         else       return (FFMIN(a, c) + FFMAX(b, d)) / 2;
549 /** Do motion compensation for 4-MV macroblock - luminance block
/* Motion compensation for one 8x8 luma block (n = 0..3) of a 4-MV MB.
 * dir selects forward/backward reference; avg selects put vs. avg DSP
 * functions (B-frame bidirectional averaging). Handles interlaced-frame
 * field MVs (fieldmv doubles the effective stride), MV pull-back for
 * ILACE_FRAME, edge emulation, range reduction, and intensity
 * compensation.
 * NOTE(review): elided view — returns, else-branches, and braces are
 * missing from the body below. */
551 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
553     MpegEncContext *s = &v->s;
555     int dxy, mx, my, src_x, src_y;
557     int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
558     int v_edge_pos = s->v_edge_pos >> v->field_mode;
559     uint8_t (*luty)[256];
562     if ((!v->field_mode ||
563          (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
564         !v->s.last_picture.f->data[0])
567     mx = s->mv[dir][n][0];
568     my = s->mv[dir][n][1];
/* choose reference plane / IC LUT as in vc1_mc_1mv */
571     if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
572         srcY = s->current_picture.f->data[0];
574         use_ic = *v->curr_use_ic;
576             srcY = s->last_picture.f->data[0];
578             use_ic = v->last_use_ic;
581             srcY = s->next_picture.f->data[0];
583             use_ic = v->next_use_ic;
587         av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
592         if (v->cur_field_type != v->ref_field_type[dir])
593             my = my - 2 + 4 * v->cur_field_type;
/* after the 4th block of a P field MB, derive the MV stored for B-frame
 * prediction from the dominant field's MVs (median/mid/avg by count) */
596     if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
597         int same_count = 0, opp_count = 0, k;
598         int chosen_mv[2][4][2], f;
600         for (k = 0; k < 4; k++) {
601             f = v->mv_f[0][s->block_index[k] + v->blocks_off];
602             chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
603             chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
607         f = opp_count > same_count;
608         switch (f ? opp_count : same_count) {
610             tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
611                          chosen_mv[f][2][0], chosen_mv[f][3][0]);
612             ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
613                          chosen_mv[f][2][1], chosen_mv[f][3][1]);
616             tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
617             ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
620             tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
621             ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
626         s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
627         s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
628         for (k = 0; k < 4; k++)
629             v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* interlaced frame: pull the MV back inside the coded area */
632     if (v->fcm == ILACE_FRAME) {    // not sure if needed for other types of picture
634         int width  = s->avctx->coded_width;
635         int height = s->avctx->coded_height >> 1;
636         if (s->pict_type == AV_PICTURE_TYPE_P) {
637             s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
638             s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
640         qx = (s->mb_x * 16) + (mx >> 2);
641         qy = (s->mb_y *  8) + (my >> 3);
646             mx -= 4 * (qx - width);
649         else if (qy > height + 1)
650             my -= 8 * (qy - height - 1);
/* destination offset of this 8x8 block within the 16x16 MB; field MVs
 * interleave the two lower blocks one line down instead of 8 */
653     if ((v->fcm == ILACE_FRAME) && fieldmv)
654         off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
656         off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
658     src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
660         src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
662         src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
664     if (v->profile != PROFILE_ADVANCED) {
665         src_x = av_clip(src_x, -16, s->mb_width  * 16);
666         src_y = av_clip(src_y, -16, s->mb_height * 16);
668         src_x = av_clip(src_x, -17, s->avctx->coded_width);
669         if (v->fcm == ILACE_FRAME) {
671                 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
673                 src_y = av_clip(src_y, -18, s->avctx->coded_height);
675             src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
679     srcY += src_y * s->linesize + src_x;
680     if (v->field_mode && v->ref_field_type[dir])
681         srcY += s->current_picture_ptr->f->linesize[0];
/* field MVs need the edge position adjusted by source-line parity */
683     if (fieldmv && !(src_y & 1))
685     if (fieldmv && (src_y & 1) && src_y < 4)
687     if (v->rangeredfrm || use_ic
688         || s->h_edge_pos < 13 || v_edge_pos < 23
689         || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
690         || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
691         srcY -= s->mspel * (1 + (s->linesize << fieldmv));
692         /* check emulate edge stride and offset */
693         s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
694                                  s->linesize, s->linesize,
695                                  9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
696                                  src_x - s->mspel, src_y - (s->mspel << fieldmv),
697                                  s->h_edge_pos, v_edge_pos);
698         srcY = s->edge_emu_buffer;
699         /* if we deal with range reduction we need to scale source blocks */
700         if (v->rangeredfrm) {
705             for (j = 0; j < 9 + s->mspel * 2; j++) {
706                 for (i = 0; i < 9 + s->mspel * 2; i++)
707                     src[i] = ((src[i] - 128) >> 1) + 128;
708                 src += s->linesize << fieldmv;
711         /* if we deal with intensity compensation we need to scale source blocks */
717             for (j = 0; j < 9 + s->mspel * 2; j++) {
718                 int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
719                 for (i = 0; i < 9 + s->mspel * 2; i++)
720                     src[i] = luty[f][src[i]];
721                 src += s->linesize << fieldmv;
724         srcY += s->mspel * (1 + (s->linesize << fieldmv));
/* qpel (mspel) path with put/avg selection, else half-pel */
728         dxy = ((my & 3) << 2) | (mx & 3);
730             v->vc1dsp.avg_vc1_mspel_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
732             v->vc1dsp.put_vc1_mspel_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
733     } else { // hpel mc - always used for luma
734         dxy = (my & 2) | ((mx & 2) >> 1);
736             s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
738             s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Derive the chroma MV (*tx, *ty) from up to four luma MVs.
 * a[k] != flag marks block k as unusable (intra, or wrong field);
 * count[idx] is the number of usable blocks (population count of the
 * inverted 4-bit mask). 4 usable -> median4, 3 -> mid_pred of the three
 * usable ones, 2 -> average of the two usable ones. Returns the usable
 * count (return statements elided in this view). */
742 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
745     static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
/* build the "unusable" bitmask; bit k set when block k mismatches flag */
747     idx =  ((a[3] != flag) << 3)
748          | ((a[2] != flag) << 2)
749          | ((a[1] != flag) << 1)
752         *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
753         *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
/* exactly one unusable block: mid_pred over the other three */
755     } else if (count[idx] == 1) {
758             *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
759             *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
762             *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
763             *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
766             *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
767             *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
770             *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
771             *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
/* two unusable: locate the two usable indices t1, t2 and average them */
774     } else if (count[idx] == 2) {
776         for (i = 0; i < 3; i++)
781         for (i = t1 + 1; i < 4; i++)
786         *tx = (mvx[t1] + mvx[t2]) / 2;
787         *ty = (mvy[t1] + mvy[t2]) / 2;
795 /** Do motion compensation for 4-MV macroblock - both chroma blocks
/* Chroma motion compensation for a 4-MV macroblock: derives one chroma
 * MV from the four luma MVs via get_chroma_mv() (using intra flags, or
 * field flags with dominant-field selection in 2-ref field pictures),
 * then performs 8x8 U and V MC with edge emulation, range reduction and
 * intensity compensation as needed.
 * NOTE(review): elided view — returns, else-branches, and braces are
 * missing from the body below. */
797 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
799     MpegEncContext *s = &v->s;
800     H264ChromaContext *h264chroma = &v->h264chroma;
801     uint8_t *srcU, *srcV;
802     int uvmx, uvmy, uvsrc_x, uvsrc_y;
803     int k, tx = 0, ty = 0;
804     int mvx[4], mvy[4], intra[4], mv_f[4];
806     int chroma_ref_type = v->cur_field_type;
807     int v_edge_pos = s->v_edge_pos >> v->field_mode;
808     uint8_t (*lutuv)[256];
811     if (!v->field_mode && !v->s.last_picture.f->data[0])
813     if (s->flags & CODEC_FLAG_GRAY)
/* gather the four luma MVs plus intra / field flags */
816     for (k = 0; k < 4; k++) {
817         mvx[k]   = s->mv[dir][k][0];
818         mvy[k]   = s->mv[dir][k][1];
819         intra[k] = v->mb_type[0][s->block_index[k]];
821             mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
824     /* calculate chroma MV vector from four luma MVs */
825     if (!v->field_mode || (v->field_mode && !v->numref)) {
826         valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
827         chroma_ref_type = v->reffield;
/* all-intra MB: store zero MV and skip chroma MC entirely */
829             s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
830             s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
831             v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
832             return; //no need to do MC for intra blocks
/* 2-ref field picture: pick the dominant field among the four MVs */
836         if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
838         valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
840             chroma_ref_type = !v->cur_field_type;
842     if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
844     s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
845     s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
/* halve to chroma resolution, rounding 3/4-pel up */
846     uvmx = (tx + ((tx & 3) == 3)) >> 1;
847     uvmy = (ty + ((ty & 3) == 3)) >> 1;
849     v->luma_mv[s->mb_x][0] = uvmx;
850     v->luma_mv[s->mb_x][1] = uvmy;
/* fastuvmc rounding toward zero to even qpel positions */
853         uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
854         uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
856     // Field conversion bias
857     if (v->cur_field_type != chroma_ref_type)
858         uvmy += 2 - 4 * chroma_ref_type;
860     uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
861     uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
863     if (v->profile != PROFILE_ADVANCED) {
864         uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width  * 8);
865         uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
867         uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width  >> 1);
868         uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
/* select reference chroma planes and IC LUT (same scheme as luma) */
872     if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
873         srcU = s->current_picture.f->data[1];
874         srcV = s->current_picture.f->data[2];
875         lutuv = v->curr_lutuv;
876         use_ic = *v->curr_use_ic;
878             srcU = s->last_picture.f->data[1];
879             srcV = s->last_picture.f->data[2];
880             lutuv = v->last_lutuv;
881             use_ic = v->last_use_ic;
884             srcU = s->next_picture.f->data[1];
885             srcV = s->next_picture.f->data[2];
886             lutuv = v->next_lutuv;
887             use_ic = v->next_use_ic;
891         av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
895     srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
896     srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* bottom reference field: step down one chroma line */
899         if (chroma_ref_type) {
900             srcU += s->current_picture_ptr->f->linesize[1];
901             srcV += s->current_picture_ptr->f->linesize[2];
/* slow path through edge_emu_buffer when IC/range-red/edge applies */
905     if (v->rangeredfrm || use_ic
906         || s->h_edge_pos < 18 || v_edge_pos < 18
907         || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
908         || (unsigned)uvsrc_y > (v_edge_pos    >> 1) - 9) {
909         s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
910                                  s->uvlinesize, s->uvlinesize,
911                                  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
912                                  s->h_edge_pos >> 1, v_edge_pos >> 1);
913         s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
914                                  s->uvlinesize, s->uvlinesize,
915                                  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
916                                  s->h_edge_pos >> 1, v_edge_pos >> 1);
917         srcU = s->edge_emu_buffer;
918         srcV = s->edge_emu_buffer + 16;
920         /* if we deal with range reduction we need to scale source blocks */
921         if (v->rangeredfrm) {
927             for (j = 0; j < 9; j++) {
928                 for (i = 0; i < 9; i++) {
929                     src[i]  = ((src[i]  - 128) >> 1) + 128;
930                     src2[i] = ((src2[i] - 128) >> 1) + 128;
932                 src  += s->uvlinesize;
933                 src2 += s->uvlinesize;
936         /* if we deal with intensity compensation we need to scale source blocks */
943             for (j = 0; j < 9; j++) {
944                 int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
945                 for (i = 0; i < 9; i++) {
946                     src[i]  = lutuv[f][src[i]];
947                     src2[i] = lutuv[f][src2[i]];
949                 src  += s->uvlinesize;
950                 src2 += s->uvlinesize;
955     /* Chroma MC always uses qpel bilinear */
956     uvmx = (uvmx & 3) << 1;
957     uvmy = (uvmy & 3) << 1;
959         h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
960         h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
962         v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
963         v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
967 /** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
/* Chroma MC for 4-MV interlaced frame MBs: performs four independent
 * 4x4 U/V sub-block MCs. dir/dir2 give per-half prediction directions
 * (B-frames); avg selects averaging DSP functions. fieldmv doubles the
 * effective chroma stride for field-coded block MVs; s_rndtblfield
 * implements the field-MV chroma rounding from the spec.
 * NOTE(review): elided view — returns, else-branches, and braces are
 * missing from the body below. */
969 static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
971     MpegEncContext *s = &v->s;
972     H264ChromaContext *h264chroma = &v->h264chroma;
973     uint8_t *srcU, *srcV;
974     int uvsrc_x, uvsrc_y;
975     int uvmx_field[4], uvmy_field[4];
977     int fieldmv = v->blk_mv_type[s->block_index[0]];
978     static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
979     int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
980     int v_edge_pos = s->v_edge_pos >> 1;
982     uint8_t (*lutuv)[256];
984     if (s->flags & CODEC_FLAG_GRAY)
/* derive the four chroma sub-block MVs from the luma MVs */
987     for (i = 0; i < 4; i++) {
988         int d = i < 2 ? dir: dir2;
990         uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
/* field MVs use the spec rounding table, others simple halving */
993             uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
995             uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
998     for (i = 0; i < 4; i++) {
999         off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1000         uvsrc_x = s->mb_x * 8 + (i & 1) * 4           + (uvmx_field[i] >> 2);
1001         uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1002         // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1003         uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width  >> 1);
1004         uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
/* per-half direction: 1 -> backward (next picture), 0 -> forward */
1005         if (i < 2 ? dir : dir2) {
1006             srcU = s->next_picture.f->data[1];
1007             srcV = s->next_picture.f->data[2];
1008             lutuv  = v->next_lutuv;
1009             use_ic = v->next_use_ic;
1011             srcU = s->last_picture.f->data[1];
1012             srcV = s->last_picture.f->data[2];
1013             lutuv  = v->last_lutuv;
1014             use_ic = v->last_use_ic;
1018         srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1019         srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1020         uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1021         uvmy_field[i] = (uvmy_field[i] & 3) << 1;
/* adjust the vertical edge bound by source-line parity for field MVs */
1023         if (fieldmv && !(uvsrc_y & 1))
1024             v_edge_pos = (s->v_edge_pos >> 1) - 1;
1026         if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
1029             || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1030             || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1031             || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1032             s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
1033                                      s->uvlinesize, s->uvlinesize,
1034                                      5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1035                                      s->h_edge_pos >> 1, v_edge_pos);
1036             s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
1037                                      s->uvlinesize, s->uvlinesize,
1038                                      5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1039                                      s->h_edge_pos >> 1, v_edge_pos);
1040             srcU = s->edge_emu_buffer;
1041             srcV = s->edge_emu_buffer + 16;
1043             /* if we deal with intensity compensation we need to scale source blocks */
1046                 uint8_t *src, *src2;
1050                 for (j = 0; j < 5; j++) {
1051                     int f = (uvsrc_y + (j << fieldmv)) & 1;
1052                     for (i = 0; i < 5; i++) {
1053                         src[i]  = lutuv[f][src[i]];
1054                         src2[i] = lutuv[f][src2[i]];
1056                     src  += s->uvlinesize << fieldmv;
1057                     src2 += s->uvlinesize << fieldmv;
/* write the 4x4 sub-block: avg vs. put, rounded vs. no-rnd variants */
1063                 h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1064                 h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1066                 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1067                 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1071                 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1072                 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1074                 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1075                 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1081 /***********************************************************************/
1083 * @name VC-1 Block-level functions
1084 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1090 * @brief Get macroblock-level quantizer scale
/**
 * @brief Decode the macroblock-level quantizer scale (MQUANT) into 'mquant'.
 *
 * Expands in-place inside MB decoding loops; relies on locals 'mquant',
 * 'mqdiff', 'edges' and on 'v' (VC1Context), 's' (MpegEncContext),
 * 'gb' (GetBitContext) being in scope.
 *
 * Behavior per DQPROFILE:
 *  - ALL_MBS: either a 1-bit pq/altpq choice (dqbilevel), or a 3-bit
 *    MQDIFF added to pq, or (escape) a raw 5-bit mquant.
 *  - SINGLE_EDGE / DOUBLE_EDGES / FOUR_EDGES: build an 'edges' bitmask
 *    (bit0 = left, bit1 = top, bit2 = right, bit3 = bottom) and use
 *    altpq for MBs on the selected picture edges.
 * Invalid results (0 or > 31) are reported and overridden.
 *
 * NOTE(review): this extract has dropped interleaved source lines
 * (else branches / closing braces); read against the complete file.
 */
1092 #define GET_MQUANT() \
1093 if (v->dquantfrm) { \
1095 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1096 if (v->dqbilevel) { \
1097 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1099 mqdiff = get_bits(gb, 3); \
1101 mquant = v->pq + mqdiff; \
1103 mquant = get_bits(gb, 5); \
1106 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1107 edges = 1 << v->dqsbedge; \
1108 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1109 edges = (3 << v->dqsbedge) % 15; \
1110 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1112 if ((edges&1) && !s->mb_x) \
1113 mquant = v->altpq; \
1114 if ((edges&2) && s->first_slice_line) \
1115 mquant = v->altpq; \
1116 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1117 mquant = v->altpq; \
1118 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1119 mquant = v->altpq; \
1120 if (!mquant || mquant > 31) { \
1121 av_log(v->s.avctx, AV_LOG_ERROR, \
1122 "Overriding invalid mquant %d\n", mquant); \
1128 * @def GET_MVDATA(_dmv_x, _dmv_y)
1129 * @brief Get MV differentials
1130 * @see MVDATA decoding from 8.3.5.2, p(1)20
1131 * @param _dmv_x Horizontal differential for decoded MV
1132 * @param _dmv_y Vertical differential for decoded MV
/*
 * Reads one MVDATA VLC (table selected by s->mv_table_index) and decodes it:
 *  - index 0: zero differential ("last" MV), sets mb_has_coeffs accordingly;
 *  - index 35: escape — raw k_x-1+qs / k_y-1+qs bit differentials;
 *  - index 36: intra escape (handled in elided lines);
 *  - otherwise: split index into row/column (mod/div 6), read a variable
 *    number of extra bits per size_table[], then reconstruct the signed
 *    differential via offset_table[] and a sign-extension trick
 *    (sign = 0 - (val & 1); result = (sign ^ magnitude) - sign).
 * Relies on locals index, index1, val, sign, mb_has_coeffs in scope.
 * NOTE(review): extract is missing interleaved lines (braces/else paths).
 */
1134 #define GET_MVDATA(_dmv_x, _dmv_y) \
1135 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1136 VC1_MV_DIFF_VLC_BITS, 2); \
1138 mb_has_coeffs = 1; \
1141 mb_has_coeffs = 0; \
1144 _dmv_x = _dmv_y = 0; \
1145 } else if (index == 35) { \
1146 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1147 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1148 } else if (index == 36) { \
1153 index1 = index % 6; \
1154 if (!s->quarter_sample && index1 == 5) val = 1; \
1156 if (size_table[index1] - val > 0) \
1157 val = get_bits(gb, size_table[index1] - val); \
1159 sign = 0 - (val&1); \
1160 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1162 index1 = index / 6; \
1163 if (!s->quarter_sample && index1 == 5) val = 1; \
1165 if (size_table[index1] - val > 0) \
1166 val = get_bits(gb, size_table[index1] - val); \
1168 sign = 0 - (val & 1); \
1169 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
/**
 * Decode an interlaced-field MVDATA element.
 *
 * Fills *dmv_x / *dmv_y with the MV differentials and, for two-reference
 * field pictures (v->numref set), *pred_flag with the reference-field
 * prediction flag (carried in the LSB of the y component / of index1).
 * Uses offset_table1/offset_table2 (file-scope) for the magnitude bases
 * and v->dmvrange to extend the x/y ranges by one bit.
 *
 * NOTE(review): this extract has dropped interleaved source lines
 * (function brace, switch cases, else branches); comments reflect the
 * visible code only.
 */
1172 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1173 int *dmv_y, int *pred_flag)
1176 int extend_x = 0, extend_y = 0;
1177 GetBitContext *gb = &v->s.gb;
1180 const int* offs_tab;
/* one vs two reference fields selects the VLC bit count */
1183 bits = VC1_2REF_MVDATA_VLC_BITS;
1186 bits = VC1_1REF_MVDATA_VLC_BITS;
/* DMVRANGE extends the differential range per component */
1189 switch (v->dmvrange) {
1197 extend_x = extend_y = 1;
1200 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
/* escape path: raw k_x / k_y bit differentials */
1202 *dmv_x = get_bits(gb, v->k_x);
1203 *dmv_y = get_bits(gb, v->k_y);
/* two-reference: LSB of y is the prediction flag */
1206 *pred_flag = *dmv_y & 1;
1207 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1209 *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1214 av_assert0(index < esc);
1216 offs_tab = offset_table2;
1218 offs_tab = offset_table1;
/* horizontal component: index modulo-9 column */
1219 index1 = (index + 1) % 9;
1221 val = get_bits(gb, index1 + extend_x);
1222 sign = 0 -(val & 1);
1223 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1227 offs_tab = offset_table2;
1229 offs_tab = offset_table1;
/* vertical component: index divided-by-9 row; for two-reference
 * fields the LSB of index1 carries the prediction flag */
1230 index1 = (index + 1) / 9;
1231 if (index1 > v->numref) {
1232 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1233 sign = 0 - (val & 1);
1234 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1237 if (v->numref && pred_flag)
1238 *pred_flag = index1 & 1;
/**
 * Scale a horizontal MV predictor for same-field prediction in field
 * pictures. Scale factors, the zone-1 threshold and the zone offset are
 * looked up in ff_vc1_field_mvpred_scales[] by reference distance
 * (refdist for P, brfd/frfd for B depending on direction); the result
 * is clipped to the valid horizontal MV range [-range_x, range_x - 1].
 * NOTE(review): extract is missing the else branch/braces around the
 * zone-2 computation.
 */
1242 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1244 int scaledvalue, refdist;
1245 int scalesame1, scalesame2;
1246 int scalezone1_x, zone1offset_x;
1247 int table_index = dir ^ v->second_field;
1249 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1250 refdist = v->refdist;
1252 refdist = dir ? v->brfd : v->frfd;
1255 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1256 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1257 scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1258 zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
/* zone 1 (small MVs) uses scalesame1; larger MVs use scalesame2 +/- offset */
1263 if (FFABS(n) < scalezone1_x)
1264 scaledvalue = (n * scalesame1) >> 8;
1267 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1269 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1272 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/**
 * Vertical counterpart of scaleforsame_x(): scale a vertical MV predictor
 * for same-field prediction. Uses the vertical zone threshold/offset
 * (table rows 4 and 6) and a clip range that depends on whether the
 * current and reference fields have the same parity.
 */
1275 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1277 int scaledvalue, refdist;
1278 int scalesame1, scalesame2;
1279 int scalezone1_y, zone1offset_y;
1280 int table_index = dir ^ v->second_field;
1282 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1283 refdist = v->refdist;
1285 refdist = dir ? v->brfd : v->frfd;
1288 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1289 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1290 scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1291 zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1296 if (FFABS(n) < scalezone1_y)
1297 scaledvalue = (n * scalesame1) >> 8;
1300 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1302 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
/* asymmetric clip when current field is bottom and reference is top */
1306 if (v->cur_field_type && !v->ref_field_type[dir])
1307 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1309 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/**
 * Scale a horizontal MV predictor for opposite-field prediction in
 * B field pictures, using ff_vc1_b_field_mvpred_scales[] indexed by the
 * backward reference distance (clamped to 3). Result is clipped to
 * [-range_x, range_x - 1].
 */
1312 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1314 int scalezone1_x, zone1offset_x;
1315 int scaleopp1, scaleopp2, brfd;
1318 brfd = FFMIN(v->brfd, 3);
1319 scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1320 zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1321 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1322 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1327 if (FFABS(n) < scalezone1_x)
1328 scaledvalue = (n * scaleopp1) >> 8;
1331 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1333 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1336 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/**
 * Vertical counterpart of scaleforopp_x(): scale a vertical MV predictor
 * for opposite-field prediction in B field pictures. The final clip range
 * depends on current/reference field parity, as in scaleforsame_y().
 */
1339 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1341 int scalezone1_y, zone1offset_y;
1342 int scaleopp1, scaleopp2, brfd;
1345 brfd = FFMIN(v->brfd, 3);
1346 scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1347 zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1348 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1349 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1354 if (FFABS(n) < scalezone1_y)
1355 scaledvalue = (n * scaleopp1) >> 8;
1358 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1360 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1363 if (v->cur_field_type && !v->ref_field_type[dir]) {
1364 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1366 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/**
 * Dispatch same-field MV predictor scaling: for P pictures, second
 * fields, or forward prediction it routes to scaleforsame_x()/_y()
 * (the 'dim' selector and surrounding branch are in elided lines);
 * otherwise (B first field, backward) it applies the flat scalesame
 * factor from ff_vc1_b_field_mvpred_scales[0]. 'hpel' converts between
 * half-pel and quarter-pel MV units.
 */
1370 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1373 int brfd, scalesame;
1374 int hpel = 1 - v->s.quarter_sample;
1377 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1379 n = scaleforsame_y(v, i, n, dir) << hpel;
1381 n = scaleforsame_x(v, n, dir) << hpel;
1384 brfd = FFMIN(v->brfd, 3);
1385 scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1387 n = (n * scalesame >> 8) << hpel;
/**
 * Dispatch opposite-field MV predictor scaling: B first-field backward
 * prediction uses the zoned scaleforopp_x()/_y() helpers; all other
 * cases apply the flat scaleopp factor from ff_vc1_field_mvpred_scales[0]
 * indexed by reference distance. 'hpel' converts between half-pel and
 * quarter-pel MV units.
 */
1391 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1394 int refdist, scaleopp;
1395 int hpel = 1 - v->s.quarter_sample;
1398 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1400 n = scaleforopp_y(v, n, dir) << hpel;
1402 n = scaleforopp_x(v, n) << hpel;
1405 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1406 refdist = FFMIN(v->refdist, 3);
1408 refdist = dir ? v->brfd : v->frfd;
1409 scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1411 n = (n * scaleopp >> 8) << hpel;
1415 /** Predict and set motion vector
 *
 * Computes the median MV predictor from neighbours A (above),
 * B (above-right/left) and C (left), applies field-picture predictor
 * scaling, MV pullback (8.3.5.3.4) and hybrid prediction (8.3.5.3.5),
 * then stores the reconstructed MV (predictor + differential, wrapped
 * with the signed modulus of the MV range per 4.11) into s->mv and
 * current_picture.motion_val. mv1 != 0 duplicates the result over all
 * four 8x8 sub-blocks of a 1-MV macroblock.
 *
 * NOTE(review): this extract is missing many interleaved source lines
 * (else branches, braces, intra early-return path); comments describe
 * the visible code only.
 */
1417 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1418 int mv1, int r_x, int r_y, uint8_t* is_intra,
1419 int pred_flag, int dir)
1421 MpegEncContext *s = &v->s;
1422 int xy, wrap, off = 0;
1426 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1427 int opposite, a_f, b_f, c_f;
1428 int16_t field_predA[2];
1429 int16_t field_predB[2];
1430 int16_t field_predC[2];
1431 int a_valid, b_valid, c_valid;
1432 int hybridmv_thresh, y_bias = 0;
1434 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1435 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1439 /* scale MV difference to be quad-pel */
1440 dmv_x <<= 1 - s->quarter_sample;
1441 dmv_y <<= 1 - s->quarter_sample;
1443 wrap = s->b8_stride;
1444 xy = s->block_index[n];
/* intra block: zero out stored MVs for both directions */
1447 s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1448 s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1449 s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1450 s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1451 if (mv1) { /* duplicate motion data for 1-MV block */
1452 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1453 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1454 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1455 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1456 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1457 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1458 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1459 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1460 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
/* NOTE(review): '[1][xy + wrap][0]' below lacks '+ v->blocks_off',
 * unlike every sibling write in this group — likely a bug; verify
 * against upstream history before relying on it. */
1461 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1462 s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1463 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1464 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
/* gather neighbour predictors: C = left, A = above, B = above +/- off */
1469 C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1470 A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1472 if (v->field_mode && mixedmv_pic)
1473 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1475 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1477 //in 4-MV mode different blocks have different B predictor position
1480 off = (s->mb_x > 0) ? -1 : 1;
1483 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1492 B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
/* validity of each predictor w.r.t. picture/slice edges */
1494 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1495 b_valid = a_valid && (s->mb_width > 1);
1496 c_valid = s->mb_x || (n == 1 || n == 3);
1497 if (v->field_mode) {
1498 a_valid = a_valid && !is_intra[xy - wrap];
1499 b_valid = b_valid && !is_intra[xy - wrap + off];
1500 c_valid = c_valid && !is_intra[xy - 1];
/* count predictors coming from same vs opposite reference field */
1504 a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1505 num_oppfield += a_f;
1506 num_samefield += 1 - a_f;
1507 field_predA[0] = A[0];
1508 field_predA[1] = A[1];
1510 field_predA[0] = field_predA[1] = 0;
1514 b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1515 num_oppfield += b_f;
1516 num_samefield += 1 - b_f;
1517 field_predB[0] = B[0];
1518 field_predB[1] = B[1];
1520 field_predB[0] = field_predB[1] = 0;
1524 c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1525 num_oppfield += c_f;
1526 num_samefield += 1 - c_f;
1527 field_predC[0] = C[0];
1528 field_predC[1] = C[1];
1530 field_predC[0] = field_predC[1] = 0;
1534 if (v->field_mode) {
1536 // REFFIELD determines if the last field or the second-last field is
1537 // to be used as reference
1538 opposite = 1 - v->reffield;
1540 if (num_samefield <= num_oppfield)
1541 opposite = 1 - pred_flag;
1543 opposite = pred_flag;
/* opposite-field reference: rescale any same-field predictors */
1548 if (a_valid && !a_f) {
1549 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1550 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1552 if (b_valid && !b_f) {
1553 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1554 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1556 if (c_valid && !c_f) {
1557 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1558 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1560 v->mv_f[dir][xy + v->blocks_off] = 1;
1561 v->ref_field_type[dir] = !v->cur_field_type;
/* same-field reference: rescale any opposite-field predictors */
1563 if (a_valid && a_f) {
1564 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1565 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1567 if (b_valid && b_f) {
1568 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1569 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1571 if (c_valid && c_f) {
1572 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1573 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1575 v->mv_f[dir][xy + v->blocks_off] = 0;
1576 v->ref_field_type[dir] = v->cur_field_type;
/* single-predictor fallbacks (priority A, then C, then B) */
1580 px = field_predA[0];
1581 py = field_predA[1];
1582 } else if (c_valid) {
1583 px = field_predC[0];
1584 py = field_predC[1];
1585 } else if (b_valid) {
1586 px = field_predB[0];
1587 py = field_predB[1];
1593 if (num_samefield + num_oppfield > 1) {
1594 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1595 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1598 /* Pullback MV as specified in 8.3.5.3.4 */
1599 if (!v->field_mode) {
1601 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1602 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1603 X = (s->mb_width << 6) - 4;
1604 Y = (s->mb_height << 6) - 4;
1606 if (qx + px < -60) px = -60 - qx;
1607 if (qy + py < -60) py = -60 - qy;
1609 if (qx + px < -28) px = -28 - qx;
1610 if (qy + py < -28) py = -28 - qy;
1612 if (qx + px > X) px = X - qx;
1613 if (qy + py > Y) py = Y - qy;
1616 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1617 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1618 hybridmv_thresh = 32;
1619 if (a_valid && c_valid) {
1620 if (is_intra[xy - wrap])
1621 sum = FFABS(px) + FFABS(py);
1623 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1624 if (sum > hybridmv_thresh) {
1625 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1626 px = field_predA[0];
1627 py = field_predA[1];
1629 px = field_predC[0];
1630 py = field_predC[1];
1633 if (is_intra[xy - 1])
1634 sum = FFABS(px) + FFABS(py);
1636 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1637 if (sum > hybridmv_thresh) {
1638 if (get_bits1(&s->gb)) {
1639 px = field_predA[0];
1640 py = field_predA[1];
1642 px = field_predC[0];
1643 py = field_predC[1];
1650 if (v->field_mode && v->numref)
1652 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1654 /* store MV using signed modulus of MV range defined in 4.11 */
1655 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1656 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1657 if (mv1) { /* duplicate motion data for 1-MV block */
1658 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1659 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1660 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1661 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1662 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1663 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1664 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1665 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1669 /** Predict and set motion vector for interlaced frame picture MBs
 *
 * Builds predictors A (left), B (above) and C (above-right, or
 * above-left at the right picture edge), averaging field-MV pairs when
 * the neighbour's blk_mv_type differs from the current block's.
 * Selects the final predictor by median or by same/opposite-field
 * majority (field flag taken from bit 2 of the y component), then
 * stores predictor + differential with the signed-modulus wrap of 4.11.
 * mvn == 1 duplicates over all four sub-blocks; mvn == 2 duplicates for
 * a 2-field-MV block.
 *
 * NOTE(review): this extract has dropped interleaved source lines
 * (braces, else branches, some assignments); comments reflect the
 * visible code only.
 */
1671 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1672 int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
1674 MpegEncContext *s = &v->s;
1675 int xy, wrap, off = 0;
1676 int A[2], B[2], C[2];
1678 int a_valid = 0, b_valid = 0, c_valid = 0;
1679 int field_a, field_b, field_c; // 0: same, 1: opposite
1680 int total_valid, num_samefield, num_oppfield;
1681 int pos_c, pos_b, n_adj;
1683 wrap = s->b8_stride;
1684 xy = s->block_index[n];
/* intra block: clear stored MVs in both directions */
1687 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1688 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1689 s->current_picture.motion_val[1][xy][0] = 0;
1690 s->current_picture.motion_val[1][xy][1] = 0;
1691 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1692 s->current_picture.motion_val[0][xy + 1][0] = 0;
1693 s->current_picture.motion_val[0][xy + 1][1] = 0;
1694 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1695 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1696 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1697 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
1698 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1699 s->current_picture.motion_val[1][xy + 1][0] = 0;
1700 s->current_picture.motion_val[1][xy + 1][1] = 0;
1701 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1702 s->current_picture.motion_val[1][xy + wrap][1] = 0;
1703 s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
1704 s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
/* Predict A (left neighbour); off selects the row used for averaging */
1709 off = ((n == 0) || (n == 1)) ? 1 : -1;
1711 if (s->mb_x || (n == 1) || (n == 3)) {
1712 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1713 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1714 A[0] = s->current_picture.motion_val[dir][xy - 1][0];
1715 A[1] = s->current_picture.motion_val[dir][xy - 1][1];
1717 } else { // current block has frame mv and cand. has field MV (so average)
1718 A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
1719 + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
1720 A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
1721 + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
1724 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1730 /* Predict B and C */
1731 B[0] = B[1] = C[0] = C[1] = 0;
1732 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1733 if (!s->first_slice_line) {
1734 if (!v->is_intra[s->mb_x - s->mb_stride]) {
1737 pos_b = s->block_index[n_adj] - 2 * wrap;
1738 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1739 n_adj = (n & 2) | (n & 1);
1741 B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
1742 B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
1743 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1744 B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1745 B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
/* C from above-right MB (or above-left at the right picture edge) */
1748 if (s->mb_width > 1) {
1749 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1752 pos_c = s->block_index[2] - 2 * wrap + 2;
1753 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1756 C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
1757 C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
1758 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1759 C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1760 C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1762 if (s->mb_x == s->mb_width - 1) {
1763 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1766 pos_c = s->block_index[3] - 2 * wrap - 2;
1767 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1770 C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
1771 C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
1772 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1773 C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1774 C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
/* bottom half of a field-MV MB: B/C come from the MB's own top row */
1783 pos_b = s->block_index[1];
1785 B[0] = s->current_picture.motion_val[dir][pos_b][0];
1786 B[1] = s->current_picture.motion_val[dir][pos_b][1];
1787 pos_c = s->block_index[0];
1789 C[0] = s->current_picture.motion_val[dir][pos_c][0];
1790 C[1] = s->current_picture.motion_val[dir][pos_c][1];
1793 total_valid = a_valid + b_valid + c_valid;
1794 // check if predictor A is out of bounds
1795 if (!s->mb_x && !(n == 1 || n == 3)) {
1798 // check if predictor B is out of bounds
1799 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1800 B[0] = B[1] = C[0] = C[1] = 0;
1802 if (!v->blk_mv_type[xy]) {
1803 if (s->mb_width == 1) {
1807 if (total_valid >= 2) {
1808 px = mid_pred(A[0], B[0], C[0]);
1809 py = mid_pred(A[1], B[1], C[1]);
1810 } else if (total_valid) {
1811 if (a_valid) { px = A[0]; py = A[1]; }
1812 else if (b_valid) { px = B[0]; py = B[1]; }
1813 else { px = C[0]; py = C[1]; }
/* field-MV path: bit 2 of the y component marks opposite-field MVs */
1818 field_a = (A[1] & 4) ? 1 : 0;
1822 field_b = (B[1] & 4) ? 1 : 0;
1826 field_c = (C[1] & 4) ? 1 : 0;
1830 num_oppfield = field_a + field_b + field_c;
1831 num_samefield = total_valid - num_oppfield;
1832 if (total_valid == 3) {
1833 if ((num_samefield == 3) || (num_oppfield == 3)) {
1834 px = mid_pred(A[0], B[0], C[0]);
1835 py = mid_pred(A[1], B[1], C[1]);
1836 } else if (num_samefield >= num_oppfield) {
1837 /* take one MV from same field set depending on priority
1838 the check for B may not be necessary */
1839 px = !field_a ? A[0] : B[0];
1840 py = !field_a ? A[1] : B[1];
1842 px = field_a ? A[0] : B[0];
1843 py = field_a ? A[1] : B[1];
1845 } else if (total_valid == 2) {
1846 if (num_samefield >= num_oppfield) {
1847 if (!field_a && a_valid) {
1850 } else if (!field_b && b_valid) {
1853 } else /*if (c_valid)*/ {
1854 av_assert1(c_valid);
1857 } /*else px = py = 0;*/
1859 if (field_a && a_valid) {
1862 } else /*if (field_b && b_valid)*/ {
1863 av_assert1(field_b && b_valid);
1866 } /*else if (c_valid) {
1871 } else if (total_valid == 1) {
1872 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1873 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1877 /* store MV using signed modulus of MV range defined in 4.11 */
1878 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1879 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1880 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1881 s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
1882 s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
1883 s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
1884 s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
1885 s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
1886 s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
1887 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1888 s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
1889 s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
1890 s->mv[dir][n + 1][0] = s->mv[dir][n][0];
1891 s->mv[dir][n + 1][1] = s->mv[dir][n][1];
1895 /** Motion compensation for direct or interpolated blocks in B-frames
 *
 * Averages the backward-reference prediction (from s->next_picture,
 * using s->mv[1][0]) into s->dest, which already holds the forward
 * prediction: luma via the VC-1 mspel or hpel avg DSP functions, chroma
 * via qpel-bilinear avg functions. Handles field offsets, edge
 * emulation, range reduction and intensity compensation of the source
 * block before averaging.
 *
 * NOTE(review): this extract has dropped interleaved source lines
 * (early return, braces, some emulated_edge_mc arguments); comments
 * reflect the visible code only.
 */
1897 static void vc1_interp_mc(VC1Context *v)
1899 MpegEncContext *s = &v->s;
1900 H264ChromaContext *h264chroma = &v->h264chroma;
1901 uint8_t *srcY, *srcU, *srcV;
1902 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1904 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1905 int use_ic = v->next_use_ic;
1907 if (!v->field_mode && !v->s.next_picture.f->data[0])
/* backward MV; derive chroma MV by halving with the (mx&3)==3 rounding */
1910 mx = s->mv[1][0][0];
1911 my = s->mv[1][0][1];
1912 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1913 uvmy = (my + ((my & 3) == 3)) >> 1;
1914 if (v->field_mode) {
1915 if (v->cur_field_type != v->ref_field_type[1]) {
1916 my = my - 2 + 4 * v->cur_field_type;
1917 uvmy = uvmy - 2 + 4 * v->cur_field_type;
/* fastuvmc-style rounding of chroma MVs toward even positions */
1921 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1922 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1924 srcY = s->next_picture.f->data[0];
1925 srcU = s->next_picture.f->data[1];
1926 srcV = s->next_picture.f->data[2];
1928 src_x = s->mb_x * 16 + (mx >> 2);
1929 src_y = s->mb_y * 16 + (my >> 2);
1930 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1931 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1933 if (v->profile != PROFILE_ADVANCED) {
1934 src_x = av_clip( src_x, -16, s->mb_width * 16);
1935 src_y = av_clip( src_y, -16, s->mb_height * 16);
1936 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1937 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1939 src_x = av_clip( src_x, -17, s->avctx->coded_width);
1940 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1941 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1942 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1945 srcY += src_y * s->linesize + src_x;
1946 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1947 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* bottom reference field: step down one line into the frame buffer */
1949 if (v->field_mode && v->ref_field_type[1]) {
1950 srcY += s->current_picture_ptr->f->linesize[0];
1951 srcU += s->current_picture_ptr->f->linesize[1];
1952 srcV += s->current_picture_ptr->f->linesize[2];
1955 /* for grayscale we should not try to read from unknown area */
1956 if (s->flags & CODEC_FLAG_GRAY) {
1957 srcU = s->edge_emu_buffer + 18 * s->linesize;
1958 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* edge emulation / scaling needed: out-of-picture MV, range reduction
 * or intensity compensation */
1961 if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
1962 || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1963 || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1964 uint8_t *ubuf = s->edge_emu_buffer + 19 * s->linesize;
1965 uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
1967 srcY -= s->mspel * (1 + s->linesize);
1968 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
1969 s->linesize, s->linesize,
1970 17 + s->mspel * 2, 17 + s->mspel * 2,
1971 src_x - s->mspel, src_y - s->mspel,
1972 s->h_edge_pos, v_edge_pos);
1973 srcY = s->edge_emu_buffer;
1974 s->vdsp.emulated_edge_mc(ubuf, srcU,
1975 s->uvlinesize, s->uvlinesize,
1978 s->h_edge_pos >> 1, v_edge_pos >> 1);
1979 s->vdsp.emulated_edge_mc(vbuf, srcV,
1980 s->uvlinesize, s->uvlinesize,
1983 s->h_edge_pos >> 1, v_edge_pos >> 1);
1986 /* if we deal with range reduction we need to scale source blocks */
1987 if (v->rangeredfrm) {
1989 uint8_t *src, *src2;
1992 for (j = 0; j < 17 + s->mspel * 2; j++) {
1993 for (i = 0; i < 17 + s->mspel * 2; i++)
1994 src[i] = ((src[i] - 128) >> 1) + 128;
1999 for (j = 0; j < 9; j++) {
2000 for (i = 0; i < 9; i++) {
2001 src[i] = ((src[i] - 128) >> 1) + 128;
2002 src2[i] = ((src2[i] - 128) >> 1) + 128;
2004 src += s->uvlinesize;
2005 src2 += s->uvlinesize;
/* intensity compensation: remap samples through the next-picture LUTs */
2010 uint8_t (*luty )[256] = v->next_luty;
2011 uint8_t (*lutuv)[256] = v->next_lutuv;
2013 uint8_t *src, *src2;
2016 for (j = 0; j < 17 + s->mspel * 2; j++) {
2017 int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
2018 for (i = 0; i < 17 + s->mspel * 2; i++)
2019 src[i] = luty[f][src[i]];
2024 for (j = 0; j < 9; j++) {
2025 int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
2026 for (i = 0; i < 9; i++) {
2027 src[i] = lutuv[f][src[i]];
2028 src2[i] = lutuv[f][src2[i]];
2030 src += s->uvlinesize;
2031 src2 += s->uvlinesize;
2034 srcY += s->mspel * (1 + s->linesize);
/* luma: mspel interpolation, or hpel averaging when mspel is off */
2041 dxy = ((my & 3) << 2) | (mx & 3);
2042 v->vc1dsp.avg_vc1_mspel_pixels_tab[0][dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2044 dxy = (my & 2) | ((mx & 2) >> 1);
2047 s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2049 s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
2052 if (s->flags & CODEC_FLAG_GRAY) return;
2053 /* Chroma MC always uses qpel bilinear */
2054 uvmx = (uvmx & 3) << 1;
2055 uvmy = (uvmy & 3) << 1;
2057 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2058 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2060 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2061 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Scale a co-located MV component by the B-frame temporal fraction.
 * 'bfrac' is the fraction numerator (denominator B_FRACTION_DEN);
 * 'inv' selects the backward portion (fraction - 1); 'qs' (quarter_sample)
 * switches between half-pel (ceil, doubled) and quarter-pel rounding.
 * The #if branch is a shift-based fast path for B_FRACTION_DEN == 256.
 */
2065 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2069 #if B_FRACTION_DEN==256
2073 return 2 * ((value * n + 255) >> 9);
2074 return (value * n + 128) >> 8;
/* generic-denominator path */
2077 n -= B_FRACTION_DEN;
2079 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2080 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2084 /** Reconstruct motion vector for B-frame and do motion compensation
 *
 * For interpolated mode both predictions are fetched (the elided lines
 * perform the forward MC and the interpolation averaging); otherwise a
 * single 1-MV motion compensation is done, backward when mode is
 * BMV_TYPE_BACKWARD.
 * NOTE(review): most of this function's body is elided in this extract.
 */
2086 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2087 int direct, int mode)
2094 if (mode == BMV_TYPE_INTERPOLATED) {
2100 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2103 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2104 int direct, int mvtype)
2106 MpegEncContext *s = &v->s;
2107 int xy, wrap, off = 0;
2112 const uint8_t *is_intra = v->mb_type[0];
2114 av_assert0(!v->field_mode);
2118 /* scale MV difference to be quad-pel */
2119 dmv_x[0] <<= 1 - s->quarter_sample;
2120 dmv_y[0] <<= 1 - s->quarter_sample;
2121 dmv_x[1] <<= 1 - s->quarter_sample;
2122 dmv_y[1] <<= 1 - s->quarter_sample;
2124 wrap = s->b8_stride;
2125 xy = s->block_index[0];
2128 s->current_picture.motion_val[0][xy][0] =
2129 s->current_picture.motion_val[0][xy][1] =
2130 s->current_picture.motion_val[1][xy][0] =
2131 s->current_picture.motion_val[1][xy][1] = 0;
2134 if (direct && s->next_picture_ptr->field_picture)
2135 av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
2137 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2138 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2139 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2140 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2142 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2143 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2144 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2145 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2146 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2148 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2149 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2150 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2151 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2155 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2156 C = s->current_picture.motion_val[0][xy - 2];
2157 A = s->current_picture.motion_val[0][xy - wrap * 2];
2158 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2159 B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
2161 if (!s->mb_x) C[0] = C[1] = 0;
2162 if (!s->first_slice_line) { // predictor A is not out of bounds
2163 if (s->mb_width == 1) {
2167 px = mid_pred(A[0], B[0], C[0]);
2168 py = mid_pred(A[1], B[1], C[1]);
2170 } else if (s->mb_x) { // predictor C is not out of bounds
2176 /* Pullback MV as specified in 8.3.5.3.4 */
2179 if (v->profile < PROFILE_ADVANCED) {
2180 qx = (s->mb_x << 5);
2181 qy = (s->mb_y << 5);
2182 X = (s->mb_width << 5) - 4;
2183 Y = (s->mb_height << 5) - 4;
2184 if (qx + px < -28) px = -28 - qx;
2185 if (qy + py < -28) py = -28 - qy;
2186 if (qx + px > X) px = X - qx;
2187 if (qy + py > Y) py = Y - qy;
2189 qx = (s->mb_x << 6);
2190 qy = (s->mb_y << 6);
2191 X = (s->mb_width << 6) - 4;
2192 Y = (s->mb_height << 6) - 4;
2193 if (qx + px < -60) px = -60 - qx;
2194 if (qy + py < -60) py = -60 - qy;
2195 if (qx + px > X) px = X - qx;
2196 if (qy + py > Y) py = Y - qy;
2199 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2200 if (0 && !s->first_slice_line && s->mb_x) {
2201 if (is_intra[xy - wrap])
2202 sum = FFABS(px) + FFABS(py);
2204 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2206 if (get_bits1(&s->gb)) {
2214 if (is_intra[xy - 2])
2215 sum = FFABS(px) + FFABS(py);
2217 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2219 if (get_bits1(&s->gb)) {
2229 /* store MV using signed modulus of MV range defined in 4.11 */
2230 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2231 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2233 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2234 C = s->current_picture.motion_val[1][xy - 2];
2235 A = s->current_picture.motion_val[1][xy - wrap * 2];
2236 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2237 B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
2241 if (!s->first_slice_line) { // predictor A is not out of bounds
2242 if (s->mb_width == 1) {
2246 px = mid_pred(A[0], B[0], C[0]);
2247 py = mid_pred(A[1], B[1], C[1]);
2249 } else if (s->mb_x) { // predictor C is not out of bounds
2255 /* Pullback MV as specified in 8.3.5.3.4 */
2258 if (v->profile < PROFILE_ADVANCED) {
2259 qx = (s->mb_x << 5);
2260 qy = (s->mb_y << 5);
2261 X = (s->mb_width << 5) - 4;
2262 Y = (s->mb_height << 5) - 4;
2263 if (qx + px < -28) px = -28 - qx;
2264 if (qy + py < -28) py = -28 - qy;
2265 if (qx + px > X) px = X - qx;
2266 if (qy + py > Y) py = Y - qy;
2268 qx = (s->mb_x << 6);
2269 qy = (s->mb_y << 6);
2270 X = (s->mb_width << 6) - 4;
2271 Y = (s->mb_height << 6) - 4;
2272 if (qx + px < -60) px = -60 - qx;
2273 if (qy + py < -60) py = -60 - qy;
2274 if (qx + px > X) px = X - qx;
2275 if (qy + py > Y) py = Y - qy;
2278 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2279 if (0 && !s->first_slice_line && s->mb_x) {
2280 if (is_intra[xy - wrap])
2281 sum = FFABS(px) + FFABS(py);
2283 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2285 if (get_bits1(&s->gb)) {
2293 if (is_intra[xy - 2])
2294 sum = FFABS(px) + FFABS(py);
2296 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2298 if (get_bits1(&s->gb)) {
2308 /* store MV using signed modulus of MV range defined in 4.11 */
2310 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2311 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2313 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2314 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2315 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2316 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2319 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2321 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2322 MpegEncContext *s = &v->s;
2323 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2325 if (v->bmvtype == BMV_TYPE_DIRECT) {
2326 int total_opp, k, f;
2327 if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2328 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2329 v->bfraction, 0, s->quarter_sample);
2330 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2331 v->bfraction, 0, s->quarter_sample);
2332 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2333 v->bfraction, 1, s->quarter_sample);
2334 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2335 v->bfraction, 1, s->quarter_sample);
2337 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2338 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2339 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2340 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2341 f = (total_opp > 2) ? 1 : 0;
2343 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2344 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2347 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2348 for (k = 0; k < 4; k++) {
2349 s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2350 s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2351 s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2352 s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2353 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2354 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2358 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2359 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2360 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2363 if (dir) { // backward
2364 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2365 if (n == 3 || mv1) {
2366 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2369 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2370 if (n == 3 || mv1) {
2371 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2376 /** Get predicted DC value for I-frames only
2377  * prediction dir: left=0, top=1
2378  * @param s MpegEncContext
2379  * @param overlap flag indicating that overlap filtering is used
2380  * @param pq integer part of picture quantizer
2381  * @param[in] n block index in the current MB
2382  * @param dc_val_ptr Pointer to DC predictor
2383  * @param dir_ptr Prediction direction for use in AC prediction
/* NOTE(review): the body below is elided in places (missing declarations,
 * else-branches and the final return) — comments cover visible lines only. */
2385 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2386 int16_t **dc_val_ptr, int *dir_ptr)
2388 int a, b, c, wrap, pred, scale;
/* Default DC predictor, indexed by DC scale; entry 0 (-1 in a uint16_t,
 * i.e. 0xFFFF) is presumably never selected — TODO confirm scale >= 1. */
2390 static const uint16_t dcpred[32] = {
2391 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2392 114, 102, 93, 85, 79, 73, 68, 64,
2393 60, 57, 54, 51, 49, 47, 45, 43,
2394 41, 39, 38, 37, 35, 34, 33
2397 /* find prediction - wmv3_dc_scale always used here in fact */
2398 if (n < 4) scale = s->y_dc_scale;
2399 else scale = s->c_dc_scale;
2401 wrap = s->block_wrap[n];
2402 dc_val = s->dc_val[0] + s->block_index[n];
/* a = top neighbour DC, b = top-left neighbour DC (c = left, set in an
 * elided line above) */
2408 b = dc_val[ - 1 - wrap];
2409 a = dc_val[ - wrap];
2411 if (pq < 9 || !overlap) {
2412 /* Set outer values */
/* Neighbours outside the picture/slice are replaced by the default
 * predictor; blocks 2/3 have an in-MB top, blocks 1/3 an in-MB left. */
2413 if (s->first_slice_line && (n != 2 && n != 3))
2414 b = a = dcpred[scale];
2415 if (s->mb_x == 0 && (n != 1 && n != 3))
2416 b = c = dcpred[scale];
/* (elided else-branch) overlap-filtered path uses different outer values */
2418 /* Set outer values */
2419 if (s->first_slice_line && (n != 2 && n != 3))
2421 if (s->mb_x == 0 && (n != 1 && n != 3))
/* Choose prediction direction by comparing horizontal vs vertical gradient */
2425 if (abs(a - b) <= abs(b - c)) {
2427 *dir_ptr = 1; // left
2430 *dir_ptr = 0; // top
2433 /* update predictor */
2434 *dc_val_ptr = &dc_val[0];
2439 /** Get predicted DC value
2440  * prediction dir: left=0, top=1
2441  * @param s MpegEncContext
2442  * @param overlap flag indicating that overlap filtering is used
2443  * @param pq integer part of picture quantizer
2444  * @param[in] n block index in the current MB
2445  * @param a_avail flag indicating top block availability
2446  * @param c_avail flag indicating left block availability
2447  * @param dc_val_ptr Pointer to DC predictor
2448  * @param dir_ptr Prediction direction for use in AC prediction
/* NOTE(review): body elided in places (declarations, requantization guards,
 * final return are missing from this view). */
2450 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2451 int a_avail, int c_avail,
2452 int16_t **dc_val_ptr, int *dir_ptr)
2454 int a, b, c, wrap, pred;
2456 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2460 wrap = s->block_wrap[n];
2461 dc_val = s->dc_val[0] + s->block_index[n];
/* a = top neighbour DC, b = top-left neighbour DC (c = left, elided above) */
2467 b = dc_val[ - 1 - wrap];
2468 a = dc_val[ - wrap];
2469 /* scale predictors if needed */
2470 q1 = s->current_picture.qscale_table[mb_pos];
2471 dqscale_index = s->y_dc_scale_table[q1] - 1;
2472 if (dqscale_index < 0)
/* Requantize each available neighbour DC from its MB's quantizer to the
 * current one: x * scale(q2) / scale(q1), done as a fixed-point multiply
 * with ff_vc1_dqscale (rounded, >> 18). */
2474 if (c_avail && (n != 1 && n != 3)) {
2475 q2 = s->current_picture.qscale_table[mb_pos - 1];
2477 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2479 if (a_avail && (n != 2 && n != 3)) {
2480 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2482 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2484 if (a_avail && c_avail && (n != 3)) {
2489 off -= s->mb_stride;
2490 q2 = s->current_picture.qscale_table[off];
2492 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
/* Direction choice: gradient test when both neighbours exist, otherwise
 * whichever single neighbour is available (default: left). */
2495 if (a_avail && c_avail) {
2496 if (abs(a - b) <= abs(b - c)) {
2498 *dir_ptr = 1; // left
2501 *dir_ptr = 0; // top
2503 } else if (a_avail) {
2505 *dir_ptr = 0; // top
2506 } else if (c_avail) {
2508 *dir_ptr = 1; // left
2511 *dir_ptr = 1; // left
2514 /* update predictor */
2515 *dc_val_ptr = &dc_val[0];
2519 /** @} */ // Block group
2522 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2523 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
/* Predict the coded-block flag for block n from its left (a), top-left (b)
 * and top (c) neighbours in s->coded_block, and return a pointer to the
 * current entry so the caller can store the decoded flag.
 * NOTE(review): the prediction formula and return are elided from this view. */
2527 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2528 uint8_t **coded_block_ptr)
2530 int xy, wrap, pred, a, b, c;
2532 xy = s->block_index[n];
2533 wrap = s->b8_stride;
2538 a = s->coded_block[xy - 1 ];
2539 b = s->coded_block[xy - 1 - wrap];
2540 c = s->coded_block[xy - wrap];
/* hand back storage slot for the current block's coded flag */
2549 *coded_block_ptr = &s->coded_block[xy];
2555  * Decode one AC coefficient
2556  * @param v The VC1 context
2557  * @param last Last coefficient
2558  * @param skip How much zero coefficients to skip
2559  * @param value Decoded AC coefficient value
2560  * @param codingset set of VLC to decode data
/* NOTE(review): escape-mode branching and the final output assignments are
 * elided from this view; comments cover visible lines only. */
2563 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2564 int *value, int codingset)
2566 GetBitContext *gb = &v->s.gb;
2567 int index, escape, run = 0, level = 0, lst = 0;
2569 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
/* Regular (non-escape) codeword: run/level straight from the tables.
 * Forcing lst on bitstream exhaustion (get_bits_left < 0) terminates the
 * block early on truncated input. */
2570 if (index != ff_vc1_ac_sizes[codingset] - 1) {
2571 run = vc1_index_decode_table[codingset][index][0];
2572 level = vc1_index_decode_table[codingset][index][1];
2573 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
/* Escape: 0 = level delta, 10 = run delta, 11 = fully explicit (mode 3) */
2577 escape = decode210(gb);
2579 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2580 run = vc1_index_decode_table[codingset][index][0];
2581 level = vc1_index_decode_table[codingset][index][1];
2582 lst = index >= vc1_last_decode_table[codingset];
2585 level += vc1_last_delta_level_table[codingset][run];
2587 level += vc1_delta_level_table[codingset][run];
2590 run += vc1_last_delta_run_table[codingset][level] + 1;
2592 run += vc1_delta_run_table[codingset][level] + 1;
/* Escape mode 3: explicit run/level; field widths are read once per
 * picture (esc3_level_length == 0) and cached in the context */
2598 lst = get_bits1(gb);
2599 if (v->s.esc3_level_length == 0) {
2600 if (v->pq < 8 || v->dquantfrm) { // table 59
2601 v->s.esc3_level_length = get_bits(gb, 3);
2602 if (!v->s.esc3_level_length)
2603 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2604 } else { // table 60
2605 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2607 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2609 run = get_bits(gb, v->s.esc3_run_length);
2610 sign = get_bits1(gb);
2611 level = get_bits(gb, v->s.esc3_level_length);
2622 /** Decode intra block in intra frames - should be faster than decode_intra_block
2623  * @param v VC1Context
2624  * @param block block to decode
2625  * @param[in] n subblock index
2626  * @param coded are AC coeffs present or not
2627  * @param codingset set of VLC to decode data
/* NOTE(review): several interior lines (error return, predictor-sign
 * handling, loop bodies, final return) are elided from this view. */
2629 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2630 int coded, int codingset)
2632 GetBitContext *gb = &v->s.gb;
2633 MpegEncContext *s = &v->s;
2634 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2637 int16_t *ac_val, *ac_val2;
2640 /* Get DC differential */
/* blocks 0-3 are luma, 4-5 chroma: separate DC VLC tables */
2642 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2644 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2647 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape DC value: raw field whose width depends on the picture quantizer */
2651 if (dcdiff == 119 /* ESC index value */) {
2652 /* TODO: Optimize */
2653 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2654 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2655 else dcdiff = get_bits(gb, 8);
/* low-pq refinement bits for non-escape differentials */
2658 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2659 else if (v->pq == 2)
2660 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* add the DC prediction (left or top, chosen inside vc1_i_pred_dc) */
2667 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2670 /* Store the quantized DC coeff, used for prediction */
2672 block[0] = dcdiff * s->y_dc_scale;
2674 block[0] = dcdiff * s->c_dc_scale;
2685 int last = 0, skip, value;
2686 const uint8_t *zz_table;
2690 scale = v->pq * 2 + v->halfpq;
/* scan table depends on AC-prediction direction: [2] top, [3] left,
 * [1] normal */
2694 zz_table = v->zz_8x8[2];
2696 zz_table = v->zz_8x8[3];
2698 zz_table = v->zz_8x8[1];
/* per-block saved AC coefficients: 8 for the left column, 8 for top row */
2700 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2702 if (dc_pred_dir) // left
2705 ac_val -= 16 * s->block_wrap[n];
/* read AC run/level pairs until 'last' and scatter via zig-zag */
2708 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2712 block[zz_table[i++]] = value;
2715 /* apply AC prediction if needed */
2717 if (dc_pred_dir) { // left
2718 for (k = 1; k < 8; k++)
2719 block[k << v->left_blk_sh] += ac_val[k];
2721 for (k = 1; k < 8; k++)
2722 block[k << v->top_blk_sh] += ac_val[k + 8];
2725 /* save AC coeffs for further prediction */
2726 for (k = 1; k < 8; k++) {
2727 ac_val2[k] = block[k << v->left_blk_sh];
2728 ac_val2[k + 8] = block[k << v->top_blk_sh];
2731 /* scale AC coeffs */
2732 for (k = 1; k < 64; k++)
/* non-pquantizer mode: add quantizer-dependent rounding toward magnitude */
2736 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2739 if (s->ac_pred) i = 63;
/* --- no AC coefficients coded: still propagate/predict AC state --- */
2745 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2749 scale = v->pq * 2 + v->halfpq;
2750 memset(ac_val2, 0, 16 * 2);
2751 if (dc_pred_dir) { // left
2754 memcpy(ac_val2, ac_val, 8 * 2);
2756 ac_val -= 16 * s->block_wrap[n];
2758 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2761 /* apply AC prediction if needed */
2763 if (dc_pred_dir) { //left
2764 for (k = 1; k < 8; k++) {
2765 block[k << v->left_blk_sh] = ac_val[k] * scale;
2766 if (!v->pquantizer && block[k << v->left_blk_sh])
2767 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2770 for (k = 1; k < 8; k++) {
2771 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2772 if (!v->pquantizer && block[k << v->top_blk_sh])
2773 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2779 s->block_last_index[n] = i;
2784 /** Decode intra block in intra frames - should be faster than decode_intra_block
2785  * @param v VC1Context
2786  * @param block block to decode
2787  * @param[in] n subblock number
2788  * @param coded are AC coeffs present or not
2789  * @param codingset set of VLC to decode data
2790  * @param mquant quantizer value for this macroblock
/* Advanced-profile variant: per-MB quantizer (mquant), neighbour
 * availability flags and cross-quantizer AC rescaling.
 * NOTE(review): interior lines are elided from this view. */
2792 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2793 int coded, int codingset, int mquant)
2795 GetBitContext *gb = &v->s.gb;
2796 MpegEncContext *s = &v->s;
2797 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2799 int16_t *dc_val = NULL;
2800 int16_t *ac_val, *ac_val2;
2802 int a_avail = v->a_avail, c_avail = v->c_avail;
2803 int use_pred = s->ac_pred;
2806 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2808 /* Get DC differential */
2810 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2812 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2815 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape DC: raw field width depends on the MB quantizer here, not v->pq */
2819 if (dcdiff == 119 /* ESC index value */) {
2820 /* TODO: Optimize */
2821 if (mquant == 1) dcdiff = get_bits(gb, 10);
2822 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2823 else dcdiff = get_bits(gb, 8);
2826 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2827 else if (mquant == 2)
2828 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* availability-aware DC prediction (vc1_pred_dc handles requantization) */
2835 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2838 /* Store the quantized DC coeff, used for prediction */
2840 block[0] = dcdiff * s->y_dc_scale;
2842 block[0] = dcdiff * s->c_dc_scale;
2848 /* check if AC is needed at all */
2849 if (!a_avail && !c_avail)
2851 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
/* halfpq only applies when the MB uses the picture quantizer */
2854 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2856 if (dc_pred_dir) // left
2859 ac_val -= 16 * s->block_wrap[n];
/* q1 = current MB quantizer index, q2 = predictor MB's quantizer index;
 * n==1/n==2 special cases keep prediction inside the current MB */
2861 q1 = s->current_picture.qscale_table[mb_pos];
2862 if ( dc_pred_dir && c_avail && mb_pos)
2863 q2 = s->current_picture.qscale_table[mb_pos - 1];
2864 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2865 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2866 if ( dc_pred_dir && n == 1)
2868 if (!dc_pred_dir && n == 2)
2874 int last = 0, skip, value;
2875 const uint8_t *zz_table;
/* scan table: interlaced frames use zzi unless AC prediction dictates
 * a direction-specific progressive scan */
2879 if (!use_pred && v->fcm == ILACE_FRAME) {
2880 zz_table = v->zzi_8x8;
2882 if (!dc_pred_dir) // top
2883 zz_table = v->zz_8x8[2];
2885 zz_table = v->zz_8x8[3];
2888 if (v->fcm != ILACE_FRAME)
2889 zz_table = v->zz_8x8[1];
2891 zz_table = v->zzi_8x8;
2895 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2899 block[zz_table[i++]] = value;
2902 /* apply AC prediction if needed */
2904 /* scale predictors if needed*/
/* neighbour coded at a different quantizer: rescale its saved AC row/column
 * with the fixed-point dqscale multiply before adding */
2905 if (q2 && q1 != q2) {
2906 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2907 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2910 return AVERROR_INVALIDDATA;
2911 if (dc_pred_dir) { // left
2912 for (k = 1; k < 8; k++)
2913 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2915 for (k = 1; k < 8; k++)
2916 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2919 if (dc_pred_dir) { //left
2920 for (k = 1; k < 8; k++)
2921 block[k << v->left_blk_sh] += ac_val[k];
2923 for (k = 1; k < 8; k++)
2924 block[k << v->top_blk_sh] += ac_val[k + 8];
2928 /* save AC coeffs for further prediction */
2929 for (k = 1; k < 8; k++) {
2930 ac_val2[k ] = block[k << v->left_blk_sh];
2931 ac_val2[k + 8] = block[k << v->top_blk_sh];
2934 /* scale AC coeffs */
2935 for (k = 1; k < 64; k++)
2939 block[k] += (block[k] < 0) ? -mquant : mquant;
2942 if (use_pred) i = 63;
2943 } else { // no AC coeffs
/* still maintain the saved-AC state so later blocks predict correctly */
2946 memset(ac_val2, 0, 16 * 2);
2947 if (dc_pred_dir) { // left
2949 memcpy(ac_val2, ac_val, 8 * 2);
2950 if (q2 && q1 != q2) {
2951 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2952 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2954 return AVERROR_INVALIDDATA;
2955 for (k = 1; k < 8; k++)
2956 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2961 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2962 if (q2 && q1 != q2) {
2963 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2964 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2966 return AVERROR_INVALIDDATA;
2967 for (k = 1; k < 8; k++)
2968 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2973 /* apply AC prediction if needed */
2975 if (dc_pred_dir) { // left
2976 for (k = 1; k < 8; k++) {
2977 block[k << v->left_blk_sh] = ac_val2[k] * scale;
2978 if (!v->pquantizer && block[k << v->left_blk_sh])
2979 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2982 for (k = 1; k < 8; k++) {
2983 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2984 if (!v->pquantizer && block[k << v->top_blk_sh])
2985 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2991 s->block_last_index[n] = i;
2996 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2997  * @param v VC1Context
2998  * @param block block to decode
2999  * @param[in] n subblock index
3000  * @param coded are AC coeffs present or not
3001  * @param mquant block quantizer
3002  * @param codingset set of VLC to decode data
/* NOTE(review): interior lines are elided from this view; the structure
 * mirrors vc1_decode_i_block_adv with extra availability handling. */
3004 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
3005 int coded, int mquant, int codingset)
3007 GetBitContext *gb = &v->s.gb;
3008 MpegEncContext *s = &v->s;
3009 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3011 int16_t *dc_val = NULL;
3012 int16_t *ac_val, *ac_val2;
3014 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3015 int a_avail = v->a_avail, c_avail = v->c_avail;
3016 int use_pred = s->ac_pred;
3020 s->bdsp.clear_block(block);
3022 /* XXX: Guard against dumb values of mquant */
/* clamp mquant into [0,31]; note values < 1 map to 0, not 1 */
3023 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3025 /* Set DC scale - y and c use the same */
3026 s->y_dc_scale = s->y_dc_scale_table[mquant];
3027 s->c_dc_scale = s->c_dc_scale_table[mquant];
3029 /* Get DC differential */
3031 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3033 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3036 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3040 if (dcdiff == 119 /* ESC index value */) {
3041 /* TODO: Optimize */
3042 if (mquant == 1) dcdiff = get_bits(gb, 10);
3043 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3044 else dcdiff = get_bits(gb, 8);
3047 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3048 else if (mquant == 2)
3049 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3056 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3059 /* Store the quantized DC coeff, used for prediction */
3062 block[0] = dcdiff * s->y_dc_scale;
3064 block[0] = dcdiff * s->c_dc_scale;
3070 /* check if AC is needed at all and adjust direction if needed */
/* force the prediction direction toward an available neighbour */
3071 if (!a_avail) dc_pred_dir = 1;
3072 if (!c_avail) dc_pred_dir = 0;
3073 if (!a_avail && !c_avail) use_pred = 0;
3074 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3077 scale = mquant * 2 + v->halfpq;
3079 if (dc_pred_dir) //left
3082 ac_val -= 16 * s->block_wrap[n];
/* q1/q2: current vs predictor MB quantizer, for AC rescaling below */
3084 q1 = s->current_picture.qscale_table[mb_pos];
3085 if (dc_pred_dir && c_avail && mb_pos)
3086 q2 = s->current_picture.qscale_table[mb_pos - 1];
3087 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3088 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3089 if ( dc_pred_dir && n == 1)
3091 if (!dc_pred_dir && n == 2)
3093 if (n == 3) q2 = q1;
3096 int last = 0, skip, value;
3100 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
/* scan selection: progressive, interlaced-with-AC-pred (directional),
 * or plain interlaced zig-zag */
3104 if (v->fcm == PROGRESSIVE)
3105 block[v->zz_8x8[0][i++]] = value;
3107 if (use_pred && (v->fcm == ILACE_FRAME)) {
3108 if (!dc_pred_dir) // top
3109 block[v->zz_8x8[2][i++]] = value;
3111 block[v->zz_8x8[3][i++]] = value;
3113 block[v->zzi_8x8[i++]] = value;
3118 /* apply AC prediction if needed */
3120 /* scale predictors if needed*/
3121 if (q2 && q1 != q2) {
3122 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3123 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3126 return AVERROR_INVALIDDATA;
3127 if (dc_pred_dir) { // left
3128 for (k = 1; k < 8; k++)
3129 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3131 for (k = 1; k < 8; k++)
3132 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3135 if (dc_pred_dir) { // left
3136 for (k = 1; k < 8; k++)
3137 block[k << v->left_blk_sh] += ac_val[k];
3139 for (k = 1; k < 8; k++)
3140 block[k << v->top_blk_sh] += ac_val[k + 8];
3144 /* save AC coeffs for further prediction */
3145 for (k = 1; k < 8; k++) {
3146 ac_val2[k ] = block[k << v->left_blk_sh];
3147 ac_val2[k + 8] = block[k << v->top_blk_sh];
3150 /* scale AC coeffs */
3151 for (k = 1; k < 64; k++)
3155 block[k] += (block[k] < 0) ? -mquant : mquant;
3158 if (use_pred) i = 63;
3159 } else { // no AC coeffs
/* keep saved-AC state coherent even without coded coefficients */
3162 memset(ac_val2, 0, 16 * 2);
3163 if (dc_pred_dir) { // left
3165 memcpy(ac_val2, ac_val, 8 * 2);
3166 if (q2 && q1 != q2) {
3167 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3168 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3170 return AVERROR_INVALIDDATA;
3171 for (k = 1; k < 8; k++)
3172 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3177 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3178 if (q2 && q1 != q2) {
3179 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3180 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3182 return AVERROR_INVALIDDATA;
3183 for (k = 1; k < 8; k++)
3184 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3189 /* apply AC prediction if needed */
3191 if (dc_pred_dir) { // left
3192 for (k = 1; k < 8; k++) {
3193 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3194 if (!v->pquantizer && block[k << v->left_blk_sh])
3195 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3198 for (k = 1; k < 8; k++) {
3199 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3200 if (!v->pquantizer && block[k << v->top_blk_sh])
3201 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3207 s->block_last_index[n] = i;
/* Decode one inter-coded (residual) block of a P/B macroblock: read the
 * transform type (8x8 / two 8x4 / two 4x8 / four 4x4), decode AC run/level
 * pairs per subblock, dequantize, inverse-transform and add into dst.
 * ttmb_out accumulates the chosen transform type per block; the returned
 * coded-subblock pattern lines are partly elided from this view.
 * NOTE(review): interior lines (pat init, loop heads, return) are elided. */
3214 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3215 int mquant, int ttmb, int first_block,
3216 uint8_t *dst, int linesize, int skip_block,
3219 MpegEncContext *s = &v->s;
3220 GetBitContext *gb = &s->gb;
3223 int scale, off, idx, last, skip, value;
3224 int ttblk = ttmb & 7;
3227 s->bdsp.clear_block(block);
/* per-block transform type signalled in the bitstream when not fixed */
3230 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3232 if (ttblk == TT_4X4) {
3233 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3235 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3236 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3237 || (!v->res_rtm_flag && !first_block))) {
3238 subblkpat = decode012(gb);
3240 subblkpat ^= 3; // swap decoded pattern bits
/* normalize TOP/BOTTOM and LEFT/RIGHT variants to the generic type */
3241 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3243 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3246 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3248 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3249 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3250 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3253 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3254 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* --- TT_8X8: single transform over the whole block --- */
3263 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3268 idx = v->zz_8x8[0][i++];
3270 idx = v->zzi_8x8[i++];
3271 block[idx] = value * scale;
3273 block[idx] += (block[idx] < 0) ? -mquant : mquant;
/* DC-only blocks use the cheaper _dc inverse transform */
3277 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3279 v->vc1dsp.vc1_inv_trans_8x8(block);
3280 s->idsp.add_pixels_clamped(block, dst, linesize);
/* --- TT_4X4: up to four 4x4 subblocks, presence given by subblkpat --- */
3285 pat = ~subblkpat & 0xF;
3286 for (j = 0; j < 4; j++) {
3287 last = subblkpat & (1 << (3 - j));
3289 off = (j & 1) * 4 + (j & 2) * 16;
3291 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3296 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3298 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3299 block[idx + off] = value * scale;
3301 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3303 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3305 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3307 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* --- TT_8X4: two horizontal 8x4 halves --- */
3312 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3313 for (j = 0; j < 2; j++) {
3314 last = subblkpat & (1 << (1 - j));
3318 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3323 idx = v->zz_8x4[i++] + off;
3325 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3326 block[idx] = value * scale;
3328 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3330 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3332 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3334 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* --- TT_4X8: two vertical 4x8 halves --- */
3339 pat = ~(subblkpat * 5) & 0xF;
3340 for (j = 0; j < 2; j++) {
3341 last = subblkpat & (1 << (1 - j));
3345 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3350 idx = v->zz_4x8[i++] + off;
3352 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3353 block[idx] = value * scale;
3355 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3357 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3359 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3361 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* record the transform type used by this block for the loop filter */
3367 *ttmb_out |= ttblk << (n * 4);
3371 /** @} */ // Macroblock group
/* MV differential coding tables: size_table gives the bit length of the MV
 * differential for each index; offset_table the value offset added to it. */
3373 static const int size_table  [6] = { 0, 2, 3, 4,  5,  8 };
3374 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/* Apply the in-loop vertical deblocking filter for one block (block_num 0-3
 * luma, 4-5 chroma) of the macroblock one MB-row above the current position
 * (note the s->mb_x - s->mb_stride indexing): filters the horizontal edge
 * against the block below, then the internal edge for 8x4/4x4 transforms.
 * NOTE(review): interior lines are elided from this view. */
3376 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3378 MpegEncContext *s = &v->s;
/* cbp/intra state of the MB being filtered (one row above) */
3379 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3380 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3381 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3382 block_is_intra = mb_is_intra >> block_num, bottom_is_intra;
3383 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3386 if (block_num > 3) {
3387 dst = s->dest[block_num - 3];
3389 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
/* skip the block-below edge on the last row (bottom blocks 2/3 and chroma) */
3391 if (s->mb_y != s->end_mb_y || block_num < 2) {
3395 if (block_num > 3) {
3396 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3397 bottom_is_intra = v->is_intra[s->mb_x] >> block_num;
3398 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3399 mv_stride = s->mb_stride;
/* luma: the "below" block is either within the same MB (blocks 0/1) or
 * in the MB of the current row (blocks 2/3) */
3401 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3402 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3403 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> (block_num + 2))
3404 : (v->is_intra[s->mb_x] >> (block_num - 2));
3405 mv_stride = s->b8_stride;
3406 mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* full 8-wide filter if either side is intra or the MVs differ;
 * otherwise filter 4-wide halves per coded-subblock bits */
3409 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3410 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3411 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3413 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3415 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3418 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3420 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* internal horizontal edge (only for transforms with a 4-high split) */
3425 dst -= 4 * linesize;
3426 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3427 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3428 idx = (block_cbp | (block_cbp >> 2)) & 3;
3430 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3433 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3435 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* Apply the in-loop horizontal deblocking filter for one block of the MB
 * one position up-left of the current one (s->mb_x - 1 - s->mb_stride):
 * filters the vertical edge against the block to its right, then the
 * internal edge for 4x8/4x4 transforms.
 * NOTE(review): interior lines are elided from this view. */
3440 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3442 MpegEncContext *s = &v->s;
3443 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3444 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3445 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3446 block_is_intra = mb_is_intra >> block_num, right_is_intra;
3447 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3450 if (block_num > 3) {
3451 dst = s->dest[block_num - 3] - 8 * linesize;
3453 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
/* skip the right-neighbour edge at the picture's right border for blocks
 * whose right neighbour lies in the next MB (block_num & 5 selects them) */
3456 if (s->mb_x != s->mb_width || !(block_num & 5)) {
3459 if (block_num > 3) {
3460 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3461 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> block_num;
3462 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
/* luma: right neighbour is within the same MB (even blocks) or in the
 * MB to the right (odd blocks) */
3464 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3465 : (mb_cbp >> ((block_num + 1) * 4));
3466 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> (block_num - 1))
3467 : (mb_is_intra >> (block_num + 1));
3468 mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
/* full 8-high filter on intra or MV mismatch, else per-subblock 4-high */
3470 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3471 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3473 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3475 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3478 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3480 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* internal vertical edge (only for transforms with a 4-wide split) */
3486 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3487 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3488 idx = (block_cbp | (block_cbp >> 1)) & 5;
3490 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3493 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3495 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* Run the P-frame in-loop deblocking filter for the current MB position:
 * vertical-edge (V) filtering for all 6 blocks, then horizontal-edge (H)
 * filtering lagging one MB behind, catching up at the end of each row.
 * NOTE(review): source chunk has missing lines; comments cover visible code. */
3500 static void vc1_apply_p_loop_filter(VC1Context *v)
3502     MpegEncContext *s = &v->s;
         /* V filter all 4 luma + 2 chroma blocks of this MB */
3505     for (i = 0; i < 6; i++) {
3506         vc1_apply_p_v_loop_filter(v, i);
3509     /* V always precedes H, therefore we run H one MB before V;
3510      * at the end of a row, we catch up to complete the row */
3512     for (i = 0; i < 6; i++) {
3513         vc1_apply_p_h_loop_filter(v, i);
         /* at row end, advance once more and H-filter the final MB */
3515     if (s->mb_x == s->mb_width - 1) {
3517         ff_update_block_index(s);
3518         for (i = 0; i < 6; i++) {
3519             vc1_apply_p_h_loop_filter(v, i);
3525 /** Decode one P-frame MB
 * Handles both 1MV and 4MV macroblocks, coded and skipped, intra and inter
 * blocks; records per-MB cbp/transform-type/intra masks for the loop filter.
 * NOTE(review): this chunk of the file is gapped (missing original lines);
 * comments only describe the statements visible here.
 */
3527 static int vc1_decode_p_mb(VC1Context *v)
3529     MpegEncContext *s = &v->s;
3530     GetBitContext *gb = &s->gb;
3532     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3533     int cbp; /* cbp decoding stuff */
3534     int mqdiff, mquant; /* MB quantization */
3535     int ttmb = v->ttfrm; /* MB Transform type */
3537     int mb_has_coeffs = 1; /* last_flag */
3538     int dmv_x, dmv_y; /* Differential MV components */
3539     int index, index1; /* LUT indexes */
3540     int val, sign; /* temp values */
3541     int first_block = 1;
3543     int skipped, fourmv;
3544     int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3546     mquant = v->pq; /* lossy initialization */
     /* MV-type (1MV vs 4MV) is either raw-coded per MB or read from a bitplane */
3548     if (v->mv_type_is_raw)
3549         fourmv = get_bits1(gb);
3551         fourmv = v->mv_type_mb_plane[mb_pos];
3553         skipped = get_bits1(gb);
3555         skipped = v->s.mbskip_table[mb_pos];
3557     if (!fourmv) { /* 1MV mode */
3559             GET_MVDATA(dmv_x, dmv_y);
             /* intra MB: zero the backward MV slot */
3562                 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3563                 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3565             s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3566             vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3568             /* FIXME Set DC val for inter block ? */
3569             if (s->mb_intra && !mb_has_coeffs) {
3571                 s->ac_pred = get_bits1(gb);
3573             } else if (mb_has_coeffs) {
3575                     s->ac_pred = get_bits1(gb);
3576                 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3582             s->current_picture.qscale_table[mb_pos] = mquant;
             /* per-MB transform type, only if not forced frame-wide (ttmbf) */
3584             if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3585                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3586                                 VC1_TTMB_VLC_BITS, 2);
3587             if (!s->mb_intra) vc1_mc_1mv(v, 0);
             /* decode/reconstruct the 6 blocks (4 luma + 2 chroma) */
3589             for (i = 0; i < 6; i++) {
3590                 s->dc_val[0][s->block_index[i]] = 0;
3592                 val = ((cbp >> (5 - i)) & 1);
3593                 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3594                 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3596                     /* check if prediction blocks A and C are available */
3597                     v->a_avail = v->c_avail = 0;
3598                     if (i == 2 || i == 3 || !s->first_slice_line)
3599                         v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3600                     if (i == 1 || i == 3 || s->mb_x)
3601                         v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3603                     vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3604                                            (i & 4) ? v->codingset2 : v->codingset);
                     /* skip chroma reconstruction in gray-only mode */
3605                     if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3607                     v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
                     /* rangered-style doubling of coefficients (visible branch) */
3609                         for (j = 0; j < 64; j++)
3610                             s->block[i][j] <<= 1;
3611                     s->idsp.put_signed_pixels_clamped(s->block[i],
3612                                                       s->dest[dst_idx] + off,
3613                                                       i & 4 ? s->uvlinesize
3615                     if (v->pq >= 9 && v->overlap) {
3617                             v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3619                             v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3621                     block_cbp |= 0xF << (i << 2);
3622                     block_intra |= 1 << i;
                 /* inter block: decode residual and accumulate cbp pattern */
3624                     pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3625                                              s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3626                                              (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3627                     block_cbp |= pat << (i << 2);
3628                     if (!v->ttmbf && ttmb < 8)
         /* skipped 1MV MB: no residual, predict MV and motion-compensate only */
3635             for (i = 0; i < 6; i++) {
3636                 v->mb_type[0][s->block_index[i]] = 0;
3637                 s->dc_val[0][s->block_index[i]] = 0;
3639             s->current_picture.mb_type[mb_pos]      = MB_TYPE_SKIP;
3640             s->current_picture.qscale_table[mb_pos] = 0;
3641             vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3644     } else { // 4MV mode
3645         if (!skipped /* unskipped MB */) {
3646             int intra_count = 0, coded_inter = 0;
3647             int is_intra[6], is_coded[6];
3649             cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
             /* first pass: per-luma-block MV decode + luma MC, track intra count */
3650             for (i = 0; i < 6; i++) {
3651                 val = ((cbp >> (5 - i)) & 1);
3652                 s->dc_val[0][s->block_index[i]] = 0;
3659                     GET_MVDATA(dmv_x, dmv_y);
3661                     vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3663                         vc1_mc_4mv_luma(v, i, 0, 0);
3664                     intra_count += s->mb_intra;
3665                     is_intra[i]  = s->mb_intra;
3666                     is_coded[i]  = mb_has_coeffs;
                 /* chroma blocks are intra iff >= 3 luma blocks are intra */
3669                     is_intra[i] = (intra_count >= 3);
3673                     vc1_mc_4mv_chroma(v, 0);
3674                 v->mb_type[0][s->block_index[i]] = is_intra[i];
3676                     coded_inter = !is_intra[i] && is_coded[i];
3678             // if there are no coded blocks then don't do anything more
3680             if (!intra_count && !coded_inter)
3683             s->current_picture.qscale_table[mb_pos] = mquant;
3684             /* test if block is intra and has pred */
3687                 for (i = 0; i < 6; i++)
3689                     if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3690                         || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3696                     s->ac_pred = get_bits1(gb);
3700             if (!v->ttmbf && coded_inter)
3701                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
             /* second pass: decode residuals and reconstruct each block */
3702             for (i = 0; i < 6; i++) {
3704                 off         = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3705                 s->mb_intra = is_intra[i];
3707                     /* check if prediction blocks A and C are available */
3708                     v->a_avail = v->c_avail = 0;
3709                     if (i == 2 || i == 3 || !s->first_slice_line)
3710                         v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3711                     if (i == 1 || i == 3 || s->mb_x)
3712                         v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3714                     vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3715                                            (i & 4) ? v->codingset2 : v->codingset);
3716                     if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3718                     v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3720                         for (j = 0; j < 64; j++)
3721                             s->block[i][j] <<= 1;
3722                     s->idsp.put_signed_pixels_clamped(s->block[i],
3723                                                       s->dest[dst_idx] + off,
3724                                                       (i & 4) ? s->uvlinesize
3726                     if (v->pq >= 9 && v->overlap) {
3728                             v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3730                             v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3732                     block_cbp |= 0xF << (i << 2);
3733                     block_intra |= 1 << i;
3734                 } else if (is_coded[i]) {
3735                     pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3736                                              first_block, s->dest[dst_idx] + off,
3737                                              (i & 4) ? s->uvlinesize : s->linesize,
3738                                              (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3740                     block_cbp |= pat << (i << 2);
3741                     if (!v->ttmbf && ttmb < 8)
3746         } else { // skipped MB
3748             s->current_picture.qscale_table[mb_pos] = 0;
3749             for (i = 0; i < 6; i++) {
3750                 v->mb_type[0][s->block_index[i]] = 0;
3751                 s->dc_val[0][s->block_index[i]] = 0;
             /* skipped 4MV: predict each luma MV from neighbours, then MC */
3753             for (i = 0; i < 4; i++) {
3754                 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3755                 vc1_mc_4mv_luma(v, i, 0, 0);
3757             vc1_mc_4mv_chroma(v, 0);
3758             s->current_picture.qscale_table[mb_pos] = 0;
     /* stash per-MB masks for the deferred in-loop deblocking filter */
3762     v->cbp[s->mb_x]      = block_cbp;
3763     v->ttblk[s->mb_x]    = block_tt;
3764     v->is_intra[s->mb_x] = block_intra;
3769 /* Decode one macroblock in an interlaced frame p picture */
 /* Handles intra, 1MV, 2MV-field and 4MV(-field) inter, and skipped MBs;
  * fieldtx selects field-interleaved vs frame block layout for luma.
  * NOTE(review): this chunk of the file is gapped (missing original lines);
  * comments only describe the statements visible here. */
3771 static int vc1_decode_p_mb_intfr(VC1Context *v)
3773     MpegEncContext *s = &v->s;
3774     GetBitContext *gb = &s->gb;
3776     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3777     int cbp = 0; /* cbp decoding stuff */
3778     int mqdiff, mquant; /* MB quantization */
3779     int ttmb = v->ttfrm; /* MB Transform type */
3781     int mb_has_coeffs = 1; /* last_flag */
3782     int dmv_x, dmv_y; /* Differential MV components */
3783     int val; /* temp value */
3784     int first_block = 1;
3786     int skipped, fourmv = 0, twomv = 0;
3787     int block_cbp = 0, pat, block_tt = 0;
3788     int idx_mbmode = 0, mvbp;
3789     int stride_y, fieldtx;
3791     mquant = v->pq; /* Lossy initialization */
3794         skipped = get_bits1(gb);
3796         skipped = v->s.mbskip_table[mb_pos];
         /* MB mode VLC table depends on whether 4MV is allowed this picture */
3798         if (v->fourmvswitch)
3799             idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3801             idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3802         switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3803         /* store the motion vector type in a flag (useful later) */
3804         case MV_PMODE_INTFR_4MV:
3806             v->blk_mv_type[s->block_index[0]] = 0;
3807             v->blk_mv_type[s->block_index[1]] = 0;
3808             v->blk_mv_type[s->block_index[2]] = 0;
3809             v->blk_mv_type[s->block_index[3]] = 0;
3811         case MV_PMODE_INTFR_4MV_FIELD:
3813             v->blk_mv_type[s->block_index[0]] = 1;
3814             v->blk_mv_type[s->block_index[1]] = 1;
3815             v->blk_mv_type[s->block_index[2]] = 1;
3816             v->blk_mv_type[s->block_index[3]] = 1;
3818         case MV_PMODE_INTFR_2MV_FIELD:
3820             v->blk_mv_type[s->block_index[0]] = 1;
3821             v->blk_mv_type[s->block_index[1]] = 1;
3822             v->blk_mv_type[s->block_index[2]] = 1;
3823             v->blk_mv_type[s->block_index[3]] = 1;
3825         case MV_PMODE_INTFR_1MV:
3826             v->blk_mv_type[s->block_index[0]] = 0;
3827             v->blk_mv_type[s->block_index[1]] = 0;
3828             v->blk_mv_type[s->block_index[2]] = 0;
3829             v->blk_mv_type[s->block_index[3]] = 0;
3832         if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
             /* zero MVs for an intra MB (forward slot) */
3833             for (i = 0; i < 4; i++) {
3834                 s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3835                 s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3837             s->current_picture.mb_type[mb_pos]                     = MB_TYPE_INTRA;
3839             v->is_intra[s->mb_x] = 0x3F; // Set the bitfield to all 1.
3840             for (i = 0; i < 6; i++)
3841                 v->mb_type[0][s->block_index[i]] = 1;
3842             fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3843             mb_has_coeffs = get_bits1(gb);
3845                 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3846             v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3848             s->current_picture.qscale_table[mb_pos] = mquant;
3849             /* Set DC scale - y and c use the same (not sure if necessary here) */
3850             s->y_dc_scale = s->y_dc_scale_table[mquant];
3851             s->c_dc_scale = s->c_dc_scale_table[mquant];
3853             for (i = 0; i < 6; i++) {
3854                 s->dc_val[0][s->block_index[i]] = 0;
3856                 val = ((cbp >> (5 - i)) & 1);
3857                 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3858                 v->a_avail = v->c_avail          = 0;
3859                 if (i == 2 || i == 3 || !s->first_slice_line)
3860                     v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3861                 if (i == 1 || i == 3 || s->mb_x)
3862                     v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3864                 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3865                                        (i & 4) ? v->codingset2 : v->codingset);
3866                 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3867                 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
                 /* luma layout: fieldtx interleaves the two fields line-by-line */
3869                     stride_y = s->linesize << fieldtx;
3870                     off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3872                     stride_y = s->uvlinesize;
3875                 s->idsp.put_signed_pixels_clamped(s->block[i],
3876                                                   s->dest[dst_idx] + off,
3881         } else { // inter MB
3882             mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3884                 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3885             if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3886                 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3888                 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3889                     || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3890                     v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3893             s->mb_intra = v->is_intra[s->mb_x] = 0;
3894             for (i = 0; i < 6; i++)
3895                 v->mb_type[0][s->block_index[i]] = 0;
3896             fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3897             /* for all motion vector read MVDATA and motion compensate each block */
             /* 4MV path: one MV per coded luma block, chroma MC at i == 4 */
3901                 for (i = 0; i < 6; i++) {
3904                         val = ((mvbp >> (3 - i)) & 1);
3906                             get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3908                         vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3909                         vc1_mc_4mv_luma(v, i, 0, 0);
3910                     } else if (i == 4) {
3911                         vc1_mc_4mv_chroma4(v, 0, 0, 0);
                 /* 2MV-field path: top-field MV drives blocks 0/1, bottom 2/3 */
3918                     get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3920                 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3921                 vc1_mc_4mv_luma(v, 0, 0, 0);
3922                 vc1_mc_4mv_luma(v, 1, 0, 0);
3925                     get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3927                 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3928                 vc1_mc_4mv_luma(v, 2, 0, 0);
3929                 vc1_mc_4mv_luma(v, 3, 0, 0);
3930                 vc1_mc_4mv_chroma4(v, 0, 0, 0);
             /* 1MV path: MV presence comes from the mbmode table */
3932                 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3935                     get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3937                 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3941                 GET_MQUANT();  // p. 227
3942                 s->current_picture.qscale_table[mb_pos] = mquant;
3943                 if (!v->ttmbf && cbp)
3944                     ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3945                 for (i = 0; i < 6; i++) {
3946                     s->dc_val[0][s->block_index[i]] = 0;
3948                     val = ((cbp >> (5 - i)) & 1);
                     /* block offset depends on fieldtx luma layout */
3950                         off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3952                         off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3954                         pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3955                                                  first_block, s->dest[dst_idx] + off,
3956                                                  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3957                                                  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3958                         block_cbp |= pat << (i << 2);
3959                         if (!v->ttmbf && ttmb < 8)
         /* skipped MB: clear types, zero-MV predict, no residual */
3966         s->mb_intra = v->is_intra[s->mb_x] = 0;
3967         for (i = 0; i < 6; i++) {
3968             v->mb_type[0][s->block_index[i]] = 0;
3969             s->dc_val[0][s->block_index[i]] = 0;
3971         s->current_picture.mb_type[mb_pos]      = MB_TYPE_SKIP;
3972         s->current_picture.qscale_table[mb_pos] = 0;
3973         v->blk_mv_type[s->block_index[0]] = 0;
3974         v->blk_mv_type[s->block_index[1]] = 0;
3975         v->blk_mv_type[s->block_index[2]] = 0;
3976         v->blk_mv_type[s->block_index[3]] = 0;
3977         vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
     /* at row end, save the intra mask row for the next MB row's prediction */
3980     if (s->mb_x == s->mb_width - 1)
3981         memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/* Decode one macroblock in an interlaced field P picture.
 * idx_mbmode <= 1 selects intra; otherwise 1MV (idx <= 5) or 4MV.
 * NOTE(review): this chunk of the file is gapped (missing original lines);
 * comments only describe the statements visible here. */
3985 static int vc1_decode_p_mb_intfi(VC1Context *v)
3987     MpegEncContext *s = &v->s;
3988     GetBitContext *gb = &s->gb;
3990     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3991     int cbp = 0; /* cbp decoding stuff */
3992     int mqdiff, mquant; /* MB quantization */
3993     int ttmb = v->ttfrm; /* MB Transform type */
3995     int mb_has_coeffs = 1; /* last_flag */
3996     int dmv_x, dmv_y; /* Differential MV components */
3997     int val; /* temp values */
3998     int first_block = 1;
4001     int block_cbp = 0, pat, block_tt = 0;
4004     mquant = v->pq; /* Lossy initialization */
4006     idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4007     if (idx_mbmode <= 1) { // intra MB
4009         v->is_intra[s->mb_x] = 0x3F; // Set the bitfield to all 1.
4010         s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4011         s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4012         s->current_picture.mb_type[mb_pos + v->mb_off]                         = MB_TYPE_INTRA;
4014         s->current_picture.qscale_table[mb_pos] = mquant;
4015         /* Set DC scale - y and c use the same (not sure if necessary here) */
4016         s->y_dc_scale = s->y_dc_scale_table[mquant];
4017         s->c_dc_scale = s->c_dc_scale_table[mquant];
4018         v->s.ac_pred  = v->acpred_plane[mb_pos] = get_bits1(gb);
4019         mb_has_coeffs = idx_mbmode & 1;
             /* field pictures use the interlaced CBPCY table here */
4021             cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4023         for (i = 0; i < 6; i++) {
4024             s->dc_val[0][s->block_index[i]]  = 0;
4025             v->mb_type[0][s->block_index[i]] = 1;
4027             val = ((cbp >> (5 - i)) & 1);
4028             v->a_avail = v->c_avail = 0;
4029             if (i == 2 || i == 3 || !s->first_slice_line)
4030                 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4031             if (i == 1 || i == 3 || s->mb_x)
4032                 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4034             vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4035                                    (i & 4) ? v->codingset2 : v->codingset);
4036             if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4038             v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4039             off  = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4040             s->idsp.put_signed_pixels_clamped(s->block[i],
4041                                               s->dest[dst_idx] + off,
4042                                               (i & 4) ? s->uvlinesize
4044             // TODO: loop filter
     /* inter MB */
4047         s->mb_intra = v->is_intra[s->mb_x] = 0;
4048         s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4049         for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4050         if (idx_mbmode <= 5) { // 1-MV
4051             dmv_x = dmv_y = pred_flag = 0;
4052             if (idx_mbmode & 1) {
4053                 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4055             vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4057             mb_has_coeffs = !(idx_mbmode & 2);
         /* 4-MV: block pattern tells which luma blocks carry an MV */
4059             v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4060             for (i = 0; i < 6; i++) {
4062                     dmv_x = dmv_y = pred_flag = 0;
4063                     val = ((v->fourmvbp >> (3 - i)) & 1);
4065                         get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4067                     vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4068                     vc1_mc_4mv_luma(v, i, 0, 0);
4070                     vc1_mc_4mv_chroma(v, 0);
4072             mb_has_coeffs = idx_mbmode & 1;
4075             cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4079         s->current_picture.qscale_table[mb_pos] = mquant;
4080         if (!v->ttmbf && cbp) {
4081             ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4084         for (i = 0; i < 6; i++) {
4085             s->dc_val[0][s->block_index[i]] = 0;
4087             val = ((cbp >> (5 - i)) & 1);
4088             off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4090                 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4091                                          first_block, s->dest[dst_idx] + off,
4092                                          (i & 4) ? s->uvlinesize : s->linesize,
4093                                          (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4095                 block_cbp |= pat << (i << 2);
4096                 if (!v->ttmbf && ttmb < 8) ttmb = -1;
     /* at row end, preserve the intra mask row for next-row prediction */
4101     if (s->mb_x == s->mb_width - 1)
4102         memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4106 /** Decode one B-frame MB (in Main profile)
 * Handles direct and non-direct B MBs with forward / backward /
 * interpolated motion types, skipped and coded variants.
 * NOTE(review): this chunk of the file is gapped (missing original lines);
 * comments only describe the statements visible here.
 */
4108 static void vc1_decode_b_mb(VC1Context *v)
4110     MpegEncContext *s = &v->s;
4111     GetBitContext *gb = &s->gb;
4113     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4114     int cbp = 0; /* cbp decoding stuff */
4115     int mqdiff, mquant; /* MB quantization */
4116     int ttmb = v->ttfrm; /* MB Transform type */
4117     int mb_has_coeffs = 0; /* last_flag */
4118     int index, index1; /* LUT indexes */
4119     int val, sign; /* temp values */
4120     int first_block = 1;
4122     int skipped, direct;
4123     int dmv_x[2], dmv_y[2];
4124     int bmvtype = BMV_TYPE_BACKWARD;
4126     mquant = v->pq; /* lossy initialization */
     /* direct and skip flags: raw bit or decoded bitplane */
4130         direct = get_bits1(gb);
4132         direct = v->direct_mb_plane[mb_pos];
4134         skipped = get_bits1(gb);
4136         skipped = v->s.mbskip_table[mb_pos];
4138     dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4139     for (i = 0; i < 6; i++) {
4140         v->mb_type[0][s->block_index[i]] = 0;
4141         s->dc_val[0][s->block_index[i]]  = 0;
4143     s->current_picture.qscale_table[mb_pos] = 0;
         /* non-direct: read one MVDATA, mirror into both directions */
4147             GET_MVDATA(dmv_x[0], dmv_y[0]);
4148             dmv_x[1] = dmv_x[0];
4149             dmv_y[1] = dmv_y[0];
4151         if (skipped || !s->mb_intra) {
4152             bmvtype = decode012(gb);
             /* decode012() result maps to BW/FW depending on bfraction half */
4155                 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4158                 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4161                 bmvtype  = BMV_TYPE_INTERPOLATED;
4162                 dmv_x[0] = dmv_y[0] = 0;
4166     for (i = 0; i < 6; i++)
4167         v->mb_type[0][s->block_index[i]] = s->mb_intra;
     /* skipped MB: just predict + motion-compensate, no residual */
4171             bmvtype = BMV_TYPE_INTERPOLATED;
4172         vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4173         vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4177             cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4180             s->current_picture.qscale_table[mb_pos] = mquant;
4182                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
             /* direct mode: MVs derived from co-located, no differentials */
4183             dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4184             vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4185             vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4187             if (!mb_has_coeffs && !s->mb_intra) {
4188                 /* no coded blocks - effectively skipped */
4189                 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4190                 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4193             if (s->mb_intra && !mb_has_coeffs) {
4195                 s->current_picture.qscale_table[mb_pos] = mquant;
4196                 s->ac_pred = get_bits1(gb);
4198                 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4200                 if (bmvtype == BMV_TYPE_INTERPOLATED) {
                     /* interpolated: a second MVDATA for the other direction */
4201                     GET_MVDATA(dmv_x[0], dmv_y[0]);
4202                     if (!mb_has_coeffs) {
4203                         /* interpolated skipped block */
4204                         vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4205                         vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4209                 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4211                     vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4214                     s->ac_pred = get_bits1(gb);
4215                 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4217                 s->current_picture.qscale_table[mb_pos] = mquant;
4218                 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4219                     ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
     /* decode/reconstruct the 6 blocks (4 luma + 2 chroma) */
4223     for (i = 0; i < 6; i++) {
4224         s->dc_val[0][s->block_index[i]] = 0;
4226         val = ((cbp >> (5 - i)) & 1);
4227         off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4228         v->mb_type[0][s->block_index[i]] = s->mb_intra;
4230             /* check if prediction blocks A and C are available */
4231             v->a_avail = v->c_avail = 0;
4232             if (i == 2 || i == 3 || !s->first_slice_line)
4233                 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4234             if (i == 1 || i == 3 || s->mb_x)
4235                 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4237             vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4238                                    (i & 4) ? v->codingset2 : v->codingset);
4239             if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4241             v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4243                 for (j = 0; j < 64; j++)
4244                     s->block[i][j] <<= 1;
4245             s->idsp.put_signed_pixels_clamped(s->block[i],
4246                                               s->dest[dst_idx] + off,
4247                                               i & 4 ? s->uvlinesize
4250             vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4251                                first_block, s->dest[dst_idx] + off,
4252                                (i & 4) ? s->uvlinesize : s->linesize,
4253                                (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4254             if (!v->ttmbf && ttmb < 8)
4261 /** Decode one B-frame MB (in interlaced field B picture)
 * idx_mbmode <= 1 selects intra; otherwise 1MV (idx <= 5) or 4MV inter,
 * with forward/backward/direct/interpolated B motion types.
 * NOTE(review): this chunk of the file is gapped (missing original lines);
 * comments only describe the statements visible here.
 */
4263 static void vc1_decode_b_mb_intfi(VC1Context *v)
4265     MpegEncContext *s = &v->s;
4266     GetBitContext *gb = &s->gb;
4268     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4269     int cbp = 0; /* cbp decoding stuff */
4270     int mqdiff, mquant; /* MB quantization */
4271     int ttmb = v->ttfrm; /* MB Transform type */
4272     int mb_has_coeffs = 0; /* last_flag */
4273     int val; /* temp value */
4274     int first_block = 1;
4277     int dmv_x[2], dmv_y[2], pred_flag[2];
4278     int bmvtype = BMV_TYPE_BACKWARD;
4281     mquant = v->pq; /* Lossy initialization */
4284     idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4285     if (idx_mbmode <= 1) { // intra MB
4287         v->is_intra[s->mb_x] = 0x3F; // Set the bitfield to all 1.
4288         s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4289         s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4290         s->current_picture.mb_type[mb_pos + v->mb_off]         = MB_TYPE_INTRA;
4292         s->current_picture.qscale_table[mb_pos] = mquant;
4293         /* Set DC scale - y and c use the same (not sure if necessary here) */
4294         s->y_dc_scale = s->y_dc_scale_table[mquant];
4295         s->c_dc_scale = s->c_dc_scale_table[mquant];
4296         v->s.ac_pred  = v->acpred_plane[mb_pos] = get_bits1(gb);
4297         mb_has_coeffs = idx_mbmode & 1;
4299             cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4301         for (i = 0; i < 6; i++) {
4302             s->dc_val[0][s->block_index[i]] = 0;
4304             val = ((cbp >> (5 - i)) & 1);
4305             v->mb_type[0][s->block_index[i]] = s->mb_intra;
4306             v->a_avail                       = v->c_avail = 0;
4307             if (i == 2 || i == 3 || !s->first_slice_line)
4308                 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4309             if (i == 1 || i == 3 || s->mb_x)
4310                 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4312             vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4313                                    (i & 4) ? v->codingset2 : v->codingset);
4314             if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4316             v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4318                 for (j = 0; j < 64; j++)
4319                     s->block[i][j] <<= 1;
4320             off  = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4321             s->idsp.put_signed_pixels_clamped(s->block[i],
4322                                               s->dest[dst_idx] + off,
4323                                               (i & 4) ? s->uvlinesize
4325             // TODO: yet to perform loop filter
     /* inter MB */
4328         s->mb_intra = v->is_intra[s->mb_x] = 0;
4329         s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4330         for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
         /* forward flag: raw bit or bitplane */
4332             fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4334             fwd = v->forward_mb_plane[mb_pos];
4335         if (idx_mbmode <= 5) { // 1-MV
4337             dmv_x[0]     = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4338             pred_flag[0] = pred_flag[1] = 0;
4340                 bmvtype = BMV_TYPE_FORWARD;
4342                 bmvtype = decode012(gb);
4345                     bmvtype = BMV_TYPE_BACKWARD;
4348                     bmvtype = BMV_TYPE_DIRECT;
4351                     bmvtype   = BMV_TYPE_INTERPOLATED;
4352                     interpmvp = get_bits1(gb);
4355             v->bmvtype = bmvtype;
4356             if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
                 /* index [bmvtype == BMV_TYPE_BACKWARD] selects FW(0)/BW(1) slot */
4357                 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4360                 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4362             if (bmvtype == BMV_TYPE_DIRECT) {
4363                 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
                 /* NOTE(review): the line below zeroes pred_flag[0] again;
                  * pred_flag[1] looks intended — verify against the spec
                  * before changing (pred_flag[1] is zeroed at 4338 anyway). */
4364                 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4365                 if (!s->next_picture_ptr->field_picture) {
4366                     av_log(s->avctx, AV_LOG_ERROR, "Mixed field/frame direct mode not supported\n");
4370             vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4371             vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4372             mb_has_coeffs = !(idx_mbmode & 2);
         /* 4-MV: forward only; block pattern gates per-block MV decode */
4375                 bmvtype = BMV_TYPE_FORWARD;
4376             v->bmvtype  = bmvtype;
4377             v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4378             for (i = 0; i < 6; i++) {
4380                     dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4381                     dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4382                     val = ((v->fourmvbp >> (3 - i)) & 1);
4384                         get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4385                                               &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4386                                               &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4388                     vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4389                     vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
4391                     vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4393             mb_has_coeffs = idx_mbmode & 1;
4396             cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4400         s->current_picture.qscale_table[mb_pos] = mquant;
4401         if (!v->ttmbf && cbp) {
4402             ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4405         for (i = 0; i < 6; i++) {
4406             s->dc_val[0][s->block_index[i]] = 0;
4408             val = ((cbp >> (5 - i)) & 1);
4409             off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4411                 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4412                                    first_block, s->dest[dst_idx] + off,
4413                                    (i & 4) ? s->uvlinesize : s->linesize,
4414                                    (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4415                 if (!v->ttmbf && ttmb < 8)
4423 /** Decode one B-frame MB (in interlaced frame B picture)
4425 static int vc1_decode_b_mb_intfr(VC1Context *v)
4427 MpegEncContext *s = &v->s;
4428 GetBitContext *gb = &s->gb;
4430 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4431 int cbp = 0; /* cbp decoding stuff */
4432 int mqdiff, mquant; /* MB quantization */
4433 int ttmb = v->ttfrm; /* MB Transform type */
4434 int mvsw = 0; /* motion vector switch */
4435 int mb_has_coeffs = 1; /* last_flag */
4436 int dmv_x, dmv_y; /* Differential MV components */
4437 int val; /* temp value */
4438 int first_block = 1;
4440 int skipped, direct, twomv = 0;
4441 int block_cbp = 0, pat, block_tt = 0;
4442 int idx_mbmode = 0, mvbp;
4443 int stride_y, fieldtx;
4444 int bmvtype = BMV_TYPE_BACKWARD;
4447 mquant = v->pq; /* Lossy initialization */
4450 skipped = get_bits1(gb);
4452 skipped = v->s.mbskip_table[mb_pos];
4455 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
4456 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
4458 v->blk_mv_type[s->block_index[0]] = 1;
4459 v->blk_mv_type[s->block_index[1]] = 1;
4460 v->blk_mv_type[s->block_index[2]] = 1;
4461 v->blk_mv_type[s->block_index[3]] = 1;
4463 v->blk_mv_type[s->block_index[0]] = 0;
4464 v->blk_mv_type[s->block_index[1]] = 0;
4465 v->blk_mv_type[s->block_index[2]] = 0;
4466 v->blk_mv_type[s->block_index[3]] = 0;
4471 direct = get_bits1(gb);
4473 direct = v->direct_mb_plane[mb_pos];
4476 if (s->next_picture_ptr->field_picture)
4477 av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
4478 s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
4479 s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
4480 s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
4481 s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
4484 s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
4485 s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
4486 s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
4487 s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
4489 for (i = 1; i < 4; i += 2) {
4490 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
4491 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
4492 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
4493 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
4496 for (i = 1; i < 4; i++) {
4497 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
4498 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
4499 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
4500 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
4505 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
4506 for (i = 0; i < 4; i++) {
4507 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
4508 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
4509 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
4510 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
4512 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4514 v->is_intra[s->mb_x] = 0x3F;
4515 for (i = 0; i < 6; i++)
4516 v->mb_type[0][s->block_index[i]] = 1;
4517 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
4518 mb_has_coeffs = get_bits1(gb);
4520 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4521 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4523 s->current_picture.qscale_table[mb_pos] = mquant;
4524 /* Set DC scale - y and c use the same (not sure if necessary here) */
4525 s->y_dc_scale = s->y_dc_scale_table[mquant];
4526 s->c_dc_scale = s->c_dc_scale_table[mquant];
4528 for (i = 0; i < 6; i++) {
4529 s->dc_val[0][s->block_index[i]] = 0;
4531 val = ((cbp >> (5 - i)) & 1);
4532 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4533 v->a_avail = v->c_avail = 0;
4534 if (i == 2 || i == 3 || !s->first_slice_line)
4535 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4536 if (i == 1 || i == 3 || s->mb_x)
4537 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4539 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4540 (i & 4) ? v->codingset2 : v->codingset);
4541 if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
4543 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4545 stride_y = s->linesize << fieldtx;
4546 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4548 stride_y = s->uvlinesize;
4551 s->idsp.put_signed_pixels_clamped(s->block[i],
4552 s->dest[dst_idx] + off,
4556 s->mb_intra = v->is_intra[s->mb_x] = 0;
4558 if (skipped || !s->mb_intra) {
4559 bmvtype = decode012(gb);
4562 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4565 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4568 bmvtype = BMV_TYPE_INTERPOLATED;
4572 if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
4573 mvsw = get_bits1(gb);
4576 if (!skipped) { // inter MB
4577 mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
4579 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4581 if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
4582 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4583 } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
4584 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
4588 for (i = 0; i < 6; i++)
4589 v->mb_type[0][s->block_index[i]] = 0;
4590 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
4591 /* for all motion vector read MVDATA and motion compensate each block */
4595 for (i = 0; i < 4; i++) {
4596 vc1_mc_4mv_luma(v, i, 0, 0);
4597 vc1_mc_4mv_luma(v, i, 1, 1);
4599 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4600 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4605 } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
4607 for (i = 0; i < 4; i++) {
4610 val = ((mvbp >> (3 - i)) & 1);
4612 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4614 vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4615 vc1_mc_4mv_luma(v, j, dir, dir);
4616 vc1_mc_4mv_luma(v, j+1, dir, dir);
4619 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4620 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4621 } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
4625 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4627 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4632 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4634 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4637 dir = bmvtype == BMV_TYPE_BACKWARD;
4644 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4645 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4649 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4650 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
4653 for (i = 0; i < 2; i++) {
4654 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4655 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4656 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4657 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4660 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4661 vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4664 vc1_mc_4mv_luma(v, 0, dir, 0);
4665 vc1_mc_4mv_luma(v, 1, dir, 0);
4666 vc1_mc_4mv_luma(v, 2, dir2, 0);
4667 vc1_mc_4mv_luma(v, 3, dir2, 0);
4668 vc1_mc_4mv_chroma4(v, dir, dir2, 0);
4670 dir = bmvtype == BMV_TYPE_BACKWARD;
4672 mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
4675 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4677 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4678 v->blk_mv_type[s->block_index[0]] = 1;
4679 v->blk_mv_type[s->block_index[1]] = 1;
4680 v->blk_mv_type[s->block_index[2]] = 1;
4681 v->blk_mv_type[s->block_index[3]] = 1;
4682 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4683 for (i = 0; i < 2; i++) {
4684 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4685 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4691 GET_MQUANT(); // p. 227
4692 s->current_picture.qscale_table[mb_pos] = mquant;
4693 if (!v->ttmbf && cbp)
4694 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4695 for (i = 0; i < 6; i++) {
4696 s->dc_val[0][s->block_index[i]] = 0;
4698 val = ((cbp >> (5 - i)) & 1);
4700 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4702 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4704 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4705 first_block, s->dest[dst_idx] + off,
4706 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4707 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4708 block_cbp |= pat << (i << 2);
4709 if (!v->ttmbf && ttmb < 8)
4717 for (i = 0; i < 6; i++) {
4718 v->mb_type[0][s->block_index[i]] = 0;
4719 s->dc_val[0][s->block_index[i]] = 0;
4721 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
4722 s->current_picture.qscale_table[mb_pos] = 0;
4723 v->blk_mv_type[s->block_index[0]] = 0;
4724 v->blk_mv_type[s->block_index[1]] = 0;
4725 v->blk_mv_type[s->block_index[2]] = 0;
4726 v->blk_mv_type[s->block_index[3]] = 0;
4729 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4730 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4731 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4733 dir = bmvtype == BMV_TYPE_BACKWARD;
4734 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4739 for (i = 0; i < 2; i++) {
4740 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4741 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4742 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4743 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4746 v->blk_mv_type[s->block_index[0]] = 1;
4747 v->blk_mv_type[s->block_index[1]] = 1;
4748 v->blk_mv_type[s->block_index[2]] = 1;
4749 v->blk_mv_type[s->block_index[3]] = 1;
4750 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4751 for (i = 0; i < 2; i++) {
4752 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4753 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4760 if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
4765 if (s->mb_x == s->mb_width - 1)
4766 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4767 v->cbp[s->mb_x] = block_cbp;
4768 v->ttblk[s->mb_x] = block_tt;
4772 /** Decode blocks of I-frame
4774 static void vc1_decode_i_blocks(VC1Context *v)
4777 MpegEncContext *s = &v->s;
4782 /* select codingmode used for VLC tables selection */
4783 switch (v->y_ac_table_index) {
4785 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4788 v->codingset = CS_HIGH_MOT_INTRA;
4791 v->codingset = CS_MID_RATE_INTRA;
4795 switch (v->c_ac_table_index) {
4797 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4800 v->codingset2 = CS_HIGH_MOT_INTER;
4803 v->codingset2 = CS_MID_RATE_INTER;
4807 /* Set DC scale - y and c use the same */
4808 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4809 s->c_dc_scale = s->c_dc_scale_table[v->pq];
4812 s->mb_x = s->mb_y = 0;
4814 s->first_slice_line = 1;
4815 for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
4817 init_block_index(v);
4818 for (; s->mb_x < v->end_mb_x; s->mb_x++) {
4820 ff_update_block_index(s);
4821 dst[0] = s->dest[0];
4822 dst[1] = dst[0] + 8;
4823 dst[2] = s->dest[0] + s->linesize * 8;
4824 dst[3] = dst[2] + 8;
4825 dst[4] = s->dest[1];
4826 dst[5] = s->dest[2];
4827 s->bdsp.clear_blocks(s->block[0]);
4828 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4829 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4830 s->current_picture.qscale_table[mb_pos] = v->pq;
4831 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4832 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4834 // do actual MB decoding and displaying
4835 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4836 v->s.ac_pred = get_bits1(&v->s.gb);
4838 for (k = 0; k < 6; k++) {
4839 val = ((cbp >> (5 - k)) & 1);
4842 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4846 cbp |= val << (5 - k);
4848 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4850 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4852 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4853 if (v->pq >= 9 && v->overlap) {
4855 for (j = 0; j < 64; j++)
4856 s->block[k][j] <<= 1;
4857 s->idsp.put_signed_pixels_clamped(s->block[k], dst[k],
4858 k & 4 ? s->uvlinesize
4862 for (j = 0; j < 64; j++)
4863 s->block[k][j] = (s->block[k][j] - 64) << 1;
4864 s->idsp.put_pixels_clamped(s->block[k], dst[k],
4865 k & 4 ? s->uvlinesize
4870 if (v->pq >= 9 && v->overlap) {
4872 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4873 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4874 if (!(s->flags & CODEC_FLAG_GRAY)) {
4875 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4876 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4879 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4880 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4881 if (!s->first_slice_line) {
4882 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4883 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4884 if (!(s->flags & CODEC_FLAG_GRAY)) {
4885 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4886 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4889 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4890 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4892 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4894 if (get_bits_count(&s->gb) > v->bits) {
4895 ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4896 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4897 get_bits_count(&s->gb), v->bits);
4901 if (!v->s.loop_filter)
4902 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4904 ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4906 s->first_slice_line = 0;
4908 if (v->s.loop_filter)
4909 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4911 /* This is intentionally mb_height and not end_mb_y - unlike in advanced
4912 * profile, these only differ are when decoding MSS2 rectangles. */
4913 ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4916 /** Decode blocks of I-frame for advanced profile
4918 static void vc1_decode_i_blocks_adv(VC1Context *v)
4921 MpegEncContext *s = &v->s;
4927 GetBitContext *gb = &s->gb;
4929 /* select codingmode used for VLC tables selection */
4930 switch (v->y_ac_table_index) {
4932 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4935 v->codingset = CS_HIGH_MOT_INTRA;
4938 v->codingset = CS_MID_RATE_INTRA;
4942 switch (v->c_ac_table_index) {
4944 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4947 v->codingset2 = CS_HIGH_MOT_INTER;
4950 v->codingset2 = CS_MID_RATE_INTER;
4955 s->mb_x = s->mb_y = 0;
4957 s->first_slice_line = 1;
4958 s->mb_y = s->start_mb_y;
4959 if (s->start_mb_y) {
4961 init_block_index(v);
4962 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4963 (1 + s->b8_stride) * sizeof(*s->coded_block));
4965 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4967 init_block_index(v);
4968 for (;s->mb_x < s->mb_width; s->mb_x++) {
4969 int16_t (*block)[64] = v->block[v->cur_blk_idx];
4970 ff_update_block_index(s);
4971 s->bdsp.clear_blocks(block[0]);
4972 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4973 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4974 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4975 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4977 // do actual MB decoding and displaying
4978 if (v->fieldtx_is_raw)
4979 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4980 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4981 if ( v->acpred_is_raw)
4982 v->s.ac_pred = get_bits1(&v->s.gb);
4984 v->s.ac_pred = v->acpred_plane[mb_pos];
4986 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4987 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4991 s->current_picture.qscale_table[mb_pos] = mquant;
4992 /* Set DC scale - y and c use the same */
4993 s->y_dc_scale = s->y_dc_scale_table[mquant];
4994 s->c_dc_scale = s->c_dc_scale_table[mquant];
4996 for (k = 0; k < 6; k++) {
4997 val = ((cbp >> (5 - k)) & 1);
5000 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
5004 cbp |= val << (5 - k);
5006 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
5007 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
5009 vc1_decode_i_block_adv(v, block[k], k, val,
5010 (k < 4) ? v->codingset : v->codingset2, mquant);
5012 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
5014 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
5017 vc1_smooth_overlap_filter_iblk(v);
5018 vc1_put_signed_blocks_clamped(v);
5019 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
5021 if (get_bits_count(&s->gb) > v->bits) {
5022 // TODO: may need modification to handle slice coding
5023 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5024 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
5025 get_bits_count(&s->gb), v->bits);
5029 if (!v->s.loop_filter)
5030 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5032 ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
5033 s->first_slice_line = 0;
5036 /* raw bottom MB row */
5038 init_block_index(v);
5040 for (;s->mb_x < s->mb_width; s->mb_x++) {
5041 ff_update_block_index(s);
5042 vc1_put_signed_blocks_clamped(v);
5043 if (v->s.loop_filter)
5044 vc1_loop_filter_iblk_delayed(v, v->pq);
5046 if (v->s.loop_filter)
5047 ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
5048 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5049 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5052 static void vc1_decode_p_blocks(VC1Context *v)
5054 MpegEncContext *s = &v->s;
5055 int apply_loop_filter;
5057 /* select codingmode used for VLC tables selection */
5058 switch (v->c_ac_table_index) {
5060 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
5063 v->codingset = CS_HIGH_MOT_INTRA;
5066 v->codingset = CS_MID_RATE_INTRA;
5070 switch (v->c_ac_table_index) {
5072 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
5075 v->codingset2 = CS_HIGH_MOT_INTER;
5078 v->codingset2 = CS_MID_RATE_INTER;
5082 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
5083 v->fcm == PROGRESSIVE;
5084 s->first_slice_line = 1;
5085 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
5086 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5088 init_block_index(v);
5089 for (; s->mb_x < s->mb_width; s->mb_x++) {
5090 ff_update_block_index(s);
5092 if (v->fcm == ILACE_FIELD)
5093 vc1_decode_p_mb_intfi(v);
5094 else if (v->fcm == ILACE_FRAME)
5095 vc1_decode_p_mb_intfr(v);
5096 else vc1_decode_p_mb(v);
5097 if (s->mb_y != s->start_mb_y && apply_loop_filter)
5098 vc1_apply_p_loop_filter(v);
5099 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5100 // TODO: may need modification to handle slice coding
5101 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5102 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5103 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5107 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
5108 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
5109 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
5110 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
5111 if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5112 s->first_slice_line = 0;
5114 if (apply_loop_filter) {
5116 init_block_index(v);
5117 for (; s->mb_x < s->mb_width; s->mb_x++) {
5118 ff_update_block_index(s);
5119 vc1_apply_p_loop_filter(v);
5122 if (s->end_mb_y >= s->start_mb_y)
5123 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5124 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5125 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5128 static void vc1_decode_b_blocks(VC1Context *v)
5130 MpegEncContext *s = &v->s;
5132 /* select codingmode used for VLC tables selection */
5133 switch (v->c_ac_table_index) {
5135 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
5138 v->codingset = CS_HIGH_MOT_INTRA;
5141 v->codingset = CS_MID_RATE_INTRA;
5145 switch (v->c_ac_table_index) {
5147 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
5150 v->codingset2 = CS_HIGH_MOT_INTER;
5153 v->codingset2 = CS_MID_RATE_INTER;
5157 s->first_slice_line = 1;
5158 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5160 init_block_index(v);
5161 for (; s->mb_x < s->mb_width; s->mb_x++) {
5162 ff_update_block_index(s);
5164 if (v->fcm == ILACE_FIELD)
5165 vc1_decode_b_mb_intfi(v);
5166 else if (v->fcm == ILACE_FRAME)
5167 vc1_decode_b_mb_intfr(v);
5170 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5171 // TODO: may need modification to handle slice coding
5172 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5173 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5174 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5177 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
5179 if (!v->s.loop_filter)
5180 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5182 ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5183 s->first_slice_line = 0;
5185 if (v->s.loop_filter)
5186 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5187 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5188 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5191 static void vc1_decode_skip_blocks(VC1Context *v)
5193 MpegEncContext *s = &v->s;
5195 if (!v->s.last_picture.f->data[0])
5198 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
5199 s->first_slice_line = 1;
5200 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5202 init_block_index(v);
5203 ff_update_block_index(s);
5204 memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
5205 memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5206 memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5207 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5208 s->first_slice_line = 0;
5210 s->pict_type = AV_PICTURE_TYPE_P;
5213 void ff_vc1_decode_blocks(VC1Context *v)
5216 v->s.esc3_level_length = 0;
5218 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
5221 v->left_blk_idx = -1;
5222 v->topleft_blk_idx = 1;
5224 switch (v->s.pict_type) {
5225 case AV_PICTURE_TYPE_I:
5226 if (v->profile == PROFILE_ADVANCED)
5227 vc1_decode_i_blocks_adv(v);
5229 vc1_decode_i_blocks(v);
5231 case AV_PICTURE_TYPE_P:
5232 if (v->p_frame_skipped)
5233 vc1_decode_skip_blocks(v);
5235 vc1_decode_p_blocks(v);
5237 case AV_PICTURE_TYPE_B:
5239 if (v->profile == PROFILE_ADVANCED)
5240 vc1_decode_i_blocks_adv(v);
5242 vc1_decode_i_blocks(v);
5244 vc1_decode_b_blocks(v);
5250 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5254 * Transform coefficients for both sprites in 16.16 fixed point format,
5255 * in the order they appear in the bitstream:
5257 * rotation 1 (unused)
5259 * rotation 2 (unused)
5266 int effect_type, effect_flag;
5267 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
5268 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
5271 static inline int get_fp_val(GetBitContext* gb)
5273 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
5276 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
5280 switch (get_bits(gb, 2)) {
5283 c[2] = get_fp_val(gb);
5287 c[0] = c[4] = get_fp_val(gb);
5288 c[2] = get_fp_val(gb);
5291 c[0] = get_fp_val(gb);
5292 c[2] = get_fp_val(gb);
5293 c[4] = get_fp_val(gb);
5296 c[0] = get_fp_val(gb);
5297 c[1] = get_fp_val(gb);
5298 c[2] = get_fp_val(gb);
5299 c[3] = get_fp_val(gb);
5300 c[4] = get_fp_val(gb);
5303 c[5] = get_fp_val(gb);
5305 c[6] = get_fp_val(gb);
5310 static int vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
5312 AVCodecContext *avctx = v->s.avctx;
5315 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5316 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
5317 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
5318 avpriv_request_sample(avctx, "Non-zero rotation coefficients");
5319 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
5320 for (i = 0; i < 7; i++)
5321 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
5322 sd->coefs[sprite][i] / (1<<16),
5323 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
5324 av_log(avctx, AV_LOG_DEBUG, "\n");
5328 if (sd->effect_type = get_bits_long(gb, 30)) {
5329 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
5331 vc1_sprite_parse_transform(gb, sd->effect_params1);
5334 vc1_sprite_parse_transform(gb, sd->effect_params1);
5335 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5338 for (i = 0; i < sd->effect_pcount1; i++)
5339 sd->effect_params1[i] = get_fp_val(gb);
5341 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5342 // effect 13 is simple alpha blending and matches the opacity above
5343 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5344 for (i = 0; i < sd->effect_pcount1; i++)
5345 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5346 sd->effect_params1[i] / (1 << 16),
5347 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5348 av_log(avctx, AV_LOG_DEBUG, "\n");
5351 sd->effect_pcount2 = get_bits(gb, 16);
5352 if (sd->effect_pcount2 > 10) {
5353 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5354 return AVERROR_INVALIDDATA;
5355 } else if (sd->effect_pcount2) {
5357 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5358 while (++i < sd->effect_pcount2) {
5359 sd->effect_params2[i] = get_fp_val(gb);
5360 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5361 sd->effect_params2[i] / (1 << 16),
5362 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5364 av_log(avctx, AV_LOG_DEBUG, "\n");
5367 if (sd->effect_flag = get_bits1(gb))
5368 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
5370 if (get_bits_count(gb) >= gb->size_in_bits +
5371 (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0)) {
5372 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5373 return AVERROR_INVALIDDATA;
5375 if (get_bits_count(gb) < gb->size_in_bits - 8)
5376 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
5381 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5383 int i, plane, row, sprite;
5384 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5385 uint8_t* src_h[2][2];
5386 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5388 MpegEncContext *s = &v->s;
5390 for (i = 0; i <= v->two_sprites; i++) {
5391 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5392 xadv[i] = sd->coefs[i][0];
5393 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5394 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5396 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5397 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
5399 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5401 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5402 int width = v->output_width>>!!plane;
5404 for (row = 0; row < v->output_height>>!!plane; row++) {
5405 uint8_t *dst = v->sprite_output_frame->data[plane] +
5406 v->sprite_output_frame->linesize[plane] * row;
5408 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5409 uint8_t *iplane = s->current_picture.f->data[plane];
5410 int iline = s->current_picture.f->linesize[plane];
5411 int ycoord = yoff[sprite] + yadv[sprite] * row;
5412 int yline = ycoord >> 16;
5414 ysub[sprite] = ycoord & 0xFFFF;
5416 iplane = s->last_picture.f->data[plane];
5417 iline = s->last_picture.f->linesize[plane];
5419 next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
5420 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5421 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5423 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
5425 if (sr_cache[sprite][0] != yline) {
5426 if (sr_cache[sprite][1] == yline) {
5427 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5428 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5430 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5431 sr_cache[sprite][0] = yline;
5434 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5435 v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
5436 iplane + next_line, xoff[sprite],
5437 xadv[sprite], width);
5438 sr_cache[sprite][1] = yline + 1;
5440 src_h[sprite][0] = v->sr_rows[sprite][0];
5441 src_h[sprite][1] = v->sr_rows[sprite][1];
5445 if (!v->two_sprites) {
5447 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5449 memcpy(dst, src_h[0][0], width);
5452 if (ysub[0] && ysub[1]) {
5453 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5454 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5455 } else if (ysub[0]) {
5456 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5457 src_h[1][0], alpha, width);
5458 } else if (ysub[1]) {
5459 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5460 src_h[0][0], (1<<16)-1-alpha, width);
5462 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5468 for (i = 0; i <= v->two_sprites; i++) {
5478 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5481 MpegEncContext *s = &v->s;
5482 AVCodecContext *avctx = s->avctx;
5485 memset(&sd, 0, sizeof(sd));
5487 ret = vc1_parse_sprites(v, gb, &sd);
5491 if (!s->current_picture.f || !s->current_picture.f->data[0]) {
5492 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5496 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f->data[0])) {
5497 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5501 av_frame_unref(v->sprite_output_frame);
5502 if ((ret = ff_get_buffer(avctx, v->sprite_output_frame, 0)) < 0)
5505 vc1_draw_sprites(v, &sd);
5510 static void vc1_sprite_flush(AVCodecContext *avctx)
5512 VC1Context *v = avctx->priv_data;
5513 MpegEncContext *s = &v->s;
5514 AVFrame *f = s->current_picture.f;
5517 /* Windows Media Image codecs have a convergence interval of two keyframes.
5518 Since we can't enforce it, clear to black the missing sprite. This is
5519 wrong but it looks better than doing nothing. */
5521 if (f && f->data[0])
5522 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5523 for (i = 0; i < v->sprite_height>>!!plane; i++)
5524 memset(f->data[plane] + i * f->linesize[plane],
5525 plane ? 128 : 0, f->linesize[plane]);
/**
 * Allocate the per-sequence decoder tables: MB-level bitplanes, per-row
 * CBP/transform-type/intra/luma-MV side data, block-type and block-level
 * MV arrays, and (for the WMV3IMAGE/VC1IMAGE sprite codecs) one scratch
 * row per sprite plane pair.
 *
 * Sizes are derived from the MpegEncContext geometry; mb_height is rounded
 * up to an even value so field pictures can address both halves.
 * Visible failure paths return AVERROR(ENOMEM).
 *
 * NOTE(review): this chunk is a sampled extract — braces, the declaration
 * of 'i', and the success return are not visible here; code lines are left
 * byte-identical, only comments were added.
 */
5530 av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
5532 MpegEncContext *s = &v->s;
/* round up to an even MB height so field mode can split the picture */
5534 int mb_height = FFALIGN(s->mb_height, 2);
5536 /* Allocate mb bitplanes */
5537 v->mv_type_mb_plane = av_malloc (s->mb_stride * mb_height);
5538 v->direct_mb_plane = av_malloc (s->mb_stride * mb_height);
5539 v->forward_mb_plane = av_malloc (s->mb_stride * mb_height);
5540 v->fieldtx_plane = av_mallocz(s->mb_stride * mb_height);
5541 v->acpred_plane = av_malloc (s->mb_stride * mb_height);
5542 v->over_flags_plane = av_malloc (s->mb_stride * mb_height);
/* two extra block columns for edge handling */
5544 v->n_allocated_blks = s->mb_width + 2;
5545 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
/* each *_base holds two MB rows; the working pointer starts one row in,
 * so the previous row can be addressed with negative offsets */
5546 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5547 v->cbp = v->cbp_base + s->mb_stride;
5548 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5549 v->ttblk = v->ttblk_base + s->mb_stride;
5550 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5551 v->is_intra = v->is_intra_base + s->mb_stride;
5552 v->luma_mv_base = av_mallocz(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5553 v->luma_mv = v->luma_mv_base + s->mb_stride;
5555 /* allocate block type info in that way so it could be used with s->block_index[] */
5556 v->mb_type_base = av_malloc(s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5557 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5558 v->mb_type[1] = v->mb_type_base + s->b8_stride * (mb_height * 2 + 1) + s->mb_stride + 1;
5559 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (mb_height + 1);
5561 /* allocate memory to store block level MV info */
5562 v->blk_mv_type_base = av_mallocz( s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5563 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
/* mv_f / mv_f_next each store two planes (current + other field) laid out
 * with the same b8/mb stride geometry as mb_type above */
5564 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5565 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5566 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5567 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5568 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5569 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5571 /* Init coded blocks info */
5572 if (v->profile == PROFILE_ADVANCED) {
5573 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5575 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5579 ff_intrax8_common_init(&v->x8,s);
/* sprite codecs need one scratch row per plane for the compositor */
5581 if (s->avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || s->avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5582 for (i = 0; i < 4; i++)
/* NOTE(review): returning here leaks the earlier allocations; the
 * combined check below does not cover this path — confirm upstream */
5583 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width)))
5584 return AVERROR(ENOMEM);
/* combined OOM check: if any table failed, free everything allocated so far */
5587 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5588 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5590 av_freep(&v->mv_type_mb_plane);
5591 av_freep(&v->direct_mb_plane);
5592 av_freep(&v->acpred_plane);
5593 av_freep(&v->over_flags_plane);
5594 av_freep(&v->block);
5595 av_freep(&v->cbp_base);
5596 av_freep(&v->ttblk_base);
5597 av_freep(&v->is_intra_base);
5598 av_freep(&v->luma_mv_base);
5599 av_freep(&v->mb_type_base);
5600 return AVERROR(ENOMEM);
5606 av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
5609 for (i = 0; i < 64; i++) {
5610 #define transpose(x) (((x) >> 3) | (((x) & 7) << 3))
5611 v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5612 v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5613 v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5614 v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5615 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5621 /** Initialize a VC1/WMV3 decoder
5622 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5623 * @todo TODO: Decypher remaining bits in extra_data
 *
 * Parses the codec extradata (WMV3: raw sequence header; VC1/WVC1/WVP2:
 * marker-delimited sequence header + entry point), chooses the pixel
 * format, and sets up coded dimensions and scan tables. For the sprite
 * codecs it also saves/validates the sprite vs. output geometry.
 *
 * NOTE(review): this chunk is a sampled extract — braces, several
 * returns/gotos and some statements are not visible; code lines are left
 * byte-identical, only comments were added.
 */
5625 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5627 VC1Context *v = avctx->priv_data;
5628 MpegEncContext *s = &v->s;
5632 /* save the container output size for WMImage */
5633 v->output_width = avctx->width;
5634 v->output_height = avctx->height;
/* extradata is mandatory: it carries the sequence header */
5636 if (!avctx->extradata_size || !avctx->extradata)
/* pixel format: negotiate hwaccel/software unless gray-only decoding */
5638 if (!(avctx->flags & CODEC_FLAG_GRAY))
5639 avctx->pix_fmt = ff_get_format(avctx, avctx->codec->pix_fmts);
5641 avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5644 if ((ret = ff_vc1_init_common(v)) < 0)
5646 // ensure static VLC tables are initialized
5647 if ((ret = ff_msmpeg4_decode_init(avctx)) < 0)
5649 if ((ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
5651 // Hack to ensure the above functions will be called
5652 // again once we know all necessary settings.
5653 // That this is necessary might indicate a bug.
5654 ff_vc1_decode_end(avctx);
5656 ff_blockdsp_init(&s->bdsp, avctx);
5657 ff_h264chroma_init(&v->h264chroma, 8);
5658 ff_qpeldsp_init(&s->qdsp);
5660 if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5663 // looks like WMV3 has a sequence header stored in the extradata
5664 // advanced sequence header may be before the first frame
5665 // the last byte of the extradata is a version number, 1 for the
5666 // samples we can decode
5668 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5670 if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0)
/* report any bits left over (or over-read) after the sequence header */
5673 count = avctx->extradata_size*8 - get_bits_count(&gb);
5675 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5676 count, get_bits(&gb, count));
5677 } else if (count < 0) {
5678 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5680 } else { // VC1/WVC1/WVP2
/* advanced profile: extradata is a sequence of start-code-delimited,
 * escaped units; walk them and decode SEQHDR + ENTRYPOINT */
5681 const uint8_t *start = avctx->extradata;
5682 uint8_t *end = avctx->extradata + avctx->extradata_size;
5683 const uint8_t *next;
5684 int size, buf2_size;
5685 uint8_t *buf2 = NULL;
5686 int seq_initialized = 0, ep_initialized = 0;
5688 if (avctx->extradata_size < 16) {
5689 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
/* scratch buffer for the unescaped payloads */
5693 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5694 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5696 for (; next < end; start = next) {
5697 next = find_next_marker(start + 4, end);
5698 size = next - start - 4;
5701 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5702 init_get_bits(&gb, buf2, buf2_size * 8);
5703 switch (AV_RB32(start)) {
5704 case VC1_CODE_SEQHDR:
5705 if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0) {
5709 seq_initialized = 1;
5711 case VC1_CODE_ENTRYPOINT:
5712 if ((ret = ff_vc1_decode_entry_point(avctx, v, &gb)) < 0) {
/* both headers must have been seen for a decodable stream */
5721 if (!seq_initialized || !ep_initialized) {
5722 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
/* WVP2 is the VC1IMAGE variant of the Windows Media Image stream */
5725 v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5728 v->sprite_output_frame = av_frame_alloc();
5729 if (!v->sprite_output_frame)
5730 return AVERROR(ENOMEM);
5732 avctx->profile = v->profile;
5733 if (v->profile == PROFILE_ADVANCED)
5734 avctx->level = v->level;
5736 avctx->has_b_frames = !!avctx->max_b_frames;
5738 s->mb_width = (avctx->coded_width + 15) >> 4;
5739 s->mb_height = (avctx->coded_height + 15) >> 4;
/* advanced profile / fast-transform streams use transposed scans,
 * everything else uses the plain WMV1 scan tables */
5741 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5742 ff_vc1_init_transposed_scantables(v);
5744 memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
5749 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
/* remember sprite geometry; expose the container size to the user */
5750 v->sprite_width = avctx->coded_width;
5751 v->sprite_height = avctx->coded_height;
5753 avctx->coded_width = avctx->width = v->output_width;
5754 avctx->coded_height = avctx->height = v->output_height;
5756 // prevent 16.16 overflows
5757 if (v->sprite_width > 1 << 14 ||
5758 v->sprite_height > 1 << 14 ||
5759 v->output_width > 1 << 14 ||
5760 v->output_height > 1 << 14) return -1;
5762 if ((v->sprite_width&1) || (v->sprite_height&1)) {
5763 avpriv_request_sample(avctx, "odd sprites support");
5764 return AVERROR_PATCHWELCOME;
5770 /** Close a VC1/WMV3 decoder
5771 * @warning Initial try at using MpegEncContext stuff
5773 av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
5775 VC1Context *v = avctx->priv_data;
5778 av_frame_free(&v->sprite_output_frame);
5780 for (i = 0; i < 4; i++)
5781 av_freep(&v->sr_rows[i >> 1][i & 1]);
5782 av_freep(&v->hrd_rate);
5783 av_freep(&v->hrd_buffer);
5784 ff_mpv_common_end(&v->s);
5785 av_freep(&v->mv_type_mb_plane);
5786 av_freep(&v->direct_mb_plane);
5787 av_freep(&v->forward_mb_plane);
5788 av_freep(&v->fieldtx_plane);
5789 av_freep(&v->acpred_plane);
5790 av_freep(&v->over_flags_plane);
5791 av_freep(&v->mb_type_base);
5792 av_freep(&v->blk_mv_type_base);
5793 av_freep(&v->mv_f_base);
5794 av_freep(&v->mv_f_next_base);
5795 av_freep(&v->block);
5796 av_freep(&v->cbp_base);
5797 av_freep(&v->ttblk_base);
5798 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5799 av_freep(&v->luma_mv_base);
5800 ff_intrax8_common_end(&v->x8);
5805 /** Decode a VC1/WMV3 frame
5806 * @todo TODO: Handle VC-1 IDUs (Transport level?)
 *
 * Unescapes/splits the input packet into frame, field and slice units,
 * (re)initializes the context on dimension changes, parses the picture
 * header, then decodes either via VDPAU, a generic hwaccel, or the
 * software slice loop, and finally outputs a frame reference.
 *
 * NOTE(review): this chunk is a sampled extract — braces, gotos, several
 * returns and some statements are not visible; code lines are left
 * byte-identical, only comments were added.
 */
5808 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5809 int *got_frame, AVPacket *avpkt)
5811 const uint8_t *buf = avpkt->data;
5812 int buf_size = avpkt->size, n_slices = 0, i, ret;
5813 VC1Context *v = avctx->priv_data;
5814 MpegEncContext *s = &v->s;
5815 AVFrame *pict = data;
5816 uint8_t *buf2 = NULL;
5817 const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
5818 int mb_height, n_slices1=-1;
/* per-slice bookkeeping: unescaped buffer, bit reader, starting MB row */
5823 } *slices = NULL, *tmp;
5825 v->second_field = 0;
5827 if(s->flags & CODEC_FLAG_LOW_DELAY)
5830 /* no supplementary picture */
5831 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5832 /* special case for last picture */
5833 if (s->low_delay == 0 && s->next_picture_ptr) {
5834 if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
5836 s->next_picture_ptr = NULL;
/* VDPAU wants the opaque hardware pixel formats selected up front */
5844 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5845 if (v->profile < PROFILE_ADVANCED)
5846 avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
5848 avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
5851 //for advanced profile we may need to parse and unescape data
5852 if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5854 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5856 return AVERROR(ENOMEM);
5858 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5859 const uint8_t *start, *end, *next;
/* walk the start-code-delimited units of the access unit */
5863 for (start = buf, end = buf + buf_size; next < end; start = next) {
5864 next = find_next_marker(start + 4, end);
5865 size = next - start - 4;
5866 if (size <= 0) continue;
5867 switch (AV_RB32(start)) {
5868 case VC1_CODE_FRAME:
/* hwaccels consume the escaped bitstream from the marker onwards */
5869 if (avctx->hwaccel ||
5870 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5872 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5874 case VC1_CODE_FIELD: {
/* second field of an interlaced frame: record it as a pseudo-slice */
5876 if (avctx->hwaccel ||
5877 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5878 buf_start_second_field = start;
5879 tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
5883 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5884 if (!slices[n_slices].buf)
5886 buf_size3 = vc1_unescape_buffer(start + 4, size,
5887 slices[n_slices].buf);
5888 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5890 /* assuming that the field marker is at the exact middle,
5891 hope it's correct */
5892 slices[n_slices].mby_start = s->mb_height + 1 >> 1;
5893 n_slices1 = n_slices - 1; // index of the last slice of the first field
5897 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5898 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5899 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5900 ff_vc1_decode_entry_point(avctx, v, &s->gb);
5902 case VC1_CODE_SLICE: {
5904 tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
5908 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5909 if (!slices[n_slices].buf)
5911 buf_size3 = vc1_unescape_buffer(start + 4, size,
5912 slices[n_slices].buf);
5913 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
/* the slice header carries its starting MB row in 9 bits */
5915 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5921 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5922 const uint8_t *divider;
5925 divider = find_next_marker(buf, buf + buf_size);
5926 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5927 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5929 } else { // found field marker, unescape second field
5930 if (avctx->hwaccel ||
5931 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5932 buf_start_second_field = divider;
5933 tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
5937 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5938 if (!slices[n_slices].buf)
5940 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5941 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5943 slices[n_slices].mby_start = s->mb_height + 1 >> 1;
5944 n_slices1 = n_slices - 1;
/* first field: everything before the divider */
5947 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5949 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5951 init_get_bits(&s->gb, buf2, buf_size2*8);
/* simple/main profile (WMV3): no escaping, read the packet directly */
5953 init_get_bits(&s->gb, buf, buf_size*8);
5955 if (v->res_sprite) {
5956 v->new_sprite = !get_bits1(&s->gb);
5957 v->two_sprites = get_bits1(&s->gb);
5958 /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5959 we're using the sprite compositor. These are intentionally kept separate
5960 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5961 the vc1 one for WVP2 */
5962 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5963 if (v->new_sprite) {
5964 // switch AVCodecContext parameters to those of the sprites
5965 avctx->width = avctx->coded_width = v->sprite_width;
5966 avctx->height = avctx->coded_height = v->sprite_height;
/* dimensions changed mid-stream: tear down and reinit below */
5973 if (s->context_initialized &&
5974 (s->width != avctx->coded_width ||
5975 s->height != avctx->coded_height)) {
5976 ff_vc1_decode_end(avctx);
5979 if (!s->context_initialized) {
5980 if (ff_msmpeg4_decode_init(avctx) < 0)
5982 if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5983 ff_mpv_common_end(s);
5987 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5989 if (v->profile == PROFILE_ADVANCED) {
5990 if(avctx->coded_width<=1 || avctx->coded_height<=1)
5992 s->h_edge_pos = avctx->coded_width;
5993 s->v_edge_pos = avctx->coded_height;
5997 // do parse frame header
5998 v->pic_header_flag = 0;
5999 v->first_pic_header_flag = 1;
6000 if (v->profile < PROFILE_ADVANCED) {
6001 if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
6005 if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
6009 v->first_pic_header_flag = 0;
6011 if (avctx->debug & FF_DEBUG_PICT_INFO)
6012 av_log(v->s.avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type));
/* the sprite compositor only handles intra pictures */
6014 if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
6015 && s->pict_type != AV_PICTURE_TYPE_I) {
6016 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
6020 if ((s->mb_height >> v->field_mode) == 0) {
6021 av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
6025 // for skipping the frame
6026 s->current_picture.f->pict_type = s->pict_type;
6027 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
6029 /* skip B-frames if we don't have reference frames */
6030 if (!s->last_picture_ptr && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
6031 av_log(v->s.avctx, AV_LOG_DEBUG, "Skipping B frame without reference frames\n");
/* honour the user's skip_frame discard policy */
6034 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
6035 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
6036 avctx->skip_frame >= AVDISCARD_ALL) {
6040 if (s->next_p_frame_damaged) {
6041 if (s->pict_type == AV_PICTURE_TYPE_B)
6044 s->next_p_frame_damaged = 0;
6047 if (ff_mpv_frame_start(s, avctx) < 0) {
6051 v->s.current_picture_ptr->field_picture = v->field_mode;
6052 v->s.current_picture_ptr->f->interlaced_frame = (v->fcm != PROGRESSIVE);
6053 v->s.current_picture_ptr->f->top_field_first = v->tff;
6055 // process pulldown flags
6056 s->current_picture_ptr->f->repeat_pict = 0;
6057 // Pulldown flags are only valid when 'broadcast' has been set.
6058 // So ticks_per_frame will be 2
6061 s->current_picture_ptr->f->repeat_pict = 1;
6062 } else if (v->rptfrm) {
6064 s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2;
6067 s->me.qpel_put = s->qdsp.put_qpel_pixels_tab;
6068 s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;
/* --- VDPAU path: hand the (escaped) bitstream to the hardware --- */
6070 if ((CONFIG_VC1_VDPAU_DECODER)
6071 &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
6072 if (v->field_mode && buf_start_second_field) {
6073 ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start);
6074 ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field);
6076 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
/* --- generic hwaccel path: one start/decode/end cycle per field --- */
6078 } else if (avctx->hwaccel) {
6079 if (v->field_mode && buf_start_second_field) {
6080 // decode first field
6081 s->picture_structure = PICT_BOTTOM_FIELD - v->tff;
6082 if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
6084 if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
6086 if (avctx->hwaccel->end_frame(avctx) < 0)
6089 // decode second field
6090 s->gb = slices[n_slices1 + 1].gb;
6091 s->picture_structure = PICT_TOP_FIELD + v->tff;
6092 v->second_field = 1;
6093 v->pic_header_flag = 0;
6094 if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
6095 av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed");
6098 v->s.current_picture_ptr->f->pict_type = v->s.pict_type;
6100 if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
6102 if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
6104 if (avctx->hwaccel->end_frame(avctx) < 0)
6107 s->picture_structure = PICT_FRAME;
6108 if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6110 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6112 if (avctx->hwaccel->end_frame(avctx) < 0)
/* --- software path: slice loop with error resilience --- */
6118 ff_mpeg_er_frame_start(s);
6120 v->bits = buf_size * 8;
6121 v->end_mb_x = s->mb_width;
/* field mode decodes each field at doubled line stride */
6122 if (v->field_mode) {
6123 s->current_picture.f->linesize[0] <<= 1;
6124 s->current_picture.f->linesize[1] <<= 1;
6125 s->current_picture.f->linesize[2] <<= 1;
6127 s->uvlinesize <<= 1;
6129 mb_height = s->mb_height >> v->field_mode;
6131 av_assert0 (mb_height > 0);
6133 for (i = 0; i <= n_slices; i++) {
6134 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6135 if (v->field_mode <= 0) {
6136 av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6137 "picture boundary (%d >= %d)\n", i,
6138 slices[i - 1].mby_start, mb_height);
/* crossing mb_height in field mode means we entered the second field */
6141 v->second_field = 1;
6142 av_assert0((s->mb_height & 1) == 0);
6143 v->blocks_off = s->b8_stride * (s->mb_height&~1);
6144 v->mb_off = s->mb_stride * s->mb_height >> 1;
6146 v->second_field = 0;
6151 v->pic_header_flag = 0;
/* first slice of the second field carries a full field header */
6152 if (v->field_mode && i == n_slices1 + 2) {
6153 if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6154 av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6155 if (avctx->err_recognition & AV_EF_EXPLODE)
6159 } else if (get_bits1(&s->gb)) {
6160 v->pic_header_flag = 1;
6161 if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6162 av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6163 if (avctx->err_recognition & AV_EF_EXPLODE)
/* compute the MB row range this slice covers */
6171 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6172 if (!v->field_mode || v->second_field)
6173 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6175 if (i >= n_slices) {
6176 av_log(v->s.avctx, AV_LOG_ERROR, "first field slice count too large\n");
6179 s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6181 if (s->end_mb_y <= s->start_mb_y) {
6182 av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y);
6185 if (!v->p_frame_skipped && s->pict_type != AV_PICTURE_TYPE_I && !v->cbpcy_vlc) {
6186 av_log(v->s.avctx, AV_LOG_ERROR, "missing cbpcy_vlc\n");
6189 ff_vc1_decode_blocks(v);
/* switch the bit reader to the next slice's buffer */
6191 s->gb = slices[i].gb;
/* undo the field-mode stride doubling */
6193 if (v->field_mode) {
6194 v->second_field = 0;
6195 s->current_picture.f->linesize[0] >>= 1;
6196 s->current_picture.f->linesize[1] >>= 1;
6197 s->current_picture.f->linesize[2] >>= 1;
6199 s->uvlinesize >>= 1;
/* reference pictures: rotate the field MV predictor planes */
6200 if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) {
6201 FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
6202 FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
6205 av_dlog(s->avctx, "Consumed %i/%i bits\n",
6206 get_bits_count(&s->gb), s->gb.size_in_bits);
6207 // if (get_bits_count(&s->gb) > buf_size * 8)
6209 if(s->er.error_occurred && s->pict_type == AV_PICTURE_TYPE_B)
6212 ff_er_frame_end(&s->er);
6215 ff_mpv_frame_end(s);
/* sprite codecs: restore output geometry and run the compositor */
6217 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6219 avctx->width = avctx->coded_width = v->output_width;
6220 avctx->height = avctx->coded_height = v->output_height;
6221 if (avctx->skip_frame >= AVDISCARD_NONREF)
6223 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6224 if (vc1_decode_sprites(v, &s->gb))
6227 if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
/* normal output: current picture in low delay / B, else last reference */
6231 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6232 if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
6234 ff_print_debug_info(s, s->current_picture_ptr, pict);
6236 } else if (s->last_picture_ptr) {
6237 if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
6239 ff_print_debug_info(s, s->last_picture_ptr, pict);
/* cleanup of per-slice buffers (success and error paths) */
6246 for (i = 0; i < n_slices; i++)
6247 av_free(slices[i].buf);
6253 for (i = 0; i < n_slices; i++)
6254 av_free(slices[i].buf);
6260 static const AVProfile profiles[] = {
6261 { FF_PROFILE_VC1_SIMPLE, "Simple" },
6262 { FF_PROFILE_VC1_MAIN, "Main" },
6263 { FF_PROFILE_VC1_COMPLEX, "Complex" },
6264 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6265 { FF_PROFILE_UNKNOWN },
6268 static const enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[] = {
6269 #if CONFIG_VC1_DXVA2_HWACCEL
6270 AV_PIX_FMT_DXVA2_VLD,
6272 #if CONFIG_VC1_VAAPI_HWACCEL
6273 AV_PIX_FMT_VAAPI_VLD,
6275 #if CONFIG_VC1_VDPAU_HWACCEL
6282 AVCodec ff_vc1_decoder = {
6284 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6285 .type = AVMEDIA_TYPE_VIDEO,
6286 .id = AV_CODEC_ID_VC1,
6287 .priv_data_size = sizeof(VC1Context),
6288 .init = vc1_decode_init,
6289 .close = ff_vc1_decode_end,
6290 .decode = vc1_decode_frame,
6291 .flush = ff_mpeg_flush,
6292 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6293 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6294 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_WMV3_DECODER
/* Windows Media Video 9 (WMV3 / simple+main profile) decoder registration.
 * (Restores the truncated .name field, closing brace and #endif.) */
AVCodec ff_wmv3_decoder = {
    .name           = "wmv3",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV3,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .flush          = ff_mpeg_flush,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .pix_fmts       = vc1_hwaccel_pixfmt_list_420,
    .profiles       = NULL_IF_CONFIG_SMALL(profiles)
};
#endif
#if CONFIG_WMV3_VDPAU_DECODER
/* Legacy dedicated VDPAU decoder for WMV3 (opaque AV_PIX_FMT_VDPAU_WMV3
 * surfaces). (Restores the truncated closing brace and #endif.) */
AVCodec ff_wmv3_vdpau_decoder = {
    .name           = "wmv3_vdpau",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV3,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
    .profiles       = NULL_IF_CONFIG_SMALL(profiles)
};
#endif
#if CONFIG_VC1_VDPAU_DECODER
/* Legacy dedicated VDPAU decoder for VC-1 (opaque AV_PIX_FMT_VDPAU_VC1
 * surfaces). (Restores the truncated closing brace and #endif.) */
AVCodec ff_vc1_vdpau_decoder = {
    .name           = "vc1_vdpau",
    .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VC1,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
    .profiles       = NULL_IF_CONFIG_SMALL(profiles)
};
#endif
#if CONFIG_WMV3IMAGE_DECODER
/* Windows Media Image (WMVP) decoder — WMV3 bitstream plus the sprite
 * compositor; software YUV420P output only, no B-frame delay.
 * (Restores the truncated pix_fmts entries, closing braces and #endif.) */
AVCodec ff_wmv3image_decoder = {
    .name           = "wmv3image",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV3IMAGE,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .flush          = vc1_sprite_flush,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_NONE
    },
};
#endif
6365 #if CONFIG_VC1IMAGE_DECODER
6366 AVCodec ff_vc1image_decoder = {
6368 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
6369 .type = AVMEDIA_TYPE_VIDEO,
6370 .id = AV_CODEC_ID_VC1IMAGE,
6371 .priv_data_size = sizeof(VC1Context),
6372 .init = vc1_decode_init,
6373 .close = ff_vc1_decode_end,
6374 .decode = vc1_decode_frame,
6375 .capabilities = CODEC_CAP_DR1,
6376 .flush = vc1_sprite_flush,
6377 .pix_fmts = (const enum AVPixelFormat[]) {