2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of FFmpeg.
9 * FFmpeg is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * FFmpeg is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with FFmpeg; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
31 #include "error_resilience.h"
32 #include "mpegutils.h"
33 #include "mpegvideo.h"
35 #include "h264chroma.h"
38 #include "vc1acdata.h"
39 #include "msmpeg4data.h"
42 #include "vdpau_internal.h"
43 #include "libavutil/avassert.h"
/* review: width of the VLC lookup step used for intra-MB coefficient tables. */
48 #define MB_INTRA_VLC_BITS 9
52 // offset tables for interlaced picture MVDATA decoding
/* review: table1 holds power-of-two offsets, table2 the corresponding
 * (2^n - 1) values; both are indexed by the decoded MV magnitude class.
 * Their exact consumer is outside this extract — presumably the interlaced
 * MVDATA reader referenced by the comment above; confirm against full file. */
53 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
54 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
56 /***********************************************************************/
58 * @name VC-1 Bitplane decoding
64 static void init_block_index(VC1Context *v)
66 MpegEncContext *s = &v->s;
67 ff_init_block_index(s);
68 if (v->field_mode && !(v->second_field ^ v->tff)) {
69 s->dest[0] += s->current_picture_ptr->f->linesize[0];
70 s->dest[1] += s->current_picture_ptr->f->linesize[1];
71 s->dest[2] += s->current_picture_ptr->f->linesize[2];
75 /** @} */ //Bitplane group
/* review: writes the decoded residual blocks (signed samples, clamped to
 * 0..255 by put_signed_pixels_clamped) of already-filtered macroblocks to
 * the output picture, then rotates the delayed block-buffer indices.
 * As the original in-code comment explains, emission trails the decoding
 * loop by one MB row and one MB column so overlap filtering can finish.
 * NOTE(review): this extract has dropped lines (closing braces, the v_dist
 * declaration, trailing call arguments); comments describe visible code only. */
77 static void vc1_put_signed_blocks_clamped(VC1Context *v)
79 MpegEncContext *s = &v->s;
80 int topleft_mb_pos, top_mb_pos;
81 int stride_y, fieldtx = 0;
84 /* The put pixels loop is always one MB row behind the decoding loop,
85 * because we can only put pixels when overlap filtering is done, and
86 * for filtering of the bottom edge of a MB, we need the next MB row
88 * Within the row, the put pixels loop is also one MB col behind the
89 * decoding loop. The reason for this is again, because for filtering
90 * of the right MB edge, we need the next MB present. */
91 if (!s->first_slice_line) {
/* review: emit the MB one row up and one column left of the current one. */
93 topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
94 if (v->fcm == ILACE_FRAME)
95 fieldtx = v->fieldtx_plane[topleft_mb_pos];
/* review: field-transposed MBs in interlaced frames interleave lines,
 * so the luma stride is doubled and v_dist becomes 15 instead of 8
 * ((16 - 0) >> 1 == 8, (16 - 1) >> 0 == 15). */
96 stride_y = s->linesize << fieldtx;
97 v_dist = (16 - fieldtx) >> (fieldtx == 0);
/* review: blocks 0..3 are the four 8x8 luma blocks, 4 and 5 are U and V. */
98 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
99 s->dest[0] - 16 * s->linesize - 16,
101 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
102 s->dest[0] - 16 * s->linesize - 8,
104 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
105 s->dest[0] - v_dist * s->linesize - 16,
107 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
108 s->dest[0] - v_dist * s->linesize - 8,
110 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
111 s->dest[1] - 8 * s->uvlinesize - 8,
113 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
114 s->dest[2] - 8 * s->uvlinesize - 8,
/* review: at the right picture edge no further column will arrive, so the
 * MB directly above is flushed here as well. */
117 if (s->mb_x == s->mb_width - 1) {
118 top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
119 if (v->fcm == ILACE_FRAME)
120 fieldtx = v->fieldtx_plane[top_mb_pos];
121 stride_y = s->linesize << fieldtx;
122 v_dist = fieldtx ? 15 : 8;
123 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
124 s->dest[0] - 16 * s->linesize,
126 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
127 s->dest[0] - 16 * s->linesize + 8,
129 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
130 s->dest[0] - v_dist * s->linesize,
132 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
133 s->dest[0] - v_dist * s->linesize + 8,
135 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
136 s->dest[1] - 8 * s->uvlinesize,
138 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
139 s->dest[2] - 8 * s->uvlinesize,
/* review: rotate the delayed block-buffer indices (cur -> left -> topleft),
 * wrapping at v->n_allocated_blks. */
144 #define inc_blk_idx(idx) do { \
146 if (idx >= v->n_allocated_blks) \
150 inc_blk_idx(v->topleft_blk_idx);
151 inc_blk_idx(v->top_blk_idx);
152 inc_blk_idx(v->left_blk_idx);
153 inc_blk_idx(v->cur_blk_idx);
/* review: VC-1 in-loop deblocking for intra blocks at quantizer pq.
 * Filters the edges around the current MB position: edges toward the row
 * above (only when not on the first line of a slice), the mid-MB luma edge,
 * and — on the last MB row — the remaining edges that no later row would
 * trigger. dest[1]/dest[2] are the U/V planes.
 * NOTE(review): braces and some guarding conditionals were dropped from this
 * extract; comments cover only the visible calls. */
156 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
158 MpegEncContext *s = &v->s;
160 if (!s->first_slice_line) {
161 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
163 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
164 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
/* review: j == 0 filters U (dest[1]), j == 1 filters V (dest[2]). */
165 for (j = 0; j < 2; j++) {
166 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
168 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* review: internal luma edge at the 8-pixel boundary inside the MB. */
171 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
/* review: on the last MB row of this slice, flush the edges of the current
 * row that would otherwise be filtered when the (nonexistent) next row
 * was processed. */
173 if (s->mb_y == s->end_mb_y - 1) {
175 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
176 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
177 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
179 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/* review: delayed variant of vc1_loop_filter_iblk(): as the original comment
 * states, the loop filter here runs one row/column behind the overlap filter,
 * i.e. two rows/columns behind the decoding loop, hence the -16/-32 line
 * offsets relative to s->dest. The s->mb_y == s->end_mb_y branch flushes the
 * last pending row.
 * NOTE(review): braces and several guard conditions were dropped from this
 * extract; comments cover only visible code. */
183 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
185 MpegEncContext *s = &v->s;
188 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
189 * means it runs two rows/cols behind the decoding loop. */
190 if (!s->first_slice_line) {
/* review: only from the third row of the slice on is there a fully
 * overlap-filtered row two rows up to deblock. */
192 if (s->mb_y >= s->start_mb_y + 2) {
193 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
196 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
197 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
198 for (j = 0; j < 2; j++) {
199 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
201 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
205 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
/* review: right picture edge — also process the column that would normally
 * wait for the next MB. */
208 if (s->mb_x == s->mb_width - 1) {
209 if (s->mb_y >= s->start_mb_y + 2) {
210 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
213 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
214 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
215 for (j = 0; j < 2; j++) {
216 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
218 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
222 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
/* review: end of slice — flush the final pending row (offsets -16/-8 lines). */
225 if (s->mb_y == s->end_mb_y) {
228 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
229 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
231 for (j = 0; j < 2; j++) {
232 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
237 if (s->mb_x == s->mb_width - 1) {
239 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
240 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
242 for (j = 0; j < 2; j++) {
243 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* review: conditional overlap smoothing for intra blocks (VC-1 advanced
 * profile). A block edge is smoothed when CONDOVER_ALL is set, the picture
 * quantizer is >= 9, or the per-MB overlap bitplane flags both neighbours.
 * Per the original comment: within a MB the horizontal (H) pass runs before
 * the vertical (V) one, which is why V trails by one MB column and H by one
 * MB row. Chroma (block indices 4/5) is skipped under CODEC_FLAG_GRAY.
 * NOTE(review): several closing braces were dropped from this extract. */
251 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
253 MpegEncContext *s = &v->s;
/* review: early out — overlap smoothing disabled for this picture. */
256 if (v->condover == CONDOVER_NONE)
259 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
261 /* Within a MB, the horizontal overlap always runs before the vertical.
262 * To accomplish that, we run the H on left and internal borders of the
263 * currently decoded MB. Then, we wait for the next overlap iteration
264 * to do H overlap on the right edge of this MB, before moving over and
265 * running the V overlap. Therefore, the V overlap makes us trail by one
266 * MB col and the H overlap filter makes us trail by one MB row. This
267 * is reflected in the time at which we run the put_pixels loop. */
268 if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
/* review: H smoothing of the edge between the left neighbour and the
 * current MB, gated on the left MB also being flagged. */
269 if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
270 v->over_flags_plane[mb_pos - 1])) {
271 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
272 v->block[v->cur_blk_idx][0]);
273 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
274 v->block[v->cur_blk_idx][2]);
275 if (!(s->flags & CODEC_FLAG_GRAY)) {
276 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
277 v->block[v->cur_blk_idx][4]);
278 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
279 v->block[v->cur_blk_idx][5]);
/* review: H smoothing of the internal luma edges of the current MB. */
282 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
283 v->block[v->cur_blk_idx][1]);
284 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
285 v->block[v->cur_blk_idx][3]);
/* review: at the right picture edge, run the V pass for the current MB now
 * (no next column will do it): top edge against the MB above, then the
 * internal vertical edges. */
287 if (s->mb_x == s->mb_width - 1) {
288 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
289 v->over_flags_plane[mb_pos - s->mb_stride])) {
290 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
291 v->block[v->cur_blk_idx][0]);
292 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
293 v->block[v->cur_blk_idx][1]);
294 if (!(s->flags & CODEC_FLAG_GRAY)) {
295 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
296 v->block[v->cur_blk_idx][4]);
297 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
298 v->block[v->cur_blk_idx][5]);
301 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
302 v->block[v->cur_blk_idx][2]);
303 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
304 v->block[v->cur_blk_idx][3]);
/* review: normal case — run the (delayed) V pass on the left-neighbour MB. */
307 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
308 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
309 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
310 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
311 v->block[v->left_blk_idx][0]);
312 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
313 v->block[v->left_blk_idx][1]);
314 if (!(s->flags & CODEC_FLAG_GRAY)) {
315 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
316 v->block[v->left_blk_idx][4]);
317 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
318 v->block[v->left_blk_idx][5]);
321 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
322 v->block[v->left_blk_idx][2]);
323 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
324 v->block[v->left_blk_idx][3]);
328 /** Do motion compensation over 1 macroblock
329 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
/* review: 1-MV motion compensation of a full 16x16 MB (luma + both chroma
 * planes) in direction dir (0 = forward/last picture, 1 = backward/next).
 * Handles field pictures, intensity compensation (luty/lutuv LUTs),
 * range-reduction scaling, and edge emulation for out-of-picture reads.
 * NOTE(review): many lines are missing from this extract (braces, else
 * branches, some declarations and call arguments); comments below describe
 * only the visible code. */
331 static void vc1_mc_1mv(VC1Context *v, int dir)
333 MpegEncContext *s = &v->s;
334 H264ChromaContext *h264chroma = &v->h264chroma;
335 uint8_t *srcY, *srcU, *srcV;
336 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
/* review: in field mode only half the picture height is addressable. */
337 int v_edge_pos = s->v_edge_pos >> v->field_mode;
338 uint8_t (*luty)[256], (*lutuv)[256];
/* review: bail out when the reference picture needed for this direction
 * does not exist (truncated stream / missing last picture). */
342 if ((!v->field_mode ||
343 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
344 !v->s.last_picture.f->data[0])
347 mx = s->mv[dir][0][0];
348 my = s->mv[dir][0][1];
350 // store motion vectors for further use in B frames
351 if (s->pict_type == AV_PICTURE_TYPE_P) {
352 for (i = 0; i < 4; i++) {
353 s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
354 s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
/* review: derive the chroma MV: halve, rounding quarter-pel value 3 up. */
358 uvmx = (mx + ((mx & 3) == 3)) >> 1;
359 uvmy = (my + ((my & 3) == 3)) >> 1;
360 v->luma_mv[s->mb_x][0] = uvmx;
361 v->luma_mv[s->mb_x][1] = uvmy;
/* review: cross-field reference — bias the vertical MVs by +/-2. */
364 v->cur_field_type != v->ref_field_type[dir]) {
365 my = my - 2 + 4 * v->cur_field_type;
366 uvmy = uvmy - 2 + 4 * v->cur_field_type;
369 // fastuvmc shall be ignored for interlaced frame picture
/* review: fastuvmc rounds the chroma MV toward zero to even values. */
370 if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
371 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
372 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
/* review: select reference planes + intensity-compensation LUTs: the
 * current picture (other field), the last picture, or the next picture. */
375 if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
376 srcY = s->current_picture.f->data[0];
377 srcU = s->current_picture.f->data[1];
378 srcV = s->current_picture.f->data[2];
380 lutuv = v->curr_lutuv;
381 use_ic = *v->curr_use_ic;
383 srcY = s->last_picture.f->data[0];
384 srcU = s->last_picture.f->data[1];
385 srcV = s->last_picture.f->data[2];
387 lutuv = v->last_lutuv;
388 use_ic = v->last_use_ic;
391 srcY = s->next_picture.f->data[0];
392 srcU = s->next_picture.f->data[1];
393 srcV = s->next_picture.f->data[2];
395 lutuv = v->next_lutuv;
396 use_ic = v->next_use_ic;
399 if (!srcY || !srcU) {
400 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
/* review: integer source position; MVs are in quarter-pel units. */
404 src_x = s->mb_x * 16 + (mx >> 2);
405 src_y = s->mb_y * 16 + (my >> 2);
406 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
407 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
/* review: MV pull-back; advanced profile clips against coded dimensions. */
409 if (v->profile != PROFILE_ADVANCED) {
410 src_x = av_clip( src_x, -16, s->mb_width * 16);
411 src_y = av_clip( src_y, -16, s->mb_height * 16);
412 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
413 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
415 src_x = av_clip( src_x, -17, s->avctx->coded_width);
416 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
417 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
418 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
421 srcY += src_y * s->linesize + src_x;
422 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
423 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* review: referencing the bottom field — start one line down. */
425 if (v->field_mode && v->ref_field_type[dir]) {
426 srcY += s->current_picture_ptr->f->linesize[0];
427 srcU += s->current_picture_ptr->f->linesize[1];
428 srcV += s->current_picture_ptr->f->linesize[2];
431 /* for grayscale we should not try to read from unknown area */
432 if (s->flags & CODEC_FLAG_GRAY) {
433 srcU = s->edge_emu_buffer + 18 * s->linesize;
434 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* review: slow path — copy into edge_emu_buffer when the block needs
 * per-pixel processing (range reduction, intensity comp) or reads
 * outside the picture. */
437 if (v->rangeredfrm || use_ic
438 || s->h_edge_pos < 22 || v_edge_pos < 22
439 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
440 || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
441 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
443 srcY -= s->mspel * (1 + s->linesize);
444 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
445 s->linesize, s->linesize,
446 17 + s->mspel * 2, 17 + s->mspel * 2,
447 src_x - s->mspel, src_y - s->mspel,
448 s->h_edge_pos, v_edge_pos);
449 srcY = s->edge_emu_buffer;
450 s->vdsp.emulated_edge_mc(uvbuf, srcU,
451 s->uvlinesize, s->uvlinesize,
454 s->h_edge_pos >> 1, v_edge_pos >> 1);
455 s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
456 s->uvlinesize, s->uvlinesize,
459 s->h_edge_pos >> 1, v_edge_pos >> 1);
462 /* if we deal with range reduction we need to scale source blocks */
463 if (v->rangeredfrm) {
/* review: range reduction halves the deviation from 128. */
468 for (j = 0; j < 17 + s->mspel * 2; j++) {
469 for (i = 0; i < 17 + s->mspel * 2; i++)
470 src[i] = ((src[i] - 128) >> 1) + 128;
475 for (j = 0; j < 9; j++) {
476 for (i = 0; i < 9; i++) {
477 src[i] = ((src[i] - 128) >> 1) + 128;
478 src2[i] = ((src2[i] - 128) >> 1) + 128;
480 src += s->uvlinesize;
481 src2 += s->uvlinesize;
484 /* if we deal with intensity compensation we need to scale source blocks */
/* review: per-field LUT remap; f selects the field the line belongs to. */
490 for (j = 0; j < 17 + s->mspel * 2; j++) {
491 int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
492 for (i = 0; i < 17 + s->mspel * 2; i++)
493 src[i] = luty[f][src[i]];
498 for (j = 0; j < 9; j++) {
499 int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
500 for (i = 0; i < 9; i++) {
501 src[i] = lutuv[f][src[i]];
502 src2[i] = lutuv[f][src2[i]];
504 src += s->uvlinesize;
505 src2 += s->uvlinesize;
508 srcY += s->mspel * (1 + s->linesize);
/* review: luma interpolation — quarter-pel mspel filter when enabled,
 * half-pel otherwise (rounding variant chosen by s->no_rounding,
 * presumably — the selecting condition was dropped from this extract). */
512 dxy = ((my & 3) << 2) | (mx & 3);
513 v->vc1dsp.put_vc1_mspel_pixels_tab[0][dxy](s->dest[0] , srcY , s->linesize, v->rnd);
514 } else { // hpel mc - always used for luma
515 dxy = (my & 2) | ((mx & 2) >> 1);
517 s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
519 s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
522 if (s->flags & CODEC_FLAG_GRAY) return;
523 /* Chroma MC always uses qpel bilinear */
524 uvmx = (uvmx & 3) << 1;
525 uvmy = (uvmy & 3) << 1;
527 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
528 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
530 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
531 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Median of four integers.
 *
 * Discards the smallest and largest of the four values and returns the
 * average of the two middle values, using C integer division (truncation
 * toward zero) — identical to the original FFMIN/FFMAX formulation, which
 * in each branch takes the smaller of the two per-pair maxima and the
 * larger of the two per-pair minima.
 *
 * Note: this extract of the file had dropped the outer if/else structure
 * of this function; the control flow below restores the only parse
 * consistent with the four visible return expressions. The FFMIN/FFMAX
 * macros are expanded inline so the function is self-contained.
 */
static inline int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d) return ((b < d ? b : d) + (a > c ? a : c)) / 2;
        else       return ((b < c ? b : c) + (a > d ? a : d)) / 2;
    } else {
        if (c < d) return ((a < d ? a : d) + (b > c ? b : c)) / 2;
        else       return ((a < c ? a : c) + (b > d ? b : d)) / 2;
    }
}
546 /** Do motion compensation for 4-MV macroblock - luminance block
/* review: motion-compensates one 8x8 luma block (index n, 0..3) of a 4-MV
 * macroblock in direction dir; avg selects averaging (B-frame second pass).
 * For interlaced frames, fieldmv marks field (as opposed to frame) block
 * MVs, which doubles the effective line stride. For n == 3 in field-mode
 * P pictures it also derives and stores the representative MV used later
 * for chroma (median/mid_pred/average of the same-field majority).
 * NOTE(review): this extract is missing lines (braces, else branches, some
 * declarations); comments describe only visible code. */
548 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
550 MpegEncContext *s = &v->s;
552 int dxy, mx, my, src_x, src_y;
554 int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
555 int v_edge_pos = s->v_edge_pos >> v->field_mode;
556 uint8_t (*luty)[256];
/* review: missing-reference guard, same pattern as vc1_mc_1mv(). */
559 if ((!v->field_mode ||
560 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
561 !v->s.last_picture.f->data[0])
564 mx = s->mv[dir][n][0];
565 my = s->mv[dir][n][1];
/* review: pick reference luma plane + intensity-compensation LUT. */
568 if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
569 srcY = s->current_picture.f->data[0];
571 use_ic = *v->curr_use_ic;
573 srcY = s->last_picture.f->data[0];
575 use_ic = v->last_use_ic;
578 srcY = s->next_picture.f->data[0];
580 use_ic = v->next_use_ic;
584 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
589 if (v->cur_field_type != v->ref_field_type[dir])
590 my = my - 2 + 4 * v->cur_field_type;
/* review: after the last (n == 3) luma block of a field-mode P MB, pick the
 * representative MV from whichever field (same/opposite) the majority of
 * the four block MVs reference, and store it for B-frame/chroma use. */
593 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
594 int same_count = 0, opp_count = 0, k;
595 int chosen_mv[2][4][2], f;
597 for (k = 0; k < 4; k++) {
598 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
599 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
600 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
604 f = opp_count > same_count;
/* review: 4 candidates -> median4, 3 -> mid_pred, 2 -> average;
 * the 1-candidate case was dropped from this extract. */
605 switch (f ? opp_count : same_count) {
607 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
608 chosen_mv[f][2][0], chosen_mv[f][3][0]);
609 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
610 chosen_mv[f][2][1], chosen_mv[f][3][1]);
613 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
614 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
617 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
618 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
623 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
624 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
625 for (k = 0; k < 4; k++)
626 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* review: interlaced-frame MV pull-back against the coded field size. */
629 if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
631 int width = s->avctx->coded_width;
632 int height = s->avctx->coded_height >> 1;
633 if (s->pict_type == AV_PICTURE_TYPE_P) {
634 s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
635 s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
637 qx = (s->mb_x * 16) + (mx >> 2);
638 qy = (s->mb_y * 8) + (my >> 3);
643 mx -= 4 * (qx - width);
646 else if (qy > height + 1)
647 my -= 8 * (qy - height - 1);
/* review: off = destination offset of this 8x8 block inside the 16x16 MB;
 * for field MVs rows of the two fields interleave (hence the single
 * s->linesize step for the lower blocks). */
650 if ((v->fcm == ILACE_FRAME) && fieldmv)
651 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
653 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
655 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
657 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
659 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
661 if (v->profile != PROFILE_ADVANCED) {
662 src_x = av_clip(src_x, -16, s->mb_width * 16);
663 src_y = av_clip(src_y, -16, s->mb_height * 16);
665 src_x = av_clip(src_x, -17, s->avctx->coded_width);
666 if (v->fcm == ILACE_FRAME) {
668 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
670 src_y = av_clip(src_y, -18, s->avctx->coded_height);
672 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
676 srcY += src_y * s->linesize + src_x;
677 if (v->field_mode && v->ref_field_type[dir])
678 srcY += s->current_picture_ptr->f->linesize[0];
/* review: adjust the usable vertical edge for field MVs landing on
 * even/odd source lines (assignments dropped from this extract). */
680 if (fieldmv && !(src_y & 1))
682 if (fieldmv && (src_y & 1) && src_y < 4)
/* review: edge-emulation slow path, as in vc1_mc_1mv() but for 8x8 (+taps). */
684 if (v->rangeredfrm || use_ic
685 || s->h_edge_pos < 13 || v_edge_pos < 23
686 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
687 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
688 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
689 /* check emulate edge stride and offset */
690 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
691 s->linesize, s->linesize,
692 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
693 src_x - s->mspel, src_y - (s->mspel << fieldmv),
694 s->h_edge_pos, v_edge_pos);
695 srcY = s->edge_emu_buffer;
696 /* if we deal with range reduction we need to scale source blocks */
697 if (v->rangeredfrm) {
702 for (j = 0; j < 9 + s->mspel * 2; j++) {
703 for (i = 0; i < 9 + s->mspel * 2; i++)
704 src[i] = ((src[i] - 128) >> 1) + 128;
705 src += s->linesize << fieldmv;
708 /* if we deal with intensity compensation we need to scale source blocks */
714 for (j = 0; j < 9 + s->mspel * 2; j++) {
715 int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
716 for (i = 0; i < 9 + s->mspel * 2; i++)
717 src[i] = luty[f][src[i]];
718 src += s->linesize << fieldmv;
721 srcY += s->mspel * (1 + (s->linesize << fieldmv));
/* review: 8x8 interpolation — mspel quarter-pel (avg or put per 'avg'),
 * otherwise half-pel with rounding variant. */
725 dxy = ((my & 3) << 2) | (mx & 3);
727 v->vc1dsp.avg_vc1_mspel_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
729 v->vc1dsp.put_vc1_mspel_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
730 } else { // hpel mc - always used for luma
731 dxy = (my & 2) | ((mx & 2) >> 1);
733 s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
735 s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* review: derives the chroma MV (*tx, *ty) from the four luma block MVs
 * mvx[]/mvy[]. a[] holds a per-block attribute compared against 'flag'
 * (callers pass intra flags with flag == 0, or field flags with the
 * dominant field); blocks with a[k] != flag are excluded via the idx
 * bitmask. 4 valid blocks -> median4, 3 -> mid_pred of the survivors,
 * 2 -> average of the two survivors. Returns the number of valid blocks
 * (per visible callers' use of the result as valid_count).
 * NOTE(review): the a[0] term of idx, the count[] dispatch lines and the
 * two search-loop bodies were dropped from this extract. */
739 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
/* review: count[] maps the 4-bit exclusion mask to the number of bits set. */
742 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
744 idx = ((a[3] != flag) << 3)
745 | ((a[2] != flag) << 2)
746 | ((a[1] != flag) << 1)
/* review: idx == 0 means all four blocks are usable -> component medians. */
749 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
750 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
752 } else if (count[idx] == 1) {
/* review: exactly one excluded block -> mid_pred of the other three;
 * each case below corresponds to one excluded index. */
755 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
756 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
759 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
760 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
763 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
764 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
767 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
768 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
771 } else if (count[idx] == 2) {
/* review: find the two surviving indices t1 < t2, then average. */
773 for (i = 0; i < 3; i++)
778 for (i = t1 + 1; i < 4; i++)
783 *tx = (mvx[t1] + mvx[t2]) / 2;
784 *ty = (mvy[t1] + mvy[t2]) / 2;
792 /** Do motion compensation for 4-MV macroblock - both chroma blocks
/* review: derives one chroma MV from the four luma MVs via get_chroma_mv(),
 * then motion-compensates the 8x8 U and V blocks. Handles field pictures
 * (dominant-field selection when numref is set), fastuvmc rounding,
 * intensity compensation, range reduction and edge emulation.
 * NOTE(review): lines are missing from this extract (braces, else branches,
 * the all-intra early-return condition, some guards); comments describe
 * visible code only. */
794 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
796 MpegEncContext *s = &v->s;
797 H264ChromaContext *h264chroma = &v->h264chroma;
798 uint8_t *srcU, *srcV;
799 int uvmx, uvmy, uvsrc_x, uvsrc_y;
800 int k, tx = 0, ty = 0;
801 int mvx[4], mvy[4], intra[4], mv_f[4];
803 int chroma_ref_type = v->cur_field_type;
804 int v_edge_pos = s->v_edge_pos >> v->field_mode;
805 uint8_t (*lutuv)[256];
808 if (!v->field_mode && !v->s.last_picture.f->data[0])
/* review: no chroma work at all in grayscale mode. */
810 if (s->flags & CODEC_FLAG_GRAY)
/* review: gather the four luma MVs plus per-block intra and field flags. */
813 for (k = 0; k < 4; k++) {
814 mvx[k] = s->mv[dir][k][0];
815 mvy[k] = s->mv[dir][k][1];
816 intra[k] = v->mb_type[0][s->block_index[k]];
818 mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
821 /* calculate chroma MV vector from four luma MVs */
/* review: single-reference (or progressive) case: exclude intra blocks. */
822 if (!v->field_mode || (v->field_mode && !v->numref)) {
823 valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
824 chroma_ref_type = v->reffield;
/* review: (condition dropped) — when too few valid blocks remain, store a
 * zero MV and skip MC entirely. */
826 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
827 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
828 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
829 return; //no need to do MC for intra blocks
/* review: two-reference field case: pick the dominant field among the four
 * block MVs and derive the chroma MV from the blocks of that field. */
833 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
835 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
837 chroma_ref_type = !v->cur_field_type;
839 if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
841 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
842 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
/* review: halve to chroma resolution, rounding quarter-pel value 3 up. */
843 uvmx = (tx + ((tx & 3) == 3)) >> 1;
844 uvmy = (ty + ((ty & 3) == 3)) >> 1;
846 v->luma_mv[s->mb_x][0] = uvmx;
847 v->luma_mv[s->mb_x][1] = uvmy;
/* review: fastuvmc rounds the chroma MV toward zero to even values
 * (enabling condition dropped from this extract). */
850 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
851 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
853 // Field conversion bias
854 if (v->cur_field_type != chroma_ref_type)
855 uvmy += 2 - 4 * chroma_ref_type;
857 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
858 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
860 if (v->profile != PROFILE_ADVANCED) {
861 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
862 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
864 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
865 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
/* review: pick reference chroma planes + LUTs, as in vc1_mc_1mv(). */
869 if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
870 srcU = s->current_picture.f->data[1];
871 srcV = s->current_picture.f->data[2];
872 lutuv = v->curr_lutuv;
873 use_ic = *v->curr_use_ic;
875 srcU = s->last_picture.f->data[1];
876 srcV = s->last_picture.f->data[2];
877 lutuv = v->last_lutuv;
878 use_ic = v->last_use_ic;
881 srcU = s->next_picture.f->data[1];
882 srcV = s->next_picture.f->data[2];
883 lutuv = v->next_lutuv;
884 use_ic = v->next_use_ic;
888 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
892 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
893 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* review: bottom-field reference starts one chroma line down. */
896 if (chroma_ref_type) {
897 srcU += s->current_picture_ptr->f->linesize[1];
898 srcV += s->current_picture_ptr->f->linesize[2];
/* review: 9x9 edge emulation when out of picture or per-pixel processing
 * is required. */
902 if (v->rangeredfrm || use_ic
903 || s->h_edge_pos < 18 || v_edge_pos < 18
904 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
905 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
906 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
907 s->uvlinesize, s->uvlinesize,
908 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
909 s->h_edge_pos >> 1, v_edge_pos >> 1);
910 s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
911 s->uvlinesize, s->uvlinesize,
912 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
913 s->h_edge_pos >> 1, v_edge_pos >> 1);
914 srcU = s->edge_emu_buffer;
915 srcV = s->edge_emu_buffer + 16;
917 /* if we deal with range reduction we need to scale source blocks */
918 if (v->rangeredfrm) {
924 for (j = 0; j < 9; j++) {
925 for (i = 0; i < 9; i++) {
926 src[i] = ((src[i] - 128) >> 1) + 128;
927 src2[i] = ((src2[i] - 128) >> 1) + 128;
929 src += s->uvlinesize;
930 src2 += s->uvlinesize;
933 /* if we deal with intensity compensation we need to scale source blocks */
940 for (j = 0; j < 9; j++) {
941 int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
942 for (i = 0; i < 9; i++) {
943 src[i] = lutuv[f][src[i]];
944 src2[i] = lutuv[f][src2[i]];
946 src += s->uvlinesize;
947 src2 += s->uvlinesize;
952 /* Chroma MC always uses qpel bilinear */
953 uvmx = (uvmx & 3) << 1;
954 uvmy = (uvmy & 3) << 1;
956 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
957 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
959 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
960 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
964 /** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
/* review: interlaced-frame chroma MC: processes the 8x8 U/V area as four
 * 4x4 sub-blocks, each with its own (possibly field) chroma MV derived from
 * the corresponding luma MV. dir/dir2 select the reference per sub-block
 * pair (i < 2 uses dir, i >= 2 uses dir2); avg selects averaging output.
 * NOTE(review): lines are missing from this extract (braces, the tx/ty
 * assignments, several conditions); comments describe visible code only. */
966 static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
968 MpegEncContext *s = &v->s;
969 H264ChromaContext *h264chroma = &v->h264chroma;
970 uint8_t *srcU, *srcV;
971 int uvsrc_x, uvsrc_y;
972 int uvmx_field[4], uvmy_field[4];
974 int fieldmv = v->blk_mv_type[s->block_index[0]];
/* review: rounding table for the vertical field-MV halving below. */
975 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
976 int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
977 int v_edge_pos = s->v_edge_pos >> 1;
979 uint8_t (*lutuv)[256];
981 if (s->flags & CODEC_FLAG_GRAY)
/* review: derive per-sub-block chroma MVs; vertical component of field MVs
 * uses the rounding table, frame MVs use the usual round-3-up halving. */
984 for (i = 0; i < 4; i++) {
985 int d = i < 2 ? dir: dir2;
987 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
990 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
992 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
995 for (i = 0; i < 4; i++) {
/* review: 4x4 destination offset of this sub-block within the chroma MB. */
996 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
997 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
998 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
999 // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1000 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1001 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
/* review: reference selection: nonzero direction -> next picture
 * (backward), zero -> last picture (forward). */
1002 if (i < 2 ? dir : dir2) {
1003 srcU = s->next_picture.f->data[1];
1004 srcV = s->next_picture.f->data[2];
1005 lutuv = v->next_lutuv;
1006 use_ic = v->next_use_ic;
1008 srcU = s->last_picture.f->data[1];
1009 srcV = s->last_picture.f->data[2];
1010 lutuv = v->last_lutuv;
1011 use_ic = v->last_use_ic;
1015 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1016 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1017 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1018 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
/* review: shrink/grow the usable vertical edge for field MVs depending on
 * line parity (one assignment dropped from this extract). */
1020 if (fieldmv && !(uvsrc_y & 1))
1021 v_edge_pos = (s->v_edge_pos >> 1) - 1;
1023 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
/* review: 5x5 edge emulation per sub-block. */
1026 || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1027 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1028 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1029 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
1030 s->uvlinesize, s->uvlinesize,
1031 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1032 s->h_edge_pos >> 1, v_edge_pos);
1033 s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
1034 s->uvlinesize, s->uvlinesize,
1035 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1036 s->h_edge_pos >> 1, v_edge_pos);
1037 srcU = s->edge_emu_buffer;
1038 srcV = s->edge_emu_buffer + 16;
1040 /* if we deal with intensity compensation we need to scale source blocks */
1043 uint8_t *src, *src2;
1047 for (j = 0; j < 5; j++) {
1048 int f = (uvsrc_y + (j << fieldmv)) & 1;
1049 for (i = 0; i < 5; i++) {
1050 src[i] = lutuv[f][src[i]];
1051 src2[i] = lutuv[f][src2[i]];
1053 src += s->uvlinesize << fieldmv;
1054 src2 += s->uvlinesize << fieldmv;
/* review: 4x4 bilinear chroma interpolation; avg vs put, and rounding vs
 * no-rounding variants (selecting conditions dropped from this extract). */
1060 h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1061 h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1063 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1064 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1068 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1069 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1071 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1072 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1078 /***********************************************************************/
1080 * @name VC-1 Block-level functions
1081 * @see 7.1.4, p91 and 8.1.1.7, p104
1087 * @brief Get macroblock-level quantizer scale
/*
 * GET_MQUANT(): derive the macroblock-level quantizer scale `mquant`.
 * Expands in a context where v (VC1Context*), s (MpegEncContext*), gb
 * (GetBitContext*), mquant, mqdiff and edges are all in scope.
 * For DQPROFILE_ALL_MBS it reads the quantizer from the bitstream
 * (1-bit pq/altpq select when dqbilevel, otherwise a 3-bit MQDIFF with
 * escape to a 5-bit absolute value); for the edge profiles it switches
 * to v->altpq on the selected macroblock edges, then rejects values
 * outside 1..31 with an error log.
 * NOTE(review): this extract elides several lines of the macro (gaps in
 * the original numbering), so some branches/braces are not visible here.
 */
1089 #define GET_MQUANT() \
1090 if (v->dquantfrm) { \
1092 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1093 if (v->dqbilevel) { \
1094 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1096 mqdiff = get_bits(gb, 3); \
1098 mquant = v->pq + mqdiff; \
1100 mquant = get_bits(gb, 5); \
1103 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1104 edges = 1 << v->dqsbedge; \
1105 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1106 edges = (3 << v->dqsbedge) % 15; \
1107 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1109 if ((edges&1) && !s->mb_x) \
1110 mquant = v->altpq; \
1111 if ((edges&2) && s->first_slice_line) \
1112 mquant = v->altpq; \
1113 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1114 mquant = v->altpq; \
1115 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1116 mquant = v->altpq; \
1117 if (!mquant || mquant > 31) { \
1118 av_log(v->s.avctx, AV_LOG_ERROR, \
1119 "Overriding invalid mquant %d\n", mquant); \
1125 * @def GET_MVDATA(_dmv_x, _dmv_y)
1126 * @brief Get MV differentials
1127 * @see MVDATA decoding from 8.3.5.2, p120
1128 * @param _dmv_x Horizontal differential for decoded MV
1129 * @param _dmv_y Vertical differential for decoded MV
/*
 * GET_MVDATA(_dmv_x, _dmv_y): decode one MVDATA element into the two
 * MV differentials.  Expands where gb, v, s, index, index1, val, sign
 * and mb_has_coeffs are in scope.  index 35 is the "raw MV" escape
 * (fixed-width k_x/k_y-derived fields), index 36 flags an intra block;
 * otherwise the x/y components are reconstructed from size_table /
 * offset_table via index%6 and index/6, with the sign folded in using
 * the (sign ^ value) - sign trick.
 * NOTE(review): lines are elided in this extract (gaps in the original
 * numbering) — the if/else skeleton around the visible lines is not
 * fully shown.
 */
1131 #define GET_MVDATA(_dmv_x, _dmv_y) \
1132 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1133 VC1_MV_DIFF_VLC_BITS, 2); \
1135 mb_has_coeffs = 1; \
1138 mb_has_coeffs = 0; \
1141 _dmv_x = _dmv_y = 0; \
1142 } else if (index == 35) { \
1143 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1144 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1145 } else if (index == 36) { \
1150 index1 = index % 6; \
1151 if (!s->quarter_sample && index1 == 5) val = 1; \
1153 if (size_table[index1] - val > 0) \
1154 val = get_bits(gb, size_table[index1] - val); \
1156 sign = 0 - (val&1); \
1157 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1159 index1 = index / 6; \
1160 if (!s->quarter_sample && index1 == 5) val = 1; \
1162 if (size_table[index1] - val > 0) \
1163 val = get_bits(gb, size_table[index1] - val); \
1165 sign = 0 - (val & 1); \
1166 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
/**
 * Decode an interlaced-picture MVDATA element.
 * Reads the joint MV VLC (2-ref or 1-ref table depending on the number
 * of references) and reconstructs the x/y differentials using
 * offset_table1/offset_table2, with optional extended range selected by
 * v->dmvrange.  When pred_flag is non-NULL and two reference fields are
 * in use, the predictor-select flag is extracted from the decoded y
 * component.
 * NOTE(review): several lines are elided in this extract (gaps in the
 * original numbering); the control-flow skeleton between the visible
 * statements is incomplete here.
 */
1169 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1170 int *dmv_y, int *pred_flag)
1173 int extend_x = 0, extend_y = 0;
1174 GetBitContext *gb = &v->s.gb;
1177 const int* offs_tab;
/* choose the VLC bit depth for 2-reference vs 1-reference field pictures */
1180 bits = VC1_2REF_MVDATA_VLC_BITS;
1183 bits = VC1_1REF_MVDATA_VLC_BITS;
/* DMVRANGE extends the decoded differential range horizontally and/or vertically */
1186 switch (v->dmvrange) {
1194 extend_x = extend_y = 1;
1197 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
/* escape: raw fixed-width MV components */
1199 *dmv_x = get_bits(gb, v->k_x);
1200 *dmv_y = get_bits(gb, v->k_y);
/* with two reference fields, the LSB of dmv_y carries the predictor-select flag */
1203 *pred_flag = *dmv_y & 1;
1204 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1206 *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1211 av_assert0(index < esc);
1213 offs_tab = offset_table2;
1215 offs_tab = offset_table1;
/* x component: offset-table reconstruction with sign folding */
1216 index1 = (index + 1) % 9;
1218 val = get_bits(gb, index1 + extend_x);
1219 sign = 0 -(val & 1);
1220 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1224 offs_tab = offset_table2;
1226 offs_tab = offset_table1;
/* y component; for 2-ref pictures the low bit of index1 is the pred flag */
1227 index1 = (index + 1) / 9;
1228 if (index1 > v->numref) {
1229 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1230 sign = 0 - (val & 1);
1231 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1234 if (v->numref && pred_flag)
1235 *pred_flag = index1 & 1;
/**
 * Scale a horizontal MV predictor taken from the opposite field so it
 * can be used for a same-field prediction (field pictures).
 * Scale factors come from ff_vc1_field_mvpred_scales, indexed by the
 * reference distance and by (dir ^ second_field).
 * @return the scaled value clipped to the horizontal MV range.
 * NOTE(review): some lines (declarations/else branches) are elided in
 * this extract — gaps in the original numbering.
 */
1239 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1241 int scaledvalue, refdist;
1242 int scalesame1, scalesame2;
1243 int scalezone1_x, zone1offset_x;
1244 int table_index = dir ^ v->second_field;
/* reference distance: refdist for P fields, brfd/frfd per direction for B fields */
1246 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1247 refdist = v->refdist;
1249 refdist = dir ? v->brfd : v->frfd;
1252 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1253 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1254 scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1255 zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
/* small magnitudes use scalesame1; larger ones use scalesame2 plus a zone offset */
1260 if (FFABS(n) < scalezone1_x)
1261 scaledvalue = (n * scalesame1) >> 8;
1264 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1266 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1269 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/**
 * Scale a vertical MV predictor taken from the opposite field for
 * same-field prediction (field pictures).  Mirrors scaleforsame_x but
 * uses the vertical scale/offset rows of ff_vc1_field_mvpred_scales and
 * an asymmetric clip range that depends on whether the current and
 * reference field types differ.
 * NOTE(review): some lines are elided in this extract (gaps in the
 * original numbering).
 */
1272 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1274 int scaledvalue, refdist;
1275 int scalesame1, scalesame2;
1276 int scalezone1_y, zone1offset_y;
1277 int table_index = dir ^ v->second_field;
1279 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1280 refdist = v->refdist;
1282 refdist = dir ? v->brfd : v->frfd;
1285 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1286 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1287 scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1288 zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1293 if (FFABS(n) < scalezone1_y)
1294 scaledvalue = (n * scalesame1) >> 8;
1297 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1299 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
/* clip is shifted by one when predicting a bottom field from a top field */
1303 if (v->cur_field_type && !v->ref_field_type[dir])
1304 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1306 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/**
 * Scale a horizontal MV predictor from the same field for use in an
 * opposite-field prediction (B field pictures).  Uses the
 * ff_vc1_b_field_mvpred_scales table indexed by the clamped backward
 * reference frame distance.
 * NOTE(review): some lines are elided in this extract (gaps in the
 * original numbering).
 */
1309 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1311 int scalezone1_x, zone1offset_x;
1312 int scaleopp1, scaleopp2, brfd;
1315 brfd = FFMIN(v->brfd, 3);
1316 scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1317 zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1318 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1319 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
/* small magnitudes use scaleopp1; larger ones use scaleopp2 plus a zone offset */
1324 if (FFABS(n) < scalezone1_x)
1325 scaledvalue = (n * scaleopp1) >> 8;
1328 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1330 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1333 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/**
 * Scale a vertical MV predictor from the same field for use in an
 * opposite-field prediction (B field pictures).  Vertical counterpart
 * of scaleforopp_x; the final clip range shifts by one when the current
 * and reference field types differ.
 * NOTE(review): some lines are elided in this extract (gaps in the
 * original numbering).
 */
1336 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1338 int scalezone1_y, zone1offset_y;
1339 int scaleopp1, scaleopp2, brfd;
1342 brfd = FFMIN(v->brfd, 3);
1343 scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1344 zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1345 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1346 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1351 if (FFABS(n) < scalezone1_y)
1352 scaledvalue = (n * scaleopp1) >> 8;
1355 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1357 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1360 if (v->cur_field_type && !v->ref_field_type[dir]) {
1361 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1363 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/**
 * Dispatch same-field MV predictor scaling: for non-B pictures, second
 * fields, or the forward direction it defers to scaleforsame_x/_y
 * (shifting by hpel to stay in the coded precision); for B first-field
 * backward prediction it applies the single scalesame factor from
 * ff_vc1_b_field_mvpred_scales.
 * NOTE(review): some lines are elided in this extract (gaps in the
 * original numbering).
 */
1367 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1370 int brfd, scalesame;
/* hpel == 1 when the picture is half-pel precision (not quarter-sample) */
1371 int hpel = 1 - v->s.quarter_sample;
1374 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1376 n = scaleforsame_y(v, i, n, dir) << hpel;
1378 n = scaleforsame_x(v, n, dir) << hpel;
1381 brfd = FFMIN(v->brfd, 3);
1382 scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1384 n = (n * scalesame >> 8) << hpel;
/**
 * Dispatch opposite-field MV predictor scaling: B first-field backward
 * prediction uses the zoned scaleforopp_x/_y path; every other case
 * applies the single scaleopp factor from ff_vc1_field_mvpred_scales
 * selected by direction/field parity and reference distance.
 * NOTE(review): some lines are elided in this extract (gaps in the
 * original numbering).
 */
1388 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1391 int refdist, scaleopp;
1392 int hpel = 1 - v->s.quarter_sample;
1395 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1397 n = scaleforopp_y(v, n, dir) << hpel;
1399 n = scaleforopp_x(v, n) << hpel;
1402 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1403 refdist = FFMIN(v->refdist, 3);
1405 refdist = dir ? v->brfd : v->frfd;
1406 scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1408 n = (n * scaleopp >> 8) << hpel;
1412 /** Predict and set motion vector
/**
 * Predict and set the motion vector for luma block n (and, for 1-MV
 * macroblocks, replicate it across the four 8x8 positions).
 * Gathers the A (above), B (above-diagonal) and C (left) candidate
 * predictors, scales them across fields in field-picture mode, takes
 * the component-wise median, applies the MV pullback of 8.3.5.3.4 and
 * the hybrid-prediction override of 8.3.5.3.5, then stores the result
 * with the signed-modulus range wrap of 4.11.
 * NOTE(review): many lines are elided in this extract (gaps in the
 * original numbering); braces/else branches between the visible
 * statements are incomplete here.
 */
1414 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1415 int mv1, int r_x, int r_y, uint8_t* is_intra,
1416 int pred_flag, int dir)
1418 MpegEncContext *s = &v->s;
1419 int xy, wrap, off = 0;
1423 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1424 int opposite, a_f, b_f, c_f;
1425 int16_t field_predA[2];
1426 int16_t field_predB[2];
1427 int16_t field_predC[2];
1428 int a_valid, b_valid, c_valid;
1429 int hybridmv_thresh, y_bias = 0;
1431 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1432 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1436 /* scale MV difference to be quad-pel */
1437 dmv_x <<= 1 - s->quarter_sample;
1438 dmv_y <<= 1 - s->quarter_sample;
1440 wrap = s->b8_stride;
1441 xy = s->block_index[n];
/* intra block: zero both direction MV fields and bail out */
1444 s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1445 s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1446 s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1447 s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1448 if (mv1) { /* duplicate motion data for 1-MV block */
1449 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1450 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1451 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1452 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1453 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1454 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1455 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1456 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1457 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
/* NOTE(review): unlike the surrounding stores, the next line lacks
 * "+ v->blocks_off" — verify against upstream whether this is intended */
1458 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1459 s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1460 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1461 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
/* candidate predictors: C = left, A = above, B = above-diagonal (off-dependent) */
1466 C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1467 A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1469 if (v->field_mode && mixedmv_pic)
1470 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1472 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1474 //in 4-MV mode different blocks have different B predictor position
1477 off = (s->mb_x > 0) ? -1 : 1;
1480 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1489 B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
/* predictor validity: picture edges and (in field mode) intra neighbours */
1491 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1492 b_valid = a_valid && (s->mb_width > 1);
1493 c_valid = s->mb_x || (n == 1 || n == 3);
1494 if (v->field_mode) {
1495 a_valid = a_valid && !is_intra[xy - wrap];
1496 b_valid = b_valid && !is_intra[xy - wrap + off];
1497 c_valid = c_valid && !is_intra[xy - 1];
/* tally same-field vs opposite-field candidates and copy their values */
1501 a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1502 num_oppfield += a_f;
1503 num_samefield += 1 - a_f;
1504 field_predA[0] = A[0];
1505 field_predA[1] = A[1];
1507 field_predA[0] = field_predA[1] = 0;
1511 b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1512 num_oppfield += b_f;
1513 num_samefield += 1 - b_f;
1514 field_predB[0] = B[0];
1515 field_predB[1] = B[1];
1517 field_predB[0] = field_predB[1] = 0;
1521 c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1522 num_oppfield += c_f;
1523 num_samefield += 1 - c_f;
1524 field_predC[0] = C[0];
1525 field_predC[1] = C[1];
1527 field_predC[0] = field_predC[1] = 0;
1531 if (v->field_mode) {
1533 // REFFIELD determines if the last field or the second-last field is
1534 // to be used as reference
1535 opposite = 1 - v->reffield;
1537 if (num_samefield <= num_oppfield)
1538 opposite = 1 - pred_flag;
1540 opposite = pred_flag;
/* reference is the opposite field: rescale same-field candidates */
1545 if (a_valid && !a_f) {
1546 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1547 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1549 if (b_valid && !b_f) {
1550 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1551 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1553 if (c_valid && !c_f) {
1554 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1555 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1557 v->mv_f[dir][xy + v->blocks_off] = 1;
1558 v->ref_field_type[dir] = !v->cur_field_type;
/* reference is the same field: rescale opposite-field candidates */
1560 if (a_valid && a_f) {
1561 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1562 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1564 if (b_valid && b_f) {
1565 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1566 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1568 if (c_valid && c_f) {
1569 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1570 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1572 v->mv_f[dir][xy + v->blocks_off] = 0;
1573 v->ref_field_type[dir] = v->cur_field_type;
/* fall back to the single valid candidate in priority order A, C, B */
1577 px = field_predA[0];
1578 py = field_predA[1];
1579 } else if (c_valid) {
1580 px = field_predC[0];
1581 py = field_predC[1];
1582 } else if (b_valid) {
1583 px = field_predB[0];
1584 py = field_predB[1];
/* two or more candidates: component-wise median */
1590 if (num_samefield + num_oppfield > 1) {
1591 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1592 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1595 /* Pullback MV as specified in 8.3.5.3.4 */
1596 if (!v->field_mode) {
1598 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1599 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1600 X = (s->mb_width << 6) - 4;
1601 Y = (s->mb_height << 6) - 4;
1603 if (qx + px < -60) px = -60 - qx;
1604 if (qy + py < -60) py = -60 - qy;
1606 if (qx + px < -28) px = -28 - qx;
1607 if (qy + py < -28) py = -28 - qy;
1609 if (qx + px > X) px = X - qx;
1610 if (qy + py > Y) py = Y - qy;
1613 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1614 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1615 hybridmv_thresh = 32;
1616 if (a_valid && c_valid) {
1617 if (is_intra[xy - wrap])
1618 sum = FFABS(px) + FFABS(py);
1620 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1621 if (sum > hybridmv_thresh) {
1622 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1623 px = field_predA[0];
1624 py = field_predA[1];
1626 px = field_predC[0];
1627 py = field_predC[1];
1630 if (is_intra[xy - 1])
1631 sum = FFABS(px) + FFABS(py);
1633 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1634 if (sum > hybridmv_thresh) {
1635 if (get_bits1(&s->gb)) {
1636 px = field_predA[0];
1637 py = field_predA[1];
1639 px = field_predC[0];
1640 py = field_predC[1];
/* y_bias handling for field pictures (elided assignments nearby) */
1647 if (v->field_mode && v->numref)
1649 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1651 /* store MV using signed modulus of MV range defined in 4.11 */
1652 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1653 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1654 if (mv1) { /* duplicate motion data for 1-MV block */
1655 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1656 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1657 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1658 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1659 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1660 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1661 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1662 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1666 /** Predict and set motion vector for interlaced frame picture MBs
/**
 * Predict and set the motion vector for block n of an interlaced-frame
 * picture macroblock (10.3.5.4).  Collects A/B/C candidates with
 * field/frame-MV-type aware averaging of field halves, classifies each
 * candidate as same- or opposite-field by the parity bit of its y
 * component, picks the median or a priority candidate, and stores the
 * wrapped MV; mvn selects 1-MV (replicate to 4 positions) or 2-field-MV
 * (replicate to 2 positions) duplication.
 * NOTE(review): many lines are elided in this extract (gaps in the
 * original numbering); the surrounding braces/else branches are
 * incomplete here.
 */
1668 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1669 int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
1671 MpegEncContext *s = &v->s;
1672 int xy, wrap, off = 0;
1673 int A[2], B[2], C[2];
1675 int a_valid = 0, b_valid = 0, c_valid = 0;
1676 int field_a, field_b, field_c; // 0: same, 1: opposite
1677 int total_valid, num_samefield, num_oppfield;
1678 int pos_c, pos_b, n_adj;
1680 wrap = s->b8_stride;
1681 xy = s->block_index[n];
/* intra block: zero both direction MV fields and bail out */
1684 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1685 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1686 s->current_picture.motion_val[1][xy][0] = 0;
1687 s->current_picture.motion_val[1][xy][1] = 0;
1688 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1689 s->current_picture.motion_val[0][xy + 1][0] = 0;
1690 s->current_picture.motion_val[0][xy + 1][1] = 0;
1691 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1692 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1693 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1694 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
1695 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1696 s->current_picture.motion_val[1][xy + 1][0] = 0;
1697 s->current_picture.motion_val[1][xy + 1][1] = 0;
1698 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1699 s->current_picture.motion_val[1][xy + wrap][1] = 0;
1700 s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
1701 s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
/* Predict A (left neighbour); off points at the other field half */
1706 off = ((n == 0) || (n == 1)) ? 1 : -1;
1708 if (s->mb_x || (n == 1) || (n == 3)) {
1709 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1710 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1711 A[0] = s->current_picture.motion_val[dir][xy - 1][0];
1712 A[1] = s->current_picture.motion_val[dir][xy - 1][1];
1714 } else { // current block has frame mv and cand. has field MV (so average)
1715 A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
1716 + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
1717 A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
1718 + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
1721 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1727 /* Predict B and C */
1728 B[0] = B[1] = C[0] = C[1] = 0;
1729 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1730 if (!s->first_slice_line) {
1731 if (!v->is_intra[s->mb_x - s->mb_stride]) {
1734 pos_b = s->block_index[n_adj] - 2 * wrap;
1735 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1736 n_adj = (n & 2) | (n & 1);
1738 B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
1739 B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
1740 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
/* frame-MV current block above field-MV neighbour: average the two field halves */
1741 B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1742 B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
1745 if (s->mb_width > 1) {
1746 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1749 pos_c = s->block_index[2] - 2 * wrap + 2;
1750 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1753 C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
1754 C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
1755 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1756 C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1757 C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
/* last MB in the row: C comes from the above-left instead of above-right */
1759 if (s->mb_x == s->mb_width - 1) {
1760 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1763 pos_c = s->block_index[3] - 2 * wrap - 2;
1764 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1767 C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
1768 C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
1769 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1770 C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1771 C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
/* bottom half of a field-MV MB: B and C come from the MB's own top blocks */
1780 pos_b = s->block_index[1];
1782 B[0] = s->current_picture.motion_val[dir][pos_b][0];
1783 B[1] = s->current_picture.motion_val[dir][pos_b][1];
1784 pos_c = s->block_index[0];
1786 C[0] = s->current_picture.motion_val[dir][pos_c][0];
1787 C[1] = s->current_picture.motion_val[dir][pos_c][1];
1790 total_valid = a_valid + b_valid + c_valid;
1791 // check if predictor A is out of bounds
1792 if (!s->mb_x && !(n == 1 || n == 3)) {
1795 // check if predictor B is out of bounds
1796 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1797 B[0] = B[1] = C[0] = C[1] = 0;
1799 if (!v->blk_mv_type[xy]) {
1800 if (s->mb_width == 1) {
/* frame-MV block: straight median / single-candidate selection */
1804 if (total_valid >= 2) {
1805 px = mid_pred(A[0], B[0], C[0]);
1806 py = mid_pred(A[1], B[1], C[1]);
1807 } else if (total_valid) {
1808 if (a_valid) { px = A[0]; py = A[1]; }
1809 else if (b_valid) { px = B[0]; py = B[1]; }
1810 else { px = C[0]; py = C[1]; }
/* field-MV block: classify candidates by the field-parity bit of the y component */
1815 field_a = (A[1] & 4) ? 1 : 0;
1819 field_b = (B[1] & 4) ? 1 : 0;
1823 field_c = (C[1] & 4) ? 1 : 0;
1827 num_oppfield = field_a + field_b + field_c;
1828 num_samefield = total_valid - num_oppfield;
1829 if (total_valid == 3) {
1830 if ((num_samefield == 3) || (num_oppfield == 3)) {
1831 px = mid_pred(A[0], B[0], C[0]);
1832 py = mid_pred(A[1], B[1], C[1]);
1833 } else if (num_samefield >= num_oppfield) {
1834 /* take one MV from same field set depending on priority
1835 the check for B may not be necessary */
1836 px = !field_a ? A[0] : B[0];
1837 py = !field_a ? A[1] : B[1];
1839 px = field_a ? A[0] : B[0];
1840 py = field_a ? A[1] : B[1];
1842 } else if (total_valid == 2) {
1843 if (num_samefield >= num_oppfield) {
1844 if (!field_a && a_valid) {
1847 } else if (!field_b && b_valid) {
1850 } else /*if (c_valid)*/ {
1851 av_assert1(c_valid);
1854 } /*else px = py = 0;*/
1856 if (field_a && a_valid) {
1859 } else /*if (field_b && b_valid)*/ {
1860 av_assert1(field_b && b_valid);
1863 } /*else if (c_valid) {
1868 } else if (total_valid == 1) {
1869 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1870 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1874 /* store MV using signed modulus of MV range defined in 4.11 */
1875 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1876 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1877 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1878 s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
1879 s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
1880 s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
1881 s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
1882 s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
1883 s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
1884 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1885 s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
1886 s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
1887 s->mv[dir][n + 1][0] = s->mv[dir][n][0];
1888 s->mv[dir][n + 1][1] = s->mv[dir][n][1];
1892 /** Motion compensation for direct or interpolated blocks in B-frames
/**
 * Motion compensation from the next (backward) reference picture, used
 * for the second half of interpolated/direct B-macroblocks: compute the
 * source position from s->mv[1][0], clip it, run edge emulation when
 * the block touches or exceeds the picture border, undo range
 * reduction / intensity compensation on the emulated source when
 * needed, then average (avg_*) luma and chroma into s->dest[].
 * NOTE(review): several lines are elided in this extract (gaps in the
 * original numbering); intervening braces/else branches are not shown.
 */
1894 static void vc1_interp_mc(VC1Context *v)
1896 MpegEncContext *s = &v->s;
1897 H264ChromaContext *h264chroma = &v->h264chroma;
1898 uint8_t *srcY, *srcU, *srcV;
1899 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1901 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1902 int use_ic = v->next_use_ic;
/* nothing to do without a next reference frame */
1904 if (!v->field_mode && !v->s.next_picture.f->data[0])
1907 mx = s->mv[1][0][0];
1908 my = s->mv[1][0][1];
/* chroma MV derived from luma MV (quarter-pel rounding toward even) */
1909 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1910 uvmy = (my + ((my & 3) == 3)) >> 1;
1911 if (v->field_mode) {
1912 if (v->cur_field_type != v->ref_field_type[1]) {
1913 my = my - 2 + 4 * v->cur_field_type;
1914 uvmy = uvmy - 2 + 4 * v->cur_field_type;
/* fastuvmc-style rounding of chroma MVs toward zero (elided condition above) */
1918 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1919 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1921 srcY = s->next_picture.f->data[0];
1922 srcU = s->next_picture.f->data[1];
1923 srcV = s->next_picture.f->data[2];
1925 src_x = s->mb_x * 16 + (mx >> 2);
1926 src_y = s->mb_y * 16 + (my >> 2);
1927 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1928 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1930 if (v->profile != PROFILE_ADVANCED) {
1931 src_x = av_clip( src_x, -16, s->mb_width * 16);
1932 src_y = av_clip( src_y, -16, s->mb_height * 16);
1933 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1934 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1936 src_x = av_clip( src_x, -17, s->avctx->coded_width);
1937 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1938 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1939 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1942 srcY += src_y * s->linesize + src_x;
1943 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1944 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* bottom reference field: start one line down */
1946 if (v->field_mode && v->ref_field_type[1]) {
1947 srcY += s->current_picture_ptr->f->linesize[0];
1948 srcU += s->current_picture_ptr->f->linesize[1];
1949 srcV += s->current_picture_ptr->f->linesize[2];
1952 /* for grayscale we should not try to read from unknown area */
1953 if (s->flags & CODEC_FLAG_GRAY) {
1954 srcU = s->edge_emu_buffer + 18 * s->linesize;
1955 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* edge emulation when the block reads beyond the picture, or when the
 * source must be modified (range reduction / intensity compensation) */
1958 if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
1959 || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1960 || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1961 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1963 srcY -= s->mspel * (1 + s->linesize);
1964 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
1965 s->linesize, s->linesize,
1966 17 + s->mspel * 2, 17 + s->mspel * 2,
1967 src_x - s->mspel, src_y - s->mspel,
1968 s->h_edge_pos, v_edge_pos);
1969 srcY = s->edge_emu_buffer;
1970 s->vdsp.emulated_edge_mc(uvbuf, srcU,
1971 s->uvlinesize, s->uvlinesize,
1974 s->h_edge_pos >> 1, v_edge_pos >> 1);
1975 s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
1976 s->uvlinesize, s->uvlinesize,
1979 s->h_edge_pos >> 1, v_edge_pos >> 1);
1982 /* if we deal with range reduction we need to scale source blocks */
1983 if (v->rangeredfrm) {
1985 uint8_t *src, *src2;
1988 for (j = 0; j < 17 + s->mspel * 2; j++) {
1989 for (i = 0; i < 17 + s->mspel * 2; i++)
1990 src[i] = ((src[i] - 128) >> 1) + 128;
1995 for (j = 0; j < 9; j++) {
1996 for (i = 0; i < 9; i++) {
1997 src[i] = ((src[i] - 128) >> 1) + 128;
1998 src2[i] = ((src2[i] - 128) >> 1) + 128;
2000 src += s->uvlinesize;
2001 src2 += s->uvlinesize;
/* intensity compensation: remap samples through the next-picture LUTs */
2006 uint8_t (*luty )[256] = v->next_luty;
2007 uint8_t (*lutuv)[256] = v->next_lutuv;
2009 uint8_t *src, *src2;
2012 for (j = 0; j < 17 + s->mspel * 2; j++) {
2013 int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
2014 for (i = 0; i < 17 + s->mspel * 2; i++)
2015 src[i] = luty[f][src[i]];
2020 for (j = 0; j < 9; j++) {
2021 int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
2022 for (i = 0; i < 9; i++) {
2023 src[i] = lutuv[f][src[i]];
2024 src2[i] = lutuv[f][src2[i]];
2026 src += s->uvlinesize;
2027 src2 += s->uvlinesize;
2030 srcY += s->mspel * (1 + s->linesize);
/* luma: mspel (VC-1 bicubic) path vs. plain half-pel averaging */
2037 dxy = ((my & 3) << 2) | (mx & 3);
2038 v->vc1dsp.avg_vc1_mspel_pixels_tab[0][dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2040 dxy = (my & 2) | ((mx & 2) >> 1);
2043 s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2045 s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
2048 if (s->flags & CODEC_FLAG_GRAY) return;
2049 /* Chroma MC always uses qpel bilinear */
2050 uvmx = (uvmx & 3) << 1;
2051 uvmy = (uvmy & 3) << 1;
2053 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2054 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2056 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2057 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Scale a co-located MV by the B-frame fraction (BFRACTION) to build a
 * direct-mode predictor: forward when inv == 0, backward (fraction
 * complemented against B_FRACTION_DEN) when inv != 0.  qs selects
 * quarter-sample rounding.  The B_FRACTION_DEN == 256 path replaces the
 * generic divisions with shifts.
 * NOTE(review): some lines (the n = bfrac setup and #else/#endif
 * skeleton) are elided in this extract.
 */
2061 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2065 #if B_FRACTION_DEN==256
2069 return 2 * ((value * n + 255) >> 9);
2070 return (value * n + 128) >> 8;
2073 n -= B_FRACTION_DEN;
2075 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2076 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2080 /** Reconstruct motion vector for B-frame and do motion compensation
/**
 * Run motion compensation for a B-frame macroblock: interpolated mode
 * blends both references, otherwise a single 1-MV pass is done with the
 * direction chosen by mode (backward vs forward).
 * NOTE(review): most of this function's body is elided in this extract
 * (gaps in the original numbering); only the fallback 1-MV call and the
 * interpolated-mode branch head are visible.
 */
2082 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2083 int direct, int mode)
2090 if (mode == BMV_TYPE_INTERPOLATED) {
2096 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2099 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2100 int direct, int mvtype)
2102 MpegEncContext *s = &v->s;
2103 int xy, wrap, off = 0;
2108 const uint8_t *is_intra = v->mb_type[0];
2110 av_assert0(!v->field_mode);
2114 /* scale MV difference to be quad-pel */
2115 dmv_x[0] <<= 1 - s->quarter_sample;
2116 dmv_y[0] <<= 1 - s->quarter_sample;
2117 dmv_x[1] <<= 1 - s->quarter_sample;
2118 dmv_y[1] <<= 1 - s->quarter_sample;
2120 wrap = s->b8_stride;
2121 xy = s->block_index[0];
2124 s->current_picture.motion_val[0][xy][0] =
2125 s->current_picture.motion_val[0][xy][1] =
2126 s->current_picture.motion_val[1][xy][0] =
2127 s->current_picture.motion_val[1][xy][1] = 0;
2130 if (direct && s->next_picture_ptr->field_picture)
2131 av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
2133 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2134 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2135 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2136 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2138 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2139 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2140 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2141 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2142 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2144 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2145 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2146 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2147 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2151 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2152 C = s->current_picture.motion_val[0][xy - 2];
2153 A = s->current_picture.motion_val[0][xy - wrap * 2];
2154 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2155 B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
2157 if (!s->mb_x) C[0] = C[1] = 0;
2158 if (!s->first_slice_line) { // predictor A is not out of bounds
2159 if (s->mb_width == 1) {
2163 px = mid_pred(A[0], B[0], C[0]);
2164 py = mid_pred(A[1], B[1], C[1]);
2166 } else if (s->mb_x) { // predictor C is not out of bounds
2172 /* Pullback MV as specified in 8.3.5.3.4 */
2175 if (v->profile < PROFILE_ADVANCED) {
2176 qx = (s->mb_x << 5);
2177 qy = (s->mb_y << 5);
2178 X = (s->mb_width << 5) - 4;
2179 Y = (s->mb_height << 5) - 4;
2180 if (qx + px < -28) px = -28 - qx;
2181 if (qy + py < -28) py = -28 - qy;
2182 if (qx + px > X) px = X - qx;
2183 if (qy + py > Y) py = Y - qy;
2185 qx = (s->mb_x << 6);
2186 qy = (s->mb_y << 6);
2187 X = (s->mb_width << 6) - 4;
2188 Y = (s->mb_height << 6) - 4;
2189 if (qx + px < -60) px = -60 - qx;
2190 if (qy + py < -60) py = -60 - qy;
2191 if (qx + px > X) px = X - qx;
2192 if (qy + py > Y) py = Y - qy;
2195 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2196 if (0 && !s->first_slice_line && s->mb_x) {
2197 if (is_intra[xy - wrap])
2198 sum = FFABS(px) + FFABS(py);
2200 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2202 if (get_bits1(&s->gb)) {
2210 if (is_intra[xy - 2])
2211 sum = FFABS(px) + FFABS(py);
2213 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2215 if (get_bits1(&s->gb)) {
2225 /* store MV using signed modulus of MV range defined in 4.11 */
2226 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2227 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2229 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2230 C = s->current_picture.motion_val[1][xy - 2];
2231 A = s->current_picture.motion_val[1][xy - wrap * 2];
2232 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2233 B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
2237 if (!s->first_slice_line) { // predictor A is not out of bounds
2238 if (s->mb_width == 1) {
2242 px = mid_pred(A[0], B[0], C[0]);
2243 py = mid_pred(A[1], B[1], C[1]);
2245 } else if (s->mb_x) { // predictor C is not out of bounds
2251 /* Pullback MV as specified in 8.3.5.3.4 */
2254 if (v->profile < PROFILE_ADVANCED) {
2255 qx = (s->mb_x << 5);
2256 qy = (s->mb_y << 5);
2257 X = (s->mb_width << 5) - 4;
2258 Y = (s->mb_height << 5) - 4;
2259 if (qx + px < -28) px = -28 - qx;
2260 if (qy + py < -28) py = -28 - qy;
2261 if (qx + px > X) px = X - qx;
2262 if (qy + py > Y) py = Y - qy;
2264 qx = (s->mb_x << 6);
2265 qy = (s->mb_y << 6);
2266 X = (s->mb_width << 6) - 4;
2267 Y = (s->mb_height << 6) - 4;
2268 if (qx + px < -60) px = -60 - qx;
2269 if (qy + py < -60) py = -60 - qy;
2270 if (qx + px > X) px = X - qx;
2271 if (qy + py > Y) py = Y - qy;
2274 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2275 if (0 && !s->first_slice_line && s->mb_x) {
2276 if (is_intra[xy - wrap])
2277 sum = FFABS(px) + FFABS(py);
2279 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2281 if (get_bits1(&s->gb)) {
2289 if (is_intra[xy - 2])
2290 sum = FFABS(px) + FFABS(py);
2292 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2294 if (get_bits1(&s->gb)) {
2304 /* store MV using signed modulus of MV range defined in 4.11 */
2306 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2307 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2309 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2310 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2311 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2312 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2315 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2317 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2318 MpegEncContext *s = &v->s;
2319 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2321 if (v->bmvtype == BMV_TYPE_DIRECT) {
2322 int total_opp, k, f;
2323 if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2324 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2325 v->bfraction, 0, s->quarter_sample);
2326 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2327 v->bfraction, 0, s->quarter_sample);
2328 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2329 v->bfraction, 1, s->quarter_sample);
2330 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2331 v->bfraction, 1, s->quarter_sample);
2333 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2334 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2335 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2336 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2337 f = (total_opp > 2) ? 1 : 0;
2339 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2340 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2343 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2344 for (k = 0; k < 4; k++) {
2345 s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2346 s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2347 s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2348 s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2349 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2350 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2354 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2355 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2356 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2359 if (dir) { // backward
2360 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2361 if (n == 3 || mv1) {
2362 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2365 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2366 if (n == 3 || mv1) {
2367 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2372 /** Get predicted DC value for I-frames only
2373 * prediction dir: left=0, top=1
2374 * @param s MpegEncContext
2375 * @param overlap flag indicating that overlap filtering is used
2376 * @param pq integer part of picture quantizer
2377 * @param[in] n block index in the current MB
2378 * @param dc_val_ptr Pointer to DC predictor
2379 * @param dir_ptr Prediction direction for use in AC prediction
2381 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2382 int16_t **dc_val_ptr, int *dir_ptr)
2384 int a, b, c, wrap, pred, scale;
/* Default edge predictors: dcpred[scale] approximates 1024/scale, used when a
 * neighbour lies outside the picture/slice. Entry 0 is a -1 sentinel (unused). */
2386 static const uint16_t dcpred[32] = {
2387 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2388 114, 102, 93, 85, 79, 73, 68, 64,
2389 60, 57, 54, 51, 49, 47, 45, 43,
2390 41, 39, 38, 37, 35, 34, 33
2393 /* find prediction - wmv3_dc_scale always used here in fact */
2394 if (n < 4) scale = s->y_dc_scale;
2395 else scale = s->c_dc_scale;
2397 wrap = s->block_wrap[n];
2398 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour DC, a = top neighbour DC (c = left, assigned in an
 * elided line — NOTE(review): lines are missing from this excerpt). */
2404 b = dc_val[ - 1 - wrap];
2405 a = dc_val[ - wrap];
2407 if (pq < 9 || !overlap) {
2408 /* Set outer values */
2409 if (s->first_slice_line && (n != 2 && n != 3))
2410 b = a = dcpred[scale];
2411 if (s->mb_x == 0 && (n != 1 && n != 3))
2412 b = c = dcpred[scale];
2414 /* Set outer values */
2415 if (s->first_slice_line && (n != 2 && n != 3))
2417 if (s->mb_x == 0 && (n != 1 && n != 3))
/* direction choice: smaller horizontal gradient -> predict from the left */
2421 if (abs(a - b) <= abs(b - c)) {
2423 *dir_ptr = 1; // left
2426 *dir_ptr = 0; // top
2429 /* update predictor */
2430 *dc_val_ptr = &dc_val[0];
2435 /** Get predicted DC value
2436 * prediction dir: left=0, top=1
2437 * @param s MpegEncContext
2438 * @param overlap flag indicating that overlap filtering is used
2439 * @param pq integer part of picture quantizer
2440 * @param[in] n block index in the current MB
2441 * @param a_avail flag indicating top block availability
2442 * @param c_avail flag indicating left block availability
2443 * @param dc_val_ptr Pointer to DC predictor
2444 * @param dir_ptr Prediction direction for use in AC prediction
2446 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2447 int a_avail, int c_avail,
2448 int16_t **dc_val_ptr, int *dir_ptr)
2450 int a, b, c, wrap, pred;
2452 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2456 wrap = s->block_wrap[n];
2457 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour DC, a = top neighbour DC (left neighbour c is read on
 * an elided line — NOTE(review): this excerpt is missing lines). */
2463 b = dc_val[ - 1 - wrap];
2464 a = dc_val[ - wrap];
2465 /* scale predictors if needed */
2466 q1 = s->current_picture.qscale_table[mb_pos];
2467 dqscale_index = s->y_dc_scale_table[q1] - 1;
2468 if (dqscale_index < 0)
/* Each neighbour may sit in a MB with a different quantizer; rescale its DC to
 * the current quantizer: x * dc_scale(q2) / dc_scale(q1), done in 18-bit
 * fixed point via ff_vc1_dqscale with 0x20000 as the rounding constant. */
2470 if (c_avail && (n != 1 && n != 3)) {
2471 q2 = s->current_picture.qscale_table[mb_pos - 1];
2473 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2475 if (a_avail && (n != 2 && n != 3)) {
2476 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2478 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2480 if (a_avail && c_avail && (n != 3)) {
2485 off -= s->mb_stride;
2486 q2 = s->current_picture.qscale_table[off];
2488 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
/* direction choice mirrors vc1_i_pred_dc but honours availability flags;
 * with neither neighbour available, default to "left" with the elided
 * fallback predictor value. */
2491 if (a_avail && c_avail) {
2492 if (abs(a - b) <= abs(b - c)) {
2494 *dir_ptr = 1; // left
2497 *dir_ptr = 0; // top
2499 } else if (a_avail) {
2501 *dir_ptr = 0; // top
2502 } else if (c_avail) {
2504 *dir_ptr = 1; // left
2507 *dir_ptr = 1; // left
2510 /* update predictor */
2511 *dc_val_ptr = &dc_val[0];
2515 /** @} */ // Block group
2518 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2519  * @see 7.1.4, p91 and 8.1.1.7, p104
/* Predict the coded-block flag from the left (a), top-left (b) and top (c)
 * neighbours, and return a pointer to this block's slot so the caller can
 * store the decoded flag. NOTE(review): the lines combining a/b/c into `pred`
 * and the return statement are elided in this excerpt. */
2523 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2524 uint8_t **coded_block_ptr)
2526 int xy, wrap, pred, a, b, c;
2528 xy = s->block_index[n];
2529 wrap = s->b8_stride;
2534 a = s->coded_block[xy - 1 ];
2535 b = s->coded_block[xy - 1 - wrap];
2536 c = s->coded_block[xy - wrap];
2545 *coded_block_ptr = &s->coded_block[xy];
2551 * Decode one AC coefficient
2552 * @param v The VC1 context
2553 * @param last Last coefficient
2554 * @param skip How much zero coefficients to skip
2555 * @param value Decoded AC coefficient value
2556 * @param codingset set of VLC to decode data
2559 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2560 int *value, int codingset)
2562 GetBitContext *gb = &v->s.gb;
2563 int index, escape, run = 0, level = 0, lst = 0;
2565 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
/* Regular (non-escape) code: run/level come straight from the index tables.
 * The get_bits_left() < 0 term forces "last" on bitstream overread so the
 * caller's coefficient loop terminates. */
2566 if (index != ff_vc1_ac_sizes[codingset] - 1) {
2567 run = vc1_index_decode_table[codingset][index][0];
2568 level = vc1_index_decode_table[codingset][index][1];
2569 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
/* Escape coding: decode210() selects one of three escape modes
 * (delta-level, delta-run, or fully explicit run/level). */
2573 escape = decode210(gb);
2575 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2576 run = vc1_index_decode_table[codingset][index][0];
2577 level = vc1_index_decode_table[codingset][index][1];
2578 lst = index >= vc1_last_decode_table[codingset];
2581 level += vc1_last_delta_level_table[codingset][run];
2583 level += vc1_delta_level_table[codingset][run];
2586 run += vc1_last_delta_run_table[codingset][level] + 1;
2588 run += vc1_delta_run_table[codingset][level] + 1;
/* Mode 3: explicit run/level; the field widths (esc3_*_length) are read once
 * per picture and cached in the context. */
2594 lst = get_bits1(gb);
2595 if (v->s.esc3_level_length == 0) {
2596 if (v->pq < 8 || v->dquantfrm) { // table 59
2597 v->s.esc3_level_length = get_bits(gb, 3);
2598 if (!v->s.esc3_level_length)
2599 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2600 } else { // table 60
2601 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2603 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2605 run = get_bits(gb, v->s.esc3_run_length);
2606 sign = get_bits1(gb);
2607 level = get_bits(gb, v->s.esc3_level_length);
2618 /** Decode intra block in intra frames - should be faster than decode_intra_block
2619 * @param v VC1Context
2620 * @param block block to decode
2621 * @param[in] n subblock index
2622 * @param coded are AC coeffs present or not
2623 * @param codingset set of VLC to decode data
2625 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2626 int coded, int codingset)
2628 GetBitContext *gb = &v->s.gb;
2629 MpegEncContext *s = &v->s;
2630 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2633 int16_t *ac_val, *ac_val2;
2636 /* Get DC differential */
/* luma blocks (n < 4, presumably — the guard is elided) use the luma DC VLC,
 * chroma blocks the chroma one */
2638 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2640 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2643 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape: DC differential is coded explicitly, with a PQ-dependent width */
2647 if (dcdiff == 119 /* ESC index value */) {
2648 /* TODO: Optimize */
2649 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2650 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2651 else dcdiff = get_bits(gb, 8);
/* low-PQ refinement bits extend the differential's precision */
2654 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2655 else if (v->pq == 2)
2656 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2663 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2666 /* Store the quantized DC coeff, used for prediction */
2668 block[0] = dcdiff * s->y_dc_scale;
2670 block[0] = dcdiff * s->c_dc_scale;
/* --- AC coefficients (coded case) --- */
2681 int last = 0, skip, value;
2682 const uint8_t *zz_table;
2686 scale = v->pq * 2 + v->halfpq;
/* zig-zag scan choice depends on the DC prediction direction (elided guards) */
2690 zz_table = v->zz_8x8[2];
2692 zz_table = v->zz_8x8[3];
2694 zz_table = v->zz_8x8[1];
2696 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2698 if (dc_pred_dir) // left
2701 ac_val -= 16 * s->block_wrap[n];
2704 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2708 block[zz_table[i++]] = value;
2711 /* apply AC prediction if needed */
2713 if (dc_pred_dir) { // left
2714 for (k = 1; k < 8; k++)
2715 block[k << v->left_blk_sh] += ac_val[k];
2717 for (k = 1; k < 8; k++)
2718 block[k << v->top_blk_sh] += ac_val[k + 8];
2721 /* save AC coeffs for further prediction */
2722 for (k = 1; k < 8; k++) {
2723 ac_val2[k] = block[k << v->left_blk_sh];
2724 ac_val2[k + 8] = block[k << v->top_blk_sh];
2727 /* scale AC coeffs */
2728 for (k = 1; k < 64; k++)
/* uniform-quantizer reconstruction adds +/-PQ to nonzero levels */
2732 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2735 if (s->ac_pred) i = 63;
/* --- not-coded case: ACs come purely from prediction --- */
2741 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2745 scale = v->pq * 2 + v->halfpq;
2746 memset(ac_val2, 0, 16 * 2);
2747 if (dc_pred_dir) { // left
2750 memcpy(ac_val2, ac_val, 8 * 2);
2752 ac_val -= 16 * s->block_wrap[n];
2754 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2757 /* apply AC prediction if needed */
2759 if (dc_pred_dir) { //left
2760 for (k = 1; k < 8; k++) {
2761 block[k << v->left_blk_sh] = ac_val[k] * scale;
2762 if (!v->pquantizer && block[k << v->left_blk_sh])
2763 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2766 for (k = 1; k < 8; k++) {
2767 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2768 if (!v->pquantizer && block[k << v->top_blk_sh])
2769 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2775 s->block_last_index[n] = i;
2780 /** Decode intra block in intra frames - should be faster than decode_intra_block
2781 * @param v VC1Context
2782 * @param block block to decode
2783 * @param[in] n subblock number
2784 * @param coded are AC coeffs present or not
2785 * @param codingset set of VLC to decode data
2786 * @param mquant quantizer value for this macroblock
2788 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2789 int coded, int codingset, int mquant)
2791 GetBitContext *gb = &v->s.gb;
2792 MpegEncContext *s = &v->s;
2793 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2795 int16_t *dc_val = NULL;
2796 int16_t *ac_val, *ac_val2;
2798 int a_avail = v->a_avail, c_avail = v->c_avail;
2799 int use_pred = s->ac_pred;
2802 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2804 /* Get DC differential */
2806 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2808 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2811 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape path: explicit DC differential, width depends on MQUANT
 * (per-MB quantizer, unlike vc1_decode_i_block which uses picture PQ) */
2815 if (dcdiff == 119 /* ESC index value */) {
2816 /* TODO: Optimize */
2817 if (mquant == 1) dcdiff = get_bits(gb, 10);
2818 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2819 else dcdiff = get_bits(gb, 8);
2822 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2823 else if (mquant == 2)
2824 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2831 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2834 /* Store the quantized DC coeff, used for prediction */
2836 block[0] = dcdiff * s->y_dc_scale;
2838 block[0] = dcdiff * s->c_dc_scale;
2844 /* check if AC is needed at all */
2845 if (!a_avail && !c_avail)
2847 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2850 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2852 if (dc_pred_dir) // left
2855 ac_val -= 16 * s->block_wrap[n];
/* q1 = current MB quantizer, q2 = the predicting neighbour's quantizer;
 * needed to rescale predicted AC coefficients across quantizer changes */
2857 q1 = s->current_picture.qscale_table[mb_pos];
2858 if ( dc_pred_dir && c_avail && mb_pos)
2859 q2 = s->current_picture.qscale_table[mb_pos - 1];
2860 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2861 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2862 if ( dc_pred_dir && n == 1)
2864 if (!dc_pred_dir && n == 2)
2870 int last = 0, skip, value;
2871 const uint8_t *zz_table;
/* interlaced frames use the dedicated zzi scan; otherwise the scan depends
 * on the DC prediction direction */
2875 if (!use_pred && v->fcm == ILACE_FRAME) {
2876 zz_table = v->zzi_8x8;
2878 if (!dc_pred_dir) // top
2879 zz_table = v->zz_8x8[2];
2881 zz_table = v->zz_8x8[3];
2884 if (v->fcm != ILACE_FRAME)
2885 zz_table = v->zz_8x8[1];
2887 zz_table = v->zzi_8x8;
2891 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2895 block[zz_table[i++]] = value;
2898 /* apply AC prediction if needed */
2900 /* scale predictors if needed*/
2901 if (q2 && q1 != q2) {
2902 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2903 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2906 return AVERROR_INVALIDDATA;
/* rescale in 18-bit fixed point: ac * scale(q2)/scale(q1) */
2907 if (dc_pred_dir) { // left
2908 for (k = 1; k < 8; k++)
2909 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2911 for (k = 1; k < 8; k++)
2912 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2915 if (dc_pred_dir) { //left
2916 for (k = 1; k < 8; k++)
2917 block[k << v->left_blk_sh] += ac_val[k];
2919 for (k = 1; k < 8; k++)
2920 block[k << v->top_blk_sh] += ac_val[k + 8];
2924 /* save AC coeffs for further prediction */
2925 for (k = 1; k < 8; k++) {
2926 ac_val2[k ] = block[k << v->left_blk_sh];
2927 ac_val2[k + 8] = block[k << v->top_blk_sh];
2930 /* scale AC coeffs */
2931 for (k = 1; k < 64; k++)
2935 block[k] += (block[k] < 0) ? -mquant : mquant;
2938 if (use_pred) i = 63;
2939 } else { // no AC coeffs
2942 memset(ac_val2, 0, 16 * 2);
2943 if (dc_pred_dir) { // left
2945 memcpy(ac_val2, ac_val, 8 * 2);
2946 if (q2 && q1 != q2) {
2947 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2948 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2950 return AVERROR_INVALIDDATA;
2951 for (k = 1; k < 8; k++)
2952 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2957 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2958 if (q2 && q1 != q2) {
2959 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2960 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2962 return AVERROR_INVALIDDATA;
2963 for (k = 1; k < 8; k++)
2964 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2969 /* apply AC prediction if needed */
2971 if (dc_pred_dir) { // left
2972 for (k = 1; k < 8; k++) {
2973 block[k << v->left_blk_sh] = ac_val2[k] * scale;
2974 if (!v->pquantizer && block[k << v->left_blk_sh])
2975 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2978 for (k = 1; k < 8; k++) {
2979 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2980 if (!v->pquantizer && block[k << v->top_blk_sh])
2981 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2987 s->block_last_index[n] = i;
2992 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2993 * @param v VC1Context
2994 * @param block block to decode
2995 * @param[in] n subblock index
2996 * @param coded are AC coeffs present or not
2997 * @param mquant block quantizer
2998 * @param codingset set of VLC to decode data
3000 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
3001 int coded, int mquant, int codingset)
3003 GetBitContext *gb = &v->s.gb;
3004 MpegEncContext *s = &v->s;
3005 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3007 int16_t *dc_val = NULL;
3008 int16_t *ac_val, *ac_val2;
3010 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3011 int a_avail = v->a_avail, c_avail = v->c_avail;
3012 int use_pred = s->ac_pred;
3016 s->dsp.clear_block(block);
3018 /* XXX: Guard against dumb values of mquant */
3019 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3021 /* Set DC scale - y and c use the same */
3022 s->y_dc_scale = s->y_dc_scale_table[mquant];
3023 s->c_dc_scale = s->c_dc_scale_table[mquant];
3025 /* Get DC differential */
3027 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3029 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3032 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* escape path: explicit DC differential, width depends on MQUANT */
3036 if (dcdiff == 119 /* ESC index value */) {
3037 /* TODO: Optimize */
3038 if (mquant == 1) dcdiff = get_bits(gb, 10);
3039 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3040 else dcdiff = get_bits(gb, 8);
3043 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3044 else if (mquant == 2)
3045 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3052 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3055 /* Store the quantized DC coeff, used for prediction */
3058 block[0] = dcdiff * s->y_dc_scale;
3060 block[0] = dcdiff * s->c_dc_scale;
3066 /* check if AC is needed at all and adjust direction if needed */
3067 if (!a_avail) dc_pred_dir = 1;
3068 if (!c_avail) dc_pred_dir = 0;
3069 if (!a_avail && !c_avail) use_pred = 0;
3070 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3073 scale = mquant * 2 + v->halfpq;
3075 if (dc_pred_dir) //left
3078 ac_val -= 16 * s->block_wrap[n];
/* q1/q2: current vs predicting-neighbour quantizers, for AC rescaling */
3080 q1 = s->current_picture.qscale_table[mb_pos];
3081 if (dc_pred_dir && c_avail && mb_pos)
3082 q2 = s->current_picture.qscale_table[mb_pos - 1];
3083 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3084 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3085 if ( dc_pred_dir && n == 1)
3087 if (!dc_pred_dir && n == 2)
3089 if (n == 3) q2 = q1;
3092 int last = 0, skip, value;
3096 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
/* scan-order selection: progressive uses zz_8x8[0]; interlaced frames with
 * AC prediction pick a direction-dependent scan, otherwise zzi */
3100 if (v->fcm == PROGRESSIVE)
3101 block[v->zz_8x8[0][i++]] = value;
3103 if (use_pred && (v->fcm == ILACE_FRAME)) {
3104 if (!dc_pred_dir) // top
3105 block[v->zz_8x8[2][i++]] = value;
3107 block[v->zz_8x8[3][i++]] = value;
3109 block[v->zzi_8x8[i++]] = value;
3114 /* apply AC prediction if needed */
3116 /* scale predictors if needed*/
3117 if (q2 && q1 != q2) {
3118 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3119 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3122 return AVERROR_INVALIDDATA;
3123 if (dc_pred_dir) { // left
3124 for (k = 1; k < 8; k++)
3125 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3127 for (k = 1; k < 8; k++)
3128 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3131 if (dc_pred_dir) { // left
3132 for (k = 1; k < 8; k++)
3133 block[k << v->left_blk_sh] += ac_val[k];
3135 for (k = 1; k < 8; k++)
3136 block[k << v->top_blk_sh] += ac_val[k + 8];
3140 /* save AC coeffs for further prediction */
3141 for (k = 1; k < 8; k++) {
3142 ac_val2[k ] = block[k << v->left_blk_sh];
3143 ac_val2[k + 8] = block[k << v->top_blk_sh];
3146 /* scale AC coeffs */
3147 for (k = 1; k < 64; k++)
3151 block[k] += (block[k] < 0) ? -mquant : mquant;
3154 if (use_pred) i = 63;
3155 } else { // no AC coeffs
3158 memset(ac_val2, 0, 16 * 2);
3159 if (dc_pred_dir) { // left
3161 memcpy(ac_val2, ac_val, 8 * 2);
3162 if (q2 && q1 != q2) {
3163 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3164 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3166 return AVERROR_INVALIDDATA;
3167 for (k = 1; k < 8; k++)
3168 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3173 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3174 if (q2 && q1 != q2) {
3175 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3176 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3178 return AVERROR_INVALIDDATA;
3179 for (k = 1; k < 8; k++)
3180 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3185 /* apply AC prediction if needed */
3187 if (dc_pred_dir) { // left
3188 for (k = 1; k < 8; k++) {
3189 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3190 if (!v->pquantizer && block[k << v->left_blk_sh])
3191 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3194 for (k = 1; k < 8; k++) {
3195 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3196 if (!v->pquantizer && block[k << v->top_blk_sh])
3197 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3203 s->block_last_index[n] = i;
/** Decode an inter-coded residual block in a P frame.
 * @param v VC1Context
 * @param block 8x8 coefficient buffer
 * @param[in] n block index within the MB
 * @param mquant block quantizer
 * @param ttmb MB-level transform type (-1 if signalled per block)
 * @param first_block whether this is the first coded block of the MB
 * @param dst destination pixels; @param linesize its stride
 * @param skip_block if set, decode but do not reconstruct pixels
 * NOTE(review): this excerpt is missing lines (final parameter, some loop
 * headers and closing braces are elided).
 */
3210 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3211 int mquant, int ttmb, int first_block,
3212 uint8_t *dst, int linesize, int skip_block,
3215 MpegEncContext *s = &v->s;
3216 GetBitContext *gb = &s->gb;
3219 int scale, off, idx, last, skip, value;
3220 int ttblk = ttmb & 7;
3223 s->dsp.clear_block(block);
/* per-block transform type signalled in the bitstream when not fixed per MB */
3226 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3228 if (ttblk == TT_4X4) {
3229 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3231 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3232 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3233 || (!v->res_rtm_flag && !first_block))) {
3234 subblkpat = decode012(gb);
3236 subblkpat ^= 3; // swap decoded pattern bits
3237 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3239 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3242 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3244 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3245 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3246 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3249 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3250 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* --- TT_8X8: one transform over the whole block --- */
3259 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3264 idx = v->zz_8x8[0][i++];
3266 idx = v->zzi_8x8[i++];
3267 block[idx] = value * scale;
3269 block[idx] += (block[idx] < 0) ? -mquant : mquant;
/* DC-only fast path vs full 8x8 inverse transform + add */
3273 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3275 v->vc1dsp.vc1_inv_trans_8x8(block);
3276 s->dsp.add_pixels_clamped(block, dst, linesize);
/* --- TT_4X4: four 4x4 sub-transforms, subblkpat selects coded quarters --- */
3281 pat = ~subblkpat & 0xF;
3282 for (j = 0; j < 4; j++) {
3283 last = subblkpat & (1 << (3 - j));
3285 off = (j & 1) * 4 + (j & 2) * 16;
3287 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3292 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3294 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3295 block[idx + off] = value * scale;
3297 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3299 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3301 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3303 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* --- TT_8X4: two 8x4 halves --- */
3308 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3309 for (j = 0; j < 2; j++) {
3310 last = subblkpat & (1 << (1 - j));
3314 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3319 idx = v->zz_8x4[i++] + off;
3321 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3322 block[idx] = value * scale;
3324 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3326 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3328 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3330 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* --- TT_4X8: two 4x8 halves --- */
3335 pat = ~(subblkpat * 5) & 0xF;
3336 for (j = 0; j < 2; j++) {
3337 last = subblkpat & (1 << (1 - j));
3341 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3346 idx = v->zz_4x8[i++] + off;
3348 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3349 block[idx] = value * scale;
3351 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3353 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3355 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3357 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* report the transform type used, 4 bits per block */
3363 *ttmb_out |= ttblk << (n * 4);
3367 /** @} */ // Macroblock group
/* MV differential decoding tables: bit-count per size class and the matching
 * value offsets (presumably the progressive MVDATA tables — confirm against
 * the VC-1 spec; cf. offset_table1/2 above for the interlaced variants). */
3369 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3370 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/** Apply the in-loop vertical deblocking filter for one block of a P frame.
 * Works one MB row behind the decode position (indices use
 * s->mb_x - s->mb_stride), filtering the edge against the block below.
 * @param block_num 0-3 = luma 8x8 blocks, 4-5 = chroma planes
 * NOTE(review): lines are elided in this excerpt; comments cover visible code.
 */
3372 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3374 MpegEncContext *s = &v->s;
3375 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3376 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3377 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3378 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3379 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3382 if (block_num > 3) {
3383 dst = s->dest[block_num - 3];
3385 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3387 if (s->mb_y != s->end_mb_y || block_num < 2) {
/* gather the below-neighbour's cbp/intra state and MV to decide filtering */
3391 if (block_num > 3) {
3392 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3393 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3394 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3395 mv_stride = s->mb_stride;
3397 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3398 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3399 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3400 : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3401 mv_stride = s->b8_stride;
3402 mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* filter the full 8-pel edge if either side is intra or the MVs differ */
3405 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3406 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3407 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3409 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3411 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3414 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3416 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* internal (mid-block) edge: only for 4x4/8x4 transforms */
3421 dst -= 4 * linesize;
3422 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3423 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3424 idx = (block_cbp | (block_cbp >> 2)) & 3;
3426 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3429 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3431 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/** Apply the in-loop horizontal deblocking filter for one block of a P frame.
 * Works one MB behind both horizontally and vertically (indices use
 * s->mb_x - 1 - s->mb_stride), filtering the edge against the block to the right.
 * @param block_num 0-3 = luma 8x8 blocks, 4-5 = chroma planes
 * NOTE(review): lines are elided in this excerpt; comments cover visible code.
 */
3436 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3438 MpegEncContext *s = &v->s;
3439 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3440 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3441 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3442 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3443 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3446 if (block_num > 3) {
3447 dst = s->dest[block_num - 3] - 8 * linesize;
3449 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3452 if (s->mb_x != s->mb_width || !(block_num & 5)) {
/* gather the right-neighbour's cbp/intra state and MV to decide filtering */
3455 if (block_num > 3) {
3456 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3457 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3458 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3460 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3461 : (mb_cbp >> ((block_num + 1) * 4));
3462 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3463 : (mb_is_intra >> ((block_num + 1) * 4));
3464 mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
/* filter the full 8-pel edge if either side is intra or the MVs differ */
3466 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3467 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3469 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3471 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3474 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3476 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* internal (mid-block) edge: only for 4x4/4x8 transforms */
3482 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3483 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3484 idx = (block_cbp | (block_cbp >> 1)) & 5;
3486 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3489 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3491 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* Drive the deferred P-frame loop filtering at the current MB position:
 * vertical edges for all six blocks first, then horizontal edges (which lag
 * one MB behind, since V must always precede H), finally catching up on the
 * remaining H filters when the end of an MB row is reached. */
3496 static void vc1_apply_p_loop_filter(VC1Context *v)
3498 MpegEncContext *s = &v->s;
3501 for (i = 0; i < 6; i++) {
3502 vc1_apply_p_v_loop_filter(v, i);
3505 /* V always precedes H, therefore we run H one MB before V;
3506 * at the end of a row, we catch up to complete the row */
3508 for (i = 0; i < 6; i++) {
3509 vc1_apply_p_h_loop_filter(v, i);
3511 if (s->mb_x == s->mb_width - 1) {
3513 ff_update_block_index(s);
3514 for (i = 0; i < 6; i++) {
3515 vc1_apply_p_h_loop_filter(v, i);
3521 /** Decode one P-frame MB
/* Decode one progressive P-frame macroblock: read the 4MV/skip flags (raw
 * bits or bitplane), the motion vector data, CBP and coefficients, then
 * reconstruct intra blocks and motion-compensate + add residuals for inter
 * blocks.  Per-MB cbp/ttblk/is_intra are stored for the deferred loop
 * filter.
 * NOTE(review): the embedded line numbers show gaps (e.g. GET_MQUANT calls,
 * closing braces, else branches missing), so the control flow in this
 * extract is incomplete. */
3523 static int vc1_decode_p_mb(VC1Context *v)
3525 MpegEncContext *s = &v->s;
3526 GetBitContext *gb = &s->gb;
3528 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3529 int cbp; /* cbp decoding stuff */
3530 int mqdiff, mquant; /* MB quantization */
3531 int ttmb = v->ttfrm; /* MB Transform type */
3533 int mb_has_coeffs = 1; /* last_flag */
3534 int dmv_x, dmv_y; /* Differential MV components */
3535 int index, index1; /* LUT indexes */
3536 int val, sign; /* temp values */
3537 int first_block = 1;
3539 int skipped, fourmv;
3540 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3542 mquant = v->pq; /* lossy initialization */
/* 4MV / skip flags: raw from the bitstream or from decoded bitplanes */
3544 if (v->mv_type_is_raw)
3545 fourmv = get_bits1(gb);
3547 fourmv = v->mv_type_mb_plane[mb_pos];
3549 skipped = get_bits1(gb);
3551 skipped = v->s.mbskip_table[mb_pos];
3553 if (!fourmv) { /* 1MV mode */
3555 GET_MVDATA(dmv_x, dmv_y);
/* P-frames have no backward MVs; clear the second-direction entry */
3558 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3559 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3561 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3562 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3564 /* FIXME Set DC val for inter block ? */
3565 if (s->mb_intra && !mb_has_coeffs) {
3567 s->ac_pred = get_bits1(gb);
3569 } else if (mb_has_coeffs) {
3571 s->ac_pred = get_bits1(gb);
3572 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3578 s->current_picture.qscale_table[mb_pos] = mquant;
3580 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3581 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3582 VC1_TTMB_VLC_BITS, 2);
3583 if (!s->mb_intra) vc1_mc_1mv(v, 0);
/* per-block loop: CBP bit 5-i selects block i (Y0..Y3, Cb, Cr) */
3585 for (i = 0; i < 6; i++) {
3586 s->dc_val[0][s->block_index[i]] = 0;
3588 val = ((cbp >> (5 - i)) & 1);
3589 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3590 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3592 /* check if prediction blocks A and C are available */
3593 v->a_avail = v->c_avail = 0;
3594 if (i == 2 || i == 3 || !s->first_slice_line)
3595 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3596 if (i == 1 || i == 3 || s->mb_x)
3597 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3599 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3600 (i & 4) ? v->codingset2 : v->codingset);
3601 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3603 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* coefficients doubled here — presumably the range-reduction case; the
 * guarding condition line is missing from this extract */
3605 for (j = 0; j < 64; j++)
3606 s->block[i][j] <<= 1;
3607 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3608 if (v->pq >= 9 && v->overlap) {
3610 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3612 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3614 block_cbp |= 0xF << (i << 2);
3615 block_intra |= 1 << i;
3617 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3618 s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3619 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3620 block_cbp |= pat << (i << 2);
3621 if (!v->ttmbf && ttmb < 8)
/* skipped 1MV MB: zero block types/DC, predict MV, motion compensate */
3628 for (i = 0; i < 6; i++) {
3629 v->mb_type[0][s->block_index[i]] = 0;
3630 s->dc_val[0][s->block_index[i]] = 0;
3632 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3633 s->current_picture.qscale_table[mb_pos] = 0;
3634 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3637 } else { // 4MV mode
3638 if (!skipped /* unskipped MB */) {
3639 int intra_count = 0, coded_inter = 0;
3640 int is_intra[6], is_coded[6];
3642 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3643 for (i = 0; i < 6; i++) {
3644 val = ((cbp >> (5 - i)) & 1);
3645 s->dc_val[0][s->block_index[i]] = 0;
3652 GET_MVDATA(dmv_x, dmv_y);
3654 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3656 vc1_mc_4mv_luma(v, i, 0, 0);
3657 intra_count += s->mb_intra;
3658 is_intra[i] = s->mb_intra;
3659 is_coded[i] = mb_has_coeffs;
/* presumably the chroma (i >= 4) branch: intra inferred from the luma
 * intra count — the surrounding condition line is missing here */
3662 is_intra[i] = (intra_count >= 3);
3666 vc1_mc_4mv_chroma(v, 0);
3667 v->mb_type[0][s->block_index[i]] = is_intra[i];
3669 coded_inter = !is_intra[i] & is_coded[i];
3671 // if there are no coded blocks then don't do anything more
3673 if (!intra_count && !coded_inter)
3676 s->current_picture.qscale_table[mb_pos] = mquant;
3677 /* test if block is intra and has pred */
3680 for (i = 0; i < 6; i++)
3682 if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3683 || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3689 s->ac_pred = get_bits1(gb);
3693 if (!v->ttmbf && coded_inter)
3694 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3695 for (i = 0; i < 6; i++) {
3697 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3698 s->mb_intra = is_intra[i];
3700 /* check if prediction blocks A and C are available */
3701 v->a_avail = v->c_avail = 0;
3702 if (i == 2 || i == 3 || !s->first_slice_line)
3703 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3704 if (i == 1 || i == 3 || s->mb_x)
3705 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3707 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3708 (i & 4) ? v->codingset2 : v->codingset);
3709 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3711 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3713 for (j = 0; j < 64; j++)
3714 s->block[i][j] <<= 1;
3715 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3716 (i & 4) ? s->uvlinesize : s->linesize);
3717 if (v->pq >= 9 && v->overlap) {
3719 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3721 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3723 block_cbp |= 0xF << (i << 2);
3724 block_intra |= 1 << i;
3725 } else if (is_coded[i]) {
3726 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3727 first_block, s->dest[dst_idx] + off,
3728 (i & 4) ? s->uvlinesize : s->linesize,
3729 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3731 block_cbp |= pat << (i << 2);
3732 if (!v->ttmbf && ttmb < 8)
3737 } else { // skipped MB
3739 s->current_picture.qscale_table[mb_pos] = 0;
3740 for (i = 0; i < 6; i++) {
3741 v->mb_type[0][s->block_index[i]] = 0;
3742 s->dc_val[0][s->block_index[i]] = 0;
/* skipped 4MV MB: predict and compensate each luma block, then chroma */
3744 for (i = 0; i < 4; i++) {
3745 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3746 vc1_mc_4mv_luma(v, i, 0, 0);
3748 vc1_mc_4mv_chroma(v, 0);
3749 s->current_picture.qscale_table[mb_pos] = 0;
/* stash per-MB info for the deferred loop-filter pass */
3753 v->cbp[s->mb_x] = block_cbp;
3754 v->ttblk[s->mb_x] = block_tt;
3755 v->is_intra[s->mb_x] = block_intra;
3760 /* Decode one macroblock in an interlaced frame p picture */
/* Decode one macroblock of an interlaced-frame P picture: read the MB mode
 * VLC (4MV-capable table when FOURMVSWITCH is set), record the per-block MV
 * type (frame vs field MVs), then decode either an intra MB (with optional
 * FIELDTX block interleaving) or an inter MB with 1/2/4 motion vectors.
 * NOTE(review): the embedded line numbers contain gaps; several statements
 * (break/else lines, GET_MQUANT, closing braces) are missing from this
 * extract. */
3762 static int vc1_decode_p_mb_intfr(VC1Context *v)
3764 MpegEncContext *s = &v->s;
3765 GetBitContext *gb = &s->gb;
3767 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3768 int cbp = 0; /* cbp decoding stuff */
3769 int mqdiff, mquant; /* MB quantization */
3770 int ttmb = v->ttfrm; /* MB Transform type */
3772 int mb_has_coeffs = 1; /* last_flag */
3773 int dmv_x, dmv_y; /* Differential MV components */
3774 int val; /* temp value */
3775 int first_block = 1;
3777 int skipped, fourmv = 0, twomv = 0;
3778 int block_cbp = 0, pat, block_tt = 0;
3779 int idx_mbmode = 0, mvbp;
3780 int stride_y, fieldtx;
3782 mquant = v->pq; /* Lossy initialization */
3785 skipped = get_bits1(gb);
3787 skipped = v->s.mbskip_table[mb_pos];
3789 if (v->fourmvswitch)
3790 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3792 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3793 switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3794 /* store the motion vector type in a flag (useful later) */
3795 case MV_PMODE_INTFR_4MV:
3797 v->blk_mv_type[s->block_index[0]] = 0;
3798 v->blk_mv_type[s->block_index[1]] = 0;
3799 v->blk_mv_type[s->block_index[2]] = 0;
3800 v->blk_mv_type[s->block_index[3]] = 0;
3802 case MV_PMODE_INTFR_4MV_FIELD:
3804 v->blk_mv_type[s->block_index[0]] = 1;
3805 v->blk_mv_type[s->block_index[1]] = 1;
3806 v->blk_mv_type[s->block_index[2]] = 1;
3807 v->blk_mv_type[s->block_index[3]] = 1;
3809 case MV_PMODE_INTFR_2MV_FIELD:
3811 v->blk_mv_type[s->block_index[0]] = 1;
3812 v->blk_mv_type[s->block_index[1]] = 1;
3813 v->blk_mv_type[s->block_index[2]] = 1;
3814 v->blk_mv_type[s->block_index[3]] = 1;
3816 case MV_PMODE_INTFR_1MV:
3817 v->blk_mv_type[s->block_index[0]] = 0;
3818 v->blk_mv_type[s->block_index[1]] = 0;
3819 v->blk_mv_type[s->block_index[2]] = 0;
3820 v->blk_mv_type[s->block_index[3]] = 0;
3823 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3824 for (i = 0; i < 4; i++) {
3825 s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3826 s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3828 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3829 s->mb_intra = v->is_intra[s->mb_x] = 1;
3830 for (i = 0; i < 6; i++)
3831 v->mb_type[0][s->block_index[i]] = 1;
3832 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3833 mb_has_coeffs = get_bits1(gb);
3835 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3836 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3838 s->current_picture.qscale_table[mb_pos] = mquant;
3839 /* Set DC scale - y and c use the same (not sure if necessary here) */
3840 s->y_dc_scale = s->y_dc_scale_table[mquant];
3841 s->c_dc_scale = s->c_dc_scale_table[mquant];
3843 for (i = 0; i < 6; i++) {
3844 s->dc_val[0][s->block_index[i]] = 0;
3846 val = ((cbp >> (5 - i)) & 1);
3847 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3848 v->a_avail = v->c_avail = 0;
3849 if (i == 2 || i == 3 || !s->first_slice_line)
3850 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3851 if (i == 1 || i == 3 || s->mb_x)
3852 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3854 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3855 (i & 4) ? v->codingset2 : v->codingset);
3856 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3857 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* FIELDTX: luma rows of the two fields are interleaved, so the effective
 * stride doubles and the block offsets change accordingly */
3859 stride_y = s->linesize << fieldtx;
3860 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3862 stride_y = s->uvlinesize;
3865 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3869 } else { // inter MB
3870 mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3872 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3873 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3874 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3876 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3877 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3878 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3881 s->mb_intra = v->is_intra[s->mb_x] = 0;
3882 for (i = 0; i < 6; i++)
3883 v->mb_type[0][s->block_index[i]] = 0;
3884 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3885 /* for all motion vector read MVDATA and motion compensate each block */
/* presumably the 4MV path: one MV per coded luma block, then chroma —
 * the guarding condition lines are missing from this extract */
3889 for (i = 0; i < 6; i++) {
3892 val = ((mvbp >> (3 - i)) & 1);
3894 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3896 vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3897 vc1_mc_4mv_luma(v, i, 0, 0);
3898 } else if (i == 4) {
3899 vc1_mc_4mv_chroma4(v, 0, 0, 0);
/* 2MV-field path: one MV per field, each covering two luma blocks */
3906 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3908 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3909 vc1_mc_4mv_luma(v, 0, 0, 0);
3910 vc1_mc_4mv_luma(v, 1, 0, 0);
3913 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3915 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3916 vc1_mc_4mv_luma(v, 2, 0, 0);
3917 vc1_mc_4mv_luma(v, 3, 0, 0);
3918 vc1_mc_4mv_chroma4(v, 0, 0, 0);
3920 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3923 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3925 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3929 GET_MQUANT(); // p. 227
3930 s->current_picture.qscale_table[mb_pos] = mquant;
3931 if (!v->ttmbf && cbp)
3932 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3933 for (i = 0; i < 6; i++) {
3934 s->dc_val[0][s->block_index[i]] = 0;
3936 val = ((cbp >> (5 - i)) & 1);
3938 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
/* fieldtx variant of the luma offset — note the halved row step */
3940 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3942 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3943 first_block, s->dest[dst_idx] + off,
3944 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3945 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3946 block_cbp |= pat << (i << 2);
3947 if (!v->ttmbf && ttmb < 8)
/* skipped MB: clear state, predict the MV, (compensation lines missing) */
3954 s->mb_intra = v->is_intra[s->mb_x] = 0;
3955 for (i = 0; i < 6; i++) {
3956 v->mb_type[0][s->block_index[i]] = 0;
3957 s->dc_val[0][s->block_index[i]] = 0;
3959 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3960 s->current_picture.qscale_table[mb_pos] = 0;
3961 v->blk_mv_type[s->block_index[0]] = 0;
3962 v->blk_mv_type[s->block_index[1]] = 0;
3963 v->blk_mv_type[s->block_index[2]] = 0;
3964 v->blk_mv_type[s->block_index[3]] = 0;
3965 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
/* end of row: save the intra flags row for next-row prediction */
3968 if (s->mb_x == s->mb_width - 1)
3969 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/* Decode one macroblock of an interlaced-field P picture: MB mode indices
 * 0-1 are intra, 2-5 are 1-MV inter, the rest 4-MV inter.  Field pictures
 * address the current field via v->blocks_off / v->mb_off.
 * NOTE(review): gaps in the embedded line numbers mean some statements
 * (GET_MQUANT, braces, else branches) are missing from this extract. */
3973 static int vc1_decode_p_mb_intfi(VC1Context *v)
3975 MpegEncContext *s = &v->s;
3976 GetBitContext *gb = &s->gb;
3978 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3979 int cbp = 0; /* cbp decoding stuff */
3980 int mqdiff, mquant; /* MB quantization */
3981 int ttmb = v->ttfrm; /* MB Transform type */
3983 int mb_has_coeffs = 1; /* last_flag */
3984 int dmv_x, dmv_y; /* Differential MV components */
3985 int val; /* temp values */
3986 int first_block = 1;
3989 int block_cbp = 0, pat, block_tt = 0;
3992 mquant = v->pq; /* Lossy initialization */
3994 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
3995 if (idx_mbmode <= 1) { // intra MB
3996 s->mb_intra = v->is_intra[s->mb_x] = 1;
3997 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
3998 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
3999 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4001 s->current_picture.qscale_table[mb_pos] = mquant;
4002 /* Set DC scale - y and c use the same (not sure if necessary here) */
4003 s->y_dc_scale = s->y_dc_scale_table[mquant];
4004 s->c_dc_scale = s->c_dc_scale_table[mquant];
4005 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4006 mb_has_coeffs = idx_mbmode & 1;
4008 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4010 for (i = 0; i < 6; i++) {
4011 s->dc_val[0][s->block_index[i]] = 0;
4012 v->mb_type[0][s->block_index[i]] = 1;
4014 val = ((cbp >> (5 - i)) & 1);
4015 v->a_avail = v->c_avail = 0;
4016 if (i == 2 || i == 3 || !s->first_slice_line)
4017 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4018 if (i == 1 || i == 3 || s->mb_x)
4019 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4021 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4022 (i & 4) ? v->codingset2 : v->codingset);
4023 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4025 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4026 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4027 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4028 // TODO: loop filter
4031 s->mb_intra = v->is_intra[s->mb_x] = 0;
4032 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4033 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4034 if (idx_mbmode <= 5) { // 1-MV
4035 dmv_x = dmv_y = pred_flag = 0;
4036 if (idx_mbmode & 1) {
4037 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4039 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4041 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: a block pattern VLC tells which luma blocks carry an MV */
4043 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4044 for (i = 0; i < 6; i++) {
4046 dmv_x = dmv_y = pred_flag = 0;
4047 val = ((v->fourmvbp >> (3 - i)) & 1);
4049 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4051 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4052 vc1_mc_4mv_luma(v, i, 0, 0);
4054 vc1_mc_4mv_chroma(v, 0);
4056 mb_has_coeffs = idx_mbmode & 1;
4059 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4063 s->current_picture.qscale_table[mb_pos] = mquant;
4064 if (!v->ttmbf && cbp) {
4065 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4068 for (i = 0; i < 6; i++) {
4069 s->dc_val[0][s->block_index[i]] = 0;
4071 val = ((cbp >> (5 - i)) & 1);
4072 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4074 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4075 first_block, s->dest[dst_idx] + off,
4076 (i & 4) ? s->uvlinesize : s->linesize,
4077 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4079 block_cbp |= pat << (i << 2);
4080 if (!v->ttmbf && ttmb < 8) ttmb = -1;
/* end of row: save the intra flags row for next-row prediction */
4085 if (s->mb_x == s->mb_width - 1)
4086 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4090 /** Decode one B-frame MB (in Main profile)
/* Decode one progressive B-frame macroblock: read the DIRECT and SKIP
 * flags (raw or bitplane), determine the B MV type (forward / backward /
 * interpolated / direct), predict and compensate, then decode coefficients
 * for intra and coded inter blocks.
 * NOTE(review): the embedded line numbers contain gaps, so some statements
 * (GET_MQUANT, braces, else branches) are missing from this extract. */
4092 static void vc1_decode_b_mb(VC1Context *v)
4094 MpegEncContext *s = &v->s;
4095 GetBitContext *gb = &s->gb;
4097 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4098 int cbp = 0; /* cbp decoding stuff */
4099 int mqdiff, mquant; /* MB quantization */
4100 int ttmb = v->ttfrm; /* MB Transform type */
4101 int mb_has_coeffs = 0; /* last_flag */
4102 int index, index1; /* LUT indexes */
4103 int val, sign; /* temp values */
4104 int first_block = 1;
4106 int skipped, direct;
4107 int dmv_x[2], dmv_y[2];
4108 int bmvtype = BMV_TYPE_BACKWARD;
4110 mquant = v->pq; /* lossy initialization */
4114 direct = get_bits1(gb);
4116 direct = v->direct_mb_plane[mb_pos];
4118 skipped = get_bits1(gb);
4120 skipped = v->s.mbskip_table[mb_pos];
4122 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4123 for (i = 0; i < 6; i++) {
4124 v->mb_type[0][s->block_index[i]] = 0;
4125 s->dc_val[0][s->block_index[i]] = 0;
4127 s->current_picture.qscale_table[mb_pos] = 0;
/* non-direct MB: one MVDATA, duplicated into both directions */
4131 GET_MVDATA(dmv_x[0], dmv_y[0]);
4132 dmv_x[1] = dmv_x[0];
4133 dmv_y[1] = dmv_y[0];
4135 if (skipped || !s->mb_intra) {
4136 bmvtype = decode012(gb);
/* decode012 result maps to forward/backward depending on BFRACTION's
 * half-point, the third value selecting interpolated mode */
4139 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4142 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4145 bmvtype = BMV_TYPE_INTERPOLATED;
4146 dmv_x[0] = dmv_y[0] = 0;
4150 for (i = 0; i < 6; i++)
4151 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* skipped MB path — presumably guarded by if (skipped); the guard line is
 * missing from this extract */
4155 bmvtype = BMV_TYPE_INTERPOLATED;
4156 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4157 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4161 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4164 s->current_picture.qscale_table[mb_pos] = mquant;
4166 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4167 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4168 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4169 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4171 if (!mb_has_coeffs && !s->mb_intra) {
4172 /* no coded blocks - effectively skipped */
4173 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4174 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4177 if (s->mb_intra && !mb_has_coeffs) {
4179 s->current_picture.qscale_table[mb_pos] = mquant;
4180 s->ac_pred = get_bits1(gb);
4182 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4184 if (bmvtype == BMV_TYPE_INTERPOLATED) {
/* interpolated: a second MVDATA for the backward vector */
4185 GET_MVDATA(dmv_x[0], dmv_y[0]);
4186 if (!mb_has_coeffs) {
4187 /* interpolated skipped block */
4188 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4189 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4193 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4195 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4198 s->ac_pred = get_bits1(gb);
4199 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4201 s->current_picture.qscale_table[mb_pos] = mquant;
4202 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4203 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4207 for (i = 0; i < 6; i++) {
4208 s->dc_val[0][s->block_index[i]] = 0;
4210 val = ((cbp >> (5 - i)) & 1);
4211 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4212 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4214 /* check if prediction blocks A and C are available */
4215 v->a_avail = v->c_avail = 0;
4216 if (i == 2 || i == 3 || !s->first_slice_line)
4217 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4218 if (i == 1 || i == 3 || s->mb_x)
4219 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4221 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4222 (i & 4) ? v->codingset2 : v->codingset);
4223 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4225 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* coefficients doubled here — presumably the range-reduction case; the
 * guarding condition line is missing from this extract */
4227 for (j = 0; j < 64; j++)
4228 s->block[i][j] <<= 1;
4229 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4231 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4232 first_block, s->dest[dst_idx] + off,
4233 (i & 4) ? s->uvlinesize : s->linesize,
4234 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4235 if (!v->ttmbf && ttmb < 8)
4242 /** Decode one B-frame MB (in interlaced field B picture)
/* Decode one macroblock of an interlaced-field B picture: MB mode indices
 * 0-1 are intra, 2-5 are 1-MV inter (with forward/backward/direct/
 * interpolated selection), the rest 4-MV (forced forward).
 * NOTE(review): the embedded line numbers contain gaps; some statements
 * (GET_MQUANT, braces, else branches) are missing from this extract. */
4244 static void vc1_decode_b_mb_intfi(VC1Context *v)
4246 MpegEncContext *s = &v->s;
4247 GetBitContext *gb = &s->gb;
4249 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4250 int cbp = 0; /* cbp decoding stuff */
4251 int mqdiff, mquant; /* MB quantization */
4252 int ttmb = v->ttfrm; /* MB Transform type */
4253 int mb_has_coeffs = 0; /* last_flag */
4254 int val; /* temp value */
4255 int first_block = 1;
4258 int dmv_x[2], dmv_y[2], pred_flag[2];
4259 int bmvtype = BMV_TYPE_BACKWARD;
4262 mquant = v->pq; /* Lossy initialization */
4265 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4266 if (idx_mbmode <= 1) { // intra MB
4267 s->mb_intra = v->is_intra[s->mb_x] = 1;
4268 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4269 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4270 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4272 s->current_picture.qscale_table[mb_pos] = mquant;
4273 /* Set DC scale - y and c use the same (not sure if necessary here) */
4274 s->y_dc_scale = s->y_dc_scale_table[mquant];
4275 s->c_dc_scale = s->c_dc_scale_table[mquant];
4276 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4277 mb_has_coeffs = idx_mbmode & 1;
4279 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4281 for (i = 0; i < 6; i++) {
4282 s->dc_val[0][s->block_index[i]] = 0;
4284 val = ((cbp >> (5 - i)) & 1);
4285 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4286 v->a_avail = v->c_avail = 0;
4287 if (i == 2 || i == 3 || !s->first_slice_line)
4288 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4289 if (i == 1 || i == 3 || s->mb_x)
4290 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4292 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4293 (i & 4) ? v->codingset2 : v->codingset);
4294 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4296 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* coefficients doubled here — presumably the range-reduction case; the
 * guarding condition line is missing from this extract */
4298 for (j = 0; j < 64; j++)
4299 s->block[i][j] <<= 1;
4300 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4301 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4302 // TODO: yet to perform loop filter
4305 s->mb_intra = v->is_intra[s->mb_x] = 0;
4306 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4307 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4309 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4311 fwd = v->forward_mb_plane[mb_pos];
4312 if (idx_mbmode <= 5) { // 1-MV
4314 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4315 pred_flag[0] = pred_flag[1] = 0;
4317 bmvtype = BMV_TYPE_FORWARD;
4319 bmvtype = decode012(gb);
4322 bmvtype = BMV_TYPE_BACKWARD;
4325 bmvtype = BMV_TYPE_DIRECT;
4328 bmvtype = BMV_TYPE_INTERPOLATED;
4329 interpmvp = get_bits1(gb);
4332 v->bmvtype = bmvtype;
4333 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4334 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4337 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4339 if (bmvtype == BMV_TYPE_DIRECT) {
4340 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
/* NOTE(review): pred_flag[0] is zeroed twice; pred_flag[1] looks like the
 * intended target on this line — confirm against upstream */
4341 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4342 if (!s->next_picture_ptr->field_picture) {
4343 av_log(s->avctx, AV_LOG_ERROR, "Mixed field/frame direct mode not supported\n");
4347 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4348 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4349 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: always forward-predicted in field B pictures */
4352 bmvtype = BMV_TYPE_FORWARD;
4353 v->bmvtype = bmvtype;
4354 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4355 for (i = 0; i < 6; i++) {
4357 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4358 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4359 val = ((v->fourmvbp >> (3 - i)) & 1);
4361 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4362 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4363 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4365 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4366 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
4368 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4370 mb_has_coeffs = idx_mbmode & 1;
4373 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4377 s->current_picture.qscale_table[mb_pos] = mquant;
4378 if (!v->ttmbf && cbp) {
4379 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4382 for (i = 0; i < 6; i++) {
4383 s->dc_val[0][s->block_index[i]] = 0;
4385 val = ((cbp >> (5 - i)) & 1);
4386 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4388 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4389 first_block, s->dest[dst_idx] + off,
4390 (i & 4) ? s->uvlinesize : s->linesize,
4391 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4392 if (!v->ttmbf && ttmb < 8)
4400 /** Decode one B-frame MB (in interlaced frame B picture)
4402 static int vc1_decode_b_mb_intfr(VC1Context *v)
4404 MpegEncContext *s = &v->s;
4405 GetBitContext *gb = &s->gb;
4407 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4408 int cbp = 0; /* cbp decoding stuff */
4409 int mqdiff, mquant; /* MB quantization */
4410 int ttmb = v->ttfrm; /* MB Transform type */
4411 int mvsw = 0; /* motion vector switch */
4412 int mb_has_coeffs = 1; /* last_flag */
4413 int dmv_x, dmv_y; /* Differential MV components */
4414 int val; /* temp value */
4415 int first_block = 1;
4417 int skipped, direct, twomv = 0;
4418 int block_cbp = 0, pat, block_tt = 0;
4419 int idx_mbmode = 0, mvbp;
4420 int stride_y, fieldtx;
4421 int bmvtype = BMV_TYPE_BACKWARD;
4424 mquant = v->pq; /* Lossy initialization */
4427 skipped = get_bits1(gb);
4429 skipped = v->s.mbskip_table[mb_pos];
4432 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
4433 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
4435 v->blk_mv_type[s->block_index[0]] = 1;
4436 v->blk_mv_type[s->block_index[1]] = 1;
4437 v->blk_mv_type[s->block_index[2]] = 1;
4438 v->blk_mv_type[s->block_index[3]] = 1;
4440 v->blk_mv_type[s->block_index[0]] = 0;
4441 v->blk_mv_type[s->block_index[1]] = 0;
4442 v->blk_mv_type[s->block_index[2]] = 0;
4443 v->blk_mv_type[s->block_index[3]] = 0;
4448 direct = get_bits1(gb);
4450 direct = v->direct_mb_plane[mb_pos];
4453 if (s->next_picture_ptr->field_picture)
4454 av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
4455 s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
4456 s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
4457 s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
4458 s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
4461 s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
4462 s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
4463 s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
4464 s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
4466 for (i = 1; i < 4; i += 2) {
4467 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
4468 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
4469 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
4470 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
4473 for (i = 1; i < 4; i++) {
4474 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
4475 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
4476 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
4477 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
4482 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
4483 for (i = 0; i < 4; i++) {
4484 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
4485 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
4486 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
4487 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
4489 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4490 s->mb_intra = v->is_intra[s->mb_x] = 1;
4491 for (i = 0; i < 6; i++)
4492 v->mb_type[0][s->block_index[i]] = 1;
4493 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
4494 mb_has_coeffs = get_bits1(gb);
4496 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4497 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4499 s->current_picture.qscale_table[mb_pos] = mquant;
4500 /* Set DC scale - y and c use the same (not sure if necessary here) */
4501 s->y_dc_scale = s->y_dc_scale_table[mquant];
4502 s->c_dc_scale = s->c_dc_scale_table[mquant];
4504 for (i = 0; i < 6; i++) {
4505 s->dc_val[0][s->block_index[i]] = 0;
4507 val = ((cbp >> (5 - i)) & 1);
4508 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4509 v->a_avail = v->c_avail = 0;
4510 if (i == 2 || i == 3 || !s->first_slice_line)
4511 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4512 if (i == 1 || i == 3 || s->mb_x)
4513 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4515 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4516 (i & 4) ? v->codingset2 : v->codingset);
4517 if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
4519 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4521 stride_y = s->linesize << fieldtx;
4522 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4524 stride_y = s->uvlinesize;
4527 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
4530 s->mb_intra = v->is_intra[s->mb_x] = 0;
4532 if (skipped || !s->mb_intra) {
4533 bmvtype = decode012(gb);
4536 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4539 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4542 bmvtype = BMV_TYPE_INTERPOLATED;
4546 if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
4547 mvsw = get_bits1(gb);
4550 if (!skipped) { // inter MB
4551 mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
4553 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4555 if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
4556 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4557 } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
4558 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
4562 for (i = 0; i < 6; i++)
4563 v->mb_type[0][s->block_index[i]] = 0;
4564 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
4565 /* for all motion vector read MVDATA and motion compensate each block */
4569 for (i = 0; i < 4; i++) {
4570 vc1_mc_4mv_luma(v, i, 0, 0);
4571 vc1_mc_4mv_luma(v, i, 1, 1);
4573 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4574 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4579 } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
4581 for (i = 0; i < 4; i++) {
4584 val = ((mvbp >> (3 - i)) & 1);
4586 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4588 vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4589 vc1_mc_4mv_luma(v, j, dir, dir);
4590 vc1_mc_4mv_luma(v, j+1, dir, dir);
4593 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4594 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4595 } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
4599 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4601 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4606 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4608 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4611 dir = bmvtype == BMV_TYPE_BACKWARD;
4618 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4619 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4623 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4624 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
4627 for (i = 0; i < 2; i++) {
4628 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4629 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4630 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4631 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4634 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4635 vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4638 vc1_mc_4mv_luma(v, 0, dir, 0);
4639 vc1_mc_4mv_luma(v, 1, dir, 0);
4640 vc1_mc_4mv_luma(v, 2, dir2, 0);
4641 vc1_mc_4mv_luma(v, 3, dir2, 0);
4642 vc1_mc_4mv_chroma4(v, dir, dir2, 0);
4644 dir = bmvtype == BMV_TYPE_BACKWARD;
4646 mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
4649 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4651 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4652 v->blk_mv_type[s->block_index[0]] = 1;
4653 v->blk_mv_type[s->block_index[1]] = 1;
4654 v->blk_mv_type[s->block_index[2]] = 1;
4655 v->blk_mv_type[s->block_index[3]] = 1;
4656 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4657 for (i = 0; i < 2; i++) {
4658 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4659 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4665 GET_MQUANT(); // p. 227
4666 s->current_picture.qscale_table[mb_pos] = mquant;
4667 if (!v->ttmbf && cbp)
4668 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4669 for (i = 0; i < 6; i++) {
4670 s->dc_val[0][s->block_index[i]] = 0;
4672 val = ((cbp >> (5 - i)) & 1);
4674 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4676 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4678 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4679 first_block, s->dest[dst_idx] + off,
4680 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4681 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4682 block_cbp |= pat << (i << 2);
4683 if (!v->ttmbf && ttmb < 8)
4691 for (i = 0; i < 6; i++) {
4692 v->mb_type[0][s->block_index[i]] = 0;
4693 s->dc_val[0][s->block_index[i]] = 0;
4695 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
4696 s->current_picture.qscale_table[mb_pos] = 0;
4697 v->blk_mv_type[s->block_index[0]] = 0;
4698 v->blk_mv_type[s->block_index[1]] = 0;
4699 v->blk_mv_type[s->block_index[2]] = 0;
4700 v->blk_mv_type[s->block_index[3]] = 0;
4703 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4704 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4705 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4707 dir = bmvtype == BMV_TYPE_BACKWARD;
4708 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4713 for (i = 0; i < 2; i++) {
4714 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4715 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4716 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4717 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4720 v->blk_mv_type[s->block_index[0]] = 1;
4721 v->blk_mv_type[s->block_index[1]] = 1;
4722 v->blk_mv_type[s->block_index[2]] = 1;
4723 v->blk_mv_type[s->block_index[3]] = 1;
4724 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4725 for (i = 0; i < 2; i++) {
4726 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4727 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4734 if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
4739 if (s->mb_x == s->mb_width - 1)
4740 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4741 v->cbp[s->mb_x] = block_cbp;
4742 v->ttblk[s->mb_x] = block_tt;
4746 /** Decode blocks of I-frame
 *
 * Per macroblock: read the coded-block pattern and AC-prediction flag,
 * decode all six 8x8 blocks (4 luma + 2 chroma), inverse-transform and
 * write the pixels, then apply the overlap smoothing filter when
 * pq >= 9 and overlap mode is enabled.
 *
 * NOTE(review): the embedded original line numbers are non-contiguous;
 * case labels, braces and some statements are elided from this listing.
 */
4748 static void vc1_decode_i_blocks(VC1Context *v)
4751 MpegEncContext *s = &v->s;
4756 /* select codingmode used for VLC tables selection */
4757 switch (v->y_ac_table_index) {
4759 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4762 v->codingset = CS_HIGH_MOT_INTRA;
4765 v->codingset = CS_MID_RATE_INTRA;
4769 switch (v->c_ac_table_index) {
4771 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4774 v->codingset2 = CS_HIGH_MOT_INTER;
4777 v->codingset2 = CS_MID_RATE_INTER;
4781 /* Set DC scale - y and c use the same */
4782 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4783 s->c_dc_scale = s->c_dc_scale_table[v->pq];
4786 s->mb_x = s->mb_y = 0;
4788 s->first_slice_line = 1;
4789 for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
4791 init_block_index(v);
4792 for (; s->mb_x < v->end_mb_x; s->mb_x++) {
4794 ff_update_block_index(s);
/* dst[0..3]: the four 8x8 luma blocks; dst[4]/dst[5]: chroma U/V */
4795 dst[0] = s->dest[0];
4796 dst[1] = dst[0] + 8;
4797 dst[2] = s->dest[0] + s->linesize * 8;
4798 dst[3] = dst[2] + 8;
4799 dst[4] = s->dest[1];
4800 dst[5] = s->dest[2];
4801 s->dsp.clear_blocks(s->block[0]);
4802 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4803 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4804 s->current_picture.qscale_table[mb_pos] = v->pq;
4805 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4806 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4808 // do actual MB decoding and displaying
4809 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4810 v->s.ac_pred = get_bits1(&v->s.gb);
4812 for (k = 0; k < 6; k++) {
4813 val = ((cbp >> (5 - k)) & 1);
/* "coded" flag is predicted from the neighbouring blocks */
4816 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4820 cbp |= val << (5 - k);
4822 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
/* skip chroma reconstruction in grayscale mode */
4824 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4826 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
/* overlap path keeps the samples signed (scale by 2, signed clamp on
 * output); otherwise the +64 level bias is removed before scaling */
4827 if (v->pq >= 9 && v->overlap) {
4829 for (j = 0; j < 64; j++)
4830 s->block[k][j] <<= 1;
4831 s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4834 for (j = 0; j < 64; j++)
4835 s->block[k][j] = (s->block[k][j] - 64) << 1;
4836 s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
/* overlap smoothing across the edges of this macroblock */
4840 if (v->pq >= 9 && v->overlap) {
4842 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4843 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4844 if (!(s->flags & CODEC_FLAG_GRAY)) {
4845 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4846 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4849 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4850 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
/* vertical smoothing needs a row above, so skip the first slice line */
4851 if (!s->first_slice_line) {
4852 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4853 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4854 if (!(s->flags & CODEC_FLAG_GRAY)) {
4855 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4856 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4859 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4860 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4862 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* bitstream overconsumption: mark the slice damaged for concealment */
4864 if (get_bits_count(&s->gb) > v->bits) {
4865 ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4866 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4867 get_bits_count(&s->gb), v->bits);
/* with the loop filter active, draw bands one MB row behind */
4871 if (!v->s.loop_filter)
4872 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4874 ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4876 s->first_slice_line = 0;
4878 if (v->s.loop_filter)
4879 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4881 /* This is intentionally mb_height and not end_mb_y - unlike in advanced
4882 * profile, these only differ when decoding MSS2 rectangles. */
4883 ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4886 /** Decode blocks of I-frame for advanced profile
 *
 * Advanced-profile variant: per-MB quantiser (mquant), raw or
 * bitplane-coded fieldtx/acpred/overflags signalling, smoothing done on
 * a delayed block buffer (v->block[v->cur_blk_idx]) with output
 * deferred to vc1_put_signed_blocks_clamped(), plus a delayed in-loop
 * filter.
 * NOTE(review): source line numbers are non-contiguous - some
 * statements are elided from this listing.
 */
4888 static void vc1_decode_i_blocks_adv(VC1Context *v)
4891 MpegEncContext *s = &v->s;
4897 GetBitContext *gb = &s->gb;
4899 /* select codingmode used for VLC tables selection */
4900 switch (v->y_ac_table_index) {
4902 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4905 v->codingset = CS_HIGH_MOT_INTRA;
4908 v->codingset = CS_MID_RATE_INTRA;
4912 switch (v->c_ac_table_index) {
4914 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4917 v->codingset2 = CS_HIGH_MOT_INTER;
4920 v->codingset2 = CS_MID_RATE_INTER;
4925 s->mb_x = s->mb_y = 0;
4927 s->first_slice_line = 1;
4928 s->mb_y = s->start_mb_y;
/* slice start: clear the coded-block predictors above the first row */
4929 if (s->start_mb_y) {
4931 init_block_index(v);
4932 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4933 (1 + s->b8_stride) * sizeof(*s->coded_block));
4935 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4937 init_block_index(v);
4938 for (;s->mb_x < s->mb_width; s->mb_x++) {
4939 int16_t (*block)[64] = v->block[v->cur_blk_idx];
4940 ff_update_block_index(s);
4941 s->dsp.clear_blocks(block[0]);
4942 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4943 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4944 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4945 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4947 // do actual MB decoding and displaying
/* fieldtx/acpred/overflags: raw per-MB bit or the pre-decoded bitplane */
4948 if (v->fieldtx_is_raw)
4949 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4950 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4951 if ( v->acpred_is_raw)
4952 v->s.ac_pred = get_bits1(&v->s.gb);
4954 v->s.ac_pred = v->acpred_plane[mb_pos];
4956 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4957 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4961 s->current_picture.qscale_table[mb_pos] = mquant;
4962 /* Set DC scale - y and c use the same */
4963 s->y_dc_scale = s->y_dc_scale_table[mquant];
4964 s->c_dc_scale = s->c_dc_scale_table[mquant];
4966 for (k = 0; k < 6; k++) {
4967 val = ((cbp >> (5 - k)) & 1);
4970 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4974 cbp |= val << (5 - k);
/* availability of the above/left neighbours for DC/AC prediction */
4976 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4977 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4979 vc1_decode_i_block_adv(v, block[k], k, val,
4980 (k < 4) ? v->codingset : v->codingset2, mquant);
/* skip chroma reconstruction in grayscale mode */
4982 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4984 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
/* smoothing + output run one macroblock behind the decode position */
4987 vc1_smooth_overlap_filter_iblk(v);
4988 vc1_put_signed_blocks_clamped(v);
4989 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4991 if (get_bits_count(&s->gb) > v->bits) {
4992 // TODO: may need modification to handle slice coding
4993 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4994 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4995 get_bits_count(&s->gb), v->bits);
4999 if (!v->s.loop_filter)
5000 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5002 ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
5003 s->first_slice_line = 0;
5006 /* raw bottom MB row */
/* flush the delayed output/filter pipeline for the last MB row */
5008 init_block_index(v);
5010 for (;s->mb_x < s->mb_width; s->mb_x++) {
5011 ff_update_block_index(s);
5012 vc1_put_signed_blocks_clamped(v);
5013 if (v->s.loop_filter)
5014 vc1_loop_filter_iblk_delayed(v, v->pq);
5016 if (v->s.loop_filter)
5017 ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
5018 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5019 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/**
 * Decode all macroblocks of a P picture, dispatching per-MB on the
 * frame coding mode (interlaced field / interlaced frame / progressive)
 * and applying the in-loop deblocking filter one MB row behind
 * (progressive frames only).
 * NOTE(review): both codingset switches below key on c_ac_table_index,
 * whereas vc1_decode_i_blocks keys the first one on y_ac_table_index -
 * confirm this asymmetry is intentional.
 */
5022 static void vc1_decode_p_blocks(VC1Context *v)
5024 MpegEncContext *s = &v->s;
5025 int apply_loop_filter;
5027 /* select codingmode used for VLC tables selection */
5028 switch (v->c_ac_table_index) {
5030 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
5033 v->codingset = CS_HIGH_MOT_INTRA;
5036 v->codingset = CS_MID_RATE_INTRA;
5040 switch (v->c_ac_table_index) {
5042 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
5045 v->codingset2 = CS_HIGH_MOT_INTER;
5048 v->codingset2 = CS_MID_RATE_INTER;
/* loop filter only for progressive pictures and when not discarded */
5052 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
5053 v->fcm == PROGRESSIVE;
5054 s->first_slice_line = 1;
5055 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
5056 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5058 init_block_index(v);
5059 for (; s->mb_x < s->mb_width; s->mb_x++) {
5060 ff_update_block_index(s);
5062 if (v->fcm == ILACE_FIELD)
5063 vc1_decode_p_mb_intfi(v);
5064 else if (v->fcm == ILACE_FRAME)
5065 vc1_decode_p_mb_intfr(v);
5066 else vc1_decode_p_mb(v);
/* deblocking lags one row so the row below is already decoded */
5067 if (s->mb_y != s->start_mb_y && apply_loop_filter)
5068 vc1_apply_p_loop_filter(v);
5069 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5070 // TODO: may need modification to handle slice coding
5071 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5072 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5073 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
/* slide the per-row caches down one MB row */
5077 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
5078 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
5079 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
5080 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
5081 if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5082 s->first_slice_line = 0;
/* the delayed filter above never reached the last MB row - do it now */
5084 if (apply_loop_filter) {
5086 init_block_index(v);
5087 for (; s->mb_x < s->mb_width; s->mb_x++) {
5088 ff_update_block_index(s);
5089 vc1_apply_p_loop_filter(v);
5092 if (s->end_mb_y >= s->start_mb_y)
5093 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5094 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5095 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/**
 * Decode all macroblocks of a B picture; the per-MB dispatch on the
 * frame coding mode mirrors vc1_decode_p_blocks (the progressive branch
 * is elided from this listing).
 */
5098 static void vc1_decode_b_blocks(VC1Context *v)
5100 MpegEncContext *s = &v->s;
5102 /* select codingmode used for VLC tables selection */
5103 switch (v->c_ac_table_index) {
5105 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
5108 v->codingset = CS_HIGH_MOT_INTRA;
5111 v->codingset = CS_MID_RATE_INTRA;
5115 switch (v->c_ac_table_index) {
5117 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
5120 v->codingset2 = CS_HIGH_MOT_INTER;
5123 v->codingset2 = CS_MID_RATE_INTER;
5127 s->first_slice_line = 1;
5128 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5130 init_block_index(v);
5131 for (; s->mb_x < s->mb_width; s->mb_x++) {
5132 ff_update_block_index(s);
5134 if (v->fcm == ILACE_FIELD)
5135 vc1_decode_b_mb_intfi(v);
5136 else if (v->fcm == ILACE_FRAME)
5137 vc1_decode_b_mb_intfr(v);
5140 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5141 // TODO: may need modification to handle slice coding
5142 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5143 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5144 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5147 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* with the loop filter active, draw bands one MB row behind */
5149 if (!v->s.loop_filter)
5150 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5152 ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5153 s->first_slice_line = 0;
5155 if (v->s.loop_filter)
5156 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5157 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5158 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/**
 * Handle a fully-skipped P frame: copy the previous picture into the
 * current destination, one MB row (16 luma / 8 chroma lines) at a time.
 */
5161 static void vc1_decode_skip_blocks(VC1Context *v)
5163 MpegEncContext *s = &v->s;
/* without a reference picture there is nothing to copy */
5165 if (!v->s.last_picture.f->data[0])
5168 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
5169 s->first_slice_line = 1;
5170 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5172 init_block_index(v);
5173 ff_update_block_index(s);
5174 memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
5175 memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5176 memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5177 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5178 s->first_slice_line = 0;
5180 s->pict_type = AV_PICTURE_TYPE_P;
/**
 * Per-picture entry point for block decoding: reset ESC3 VLC state,
 * route X8-coded intra pictures to the IntraX8 decoder, reset the
 * delayed-block indices, then dispatch on picture type.
 * NOTE(review): several guards/breaks are elided from this listing
 * (e.g. the condition selecting ff_intrax8_decode_picture, and the
 * guard before the I-path in the B case - presumably v->bi_type).
 */
5183 void ff_vc1_decode_blocks(VC1Context *v)
5186 v->s.esc3_level_length = 0;
5188 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
5191 v->left_blk_idx = -1;
5192 v->topleft_blk_idx = 1;
5194 switch (v->s.pict_type) {
5195 case AV_PICTURE_TYPE_I:
5196 if (v->profile == PROFILE_ADVANCED)
5197 vc1_decode_i_blocks_adv(v);
5199 vc1_decode_i_blocks(v);
5201 case AV_PICTURE_TYPE_P:
/* fully-skipped P frames just copy the previous picture */
5202 if (v->p_frame_skipped)
5203 vc1_decode_skip_blocks(v);
5205 vc1_decode_p_blocks(v);
5207 case AV_PICTURE_TYPE_B:
5209 if (v->profile == PROFILE_ADVANCED)
5210 vc1_decode_i_blocks_adv(v);
5212 vc1_decode_i_blocks(v);
5214 vc1_decode_b_blocks(v);
5220 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5224 * Transform coefficients for both sprites in 16.16 fixed point format,
5225 * in the order they appear in the bitstream:
5227 * rotation 1 (unused)
5229 * rotation 2 (unused)
5236 int effect_type, effect_flag;
5237 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
5238 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
5241 static inline int get_fp_val(GetBitContext* gb)
5243 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
/**
 * Parse one sprite's affine transform into c[0..6], all in 16.16 fixed
 * point.  The 2-bit mode selects how many scale terms (c[0], c[4], and
 * in the full case c[1]/c[3]) are explicitly coded; c[2]/c[5] are the
 * x/y translation and c[6] the opacity (see their use as xoff/yoff/
 * alpha in vc1_draw_sprites).  Case labels and default assignments are
 * elided from this listing.
 */
5246 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
5250 switch (get_bits(gb, 2)) {
5253 c[2] = get_fp_val(gb);
5257 c[0] = c[4] = get_fp_val(gb);
5258 c[2] = get_fp_val(gb);
5261 c[0] = get_fp_val(gb);
5262 c[2] = get_fp_val(gb);
5263 c[4] = get_fp_val(gb);
5266 c[0] = get_fp_val(gb);
5267 c[1] = get_fp_val(gb);
5268 c[2] = get_fp_val(gb);
5269 c[3] = get_fp_val(gb);
5270 c[4] = get_fp_val(gb);
5273 c[5] = get_fp_val(gb);
5275 c[6] = get_fp_val(gb);
/**
 * Parse the sprite transform(s) and optional post-processing effects
 * from the bitstream into *sd.
 * Returns AVERROR_INVALIDDATA on buffer overrun or an excessive effect
 * parameter count (the success return is elided from this listing).
 */
5280 static int vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
5282 AVCodecContext *avctx = v->s.avctx;
5285 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5286 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
/* rotation (c[1]/c[3]) is parsed but not implemented by the renderer */
5287 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
5288 avpriv_request_sample(avctx, "Non-zero rotation coefficients");
5289 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
5290 for (i = 0; i < 7; i++)
5291 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
5292 sd->coefs[sprite][i] / (1<<16),
5293 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
5294 av_log(avctx, AV_LOG_DEBUG, "\n");
/* NOTE(review): assignment inside the condition is intentional here */
5298 if (sd->effect_type = get_bits_long(gb, 30)) {
5299 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
5301 vc1_sprite_parse_transform(gb, sd->effect_params1);
5304 vc1_sprite_parse_transform(gb, sd->effect_params1);
5305 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5308 for (i = 0; i < sd->effect_pcount1; i++)
5309 sd->effect_params1[i] = get_fp_val(gb);
5311 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5312 // effect 13 is simple alpha blending and matches the opacity above
5313 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5314 for (i = 0; i < sd->effect_pcount1; i++)
5315 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5316 sd->effect_params1[i] / (1 << 16),
5317 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5318 av_log(avctx, AV_LOG_DEBUG, "\n");
5321 sd->effect_pcount2 = get_bits(gb, 16);
/* effect_params2[] has room for exactly 10 entries */
5322 if (sd->effect_pcount2 > 10) {
5323 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5324 return AVERROR_INVALIDDATA;
5325 } else if (sd->effect_pcount2) {
/* NOTE(review): i is presumably reset (to -1) just before this loop;
 * that statement is elided from this listing - confirm */
5327 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5328 while (++i < sd->effect_pcount2) {
5329 sd->effect_params2[i] = get_fp_val(gb);
5330 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5331 sd->effect_params2[i] / (1 << 16),
5332 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5334 av_log(avctx, AV_LOG_DEBUG, "\n");
/* NOTE(review): assignment inside the condition is intentional here */
5337 if (sd->effect_flag = get_bits1(gb))
5338 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
/* WMV3IMAGE streams are allowed to read up to 64 bits past the end */
5340 if (get_bits_count(gb) >= gb->size_in_bits +
5341 (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0)) {
5342 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5343 return AVERROR_INVALIDDATA;
5345 if (get_bits_count(gb) < gb->size_in_bits - 8)
5346 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
/**
 * Render the output frame from one or two sprites.  Each output row is
 * produced by horizontal resampling of the needed source sprite row(s),
 * cached in v->sr_rows and keyed by sr_cache, followed by vertical
 * interpolation and - with two sprites - alpha blending.
 * Order-sensitive row-cache logic; comments only.
 */
5351 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5353 int i, plane, row, sprite;
/* sr_cache[sprite][0/1]: source line number held in sr_rows, -1 = none */
5354 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5355 uint8_t* src_h[2][2];
5356 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5358 MpegEncContext *s = &v->s;
/* clamp start offsets and per-pixel advances so sampling stays inside
 * the sprite */
5360 for (i = 0; i <= v->two_sprites; i++) {
5361 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5362 xadv[i] = sd->coefs[i][0];
5363 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5364 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5366 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5367 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
/* blending weight comes from the second sprite's opacity coefficient */
5369 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5371 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5372 int width = v->output_width>>!!plane;
5374 for (row = 0; row < v->output_height>>!!plane; row++) {
5375 uint8_t *dst = v->sprite_output_frame->data[plane] +
5376 v->sprite_output_frame->linesize[plane] * row;
5378 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5379 uint8_t *iplane = s->current_picture.f->data[plane];
5380 int iline = s->current_picture.f->linesize[plane];
/* vertical source position in 16.16; ysub is the interpolation phase */
5381 int ycoord = yoff[sprite] + yadv[sprite] * row;
5382 int yline = ycoord >> 16;
5384 ysub[sprite] = ycoord & 0xFFFF;
/* (guard elided in this listing) the previous picture is used as the
 * source here - presumably for the second sprite */
5386 iplane = s->last_picture.f->data[plane];
5387 iline = s->last_picture.f->linesize[plane];
5389 next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
/* fast path: integral x offset and unit advance - no horizontal scaling */
5390 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5391 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5393 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
/* otherwise use the two-row cache of horizontally scaled lines */
5395 if (sr_cache[sprite][0] != yline) {
/* reuse the other cached row if it already holds this line */
5396 if (sr_cache[sprite][1] == yline) {
5397 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5398 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5400 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5401 sr_cache[sprite][0] = yline;
/* second row only needed when vertically interpolating (ysub != 0) */
5404 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5405 v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
5406 iplane + next_line, xoff[sprite],
5407 xadv[sprite], width);
5408 sr_cache[sprite][1] = yline + 1;
5410 src_h[sprite][0] = v->sr_rows[sprite][0];
5411 src_h[sprite][1] = v->sr_rows[sprite][1];
/* vertical pass: interpolate/copy a single sprite, or blend two */
5415 if (!v->two_sprites) {
5417 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5419 memcpy(dst, src_h[0][0], width);
5422 if (ysub[0] && ysub[1]) {
5423 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5424 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5425 } else if (ysub[0]) {
5426 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5427 src_h[1][0], alpha, width);
5428 } else if (ysub[1]) {
5429 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5430 src_h[0][0], (1<<16)-1-alpha, width);
5432 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5438 for (i = 0; i <= v->two_sprites; i++) {
/**
 * Parse the sprite transforms/effects for this picture, validate that
 * the required sprite picture(s) are available, obtain an output buffer
 * and render it with vc1_draw_sprites().
 */
5448 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5451 MpegEncContext *s = &v->s;
5452 AVCodecContext *avctx = s->avctx;
5455 memset(&sd, 0, sizeof(sd));
5457 ret = vc1_parse_sprites(v, gb, &sd);
5461 if (!s->current_picture.f->data[0]) {
5462 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
/* two-sprite mode needs the previous picture as the second source */
5466 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f->data[0])) {
5467 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5471 av_frame_unref(v->sprite_output_frame);
5472 if ((ret = ff_get_buffer(avctx, v->sprite_output_frame, 0)) < 0)
5475 vc1_draw_sprites(v, &sd);
/**
 * Flush callback for the WMV3IMAGE/VC1IMAGE decoders: blank the current
 * picture (luma 0, chroma 128) so a lone sprite is not blended with
 * stale data - see the original comment below.
 */
5480 static void vc1_sprite_flush(AVCodecContext *avctx)
5482 VC1Context *v = avctx->priv_data;
5483 MpegEncContext *s = &v->s;
5484 AVFrame *f = s->current_picture.f;
5487 /* Windows Media Image codecs have a convergence interval of two keyframes.
5488 Since we can't enforce it, clear to black the missing sprite. This is
5489 wrong but it looks better than doing nothing. */
5492 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5493 for (i = 0; i < v->sprite_height>>!!plane; i++)
5494 memset(f->data[plane] + i * f->linesize[plane],
5495 plane ? 128 : 0, f->linesize[plane]);
/**
 * Allocate the per-sequence VC-1 decoder tables: MB bitplanes, the
 * delayed block buffer, per-row CBP / transform-type / intra / luma-MV
 * caches (each a base + one-stride offset pair), MB-type and
 * block-level MV arrays, and the sprite row buffers for the image
 * codecs.
 * Returns AVERROR(ENOMEM) when an allocation fails (the success return
 * is elided from this listing).
 * NOTE(review): the visible failure path frees only a subset of the
 * buffers (e.g. mv_f_base, mv_f_next_base, blk_mv_type_base,
 * fieldtx_plane, forward_mb_plane are not freed here) - confirm the
 * remaining cleanup happens in the decoder close path.
 */
5500 av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
5502 MpegEncContext *s = &v->s;
/* MB height rounded up to even - presumably for field pictures; confirm */
5504 int mb_height = FFALIGN(s->mb_height, 2);
5506 /* Allocate mb bitplanes */
5507 v->mv_type_mb_plane = av_malloc (s->mb_stride * mb_height);
5508 v->direct_mb_plane = av_malloc (s->mb_stride * mb_height);
5509 v->forward_mb_plane = av_malloc (s->mb_stride * mb_height);
5510 v->fieldtx_plane = av_mallocz(s->mb_stride * mb_height);
5511 v->acpred_plane = av_malloc (s->mb_stride * mb_height);
5512 v->over_flags_plane = av_malloc (s->mb_stride * mb_height);
/* delayed block buffer: mb_width + 2 macroblocks' worth of coefficients */
5514 v->n_allocated_blks = s->mb_width + 2;
5515 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
/* each row cache is 2 strides: "base" row plus the current row offset */
5516 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5517 v->cbp = v->cbp_base + s->mb_stride;
5518 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5519 v->ttblk = v->ttblk_base + s->mb_stride;
5520 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5521 v->is_intra = v->is_intra_base + s->mb_stride;
5522 v->luma_mv_base = av_mallocz(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5523 v->luma_mv = v->luma_mv_base + s->mb_stride;
5525 /* allocate block type info in that way so it could be used with s->block_index[] */
5526 v->mb_type_base = av_malloc(s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5527 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5528 v->mb_type[1] = v->mb_type_base + s->b8_stride * (mb_height * 2 + 1) + s->mb_stride + 1;
5529 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (mb_height + 1);
5531 /* allocate memory to store block level MV info */
5532 v->blk_mv_type_base = av_mallocz( s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5533 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5534 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5535 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5536 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5537 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5538 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5539 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5541 /* Init coded blocks info */
5542 if (v->profile == PROFILE_ADVANCED) {
5543 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5545 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5549 ff_intrax8_common_init(&v->x8,s);
/* image codecs need scratch rows for horizontal sprite resampling */
5551 if (s->avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || s->avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5552 for (i = 0; i < 4; i++)
5553 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width)))
5554 return AVERROR(ENOMEM);
/* aggregate NULL check (continuation elided) with partial cleanup */
5557 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5558 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5560 av_freep(&v->mv_type_mb_plane);
5561 av_freep(&v->direct_mb_plane);
5562 av_freep(&v->acpred_plane);
5563 av_freep(&v->over_flags_plane);
5564 av_freep(&v->block);
5565 av_freep(&v->cbp_base);
5566 av_freep(&v->ttblk_base);
5567 av_freep(&v->is_intra_base);
5568 av_freep(&v->luma_mv_base);
5569 av_freep(&v->mb_type_base);
5570 return AVERROR(ENOMEM);
/* Build the transposed (column-first) zigzag scan tables used by the
 * decoder: the four WMV1 scan tables plus the VC-1 advanced-profile
 * interlaced 8x8 zigzag.  transpose() swaps the 3-bit row and column
 * fields of a 6-bit scan position, i.e. index (r,c) -> (c,r). */
5576 av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
5579     for (i = 0; i < 64; i++) {
5580 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5581         v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5582         v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5583         v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5584         v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5585         v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5591 /** Initialize a VC1/WMV3 decoder
5592  * @todo TODO: Handle VC-1 IDUs (Transport level?)
5593  * @todo TODO: Decypher remaining bits in extra_data
5595 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5597     VC1Context *v = avctx->priv_data;
5598     MpegEncContext *s = &v->s;
5602     /* save the container output size for WMImage */
5603     v->output_width = avctx->width;
5604     v->output_height = avctx->height;
/* Extradata is mandatory: it carries the sequence header (WMV3) or the
 * escaped SEQHDR/ENTRYPOINT units (VC-1). */
5606     if (!avctx->extradata_size || !avctx->extradata)
5608     if (!(avctx->flags & CODEC_FLAG_GRAY))
5609         avctx->pix_fmt = ff_get_format(avctx, avctx->codec->pix_fmts);
5611         avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5614     if ((ret = ff_vc1_init_common(v)) < 0)
5616     // ensure static VLC tables are initialized
5617     if ((ret = ff_msmpeg4_decode_init(avctx)) < 0)
5619     if ((ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
5621     // Hack to ensure the above functions will be called
5622     // again once we know all necessary settings.
5623     // That this is necessary might indicate a bug.
5624     ff_vc1_decode_end(avctx);
5626     ff_h264chroma_init(&v->h264chroma, 8);
5628     if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5631         // looks like WMV3 has a sequence header stored in the extradata
5632         // advanced sequence header may be before the first frame
5633         // the last byte of the extradata is a version number, 1 for the
5634         // samples we can decode
5636         init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5638         if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0)
/* Report any bits left over (or read past the end) after parsing the
 * sequence header; informational only. */
5641         count = avctx->extradata_size*8 - get_bits_count(&gb);
5643             av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5644                    count, get_bits(&gb, count));
5645         } else if (count < 0) {
5646             av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5648     } else { // VC1/WVC1/WVP2
5649         const uint8_t *start = avctx->extradata;
5650         uint8_t *end = avctx->extradata + avctx->extradata_size;
5651         const uint8_t *next;
5652         int size, buf2_size;
5653         uint8_t *buf2 = NULL;
5654         int seq_initialized = 0, ep_initialized = 0;
5656         if (avctx->extradata_size < 16) {
5657             av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
/* Scratch buffer for unescaped start-code units; walk each marker-
 * delimited unit in the extradata and parse SEQHDR / ENTRYPOINT. */
5661         buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5662         start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5664         for (; next < end; start = next) {
5665             next = find_next_marker(start + 4, end);
5666             size = next - start - 4;
5669             buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5670             init_get_bits(&gb, buf2, buf2_size * 8);
5671             switch (AV_RB32(start)) {
5672             case VC1_CODE_SEQHDR:
5673                 if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0) {
5677                 seq_initialized = 1;
5679             case VC1_CODE_ENTRYPOINT:
5680                 if ((ret = ff_vc1_decode_entry_point(avctx, v, &gb)) < 0) {
/* Both a sequence header and an entry point are required for VC-1. */
5689         if (!seq_initialized || !ep_initialized) {
5690             av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5693     v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5696     v->sprite_output_frame = av_frame_alloc();
5697     if (!v->sprite_output_frame)
5698         return AVERROR(ENOMEM);
5700     avctx->profile = v->profile;
5701     if (v->profile == PROFILE_ADVANCED)
5702         avctx->level = v->level;
5704     avctx->has_b_frames = !!avctx->max_b_frames;
5706     s->mb_width = (avctx->coded_width + 15) >> 4;
5707     s->mb_height = (avctx->coded_height + 15) >> 4;
/* Advanced profile and res_fasttx streams use transposed scan tables;
 * everything else uses the plain WMV1 tables. */
5709     if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5710         ff_vc1_init_transposed_scantables(v);
5712         memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
/* Sprite (image) codecs decode at the coded size but present frames at
 * the container (output) size saved above. */
5717     if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5718         v->sprite_width = avctx->coded_width;
5719         v->sprite_height = avctx->coded_height;
5721         avctx->coded_width = avctx->width = v->output_width;
5722         avctx->coded_height = avctx->height = v->output_height;
5724         // prevent 16.16 overflows
5725         if (v->sprite_width > 1 << 14 ||
5726             v->sprite_height > 1 << 14 ||
5727             v->output_width > 1 << 14 ||
5728             v->output_height > 1 << 14) return -1;
5730         if ((v->sprite_width&1) || (v->sprite_height&1)) {
5731             avpriv_request_sample(avctx, "odd sprites support");
5732             return AVERROR_PATCHWELCOME;
5738 /** Close a VC1/WMV3 decoder
5739  * @warning Initial try at using MpegEncContext stuff
/* Frees everything allocated by vc1_decode_init /
 * ff_vc1_decode_init_alloc_tables: the sprite output frame and row
 * buffers, HRD tables, the MpegEncContext, all MB bitplanes and MV
 * tables, and the intra X8 context.  av_freep() NULLs each pointer, so
 * calling this on a partially initialized context is safe. */
5741 av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
5743     VC1Context *v = avctx->priv_data;
5746     av_frame_free(&v->sprite_output_frame);
5748     for (i = 0; i < 4; i++)
5749         av_freep(&v->sr_rows[i >> 1][i & 1]);
5750     av_freep(&v->hrd_rate);
5751     av_freep(&v->hrd_buffer);
5752     ff_MPV_common_end(&v->s);
5753     av_freep(&v->mv_type_mb_plane);
5754     av_freep(&v->direct_mb_plane);
5755     av_freep(&v->forward_mb_plane);
5756     av_freep(&v->fieldtx_plane);
5757     av_freep(&v->acpred_plane);
5758     av_freep(&v->over_flags_plane);
5759     av_freep(&v->mb_type_base);
5760     av_freep(&v->blk_mv_type_base);
5761     av_freep(&v->mv_f_base);
5762     av_freep(&v->mv_f_next_base);
5763     av_freep(&v->block);
5764     av_freep(&v->cbp_base);
5765     av_freep(&v->ttblk_base);
5766     av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5767     av_freep(&v->luma_mv_base);
5768     ff_intrax8_common_end(&v->x8);
5773 /** Decode a VC1/WMV3 frame
5774  * @todo TODO: Handle VC-1 IDUs (Transport level?)
/* Main per-packet entry point.  Steps, as far as visible here:
 *  1. handle the empty/end-of-sequence packet (flush last frame),
 *  2. for advanced profile, split the packet at start-code markers,
 *     unescape each unit, and collect field/slice sub-bitstreams,
 *  3. (re)initialize the context if the coded size changed,
 *  4. parse the frame header, start the picture, hand off to a
 *     hwaccel/VDPAU path or decode all slices in software,
 *  5. run error concealment, composite sprites for the image codecs,
 *     and output the decoded or delayed frame.
 * NOTE(review): many lines (braces, declarations, gotos, returns) are
 * missing from this extraction; the code is kept byte-identical. */
5776 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5777                             int *got_frame, AVPacket *avpkt)
5779     const uint8_t *buf = avpkt->data;
5780     int buf_size = avpkt->size, n_slices = 0, i, ret;
5781     VC1Context *v = avctx->priv_data;
5782     MpegEncContext *s = &v->s;
5783     AVFrame *pict = data;
5784     uint8_t *buf2 = NULL;
5785     const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
5786     int mb_height, n_slices1=-1;
5791     } *slices = NULL, *tmp;
5793     v->second_field = 0;
5795     if(s->flags & CODEC_FLAG_LOW_DELAY)
5798     /* no supplementary picture */
5799     if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5800         /* special case for last picture */
5801         if (s->low_delay == 0 && s->next_picture_ptr) {
5802             if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
5804             s->next_picture_ptr = NULL;
5812     if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5813         if (v->profile < PROFILE_ADVANCED)
5814             avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
5816             avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
5819     //for advanced profile we may need to parse and unescape data
5820     if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5822         buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5824             return AVERROR(ENOMEM);
5826         if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5827             const uint8_t *start, *end, *next;
/* Walk marker-delimited units: FRAME data goes to buf2, FIELD and
 * SLICE units get their own unescaped buffers in slices[]. */
5831             for (start = buf, end = buf + buf_size; next < end; start = next) {
5832                 next = find_next_marker(start + 4, end);
5833                 size = next - start - 4;
5834                 if (size <= 0) continue;
5835                 switch (AV_RB32(start)) {
5836                 case VC1_CODE_FRAME:
5837                     if (avctx->hwaccel ||
5838                         s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5840                     buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5842                 case VC1_CODE_FIELD: {
5844                     if (avctx->hwaccel ||
5845                         s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5846                         buf_start_second_field = start;
5847                     tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
5851                     slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5852                     if (!slices[n_slices].buf)
5854                     buf_size3 = vc1_unescape_buffer(start + 4, size,
5855                                                     slices[n_slices].buf);
5856                     init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5858                     /* assuming that the field marker is at the exact middle,
5859                        hope it's correct */
5860                     slices[n_slices].mby_start = s->mb_height + 1 >> 1;
5861                     n_slices1 = n_slices - 1; // index of the last slice of the first field
5865                 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5866                     buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5867                     init_get_bits(&s->gb, buf2, buf_size2 * 8);
5868                     ff_vc1_decode_entry_point(avctx, v, &s->gb);
5870                 case VC1_CODE_SLICE: {
5872                     tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
5876                     slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5877                     if (!slices[n_slices].buf)
5879                     buf_size3 = vc1_unescape_buffer(start + 4, size,
5880                                                     slices[n_slices].buf);
5881                     init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
/* Slice header carries its starting MB row (9 bits). */
5883                     slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5889         } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5890             const uint8_t *divider;
5893             divider = find_next_marker(buf, buf + buf_size);
5894             if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5895                 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5897             } else { // found field marker, unescape second field
5898                 if (avctx->hwaccel ||
5899                     s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5900                     buf_start_second_field = divider;
5901                 tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
5905                 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5906                 if (!slices[n_slices].buf)
5908                 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5909                 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5911                 slices[n_slices].mby_start = s->mb_height + 1 >> 1;
5912                 n_slices1 = n_slices - 1;
5915             buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5917             buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5919         init_get_bits(&s->gb, buf2, buf_size2*8);
5921         init_get_bits(&s->gb, buf, buf_size*8);
5923     if (v->res_sprite) {
5924         v->new_sprite = !get_bits1(&s->gb);
5925         v->two_sprites = get_bits1(&s->gb);
5926         /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5927            we're using the sprite compositor. These are intentionally kept separate
5928            so you can get the raw sprites by using the wmv3 decoder for WMVP or
5929            the vc1 one for WVP2 */
5930         if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5931             if (v->new_sprite) {
5932                 // switch AVCodecContext parameters to those of the sprites
5933                 avctx->width = avctx->coded_width = v->sprite_width;
5934                 avctx->height = avctx->coded_height = v->sprite_height;
/* Reinitialize when the coded size changed since last init. */
5941     if (s->context_initialized &&
5942         (s->width != avctx->coded_width ||
5943          s->height != avctx->coded_height)) {
5944         ff_vc1_decode_end(avctx);
5947     if (!s->context_initialized) {
5948         if (ff_msmpeg4_decode_init(avctx) < 0)
5950         if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5951             ff_MPV_common_end(s);
5955         s->low_delay = !avctx->has_b_frames || v->res_sprite;
5957         if (v->profile == PROFILE_ADVANCED) {
5958             if(avctx->coded_width<=1 || avctx->coded_height<=1)
5960             s->h_edge_pos = avctx->coded_width;
5961             s->v_edge_pos = avctx->coded_height;
5965     // do parse frame header
5966     v->pic_header_flag = 0;
5967     v->first_pic_header_flag = 1;
5968     if (v->profile < PROFILE_ADVANCED) {
5969         if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5973         if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5977     v->first_pic_header_flag = 0;
5979     if (avctx->debug & FF_DEBUG_PICT_INFO)
5980         av_log(v->s.avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type));
/* The sprite (image) codecs only carry I-frames. */
5982     if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5983         && s->pict_type != AV_PICTURE_TYPE_I) {
5984         av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5988     if ((s->mb_height >> v->field_mode) == 0) {
5989         av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
5993     // for skipping the frame
5994     s->current_picture.f->pict_type = s->pict_type;
5995     s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5997     /* skip B-frames if we don't have reference frames */
5998     if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
5999         av_log(v->s.avctx, AV_LOG_DEBUG, "Skipping B frame without reference frames\n");
6002     if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
6003         (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
6004         avctx->skip_frame >= AVDISCARD_ALL) {
6008     if (s->next_p_frame_damaged) {
6009         if (s->pict_type == AV_PICTURE_TYPE_B)
6012             s->next_p_frame_damaged = 0;
6015     if (ff_MPV_frame_start(s, avctx) < 0) {
6019     v->s.current_picture_ptr->field_picture = v->field_mode;
6020     v->s.current_picture_ptr->f->interlaced_frame = (v->fcm != PROGRESSIVE);
6021     v->s.current_picture_ptr->f->top_field_first = v->tff;
6023     // process pulldown flags
6024     s->current_picture_ptr->f->repeat_pict = 0;
6025     // Pulldown flags are only valid when 'broadcast' has been set.
6026     // So ticks_per_frame will be 2
6029         s->current_picture_ptr->f->repeat_pict = 1;
6030     } else if (v->rptfrm) {
6032         s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2;
6035     s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
6036     s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
/* Hardware paths: VDPAU gets the raw (escaped) bitstream; generic
 * hwaccels get start_frame/decode_slice/end_frame per field or frame. */
6038     if ((CONFIG_VC1_VDPAU_DECODER)
6039         &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
6040         if (v->field_mode && buf_start_second_field) {
6041             ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start);
6042             ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field);
6044             ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
6046     } else if (avctx->hwaccel) {
6047         if (v->field_mode && buf_start_second_field) {
6048             // decode first field
6049             s->picture_structure = PICT_BOTTOM_FIELD - v->tff;
6050             if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
6052             if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
6054             if (avctx->hwaccel->end_frame(avctx) < 0)
6057             // decode second field
6058             s->gb = slices[n_slices1 + 1].gb;
6059             s->picture_structure = PICT_TOP_FIELD + v->tff;
6060             v->second_field = 1;
6061             v->pic_header_flag = 0;
6062             if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
6063                 av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed");
6066             v->s.current_picture_ptr->f->pict_type = v->s.pict_type;
6068             if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
6070             if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
6072             if (avctx->hwaccel->end_frame(avctx) < 0)
6075             s->picture_structure = PICT_FRAME;
6076             if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6078             if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6080             if (avctx->hwaccel->end_frame(avctx) < 0)
/* Software path from here on. */
6086         ff_mpeg_er_frame_start(s);
6088         v->bits = buf_size * 8;
6089         v->end_mb_x = s->mb_width;
/* Field pictures decode each field at half height with doubled
 * linesizes; restored after the slice loop below. */
6090         if (v->field_mode) {
6091             s->current_picture.f->linesize[0] <<= 1;
6092             s->current_picture.f->linesize[1] <<= 1;
6093             s->current_picture.f->linesize[2] <<= 1;
6095             s->uvlinesize <<= 1;
6097         mb_height = s->mb_height >> v->field_mode;
6099         av_assert0 (mb_height > 0);
/* Slice loop: i == 0 is the first-field/frame data in s->gb; each
 * subsequent iteration switches to slices[i-1]'s sub-bitstream. */
6101         for (i = 0; i <= n_slices; i++) {
6102             if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6103                 if (v->field_mode <= 0) {
6104                     av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6105                            "picture boundary (%d >= %d)\n", i,
6106                            slices[i - 1].mby_start, mb_height);
6109                 v->second_field = 1;
6110                 av_assert0((s->mb_height & 1) == 0);
6111                 v->blocks_off = s->b8_stride * (s->mb_height&~1);
6112                 v->mb_off = s->mb_stride * s->mb_height >> 1;
6114                 v->second_field = 0;
6119                 v->pic_header_flag = 0;
6120                 if (v->field_mode && i == n_slices1 + 2) {
6121                     if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6122                         av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6123                         if (avctx->err_recognition & AV_EF_EXPLODE)
6127                 } else if (get_bits1(&s->gb)) {
6128                     v->pic_header_flag = 1;
6129                     if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6130                         av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6131                         if (avctx->err_recognition & AV_EF_EXPLODE)
6139             s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6140             if (!v->field_mode || v->second_field)
6141                 s->end_mb_y = (i == n_slices     ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6143                 if (i >= n_slices) {
6144                     av_log(v->s.avctx, AV_LOG_ERROR, "first field slice count too large\n");
6147                 s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6149             if (s->end_mb_y <= s->start_mb_y) {
6150                 av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y);
6153             if (!v->p_frame_skipped && s->pict_type != AV_PICTURE_TYPE_I && !v->cbpcy_vlc) {
6154                 av_log(v->s.avctx, AV_LOG_ERROR, "missing cbpcy_vlc\n");
6157             ff_vc1_decode_blocks(v);
6159                 s->gb = slices[i].gb;
6161         if (v->field_mode) {
6162             v->second_field = 0;
6163             s->current_picture.f->linesize[0] >>= 1;
6164             s->current_picture.f->linesize[1] >>= 1;
6165             s->current_picture.f->linesize[2] >>= 1;
6167             s->uvlinesize >>= 1;
/* Swap MV field maps for reference pictures (not for B/BI). */
6168             if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) {
6169                 FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
6170                 FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
6173         av_dlog(s->avctx, "Consumed %i/%i bits\n",
6174                 get_bits_count(&s->gb), s->gb.size_in_bits);
6175 //  if (get_bits_count(&s->gb) > buf_size * 8)
6177         if(s->er.error_occurred && s->pict_type == AV_PICTURE_TYPE_B)
6180             ff_er_frame_end(&s->er);
6183     ff_MPV_frame_end(s);
/* Image codecs: restore container size and run the sprite compositor. */
6185     if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6187         avctx->width = avctx->coded_width = v->output_width;
6188         avctx->height = avctx->coded_height = v->output_height;
6189         if (avctx->skip_frame >= AVDISCARD_NONREF)
6191 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6192         if (vc1_decode_sprites(v, &s->gb))
6195         if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
/* Normal output: current picture in low-delay/B, otherwise the delayed
 * last picture. */
6199         if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6200             if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
6202             ff_print_debug_info(s, s->current_picture_ptr, pict);
6204         } else if (s->last_picture_ptr != NULL) {
6205             if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
6207             ff_print_debug_info(s, s->last_picture_ptr, pict);
/* Cleanup of per-call slice buffers (success and error paths). */
6214     for (i = 0; i < n_slices; i++)
6215         av_free(slices[i].buf);
6221     for (i = 0; i < n_slices; i++)
6222         av_free(slices[i].buf);
/* Human-readable profile names reported through AVCodec.profiles;
 * terminated by FF_PROFILE_UNKNOWN. */
6228 static const AVProfile profiles[] = {
6229     { FF_PROFILE_VC1_SIMPLE, "Simple" },
6230     { FF_PROFILE_VC1_MAIN, "Main" },
6231     { FF_PROFILE_VC1_COMPLEX, "Complex" },
6232     { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6233     { FF_PROFILE_UNKNOWN },
/* Pixel formats offered to ff_get_format(), hwaccel formats first so
 * they are preferred when available.  NOTE(review): the tail of this
 * initializer (software format and terminator) is missing from this
 * extraction. */
6236 static const enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[] = {
6237 #if CONFIG_VC1_DXVA2_HWACCEL
6238     AV_PIX_FMT_DXVA2_VLD,
6240 #if CONFIG_VC1_VAAPI_HWACCEL
6241     AV_PIX_FMT_VAAPI_VLD,
6243 #if CONFIG_VC1_VDPAU_HWACCEL
/* Registration for the main SMPTE VC-1 software decoder. */
6250 AVCodec ff_vc1_decoder = {
6252     .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6253     .type = AVMEDIA_TYPE_VIDEO,
6254     .id = AV_CODEC_ID_VC1,
6255     .priv_data_size = sizeof(VC1Context),
6256     .init = vc1_decode_init,
6257     .close = ff_vc1_decode_end,
6258     .decode = vc1_decode_frame,
6259     .flush = ff_mpeg_flush,
6260     .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6261     .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6262     .profiles = NULL_IF_CONFIG_SMALL(profiles)
6265 #if CONFIG_WMV3_DECODER
/* Registration for the WMV3 (Windows Media Video 9) software decoder;
 * shares all callbacks with the VC-1 decoder. */
6266 AVCodec ff_wmv3_decoder = {
6268     .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
6269     .type = AVMEDIA_TYPE_VIDEO,
6270     .id = AV_CODEC_ID_WMV3,
6271     .priv_data_size = sizeof(VC1Context),
6272     .init = vc1_decode_init,
6273     .close = ff_vc1_decode_end,
6274     .decode = vc1_decode_frame,
6275     .flush = ff_mpeg_flush,
6276     .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6277     .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6278     .profiles = NULL_IF_CONFIG_SMALL(profiles)
6282 #if CONFIG_WMV3_VDPAU_DECODER
/* WMV3 decoder variant that outputs through the legacy VDPAU
 * pseudo-pixel-format path (no ff_mpeg_flush, hwaccel-only pix_fmt). */
6283 AVCodec ff_wmv3_vdpau_decoder = {
6284     .name = "wmv3_vdpau",
6285     .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
6286     .type = AVMEDIA_TYPE_VIDEO,
6287     .id = AV_CODEC_ID_WMV3,
6288     .priv_data_size = sizeof(VC1Context),
6289     .init = vc1_decode_init,
6290     .close = ff_vc1_decode_end,
6291     .decode = vc1_decode_frame,
6292     .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
6293     .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
6294     .profiles = NULL_IF_CONFIG_SMALL(profiles)
6298 #if CONFIG_VC1_VDPAU_DECODER
/* VC-1 decoder variant for the legacy VDPAU pseudo-pixel-format path. */
6299 AVCodec ff_vc1_vdpau_decoder = {
6300     .name = "vc1_vdpau",
6301     .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
6302     .type = AVMEDIA_TYPE_VIDEO,
6303     .id = AV_CODEC_ID_VC1,
6304     .priv_data_size = sizeof(VC1Context),
6305     .init = vc1_decode_init,
6306     .close = ff_vc1_decode_end,
6307     .decode = vc1_decode_frame,
6308     .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
6309     .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
6310     .profiles = NULL_IF_CONFIG_SMALL(profiles)
6314 #if CONFIG_WMV3IMAGE_DECODER
/* WMV3 Image (WMVP) decoder: sprite-based, intra-only, uses the sprite
 * flush callback instead of ff_mpeg_flush.  NOTE(review): the tail of
 * the pix_fmts initializer is missing from this extraction. */
6315 AVCodec ff_wmv3image_decoder = {
6316     .name = "wmv3image",
6317     .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
6318     .type = AVMEDIA_TYPE_VIDEO,
6319     .id = AV_CODEC_ID_WMV3IMAGE,
6320     .priv_data_size = sizeof(VC1Context),
6321     .init = vc1_decode_init,
6322     .close = ff_vc1_decode_end,
6323     .decode = vc1_decode_frame,
6324     .capabilities = CODEC_CAP_DR1,
6325     .flush = vc1_sprite_flush,
6326     .pix_fmts = (const enum AVPixelFormat[]) {
6333 #if CONFIG_VC1IMAGE_DECODER
6334 AVCodec ff_vc1image_decoder = {
6336 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
6337 .type = AVMEDIA_TYPE_VIDEO,
6338 .id = AV_CODEC_ID_VC1IMAGE,
6339 .priv_data_size = sizeof(VC1Context),
6340 .init = vc1_decode_init,
6341 .close = ff_vc1_decode_end,
6342 .decode = vc1_decode_frame,
6343 .capabilities = CODEC_CAP_DR1,
6344 .flush = vc1_sprite_flush,
6345 .pix_fmts = (const enum AVPixelFormat[]) {