2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of Libav.
9 * Libav is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * Libav is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with Libav; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
31 #include "error_resilience.h"
32 #include "mpegutils.h"
33 #include "mpegvideo.h"
35 #include "h264chroma.h"
38 #include "vc1acdata.h"
39 #include "msmpeg4data.h"
/* Number of bits read at once when decoding the macroblock intra VLC table. */
46 #define MB_INTRA_VLC_BITS 9
50 // offset tables for interlaced picture MVDATA decoding
/* table1[k] = 2^(k-1) for k > 0; table2[k] = 2^k - 1 (cumulative offsets). */
51 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
52 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
54 /***********************************************************************/
56 * @name VC-1 Bitplane decoding
74 /** @} */ //imode defines
/* Set up the per-MB destination pointers (s->dest[]) via ff_init_block_index(),
 * then, for field pictures, shift each plane pointer down one line when the
 * field being decoded is the one that starts on the second picture line.
 * NOTE(review): !(second_field ^ tff) selects that field — confirm the
 * field-order convention against the callers. */
76 static void init_block_index(VC1Context *v)
78 MpegEncContext *s = &v->s;
79 ff_init_block_index(s);
80 if (v->field_mode && !(v->second_field ^ v->tff)) {
81 s->dest[0] += s->current_picture_ptr->f->linesize[0];
82 s->dest[1] += s->current_picture_ptr->f->linesize[1];
83 s->dest[2] += s->current_picture_ptr->f->linesize[2];
87 /** @} */ //Bitplane group
/* Write the reconstructed (signed, clamped-to-8-bit) DCT blocks of already
 * overlap-filtered macroblocks into the output picture.  The copy trails the
 * decode loop by one MB row and one MB column (see the block comment below);
 * the top-left neighbour is emitted normally, and the top neighbour is
 * flushed as well at the end of each MB row (mb_x == mb_width - 1).
 * For interlaced frames (ILACE_FRAME), fieldtx doubles the luma stride and
 * adjusts the vertical offset of the lower luma blocks. */
89 static void vc1_put_signed_blocks_clamped(VC1Context *v)
91 MpegEncContext *s = &v->s;
92 int topleft_mb_pos, top_mb_pos;
93 int stride_y, fieldtx = 0;
96 /* The put pixels loop is always one MB row behind the decoding loop,
97 * because we can only put pixels when overlap filtering is done, and
98 * for filtering of the bottom edge of a MB, we need the next MB row
100 * Within the row, the put pixels loop is also one MB col behind the
101 * decoding loop. The reason for this is again, because for filtering
102 * of the right MB edge, we need the next MB present. */
103 if (!s->first_slice_line) {
/* Emit the top-left neighbour MB (blocks 0-3 luma, 4-5 chroma). */
105 topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
106 if (v->fcm == ILACE_FRAME)
107 fieldtx = v->fieldtx_plane[topleft_mb_pos];
108 stride_y = s->linesize << fieldtx;
/* v_dist: line offset of the lower luma block pair (8 normally, 16 with fieldtx). */
109 v_dist = (16 - fieldtx) >> (fieldtx == 0);
110 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
111 s->dest[0] - 16 * s->linesize - 16,
113 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
114 s->dest[0] - 16 * s->linesize - 8,
116 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
117 s->dest[0] - v_dist * s->linesize - 16,
119 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
120 s->dest[0] - v_dist * s->linesize - 8,
122 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
123 s->dest[1] - 8 * s->uvlinesize - 8,
125 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
126 s->dest[2] - 8 * s->uvlinesize - 8,
/* At the end of the row there is no "next" MB, so flush the top neighbour too. */
129 if (s->mb_x == s->mb_width - 1) {
130 top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
131 if (v->fcm == ILACE_FRAME)
132 fieldtx = v->fieldtx_plane[top_mb_pos];
133 stride_y = s->linesize << fieldtx;
134 v_dist = fieldtx ? 15 : 8;
135 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
136 s->dest[0] - 16 * s->linesize,
138 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
139 s->dest[0] - 16 * s->linesize + 8,
141 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
142 s->dest[0] - v_dist * s->linesize,
144 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
145 s->dest[0] - v_dist * s->linesize + 8,
147 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
148 s->dest[1] - 8 * s->uvlinesize,
150 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
151 s->dest[2] - 8 * s->uvlinesize,
/* Rotate the four block-buffer indices, wrapping at n_allocated_blks. */
156 #define inc_blk_idx(idx) do { \
158 if (idx >= v->n_allocated_blks) \
162 inc_blk_idx(v->topleft_blk_idx);
163 inc_blk_idx(v->top_blk_idx);
164 inc_blk_idx(v->left_blk_idx);
165 inc_blk_idx(v->cur_blk_idx);
/* In-loop deblocking for intra-block rows, filter strength pq.
 * Filters the vertical (row) edge above the current MB and the horizontal
 * (column) edges of the MB above once its bottom neighbour exists; the last
 * MB row (mb_y == end_mb_y - 1) gets the remaining edges filtered directly. */
168 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
170 MpegEncContext *s = &v->s;
172 if (!s->first_slice_line) {
173 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
175 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
176 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
177 for (j = 0; j < 2; j++) {
178 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
180 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Mid-MB luma edge (between the upper and lower 8-line halves). */
183 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
185 if (s->mb_y == s->end_mb_y - 1) {
187 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
188 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
189 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
191 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/* Delayed variant of vc1_loop_filter_iblk(): the deblocker trails the overlap
 * filter by one MB row/column, i.e. two rows/columns behind the decode loop
 * (see comment below).  All dest[] offsets are therefore shifted back by one
 * extra MB compared to the non-delayed version; the tail cases (end of row,
 * last row at mb_y == end_mb_y) drain the remaining pending edges. */
195 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
197 MpegEncContext *s = &v->s;
200 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
201 * means it runs two rows/cols behind the decoding loop. */
202 if (!s->first_slice_line) {
204 if (s->mb_y >= s->start_mb_y + 2) {
205 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
208 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
209 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
210 for (j = 0; j < 2; j++) {
211 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
213 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
217 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
/* End of an MB row: also process the MB directly above (no left shift). */
220 if (s->mb_x == s->mb_width - 1) {
221 if (s->mb_y >= s->start_mb_y + 2) {
222 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
225 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
226 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
227 for (j = 0; j < 2; j++) {
228 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
230 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
234 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
/* Past the last decoded row: flush the horizontal edges still pending. */
237 if (s->mb_y == s->end_mb_y) {
240 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
241 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
243 for (j = 0; j < 2; j++) {
244 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
249 if (s->mb_x == s->mb_width - 1) {
251 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
252 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
254 for (j = 0; j < 2; j++) {
255 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Conditional overlap smoothing across intra-block edges.
 * An edge is smoothed when condover == CONDOVER_ALL, or pq >= 9, or the
 * per-MB over_flags_plane bit is set on both sides; returns immediately
 * for CONDOVER_NONE.  H filtering runs on the current MB, V filtering
 * trails by one MB column (see the block comment below).
 * Blocks 0-3 are luma, 4-5 chroma (skipped under CODEC_FLAG_GRAY). */
263 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
265 MpegEncContext *s = &v->s;
268 if (v->condover == CONDOVER_NONE)
271 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
273 /* Within a MB, the horizontal overlap always runs before the vertical.
274 * To accomplish that, we run the H on left and internal borders of the
275 * currently decoded MB. Then, we wait for the next overlap iteration
276 * to do H overlap on the right edge of this MB, before moving over and
277 * running the V overlap. Therefore, the V overlap makes us trail by one
278 * MB col and the H overlap filter makes us trail by one MB row. This
279 * is reflected in the time at which we run the put_pixels loop. */
280 if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
281 if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
282 v->over_flags_plane[mb_pos - 1])) {
/* H overlap across the left MB boundary. */
283 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
284 v->block[v->cur_blk_idx][0]);
285 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
286 v->block[v->cur_blk_idx][2]);
287 if (!(s->flags & CODEC_FLAG_GRAY)) {
288 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
289 v->block[v->cur_blk_idx][4]);
290 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
291 v->block[v->cur_blk_idx][5]);
/* H overlap on the internal (block 0|1 and 2|3) edges of this MB. */
294 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
295 v->block[v->cur_blk_idx][1]);
296 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
297 v->block[v->cur_blk_idx][3]);
/* Last MB of the row: do the V overlap for this MB right away. */
299 if (s->mb_x == s->mb_width - 1) {
300 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
301 v->over_flags_plane[mb_pos - s->mb_stride])) {
302 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
303 v->block[v->cur_blk_idx][0]);
304 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
305 v->block[v->cur_blk_idx][1]);
306 if (!(s->flags & CODEC_FLAG_GRAY)) {
307 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
308 v->block[v->cur_blk_idx][4]);
309 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
310 v->block[v->cur_blk_idx][5]);
313 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
314 v->block[v->cur_blk_idx][2]);
315 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
316 v->block[v->cur_blk_idx][3]);
/* V overlap for the trailing (left-neighbour) MB.
 * NOTE(review): this gate lacks the "v->pq >= 9" term that every other
 * gate in this function has — confirm against SMPTE 421M whether that is
 * intentional or a dropped condition. */
319 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
320 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
321 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
322 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
323 v->block[v->left_blk_idx][0]);
324 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
325 v->block[v->left_blk_idx][1]);
326 if (!(s->flags & CODEC_FLAG_GRAY)) {
327 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
328 v->block[v->left_blk_idx][4]);
329 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
330 v->block[v->left_blk_idx][5]);
333 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
334 v->block[v->left_blk_idx][2]);
335 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
336 v->block[v->left_blk_idx][3]);
340 /** Do motion compensation over 1 macroblock
341 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
/* Motion compensation for a whole 16x16 macroblock with a single MV.
 * dir: 0 = forward reference (last/current field), 1 = backward (next).
 * Luma uses quarter-pel positions (mspel path) or half-pel DSP functions;
 * the chroma MV is derived from the luma MV and interpolated with qpel
 * bilinear.  Handles field pictures, range reduction (rangeredfrm) and
 * intensity compensation (use_ic/luty/lutuv) via an edge-emulated copy. */
343 static void vc1_mc_1mv(VC1Context *v, int dir)
345 MpegEncContext *s = &v->s;
346 H264ChromaContext *h264chroma = &v->h264chroma;
347 uint8_t *srcY, *srcU, *srcV;
348 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
349 int v_edge_pos = s->v_edge_pos >> v->field_mode;
351 uint8_t (*luty)[256], (*lutuv)[256];
/* Bail out when the needed reference picture was never decoded. */
354 if ((!v->field_mode ||
355 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
356 !v->s.last_picture.f->data[0])
359 mx = s->mv[dir][0][0];
360 my = s->mv[dir][0][1];
362 // store motion vectors for further use in B frames
363 if (s->pict_type == AV_PICTURE_TYPE_P) {
364 for (i = 0; i < 4; i++) {
365 s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
366 s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
/* Chroma MV = luma MV halved, rounding 3/4-pel positions up. */
370 uvmx = (mx + ((mx & 3) == 3)) >> 1;
371 uvmy = (my + ((my & 3) == 3)) >> 1;
372 v->luma_mv[s->mb_x][0] = uvmx;
373 v->luma_mv[s->mb_x][1] = uvmy;
376 v->cur_field_type != v->ref_field_type[dir]) {
377 my = my - 2 + 4 * v->cur_field_type;
378 uvmy = uvmy - 2 + 4 * v->cur_field_type;
381 // fastuvmc shall be ignored for interlaced frame picture
382 if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
/* Round the chroma MV to half-pel positions (toward zero). */
383 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
384 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
/* Select reference planes and IC lookup tables: same-frame other field,
 * last picture, or next picture depending on dir/field state. */
387 if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
388 srcY = s->current_picture.f->data[0];
389 srcU = s->current_picture.f->data[1];
390 srcV = s->current_picture.f->data[2];
392 lutuv = v->curr_lutuv;
393 use_ic = v->curr_use_ic;
395 srcY = s->last_picture.f->data[0];
396 srcU = s->last_picture.f->data[1];
397 srcV = s->last_picture.f->data[2];
399 lutuv = v->last_lutuv;
400 use_ic = v->last_use_ic;
403 srcY = s->next_picture.f->data[0];
404 srcU = s->next_picture.f->data[1];
405 srcV = s->next_picture.f->data[2];
407 lutuv = v->next_lutuv;
408 use_ic = v->next_use_ic;
411 if (!srcY || !srcU) {
412 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
/* Integer source position: MV is in quarter-pel units. */
416 src_x = s->mb_x * 16 + (mx >> 2);
417 src_y = s->mb_y * 16 + (my >> 2);
418 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
419 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
421 if (v->profile != PROFILE_ADVANCED) {
422 src_x = av_clip( src_x, -16, s->mb_width * 16);
423 src_y = av_clip( src_y, -16, s->mb_height * 16);
424 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
425 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
427 src_x = av_clip( src_x, -17, s->avctx->coded_width);
428 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
429 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
430 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
433 srcY += src_y * s->linesize + src_x;
434 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
435 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* Bottom reference field: start one line down. */
437 if (v->field_mode && v->ref_field_type[dir]) {
438 srcY += s->current_picture_ptr->f->linesize[0];
439 srcU += s->current_picture_ptr->f->linesize[1];
440 srcV += s->current_picture_ptr->f->linesize[2];
443 /* for grayscale we should not try to read from unknown area */
444 if (s->flags & CODEC_FLAG_GRAY) {
445 srcU = s->edge_emu_buffer + 18 * s->linesize;
446 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* Use the edge-emulation buffer when the block hangs outside the picture,
 * or when the samples must be rewritten (range reduction / intensity comp). */
449 if (v->rangeredfrm || use_ic
450 || s->h_edge_pos < 22 || v_edge_pos < 22
451 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
452 || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
453 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
455 srcY -= s->mspel * (1 + s->linesize);
456 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
457 s->linesize, s->linesize,
458 17 + s->mspel * 2, 17 + s->mspel * 2,
459 src_x - s->mspel, src_y - s->mspel,
460 s->h_edge_pos, v_edge_pos);
461 srcY = s->edge_emu_buffer;
462 s->vdsp.emulated_edge_mc(uvbuf, srcU,
463 s->uvlinesize, s->uvlinesize,
465 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
466 s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
467 s->uvlinesize, s->uvlinesize,
469 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
472 /* if we deal with range reduction we need to scale source blocks */
473 if (v->rangeredfrm) {
478 for (j = 0; j < 17 + s->mspel * 2; j++) {
479 for (i = 0; i < 17 + s->mspel * 2; i++)
480 src[i] = ((src[i] - 128) >> 1) + 128;
485 for (j = 0; j < 9; j++) {
486 for (i = 0; i < 9; i++) {
487 src[i] = ((src[i] - 128) >> 1) + 128;
488 src2[i] = ((src2[i] - 128) >> 1) + 128;
490 src += s->uvlinesize;
491 src2 += s->uvlinesize;
494 /* if we deal with intensity compensation we need to scale source blocks */
/* f selects the per-field LUT: the reference field in field mode, else
 * the parity of the source line. */
500 for (j = 0; j < 17 + s->mspel * 2; j++) {
501 int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
502 for (i = 0; i < 17 + s->mspel * 2; i++)
503 src[i] = luty[f][src[i]];
508 for (j = 0; j < 9; j++) {
509 int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
510 for (i = 0; i < 9; i++) {
511 src[i] = lutuv[f][src[i]];
512 src2[i] = lutuv[f][src2[i]];
514 src += s->uvlinesize;
515 src2 += s->uvlinesize;
518 srcY += s->mspel * (1 + s->linesize);
/* Quarter-pel luma: four 8x8 calls with the sub-pel phase in dxy. */
522 dxy = ((my & 3) << 2) | (mx & 3);
523 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
524 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
525 srcY += s->linesize * 8;
526 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
527 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
528 } else { // hpel mc - always used for luma
529 dxy = (my & 2) | ((mx & 2) >> 1);
531 s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
533 s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
536 if (s->flags & CODEC_FLAG_GRAY) return;
537 /* Chroma MC always uses qpel bilinear */
538 uvmx = (uvmx & 3) << 1;
539 uvmy = (uvmy & 3) << 1;
541 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
542 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
544 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
545 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Median of four motion-vector components: average (truncating toward
 * zero) of the two middle values of {a, b, c, d}.  Each branch pairs the
 * smaller of one (a,b)/(c,d) ordering with the larger of the other, which
 * is exactly "drop the overall min and max, average the rest".
 */
static inline int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else       return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else       return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}
560 /** Do motion compensation for 4-MV macroblock - luminance block
/* Motion compensation for one 8x8 luma block (index n, 0..3) of a 4-MV
 * macroblock.  dir selects the reference direction, avg selects averaging
 * (avg_* DSP functions) instead of plain put.  For interlaced frames,
 * fieldmv doubles the source stride so each sub-block samples one field.
 * Also derives and stores the MV used for direct-mode B frames when the
 * last block (n == 3) of a field-mode P macroblock is reached. */
562 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
564 MpegEncContext *s = &v->s;
566 int dxy, mx, my, src_x, src_y;
568 int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
569 int v_edge_pos = s->v_edge_pos >> v->field_mode;
570 uint8_t (*luty)[256];
573 if ((!v->field_mode ||
574 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
575 !v->s.last_picture.f->data[0])
578 mx = s->mv[dir][n][0];
579 my = s->mv[dir][n][1];
/* Pick the reference luma plane and intensity-compensation LUT. */
582 if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
583 srcY = s->current_picture.f->data[0];
585 use_ic = v->curr_use_ic;
587 srcY = s->last_picture.f->data[0];
589 use_ic = v->last_use_ic;
592 srcY = s->next_picture.f->data[0];
594 use_ic = v->next_use_ic;
598 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
603 if (v->cur_field_type != v->ref_field_type[dir])
604 my = my - 2 + 4 * v->cur_field_type;
/* After the 4th MV of a field-mode P MB: compute the MV stored for
 * B-frame direct mode from the dominant field's MVs (median of 4,
 * mid_pred of 3, or average of 2 depending on how many agree). */
607 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
608 int same_count = 0, opp_count = 0, k;
609 int chosen_mv[2][4][2], f;
611 for (k = 0; k < 4; k++) {
612 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
613 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
614 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
618 f = opp_count > same_count;
619 switch (f ? opp_count : same_count) {
621 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
622 chosen_mv[f][2][0], chosen_mv[f][3][0]);
623 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
624 chosen_mv[f][2][1], chosen_mv[f][3][1]);
627 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
628 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
631 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
632 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
635 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
636 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
637 for (k = 0; k < 4; k++)
638 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* MV pull-back to the picture area for interlaced frames. */
641 if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
643 int width = s->avctx->coded_width;
644 int height = s->avctx->coded_height >> 1;
645 if (s->pict_type == AV_PICTURE_TYPE_P) {
646 s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
647 s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
649 qx = (s->mb_x * 16) + (mx >> 2);
650 qy = (s->mb_y * 8) + (my >> 3);
655 mx -= 4 * (qx - width);
658 else if (qy > height + 1)
659 my -= 8 * (qy - height - 1);
/* Destination offset of sub-block n inside the MB (field layout differs). */
662 if ((v->fcm == ILACE_FRAME) && fieldmv)
663 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
665 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
667 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
669 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
671 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
673 if (v->profile != PROFILE_ADVANCED) {
674 src_x = av_clip(src_x, -16, s->mb_width * 16);
675 src_y = av_clip(src_y, -16, s->mb_height * 16);
677 src_x = av_clip(src_x, -17, s->avctx->coded_width);
678 if (v->fcm == ILACE_FRAME) {
680 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
682 src_y = av_clip(src_y, -18, s->avctx->coded_height);
684 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
688 srcY += src_y * s->linesize + src_x;
689 if (v->field_mode && v->ref_field_type[dir])
690 srcY += s->current_picture_ptr->f->linesize[0];
692 if (fieldmv && !(src_y & 1))
694 if (fieldmv && (src_y & 1) && src_y < 4)
/* Edge emulation / sample rewriting, as in vc1_mc_1mv() but 8x8-sized
 * and with the stride doubled for field MVs. */
696 if (v->rangeredfrm || use_ic
697 || s->h_edge_pos < 13 || v_edge_pos < 23
698 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
699 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
700 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
701 /* check emulate edge stride and offset */
702 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
703 s->linesize, s->linesize,
704 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
705 src_x - s->mspel, src_y - (s->mspel << fieldmv),
706 s->h_edge_pos, v_edge_pos);
707 srcY = s->edge_emu_buffer;
708 /* if we deal with range reduction we need to scale source blocks */
709 if (v->rangeredfrm) {
714 for (j = 0; j < 9 + s->mspel * 2; j++) {
715 for (i = 0; i < 9 + s->mspel * 2; i++)
716 src[i] = ((src[i] - 128) >> 1) + 128;
717 src += s->linesize << fieldmv;
720 /* if we deal with intensity compensation we need to scale source blocks */
726 for (j = 0; j < 9 + s->mspel * 2; j++) {
727 int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
728 for (i = 0; i < 9 + s->mspel * 2; i++)
729 src[i] = luty[f][src[i]];
730 src += s->linesize << fieldmv;
733 srcY += s->mspel * (1 + (s->linesize << fieldmv));
737 dxy = ((my & 3) << 2) | (mx & 3);
739 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
741 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
742 } else { // hpel mc - always used for luma
743 dxy = (my & 2) | ((mx & 2) >> 1);
745 s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
747 s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Derive the chroma MV (*tx, *ty) from the four luma MVs mvx[]/mvy[].
 * a[k] is compared against 'flag': blocks where a[k] != flag are excluded
 * (idx has one bit per excluded block, count[idx] is its popcount).
 * 0 excluded -> median of all four; 1 excluded -> mid_pred of the other
 * three; 2 excluded -> average of the remaining pair (t1/t2 scan).
 * NOTE(review): the return statements are not visible in this chunk —
 * callers use the result as a "valid MV count"; confirm. */
751 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
754 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
756 idx = ((a[3] != flag) << 3)
757 | ((a[2] != flag) << 2)
758 | ((a[1] != flag) << 1)
761 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
762 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
764 } else if (count[idx] == 1) {
767 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
768 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
771 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
772 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
775 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
776 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
779 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
780 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
783 } else if (count[idx] == 2) {
/* Find the two remaining (included) blocks t1 and t2. */
785 for (i = 0; i < 3; i++)
790 for (i = t1 + 1; i < 4; i++)
795 *tx = (mvx[t1] + mvx[t2]) / 2;
796 *ty = (mvy[t1] + mvy[t2]) / 2;
804 /** Do motion compensation for 4-MV macroblock - both chroma blocks
/* Chroma motion compensation for a 4-MV macroblock (both U and V, 8x8).
 * One chroma MV is derived from the four luma MVs: in progressive/1-ref
 * mode intra blocks are excluded (get_chroma_mv with intra[]), in 2-ref
 * field mode the dominant field's MVs are used (mv_f[]).  The result is
 * rounded, optionally fastuvmc-rounded to half-pel, field-bias corrected,
 * then interpolated with qpel bilinear as in vc1_mc_1mv(). */
806 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
808 MpegEncContext *s = &v->s;
809 H264ChromaContext *h264chroma = &v->h264chroma;
810 uint8_t *srcU, *srcV;
811 int uvmx, uvmy, uvsrc_x, uvsrc_y;
812 int k, tx = 0, ty = 0;
813 int mvx[4], mvy[4], intra[4], mv_f[4];
815 int chroma_ref_type = v->cur_field_type;
816 int v_edge_pos = s->v_edge_pos >> v->field_mode;
817 uint8_t (*lutuv)[256];
820 if (!v->field_mode && !v->s.last_picture.f->data[0])
822 if (s->flags & CODEC_FLAG_GRAY)
/* Gather the four luma MVs plus per-block intra/field flags. */
825 for (k = 0; k < 4; k++) {
826 mvx[k] = s->mv[dir][k][0];
827 mvy[k] = s->mv[dir][k][1];
828 intra[k] = v->mb_type[0][s->block_index[k]];
830 mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
833 /* calculate chroma MV vector from four luma MVs */
834 if (!v->field_mode || (v->field_mode && !v->numref)) {
835 valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
836 chroma_ref_type = v->reffield;
838 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
839 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
840 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
841 return; //no need to do MC for intra blocks
/* Two-reference field mode: pick the field referenced by >2 of the MVs. */
845 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
847 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
849 chroma_ref_type = !v->cur_field_type;
851 if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
853 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
854 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
/* Halve to chroma resolution, rounding 3/4-pel positions up. */
855 uvmx = (tx + ((tx & 3) == 3)) >> 1;
856 uvmy = (ty + ((ty & 3) == 3)) >> 1;
858 v->luma_mv[s->mb_x][0] = uvmx;
859 v->luma_mv[s->mb_x][1] = uvmy;
/* fastuvmc: round to half-pel toward zero. */
862 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
863 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
865 // Field conversion bias
866 if (v->cur_field_type != chroma_ref_type)
867 uvmy += 2 - 4 * chroma_ref_type;
869 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
870 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
872 if (v->profile != PROFILE_ADVANCED) {
873 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
874 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
876 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
877 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
/* Select the reference chroma planes and IC LUT (as in vc1_mc_1mv). */
881 if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
882 srcU = s->current_picture.f->data[1];
883 srcV = s->current_picture.f->data[2];
884 lutuv = v->curr_lutuv;
885 use_ic = v->curr_use_ic;
887 srcU = s->last_picture.f->data[1];
888 srcV = s->last_picture.f->data[2];
889 lutuv = v->last_lutuv;
890 use_ic = v->last_use_ic;
893 srcU = s->next_picture.f->data[1];
894 srcV = s->next_picture.f->data[2];
895 lutuv = v->next_lutuv;
896 use_ic = v->next_use_ic;
900 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
904 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
905 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* Bottom reference field: start one chroma line down. */
908 if (chroma_ref_type) {
909 srcU += s->current_picture_ptr->f->linesize[1];
910 srcV += s->current_picture_ptr->f->linesize[2];
/* Edge emulation / sample rewriting, 9x9 source needed for bilinear. */
914 if (v->rangeredfrm || use_ic
915 || s->h_edge_pos < 18 || v_edge_pos < 18
916 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
917 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
918 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
919 s->uvlinesize, s->uvlinesize,
920 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
921 s->h_edge_pos >> 1, v_edge_pos >> 1);
922 s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
923 s->uvlinesize, s->uvlinesize,
924 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
925 s->h_edge_pos >> 1, v_edge_pos >> 1);
926 srcU = s->edge_emu_buffer;
927 srcV = s->edge_emu_buffer + 16;
929 /* if we deal with range reduction we need to scale source blocks */
930 if (v->rangeredfrm) {
936 for (j = 0; j < 9; j++) {
937 for (i = 0; i < 9; i++) {
938 src[i] = ((src[i] - 128) >> 1) + 128;
939 src2[i] = ((src2[i] - 128) >> 1) + 128;
941 src += s->uvlinesize;
942 src2 += s->uvlinesize;
945 /* if we deal with intensity compensation we need to scale source blocks */
952 for (j = 0; j < 9; j++) {
953 int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
954 for (i = 0; i < 9; i++) {
955 src[i] = lutuv[f][src[i]];
956 src2[i] = lutuv[f][src2[i]];
958 src += s->uvlinesize;
959 src2 += s->uvlinesize;
964 /* Chroma MC always uses qpel bilinear */
965 uvmx = (uvmx & 3) << 1;
966 uvmy = (uvmy & 3) << 1;
968 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
969 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
971 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
972 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
976 /** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
/* Chroma MC for 4-MV interlaced-frame macroblocks: both U and V are built
 * from four independent 4x4 sub-blocks, each with its own chroma MV derived
 * from the corresponding luma MV.  dir selects the reference for sub-blocks
 * 0-1, dir2 for 2-3 (0 = last picture, nonzero = next picture); avg
 * averages into the destination instead of overwriting.  With field MVs
 * (fieldmv) the vertical component is remapped through s_rndtblfield and
 * all strides are doubled so each sub-block stays within one field. */
978 static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
980 MpegEncContext *s = &v->s;
981 H264ChromaContext *h264chroma = &v->h264chroma;
982 uint8_t *srcU, *srcV;
983 int uvsrc_x, uvsrc_y;
984 int uvmx_field[4], uvmy_field[4];
986 int fieldmv = v->blk_mv_type[s->block_index[0]];
/* Rounding table for the field-MV vertical component (low 4 bits). */
987 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
988 int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
989 int v_edge_pos = s->v_edge_pos >> 1;
991 uint8_t (*lutuv)[256];
993 if (s->flags & CODEC_FLAG_GRAY)
/* Derive the four chroma MVs from the luma MVs (3/4-pel rounds up). */
996 for (i = 0; i < 4; i++) {
997 int d = i < 2 ? dir: dir2;
999 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1000 ty = s->mv[d][i][1];
1002 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1004 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
1007 for (i = 0; i < 4; i++) {
1008 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1009 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1010 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1011 // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1012 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1013 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1014 if (i < 2 ? dir : dir2) {
1015 srcU = s->next_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1016 srcV = s->next_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1017 lutuv = v->next_lutuv;
1018 use_ic = v->next_use_ic;
1020 srcU = s->last_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1021 srcV = s->last_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1022 lutuv = v->last_lutuv;
1023 use_ic = v->last_use_ic;
/* Keep only the sub-pel phase for the bilinear interpolator. */
1025 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1026 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
1028 if (fieldmv && !(uvsrc_y & 1))
1030 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
/* Edge emulation: a 5x5 source patch is needed for 4x4 bilinear. */
1033 || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1034 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1035 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1036 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
1037 s->uvlinesize, s->uvlinesize,
1038 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1039 s->h_edge_pos >> 1, v_edge_pos);
1040 s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
1041 s->uvlinesize, s->uvlinesize,
1042 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1043 s->h_edge_pos >> 1, v_edge_pos);
1044 srcU = s->edge_emu_buffer;
1045 srcV = s->edge_emu_buffer + 16;
1047 /* if we deal with intensity compensation we need to scale source blocks */
1050 uint8_t *src, *src2;
1054 for (j = 0; j < 5; j++) {
1055 int f = (uvsrc_y + (j << fieldmv)) & 1;
1056 for (i = 0; i < 5; i++) {
1057 src[i] = lutuv[f][src[i]];
1058 src2[i] = lutuv[f][src2[i]];
1060 src += s->uvlinesize << fieldmv;
1061 src2 += s->uvlinesize << fieldmv;
/* 4x4 interpolation, averaging or plain put, rounding per v->rnd. */
1067 h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1068 h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1070 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1071 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1075 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1076 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1078 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1079 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1085 /***********************************************************************/
1087 * @name VC-1 Block-level functions
1088 * @see 7.1.4, p91 and 8.1.1.7, p104
1094 * @brief Get macroblock-level quantizer scale
/*
 * NOTE(review): this listing is a partial extract — some macro lines (else
 * branches, closing braces) are not visible here; read the structure against
 * the full file.  Visible semantics: when DQUANT is signalled (v->dquantfrm),
 * MQUANT is either decoded per-MB under DQPROFILE_ALL_MBS (one bit choosing
 * pq/altpq when bi-level, or a 3-bit MQDIFF added to pq, or a raw 5-bit
 * value), or derived from the MB's position: MBs on the picture edges
 * selected by the DQ profile use ALTPQUANT instead of PQUANT.  Expects
 * `mquant`, `mqdiff`, `edges`, `gb`, `s` and `v` in the expanding scope.
 */
1096 #define GET_MQUANT() \
1097 if (v->dquantfrm) { \
1099 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1100 if (v->dqbilevel) { \
1101 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1103 mqdiff = get_bits(gb, 3); \
1105 mquant = v->pq + mqdiff; \
1107 mquant = get_bits(gb, 5); \
1110 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1111 edges = 1 << v->dqsbedge; \
1112 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1113 edges = (3 << v->dqsbedge) % 15; \
1114 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
/* edge bitmask: 1 = left column, 2 = top row, 4 = right column, 8 = bottom row */ \
1116 if ((edges&1) && !s->mb_x) \
1117 mquant = v->altpq; \
1118 if ((edges&2) && s->first_slice_line) \
1119 mquant = v->altpq; \
1120 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1121 mquant = v->altpq; \
1122 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1123 mquant = v->altpq; \
/* bitstream sanity: a quantizer of 0 or > 31 is invalid — log and override */ \
1124 if (!mquant || mquant > 31) { \
1125 av_log(v->s.avctx, AV_LOG_ERROR, \
1126 "Overriding invalid mquant %d\n", mquant); \
1132 * @def GET_MVDATA(_dmv_x, _dmv_y)
1133 * @brief Get MV differentials
1134 * @see MVDATA decoding from 8.3.5.2, p120
1135 * @param _dmv_x Horizontal differential for decoded MV
1136 * @param _dmv_y Vertical differential for decoded MV
/*
 * NOTE(review): partial extract — several body lines are missing from this
 * listing.  Visible behaviour: a VLC (ff_vc1_mv_diff_vlc) yields an index
 * with three special cases — one zeroes the differential, index 35 reads the
 * long escape form as raw (k_x-1+qs)- and (k_y-1+qs)-bit values, index 36
 * begins another (not fully visible) path; otherwise the index is split into
 * (index % 6, index / 6) and a sign/magnitude differential is rebuilt from
 * size_table[] / offset_table[].  Also sets `mb_has_coeffs` as a side effect.
 */
1138 #define GET_MVDATA(_dmv_x, _dmv_y) \
1139 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1140 VC1_MV_DIFF_VLC_BITS, 2); \
1142 mb_has_coeffs = 1; \
1145 mb_has_coeffs = 0; \
1148 _dmv_x = _dmv_y = 0; \
1149 } else if (index == 35) { \
1150 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1151 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1152 } else if (index == 36) { \
1157 index1 = index % 6; \
1158 if (!s->quarter_sample && index1 == 5) val = 1; \
1160 if (size_table[index1] - val > 0) \
1161 val = get_bits(gb, size_table[index1] - val); \
/* sign is 0 or -1; (sign ^ x) - sign conditionally negates x */ \
1163 sign = 0 - (val&1); \
1164 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1166 index1 = index / 6; \
1167 if (!s->quarter_sample && index1 == 5) val = 1; \
1169 if (size_table[index1] - val > 0) \
1170 val = get_bits(gb, size_table[index1] - val); \
1172 sign = 0 - (val & 1); \
1173 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
/**
 * Decode an interlaced-picture MVDATA element into MV differentials.
 * Chooses the 1-ref or 2-ref VLC depending on the picture's reference count,
 * widens the coded magnitude according to DMVRANGE (extend_x/extend_y), and
 * reconstructs x/y differentials via offset_table1/offset_table2.
 * @param dmv_x/dmv_y  output MV differentials
 * @param pred_flag    output predictor-selection flag (2-ref fields only)
 * NOTE(review): partial listing — some branch/else lines are not visible.
 */
1176 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1177 int *dmv_y, int *pred_flag)
1180 int extend_x = 0, extend_y = 0;
1181 GetBitContext *gb = &v->s.gb;
1184 const int* offs_tab;
1187 bits = VC1_2REF_MVDATA_VLC_BITS;
1190 bits = VC1_1REF_MVDATA_VLC_BITS;
1193 switch (v->dmvrange) {
1201 extend_x = extend_y = 1;
1204 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
/* escape path: raw k_x/k_y-bit differentials; for 2-ref fields the LSB of
 * dmv_y doubles as the predictor-selection flag */
1206 *dmv_x = get_bits(gb, v->k_x);
1207 *dmv_y = get_bits(gb, v->k_y);
1210 *pred_flag = *dmv_y & 1;
1211 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1213 *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1219 offs_tab = offset_table2;
1221 offs_tab = offset_table1;
1222 index1 = (index + 1) % 9;
1224 val = get_bits(gb, index1 + extend_x);
1225 sign = 0 -(val & 1);
1226 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1230 offs_tab = offset_table2;
1232 offs_tab = offset_table1;
1233 index1 = (index + 1) / 9;
1234 if (index1 > v->numref) {
1235 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1236 sign = 0 - (val & 1);
1237 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
/* with two reference fields, the parity of index1 selects the predictor */
1240 if (v->numref && pred_flag)
1241 *pred_flag = index1 & 1;
/**
 * Scale the x component of a same-field MV predictor for field pictures.
 * Uses the refdist-indexed entries of ff_vc1_field_mvpred_scales: values
 * inside zone 1 (|n| < scalezone1_x) use scalesame1, values outside use
 * scalesame2 plus/minus the zone-1 offset.  Result is clipped to the
 * picture's horizontal MV range.
 */
1245 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1247 int scaledvalue, refdist;
1248 int scalesame1, scalesame2;
1249 int scalezone1_x, zone1offset_x;
/* table row depends on prediction direction and which field is current */
1250 int table_index = dir ^ v->second_field;
1252 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1253 refdist = v->refdist;
1255 refdist = dir ? v->brfd : v->frfd;
1258 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1259 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1260 scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1261 zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1266 if (FFABS(n) < scalezone1_x)
1267 scaledvalue = (n * scalesame1) >> 8;
1270 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1272 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1275 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/**
 * Scale the y component of a same-field MV predictor for field pictures.
 * Mirrors scaleforsame_x but uses the vertical scale-table columns (4 and 6)
 * and clips asymmetrically depending on the current/reference field parity.
 */
1278 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1280 int scaledvalue, refdist;
1281 int scalesame1, scalesame2;
1282 int scalezone1_y, zone1offset_y;
1283 int table_index = dir ^ v->second_field;
1285 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1286 refdist = v->refdist;
1288 refdist = dir ? v->brfd : v->frfd;
1291 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1292 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1293 scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1294 zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1299 if (FFABS(n) < scalezone1_y)
1300 scaledvalue = (n * scalesame1) >> 8;
1303 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1305 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
/* vertical range is halved for fields; the clip window shifts by one when
 * predicting from the other field parity */
1309 if (v->cur_field_type && !v->ref_field_type[dir])
1310 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1312 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/**
 * Scale the x component of an opposite-field MV predictor (B-field path).
 * Uses ff_vc1_b_field_mvpred_scales indexed by the backward reference
 * distance (clamped to 3); two-zone scaling like scaleforsame_x, clipped to
 * the horizontal MV range.
 */
1315 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1317 int scalezone1_x, zone1offset_x;
1318 int scaleopp1, scaleopp2, brfd;
1321 brfd = FFMIN(v->brfd, 3);
1322 scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1323 zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1324 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1325 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1330 if (FFABS(n) < scalezone1_x)
1331 scaledvalue = (n * scaleopp1) >> 8;
1334 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1336 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1339 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/**
 * Scale the y component of an opposite-field MV predictor (B-field path).
 * Same structure as scaleforopp_x but with the vertical table columns (4, 6)
 * and the field-parity-dependent asymmetric clip used by scaleforsame_y.
 */
1342 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1344 int scalezone1_y, zone1offset_y;
1345 int scaleopp1, scaleopp2, brfd;
1348 brfd = FFMIN(v->brfd, 3);
1349 scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1350 zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1351 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1352 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1357 if (FFABS(n) < scalezone1_y)
1358 scaledvalue = (n * scaleopp1) >> 8;
1361 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1363 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1366 if (v->cur_field_type && !v->ref_field_type[dir]) {
1367 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1369 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/**
 * Dispatch same-field predictor scaling for one MV component.
 * For non-B pictures, the second field, or forward prediction, defers to
 * scaleforsame_y (when i selects the vertical component; the exact selector
 * line is not visible in this listing) or scaleforsame_x, restoring half-pel
 * precision via `hpel`.  The remaining B-field case applies the flat
 * scalesame factor from ff_vc1_b_field_mvpred_scales row 0.
 */
1373 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1376 int brfd, scalesame;
/* inputs are quarter-pel; hpel == 1 shifts results back to half-pel units */
1377 int hpel = 1 - v->s.quarter_sample;
1380 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1382 n = scaleforsame_y(v, i, n, dir) << hpel;
1384 n = scaleforsame_x(v, n, dir) << hpel;
1387 brfd = FFMIN(v->brfd, 3);
1388 scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1390 n = (n * scalesame >> 8) << hpel;
/**
 * Dispatch opposite-field predictor scaling for one MV component.
 * The backward-direction first-field B case uses the two-zone
 * scaleforopp_y/scaleforopp_x helpers; all other cases apply the flat
 * scaleopp factor from ff_vc1_field_mvpred_scales row 0, indexed by the
 * (direction-dependent) reference distance.  `hpel` restores half-pel units.
 */
1394 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1397 int refdist, scaleopp;
1398 int hpel = 1 - v->s.quarter_sample;
1401 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1403 n = scaleforopp_y(v, n, dir) << hpel;
1405 n = scaleforopp_x(v, n) << hpel;
1408 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1409 refdist = FFMIN(v->refdist, 3);
1411 refdist = dir ? v->brfd : v->frfd;
1412 scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1414 n = (n * scaleopp >> 8) << hpel;
1418 /** Predict and set motion vector
/*
 * Median-predicts the MV for luma block n from neighbours A (above),
 * B (above-right/left) and C (left), applies the decoded differential,
 * pulls the result back inside the picture (8.3.5.3.4), optionally applies
 * hybrid prediction (8.3.5.3.5), and stores the MV (with signed-modulus
 * wrapping per 4.11) into s->mv and current_picture.motion_val.  For field
 * pictures, neighbour MVs pointing at the other field are rescaled with the
 * scalefor* helpers and the chosen field is tracked in v->mv_f.
 * NOTE(review): partial listing — else branches and closing braces are
 * missing at several points below.
 * @param n        luma block index (0..3); mv1 != 0 means 1-MV mode
 * @param r_x,r_y  MV range for the signed-modulus wrap
 * @param is_intra per-block intra flags used to invalidate predictors
 */
1420 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1421 int mv1, int r_x, int r_y, uint8_t* is_intra,
1422 int pred_flag, int dir)
1424 MpegEncContext *s = &v->s;
1425 int xy, wrap, off = 0;
1429 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1430 int opposite, a_f, b_f, c_f;
1431 int16_t field_predA[2];
1432 int16_t field_predB[2];
1433 int16_t field_predC[2];
1434 int a_valid, b_valid, c_valid;
1435 int hybridmv_thresh, y_bias = 0;
1437 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1438 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1442 /* scale MV difference to be quad-pel */
1443 dmv_x <<= 1 - s->quarter_sample;
1444 dmv_y <<= 1 - s->quarter_sample;
1446 wrap = s->b8_stride;
1447 xy = s->block_index[n];
/* (intra path) clear both directions' MVs for this block */
1450 s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1451 s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1452 s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1453 s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1454 if (mv1) { /* duplicate motion data for 1-MV block */
1455 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1456 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1457 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1458 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1459 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1460 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1461 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1462 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1463 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
/* NOTE(review): unlike every neighbouring store, the next line omits
 * "+ v->blocks_off" — looks like a field-picture bug; confirm against the
 * upstream repository history */
1464 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1465 s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1466 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1467 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
/* gather neighbour predictors: C = left, A = above, B = above at offset
 * `off` (position depends on block index and MB column) */
1472 C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1473 A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1475 if (v->field_mode && mixedmv_pic)
1476 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1478 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1480 //in 4-MV mode different blocks have different B predictor position
1483 off = (s->mb_x > 0) ? -1 : 1;
1486 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1495 B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
1497 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1498 b_valid = a_valid && (s->mb_width > 1);
1499 c_valid = s->mb_x || (n == 1 || n == 3);
1500 if (v->field_mode) {
1501 a_valid = a_valid && !is_intra[xy - wrap];
1502 b_valid = b_valid && !is_intra[xy - wrap + off];
1503 c_valid = c_valid && !is_intra[xy - 1];
/* count how many valid predictors reference the same vs. opposite field */
1507 a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1508 num_oppfield += a_f;
1509 num_samefield += 1 - a_f;
1510 field_predA[0] = A[0];
1511 field_predA[1] = A[1];
1513 field_predA[0] = field_predA[1] = 0;
1517 b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1518 num_oppfield += b_f;
1519 num_samefield += 1 - b_f;
1520 field_predB[0] = B[0];
1521 field_predB[1] = B[1];
1523 field_predB[0] = field_predB[1] = 0;
1527 c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1528 num_oppfield += c_f;
1529 num_samefield += 1 - c_f;
1530 field_predC[0] = C[0];
1531 field_predC[1] = C[1];
1533 field_predC[0] = field_predC[1] = 0;
1537 if (v->field_mode) {
1539 // REFFIELD determines if the last field or the second-last field is
1540 // to be used as reference
1541 opposite = 1 - v->reffield;
1543 if (num_samefield <= num_oppfield)
1544 opposite = 1 - pred_flag;
1546 opposite = pred_flag;
/* rescale predictors that reference the other field than the one chosen */
1551 if (a_valid && !a_f) {
1552 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1553 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1555 if (b_valid && !b_f) {
1556 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1557 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1559 if (c_valid && !c_f) {
1560 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1561 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1563 v->mv_f[dir][xy + v->blocks_off] = 1;
1564 v->ref_field_type[dir] = !v->cur_field_type;
1566 if (a_valid && a_f) {
1567 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1568 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1570 if (b_valid && b_f) {
1571 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1572 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1574 if (c_valid && c_f) {
1575 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1576 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1578 v->mv_f[dir][xy + v->blocks_off] = 0;
1579 v->ref_field_type[dir] = v->cur_field_type;
/* fewer than two valid predictors: take the first available one in
 * priority order A, C, B */
1583 px = field_predA[0];
1584 py = field_predA[1];
1585 } else if (c_valid) {
1586 px = field_predC[0];
1587 py = field_predC[1];
1588 } else if (b_valid) {
1589 px = field_predB[0];
1590 py = field_predB[1];
1596 if (num_samefield + num_oppfield > 1) {
1597 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1598 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1601 /* Pullback MV as specified in 8.3.5.3.4 */
1602 if (!v->field_mode) {
1604 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1605 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1606 X = (s->mb_width << 6) - 4;
1607 Y = (s->mb_height << 6) - 4;
1609 if (qx + px < -60) px = -60 - qx;
1610 if (qy + py < -60) py = -60 - qy;
1612 if (qx + px < -28) px = -28 - qx;
1613 if (qy + py < -28) py = -28 - qy;
1615 if (qx + px > X) px = X - qx;
1616 if (qy + py > Y) py = Y - qy;
1619 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1620 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1621 hybridmv_thresh = 32;
1622 if (a_valid && c_valid) {
1623 if (is_intra[xy - wrap])
1624 sum = FFABS(px) + FFABS(py);
1626 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1627 if (sum > hybridmv_thresh) {
1628 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1629 px = field_predA[0];
1630 py = field_predA[1];
1632 px = field_predC[0];
1633 py = field_predC[1];
1636 if (is_intra[xy - 1])
1637 sum = FFABS(px) + FFABS(py);
1639 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1640 if (sum > hybridmv_thresh) {
1641 if (get_bits1(&s->gb)) {
1642 px = field_predA[0];
1643 py = field_predA[1];
1645 px = field_predC[0];
1646 py = field_predC[1];
/* field pictures: adjust precision / apply y bias when predicting from the
 * other field parity (assignment lines not visible in this listing) */
1653 if (v->field_mode && v->numref)
1655 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1657 /* store MV using signed modulus of MV range defined in 4.11 */
1658 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1659 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1660 if (mv1) { /* duplicate motion data for 1-MV block */
1661 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1662 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1663 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1664 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1665 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1666 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1667 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1668 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1672 /** Predict and set motion vector for interlaced frame picture MBs
/*
 * Interlaced-frame variant of vc1_pred_mv: builds predictors A (left),
 * B (above) and C (above-right, or above-left in the last MB column),
 * averaging a neighbour's two field MVs when the neighbour uses field MVs
 * but the current block uses a frame MV (and vice versa), then selects the
 * prediction by median or by same/opposite-field majority.  The wrapped
 * result is stored and duplicated according to mvn (1 = 1-MV block,
 * 2 = 2-field-MV block).
 * NOTE(review): partial listing — many else branches / closing braces are
 * not visible below.
 */
1674 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1675 int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
1677 MpegEncContext *s = &v->s;
1678 int xy, wrap, off = 0;
1679 int A[2], B[2], C[2];
1681 int a_valid = 0, b_valid = 0, c_valid = 0;
1682 int field_a, field_b, field_c; // 0: same, 1: opposite
1683 int total_valid, num_samefield, num_oppfield;
1684 int pos_c, pos_b, n_adj;
1686 wrap = s->b8_stride;
1687 xy = s->block_index[n];
/* (intra path) clear MVs in both directions for this block */
1690 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1691 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1692 s->current_picture.motion_val[1][xy][0] = 0;
1693 s->current_picture.motion_val[1][xy][1] = 0;
1694 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1695 s->current_picture.motion_val[0][xy + 1][0] = 0;
1696 s->current_picture.motion_val[0][xy + 1][1] = 0;
1697 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1698 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1699 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1700 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
1701 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1702 s->current_picture.motion_val[1][xy + 1][0] = 0;
1703 s->current_picture.motion_val[1][xy + 1][1] = 0;
1704 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1705 s->current_picture.motion_val[1][xy + wrap][1] = 0;
1706 s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
1707 s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
/* off selects the other field row of the left neighbour for averaging */
1712 off = ((n == 0) || (n == 1)) ? 1 : -1;
/* Predict A (left neighbour) */
1714 if (s->mb_x || (n == 1) || (n == 3)) {
1715 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1716 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1717 A[0] = s->current_picture.motion_val[dir][xy - 1][0];
1718 A[1] = s->current_picture.motion_val[dir][xy - 1][1];
1720 } else { // current block has frame mv and cand. has field MV (so average)
1721 A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
1722 + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
1723 A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
1724 + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
1727 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1733 /* Predict B and C */
1734 B[0] = B[1] = C[0] = C[1] = 0;
1735 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1736 if (!s->first_slice_line) {
1737 if (!v->is_intra[s->mb_x - s->mb_stride]) {
1740 pos_b = s->block_index[n_adj] - 2 * wrap;
1741 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1742 n_adj = (n & 2) | (n & 1);
1744 B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
1745 B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
1746 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1747 B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1748 B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
1751 if (s->mb_width > 1) {
1752 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1755 pos_c = s->block_index[2] - 2 * wrap + 2;
1756 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1759 C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
1760 C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
1761 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1762 C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1763 C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
/* last MB column: take C from above-left instead of above-right */
1765 if (s->mb_x == s->mb_width - 1) {
1766 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1769 pos_c = s->block_index[3] - 2 * wrap - 2;
1770 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1773 C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
1774 C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
1775 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1776 C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1777 C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
/* bottom-half blocks take B/C from the upper blocks of the same MB */
1786 pos_b = s->block_index[1];
1788 B[0] = s->current_picture.motion_val[dir][pos_b][0];
1789 B[1] = s->current_picture.motion_val[dir][pos_b][1];
1790 pos_c = s->block_index[0];
1792 C[0] = s->current_picture.motion_val[dir][pos_c][0];
1793 C[1] = s->current_picture.motion_val[dir][pos_c][1];
1796 total_valid = a_valid + b_valid + c_valid;
1797 // check if predictor A is out of bounds
1798 if (!s->mb_x && !(n == 1 || n == 3)) {
1801 // check if predictor B is out of bounds
1802 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1803 B[0] = B[1] = C[0] = C[1] = 0;
1805 if (!v->blk_mv_type[xy]) {
1806 if (s->mb_width == 1) {
1810 if (total_valid >= 2) {
1811 px = mid_pred(A[0], B[0], C[0]);
1812 py = mid_pred(A[1], B[1], C[1]);
1813 } else if (total_valid) {
1814 if (a_valid) { px = A[0]; py = A[1]; }
1815 if (b_valid) { px = B[0]; py = B[1]; }
1816 if (c_valid) { px = C[0]; py = C[1]; }
/* presumably the opposite-field flag rides in bit 2 of the y component —
 * NOTE(review): confirm against the spec */
1822 field_a = (A[1] & 4) ? 1 : 0;
1826 field_b = (B[1] & 4) ? 1 : 0;
1830 field_c = (C[1] & 4) ? 1 : 0;
1834 num_oppfield = field_a + field_b + field_c;
1835 num_samefield = total_valid - num_oppfield;
1836 if (total_valid == 3) {
1837 if ((num_samefield == 3) || (num_oppfield == 3)) {
1838 px = mid_pred(A[0], B[0], C[0]);
1839 py = mid_pred(A[1], B[1], C[1]);
1840 } else if (num_samefield >= num_oppfield) {
1841 /* take one MV from same field set depending on priority
1842 the check for B may not be necessary */
1843 px = !field_a ? A[0] : B[0];
1844 py = !field_a ? A[1] : B[1];
1846 px = field_a ? A[0] : B[0];
1847 py = field_a ? A[1] : B[1];
1849 } else if (total_valid == 2) {
1850 if (num_samefield >= num_oppfield) {
1851 if (!field_a && a_valid) {
1854 } else if (!field_b && b_valid) {
1857 } else if (c_valid) {
1862 if (field_a && a_valid) {
1865 } else if (field_b && b_valid) {
1868 } else if (c_valid) {
1874 } else if (total_valid == 1) {
1875 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1876 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1881 /* store MV using signed modulus of MV range defined in 4.11 */
1882 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1883 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1884 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1885 s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
1886 s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
1887 s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
1888 s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
1889 s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
1890 s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
1891 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1892 s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
1893 s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
1894 s->mv[dir][n + 1][0] = s->mv[dir][n][0];
1895 s->mv[dir][n + 1][1] = s->mv[dir][n][1];
1899 /** Motion compensation for direct or interpolated blocks in B-frames
/*
 * Performs the backward (next-picture) half of B-frame MC and *averages* it
 * into s->dest (avg_* DSP entry points), so the forward prediction must
 * already be in place.  Steps: derive chroma MVs from the luma MV, clip the
 * source position, fall back to emulated_edge_mc near borders or when range
 * reduction / intensity compensation requires pixel remapping, apply the
 * remapping LUTs, then run luma (mspel or pel) and chroma (4-tap bilinear)
 * averaging.  NOTE(review): partial listing — some lines are missing below.
 */
1901 static void vc1_interp_mc(VC1Context *v)
1903 MpegEncContext *s = &v->s;
1904 H264ChromaContext *h264chroma = &v->h264chroma;
1905 uint8_t *srcY, *srcU, *srcV;
1906 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1908 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1909 int use_ic = v->next_use_ic;
/* nothing to do without a backward reference (unless decoding fields) */
1911 if (!v->field_mode && !v->s.next_picture.f->data[0])
1914 mx = s->mv[1][0][0];
1915 my = s->mv[1][0][1];
/* chroma MV is half the luma MV, rounded away from 3/4-pel positions */
1916 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1917 uvmy = (my + ((my & 3) == 3)) >> 1;
1918 if (v->field_mode) {
1919 if (v->cur_field_type != v->ref_field_type[1])
1920 my = my - 2 + 4 * v->cur_field_type;
1921 uvmy = uvmy - 2 + 4 * v->cur_field_type;
1924 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1925 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1927 srcY = s->next_picture.f->data[0];
1928 srcU = s->next_picture.f->data[1];
1929 srcV = s->next_picture.f->data[2];
1931 src_x = s->mb_x * 16 + (mx >> 2);
1932 src_y = s->mb_y * 16 + (my >> 2);
1933 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1934 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1936 if (v->profile != PROFILE_ADVANCED) {
1937 src_x = av_clip( src_x, -16, s->mb_width * 16);
1938 src_y = av_clip( src_y, -16, s->mb_height * 16);
1939 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1940 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1942 src_x = av_clip( src_x, -17, s->avctx->coded_width);
1943 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1944 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1945 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1948 srcY += src_y * s->linesize + src_x;
1949 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1950 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* reading the bottom field: start one line down in the frame buffers */
1952 if (v->field_mode && v->ref_field_type[1]) {
1953 srcY += s->current_picture_ptr->f->linesize[0];
1954 srcU += s->current_picture_ptr->f->linesize[1];
1955 srcV += s->current_picture_ptr->f->linesize[2];
1958 /* for grayscale we should not try to read from unknown area */
1959 if (s->flags & CODEC_FLAG_GRAY) {
1960 srcU = s->edge_emu_buffer + 18 * s->linesize;
1961 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* border / range-reduction / intensity-compensation cases need a padded
 * copy in edge_emu_buffer before filtering */
1964 if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
1965 || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1966 || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1967 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1969 srcY -= s->mspel * (1 + s->linesize);
1970 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
1971 s->linesize, s->linesize,
1972 17 + s->mspel * 2, 17 + s->mspel * 2,
1973 src_x - s->mspel, src_y - s->mspel,
1974 s->h_edge_pos, v_edge_pos);
1975 srcY = s->edge_emu_buffer;
1976 s->vdsp.emulated_edge_mc(uvbuf, srcU,
1977 s->uvlinesize, s->uvlinesize,
1979 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1980 s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
1981 s->uvlinesize, s->uvlinesize,
1983 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1986 /* if we deal with range reduction we need to scale source blocks */
1987 if (v->rangeredfrm) {
1989 uint8_t *src, *src2;
1992 for (j = 0; j < 17 + s->mspel * 2; j++) {
1993 for (i = 0; i < 17 + s->mspel * 2; i++)
1994 src[i] = ((src[i] - 128) >> 1) + 128;
1999 for (j = 0; j < 9; j++) {
2000 for (i = 0; i < 9; i++) {
2001 src[i] = ((src[i] - 128) >> 1) + 128;
2002 src2[i] = ((src2[i] - 128) >> 1) + 128;
2004 src += s->uvlinesize;
2005 src2 += s->uvlinesize;
/* intensity compensation: remap source pixels through per-field LUTs */
2010 uint8_t (*luty )[256] = v->next_luty;
2011 uint8_t (*lutuv)[256] = v->next_lutuv;
2013 uint8_t *src, *src2;
2016 for (j = 0; j < 17 + s->mspel * 2; j++) {
2017 int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
2018 for (i = 0; i < 17 + s->mspel * 2; i++)
2019 src[i] = luty[f][src[i]];
2024 for (j = 0; j < 9; j++) {
2025 int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
2026 for (i = 0; i < 9; i++) {
2027 src[i] = lutuv[f][src[i]];
2028 src2[i] = lutuv[f][src2[i]];
2030 src += s->uvlinesize;
2031 src2 += s->uvlinesize;
2034 srcY += s->mspel * (1 + s->linesize);
/* luma: 1/4-pel (mspel) path averages four 8x8 quadrants */
2041 dxy = ((my & 3) << 2) | (mx & 3);
2042 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2043 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2044 srcY += s->linesize * 8;
2045 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2046 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2048 dxy = (my & 2) | ((mx & 2) >> 1);
2051 s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2053 s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
2056 if (s->flags & CODEC_FLAG_GRAY) return;
2057 /* Chroma MC always uses qpel bilinear */
2058 uvmx = (uvmx & 3) << 1;
2059 uvmy = (uvmy & 3) << 1;
2061 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2062 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2064 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2065 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/**
 * Scale a co-located MV component by the B-frame temporal position.
 * `n` (assignment not visible in this listing — presumably n = bfrac, minus
 * B_FRACTION_DEN when `inv` requests the complementary (backward) fraction).
 * The fast path is compiled when B_FRACTION_DEN == 256 (shift instead of
 * divide); !qs (half-pel) results are forced to even values.
 * @param value co-located MV component
 * @param bfrac BFRACTION numerator, denominator B_FRACTION_DEN
 * @param inv   non-zero to produce the backward-scaled component
 * @param qs    quarter-sample flag
 */
2069 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2073 #if B_FRACTION_DEN==256
2077 return 2 * ((value * n + 255) >> 9);
2078 return (value * n + 128) >> 8;
2081 n -= B_FRACTION_DEN;
2083 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2084 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2088 /** Reconstruct motion vector for B-frame and do motion compensation
/*
 * Dispatches B-frame MC by mode: BMV_TYPE_INTERPOLATED runs a (not fully
 * visible here) forward+backward path; otherwise a single 1-MV compensation
 * is run, backward when mode == BMV_TYPE_BACKWARD.  NOTE(review): partial
 * listing — the direct/differential handling lines are not visible.
 */
2090 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2091 int direct, int mode)
2098 if (mode == BMV_TYPE_INTERPOLATED) {
2104 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2107 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2108 int direct, int mvtype)
/* Predict the forward (s->mv[0][0]) and backward (s->mv[1][0]) motion
 * vectors of a B-frame macroblock from neighbours A/B/C, apply the decoded
 * differentials dmv_x/dmv_y, and store the result in
 * current_picture.motion_val.  NOTE(review): interior lines are elided in
 * this excerpt, so some declarations (A, B, C, px, py, r_x, r_y, ...) and
 * closing braces are not visible. */
2110 MpegEncContext *s = &v->s;
2111 int xy, wrap, off = 0;
2116 const uint8_t *is_intra = v->mb_type[0];
2120 /* scale MV difference to be quad-pel */
/* Shift by 1 for half-pel streams (quarter_sample == 0), by 0 for quarter-pel. */
2121 dmv_x[0] <<= 1 - s->quarter_sample;
2122 dmv_y[0] <<= 1 - s->quarter_sample;
2123 dmv_x[1] <<= 1 - s->quarter_sample;
2124 dmv_y[1] <<= 1 - s->quarter_sample;
2126 wrap = s->b8_stride;
2127 xy = s->block_index[0];
/* Zero the stored MVs for both directions before prediction. */
2130 s->current_picture.motion_val[0][xy + v->blocks_off][0] =
2131 s->current_picture.motion_val[0][xy + v->blocks_off][1] =
2132 s->current_picture.motion_val[1][xy + v->blocks_off][0] =
2133 s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
2136 if (!v->field_mode) {
/* Direct-mode path (presumably — the guarding condition is elided):
 * derive both MVs by scaling the co-located MV of the next (anchor)
 * picture with bfraction; dir=0 gives forward, dir=1 backward. */
2137 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2138 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2139 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2140 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2142 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2143 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2144 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2145 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2146 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2149 s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2150 s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2151 s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2152 s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
/* ---- forward prediction (also used for interpolated blocks) ---- */
2156 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
/* Neighbours in units of 8x8 blocks: C = left, A = above, B = above-right
 * (above-left when at the last MB column, see 'off'). */
2157 C = s->current_picture.motion_val[0][xy - 2];
2158 A = s->current_picture.motion_val[0][xy - wrap * 2];
2159 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2160 B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
2162 if (!s->mb_x) C[0] = C[1] = 0;
2163 if (!s->first_slice_line) { // predictor A is not out of bounds
2164 if (s->mb_width == 1) {
/* Median of the three neighbour predictors. */
2168 px = mid_pred(A[0], B[0], C[0]);
2169 py = mid_pred(A[1], B[1], C[1]);
2171 } else if (s->mb_x) { // predictor C is not out of bounds
2177 /* Pullback MV as specified in 8.3.5.3.4 */
/* Simple/Main use half-pel units (<<5), Advanced quarter-pel (<<6). */
2180 if (v->profile < PROFILE_ADVANCED) {
2181 qx = (s->mb_x << 5);
2182 qy = (s->mb_y << 5);
2183 X = (s->mb_width << 5) - 4;
2184 Y = (s->mb_height << 5) - 4;
2185 if (qx + px < -28) px = -28 - qx;
2186 if (qy + py < -28) py = -28 - qy;
2187 if (qx + px > X) px = X - qx;
2188 if (qy + py > Y) py = Y - qy;
2190 qx = (s->mb_x << 6);
2191 qy = (s->mb_y << 6);
2192 X = (s->mb_width << 6) - 4;
2193 Y = (s->mb_height << 6) - 4;
2194 if (qx + px < -60) px = -60 - qx;
2195 if (qy + py < -60) py = -60 - qy;
2196 if (qx + px > X) px = X - qx;
2197 if (qy + py > Y) py = Y - qy;
2200 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* Intentionally disabled ('0 &&'): hybrid prediction is dead code here. */
2201 if (0 && !s->first_slice_line && s->mb_x) {
2202 if (is_intra[xy - wrap])
2203 sum = FFABS(px) + FFABS(py);
2205 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2207 if (get_bits1(&s->gb)) {
2215 if (is_intra[xy - 2])
2216 sum = FFABS(px) + FFABS(py);
2218 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2220 if (get_bits1(&s->gb)) {
2230 /* store MV using signed modulus of MV range defined in 4.11 */
/* Wrap (predictor + differential) into [-r, r) with a power-of-two mask. */
2231 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2232 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* ---- backward prediction: mirrors the forward path on motion_val[1] ---- */
2234 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2235 C = s->current_picture.motion_val[1][xy - 2];
2236 A = s->current_picture.motion_val[1][xy - wrap * 2];
2237 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2238 B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
2242 if (!s->first_slice_line) { // predictor A is not out of bounds
2243 if (s->mb_width == 1) {
2247 px = mid_pred(A[0], B[0], C[0]);
2248 py = mid_pred(A[1], B[1], C[1]);
2250 } else if (s->mb_x) { // predictor C is not out of bounds
2256 /* Pullback MV as specified in 8.3.5.3.4 */
2259 if (v->profile < PROFILE_ADVANCED) {
2260 qx = (s->mb_x << 5);
2261 qy = (s->mb_y << 5);
2262 X = (s->mb_width << 5) - 4;
2263 Y = (s->mb_height << 5) - 4;
2264 if (qx + px < -28) px = -28 - qx;
2265 if (qy + py < -28) py = -28 - qy;
2266 if (qx + px > X) px = X - qx;
2267 if (qy + py > Y) py = Y - qy;
2269 qx = (s->mb_x << 6);
2270 qy = (s->mb_y << 6);
2271 X = (s->mb_width << 6) - 4;
2272 Y = (s->mb_height << 6) - 4;
2273 if (qx + px < -60) px = -60 - qx;
2274 if (qy + py < -60) py = -60 - qy;
2275 if (qx + px > X) px = X - qx;
2276 if (qy + py > Y) py = Y - qy;
2279 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* Same intentionally disabled ('0 &&') hybrid-prediction branch as above. */
2280 if (0 && !s->first_slice_line && s->mb_x) {
2281 if (is_intra[xy - wrap])
2282 sum = FFABS(px) + FFABS(py);
2284 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2286 if (get_bits1(&s->gb)) {
2294 if (is_intra[xy - 2])
2295 sum = FFABS(px) + FFABS(py);
2297 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2299 if (get_bits1(&s->gb)) {
2309 /* store MV using signed modulus of MV range defined in 4.11 */
2311 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2312 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
/* Publish the final MVs for use as neighbours by later macroblocks. */
2314 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2315 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2316 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2317 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2320 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
/* B-frame MV prediction for interlaced field pictures.  Handles DIRECT
 * mode inline and delegates forward/backward/interpolated prediction to
 * vc1_pred_mv().  NOTE(review): interior lines are elided in this excerpt. */
2322 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2323 MpegEncContext *s = &v->s;
2324 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2326 if (v->bmvtype == BMV_TYPE_DIRECT) {
2327 int total_opp, k, f;
/* If the co-located MB of the next picture is not intra, scale its MV
 * by bfraction for both directions (dir 0 = forward, 1 = backward). */
2328 if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2329 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2330 v->bfraction, 0, s->quarter_sample);
2331 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2332 v->bfraction, 0, s->quarter_sample);
2333 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2334 v->bfraction, 1, s->quarter_sample);
2335 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2336 v->bfraction, 1, s->quarter_sample);
/* Majority vote over the four 8x8 blocks: reference the opposite field
 * when more than two of them did. */
2338 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2339 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2340 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2341 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2342 f = (total_opp > 2) ? 1 : 0;
/* Co-located MB was intra (presumably — condition elided): zero MVs. */
2344 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2345 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2348 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
/* Propagate the direct-mode MVs and field-select flag to all 4 blocks. */
2349 for (k = 0; k < 4; k++) {
2350 s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2351 s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2352 s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2353 s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2354 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2355 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* Interpolated: predict both directions with their own dmv/pred_flag. */
2359 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2360 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2361 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2364 if (dir) { // backward
2365 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
/* Also refresh the unused direction's predictor once per MB. */
2366 if (n == 3 || mv1) {
2367 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2370 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2371 if (n == 3 || mv1) {
2372 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2377 /** Get predicted DC value for I-frames only
2378 * prediction dir: left=0, top=1
2379 * @param s MpegEncContext
2380 * @param overlap flag indicating that overlap filtering is used
2381 * @param pq integer part of picture quantizer
2382 * @param[in] n block index in the current MB
2383 * @param dc_val_ptr Pointer to DC predictor
2384 * @param dir_ptr Prediction direction for use in AC prediction
/* NOTE(review): interior lines (declaration of dc_val, the reads of 'c',
 * several else branches and the return) are elided in this excerpt. */
2386 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2387 int16_t **dc_val_ptr, int *dir_ptr)
2389 int a, b, c, wrap, pred, scale;
/* dcpred[scale] is the default DC predictor used at picture borders;
 * index 0 is unused (-1 sentinel). */
2391 static const uint16_t dcpred[32] = {
2392 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2393 114, 102, 93, 85, 79, 73, 68, 64,
2394 60, 57, 54, 51, 49, 47, 45, 43,
2395 41, 39, 38, 37, 35, 34, 33
2398 /* find prediction - wmv3_dc_scale always used here in fact */
/* Blocks 0-3 are luma, 4-5 chroma. */
2399 if (n < 4) scale = s->y_dc_scale;
2400 else scale = s->c_dc_scale;
2402 wrap = s->block_wrap[n];
2403 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour DC, a = top neighbour DC. */
2409 b = dc_val[ - 1 - wrap];
2410 a = dc_val[ - wrap];
2412 if (pq < 9 || !overlap) {
2413 /* Set outer values */
/* Out-of-picture neighbours fall back to the dcpred default. */
2414 if (s->first_slice_line && (n != 2 && n != 3))
2415 b = a = dcpred[scale];
2416 if (s->mb_x == 0 && (n != 1 && n != 3))
2417 b = c = dcpred[scale];
2419 /* Set outer values */
2420 if (s->first_slice_line && (n != 2 && n != 3))
2422 if (s->mb_x == 0 && (n != 1 && n != 3))
/* Choose prediction direction by comparing neighbour gradients. */
2426 if (abs(a - b) <= abs(b - c)) {
2428 *dir_ptr = 1; // left
2431 *dir_ptr = 0; // top
2434 /* update predictor */
2435 *dc_val_ptr = &dc_val[0];
2440 /** Get predicted DC value
2441 * prediction dir: left=0, top=1
2442 * @param s MpegEncContext
2443 * @param overlap flag indicating that overlap filtering is used
2444 * @param pq integer part of picture quantizer
2445 * @param[in] n block index in the current MB
2446 * @param a_avail flag indicating top block availability
2447 * @param c_avail flag indicating left block availability
2448 * @param dc_val_ptr Pointer to DC predictor
2449 * @param dir_ptr Prediction direction for use in AC prediction
/* NOTE(review): interior lines (declarations of dc_val/q1/q2/dqscale_index,
 * the read of 'c', some branches and the return) are elided in this excerpt. */
2451 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2452 int a_avail, int c_avail,
2453 int16_t **dc_val_ptr, int *dir_ptr)
2455 int a, b, c, wrap, pred;
2457 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2461 wrap = s->block_wrap[n];
2462 dc_val = s->dc_val[0] + s->block_index[n];
/* b = top-left neighbour DC, a = top neighbour DC. */
2468 b = dc_val[ - 1 - wrap];
2469 a = dc_val[ - wrap];
2470 /* scale predictors if needed */
2471 q1 = s->current_picture.qscale_table[mb_pos];
2472 dqscale_index = s->y_dc_scale_table[q1] - 1;
2473 if (dqscale_index < 0)
/* Rescale each neighbour's DC from its MB quantizer to the current one;
 * the *dqscale + 0x20000 >> 18 pattern is fixed-point division. */
2475 if (c_avail && (n != 1 && n != 3)) {
2476 q2 = s->current_picture.qscale_table[mb_pos - 1];
2478 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2480 if (a_avail && (n != 2 && n != 3)) {
2481 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2483 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2485 if (a_avail && c_avail && (n != 3)) {
2490 off -= s->mb_stride;
2491 q2 = s->current_picture.qscale_table[off];
2493 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
/* Pick the predictor: gradient test when both neighbours exist,
 * otherwise whichever single neighbour is available. */
2496 if (a_avail && c_avail) {
2497 if (abs(a - b) <= abs(b - c)) {
2499 *dir_ptr = 1; // left
2502 *dir_ptr = 0; // top
2504 } else if (a_avail) {
2506 *dir_ptr = 0; // top
2507 } else if (c_avail) {
2509 *dir_ptr = 1; // left
2512 *dir_ptr = 1; // left
2515 /* update predictor */
2516 *dc_val_ptr = &dc_val[0];
2520 /** @} */ // Block group
2523 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2524 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
/* Predict the coded-block flag of block n from its left (a), top-left (b)
 * and top (c) neighbours; returns the prediction and hands back a pointer
 * for storing the actual flag.  NOTE(review): the prediction computation
 * and return are elided in this excerpt. */
2528 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2529 uint8_t **coded_block_ptr)
2531 int xy, wrap, pred, a, b, c;
2533 xy = s->block_index[n];
2534 wrap = s->b8_stride;
2539 a = s->coded_block[xy - 1 ];
2540 b = s->coded_block[xy - 1 - wrap];
2541 c = s->coded_block[xy - wrap];
/* Caller writes the decoded flag through this pointer. */
2550 *coded_block_ptr = &s->coded_block[xy];
2556 * Decode one AC coefficient
2557 * @param v The VC1 context
2558 * @param last Last coefficient
2559 * @param skip How much zero coefficients to skip
2560 * @param value Decoded AC coefficient value
2561 * @param codingset set of VLC to decode data
/* NOTE(review): interior lines (escape-mode branches, sign handling and the
 * final writes to *last/*skip/*value) are elided in this excerpt. */
2564 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2565 int *value, int codingset)
2567 GetBitContext *gb = &v->s.gb;
2568 int index, escape, run = 0, level = 0, lst = 0;
2570 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
/* Non-escape symbol: run/level come straight from the decode tables. */
2571 if (index != ff_vc1_ac_sizes[codingset] - 1) {
2572 run = vc1_index_decode_table[codingset][index][0];
2573 level = vc1_index_decode_table[codingset][index][1];
/* get_bits_left < 0 forces 'last' so a truncated stream terminates. */
2574 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
/* Escape symbol: mode 0/1/2 selected by a 2-1-0 prefix code. */
2578 escape = decode210(gb);
2580 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2581 run = vc1_index_decode_table[codingset][index][0];
2582 level = vc1_index_decode_table[codingset][index][1];
2583 lst = index >= vc1_last_decode_table[codingset];
/* Escape mode 1: level is corrected by a per-run delta... */
2586 level += vc1_last_delta_level_table[codingset][run];
2588 level += vc1_delta_level_table[codingset][run];
/* ...escape mode 2: run is corrected by a per-level delta instead. */
2591 run += vc1_last_delta_run_table[codingset][level] + 1;
2593 run += vc1_delta_run_table[codingset][level] + 1;
/* Escape mode 3: fully explicit coding; field widths are decoded once
 * per picture and cached in esc3_level_length / esc3_run_length. */
2599 lst = get_bits1(gb);
2600 if (v->s.esc3_level_length == 0) {
2601 if (v->pq < 8 || v->dquantfrm) { // table 59
2602 v->s.esc3_level_length = get_bits(gb, 3);
2603 if (!v->s.esc3_level_length)
2604 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2605 } else { // table 60
2606 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2608 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2610 run = get_bits(gb, v->s.esc3_run_length);
2611 sign = get_bits1(gb);
2612 level = get_bits(gb, v->s.esc3_level_length);
2623 /** Decode intra block in intra frames - should be faster than decode_intra_block
2624 * @param v VC1Context
2625 * @param block block to decode
2626 * @param[in] n subblock index
2627 * @param coded are AC coeffs present or not
2628 * @param codingset set of VLC to decode data
/* NOTE(review): interior lines (declarations of dcdiff/i/k/scale/dc_val,
 * sign handling, coded/non-coded branching and returns) are elided here. */
2630 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2631 int coded, int codingset)
2633 GetBitContext *gb = &v->s.gb;
2634 MpegEncContext *s = &v->s;
2635 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2638 int16_t *ac_val, *ac_val2;
2641 /* Get DC differential */
/* Luma (n < 4, presumably — condition elided) and chroma use separate VLCs. */
2643 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2645 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2648 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape: DC differential coded explicitly; width depends on pq. */
2652 if (dcdiff == 119 /* ESC index value */) {
2653 /* TODO: Optimize */
2654 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2655 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2656 else dcdiff = get_bits(gb, 8);
/* Low quantizers refine the differential with extra bits. */
2659 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2660 else if (v->pq == 2)
2661 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* Add the predicted DC; dc_pred_dir is set for AC prediction below. */
2668 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2671 /* Store the quantized DC coeff, used for prediction */
2673 block[0] = dcdiff * s->y_dc_scale;
2675 block[0] = dcdiff * s->c_dc_scale;
/* ---- coded path: decode AC run/level pairs ---- */
2686 int last = 0, skip, value;
2687 const uint8_t *zz_table;
2691 scale = v->pq * 2 + v->halfpq;
/* Zigzag scan chosen by AC prediction direction (2 = top, 3 = left,
 * 1 = normal). */
2695 zz_table = v->zz_8x8[2];
2697 zz_table = v->zz_8x8[3];
2699 zz_table = v->zz_8x8[1];
/* 16 int16 per block: [0..7] left column, [8..15] top row predictors. */
2701 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2703 if (dc_pred_dir) // left
2706 ac_val -= 16 * s->block_wrap[n];
2709 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2713 block[zz_table[i++]] = value;
2716 /* apply AC prediction if needed */
2718 if (dc_pred_dir) { // left
2719 for (k = 1; k < 8; k++)
2720 block[k << v->left_blk_sh] += ac_val[k];
2722 for (k = 1; k < 8; k++)
2723 block[k << v->top_blk_sh] += ac_val[k + 8];
2726 /* save AC coeffs for further prediction */
2727 for (k = 1; k < 8; k++) {
2728 ac_val2[k] = block[k << v->left_blk_sh];
2729 ac_val2[k + 8] = block[k << v->top_blk_sh];
2732 /* scale AC coeffs */
2733 for (k = 1; k < 64; k++)
/* Non-uniform quantizer adds +/- pq toward larger magnitude. */
2737 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2740 if (s->ac_pred) i = 63;
/* ---- non-coded path: predict AC from the neighbour only ---- */
2746 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2750 scale = v->pq * 2 + v->halfpq;
2751 memset(ac_val2, 0, 16 * 2);
2752 if (dc_pred_dir) { // left
2755 memcpy(ac_val2, ac_val, 8 * 2);
2757 ac_val -= 16 * s->block_wrap[n];
2759 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2762 /* apply AC prediction if needed */
2764 if (dc_pred_dir) { //left
2765 for (k = 1; k < 8; k++) {
2766 block[k << v->left_blk_sh] = ac_val[k] * scale;
2767 if (!v->pquantizer && block[k << v->left_blk_sh])
2768 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2771 for (k = 1; k < 8; k++) {
2772 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2773 if (!v->pquantizer && block[k << v->top_blk_sh])
2774 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2780 s->block_last_index[n] = i;
2785 /** Decode intra block in intra frames - should be faster than decode_intra_block
2786 * @param v VC1Context
2787 * @param block block to decode
2788 * @param[in] n subblock number
2789 * @param coded are AC coeffs present or not
2790 * @param codingset set of VLC to decode data
2791 * @param mquant quantizer value for this macroblock
/* Advanced-profile variant: per-MB quantizer (mquant), neighbour
 * availability flags, and quantizer-rescaled AC prediction.
 * NOTE(review): interior lines are elided in this excerpt. */
2793 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2794 int coded, int codingset, int mquant)
2796 GetBitContext *gb = &v->s.gb;
2797 MpegEncContext *s = &v->s;
2798 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2801 int16_t *ac_val, *ac_val2;
2803 int a_avail = v->a_avail, c_avail = v->c_avail;
2804 int use_pred = s->ac_pred;
2807 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2809 /* Get DC differential */
2811 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2813 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2816 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape: explicit DC differential, width keyed on mquant (not pq here). */
2820 if (dcdiff == 119 /* ESC index value */) {
2821 /* TODO: Optimize */
2822 if (mquant == 1) dcdiff = get_bits(gb, 10);
2823 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2824 else dcdiff = get_bits(gb, 8);
2827 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2828 else if (mquant == 2)
2829 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2836 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2839 /* Store the quantized DC coeff, used for prediction */
2841 block[0] = dcdiff * s->y_dc_scale;
2843 block[0] = dcdiff * s->c_dc_scale;
2849 /* check if AC is needed at all */
2850 if (!a_avail && !c_avail)
2852 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
/* halfpq only applies when the block quantizer equals the picture one. */
2855 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2857 if (dc_pred_dir) // left
2860 ac_val -= 16 * s->block_wrap[n];
/* q1 = current MB quantizer, q2 = quantizer of the predicting neighbour
 * (left or top); used below to rescale predicted AC coefficients. */
2862 q1 = s->current_picture.qscale_table[mb_pos];
2863 if ( dc_pred_dir && c_avail && mb_pos)
2864 q2 = s->current_picture.qscale_table[mb_pos - 1];
2865 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2866 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2867 if ( dc_pred_dir && n == 1)
2869 if (!dc_pred_dir && n == 2)
/* ---- coded path ---- */
2875 int last = 0, skip, value;
2876 const uint8_t *zz_table;
/* Interlaced frames use the interlaced zigzag unless AC prediction
 * selects a directional scan. */
2880 if (!use_pred && v->fcm == ILACE_FRAME) {
2881 zz_table = v->zzi_8x8;
2883 if (!dc_pred_dir) // top
2884 zz_table = v->zz_8x8[2];
2886 zz_table = v->zz_8x8[3];
2889 if (v->fcm != ILACE_FRAME)
2890 zz_table = v->zz_8x8[1];
2892 zz_table = v->zzi_8x8;
2896 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2900 block[zz_table[i++]] = value;
2903 /* apply AC prediction if needed */
2905 /* scale predictors if needed*/
/* Neighbour coded at a different quantizer: rescale its AC values via
 * the ff_vc1_dqscale fixed-point table. */
2906 if (q2 && q1 != q2) {
2907 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2908 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2911 return AVERROR_INVALIDDATA;
2912 if (dc_pred_dir) { // left
2913 for (k = 1; k < 8; k++)
2914 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2916 for (k = 1; k < 8; k++)
2917 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2920 if (dc_pred_dir) { //left
2921 for (k = 1; k < 8; k++)
2922 block[k << v->left_blk_sh] += ac_val[k];
2924 for (k = 1; k < 8; k++)
2925 block[k << v->top_blk_sh] += ac_val[k + 8];
2929 /* save AC coeffs for further prediction */
2930 for (k = 1; k < 8; k++) {
2931 ac_val2[k ] = block[k << v->left_blk_sh];
2932 ac_val2[k + 8] = block[k << v->top_blk_sh];
2935 /* scale AC coeffs */
2936 for (k = 1; k < 64; k++)
2940 block[k] += (block[k] < 0) ? -mquant : mquant;
2943 if (use_pred) i = 63;
2944 } else { // no AC coeffs
/* ---- non-coded path: copy + rescale the neighbour's AC predictors ---- */
2947 memset(ac_val2, 0, 16 * 2);
2948 if (dc_pred_dir) { // left
2950 memcpy(ac_val2, ac_val, 8 * 2);
2951 if (q2 && q1 != q2) {
2952 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2953 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2955 return AVERROR_INVALIDDATA;
2956 for (k = 1; k < 8; k++)
2957 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2962 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2963 if (q2 && q1 != q2) {
2964 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2965 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2967 return AVERROR_INVALIDDATA;
2968 for (k = 1; k < 8; k++)
2969 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2974 /* apply AC prediction if needed */
2976 if (dc_pred_dir) { // left
2977 for (k = 1; k < 8; k++) {
2978 block[k << v->left_blk_sh] = ac_val2[k] * scale;
2979 if (!v->pquantizer && block[k << v->left_blk_sh])
2980 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2983 for (k = 1; k < 8; k++) {
2984 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2985 if (!v->pquantizer && block[k << v->top_blk_sh])
2986 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2992 s->block_last_index[n] = i;
2997 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2998 * @param v VC1Context
2999 * @param block block to decode
3000 * @param[in] n subblock index
3001 * @param coded are AC coeffs present or not
3002 * @param mquant block quantizer
3003 * @param codingset set of VLC to decode data
/* NOTE(review): interior lines are elided in this excerpt (declarations,
 * sign handling, several branch bodies and the returns). */
3005 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
3006 int coded, int mquant, int codingset)
3008 GetBitContext *gb = &v->s.gb;
3009 MpegEncContext *s = &v->s;
3010 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3013 int16_t *ac_val, *ac_val2;
3015 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3016 int a_avail = v->a_avail, c_avail = v->c_avail;
3017 int use_pred = s->ac_pred;
3021 s->dsp.clear_block(block);
3023 /* XXX: Guard against dumb values of mquant */
/* Clamp to [0, 31] (note: values below 1 become 0, not 1). */
3024 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3026 /* Set DC scale - y and c use the same */
3027 s->y_dc_scale = s->y_dc_scale_table[mquant];
3028 s->c_dc_scale = s->c_dc_scale_table[mquant];
3030 /* Get DC differential */
3032 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3034 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3037 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape: explicit DC differential, width keyed on mquant. */
3041 if (dcdiff == 119 /* ESC index value */) {
3042 /* TODO: Optimize */
3043 if (mquant == 1) dcdiff = get_bits(gb, 10);
3044 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3045 else dcdiff = get_bits(gb, 8);
3048 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3049 else if (mquant == 2)
3050 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3057 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3060 /* Store the quantized DC coeff, used for prediction */
3063 block[0] = dcdiff * s->y_dc_scale;
3065 block[0] = dcdiff * s->c_dc_scale;
3071 /* check if AC is needed at all and adjust direction if needed */
/* Force the prediction direction toward an available neighbour. */
3072 if (!a_avail) dc_pred_dir = 1;
3073 if (!c_avail) dc_pred_dir = 0;
3074 if (!a_avail && !c_avail) use_pred = 0;
3075 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3078 scale = mquant * 2 + v->halfpq;
3080 if (dc_pred_dir) //left
3083 ac_val -= 16 * s->block_wrap[n];
/* q1/q2: quantizers of current MB and of the predicting neighbour. */
3085 q1 = s->current_picture.qscale_table[mb_pos];
3086 if (dc_pred_dir && c_avail && mb_pos)
3087 q2 = s->current_picture.qscale_table[mb_pos - 1];
3088 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3089 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3090 if ( dc_pred_dir && n == 1)
3092 if (!dc_pred_dir && n == 2)
3094 if (n == 3) q2 = q1;
/* ---- coded path ---- */
3097 int last = 0, skip, value;
3101 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
/* Scan order: progressive, directional (AC-pred interlaced frame), or
 * plain interlaced zigzag. */
3105 if (v->fcm == PROGRESSIVE)
3106 block[v->zz_8x8[0][i++]] = value;
3108 if (use_pred && (v->fcm == ILACE_FRAME)) {
3109 if (!dc_pred_dir) // top
3110 block[v->zz_8x8[2][i++]] = value;
3112 block[v->zz_8x8[3][i++]] = value;
3114 block[v->zzi_8x8[i++]] = value;
3119 /* apply AC prediction if needed */
3121 /* scale predictors if needed*/
3122 if (q2 && q1 != q2) {
3123 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3124 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3127 return AVERROR_INVALIDDATA;
3128 if (dc_pred_dir) { // left
3129 for (k = 1; k < 8; k++)
3130 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3132 for (k = 1; k < 8; k++)
3133 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3136 if (dc_pred_dir) { // left
3137 for (k = 1; k < 8; k++)
3138 block[k << v->left_blk_sh] += ac_val[k];
3140 for (k = 1; k < 8; k++)
3141 block[k << v->top_blk_sh] += ac_val[k + 8];
3145 /* save AC coeffs for further prediction */
3146 for (k = 1; k < 8; k++) {
3147 ac_val2[k ] = block[k << v->left_blk_sh];
3148 ac_val2[k + 8] = block[k << v->top_blk_sh];
3151 /* scale AC coeffs */
3152 for (k = 1; k < 64; k++)
3156 block[k] += (block[k] < 0) ? -mquant : mquant;
3159 if (use_pred) i = 63;
3160 } else { // no AC coeffs
/* ---- non-coded path: copy + rescale neighbour AC predictors ---- */
3163 memset(ac_val2, 0, 16 * 2);
3164 if (dc_pred_dir) { // left
3166 memcpy(ac_val2, ac_val, 8 * 2);
3167 if (q2 && q1 != q2) {
3168 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3169 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3171 return AVERROR_INVALIDDATA;
3172 for (k = 1; k < 8; k++)
3173 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3178 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3179 if (q2 && q1 != q2) {
3180 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3181 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3183 return AVERROR_INVALIDDATA;
3184 for (k = 1; k < 8; k++)
3185 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3190 /* apply AC prediction if needed */
3192 if (dc_pred_dir) { // left
3193 for (k = 1; k < 8; k++) {
3194 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3195 if (!v->pquantizer && block[k << v->left_blk_sh])
3196 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3199 for (k = 1; k < 8; k++) {
3200 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3201 if (!v->pquantizer && block[k << v->top_blk_sh])
3202 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3208 s->block_last_index[n] = i;
/* Decode one inter-coded (residual) block of a P/B picture: read the
 * block-level transform type (8x8 / two 8x4 / two 4x8 / four 4x4), decode
 * the AC coefficients for each sub-block, inverse-transform and add the
 * result to dst.  Writes the chosen transform type into *ttmb_out.
 * NOTE(review): interior lines are elided in this excerpt (declarations of
 * i/j/pat/subblkpat, several condition lines and returns). */
3215 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3216 int mquant, int ttmb, int first_block,
3217 uint8_t *dst, int linesize, int skip_block,
3220 MpegEncContext *s = &v->s;
3221 GetBitContext *gb = &s->gb;
3224 int scale, off, idx, last, skip, value;
/* Low 3 bits of ttmb carry the per-block transform type. */
3225 int ttblk = ttmb & 7;
3228 s->dsp.clear_block(block);
3231 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3233 if (ttblk == TT_4X4) {
3234 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3236 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3237 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3238 || (!v->res_rtm_flag && !first_block))) {
3239 subblkpat = decode012(gb);
3241 subblkpat ^= 3; // swap decoded pattern bits
3242 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3244 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3247 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3249 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3250 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3251 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3254 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3255 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* ---- TT_8X8: single transform over the whole block ---- */
3264 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3269 idx = v->zz_8x8[0][i++];
3271 idx = v->zzi_8x8[i++];
3272 block[idx] = value * scale;
3274 block[idx] += (block[idx] < 0) ? -mquant : mquant;
/* DC-only blocks use the cheaper _dc inverse transform. */
3278 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3280 v->vc1dsp.vc1_inv_trans_8x8(block);
3281 s->dsp.add_pixels_clamped(block, dst, linesize);
/* ---- TT_4X4: up to four 4x4 sub-blocks, presence from subblkpat ---- */
3286 pat = ~subblkpat & 0xF;
3287 for (j = 0; j < 4; j++) {
3288 last = subblkpat & (1 << (3 - j));
3290 off = (j & 1) * 4 + (j & 2) * 16;
3292 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3297 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3299 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3300 block[idx + off] = value * scale;
3302 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3304 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3306 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3308 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* ---- TT_8X4: two horizontal halves ---- */
3313 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3314 for (j = 0; j < 2; j++) {
3315 last = subblkpat & (1 << (1 - j));
3319 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3324 idx = v->zz_8x4[i++] + off;
3326 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3327 block[idx] = value * scale;
3329 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3331 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3333 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3335 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* ---- TT_4X8: two vertical halves ---- */
3340 pat = ~(subblkpat * 5) & 0xF;
3341 for (j = 0; j < 2; j++) {
3342 last = subblkpat & (1 << (1 - j));
3346 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3351 idx = v->zz_4x8[i++] + off;
3353 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3354 block[idx] = value * scale;
3356 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3358 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3360 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3362 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* Record this block's transform type for the loop filter (4 bits/block). */
3368 *ttmb_out |= ttblk << (n * 4);
3372 /** @} */ // Macroblock group
/* Per-index bit widths and value offsets used when decoding motion-vector
 * differentials (companions to offset_table1/2 near the top of the file). */
3374 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3375 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/* Apply the in-loop vertical deblocking filter for one of the six blocks
 * (0-3 luma, 4-5 chroma) of the macroblock one column to the left
 * (s->mb_x - s->mb_stride offsets).  Filters the edge against the block
 * below, then the internal horizontal edge when the transform was 4x4/8x4.
 * NOTE(review): interior lines are elided in this excerpt. */
3377 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3379 MpegEncContext *s = &v->s;
/* cbp / is_intra words pack 4 bits per block; shift selects this block. */
3380 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3381 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3382 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3383 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3384 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3387 if (block_num > 3) {
3388 dst = s->dest[block_num - 3];
3390 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3392 if (s->mb_y != s->end_mb_y || block_num < 2) {
/* Locate the block directly below and its MV for the edge test. */
3396 if (block_num > 3) {
3397 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3398 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3399 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3400 mv_stride = s->mb_stride;
3402 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3403 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3404 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3405 : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3406 mv_stride = s->b8_stride;
3407 mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* Filter the full 8-pixel edge when either side is intra or MVs differ;
 * otherwise filter only the coded 4-pixel halves. */
3410 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3411 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3412 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq)
3414 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3416 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3419 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3421 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* Internal horizontal edge: only for transforms with a mid-row boundary. */
3426 dst -= 4 * linesize;
3427 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3428 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3429 idx = (block_cbp | (block_cbp >> 2)) & 3;
3431 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3434 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3436 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* Horizontal counterpart of vc1_apply_p_v_loop_filter: filters vertical
 * edges of one block of the macroblock up-and-left of the current one
 * (s->mb_x - 1 - s->mb_stride), against its right neighbour, then the
 * internal vertical edge for 4x4/4x8 transforms.
 * NOTE(review): interior lines are elided in this excerpt. */
3441 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3443 MpegEncContext *s = &v->s;
3444 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3445 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3446 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3447 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3448 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3451 if (block_num > 3) {
3452 dst = s->dest[block_num - 3] - 8 * linesize;
3454 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3457 if (s->mb_x != s->mb_width || !(block_num & 5)) {
/* Locate the block to the right and its MV for the edge test. */
3460 if (block_num > 3) {
3461 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3462 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3463 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3465 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3466 : (mb_cbp >> ((block_num + 1) * 4));
3467 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3468 : (mb_is_intra >> ((block_num + 1) * 4));
3469 mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
/* Full 8-pixel edge when either side is intra or MVs differ. */
3471 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3472 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3474 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3476 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3479 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3481 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* Internal vertical edge: only for transforms with a mid-column boundary. */
3487 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3488 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3489 idx = (block_cbp | (block_cbp >> 1)) & 5;
3491 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3494 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3496 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/**
 * Run the in-loop deblocking filter over the current P-frame macroblock:
 * vertical edges first for all six blocks, then horizontal edges.
 * NOTE(review): the listing elides some original lines (braces etc.).
 */
3501 static void vc1_apply_p_loop_filter(VC1Context *v)
3503 MpegEncContext *s = &v->s;
3506 for (i = 0; i < 6; i++) {
3507 vc1_apply_p_v_loop_filter(v, i);
3510 /* V always precedes H, therefore we run H one MB before V;
3511 * at the end of a row, we catch up to complete the row */
3513 for (i = 0; i < 6; i++) {
3514 vc1_apply_p_h_loop_filter(v, i);
/* last MB of the row: advance and run H once more to finish the row */
3516 if (s->mb_x == s->mb_width - 1) {
3518 ff_update_block_index(s);
3519 for (i = 0; i < 6; i++) {
3520 vc1_apply_p_h_loop_filter(v, i);
3526 /** Decode one P-frame MB
/**
 * Decode one macroblock of a progressive P frame.
 * Reads the 4MV and skip flags (from the bitstream when raw, otherwise
 * from the pre-decoded bitplanes), then handles the four cases:
 * 1MV coded / 1MV skipped / 4MV coded / 4MV skipped.  Per-block CBP,
 * transform type and intra flags are accumulated into v->cbp, v->ttblk
 * and v->is_intra for the subsequent in-loop deblocking pass.
 * NOTE(review): this listing elides original lines (braces, else arms),
 * so the control flow shown here is incomplete.
 */
3528 static int vc1_decode_p_mb(VC1Context *v)
3530 MpegEncContext *s = &v->s;
3531 GetBitContext *gb = &s->gb;
3533 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3534 int cbp; /* cbp decoding stuff */
3535 int mqdiff, mquant; /* MB quantization */
3536 int ttmb = v->ttfrm; /* MB Transform type */
3538 int mb_has_coeffs = 1; /* last_flag */
3539 int dmv_x, dmv_y; /* Differential MV components */
3540 int index, index1; /* LUT indexes */
3541 int val, sign; /* temp values */
3542 int first_block = 1;
3544 int skipped, fourmv;
3545 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3547 mquant = v->pq; /* lossy initialization */
/* MVTYPEMB and SKIPMB flags: read raw from the bitstream or use bitplane */
3549 if (v->mv_type_is_raw)
3550 fourmv = get_bits1(gb);
3552 fourmv = v->mv_type_mb_plane[mb_pos];
3554 skipped = get_bits1(gb);
3556 skipped = v->s.mbskip_table[mb_pos];
3558 if (!fourmv) { /* 1MV mode */
/* --- 1MV, not skipped: one MV for the whole MB --- */
3560 GET_MVDATA(dmv_x, dmv_y);
3563 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3564 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3566 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3567 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3569 /* FIXME Set DC val for inter block ? */
3570 if (s->mb_intra && !mb_has_coeffs) {
3572 s->ac_pred = get_bits1(gb);
3574 } else if (mb_has_coeffs) {
3576 s->ac_pred = get_bits1(gb);
3577 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3583 s->current_picture.qscale_table[mb_pos] = mquant;
3585 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3586 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3587 VC1_TTMB_VLC_BITS, 2);
3588 if (!s->mb_intra) vc1_mc_1mv(v, 0);
/* decode / reconstruct each of the 6 blocks (4 luma + 2 chroma) */
3590 for (i = 0; i < 6; i++) {
3591 s->dc_val[0][s->block_index[i]] = 0;
3593 val = ((cbp >> (5 - i)) & 1);
3594 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3595 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3597 /* check if prediction blocks A and C are available */
3598 v->a_avail = v->c_avail = 0;
3599 if (i == 2 || i == 3 || !s->first_slice_line)
3600 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3601 if (i == 1 || i == 3 || s->mb_x)
3602 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3604 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3605 (i & 4) ? v->codingset2 : v->codingset);
3606 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3608 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3610 for (j = 0; j < 64; j++)
3611 s->block[i][j] <<= 1;
3612 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* overlap smoothing is only applied at high quantizers */
3613 if (v->pq >= 9 && v->overlap) {
3615 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3617 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3619 block_cbp |= 0xF << (i << 2);
3620 block_intra |= 1 << i;
3622 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3623 s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3624 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3625 block_cbp |= pat << (i << 2);
3626 if (!v->ttmbf && ttmb < 8)
/* --- 1MV, skipped: predicted MV only, no residual --- */
3633 for (i = 0; i < 6; i++) {
3634 v->mb_type[0][s->block_index[i]] = 0;
3635 s->dc_val[0][s->block_index[i]] = 0;
3637 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3638 s->current_picture.qscale_table[mb_pos] = 0;
3639 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3642 } else { // 4MV mode
3643 if (!skipped /* unskipped MB */) {
3644 int intra_count = 0, coded_inter = 0;
3645 int is_intra[6], is_coded[6];
/* first pass over the luma blocks: MVs, per-block intra/coded flags */
3647 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3648 for (i = 0; i < 6; i++) {
3649 val = ((cbp >> (5 - i)) & 1);
3650 s->dc_val[0][s->block_index[i]] = 0;
3657 GET_MVDATA(dmv_x, dmv_y);
3659 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3661 vc1_mc_4mv_luma(v, i, 0, 0);
3662 intra_count += s->mb_intra;
3663 is_intra[i] = s->mb_intra;
3664 is_coded[i] = mb_has_coeffs;
/* chroma blocks are intra iff at least 3 of the 4 luma blocks are */
3667 is_intra[i] = (intra_count >= 3);
3671 vc1_mc_4mv_chroma(v, 0);
3672 v->mb_type[0][s->block_index[i]] = is_intra[i];
3674 coded_inter = !is_intra[i] & is_coded[i];
3676 // if there are no coded blocks then don't do anything more
3678 if (!intra_count && !coded_inter)
3681 s->current_picture.qscale_table[mb_pos] = mquant;
3682 /* test if block is intra and has pred */
3685 for (i = 0; i < 6; i++)
3687 if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3688 || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3694 s->ac_pred = get_bits1(gb);
3698 if (!v->ttmbf && coded_inter)
3699 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* second pass: reconstruct all 6 blocks */
3700 for (i = 0; i < 6; i++) {
3702 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3703 s->mb_intra = is_intra[i];
3705 /* check if prediction blocks A and C are available */
3706 v->a_avail = v->c_avail = 0;
3707 if (i == 2 || i == 3 || !s->first_slice_line)
3708 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3709 if (i == 1 || i == 3 || s->mb_x)
3710 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3712 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3713 (i & 4) ? v->codingset2 : v->codingset);
3714 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3716 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3718 for (j = 0; j < 64; j++)
3719 s->block[i][j] <<= 1;
3720 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3721 (i & 4) ? s->uvlinesize : s->linesize);
3722 if (v->pq >= 9 && v->overlap) {
3724 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3726 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3728 block_cbp |= 0xF << (i << 2);
3729 block_intra |= 1 << i;
3730 } else if (is_coded[i]) {
3731 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3732 first_block, s->dest[dst_idx] + off,
3733 (i & 4) ? s->uvlinesize : s->linesize,
3734 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3736 block_cbp |= pat << (i << 2);
3737 if (!v->ttmbf && ttmb < 8)
3742 } else { // skipped MB
/* --- 4MV, skipped: predict all four luma MVs, no residual --- */
3744 s->current_picture.qscale_table[mb_pos] = 0;
3745 for (i = 0; i < 6; i++) {
3746 v->mb_type[0][s->block_index[i]] = 0;
3747 s->dc_val[0][s->block_index[i]] = 0;
3749 for (i = 0; i < 4; i++) {
3750 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3751 vc1_mc_4mv_luma(v, i, 0, 0);
3753 vc1_mc_4mv_chroma(v, 0);
3754 s->current_picture.qscale_table[mb_pos] = 0;
/* stash per-MB flags for the loop-filter pass */
3758 v->cbp[s->mb_x] = block_cbp;
3759 v->ttblk[s->mb_x] = block_tt;
3760 v->is_intra[s->mb_x] = block_intra;
3765 /* Decode one macroblock in an interlaced-frame P picture */
/**
 * Decode one macroblock of an interlaced-frame (frame-coded) P picture.
 * Reads the MB mode VLC (4MV-aware table when FOURMVSWITCH is set),
 * records the per-block MV type (frame vs. field MVs), then decodes
 * intra, inter (1MV/2MV-field/4MV/4MV-field) or skipped macroblocks.
 * NOTE(review): this listing elides original lines (braces, else arms),
 * so the control flow shown here is incomplete.
 */
3767 static int vc1_decode_p_mb_intfr(VC1Context *v)
3769 MpegEncContext *s = &v->s;
3770 GetBitContext *gb = &s->gb;
3772 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3773 int cbp = 0; /* cbp decoding stuff */
3774 int mqdiff, mquant; /* MB quantization */
3775 int ttmb = v->ttfrm; /* MB Transform type */
3777 int mb_has_coeffs = 1; /* last_flag */
3778 int dmv_x, dmv_y; /* Differential MV components */
3779 int val; /* temp value */
3780 int first_block = 1;
3782 int skipped, fourmv = 0, twomv = 0;
3783 int block_cbp = 0, pat, block_tt = 0;
3784 int idx_mbmode = 0, mvbp;
3785 int stride_y, fieldtx;
3787 mquant = v->pq; /* Lossy initialization */
3790 skipped = get_bits1(gb);
3792 skipped = v->s.mbskip_table[mb_pos];
/* MBMODE uses a different VLC table depending on FOURMVSWITCH */
3794 if (v->fourmvswitch)
3795 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3797 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3798 switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3799 /* store the motion vector type in a flag (useful later) */
3800 case MV_PMODE_INTFR_4MV:
3802 v->blk_mv_type[s->block_index[0]] = 0;
3803 v->blk_mv_type[s->block_index[1]] = 0;
3804 v->blk_mv_type[s->block_index[2]] = 0;
3805 v->blk_mv_type[s->block_index[3]] = 0;
3807 case MV_PMODE_INTFR_4MV_FIELD:
3809 v->blk_mv_type[s->block_index[0]] = 1;
3810 v->blk_mv_type[s->block_index[1]] = 1;
3811 v->blk_mv_type[s->block_index[2]] = 1;
3812 v->blk_mv_type[s->block_index[3]] = 1;
3814 case MV_PMODE_INTFR_2MV_FIELD:
3816 v->blk_mv_type[s->block_index[0]] = 1;
3817 v->blk_mv_type[s->block_index[1]] = 1;
3818 v->blk_mv_type[s->block_index[2]] = 1;
3819 v->blk_mv_type[s->block_index[3]] = 1;
3821 case MV_PMODE_INTFR_1MV:
3822 v->blk_mv_type[s->block_index[0]] = 0;
3823 v->blk_mv_type[s->block_index[1]] = 0;
3824 v->blk_mv_type[s->block_index[2]] = 0;
3825 v->blk_mv_type[s->block_index[3]] = 0;
/* --- intra MB --- */
3828 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3829 for (i = 0; i < 4; i++) {
3830 s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3831 s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3833 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3834 s->mb_intra = v->is_intra[s->mb_x] = 1;
3835 for (i = 0; i < 6; i++)
3836 v->mb_type[0][s->block_index[i]] = 1;
3837 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3838 mb_has_coeffs = get_bits1(gb);
3840 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3841 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3843 s->current_picture.qscale_table[mb_pos] = mquant;
3844 /* Set DC scale - y and c use the same (not sure if necessary here) */
3845 s->y_dc_scale = s->y_dc_scale_table[mquant];
3846 s->c_dc_scale = s->c_dc_scale_table[mquant];
3848 for (i = 0; i < 6; i++) {
3849 s->dc_val[0][s->block_index[i]] = 0;
3851 val = ((cbp >> (5 - i)) & 1);
3852 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3853 v->a_avail = v->c_avail = 0;
3854 if (i == 2 || i == 3 || !s->first_slice_line)
3855 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3856 if (i == 1 || i == 3 || s->mb_x)
3857 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3859 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3860 (i & 4) ? v->codingset2 : v->codingset);
3861 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3862 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* FIELDTX doubles the luma stride and interleaves the block rows */
3864 stride_y = s->linesize << fieldtx;
3865 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3867 stride_y = s->uvlinesize;
3870 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
/* --- inter MB --- */
3874 } else { // inter MB
3875 mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3877 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3878 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3879 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3881 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3882 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3883 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3886 s->mb_intra = v->is_intra[s->mb_x] = 0;
3887 for (i = 0; i < 6; i++)
3888 v->mb_type[0][s->block_index[i]] = 0;
3889 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3890 /* for all motion vector read MVDATA and motion compensate each block */
3894 for (i = 0; i < 6; i++) {
3897 val = ((mvbp >> (3 - i)) & 1);
3899 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3901 vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3902 vc1_mc_4mv_luma(v, i, 0, 0);
3903 } else if (i == 4) {
3904 vc1_mc_4mv_chroma4(v, 0, 0, 0);
/* 2MV-field case: one MV per field, applied to the two blocks of the field */
3911 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3913 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3914 vc1_mc_4mv_luma(v, 0, 0, 0);
3915 vc1_mc_4mv_luma(v, 1, 0, 0);
3918 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3920 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3921 vc1_mc_4mv_luma(v, 2, 0, 0);
3922 vc1_mc_4mv_luma(v, 3, 0, 0);
3923 vc1_mc_4mv_chroma4(v, 0, 0, 0);
/* 1MV case: MVBP comes from the mode table entry */
3925 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3928 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3930 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3934 GET_MQUANT(); // p. 227
3935 s->current_picture.qscale_table[mb_pos] = mquant;
3936 if (!v->ttmbf && cbp)
3937 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3938 for (i = 0; i < 6; i++) {
3939 s->dc_val[0][s->block_index[i]] = 0;
3941 val = ((cbp >> (5 - i)) & 1);
3943 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3945 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3947 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3948 first_block, s->dest[dst_idx] + off,
3949 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3950 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3951 block_cbp |= pat << (i << 2);
3952 if (!v->ttmbf && ttmb < 8)
/* --- skipped MB: zero everything, predict the MV --- */
3959 s->mb_intra = v->is_intra[s->mb_x] = 0;
3960 for (i = 0; i < 6; i++) {
3961 v->mb_type[0][s->block_index[i]] = 0;
3962 s->dc_val[0][s->block_index[i]] = 0;
3964 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3965 s->current_picture.qscale_table[mb_pos] = 0;
3966 v->blk_mv_type[s->block_index[0]] = 0;
3967 v->blk_mv_type[s->block_index[1]] = 0;
3968 v->blk_mv_type[s->block_index[2]] = 0;
3969 v->blk_mv_type[s->block_index[3]] = 0;
3970 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
/* keep a copy of the intra flags for the next MB row */
3973 if (s->mb_x == s->mb_width - 1)
3974 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
/**
 * Decode one macroblock of an interlaced-field (field-coded) P picture.
 * MBMODE index <= 1 selects an intra MB; otherwise 1MV (index <= 5) or
 * 4MV decoding is performed with field MV prediction flags.
 * NOTE(review): this listing elides original lines (braces, else arms),
 * so the control flow shown here is incomplete.
 */
3978 static int vc1_decode_p_mb_intfi(VC1Context *v)
3980 MpegEncContext *s = &v->s;
3981 GetBitContext *gb = &s->gb;
3983 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3984 int cbp = 0; /* cbp decoding stuff */
3985 int mqdiff, mquant; /* MB quantization */
3986 int ttmb = v->ttfrm; /* MB Transform type */
3988 int mb_has_coeffs = 1; /* last_flag */
3989 int dmv_x, dmv_y; /* Differential MV components */
3990 int val; /* temp values */
3991 int first_block = 1;
3994 int block_cbp = 0, pat, block_tt = 0;
3997 mquant = v->pq; /* Lossy initialization */
3999 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
/* --- intra MB --- */
4000 if (idx_mbmode <= 1) { // intra MB
4001 s->mb_intra = v->is_intra[s->mb_x] = 1;
4002 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4003 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4004 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4006 s->current_picture.qscale_table[mb_pos] = mquant;
4007 /* Set DC scale - y and c use the same (not sure if necessary here) */
4008 s->y_dc_scale = s->y_dc_scale_table[mquant];
4009 s->c_dc_scale = s->c_dc_scale_table[mquant];
4010 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4011 mb_has_coeffs = idx_mbmode & 1;
4013 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4015 for (i = 0; i < 6; i++) {
4016 s->dc_val[0][s->block_index[i]] = 0;
4017 v->mb_type[0][s->block_index[i]] = 1;
4019 val = ((cbp >> (5 - i)) & 1);
4020 v->a_avail = v->c_avail = 0;
4021 if (i == 2 || i == 3 || !s->first_slice_line)
4022 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4023 if (i == 1 || i == 3 || s->mb_x)
4024 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4026 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4027 (i & 4) ? v->codingset2 : v->codingset);
4028 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4030 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4031 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4032 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4033 // TODO: loop filter
/* --- inter MB: 1MV or 4MV --- */
4036 s->mb_intra = v->is_intra[s->mb_x] = 0;
4037 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4038 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4039 if (idx_mbmode <= 5) { // 1-MV
4040 dmv_x = dmv_y = pred_flag = 0;
4041 if (idx_mbmode & 1) {
4042 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4044 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4046 mb_has_coeffs = !(idx_mbmode & 2);
/* 4MV: per-block pattern tells which blocks carry an MV differential */
4048 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4049 for (i = 0; i < 6; i++) {
4051 dmv_x = dmv_y = pred_flag = 0;
4052 val = ((v->fourmvbp >> (3 - i)) & 1);
4054 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4056 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4057 vc1_mc_4mv_luma(v, i, 0, 0);
4059 vc1_mc_4mv_chroma(v, 0);
4061 mb_has_coeffs = idx_mbmode & 1;
4064 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4068 s->current_picture.qscale_table[mb_pos] = mquant;
4069 if (!v->ttmbf && cbp) {
4070 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4073 for (i = 0; i < 6; i++) {
4074 s->dc_val[0][s->block_index[i]] = 0;
4076 val = ((cbp >> (5 - i)) & 1);
4077 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4079 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4080 first_block, s->dest[dst_idx] + off,
4081 (i & 4) ? s->uvlinesize : s->linesize,
4082 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4084 block_cbp |= pat << (i << 2);
4085 if (!v->ttmbf && ttmb < 8) ttmb = -1;
/* keep a copy of the intra flags for the next MB row */
4090 if (s->mb_x == s->mb_width - 1)
4091 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4095 /** Decode one B-frame MB (in Main profile)
/**
 * Decode one macroblock of a progressive B frame (Main profile).
 * Reads the direct and skip flags (bitstream or bitplane), decodes the
 * B MV type (forward/backward/interpolated/direct), performs bidirectional
 * motion compensation via vc1_b_mc(), then reconstructs the residual
 * blocks.  B frames are not used as references, so no per-MB cbp/ttblk
 * state is stored for a loop-filter pass (block_tt pointer is NULL).
 * NOTE(review): this listing elides original lines (braces, else arms),
 * so the control flow shown here is incomplete.
 */
4097 static void vc1_decode_b_mb(VC1Context *v)
4099 MpegEncContext *s = &v->s;
4100 GetBitContext *gb = &s->gb;
4102 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4103 int cbp = 0; /* cbp decoding stuff */
4104 int mqdiff, mquant; /* MB quantization */
4105 int ttmb = v->ttfrm; /* MB Transform type */
4106 int mb_has_coeffs = 0; /* last_flag */
4107 int index, index1; /* LUT indexes */
4108 int val, sign; /* temp values */
4109 int first_block = 1;
4111 int skipped, direct;
4112 int dmv_x[2], dmv_y[2];
4113 int bmvtype = BMV_TYPE_BACKWARD;
4115 mquant = v->pq; /* lossy initialization */
/* DIRECTMB and SKIPMB: raw bits or pre-decoded bitplanes */
4119 direct = get_bits1(gb);
4121 direct = v->direct_mb_plane[mb_pos];
4123 skipped = get_bits1(gb);
4125 skipped = v->s.mbskip_table[mb_pos];
4127 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4128 for (i = 0; i < 6; i++) {
4129 v->mb_type[0][s->block_index[i]] = 0;
4130 s->dc_val[0][s->block_index[i]] = 0;
4132 s->current_picture.qscale_table[mb_pos] = 0;
/* non-direct MB: read one MVDATA, it applies to both directions */
4136 GET_MVDATA(dmv_x[0], dmv_y[0]);
4137 dmv_x[1] = dmv_x[0];
4138 dmv_y[1] = dmv_y[0];
4140 if (skipped || !s->mb_intra) {
4141 bmvtype = decode012(gb);
/* BFRACTION >= 1/2 flips the meaning of the 0/1 codes */
4144 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4147 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4150 bmvtype = BMV_TYPE_INTERPOLATED;
4151 dmv_x[0] = dmv_y[0] = 0;
4155 for (i = 0; i < 6; i++)
4156 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* skipped MB: direct MBs become interpolated, then predict + MC */
4160 bmvtype = BMV_TYPE_INTERPOLATED;
4161 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4162 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4166 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4169 s->current_picture.qscale_table[mb_pos] = mquant;
4171 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4172 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4173 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4174 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4176 if (!mb_has_coeffs && !s->mb_intra) {
4177 /* no coded blocks - effectively skipped */
4178 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4179 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4182 if (s->mb_intra && !mb_has_coeffs) {
4184 s->current_picture.qscale_table[mb_pos] = mquant;
4185 s->ac_pred = get_bits1(gb);
4187 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* interpolated: a second MVDATA is read for the backward MV */
4189 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4190 GET_MVDATA(dmv_x[0], dmv_y[0]);
4191 if (!mb_has_coeffs) {
4192 /* interpolated skipped block */
4193 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4194 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4198 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4200 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4203 s->ac_pred = get_bits1(gb);
4204 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4206 s->current_picture.qscale_table[mb_pos] = mquant;
4207 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4208 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* reconstruct the 6 residual blocks */
4212 for (i = 0; i < 6; i++) {
4213 s->dc_val[0][s->block_index[i]] = 0;
4215 val = ((cbp >> (5 - i)) & 1);
4216 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4217 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4219 /* check if prediction blocks A and C are available */
4220 v->a_avail = v->c_avail = 0;
4221 if (i == 2 || i == 3 || !s->first_slice_line)
4222 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4223 if (i == 1 || i == 3 || s->mb_x)
4224 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4226 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4227 (i & 4) ? v->codingset2 : v->codingset);
4228 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4230 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4232 for (j = 0; j < 64; j++)
4233 s->block[i][j] <<= 1;
4234 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4236 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4237 first_block, s->dest[dst_idx] + off,
4238 (i & 4) ? s->uvlinesize : s->linesize,
4239 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4240 if (!v->ttmbf && ttmb < 8)
4247 /** Decode one B-frame MB (in interlaced field B picture)
/**
 * Decode one macroblock of an interlaced-field B picture.
 * MBMODE index <= 1 selects an intra MB; otherwise 1MV (index <= 5,
 * with forward/backward/direct/interpolated BMV type) or 4MV (forward
 * or backward only) decoding is performed, followed by residual
 * reconstruction.
 * NOTE(review): this listing elides original lines (braces, else arms),
 * so the control flow shown here is incomplete.
 */
4249 static void vc1_decode_b_mb_intfi(VC1Context *v)
4251 MpegEncContext *s = &v->s;
4252 GetBitContext *gb = &s->gb;
4254 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4255 int cbp = 0; /* cbp decoding stuff */
4256 int mqdiff, mquant; /* MB quantization */
4257 int ttmb = v->ttfrm; /* MB Transform type */
4258 int mb_has_coeffs = 0; /* last_flag */
4259 int val; /* temp value */
4260 int first_block = 1;
4263 int dmv_x[2], dmv_y[2], pred_flag[2];
4264 int bmvtype = BMV_TYPE_BACKWARD;
4265 int idx_mbmode, interpmvp;
4267 mquant = v->pq; /* Lossy initialization */
4270 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
/* --- intra MB --- */
4271 if (idx_mbmode <= 1) { // intra MB
4272 s->mb_intra = v->is_intra[s->mb_x] = 1;
4273 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4274 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4275 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4277 s->current_picture.qscale_table[mb_pos] = mquant;
4278 /* Set DC scale - y and c use the same (not sure if necessary here) */
4279 s->y_dc_scale = s->y_dc_scale_table[mquant];
4280 s->c_dc_scale = s->c_dc_scale_table[mquant];
4281 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4282 mb_has_coeffs = idx_mbmode & 1;
4284 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4286 for (i = 0; i < 6; i++) {
4287 s->dc_val[0][s->block_index[i]] = 0;
4289 val = ((cbp >> (5 - i)) & 1);
4290 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4291 v->a_avail = v->c_avail = 0;
4292 if (i == 2 || i == 3 || !s->first_slice_line)
4293 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4294 if (i == 1 || i == 3 || s->mb_x)
4295 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4297 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4298 (i & 4) ? v->codingset2 : v->codingset);
4299 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4301 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4303 for (j = 0; j < 64; j++)
4304 s->block[i][j] <<= 1;
4305 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4306 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4307 // TODO: yet to perform loop filter
/* --- inter MB --- */
4310 s->mb_intra = v->is_intra[s->mb_x] = 0;
4311 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4312 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
/* FORWARDMB flag: raw bit or pre-decoded bitplane */
4314 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4316 fwd = v->forward_mb_plane[mb_pos];
4317 if (idx_mbmode <= 5) { // 1-MV
4318 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4319 pred_flag[0] = pred_flag[1] = 0;
4321 bmvtype = BMV_TYPE_FORWARD;
4323 bmvtype = decode012(gb);
4326 bmvtype = BMV_TYPE_BACKWARD;
4329 bmvtype = BMV_TYPE_DIRECT;
4332 bmvtype = BMV_TYPE_INTERPOLATED;
4333 interpmvp = get_bits1(gb);
4336 v->bmvtype = bmvtype;
/* index [bmvtype == BMV_TYPE_BACKWARD] selects the forward (0) or
 * backward (1) MV slot */
4337 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4338 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4340 if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4341 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4343 if (bmvtype == BMV_TYPE_DIRECT) {
4344 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4345 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4347 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4348 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4349 mb_has_coeffs = !(idx_mbmode & 2);
/* 4MV: only forward or backward, one MV per coded luma block */
4352 bmvtype = BMV_TYPE_FORWARD;
4353 v->bmvtype = bmvtype;
4354 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4355 for (i = 0; i < 6; i++) {
4357 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4358 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4359 val = ((v->fourmvbp >> (3 - i)) & 1);
4361 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4362 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4363 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4365 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4366 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
4368 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4370 mb_has_coeffs = idx_mbmode & 1;
4373 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4377 s->current_picture.qscale_table[mb_pos] = mquant;
4378 if (!v->ttmbf && cbp) {
4379 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* reconstruct the 6 residual blocks */
4382 for (i = 0; i < 6; i++) {
4383 s->dc_val[0][s->block_index[i]] = 0;
4385 val = ((cbp >> (5 - i)) & 1);
4386 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4388 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4389 first_block, s->dest[dst_idx] + off,
4390 (i & 4) ? s->uvlinesize : s->linesize,
4391 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4392 if (!v->ttmbf && ttmb < 8)
4400 /** Decode one B-frame MB (in interlaced frame B picture)
4402 static int vc1_decode_b_mb_intfr(VC1Context *v)
4404 MpegEncContext *s = &v->s;
4405 GetBitContext *gb = &s->gb;
4407 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4408 int cbp = 0; /* cbp decoding stuff */
4409 int mqdiff, mquant; /* MB quantization */
4410 int ttmb = v->ttfrm; /* MB Transform type */
4411 int mvsw = 0; /* motion vector switch */
4412 int mb_has_coeffs = 1; /* last_flag */
4413 int dmv_x, dmv_y; /* Differential MV components */
4414 int val; /* temp value */
4415 int first_block = 1;
4417 int skipped, direct, twomv = 0;
4418 int block_cbp = 0, pat, block_tt = 0;
4419 int idx_mbmode = 0, mvbp;
4420 int stride_y, fieldtx;
4421 int bmvtype = BMV_TYPE_BACKWARD;
4424 mquant = v->pq; /* Lossy initialization */
4427 skipped = get_bits1(gb);
4429 skipped = v->s.mbskip_table[mb_pos];
4432 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
4433 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
4435 v->blk_mv_type[s->block_index[0]] = 1;
4436 v->blk_mv_type[s->block_index[1]] = 1;
4437 v->blk_mv_type[s->block_index[2]] = 1;
4438 v->blk_mv_type[s->block_index[3]] = 1;
4440 v->blk_mv_type[s->block_index[0]] = 0;
4441 v->blk_mv_type[s->block_index[1]] = 0;
4442 v->blk_mv_type[s->block_index[2]] = 0;
4443 v->blk_mv_type[s->block_index[3]] = 0;
4448 direct = get_bits1(gb);
4450 direct = v->direct_mb_plane[mb_pos];
4453 s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
4454 s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
4455 s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
4456 s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
4459 s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
4460 s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
4461 s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
4462 s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
4464 for (i = 1; i < 4; i += 2) {
4465 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
4466 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
4467 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
4468 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
4471 for (i = 1; i < 4; i++) {
4472 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
4473 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
4474 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
4475 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
4480 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
4481 for (i = 0; i < 4; i++) {
4482 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
4483 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
4484 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
4485 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
4487 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4488 s->mb_intra = v->is_intra[s->mb_x] = 1;
4489 for (i = 0; i < 6; i++)
4490 v->mb_type[0][s->block_index[i]] = 1;
4491 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
4492 mb_has_coeffs = get_bits1(gb);
4494 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4495 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4497 s->current_picture.qscale_table[mb_pos] = mquant;
4498 /* Set DC scale - y and c use the same (not sure if necessary here) */
4499 s->y_dc_scale = s->y_dc_scale_table[mquant];
4500 s->c_dc_scale = s->c_dc_scale_table[mquant];
4502 for (i = 0; i < 6; i++) {
4503 s->dc_val[0][s->block_index[i]] = 0;
4505 val = ((cbp >> (5 - i)) & 1);
4506 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4507 v->a_avail = v->c_avail = 0;
4508 if (i == 2 || i == 3 || !s->first_slice_line)
4509 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4510 if (i == 1 || i == 3 || s->mb_x)
4511 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4513 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4514 (i & 4) ? v->codingset2 : v->codingset);
4515 if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
4517 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4519 stride_y = s->linesize << fieldtx;
4520 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4522 stride_y = s->uvlinesize;
4525 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
4528 s->mb_intra = v->is_intra[s->mb_x] = 0;
4530 if (skipped || !s->mb_intra) {
4531 bmvtype = decode012(gb);
4534 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4537 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4540 bmvtype = BMV_TYPE_INTERPOLATED;
4544 if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
4545 mvsw = get_bits1(gb);
4548 if (!skipped) { // inter MB
4549 mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
4551 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4553 if (bmvtype == BMV_TYPE_INTERPOLATED & twomv) {
4554 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4555 } else if (bmvtype == BMV_TYPE_INTERPOLATED | twomv) {
4556 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
4560 for (i = 0; i < 6; i++)
4561 v->mb_type[0][s->block_index[i]] = 0;
4562 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
4563 /* for all motion vector read MVDATA and motion compensate each block */
4567 for (i = 0; i < 4; i++) {
4568 vc1_mc_4mv_luma(v, i, 0, 0);
4569 vc1_mc_4mv_luma(v, i, 1, 1);
4571 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4572 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4577 } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
4579 for (i = 0; i < 4; i++) {
4582 val = ((mvbp >> (3 - i)) & 1);
4584 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4586 vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4587 vc1_mc_4mv_luma(v, j, dir, dir);
4588 vc1_mc_4mv_luma(v, j+1, dir, dir);
4591 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4592 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4593 } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
4597 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4599 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4604 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4606 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4609 dir = bmvtype == BMV_TYPE_BACKWARD;
4616 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4617 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4621 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4622 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
4625 for (i = 0; i < 2; i++) {
4626 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4627 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4628 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4629 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4632 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4633 vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4636 vc1_mc_4mv_luma(v, 0, dir, 0);
4637 vc1_mc_4mv_luma(v, 1, dir, 0);
4638 vc1_mc_4mv_luma(v, 2, dir2, 0);
4639 vc1_mc_4mv_luma(v, 3, dir2, 0);
4640 vc1_mc_4mv_chroma4(v, dir, dir2, 0);
4642 dir = bmvtype == BMV_TYPE_BACKWARD;
4644 mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
4647 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4649 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4650 v->blk_mv_type[s->block_index[0]] = 1;
4651 v->blk_mv_type[s->block_index[1]] = 1;
4652 v->blk_mv_type[s->block_index[2]] = 1;
4653 v->blk_mv_type[s->block_index[3]] = 1;
4654 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4655 for (i = 0; i < 2; i++) {
4656 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4657 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4663 GET_MQUANT(); // p. 227
4664 s->current_picture.qscale_table[mb_pos] = mquant;
4665 if (!v->ttmbf && cbp)
4666 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4667 for (i = 0; i < 6; i++) {
4668 s->dc_val[0][s->block_index[i]] = 0;
4670 val = ((cbp >> (5 - i)) & 1);
4672 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4674 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4676 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4677 first_block, s->dest[dst_idx] + off,
4678 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4679 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4680 block_cbp |= pat << (i << 2);
4681 if (!v->ttmbf && ttmb < 8)
4689 for (i = 0; i < 6; i++) {
4690 v->mb_type[0][s->block_index[i]] = 0;
4691 s->dc_val[0][s->block_index[i]] = 0;
4693 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
4694 s->current_picture.qscale_table[mb_pos] = 0;
4695 v->blk_mv_type[s->block_index[0]] = 0;
4696 v->blk_mv_type[s->block_index[1]] = 0;
4697 v->blk_mv_type[s->block_index[2]] = 0;
4698 v->blk_mv_type[s->block_index[3]] = 0;
4701 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4702 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4703 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4705 dir = bmvtype == BMV_TYPE_BACKWARD;
4706 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4711 for (i = 0; i < 2; i++) {
4712 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4713 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4714 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4715 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4718 v->blk_mv_type[s->block_index[0]] = 1;
4719 v->blk_mv_type[s->block_index[1]] = 1;
4720 v->blk_mv_type[s->block_index[2]] = 1;
4721 v->blk_mv_type[s->block_index[3]] = 1;
4722 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4723 for (i = 0; i < 2; i++) {
4724 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4725 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4732 if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
4737 if (s->mb_x == s->mb_width - 1)
4738 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4739 v->cbp[s->mb_x] = block_cbp;
4740 v->ttblk[s->mb_x] = block_tt;
/**
 * Decode blocks of I-frame (simple/main profile).
 *
 * Selects the intra/inter AC coding sets from the picture-level table
 * indices, then walks every macroblock: reads CBP and the AC-prediction
 * flag, decodes and inverse-transforms the six 8x8 blocks, applies the
 * overlap smoothing filter (pq >= 9) and the optional in-loop filter.
 *
 * NOTE(review): this extraction is missing interior lines (braces,
 * case labels, declarations); the code tokens below are kept as-is.
 */
static void vc1_decode_i_blocks(VC1Context *v)
    MpegEncContext *s = &v->s;
    /* select codingmode used for VLC tables selection */
    switch (v->y_ac_table_index) {
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        v->codingset = CS_HIGH_MOT_INTRA;
        v->codingset = CS_MID_RATE_INTRA;
    switch (v->c_ac_table_index) {
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        v->codingset2 = CS_HIGH_MOT_INTER;
        v->codingset2 = CS_MID_RATE_INTER;
    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[v->pq];
    s->c_dc_scale = s->c_dc_scale_table[v->pq];
    s->mb_x = s->mb_y = 0;
    s->first_slice_line = 1;
    for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
        init_block_index(v);
        for (; s->mb_x < v->end_mb_x; s->mb_x++) {
            ff_update_block_index(s);
            /* four 8x8 luma destinations, then the two chroma planes */
            dst[0] = s->dest[0];
            dst[1] = dst[0] + 8;
            dst[2] = s->dest[0] + s->linesize * 8;
            dst[3] = dst[2] + 8;
            dst[4] = s->dest[1];
            dst[5] = s->dest[2];
            s->dsp.clear_blocks(s->block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_width;
            s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
            s->current_picture.qscale_table[mb_pos] = v->pq;
            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
            // do actual MB decoding and displaying
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            v->s.ac_pred = get_bits1(&v->s.gb);
            for (k = 0; k < 6; k++) {
                /* coded bit for this block, MSB-first in CBP */
                val = ((cbp >> (5 - k)) & 1);
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    cbp |= val << (5 - k);
                vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
                /* blocks 4/5 are chroma; skipped in grayscale mode */
                if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
                v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
                if (v->pq >= 9 && v->overlap) {
                    for (j = 0; j < 64; j++)
                        s->block[k][j] <<= 1;
                    s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
                    for (j = 0; j < 64; j++)
                        s->block[k][j] = (s->block[k][j] - 64) << 1;
                    s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
            /* overlap smoothing across block edges, only for pq >= 9 */
            if (v->pq >= 9 && v->overlap) {
                v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
                    v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
                /* vertical overlap needs the row above, so skip the first slice line */
                if (!s->first_slice_line) {
                    v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
                    v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
                        v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
                v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
            if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
            /* bail out on bitstream overconsumption and mark the slice damaged */
            if (get_bits_count(&s->gb) > v->bits) {
                ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    /* This is intentionally mb_height and not end_mb_y - unlike in advanced
     * profile, these only differ when decoding MSS2 rectangles. */
    ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
/**
 * Decode blocks of I-frame for advanced profile.
 *
 * Like vc1_decode_i_blocks() but supports slices, per-MB quantizer,
 * raw bitplanes (FIELDTX/ACPRED/OVERFLAGS) and conditional overlap.
 * Output of the delayed overlap/loop-filter pipeline is emitted one MB
 * row late via vc1_put_signed_blocks_clamped().
 *
 * NOTE(review): this extraction is missing interior lines (braces,
 * case labels, declarations); the code tokens below are kept as-is.
 */
static void vc1_decode_i_blocks_adv(VC1Context *v)
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    /* select codingmode used for VLC tables selection */
    switch (v->y_ac_table_index) {
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        v->codingset = CS_HIGH_MOT_INTRA;
        v->codingset = CS_MID_RATE_INTRA;
    switch (v->c_ac_table_index) {
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        v->codingset2 = CS_HIGH_MOT_INTER;
        v->codingset2 = CS_MID_RATE_INTER;
    s->mb_x = s->mb_y = 0;
    s->first_slice_line = 1;
    s->mb_y = s->start_mb_y;
    /* not the first slice: clear the coded-block predictors of the row above */
    if (s->start_mb_y) {
        init_block_index(v);
        memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
               (1 + s->b8_stride) * sizeof(*s->coded_block));
    for (; s->mb_y < s->end_mb_y; s->mb_y++) {
        init_block_index(v);
        for (;s->mb_x < s->mb_width; s->mb_x++) {
            int16_t (*block)[64] = v->block[v->cur_blk_idx];
            ff_update_block_index(s);
            s->dsp.clear_blocks(block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_stride;
            s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
            // do actual MB decoding and displaying
            /* raw bitplanes are read per MB; coded ones were parsed at picture level */
            if (v->fieldtx_is_raw)
                v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            if ( v->acpred_is_raw)
                v->s.ac_pred = get_bits1(&v->s.gb);
                v->s.ac_pred = v->acpred_plane[mb_pos];
            if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
                v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
            s->current_picture.qscale_table[mb_pos] = mquant;
            /* Set DC scale - y and c use the same */
            s->y_dc_scale = s->y_dc_scale_table[mquant];
            s->c_dc_scale = s->c_dc_scale_table[mquant];
            for (k = 0; k < 6; k++) {
                val = ((cbp >> (5 - k)) & 1);
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    cbp |= val << (5 - k);
                /* availability of top/left neighbours for DC/AC prediction */
                v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
                v->c_avail = !!s->mb_x || (k == 1 || k == 3);
                vc1_decode_i_block_adv(v, block[k], k, val,
                                       (k < 4) ? v->codingset : v->codingset2, mquant);
                if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
                v->vc1dsp.vc1_inv_trans_8x8(block[k]);
            vc1_smooth_overlap_filter_iblk(v);
            vc1_put_signed_blocks_clamped(v);
            if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
            if (get_bits_count(&s->gb) > v->bits) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
            ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
        s->first_slice_line = 0;
    /* raw bottom MB row */
    init_block_index(v);
    for (;s->mb_x < s->mb_width; s->mb_x++) {
        ff_update_block_index(s);
        vc1_put_signed_blocks_clamped(v);
        if (v->s.loop_filter)
            vc1_loop_filter_iblk_delayed(v, v->pq);
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/**
 * Decode all macroblocks of a P-frame.
 *
 * Dispatches per MB to the progressive, interlaced-field or
 * interlaced-frame P-MB decoder according to v->fcm; the in-loop
 * filter is applied one row late (it needs the row below), and is
 * only enabled for progressive content here.
 *
 * NOTE(review): this extraction is missing interior lines (braces,
 * case labels, declarations); the code tokens below are kept as-is.
 */
static void vc1_decode_p_blocks(VC1Context *v)
    MpegEncContext *s = &v->s;
    int apply_loop_filter;
    /* select codingmode used for VLC tables selection */
    switch (v->c_ac_table_index) {
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        v->codingset = CS_HIGH_MOT_INTRA;
        v->codingset = CS_MID_RATE_INTRA;
    switch (v->c_ac_table_index) {
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        v->codingset2 = CS_HIGH_MOT_INTER;
        v->codingset2 = CS_MID_RATE_INTER;
    apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
                        v->fcm == PROGRESSIVE;
    s->first_slice_line = 1;
    memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);
            /* pick the MB decoder matching the frame coding mode */
            if (v->fcm == ILACE_FIELD)
                vc1_decode_p_mb_intfi(v);
            else if (v->fcm == ILACE_FRAME)
                vc1_decode_p_mb_intfr(v);
            else vc1_decode_p_mb(v);
            /* filtering lags one row behind decoding */
            if (s->mb_y != s->start_mb_y && apply_loop_filter)
                vc1_apply_p_loop_filter(v);
            if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                       get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
        /* shift per-row CBP/TT/intra/MV context down for the next row */
        memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
        memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
        memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
        if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    /* filter the last decoded row, which the loop above never reached */
    if (apply_loop_filter) {
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);
            vc1_apply_p_loop_filter(v);
    if (s->end_mb_y >= s->start_mb_y)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/**
 * Decode all macroblocks of a B-frame.
 *
 * Mirrors vc1_decode_p_blocks(): selects the AC coding sets, then
 * dispatches per MB to the field/frame-interlaced or progressive
 * B-MB decoder depending on v->fcm.
 *
 * NOTE(review): this extraction is missing interior lines (braces,
 * case labels, the progressive else-branch); code tokens kept as-is.
 */
static void vc1_decode_b_blocks(VC1Context *v)
    MpegEncContext *s = &v->s;
    /* select codingmode used for VLC tables selection */
    switch (v->c_ac_table_index) {
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        v->codingset = CS_HIGH_MOT_INTRA;
        v->codingset = CS_MID_RATE_INTRA;
    switch (v->c_ac_table_index) {
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        v->codingset2 = CS_HIGH_MOT_INTER;
        v->codingset2 = CS_MID_RATE_INTER;
    s->first_slice_line = 1;
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);
            if (v->fcm == ILACE_FIELD)
                vc1_decode_b_mb_intfi(v);
            else if (v->fcm == ILACE_FRAME)
                vc1_decode_b_mb_intfr(v);
            if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                       get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
            if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/**
 * Handle a skipped P-frame: copy the previous picture verbatim.
 *
 * Requires a valid reference frame; each MB row is copied plane by
 * plane from last_picture and the picture type is forced to P.
 *
 * NOTE(review): closing braces / early-return lines are missing from
 * this extraction; code tokens kept as-is.
 */
static void vc1_decode_skip_blocks(VC1Context *v)
    MpegEncContext *s = &v->s;
    /* no reference frame available: nothing to copy from */
    if (!v->s.last_picture.f->data[0])
    ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
    s->first_slice_line = 1;
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        init_block_index(v);
        ff_update_block_index(s);
        /* copy one 16-pixel-high luma row and the matching 8-high chroma rows */
        memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
        memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
        memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
        ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        s->first_slice_line = 0;
    s->pict_type = AV_PICTURE_TYPE_P;
/**
 * Top-level block-decoding entry point: dispatch on picture type.
 *
 * I-frames go to the simple/main or advanced-profile intra decoder,
 * P-frames to the P decoder (or the skip path for skipped frames),
 * B-frames to the B decoder. The intra-X8 path handles WMV3 x8 frames.
 *
 * NOTE(review): this extraction is missing interior lines (braces,
 * else/break statements, the x8 condition); code tokens kept as-is.
 */
void ff_vc1_decode_blocks(VC1Context *v)
    v->s.esc3_level_length = 0;
    ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
    /* reset the delayed-output block ring used by the advanced I decoder */
    v->left_blk_idx = -1;
    v->topleft_blk_idx = 1;
    switch (v->s.pict_type) {
    case AV_PICTURE_TYPE_I:
        if (v->profile == PROFILE_ADVANCED)
            vc1_decode_i_blocks_adv(v);
            vc1_decode_i_blocks(v);
    case AV_PICTURE_TYPE_P:
        if (v->p_frame_skipped)
            vc1_decode_skip_blocks(v);
            vc1_decode_p_blocks(v);
    case AV_PICTURE_TYPE_B:
        /* BI frames decode as intra; true B frames use the B decoder */
        if (v->profile == PROFILE_ADVANCED)
            vc1_decode_i_blocks_adv(v);
            vc1_decode_i_blocks(v);
            vc1_decode_b_blocks(v);
5218 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5222 * Transform coefficients for both sprites in 16.16 fixed point format,
5223 * in the order they appear in the bitstream:
5225 * rotation 1 (unused)
5227 * rotation 2 (unused)
5234 int effect_type, effect_flag;
5235 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
5236 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
5239 static inline int get_fp_val(GetBitContext* gb)
5241 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
/**
 * Parse one sprite affine-transform record into c[0..6].
 *
 * A 2-bit selector chooses how many coefficients are coded explicitly;
 * c[0]/c[4] are the x/y scale, c[2]/c[5] the x/y translation, c[1]/c[3]
 * rotation terms, c[6] an extra parameter, all in 16.16 fixed point.
 *
 * NOTE(review): case labels, defaults for the uncoded coefficients and
 * closing braces are missing from this extraction; code tokens kept as-is.
 */
static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
    switch (get_bits(gb, 2)) {
        c[2] = get_fp_val(gb);
        c[0] = c[4] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        c[0] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        c[4] = get_fp_val(gb);
        c[0] = get_fp_val(gb);
        c[1] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        c[3] = get_fp_val(gb);
        c[4] = get_fp_val(gb);
    /* translation in y, then the trailing extra coefficient */
    c[5] = get_fp_val(gb);
    c[6] = get_fp_val(gb);
/**
 * Parse sprite transforms and effect parameters from the bitstream
 * into *sd, logging everything at DEBUG level.
 *
 * NOTE(review): this extraction is missing interior lines (braces,
 * case labels, loop bodies); code tokens kept as-is.
 */
static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
    AVCodecContext *avctx = v->s.avctx;
    /* one transform per sprite (two when two_sprites is set) */
    for (sprite = 0; sprite <= v->two_sprites; sprite++) {
        vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
        if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
            avpriv_request_sample(avctx, "Non-zero rotation coefficients");
        av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
        for (i = 0; i < 7; i++)
            av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
                   sd->coefs[sprite][i] / (1<<16),
                   (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
        av_log(avctx, AV_LOG_DEBUG, "\n");
    /* assignment in condition is intentional: store and test effect_type */
    if (sd->effect_type = get_bits_long(gb, 30)) {
        switch (sd->effect_pcount1 = get_bits(gb, 4)) {
            vc1_sprite_parse_transform(gb, sd->effect_params1);
            vc1_sprite_parse_transform(gb, sd->effect_params1);
            vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
            for (i = 0; i < sd->effect_pcount1; i++)
                sd->effect_params1[i] = get_fp_val(gb);
        if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
            // effect 13 is simple alpha blending and matches the opacity above
            av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
            for (i = 0; i < sd->effect_pcount1; i++)
                av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
                       sd->effect_params1[i] / (1 << 16),
                       (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
            av_log(avctx, AV_LOG_DEBUG, "\n");
        sd->effect_pcount2 = get_bits(gb, 16);
        if (sd->effect_pcount2 > 10) {
            av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
        } else if (sd->effect_pcount2) {
            av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
            while (++i < sd->effect_pcount2) {
                sd->effect_params2[i] = get_fp_val(gb);
                av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
                       sd->effect_params2[i] / (1 << 16),
                       (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
            av_log(avctx, AV_LOG_DEBUG, "\n");
    /* assignment in condition is intentional: store and test effect_flag */
    if (sd->effect_flag = get_bits1(gb))
        av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
    /* sanity-check bitstream consumption; WMV3IMAGE carries 64 trailing bits */
    if (get_bits_count(gb) >= gb->size_in_bits +
        (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
        av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
    if (get_bits_count(gb) < gb->size_in_bits - 8)
        av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
/**
 * Render one or two sprites into the output frame.
 *
 * For each output row, horizontally scale the needed source line(s)
 * into cached row buffers (sr_rows, keyed by source line in sr_cache),
 * then vertically interpolate and, for two sprites, alpha-blend.
 *
 * NOTE(review): this extraction is missing interior lines (braces,
 * declarations, else-branches); code tokens kept as-is.
 */
static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
    int i, plane, row, sprite;
    /* which source line each cached scaled row holds; -1 = empty */
    int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
    uint8_t* src_h[2][2];
    int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
    MpegEncContext *s = &v->s;
    for (i = 0; i < 2; i++) {
        /* clamp offsets/steps so sampling never leaves the sprite */
        xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
        xadv[i] = sd->coefs[i][0];
        if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
            xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
        yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
        yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
    alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
    for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
        int width = v->output_width>>!!plane;
        for (row = 0; row < v->output_height>>!!plane; row++) {
            uint8_t *dst = v->sprite_output_frame->data[plane] +
                           v->sprite_output_frame->linesize[plane] * row;
            for (sprite = 0; sprite <= v->two_sprites; sprite++) {
                uint8_t *iplane = s->current_picture.f->data[plane];
                int iline = s->current_picture.f->linesize[plane];
                /* 16.16 source y coordinate: integer line + sub-pel fraction */
                int ycoord = yoff[sprite] + yadv[sprite] * row;
                int yline = ycoord >> 16;
                ysub[sprite] = ycoord & 0xFFFF;
                /* presumably the second sprite sources from last_picture — TODO confirm */
                iplane = s->last_picture.f->data[plane];
                iline = s->last_picture.f->linesize[plane];
                next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
                if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
                    /* unity horizontal scale: read directly from the source plane */
                    src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
                        src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
                    if (sr_cache[sprite][0] != yline) {
                        if (sr_cache[sprite][1] == yline) {
                            /* reuse the previously scaled row instead of redoing it */
                            FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
                            FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
                            v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
                        sr_cache[sprite][0] = yline;
                    if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
                        v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
                                           iplane + next_line, xoff[sprite],
                                           xadv[sprite], width);
                        sr_cache[sprite][1] = yline + 1;
                    src_h[sprite][0] = v->sr_rows[sprite][0];
                    src_h[sprite][1] = v->sr_rows[sprite][1];
            if (!v->two_sprites) {
                    v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
                    memcpy(dst, src_h[0][0], width);
                /* two sprites: pick the blend kernel by which rows need scaling */
                if (ysub[0] && ysub[1]) {
                    v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
                                                       src_h[1][0], src_h[1][1], ysub[1], alpha, width);
                } else if (ysub[0]) {
                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
                                                       src_h[1][0], alpha, width);
                } else if (ysub[1]) {
                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
                                                       src_h[0][0], (1<<16)-1-alpha, width);
                    v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
    for (i = 0; i < 2; i++) {
/**
 * Parse sprite data from the bitstream and render the output frame.
 *
 * Fails when no sprite has been decoded yet; warns (and degrades)
 * when a two-sprite blend lacks its second reference.
 *
 * NOTE(review): declarations, return statements and braces are missing
 * from this extraction; code tokens kept as-is.
 */
static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
    MpegEncContext *s = &v->s;
    AVCodecContext *avctx = s->avctx;
    vc1_parse_sprites(v, gb, &sd);
    if (!s->current_picture.f->data[0]) {
        av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
    if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f->data[0])) {
        av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
    /* (re)allocate the output frame before drawing into it */
    av_frame_unref(v->sprite_output_frame);
    if (ff_get_buffer(avctx, v->sprite_output_frame, 0) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
    vc1_draw_sprites(v, &sd);
/**
 * Flush callback for the sprite (WMV3IMAGE/VC1IMAGE) decoders.
 *
 * NOTE(review): braces and the guard around the loop are missing from
 * this extraction; code tokens kept as-is.
 */
static void vc1_sprite_flush(AVCodecContext *avctx)
    VC1Context *v = avctx->priv_data;
    MpegEncContext *s = &v->s;
    AVFrame *f = s->current_picture.f;
    /* Windows Media Image codecs have a convergence interval of two keyframes.
       Since we can't enforce it, clear to black the missing sprite. This is
       wrong but it looks better than doing nothing. */
    for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
        for (i = 0; i < v->sprite_height>>!!plane; i++)
            /* luma cleared to 0, chroma to neutral 128 */
            memset(f->data[plane] + i * f->linesize[plane],
                   plane ? 128 : 0, f->linesize[plane]);
/* Allocate all per-macroblock/per-block decoder tables (bitplanes, CBP,
 * transform-type, intra flags, luma MVs, MB/block type and field MV info).
 * Returns 0 on success, AVERROR(ENOMEM) if any of the checked allocations
 * failed (the checked ones are freed again before returning).
 * NOTE(review): the visible error path does not free fieldtx_plane,
 * forward_mb_plane, blk_mv_type_base, mv_f_base or mv_f_next_base —
 * presumably ff_vc1_decode_end() releases them; confirm there is no leak
 * when init fails partway. */
5491 av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
5493 MpegEncContext *s = &v->s;
/* mb_height rounded up to an even value — presumably so field pictures
 * (two half-height fields) always fit; TODO confirm. */
5495 int mb_height = FFALIGN(s->mb_height, 2);
5497 /* Allocate mb bitplanes */
5498 v->mv_type_mb_plane = av_malloc (s->mb_stride * mb_height);
5499 v->direct_mb_plane = av_malloc (s->mb_stride * mb_height);
5500 v->forward_mb_plane = av_malloc (s->mb_stride * mb_height);
5501 v->fieldtx_plane = av_mallocz(s->mb_stride * mb_height);
5502 v->acpred_plane = av_malloc (s->mb_stride * mb_height);
5503 v->over_flags_plane = av_malloc (s->mb_stride * mb_height);
/* One row of blocks plus 2 spare entries. */
5505 v->n_allocated_blks = s->mb_width + 2;
5506 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
/* The *_base buffers hold two MB rows; the working pointers (cbp, ttblk,
 * is_intra, luma_mv) point at the second row so the previous row stays
 * addressable at negative offsets. */
5507 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5508 v->cbp = v->cbp_base + s->mb_stride;
5509 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5510 v->ttblk = v->ttblk_base + s->mb_stride;
5511 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5512 v->is_intra = v->is_intra_base + s->mb_stride;
5513 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5514 v->luma_mv = v->luma_mv_base + s->mb_stride;
5516 /* allocate block type info in that way so it could be used with s->block_index[] */
5517 v->mb_type_base = av_malloc(s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5518 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5519 v->mb_type[1] = v->mb_type_base + s->b8_stride * (mb_height * 2 + 1) + s->mb_stride + 1;
5520 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (mb_height + 1);
5522 /* allocate memory to store block level MV info */
5523 v->blk_mv_type_base = av_mallocz( s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5524 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
/* mv_f / mv_f_next each hold two fields' worth of per-block data
 * (index [0]/[1]); layout mirrors mb_type_base above. */
5525 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5526 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5527 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5528 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5529 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5530 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5532 /* Init coded blocks info */
5533 if (v->profile == PROFILE_ADVANCED) {
5534 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5536 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5540 ff_intrax8_common_init(&v->x8,s);
/* Image codecs additionally need per-row scratch buffers for the sprite
 * scaler (sr_rows), one output_width-wide line each. */
5542 if (s->avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || s->avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5543 for (i = 0; i < 4; i++)
5544 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
/* Partial failure: unwind the allocations checked above. */
5547 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5548 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5550 av_freep(&v->mv_type_mb_plane);
5551 av_freep(&v->direct_mb_plane);
5552 av_freep(&v->acpred_plane);
5553 av_freep(&v->over_flags_plane);
5554 av_freep(&v->block);
5555 av_freep(&v->cbp_base);
5556 av_freep(&v->ttblk_base);
5557 av_freep(&v->is_intra_base);
5558 av_freep(&v->luma_mv_base);
5559 av_freep(&v->mb_type_base);
5560 return AVERROR(ENOMEM);
/* Build the transposed 8x8 scan tables used by the VC-1 decoder:
 * each WMV1 zigzag index (and the advanced-profile interlaced zigzag)
 * has its row/column swapped via the transpose() macro below. */
5566 av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
5569 for (i = 0; i < 64; i++) {
/* transpose(x): swap the 3-bit row and column fields of a 0..63 index. */
5570 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5571 v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5572 v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5573 v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5574 v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5575 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5581 /** Initialize a VC1/WMV3 decoder
5582 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5583 * @todo TODO: Decipher remaining bits in extra_data
/* AVCodec.init callback: parse extradata (sequence header and, for advanced
 * profile, entry point), set pix_fmt/hwaccel, and size the decoder.
 * Two extradata layouts are handled: WMV3/WMV3IMAGE store a raw sequence
 * header; VC1/VC1IMAGE store start-code-delimited (escaped) headers. */
5585 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5587 VC1Context *v = avctx->priv_data;
5588 MpegEncContext *s = &v->s;
5591 /* save the container output size for WMImage */
5592 v->output_width = avctx->width;
5593 v->output_height = avctx->height;
/* Extradata is mandatory: it carries the sequence header. */
5595 if (!avctx->extradata_size || !avctx->extradata)
5597 if (!(avctx->flags & CODEC_FLAG_GRAY))
5598 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5600 avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5601 avctx->hwaccel = ff_find_hwaccel(avctx);
5604 if (ff_vc1_init_common(v) < 0)
5606 ff_h264chroma_init(&v->h264chroma, 8);
5607 ff_vc1dsp_init(&v->vc1dsp);
5609 if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5612 // looks like WMV3 has a sequence header stored in the extradata
5613 // advanced sequence header may be before the first frame
5614 // the last byte of the extradata is a version number, 1 for the
5615 // samples we can decode
5617 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5619 if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
/* Log leftover/overflowed header bits for diagnostics only. */
5622 count = avctx->extradata_size*8 - get_bits_count(&gb);
5624 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5625 count, get_bits(&gb, count));
5626 } else if (count < 0) {
5627 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5629 } else { // VC1/WVC1/WVP2
/* Walk the start-code-delimited extradata: each unit is unescaped into
 * buf2 and dispatched on its 32-bit start code. Both a sequence header
 * and an entry point must be found. */
5630 const uint8_t *start = avctx->extradata;
5631 uint8_t *end = avctx->extradata + avctx->extradata_size;
5632 const uint8_t *next;
5633 int size, buf2_size;
5634 uint8_t *buf2 = NULL;
5635 int seq_initialized = 0, ep_initialized = 0;
5637 if (avctx->extradata_size < 16) {
5638 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5642 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5643 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5645 for (; next < end; start = next) {
5646 next = find_next_marker(start + 4, end);
5647 size = next - start - 4;
5650 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5651 init_get_bits(&gb, buf2, buf2_size * 8);
5652 switch (AV_RB32(start)) {
5653 case VC1_CODE_SEQHDR:
5654 if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5658 seq_initialized = 1;
5660 case VC1_CODE_ENTRYPOINT:
5661 if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
5670 if (!seq_initialized || !ep_initialized) {
5671 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
/* res_sprite: this is a Windows Media Image (sprite) stream. */
5674 v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5677 v->sprite_output_frame = av_frame_alloc();
5678 if (!v->sprite_output_frame)
5679 return AVERROR(ENOMEM);
5681 avctx->profile = v->profile;
5682 if (v->profile == PROFILE_ADVANCED)
5683 avctx->level = v->level;
5685 avctx->has_b_frames = !!avctx->max_b_frames;
/* 16x16 macroblock grid, rounded up. */
5687 s->mb_width = (avctx->coded_width + 15) >> 4;
5688 s->mb_height = (avctx->coded_height + 15) >> 4;
5690 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5691 ff_vc1_init_transposed_scantables(v);
5693 memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
/* Image codecs: decode at the sprite (coded) size but present frames at
 * the container size saved above. */
5698 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5699 v->sprite_width = avctx->coded_width;
5700 v->sprite_height = avctx->coded_height;
5702 avctx->coded_width = avctx->width = v->output_width;
5703 avctx->coded_height = avctx->height = v->output_height;
5705 // prevent 16.16 overflows
5706 if (v->sprite_width > 1 << 14 ||
5707 v->sprite_height > 1 << 14 ||
5708 v->output_width > 1 << 14 ||
5709 v->output_height > 1 << 14) return -1;
5714 /** Close a VC1/WMV3 decoder
5715 * @warning Initial try at using MpegEncContext stuff
/* AVCodec.close callback: release everything vc1_decode_init() and
 * ff_vc1_decode_init_alloc_tables() allocated, then tear down the shared
 * MpegEncContext and intrax8 state. Safe on partially-initialized contexts
 * since av_freep()/av_frame_free() accept NULL. */
5717 av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
5719 VC1Context *v = avctx->priv_data;
5722 av_frame_free(&v->sprite_output_frame);
/* Per-row sprite scaler scratch buffers (image codecs only; NULL otherwise). */
5724 for (i = 0; i < 4; i++)
5725 av_freep(&v->sr_rows[i >> 1][i & 1]);
5726 av_freep(&v->hrd_rate);
5727 av_freep(&v->hrd_buffer);
5728 ff_MPV_common_end(&v->s);
5729 av_freep(&v->mv_type_mb_plane);
5730 av_freep(&v->direct_mb_plane);
5731 av_freep(&v->forward_mb_plane);
5732 av_freep(&v->fieldtx_plane);
5733 av_freep(&v->acpred_plane);
5734 av_freep(&v->over_flags_plane);
5735 av_freep(&v->mb_type_base);
5736 av_freep(&v->blk_mv_type_base);
5737 av_freep(&v->mv_f_base);
5738 av_freep(&v->mv_f_next_base);
5739 av_freep(&v->block);
5740 av_freep(&v->cbp_base);
5741 av_freep(&v->ttblk_base);
5742 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5743 av_freep(&v->luma_mv_base);
5744 ff_intrax8_common_end(&v->x8);
5749 /** Decode a VC1/WMV3 frame
5750 * @todo TODO: Handle VC-1 IDUs (Transport level?)
/* AVCodec.decode callback: decode one VC-1/WMV3 access unit from avpkt.
 * data receives an AVFrame reference on output; *got_frame is set when a
 * picture is produced. Handles advanced-profile start-code parsing and
 * unescaping, field/slice splitting, hwaccel dispatch, sprite compositing
 * for the image codecs, and B-frame reference/skip logic. */
5752 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5753 int *got_frame, AVPacket *avpkt)
5755 const uint8_t *buf = avpkt->data;
5756 int buf_size = avpkt->size, n_slices = 0, i, ret;
5757 VC1Context *v = avctx->priv_data;
5758 MpegEncContext *s = &v->s;
5759 AVFrame *pict = data;
5760 uint8_t *buf2 = NULL;
5761 const uint8_t *buf_start = buf;
5762 int mb_height, n_slices1;
/* Per-slice state collected while splitting the packet: an unescaped
 * buffer, its bit reader, and the first MB row of the slice. */
5767 } *slices = NULL, *tmp;
5769 /* no supplementary picture */
5770 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5771 /* special case for last picture */
/* Flush: emit the pending delayed picture, if any. */
5772 if (s->low_delay == 0 && s->next_picture_ptr) {
5773 if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
5775 s->next_picture_ptr = NULL;
5783 //for advanced profile we may need to parse and unescape data
5784 if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5786 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5788 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5789 const uint8_t *start, *end, *next;
/* Split the packet into start-code units: frame data goes (unescaped)
 * into buf2; FIELD/SLICE units are appended to slices[]; an ENTRYPOINT
 * updates the coding parameters before the frame. */
5793 for (start = buf, end = buf + buf_size; next < end; start = next) {
5794 next = find_next_marker(start + 4, end);
5795 size = next - start - 4;
5796 if (size <= 0) continue;
5797 switch (AV_RB32(start)) {
5798 case VC1_CODE_FRAME:
5801 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5803 case VC1_CODE_FIELD: {
5805 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5809 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5810 if (!slices[n_slices].buf)
5812 buf_size3 = vc1_unescape_buffer(start + 4, size,
5813 slices[n_slices].buf);
5814 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5816 /* assuming that the field marker is at the exact middle,
5817 hope it's correct */
5818 slices[n_slices].mby_start = s->mb_height >> 1;
5819 n_slices1 = n_slices - 1; // index of the last slice of the first field
5823 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5824 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5825 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5826 ff_vc1_decode_entry_point(avctx, v, &s->gb);
5828 case VC1_CODE_SLICE: {
5830 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5834 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5835 if (!slices[n_slices].buf)
5837 buf_size3 = vc1_unescape_buffer(start + 4, size,
5838 slices[n_slices].buf);
5839 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
/* Slice headers carry their own start row (9 bits). */
5841 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5847 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5848 const uint8_t *divider;
5851 divider = find_next_marker(buf, buf + buf_size);
5852 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5853 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5855 } else { // found field marker, unescape second field
5856 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5860 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5861 if (!slices[n_slices].buf)
5863 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5864 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5866 slices[n_slices].mby_start = s->mb_height >> 1;
5867 n_slices1 = n_slices - 1;
5870 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5872 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5874 init_get_bits(&s->gb, buf2, buf_size2*8);
/* Simple/main profile (WMV3): packet is raw, no unescaping needed. */
5876 init_get_bits(&s->gb, buf, buf_size*8);
5878 if (v->res_sprite) {
/* Sprite streams prefix each frame with new_sprite/two_sprites flags. */
5879 v->new_sprite = !get_bits1(&s->gb);
5880 v->two_sprites = get_bits1(&s->gb);
5881 /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5882 we're using the sprite compositor. These are intentionally kept separate
5883 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5884 the vc1 one for WVP2 */
5885 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5886 if (v->new_sprite) {
5887 // switch AVCodecContext parameters to those of the sprites
5888 avctx->width = avctx->coded_width = v->sprite_width;
5889 avctx->height = avctx->coded_height = v->sprite_height;
/* Dimensions changed mid-stream: tear down and re-init below. */
5896 if (s->context_initialized &&
5897 (s->width != avctx->coded_width ||
5898 s->height != avctx->coded_height)) {
5899 ff_vc1_decode_end(avctx);
5902 if (!s->context_initialized) {
5903 if (ff_msmpeg4_decode_init(avctx) < 0)
5905 if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5906 ff_MPV_common_end(s);
5910 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5912 if (v->profile == PROFILE_ADVANCED) {
5913 s->h_edge_pos = avctx->coded_width;
5914 s->v_edge_pos = avctx->coded_height;
5918 // do parse frame header
5919 v->pic_header_flag = 0;
5920 v->first_pic_header_flag = 1;
5921 if (v->profile < PROFILE_ADVANCED) {
5922 if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5926 if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5930 v->first_pic_header_flag = 0;
/* Sprite (image) streams may only contain I-frames. */
5932 if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5933 && s->pict_type != AV_PICTURE_TYPE_I) {
5934 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5938 // for skipping the frame
5939 s->current_picture.f->pict_type = s->pict_type;
5940 s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5942 /* skip B-frames if we don't have reference frames */
5943 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
/* Honor the user's skip_frame discard level. */
5946 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5947 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5948 avctx->skip_frame >= AVDISCARD_ALL) {
5952 if (s->next_p_frame_damaged) {
5953 if (s->pict_type == AV_PICTURE_TYPE_B)
5956 s->next_p_frame_damaged = 0;
5959 if (ff_MPV_frame_start(s, avctx) < 0) {
5963 // process pulldown flags
5964 s->current_picture_ptr->f->repeat_pict = 0;
5965 // Pulldown flags are only valid when 'broadcast' has been set.
5966 // So ticks_per_frame will be 2
5969 s->current_picture_ptr->f->repeat_pict = 1;
5970 } else if (v->rptfrm) {
5972 s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2;
5975 s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
5976 s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
/* Hardware acceleration path: hand the raw bitstream to the hwaccel and
 * skip the software slice loop entirely. */
5978 if (avctx->hwaccel) {
5979 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5981 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5983 if (avctx->hwaccel->end_frame(avctx) < 0)
5988 ff_mpeg_er_frame_start(s);
5990 v->bits = buf_size * 8;
5991 v->end_mb_x = s->mb_width;
/* Field pictures: double the line strides so each field is decoded as a
 * half-height picture interleaved in the frame; undone after the loop. */
5992 if (v->field_mode) {
5993 s->current_picture.f->linesize[0] <<= 1;
5994 s->current_picture.f->linesize[1] <<= 1;
5995 s->current_picture.f->linesize[2] <<= 1;
5997 s->uvlinesize <<= 1;
5999 mb_height = s->mb_height >> v->field_mode;
6002 av_log(v->s.avctx, AV_LOG_ERROR, "Invalid mb_height.\n");
/* Decode the frame/field data followed by each collected slice; iteration
 * i == 0 uses s->gb (frame data), i > 0 switches to slices[i-1]. */
6006 for (i = 0; i <= n_slices; i++) {
6007 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6008 if (v->field_mode <= 0) {
6009 av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6010 "picture boundary (%d >= %d)\n", i,
6011 slices[i - 1].mby_start, mb_height);
/* Past the field divider: switch to decoding the second field. */
6014 v->second_field = 1;
6015 v->blocks_off = s->mb_width * s->mb_height << 1;
6016 v->mb_off = s->mb_stride * s->mb_height >> 1;
6018 v->second_field = 0;
6023 v->pic_header_flag = 0;
/* The second field carries a full picture header; other slices may
 * carry an optional one signalled by a single bit. */
6024 if (v->field_mode && i == n_slices1 + 2) {
6025 if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6026 av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6027 if (avctx->err_recognition & AV_EF_EXPLODE)
6031 } else if (get_bits1(&s->gb)) {
6032 v->pic_header_flag = 1;
6033 if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6034 av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6035 if (avctx->err_recognition & AV_EF_EXPLODE)
6043 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6044 if (!v->field_mode || v->second_field)
6045 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6047 s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6048 ff_vc1_decode_blocks(v);
6050 s->gb = slices[i].gb;
6052 if (v->field_mode) {
6053 v->second_field = 0;
/* Restore the strides doubled above. */
6054 s->current_picture.f->linesize[0] >>= 1;
6055 s->current_picture.f->linesize[1] >>= 1;
6056 s->current_picture.f->linesize[2] >>= 1;
6058 s->uvlinesize >>= 1;
/* Reference pictures: promote this frame's field MV data to mv_f_next
 * for the following picture (not done for B/BI, which aren't refs). */
6059 if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) {
6060 FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
6061 FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
6064 av_dlog(s->avctx, "Consumed %i/%i bits\n",
6065 get_bits_count(&s->gb), s->gb.size_in_bits);
6066 // if (get_bits_count(&s->gb) > buf_size * 8)
6069 ff_er_frame_end(&s->er);
6072 ff_MPV_frame_end(s);
/* Image codecs: composite the decoded sprite(s) and output the composed
 * frame at the container size instead of the raw sprite. */
6074 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6076 avctx->width = avctx->coded_width = v->output_width;
6077 avctx->height = avctx->coded_height = v->output_height;
6078 if (avctx->skip_frame >= AVDISCARD_NONREF)
6080 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6081 if (vc1_decode_sprites(v, &s->gb))
6084 if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
/* Normal path: B-frames/low-delay output the current picture, otherwise
 * output the delayed previous reference. */
6088 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6089 if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
6091 ff_print_debug_info(s, s->current_picture_ptr);
6093 } else if (s->last_picture_ptr != NULL) {
6094 if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
6096 ff_print_debug_info(s, s->last_picture_ptr);
/* Success cleanup: free per-slice unescape buffers. */
6103 for (i = 0; i < n_slices; i++)
6104 av_free(slices[i].buf);
/* Error cleanup: same slice buffers freed on the failure path. */
6110 for (i = 0; i < n_slices; i++)
6111 av_free(slices[i].buf);
/* Human-readable names for the VC-1 profiles, exposed via AVCodec.profiles;
 * terminated by the FF_PROFILE_UNKNOWN sentinel. */
6117 static const AVProfile profiles[] = {
6118 { FF_PROFILE_VC1_SIMPLE, "Simple" },
6119 { FF_PROFILE_VC1_MAIN, "Main" },
6120 { FF_PROFILE_VC1_COMPLEX, "Complex" },
6121 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6122 { FF_PROFILE_UNKNOWN },
/* Pixel formats offered to get_format(): hardware formats first (each gated
 * on its hwaccel being configured in), software format(s) last. */
6125 static const enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[] = {
6126 #if CONFIG_VC1_DXVA2_HWACCEL
6127 AV_PIX_FMT_DXVA2_VLD,
6129 #if CONFIG_VC1_VAAPI_HWACCEL
6130 AV_PIX_FMT_VAAPI_VLD,
6132 #if CONFIG_VC1_VDPAU_HWACCEL
/* Decoder registration for SMPTE VC-1 (advanced profile bitstreams). */
6139 AVCodec ff_vc1_decoder = {
6141 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6142 .type = AVMEDIA_TYPE_VIDEO,
6143 .id = AV_CODEC_ID_VC1,
6144 .priv_data_size = sizeof(VC1Context),
6145 .init = vc1_decode_init,
6146 .close = ff_vc1_decode_end,
6147 .decode = vc1_decode_frame,
6148 .flush = ff_mpeg_flush,
6149 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6150 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6151 .profiles = NULL_IF_CONFIG_SMALL(profiles)
6154 #if CONFIG_WMV3_DECODER
/* Decoder registration for WMV3 (Windows Media Video 9, VC-1 simple/main). */
6155 AVCodec ff_wmv3_decoder = {
6157 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
6158 .type = AVMEDIA_TYPE_VIDEO,
6159 .id = AV_CODEC_ID_WMV3,
6160 .priv_data_size = sizeof(VC1Context),
6161 .init = vc1_decode_init,
6162 .close = ff_vc1_decode_end,
6163 .decode = vc1_decode_frame,
6164 .flush = ff_mpeg_flush,
6165 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6166 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6167 .profiles = NULL_IF_CONFIG_SMALL(profiles)
6171 #if CONFIG_WMV3IMAGE_DECODER
/* Decoder registration for WMV3 Image (WMVP): sprite stream using the
 * sprite compositor; note it uses vc1_sprite_flush instead of ff_mpeg_flush
 * and does not set CODEC_CAP_DELAY (sprite streams are I-frame only). */
6172 AVCodec ff_wmv3image_decoder = {
6173 .name = "wmv3image",
6174 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
6175 .type = AVMEDIA_TYPE_VIDEO,
6176 .id = AV_CODEC_ID_WMV3IMAGE,
6177 .priv_data_size = sizeof(VC1Context),
6178 .init = vc1_decode_init,
6179 .close = ff_vc1_decode_end,
6180 .decode = vc1_decode_frame,
6181 .capabilities = CODEC_CAP_DR1,
6182 .flush = vc1_sprite_flush,
6183 .pix_fmts = (const enum AVPixelFormat[]) {
6190 #if CONFIG_VC1IMAGE_DECODER
6191 AVCodec ff_vc1image_decoder = {
6193 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
6194 .type = AVMEDIA_TYPE_VIDEO,
6195 .id = AV_CODEC_ID_VC1IMAGE,
6196 .priv_data_size = sizeof(VC1Context),
6197 .init = vc1_decode_init,
6198 .close = ff_vc1_decode_end,
6199 .decode = vc1_decode_frame,
6200 .capabilities = CODEC_CAP_DR1,
6201 .flush = vc1_sprite_flush,
6202 .pix_fmts = (const enum AVPixelFormat[]) {