2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
7 * This file is part of FFmpeg.
9 * FFmpeg is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * FFmpeg is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with FFmpeg; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * VC-1 and WMV3 decoder
31 #include "error_resilience.h"
32 #include "mpegutils.h"
33 #include "mpegvideo.h"
35 #include "h264chroma.h"
38 #include "vc1acdata.h"
39 #include "msmpeg4data.h"
42 #include "vdpau_internal.h"
43 #include "libavutil/avassert.h"
/* Bit depth used when building/reading the intra-MB coefficient VLC tables. */
48 #define MB_INTRA_VLC_BITS 9
52 // offset tables for interlaced picture MVDATA decoding
/* offset_table1[i] = 2^(i-1) for i > 0: per-index base offsets. */
53 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
/* offset_table2[i] = 2^i - 1: cumulative offsets (one less than next power of two). */
54 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
56 /***********************************************************************/
58 * @name VC-1 Bitplane decoding
/* Initialize s->dest[] for the current macroblock row via the generic
 * mpegvideo helper, then, in field mode, shift all three plane pointers
 * down one line when (second_field ^ tff) is clear so that writes land
 * on the lines of the field currently being decoded. */
64 static void init_block_index(VC1Context *v)
66 MpegEncContext *s = &v->s;
67 ff_init_block_index(s);
/* second_field and tff together select which field (top/bottom) the
 * current decode pass targets. */
68 if (v->field_mode && !(v->second_field ^ v->tff)) {
69 s->dest[0] += s->current_picture_ptr->f.linesize[0];
70 s->dest[1] += s->current_picture_ptr->f.linesize[1];
71 s->dest[2] += s->current_picture_ptr->f.linesize[2];
75 /** @} */ //Bitplane group
/* Write the decoded signed residual blocks of the top-left neighbour MB
 * (and, at the end of a row, the top neighbour MB) into the output
 * picture, clamping values to the valid pixel range.  The copy trails
 * the decode loop by one MB row and one MB column — see the original
 * comment below for the reason (overlap filtering needs neighbours). */
77 static void vc1_put_signed_blocks_clamped(VC1Context *v)
79 MpegEncContext *s = &v->s;
80 int topleft_mb_pos, top_mb_pos;
81 int stride_y, fieldtx = 0;
84 /* The put pixels loop is always one MB row behind the decoding loop,
85 * because we can only put pixels when overlap filtering is done, and
86 * for filtering of the bottom edge of a MB, we need the next MB row
88 * Within the row, the put pixels loop is also one MB col behind the
89 * decoding loop. The reason for this is again, because for filtering
90 * of the right MB edge, we need the next MB present. */
91 if (!s->first_slice_line) {
/* Flush the MB diagonally up-left of the current decode position. */
93 topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
/* Interlaced frame pictures can code an MB with interleaved field
 * lines (FIELDTX); doubling the luma stride writes each 8x8 block
 * into every other line, i.e. into one field. */
94 if (v->fcm == ILACE_FRAME)
95 fieldtx = v->fieldtx_plane[topleft_mb_pos];
96 stride_y = s->linesize << fieldtx;
/* Line offset of the lower pair of luma blocks: (16-1)>>0 = 15 when
 * fieldtx is set, (16-0)>>1 = 8 otherwise. */
97 v_dist = (16 - fieldtx) >> (fieldtx == 0);
/* Luma blocks 0..3 (top-left, top-right, bottom-left, bottom-right),
 * then chroma blocks 4 (U) and 5 (V). */
98 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
99 s->dest[0] - 16 * s->linesize - 16,
101 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
102 s->dest[0] - 16 * s->linesize - 8,
104 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
105 s->dest[0] - v_dist * s->linesize - 16,
107 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
108 s->dest[0] - v_dist * s->linesize - 8,
110 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
111 s->dest[1] - 8 * s->uvlinesize - 8,
113 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
114 s->dest[2] - 8 * s->uvlinesize - 8,
/* At the last MB column there is no "next" MB, so also flush the MB
 * directly above the current position now. */
117 if (s->mb_x == s->mb_width - 1) {
118 top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
119 if (v->fcm == ILACE_FRAME)
120 fieldtx = v->fieldtx_plane[top_mb_pos];
121 stride_y = s->linesize << fieldtx;
122 v_dist = fieldtx ? 15 : 8;
123 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
124 s->dest[0] - 16 * s->linesize,
126 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
127 s->dest[0] - 16 * s->linesize + 8,
129 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
130 s->dest[0] - v_dist * s->linesize,
132 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
133 s->dest[0] - v_dist * s->linesize + 8,
135 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
136 s->dest[1] - 8 * s->uvlinesize,
138 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
139 s->dest[2] - 8 * s->uvlinesize,
/* Rotate the ring of per-MB block buffers: each index advances and
 * wraps at v->n_allocated_blks (wrap branch elided in this view). */
144 #define inc_blk_idx(idx) do { \
146 if (idx >= v->n_allocated_blks) \
150 inc_blk_idx(v->topleft_blk_idx);
151 inc_blk_idx(v->top_blk_idx);
152 inc_blk_idx(v->left_blk_idx);
153 inc_blk_idx(v->cur_blk_idx);
/* Apply the in-loop deblocking filter around the current intra MB with
 * quantizer-derived strength pq.  Vertical filters smooth horizontal
 * edges; horizontal filters smooth vertical edges.  Edges on the first
 * slice line / picture borders are skipped. */
156 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
158 MpegEncContext *s = &v->s;
/* Filter the edge shared with the MB row above (luma 16-wide, then the
 * two 8-wide vertical luma edges of that upper MB, then chroma). */
160 if (!s->first_slice_line) {
161 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
163 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
164 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
/* j selects the chroma plane: dest[1] = U, dest[2] = V. */
165 for (j = 0; j < 2; j++) {
166 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
168 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Internal horizontal luma edge between the two 8-line halves. */
171 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
/* On the last MB row of the slice, the edges of the current MB itself
 * must be filtered now — no later row will do it. */
173 if (s->mb_y == s->end_mb_y - 1) {
175 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
176 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
177 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
179 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
/* Deblocking variant used when the loop filter must run after the
 * overlap-smoothing filter: everything is done two MB rows/cols behind
 * the decode position (one behind the overlap filter). */
183 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
185 MpegEncContext *s = &v->s;
188 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
189 * means it runs two rows/cols behind the decoding loop. */
190 if (!s->first_slice_line) {
/* Filter around the MB two rows up, one column left (offsets -16/-32
 * lines, -16/-8 pixels relative to the current dest pointer). */
192 if (s->mb_y >= s->start_mb_y + 2) {
193 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
196 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
197 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
198 for (j = 0; j < 2; j++) {
199 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
201 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
205 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
/* Rightmost column: also catch up on the MB directly above-left of the
 * row end, since no further column will trigger it. */
208 if (s->mb_x == s->mb_width - 1) {
209 if (s->mb_y >= s->start_mb_y + 2) {
210 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
213 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
214 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
215 for (j = 0; j < 2; j++) {
216 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
218 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
222 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
/* Past the last MB row: flush the remaining trailing-row edges. */
225 if (s->mb_y == s->end_mb_y) {
228 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
229 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
231 for (j = 0; j < 2; j++) {
232 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
237 if (s->mb_x == s->mb_width - 1) {
239 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
240 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
242 for (j = 0; j < 2; j++) {
243 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
/* Conditional overlap-smoothing filter for intra blocks, operating on
 * the (still signed) coefficient blocks in v->block[] before they are
 * written to the picture.  Whether an edge is smoothed depends on
 * v->condover, the picture quantizer (pq >= 9 forces it) and the
 * per-MB over_flags bitplane. */
251 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
253 MpegEncContext *s = &v->s;
/* CONDOVER_NONE: overlap smoothing disabled for this picture. */
256 if (v->condover == CONDOVER_NONE)
259 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
261 /* Within a MB, the horizontal overlap always runs before the vertical.
262 * To accomplish that, we run the H on left and internal borders of the
263 * currently decoded MB. Then, we wait for the next overlap iteration
264 * to do H overlap on the right edge of this MB, before moving over and
265 * running the V overlap. Therefore, the V overlap makes us trail by one
266 * MB col and the H overlap filter makes us trail by one MB row. This
267 * is reflected in the time at which we run the put_pixels loop. */
268 if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
/* H smoothing across the left MB boundary needs both MBs flagged. */
269 if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
270 v->over_flags_plane[mb_pos - 1])) {
271 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
272 v->block[v->cur_blk_idx][0]);
273 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
274 v->block[v->cur_blk_idx][2]);
/* Chroma (blocks 4/5) is skipped in grayscale-only decoding. */
275 if (!(s->flags & CODEC_FLAG_GRAY)) {
276 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
277 v->block[v->cur_blk_idx][4]);
278 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
279 v->block[v->cur_blk_idx][5]);
/* H smoothing across the internal vertical edge of the current MB. */
282 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
283 v->block[v->cur_blk_idx][1]);
284 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
285 v->block[v->cur_blk_idx][3]);
/* Last MB of the row: run V smoothing for the current column now,
 * against the MB above. */
287 if (s->mb_x == s->mb_width - 1) {
288 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
289 v->over_flags_plane[mb_pos - s->mb_stride])) {
290 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
291 v->block[v->cur_blk_idx][0]);
292 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
293 v->block[v->cur_blk_idx][1]);
294 if (!(s->flags & CODEC_FLAG_GRAY)) {
295 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
296 v->block[v->cur_blk_idx][4]);
297 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
298 v->block[v->cur_blk_idx][5]);
301 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
302 v->block[v->cur_blk_idx][2]);
303 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
304 v->block[v->cur_blk_idx][3]);
/* V smoothing for the column to the left (trailing by one MB col),
 * against the top-left neighbour. */
307 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
308 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
309 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
310 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
311 v->block[v->left_blk_idx][0]);
312 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
313 v->block[v->left_blk_idx][1]);
314 if (!(s->flags & CODEC_FLAG_GRAY)) {
315 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
316 v->block[v->left_blk_idx][4]);
317 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
318 v->block[v->left_blk_idx][5]);
321 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
322 v->block[v->left_blk_idx][2]);
323 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
324 v->block[v->left_blk_idx][3]);
328 /** Do motion compensation over 1 macroblock
329 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
/* Motion-compensate one whole 16x16 macroblock with a single motion
 * vector.  dir selects the prediction direction (0 = forward/last,
 * 1 = backward/next).  Luma uses VC-1 quarter-pel ("mspel") or
 * half-pel interpolation; chroma always uses quarter-pel bilinear.
 * Handles field pictures, range reduction and intensity compensation. */
331 static void vc1_mc_1mv(VC1Context *v, int dir)
333 MpegEncContext *s = &v->s;
334 H264ChromaContext *h264chroma = &v->h264chroma;
335 uint8_t *srcY, *srcU, *srcV;
336 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
/* In field mode only half of the lines belong to this field. */
337 int v_edge_pos = s->v_edge_pos >> v->field_mode;
/* Per-field intensity-compensation lookup tables (256-entry maps). */
339 uint8_t (*luty)[256], (*lutuv)[256];
/* Bail out when the needed reference picture is unavailable
 * (early-return body elided in this view). */
342 if ((!v->field_mode ||
343 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
344 !v->s.last_picture.f.data[0])
347 mx = s->mv[dir][0][0];
348 my = s->mv[dir][0][1];
350 // store motion vectors for further use in B frames
351 if (s->pict_type == AV_PICTURE_TYPE_P) {
352 for (i = 0; i < 4; i++) {
353 s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
354 s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
/* Derive the chroma MV: halve, rounding 3-mod-4 values up. */
358 uvmx = (mx + ((mx & 3) == 3)) >> 1;
359 uvmy = (my + ((my & 3) == 3)) >> 1;
360 v->luma_mv[s->mb_x][0] = uvmx;
361 v->luma_mv[s->mb_x][1] = uvmy;
/* Cross-field reference: bias the vertical MVs by +/-2 quarter-pels
 * (enclosing condition partially elided in this view). */
364 v->cur_field_type != v->ref_field_type[dir]) {
365 my = my - 2 + 4 * v->cur_field_type;
366 uvmy = uvmy - 2 + 4 * v->cur_field_type;
369 // fastuvmc shall be ignored for interlaced frame picture
/* FASTUVMC rounds chroma MVs toward zero to the nearest half-pel. */
370 if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
371 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
372 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
/* Pick the reference planes and IC tables: same-parity field of the
 * current picture, else last (forward) or next (backward) picture —
 * some else-branch lines elided in this view. */
375 if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
376 srcY = s->current_picture.f.data[0];
377 srcU = s->current_picture.f.data[1];
378 srcV = s->current_picture.f.data[2];
380 lutuv = v->curr_lutuv;
381 use_ic = *v->curr_use_ic;
383 srcY = s->last_picture.f.data[0];
384 srcU = s->last_picture.f.data[1];
385 srcV = s->last_picture.f.data[2];
387 lutuv = v->last_lutuv;
388 use_ic = v->last_use_ic;
391 srcY = s->next_picture.f.data[0];
392 srcU = s->next_picture.f.data[1];
393 srcV = s->next_picture.f.data[2];
395 lutuv = v->next_lutuv;
396 use_ic = v->next_use_ic;
399 if (!srcY || !srcU) {
400 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
/* Integer-pel source position; MVs are in quarter-pel units. */
404 src_x = s->mb_x * 16 + (mx >> 2);
405 src_y = s->mb_y * 16 + (my >> 2);
406 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
407 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
/* MV pull-back: Simple/Main profiles clip to MB-aligned dimensions,
 * Advanced profile clips to the coded size. */
409 if (v->profile != PROFILE_ADVANCED) {
410 src_x = av_clip( src_x, -16, s->mb_width * 16);
411 src_y = av_clip( src_y, -16, s->mb_height * 16);
412 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
413 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
415 src_x = av_clip( src_x, -17, s->avctx->coded_width);
416 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
417 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
418 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
421 srcY += src_y * s->linesize + src_x;
422 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
423 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* Referencing the bottom field: advance one line into each plane. */
425 if (v->field_mode && v->ref_field_type[dir]) {
426 srcY += s->current_picture_ptr->f.linesize[0];
427 srcU += s->current_picture_ptr->f.linesize[1];
428 srcV += s->current_picture_ptr->f.linesize[2];
431 /* for grayscale we should not try to read from unknown area */
432 if (s->flags & CODEC_FLAG_GRAY) {
433 srcU = s->edge_emu_buffer + 18 * s->linesize;
434 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* Slow path: the source area needs per-pixel work (range reduction or
 * intensity compensation) or lies outside the padded picture, so copy
 * it through the edge-emulation buffer first. */
437 if (v->rangeredfrm || use_ic
438 || s->h_edge_pos < 22 || v_edge_pos < 22
439 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
440 || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
441 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
/* Include the interpolation margin (s->mspel pixels on each side). */
443 srcY -= s->mspel * (1 + s->linesize);
444 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
445 s->linesize, s->linesize,
446 17 + s->mspel * 2, 17 + s->mspel * 2,
447 src_x - s->mspel, src_y - s->mspel,
448 s->h_edge_pos, v_edge_pos);
449 srcY = s->edge_emu_buffer;
450 s->vdsp.emulated_edge_mc(uvbuf, srcU,
451 s->uvlinesize, s->uvlinesize,
454 s->h_edge_pos >> 1, v_edge_pos >> 1);
455 s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
456 s->uvlinesize, s->uvlinesize,
459 s->h_edge_pos >> 1, v_edge_pos >> 1);
462 /* if we deal with range reduction we need to scale source blocks */
/* Range reduction: map each sample towards 128 by halving the offset. */
463 if (v->rangeredfrm) {
468 for (j = 0; j < 17 + s->mspel * 2; j++) {
469 for (i = 0; i < 17 + s->mspel * 2; i++)
470 src[i] = ((src[i] - 128) >> 1) + 128;
475 for (j = 0; j < 9; j++) {
476 for (i = 0; i < 9; i++) {
477 src[i] = ((src[i] - 128) >> 1) + 128;
478 src2[i] = ((src2[i] - 128) >> 1) + 128;
480 src += s->uvlinesize;
481 src2 += s->uvlinesize;
484 /* if we deal with intensity compensation we need to scale source blocks */
/* IC: remap samples through the per-field LUT; f selects the field
 * (fixed in field mode, per-line parity otherwise). */
490 for (j = 0; j < 17 + s->mspel * 2; j++) {
491 int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
492 for (i = 0; i < 17 + s->mspel * 2; i++)
493 src[i] = luty[f][src[i]];
498 for (j = 0; j < 9; j++) {
499 int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
500 for (i = 0; i < 9; i++) {
501 src[i] = lutuv[f][src[i]];
502 src2[i] = lutuv[f][src2[i]];
504 src += s->uvlinesize;
505 src2 += s->uvlinesize;
/* Undo the margin offset applied before edge emulation. */
508 srcY += s->mspel * (1 + s->linesize);
/* Quarter-pel luma MC: dxy packs the fractional MV (y in bits 3:2,
 * x in bits 1:0) to select the mspel interpolation function. */
512 dxy = ((my & 3) << 2) | (mx & 3);
513 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
514 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
515 srcY += s->linesize * 8;
516 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
517 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
518 } else { // hpel mc - always used for luma
519 dxy = (my & 2) | ((mx & 2) >> 1);
521 s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
523 s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
526 if (s->flags & CODEC_FLAG_GRAY) return;
527 /* Chroma MC always uses qpel bilinear */
528 uvmx = (uvmx & 3) << 1;
529 uvmy = (uvmy & 3) << 1;
/* Rounding choice (enclosing condition elided in this view):
 * h264chroma path vs. the no-round VC-1 chroma path. */
531 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
532 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
534 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
535 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
/* Median of four ints: average of the two middle values, obtained as
 * (min of the larger pair + max of the smaller pair) / 2 for each
 * ordering of the inputs. */
539 static inline int median4(int a, int b, int c, int d)
542 if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
543 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
545 if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
546 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
550 /** Do motion compensation for 4-MV macroblock - luminance block
/* Motion-compensate one 8x8 luma block (index n, 0..3) of a 4-MV
 * macroblock.  dir selects forward/backward prediction, avg selects
 * averaging (B-frame second pass) vs. plain put.  Handles interlaced
 * frame pictures (per-block field MVs via fieldmv), field pictures,
 * range reduction and intensity compensation. */
552 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
554 MpegEncContext *s = &v->s;
556 int dxy, mx, my, src_x, src_y;
/* fieldmv != 0: this block uses a field MV inside an interlaced frame
 * picture, so lines are addressed with a doubled stride. */
558 int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
559 int v_edge_pos = s->v_edge_pos >> v->field_mode;
560 uint8_t (*luty)[256];
/* Bail out when the needed reference is unavailable (body elided). */
563 if ((!v->field_mode ||
564 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
565 !v->s.last_picture.f.data[0])
568 mx = s->mv[dir][n][0];
569 my = s->mv[dir][n][1];
/* Reference selection mirrors vc1_mc_1mv (else branches elided). */
572 if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
573 srcY = s->current_picture.f.data[0];
575 use_ic = *v->curr_use_ic;
577 srcY = s->last_picture.f.data[0];
579 use_ic = v->last_use_ic;
582 srcY = s->next_picture.f.data[0];
584 use_ic = v->next_use_ic;
588 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
593 if (v->cur_field_type != v->ref_field_type[dir])
594 my = my - 2 + 4 * v->cur_field_type;
/* For field-mode P pictures, after the last (n == 3) block, derive the
 * MB-level MV stored for B-frame prediction: take the MVs of the
 * dominant field (same vs. opposite parity) and combine them. */
597 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
598 int same_count = 0, opp_count = 0, k;
599 int chosen_mv[2][4][2], f;
601 for (k = 0; k < 4; k++) {
602 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
603 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
604 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
608 f = opp_count > same_count;
/* 4 MVs -> median4, 3 -> mid_pred, 2 -> average (case labels elided). */
609 switch (f ? opp_count : same_count) {
611 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
612 chosen_mv[f][2][0], chosen_mv[f][3][0]);
613 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
614 chosen_mv[f][2][1], chosen_mv[f][3][1]);
617 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
618 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
621 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
622 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
627 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
628 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
629 for (k = 0; k < 4; k++)
630 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* Interlaced frame pictures: pull the MV back inside the picture
 * (intermediate pull-back branches elided in this view). */
633 if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
635 int width = s->avctx->coded_width;
636 int height = s->avctx->coded_height >> 1;
637 if (s->pict_type == AV_PICTURE_TYPE_P) {
638 s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
639 s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
641 qx = (s->mb_x * 16) + (mx >> 2);
642 qy = (s->mb_y * 8) + (my >> 3);
647 mx -= 4 * (qx - width);
650 else if (qy > height + 1)
651 my -= 8 * (qy - height - 1);
/* Destination offset of sub-block n inside the 16x16 MB; field MVs
 * interleave the two 8-line halves instead of stacking them. */
654 if ((v->fcm == ILACE_FRAME) && fieldmv)
655 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
657 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
659 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
661 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
663 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
/* MV pull-back, per profile (as in vc1_mc_1mv). */
665 if (v->profile != PROFILE_ADVANCED) {
666 src_x = av_clip(src_x, -16, s->mb_width * 16);
667 src_y = av_clip(src_y, -16, s->mb_height * 16);
669 src_x = av_clip(src_x, -17, s->avctx->coded_width);
670 if (v->fcm == ILACE_FRAME) {
672 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
674 src_y = av_clip(src_y, -18, s->avctx->coded_height);
676 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
680 srcY += src_y * s->linesize + src_x;
681 if (v->field_mode && v->ref_field_type[dir])
682 srcY += s->current_picture_ptr->f.linesize[0];
/* Field-MV edge adjustments (assignment lines elided in this view). */
684 if (fieldmv && !(src_y & 1))
686 if (fieldmv && (src_y & 1) && src_y < 4)
/* Slow path via the edge-emulation buffer, as in vc1_mc_1mv, but for
 * an 8x8 block (+ interpolation margin, doubled vertically for
 * field MVs). */
688 if (v->rangeredfrm || use_ic
689 || s->h_edge_pos < 13 || v_edge_pos < 23
690 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
691 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
692 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
693 /* check emulate edge stride and offset */
694 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
695 s->linesize, s->linesize,
696 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
697 src_x - s->mspel, src_y - (s->mspel << fieldmv),
698 s->h_edge_pos, v_edge_pos);
699 srcY = s->edge_emu_buffer;
700 /* if we deal with range reduction we need to scale source blocks */
701 if (v->rangeredfrm) {
706 for (j = 0; j < 9 + s->mspel * 2; j++) {
707 for (i = 0; i < 9 + s->mspel * 2; i++)
708 src[i] = ((src[i] - 128) >> 1) + 128;
709 src += s->linesize << fieldmv;
712 /* if we deal with intensity compensation we need to scale source blocks */
718 for (j = 0; j < 9 + s->mspel * 2; j++) {
719 int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
720 for (i = 0; i < 9 + s->mspel * 2; i++)
721 src[i] = luty[f][src[i]];
722 src += s->linesize << fieldmv;
725 srcY += s->mspel * (1 + (s->linesize << fieldmv));
/* Quarter-pel vs. half-pel interpolation, avg vs. put variants. */
729 dxy = ((my & 3) << 2) | (mx & 3);
731 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
733 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
734 } else { // hpel mc - always used for luma
735 dxy = (my & 2) | ((mx & 2) >> 1);
737 s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
739 s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
/* Derive the chroma MV (*tx, *ty) from the four luma MVs of a 4-MV MB.
 * a[k] is compared against flag to mark block k invalid (e.g. intra or
 * wrong field); idx is a 4-bit mask of invalid blocks and count[idx]
 * the number of valid ones.  4 valid -> median4, 3 -> mid_pred of the
 * valid trio, 2 -> average of the valid pair.  Return value appears to
 * be the valid count — TODO confirm against the elided return lines. */
743 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
746 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
/* Build the invalid-block bitmask (bit 0 term elided in this view). */
748 idx = ((a[3] != flag) << 3)
749 | ((a[2] != flag) << 2)
750 | ((a[1] != flag) << 1)
753 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
754 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
/* Exactly one invalid block: mid_pred of the remaining three
 * (the per-case selectors below exclude the invalid index). */
756 } else if (count[idx] == 1) {
759 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
760 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
763 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
764 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
767 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
768 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
771 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
772 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
/* Two valid blocks: locate them (t1, t2) and average. */
775 } else if (count[idx] == 2) {
777 for (i = 0; i < 3; i++)
782 for (i = t1 + 1; i < 4; i++)
787 *tx = (mvx[t1] + mvx[t2]) / 2;
788 *ty = (mvy[t1] + mvy[t2]) / 2;
796 /** Do motion compensation for 4-MV macroblock - both chroma blocks
/* Motion-compensate both 8x8 chroma blocks (U and V) of a 4-MV
 * macroblock: derive one chroma MV from the four luma MVs via
 * get_chroma_mv(), then do quarter-pel bilinear MC.  Handles field
 * pictures (dominant-field selection), range reduction and intensity
 * compensation. */
798 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
800 MpegEncContext *s = &v->s;
801 H264ChromaContext *h264chroma = &v->h264chroma;
802 uint8_t *srcU, *srcV;
803 int uvmx, uvmy, uvsrc_x, uvsrc_y;
804 int k, tx = 0, ty = 0;
805 int mvx[4], mvy[4], intra[4], mv_f[4];
807 int chroma_ref_type = v->cur_field_type;
808 int v_edge_pos = s->v_edge_pos >> v->field_mode;
809 uint8_t (*lutuv)[256];
812 if (!v->field_mode && !v->s.last_picture.f.data[0])
814 if (s->flags & CODEC_FLAG_GRAY)
/* Gather the four luma MVs plus their intra/field-parity markers. */
817 for (k = 0; k < 4; k++) {
818 mvx[k] = s->mv[dir][k][0];
819 mvy[k] = s->mv[dir][k][1];
820 intra[k] = v->mb_type[0][s->block_index[k]];
822 mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
825 /* calculate chroma MV vector from four luma MVs */
/* One-reference case: intra blocks are the invalid ones. */
826 if (!v->field_mode || (v->field_mode && !v->numref)) {
827 valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
828 chroma_ref_type = v->reffield;
/* No valid luma MV (elided condition): store zero MV and skip MC. */
830 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
831 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
832 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
833 return; //no need to do MC for intra blocks
/* Two-reference field case: pick the dominant field parity (>2 of 4
 * blocks) and use only MVs of that parity. */
837 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
839 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
841 chroma_ref_type = !v->cur_field_type;
843 if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
845 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
846 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
/* Halve the derived luma MV with VC-1 rounding (3-mod-4 rounds up). */
847 uvmx = (tx + ((tx & 3) == 3)) >> 1;
848 uvmy = (ty + ((ty & 3) == 3)) >> 1;
850 v->luma_mv[s->mb_x][0] = uvmx;
851 v->luma_mv[s->mb_x][1] = uvmy;
/* FASTUVMC rounding toward zero (enclosing condition elided). */
854 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
855 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
857 // Field conversion bias
858 if (v->cur_field_type != chroma_ref_type)
859 uvmy += 2 - 4 * chroma_ref_type;
861 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
862 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
/* MV pull-back, per profile. */
864 if (v->profile != PROFILE_ADVANCED) {
865 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
866 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
868 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
869 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
/* Reference plane / IC table selection (else branches elided). */
873 if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
874 srcU = s->current_picture.f.data[1];
875 srcV = s->current_picture.f.data[2];
876 lutuv = v->curr_lutuv;
877 use_ic = *v->curr_use_ic;
879 srcU = s->last_picture.f.data[1];
880 srcV = s->last_picture.f.data[2];
881 lutuv = v->last_lutuv;
882 use_ic = v->last_use_ic;
885 srcU = s->next_picture.f.data[1];
886 srcV = s->next_picture.f.data[2];
887 lutuv = v->next_lutuv;
888 use_ic = v->next_use_ic;
892 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
896 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
897 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* Bottom-field reference: step one chroma line into each plane. */
900 if (chroma_ref_type) {
901 srcU += s->current_picture_ptr->f.linesize[1];
902 srcV += s->current_picture_ptr->f.linesize[2];
/* Slow path via the edge-emulation buffer (9x9 incl. margin). */
906 if (v->rangeredfrm || use_ic
907 || s->h_edge_pos < 18 || v_edge_pos < 18
908 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
909 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
910 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
911 s->uvlinesize, s->uvlinesize,
912 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
913 s->h_edge_pos >> 1, v_edge_pos >> 1);
914 s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
915 s->uvlinesize, s->uvlinesize,
916 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
917 s->h_edge_pos >> 1, v_edge_pos >> 1);
918 srcU = s->edge_emu_buffer;
919 srcV = s->edge_emu_buffer + 16;
921 /* if we deal with range reduction we need to scale source blocks */
922 if (v->rangeredfrm) {
928 for (j = 0; j < 9; j++) {
929 for (i = 0; i < 9; i++) {
930 src[i] = ((src[i] - 128) >> 1) + 128;
931 src2[i] = ((src2[i] - 128) >> 1) + 128;
933 src += s->uvlinesize;
934 src2 += s->uvlinesize;
937 /* if we deal with intensity compensation we need to scale source blocks */
944 for (j = 0; j < 9; j++) {
945 int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
946 for (i = 0; i < 9; i++) {
947 src[i] = lutuv[f][src[i]];
948 src2[i] = lutuv[f][src2[i]];
950 src += s->uvlinesize;
951 src2 += s->uvlinesize;
956 /* Chroma MC always uses qpel bilinear */
957 uvmx = (uvmx & 3) << 1;
958 uvmy = (uvmy & 3) << 1;
/* Rounding choice (enclosing condition elided in this view). */
960 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
961 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
963 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
964 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
968 /** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
/* Chroma MC for 4-MV macroblocks in interlaced frame pictures: the two
 * 8x8 chroma planes are processed as four 4x4 sub-blocks, each with
 * its own MV.  dir/dir2 give the prediction direction for the upper
 * and lower sub-block pairs; avg selects averaging vs. plain put. */
970 static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
972 MpegEncContext *s = &v->s;
973 H264ChromaContext *h264chroma = &v->h264chroma;
974 uint8_t *srcU, *srcV;
975 int uvsrc_x, uvsrc_y;
976 int uvmx_field[4], uvmy_field[4];
978 int fieldmv = v->blk_mv_type[s->block_index[0]];
/* Rounding table used when halving the vertical field MV. */
979 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
980 int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
981 int v_edge_pos = s->v_edge_pos >> 1;
983 uint8_t (*lutuv)[256];
985 if (s->flags & CODEC_FLAG_GRAY)
/* Derive one chroma MV per sub-block from the luma MVs
 * (tx/ty source lines elided in this view). */
988 for (i = 0; i < 4; i++) {
989 int d = i < 2 ? dir: dir2;
991 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
994 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
996 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
999 for (i = 0; i < 4; i++) {
/* Destination offset of sub-block i inside the 8x8 chroma block. */
1000 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1001 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1002 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1003 // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1004 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1005 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
/* Forward MVs reference the last picture, backward the next one. */
1006 if (i < 2 ? dir : dir2) {
1007 srcU = s->next_picture.f.data[1];
1008 srcV = s->next_picture.f.data[2];
1009 lutuv = v->next_lutuv;
1010 use_ic = v->next_use_ic;
1012 srcU = s->last_picture.f.data[1];
1013 srcV = s->last_picture.f.data[2];
1014 lutuv = v->last_lutuv;
1015 use_ic = v->last_use_ic;
1019 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1020 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
/* Keep only the fractional (quarter-pel) part for interpolation. */
1021 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1022 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
/* Field-MV edge adjustments near picture top/bottom
 * (one assignment line elided in this view). */
1024 if (fieldmv && !(uvsrc_y & 1))
1025 v_edge_pos = (s->v_edge_pos >> 1) - 1;
1027 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
/* Slow path via the edge-emulation buffer (5x5 source per sub-block,
 * doubled vertically for field MVs; leading condition elided). */
1030 || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1031 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1032 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1033 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
1034 s->uvlinesize, s->uvlinesize,
1035 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1036 s->h_edge_pos >> 1, v_edge_pos);
1037 s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
1038 s->uvlinesize, s->uvlinesize,
1039 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1040 s->h_edge_pos >> 1, v_edge_pos);
1041 srcU = s->edge_emu_buffer;
1042 srcV = s->edge_emu_buffer + 16;
1044 /* if we deal with intensity compensation we need to scale source blocks */
1047 uint8_t *src, *src2;
/* Remap each 5x5 area through the per-field chroma IC LUT. */
1051 for (j = 0; j < 5; j++) {
1052 int f = (uvsrc_y + (j << fieldmv)) & 1;
1053 for (i = 0; i < 5; i++) {
1054 src[i] = lutuv[f][src[i]];
1055 src2[i] = lutuv[f][src2[i]];
1057 src += s->uvlinesize << fieldmv;
1058 src2 += s->uvlinesize << fieldmv;
/* avg (B second pass) vs. put, rounded vs. no-round variants. */
1064 h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1065 h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1067 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1068 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1072 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1073 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1075 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1076 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1082 /***********************************************************************/
1084 * @name VC-1 Block-level functions
1085 * @see 7.1.4, p91 and 8.1.1.7, p104
1091 * @brief Get macroblock-level quantizer scale
/*
 * GET_MQUANT(): derive the macroblock-level quantizer `mquant` from the
 * picture-level DQUANT syntax.  For DQPROFILE_ALL_MBS either a one-bit
 * selector (dqbilevel) picks pq/altpq, or a 3-bit MQDIFF is read (with a
 * 5-bit absolute-value escape).  For the edge profiles, `altpq` overrides
 * `pq` on the selected picture edges; an out-of-range result is reported
 * via av_log and overridden.
 * NOTE(review): the embedded numbering (1093..1123) is non-contiguous --
 * else-branches and closing braces appear to have been dropped during
 * extraction.  Verify this macro against the upstream file before editing.
 */
1093 #define GET_MQUANT() \
1094 if (v->dquantfrm) { \
1096 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1097 if (v->dqbilevel) { \
1098 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1100 mqdiff = get_bits(gb, 3); \
1102 mquant = v->pq + mqdiff; \
1104 mquant = get_bits(gb, 5); \
1107 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1108 edges = 1 << v->dqsbedge; \
1109 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1110 edges = (3 << v->dqsbedge) % 15; \
1111 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1113 if ((edges&1) && !s->mb_x) \
1114 mquant = v->altpq; \
1115 if ((edges&2) && s->first_slice_line) \
1116 mquant = v->altpq; \
1117 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1118 mquant = v->altpq; \
1119 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1120 mquant = v->altpq; \
1121 if (!mquant || mquant > 31) { \
1122 av_log(v->s.avctx, AV_LOG_ERROR, \
1123 "Overriding invalid mquant %d\n", mquant); \
1129 * @def GET_MVDATA(_dmv_x, _dmv_y)
1130 * @brief Get MV differentials
1131 * @see MVDATA decoding from 8.3.5.2, p120
1132 * @param _dmv_x Horizontal differential for decoded MV
1133 * @param _dmv_y Vertical differential for decoded MV
/*
 * GET_MVDATA(): decode one MVDATA element into quad/half-pel MV
 * differentials.  VLC index 35 is the raw-escape case (k_x/k_y-bit fixed
 * length reads); index 36 presumably signals an intra/skip condition --
 * its branch body is not visible here.  The common case splits the index
 * into x (index % 6) and y (index / 6) components, each expanded through
 * size_table/offset_table with a trailing sign bit folded in via
 * `sign = 0 - (val & 1)`.
 * NOTE(review): embedded numbering (1135..1170) is non-contiguous -- the
 * branch structure around indices 0/35/36 is partially missing; verify
 * against the upstream file.
 */
1135 #define GET_MVDATA(_dmv_x, _dmv_y) \
1136 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1137 VC1_MV_DIFF_VLC_BITS, 2); \
1139 mb_has_coeffs = 1; \
1142 mb_has_coeffs = 0; \
1145 _dmv_x = _dmv_y = 0; \
1146 } else if (index == 35) { \
1147 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1148 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1149 } else if (index == 36) { \
1154 index1 = index % 6; \
1155 if (!s->quarter_sample && index1 == 5) val = 1; \
1157 if (size_table[index1] - val > 0) \
1158 val = get_bits(gb, size_table[index1] - val); \
1160 sign = 0 - (val&1); \
1161 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1163 index1 = index / 6; \
1164 if (!s->quarter_sample && index1 == 5) val = 1; \
1166 if (size_table[index1] - val > 0) \
1167 val = get_bits(gb, size_table[index1] - val); \
1169 sign = 0 - (val & 1); \
1170 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
/*
 * Decode MV differentials for interlaced pictures.  Selects the 1-ref or
 * 2-ref MVDATA VLC table, applies DMVRANGE extension bits, and handles
 * both the raw-escape case (full k_x/k_y-bit reads, with the reference
 * field flag carried in the LSB of dmv_y when two references are in use)
 * and the table-driven case using offset_table1/offset_table2.
 * NOTE(review): embedded numbering (1173..1239) is non-contiguous -- the
 * switch cases for v->dmvrange and several else-branches are missing from
 * this extraction; verify against the upstream file.
 */
1173 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1174 int *dmv_y, int *pred_flag)
1177 int extend_x = 0, extend_y = 0;
1178 GetBitContext *gb = &v->s.gb;
1181 const int* offs_tab;
/* two-reference field pictures use a different VLC table */
1184 bits = VC1_2REF_MVDATA_VLC_BITS;
1187 bits = VC1_1REF_MVDATA_VLC_BITS;
1190 switch (v->dmvrange) {
1198 extend_x = extend_y = 1;
1201 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
/* escape: raw fixed-length MV components */
1203 *dmv_x = get_bits(gb, v->k_x);
1204 *dmv_y = get_bits(gb, v->k_y);
/* with 2 references the LSB of dmv_y selects the reference field */
1207 *pred_flag = *dmv_y & 1;
1208 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1210 *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1215 av_assert0(index < esc);
1217 offs_tab = offset_table2;
1219 offs_tab = offset_table1;
/* horizontal component: (index + 1) % 9 indexes the offset table */
1220 index1 = (index + 1) % 9;
1222 val = get_bits(gb, index1 + extend_x);
1223 sign = 0 -(val & 1);
1224 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1228 offs_tab = offset_table2;
1230 offs_tab = offset_table1;
/* vertical component: (index + 1) / 9; numref folds the field flag in */
1231 index1 = (index + 1) / 9;
1232 if (index1 > v->numref) {
1233 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1234 sign = 0 - (val & 1);
1235 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1238 if (v->numref && pred_flag)
1239 *pred_flag = index1 & 1;
/*
 * Scale a horizontal MV predictor taken from the same-polarity field
 * (field-picture MV prediction).  Uses the ff_vc1_field_mvpred_scales
 * table indexed by (dir ^ second_field) and the reference distance:
 * small MVs (|n| < scalezone1_x) use scalesame1, larger ones scalesame2
 * plus/minus a zone offset.  Result is clamped to the horizontal MV range.
 * NOTE(review): embedded numbering is non-contiguous -- else keywords and
 * braces are missing from this extraction.
 */
1243 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1245 int scaledvalue, refdist;
1246 int scalesame1, scalesame2;
1247 int scalezone1_x, zone1offset_x;
1248 int table_index = dir ^ v->second_field;
1250 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1251 refdist = v->refdist;
1253 refdist = dir ? v->brfd : v->frfd;
1256 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1257 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1258 scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1259 zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1264 if (FFABS(n) < scalezone1_x)
1265 scaledvalue = (n * scalesame1) >> 8;
1268 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1270 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1273 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/*
 * Vertical counterpart of scaleforsame_x(): scale a same-field MV
 * predictor using the y-zone entries ([4] and [6]) of
 * ff_vc1_field_mvpred_scales.  The final clip range is asymmetric and
 * depends on whether the current and reference fields differ in polarity.
 * NOTE(review): embedded numbering is non-contiguous -- else keywords and
 * braces are missing from this extraction.
 */
1276 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1278 int scaledvalue, refdist;
1279 int scalesame1, scalesame2;
1280 int scalezone1_y, zone1offset_y;
1281 int table_index = dir ^ v->second_field;
1283 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1284 refdist = v->refdist;
1286 refdist = dir ? v->brfd : v->frfd;
1289 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1290 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1291 scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1292 zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1297 if (FFABS(n) < scalezone1_y)
1298 scaledvalue = (n * scalesame1) >> 8;
1301 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1303 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
/* clip range shifts by one when predicting across field polarities */
1307 if (v->cur_field_type && !v->ref_field_type[dir])
1308 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1310 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1313 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1315 int scalezone1_x, zone1offset_x;
1316 int scaleopp1, scaleopp2, brfd;
1319 brfd = FFMIN(v->brfd, 3);
1320 scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1321 zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1322 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1323 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1328 if (FFABS(n) < scalezone1_x)
1329 scaledvalue = (n * scaleopp1) >> 8;
1332 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1334 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1337 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
/*
 * Vertical counterpart of scaleforopp_x(): opposite-field MV predictor
 * scaling for B pictures, using the y-zone entries ([4] and [6]) of
 * ff_vc1_b_field_mvpred_scales.  Clip range shifts by one when the
 * current and reference field polarities differ.
 * NOTE(review): embedded numbering is non-contiguous -- else keywords and
 * braces are missing from this extraction.
 */
1340 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1342 int scalezone1_y, zone1offset_y;
1343 int scaleopp1, scaleopp2, brfd;
1346 brfd = FFMIN(v->brfd, 3);
1347 scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1348 zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1349 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1350 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1355 if (FFABS(n) < scalezone1_y)
1356 scaledvalue = (n * scaleopp1) >> 8;
1359 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1361 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1364 if (v->cur_field_type && !v->ref_field_type[dir]) {
1365 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1367 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
/*
 * Dispatch same-field MV predictor scaling: non-B pictures (or the second
 * field, or forward direction) route to scaleforsame_x/_y; otherwise a
 * single B-picture scale factor from ff_vc1_b_field_mvpred_scales[0] is
 * applied.  `hpel` re-expands the result when in half-pel precision.
 * NOTE(review): embedded numbering is non-contiguous -- the parameter
 * list tail, braces and return statements are missing from this
 * extraction; the dimension selector (x vs y) is among the dropped lines.
 */
1371 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1374 int brfd, scalesame;
1375 int hpel = 1 - v->s.quarter_sample;
1378 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1380 n = scaleforsame_y(v, i, n, dir) << hpel;
1382 n = scaleforsame_x(v, n, dir) << hpel;
1385 brfd = FFMIN(v->brfd, 3);
1386 scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1388 n = (n * scalesame >> 8) << hpel;
/*
 * Dispatch opposite-field MV predictor scaling: backward prediction in a
 * B first field routes to scaleforopp_x/_y; otherwise a single scale
 * factor from ff_vc1_field_mvpred_scales[dir ^ second_field][0] is
 * applied, with refdist clamped to 3 for non-B pictures.
 * NOTE(review): embedded numbering is non-contiguous -- the parameter
 * list tail, braces and return statements are missing from this
 * extraction.
 */
1392 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1395 int refdist, scaleopp;
1396 int hpel = 1 - v->s.quarter_sample;
1399 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1401 n = scaleforopp_y(v, n, dir) << hpel;
1403 n = scaleforopp_x(v, n) << hpel;
1406 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1407 refdist = FFMIN(v->refdist, 3);
1409 refdist = dir ? v->brfd : v->frfd;
1410 scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1412 n = (n * scaleopp >> 8) << hpel;
1416 /** Predict and set motion vector
/*
 * Predict and set the motion vector for luma block n (progressive and
 * field pictures).  Gathers the A (above), B (above-diagonal) and C
 * (left) candidate predictors, scales them across field polarities when
 * needed, takes the median (or the single valid candidate), applies the
 * MV pullback of 8.3.5.3.4 and the hybrid-MV override of 8.3.5.3.5, and
 * finally stores the MV with the signed-modulus wrap of section 4.11,
 * duplicating it over all four 8x8 blocks for 1-MV macroblocks.
 * NOTE(review): embedded numbering (1418..1666) is non-contiguous --
 * else-branches, closing braces and some intra-handling lines are missing
 * from this extraction; verify against the upstream file before editing.
 */
1418 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1419 int mv1, int r_x, int r_y, uint8_t* is_intra,
1420 int pred_flag, int dir)
1422 MpegEncContext *s = &v->s;
1423 int xy, wrap, off = 0;
1427 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1428 int opposite, a_f, b_f, c_f;
1429 int16_t field_predA[2];
1430 int16_t field_predB[2];
1431 int16_t field_predC[2];
1432 int a_valid, b_valid, c_valid;
1433 int hybridmv_thresh, y_bias = 0;
1435 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1436 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1440 /* scale MV difference to be quad-pel */
1441 dmv_x <<= 1 - s->quarter_sample;
1442 dmv_y <<= 1 - s->quarter_sample;
1444 wrap = s->b8_stride;
1445 xy = s->block_index[n];
/* intra block: zero both direction's MVs (branch head not visible here) */
1448 s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1449 s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1450 s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1451 s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1452 if (mv1) { /* duplicate motion data for 1-MV block */
1453 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1454 s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1455 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1456 s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1457 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1458 s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1459 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1460 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1461 s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
/* NOTE(review): the [0] store below lacks "+ v->blocks_off" unlike every
 * surrounding store -- looks like a typo; confirm against upstream. */
1462 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1463 s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1464 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1465 s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
/* candidate predictors: C = left, A = above, B = above +/- off */
1470 C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1471 A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1473 if (v->field_mode && mixedmv_pic)
1474 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1476 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1478 //in 4-MV mode different blocks have different B predictor position
1481 off = (s->mb_x > 0) ? -1 : 1;
1484 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1493 B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
/* validity of each candidate w.r.t. slice/picture borders */
1495 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1496 b_valid = a_valid && (s->mb_width > 1);
1497 c_valid = s->mb_x || (n == 1 || n == 3);
1498 if (v->field_mode) {
1499 a_valid = a_valid && !is_intra[xy - wrap];
1500 b_valid = b_valid && !is_intra[xy - wrap + off];
1501 c_valid = c_valid && !is_intra[xy - 1];
/* count same-field vs opposite-field candidates via mv_f flags */
1505 a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1506 num_oppfield += a_f;
1507 num_samefield += 1 - a_f;
1508 field_predA[0] = A[0];
1509 field_predA[1] = A[1];
1511 field_predA[0] = field_predA[1] = 0;
1515 b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1516 num_oppfield += b_f;
1517 num_samefield += 1 - b_f;
1518 field_predB[0] = B[0];
1519 field_predB[1] = B[1];
1521 field_predB[0] = field_predB[1] = 0;
1525 c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1526 num_oppfield += c_f;
1527 num_samefield += 1 - c_f;
1528 field_predC[0] = C[0];
1529 field_predC[1] = C[1];
1531 field_predC[0] = field_predC[1] = 0;
1535 if (v->field_mode) {
1537 // REFFIELD determines if the last field or the second-last field is
1538 // to be used as reference
1539 opposite = 1 - v->reffield;
1541 if (num_samefield <= num_oppfield)
1542 opposite = 1 - pred_flag;
1544 opposite = pred_flag;
/* opposite-field prediction: rescale same-field candidates */
1549 if (a_valid && !a_f) {
1550 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1551 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1553 if (b_valid && !b_f) {
1554 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1555 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1557 if (c_valid && !c_f) {
1558 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1559 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1561 v->mv_f[dir][xy + v->blocks_off] = 1;
1562 v->ref_field_type[dir] = !v->cur_field_type;
/* same-field prediction: rescale opposite-field candidates */
1564 if (a_valid && a_f) {
1565 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1566 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1568 if (b_valid && b_f) {
1569 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1570 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1572 if (c_valid && c_f) {
1573 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1574 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1576 v->mv_f[dir][xy + v->blocks_off] = 0;
1577 v->ref_field_type[dir] = v->cur_field_type;
/* pick a single candidate by priority A > C > B */
1581 px = field_predA[0];
1582 py = field_predA[1];
1583 } else if (c_valid) {
1584 px = field_predC[0];
1585 py = field_predC[1];
1586 } else if (b_valid) {
1587 px = field_predB[0];
1588 py = field_predB[1];
/* median of the three candidates when more than one is available */
1594 if (num_samefield + num_oppfield > 1) {
1595 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1596 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1599 /* Pullback MV as specified in 8.3.5.3.4 */
1600 if (!v->field_mode) {
1602 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1603 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1604 X = (s->mb_width << 6) - 4;
1605 Y = (s->mb_height << 6) - 4;
1607 if (qx + px < -60) px = -60 - qx;
1608 if (qy + py < -60) py = -60 - qy;
1610 if (qx + px < -28) px = -28 - qx;
1611 if (qy + py < -28) py = -28 - qy;
1613 if (qx + px > X) px = X - qx;
1614 if (qy + py > Y) py = Y - qy;
1617 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1618 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1619 hybridmv_thresh = 32;
1620 if (a_valid && c_valid) {
1621 if (is_intra[xy - wrap])
1622 sum = FFABS(px) + FFABS(py);
1624 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1625 if (sum > hybridmv_thresh) {
1626 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1627 px = field_predA[0];
1628 py = field_predA[1];
1630 px = field_predC[0];
1631 py = field_predC[1];
1634 if (is_intra[xy - 1])
1635 sum = FFABS(px) + FFABS(py);
1637 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1638 if (sum > hybridmv_thresh) {
1639 if (get_bits1(&s->gb)) {
1640 px = field_predA[0];
1641 py = field_predA[1];
1643 px = field_predC[0];
1644 py = field_predC[1];
1651 if (v->field_mode && v->numref)
1653 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1655 /* store MV using signed modulus of MV range defined in 4.11 */
1656 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1657 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1658 if (mv1) { /* duplicate motion data for 1-MV block */
1659 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1660 s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1661 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1662 s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1663 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1664 s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1665 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1666 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1670 /** Predict and set motion vector for interlaced frame picture MBs
/*
 * Predict and set the motion vector for block n of an interlaced-frame
 * macroblock.  Candidate A (left), B (above) and C (above-right, or
 * above-left at the right picture edge) are fetched with frame/field MV
 * type reconciliation: when the current block and a candidate differ in
 * blk_mv_type, the candidate's top/bottom field MVs are averaged.  The
 * prediction is the median of the valid candidates, refined by
 * same-field/opposite-field majority voting (field parity is carried in
 * bit 2 of the y component), then stored with the signed-modulus wrap and
 * duplicated per mvn (1 = whole-MB 1-MV, 2 = 2-field-MV pair).
 * NOTE(review): embedded numbering (1672..1892) is non-contiguous --
 * else-branches, brace lines and some validity assignments are missing
 * from this extraction; verify against the upstream file before editing.
 */
1672 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1673 int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
1675 MpegEncContext *s = &v->s;
1676 int xy, wrap, off = 0;
1677 int A[2], B[2], C[2];
1679 int a_valid = 0, b_valid = 0, c_valid = 0;
1680 int field_a, field_b, field_c; // 0: same, 1: opposite
1681 int total_valid, num_samefield, num_oppfield;
1682 int pos_c, pos_b, n_adj;
1684 wrap = s->b8_stride;
1685 xy = s->block_index[n];
/* intra block: zero both directions' MVs (branch head not visible here) */
1688 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1689 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1690 s->current_picture.motion_val[1][xy][0] = 0;
1691 s->current_picture.motion_val[1][xy][1] = 0;
1692 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1693 s->current_picture.motion_val[0][xy + 1][0] = 0;
1694 s->current_picture.motion_val[0][xy + 1][1] = 0;
1695 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1696 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1697 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1698 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
1699 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1700 s->current_picture.motion_val[1][xy + 1][0] = 0;
1701 s->current_picture.motion_val[1][xy + 1][1] = 0;
1702 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1703 s->current_picture.motion_val[1][xy + wrap][1] = 0;
1704 s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
1705 s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
/* off selects the other field row of the left neighbour for averaging */
1710 off = ((n == 0) || (n == 1)) ? 1 : -1;
/* Predict A (left neighbour) */
1712 if (s->mb_x || (n == 1) || (n == 3)) {
1713 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1714 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1715 A[0] = s->current_picture.motion_val[dir][xy - 1][0];
1716 A[1] = s->current_picture.motion_val[dir][xy - 1][1];
1718 } else { // current block has frame mv and cand. has field MV (so average)
1719 A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
1720 + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
1721 A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
1722 + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
1725 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1731 /* Predict B and C */
1732 B[0] = B[1] = C[0] = C[1] = 0;
1733 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1734 if (!s->first_slice_line) {
1735 if (!v->is_intra[s->mb_x - s->mb_stride]) {
1738 pos_b = s->block_index[n_adj] - 2 * wrap;
1739 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1740 n_adj = (n & 2) | (n & 1);
1742 B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
1743 B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
1744 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1745 B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1746 B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
/* C from above-right; at the right edge fall back to above-left */
1749 if (s->mb_width > 1) {
1750 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1753 pos_c = s->block_index[2] - 2 * wrap + 2;
1754 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1757 C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
1758 C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
1759 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1760 C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1761 C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1763 if (s->mb_x == s->mb_width - 1) {
1764 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1767 pos_c = s->block_index[3] - 2 * wrap - 2;
1768 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1771 C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
1772 C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
1773 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1774 C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1775 C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
/* bottom blocks of a field-MV MB take B/C from the MB's own top row */
1784 pos_b = s->block_index[1];
1786 B[0] = s->current_picture.motion_val[dir][pos_b][0];
1787 B[1] = s->current_picture.motion_val[dir][pos_b][1];
1788 pos_c = s->block_index[0];
1790 C[0] = s->current_picture.motion_val[dir][pos_c][0];
1791 C[1] = s->current_picture.motion_val[dir][pos_c][1];
1794 total_valid = a_valid + b_valid + c_valid;
1795 // check if predictor A is out of bounds
1796 if (!s->mb_x && !(n == 1 || n == 3)) {
1799 // check if predictor B is out of bounds
1800 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1801 B[0] = B[1] = C[0] = C[1] = 0;
1803 if (!v->blk_mv_type[xy]) {
1804 if (s->mb_width == 1) {
/* frame-MV block: plain median / single-candidate selection */
1808 if (total_valid >= 2) {
1809 px = mid_pred(A[0], B[0], C[0]);
1810 py = mid_pred(A[1], B[1], C[1]);
1811 } else if (total_valid) {
1812 if (a_valid) { px = A[0]; py = A[1]; }
1813 else if (b_valid) { px = B[0]; py = B[1]; }
1814 else { px = C[0]; py = C[1]; }
/* field-MV block: field parity of each candidate from bit 2 of y */
1819 field_a = (A[1] & 4) ? 1 : 0;
1823 field_b = (B[1] & 4) ? 1 : 0;
1827 field_c = (C[1] & 4) ? 1 : 0;
1831 num_oppfield = field_a + field_b + field_c;
1832 num_samefield = total_valid - num_oppfield;
1833 if (total_valid == 3) {
1834 if ((num_samefield == 3) || (num_oppfield == 3)) {
1835 px = mid_pred(A[0], B[0], C[0]);
1836 py = mid_pred(A[1], B[1], C[1]);
1837 } else if (num_samefield >= num_oppfield) {
1838 /* take one MV from same field set depending on priority
1839 the check for B may not be necessary */
1840 px = !field_a ? A[0] : B[0];
1841 py = !field_a ? A[1] : B[1];
1843 px = field_a ? A[0] : B[0];
1844 py = field_a ? A[1] : B[1];
1846 } else if (total_valid == 2) {
1847 if (num_samefield >= num_oppfield) {
1848 if (!field_a && a_valid) {
1851 } else if (!field_b && b_valid) {
1854 } else /*if (c_valid)*/ {
1855 av_assert1(c_valid);
1858 } /*else px = py = 0;*/
1860 if (field_a && a_valid) {
1863 } else /*if (field_b && b_valid)*/ {
1864 av_assert1(field_b && b_valid);
1867 } /*else if (c_valid) {
1872 } else if (total_valid == 1) {
1873 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1874 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1878 /* store MV using signed modulus of MV range defined in 4.11 */
1879 s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1880 s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1881 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1882 s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
1883 s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
1884 s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
1885 s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
1886 s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
1887 s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
1888 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1889 s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
1890 s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
1891 s->mv[dir][n + 1][0] = s->mv[dir][n][0];
1892 s->mv[dir][n + 1][1] = s->mv[dir][n][1];
1896 /** Motion compensation for direct or interpolated blocks in B-frames
/*
 * Motion compensation from the *next* (backward) reference for direct and
 * interpolated B-frame macroblocks.  Computes luma/chroma source
 * positions from s->mv[1][0], clips them per profile, falls back to
 * emulated_edge_mc near picture borders, optionally applies range
 * reduction or intensity compensation to the fetched source, then runs
 * the averaging (rnd-dependent) luma mspel and chroma bilinear DSP
 * routines into s->dest.
 * NOTE(review): embedded numbering (1898..2064) is non-contiguous --
 * else/brace lines, the off/off_uv setup and some emulation parameters
 * are missing from this extraction; verify against the upstream file.
 */
1898 static void vc1_interp_mc(VC1Context *v)
1900 MpegEncContext *s = &v->s;
1901 H264ChromaContext *h264chroma = &v->h264chroma;
1902 uint8_t *srcY, *srcU, *srcV;
1903 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1905 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1906 int use_ic = v->next_use_ic;
/* bail out when no backward reference is available */
1908 if (!v->field_mode && !v->s.next_picture.f.data[0])
1911 mx = s->mv[1][0][0];
1912 my = s->mv[1][0][1];
/* derive chroma MV from luma MV (quarter-pel rounding) */
1913 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1914 uvmy = (my + ((my & 3) == 3)) >> 1;
1915 if (v->field_mode) {
1916 if (v->cur_field_type != v->ref_field_type[1])
1917 my = my - 2 + 4 * v->cur_field_type;
1918 uvmy = uvmy - 2 + 4 * v->cur_field_type;
/* fastuvmc rounding, presumably (condition line not visible) */
1921 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1922 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1924 srcY = s->next_picture.f.data[0];
1925 srcU = s->next_picture.f.data[1];
1926 srcV = s->next_picture.f.data[2];
1928 src_x = s->mb_x * 16 + (mx >> 2);
1929 src_y = s->mb_y * 16 + (my >> 2);
1930 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1931 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1933 if (v->profile != PROFILE_ADVANCED) {
1934 src_x = av_clip( src_x, -16, s->mb_width * 16);
1935 src_y = av_clip( src_y, -16, s->mb_height * 16);
1936 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1937 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1939 src_x = av_clip( src_x, -17, s->avctx->coded_width);
1940 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1941 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1942 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1945 srcY += src_y * s->linesize + src_x;
1946 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1947 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1949 if (v->field_mode && v->ref_field_type[1]) {
1950 srcY += s->current_picture_ptr->f.linesize[0];
1951 srcU += s->current_picture_ptr->f.linesize[1];
1952 srcV += s->current_picture_ptr->f.linesize[2];
1955 /* for grayscale we should not try to read from unknown area */
1956 if (s->flags & CODEC_FLAG_GRAY) {
1957 srcU = s->edge_emu_buffer + 18 * s->linesize;
1958 srcV = s->edge_emu_buffer + 18 * s->linesize;
/* edge emulation when the 17x17(+mspel) read window leaves the picture */
1961 if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
1962 || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1963 || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1964 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1966 srcY -= s->mspel * (1 + s->linesize);
1967 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
1968 s->linesize, s->linesize,
1969 17 + s->mspel * 2, 17 + s->mspel * 2,
1970 src_x - s->mspel, src_y - s->mspel,
1971 s->h_edge_pos, v_edge_pos);
1972 srcY = s->edge_emu_buffer;
1973 s->vdsp.emulated_edge_mc(uvbuf, srcU,
1974 s->uvlinesize, s->uvlinesize,
1977 s->h_edge_pos >> 1, v_edge_pos >> 1);
1978 s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
1979 s->uvlinesize, s->uvlinesize,
1982 s->h_edge_pos >> 1, v_edge_pos >> 1);
1985 /* if we deal with range reduction we need to scale source blocks */
1986 if (v->rangeredfrm) {
1988 uint8_t *src, *src2;
1991 for (j = 0; j < 17 + s->mspel * 2; j++) {
1992 for (i = 0; i < 17 + s->mspel * 2; i++)
1993 src[i] = ((src[i] - 128) >> 1) + 128;
1998 for (j = 0; j < 9; j++) {
1999 for (i = 0; i < 9; i++) {
2000 src[i] = ((src[i] - 128) >> 1) + 128;
2001 src2[i] = ((src2[i] - 128) >> 1) + 128;
2003 src += s->uvlinesize;
2004 src2 += s->uvlinesize;
/* intensity compensation: remap samples through per-field LUTs */
2009 uint8_t (*luty )[256] = v->next_luty;
2010 uint8_t (*lutuv)[256] = v->next_lutuv;
2012 uint8_t *src, *src2;
2015 for (j = 0; j < 17 + s->mspel * 2; j++) {
2016 int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
2017 for (i = 0; i < 17 + s->mspel * 2; i++)
2018 src[i] = luty[f][src[i]];
2023 for (j = 0; j < 9; j++) {
2024 int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
2025 for (i = 0; i < 9; i++) {
2026 src[i] = lutuv[f][src[i]];
2027 src2[i] = lutuv[f][src2[i]];
2029 src += s->uvlinesize;
2030 src2 += s->uvlinesize;
2033 srcY += s->mspel * (1 + s->linesize);
/* luma: mspel (quarter-pel) path, four 8x8 averaging calls */
2040 dxy = ((my & 3) << 2) | (mx & 3);
2041 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2042 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2043 srcY += s->linesize * 8;
2044 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2045 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
/* luma: half-pel path via hpeldsp */
2047 dxy = (my & 2) | ((mx & 2) >> 1);
2050 s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2052 s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
2055 if (s->flags & CODEC_FLAG_GRAY) return;
2056 /* Chroma MC always uses qpel blilinear */
2057 uvmx = (uvmx & 3) << 1;
2058 uvmy = (uvmy & 3) << 1;
2060 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2061 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2063 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2064 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
/*
 * Scale a co-located MV component by the B-frame fraction (direct mode).
 * `inv` selects the backward weight (n - B_FRACTION_DEN), `qs` selects
 * quarter-sample output; the B_FRACTION_DEN==256 path uses shifts, the
 * generic path divisions.
 * NOTE(review): embedded numbering (2068..2083) is non-contiguous -- the
 * declaration of the weight `n`, the inv/qs condition lines and the
 * #else/#endif of the preprocessor conditional are missing from this
 * extraction; verify against the upstream file.
 */
2068 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2072 #if B_FRACTION_DEN==256
2076 return 2 * ((value * n + 255) >> 9);
2077 return (value * n + 128) >> 8;
2080 n -= B_FRACTION_DEN;
2082 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2083 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2087 /** Reconstruct motion vector for B-frame and do motion compensation
/*
 * Run motion compensation for a B-frame macroblock: interpolated mode is
 * handled separately (body not visible here); otherwise a single 1-MV
 * compensation is performed, backward when mode == BMV_TYPE_BACKWARD.
 * NOTE(review): embedded numbering (2089..2103) is non-contiguous -- most
 * of the body (the direct/interpolated calls) is missing from this
 * extraction; verify against the upstream file.
 */
2089 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2090 int direct, int mode)
2097 if (mode == BMV_TYPE_INTERPOLATED) {
2103 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
/* Predict forward and backward motion vectors for a B-frame macroblock
 * (progressive path only — asserted below). dmv_x/dmv_y hold the decoded
 * MV differentials for the two directions; `direct` selects direct mode,
 * `mvtype` one of BMV_TYPE_FORWARD/BACKWARD/INTERPOLATED. */
2106 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2107 int direct, int mvtype)
2109 MpegEncContext *s = &v->s;
2110 int xy, wrap, off = 0;
2115 const uint8_t *is_intra = v->mb_type[0];
2117 av_assert0(!v->field_mode);
2121 /* scale MV difference to be quad-pel */
2122 dmv_x[0] <<= 1 - s->quarter_sample;
2123 dmv_y[0] <<= 1 - s->quarter_sample;
2124 dmv_x[1] <<= 1 - s->quarter_sample;
2125 dmv_y[1] <<= 1 - s->quarter_sample;
2127 wrap = s->b8_stride;
2128 xy = s->block_index[0];
/* Reset both direction MVs for this block before prediction. */
2131 s->current_picture.motion_val[0][xy][0] =
2132 s->current_picture.motion_val[0][xy][1] =
2133 s->current_picture.motion_val[1][xy][0] =
2134 s->current_picture.motion_val[1][xy][1] = 0;
2137 if (direct && s->next_picture_ptr->field_picture)
2138 av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
/* Direct mode: derive both MVs by scaling the co-located MV of the next
 * (backward) reference with BFRACTION — see scale_mv()/8.4.5.4. */
2140 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2141 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2142 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2143 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2145 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2146 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2147 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2148 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2149 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2151 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2152 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2153 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2154 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
/* Non-direct forward prediction: median of left (C), top (A) and
 * top-right/top-left (B) neighbours from the forward MV plane. */
2158 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2159 C = s->current_picture.motion_val[0][xy - 2];
2160 A = s->current_picture.motion_val[0][xy - wrap * 2];
/* At the last MB in a row the top-right neighbour does not exist;
 * use top-left instead. */
2161 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2162 B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
2164 if (!s->mb_x) C[0] = C[1] = 0;
2165 if (!s->first_slice_line) { // predictor A is not out of bounds
2166 if (s->mb_width == 1) {
2170 px = mid_pred(A[0], B[0], C[0]);
2171 py = mid_pred(A[1], B[1], C[1]);
2173 } else if (s->mb_x) { // predictor C is not out of bounds
2179 /* Pullback MV as specified in 8.3.5.3.4 */
/* Simple/Main profiles clamp in half-pel units (<<5), Advanced in
 * quarter-pel units (<<6). */
2182 if (v->profile < PROFILE_ADVANCED) {
2183 qx = (s->mb_x << 5);
2184 qy = (s->mb_y << 5);
2185 X = (s->mb_width << 5) - 4;
2186 Y = (s->mb_height << 5) - 4;
2187 if (qx + px < -28) px = -28 - qx;
2188 if (qy + py < -28) py = -28 - qy;
2189 if (qx + px > X) px = X - qx;
2190 if (qy + py > Y) py = Y - qy;
2192 qx = (s->mb_x << 6);
2193 qy = (s->mb_y << 6);
2194 X = (s->mb_width << 6) - 4;
2195 Y = (s->mb_height << 6) - 4;
2196 if (qx + px < -60) px = -60 - qx;
2197 if (qy + py < -60) py = -60 - qy;
2198 if (qx + px > X) px = X - qx;
2199 if (qy + py > Y) py = Y - qy;
2202 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): the `0 &&` guard makes this hybrid-prediction branch
 * dead code — apparently disabled on purpose; confirm against upstream
 * history before re-enabling. */
2203 if (0 && !s->first_slice_line && s->mb_x) {
2204 if (is_intra[xy - wrap])
2205 sum = FFABS(px) + FFABS(py);
2207 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2209 if (get_bits1(&s->gb)) {
2217 if (is_intra[xy - 2])
2218 sum = FFABS(px) + FFABS(py);
2220 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2222 if (get_bits1(&s->gb)) {
2232 /* store MV using signed modulus of MV range defined in 4.11 */
2233 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2234 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
/* Backward prediction: identical procedure against the backward MV plane. */
2236 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2237 C = s->current_picture.motion_val[1][xy - 2];
2238 A = s->current_picture.motion_val[1][xy - wrap * 2];
2239 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2240 B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
2244 if (!s->first_slice_line) { // predictor A is not out of bounds
2245 if (s->mb_width == 1) {
2249 px = mid_pred(A[0], B[0], C[0]);
2250 py = mid_pred(A[1], B[1], C[1]);
2252 } else if (s->mb_x) { // predictor C is not out of bounds
2258 /* Pullback MV as specified in 8.3.5.3.4 */
2261 if (v->profile < PROFILE_ADVANCED) {
2262 qx = (s->mb_x << 5);
2263 qy = (s->mb_y << 5);
2264 X = (s->mb_width << 5) - 4;
2265 Y = (s->mb_height << 5) - 4;
2266 if (qx + px < -28) px = -28 - qx;
2267 if (qy + py < -28) py = -28 - qy;
2268 if (qx + px > X) px = X - qx;
2269 if (qy + py > Y) py = Y - qy;
2271 qx = (s->mb_x << 6);
2272 qy = (s->mb_y << 6);
2273 X = (s->mb_width << 6) - 4;
2274 Y = (s->mb_height << 6) - 4;
2275 if (qx + px < -60) px = -60 - qx;
2276 if (qy + py < -60) py = -60 - qy;
2277 if (qx + px > X) px = X - qx;
2278 if (qy + py > Y) py = Y - qy;
2281 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
/* NOTE(review): dead branch (guarded by `0 &&`), same as the forward case. */
2282 if (0 && !s->first_slice_line && s->mb_x) {
2283 if (is_intra[xy - wrap])
2284 sum = FFABS(px) + FFABS(py);
2286 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2288 if (get_bits1(&s->gb)) {
2296 if (is_intra[xy - 2])
2297 sum = FFABS(px) + FFABS(py);
2299 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2301 if (get_bits1(&s->gb)) {
2311 /* store MV using signed modulus of MV range defined in 4.11 */
2313 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2314 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
/* Commit the final MVs into the current picture's MV planes. */
2316 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2317 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2318 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2319 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
/* Predict B-frame motion vectors for the interlaced-field case.
 * n: block index (3 == whole-MB context per the calls below); mv1: 1-MV flag;
 * pred_flag: per-direction prediction flags passed through to vc1_pred_mv(). */
2322 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2324 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2325 MpegEncContext *s = &v->s;
2326 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2328 if (v->bmvtype == BMV_TYPE_DIRECT) {
2329 int total_opp, k, f;
/* Direct mode: if the co-located MB in the next picture is not intra,
 * scale its backward-plane MV by BFRACTION for both directions. */
2330 if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2331 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2332 v->bfraction, 0, s->quarter_sample);
2333 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2334 v->bfraction, 0, s->quarter_sample);
2335 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2336 v->bfraction, 1, s->quarter_sample);
2337 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2338 v->bfraction, 1, s->quarter_sample);
/* Majority vote over the four 8x8 sub-blocks' field flags decides the
 * reference field parity f (opposite field when >2 of 4 are opposite). */
2340 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2341 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2342 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2343 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2344 f = (total_opp > 2) ? 1 : 0;
/* Co-located MB was intra: fall back to zero MVs. */
2346 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2347 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2350 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
/* Propagate the derived MVs and field flag to all four sub-blocks. */
2351 for (k = 0; k < 4; k++) {
2352 s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2353 s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2354 s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2355 s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2356 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2357 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
/* Non-direct modes delegate to the generic MV predictor, once per
 * direction for interpolated, otherwise for the coded direction plus a
 * zero-differential prediction for the other one. */
2361 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2362 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2363 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2366 if (dir) { // backward
2367 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2368 if (n == 3 || mv1) {
2369 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2372 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2373 if (n == 3 || mv1) {
2374 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2379 /** Get predicted DC value for I-frames only
2380 * prediction dir: left=0, top=1
2381 * @param s MpegEncContext
2382 * @param overlap flag indicating that overlap filtering is used
2383 * @param pq integer part of picture quantizer
2384 * @param[in] n block index in the current MB
2385 * @param dc_val_ptr Pointer to DC predictor
2386 * @param dir_ptr Prediction direction for use in AC prediction
2388 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2389 int16_t **dc_val_ptr, int *dir_ptr)
2391 int a, b, c, wrap, pred, scale;
/* dcpred[i] == round(1024 / i): default DC prediction for missing
 * neighbours, expressed pre-divided by the DC scale (index 0 unused). */
2393 static const uint16_t dcpred[32] = {
2394 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2395 114, 102, 93, 85, 79, 73, 68, 64,
2396 60, 57, 54, 51, 49, 47, 45, 43,
2397 41, 39, 38, 37, 35, 34, 33
2400 /* find prediction - wmv3_dc_scale always used here in fact */
2401 if (n < 4) scale = s->y_dc_scale;
2402 else scale = s->c_dc_scale;
/* Neighbour DC values: a = top, b = top-left (c = left, read on an
 * elided line in this excerpt). */
2404 wrap = s->block_wrap[n];
2405 dc_val = s->dc_val[0] + s->block_index[n];
2411 b = dc_val[ - 1 - wrap];
2412 a = dc_val[ - wrap];
/* Low quantizer or no overlap filtering: substitute the default DC for
 * out-of-picture neighbours. */
2414 if (pq < 9 || !overlap) {
2415 /* Set outer values */
2416 if (s->first_slice_line && (n != 2 && n != 3))
2417 b = a = dcpred[scale];
2418 if (s->mb_x == 0 && (n != 1 && n != 3))
2419 b = c = dcpred[scale];
2421 /* Set outer values */
2422 if (s->first_slice_line && (n != 2 && n != 3))
2424 if (s->mb_x == 0 && (n != 1 && n != 3))
/* Direction choice: horizontal gradient |a-b| vs vertical |b-c|;
 * the smaller one selects the predictor (left when |a-b| <= |b-c|). */
2428 if (abs(a - b) <= abs(b - c)) {
2430 *dir_ptr = 1; // left
2433 *dir_ptr = 0; // top
2436 /* update predictor */
2437 *dc_val_ptr = &dc_val[0];
2442 /** Get predicted DC value
2443 * prediction dir: left=0, top=1
2444 * @param s MpegEncContext
2445 * @param overlap flag indicating that overlap filtering is used
2446 * @param pq integer part of picture quantizer
2447 * @param[in] n block index in the current MB
2448 * @param a_avail flag indicating top block availability
2449 * @param c_avail flag indicating left block availability
2450 * @param dc_val_ptr Pointer to DC predictor
2451 * @param dir_ptr Prediction direction for use in AC prediction
2453 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2454 int a_avail, int c_avail,
2455 int16_t **dc_val_ptr, int *dir_ptr)
2457 int a, b, c, wrap, pred;
2459 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2463 wrap = s->block_wrap[n];
2464 dc_val = s->dc_val[0] + s->block_index[n];
/* Neighbour DC values: a = top, b = top-left (left neighbour c is read
 * on an elided line in this excerpt). */
2470 b = dc_val[ - 1 - wrap];
2471 a = dc_val[ - wrap];
2472 /* scale predictors if needed */
2473 q1 = s->current_picture.qscale_table[mb_pos];
2474 dqscale_index = s->y_dc_scale_table[q1] - 1;
2475 if (dqscale_index < 0)
/* Each available neighbour's DC is rescaled from its MB's quantizer to
 * the current one: x * scale(q2) / scale(q1), with the division done as
 * a fixed-point multiply by ff_vc1_dqscale (18-bit, rounded). */
2477 if (c_avail && (n != 1 && n != 3)) {
2478 q2 = s->current_picture.qscale_table[mb_pos - 1];
2480 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2482 if (a_avail && (n != 2 && n != 3)) {
2483 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2485 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2487 if (a_avail && c_avail && (n != 3)) {
2492 off -= s->mb_stride;
2493 q2 = s->current_picture.qscale_table[off];
2495 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
/* Direction selection: gradient rule when both neighbours exist,
 * otherwise whichever neighbour is available (default: left). */
2498 if (a_avail && c_avail) {
2499 if (abs(a - b) <= abs(b - c)) {
2501 *dir_ptr = 1; // left
2504 *dir_ptr = 0; // top
2506 } else if (a_avail) {
2508 *dir_ptr = 0; // top
2509 } else if (c_avail) {
2511 *dir_ptr = 1; // left
2514 *dir_ptr = 1; // left
2517 /* update predictor */
2518 *dc_val_ptr = &dc_val[0];
2522 /** @} */ // Block group
2525 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2526 * @see 7.1.4, p91 and 8.1.1.7, p104
/* Predict the coded-block flag for block n from the left (a), top-left (b)
 * and top (c) neighbours in s->coded_block; returns the prediction and
 * hands back the slot for the current block so the caller can update it.
 * (The prediction formula itself falls on lines elided in this excerpt.) */
2530 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2531 uint8_t **coded_block_ptr)
2533 int xy, wrap, pred, a, b, c;
2535 xy = s->block_index[n];
2536 wrap = s->b8_stride;
2541 a = s->coded_block[xy - 1 ];
2542 b = s->coded_block[xy - 1 - wrap];
2543 c = s->coded_block[xy - wrap];
2552 *coded_block_ptr = &s->coded_block[xy];
2558 * Decode one AC coefficient
2559 * @param v The VC1 context
2560 * @param last Last coefficient
2561 * @param skip How much zero coefficients to skip
2562 * @param value Decoded AC coefficient value
2563 * @param codingset set of VLC to decode data
2566 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2567 int *value, int codingset)
2569 GetBitContext *gb = &v->s.gb;
2570 int index, escape, run = 0, level = 0, lst = 0;
/* Regular (non-escape) codeword: run/level/last come straight from the
 * per-codingset tables. The get_bits_left() check forces `last` on
 * bitstream exhaustion so the caller's loop terminates. */
2572 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2573 if (index != ff_vc1_ac_sizes[codingset] - 1) {
2574 run = vc1_index_decode_table[codingset][index][0];
2575 level = vc1_index_decode_table[codingset][index][1];
2576 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
/* Escape path: decode210() selects escape mode 0/1/2; modes 0 and 1
 * re-use the VLC then add a delta to level or run respectively. */
2580 escape = decode210(gb);
2582 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2583 run = vc1_index_decode_table[codingset][index][0];
2584 level = vc1_index_decode_table[codingset][index][1];
2585 lst = index >= vc1_last_decode_table[codingset];
2588 level += vc1_last_delta_level_table[codingset][run];
2590 level += vc1_delta_level_table[codingset][run];
2593 run += vc1_last_delta_run_table[codingset][level] + 1;
2595 run += vc1_delta_run_table[codingset][level] + 1;
/* Escape mode 3: fully explicit coding. The field widths are read once
 * per picture (esc3_level_length == 0) from table 59 or 60 depending on
 * the quantizer, then raw last/run/sign/level fields follow. */
2601 lst = get_bits1(gb);
2602 if (v->s.esc3_level_length == 0) {
2603 if (v->pq < 8 || v->dquantfrm) { // table 59
2604 v->s.esc3_level_length = get_bits(gb, 3);
2605 if (!v->s.esc3_level_length)
2606 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2607 } else { // table 60
2608 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2610 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2612 run = get_bits(gb, v->s.esc3_run_length);
2613 sign = get_bits1(gb);
2614 level = get_bits(gb, v->s.esc3_level_length);
2625 /** Decode intra block in intra frames - should be faster than decode_intra_block
2626 * @param v VC1Context
2627 * @param block block to decode
2628 * @param[in] n subblock index
2629 * @param coded are AC coeffs present or not
2630 * @param codingset set of VLC to decode data
2632 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2633 int coded, int codingset)
2635 GetBitContext *gb = &v->s.gb;
2636 MpegEncContext *s = &v->s;
2637 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2640 int16_t *ac_val, *ac_val2;
2643 /* Get DC differential */
/* Luma blocks (n < 4, presumably — selector line elided) use the luma DC
 * VLC, chroma the chroma one. */
2645 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2647 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2650 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape (index 119): DC differential is coded raw, with a width that
 * depends on the picture quantizer; otherwise small values get extra
 * refinement bits for pq 1 and 2. */
2654 if (dcdiff == 119 /* ESC index value */) {
2655 /* TODO: Optimize */
2656 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2657 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2658 else dcdiff = get_bits(gb, 8);
2661 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2662 else if (v->pq == 2)
2663 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* Add DC prediction and store the reconstructed DC (scaled) in block[0]. */
2670 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2673 /* Store the quantized DC coeff, used for prediction */
2675 block[0] = dcdiff * s->y_dc_scale;
2677 block[0] = dcdiff * s->c_dc_scale;
/* AC decoding path (block is coded): pick the scan order and run the
 * run/level decode loop via vc1_decode_ac_coeff(). */
2688 int last = 0, skip, value;
2689 const uint8_t *zz_table;
2693 scale = v->pq * 2 + v->halfpq;
/* Scan table choice depends on AC prediction direction:
 * zz_8x8[2]/[3] = horizontal/vertical predicted scans, [1] = normal. */
2697 zz_table = v->zz_8x8[2];
2699 zz_table = v->zz_8x8[3];
2701 zz_table = v->zz_8x8[1];
2703 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
/* ac_val layout: [0..7] = left column coeffs, [8..15] = top row coeffs;
 * step back by one block (or one row of blocks) to reach the neighbour. */
2705 if (dc_pred_dir) // left
2708 ac_val -= 16 * s->block_wrap[n];
2711 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2715 block[zz_table[i++]] = value;
2718 /* apply AC prediction if needed */
2720 if (dc_pred_dir) { // left
2721 for (k = 1; k < 8; k++)
2722 block[k << v->left_blk_sh] += ac_val[k];
2724 for (k = 1; k < 8; k++)
2725 block[k << v->top_blk_sh] += ac_val[k + 8];
2728 /* save AC coeffs for further prediction */
2729 for (k = 1; k < 8; k++) {
2730 ac_val2[k] = block[k << v->left_blk_sh];
2731 ac_val2[k + 8] = block[k << v->top_blk_sh];
2734 /* scale AC coeffs */
/* Inverse quantization; for non-uniform quantizer add +/-pq to nonzero
 * coefficients. */
2735 for (k = 1; k < 64; k++)
2739 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2742 if (s->ac_pred) i = 63;
/* Not-coded path: still refresh the AC predictor arrays, and when AC
 * prediction is on, synthesize the predicted row/column coefficients. */
2748 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2752 scale = v->pq * 2 + v->halfpq;
2753 memset(ac_val2, 0, 16 * 2);
2754 if (dc_pred_dir) { // left
2757 memcpy(ac_val2, ac_val, 8 * 2);
2759 ac_val -= 16 * s->block_wrap[n];
2761 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2764 /* apply AC prediction if needed */
2766 if (dc_pred_dir) { //left
2767 for (k = 1; k < 8; k++) {
2768 block[k << v->left_blk_sh] = ac_val[k] * scale;
2769 if (!v->pquantizer && block[k << v->left_blk_sh])
2770 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2773 for (k = 1; k < 8; k++) {
2774 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2775 if (!v->pquantizer && block[k << v->top_blk_sh])
2776 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2782 s->block_last_index[n] = i;
2787 /** Decode intra block in intra frames - should be faster than decode_intra_block
2788 * @param v VC1Context
2789 * @param block block to decode
2790 * @param[in] n subblock number
2791 * @param coded are AC coeffs present or not
2792 * @param codingset set of VLC to decode data
2793 * @param mquant quantizer value for this macroblock
2795 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2796 int coded, int codingset, int mquant)
2798 GetBitContext *gb = &v->s.gb;
2799 MpegEncContext *s = &v->s;
2800 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2802 int16_t *dc_val = NULL;
2803 int16_t *ac_val, *ac_val2;
2805 int a_avail = v->a_avail, c_avail = v->c_avail;
2806 int use_pred = s->ac_pred;
2809 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2811 /* Get DC differential */
2813 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2815 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2818 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape DC coding: same scheme as vc1_decode_i_block() but keyed on the
 * macroblock quantizer rather than the picture quantizer. */
2822 if (dcdiff == 119 /* ESC index value */) {
2823 /* TODO: Optimize */
2824 if (mquant == 1) dcdiff = get_bits(gb, 10);
2825 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2826 else dcdiff = get_bits(gb, 8);
2829 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2830 else if (mquant == 2)
2831 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
/* Availability-aware DC prediction (advanced profile variant). */
2838 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2841 /* Store the quantized DC coeff, used for prediction */
2843 block[0] = dcdiff * s->y_dc_scale;
2845 block[0] = dcdiff * s->c_dc_scale;
2851 /* check if AC is needed at all */
2852 if (!a_avail && !c_avail)
2854 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2857 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2859 if (dc_pred_dir) // left
2862 ac_val -= 16 * s->block_wrap[n];
/* q1 = current MB quantizer, q2 = predictor MB quantizer; used below to
 * rescale the neighbour's stored AC coefficients when they differ. */
2864 q1 = s->current_picture.qscale_table[mb_pos];
2865 if ( dc_pred_dir && c_avail && mb_pos)
2866 q2 = s->current_picture.qscale_table[mb_pos - 1];
2867 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2868 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2869 if ( dc_pred_dir && n == 1)
2871 if (!dc_pred_dir && n == 2)
/* Coded path: choose scan table, honoring interlaced-frame coding. */
2877 int last = 0, skip, value;
2878 const uint8_t *zz_table;
2882 if (!use_pred && v->fcm == ILACE_FRAME) {
2883 zz_table = v->zzi_8x8;
2885 if (!dc_pred_dir) // top
2886 zz_table = v->zz_8x8[2];
2888 zz_table = v->zz_8x8[3];
2891 if (v->fcm != ILACE_FRAME)
2892 zz_table = v->zz_8x8[1];
2894 zz_table = v->zzi_8x8;
2898 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2902 block[zz_table[i++]] = value;
2905 /* apply AC prediction if needed */
2907 /* scale predictors if needed*/
/* Neighbour stored ACs were quantized with q2; rescale to q1 with the
 * 18-bit fixed-point reciprocal table (bail out on invalid q1). */
2908 if (q2 && q1 != q2) {
2909 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2910 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2913 return AVERROR_INVALIDDATA;
2914 if (dc_pred_dir) { // left
2915 for (k = 1; k < 8; k++)
2916 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2918 for (k = 1; k < 8; k++)
2919 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2922 if (dc_pred_dir) { //left
2923 for (k = 1; k < 8; k++)
2924 block[k << v->left_blk_sh] += ac_val[k];
2926 for (k = 1; k < 8; k++)
2927 block[k << v->top_blk_sh] += ac_val[k + 8];
2931 /* save AC coeffs for further prediction */
2932 for (k = 1; k < 8; k++) {
2933 ac_val2[k ] = block[k << v->left_blk_sh];
2934 ac_val2[k + 8] = block[k << v->top_blk_sh];
2937 /* scale AC coeffs */
2938 for (k = 1; k < 64; k++)
2942 block[k] += (block[k] < 0) ? -mquant : mquant;
2945 if (use_pred) i = 63;
2946 } else { // no AC coeffs
/* Not-coded path: refresh predictor arrays, rescaling the copied
 * neighbour coefficients when quantizers differ. */
2949 memset(ac_val2, 0, 16 * 2);
2950 if (dc_pred_dir) { // left
2952 memcpy(ac_val2, ac_val, 8 * 2);
2953 if (q2 && q1 != q2) {
2954 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2955 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2957 return AVERROR_INVALIDDATA;
2958 for (k = 1; k < 8; k++)
2959 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2964 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2965 if (q2 && q1 != q2) {
2966 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2967 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2969 return AVERROR_INVALIDDATA;
2970 for (k = 1; k < 8; k++)
2971 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2976 /* apply AC prediction if needed */
2978 if (dc_pred_dir) { // left
2979 for (k = 1; k < 8; k++) {
2980 block[k << v->left_blk_sh] = ac_val2[k] * scale;
2981 if (!v->pquantizer && block[k << v->left_blk_sh])
2982 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2985 for (k = 1; k < 8; k++) {
2986 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2987 if (!v->pquantizer && block[k << v->top_blk_sh])
2988 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2994 s->block_last_index[n] = i;
2999 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
3000 * @param v VC1Context
3001 * @param block block to decode
3002 * @param[in] n subblock index
3003 * @param coded are AC coeffs present or not
3004 * @param mquant block quantizer
3005 * @param codingset set of VLC to decode data
3007 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
3008 int coded, int mquant, int codingset)
3010 GetBitContext *gb = &v->s.gb;
3011 MpegEncContext *s = &v->s;
3012 int dc_pred_dir = 0; /* Direction of the DC prediction used */
3014 int16_t *dc_val = NULL;
3015 int16_t *ac_val, *ac_val2;
3017 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3018 int a_avail = v->a_avail, c_avail = v->c_avail;
3019 int use_pred = s->ac_pred;
3023 s->dsp.clear_block(block);
3025 /* XXX: Guard against dumb values of mquant */
3026 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3028 /* Set DC scale - y and c use the same */
3029 s->y_dc_scale = s->y_dc_scale_table[mquant];
3030 s->c_dc_scale = s->c_dc_scale_table[mquant];
3032 /* Get DC differential */
3034 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3036 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3039 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
/* Escape DC coding, keyed on the block quantizer (same scheme as the
 * other intra decoders in this file). */
3043 if (dcdiff == 119 /* ESC index value */) {
3044 /* TODO: Optimize */
3045 if (mquant == 1) dcdiff = get_bits(gb, 10);
3046 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3047 else dcdiff = get_bits(gb, 8);
3050 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3051 else if (mquant == 2)
3052 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3059 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3062 /* Store the quantized DC coeff, used for prediction */
3065 block[0] = dcdiff * s->y_dc_scale;
3067 block[0] = dcdiff * s->c_dc_scale;
3073 /* check if AC is needed at all and adjust direction if needed */
/* Force a valid prediction direction given neighbour availability. */
3074 if (!a_avail) dc_pred_dir = 1;
3075 if (!c_avail) dc_pred_dir = 0;
3076 if (!a_avail && !c_avail) use_pred = 0;
3077 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3080 scale = mquant * 2 + v->halfpq;
3082 if (dc_pred_dir) //left
3085 ac_val -= 16 * s->block_wrap[n];
/* q1/q2 quantizer bookkeeping for rescaling predictor ACs, as in
 * vc1_decode_i_block_adv(). */
3087 q1 = s->current_picture.qscale_table[mb_pos];
3088 if (dc_pred_dir && c_avail && mb_pos)
3089 q2 = s->current_picture.qscale_table[mb_pos - 1];
3090 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3091 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3092 if ( dc_pred_dir && n == 1)
3094 if (!dc_pred_dir && n == 2)
3096 if (n == 3) q2 = q1;
3099 int last = 0, skip, value;
3103 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
/* Scan selection at coefficient-store time: progressive uses the normal
 * scan, interlaced-frame with AC prediction uses the directional scans,
 * otherwise the interlaced scan. */
3107 if (v->fcm == PROGRESSIVE)
3108 block[v->zz_8x8[0][i++]] = value;
3110 if (use_pred && (v->fcm == ILACE_FRAME)) {
3111 if (!dc_pred_dir) // top
3112 block[v->zz_8x8[2][i++]] = value;
3114 block[v->zz_8x8[3][i++]] = value;
3116 block[v->zzi_8x8[i++]] = value;
3121 /* apply AC prediction if needed */
3123 /* scale predictors if needed*/
3124 if (q2 && q1 != q2) {
3125 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3126 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3129 return AVERROR_INVALIDDATA;
3130 if (dc_pred_dir) { // left
3131 for (k = 1; k < 8; k++)
3132 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3134 for (k = 1; k < 8; k++)
3135 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3138 if (dc_pred_dir) { // left
3139 for (k = 1; k < 8; k++)
3140 block[k << v->left_blk_sh] += ac_val[k];
3142 for (k = 1; k < 8; k++)
3143 block[k << v->top_blk_sh] += ac_val[k + 8];
3147 /* save AC coeffs for further prediction */
3148 for (k = 1; k < 8; k++) {
3149 ac_val2[k ] = block[k << v->left_blk_sh];
3150 ac_val2[k + 8] = block[k << v->top_blk_sh];
3153 /* scale AC coeffs */
3154 for (k = 1; k < 64; k++)
3158 block[k] += (block[k] < 0) ? -mquant : mquant;
3161 if (use_pred) i = 63;
3162 } else { // no AC coeffs
/* Not-coded path: refresh predictor arrays, with quantizer rescale. */
3165 memset(ac_val2, 0, 16 * 2);
3166 if (dc_pred_dir) { // left
3168 memcpy(ac_val2, ac_val, 8 * 2);
3169 if (q2 && q1 != q2) {
3170 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3171 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3173 return AVERROR_INVALIDDATA;
3174 for (k = 1; k < 8; k++)
3175 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3180 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3181 if (q2 && q1 != q2) {
3182 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3183 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3185 return AVERROR_INVALIDDATA;
3186 for (k = 1; k < 8; k++)
3187 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3192 /* apply AC prediction if needed */
3194 if (dc_pred_dir) { // left
3195 for (k = 1; k < 8; k++) {
3196 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3197 if (!v->pquantizer && block[k << v->left_blk_sh])
3198 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3201 for (k = 1; k < 8; k++) {
3202 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3203 if (!v->pquantizer && block[k << v->top_blk_sh])
3204 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3210 s->block_last_index[n] = i;
/* Decode a residual block of a P-frame macroblock: pick the transform type
 * (8x8 / two 8x4 / two 4x8 / four 4x4), decode the sub-block coefficients,
 * inverse-transform and add to `dst`. `ttmb` carries the MB-level transform
 * signalling; `ttmb_out` accumulates per-block transform types for the
 * loop filter. */
3217 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3218 int mquant, int ttmb, int first_block,
3219 uint8_t *dst, int linesize, int skip_block,
3222 MpegEncContext *s = &v->s;
3223 GetBitContext *gb = &s->gb;
3226 int scale, off, idx, last, skip, value;
3227 int ttblk = ttmb & 7;
3230 s->dsp.clear_block(block);
/* Block-level transform type / sub-block pattern, read from the
 * bitstream when not fixed at MB level. */
3233 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3235 if (ttblk == TT_4X4) {
3236 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3238 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3239 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3240 || (!v->res_rtm_flag && !first_block))) {
3241 subblkpat = decode012(gb);
3243 subblkpat ^= 3; // swap decoded pattern bits
/* Normalize TOP/BOTTOM and LEFT/RIGHT variants into the generic type
 * plus a sub-block pattern. */
3244 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3246 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3249 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3251 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3252 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3253 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3256 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3257 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
/* TT_8X8 case: single run/level pass over the full block, then either a
 * DC-only or full inverse 8x8 transform. */
3266 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3271 idx = v->zz_8x8[0][i++];
3273 idx = v->zzi_8x8[i++];
3274 block[idx] = value * scale;
3276 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3280 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3282 v->vc1dsp.vc1_inv_trans_8x8(block);
3283 s->dsp.add_pixels_clamped(block, dst, linesize);
/* TT_4X4: four 4x4 sub-blocks; subblkpat bit 3-j set means sub-block j
 * carries no coefficients. */
3288 pat = ~subblkpat & 0xF;
3289 for (j = 0; j < 4; j++) {
3290 last = subblkpat & (1 << (3 - j));
3292 off = (j & 1) * 4 + (j & 2) * 16;
3294 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3299 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3301 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3302 block[idx + off] = value * scale;
3304 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3306 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3308 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3310 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
/* TT_8X4: two horizontal halves. */
3315 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3316 for (j = 0; j < 2; j++) {
3317 last = subblkpat & (1 << (1 - j));
3321 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3326 idx = v->zz_8x4[i++] + off;
3328 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3329 block[idx] = value * scale;
3331 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3333 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3335 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3337 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
/* TT_4X8: two vertical halves. */
3342 pat = ~(subblkpat * 5) & 0xF;
3343 for (j = 0; j < 2; j++) {
3344 last = subblkpat & (1 << (1 - j));
3348 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3353 idx = v->zz_4x8[i++] + off;
3355 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3356 block[idx] = value * scale;
3358 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3360 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3362 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3364 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
/* Record this block's transform type for the in-loop filter. */
3370 *ttmb_out |= ttblk << (n * 4);
3374 /** @} */ // Macroblock group
/* Code-size and offset tables for MV differential decoding; presumably
 * indexed by the decoded MVDATA size class — confirm against the spec. */
3376 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3377 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/* Apply the vertical in-loop deblocking filter for one block of the
 * macroblock one row above/left of the current position (the filter lags
 * decoding by one MB). block_num: 0-3 luma 8x8 sub-blocks, 4-5 chroma. */
3379 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3381 MpegEncContext *s = &v->s;
/* CBP and intra masks of the MB being filtered (one MB row back). */
3382 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3383 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3384 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3385 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3386 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3389 if (block_num > 3) {
3390 dst = s->dest[block_num - 3];
3392 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3394 if (s->mb_y != s->end_mb_y || block_num < 2) {
/* Gather the state of the block below the edge: for chroma it lives in
 * the MB below; for luma blocks 0/1 it is blocks 2/3 of the same MB. */
3398 if (block_num > 3) {
3399 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3400 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3401 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3402 mv_stride = s->mb_stride;
3404 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3405 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3406 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3407 : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3408 mv_stride = s->b8_stride;
3409 mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
/* Filter the full 8-wide edge when either side is intra or the MVs
 * across the edge differ; otherwise filter only the coded 4-wide halves. */
3412 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3413 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3414 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq)
3416 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3418 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3421 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3423 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* Internal edge of an 8x4/4x4-transformed block (4 rows up). */
3428 dst -= 4 * linesize;
3429 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3430 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3431 idx = (block_cbp | (block_cbp >> 2)) & 3;
3433 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3436 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3438 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
/* Apply the horizontal in-loop deblocking filter, mirroring
 * vc1_apply_p_v_loop_filter() but for vertical edges; operates on the MB
 * one position up-left of the current one. */
3443 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3445 MpegEncContext *s = &v->s;
3446 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3447 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3448 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3449 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3450 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3453 if (block_num > 3) {
3454 dst = s->dest[block_num - 3] - 8 * linesize;
3456 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3459 if (s->mb_x != s->mb_width || !(block_num & 5)) {
/* State of the block to the right of the edge: chroma takes it from the
 * MB to the right; luma even blocks take the odd block of the same MB. */
3462 if (block_num > 3) {
3463 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3464 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3465 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3467 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3468 : (mb_cbp >> ((block_num + 1) * 4));
3469 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3470 : (mb_is_intra >> ((block_num + 1) * 4));
3471 mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
/* Full 8-tall edge when intra on either side or MVs differ, else only
 * the coded 4-tall halves. */
3473 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3474 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3476 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3478 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3481 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3483 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
/* Internal vertical edge of a 4x8/4x4-transformed block. */
3489 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3490 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3491 idx = (block_cbp | (block_cbp >> 1)) & 5;
3493 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3496 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3498 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3503 static void vc1_apply_p_loop_filter(VC1Context *v)
/* Run the P-frame in-loop deblocking for the current MB position: vertical
 * edges first, then horizontal edges one MB behind (H must see V-filtered
 * pixels).  NOTE(review): some lines are elided from this chunk (original
 * line numbers skip), e.g. the loop braces/else parts are not visible. */
3505 MpegEncContext *s = &v->s;
/* Vertical filter for all 6 blocks (4 luma + 2 chroma) of this MB. */
3508 for (i = 0; i < 6; i++) {
3509 vc1_apply_p_v_loop_filter(v, i);
3512 /* V always precedes H, therefore we run H one MB before V;
3513 * at the end of a row, we catch up to complete the row */
3515 for (i = 0; i < 6; i++) {
3516 vc1_apply_p_h_loop_filter(v, i);
/* At the last MB of a row, advance the block index and run H once more so
 * the row is fully filtered before moving to the next row. */
3518 if (s->mb_x == s->mb_width - 1) {
3520 ff_update_block_index(s);
3521 for (i = 0; i < 6; i++) {
3522 vc1_apply_p_h_loop_filter(v, i);
3528 /** Decode one P-frame MB (progressive).
 *
 *  Handles both 1MV and 4MV macroblocks, coded and skipped, intra and inter
 *  blocks, and records per-MB cbp / transform-type / intra masks used later
 *  by the loop filter.
 *
 *  NOTE(review): the embedded original line numbers skip values, so several
 *  statements (else branches, closing braces, GET_MQUANT sites, error paths)
 *  are elided from this view; comments describe only the visible lines.
 */
3530 static int vc1_decode_p_mb(VC1Context *v)
3532 MpegEncContext *s = &v->s;
3533 GetBitContext *gb = &s->gb;
3535 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3536 int cbp; /* cbp decoding stuff */
3537 int mqdiff, mquant; /* MB quantization */
3538 int ttmb = v->ttfrm; /* MB Transform type */
3540 int mb_has_coeffs = 1; /* last_flag */
3541 int dmv_x, dmv_y; /* Differential MV components */
3542 int index, index1; /* LUT indexes */
3543 int val, sign; /* temp values */
3544 int first_block = 1;
3546 int skipped, fourmv;
/* Accumulators for per-MB state consumed by the P-frame loop filter. */
3547 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3549 mquant = v->pq; /* lossy initialization */
/* MVTYPEMB and SKIPMB may be raw (read per MB) or come from a bitplane. */
3551 if (v->mv_type_is_raw)
3552 fourmv = get_bits1(gb);
3554 fourmv = v->mv_type_mb_plane[mb_pos];
3556 skipped = get_bits1(gb);
3558 skipped = v->s.mbskip_table[mb_pos];
3560 if (!fourmv) { /* 1MV mode */
3562 GET_MVDATA(dmv_x, dmv_y);
/* Clear the backward MV slot; P frames only use forward prediction. */
3565 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3566 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3568 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3569 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3571 /* FIXME Set DC val for inter block ? */
3572 if (s->mb_intra && !mb_has_coeffs) {
3574 s->ac_pred = get_bits1(gb);
3576 } else if (mb_has_coeffs) {
3578 s->ac_pred = get_bits1(gb);
3579 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3585 s->current_picture.qscale_table[mb_pos] = mquant;
/* Per-MB transform type (TTMB) is only signalled when not fixed per frame. */
3587 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3588 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3589 VC1_TTMB_VLC_BITS, 2);
3590 if (!s->mb_intra) vc1_mc_1mv(v, 0);
/* Decode/reconstruct each of the 6 blocks (4 luma, 2 chroma). */
3592 for (i = 0; i < 6; i++) {
3593 s->dc_val[0][s->block_index[i]] = 0;
3595 val = ((cbp >> (5 - i)) & 1);
/* Pixel offset of luma sub-block i inside the MB; chroma uses offset 0. */
3596 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3597 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3599 /* check if prediction blocks A and C are available */
3600 v->a_avail = v->c_avail = 0;
3601 if (i == 2 || i == 3 || !s->first_slice_line)
3602 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3603 if (i == 1 || i == 3 || s->mb_x)
3604 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3606 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3607 (i & 4) ? v->codingset2 : v->codingset);
/* Skip chroma reconstruction entirely in grayscale-only mode. */
3608 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3610 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* presumably range-reduction doubling of coefficients — elided condition,
 * confirm against the full file. */
3612 for (j = 0; j < 64; j++)
3613 s->block[i][j] <<= 1;
3614 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* Overlap smoothing applies only at high quantizers with OVERLAP set. */
3615 if (v->pq >= 9 && v->overlap) {
3617 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3619 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
/* Intra blocks count as fully coded for the loop-filter cbp mask. */
3621 block_cbp |= 0xF << (i << 2);
3622 block_intra |= 1 << i;
3624 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3625 s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3626 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3627 block_cbp |= pat << (i << 2);
3628 if (!v->ttmbf && ttmb < 8)
/* Skipped 1MV MB: zero block metadata, predict MV and motion-compensate. */
3635 for (i = 0; i < 6; i++) {
3636 v->mb_type[0][s->block_index[i]] = 0;
3637 s->dc_val[0][s->block_index[i]] = 0;
3639 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3640 s->current_picture.qscale_table[mb_pos] = 0;
3641 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3644 } else { // 4MV mode
3645 if (!skipped /* unskipped MB */) {
3646 int intra_count = 0, coded_inter = 0;
3647 int is_intra[6], is_coded[6];
3649 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* First pass over the 4 luma blocks: read MV data, predict and motion-
 * compensate each block, and record per-block intra/coded flags. */
3650 for (i = 0; i < 6; i++) {
3651 val = ((cbp >> (5 - i)) & 1);
3652 s->dc_val[0][s->block_index[i]] = 0;
3659 GET_MVDATA(dmv_x, dmv_y);
3661 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3663 vc1_mc_4mv_luma(v, i, 0, 0);
3664 intra_count += s->mb_intra;
3665 is_intra[i] = s->mb_intra;
3666 is_coded[i] = mb_has_coeffs;
/* Chroma blocks are intra iff a majority (>= 3) of luma blocks are. */
3669 is_intra[i] = (intra_count >= 3);
3673 vc1_mc_4mv_chroma(v, 0);
3674 v->mb_type[0][s->block_index[i]] = is_intra[i];
3676 coded_inter = !is_intra[i] & is_coded[i];
3678 // if there are no coded blocks then don't do anything more
3680 if (!intra_count && !coded_inter)
3683 s->current_picture.qscale_table[mb_pos] = mquant;
3684 /* test if block is intra and has pred */
3687 for (i = 0; i < 6; i++)
3689 if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3690 || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3696 s->ac_pred = get_bits1(gb);
3700 if (!v->ttmbf && coded_inter)
3701 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Second pass: reconstruct each block as intra or inter. */
3702 for (i = 0; i < 6; i++) {
3704 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3705 s->mb_intra = is_intra[i];
3707 /* check if prediction blocks A and C are available */
3708 v->a_avail = v->c_avail = 0;
3709 if (i == 2 || i == 3 || !s->first_slice_line)
3710 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3711 if (i == 1 || i == 3 || s->mb_x)
3712 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3714 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3715 (i & 4) ? v->codingset2 : v->codingset);
3716 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3718 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3720 for (j = 0; j < 64; j++)
3721 s->block[i][j] <<= 1;
3722 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3723 (i & 4) ? s->uvlinesize : s->linesize);
3724 if (v->pq >= 9 && v->overlap) {
3726 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3728 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3730 block_cbp |= 0xF << (i << 2);
3731 block_intra |= 1 << i;
3732 } else if (is_coded[i]) {
3733 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3734 first_block, s->dest[dst_idx] + off,
3735 (i & 4) ? s->uvlinesize : s->linesize,
3736 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3738 block_cbp |= pat << (i << 2);
3739 if (!v->ttmbf && ttmb < 8)
3744 } else { // skipped MB
3746 s->current_picture.qscale_table[mb_pos] = 0;
3747 for (i = 0; i < 6; i++) {
3748 v->mb_type[0][s->block_index[i]] = 0;
3749 s->dc_val[0][s->block_index[i]] = 0;
/* Skipped 4MV MB: each luma block is motion-compensated with a zero
 * differential, then chroma. */
3751 for (i = 0; i < 4; i++) {
3752 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3753 vc1_mc_4mv_luma(v, i, 0, 0);
3755 vc1_mc_4mv_chroma(v, 0);
3756 s->current_picture.qscale_table[mb_pos] = 0;
/* Publish per-MB masks for the deblocking pass that runs one MB behind. */
3760 v->cbp[s->mb_x] = block_cbp;
3761 v->ttblk[s->mb_x] = block_tt;
3762 v->is_intra[s->mb_x] = block_intra;
3767 /* Decode one macroblock in an interlaced frame p picture.
 * Supports 1MV, 2MV-field and 4MV(/field) modes selected via MBMODE, plus
 * intra and skipped MBs.  NOTE(review): original line numbers skip values in
 * this chunk — else branches, brace lines and some reads are elided; the
 * comments below only describe visible statements. */
3769 static int vc1_decode_p_mb_intfr(VC1Context *v)
3771 MpegEncContext *s = &v->s;
3772 GetBitContext *gb = &s->gb;
3774 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3775 int cbp = 0; /* cbp decoding stuff */
3776 int mqdiff, mquant; /* MB quantization */
3777 int ttmb = v->ttfrm; /* MB Transform type */
3779 int mb_has_coeffs = 1; /* last_flag */
3780 int dmv_x, dmv_y; /* Differential MV components */
3781 int val; /* temp value */
3782 int first_block = 1;
3784 int skipped, fourmv = 0, twomv = 0;
3785 int block_cbp = 0, pat, block_tt = 0;
3786 int idx_mbmode = 0, mvbp;
/* fieldtx selects field-interleaved luma storage for this MB. */
3787 int stride_y, fieldtx;
3789 mquant = v->pq; /* Lossy initialization */
3792 skipped = get_bits1(gb);
3794 skipped = v->s.mbskip_table[mb_pos];
/* MBMODE uses a different VLC table depending on 4MVSWITCH. */
3796 if (v->fourmvswitch)
3797 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3799 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3800 switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3801 /* store the motion vector type in a flag (useful later) */
3802 case MV_PMODE_INTFR_4MV:
3804 v->blk_mv_type[s->block_index[0]] = 0;
3805 v->blk_mv_type[s->block_index[1]] = 0;
3806 v->blk_mv_type[s->block_index[2]] = 0;
3807 v->blk_mv_type[s->block_index[3]] = 0;
3809 case MV_PMODE_INTFR_4MV_FIELD:
3811 v->blk_mv_type[s->block_index[0]] = 1;
3812 v->blk_mv_type[s->block_index[1]] = 1;
3813 v->blk_mv_type[s->block_index[2]] = 1;
3814 v->blk_mv_type[s->block_index[3]] = 1;
3816 case MV_PMODE_INTFR_2MV_FIELD:
3818 v->blk_mv_type[s->block_index[0]] = 1;
3819 v->blk_mv_type[s->block_index[1]] = 1;
3820 v->blk_mv_type[s->block_index[2]] = 1;
3821 v->blk_mv_type[s->block_index[3]] = 1;
3823 case MV_PMODE_INTFR_1MV:
3824 v->blk_mv_type[s->block_index[0]] = 0;
3825 v->blk_mv_type[s->block_index[1]] = 0;
3826 v->blk_mv_type[s->block_index[2]] = 0;
3827 v->blk_mv_type[s->block_index[3]] = 0;
3830 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
/* Intra MB: zero all MVs, mark every block intra, then decode blocks. */
3831 for (i = 0; i < 4; i++) {
3832 s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3833 s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3835 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3836 s->mb_intra = v->is_intra[s->mb_x] = 1;
3837 for (i = 0; i < 6; i++)
3838 v->mb_type[0][s->block_index[i]] = 1;
3839 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3840 mb_has_coeffs = get_bits1(gb);
3842 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3843 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3845 s->current_picture.qscale_table[mb_pos] = mquant;
3846 /* Set DC scale - y and c use the same (not sure if necessary here) */
3847 s->y_dc_scale = s->y_dc_scale_table[mquant];
3848 s->c_dc_scale = s->c_dc_scale_table[mquant];
3850 for (i = 0; i < 6; i++) {
3851 s->dc_val[0][s->block_index[i]] = 0;
3853 val = ((cbp >> (5 - i)) & 1);
3854 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3855 v->a_avail = v->c_avail = 0;
3856 if (i == 2 || i == 3 || !s->first_slice_line)
3857 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3858 if (i == 1 || i == 3 || s->mb_x)
3859 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3861 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3862 (i & 4) ? v->codingset2 : v->codingset);
3863 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3864 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* With FIELDTX the luma rows of the two fields are interleaved, so the
 * write stride is doubled and the block offsets change accordingly. */
3866 stride_y = s->linesize << fieldtx;
3867 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3869 stride_y = s->uvlinesize;
3872 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3876 } else { // inter MB
3877 mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3879 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
/* Block-pattern VLCs: 2MVBP for 2MV-field mode, 4MVBP for 4MV modes. */
3880 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3881 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3883 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3884 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3885 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3888 s->mb_intra = v->is_intra[s->mb_x] = 0;
3889 for (i = 0; i < 6; i++)
3890 v->mb_type[0][s->block_index[i]] = 0;
3891 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3892 /* for all motion vector read MVDATA and motion compensate each block */
3896 for (i = 0; i < 6; i++) {
3899 val = ((mvbp >> (3 - i)) & 1);
3901 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3903 vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3904 vc1_mc_4mv_luma(v, i, 0, 0);
3905 } else if (i == 4) {
3906 vc1_mc_4mv_chroma4(v, 0, 0, 0);
/* 2MV-field path: one MV for the top-field pair, one for the bottom. */
3913 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3915 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3916 vc1_mc_4mv_luma(v, 0, 0, 0);
3917 vc1_mc_4mv_luma(v, 1, 0, 0);
3920 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3922 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3923 vc1_mc_4mv_luma(v, 2, 0, 0);
3924 vc1_mc_4mv_luma(v, 3, 0, 0);
3925 vc1_mc_4mv_chroma4(v, 0, 0, 0);
/* 1MV path: the MBMODE table says whether an MV differential is present. */
3927 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3930 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3932 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3936 GET_MQUANT(); // p. 227
3937 s->current_picture.qscale_table[mb_pos] = mquant;
3938 if (!v->ttmbf && cbp)
3939 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Residual decoding for coded blocks; luma stride honours FIELDTX. */
3940 for (i = 0; i < 6; i++) {
3941 s->dc_val[0][s->block_index[i]] = 0;
3943 val = ((cbp >> (5 - i)) & 1);
3945 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3947 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3949 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3950 first_block, s->dest[dst_idx] + off,
3951 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3952 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3953 block_cbp |= pat << (i << 2);
3954 if (!v->ttmbf && ttmb < 8)
/* Skipped MB: clear per-block state and motion-compensate with zero MVs. */
3961 s->mb_intra = v->is_intra[s->mb_x] = 0;
3962 for (i = 0; i < 6; i++) {
3963 v->mb_type[0][s->block_index[i]] = 0;
3964 s->dc_val[0][s->block_index[i]] = 0;
3966 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3967 s->current_picture.qscale_table[mb_pos] = 0;
3968 v->blk_mv_type[s->block_index[0]] = 0;
3969 v->blk_mv_type[s->block_index[1]] = 0;
3970 v->blk_mv_type[s->block_index[2]] = 0;
3971 v->blk_mv_type[s->block_index[3]] = 0;
3972 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
/* End of a row: preserve the row's intra flags for the next row's filters. */
3975 if (s->mb_x == s->mb_width - 1)
3976 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3980 static int vc1_decode_p_mb_intfi(VC1Context *v)
/* Decode one macroblock of an interlaced field P picture.  MBMODE selects
 * intra (idx <= 1), 1MV (idx <= 5) or 4MV.  NOTE(review): original line
 * numbers skip values here — else branches and brace lines are elided from
 * this view; comments describe only visible statements. */
3982 MpegEncContext *s = &v->s;
3983 GetBitContext *gb = &s->gb;
3985 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3986 int cbp = 0; /* cbp decoding stuff */
3987 int mqdiff, mquant; /* MB quantization */
3988 int ttmb = v->ttfrm; /* MB Transform type */
3990 int mb_has_coeffs = 1; /* last_flag */
3991 int dmv_x, dmv_y; /* Differential MV components */
3992 int val; /* temp values */
3993 int first_block = 1;
3996 int block_cbp = 0, pat, block_tt = 0;
3999 mquant = v->pq; /* Lossy initialization */
4001 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4002 if (idx_mbmode <= 1) { // intra MB
4003 s->mb_intra = v->is_intra[s->mb_x] = 1;
/* blocks_off / mb_off shift indices into the current field's half. */
4004 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4005 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4006 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4008 s->current_picture.qscale_table[mb_pos] = mquant;
4009 /* Set DC scale - y and c use the same (not sure if necessary here) */
4010 s->y_dc_scale = s->y_dc_scale_table[mquant];
4011 s->c_dc_scale = s->c_dc_scale_table[mquant];
4012 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
/* Low bit of MBMODE conveys whether coefficients are present. */
4013 mb_has_coeffs = idx_mbmode & 1;
4015 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4017 for (i = 0; i < 6; i++) {
4018 s->dc_val[0][s->block_index[i]] = 0;
4019 v->mb_type[0][s->block_index[i]] = 1;
4021 val = ((cbp >> (5 - i)) & 1);
4022 v->a_avail = v->c_avail = 0;
4023 if (i == 2 || i == 3 || !s->first_slice_line)
4024 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4025 if (i == 1 || i == 3 || s->mb_x)
4026 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4028 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4029 (i & 4) ? v->codingset2 : v->codingset);
4030 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4032 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4033 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4034 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4035 // TODO: loop filter
4038 s->mb_intra = v->is_intra[s->mb_x] = 0;
4039 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4040 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4041 if (idx_mbmode <= 5) { // 1-MV
4042 dmv_x = dmv_y = pred_flag = 0;
4043 if (idx_mbmode & 1) {
4044 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4046 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4048 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: per-block coded pattern, then MV + luma MC per coded block. */
4050 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4051 for (i = 0; i < 6; i++) {
4053 dmv_x = dmv_y = pred_flag = 0;
4054 val = ((v->fourmvbp >> (3 - i)) & 1);
4056 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4058 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4059 vc1_mc_4mv_luma(v, i, 0, 0);
4061 vc1_mc_4mv_chroma(v, 0);
4063 mb_has_coeffs = idx_mbmode & 1;
4066 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4070 s->current_picture.qscale_table[mb_pos] = mquant;
4071 if (!v->ttmbf && cbp) {
4072 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Residual decoding for inter blocks; accumulate cbp for the loop filter. */
4075 for (i = 0; i < 6; i++) {
4076 s->dc_val[0][s->block_index[i]] = 0;
4078 val = ((cbp >> (5 - i)) & 1);
4079 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4081 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4082 first_block, s->dest[dst_idx] + off,
4083 (i & 4) ? s->uvlinesize : s->linesize,
4084 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4086 block_cbp |= pat << (i << 2);
4087 if (!v->ttmbf && ttmb < 8) ttmb = -1;
4092 if (s->mb_x == s->mb_width - 1)
4093 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4097 /** Decode one B-frame MB (in Main profile).
 *
 *  Handles direct and non-direct prediction (forward / backward /
 *  interpolated), skipped MBs, and intra blocks inside B MBs.
 *
 *  NOTE(review): the embedded original line numbers skip values, so else
 *  branches, closing braces and some statements are elided from this view;
 *  comments describe only the visible lines.
 */
4099 static void vc1_decode_b_mb(VC1Context *v)
4101 MpegEncContext *s = &v->s;
4102 GetBitContext *gb = &s->gb;
4104 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4105 int cbp = 0; /* cbp decoding stuff */
4106 int mqdiff, mquant; /* MB quantization */
4107 int ttmb = v->ttfrm; /* MB Transform type */
4108 int mb_has_coeffs = 0; /* last_flag */
4109 int index, index1; /* LUT indexes */
4110 int val, sign; /* temp values */
4111 int first_block = 1;
4113 int skipped, direct;
/* Index 0 = forward, 1 = backward differential MV components. */
4114 int dmv_x[2], dmv_y[2];
4115 int bmvtype = BMV_TYPE_BACKWARD;
4117 mquant = v->pq; /* lossy initialization */
/* DIRECTMB and SKIPMB may be raw per-MB bits or bitplane-coded. */
4121 direct = get_bits1(gb);
4123 direct = v->direct_mb_plane[mb_pos];
4125 skipped = get_bits1(gb);
4127 skipped = v->s.mbskip_table[mb_pos];
4129 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4130 for (i = 0; i < 6; i++) {
4131 v->mb_type[0][s->block_index[i]] = 0;
4132 s->dc_val[0][s->block_index[i]] = 0;
4134 s->current_picture.qscale_table[mb_pos] = 0;
4138 GET_MVDATA(dmv_x[0], dmv_y[0]);
4139 dmv_x[1] = dmv_x[0];
4140 dmv_y[1] = dmv_y[0];
4142 if (skipped || !s->mb_intra) {
4143 bmvtype = decode012(gb);
/* BFRACTION decides which single-direction type the short codes map to. */
4146 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4149 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4152 bmvtype = BMV_TYPE_INTERPOLATED;
4153 dmv_x[0] = dmv_y[0] = 0;
4157 for (i = 0; i < 6; i++)
4158 v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* Skipped path: predict and motion-compensate, no residual. */
4162 bmvtype = BMV_TYPE_INTERPOLATED;
4163 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4164 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4168 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4171 s->current_picture.qscale_table[mb_pos] = mquant;
4173 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4174 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4175 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4176 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4178 if (!mb_has_coeffs && !s->mb_intra) {
4179 /* no coded blocks - effectively skipped */
4180 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4181 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4184 if (s->mb_intra && !mb_has_coeffs) {
4186 s->current_picture.qscale_table[mb_pos] = mquant;
4187 s->ac_pred = get_bits1(gb);
4189 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
/* Interpolated mode carries a second MVDATA for the backward direction. */
4191 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4192 GET_MVDATA(dmv_x[0], dmv_y[0]);
4193 if (!mb_has_coeffs) {
4194 /* interpolated skipped block */
4195 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4196 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4200 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4202 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4205 s->ac_pred = get_bits1(gb);
4206 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4208 s->current_picture.qscale_table[mb_pos] = mquant;
4209 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4210 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Per-block residual decoding, intra or inter as signalled. */
4214 for (i = 0; i < 6; i++) {
4215 s->dc_val[0][s->block_index[i]] = 0;
4217 val = ((cbp >> (5 - i)) & 1);
4218 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4219 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4221 /* check if prediction blocks A and C are available */
4222 v->a_avail = v->c_avail = 0;
4223 if (i == 2 || i == 3 || !s->first_slice_line)
4224 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4225 if (i == 1 || i == 3 || s->mb_x)
4226 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4228 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4229 (i & 4) ? v->codingset2 : v->codingset);
4230 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4232 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4234 for (j = 0; j < 64; j++)
4235 s->block[i][j] <<= 1;
4236 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4238 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4239 first_block, s->dest[dst_idx] + off,
4240 (i & 4) ? s->uvlinesize : s->linesize,
4241 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4242 if (!v->ttmbf && ttmb < 8)
4249 /** Decode one B-frame MB (in interlaced field B picture).
 *
 *  MBMODE selects intra (idx <= 1), 1-MV (idx <= 5) or 4-MV decoding.
 *  Prediction direction comes from the FORWARDMB plane/bit and the BMV type
 *  syntax (forward / backward / direct / interpolated).
 *
 *  NOTE(review): original line numbers skip values in this chunk — else
 *  branches and brace lines are elided; comments cover only visible lines.
 */
4251 static void vc1_decode_b_mb_intfi(VC1Context *v)
4253 MpegEncContext *s = &v->s;
4254 GetBitContext *gb = &s->gb;
4256 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4257 int cbp = 0; /* cbp decoding stuff */
4258 int mqdiff, mquant; /* MB quantization */
4259 int ttmb = v->ttfrm; /* MB Transform type */
4260 int mb_has_coeffs = 0; /* last_flag */
4261 int val; /* temp value */
4262 int first_block = 1;
/* [0] = forward, [1] = backward MV differentials and hybrid-pred flags. */
4265 int dmv_x[2], dmv_y[2], pred_flag[2];
4266 int bmvtype = BMV_TYPE_BACKWARD;
4269 mquant = v->pq; /* Lossy initialization */
4272 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4273 if (idx_mbmode <= 1) { // intra MB
4274 s->mb_intra = v->is_intra[s->mb_x] = 1;
4275 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4276 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4277 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4279 s->current_picture.qscale_table[mb_pos] = mquant;
4280 /* Set DC scale - y and c use the same (not sure if necessary here) */
4281 s->y_dc_scale = s->y_dc_scale_table[mquant];
4282 s->c_dc_scale = s->c_dc_scale_table[mquant];
4283 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4284 mb_has_coeffs = idx_mbmode & 1;
4286 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4288 for (i = 0; i < 6; i++) {
4289 s->dc_val[0][s->block_index[i]] = 0;
4291 val = ((cbp >> (5 - i)) & 1);
4292 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4293 v->a_avail = v->c_avail = 0;
4294 if (i == 2 || i == 3 || !s->first_slice_line)
4295 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4296 if (i == 1 || i == 3 || s->mb_x)
4297 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4299 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4300 (i & 4) ? v->codingset2 : v->codingset);
4301 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4303 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4305 for (j = 0; j < 64; j++)
4306 s->block[i][j] <<= 1;
4307 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4308 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4309 // TODO: yet to perform loop filter
4312 s->mb_intra = v->is_intra[s->mb_x] = 0;
4313 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4314 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
/* FORWARDMB: raw per-MB bit or bitplane-coded. */
4316 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4318 fwd = v->forward_mb_plane[mb_pos];
4319 if (idx_mbmode <= 5) { // 1-MV
4321 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4322 pred_flag[0] = pred_flag[1] = 0;
4324 bmvtype = BMV_TYPE_FORWARD;
4326 bmvtype = decode012(gb);
4329 bmvtype = BMV_TYPE_BACKWARD;
4332 bmvtype = BMV_TYPE_DIRECT;
4335 bmvtype = BMV_TYPE_INTERPOLATED;
4336 interpmvp = get_bits1(gb);
4339 v->bmvtype = bmvtype;
/* Non-direct modes with an MV differential: read it into the slot for the
 * signalled direction (index 1 when backward). */
4340 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4341 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4344 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4346 if (bmvtype == BMV_TYPE_DIRECT) {
4347 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
/* NOTE(review): pred_flag[0] is zeroed twice here while pred_flag[1] is
 * never cleared on this path — looks like a typo for pred_flag[1];
 * confirm against the full file before changing. */
4348 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4349 if (!s->next_picture_ptr->field_picture) {
4350 av_log(s->avctx, AV_LOG_ERROR, "Mixed field/frame direct mode not supported\n");
4354 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4355 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4356 mb_has_coeffs = !(idx_mbmode & 2);
/* 4-MV: only forward prediction is possible; read a per-block pattern. */
4359 bmvtype = BMV_TYPE_FORWARD;
4360 v->bmvtype = bmvtype;
4361 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4362 for (i = 0; i < 6; i++) {
4364 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4365 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4366 val = ((v->fourmvbp >> (3 - i)) & 1);
4368 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4369 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4370 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4372 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4373 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
4375 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4377 mb_has_coeffs = idx_mbmode & 1;
4380 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4384 s->current_picture.qscale_table[mb_pos] = mquant;
4385 if (!v->ttmbf && cbp) {
4386 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
/* Residual decoding for inter blocks. */
4389 for (i = 0; i < 6; i++) {
4390 s->dc_val[0][s->block_index[i]] = 0;
4392 val = ((cbp >> (5 - i)) & 1);
4393 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4395 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4396 first_block, s->dest[dst_idx] + off,
4397 (i & 4) ? s->uvlinesize : s->linesize,
4398 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4399 if (!v->ttmbf && ttmb < 8)
4407 /** Decode one B-frame MB (in interlaced frame B picture)
4409 static int vc1_decode_b_mb_intfr(VC1Context *v)
4411 MpegEncContext *s = &v->s;
4412 GetBitContext *gb = &s->gb;
4414 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4415 int cbp = 0; /* cbp decoding stuff */
4416 int mqdiff, mquant; /* MB quantization */
4417 int ttmb = v->ttfrm; /* MB Transform type */
4418 int mvsw = 0; /* motion vector switch */
4419 int mb_has_coeffs = 1; /* last_flag */
4420 int dmv_x, dmv_y; /* Differential MV components */
4421 int val; /* temp value */
4422 int first_block = 1;
4424 int skipped, direct, twomv = 0;
4425 int block_cbp = 0, pat, block_tt = 0;
4426 int idx_mbmode = 0, mvbp;
4427 int stride_y, fieldtx;
4428 int bmvtype = BMV_TYPE_BACKWARD;
4431 mquant = v->pq; /* Lossy initialization */
4434 skipped = get_bits1(gb);
4436 skipped = v->s.mbskip_table[mb_pos];
4439 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
4440 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
4442 v->blk_mv_type[s->block_index[0]] = 1;
4443 v->blk_mv_type[s->block_index[1]] = 1;
4444 v->blk_mv_type[s->block_index[2]] = 1;
4445 v->blk_mv_type[s->block_index[3]] = 1;
4447 v->blk_mv_type[s->block_index[0]] = 0;
4448 v->blk_mv_type[s->block_index[1]] = 0;
4449 v->blk_mv_type[s->block_index[2]] = 0;
4450 v->blk_mv_type[s->block_index[3]] = 0;
4455 direct = get_bits1(gb);
4457 direct = v->direct_mb_plane[mb_pos];
4460 if (s->next_picture_ptr->field_picture)
4461 av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
4462 s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
4463 s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
4464 s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
4465 s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
4468 s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
4469 s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
4470 s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
4471 s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
4473 for (i = 1; i < 4; i += 2) {
4474 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
4475 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
4476 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
4477 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
4480 for (i = 1; i < 4; i++) {
4481 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
4482 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
4483 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
4484 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
4489 if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
4490 for (i = 0; i < 4; i++) {
4491 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
4492 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
4493 s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
4494 s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
4496 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4497 s->mb_intra = v->is_intra[s->mb_x] = 1;
4498 for (i = 0; i < 6; i++)
4499 v->mb_type[0][s->block_index[i]] = 1;
4500 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
4501 mb_has_coeffs = get_bits1(gb);
4503 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4504 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4506 s->current_picture.qscale_table[mb_pos] = mquant;
4507 /* Set DC scale - y and c use the same (not sure if necessary here) */
4508 s->y_dc_scale = s->y_dc_scale_table[mquant];
4509 s->c_dc_scale = s->c_dc_scale_table[mquant];
4511 for (i = 0; i < 6; i++) {
4512 s->dc_val[0][s->block_index[i]] = 0;
4514 val = ((cbp >> (5 - i)) & 1);
4515 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4516 v->a_avail = v->c_avail = 0;
4517 if (i == 2 || i == 3 || !s->first_slice_line)
4518 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4519 if (i == 1 || i == 3 || s->mb_x)
4520 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4522 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4523 (i & 4) ? v->codingset2 : v->codingset);
4524 if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
4526 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4528 stride_y = s->linesize << fieldtx;
4529 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4531 stride_y = s->uvlinesize;
4534 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
4537 s->mb_intra = v->is_intra[s->mb_x] = 0;
4539 if (skipped || !s->mb_intra) {
4540 bmvtype = decode012(gb);
4543 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4546 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4549 bmvtype = BMV_TYPE_INTERPOLATED;
4553 if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
4554 mvsw = get_bits1(gb);
4557 if (!skipped) { // inter MB
4558 mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
4560 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4562 if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
4563 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4564 } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
4565 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
4569 for (i = 0; i < 6; i++)
4570 v->mb_type[0][s->block_index[i]] = 0;
4571 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
4572 /* for all motion vector read MVDATA and motion compensate each block */
4576 for (i = 0; i < 4; i++) {
4577 vc1_mc_4mv_luma(v, i, 0, 0);
4578 vc1_mc_4mv_luma(v, i, 1, 1);
4580 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4581 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4586 } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
4588 for (i = 0; i < 4; i++) {
4591 val = ((mvbp >> (3 - i)) & 1);
4593 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4595 vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4596 vc1_mc_4mv_luma(v, j, dir, dir);
4597 vc1_mc_4mv_luma(v, j+1, dir, dir);
4600 vc1_mc_4mv_chroma4(v, 0, 0, 0);
4601 vc1_mc_4mv_chroma4(v, 1, 1, 1);
4602 } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
4606 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4608 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4613 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4615 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4618 dir = bmvtype == BMV_TYPE_BACKWARD;
4625 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4626 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4630 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4631 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
4634 for (i = 0; i < 2; i++) {
4635 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4636 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4637 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4638 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4641 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4642 vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4645 vc1_mc_4mv_luma(v, 0, dir, 0);
4646 vc1_mc_4mv_luma(v, 1, dir, 0);
4647 vc1_mc_4mv_luma(v, 2, dir2, 0);
4648 vc1_mc_4mv_luma(v, 3, dir2, 0);
4649 vc1_mc_4mv_chroma4(v, dir, dir2, 0);
4651 dir = bmvtype == BMV_TYPE_BACKWARD;
4653 mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
4656 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4658 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4659 v->blk_mv_type[s->block_index[0]] = 1;
4660 v->blk_mv_type[s->block_index[1]] = 1;
4661 v->blk_mv_type[s->block_index[2]] = 1;
4662 v->blk_mv_type[s->block_index[3]] = 1;
4663 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4664 for (i = 0; i < 2; i++) {
4665 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4666 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4672 GET_MQUANT(); // p. 227
4673 s->current_picture.qscale_table[mb_pos] = mquant;
4674 if (!v->ttmbf && cbp)
4675 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4676 for (i = 0; i < 6; i++) {
4677 s->dc_val[0][s->block_index[i]] = 0;
4679 val = ((cbp >> (5 - i)) & 1);
4681 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4683 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4685 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4686 first_block, s->dest[dst_idx] + off,
4687 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4688 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4689 block_cbp |= pat << (i << 2);
4690 if (!v->ttmbf && ttmb < 8)
4698 for (i = 0; i < 6; i++) {
4699 v->mb_type[0][s->block_index[i]] = 0;
4700 s->dc_val[0][s->block_index[i]] = 0;
4702 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
4703 s->current_picture.qscale_table[mb_pos] = 0;
4704 v->blk_mv_type[s->block_index[0]] = 0;
4705 v->blk_mv_type[s->block_index[1]] = 0;
4706 v->blk_mv_type[s->block_index[2]] = 0;
4707 v->blk_mv_type[s->block_index[3]] = 0;
4710 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4711 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4712 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4714 dir = bmvtype == BMV_TYPE_BACKWARD;
4715 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4720 for (i = 0; i < 2; i++) {
4721 s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4722 s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4723 s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4724 s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4727 v->blk_mv_type[s->block_index[0]] = 1;
4728 v->blk_mv_type[s->block_index[1]] = 1;
4729 v->blk_mv_type[s->block_index[2]] = 1;
4730 v->blk_mv_type[s->block_index[3]] = 1;
4731 vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4732 for (i = 0; i < 2; i++) {
4733 s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4734 s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4741 if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
4746 if (s->mb_x == s->mb_width - 1)
4747 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4748 v->cbp[s->mb_x] = block_cbp;
4749 v->ttblk[s->mb_x] = block_tt;
4753 /** Decode blocks of I-frame (simple/main profile).
 *  Walks every MB of the picture, decoding six 8x8 intra blocks per MB,
 *  applying the inverse 8x8 transform, optional overlap smoothing and the
 *  in-loop filter, and reporting slice status to the error resilience code.
 *  NOTE(review): this listing has elided lines (case labels, braces); code
 *  is reproduced verbatim, only comments are added. */
4755 static void vc1_decode_i_blocks(VC1Context *v)
4758 MpegEncContext *s = &v->s;
/* AC coding set selection depends on the transmitted table index and,
 * for index 0, on the picture quantizer index (low vs. high rate). */
4763 /* select codingmode used for VLC tables selection */
4764 switch (v->y_ac_table_index) {
4766 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4769 v->codingset = CS_HIGH_MOT_INTRA;
4772 v->codingset = CS_MID_RATE_INTRA;
4776 switch (v->c_ac_table_index) {
4778 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4781 v->codingset2 = CS_HIGH_MOT_INTER;
4784 v->codingset2 = CS_MID_RATE_INTER;
4788 /* Set DC scale - y and c use the same */
4789 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4790 s->c_dc_scale = s->c_dc_scale_table[v->pq];
4793 s->mb_x = s->mb_y = 0;
4795 s->first_slice_line = 1;
4796 for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
4798 init_block_index(v);
4799 for (; s->mb_x < v->end_mb_x; s->mb_x++) {
4801 ff_update_block_index(s);
/* dst[0..3] are the four 8x8 luma quadrants, dst[4]/dst[5] chroma planes. */
4802 dst[0] = s->dest[0];
4803 dst[1] = dst[0] + 8;
4804 dst[2] = s->dest[0] + s->linesize * 8;
4805 dst[3] = dst[2] + 8;
4806 dst[4] = s->dest[1];
4807 dst[5] = s->dest[2];
4808 s->dsp.clear_blocks(s->block[0]);
4809 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4810 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4811 s->current_picture.qscale_table[mb_pos] = v->pq;
4812 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4813 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4815 // do actual MB decoding and displaying
4816 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4817 v->s.ac_pred = get_bits1(&v->s.gb);
4819 for (k = 0; k < 6; k++) {
4820 val = ((cbp >> (5 - k)) & 1);
/* Coded-block flag is differentially coded against the predictor. */
4823 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4827 cbp |= val << (5 - k);
4829 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
/* Skip chroma blocks (k = 4, 5) entirely in grayscale-only mode. */
4831 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4833 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4834 if (v->pq >= 9 && v->overlap) {
/* Overlap path: coefficients are doubled and written signed, so the
 * overlap filters below operate on the expanded range. */
4836 for (j = 0; j < 64; j++)
4837 s->block[k][j] <<= 1;
4838 s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4841 for (j = 0; j < 64; j++)
4842 s->block[k][j] = (s->block[k][j] - 64) << 1;
4843 s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
/* Overlap smoothing across 8x8 block edges (horizontal, then vertical,
 * the latter only once a row above exists). */
4847 if (v->pq >= 9 && v->overlap) {
4849 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4850 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4851 if (!(s->flags & CODEC_FLAG_GRAY)) {
4852 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4853 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4856 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4857 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4858 if (!s->first_slice_line) {
4859 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4860 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4861 if (!(s->flags & CODEC_FLAG_GRAY)) {
4862 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4863 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4866 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4867 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4869 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* Bail out of the MB loop if the bitstream position ran past the
 * declared picture size - mark the slice damaged for concealment. */
4871 if (get_bits_count(&s->gb) > v->bits) {
4872 ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4873 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4874 get_bits_count(&s->gb), v->bits);
/* With the loop filter enabled, row output is delayed by one MB row. */
4878 if (!v->s.loop_filter)
4879 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4881 ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4883 s->first_slice_line = 0;
4885 if (v->s.loop_filter)
4886 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4888 /* This is intentionally mb_height and not end_mb_y - unlike in advanced
4889 * profile, they only differ when decoding MSS2 rectangles. */
4890 ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4893 /** Decode blocks of I-frame for advanced profile.
 *  Differences from the simple/main path: per-MB quantizer (MQUANT),
 *  raw/bitplane-coded FIELDTX / ACPRED / OVERFLAGS syntax elements,
 *  delayed block output (vc1_put_signed_blocks_clamped) so smoothing can
 *  see neighboring blocks, and slice-aware start/end MB rows. */
4895 static void vc1_decode_i_blocks_adv(VC1Context *v)
4898 MpegEncContext *s = &v->s;
4904 GetBitContext *gb = &s->gb;
4906 /* select codingmode used for VLC tables selection */
4907 switch (v->y_ac_table_index) {
4909 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4912 v->codingset = CS_HIGH_MOT_INTRA;
4915 v->codingset = CS_MID_RATE_INTRA;
4919 switch (v->c_ac_table_index) {
4921 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4924 v->codingset2 = CS_HIGH_MOT_INTER;
4927 v->codingset2 = CS_MID_RATE_INTER;
4932 s->mb_x = s->mb_y = 0;
4934 s->first_slice_line = 1;
4935 s->mb_y = s->start_mb_y;
/* For a slice that does not start at the top, clear the coded-block
 * predictors of the (virtual) row above so prediction starts clean. */
4936 if (s->start_mb_y) {
4938 init_block_index(v);
4939 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4940 (1 + s->b8_stride) * sizeof(*s->coded_block));
4942 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4944 init_block_index(v);
4945 for (;s->mb_x < s->mb_width; s->mb_x++) {
4946 int16_t (*block)[64] = v->block[v->cur_blk_idx];
4947 ff_update_block_index(s);
4948 s->dsp.clear_blocks(block[0]);
4949 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4950 s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4951 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4952 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
/* FIELDTX/ACPRED/OVERFLAGS are either read raw from the bitstream here
 * or taken from previously decoded bitplanes. */
4954 // do actual MB decoding and displaying
4955 if (v->fieldtx_is_raw)
4956 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4957 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4958 if ( v->acpred_is_raw)
4959 v->s.ac_pred = get_bits1(&v->s.gb);
4961 v->s.ac_pred = v->acpred_plane[mb_pos];
4963 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4964 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4968 s->current_picture.qscale_table[mb_pos] = mquant;
4969 /* Set DC scale - y and c use the same */
4970 s->y_dc_scale = s->y_dc_scale_table[mquant];
4971 s->c_dc_scale = s->c_dc_scale_table[mquant];
4973 for (k = 0; k < 6; k++) {
4974 val = ((cbp >> (5 - k)) & 1);
4977 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4981 cbp |= val << (5 - k);
/* Neighbor availability for AC/DC prediction: above is available except
 * on the first slice line (bottom-half blocks always have it), left is
 * available except in the first MB column (right-half blocks always). */
4983 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4984 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4986 vc1_decode_i_block_adv(v, block[k], k, val,
4987 (k < 4) ? v->codingset : v->codingset2, mquant);
4989 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4991 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
/* Output is delayed: smooth, then emit the previous MB's pixels. */
4994 vc1_smooth_overlap_filter_iblk(v);
4995 vc1_put_signed_blocks_clamped(v);
4996 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4998 if (get_bits_count(&s->gb) > v->bits) {
4999 // TODO: may need modification to handle slice coding
5000 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5001 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
5002 get_bits_count(&s->gb), v->bits);
5006 if (!v->s.loop_filter)
5007 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5009 ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
5010 s->first_slice_line = 0;
5013 /* raw bottom MB row */
5015 init_block_index(v);
5017 for (;s->mb_x < s->mb_width; s->mb_x++) {
5018 ff_update_block_index(s);
5019 vc1_put_signed_blocks_clamped(v);
5020 if (v->s.loop_filter)
5021 vc1_loop_filter_iblk_delayed(v, v->pq);
5023 if (v->s.loop_filter)
5024 ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
/* In field mode MB rows are doubled for error-resilience bookkeeping. */
5025 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5026 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/** Decode all macroblocks of a P-frame, dispatching per-MB decoding by
 *  frame coding mode (progressive / interlaced field / interlaced frame)
 *  and applying the in-loop filter one MB row behind decoding.
 *  NOTE(review): both coding-set switches key off c_ac_table_index here,
 *  while the I-frame path uses y_ac_table_index for the first one; this
 *  mirrors upstream FFmpeg — confirm against the VC-1 spec before changing. */
5029 static void vc1_decode_p_blocks(VC1Context *v)
5031 MpegEncContext *s = &v->s;
5032 int apply_loop_filter;
5034 /* select codingmode used for VLC tables selection */
5035 switch (v->c_ac_table_index) {
5037 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
5040 v->codingset = CS_HIGH_MOT_INTRA;
5043 v->codingset = CS_MID_RATE_INTRA;
5047 switch (v->c_ac_table_index) {
5049 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
5052 v->codingset2 = CS_HIGH_MOT_INTER;
5055 v->codingset2 = CS_MID_RATE_INTER;
/* In-loop deblocking is only done here for progressive pictures and only
 * when the user has not asked to skip loop filtering. */
5059 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
5060 v->fcm == PROGRESSIVE;
5061 s->first_slice_line = 1;
5062 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
5063 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5065 init_block_index(v);
5066 for (; s->mb_x < s->mb_width; s->mb_x++) {
5067 ff_update_block_index(s);
5069 if (v->fcm == ILACE_FIELD)
5070 vc1_decode_p_mb_intfi(v);
5071 else if (v->fcm == ILACE_FRAME)
5072 vc1_decode_p_mb_intfr(v);
5073 else vc1_decode_p_mb(v);
/* Filtering lags by one row so the row below is fully decoded. */
5074 if (s->mb_y != s->start_mb_y && apply_loop_filter)
5075 vc1_apply_p_loop_filter(v);
/* get_bits_count < 0 guards against bit-reader position overflow. */
5076 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5077 // TODO: may need modification to handle slice coding
5078 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5079 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5080 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
/* Roll the per-row CBP/TT/intra/luma-MV caches down one row. */
5084 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
5085 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
5086 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
5087 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
5088 if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5089 s->first_slice_line = 0;
/* Filter the last (still unfiltered) MB row after the main loop. */
5091 if (apply_loop_filter) {
5093 init_block_index(v);
5094 for (; s->mb_x < s->mb_width; s->mb_x++) {
5095 ff_update_block_index(s);
5096 vc1_apply_p_loop_filter(v);
5099 if (s->end_mb_y >= s->start_mb_y)
5100 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5101 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5102 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/** Decode all macroblocks of a B-frame, dispatching per-MB decoding by
 *  frame coding mode (interlaced field / interlaced frame / progressive). */
5105 static void vc1_decode_b_blocks(VC1Context *v)
5107 MpegEncContext *s = &v->s;
5109 /* select codingmode used for VLC tables selection */
5110 switch (v->c_ac_table_index) {
5112 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
5115 v->codingset = CS_HIGH_MOT_INTRA;
5118 v->codingset = CS_MID_RATE_INTRA;
5122 switch (v->c_ac_table_index) {
5124 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
5127 v->codingset2 = CS_HIGH_MOT_INTER;
5130 v->codingset2 = CS_MID_RATE_INTER;
5134 s->first_slice_line = 1;
5135 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5137 init_block_index(v);
5138 for (; s->mb_x < s->mb_width; s->mb_x++) {
5139 ff_update_block_index(s);
5141 if (v->fcm == ILACE_FIELD)
5142 vc1_decode_b_mb_intfi(v);
5143 else if (v->fcm == ILACE_FRAME)
5144 vc1_decode_b_mb_intfr(v);
/* Overconsumption check mirrors the P-frame path (incl. overflow guard). */
5147 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5148 // TODO: may need modification to handle slice coding
5149 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5150 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5151 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5154 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
/* Row output is delayed by one MB row when the loop filter is active. */
5156 if (!v->s.loop_filter)
5157 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5159 ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5160 s->first_slice_line = 0;
5162 if (v->s.loop_filter)
5163 ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5164 ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5165 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
/** Handle a skipped P-frame: copy the previous picture row by row into the
 *  current destination and present it as a P-picture.  Bails out early if
 *  no reference picture is available. */
5168 static void vc1_decode_skip_blocks(VC1Context *v)
5170 MpegEncContext *s = &v->s;
5172 if (!v->s.last_picture.f.data[0])
5175 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
5176 s->first_slice_line = 1;
5177 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5179 init_block_index(v);
5180 ff_update_block_index(s);
/* One 16-pixel luma row and two 8-pixel chroma rows per MB row. */
5181 memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
5182 memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5183 memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5184 ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5185 s->first_slice_line = 0;
5187 s->pict_type = AV_PICTURE_TYPE_P;
/** Top-level block decoding entry point: resets VLC escape state, hands
 *  X8-coded intra frames to the IntraX8 decoder, and otherwise dispatches
 *  on picture type (I/P/B; BI is handled via the I path). */
5190 void ff_vc1_decode_blocks(VC1Context *v)
5193 v->s.esc3_level_length = 0;
/* X8 frames are decoded entirely by the shared IntraX8 module. */
5195 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
/* Reset the rolling block-index pipeline used for delayed output. */
5198 v->left_blk_idx = -1;
5199 v->topleft_blk_idx = 1;
5201 switch (v->s.pict_type) {
5202 case AV_PICTURE_TYPE_I:
5203 if (v->profile == PROFILE_ADVANCED)
5204 vc1_decode_i_blocks_adv(v);
5206 vc1_decode_i_blocks(v);
5208 case AV_PICTURE_TYPE_P:
/* A skipped P-frame has no MB data; just repeat the reference. */
5209 if (v->p_frame_skipped)
5210 vc1_decode_skip_blocks(v);
5212 vc1_decode_p_blocks(v);
5214 case AV_PICTURE_TYPE_B:
/* NOTE(review): BI-coded B frames (lines elided here) are decoded with
 * the intra-block paths before falling through to B decoding. */
5216 if (v->profile == PROFILE_ADVANCED)
5217 vc1_decode_i_blocks_adv(v);
5219 vc1_decode_i_blocks(v);
5221 vc1_decode_b_blocks(v);
5227 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5231 * Transform coefficients for both sprites in 16.16 fixed point format,
5232 * in the order they appear in the bitstream:
5234 * rotation 1 (unused)
5236 * rotation 2 (unused)
5243 int effect_type, effect_flag;
5244 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
5245 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
5248 static inline int get_fp_val(GetBitContext* gb)
5250 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
/** Parse one sprite affine-transform coefficient set into c[0..6]
 *  (16.16 fixed point).  The leading 2-bit code selects how many of the
 *  scale/rotation terms are explicitly coded; translation (c[2], c[5]) is
 *  always present, and c[6] is an additional trailing parameter.
 *  NOTE(review): case labels and default initializations are elided from
 *  this listing; code reproduced verbatim. */
5253 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
5257 switch (get_bits(gb, 2)) {
5260 c[2] = get_fp_val(gb);
/* Uniform scale: horizontal and vertical scale share one coded value. */
5264 c[0] = c[4] = get_fp_val(gb);
5265 c[2] = get_fp_val(gb);
5268 c[0] = get_fp_val(gb);
5269 c[2] = get_fp_val(gb);
5270 c[4] = get_fp_val(gb);
/* Full matrix: independent scale plus rotation terms c[1]/c[3]. */
5273 c[0] = get_fp_val(gb);
5274 c[1] = get_fp_val(gb);
5275 c[2] = get_fp_val(gb);
5276 c[3] = get_fp_val(gb);
5277 c[4] = get_fp_val(gb);
5280 c[5] = get_fp_val(gb);
5282 c[6] = get_fp_val(gb);
/** Parse the sprite header: one or two transform coefficient sets plus the
 *  optional effect block.  Returns 0 on success or a negative AVERROR.
 *  Values are logged at DEBUG level as decimal 16.16 approximations. */
5287 static int vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
5289 AVCodecContext *avctx = v->s.avctx;
5292 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5293 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
/* Rotation (coefs 1 and 3) is parsed but not implemented downstream. */
5294 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
5295 avpriv_request_sample(avctx, "Non-zero rotation coefficients");
5296 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
5297 for (i = 0; i < 7; i++)
5298 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
5299 sd->coefs[sprite][i] / (1<<16),
5300 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
5301 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Deliberate assignment-in-condition: store and test the effect type. */
5305 if (sd->effect_type = get_bits_long(gb, 30)) {
5306 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
5308 vc1_sprite_parse_transform(gb, sd->effect_params1);
5311 vc1_sprite_parse_transform(gb, sd->effect_params1);
5312 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5315 for (i = 0; i < sd->effect_pcount1; i++)
5316 sd->effect_params1[i] = get_fp_val(gb);
5318 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5319 // effect 13 is simple alpha blending and matches the opacity above
5320 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5321 for (i = 0; i < sd->effect_pcount1; i++)
5322 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5323 sd->effect_params1[i] / (1 << 16),
5324 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5325 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Second parameter set: bounded by the effect_params2 array size (10). */
5328 sd->effect_pcount2 = get_bits(gb, 16);
5329 if (sd->effect_pcount2 > 10) {
5330 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5331 return AVERROR_INVALIDDATA;
5332 } else if (sd->effect_pcount2) {
5334 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5335 while (++i < sd->effect_pcount2) {
5336 sd->effect_params2[i] = get_fp_val(gb);
5337 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5338 sd->effect_params2[i] / (1 << 16),
5339 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5341 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Deliberate assignment-in-condition, as above. */
5344 if (sd->effect_flag = get_bits1(gb))
5345 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
/* WMV3IMAGE carries 64 bits of trailing data that may legally be read
 * past the nominal buffer end; anything beyond that is a hard error. */
5347 if (get_bits_count(gb) >= gb->size_in_bits +
5348 (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0)) {
5349 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5350 return AVERROR_INVALIDDATA;
5352 if (get_bits_count(gb) < gb->size_in_bits - 8)
5353 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
/** Render one or two sprites into the output frame with bilinear scaling
 *  (separable: horizontal via sprite_h with row caching, vertical via the
 *  sprite_v_* DSP routines) and optional alpha blending between sprites.
 *  NOTE(review): some declarations (e.g. ysub[], next_line) are elided from
 *  this listing; code reproduced verbatim. */
5358 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5360 int i, plane, row, sprite;
/* Cache of which source line each sr_rows slot currently holds (-1 = none). */
5361 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5362 uint8_t* src_h[2][2];
5363 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5365 MpegEncContext *s = &v->s;
/* Clamp per-sprite offsets/steps so sampling stays inside the sprite. */
5367 for (i = 0; i <= v->two_sprites; i++) {
5368 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5369 xadv[i] = sd->coefs[i][0];
5370 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5371 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5373 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5374 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
/* Blend factor between the two sprites, from coefficient 6 of sprite 2. */
5376 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5378 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5379 int width = v->output_width>>!!plane;
5381 for (row = 0; row < v->output_height>>!!plane; row++) {
5382 uint8_t *dst = v->sprite_output_frame->data[plane] +
5383 v->sprite_output_frame->linesize[plane] * row;
5385 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5386 uint8_t *iplane = s->current_picture.f.data[plane];
5387 int iline = s->current_picture.f.linesize[plane];
5388 int ycoord = yoff[sprite] + yadv[sprite] * row;
5389 int yline = ycoord >> 16;
5391 ysub[sprite] = ycoord & 0xFFFF;
/* Sprite 2 (lines elided) sources from the previous picture instead. */
5393 iplane = s->last_picture.f.data[plane];
5394 iline = s->last_picture.f.linesize[plane];
5396 next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
/* Fast path: integer-aligned, unity horizontal scale - sample directly. */
5397 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5398 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5400 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
/* Otherwise horizontally scale into sr_rows, reusing cached rows. */
5402 if (sr_cache[sprite][0] != yline) {
5403 if (sr_cache[sprite][1] == yline) {
5404 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5405 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5407 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5408 sr_cache[sprite][0] = yline;
/* The second row is only needed when vertical interpolation applies. */
5411 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5412 v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
5413 iplane + next_line, xoff[sprite],
5414 xadv[sprite], width);
5415 sr_cache[sprite][1] = yline + 1;
5417 src_h[sprite][0] = v->sr_rows[sprite][0];
5418 src_h[sprite][1] = v->sr_rows[sprite][1];
/* Vertical pass: pick the DSP routine matching which sprites need
 * vertical interpolation and whether blending is required. */
5422 if (!v->two_sprites) {
5424 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5426 memcpy(dst, src_h[0][0], width);
5429 if (ysub[0] && ysub[1]) {
5430 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5431 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5432 } else if (ysub[0]) {
5433 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5434 src_h[1][0], alpha, width);
5435 } else if (ysub[1]) {
5436 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5437 src_h[0][0], (1<<16)-1-alpha, width);
5439 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5445 for (i = 0; i <= v->two_sprites; i++) {
/** Decode a sprite (WMV3IMAGE/VC1IMAGE) picture: parse the sprite header,
 *  validate that the required reference sprites exist, acquire an output
 *  buffer, and render.  Returns 0 on success or a negative AVERROR. */
5455 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5458 MpegEncContext *s = &v->s;
5459 AVCodecContext *avctx = s->avctx;
5462 memset(&sd, 0, sizeof(sd));
5464 ret = vc1_parse_sprites(v, gb, &sd);
5468 if (!s->current_picture.f.data[0]) {
5469 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
/* Two-sprite blending needs a previous picture as the second source. */
5473 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5474 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5478 av_frame_unref(v->sprite_output_frame);
5479 if ((ret = ff_get_buffer(avctx, v->sprite_output_frame, 0)) < 0)
5482 vc1_draw_sprites(v, &sd);
/** Flush callback for the sprite (image) codecs: blank the pending sprite
 *  to black (luma 0, chroma 128) so a missing second keyframe of the
 *  convergence interval degrades gracefully instead of showing garbage. */
5487 static void vc1_sprite_flush(AVCodecContext *avctx)
5489 VC1Context *v = avctx->priv_data;
5490 MpegEncContext *s = &v->s;
5491 AVFrame *f = &s->current_picture.f;
5494 /* Windows Media Image codecs have a convergence interval of two keyframes.
5495 Since we can't enforce it, clear to black the missing sprite. This is
5496 wrong but it looks better than doing nothing. */
5499 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5500 for (i = 0; i < v->sprite_height>>!!plane; i++)
5501 memset(f->data[plane] + i * f->linesize[plane],
5502 plane ? 128 : 0, f->linesize[plane]);
/* Allocate the per-sequence side tables used while decoding:
 * macroblock bitplanes, per-row CBP/transform-type/intra/luma-MV scratch,
 * block-type maps laid out to be indexable via s->block_index[], and the
 * block-level MV type / field-MV arrays.
 * Returns 0 on success, AVERROR(ENOMEM) after freeing what it checked. */
5507 av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
5509 MpegEncContext *s = &v->s;
/* Round the MB height up to even — presumably so field pictures can use
 * half-height addressing without a partial row; TODO confirm. */
5511 int mb_height = FFALIGN(s->mb_height, 2);
5513 /* Allocate mb bitplanes */
5514 v->mv_type_mb_plane = av_malloc (s->mb_stride * mb_height);
5515 v->direct_mb_plane = av_malloc (s->mb_stride * mb_height);
5516 v->forward_mb_plane = av_malloc (s->mb_stride * mb_height);
5517 v->fieldtx_plane = av_mallocz(s->mb_stride * mb_height);
5518 v->acpred_plane = av_malloc (s->mb_stride * mb_height);
5519 v->over_flags_plane = av_malloc (s->mb_stride * mb_height);
/* Two extra block columns of scratch beyond the picture width. */
5521 v->n_allocated_blks = s->mb_width + 2;
5522 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
/* The four *_base buffers hold two MB rows; the working pointers start one
 * stride in, so code may index one row back without underflowing. */
5523 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5524 v->cbp = v->cbp_base + s->mb_stride;
5525 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5526 v->ttblk = v->ttblk_base + s->mb_stride;
5527 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5528 v->is_intra = v->is_intra_base + s->mb_stride;
5529 v->luma_mv_base = av_mallocz(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5530 v->luma_mv = v->luma_mv_base + s->mb_stride;
5532 /* allocate block type info in that way so it could be used with s->block_index[] */
5533 v->mb_type_base = av_malloc(s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5534 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5535 v->mb_type[1] = v->mb_type_base + s->b8_stride * (mb_height * 2 + 1) + s->mb_stride + 1;
5536 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (mb_height + 1);
5538 /* allocate memory to store block level MV info */
5539 v->blk_mv_type_base = av_mallocz( s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5540 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5541 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5542 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5543 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5544 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5545 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5546 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5548 /* Init coded blocks info */
5549 if (v->profile == PROFILE_ADVANCED) {
5550 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5552 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5556 ff_intrax8_common_init(&v->x8,s);
/* Sprite decoders additionally need one scaled row per plane/field pair. */
5558 if (s->avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || s->avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5559 for (i = 0; i < 4; i++)
5560 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width)))
5561 return AVERROR(ENOMEM);
/* Late OOM check for the unchecked allocations above.
 * NOTE(review): blk_mv_type_base/mv_f_base/mv_f_next_base do not appear in
 * this check — confirm they are validated elsewhere or add them. */
5564 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5565 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5567 av_freep(&v->mv_type_mb_plane);
5568 av_freep(&v->direct_mb_plane);
5569 av_freep(&v->acpred_plane);
5570 av_freep(&v->over_flags_plane);
5571 av_freep(&v->block);
5572 av_freep(&v->cbp_base);
5573 av_freep(&v->ttblk_base);
5574 av_freep(&v->is_intra_base);
5575 av_freep(&v->luma_mv_base);
5576 av_freep(&v->mb_type_base);
5577 return AVERROR(ENOMEM);
/* Build the transposed 8x8 zigzag scan tables used by the VC-1 IDCT path:
 * the four WMV1 scan orders plus the advanced-profile interlaced order. */
5583 av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
5586 for (i = 0; i < 64; i++) {
/* Swap the row (high 3 bits) and column (low 3 bits) of a 0..63 index,
 * i.e. transpose the position within the 8x8 block. */
5587 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5588 v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5589 v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5590 v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5591 v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5592 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5598 /** Initialize a VC1/WMV3 decoder
5599 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5600 * @todo TODO: Decypher remaining bits in extra_data
/* AVCodec.init for all VC-1 family decoders.
 * Parses the sequence (and, for VC1/WVC1, entry-point) headers out of
 * extradata, selects the pixel format / hwaccel, allocates decoder tables
 * and the sprite output frame, and validates sprite dimensions. */
5602 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5604 VC1Context *v = avctx->priv_data;
5605 MpegEncContext *s = &v->s;
5609 /* save the container output size for WMImage */
5610 v->output_width = avctx->width;
5611 v->output_height = avctx->height;
/* Sequence header lives in extradata for every supported variant; without
 * it we cannot configure the decoder. */
5613 if (!avctx->extradata_size || !avctx->extradata)
/* Normal operation negotiates a format via get_format() (enables hwaccel
 * selection); CODEC_FLAG_GRAY forces plain 8-bit luma output. */
5615 if (!(avctx->flags & CODEC_FLAG_GRAY))
5616 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5618 avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5619 avctx->hwaccel = ff_find_hwaccel(avctx);
5622 if ((ret = ff_vc1_init_common(v)) < 0)
5624 // ensure static VLC tables are initialized
5625 if ((ret = ff_msmpeg4_decode_init(avctx)) < 0)
5627 if ((ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
5629 // Hack to ensure the above functions will be called
5630 // again once we know all necessary settings.
5631 // That this is necessary might indicate a bug.
5632 ff_vc1_decode_end(avctx);
5634 ff_h264chroma_init(&v->h264chroma, 8);
5635 ff_vc1dsp_init(&v->vc1dsp);
5637 if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5640 // looks like WMV3 has a sequence header stored in the extradata
5641 // advanced sequence header may be before the first frame
5642 // the last byte of the extradata is a version number, 1 for the
5643 // samples we can decode
5645 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5647 if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0)
/* Diagnostics only: report leftover or over-read extradata bits. */
5650 count = avctx->extradata_size*8 - get_bits_count(&gb);
5652 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5653 count, get_bits(&gb, count));
5654 } else if (count < 0) {
5655 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5657 } else { // VC1/WVC1/WVP2
/* Advanced-profile extradata is a series of start-code-delimited,
 * escaped chunks: walk markers, unescape each into buf2, and decode
 * the sequence-header and entry-point chunks. */
5658 const uint8_t *start = avctx->extradata;
5659 uint8_t *end = avctx->extradata + avctx->extradata_size;
5660 const uint8_t *next;
5661 int size, buf2_size;
5662 uint8_t *buf2 = NULL;
5663 int seq_initialized = 0, ep_initialized = 0;
5665 if (avctx->extradata_size < 16) {
5666 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5670 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5671 start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5673 for (; next < end; start = next) {
5674 next = find_next_marker(start + 4, end);
5675 size = next - start - 4;
5678 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5679 init_get_bits(&gb, buf2, buf2_size * 8);
5680 switch (AV_RB32(start)) {
5681 case VC1_CODE_SEQHDR:
5682 if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0) {
5686 seq_initialized = 1;
5688 case VC1_CODE_ENTRYPOINT:
5689 if ((ret = ff_vc1_decode_entry_point(avctx, v, &gb)) < 0) {
/* Advanced profile requires both headers before any frame data. */
5698 if (!seq_initialized || !ep_initialized) {
5699 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
/* res_sprite marks the WVP2 (VC1IMAGE) sprite variant of this stream. */
5702 v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5705 v->sprite_output_frame = av_frame_alloc();
5706 if (!v->sprite_output_frame)
5707 return AVERROR(ENOMEM);
5709 avctx->profile = v->profile;
5710 if (v->profile == PROFILE_ADVANCED)
5711 avctx->level = v->level;
5713 avctx->has_b_frames = !!avctx->max_b_frames;
5715 s->mb_width = (avctx->coded_width + 15) >> 4;
5716 s->mb_height = (avctx->coded_height + 15) >> 4;
/* res_fasttx streams use the transposed scan tables; old WMV3 samples
 * without it use the plain WMV1 scan order. */
5718 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5719 ff_vc1_init_transposed_scantables(v);
5721 memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
/* Sprite decoders decode at the sprite size but output at the container
 * size saved above; swap the context dimensions accordingly. */
5726 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5727 v->sprite_width = avctx->coded_width;
5728 v->sprite_height = avctx->coded_height;
5730 avctx->coded_width = avctx->width = v->output_width;
5731 avctx->coded_height = avctx->height = v->output_height;
5733 // prevent 16.16 overflows
5734 if (v->sprite_width > 1 << 14 ||
5735 v->sprite_height > 1 << 14 ||
5736 v->output_width > 1 << 14 ||
5737 v->output_height > 1 << 14) return -1;
5739 if ((v->sprite_width&1) || (v->sprite_height&1)) {
5740 avpriv_request_sample(avctx, "odd sprites support");
5741 return AVERROR_PATCHWELCOME;
5747 /** Close a VC1/WMV3 decoder
5748 * @warning Initial try at using MpegEncContext stuff
/* AVCodec.close: release everything owned by the VC1Context — the sprite
 * output frame and scaled rows, HRD arrays, the shared MpegEncContext, all
 * bitplanes/tables from ff_vc1_decode_init_alloc_tables(), and intrax8
 * state. Also called from vc1_decode_frame() on a mid-stream size change,
 * so every free must be av_freep()-style idempotent. */
5750 av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
5752 VC1Context *v = avctx->priv_data;
5755 av_frame_free(&v->sprite_output_frame);
5757 for (i = 0; i < 4; i++)
5758 av_freep(&v->sr_rows[i >> 1][i & 1]);
5759 av_freep(&v->hrd_rate);
5760 av_freep(&v->hrd_buffer);
5761 ff_MPV_common_end(&v->s);
5762 av_freep(&v->mv_type_mb_plane);
5763 av_freep(&v->direct_mb_plane);
5764 av_freep(&v->forward_mb_plane);
5765 av_freep(&v->fieldtx_plane);
5766 av_freep(&v->acpred_plane);
5767 av_freep(&v->over_flags_plane);
5768 av_freep(&v->mb_type_base);
5769 av_freep(&v->blk_mv_type_base);
5770 av_freep(&v->mv_f_base);
5771 av_freep(&v->mv_f_next_base);
5772 av_freep(&v->block);
5773 av_freep(&v->cbp_base);
5774 av_freep(&v->ttblk_base);
5775 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5776 av_freep(&v->luma_mv_base);
5777 ff_intrax8_common_end(&v->x8);
5782 /** Decode a VC1/WMV3 frame
5783 * @todo TODO: Handle VC-1 IDUs (Transport level?)
/* AVCodec.decode: decode one VC-1/WMV3 access unit (a frame, or a pair of
 * interlaced fields) from avpkt.
 * Stages: split Advanced-Profile payloads on start-code markers and
 * unescape them into private buffers; (re)initialize the context when the
 * coded size changed; parse the frame header; run the hwaccel/VDPAU path or
 * the software slice loop; then reference the output picture into *pict
 * (presumably also setting *got_frame per the standard libavcodec decode
 * callback contract — the flag-setting lines are not visible here). */
5785 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5786 int *got_frame, AVPacket *avpkt)
5788 const uint8_t *buf = avpkt->data;
5789 int buf_size = avpkt->size, n_slices = 0, i, ret;
5790 VC1Context *v = avctx->priv_data;
5791 MpegEncContext *s = &v->s;
5792 AVFrame *pict = data;
5793 uint8_t *buf2 = NULL;
5794 const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
5795 int mb_height, n_slices1=-1;
5800 } *slices = NULL, *tmp;
5802 v->second_field = 0;
5804 if(s->flags & CODEC_FLAG_LOW_DELAY)
5807 /* no supplementary picture */
5808 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5809 /* special case for last picture */
5810 if (s->low_delay == 0 && s->next_picture_ptr) {
5811 if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
5813 s->next_picture_ptr = NULL;
/* VDPAU decoders bypass get_format(): pick the VDPAU pix_fmt directly. */
5821 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5822 if (v->profile < PROFILE_ADVANCED)
5823 avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
5825 avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
5828 //for advanced profile we may need to parse and unescape data
5829 if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5831 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5833 return AVERROR(ENOMEM);
5835 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5836 const uint8_t *start, *end, *next;
/* Walk the start-code-delimited chunks; each case unescapes its payload
 * (frame data into buf2, fields/slices into per-entry slice buffers). */
5840 for (start = buf, end = buf + buf_size; next < end; start = next) {
5841 next = find_next_marker(start + 4, end);
5842 size = next - start - 4;
5843 if (size <= 0) continue;
5844 switch (AV_RB32(start)) {
5845 case VC1_CODE_FRAME:
5846 if (avctx->hwaccel ||
5847 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5849 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5851 case VC1_CODE_FIELD: {
5853 if (avctx->hwaccel ||
5854 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5855 buf_start_second_field = start;
5856 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5860 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5861 if (!slices[n_slices].buf)
5863 buf_size3 = vc1_unescape_buffer(start + 4, size,
5864 slices[n_slices].buf);
5865 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5867 /* assuming that the field marker is at the exact middle,
5868 hope it's correct */
5869 slices[n_slices].mby_start = s->mb_height + 1 >> 1;
5870 n_slices1 = n_slices - 1; // index of the last slice of the first field
5874 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5875 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5876 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5877 ff_vc1_decode_entry_point(avctx, v, &s->gb);
5879 case VC1_CODE_SLICE: {
5881 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5885 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5886 if (!slices[n_slices].buf)
5888 buf_size3 = vc1_unescape_buffer(start + 4, size,
5889 slices[n_slices].buf);
5890 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
/* Slice headers carry their own starting MB row (9 bits). */
5892 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5898 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5899 const uint8_t *divider;
5902 divider = find_next_marker(buf, buf + buf_size);
5903 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5904 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5906 } else { // found field marker, unescape second field
5907 if (avctx->hwaccel ||
5908 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5909 buf_start_second_field = divider;
5910 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5914 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5915 if (!slices[n_slices].buf)
5917 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5918 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5920 slices[n_slices].mby_start = s->mb_height + 1 >> 1;
5921 n_slices1 = n_slices - 1;
5924 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5926 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5928 init_get_bits(&s->gb, buf2, buf_size2*8);
/* Simple/Main profile payloads are not escaped: read buf directly. */
5930 init_get_bits(&s->gb, buf, buf_size*8);
/* Windows Media Image streams prefix each frame with two sprite flags. */
5932 if (v->res_sprite) {
5933 v->new_sprite = !get_bits1(&s->gb);
5934 v->two_sprites = get_bits1(&s->gb);
5935 /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5936 we're using the sprite compositor. These are intentionally kept separate
5937 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5938 the vc1 one for WVP2 */
5939 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5940 if (v->new_sprite) {
5941 // switch AVCodecContext parameters to those of the sprites
5942 avctx->width = avctx->coded_width = v->sprite_width;
5943 avctx->height = avctx->coded_height = v->sprite_height;
/* Tear down and rebuild the context if the coded size changed mid-stream. */
5950 if (s->context_initialized &&
5951 (s->width != avctx->coded_width ||
5952 s->height != avctx->coded_height)) {
5953 ff_vc1_decode_end(avctx);
5956 if (!s->context_initialized) {
5957 if (ff_msmpeg4_decode_init(avctx) < 0)
5959 if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5960 ff_MPV_common_end(s);
5964 s->low_delay = !avctx->has_b_frames || v->res_sprite;
5966 if (v->profile == PROFILE_ADVANCED) {
5967 if(avctx->coded_width<=1 || avctx->coded_height<=1)
5969 s->h_edge_pos = avctx->coded_width;
5970 s->v_edge_pos = avctx->coded_height;
5974 // do parse frame header
5975 v->pic_header_flag = 0;
5976 v->first_pic_header_flag = 1;
5977 if (v->profile < PROFILE_ADVANCED) {
5978 if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5982 if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5986 v->first_pic_header_flag = 0;
5988 if (avctx->debug & FF_DEBUG_PICT_INFO)
5989 av_log(v->s.avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type));
5991 if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5992 && s->pict_type != AV_PICTURE_TYPE_I) {
5993 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5997 if ((s->mb_height >> v->field_mode) == 0) {
5998 av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
6002 // for skipping the frame
6003 s->current_picture.f.pict_type = s->pict_type;
6004 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
6006 /* skip B-frames if we don't have reference frames */
6007 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
6010 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
6011 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
6012 avctx->skip_frame >= AVDISCARD_ALL) {
6016 if (s->next_p_frame_damaged) {
6017 if (s->pict_type == AV_PICTURE_TYPE_B)
6020 s->next_p_frame_damaged = 0;
6023 if (ff_MPV_frame_start(s, avctx) < 0) {
6027 v->s.current_picture_ptr->field_picture = v->field_mode;
6028 v->s.current_picture_ptr->f.interlaced_frame = (v->fcm != PROGRESSIVE);
6029 v->s.current_picture_ptr->f.top_field_first = v->tff;
6031 // process pulldown flags
6032 s->current_picture_ptr->f.repeat_pict = 0;
6033 // Pulldown flags are only valid when 'broadcast' has been set.
6034 // So ticks_per_frame will be 2
6037 s->current_picture_ptr->f.repeat_pict = 1;
6038 } else if (v->rptfrm) {
6040 s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
6043 s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
6044 s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
/* Hardware paths: hand the (still escaped) bitstream ranges to VDPAU or a
 * generic hwaccel; field pairs are submitted as two pictures. */
6046 if ((CONFIG_VC1_VDPAU_DECODER)
6047 &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
6048 if (v->field_mode && buf_start_second_field) {
6049 ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start);
6050 ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field);
6052 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
6054 } else if (avctx->hwaccel) {
6055 if (v->field_mode && buf_start_second_field) {
6056 // decode first field
6057 s->picture_structure = PICT_BOTTOM_FIELD - v->tff;
6058 if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
6060 if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
6062 if (avctx->hwaccel->end_frame(avctx) < 0)
6065 // decode second field
6066 s->gb = slices[n_slices1 + 1].gb;
6067 s->picture_structure = PICT_TOP_FIELD + v->tff;
6068 v->second_field = 1;
6069 v->pic_header_flag = 0;
6070 if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
6071 av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed");
6074 v->s.current_picture_ptr->f.pict_type = v->s.pict_type;
6076 if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
6078 if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
6080 if (avctx->hwaccel->end_frame(avctx) < 0)
6083 s->picture_structure = PICT_FRAME;
6084 if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6086 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6088 if (avctx->hwaccel->end_frame(avctx) < 0)
/* Software path from here on. */
6094 ff_mpeg_er_frame_start(s);
6096 v->bits = buf_size * 8;
6097 v->end_mb_x = s->mb_width;
/* Field pictures: double the linesizes so each field is decoded with
 * frame-style addressing into alternate rows (undone after the loop). */
6098 if (v->field_mode) {
6099 s->current_picture.f.linesize[0] <<= 1;
6100 s->current_picture.f.linesize[1] <<= 1;
6101 s->current_picture.f.linesize[2] <<= 1;
6103 s->uvlinesize <<= 1;
6105 mb_height = s->mb_height >> v->field_mode;
6107 av_assert0 (mb_height > 0);
/* Slice loop: entry i decodes from the end of slice i-1 up to the start
 * row of slice i (the whole picture when there are no slices). */
6109 for (i = 0; i <= n_slices; i++) {
6110 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6111 if (v->field_mode <= 0) {
6112 av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6113 "picture boundary (%d >= %d)\n", i,
6114 slices[i - 1].mby_start, mb_height);
/* Crossing into the second field: offset block/MB addressing. */
6117 v->second_field = 1;
6118 av_assert0((s->mb_height & 1) == 0);
6119 v->blocks_off = s->b8_stride * (s->mb_height&~1);
6120 v->mb_off = s->mb_stride * s->mb_height >> 1;
6122 v->second_field = 0;
6127 v->pic_header_flag = 0;
6128 if (v->field_mode && i == n_slices1 + 2) {
6129 if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6130 av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6131 if (avctx->err_recognition & AV_EF_EXPLODE)
6135 } else if (get_bits1(&s->gb)) {
6136 v->pic_header_flag = 1;
6137 if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6138 av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6139 if (avctx->err_recognition & AV_EF_EXPLODE)
6147 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6148 if (!v->field_mode || v->second_field)
6149 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6151 if (i >= n_slices) {
6152 av_log(v->s.avctx, AV_LOG_ERROR, "first field slice count too large\n");
6155 s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6157 if (s->end_mb_y <= s->start_mb_y) {
6158 av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y);
6161 if (!v->p_frame_skipped && s->pict_type != AV_PICTURE_TYPE_I && !v->cbpcy_vlc) {
6162 av_log(v->s.avctx, AV_LOG_ERROR, "missing cbpcy_vlc\n");
6165 ff_vc1_decode_blocks(v);
6167 s->gb = slices[i].gb;
/* Undo the field-mode linesize doubling and rotate the field-MV buffers
 * for reference pictures. */
6169 if (v->field_mode) {
6170 v->second_field = 0;
6171 s->current_picture.f.linesize[0] >>= 1;
6172 s->current_picture.f.linesize[1] >>= 1;
6173 s->current_picture.f.linesize[2] >>= 1;
6175 s->uvlinesize >>= 1;
6176 if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) {
6177 FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
6178 FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
6181 av_dlog(s->avctx, "Consumed %i/%i bits\n",
6182 get_bits_count(&s->gb), s->gb.size_in_bits);
6183 // if (get_bits_count(&s->gb) > buf_size * 8)
6185 if(s->er.error_occurred && s->pict_type == AV_PICTURE_TYPE_B)
6188 ff_er_frame_end(&s->er);
6191 ff_MPV_frame_end(s);
/* Sprite decoders: restore the container output size, then composite the
 * decoded sprite(s) into sprite_output_frame and return that. */
6193 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6195 avctx->width = avctx->coded_width = v->output_width;
6196 avctx->height = avctx->coded_height = v->output_height;
6197 if (avctx->skip_frame >= AVDISCARD_NONREF)
6199 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6200 if (vc1_decode_sprites(v, &s->gb))
6203 if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
/* Output: B-frames / low-delay emit the current picture immediately;
 * otherwise emit the (reordered) previous reference picture. */
6207 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6208 if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
6210 ff_print_debug_info(s, s->current_picture_ptr, pict);
6212 } else if (s->last_picture_ptr != NULL) {
6213 if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
6215 ff_print_debug_info(s, s->last_picture_ptr, pict);
/* Cleanup (success and error paths both free the slice buffers). */
6222 for (i = 0; i < n_slices; i++)
6223 av_free(slices[i].buf);
6229 for (i = 0; i < n_slices; i++)
6230 av_free(slices[i].buf);
/* Profile names reported via AVCodecContext.profile for all VC-1 decoders;
 * terminated by the FF_PROFILE_UNKNOWN sentinel. */
6236 static const AVProfile profiles[] = {
6237 { FF_PROFILE_VC1_SIMPLE, "Simple" },
6238 { FF_PROFILE_VC1_MAIN, "Main" },
6239 { FF_PROFILE_VC1_COMPLEX, "Complex" },
6240 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6241 { FF_PROFILE_UNKNOWN },
/* Pixel formats offered to get_format() by the software vc1/wmv3 decoders:
 * hwaccel formats first (each behind its config guard) so hardware
 * acceleration is preferred when available, software 4:2:0 last. */
6244 static const enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[] = {
6246 AV_PIX_FMT_DXVA2_VLD,
6249 AV_PIX_FMT_VAAPI_VLD,
/* Decoder registration for SMPTE VC-1 (Advanced Profile / WVC1). */
6258 AVCodec ff_vc1_decoder = {
6260 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6261 .type = AVMEDIA_TYPE_VIDEO,
6262 .id = AV_CODEC_ID_VC1,
6263 .priv_data_size = sizeof(VC1Context),
6264 .init = vc1_decode_init,
6265 .close = ff_vc1_decode_end,
6266 .decode = vc1_decode_frame,
6267 .flush = ff_mpeg_flush,
6268 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6269 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6270 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_WMV3_DECODER
/* Decoder registration for WMV3 (VC-1 Simple/Main Profile). */
6274 AVCodec ff_wmv3_decoder = {
6276 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
6277 .type = AVMEDIA_TYPE_VIDEO,
6278 .id = AV_CODEC_ID_WMV3,
6279 .priv_data_size = sizeof(VC1Context),
6280 .init = vc1_decode_init,
6281 .close = ff_vc1_decode_end,
6282 .decode = vc1_decode_frame,
6283 .flush = ff_mpeg_flush,
6284 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6285 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6286 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_WMV3_VDPAU_DECODER
/* VDPAU hardware decoder registration for WMV3; outputs only the VDPAU
 * surface format and has no flush callback (no sw reference reordering). */
6291 AVCodec ff_wmv3_vdpau_decoder = {
6292 .name = "wmv3_vdpau",
6293 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
6294 .type = AVMEDIA_TYPE_VIDEO,
6295 .id = AV_CODEC_ID_WMV3,
6296 .priv_data_size = sizeof(VC1Context),
6297 .init = vc1_decode_init,
6298 .close = ff_vc1_decode_end,
6299 .decode = vc1_decode_frame,
6300 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
6301 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
6302 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_VC1_VDPAU_DECODER
/* VDPAU hardware decoder registration for VC-1 Advanced Profile. */
6307 AVCodec ff_vc1_vdpau_decoder = {
6308 .name = "vc1_vdpau",
6309 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
6310 .type = AVMEDIA_TYPE_VIDEO,
6311 .id = AV_CODEC_ID_VC1,
6312 .priv_data_size = sizeof(VC1Context),
6313 .init = vc1_decode_init,
6314 .close = ff_vc1_decode_end,
6315 .decode = vc1_decode_frame,
6316 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
6317 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
6318 .profiles = NULL_IF_CONFIG_SMALL(profiles)
#if CONFIG_WMV3IMAGE_DECODER
/* Sprite-compositing decoder for WMVP streams; uses vc1_sprite_flush so a
 * seek clears the composited picture to black. */
6323 AVCodec ff_wmv3image_decoder = {
6324 .name = "wmv3image",
6325 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
6326 .type = AVMEDIA_TYPE_VIDEO,
6327 .id = AV_CODEC_ID_WMV3IMAGE,
6328 .priv_data_size = sizeof(VC1Context),
6329 .init = vc1_decode_init,
6330 .close = ff_vc1_decode_end,
6331 .decode = vc1_decode_frame,
6332 .capabilities = CODEC_CAP_DR1,
6333 .flush = vc1_sprite_flush,
6334 .pix_fmts = ff_pixfmt_list_420
6338 #if CONFIG_VC1IMAGE_DECODER
6339 AVCodec ff_vc1image_decoder = {
6341 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
6342 .type = AVMEDIA_TYPE_VIDEO,
6343 .id = AV_CODEC_ID_VC1IMAGE,
6344 .priv_data_size = sizeof(VC1Context),
6345 .init = vc1_decode_init,
6346 .close = ff_vc1_decode_end,
6347 .decode = vc1_decode_frame,
6348 .capabilities = CODEC_CAP_DR1,
6349 .flush = vc1_sprite_flush,
6350 .pix_fmts = ff_pixfmt_list_420