/*
 * H.26L/H.264/AVC/JVT/14496-10/... motion vector prediction
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 motion vector prediction.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#ifndef AVCODEC_H264_MVPRED_H
#define AVCODEC_H264_MVPRED_H

#include "mpegutils.h"
#include "libavutil/avassert.h"
#include "libavutil/mem_internal.h"

static av_always_inline int fetch_diagonal_mv(const H264Context *h, H264SliceContext *sl,
                                              const int16_t **C,
                                              int i, int list, int part_width)
{
    const int topright_ref = sl->ref_cache[list][i - 8 + part_width];

    /* there is no consistent mapping of mvs to neighboring locations that will
     * make mbaff happy, so we can't move all this logic to fill_caches */
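    /* MBAFF special case: when the top-right candidate is unavailable and the
     * block sits on the MB's left edge (below its first row), the diagonal
     * predictor lies in the left MB (pair).  SET_DIAG_MV below loads that MV
     * and rescales the vertical component and reference index between frame
     * and field units (MV_OP / REF_OP). */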
    if (FRAME_MBAFF(h)) {
#define SET_DIAG_MV(MV_OP, REF_OP, XY, Y4)                              \
        const int xy = XY, y4 = Y4;                                     \
        const int mb_type = mb_types[xy + (y4 >> 2) * h->mb_stride];    \
        if (!USES_LIST(mb_type, list))                                  \
            return LIST_NOT_USED;                                       \
        mv = h->cur_pic_ptr->motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
        sl->mv_cache[list][scan8[0] - 2][0] = mv[0];                    \
        sl->mv_cache[list][scan8[0] - 2][1] = mv[1] MV_OP;              \
        return h->cur_pic_ptr->ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;

        if (topright_ref == PART_NOT_AVAILABLE
            && i >= scan8[0] + 8 && (i & 7) == 4
            && sl->ref_cache[list][scan8[0] - 1] != PART_NOT_AVAILABLE) {
            const uint32_t *mb_types = h->cur_pic_ptr->mb_type;
            const int16_t *mv;
            AV_ZERO32(sl->mv_cache[list][scan8[0] - 2]);
            *C = sl->mv_cache[list][scan8[0] - 2];

            if (!MB_FIELD(sl) && IS_INTERLACED(sl->left_type[0])) {
                SET_DIAG_MV(* 2, >> 1, sl->left_mb_xy[0] + h->mb_stride,
                            (sl->mb_y & 1) * 2 + (i >> 5));
            }
            if (MB_FIELD(sl) && !IS_INTERLACED(sl->left_type[0])) {
                // left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK.
                SET_DIAG_MV(/ 2, *2, sl->left_mb_xy[i >= 36], ((i >> 2)) & 3);
            }
        }
#undef SET_DIAG_MV
    }

    if (topright_ref != PART_NOT_AVAILABLE) {
        *C = sl->mv_cache[list][i - 8 + part_width];
        return topright_ref;
    } else {
        ff_tlog(h->avctx, "topright MV not available\n");

        *C = sl->mv_cache[list][i - 8 - 1];
        return sl->ref_cache[list][i - 8 - 1];
    }
}

/**
 * Get the predicted MV.
 * @param n the block index
 * @param part_width the width of the partition (4, 8, 16) -> (1, 2, 4)
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_motion(const H264Context *const h,
                                         H264SliceContext *sl,
                                         int n,
                                         int part_width, int list, int ref,
                                         int *const mx, int *const my)
{
    const int index8       = scan8[n];
    const int top_ref      = sl->ref_cache[list][index8 - 8];
    const int left_ref     = sl->ref_cache[list][index8 - 1];
    const int16_t *const A = sl->mv_cache[list][index8 - 1];
    const int16_t *const B = sl->mv_cache[list][index8 - 8];
    const int16_t *C;
    int diagonal_ref, match_count;

    av_assert2(part_width == 1 || part_width == 2 || part_width == 4);
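
    /* Neighbouring predictors in the 8-wide cache: A is the block to the left
     * (index8 - 1), B the block above (index8 - 8) and C the diagonal
     * neighbour returned by fetch_diagonal_mv() (top-right, or top-left as a
     * fallback).  The H.264 rule, as implemented below: take the
     * component-wise median of A/B/C, unless exactly one of them uses the
     * requested reference (take its MV), or only the left neighbour is
     * available (take A). */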
    diagonal_ref = fetch_diagonal_mv(h, sl, &C, index8, list, part_width);
    match_count  = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
    ff_tlog(h->avctx, "pred_motion match_count=%d\n", match_count);
    if (match_count > 1) { //most common
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
        } else if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
        } else {
            *mx = C[0];
            *my = C[1];
        }
    } else {
        if (top_ref      == PART_NOT_AVAILABLE &&
            diagonal_ref == PART_NOT_AVAILABLE &&
            left_ref     != PART_NOT_AVAILABLE) {
            *mx = A[0];
            *my = A[1];
        } else {
            *mx = mid_pred(A[0], B[0], C[0]);
            *my = mid_pred(A[1], B[1], C[1]);
        }
    }

    ff_tlog(h->avctx,
            "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n",
            top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref,
            A[0], A[1], ref, *mx, *my, sl->mb_x, sl->mb_y, n, list);
}
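
/* For 16x8 partitions the spec prefers a single directional predictor:
 * the top neighbour (B) for the upper partition (n == 0) and the left
 * neighbour (A) for the lower one, falling back to the median prediction
 * in pred_motion() when that neighbour uses a different reference. */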
/**
 * Get the directionally predicted 16x8 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_16x8_motion(const H264Context *const h,
                                              H264SliceContext *sl,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
    if (n == 0) {
        const int top_ref      = sl->ref_cache[list][scan8[0] - 8];
        const int16_t *const B = sl->mv_cache[list][scan8[0] - 8];

        ff_tlog(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                top_ref, B[0], B[1], sl->mb_x, sl->mb_y, n, list);

        if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
            return;
        }
    } else {
        const int left_ref     = sl->ref_cache[list][scan8[8] - 1];
        const int16_t *const A = sl->mv_cache[list][scan8[8] - 1];

        ff_tlog(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    }

    //RARE
    pred_motion(h, sl, n, 4, list, ref, mx, my);
}
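
/* For 8x16 partitions the preferred directional predictor is the left
 * neighbour (A) for the left partition (n == 0) and the diagonal neighbour
 * (C, fetched at scan8[4]) for the right partition, again falling back to
 * the median prediction when the reference does not match. */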
/**
 * Get the directionally predicted 8x16 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_8x16_motion(const H264Context *const h,
                                              H264SliceContext *sl,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
    if (n == 0) {
        const int left_ref     = sl->ref_cache[list][scan8[0] - 1];
        const int16_t *const A = sl->mv_cache[list][scan8[0] - 1];

        ff_tlog(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    } else {
        const int16_t *C;
        int diagonal_ref;

        diagonal_ref = fetch_diagonal_mv(h, sl, &C, scan8[4], list, 2);

        ff_tlog(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                diagonal_ref, C[0], C[1], sl->mb_x, sl->mb_y, n, list);

        if (diagonal_ref == ref) {
            *mx = C[0];
            *my = C[1];
            return;
        }
    }

    //RARE
    pred_motion(h, sl, n, 2, list, ref, mx, my);
}

#define FIX_MV_MBAFF(type, refn, mvn, idx)      \
    if (FRAME_MBAFF(h)) {                       \
        if (MB_FIELD(sl)) {                     \
            if (!IS_INTERLACED(type)) {         \
                refn <<= 1;                     \
                AV_COPY32(mvbuf[idx], mvn);     \
                mvbuf[idx][1] /= 2;             \
                mvn = mvbuf[idx];               \
            }                                   \
        } else {                                \
            if (IS_INTERLACED(type)) {          \
                refn >>= 1;                     \
                AV_COPY32(mvbuf[idx], mvn);     \
                mvbuf[idx][1] *= 2;             \
                mvn = mvbuf[idx];               \
            }                                   \
        }                                       \
    }
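
/* In MBAFF streams a neighbouring MB (pair) may be coded in the opposite
 * field/frame mode.  FIX_MV_MBAFF converts its reference index and vertical
 * MV component into the units of the current MB: refs are doubled and
 * vertical MVs halved when going frame->field, and the reverse otherwise.
 * The rescaled MV is written to a scratch entry in mvbuf[]. */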

static av_always_inline void pred_pskip_motion(const H264Context *const h,
                                               H264SliceContext *sl)
{
    DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = { 0 };
    DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
    int8_t *ref     = h->cur_pic.ref_index[0];
    int16_t(*mv)[2] = h->cur_pic.motion_val[0];
    int top_ref, left_ref, diagonal_ref, match_count, mx, my;
    const int16_t *A, *B, *C;
    int b_stride = h->b_stride;

    fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);

    /* To avoid doing an entire fill_decode_caches, we inline the relevant
     * parts here.
     * FIXME: this is a partial duplicate of the logic in fill_decode_caches,
     * but it's faster this way. Is there a way to avoid this duplication?
     */
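
    /* P_SKIP rule (H.264 8.4.1.1): if the left or top neighbour is
     * unavailable, or either of them uses ref 0 with a zero MV, the skip MV
     * is (0,0) -- those cases jump to the zeromv label below.  Otherwise the
     * MV is the usual median prediction for a 16x16 partition with ref 0. */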
    if (USES_LIST(sl->left_type[LTOP], 0)) {
        left_ref = ref[4 * sl->left_mb_xy[LTOP] + 1 + (sl->left_block[0] & ~1)];
        A        = mv[h->mb2b_xy[sl->left_mb_xy[LTOP]] + 3 + b_stride * sl->left_block[0]];
        FIX_MV_MBAFF(sl->left_type[LTOP], left_ref, A, 0);
        if (!(left_ref | AV_RN32A(A)))
            goto zeromv;
    } else if (sl->left_type[LTOP]) {
        left_ref = LIST_NOT_USED;
        A        = zeromv;
    } else {
        goto zeromv;
    }

    if (USES_LIST(sl->top_type, 0)) {
        top_ref = ref[4 * sl->top_mb_xy + 2];
        B       = mv[h->mb2b_xy[sl->top_mb_xy] + 3 * b_stride];
        FIX_MV_MBAFF(sl->top_type, top_ref, B, 1);
        if (!(top_ref | AV_RN32A(B)))
            goto zeromv;
    } else if (sl->top_type) {
        top_ref = LIST_NOT_USED;
        B       = zeromv;
    } else {
        goto zeromv;
    }

    ff_tlog(h->avctx, "pred_pskip: (%d) (%d) at %2d %2d\n",
            top_ref, left_ref, sl->mb_x, sl->mb_y);

    if (USES_LIST(sl->topright_type, 0)) {
        diagonal_ref = ref[4 * sl->topright_mb_xy + 2];
        C            = mv[h->mb2b_xy[sl->topright_mb_xy] + 3 * b_stride];
        FIX_MV_MBAFF(sl->topright_type, diagonal_ref, C, 2);
    } else if (sl->topright_type) {
        diagonal_ref = LIST_NOT_USED;
        C            = zeromv;
    } else {
        if (USES_LIST(sl->topleft_type, 0)) {
            diagonal_ref = ref[4 * sl->topleft_mb_xy + 1 +
                               (sl->topleft_partition & 2)];
            C            = mv[h->mb2b_xy[sl->topleft_mb_xy] + 3 + b_stride +
                              (sl->topleft_partition & 2 * b_stride)];
            FIX_MV_MBAFF(sl->topleft_type, diagonal_ref, C, 2);
        } else if (sl->topleft_type) {
            diagonal_ref = LIST_NOT_USED;
            C            = zeromv;
        } else {
            diagonal_ref = PART_NOT_AVAILABLE;
            C            = zeromv;
        }
    }

    match_count = !diagonal_ref + !top_ref + !left_ref;
    ff_tlog(h->avctx, "pred_pskip_motion match_count=%d\n", match_count);
    if (match_count > 1) {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (!left_ref) {
            mx = A[0];
            my = A[1];
        } else if (!top_ref) {
            mx = B[0];
            my = B[1];
        } else {
            mx = C[0];
            my = C[1];
        }
    } else {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    }

    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx, my), 4);
    return;

zeromv:
    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
    return;
}

static void fill_decode_neighbors(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    const int mb_xy = sl->mb_xy;
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    static const uint8_t left_block_options[4][32] = {
        { 0, 1, 2, 3, 7, 10, 8, 11, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 2, 2, 3, 3, 8, 11, 8, 11, 3 + 2 * 4, 3 + 2 * 4, 3 + 3 * 4, 3 + 3 * 4, 1 + 5 * 4, 1 + 9 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 0, 0, 1, 1, 7, 10, 7, 10, 3 + 0 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 1 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 },
        { 0, 2, 0, 2, 7, 10, 7, 10, 3 + 0 * 4, 3 + 2 * 4, 3 + 0 * 4, 3 + 2 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 }
    };
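
    /* left_block_options maps the rows of the current MB's caches to blocks
     * of the left neighbour.  Entry [0] is the regular same-structure
     * mapping; the other entries are selected below for the MBAFF cases where
     * the left MB pair is coded in the opposite field/frame mode. */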
    sl->topleft_partition = -1;

    top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));

    /* Wow, what a mess, why didn't they simplify the interlacing & intra
     * stuff, I can't imagine that these complex rules are worth it. */

    topleft_xy    = top_xy - 1;
    topright_xy   = top_xy + 1;
    left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
    sl->left_block = left_block_options[0];
    if (FRAME_MBAFF(h)) {
        const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if (sl->mb_y & 1) {
            if (left_mb_field_flag != curr_mb_field_flag) {
                left_xy[LBOT] = left_xy[LTOP] = mb_xy - h->mb_stride - 1;
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += h->mb_stride;
                    sl->left_block = left_block_options[3];
                } else {
                    topleft_xy += h->mb_stride;
                    /* take top left mv from the middle of the mb, as opposed
                     * to all other modes which use the bottom right partition */
                    sl->topleft_partition = 0;
                    sl->left_block        = left_block_options[1];
                }
            }
        } else {
            if (curr_mb_field_flag) {
                topleft_xy  += h->mb_stride & (((h->cur_pic.mb_type[top_xy - 1] >> 7) & 1) - 1);
                topright_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy + 1] >> 7) & 1) - 1);
                top_xy      += h->mb_stride & (((h->cur_pic.mb_type[top_xy]     >> 7) & 1) - 1);
            }
            if (left_mb_field_flag != curr_mb_field_flag) {
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += h->mb_stride;
                    sl->left_block = left_block_options[3];
                } else {
                    sl->left_block = left_block_options[2];
                }
            }
        }
    }

    sl->topleft_mb_xy    = topleft_xy;
    sl->top_mb_xy        = top_xy;
    sl->topright_mb_xy   = topright_xy;
    sl->left_mb_xy[LTOP] = left_xy[LTOP];
    sl->left_mb_xy[LBOT] = left_xy[LBOT];
    //FIXME do we need all in the context?

    sl->topleft_type    = h->cur_pic.mb_type[topleft_xy];
    sl->top_type        = h->cur_pic.mb_type[top_xy];
    sl->topright_type   = h->cur_pic.mb_type[topright_xy];
    sl->left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
    sl->left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];

    if (FMO) {
        if (h->slice_table[topleft_xy] != sl->slice_num)
            sl->topleft_type = 0;
        if (h->slice_table[top_xy] != sl->slice_num)
            sl->top_type = 0;
        if (h->slice_table[left_xy[LTOP]] != sl->slice_num)
            sl->left_type[LTOP] = sl->left_type[LBOT] = 0;
    } else {
        if (h->slice_table[topleft_xy] != sl->slice_num) {
            sl->topleft_type = 0;
            if (h->slice_table[top_xy] != sl->slice_num)
                sl->top_type = 0;
            if (h->slice_table[left_xy[LTOP]] != sl->slice_num)
                sl->left_type[LTOP] = sl->left_type[LBOT] = 0;
        }
    }
    if (h->slice_table[topright_xy] != sl->slice_num)
        sl->topright_type = 0;
}
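
/* fill_decode_caches() copies everything the per-MB decode loops need from
 * the neighbouring MBs (resolved above by fill_decode_neighbors()) into small
 * 8-entries-per-row caches indexed via scan8[]: intra 4x4 prediction modes,
 * non-zero-count values, CBP, motion vectors / reference indices, MV deltas
 * for CABAC and direct-mode flags for B slices. */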
static void fill_decode_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    int topleft_type, top_type, topright_type, left_type[LEFT_MBS];
    const uint8_t *left_block = sl->left_block;
    int i;
    uint8_t *nnz;
    uint8_t *nnz_cache;

    topleft_xy      = sl->topleft_mb_xy;
    top_xy          = sl->top_mb_xy;
    topright_xy     = sl->topright_mb_xy;
    left_xy[LTOP]   = sl->left_mb_xy[LTOP];
    left_xy[LBOT]   = sl->left_mb_xy[LBOT];
    topleft_type    = sl->topleft_type;
    top_type        = sl->top_type;
    topright_type   = sl->topright_type;
    left_type[LTOP] = sl->left_type[LTOP];
    left_type[LBOT] = sl->left_type[LBOT];

    if (!IS_SKIP(mb_type)) {
        if (IS_INTRA(mb_type)) {
            int type_mask = h->ps.pps->constrained_intra_pred ? IS_INTRA(-1) : -1;
            sl->topleft_samples_available      =
                sl->top_samples_available      =
                    sl->left_samples_available = 0xFFFF;
            sl->topright_samples_available     = 0xEEEA;

            if (!(top_type & type_mask)) {
                sl->topleft_samples_available  = 0xB3FF;
                sl->top_samples_available      = 0x33FF;
                sl->topright_samples_available = 0x26EA;
            }
            if (IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[LTOP])) {
                if (IS_INTERLACED(mb_type)) {
                    if (!(left_type[LTOP] & type_mask)) {
                        sl->topleft_samples_available &= 0xDFFF;
                        sl->left_samples_available    &= 0x5FFF;
                    }
                    if (!(left_type[LBOT] & type_mask)) {
                        sl->topleft_samples_available &= 0xFF5F;
                        sl->left_samples_available    &= 0xFF5F;
                    }
                } else {
                    int left_typei = h->cur_pic.mb_type[left_xy[LTOP] + h->mb_stride];

                    av_assert2(left_xy[LTOP] == left_xy[LBOT]);
                    if (!((left_typei & type_mask) && (left_type[LTOP] & type_mask))) {
                        sl->topleft_samples_available &= 0xDF5F;
                        sl->left_samples_available    &= 0x5F5F;
                    }
                }
            } else {
                if (!(left_type[LTOP] & type_mask)) {
                    sl->topleft_samples_available &= 0xDF5F;
                    sl->left_samples_available    &= 0x5F5F;
                }
            }

            if (!(topleft_type & type_mask))
                sl->topleft_samples_available &= 0x7FFF;

            if (!(topright_type & type_mask))
                sl->topright_samples_available &= 0xFBFF;

            if (IS_INTRA4x4(mb_type)) {
                if (IS_INTRA4x4(top_type)) {
                    AV_COPY32(sl->intra4x4_pred_mode_cache + 4 + 8 * 0, sl->intra4x4_pred_mode + h->mb2br_xy[top_xy]);
                } else {
                    sl->intra4x4_pred_mode_cache[4 + 8 * 0] =
                    sl->intra4x4_pred_mode_cache[5 + 8 * 0] =
                    sl->intra4x4_pred_mode_cache[6 + 8 * 0] =
                    sl->intra4x4_pred_mode_cache[7 + 8 * 0] = 2 - 3 * !(top_type & type_mask);
                }
                for (i = 0; i < 2; i++) {
                    if (IS_INTRA4x4(left_type[LEFT(i)])) {
                        int8_t *mode = sl->intra4x4_pred_mode + h->mb2br_xy[left_xy[LEFT(i)]];
                        sl->intra4x4_pred_mode_cache[3 + 8 * 1 + 2 * 8 * i] = mode[6 - left_block[0 + 2 * i]];
                        sl->intra4x4_pred_mode_cache[3 + 8 * 2 + 2 * 8 * i] = mode[6 - left_block[1 + 2 * i]];
                    } else {
                        sl->intra4x4_pred_mode_cache[3 + 8 * 1 + 2 * 8 * i] =
                        sl->intra4x4_pred_mode_cache[3 + 8 * 2 + 2 * 8 * i] = 2 - 3 * !(left_type[LEFT(i)] & type_mask);
                    }
                }
            }
        }
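
        /* The non_zero_count cache below uses the same 8-wide layout as the
         * mv/ref caches: the row above the current MB's blocks receives the
         * top neighbour's bottom row of counts and the column to their left
         * the left neighbour's rightmost column, with the two chroma planes
         * kept at row offsets +5 and +10 (see the indices used below). */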
        /* FIXME: constraint_intra_pred & partitioning & nnz
         * (let us hope this is just a typo in the spec) */
        nnz_cache = sl->non_zero_count_cache;
        if (top_type) {
            nnz = h->non_zero_count[top_xy];
            AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[4 * 3]);
            if (!h->chroma_y_shift) {
                AV_COPY32(&nnz_cache[4 + 8 *  5], &nnz[4 *  7]);
                AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 11]);
            } else {
                AV_COPY32(&nnz_cache[4 + 8 *  5], &nnz[4 * 5]);
                AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 9]);
            }
        } else {
            uint32_t top_empty = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 0x40404040;
            AV_WN32A(&nnz_cache[4 + 8 *  0], top_empty);
            AV_WN32A(&nnz_cache[4 + 8 *  5], top_empty);
            AV_WN32A(&nnz_cache[4 + 8 * 10], top_empty);
        }

        for (i = 0; i < 2; i++) {
            if (left_type[LEFT(i)]) {
                nnz = h->non_zero_count[left_xy[LEFT(i)]];
                nnz_cache[3 + 8 * 1 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i]];
                nnz_cache[3 + 8 * 2 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i]];
                if (CHROMA444(h)) {
                    nnz_cache[3 + 8 *  6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 4 * 4];
                    nnz_cache[3 + 8 *  7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 4 * 4];
                    nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 8 * 4];
                    nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 8 * 4];
                } else if (CHROMA422(h)) {
                    nnz_cache[3 + 8 *  6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 4 * 4];
                    nnz_cache[3 + 8 *  7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 4 * 4];
                    nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 8 * 4];
                    nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 8 * 4];
                } else {
                    nnz_cache[3 + 8 *  6 + 8 * i] = nnz[left_block[8 + 4 + 2 * i]];
                    nnz_cache[3 + 8 * 11 + 8 * i] = nnz[left_block[8 + 5 + 2 * i]];
                }
            } else {
                nnz_cache[3 + 8 *  1 + 2 * 8 * i] =
                nnz_cache[3 + 8 *  2 + 2 * 8 * i] =
                nnz_cache[3 + 8 *  6 + 2 * 8 * i] =
                nnz_cache[3 + 8 *  7 + 2 * 8 * i] =
                nnz_cache[3 + 8 * 11 + 2 * 8 * i] =
                nnz_cache[3 + 8 * 12 + 2 * 8 * i] = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 64;
            }
        }

        if (CABAC(h)) {
            if (top_type)
                sl->top_cbp = h->cbp_table[top_xy];
            else
                sl->top_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;

            if (left_type[LTOP]) {
                sl->left_cbp =   (h->cbp_table[left_xy[LTOP]] & 0x7F0)                        |
                                ((h->cbp_table[left_xy[LTOP]] >> (left_block[0] & (~1))) & 2) |
                               (((h->cbp_table[left_xy[LBOT]] >> (left_block[2] & (~1))) & 2) << 2);
            } else {
                sl->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
            }
        }
    }

    if (IS_INTER(mb_type) || (IS_DIRECT(mb_type) && sl->direct_spatial_mv_pred)) {
        int list;
        int b_stride = h->b_stride;
        for (list = 0; list < sl->list_count; list++) {
            int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
            int8_t *ref       = h->cur_pic.ref_index[list];
            int16_t(*mv_cache)[2] = &sl->mv_cache[list][scan8[0]];
            int16_t(*mv)[2]       = h->cur_pic.motion_val[list];
            if (!USES_LIST(mb_type, list))
                continue;
            av_assert2(!(IS_DIRECT(mb_type) && !sl->direct_spatial_mv_pred));

            if (USES_LIST(top_type, list)) {
                const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
                AV_COPY128(mv_cache[0 - 1 * 8], mv[b_xy + 0]);
                ref_cache[0 - 1 * 8] =
                ref_cache[1 - 1 * 8] = ref[4 * top_xy + 2];
                ref_cache[2 - 1 * 8] =
                ref_cache[3 - 1 * 8] = ref[4 * top_xy + 3];
            } else {
                AV_ZERO128(mv_cache[0 - 1 * 8]);
                AV_WN32A(&ref_cache[0 - 1 * 8],
                         ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE) & 0xFF) * 0x01010101u);
            }

            if (mb_type & (MB_TYPE_16x8 | MB_TYPE_8x8)) {
                for (i = 0; i < 2; i++) {
                    int cache_idx = -1 + i * 2 * 8;
                    if (USES_LIST(left_type[LEFT(i)], list)) {
                        const int b_xy  = h->mb2b_xy[left_xy[LEFT(i)]] + 3;
                        const int b8_xy = 4 * left_xy[LEFT(i)] + 1;
                        AV_COPY32(mv_cache[cache_idx],
                                  mv[b_xy + b_stride * left_block[0 + i * 2]]);
                        AV_COPY32(mv_cache[cache_idx + 8],
                                  mv[b_xy + b_stride * left_block[1 + i * 2]]);
                        ref_cache[cache_idx]     = ref[b8_xy + (left_block[0 + i * 2] & ~1)];
                        ref_cache[cache_idx + 8] = ref[b8_xy + (left_block[1 + i * 2] & ~1)];
                    } else {
                        AV_ZERO32(mv_cache[cache_idx]);
                        AV_ZERO32(mv_cache[cache_idx + 8]);
                        ref_cache[cache_idx]     =
                        ref_cache[cache_idx + 8] = (left_type[LEFT(i)]) ? LIST_NOT_USED
                                                                        : PART_NOT_AVAILABLE;
                    }
                }
            } else {
                if (USES_LIST(left_type[LTOP], list)) {
                    const int b_xy  = h->mb2b_xy[left_xy[LTOP]] + 3;
                    const int b8_xy = 4 * left_xy[LTOP] + 1;
                    AV_COPY32(mv_cache[-1], mv[b_xy + b_stride * left_block[0]]);
                    ref_cache[-1] = ref[b8_xy + (left_block[0] & ~1)];
                } else {
                    AV_ZERO32(mv_cache[-1]);
                    ref_cache[-1] = left_type[LTOP] ? LIST_NOT_USED
                                                    : PART_NOT_AVAILABLE;
                }
            }

            if (USES_LIST(topright_type, list)) {
                const int b_xy = h->mb2b_xy[topright_xy] + 3 * b_stride;
                AV_COPY32(mv_cache[4 - 1 * 8], mv[b_xy]);
                ref_cache[4 - 1 * 8] = ref[4 * topright_xy + 2];
            } else {
                AV_ZERO32(mv_cache[4 - 1 * 8]);
                ref_cache[4 - 1 * 8] = topright_type ? LIST_NOT_USED
                                                     : PART_NOT_AVAILABLE;
            }
            if (ref_cache[2 - 1 * 8] < 0 || ref_cache[4 - 1 * 8] < 0) {
                if (USES_LIST(topleft_type, list)) {
                    const int b_xy  = h->mb2b_xy[topleft_xy] + 3 + b_stride +
                                      (sl->topleft_partition & 2 * b_stride);
                    const int b8_xy = 4 * topleft_xy + 1 + (sl->topleft_partition & 2);
                    AV_COPY32(mv_cache[-1 - 1 * 8], mv[b_xy]);
                    ref_cache[-1 - 1 * 8] = ref[b8_xy];
                } else {
                    AV_ZERO32(mv_cache[-1 - 1 * 8]);
                    ref_cache[-1 - 1 * 8] = topleft_type ? LIST_NOT_USED
                                                         : PART_NOT_AVAILABLE;
                }
            }

            if ((mb_type & (MB_TYPE_SKIP | MB_TYPE_DIRECT2)) && !FRAME_MBAFF(h))
                continue;

            if (!(mb_type & (MB_TYPE_SKIP | MB_TYPE_DIRECT2))) {
                uint8_t(*mvd_cache)[2] = &sl->mvd_cache[list][scan8[0]];
                uint8_t(*mvd)[2]       = sl->mvd_table[list];
                ref_cache[2 + 8 * 0] =
                ref_cache[2 + 8 * 2] = PART_NOT_AVAILABLE;
                AV_ZERO32(mv_cache[2 + 8 * 0]);
                AV_ZERO32(mv_cache[2 + 8 * 2]);

                if (CABAC(h)) {
                    if (USES_LIST(top_type, list)) {
                        const int b_xy = h->mb2br_xy[top_xy];
                        AV_COPY64(mvd_cache[0 - 1 * 8], mvd[b_xy + 0]);
                    } else {
                        AV_ZERO64(mvd_cache[0 - 1 * 8]);
                    }
                    if (USES_LIST(left_type[LTOP], list)) {
                        const int b_xy = h->mb2br_xy[left_xy[LTOP]] + 6;
                        AV_COPY16(mvd_cache[-1 + 0 * 8], mvd[b_xy - left_block[0]]);
                        AV_COPY16(mvd_cache[-1 + 1 * 8], mvd[b_xy - left_block[1]]);
                    } else {
                        AV_ZERO16(mvd_cache[-1 + 0 * 8]);
                        AV_ZERO16(mvd_cache[-1 + 1 * 8]);
                    }
                    if (USES_LIST(left_type[LBOT], list)) {
                        const int b_xy = h->mb2br_xy[left_xy[LBOT]] + 6;
                        AV_COPY16(mvd_cache[-1 + 2 * 8], mvd[b_xy - left_block[2]]);
                        AV_COPY16(mvd_cache[-1 + 3 * 8], mvd[b_xy - left_block[3]]);
                    } else {
                        AV_ZERO16(mvd_cache[-1 + 2 * 8]);
                        AV_ZERO16(mvd_cache[-1 + 3 * 8]);
                    }
                    AV_ZERO16(mvd_cache[2 + 8 * 0]);
                    AV_ZERO16(mvd_cache[2 + 8 * 2]);
                    if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
                        uint8_t *direct_cache = &sl->direct_cache[scan8[0]];
                        uint8_t *direct_table = h->direct_table;
                        fill_rectangle(direct_cache, 4, 4, 8, MB_TYPE_16x16 >> 1, 1);

                        if (IS_DIRECT(top_type)) {
                            AV_WN32A(&direct_cache[-1 * 8],
                                     0x01010101u * (MB_TYPE_DIRECT2 >> 1));
                        } else if (IS_8X8(top_type)) {
                            int b8_xy = 4 * top_xy;
                            direct_cache[0 - 1 * 8] = direct_table[b8_xy + 2];
                            direct_cache[2 - 1 * 8] = direct_table[b8_xy + 3];
                        } else {
                            AV_WN32A(&direct_cache[-1 * 8],
                                     0x01010101 * (MB_TYPE_16x16 >> 1));
                        }

                        if (IS_DIRECT(left_type[LTOP]))
                            direct_cache[-1 + 0 * 8] = MB_TYPE_DIRECT2 >> 1;
                        else if (IS_8X8(left_type[LTOP]))
                            direct_cache[-1 + 0 * 8] = direct_table[4 * left_xy[LTOP] + 1 + (left_block[0] & ~1)];
                        else
                            direct_cache[-1 + 0 * 8] = MB_TYPE_16x16 >> 1;

                        if (IS_DIRECT(left_type[LBOT]))
                            direct_cache[-1 + 2 * 8] = MB_TYPE_DIRECT2 >> 1;
                        else if (IS_8X8(left_type[LBOT]))
                            direct_cache[-1 + 2 * 8] = direct_table[4 * left_xy[LBOT] + 1 + (left_block[2] & ~1)];
                        else
                            direct_cache[-1 + 2 * 8] = MB_TYPE_16x16 >> 1;
                    }
                }
            }

#define MAP_MVS                                                     \
    MAP_F2F(scan8[0] - 1 - 1 * 8, topleft_type)                     \
    MAP_F2F(scan8[0] + 0 - 1 * 8, top_type)                         \
    MAP_F2F(scan8[0] + 1 - 1 * 8, top_type)                         \
    MAP_F2F(scan8[0] + 2 - 1 * 8, top_type)                         \
    MAP_F2F(scan8[0] + 3 - 1 * 8, top_type)                         \
    MAP_F2F(scan8[0] + 4 - 1 * 8, topright_type)                    \
    MAP_F2F(scan8[0] - 1 + 0 * 8, left_type[LTOP])                  \
    MAP_F2F(scan8[0] - 1 + 1 * 8, left_type[LTOP])                  \
    MAP_F2F(scan8[0] - 1 + 2 * 8, left_type[LBOT])                  \
    MAP_F2F(scan8[0] - 1 + 3 * 8, left_type[LBOT])
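
            /* With MBAFF, neighbours coded in the opposite field/frame mode
             * need their cached refs, MVs and MV deltas converted into the
             * current MB's units.  MAP_MVS applies MAP_F2F to every border
             * cache entry filled above; the first MAP_F2F definition below is
             * used when the current MB is field-coded, the second when it is
             * frame-coded. */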
            if (FRAME_MBAFF(h)) {
                if (MB_FIELD(sl)) {

#define MAP_F2F(idx, mb_type)                                       \
    if (!IS_INTERLACED(mb_type) && sl->ref_cache[list][idx] >= 0) { \
        sl->ref_cache[list][idx]    *= 2;                           \
        sl->mv_cache[list][idx][1]  /= 2;                           \
        sl->mvd_cache[list][idx][1] >>= 1;                          \
    }

                    MAP_MVS
#undef MAP_F2F
                } else {

#define MAP_F2F(idx, mb_type)                                       \
    if (IS_INTERLACED(mb_type) && sl->ref_cache[list][idx] >= 0) {  \
        sl->ref_cache[list][idx]    >>= 1;                          \
        sl->mv_cache[list][idx][1]  *= 2;                           \
        sl->mvd_cache[list][idx][1] <<= 1;                          \
    }

                    MAP_MVS
#undef MAP_F2F
                }
            }
        }
    }

    sl->neighbor_transform_size = !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[LTOP]);
}
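
/* For P_SKIP the motion comes from pred_pskip_motion(); for B_SKIP the caches
 * are filled (for spatial direct prediction) and ff_h264_pred_direct_motion()
 * derives the direct-mode motion.  In both cases the motion is written back
 * and the MB is flagged as skipped. */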
/**
 * decodes a P_SKIP or B_SKIP macroblock
 */
static void av_unused decode_mb_skip(const H264Context *h, H264SliceContext *sl)
{
    const int mb_xy = sl->mb_xy;
    int mb_type     = 0;

    memset(h->non_zero_count[mb_xy], 0, 48);

    if (MB_FIELD(sl))
        mb_type |= MB_TYPE_INTERLACED;

    if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
        // just for fill_caches. pred_direct_motion will set the real mb_type
        mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 | MB_TYPE_SKIP;
        if (sl->direct_spatial_mv_pred) {
            fill_decode_neighbors(h, sl, mb_type);
            fill_decode_caches(h, sl, mb_type); //FIXME check what is needed and what not ...
        }
        ff_h264_pred_direct_motion(h, sl, &mb_type);
        mb_type |= MB_TYPE_SKIP;
    } else {
        mb_type |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P1L0 | MB_TYPE_SKIP;

        fill_decode_neighbors(h, sl, mb_type);
        pred_pskip_motion(h, sl);
    }

    write_back_motion(h, sl, mb_type);
    h->cur_pic.mb_type[mb_xy]      = mb_type;
    h->cur_pic.qscale_table[mb_xy] = sl->qscale;
    h->slice_table[mb_xy]          = sl->slice_num;
    sl->prev_mb_skipped            = 1;
}

#endif /* AVCODEC_H264_MVPRED_H */