 * H.26L/H.264/AVC/JVT/14496-10/... motion vector prediction
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 motion vector prediction.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */
#ifndef AVCODEC_H264_MVPRED_H
#define AVCODEC_H264_MVPRED_H

#include "libavutil/avassert.h"

static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C,
                                              int i, int list, int part_width)
{
    const int topright_ref = h->ref_cache[list][i - 8 + part_width];

    /* there is no consistent mapping of mvs to neighboring locations that will
     * make mbaff happy, so we can't move all this logic to fill_caches */
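    /* SET_DIAG_MV below fetches the MV and reference index of the rightmost
     * 4x4 block in row Y4 of the macroblock at XY and caches the MV at
     * scan8[0] - 2: MV_OP rescales the vertical MV component and REF_OP the
     * reference index when that neighbour uses the opposite field/frame
     * coding, and LIST_NOT_USED is returned when the neighbour does not use
     * this list at all. */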
#define SET_DIAG_MV(MV_OP, REF_OP, XY, Y4)                              \
    const int xy = XY, y4 = Y4;                                         \
    const int mb_type = mb_types[xy + (y4 >> 2) * h->mb_stride];        \
    if (!USES_LIST(mb_type, list))                                      \
        return LIST_NOT_USED;                                           \
    mv = h->cur_pic_ptr->motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
    h->mv_cache[list][scan8[0] - 2][0] = mv[0];                         \
    h->mv_cache[list][scan8[0] - 2][1] = mv[1] MV_OP;                   \
    return h->cur_pic_ptr->ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;
    if (topright_ref == PART_NOT_AVAILABLE
        && i >= scan8[0] + 8 && (i & 7) == 4
        && h->ref_cache[list][scan8[0] - 1] != PART_NOT_AVAILABLE) {
        const uint32_t *mb_types = h->cur_pic_ptr->mb_type;

        AV_ZERO32(h->mv_cache[list][scan8[0] - 2]);
        *C = h->mv_cache[list][scan8[0] - 2];

        if (!MB_FIELD(h) && IS_INTERLACED(h->left_type[0])) {
            SET_DIAG_MV(* 2, >> 1, h->left_mb_xy[0] + h->mb_stride,
                        (h->mb_y & 1) * 2 + (i >> 5));
        }
        if (MB_FIELD(h) && !IS_INTERLACED(h->left_type[0])) {
            // left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK.
            SET_DIAG_MV(/ 2, << 1, h->left_mb_xy[i >= 36], ((i >> 2)) & 3);
        }
    }
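    /* Outside of the MBAFF special case above, the diagonal predictor is
     * simply the cached top-right block C when it is available, and the
     * cached top-left block (at offset i - 8 - 1) otherwise. */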
    if (topright_ref != PART_NOT_AVAILABLE) {
        *C = h->mv_cache[list][i - 8 + part_width];
        return topright_ref;
    } else {
        tprintf(h->avctx, "topright MV not available\n");

        *C = h->mv_cache[list][i - 8 - 1];
        return h->ref_cache[list][i - 8 - 1];
    }
/**
 * Get the predicted MV.
 * @param n the block index
 * @param part_width the width of the partition (4, 8, 16) -> (1, 2, 4)
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
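/* In the common case the predicted MV is the component-wise median of the
 * neighbouring MVs A (left), B (top) and C (top-right, or top-left when the
 * top-right block is unavailable). If exactly one of those neighbours uses
 * the same reference picture as the current block, that neighbour's MV is
 * used directly. Worked example: A = (4, -2), B = (10, 0), C = (6, 2) gives
 * (mid_pred(4, 10, 6), mid_pred(-2, 0, 2)) = (6, 0). */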
static av_always_inline void pred_motion(H264Context *const h, int n,
                                         int part_width, int list, int ref,
                                         int *const mx, int *const my)
{
    const int index8       = scan8[n];
    const int top_ref      = h->ref_cache[list][index8 - 8];
    const int left_ref     = h->ref_cache[list][index8 - 1];
    const int16_t *const A = h->mv_cache[list][index8 - 1];
    const int16_t *const B = h->mv_cache[list][index8 - 8];
    const int16_t *C;
    int diagonal_ref, match_count;

    av_assert2(part_width == 1 || part_width == 2 || part_width == 4);

    diagonal_ref = fetch_diagonal_mv(h, &C, index8, list, part_width);
    match_count  = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
    tprintf(h->avctx, "pred_motion match_count=%d\n", match_count);
    if (match_count > 1) { // most common
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
        } else if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
        } else {
            *mx = C[0];
            *my = C[1];
        }
    } else {
        if (top_ref      == PART_NOT_AVAILABLE &&
            diagonal_ref == PART_NOT_AVAILABLE &&
            left_ref     != PART_NOT_AVAILABLE) {
            *mx = A[0];
            *my = A[1];
        } else {
            *mx = mid_pred(A[0], B[0], C[0]);
            *my = mid_pred(A[1], B[1], C[1]);
        }
    }

    tprintf(h->avctx,
            "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n",
            top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref,
            A[0], A[1], ref, *mx, *my, h->mb_x, h->mb_y, n, list);
}
/**
 * Get the directionally predicted 16x8 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
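/* A 16x8 partition predicts from the neighbour it touches: B (top) for the
 * upper partition and A (left) for the lower one, but only when that
 * neighbour uses the same reference picture; otherwise the generic median
 * prediction of pred_motion() is used. */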
static av_always_inline void pred_16x8_motion(H264Context *const h,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
        const int top_ref      = h->ref_cache[list][scan8[0] - 8];
        const int16_t *const B = h->mv_cache[list][scan8[0] - 8];

        tprintf(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                top_ref, B[0], B[1], h->mb_x, h->mb_y, n, list);

        if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
            return;
        }

        const int left_ref     = h->ref_cache[list][scan8[8] - 1];
        const int16_t *const A = h->mv_cache[list][scan8[8] - 1];

        tprintf(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], h->mb_x, h->mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    pred_motion(h, n, 4, list, ref, mx, my);
/**
 * Get the directionally predicted 8x16 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
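/* An 8x16 partition predicts from A (left) for the left partition and from
 * the diagonal neighbour C returned by fetch_diagonal_mv() for the right one,
 * again only when the reference picture matches; otherwise pred_motion()
 * supplies the median prediction. */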
static av_always_inline void pred_8x16_motion(H264Context *const h,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
        const int left_ref     = h->ref_cache[list][scan8[0] - 1];
        const int16_t *const A = h->mv_cache[list][scan8[0] - 1];

        tprintf(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], h->mb_x, h->mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }

        diagonal_ref = fetch_diagonal_mv(h, &C, scan8[4], list, 2);

        tprintf(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                diagonal_ref, C[0], C[1], h->mb_x, h->mb_y, n, list);

        if (diagonal_ref == ref) {
            *mx = C[0];
            *my = C[1];
            return;
        }
    pred_motion(h, n, 2, list, ref, mx, my);
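/* FIX_MV_MBAFF converts a neighbour's cached MV and reference index between
 * frame and field units when, in an MBAFF frame, the neighbour's interlacing
 * differs from the current macroblock: the vertical MV component is halved or
 * doubled and the reference index doubled or halved accordingly, using
 * mvbuf[idx] as scratch space so the picture's own motion tables are left
 * untouched. */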
#define FIX_MV_MBAFF(type, refn, mvn, idx)      \
    if (FRAME_MBAFF(h)) {                       \
        if (MB_FIELD(h)) {                      \
            if (!IS_INTERLACED(type)) {         \
                refn <<= 1;                     \
                AV_COPY32(mvbuf[idx], mvn);     \
                mvbuf[idx][1] /= 2;             \
                mvn = mvbuf[idx];               \
            }                                   \
        } else if (IS_INTERLACED(type)) {       \
            refn >>= 1;                         \
            AV_COPY32(mvbuf[idx], mvn);         \
            mvbuf[idx][1] <<= 1;                \
            mvn = mvbuf[idx];                   \
        }                                       \
    }
static av_always_inline void pred_pskip_motion(H264Context *const h)
{
    DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = { 0 };
    DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
    int8_t *ref     = h->cur_pic.ref_index[0];
    int16_t(*mv)[2] = h->cur_pic.motion_val[0];
    int top_ref, left_ref, diagonal_ref, match_count, mx, my;
    const int16_t *A, *B, *C;
    int b_stride = h->b_stride;

    fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
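    /* P_SKIP motion prediction: if the left or top neighbour macroblock does
     * not exist, or either of them is an inter block with reference index 0
     * and a zero MV, the skipped macroblock itself gets a zero MV (the zeromv
     * label below); otherwise the usual median prediction with reference
     * index 0 is applied. */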
    /* To avoid doing an entire fill_decode_caches, we inline the relevant
     * parts here.
     * FIXME: this is a partial duplicate of the logic in fill_decode_caches,
     * but it's faster this way. Is there a way to avoid this duplication?
     */
    if (USES_LIST(h->left_type[LTOP], 0)) {
        left_ref = ref[4 * h->left_mb_xy[LTOP] + 1 + (h->left_block[0] & ~1)];
        A        = mv[h->mb2b_xy[h->left_mb_xy[LTOP]] + 3 + b_stride * h->left_block[0]];
        FIX_MV_MBAFF(h->left_type[LTOP], left_ref, A, 0);
        if (!(left_ref | AV_RN32A(A)))
            goto zeromv;
    } else if (h->left_type[LTOP]) {
        left_ref = LIST_NOT_USED;

    if (USES_LIST(h->top_type, 0)) {
        top_ref = ref[4 * h->top_mb_xy + 2];
        B       = mv[h->mb2b_xy[h->top_mb_xy] + 3 * b_stride];
        FIX_MV_MBAFF(h->top_type, top_ref, B, 1);
        if (!(top_ref | AV_RN32A(B)))
            goto zeromv;
    } else if (h->top_type) {
        top_ref = LIST_NOT_USED;

    tprintf(h->avctx, "pred_pskip: (%d) (%d) at %2d %2d\n",
            top_ref, left_ref, h->mb_x, h->mb_y);

    if (USES_LIST(h->topright_type, 0)) {
        diagonal_ref = ref[4 * h->topright_mb_xy + 2];
        C = mv[h->mb2b_xy[h->topright_mb_xy] + 3 * b_stride];
        FIX_MV_MBAFF(h->topright_type, diagonal_ref, C, 2);
    } else if (h->topright_type) {
        diagonal_ref = LIST_NOT_USED;

        if (USES_LIST(h->topleft_type, 0)) {
            diagonal_ref = ref[4 * h->topleft_mb_xy + 1 +
                               (h->topleft_partition & 2)];
            C = mv[h->mb2b_xy[h->topleft_mb_xy] + 3 + b_stride +
                   (h->topleft_partition & 2 * b_stride)];
            FIX_MV_MBAFF(h->topleft_type, diagonal_ref, C, 2);
        } else if (h->topleft_type) {
            diagonal_ref = LIST_NOT_USED;
            C            = zeromv;
        } else {
            diagonal_ref = PART_NOT_AVAILABLE;

    match_count = !diagonal_ref + !top_ref + !left_ref;
    tprintf(h->avctx, "pred_pskip_motion match_count=%d\n", match_count);
    if (match_count > 1) {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (!left_ref) {
            mx = A[0];
            my = A[1];
        } else if (!top_ref) {
            mx = B[0];
            my = B[1];
        } else {
            mx = C[0];
            my = C[1];
        }
    } else {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    }

    fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx, my), 4);
    return;

zeromv:
    fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
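/* fill_decode_neighbors() works out the macroblock addresses and types of the
 * top-left, top, top-right and left neighbours of the current macroblock,
 * including the MBAFF adjustments needed when a neighbouring macroblock pair
 * is coded in the opposite field/frame mode, and stores them in the context
 * for fill_decode_caches() and the prediction functions above. */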
static void fill_decode_neighbors(H264Context *h, int mb_type)
{
    const int mb_xy = h->mb_xy;
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    static const uint8_t left_block_options[4][32] = {
        { 0, 1, 2, 3, 7, 10, 8, 11, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 2, 2, 3, 3, 8, 11, 8, 11, 3 + 2 * 4, 3 + 2 * 4, 3 + 3 * 4, 3 + 3 * 4, 1 + 5 * 4, 1 + 9 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 0, 0, 1, 1, 7, 10, 7, 10, 3 + 0 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 1 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 },
        { 0, 2, 0, 2, 7, 10, 7, 10, 3 + 0 * 4, 3 + 2 * 4, 3 + 0 * 4, 3 + 2 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 }
    };

    h->topleft_partition = -1;

    top_xy = mb_xy - (h->mb_stride << MB_FIELD(h));

    /* Wow, what a mess, why didn't they simplify the interlacing & intra
     * stuff, I can't imagine that these complex rules are worth it. */

    topleft_xy    = top_xy - 1;
    topright_xy   = top_xy + 1;
    left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
    h->left_block = left_block_options[0];
    if (FRAME_MBAFF(h)) {
        const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
            if (left_mb_field_flag != curr_mb_field_flag) {
                left_xy[LBOT] = left_xy[LTOP] = mb_xy - h->mb_stride - 1;
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += h->mb_stride;
                    h->left_block  = left_block_options[3];
                } else {
                    topleft_xy += h->mb_stride;
                    /* take top left mv from the middle of the mb, as opposed
                     * to all other modes which use the bottom right partition */
                    h->topleft_partition = 0;
                    h->left_block        = left_block_options[1];

            if (curr_mb_field_flag) {
                topleft_xy  += h->mb_stride & (((h->cur_pic.mb_type[top_xy - 1] >> 7) & 1) - 1);
                topright_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy + 1] >> 7) & 1) - 1);
                top_xy      += h->mb_stride & (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
            if (left_mb_field_flag != curr_mb_field_flag) {
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += h->mb_stride;
                    h->left_block  = left_block_options[3];
                } else {
                    h->left_block = left_block_options[2];

    h->topleft_mb_xy    = topleft_xy;
    h->top_mb_xy        = top_xy;
    h->topright_mb_xy   = topright_xy;
    h->left_mb_xy[LTOP] = left_xy[LTOP];
    h->left_mb_xy[LBOT] = left_xy[LBOT];
    //FIXME do we need all in the context?

    h->topleft_type    = h->cur_pic.mb_type[topleft_xy];
    h->top_type        = h->cur_pic.mb_type[top_xy];
    h->topright_type   = h->cur_pic.mb_type[topright_xy];
    h->left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
    h->left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];

    if (FMO) {
        if (h->slice_table[topleft_xy] != h->slice_num)
            h->topleft_type = 0;
        if (h->slice_table[top_xy] != h->slice_num)
            h->top_type = 0;
        if (h->slice_table[left_xy[LTOP]] != h->slice_num)
            h->left_type[LTOP] = h->left_type[LBOT] = 0;
    } else {
        if (h->slice_table[topleft_xy] != h->slice_num) {
            h->topleft_type = 0;
            if (h->slice_table[top_xy] != h->slice_num)
                h->top_type = 0;
            if (h->slice_table[left_xy[LTOP]] != h->slice_num)
                h->left_type[LTOP] = h->left_type[LBOT] = 0;
        }
    }
    if (h->slice_table[topright_xy] != h->slice_num)
        h->topright_type = 0;
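/* fill_decode_caches() copies everything the decoder needs from the
 * neighbouring macroblocks into small per-macroblock caches laid out on the
 * scan8 grid (intra 4x4 prediction modes, non-zero counts, CBP, motion
 * vectors, reference indices, MV deltas and direct-mode flags), so the rest
 * of the decoder can address its neighbours with simple constant offsets. */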
static void fill_decode_caches(H264Context *h, int mb_type)
{
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    int topleft_type, top_type, topright_type, left_type[LEFT_MBS];
    const uint8_t *left_block = h->left_block;

    topleft_xy      = h->topleft_mb_xy;
    top_xy          = h->top_mb_xy;
    topright_xy     = h->topright_mb_xy;
    left_xy[LTOP]   = h->left_mb_xy[LTOP];
    left_xy[LBOT]   = h->left_mb_xy[LBOT];
    topleft_type    = h->topleft_type;
    top_type        = h->top_type;
    topright_type   = h->topright_type;
    left_type[LTOP] = h->left_type[LTOP];
    left_type[LBOT] = h->left_type[LBOT];

    if (!IS_SKIP(mb_type)) {
        if (IS_INTRA(mb_type)) {
            int type_mask = h->pps.constrained_intra_pred ? IS_INTRA(-1) : -1;
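
            /* The *_samples_available fields are bitmasks with one bit per
             * 4x4 block position; a cleared bit means the corresponding
             * neighbouring samples cannot be used for intra prediction,
             * either because that neighbour lies outside the slice/picture
             * or, with constrained_intra_pred, because it is not intra-coded
             * (which is what type_mask filters on). */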
            h->topleft_samples_available      =
                h->top_samples_available      =
                    h->left_samples_available = 0xFFFF;
            h->topright_samples_available     = 0xEEEA;

            if (!(top_type & type_mask)) {
                h->topleft_samples_available  = 0xB3FF;
                h->top_samples_available      = 0x33FF;
                h->topright_samples_available = 0x26EA;
            }
            if (IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[LTOP])) {
                if (IS_INTERLACED(mb_type)) {
                    if (!(left_type[LTOP] & type_mask)) {
                        h->topleft_samples_available &= 0xDFFF;
                        h->left_samples_available    &= 0x5FFF;
                    }
                    if (!(left_type[LBOT] & type_mask)) {
                        h->topleft_samples_available &= 0xFF5F;
                        h->left_samples_available    &= 0xFF5F;
                    }
                } else {
                    int left_typei = h->cur_pic.mb_type[left_xy[LTOP] + h->mb_stride];

                    av_assert2(left_xy[LTOP] == left_xy[LBOT]);
                    if (!((left_typei & type_mask) && (left_type[LTOP] & type_mask))) {
                        h->topleft_samples_available &= 0xDF5F;
                        h->left_samples_available    &= 0x5F5F;
                    }
                }
            } else {
                if (!(left_type[LTOP] & type_mask)) {
                    h->topleft_samples_available &= 0xDF5F;
                    h->left_samples_available    &= 0x5F5F;
                }
            }

            if (!(topleft_type & type_mask))
                h->topleft_samples_available &= 0x7FFF;

            if (!(topright_type & type_mask))
                h->topright_samples_available &= 0xFBFF;
            if (IS_INTRA4x4(mb_type)) {
                if (IS_INTRA4x4(top_type)) {
                    AV_COPY32(h->intra4x4_pred_mode_cache + 4 + 8 * 0, h->intra4x4_pred_mode + h->mb2br_xy[top_xy]);
                } else {
                    h->intra4x4_pred_mode_cache[4 + 8 * 0] =
                    h->intra4x4_pred_mode_cache[5 + 8 * 0] =
                    h->intra4x4_pred_mode_cache[6 + 8 * 0] =
                    h->intra4x4_pred_mode_cache[7 + 8 * 0] = 2 - 3 * !(top_type & type_mask);
                }
                for (i = 0; i < 2; i++) {
                    if (IS_INTRA4x4(left_type[LEFT(i)])) {
                        int8_t *mode = h->intra4x4_pred_mode + h->mb2br_xy[left_xy[LEFT(i)]];
                        h->intra4x4_pred_mode_cache[3 + 8 * 1 + 2 * 8 * i] = mode[6 - left_block[0 + 2 * i]];
                        h->intra4x4_pred_mode_cache[3 + 8 * 2 + 2 * 8 * i] = mode[6 - left_block[1 + 2 * i]];
                    } else {
                        h->intra4x4_pred_mode_cache[3 + 8 * 1 + 2 * 8 * i] =
                        h->intra4x4_pred_mode_cache[3 + 8 * 2 + 2 * 8 * i] = 2 - 3 * !(left_type[LEFT(i)] & type_mask);
                    }
                }
            }

        /* FIXME: constraint_intra_pred & partitioning & nnz
         * (let us hope this is just a typo in the spec) */
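        /* The non-zero-count cache uses the same 8-entries-per-row layout as
         * the other caches: the current macroblock's blocks occupy columns
         * 4..7, the luma plane starts at row 1 and each chroma plane sits in
         * its own band further down (hence the 8 * 5 and 8 * 10 offsets);
         * row 0 of each band and column 3 hold the values taken from the top
         * and left neighbours. */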
        nnz_cache = h->non_zero_count_cache;
        if (top_type) {
            nnz = h->non_zero_count[top_xy];
            AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[4 * 3]);
            if (!h->chroma_y_shift) {
                AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 7]);
                AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 11]);
            } else {
                AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 5]);
                AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 9]);
            }
        } else {
            uint32_t top_empty = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 0x40404040;
            AV_WN32A(&nnz_cache[4 + 8 * 0], top_empty);
            AV_WN32A(&nnz_cache[4 + 8 * 5], top_empty);
            AV_WN32A(&nnz_cache[4 + 8 * 10], top_empty);
        }

        for (i = 0; i < 2; i++) {
            if (left_type[LEFT(i)]) {
                nnz = h->non_zero_count[left_xy[LEFT(i)]];
                nnz_cache[3 + 8 * 1 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i]];
                nnz_cache[3 + 8 * 2 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i]];
                if (CHROMA444(h)) {
                    nnz_cache[3 + 8 * 6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 4 * 4];
                    nnz_cache[3 + 8 * 7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 4 * 4];
                    nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 8 * 4];
                    nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 8 * 4];
                } else if (CHROMA422(h)) {
                    nnz_cache[3 + 8 * 6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 4 * 4];
                    nnz_cache[3 + 8 * 7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 4 * 4];
                    nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 8 * 4];
                    nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 8 * 4];
                } else {
                    nnz_cache[3 + 8 * 6 + 8 * i] = nnz[left_block[8 + 4 + 2 * i]];
                    nnz_cache[3 + 8 * 11 + 8 * i] = nnz[left_block[8 + 5 + 2 * i]];
                }
            } else {
                nnz_cache[3 + 8 * 1 + 2 * 8 * i] =
                nnz_cache[3 + 8 * 2 + 2 * 8 * i] =
                nnz_cache[3 + 8 * 6 + 2 * 8 * i] =
                nnz_cache[3 + 8 * 7 + 2 * 8 * i] =
                nnz_cache[3 + 8 * 11 + 2 * 8 * i] =
                nnz_cache[3 + 8 * 12 + 2 * 8 * i] = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 64;
            }
        }

        if (top_type)
            h->top_cbp = h->cbp_table[top_xy];
        else
            h->top_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
        if (left_type[LTOP]) {
            h->left_cbp =   (h->cbp_table[left_xy[LTOP]] & 0x7F0) |
                           ((h->cbp_table[left_xy[LTOP]] >> (left_block[0] & (~1))) & 2) |
                          (((h->cbp_table[left_xy[LBOT]] >> (left_block[2] & (~1))) & 2) << 2);
        } else {
            h->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
        }
    if (IS_INTER(mb_type) || (IS_DIRECT(mb_type) && h->direct_spatial_mv_pred)) {
        int list;
        int b_stride = h->b_stride;
        for (list = 0; list < h->list_count; list++) {
            int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
            int8_t *ref       = h->cur_pic.ref_index[list];
            int16_t(*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
            int16_t(*mv)[2]       = h->cur_pic.motion_val[list];
            if (!USES_LIST(mb_type, list))
                continue;
            av_assert2(!(IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred));

            if (USES_LIST(top_type, list)) {
                const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
                AV_COPY128(mv_cache[0 - 1 * 8], mv[b_xy + 0]);
                ref_cache[0 - 1 * 8] =
                ref_cache[1 - 1 * 8] = ref[4 * top_xy + 2];
                ref_cache[2 - 1 * 8] =
                ref_cache[3 - 1 * 8] = ref[4 * top_xy + 3];
            } else {
                AV_ZERO128(mv_cache[0 - 1 * 8]);
                AV_WN32A(&ref_cache[0 - 1 * 8],
                         ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE) & 0xFF) * 0x01010101u);
            }

            if (mb_type & (MB_TYPE_16x8 | MB_TYPE_8x8)) {
                for (i = 0; i < 2; i++) {
                    int cache_idx = -1 + i * 2 * 8;
                    if (USES_LIST(left_type[LEFT(i)], list)) {
                        const int b_xy  = h->mb2b_xy[left_xy[LEFT(i)]] + 3;
                        const int b8_xy = 4 * left_xy[LEFT(i)] + 1;
                        AV_COPY32(mv_cache[cache_idx],
                                  mv[b_xy + b_stride * left_block[0 + i * 2]]);
                        AV_COPY32(mv_cache[cache_idx + 8],
                                  mv[b_xy + b_stride * left_block[1 + i * 2]]);
                        ref_cache[cache_idx]     = ref[b8_xy + (left_block[0 + i * 2] & ~1)];
                        ref_cache[cache_idx + 8] = ref[b8_xy + (left_block[1 + i * 2] & ~1)];
                    } else {
                        AV_ZERO32(mv_cache[cache_idx]);
                        AV_ZERO32(mv_cache[cache_idx + 8]);
                        ref_cache[cache_idx]     =
                        ref_cache[cache_idx + 8] = (left_type[LEFT(i)]) ? LIST_NOT_USED
                                                                        : PART_NOT_AVAILABLE;
                    }
                }

                if (USES_LIST(left_type[LTOP], list)) {
                    const int b_xy  = h->mb2b_xy[left_xy[LTOP]] + 3;
                    const int b8_xy = 4 * left_xy[LTOP] + 1;
                    AV_COPY32(mv_cache[-1], mv[b_xy + b_stride * left_block[0]]);
                    ref_cache[-1] = ref[b8_xy + (left_block[0] & ~1)];
                } else {
                    AV_ZERO32(mv_cache[-1]);
                    ref_cache[-1] = left_type[LTOP] ? LIST_NOT_USED
                                                    : PART_NOT_AVAILABLE;
                }

            if (USES_LIST(topright_type, list)) {
                const int b_xy = h->mb2b_xy[topright_xy] + 3 * b_stride;
                AV_COPY32(mv_cache[4 - 1 * 8], mv[b_xy]);
                ref_cache[4 - 1 * 8] = ref[4 * topright_xy + 2];
            } else {
                AV_ZERO32(mv_cache[4 - 1 * 8]);
                ref_cache[4 - 1 * 8] = topright_type ? LIST_NOT_USED
                                                     : PART_NOT_AVAILABLE;
            }
            if (ref_cache[2 - 1 * 8] < 0 || ref_cache[4 - 1 * 8] < 0) {
                if (USES_LIST(topleft_type, list)) {
                    const int b_xy  = h->mb2b_xy[topleft_xy] + 3 + b_stride +
                                      (h->topleft_partition & 2 * b_stride);
                    const int b8_xy = 4 * topleft_xy + 1 + (h->topleft_partition & 2);
                    AV_COPY32(mv_cache[-1 - 1 * 8], mv[b_xy]);
                    ref_cache[-1 - 1 * 8] = ref[b8_xy];
                } else {
                    AV_ZERO32(mv_cache[-1 - 1 * 8]);
                    ref_cache[-1 - 1 * 8] = topleft_type ? LIST_NOT_USED
                                                         : PART_NOT_AVAILABLE;
                }
            }
            if ((mb_type & (MB_TYPE_SKIP | MB_TYPE_DIRECT2)) && !FRAME_MBAFF(h))
                continue;

            if (!(mb_type & (MB_TYPE_SKIP | MB_TYPE_DIRECT2))) {
                uint8_t(*mvd_cache)[2] = &h->mvd_cache[list][scan8[0]];
                uint8_t(*mvd)[2]       = h->mvd_table[list];
                ref_cache[2 + 8 * 0] =
                ref_cache[2 + 8 * 2] = PART_NOT_AVAILABLE;
                AV_ZERO32(mv_cache[2 + 8 * 0]);
                AV_ZERO32(mv_cache[2 + 8 * 2]);

                    if (USES_LIST(top_type, list)) {
                        const int b_xy = h->mb2br_xy[top_xy];
                        AV_COPY64(mvd_cache[0 - 1 * 8], mvd[b_xy + 0]);
                    } else {
                        AV_ZERO64(mvd_cache[0 - 1 * 8]);
                    }
                    if (USES_LIST(left_type[LTOP], list)) {
                        const int b_xy = h->mb2br_xy[left_xy[LTOP]] + 6;
                        AV_COPY16(mvd_cache[-1 + 0 * 8], mvd[b_xy - left_block[0]]);
                        AV_COPY16(mvd_cache[-1 + 1 * 8], mvd[b_xy - left_block[1]]);
                    } else {
                        AV_ZERO16(mvd_cache[-1 + 0 * 8]);
                        AV_ZERO16(mvd_cache[-1 + 1 * 8]);
                    }
                    if (USES_LIST(left_type[LBOT], list)) {
                        const int b_xy = h->mb2br_xy[left_xy[LBOT]] + 6;
                        AV_COPY16(mvd_cache[-1 + 2 * 8], mvd[b_xy - left_block[2]]);
                        AV_COPY16(mvd_cache[-1 + 3 * 8], mvd[b_xy - left_block[3]]);
                    } else {
                        AV_ZERO16(mvd_cache[-1 + 2 * 8]);
                        AV_ZERO16(mvd_cache[-1 + 3 * 8]);
                    }
                    AV_ZERO16(mvd_cache[2 + 8 * 0]);
                    AV_ZERO16(mvd_cache[2 + 8 * 2]);
                    if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
                        uint8_t *direct_cache = &h->direct_cache[scan8[0]];
                        uint8_t *direct_table = h->direct_table;
                        fill_rectangle(direct_cache, 4, 4, 8, MB_TYPE_16x16 >> 1, 1);

                        if (IS_DIRECT(top_type)) {
                            AV_WN32A(&direct_cache[-1 * 8],
                                     0x01010101u * (MB_TYPE_DIRECT2 >> 1));
                        } else if (IS_8X8(top_type)) {
                            int b8_xy = 4 * top_xy;
                            direct_cache[0 - 1 * 8] = direct_table[b8_xy + 2];
                            direct_cache[2 - 1 * 8] = direct_table[b8_xy + 3];
                        } else {
                            AV_WN32A(&direct_cache[-1 * 8],
                                     0x01010101 * (MB_TYPE_16x16 >> 1));
                        }

                        if (IS_DIRECT(left_type[LTOP]))
                            direct_cache[-1 + 0 * 8] = MB_TYPE_DIRECT2 >> 1;
                        else if (IS_8X8(left_type[LTOP]))
                            direct_cache[-1 + 0 * 8] = direct_table[4 * left_xy[LTOP] + 1 + (left_block[0] & ~1)];
                        else
                            direct_cache[-1 + 0 * 8] = MB_TYPE_16x16 >> 1;

                        if (IS_DIRECT(left_type[LBOT]))
                            direct_cache[-1 + 2 * 8] = MB_TYPE_DIRECT2 >> 1;
                        else if (IS_8X8(left_type[LBOT]))
                            direct_cache[-1 + 2 * 8] = direct_table[4 * left_xy[LBOT] + 1 + (left_block[2] & ~1)];
                        else
                            direct_cache[-1 + 2 * 8] = MB_TYPE_16x16 >> 1;
#define MAP_MVS                                         \
    MAP_F2F(scan8[0] - 1 - 1 * 8, topleft_type)         \
    MAP_F2F(scan8[0] + 0 - 1 * 8, top_type)             \
    MAP_F2F(scan8[0] + 1 - 1 * 8, top_type)             \
    MAP_F2F(scan8[0] + 2 - 1 * 8, top_type)             \
    MAP_F2F(scan8[0] + 3 - 1 * 8, top_type)             \
    MAP_F2F(scan8[0] + 4 - 1 * 8, topright_type)        \
    MAP_F2F(scan8[0] - 1 + 0 * 8, left_type[LTOP])      \
    MAP_F2F(scan8[0] - 1 + 1 * 8, left_type[LTOP])      \
    MAP_F2F(scan8[0] - 1 + 2 * 8, left_type[LBOT])      \
    MAP_F2F(scan8[0] - 1 + 3 * 8, left_type[LBOT])
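
/* MAP_MVS applies MAP_F2F to every cached neighbour position (top-left, the
 * four top blocks, top-right and the four left blocks). The two MAP_F2F
 * definitions below rescale a neighbour's cached reference index, MV and MV
 * delta between frame and field units whenever that neighbour's interlacing
 * differs from the current macroblock's. */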
            if (FRAME_MBAFF(h)) {
                if (MB_FIELD(h)) {
#define MAP_F2F(idx, mb_type)                                           \
    if (!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0) {      \
        h->ref_cache[list][idx]    <<= 1;                               \
        h->mv_cache[list][idx][1]   /= 2;                               \
        h->mvd_cache[list][idx][1] >>= 1;                               \
    }
                    MAP_MVS
#undef MAP_F2F
                } else {
#define MAP_F2F(idx, mb_type)                                           \
    if (IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0) {       \
        h->ref_cache[list][idx]    >>= 1;                               \
        h->mv_cache[list][idx][1]  <<= 1;                               \
        h->mvd_cache[list][idx][1] <<= 1;                               \
    }
                    MAP_MVS
#undef MAP_F2F
                }
            }

    h->neighbor_transform_size = !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[LTOP]);

/**
 * decodes a P_SKIP or B_SKIP macroblock
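 *
 * Clears the non-zero counts, chooses an inter SKIP mb_type, predicts the
 * motion (direct prediction for B_SKIP, pred_pskip_motion() for P_SKIP) and
 * writes the motion and macroblock type back into the current picture.
 */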
static void av_unused decode_mb_skip(H264Context *h)
{
    const int mb_xy = h->mb_xy;
    int mb_type     = 0;

    memset(h->non_zero_count[mb_xy], 0, 48);

    if (MB_FIELD(h))
        mb_type |= MB_TYPE_INTERLACED;

    if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
        // just for fill_caches. pred_direct_motion will set the real mb_type
        mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 | MB_TYPE_SKIP;
        if (h->direct_spatial_mv_pred) {
            fill_decode_neighbors(h, mb_type);
            fill_decode_caches(h, mb_type); //FIXME check what is needed and what not ...
        }
        ff_h264_pred_direct_motion(h, &mb_type);
        mb_type |= MB_TYPE_SKIP;
    } else {
        mb_type |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P1L0 | MB_TYPE_SKIP;

        fill_decode_neighbors(h, mb_type);
        pred_pskip_motion(h);
    }

    write_back_motion(h, mb_type);
    h->cur_pic.mb_type[mb_xy]      = mb_type;
    h->cur_pic.qscale_table[mb_xy] = h->qscale;
    h->slice_table[mb_xy]          = h->slice_num;
    h->prev_mb_skipped             = 1;
}

#endif /* AVCODEC_H264_MVPRED_H */