/*
 * H.26L/H.264/AVC/JVT/14496-10/... motion vector prediction
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part 10 motion vector prediction.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */
#ifndef AVCODEC_H264_MVPRED_H
#define AVCODEC_H264_MVPRED_H

#include "internal.h"
#include "avcodec.h"
#include "h264.h"
#include "libavutil/avassert.h"
static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C,
                                              int i, int list, int part_width)
{
    const int topright_ref = h->ref_cache[list][i - 8 + part_width];
    MpegEncContext *s      = &h->s;
    /* there is no consistent mapping of mvs to neighboring locations that will
     * make mbaff happy, so we can't move all this logic to fill_caches */
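    /* In MBAFF, field macroblocks store vertical MVs in field units (half the
     * frame-unit amplitude) and reference indices in per-field numbering, so
     * a neighbor coded in the other frame/field mode has to be rescaled when
     * it is fetched here. */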
    if (FRAME_MBAFF) {
#define SET_DIAG_MV(MV_OP, REF_OP, XY, Y4)                              \
        const int xy = XY, y4 = Y4;                                     \
        const int mb_type = mb_types[xy + (y4 >> 2) * s->mb_stride];    \
        if (!USES_LIST(mb_type, list))                                  \
            return LIST_NOT_USED;                                       \
        mv = s->current_picture_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
        h->mv_cache[list][scan8[0] - 2][0] = mv[0];                     \
        h->mv_cache[list][scan8[0] - 2][1] = mv[1] MV_OP;               \
        return s->current_picture_ptr->f.ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;
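
        /* SET_DIAG_MV fetches the MV/ref of the left neighbor's co-located
         * partition, applying MV_OP to the vertical MV component and REF_OP
         * to the reference index: e.g. a frame-coded left neighbor of a field
         * MB has its vertical MV halved (/ 2) and its ref doubled (<< 1). */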
        if (topright_ref == PART_NOT_AVAILABLE
            && i >= scan8[0] + 8 && (i & 7) == 4
            && h->ref_cache[list][scan8[0] - 1] != PART_NOT_AVAILABLE) {
            const uint32_t *mb_types = s->current_picture_ptr->f.mb_type;
            const int16_t *mv;
            AV_ZERO32(h->mv_cache[list][scan8[0] - 2]);
            *C = h->mv_cache[list][scan8[0] - 2];

            if (!MB_FIELD && IS_INTERLACED(h->left_type[0])) {
                SET_DIAG_MV(* 2, >> 1, h->left_mb_xy[0] + s->mb_stride,
                            (s->mb_y & 1) * 2 + (i >> 5));
            }
            if (MB_FIELD && !IS_INTERLACED(h->left_type[0])) {
                // left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK.
                SET_DIAG_MV(/ 2, << 1, h->left_mb_xy[i >= 36], ((i >> 2)) & 3);
            }
        }
#undef SET_DIAG_MV
    }
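
    /* If the top-right candidate is unavailable, fall back to the top-left
     * (i - 8 - 1), matching the spec's substitution of the above-left block
     * for a missing above-right candidate. */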
    if (topright_ref != PART_NOT_AVAILABLE) {
        *C = h->mv_cache[list][i - 8 + part_width];
        return topright_ref;
    } else {
        tprintf(s->avctx, "topright MV not available\n");

        *C = h->mv_cache[list][i - 8 - 1];
        return h->ref_cache[list][i - 8 - 1];
    }
}

/**
 * Get the predicted MV.
 * @param n the block index
 * @param part_width the width of the partition in pixels (4, 8, 16),
 *                   i.e. (1, 2, 4) in 4x4 block units
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_motion(H264Context *const h, int n,
                                         int part_width, int list, int ref,
                                         int *const mx, int *const my)
{
    const int index8       = scan8[n];
    const int top_ref      = h->ref_cache[list][index8 - 8];
    const int left_ref     = h->ref_cache[list][index8 - 1];
    const int16_t *const A = h->mv_cache[list][index8 - 1];
    const int16_t *const B = h->mv_cache[list][index8 - 8];
    const int16_t *C;
    int diagonal_ref, match_count;

    av_assert2(part_width == 1 || part_width == 2 || part_width == 4);
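
    /* In the scan8-indexed caches, A is the block to the left (index8 - 1),
     * B the block above (index8 - 8) and C the above-right candidate
     * (index8 - 8 + part_width), with the above-left as fallback. */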
    diagonal_ref = fetch_diagonal_mv(h, &C, index8, list, part_width);
    match_count  = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
    tprintf(h->s.avctx, "pred_motion match_count=%d\n", match_count);
    if (match_count > 1) { //most common
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
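        /* mid_pred() is a component-wise median: e.g. A = (1, 2), B = (3, 4),
         * C = (5, 0) predicts (3, 2). */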
    } else if (match_count == 1) {
        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
        } else if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
        } else {
            *mx = C[0];
            *my = C[1];
        }
    } else {
        if (top_ref      == PART_NOT_AVAILABLE &&
            diagonal_ref == PART_NOT_AVAILABLE &&
            left_ref     != PART_NOT_AVAILABLE) {
            *mx = A[0];
            *my = A[1];
        } else {
            *mx = mid_pred(A[0], B[0], C[0]);
            *my = mid_pred(A[1], B[1], C[1]);
        }
    }

    tprintf(h->s.avctx,
            "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n",
            top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref,
            A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list);
}

/**
 * Get the directionally predicted 16x8 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_16x8_motion(H264Context *const h,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
    if (n == 0) {
        const int top_ref      = h->ref_cache[list][scan8[0] - 8];
        const int16_t *const B = h->mv_cache[list][scan8[0] - 8];

        tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list);

        if (top_ref == ref) {
            *mx = B[0];
            *my = B[1];
            return;
        }
    } else {
        const int left_ref     = h->ref_cache[list][scan8[8] - 1];
        const int16_t *const A = h->mv_cache[list][scan8[8] - 1];

        tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    }
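
    /* The directional candidate's reference did not match: fall back to the
     * generic median prediction (the rare case). */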
    pred_motion(h, n, 4, list, ref, mx, my);
}

/**
 * Get the directionally predicted 8x16 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_8x16_motion(H264Context *const h,
                                              int n, int list, int ref,
                                              int *const mx, int *const my)
{
    if (n == 0) {
        const int left_ref     = h->ref_cache[list][scan8[0] - 1];
        const int16_t *const A = h->mv_cache[list][scan8[0] - 1];

        tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);

        if (left_ref == ref) {
            *mx = A[0];
            *my = A[1];
            return;
        }
    } else {
        const int16_t *C;
        int diagonal_ref;

        diagonal_ref = fetch_diagonal_mv(h, &C, scan8[4], list, 2);

        tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
                diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list);

        if (diagonal_ref == ref) {
            *mx = C[0];
            *my = C[1];
            return;
        }
    }
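
    /* The directional candidate's reference did not match: fall back to the
     * generic median prediction. */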
    pred_motion(h, n, 2, list, ref, mx, my);
}

#define FIX_MV_MBAFF(type, refn, mvn, idx)      \
    if (FRAME_MBAFF) {                          \
        if (MB_FIELD) {                         \
            if (!IS_INTERLACED(type)) {         \
                refn <<= 1;                     \
                AV_COPY32(mvbuf[idx], mvn);     \
                mvbuf[idx][1] /= 2;             \
                mvn = mvbuf[idx];               \
            }                                   \
        } else {                                \
            if (IS_INTERLACED(type)) {          \
                refn >>= 1;                     \
                AV_COPY32(mvbuf[idx], mvn);     \
                mvbuf[idx][1] <<= 1;            \
                mvn = mvbuf[idx];               \
            }                                   \
        }                                       \
    }
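
/* FIX_MV_MBAFF rescales a neighbor's ref/MV into the current MB's units when
 * their frame/field coding modes differ: a frame ref r maps to field ref 2r
 * with the vertical MV halved, and vice versa. The rescaled MV is written to
 * mvbuf[idx] so the cached original stays untouched. */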
static av_always_inline void pred_pskip_motion(H264Context *const h)
{
    DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = { 0 };
    DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
    MpegEncContext *const s = &h->s;
    int8_t *ref     = s->current_picture.f.ref_index[0];
    int16_t(*mv)[2] = s->current_picture.f.motion_val[0];
    int top_ref, left_ref, diagonal_ref, match_count, mx, my;
    const int16_t *A, *B, *C;
    int b_stride = h->b_stride;

    fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
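    /* P_SKIP always predicts from reference index 0 in list 0, so the ref
     * cache for the whole MB can be pre-filled with 0. */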

    /* To avoid doing an entire fill_decode_caches, we inline the relevant
     * parts here.
     * FIXME: this is a partial duplicate of the logic in fill_decode_caches,
     * but it's faster this way. Is there a way to avoid this duplication? */
    if (USES_LIST(h->left_type[LTOP], 0)) {
        left_ref = ref[4 * h->left_mb_xy[LTOP] + 1 + (h->left_block[0] & ~1)];
        A        = mv[h->mb2b_xy[h->left_mb_xy[LTOP]] + 3 + b_stride * h->left_block[0]];
        FIX_MV_MBAFF(h->left_type[LTOP], left_ref, A, 0);
        if (!(left_ref | AV_RN32A(A)))
            goto zeromv;
    } else if (h->left_type[LTOP]) {
        left_ref = LIST_NOT_USED;
        A        = zeromv;
    } else {
        goto zeromv;
    }
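
    /* Per the P_SKIP rule: if the left or top neighbor is unavailable, or
     * references picture 0 with a zero MV, the skip MV is forced to (0,0)
     * (the zeromv paths). */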
    if (USES_LIST(h->top_type, 0)) {
        top_ref = ref[4 * h->top_mb_xy + 2];
        B       = mv[h->mb2b_xy[h->top_mb_xy] + 3 * b_stride];
        FIX_MV_MBAFF(h->top_type, top_ref, B, 1);
        if (!(top_ref | AV_RN32A(B)))
            goto zeromv;
    } else if (h->top_type) {
        top_ref = LIST_NOT_USED;
        B       = zeromv;
    } else {
        goto zeromv;
    }
    tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n",
            top_ref, left_ref, h->s.mb_x, h->s.mb_y);
    if (USES_LIST(h->topright_type, 0)) {
        diagonal_ref = ref[4 * h->topright_mb_xy + 2];
        C = mv[h->mb2b_xy[h->topright_mb_xy] + 3 * b_stride];
        FIX_MV_MBAFF(h->topright_type, diagonal_ref, C, 2);
    } else if (h->topright_type) {
        diagonal_ref = LIST_NOT_USED;
        C = zeromv;
    } else {
        if (USES_LIST(h->topleft_type, 0)) {
            diagonal_ref = ref[4 * h->topleft_mb_xy + 1 +
                               (h->topleft_partition & 2)];
            C = mv[h->mb2b_xy[h->topleft_mb_xy] + 3 + b_stride +
                   (h->topleft_partition & 2 * b_stride)];
            FIX_MV_MBAFF(h->topleft_type, diagonal_ref, C, 2);
        } else if (h->topleft_type) {
            diagonal_ref = LIST_NOT_USED;
            C = zeromv;
        } else {
            diagonal_ref = PART_NOT_AVAILABLE;
            C = zeromv;
        }
    }

    match_count = !diagonal_ref + !top_ref + !left_ref;
    tprintf(h->s.avctx, "pred_pskip_motion match_count=%d\n", match_count);
    if (match_count > 1) {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    } else if (match_count == 1) {
        if (!left_ref) {
            mx = A[0];
            my = A[1];
        } else if (!top_ref) {
            mx = B[0];
            my = B[1];
        } else {
            mx = C[0];
            my = C[1];
        }
    } else {
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    }

    fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx, my), 4);
    return;

zeromv:
    fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
    return;
}

static void fill_decode_neighbors(H264Context *h, int mb_type)
{
    MpegEncContext *const s = &h->s;
    const int mb_xy = h->mb_xy;
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    static const uint8_t left_block_options[4][32] = {
        { 0, 1, 2, 3, 7, 10, 8, 11, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 2, 2, 3, 3, 8, 11, 8, 11, 3 + 2 * 4, 3 + 2 * 4, 3 + 3 * 4, 3 + 3 * 4, 1 + 5 * 4, 1 + 9 * 4, 1 + 5 * 4, 1 + 9 * 4 },
        { 0, 0, 1, 1, 7, 10, 7, 10, 3 + 0 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 1 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 },
        { 0, 2, 0, 2, 7, 10, 7, 10, 3 + 0 * 4, 3 + 2 * 4, 3 + 0 * 4, 3 + 2 * 4, 1 + 4 * 4, 1 + 8 * 4, 1 + 4 * 4, 1 + 8 * 4 }
    };
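
    /* Each row maps the left neighbor's blocks into the caches: row [0] is
     * the normal same-mode mapping; rows [1] and [2] appear to remap a field
     * left MB pair for a frame MB, and row [3] a frame pair for a field MB
     * (selection logic below). */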
    h->topleft_partition = -1;

    top_xy = mb_xy - (s->mb_stride << MB_FIELD);

    /* Wow, what a mess, why didn't they simplify the interlacing & intra
     * stuff, I can't imagine that these complex rules are worth it. */
    topleft_xy    = top_xy - 1;
    topright_xy   = top_xy + 1;
    left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
    h->left_block = left_block_options[0];
    if (FRAME_MBAFF) {
        const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if (s->mb_y & 1) {
            if (left_mb_field_flag != curr_mb_field_flag) {
                left_xy[LBOT] = left_xy[LTOP] = mb_xy - s->mb_stride - 1;
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += s->mb_stride;
                    h->left_block  = left_block_options[3];
                } else {
                    topleft_xy += s->mb_stride;
                    /* take top left mv from the middle of the mb, as opposed
                     * to all other modes which use the bottom right partition */
                    h->topleft_partition = 0;
                    h->left_block        = left_block_options[1];
                }
            }
        } else {
            if (curr_mb_field_flag) {
                topleft_xy  += s->mb_stride & (((s->current_picture.f.mb_type[top_xy - 1] >> 7) & 1) - 1);
                topright_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy + 1] >> 7) & 1) - 1);
                top_xy      += s->mb_stride & (((s->current_picture.f.mb_type[top_xy]     >> 7) & 1) - 1);
            }
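            /* (mb_type >> 7) & 1 extracts the MB_TYPE_INTERLACED bit (1 << 7);
             * subtracting 1 yields an all-ones mask (advance one MB row to the
             * bottom MB of a progressive pair above) or zero (stay on the
             * field pair), branch-free. */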
            if (left_mb_field_flag != curr_mb_field_flag) {
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += s->mb_stride;
                    h->left_block  = left_block_options[3];
                } else {
                    h->left_block = left_block_options[2];
                }
            }
        }
    }

    h->topleft_mb_xy    = topleft_xy;
    h->top_mb_xy        = top_xy;
    h->topright_mb_xy   = topright_xy;
    h->left_mb_xy[LTOP] = left_xy[LTOP];
    h->left_mb_xy[LBOT] = left_xy[LBOT];
    //FIXME do we need all in the context?

    h->topleft_type    = s->current_picture.f.mb_type[topleft_xy];
    h->top_type        = s->current_picture.f.mb_type[top_xy];
    h->topright_type   = s->current_picture.f.mb_type[topright_xy];
    h->left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
    h->left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];

    if (FMO) {
        if (h->slice_table[topleft_xy] != h->slice_num)
            h->topleft_type = 0;
        if (h->slice_table[top_xy] != h->slice_num)
            h->top_type = 0;
        if (h->slice_table[left_xy[LTOP]] != h->slice_num)
            h->left_type[LTOP] = h->left_type[LBOT] = 0;
    } else {
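        /* Without FMO a slice covers a contiguous raster region, so if the
         * top-left MB belongs to this slice, the top and left MBs must too;
         * nesting the checks skips them in the common case. */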
        if (h->slice_table[topleft_xy] != h->slice_num) {
            h->topleft_type = 0;
            if (h->slice_table[top_xy] != h->slice_num)
                h->top_type = 0;
            if (h->slice_table[left_xy[LTOP]] != h->slice_num)
                h->left_type[LTOP] = h->left_type[LBOT] = 0;
        }
    }
    if (h->slice_table[topright_xy] != h->slice_num)
        h->topright_type = 0;
}

static void fill_decode_caches(H264Context *h, int mb_type)
{
    MpegEncContext *const s = &h->s;
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    int topleft_type, top_type, topright_type, left_type[LEFT_MBS];
    const uint8_t *left_block = h->left_block;
    int i;
    uint8_t *nnz;
    uint8_t *nnz_cache;

    topleft_xy      = h->topleft_mb_xy;
    top_xy          = h->top_mb_xy;
    topright_xy     = h->topright_mb_xy;
    left_xy[LTOP]   = h->left_mb_xy[LTOP];
    left_xy[LBOT]   = h->left_mb_xy[LBOT];
    topleft_type    = h->topleft_type;
    top_type        = h->top_type;
    topright_type   = h->topright_type;
    left_type[LTOP] = h->left_type[LTOP];
    left_type[LBOT] = h->left_type[LBOT];

    if (!IS_SKIP(mb_type)) {
        if (IS_INTRA(mb_type)) {
            int type_mask = h->pps.constrained_intra_pred ? IS_INTRA(-1) : -1;
            h->topleft_samples_available      =
                h->top_samples_available      =
                    h->left_samples_available = 0xFFFF;
            h->topright_samples_available     = 0xEEEA;

            if (!(top_type & type_mask)) {
                h->topleft_samples_available  = 0xB3FF;
                h->top_samples_available      = 0x33FF;
                h->topright_samples_available = 0x26EA;
            }
            if (IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[LTOP])) {
                if (IS_INTERLACED(mb_type)) {
                    if (!(left_type[LTOP] & type_mask)) {
                        h->topleft_samples_available &= 0xDFFF;
                        h->left_samples_available    &= 0x5FFF;
                    }
                    if (!(left_type[LBOT] & type_mask)) {
                        h->topleft_samples_available &= 0xFF5F;
                        h->left_samples_available    &= 0xFF5F;
                    }
                } else {
                    int left_typei = s->current_picture.f.mb_type[left_xy[LTOP] + s->mb_stride];

                    av_assert2(left_xy[LTOP] == left_xy[LBOT]);
                    if (!((left_typei & type_mask) && (left_type[LTOP] & type_mask))) {
                        h->topleft_samples_available &= 0xDF5F;
                        h->left_samples_available    &= 0x5F5F;
                    }
                }
            } else {
                if (!(left_type[LTOP] & type_mask)) {
                    h->topleft_samples_available &= 0xDF5F;
                    h->left_samples_available    &= 0x5F5F;
                }
            }

            if (!(topleft_type & type_mask))
                h->topleft_samples_available &= 0x7FFF;

            if (!(topright_type & type_mask))
                h->topright_samples_available &= 0xFBFF;
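
            /* The *_samples_available values are, in effect, bitmasks over
             * 4x4 block positions in the scan8 layout; each hex constant
             * clears the blocks whose neighboring samples would come from an
             * unavailable macroblock. */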
            if (IS_INTRA4x4(mb_type)) {
                if (IS_INTRA4x4(top_type)) {
                    AV_COPY32(h->intra4x4_pred_mode_cache + 4 + 8 * 0, h->intra4x4_pred_mode + h->mb2br_xy[top_xy]);
                } else {
                    h->intra4x4_pred_mode_cache[4 + 8 * 0] =
                    h->intra4x4_pred_mode_cache[5 + 8 * 0] =
                    h->intra4x4_pred_mode_cache[6 + 8 * 0] =
                    h->intra4x4_pred_mode_cache[7 + 8 * 0] = 2 - 3 * !(top_type & type_mask);
                }
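                /* 2 - 3 * !(type & type_mask) evaluates to 2 (DC prediction)
                 * when the neighbor is usable but not 4x4-coded, and to -1
                 * (unavailable) otherwise. */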
                for (i = 0; i < 2; i++) {
                    if (IS_INTRA4x4(left_type[LEFT(i)])) {
                        int8_t *mode = h->intra4x4_pred_mode + h->mb2br_xy[left_xy[LEFT(i)]];
                        h->intra4x4_pred_mode_cache[3 + 8 * 1 + 2 * 8 * i] = mode[6 - left_block[0 + 2 * i]];
                        h->intra4x4_pred_mode_cache[3 + 8 * 2 + 2 * 8 * i] = mode[6 - left_block[1 + 2 * i]];
                    } else {
                        h->intra4x4_pred_mode_cache[3 + 8 * 1 + 2 * 8 * i] =
                        h->intra4x4_pred_mode_cache[3 + 8 * 2 + 2 * 8 * i] = 2 - 3 * !(left_type[LEFT(i)] & type_mask);
                    }
                }
            }
        }

        /* FIXME: constraint_intra_pred & partitioning & nnz
         * (let us hope this is just a typo in the spec) */
        nnz_cache = h->non_zero_count_cache;
        if (top_type) {
            nnz = h->non_zero_count[top_xy];
            AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[4 * 3]);
            if (!s->chroma_y_shift) {
                AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 7]);
                AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 11]);
            } else {
                AV_COPY32(&nnz_cache[4 + 8 * 5], &nnz[4 * 5]);
                AV_COPY32(&nnz_cache[4 + 8 * 10], &nnz[4 * 9]);
            }
        } else {
            uint32_t top_empty = CABAC && !IS_INTRA(mb_type) ? 0 : 0x40404040;
            AV_WN32A(&nnz_cache[4 + 8 * 0], top_empty);
            AV_WN32A(&nnz_cache[4 + 8 * 5], top_empty);
            AV_WN32A(&nnz_cache[4 + 8 * 10], top_empty);
        }

        for (i = 0; i < 2; i++) {
            if (left_type[LEFT(i)]) {
                nnz = h->non_zero_count[left_xy[LEFT(i)]];
                nnz_cache[3 + 8 * 1 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i]];
                nnz_cache[3 + 8 * 2 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i]];
                if (CHROMA444) {
                    nnz_cache[3 + 8 *  6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 4 * 4];
                    nnz_cache[3 + 8 *  7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 4 * 4];
                    nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 8 * 4];
                    nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 8 * 4];
                } else if (CHROMA422) {
                    nnz_cache[3 + 8 *  6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 4 * 4];
                    nnz_cache[3 + 8 *  7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 4 * 4];
                    nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] - 2 + 8 * 4];
                    nnz_cache[3 + 8 * 12 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] - 2 + 8 * 4];
                } else {
                    nnz_cache[3 + 8 *  6 + 8 * i] = nnz[left_block[8 + 4 + 2 * i]];
                    nnz_cache[3 + 8 * 11 + 8 * i] = nnz[left_block[8 + 5 + 2 * i]];
                }
            } else {
                nnz_cache[3 + 8 *  1 + 2 * 8 * i] =
                nnz_cache[3 + 8 *  2 + 2 * 8 * i] =
                nnz_cache[3 + 8 *  6 + 2 * 8 * i] =
                nnz_cache[3 + 8 *  7 + 2 * 8 * i] =
                nnz_cache[3 + 8 * 11 + 2 * 8 * i] =
                nnz_cache[3 + 8 * 12 + 2 * 8 * i] = CABAC && !IS_INTRA(mb_type) ? 0 : 64;
            }
        }
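
        /* For CABAC inter MBs an absent neighbor counts as zero coded
         * coefficients; otherwise 64 (0x40) serves as an "unavailable" marker
         * for the CAVLC total-coefficient prediction. */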
        if (CABAC) {
            // top_cbp
            if (top_type)
                h->top_cbp = h->cbp_table[top_xy];
            else
                h->top_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
            // left_cbp
            if (left_type[LTOP]) {
                h->left_cbp =   (h->cbp_table[left_xy[LTOP]] & 0x7F0) |
                               ((h->cbp_table[left_xy[LTOP]] >> (left_block[0] & (~1))) & 2) |
                              (((h->cbp_table[left_xy[LBOT]] >> (left_block[2] & (~1))) & 2) << 2);
            } else {
                h->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
            }
        }
    }

    if (IS_INTER(mb_type) || (IS_DIRECT(mb_type) && h->direct_spatial_mv_pred)) {
        int list;
        int b_stride = h->b_stride;
        for (list = 0; list < h->list_count; list++) {
            int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
            int8_t *ref       = s->current_picture.f.ref_index[list];
            int16_t(*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
            int16_t(*mv)[2]       = s->current_picture.f.motion_val[list];
            if (!USES_LIST(mb_type, list))
                continue;
            av_assert2(!(IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred));

            if (USES_LIST(top_type, list)) {
                const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
                AV_COPY128(mv_cache[0 - 1 * 8], mv[b_xy + 0]);
                ref_cache[0 - 1 * 8] =
                ref_cache[1 - 1 * 8] = ref[4 * top_xy + 2];
                ref_cache[2 - 1 * 8] =
                ref_cache[3 - 1 * 8] = ref[4 * top_xy + 3];
            } else {
                AV_ZERO128(mv_cache[0 - 1 * 8]);
                AV_WN32A(&ref_cache[0 - 1 * 8],
                         ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE) & 0xFF) * 0x01010101u);
            }
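
            /* Multiplying the single ref byte by 0x01010101 replicates it
             * into all four lanes, filling the four top ref_cache entries
             * with one 32-bit store. */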
            if (mb_type & (MB_TYPE_16x8 | MB_TYPE_8x8)) {
                for (i = 0; i < 2; i++) {
                    int cache_idx = -1 + i * 2 * 8;
                    if (USES_LIST(left_type[LEFT(i)], list)) {
                        const int b_xy  = h->mb2b_xy[left_xy[LEFT(i)]] + 3;
                        const int b8_xy = 4 * left_xy[LEFT(i)] + 1;
                        AV_COPY32(mv_cache[cache_idx],
                                  mv[b_xy + b_stride * left_block[0 + i * 2]]);
                        AV_COPY32(mv_cache[cache_idx + 8],
                                  mv[b_xy + b_stride * left_block[1 + i * 2]]);
                        ref_cache[cache_idx]     = ref[b8_xy + (left_block[0 + i * 2] & ~1)];
                        ref_cache[cache_idx + 8] = ref[b8_xy + (left_block[1 + i * 2] & ~1)];
                    } else {
                        AV_ZERO32(mv_cache[cache_idx]);
                        AV_ZERO32(mv_cache[cache_idx + 8]);
                        ref_cache[cache_idx]     =
                        ref_cache[cache_idx + 8] = (left_type[LEFT(i)]) ? LIST_NOT_USED
                                                                        : PART_NOT_AVAILABLE;
                    }
                }
            } else {
                if (USES_LIST(left_type[LTOP], list)) {
                    const int b_xy  = h->mb2b_xy[left_xy[LTOP]] + 3;
                    const int b8_xy = 4 * left_xy[LTOP] + 1;
                    AV_COPY32(mv_cache[-1], mv[b_xy + b_stride * left_block[0]]);
                    ref_cache[-1] = ref[b8_xy + (left_block[0] & ~1)];
                } else {
                    AV_ZERO32(mv_cache[-1]);
                    ref_cache[-1] = left_type[LTOP] ? LIST_NOT_USED
                                                    : PART_NOT_AVAILABLE;
                }
            }

            if (USES_LIST(topright_type, list)) {
                const int b_xy = h->mb2b_xy[topright_xy] + 3 * b_stride;
                AV_COPY32(mv_cache[4 - 1 * 8], mv[b_xy]);
                ref_cache[4 - 1 * 8] = ref[4 * topright_xy + 2];
            } else {
                AV_ZERO32(mv_cache[4 - 1 * 8]);
                ref_cache[4 - 1 * 8] = topright_type ? LIST_NOT_USED
                                                     : PART_NOT_AVAILABLE;
            }
            if (ref_cache[2 - 1 * 8] < 0 || ref_cache[4 - 1 * 8] < 0) {
                if (USES_LIST(topleft_type, list)) {
                    const int b_xy  = h->mb2b_xy[topleft_xy] + 3 + b_stride +
                                      (h->topleft_partition & 2 * b_stride);
                    const int b8_xy = 4 * topleft_xy + 1 + (h->topleft_partition & 2);
                    AV_COPY32(mv_cache[-1 - 1 * 8], mv[b_xy]);
                    ref_cache[-1 - 1 * 8] = ref[b8_xy];
                } else {
                    AV_ZERO32(mv_cache[-1 - 1 * 8]);
                    ref_cache[-1 - 1 * 8] = topleft_type ? LIST_NOT_USED
                                                         : PART_NOT_AVAILABLE;
                }
            }

            if ((mb_type & (MB_TYPE_SKIP | MB_TYPE_DIRECT2)) && !FRAME_MBAFF)
                continue;

            if (!(mb_type & (MB_TYPE_SKIP | MB_TYPE_DIRECT2))) {
                uint8_t(*mvd_cache)[2] = &h->mvd_cache[list][scan8[0]];
                uint8_t(*mvd)[2]       = h->mvd_table[list];
                ref_cache[2 + 8 * 0] =
                ref_cache[2 + 8 * 2] = PART_NOT_AVAILABLE;
                AV_ZERO32(mv_cache[2 + 8 * 0]);
                AV_ZERO32(mv_cache[2 + 8 * 2]);

                if (CABAC) {
                    if (USES_LIST(top_type, list)) {
                        const int b_xy = h->mb2br_xy[top_xy];
                        AV_COPY64(mvd_cache[0 - 1 * 8], mvd[b_xy + 0]);
                    } else {
                        AV_ZERO64(mvd_cache[0 - 1 * 8]);
                    }
                    if (USES_LIST(left_type[LTOP], list)) {
                        const int b_xy = h->mb2br_xy[left_xy[LTOP]] + 6;
                        AV_COPY16(mvd_cache[-1 + 0 * 8], mvd[b_xy - left_block[0]]);
                        AV_COPY16(mvd_cache[-1 + 1 * 8], mvd[b_xy - left_block[1]]);
                    } else {
                        AV_ZERO16(mvd_cache[-1 + 0 * 8]);
                        AV_ZERO16(mvd_cache[-1 + 1 * 8]);
                    }
                    if (USES_LIST(left_type[LBOT], list)) {
                        const int b_xy = h->mb2br_xy[left_xy[LBOT]] + 6;
                        AV_COPY16(mvd_cache[-1 + 2 * 8], mvd[b_xy - left_block[2]]);
                        AV_COPY16(mvd_cache[-1 + 3 * 8], mvd[b_xy - left_block[3]]);
                    } else {
                        AV_ZERO16(mvd_cache[-1 + 2 * 8]);
                        AV_ZERO16(mvd_cache[-1 + 3 * 8]);
                    }
                    AV_ZERO16(mvd_cache[2 + 8 * 0]);
                    AV_ZERO16(mvd_cache[2 + 8 * 2]);
                    if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
                        uint8_t *direct_cache = &h->direct_cache[scan8[0]];
                        uint8_t *direct_table = h->direct_table;
                        fill_rectangle(direct_cache, 4, 4, 8, MB_TYPE_16x16 >> 1, 1);
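                        /* direct_cache stores mb types shifted right by one
                         * so that values such as MB_TYPE_DIRECT2 (0x100) fit
                         * into a byte. */
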
                        if (IS_DIRECT(top_type)) {
                            AV_WN32A(&direct_cache[-1 * 8],
                                     0x01010101u * (MB_TYPE_DIRECT2 >> 1));
                        } else if (IS_8X8(top_type)) {
                            int b8_xy = 4 * top_xy;
                            direct_cache[0 - 1 * 8] = direct_table[b8_xy + 2];
                            direct_cache[2 - 1 * 8] = direct_table[b8_xy + 3];
                        } else {
                            AV_WN32A(&direct_cache[-1 * 8],
                                     0x01010101 * (MB_TYPE_16x16 >> 1));
                        }

                        if (IS_DIRECT(left_type[LTOP]))
                            direct_cache[-1 + 0 * 8] = MB_TYPE_DIRECT2 >> 1;
                        else if (IS_8X8(left_type[LTOP]))
                            direct_cache[-1 + 0 * 8] = direct_table[4 * left_xy[LTOP] + 1 + (left_block[0] & ~1)];
                        else
                            direct_cache[-1 + 0 * 8] = MB_TYPE_16x16 >> 1;

                        if (IS_DIRECT(left_type[LBOT]))
                            direct_cache[-1 + 2 * 8] = MB_TYPE_DIRECT2 >> 1;
                        else if (IS_8X8(left_type[LBOT]))
                            direct_cache[-1 + 2 * 8] = direct_table[4 * left_xy[LBOT] + 1 + (left_block[2] & ~1)];
                        else
                            direct_cache[-1 + 2 * 8] = MB_TYPE_16x16 >> 1;
                    }
                }
            }

#define MAP_MVS                                                         \
    MAP_F2F(scan8[0] - 1 - 1 * 8, topleft_type)                         \
    MAP_F2F(scan8[0] + 0 - 1 * 8, top_type)                             \
    MAP_F2F(scan8[0] + 1 - 1 * 8, top_type)                             \
    MAP_F2F(scan8[0] + 2 - 1 * 8, top_type)                             \
    MAP_F2F(scan8[0] + 3 - 1 * 8, top_type)                             \
    MAP_F2F(scan8[0] + 4 - 1 * 8, topright_type)                        \
    MAP_F2F(scan8[0] - 1 + 0 * 8, left_type[LTOP])                      \
    MAP_F2F(scan8[0] - 1 + 1 * 8, left_type[LTOP])                      \
    MAP_F2F(scan8[0] - 1 + 2 * 8, left_type[LBOT])                      \
    MAP_F2F(scan8[0] - 1 + 3 * 8, left_type[LBOT])
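
/* MAP_MVS expands MAP_F2F over every cached neighbor position: the top-left,
 * the four top blocks, the top-right, and the four left blocks. */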
            if (FRAME_MBAFF) {
                if (MB_FIELD) {

#define MAP_F2F(idx, mb_type)                                           \
    if (!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0) {      \
        h->ref_cache[list][idx]    <<= 1;                               \
        h->mv_cache[list][idx][1]   /= 2;                               \
        h->mvd_cache[list][idx][1] >>= 1;                               \
    }

                    MAP_MVS
#undef MAP_F2F
                } else {

#define MAP_F2F(idx, mb_type)                                           \
    if (IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0) {       \
        h->ref_cache[list][idx]    >>= 1;                               \
        h->mv_cache[list][idx][1]  <<= 1;                               \
        h->mvd_cache[list][idx][1] <<= 1;                               \
    }

                    MAP_MVS
#undef MAP_F2F
                }
            }
        }
    }

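    /* neighbor_transform_size counts how many of the top/left neighbors use
     * the 8x8 DCT; it serves as the CABAC context increment for
     * transform_size_8x8_flag. */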
    h->neighbor_transform_size = !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[LTOP]);
}

/**
 * Decode a P_SKIP or B_SKIP macroblock.
 */
static void av_unused decode_mb_skip(H264Context *h)
{
    MpegEncContext *const s = &h->s;
    const int mb_xy = h->mb_xy;
    int mb_type     = 0;

    memset(h->non_zero_count[mb_xy], 0, 48);

    if (MB_FIELD)
        mb_type |= MB_TYPE_INTERLACED;

    if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
        // just for fill_caches. pred_direct_motion will set the real mb_type
        mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 | MB_TYPE_SKIP;
        if (h->direct_spatial_mv_pred) {
            fill_decode_neighbors(h, mb_type);
            fill_decode_caches(h, mb_type); //FIXME check what is needed and what not ...
        }
        ff_h264_pred_direct_motion(h, &mb_type);
        mb_type |= MB_TYPE_SKIP;
    } else {
        mb_type |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P1L0 | MB_TYPE_SKIP;

        fill_decode_neighbors(h, mb_type);
        pred_pskip_motion(h);
    }

    write_back_motion(h, mb_type);
    s->current_picture.f.mb_type[mb_xy]      = mb_type;
    s->current_picture.f.qscale_table[mb_xy] = s->qscale;
    h->slice_table[mb_xy]                    = h->slice_num;
    h->prev_mb_skipped                       = 1;
}

#endif /* AVCODEC_H264_MVPRED_H */