/*
 * H.26L/H.264/AVC/JVT/14496-10/... motion vector prediction
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 motion vector prediction.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#ifndef AVCODEC_H264_MVPRED_H
#define AVCODEC_H264_MVPRED_H
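
/* NOTE: the include set below is an assumption, reconstructed from the
 * identifiers this header uses (H264Context, MpegEncContext, AV_COPY32,
 * mid_pred, assert, ...); adjust to match the surrounding tree. */
#include "internal.h"
#include "avcodec.h"
#include "h264.h"

#include <assert.h>

/*
 * All the *_cache arrays below share one geometry: rows of 8 entries, with
 * the current MB's 4x4 blocks starting at scan8[0] and an extra row/column
 * holding the neighbors. For a block at cache index i this puts
 *   i - 1              at the left      neighbor A,
 *   i - 8              at the top       neighbor B,
 *   i - 8 + part_width at the top-right neighbor C,
 *   i - 8 - 1          at the top-left  neighbor D (the fallback when C is
 * unavailable), which is exactly the neighbor set of H.264 MV prediction.
 */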

static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){
    const int topright_ref= h->ref_cache[list][ i - 8 + part_width ];
    MpegEncContext *s = &h->s;

    /* there is no consistent mapping of mvs to neighboring locations that will
     * make mbaff happy, so we can't move all this logic to fill_caches */
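
    /* SET_DIAG_MV reads the bottom-right MV/ref of a neighboring left-column
     * MB and rescales it between field and frame coordinates: MV_OP scales
     * the vertical MV component (*2 when reading a field MV into a frame MB,
     * /2 the other way round) and REF_OP rescales the reference index in the
     * opposite direction (>>1 resp. <<1). */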
#define SET_DIAG_MV(MV_OP, REF_OP, XY, Y4)\
    const int xy = XY, y4 = Y4;\
    const int mb_type = mb_types[xy+(y4>>2)*s->mb_stride];\
    if(!USES_LIST(mb_type,list))\
        return LIST_NOT_USED;\
    mv = s->current_picture_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4*h->b_stride];\
    h->mv_cache[list][scan8[0]-2][0] = mv[0];\
    h->mv_cache[list][scan8[0]-2][1] = mv[1] MV_OP;\
    return s->current_picture_ptr->f.ref_index[list][4*xy + 1 + (y4 & ~1)] REF_OP;

    if(topright_ref == PART_NOT_AVAILABLE
       && i >= scan8[0]+8 && (i&7)==4
       && h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){
        const uint32_t *mb_types = s->current_picture_ptr->f.mb_type;
        const int16_t *mv;
        AV_ZERO32(h->mv_cache[list][scan8[0]-2]);
        *C = h->mv_cache[list][scan8[0]-2];

        if(!MB_FIELD
           && IS_INTERLACED(h->left_type[0])){
            SET_DIAG_MV(*2, >>1, h->left_mb_xy[0]+s->mb_stride, (s->mb_y&1)*2+(i>>5));
        }
        if(MB_FIELD
           && !IS_INTERLACED(h->left_type[0])){
            // left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK.
            SET_DIAG_MV(/2, <<1, h->left_mb_xy[i>=36], ((i>>2))&3);
        }
    }

    if(topright_ref != PART_NOT_AVAILABLE){
        *C= h->mv_cache[list][ i - 8 + part_width ];
        return topright_ref;
    }else{
        tprintf(s->avctx, "topright MV not available\n");

        *C= h->mv_cache[list][ i - 8 - 1 ];
        return h->ref_cache[list][ i - 8 - 1 ];
    }
}

/**
 * Get the predicted MV.
 * @param n the block index
 * @param part_width the width of the partition, in units of 4x4 blocks:
 *                   pixel widths (4, 8, 16) map to (1, 2, 4)
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
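/* This is the median prediction rule of H.264 (subclause 8.4.1.3): if
 * exactly one neighbor uses the same reference picture, its MV is taken
 * directly; otherwise the component-wise median of A, B and C is used.
 * Worked example: A=(3,1), B=(5,-2), C=(4,4) with all three refs matching
 * gives (mid_pred(3,5,4), mid_pred(1,-2,4)) = (4,1). */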
static av_always_inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){
    const int index8= scan8[n];
    const int top_ref= h->ref_cache[list][ index8 - 8 ];
    const int left_ref= h->ref_cache[list][ index8 - 1 ];
    const int16_t * const A= h->mv_cache[list][ index8 - 1 ];
    const int16_t * const B= h->mv_cache[list][ index8 - 8 ];
    const int16_t * C;
    int diagonal_ref, match_count;

    assert(part_width==1 || part_width==2 || part_width==4);

    diagonal_ref= fetch_diagonal_mv(h, &C, index8, list, part_width);
    match_count= (diagonal_ref==ref) + (top_ref==ref) + (left_ref==ref);
    tprintf(h->s.avctx, "pred_motion match_count=%d\n", match_count);
    if(match_count > 1){ //most common
        *mx= mid_pred(A[0], B[0], C[0]);
        *my= mid_pred(A[1], B[1], C[1]);
    }else if(match_count==1){
        if(left_ref==ref){
            *mx= A[0];
            *my= A[1];
        }else if(top_ref==ref){
            *mx= B[0];
            *my= B[1];
        }else{
            *mx= C[0];
            *my= C[1];
        }
    }else{
        if(top_ref == PART_NOT_AVAILABLE && diagonal_ref == PART_NOT_AVAILABLE && left_ref != PART_NOT_AVAILABLE){
            *mx= A[0];
            *my= A[1];
        }else{
            *mx= mid_pred(A[0], B[0], C[0]);
            *my= mid_pred(A[1], B[1], C[1]);
        }
    }

    tprintf(h->s.avctx, "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list);
}
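
/* Hypothetical caller sketch (illustration only; mvd_x/mvd_y stand for the
 * decoded MV residual and are not defined in this file):
 *     int mx, my;
 *     pred_motion(h, 0, 4, list, ref, &mx, &my); // 16x16: part_width 4
 *     mv_x = mx + mvd_x;
 *     mv_y = my + mvd_y;
 */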

/**
 * Get the directionally predicted 16x8 MV.
 * For the top partition (n == 0) the top neighbor B is used if it has the
 * same reference; for the bottom partition the left neighbor A; otherwise
 * this falls back to the median prediction of pred_motion().
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
    if(n==0){
        const int top_ref= h->ref_cache[list][ scan8[0] - 8 ];
        const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];

        tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list);

        if(top_ref == ref){
            *mx= B[0];
            *my= B[1];
            return;
        }
    }else{
        const int left_ref= h->ref_cache[list][ scan8[8] - 1 ];
        const int16_t * const A= h->mv_cache[list][ scan8[8] - 1 ];

        tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);

        if(left_ref == ref){
            *mx= A[0];
            *my= A[1];
            return;
        }
    }

    /* no directional match; fall back to the median prediction */
    pred_motion(h, n, 4, list, ref, mx, my);
}

/**
 * Get the directionally predicted 8x16 MV.
 * For the left partition (n == 0) the left neighbor A is used if it has the
 * same reference; for the right partition the diagonal neighbor C; otherwise
 * this falls back to the median prediction of pred_motion().
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
    if(n==0){
        const int left_ref= h->ref_cache[list][ scan8[0] - 1 ];
        const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ];

        tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);

        if(left_ref == ref){
            *mx= A[0];
            *my= A[1];
            return;
        }
    }else{
        const int16_t * C;
        int diagonal_ref;

        diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2);

        tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list);

        if(diagonal_ref == ref){
            *mx= C[0];
            *my= C[1];
            return;
        }
    }

    /* no directional match; fall back to the median prediction */
    pred_motion(h, n, 2, list, ref, mx, my);
}
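
/* In MBAFF streams a neighbor MB may be coded in the opposite field/frame
 * mode. FIX_MV_MBAFF rescales such a neighbor's ref index and vertical MV
 * into the current MB's coordinate system (a field MV unit covers two frame
 * lines, and field ref indices are doubled), buffering the fixed MV in
 * mvbuf[idx] so the underlying motion_val array is left untouched. */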
#define FIX_MV_MBAFF(type, refn, mvn, idx)\
    if(FRAME_MBAFF){\
        if(MB_FIELD){\
            if(!IS_INTERLACED(type)){\
                refn <<= 1;\
                AV_COPY32(mvbuf[idx], mvn);\
                mvbuf[idx][1] /= 2;\
                mvn = mvbuf[idx];\
            }\
        }else{\
            if(IS_INTERLACED(type)){\
                refn >>= 1;\
                AV_COPY32(mvbuf[idx], mvn);\
                mvbuf[idx][1] <<= 1;\
                mvn = mvbuf[idx];\
            }\
        }\
    }
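
/*
 * P_SKIP motion prediction (subclause 8.4.1.1): a skipped P MB uses refIdx 0
 * in list 0 with the ordinary 16x16 median prediction, except that the MV is
 * forced to zero when the left or top neighbor is unavailable, or when either
 * of them has refIdx 0 with a zero MV; the zeromv fast path below implements
 * exactly that.
 */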
static av_always_inline void pred_pskip_motion(H264Context * const h){
    DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = {0};
    DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
    MpegEncContext * const s = &h->s;
    int8_t *ref = s->current_picture.f.ref_index[0];
    int16_t (*mv)[2] = s->current_picture.f.motion_val[0];
    int top_ref, left_ref, diagonal_ref, match_count, mx, my;
    const int16_t *A, *B, *C;
    int b_stride = h->b_stride;

    fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);

    /* To avoid doing an entire fill_decode_caches, we inline the relevant
     * parts here.
     * FIXME: this is a partial duplicate of the logic in fill_decode_caches,
     * but it's faster this way. Is there a way to avoid this duplication?
     */
    if(USES_LIST(h->left_type[LTOP], 0)){
        left_ref = ref[4*h->left_mb_xy[LTOP] + 1 + (h->left_block[0]&~1)];
        A = mv[h->mb2b_xy[h->left_mb_xy[LTOP]] + 3 + b_stride*h->left_block[0]];
        FIX_MV_MBAFF(h->left_type[LTOP], left_ref, A, 0);
        if(!(left_ref | AV_RN32A(A))){
            goto zeromv;
        }
    }else if(h->left_type[LTOP]){
        left_ref = LIST_NOT_USED;
        A = zeromv;
    }else{
        goto zeromv;
    }

    if(USES_LIST(h->top_type, 0)){
        top_ref = ref[4*h->top_mb_xy + 2];
        B = mv[h->mb2b_xy[h->top_mb_xy] + 3*b_stride];
        FIX_MV_MBAFF(h->top_type, top_ref, B, 1);
        if(!(top_ref | AV_RN32A(B))){
            goto zeromv;
        }
    }else if(h->top_type){
        top_ref = LIST_NOT_USED;
        B = zeromv;
    }else{
        goto zeromv;
    }

    tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);

    if(USES_LIST(h->topright_type, 0)){
        diagonal_ref = ref[4*h->topright_mb_xy + 2];
        C = mv[h->mb2b_xy[h->topright_mb_xy] + 3*b_stride];
        FIX_MV_MBAFF(h->topright_type, diagonal_ref, C, 2);
    }else if(h->topright_type){
        diagonal_ref = LIST_NOT_USED;
        C = zeromv;
    }else{
        if(USES_LIST(h->topleft_type, 0)){
            diagonal_ref = ref[4*h->topleft_mb_xy + 1 + (h->topleft_partition & 2)];
            C = mv[h->mb2b_xy[h->topleft_mb_xy] + 3 + b_stride + (h->topleft_partition & 2*b_stride)];
            FIX_MV_MBAFF(h->topleft_type, diagonal_ref, C, 2);
        }else if(h->topleft_type){
            diagonal_ref = LIST_NOT_USED;
            C = zeromv;
        }else{
            diagonal_ref = PART_NOT_AVAILABLE;
            C = zeromv;
        }
    }

    match_count= !diagonal_ref + !top_ref + !left_ref;
    tprintf(h->s.avctx, "pred_pskip_motion match_count=%d\n", match_count);
    if(match_count > 1){
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    }else if(match_count==1){
        if(!left_ref){
            mx = A[0];
            my = A[1];
        }else if(!top_ref){
            mx = B[0];
            my = B[1];
        }else{
            mx = C[0];
            my = C[1];
        }
    }else{
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    }

    fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4);
    return;

zeromv:
    fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
    return;
}
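
/* Compute the mb_xy indices and macroblock types of the five spatial
 * neighbors (top-left, top, top-right and the two left entries) of the
 * current MB. Most of the complexity is MBAFF remapping: for field MBs the
 * neighbors may live one MB row up, and the set of left 4x4 blocks to
 * sample changes (see left_block_options). */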
static void fill_decode_neighbors(H264Context *h, int mb_type){
    MpegEncContext * const s = &h->s;
    const int mb_xy= h->mb_xy;
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
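    /* Layout of left_block_options (inferred from its uses here and in
     * fill_decode_caches): in each row, the first entries select 4x4 blocks
     * of the left MB for MV/ref and intra-mode lookup, and the entries from
     * index 8 on are the matching offsets into non_zero_count[]. Row 0 is
     * the same-parity case; rows 1-3 cover the MBAFF field/frame mismatches. */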
    static const uint8_t left_block_options[4][32]={
        {0,1,2,3,7,10,8,11,3+0*4, 3+1*4, 3+2*4, 3+3*4, 1+4*4, 1+8*4, 1+5*4, 1+9*4},
        {2,2,3,3,8,11,8,11,3+2*4, 3+2*4, 3+3*4, 3+3*4, 1+5*4, 1+9*4, 1+5*4, 1+9*4},
        {0,0,1,1,7,10,7,10,3+0*4, 3+0*4, 3+1*4, 3+1*4, 1+4*4, 1+8*4, 1+4*4, 1+8*4},
        {0,2,0,2,7,10,7,10,3+0*4, 3+2*4, 3+0*4, 3+2*4, 1+4*4, 1+8*4, 1+4*4, 1+8*4}
    };

    h->topleft_partition= -1;

    top_xy = mb_xy - (s->mb_stride << MB_FIELD);

    /* Wow, what a mess; why didn't they simplify the interlacing & intra
     * stuff? I can't imagine that these complex rules are worth it. */
    topleft_xy = top_xy - 1;
    topright_xy= top_xy + 1;
    left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
    h->left_block = left_block_options[0];

    if(FRAME_MBAFF){
        const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if(s->mb_y&1){
            if (left_mb_field_flag != curr_mb_field_flag) {
                left_xy[LBOT] = left_xy[LTOP] = mb_xy - s->mb_stride - 1;
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += s->mb_stride;
                    h->left_block = left_block_options[3];
                } else {
                    topleft_xy += s->mb_stride;
                    // take top left mv from the middle of the mb, as opposed to all other modes which use the bottom right partition
                    h->topleft_partition = 0;
                    h->left_block = left_block_options[1];
                }
            }
        }else{
            if(curr_mb_field_flag){
                topleft_xy  += s->mb_stride & (((s->current_picture.f.mb_type[top_xy - 1] >> 7) & 1) - 1);
                topright_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy + 1] >> 7) & 1) - 1);
                top_xy      += s->mb_stride & (((s->current_picture.f.mb_type[top_xy    ] >> 7) & 1) - 1);
            }
            if (left_mb_field_flag != curr_mb_field_flag) {
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += s->mb_stride;
                    h->left_block = left_block_options[3];
                } else {
                    h->left_block = left_block_options[2];
                }
            }
        }
    }

    h->topleft_mb_xy = topleft_xy;
    h->top_mb_xy = top_xy;
    h->topright_mb_xy= topright_xy;
    h->left_mb_xy[LTOP] = left_xy[LTOP];
    h->left_mb_xy[LBOT] = left_xy[LBOT];
    //FIXME do we need all in the context?

    h->topleft_type = s->current_picture.f.mb_type[topleft_xy];
    h->top_type = s->current_picture.f.mb_type[top_xy];
    h->topright_type = s->current_picture.f.mb_type[topright_xy];
    h->left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
    h->left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];

    if(FMO){
        if(h->slice_table[topleft_xy ] != h->slice_num) h->topleft_type = 0;
        if(h->slice_table[top_xy     ] != h->slice_num) h->top_type = 0;
        if(h->slice_table[left_xy[LTOP] ] != h->slice_num) h->left_type[LTOP] = h->left_type[LBOT] = 0;
    }else{
        if(h->slice_table[topleft_xy ] != h->slice_num){
            h->topleft_type = 0;
            if(h->slice_table[top_xy     ] != h->slice_num) h->top_type = 0;
            if(h->slice_table[left_xy[LTOP] ] != h->slice_num) h->left_type[LTOP] = h->left_type[LBOT] = 0;
        }
    }
    if(h->slice_table[topright_xy] != h->slice_num) h->topright_type= 0;
}

static void fill_decode_caches(H264Context *h, int mb_type){
    MpegEncContext * const s = &h->s;
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    int topleft_type, top_type, topright_type, left_type[LEFT_MBS];
    const uint8_t * left_block= h->left_block;
    int i;
    uint8_t *nnz;
    uint8_t *nnz_cache;

    topleft_xy = h->topleft_mb_xy;
    top_xy = h->top_mb_xy;
    topright_xy = h->topright_mb_xy;
    left_xy[LTOP] = h->left_mb_xy[LTOP];
    left_xy[LBOT] = h->left_mb_xy[LBOT];
    topleft_type = h->topleft_type;
    top_type = h->top_type;
    topright_type = h->topright_type;
    left_type[LTOP]= h->left_type[LTOP];
    left_type[LBOT]= h->left_type[LBOT];

    if(!IS_SKIP(mb_type)){
        if(IS_INTRA(mb_type)){
            int type_mask= h->pps.constrained_intra_pred ? IS_INTRA(-1) : -1;
            h->topleft_samples_available=
            h->top_samples_available=
            h->left_samples_available= 0xFFFF;
            h->topright_samples_available= 0xEEEA;
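
            /* The *_samples_available fields are 16-bit masks with one bit
             * per 4x4 block position, telling the intra predictors whether
             * the corresponding neighboring samples exist; each hex constant
             * below clears exactly the positions that lose a neighbor in the
             * respective case (exact bit layout inferred from the masks). */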
            if(!(top_type & type_mask)){
                h->topleft_samples_available= 0xB3FF;
                h->top_samples_available= 0x33FF;
                h->topright_samples_available= 0x26EA;
            }

            if(IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[LTOP])){
                if(IS_INTERLACED(mb_type)){
                    if(!(left_type[LTOP] & type_mask)){
                        h->topleft_samples_available&= 0xDFFF;
                        h->left_samples_available&= 0x5FFF;
                    }
                    if(!(left_type[LBOT] & type_mask)){
                        h->topleft_samples_available&= 0xFF5F;
                        h->left_samples_available&= 0xFF5F;
                    }
                }else{
                    int left_typei = s->current_picture.f.mb_type[left_xy[LTOP] + s->mb_stride];

                    assert(left_xy[LTOP] == left_xy[LBOT]);
                    if(!((left_typei & type_mask) && (left_type[LTOP] & type_mask))){
                        h->topleft_samples_available&= 0xDF5F;
                        h->left_samples_available&= 0x5F5F;
                    }
                }
            }else{
                if(!(left_type[LTOP] & type_mask)){
                    h->topleft_samples_available&= 0xDF5F;
                    h->left_samples_available&= 0x5F5F;
                }
            }

            if(!(topleft_type & type_mask))
                h->topleft_samples_available&= 0x7FFF;

            if(!(topright_type & type_mask))
                h->topright_samples_available&= 0xFBFF;

            if(IS_INTRA4x4(mb_type)){
                if(IS_INTRA4x4(top_type)){
                    AV_COPY32(h->intra4x4_pred_mode_cache+4+8*0, h->intra4x4_pred_mode + h->mb2br_xy[top_xy]);
                }else{
                    h->intra4x4_pred_mode_cache[4+8*0]=
                    h->intra4x4_pred_mode_cache[5+8*0]=
                    h->intra4x4_pred_mode_cache[6+8*0]=
                    h->intra4x4_pred_mode_cache[7+8*0]= 2 - 3*!(top_type & type_mask);
                }
                for(i=0; i<2; i++){
                    if(IS_INTRA4x4(left_type[LEFT(i)])){
                        int8_t *mode= h->intra4x4_pred_mode + h->mb2br_xy[left_xy[LEFT(i)]];
                        h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= mode[6-left_block[0+2*i]];
                        h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= mode[6-left_block[1+2*i]];
                    }else{
                        h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]=
                        h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= 2 - 3*!(left_type[LEFT(i)] & type_mask);
                    }
                }
            }
        }

        //FIXME constrained_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec)
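
        /* non_zero_count_cache reuses the 8-wide cache geometry: the luma
         * plane starts at row 0 (entries 4+8*0 hold the top neighbor row)
         * and the two chroma planes start at rows 5 and 10, so the same
         * scan8-style indexing works for all three planes. */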
        nnz_cache = h->non_zero_count_cache;
        if(top_type){
            nnz = h->non_zero_count[top_xy];
            AV_COPY32(&nnz_cache[4+8* 0], &nnz[4*3]);
            if(!s->chroma_y_shift){
                AV_COPY32(&nnz_cache[4+8* 5], &nnz[4* 7]);
                AV_COPY32(&nnz_cache[4+8*10], &nnz[4*11]);
            }else{
                AV_COPY32(&nnz_cache[4+8* 5], &nnz[4* 5]);
                AV_COPY32(&nnz_cache[4+8*10], &nnz[4* 9]);
            }
        }else{
            uint32_t top_empty = CABAC && !IS_INTRA(mb_type) ? 0 : 0x40404040;
            AV_WN32A(&nnz_cache[4+8* 0], top_empty);
            AV_WN32A(&nnz_cache[4+8* 5], top_empty);
            AV_WN32A(&nnz_cache[4+8*10], top_empty);
        }

        for (i=0; i<2; i++) {
            if(left_type[LEFT(i)]){
                nnz = h->non_zero_count[left_xy[LEFT(i)]];
                nnz_cache[3+8* 1 + 2*8*i]= nnz[left_block[8+0+2*i]];
                nnz_cache[3+8* 2 + 2*8*i]= nnz[left_block[8+1+2*i]];
                if (CHROMA444) {
                    nnz_cache[3+8* 6 + 2*8*i]= nnz[left_block[8+0+2*i]+4*4];
                    nnz_cache[3+8* 7 + 2*8*i]= nnz[left_block[8+1+2*i]+4*4];
                    nnz_cache[3+8*11 + 2*8*i]= nnz[left_block[8+0+2*i]+8*4];
                    nnz_cache[3+8*12 + 2*8*i]= nnz[left_block[8+1+2*i]+8*4];
                }else if(CHROMA422) {
                    nnz_cache[3+8* 6 + 2*8*i]= nnz[left_block[8+0+2*i]-2+4*4];
                    nnz_cache[3+8* 7 + 2*8*i]= nnz[left_block[8+1+2*i]-2+4*4];
                    nnz_cache[3+8*11 + 2*8*i]= nnz[left_block[8+0+2*i]-2+8*4];
                    nnz_cache[3+8*12 + 2*8*i]= nnz[left_block[8+1+2*i]-2+8*4];
                }else{
                    nnz_cache[3+8* 6 + 8*i]= nnz[left_block[8+4+2*i]];
                    nnz_cache[3+8*11 + 8*i]= nnz[left_block[8+5+2*i]];
                }
            }else{
                nnz_cache[3+8* 1 + 2*8*i]=
                nnz_cache[3+8* 2 + 2*8*i]=
                nnz_cache[3+8* 6 + 2*8*i]=
                nnz_cache[3+8* 7 + 2*8*i]=
                nnz_cache[3+8*11 + 2*8*i]=
                nnz_cache[3+8*12 + 2*8*i]= CABAC && !IS_INTRA(mb_type) ? 0 : 64;
            }
        }

        if( CABAC ) {
            // top_cbp
            if(top_type) {
                h->top_cbp = h->cbp_table[top_xy];
            } else {
                h->top_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
            }
            // left_cbp
            if (left_type[LTOP]) {
                h->left_cbp = (h->cbp_table[left_xy[LTOP]] & 0x7F0)
                            | ((h->cbp_table[left_xy[LTOP]]>>(left_block[0]&(~1)))&2)
                            | (((h->cbp_table[left_xy[LBOT]]>>(left_block[2]&(~1)))&2) << 2);
            } else {
                h->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
            }
        }
    }

    if(IS_INTER(mb_type) || (IS_DIRECT(mb_type) && h->direct_spatial_mv_pred)){
        int list;
        int b_stride = h->b_stride;
        for(list=0; list<h->list_count; list++){
            int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
            int8_t *ref = s->current_picture.f.ref_index[list];
            int16_t (*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
            int16_t (*mv)[2] = s->current_picture.f.motion_val[list];
            if(!USES_LIST(mb_type, list)){
                continue;
            }
            assert(!(IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred));

            if(USES_LIST(top_type, list)){
                const int b_xy= h->mb2b_xy[top_xy] + 3*b_stride;
                AV_COPY128(mv_cache[0 - 1*8], mv[b_xy + 0]);
                ref_cache[0 - 1*8]=
                ref_cache[1 - 1*8]= ref[4*top_xy + 2];
                ref_cache[2 - 1*8]=
                ref_cache[3 - 1*8]= ref[4*top_xy + 3];
            }else{
                AV_ZERO128(mv_cache[0 - 1*8]);
                AV_WN32A(&ref_cache[0 - 1*8], ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101u);
            }

            if(mb_type & (MB_TYPE_16x8|MB_TYPE_8x8)){
                for(i=0; i<2; i++){
                    int cache_idx = -1 + i*2*8;
                    if(USES_LIST(left_type[LEFT(i)], list)){
                        const int b_xy= h->mb2b_xy[left_xy[LEFT(i)]] + 3;
                        const int b8_xy= 4*left_xy[LEFT(i)] + 1;
                        AV_COPY32(mv_cache[cache_idx ], mv[b_xy + b_stride*left_block[0+i*2]]);
                        AV_COPY32(mv_cache[cache_idx+8], mv[b_xy + b_stride*left_block[1+i*2]]);
                        ref_cache[cache_idx ]= ref[b8_xy + (left_block[0+i*2]&~1)];
                        ref_cache[cache_idx+8]= ref[b8_xy + (left_block[1+i*2]&~1)];
                    }else{
                        AV_ZERO32(mv_cache[cache_idx ]);
                        AV_ZERO32(mv_cache[cache_idx+8]);
                        ref_cache[cache_idx ]=
                        ref_cache[cache_idx+8]= (left_type[LEFT(i)]) ? LIST_NOT_USED : PART_NOT_AVAILABLE;
                    }
                }
            }else{
                if(USES_LIST(left_type[LTOP], list)){
                    const int b_xy= h->mb2b_xy[left_xy[LTOP]] + 3;
                    const int b8_xy= 4*left_xy[LTOP] + 1;
                    AV_COPY32(mv_cache[-1], mv[b_xy + b_stride*left_block[0]]);
                    ref_cache[-1]= ref[b8_xy + (left_block[0]&~1)];
                }else{
                    AV_ZERO32(mv_cache[-1]);
                    ref_cache[-1]= left_type[LTOP] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
                }
            }

            if(USES_LIST(topright_type, list)){
                const int b_xy= h->mb2b_xy[topright_xy] + 3*b_stride;
                AV_COPY32(mv_cache[4 - 1*8], mv[b_xy]);
                ref_cache[4 - 1*8]= ref[4*topright_xy + 2];
            }else{
                AV_ZERO32(mv_cache[4 - 1*8]);
                ref_cache[4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }

            if(ref_cache[2 - 1*8] < 0 || ref_cache[4 - 1*8] < 0){
                if(USES_LIST(topleft_type, list)){
                    const int b_xy = h->mb2b_xy[topleft_xy] + 3 + b_stride + (h->topleft_partition & 2*b_stride);
                    const int b8_xy= 4*topleft_xy + 1 + (h->topleft_partition & 2);
                    AV_COPY32(mv_cache[-1 - 1*8], mv[b_xy]);
                    ref_cache[-1 - 1*8]= ref[b8_xy];
                }else{
                    AV_ZERO32(mv_cache[-1 - 1*8]);
                    ref_cache[-1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
                }
            }

            if((mb_type&(MB_TYPE_SKIP|MB_TYPE_DIRECT2)) && !FRAME_MBAFF)
                continue;

            if(!(mb_type&(MB_TYPE_SKIP|MB_TYPE_DIRECT2))){
                uint8_t (*mvd_cache)[2] = &h->mvd_cache[list][scan8[0]];
                uint8_t (*mvd)[2] = h->mvd_table[list];
                ref_cache[2+8*0] =
                ref_cache[2+8*2] = PART_NOT_AVAILABLE;
                AV_ZERO32(mv_cache[2+8*0]);
                AV_ZERO32(mv_cache[2+8*2]);

                if( CABAC ) {
                    if(USES_LIST(top_type, list)){
                        const int b_xy= h->mb2br_xy[top_xy];
                        AV_COPY64(mvd_cache[0 - 1*8], mvd[b_xy + 0]);
                    }else{
                        AV_ZERO64(mvd_cache[0 - 1*8]);
                    }
                    if(USES_LIST(left_type[LTOP], list)){
                        const int b_xy= h->mb2br_xy[left_xy[LTOP]] + 6;
                        AV_COPY16(mvd_cache[-1 + 0*8], mvd[b_xy - left_block[0]]);
                        AV_COPY16(mvd_cache[-1 + 1*8], mvd[b_xy - left_block[1]]);
                    }else{
                        AV_ZERO16(mvd_cache[-1 + 0*8]);
                        AV_ZERO16(mvd_cache[-1 + 1*8]);
                    }
                    if(USES_LIST(left_type[LBOT], list)){
                        const int b_xy= h->mb2br_xy[left_xy[LBOT]] + 6;
                        AV_COPY16(mvd_cache[-1 + 2*8], mvd[b_xy - left_block[2]]);
                        AV_COPY16(mvd_cache[-1 + 3*8], mvd[b_xy - left_block[3]]);
                    }else{
                        AV_ZERO16(mvd_cache[-1 + 2*8]);
                        AV_ZERO16(mvd_cache[-1 + 3*8]);
                    }
                    AV_ZERO16(mvd_cache[2+8*0]);
                    AV_ZERO16(mvd_cache[2+8*2]);
                    if(h->slice_type_nos == AV_PICTURE_TYPE_B){
                        uint8_t *direct_cache = &h->direct_cache[scan8[0]];
                        uint8_t *direct_table = h->direct_table;
                        fill_rectangle(direct_cache, 4, 4, 8, MB_TYPE_16x16>>1, 1);

                        if(IS_DIRECT(top_type)){
                            AV_WN32A(&direct_cache[-1*8], 0x01010101u*(MB_TYPE_DIRECT2>>1));
                        }else if(IS_8X8(top_type)){
                            int b8_xy = 4*top_xy;
                            direct_cache[0 - 1*8]= direct_table[b8_xy + 2];
                            direct_cache[2 - 1*8]= direct_table[b8_xy + 3];
                        }else{
                            AV_WN32A(&direct_cache[-1*8], 0x01010101*(MB_TYPE_16x16>>1));
                        }

                        if(IS_DIRECT(left_type[LTOP]))
                            direct_cache[-1 + 0*8]= MB_TYPE_DIRECT2>>1;
                        else if(IS_8X8(left_type[LTOP]))
                            direct_cache[-1 + 0*8]= direct_table[4*left_xy[LTOP] + 1 + (left_block[0]&~1)];
                        else
                            direct_cache[-1 + 0*8]= MB_TYPE_16x16>>1;

                        if(IS_DIRECT(left_type[LBOT]))
                            direct_cache[-1 + 2*8]= MB_TYPE_DIRECT2>>1;
                        else if(IS_8X8(left_type[LBOT]))
                            direct_cache[-1 + 2*8]= direct_table[4*left_xy[LBOT] + 1 + (left_block[2]&~1)];
                        else
                            direct_cache[-1 + 2*8]= MB_TYPE_16x16>>1;
                    }
                }
            }
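
            /* Final MBAFF fixup: border cache entries loaded from neighbors
             * coded in the opposite field/frame mode still carry the
             * neighbor's scaling, so MAP_MVS below walks every border entry
             * and rescales ref, MV and MVD into the current MB's coordinate
             * system, mirroring what FIX_MV_MBAFF does for the P-skip path. */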
            if(FRAME_MBAFF){
#define MAP_MVS\
                MAP_F2F(scan8[0] - 1 - 1*8, topleft_type)\
                MAP_F2F(scan8[0] + 0 - 1*8, top_type)\
                MAP_F2F(scan8[0] + 1 - 1*8, top_type)\
                MAP_F2F(scan8[0] + 2 - 1*8, top_type)\
                MAP_F2F(scan8[0] + 3 - 1*8, top_type)\
                MAP_F2F(scan8[0] + 4 - 1*8, topright_type)\
                MAP_F2F(scan8[0] - 1 + 0*8, left_type[LTOP])\
                MAP_F2F(scan8[0] - 1 + 1*8, left_type[LTOP])\
                MAP_F2F(scan8[0] - 1 + 2*8, left_type[LBOT])\
                MAP_F2F(scan8[0] - 1 + 3*8, left_type[LBOT])

                if(MB_FIELD){
#define MAP_F2F(idx, mb_type)\
                    if(!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
                        h->ref_cache[list][idx] <<= 1;\
                        h->mv_cache[list][idx][1] /= 2;\
                        h->mvd_cache[list][idx][1] >>= 1;\
                    }
                    MAP_MVS
#undef MAP_F2F
                }else{
#define MAP_F2F(idx, mb_type)\
                    if(IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
                        h->ref_cache[list][idx] >>= 1;\
                        h->mv_cache[list][idx][1] <<= 1;\
                        h->mvd_cache[list][idx][1] <<= 1;\
                    }
                    MAP_MVS
#undef MAP_F2F
                }
            }
        }
    }

    h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[LTOP]);
}

/**
 * Decode a P_SKIP or B_SKIP macroblock.
 */
static void av_unused decode_mb_skip(H264Context *h){
    MpegEncContext * const s = &h->s;
    const int mb_xy= h->mb_xy;
    int mb_type=0;

    memset(h->non_zero_count[mb_xy], 0, 48);

    if(MB_FIELD)
        mb_type|= MB_TYPE_INTERLACED;

    if( h->slice_type_nos == AV_PICTURE_TYPE_B )
    {
        // just for fill_caches. pred_direct_motion will set the real mb_type
        mb_type|= MB_TYPE_L0L1|MB_TYPE_DIRECT2|MB_TYPE_SKIP;
        if(h->direct_spatial_mv_pred){
            fill_decode_neighbors(h, mb_type);
            fill_decode_caches(h, mb_type); //FIXME check what is needed and what not ...
        }
        ff_h264_pred_direct_motion(h, &mb_type);
        mb_type|= MB_TYPE_SKIP;
    }
    else
    {
        mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_SKIP;

        fill_decode_neighbors(h, mb_type);
        pred_pskip_motion(h);
    }

    write_back_motion(h, mb_type);
    s->current_picture.f.mb_type[mb_xy] = mb_type;
    s->current_picture.f.qscale_table[mb_xy] = s->qscale;
    h->slice_table[ mb_xy ]= h->slice_num;
    h->prev_mb_skipped= 1;
}

#endif /* AVCODEC_H264_MVPRED_H */