/*
 * H.26L/H.264/AVC/JVT/14496-10/... direct mb/block decoding
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 direct mb/block decoding.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "internal.h"
#include "avcodec.h"
#include "h264.h"
#include "mpegutils.h"
#include "rectangle.h"
#include "thread.h"

#include <assert.h>
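/**
 * Compute the temporal-direct distance scale factor for one list-0 reference
 * (the DistScaleFactor derivation of H.264 subclause 8.4.1.2.3): tb and td
 * are the clipped POC distances cur->ref0 and ref1->ref0, and the result is
 * roughly tb/td in .8 fixed point. 256 (i.e. 1.0) is returned when the
 * distance is zero or the reference is long-term, where scaling is undefined.
 */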
static int get_scale_factor(H264Context *const h, int poc, int poc1, int i)
{
    int poc0 = h->ref_list[0][i].poc;
    int td   = av_clip(poc1 - poc0, -128, 127);
    if (td == 0 || h->ref_list[0][i].long_ref) {
        return 256;
    } else {
        int tb = av_clip(poc - poc0, -128, 127);
        int tx = (16384 + (FFABS(td) >> 1)) / td;
        return av_clip((tb * tx + 32) >> 6, -1024, 1023);
    }
}
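/* Worked example for get_scale_factor() (illustrative numbers): poc = 2,
 * poc0 = 0, poc1 = 4 gives tb = 2, td = 4, tx = (16384 + 2) / 4 = 4096 and
 * (2 * 4096 + 32) >> 6 = 128, i.e. 0.5 in the .8 fixed-point scale
 * (256 == 1.0), as expected for a B-frame lying halfway between its two
 * references. */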
void ff_h264_direct_dist_scale_factor(H264Context *const h)
{
    const int poc  = FIELD_PICTURE(h) ? h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD]
                                      : h->cur_pic_ptr->poc;
    const int poc1 = h->ref_list[1][0].poc;
    int i, field;

    if (FRAME_MBAFF(h))
        for (field = 0; field < 2; field++) {
            const int poc  = h->cur_pic_ptr->field_poc[field];
            const int poc1 = h->ref_list[1][0].field_poc[field];
            for (i = 0; i < 2 * h->ref_count[0]; i++)
                h->dist_scale_factor_field[field][i ^ field] =
                    get_scale_factor(h, poc, poc1, i + 16);
        }

    for (i = 0; i < h->ref_count[0]; i++)
        h->dist_scale_factor[i] = get_scale_factor(h, poc, poc1, i);
}
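/**
 * Build the colocated -> list0 reference index map used by temporal direct
 * mode. References are matched by the packed key
 * 4 * frame_num + (reference & 3), the same encoding that
 * ff_h264_direct_ref_list_init() stores in ref_poc[], so each reference of
 * the colocated picture can be mapped back to the current slice's list-0
 * index naming the same frame or field.
 */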
static void fill_colmap(H264Context *h, int map[2][16 + 32], int list,
                        int field, int colfield, int mbafi)
{
    H264Picture *const ref1 = &h->ref_list[1][0];
    int j, old_ref, rfield;
    int start  = mbafi ? 16                       : 0;
    int end    = mbafi ? 16 + 2 * h->ref_count[0] : h->ref_count[0];
    int interl = mbafi || h->picture_structure != PICT_FRAME;

    /* bogus; fills in for missing frames */
    memset(map[list], 0, sizeof(map[list]));

    for (rfield = 0; rfield < 2; rfield++) {
        for (old_ref = 0; old_ref < ref1->ref_count[colfield][list]; old_ref++) {
            int poc = ref1->ref_poc[colfield][list][old_ref];

            if (!interl)
                poc |= 3;
            // FIXME: store all MBAFF references so this is not needed
            else if (interl && (poc & 3) == 3)
                poc = (poc & ~3) + rfield + 1;

            for (j = start; j < end; j++) {
                if (4 * h->ref_list[0][j].frame_num +
                    (h->ref_list[0][j].reference & 3) == poc) {
                    int cur_ref = mbafi ? (j - 16) ^ field : j;
                    if (ref1->mbaff)
                        map[list][2 * old_ref + (rfield ^ field) + 16] = cur_ref;
                    if (rfield == field || !interl)
                        map[list][old_ref] = cur_ref;
                    break;
                }
            }
        }
    }
}
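/**
 * Record the current reference lists in the current picture (so later
 * B-slices can remap them via fill_colmap()) and derive the parity and
 * field offsets needed to locate the colocated macroblock in ref_list[1][0].
 */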
void ff_h264_direct_ref_list_init(H264Context *const h)
{
    H264Picture *const ref1 = &h->ref_list[1][0];
    H264Picture *const cur  = h->cur_pic_ptr;
    int list, j, field;
    int sidx     = (h->picture_structure & 1) ^ 1;
    int ref1sidx = (ref1->reference      & 1) ^ 1;

    for (list = 0; list < 2; list++) {
        cur->ref_count[sidx][list] = h->ref_count[list];
        for (j = 0; j < h->ref_count[list]; j++)
            cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num +
                                          (h->ref_list[list][j].reference & 3);
    }

    if (h->picture_structure == PICT_FRAME) {
        memcpy(cur->ref_count[1], cur->ref_count[0], sizeof(cur->ref_count[0]));
        memcpy(cur->ref_poc[1],   cur->ref_poc[0],   sizeof(cur->ref_poc[0]));
    }

    cur->mbaff = FRAME_MBAFF(h);

    h->col_fieldoff = 0;
    if (h->picture_structure == PICT_FRAME) {
        int cur_poc  = h->cur_pic_ptr->poc;
        int *col_poc = h->ref_list[1]->field_poc;
        h->col_parity = (FFABS(col_poc[0] - cur_poc) >=
                         FFABS(col_poc[1] - cur_poc));
        ref1sidx =
        sidx     = h->col_parity;
    // FL -> FL & differ parity
    } else if (!(h->picture_structure & h->ref_list[1][0].reference) &&
               !h->ref_list[1][0].mbaff) {
        h->col_fieldoff = 2 * h->ref_list[1][0].reference - 3;
    }

    if (h->slice_type_nos != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
        return;

    for (list = 0; list < 2; list++) {
        fill_colmap(h, h->map_col_to_list0, list, sidx, ref1sidx, 0);
        if (FRAME_MBAFF(h))
            for (field = 0; field < 2; field++)
                fill_colmap(h, h->map_col_to_list0_field[field], list, field,
                            0, 1);
    }
}
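/**
 * With frame-threaded decoding the colocated picture may still be being
 * decoded; wait until at least mb_y rows of it have been reported finished.
 */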
static void await_reference_mb_row(H264Context *const h, H264Picture *ref,
                                   int mb_y)
{
    int ref_field         = ref->reference - 1;
    int ref_field_picture = ref->field_picture;
    int ref_height        = 16 * h->mb_height >> ref_field_picture;

    if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_FRAME))
        return;

    /* FIXME: It can be safe to access mb stuff
     * even if pixels aren't deblocked yet. */

    ff_thread_await_progress(&ref->tf,
                             FFMIN(16 * mb_y >> ref_field_picture,
                                   ref_height - 1),
                             ref_field_picture && ref_field);
}
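/**
 * Spatial direct prediction (H.264 subclause 8.4.1.2.2): each reference
 * index is the minimum of the neighbouring indices, the vector is the usual
 * median prediction, and partitions whose colocated block is almost static
 * (both |mv| components <= 1 against reference 0) get their vectors zeroed
 * for any list whose reference index is 0.
 */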
static void pred_spatial_direct_motion(H264Context *const h, int *mb_type)
{
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = h->mb_xy, mb_y = h->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type = MB_TYPE_L0L1;
    int i8, i4;
    int ref[2];
    int mv[2];
    int list;

    assert(h->ref_list[1][0].reference & 3);

    await_reference_mb_row(h, &h->ref_list[1][0],
                           h->mb_y + !!IS_INTERLACED(*mb_type));

#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16 | MB_TYPE_INTRA4x4 | \
                                MB_TYPE_INTRA16x16 | MB_TYPE_INTRA_PCM)
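/* A colocated macroblock of any of these types carries at most one motion
 * vector per list (intra carries none), so direct prediction can treat the
 * whole 16x16 block as a single partition. */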
    /* ref = min(neighbors) */
    for (list = 0; list < 2; list++) {
        int left_ref     = h->ref_cache[list][scan8[0] - 1];
        int top_ref      = h->ref_cache[list][scan8[0] - 8];
        int refc         = h->ref_cache[list][scan8[0] - 8 + 4];
        const int16_t *C = h->mv_cache[list][scan8[0] - 8 + 4];
        if (refc == PART_NOT_AVAILABLE) {
            refc = h->ref_cache[list][scan8[0] - 8 - 1];
            C    = h->mv_cache[list][scan8[0] - 8 - 1];
        }
        ref[list] = FFMIN3((unsigned)left_ref,
                           (unsigned)top_ref,
                           (unsigned)refc);
        if (ref[list] >= 0) {
            /* This is just pred_motion() but with the cases removed that
             * cannot happen for direct blocks. */
            const int16_t *const A = h->mv_cache[list][scan8[0] - 1];
            const int16_t *const B = h->mv_cache[list][scan8[0] - 8];

            int match_count = (left_ref == ref[list]) +
                              (top_ref  == ref[list]) +
                              (refc     == ref[list]);

            if (match_count > 1) { // most common
                mv[list] = pack16to32(mid_pred(A[0], B[0], C[0]),
                                      mid_pred(A[1], B[1], C[1]));
            } else {
                assert(match_count == 1);
                if (left_ref == ref[list])
                    mv[list] = AV_RN32A(A);
                else if (top_ref == ref[list])
                    mv[list] = AV_RN32A(B);
                else
                    mv[list] = AV_RN32A(C);
            }
        } else {
            int mask = ~(MB_TYPE_L0 << (2 * list));
            mv[list]  = 0;
            ref[list] = -1;
            if (!is_b8x8)
                *mb_type &= mask;
            sub_mb_type &= mask;
        }
    }
    if (ref[0] < 0 && ref[1] < 0) {
        ref[0] = ref[1] = 0;
        if (!is_b8x8)
            *mb_type |= MB_TYPE_L0L1;
        sub_mb_type |= MB_TYPE_L0L1;
    }

    if (!(is_b8x8 | mv[0] | mv[1])) {
        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
        fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
        fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
        fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
        *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 |
                                 MB_TYPE_P1L0 | MB_TYPE_P1L1)) |
                   MB_TYPE_16x16 | MB_TYPE_DIRECT2;
        return;
    }
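    /* In the comments below FR/FL denote a frame-/field-coded picture and
     * AFR/AFL the frame-/field-coded macroblock pairs of an MBAFF picture;
     * "X -> Y" reads "current macroblock coded as X, colocated coded as Y". */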
    if (IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) {                    //     AFR/FR    -> AFL/FL
            mb_y  = (h->mb_y & ~1) + h->col_parity;
            mb_xy = h->mb_x +
                    ((h->mb_y & ~1) + h->col_parity) * h->mb_stride;
            b8_stride = 0;
        } else {
            mb_y  += h->col_fieldoff;
            mb_xy += h->mb_stride * h->col_fieldoff; // non-zero for FL -> FL & differ parity
        }
        goto single_col;
    } else {                                        // AFL/AFR/FR/FL -> AFR/FR
        if (IS_INTERLACED(*mb_type)) {              // AFL       /FL -> AFR/FR
            mb_y           = h->mb_y & ~1;
            mb_xy          = (h->mb_y & ~1) * h->mb_stride + h->mb_x;
            mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
            mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
            b8_stride      = 2 + 4 * h->mb_stride;
            b4_stride     *= 6;
            if (IS_INTERLACED(mb_type_col[0]) !=
                IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) &&
                (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) &&
                !is_b8x8) {
                *mb_type |= MB_TYPE_16x8 | MB_TYPE_DIRECT2; /* B_16x8 */
            } else {
                *mb_type |= MB_TYPE_8x8;
            }
        } else {                                    //     AFR/FR    -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];

            sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) {
                *mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_16x16 */
            } else if (!is_b8x8 &&
                       (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) {
                *mb_type |= MB_TYPE_DIRECT2 |
                            (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
            } else {
                if (!h->sps.direct_8x8_inference_flag) {
                    /* FIXME: Save sub mb types from previous frames (or derive
                     * from MVs) so we know exactly what block size to use. */
                    sub_mb_type += (MB_TYPE_8x8 - MB_TYPE_16x16); /* B_SUB_4x4 */
                }
                *mb_type |= MB_TYPE_8x8;
            }
        }
    }

    await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
    l1mv0  = &h->ref_list[1][0].motion_val[0][h->mb2b_xy[mb_xy]];
    l1mv1  = &h->ref_list[1][0].motion_val[1][h->mb2b_xy[mb_xy]];
    l1ref0 = &h->ref_list[1][0].ref_index[0][4 * mb_xy];
    l1ref1 = &h->ref_list[1][0].ref_index[1][4 * mb_xy];
    if (!b8_stride) {
        if (h->mb_y & 1) {
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  += 2 * b4_stride;
            l1mv1  += 2 * b4_stride;
        }
        b8_stride = 2;
    }
    if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
        int n = 0;
        for (i8 = 0; i8 < 4; i8++) {
            int x8  = i8 & 1;
            int y8  = i8 >> 1;
            int xy8 = x8     + y8 * b8_stride;
            int xy4 = x8 * 3 + y8 * b4_stride;
            int a, b;

            if (is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                continue;
            h->sub_mb_type[i8] = sub_mb_type;

            fill_rectangle(&h->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                           (uint8_t)ref[0], 1);
            fill_rectangle(&h->ref_cache[1][scan8[i8 * 4]], 2, 2, 8,
                           (uint8_t)ref[1], 1);
            if (!IS_INTRA(mb_type_col[y8]) && !h->ref_list[1][0].long_ref &&
                ((l1ref0[xy8] == 0 &&
                  FFABS(l1mv0[xy4][0]) <= 1 &&
                  FFABS(l1mv0[xy4][1]) <= 1) ||
                 (l1ref0[xy8] < 0 &&
                  l1ref1[xy8] == 0 &&
                  FFABS(l1mv1[xy4][0]) <= 1 &&
                  FFABS(l1mv1[xy4][1]) <= 1))) {
                a =
                b = 0;
                if (ref[0] > 0)
                    a = mv[0];
                if (ref[1] > 0)
                    b = mv[1];
                n++;
            } else {
                a = mv[0];
                b = mv[1];
            }
            fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, a, 4);
            fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, b, 4);
        }
        if (!is_b8x8 && !(n & 3))
            *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 |
                                     MB_TYPE_P1L0 | MB_TYPE_P1L1)) |
                       MB_TYPE_16x16 | MB_TYPE_DIRECT2;
    } else if (IS_16X16(*mb_type)) {
        int a, b;

        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
        fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
        if (!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref &&
            ((l1ref0[0] == 0 &&
              FFABS(l1mv0[0][0]) <= 1 &&
              FFABS(l1mv0[0][1]) <= 1) ||
             (l1ref0[0] < 0 && !l1ref1[0] &&
              FFABS(l1mv1[0][0]) <= 1 &&
              FFABS(l1mv1[0][1]) <= 1 &&
              h->x264_build > 33U))) {
            a = b = 0;
            if (ref[0] > 0)
                a = mv[0];
            if (ref[1] > 0)
                b = mv[1];
        } else {
            a = mv[0];
            b = mv[1];
        }
        fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, a, 4);
        fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, b, 4);
    } else {
        int n = 0;
        for (i8 = 0; i8 < 4; i8++) {
            const int x8 = i8 & 1;
            const int y8 = i8 >> 1;

            if (is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                continue;
            h->sub_mb_type[i8] = sub_mb_type;

            fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, mv[0], 4);
            fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, mv[1], 4);
            fill_rectangle(&h->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                           (uint8_t)ref[0], 1);
            fill_rectangle(&h->ref_cache[1][scan8[i8 * 4]], 2, 2, 8,
                           (uint8_t)ref[1], 1);

            assert(b8_stride == 2);
            /* col_zero_flag */
            if (!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref &&
                (l1ref0[i8] == 0 ||
                 (l1ref0[i8] < 0 &&
                  l1ref1[i8] == 0 &&
                  h->x264_build > 33U))) {
                const int16_t (*l1mv)[2] = l1ref0[i8] == 0 ? l1mv0 : l1mv1;
                if (IS_SUB_8X8(sub_mb_type)) {
                    const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride];
                    if (FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1) {
                        if (ref[0] == 0)
                            fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2,
                                           8, 0, 4);
                        if (ref[1] == 0)
                            fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2,
                                           8, 0, 4);
                        n += 4;
                    }
                } else {
                    int m = 0;
                    for (i4 = 0; i4 < 4; i4++) {
                        const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) +
                                                     (y8 * 2 + (i4 >> 1)) * b4_stride];
                        if (FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1) {
                            if (ref[0] == 0)
                                AV_ZERO32(h->mv_cache[0][scan8[i8 * 4 + i4]]);
                            if (ref[1] == 0)
                                AV_ZERO32(h->mv_cache[1][scan8[i8 * 4 + i4]]);
                            m++;
                        }
                    }
                    if (m == 4)
                        h->sub_mb_type[i8] += MB_TYPE_16x16 - MB_TYPE_8x8;
                    n += m;
                }
            }
        }
        if (!is_b8x8 && !(n & 15))
            *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 |
                                     MB_TYPE_P1L0 | MB_TYPE_P1L1)) |
                       MB_TYPE_16x16 | MB_TYPE_DIRECT2;
    }
}
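/**
 * Temporal direct prediction (H.264 subclause 8.4.1.2.3): the list-0
 * reference and vector are taken from the colocated block and scaled by POC
 * distance, the list-1 vector being the remainder:
 *     mvL0 = (DistScaleFactor * mvCol + 128) >> 8
 *     mvL1 = mvL0 - mvCol
 * E.g. a colocated vector of (8, -4) halfway between the references
 * (DistScaleFactor = 128) yields mvL0 = (4, -2) and mvL1 = (-4, 2).
 */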
static void pred_temp_direct_motion(H264Context *const h, int *mb_type)
{
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = h->mb_xy, mb_y = h->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type;
    int i8, i4;

    assert(h->ref_list[1][0].reference & 3);

    await_reference_mb_row(h, &h->ref_list[1][0],
                           h->mb_y + !!IS_INTERLACED(*mb_type));
    if (IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) {                    //     AFR/FR    -> AFL/FL
            mb_y  = (h->mb_y & ~1) + h->col_parity;
            mb_xy = h->mb_x +
                    ((h->mb_y & ~1) + h->col_parity) * h->mb_stride;
            b8_stride = 0;
        } else {
            mb_y  += h->col_fieldoff;
            mb_xy += h->mb_stride * h->col_fieldoff; // non-zero for FL -> FL & differ parity
        }
        goto single_col;
    } else {                                        // AFL/AFR/FR/FL -> AFR/FR
        if (IS_INTERLACED(*mb_type)) {              // AFL       /FL -> AFR/FR
            mb_y           = h->mb_y & ~1;
            mb_xy          = h->mb_x + (h->mb_y & ~1) * h->mb_stride;
            mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
            mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
            b8_stride      = 2 + 4 * h->mb_stride;
            b4_stride     *= 6;
            if (IS_INTERLACED(mb_type_col[0]) !=
                IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                          MB_TYPE_DIRECT2;                  /* B_SUB_8x8 */

            if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) &&
                (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) &&
                !is_b8x8) {
                *mb_type |= MB_TYPE_16x8 | MB_TYPE_L0L1 |
                            MB_TYPE_DIRECT2;                /* B_16x8 */
            } else {
                *mb_type |= MB_TYPE_8x8 | MB_TYPE_L0L1;
            }
        } else {                                    //     AFR/FR    -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];

            sub_mb_type = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                          MB_TYPE_DIRECT2;                  /* B_SUB_8x8 */
            if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) {
                *mb_type |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                            MB_TYPE_DIRECT2;                /* B_16x16 */
            } else if (!is_b8x8 &&
                       (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) {
                *mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 |
                            (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
            } else {
                if (!h->sps.direct_8x8_inference_flag) {
                    /* FIXME: save sub mb types from previous frames (or derive
                     * from MVs) so we know exactly what block size to use */
                    sub_mb_type = MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                                  MB_TYPE_DIRECT2;          /* B_SUB_4x4 */
                }
                *mb_type |= MB_TYPE_8x8 | MB_TYPE_L0L1;
            }
        }
    }

    await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
    l1mv0  = &h->ref_list[1][0].motion_val[0][h->mb2b_xy[mb_xy]];
    l1mv1  = &h->ref_list[1][0].motion_val[1][h->mb2b_xy[mb_xy]];
    l1ref0 = &h->ref_list[1][0].ref_index[0][4 * mb_xy];
    l1ref1 = &h->ref_list[1][0].ref_index[1][4 * mb_xy];
    if (!b8_stride) {
        if (h->mb_y & 1) {
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  += 2 * b4_stride;
            l1mv1  += 2 * b4_stride;
        }
        b8_stride = 2;
    }

    {
        const int *map_col_to_list0[2] = { h->map_col_to_list0[0],
                                           h->map_col_to_list0[1] };
        const int *dist_scale_factor = h->dist_scale_factor;
        int ref_offset;

        if (FRAME_MBAFF(h) && IS_INTERLACED(*mb_type)) {
            map_col_to_list0[0] = h->map_col_to_list0_field[h->mb_y & 1][0];
            map_col_to_list0[1] = h->map_col_to_list0_field[h->mb_y & 1][1];
            dist_scale_factor   = h->dist_scale_factor_field[h->mb_y & 1];
        }
        ref_offset = (h->ref_list[1][0].mbaff << 4) & (mb_type_col[0] >> 3);
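        /* ref_offset becomes 16 when the colocated picture is MBAFF and this
         * colocated macroblock is field coded (MB_TYPE_INTERLACED >> 3 lands
         * on the same bit as mbaff << 4), selecting the field entries that
         * fill_colmap() stored at map[...][16] and above. */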
        if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
            int y_shift = 2 * !IS_INTERLACED(*mb_type);
            assert(h->sps.direct_8x8_inference_flag);

            for (i8 = 0; i8 < 4; i8++) {
                const int x8 = i8 & 1;
                const int y8 = i8 >> 1;
                int ref0, scale;
                const int16_t (*l1mv)[2] = l1mv0;

                if (is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;

                fill_rectangle(&h->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                if (IS_INTRA(mb_type_col[y8])) {
                    fill_rectangle(&h->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    continue;
                }

                ref0 = l1ref0[x8 + y8 * b8_stride];
                if (ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else {
                    ref0 = map_col_to_list0[1][l1ref1[x8 + y8 * b8_stride] +
                                               ref_offset];
                    l1mv = l1mv1;
                }
                scale = dist_scale_factor[ref0];
                fill_rectangle(&h->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                               ref0, 1);

                {
                    const int16_t *mv_col = l1mv[x8 * 3 + y8 * b4_stride];
                    int my_col            = (mv_col[1] << y_shift) / 2;
                    int mx                = (scale * mv_col[0] + 128) >> 8;
                    int my                = (scale * my_col    + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx, my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx - mv_col[0], my - my_col), 4);
                }
            }
            return;
        }
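        /* The y_shift above converts the colocated vertical component
         * between field and frame luma grids: (mv_col[1] << y_shift) / 2
         * doubles a field vector for a frame-coded current MB and halves a
         * frame vector for a field-coded one, before distance scaling. */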
        /* one-to-one mv scaling */

        if (IS_16X16(*mb_type)) {
            int ref, mv0, mv1;

            fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
            if (IS_INTRA(mb_type_col[0])) {
                ref = mv0 = mv1 = 0;
            } else {
                const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset]
                                                : map_col_to_list0[1][l1ref1[0] + ref_offset];
                const int scale = dist_scale_factor[ref0];
                const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
                int mv_l0[2];
                mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                ref      = ref0;
                mv0      = pack16to32(mv_l0[0], mv_l0[1]);
                mv1      = pack16to32(mv_l0[0] - mv_col[0], mv_l0[1] - mv_col[1]);
            }
            fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
            fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
            fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
        } else {
            for (i8 = 0; i8 < 4; i8++) {
                const int x8 = i8 & 1;
                const int y8 = i8 >> 1;
                int ref0, scale;
                const int16_t (*l1mv)[2] = l1mv0;

                if (is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;
                fill_rectangle(&h->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                if (IS_INTRA(mb_type_col[0])) {
                    fill_rectangle(&h->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    continue;
                }

                assert(b8_stride == 2);
                ref0 = l1ref0[i8];
                if (ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else {
                    ref0 = map_col_to_list0[1][l1ref1[i8] + ref_offset];
                    l1mv = l1mv1;
                }
                scale = dist_scale_factor[ref0];

                fill_rectangle(&h->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                               ref0, 1);
                if (IS_SUB_8X8(sub_mb_type)) {
                    const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride];
                    int mx                = (scale * mv_col[0] + 128) >> 8;
                    int my                = (scale * mv_col[1] + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx, my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx - mv_col[0], my - mv_col[1]), 4);
                } else {
                    for (i4 = 0; i4 < 4; i4++) {
                        const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) +
                                                     (y8 * 2 + (i4 >> 1)) * b4_stride];
                        int16_t *mv_l0 = h->mv_cache[0][scan8[i8 * 4 + i4]];
                        mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                        mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                        AV_WN32A(h->mv_cache[1][scan8[i8 * 4 + i4]],
                                 pack16to32(mv_l0[0] - mv_col[0],
                                            mv_l0[1] - mv_col[1]));
                    }
                }
            }
        }
    }
}
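/**
 * Motion vector prediction for direct macroblocks/partitions, dispatching
 * on the slice-header flag direct_spatial_mv_pred between the two modes
 * implemented above.
 */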
void ff_h264_pred_direct_motion(H264Context *const h, int *mb_type)
{
    if (h->direct_spatial_mv_pred)
        pred_spatial_direct_motion(h, mb_type);
    else
        pred_temp_direct_motion(h, mb_type);
}