X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=common%2Fmacroblock.c;h=e0307e13df6724f2d98ff253357c148b366f21b1;hb=665c02975800d1f6d44d2250f869ccfe78405c19;hp=49804e587548a4ef58543ebde123c9edb8b4b85a;hpb=1ab45c8f7411f7b4453ddff66919910e823ed33b;p=x264 diff --git a/common/macroblock.c b/common/macroblock.c index 49804e58..e0307e13 100644 --- a/common/macroblock.c +++ b/common/macroblock.c @@ -21,97 +21,7 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. *****************************************************************************/ -#include -#include -#include - #include "common.h" -#include "macroblock.h" - -static const uint8_t block_idx_x[16] = -{ - 0, 1, 0, 1, 2, 3, 2, 3, 0, 1, 0, 1, 2, 3, 2, 3 -}; -static const uint8_t block_idx_y[16] = -{ - 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3 -}; -static const uint8_t block_idx_xy[4][4] = -{ - { 0, 2, 8, 10}, - { 1, 3, 9, 11}, - { 4, 6, 12, 14}, - { 5, 7, 13, 15} -}; - -static const int dequant_mf[6][4][4] = -{ - { {10, 13, 10, 13}, {13, 16, 13, 16}, {10, 13, 10, 13}, {13, 16, 13, 16} }, - { {11, 14, 11, 14}, {14, 18, 14, 18}, {11, 14, 11, 14}, {14, 18, 14, 18} }, - { {13, 16, 13, 16}, {16, 20, 16, 20}, {13, 16, 13, 16}, {16, 20, 16, 20} }, - { {14, 18, 14, 18}, {18, 23, 18, 23}, {14, 18, 14, 18}, {18, 23, 18, 23} }, - { {16, 20, 16, 20}, {20, 25, 20, 25}, {16, 20, 16, 20}, {20, 25, 20, 25} }, - { {18, 23, 18, 23}, {23, 29, 23, 29}, {18, 23, 18, 23}, {23, 29, 23, 29} } -}; - -static const int dequant8_mf[6][8][8] = -{ - { - {20, 19, 25, 19, 20, 19, 25, 19}, - {19, 18, 24, 18, 19, 18, 24, 18}, - {25, 24, 32, 24, 25, 24, 32, 24}, - {19, 18, 24, 18, 19, 18, 24, 18}, - {20, 19, 25, 19, 20, 19, 25, 19}, - {19, 18, 24, 18, 19, 18, 24, 18}, - {25, 24, 32, 24, 25, 24, 32, 24}, - {19, 18, 24, 18, 19, 18, 24, 18} - }, { - {22, 21, 28, 21, 22, 21, 28, 21}, - {21, 19, 26, 19, 21, 19, 26, 19}, - {28, 26, 35, 26, 28, 26, 35, 26}, - {21, 19, 26, 19, 21, 19, 26, 19}, - {22, 21, 28, 21, 22, 21, 28, 21}, - {21, 19, 26, 19, 21, 19, 26, 19}, - {28, 26, 35, 26, 28, 26, 35, 26}, - {21, 19, 26, 19, 21, 19, 26, 19} - }, { - {26, 24, 33, 24, 26, 24, 33, 24}, - {24, 23, 31, 23, 24, 23, 31, 23}, - {33, 31, 42, 31, 33, 31, 42, 31}, - {24, 23, 31, 23, 24, 23, 31, 23}, - {26, 24, 33, 24, 26, 24, 33, 24}, - {24, 23, 31, 23, 24, 23, 31, 23}, - {33, 31, 42, 31, 33, 31, 42, 31}, - {24, 23, 31, 23, 24, 23, 31, 23} - }, { - {28, 26, 35, 26, 28, 26, 35, 26}, - {26, 25, 33, 25, 26, 25, 33, 25}, - {35, 33, 45, 33, 35, 33, 45, 33}, - {26, 25, 33, 25, 26, 25, 33, 25}, - {28, 26, 35, 26, 28, 26, 35, 26}, - {26, 25, 33, 25, 26, 25, 33, 25}, - {35, 33, 45, 33, 35, 33, 45, 33}, - {26, 25, 33, 25, 26, 25, 33, 25} - }, { - {32, 30, 40, 30, 32, 30, 40, 30}, - {30, 28, 38, 28, 30, 28, 38, 28}, - {40, 38, 51, 38, 40, 38, 51, 38}, - {30, 28, 38, 28, 30, 28, 38, 28}, - {32, 30, 40, 30, 32, 30, 40, 30}, - {30, 28, 38, 28, 30, 28, 38, 28}, - {40, 38, 51, 38, 40, 38, 51, 38}, - {30, 28, 38, 28, 30, 28, 38, 28} - }, { - {36, 34, 46, 34, 36, 34, 46, 34}, - {34, 32, 43, 32, 34, 32, 43, 32}, - {46, 43, 58, 43, 46, 43, 58, 43}, - {34, 32, 43, 32, 34, 32, 43, 32}, - {36, 34, 46, 34, 36, 34, 46, 34}, - {34, 32, 43, 32, 34, 32, 43, 32}, - {46, 43, 58, 43, 46, 43, 58, 43}, - {34, 32, 43, 32, 34, 32, 43, 32} - } -}; int x264_mb_predict_intra4x4_mode( x264_t *h, int idx ) { @@ -140,133 +50,41 @@ int x264_mb_predict_non_zero_code( x264_t *h, int idx ) return i_ret & 0x7f; } -int x264_mb_transform_8x8_allowed( x264_t *h, int i_mb_type ) +int x264_mb_transform_8x8_allowed( 
x264_t *h ) { - int i; - if( i_mb_type == P_8x8 || i_mb_type == B_8x8 ) - { - for( i = 0; i < 4; i++ ) - if( !IS_SUB8x8(h->mb.i_sub_partition[i]) - || ( h->mb.i_sub_partition[i] == D_DIRECT_8x8 && !h->sps->b_direct8x8_inference ) ) - { - return 0; - } - } - if( i_mb_type == B_DIRECT && !h->sps->b_direct8x8_inference ) + // intra and skip are disallowed + // large partitions are allowed + // direct and 8x8 are conditional + static const uint8_t partition_tab[X264_MBTYPE_MAX] = { + 0,0,0,0,1,2,0,2,1,1,1,1,1,1,1,1,1,2,0, + }; + int p, i; + + if( !h->pps->b_transform_8x8_mode ) return 0; - - return 1; -} - -/**************************************************************************** - * Scan and Quant functions - ****************************************************************************/ -void x264_mb_dequant_2x2_dc( int16_t dct[2][2], int i_qscale ) -{ - const int i_qbits = i_qscale/6 - 1; - - if( i_qbits >= 0 ) - { - const int i_dmf = dequant_mf[i_qscale%6][0][0] << i_qbits; - - dct[0][0] = dct[0][0] * i_dmf; - dct[0][1] = dct[0][1] * i_dmf; - dct[1][0] = dct[1][0] * i_dmf; - dct[1][1] = dct[1][1] * i_dmf; - } - else - { - const int i_dmf = dequant_mf[i_qscale%6][0][0]; - - dct[0][0] = ( dct[0][0] * i_dmf ) >> 1; - dct[0][1] = ( dct[0][1] * i_dmf ) >> 1; - dct[1][0] = ( dct[1][0] * i_dmf ) >> 1; - dct[1][1] = ( dct[1][1] * i_dmf ) >> 1; - } -} - -void x264_mb_dequant_4x4_dc( int16_t dct[4][4], int i_qscale ) -{ - const int i_qbits = i_qscale/6 - 2; - int x,y; - - if( i_qbits >= 0 ) - { - const int i_dmf = dequant_mf[i_qscale%6][0][0] << i_qbits; - - for( y = 0; y < 4; y++ ) - { - for( x = 0; x < 4; x++ ) - { - dct[y][x] = dct[y][x] * i_dmf; - } - } - } - else - { - const int i_dmf = dequant_mf[i_qscale%6][0][0]; - const int f = -i_qbits; // 1 << (-1-i_qbits) - - for( y = 0; y < 4; y++ ) - { - for( x = 0; x < 4; x++ ) - { - dct[y][x] = ( dct[y][x] * i_dmf + f ) >> (-i_qbits); - } - } - } -} - -void x264_mb_dequant_4x4( int16_t dct[4][4], int i_qscale ) -{ - const int i_mf = i_qscale%6; - const int i_qbits = i_qscale/6; - int y; - - for( y = 0; y < 4; y++ ) - { - dct[y][0] = ( dct[y][0] * dequant_mf[i_mf][y][0] ) << i_qbits; - dct[y][1] = ( dct[y][1] * dequant_mf[i_mf][y][1] ) << i_qbits; - dct[y][2] = ( dct[y][2] * dequant_mf[i_mf][y][2] ) << i_qbits; - dct[y][3] = ( dct[y][3] * dequant_mf[i_mf][y][3] ) << i_qbits; - } -} - -void x264_mb_dequant_8x8( int16_t dct[8][8], int i_qscale ) -{ - const int i_mf = i_qscale%6; - int y; - - if( i_qscale >= 12 ) - { - const int i_qbits = (i_qscale/6) - 2; - for( y = 0; y < 8; y++ ) - { - dct[y][0] = ( dct[y][0] * dequant8_mf[i_mf][y][0] ) << i_qbits; - dct[y][1] = ( dct[y][1] * dequant8_mf[i_mf][y][1] ) << i_qbits; - dct[y][2] = ( dct[y][2] * dequant8_mf[i_mf][y][2] ) << i_qbits; - dct[y][3] = ( dct[y][3] * dequant8_mf[i_mf][y][3] ) << i_qbits; - dct[y][4] = ( dct[y][4] * dequant8_mf[i_mf][y][4] ) << i_qbits; - dct[y][5] = ( dct[y][5] * dequant8_mf[i_mf][y][5] ) << i_qbits; - dct[y][6] = ( dct[y][6] * dequant8_mf[i_mf][y][6] ) << i_qbits; - dct[y][7] = ( dct[y][7] * dequant8_mf[i_mf][y][7] ) << i_qbits; - } + p = partition_tab[h->mb.i_type]; + if( p < 2 ) + return p; + else if( h->mb.i_type == B_DIRECT ) + return h->sps->b_direct8x8_inference; + else if( h->mb.i_type == P_8x8 ) + { + if( !(h->param.analyse.inter & X264_ANALYSE_PSUB8x8) ) + return 1; + for( i=0; i<4; i++ ) + if( h->mb.i_sub_partition[i] != D_L0_8x8 ) + return 0; + return 1; } - else + else // B_8x8 { - const int i_qbits = 2 - (i_qscale/6); - const int i_round = i_qbits; // 
1<<(i_qbits-1) - for( y = 0; y < 8; y++ ) - { - dct[y][0] = ( dct[y][0] * dequant8_mf[i_mf][y][0] + i_round ) >> i_qbits; - dct[y][1] = ( dct[y][1] * dequant8_mf[i_mf][y][1] + i_round ) >> i_qbits; - dct[y][2] = ( dct[y][2] * dequant8_mf[i_mf][y][2] + i_round ) >> i_qbits; - dct[y][3] = ( dct[y][3] * dequant8_mf[i_mf][y][3] + i_round ) >> i_qbits; - dct[y][4] = ( dct[y][4] * dequant8_mf[i_mf][y][4] + i_round ) >> i_qbits; - dct[y][5] = ( dct[y][5] * dequant8_mf[i_mf][y][5] + i_round ) >> i_qbits; - dct[y][6] = ( dct[y][6] * dequant8_mf[i_mf][y][6] + i_round ) >> i_qbits; - dct[y][7] = ( dct[y][7] * dequant8_mf[i_mf][y][7] + i_round ) >> i_qbits; - } + // x264 currently doesn't use sub-8x8 B partitions, so don't check for them + if( h->sps->b_direct8x8_inference ) + return 1; + for( i=0; i<4; i++ ) + if( h->mb.i_sub_partition[i] == D_DIRECT_8x8 ) + return 0; + return 1; } } @@ -441,52 +259,91 @@ static int x264_mb_predict_mv_direct16x16_temporal( x264_t *h ) { int i_mb_4x4 = 16 * h->mb.i_mb_stride * h->mb.i_mb_y + 4 * h->mb.i_mb_x; int i_mb_8x8 = 4 * h->mb.i_mb_stride * h->mb.i_mb_y + 2 * h->mb.i_mb_x; - int i; + int i8, i4; + int b8x8; + const int type_col = h->fref1[0]->mb_type[ h->mb.i_mb_xy ]; x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, 0 ); - if( IS_INTRA( h->fref1[0]->mb_type[ h->mb.i_mb_xy ] ) ) + if( IS_INTRA( type_col ) ) { x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, 0 ); x264_macroblock_cache_mv( h, 0, 0, 4, 4, 0, 0, 0 ); x264_macroblock_cache_mv( h, 0, 0, 4, 4, 1, 0, 0 ); return 1; } + b8x8 = h->sps->b_direct8x8_inference || + (type_col != P_8x8 && type_col != B_SKIP && type_col != B_DIRECT && type_col != B_8x8); - /* FIXME: optimize per block size */ - for( i = 0; i < 4; i++ ) + for( i8 = 0; i8 < 4; i8++ ) { - const int x8 = 2*(i%2); - const int y8 = 2*(i/2); - const int i_part_8x8 = i_mb_8x8 + x8/2 + y8 * h->mb.i_mb_stride; + const int x8 = i8%2; + const int y8 = i8/2; + const int i_part_8x8 = i_mb_8x8 + x8 + y8 * h->mb.i_b8_stride; const int i_ref = h->mb.map_col_to_list0[ h->fref1[0]->ref[0][ i_part_8x8 ] ]; if( i_ref >= 0 ) { const int dist_scale_factor = h->mb.dist_scale_factor[i_ref][0]; - int x4, y4; - x264_macroblock_cache_ref( h, x8, y8, 2, 2, 0, i_ref ); + x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, i_ref ); - for( y4 = y8; y4 < y8+2; y4++ ) - for( x4 = x8; x4 < x8+2; x4++ ) + if( b8x8 ) + { + const int16_t *mv_col = h->fref1[0]->mv[0][ i_mb_4x4 + 3*x8 + 3*y8 * h->mb.i_b4_stride]; + int mv_l0[2]; + mv_l0[0] = ( dist_scale_factor * mv_col[0] + 128 ) >> 8; + mv_l0[1] = ( dist_scale_factor * mv_col[1] + 128 ) >> 8; + x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, mv_l0[0], mv_l0[1] ); + x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 1, mv_l0[0] - mv_col[0], mv_l0[1] - mv_col[1] ); + } + else + { + for( i4 = 0; i4 < 4; i4++ ) { - const int16_t *mv_col = h->fref1[0]->mv[0][ i_mb_4x4 + x4 + y4 * 4 * h->mb.i_mb_stride ]; + const int x4 = i4%2 + 2*x8; + const int y4 = i4/2 + 2*y8; + const int16_t *mv_col = h->fref1[0]->mv[0][ i_mb_4x4 + x4 + y4 * h->mb.i_b4_stride ]; int mv_l0[2]; mv_l0[0] = ( dist_scale_factor * mv_col[0] + 128 ) >> 8; mv_l0[1] = ( dist_scale_factor * mv_col[1] + 128 ) >> 8; x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, mv_l0[0], mv_l0[1] ); x264_macroblock_cache_mv( h, x4, y4, 1, 1, 1, mv_l0[0] - mv_col[0], mv_l0[1] - mv_col[1] ); } + } } else { - /* the colocated ref isn't in the current list0 */ + /* the collocated ref isn't in the current list0 */ /* FIXME: we might still be able to use direct_8x8 on some partitions */ + /* FIXME: with 
B-pyramid + extensive ref list reordering + * (not currently used), we would also have to check + * l1mv1 like in spatial mode */ return 0; } } + if( h->param.i_threads > 1 ) + { + int di = b8x8 ? 4 : 1; + for( i4=0; i4<16; i4+=di ) + { + if( h->mb.cache.mv[0][x264_scan8[i4]][1] > h->mb.mv_max_spel[1] + || h->mb.cache.mv[1][x264_scan8[i4]][1] > h->mb.mv_max_spel[1] ) + { +#if 0 + fprintf(stderr, "direct_temporal: (%d,%d) (%d,%d) > %d \n", + h->mb.cache.mv[0][x264_scan8[i4]][0], + h->mb.cache.mv[0][x264_scan8[i4]][1], + h->mb.cache.mv[1][x264_scan8[i4]][0], + h->mb.cache.mv[1][x264_scan8[i4]][1], + h->mb.mv_max_spel[1]); +#endif + return 0; + } + } + } + return 1; } @@ -496,8 +353,12 @@ static int x264_mb_predict_mv_direct16x16_spatial( x264_t *h ) int mv[2][2]; int i_list; int i8, i4; - const int8_t *l1ref = &h->fref1[0]->ref[0][ h->mb.i_b8_xy ]; - const int16_t (*l1mv)[2] = (const int16_t (*)[2]) &h->fref1[0]->mv[0][ h->mb.i_b4_xy ]; + int b8x8; + const int8_t *l1ref0 = &h->fref1[0]->ref[0][ h->mb.i_b8_xy ]; + const int8_t *l1ref1 = &h->fref1[0]->ref[1][ h->mb.i_b8_xy ]; + const int16_t (*l1mv0)[2] = (const int16_t (*)[2]) &h->fref1[0]->mv[0][ h->mb.i_b4_xy ]; + const int16_t (*l1mv1)[2] = (const int16_t (*)[2]) &h->fref1[0]->mv[1][ h->mb.i_b4_xy ]; + const int type_col = h->fref1[0]->mb_type[ h->mb.i_mb_xy ]; for( i_list=0; i_list<2; i_list++ ) { @@ -541,24 +402,58 @@ static int x264_mb_predict_mv_direct16x16_spatial( x264_t *h ) x264_macroblock_cache_mv( h, 0, 0, 4, 4, 0, mv[0][0], mv[0][1] ); x264_macroblock_cache_mv( h, 0, 0, 4, 4, 1, mv[1][0], mv[1][1] ); + if( IS_INTRA( type_col ) ) + return 1; + + if( h->param.i_threads > 1 + && ( mv[0][1] > h->mb.mv_max_spel[1] + || mv[1][1] > h->mb.mv_max_spel[1] ) ) + { +#if 0 + fprintf(stderr, "direct_spatial: (%d,%d) (%d,%d) > %d \n", + mv[0][0], mv[0][1], mv[1][0], mv[1][1], + h->mb.mv_max_spel[1]); +#endif + return 0; + } + + b8x8 = h->sps->b_direct8x8_inference || + (type_col != P_8x8 && type_col != B_SKIP && type_col != B_DIRECT && type_col != B_8x8); + /* col_zero_flag */ for( i8=0; i8<4; i8++ ) { const int x8 = i8%2; const int y8 = i8/2; - if( l1ref[ x8 + y8 * h->mb.i_b8_stride ] == 0 ) + const int o8 = x8 + y8 * h->mb.i_b8_stride; + if( l1ref0[o8] == 0 || ( l1ref0[o8] < 0 && l1ref1[o8] == 0 ) ) { - for( i4=0; i4<4; i4++ ) + const int16_t (*l1mv)[2] = (l1ref0[o8] == 0) ? 
l1mv0 : l1mv1; + if( b8x8 ) { - const int x4 = i4%2 + 2*x8; - const int y4 = i4/2 + 2*y8; - const int16_t *mvcol = l1mv[x4 + y4 * h->mb.i_b4_stride]; + const int16_t *mvcol = l1mv[3*x8 + 3*y8 * h->mb.i_b4_stride]; if( abs( mvcol[0] ) <= 1 && abs( mvcol[1] ) <= 1 ) { if( ref[0] == 0 ) - x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, 0, 0 ); + x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, 0, 0 ); if( ref[1] == 0 ) - x264_macroblock_cache_mv( h, x4, y4, 1, 1, 1, 0, 0 ); + x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 1, 0, 0 ); + } + } + else + { + for( i4=0; i4<4; i4++ ) + { + const int x4 = i4%2 + 2*x8; + const int y4 = i4/2 + 2*y8; + const int16_t *mvcol = l1mv[x4 + y4 * h->mb.i_b4_stride]; + if( abs( mvcol[0] ) <= 1 && abs( mvcol[1] ) <= 1 ) + { + if( ref[0] == 0 ) + x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, 0, 0 ); + if( ref[1] == 0 ) + x264_macroblock_cache_mv( h, x4, y4, 1, 1, 1, 0, 0 ); + } } } } @@ -567,7 +462,7 @@ static int x264_mb_predict_mv_direct16x16_spatial( x264_t *h ) return 1; } -int x264_mb_predict_mv_direct16x16( x264_t *h ) +int x264_mb_predict_mv_direct16x16( x264_t *h, int *b_changed ) { int b_available; if( h->param.analyse.i_direct_mv_pred == X264_DIRECT_PRED_NONE ) @@ -577,6 +472,29 @@ int x264_mb_predict_mv_direct16x16( x264_t *h ) else b_available = x264_mb_predict_mv_direct16x16_temporal( h ); + if( b_changed != NULL && b_available ) + { + int type_col = h->fref1[0]->mb_type[ h->mb.i_mb_xy ]; + if( IS_INTRA(type_col) || type_col == P_SKIP ) + { + *b_changed = h->mb.cache.direct_ref[0][0] != h->mb.cache.ref[0][X264_SCAN8_0] + || h->mb.cache.direct_ref[1][0] != h->mb.cache.ref[1][X264_SCAN8_0] + || *(uint32_t*)h->mb.cache.direct_mv[0][X264_SCAN8_0] != *(uint32_t*)h->mb.cache.mv[0][X264_SCAN8_0] + || *(uint32_t*)h->mb.cache.direct_mv[1][X264_SCAN8_0] != *(uint32_t*)h->mb.cache.mv[1][X264_SCAN8_0]; + } + else + { + int i, l; + *b_changed = 0; + for( l = 0; l < 2; l++ ) + for( i = 0; i < 4; i++ ) + *b_changed |= h->mb.cache.direct_ref[l][i] != h->mb.cache.ref[l][x264_scan8[i*4]]; + *b_changed = *b_changed || memcmp(h->mb.cache.direct_mv, h->mb.cache.mv, sizeof(h->mb.cache.mv)); + } + if( !*b_changed ) + return b_available; + } + /* cache ref & mv */ if( b_available ) { @@ -606,100 +524,133 @@ void x264_mb_load_mv_direct8x8( x264_t *h, int idx ) } } +#define FIXED_SCALE 256 + /* This just improves encoder performance, it's not part of the spec */ -void x264_mb_predict_mv_ref16x16( x264_t *h, int i_list, int i_ref, int mvc[5][2], int *i_mvc ) +void x264_mb_predict_mv_ref16x16( x264_t *h, int i_list, int i_ref, int mvc[8][2], int *i_mvc ) { int16_t (*mvr)[2] = h->mb.mvr[i_list][i_ref]; int i = 0; - /* temporal */ - if( h->sh.i_type == SLICE_TYPE_B ) +#define SET_MVP(mvp) { \ + mvc[i][0] = mvp[0]; \ + mvc[i][1] = mvp[1]; \ + i++; \ + } + + /* b_direct */ + if( h->sh.i_type == SLICE_TYPE_B + && h->mb.cache.ref[i_list][x264_scan8[12]] == i_ref ) { - if( h->mb.cache.ref[i_list][x264_scan8[12]] == i_ref ) - { - /* FIXME: use direct_mv to be clearer? 
*/ - int16_t *mvp = h->mb.cache.mv[i_list][x264_scan8[12]]; - mvc[i][0] = mvp[0]; - mvc[i][1] = mvp[1]; - i++; - } + SET_MVP( h->mb.cache.mv[i_list][x264_scan8[12]] ); } - /* spatial */ + /* spatial predictors */ if( h->mb.i_neighbour & MB_LEFT ) { int i_mb_l = h->mb.i_mb_xy - 1; /* skip MBs didn't go through the whole search process, so mvr is undefined */ if( !IS_SKIP( h->mb.type[i_mb_l] ) ) - { - mvc[i][0] = mvr[i_mb_l][0]; - mvc[i][1] = mvr[i_mb_l][1]; - i++; - } + SET_MVP( mvr[i_mb_l] ); } if( h->mb.i_neighbour & MB_TOP ) { - int i_mb_t = h->mb.i_mb_xy - h->mb.i_mb_stride; + int i_mb_t = h->mb.i_mb_top_xy; if( !IS_SKIP( h->mb.type[i_mb_t] ) ) - { - mvc[i][0] = mvr[i_mb_t][0]; - mvc[i][1] = mvr[i_mb_t][1]; - i++; - } + SET_MVP( mvr[i_mb_t] ); if( h->mb.i_neighbour & MB_TOPLEFT && !IS_SKIP( h->mb.type[i_mb_t - 1] ) ) - { - mvc[i][0] = mvr[i_mb_t - 1][0]; - mvc[i][1] = mvr[i_mb_t - 1][1]; - i++; - } + SET_MVP( mvr[i_mb_t-1] ); if( h->mb.i_mb_x < h->mb.i_mb_stride - 1 && !IS_SKIP( h->mb.type[i_mb_t + 1] ) ) - { - mvc[i][0] = mvr[i_mb_t + 1][0]; - mvc[i][1] = mvr[i_mb_t + 1][1]; - i++; + SET_MVP( mvr[i_mb_t+1] ); + } +#undef SET_MVP + + /* temporal predictors */ + /* FIXME temporal scaling w/ interlace */ + if( h->fref0[0]->i_ref[0] > 0 && !h->sh.b_mbaff ) + { + x264_frame_t *l0 = h->fref0[0]; + +#define SET_TMVP(dx, dy) { \ + int i_b4 = h->mb.i_b4_xy + dx*4 + dy*4*h->mb.i_b4_stride; \ + int i_b8 = h->mb.i_b8_xy + dx*2 + dy*2*h->mb.i_b8_stride; \ + int ref_col = l0->ref[0][i_b8]; \ + if( ref_col >= 0 ) \ + { \ + int scale = (h->fdec->i_poc - h->fdec->ref_poc[0][i_ref]) * l0->inv_ref_poc[ref_col];\ + mvc[i][0] = l0->mv[0][i_b4][0] * scale / FIXED_SCALE; \ + mvc[i][1] = l0->mv[0][i_b4][1] * scale / FIXED_SCALE; \ + i++; \ + } \ } + + SET_TMVP(0,0); + if( h->mb.i_mb_x < h->sps->i_mb_width-1 ) + SET_TMVP(1,0); + if( h->mb.i_mb_y < h->sps->i_mb_height-1 ) + SET_TMVP(0,1); +#undef SET_TMVP } + *i_mvc = i; } +/* Set up a lookup table for delta pocs to reduce an IDIV to an IMUL */ +static void setup_inverse_delta_pocs( x264_t *h ) +{ + int i; + for( i = 0; i < h->i_ref0; i++ ) + { + int delta = h->fdec->i_poc - h->fref0[i]->i_poc; + h->fdec->inv_ref_poc[i] = (FIXED_SCALE + delta/2) / delta; + } +} + static inline void x264_mb_mc_0xywh( x264_t *h, int x, int y, int width, int height ) { const int i8 = x264_scan8[0]+x+8*y; const int i_ref = h->mb.cache.ref[0][i8]; const int mvx = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ); - const int mvy = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ); + int mvy = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ); - h->mc.mc_luma( h->mb.pic.p_fref[0][i_ref], h->mb.pic.i_stride[0], - &h->mb.pic.p_fdec[0][4*y * h->mb.pic.i_stride[0]+4*x], h->mb.pic.i_stride[0], - mvx + 4*4*x, mvy + 4*4*y, 4*width, 4*height ); + h->mc.mc_luma( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, + h->mb.pic.p_fref[0][i_ref], h->mb.pic.i_stride[0], + mvx + 4*4*x, mvy + 4*4*y, 4*width, 4*height ); - h->mc.mc_chroma( &h->mb.pic.p_fref[0][i_ref][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1], - &h->mb.pic.p_fdec[1][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1], - mvx, mvy, 2*width, 2*height ); + // chroma is offset if MCing from a field of opposite parity + if( h->mb.b_interlaced & i_ref ) + mvy += (h->mb.i_mb_y & 1)*4 - 2; - h->mc.mc_chroma( &h->mb.pic.p_fref[0][i_ref][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2], - &h->mb.pic.p_fdec[2][2*y*h->mb.pic.i_stride[2]+2*x], 
h->mb.pic.i_stride[2], - mvx, mvy, 2*width, 2*height ); + h->mc.mc_chroma( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, + &h->mb.pic.p_fref[0][i_ref][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1], + mvx, mvy, 2*width, 2*height ); + + h->mc.mc_chroma( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, + &h->mb.pic.p_fref[0][i_ref][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2], + mvx, mvy, 2*width, 2*height ); } static inline void x264_mb_mc_1xywh( x264_t *h, int x, int y, int width, int height ) { const int i8 = x264_scan8[0]+x+8*y; const int i_ref = h->mb.cache.ref[1][i8]; const int mvx = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ); - const int mvy = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ); + int mvy = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ); + + h->mc.mc_luma( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, + h->mb.pic.p_fref[1][i_ref], h->mb.pic.i_stride[0], + mvx + 4*4*x, mvy + 4*4*y, 4*width, 4*height ); - h->mc.mc_luma( h->mb.pic.p_fref[1][i_ref], h->mb.pic.i_stride[0], - &h->mb.pic.p_fdec[0][4*y *h->mb.pic.i_stride[0]+4*x], h->mb.pic.i_stride[0], - mvx + 4*4*x, mvy + 4*4*y, 4*width, 4*height ); + if( h->mb.b_interlaced & i_ref ) + mvy += (h->mb.i_mb_y & 1)*4 - 2; - h->mc.mc_chroma( &h->mb.pic.p_fref[1][i_ref][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1], - &h->mb.pic.p_fdec[1][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1], - mvx, mvy, 2*width, 2*height ); + h->mc.mc_chroma( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, + &h->mb.pic.p_fref[1][i_ref][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1], + mvx, mvy, 2*width, 2*height ); - h->mc.mc_chroma( &h->mb.pic.p_fref[1][i_ref][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2], - &h->mb.pic.p_fdec[2][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2], - mvx, mvy, 2*width, 2*height ); + h->mc.mc_chroma( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, + &h->mb.pic.p_fref[1][i_ref][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2], + mvx, mvy, 2*width, 2*height ); } static inline void x264_mb_mc_01xywh( x264_t *h, int x, int y, int width, int height ) @@ -708,41 +659,44 @@ static inline void x264_mb_mc_01xywh( x264_t *h, int x, int y, int width, int he const int i_ref1 = h->mb.cache.ref[1][i8]; const int mvx1 = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ); - const int mvy1 = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ); + int mvy1 = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ); DECLARE_ALIGNED( uint8_t, tmp[16*16], 16 ); int i_mode = x264_size2pixel[height][width]; x264_mb_mc_0xywh( h, x, y, width, height ); - h->mc.mc_luma( h->mb.pic.p_fref[1][i_ref1], h->mb.pic.i_stride[0], - tmp, 16, mvx1 + 4*4*x, mvy1 + 4*4*y, 4*width, 4*height ); + h->mc.mc_luma( tmp, 16, h->mb.pic.p_fref[1][i_ref1], h->mb.pic.i_stride[0], + mvx1 + 4*4*x, mvy1 + 4*4*y, 4*width, 4*height ); + + if( h->mb.b_interlaced & i_ref1 ) + mvy1 += (h->mb.i_mb_y & 1)*4 - 2; if( h->param.analyse.b_weighted_bipred ) { const int i_ref0 = h->mb.cache.ref[0][i8]; const int weight = h->mb.bipred_weight[i_ref0][i_ref1]; - h->pixf.avg_weight[i_mode]( &h->mb.pic.p_fdec[0][4*y *h->mb.pic.i_stride[0]+4*x], h->mb.pic.i_stride[0], tmp, 16, weight ); + h->mc.avg_weight[i_mode]( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, tmp, 16, weight ); - h->mc.mc_chroma( 
&h->mb.pic.p_fref[1][i_ref1][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1], - tmp, 16, mvx1, mvy1, 2*width, 2*height ); - h->pixf.avg_weight[i_mode+3]( &h->mb.pic.p_fdec[1][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1], tmp, 16, weight ); + h->mc.mc_chroma( tmp, 16, &h->mb.pic.p_fref[1][i_ref1][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1], + mvx1, mvy1, 2*width, 2*height ); + h->mc.avg_weight[i_mode+3]( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp, 16, weight ); - h->mc.mc_chroma( &h->mb.pic.p_fref[1][i_ref1][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2], - tmp, 16, mvx1, mvy1, 2*width, 2*height ); - h->pixf.avg_weight[i_mode+3]( &h->mb.pic.p_fdec[2][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2], tmp, 16, weight ); + h->mc.mc_chroma( tmp, 16, &h->mb.pic.p_fref[1][i_ref1][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2], + mvx1, mvy1, 2*width, 2*height ); + h->mc.avg_weight[i_mode+3]( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp, 16, weight ); } else { - h->pixf.avg[i_mode]( &h->mb.pic.p_fdec[0][4*y *h->mb.pic.i_stride[0]+4*x], h->mb.pic.i_stride[0], tmp, 16 ); + h->mc.avg[i_mode]( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, tmp, 16 ); - h->mc.mc_chroma( &h->mb.pic.p_fref[1][i_ref1][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1], - tmp, 16, mvx1, mvy1, 2*width, 2*height ); - h->pixf.avg[i_mode+3]( &h->mb.pic.p_fdec[1][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1], tmp, 16 ); + h->mc.mc_chroma( tmp, 16, &h->mb.pic.p_fref[1][i_ref1][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1], + mvx1, mvy1, 2*width, 2*height ); + h->mc.avg[i_mode+3]( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp, 16 ); - h->mc.mc_chroma( &h->mb.pic.p_fref[1][i_ref1][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2], - tmp, 16, mvx1, mvy1, 2*width, 2*height ); - h->pixf.avg[i_mode+3]( &h->mb.pic.p_fdec[2][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2], tmp, 16 ); + h->mc.mc_chroma( tmp, 16, &h->mb.pic.p_fref[1][i_ref1][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2], + mvx1, mvy1, 2*width, 2*height ); + h->mc.avg[i_mode+3]( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp, 16 ); } } @@ -790,6 +744,69 @@ static void x264_mb_mc_direct8x8( x264_t *h, int x, int y ) } } +void x264_mb_mc_8x8( x264_t *h, int i8 ) +{ + const int x = 2*(i8&1); + const int y = 2*(i8>>1); + switch( h->mb.i_sub_partition[i8] ) + { + case D_L0_8x8: + x264_mb_mc_0xywh( h, x, y, 2, 2 ); + break; + case D_L0_8x4: + x264_mb_mc_0xywh( h, x, y+0, 2, 1 ); + x264_mb_mc_0xywh( h, x, y+1, 2, 1 ); + break; + case D_L0_4x8: + x264_mb_mc_0xywh( h, x+0, y, 1, 2 ); + x264_mb_mc_0xywh( h, x+1, y, 1, 2 ); + break; + case D_L0_4x4: + x264_mb_mc_0xywh( h, x+0, y+0, 1, 1 ); + x264_mb_mc_0xywh( h, x+1, y+0, 1, 1 ); + x264_mb_mc_0xywh( h, x+0, y+1, 1, 1 ); + x264_mb_mc_0xywh( h, x+1, y+1, 1, 1 ); + break; + case D_L1_8x8: + x264_mb_mc_1xywh( h, x, y, 2, 2 ); + break; + case D_L1_8x4: + x264_mb_mc_1xywh( h, x, y+0, 2, 1 ); + x264_mb_mc_1xywh( h, x, y+1, 2, 1 ); + break; + case D_L1_4x8: + x264_mb_mc_1xywh( h, x+0, y, 1, 2 ); + x264_mb_mc_1xywh( h, x+1, y, 1, 2 ); + break; + case D_L1_4x4: + x264_mb_mc_1xywh( h, x+0, y+0, 1, 1 ); + x264_mb_mc_1xywh( h, x+1, y+0, 1, 1 ); + x264_mb_mc_1xywh( h, x+0, y+1, 1, 1 ); + x264_mb_mc_1xywh( h, x+1, y+1, 1, 1 ); + break; + case D_BI_8x8: + x264_mb_mc_01xywh( h, x, y, 2, 2 ); + break; + case D_BI_8x4: + x264_mb_mc_01xywh( h, x, y+0, 2, 1 ); + x264_mb_mc_01xywh( h, 
x, y+1, 2, 1 ); + break; + case D_BI_4x8: + x264_mb_mc_01xywh( h, x+0, y, 1, 2 ); + x264_mb_mc_01xywh( h, x+1, y, 1, 2 ); + break; + case D_BI_4x4: + x264_mb_mc_01xywh( h, x+0, y+0, 1, 1 ); + x264_mb_mc_01xywh( h, x+1, y+0, 1, 1 ); + x264_mb_mc_01xywh( h, x+0, y+1, 1, 1 ); + x264_mb_mc_01xywh( h, x+1, y+1, 1, 1 ); + break; + case D_DIRECT_8x8: + x264_mb_mc_direct8x8( h, x, y ); + break; + } +} + void x264_mb_mc( x264_t *h ) { if( h->mb.i_type == P_L0 ) @@ -813,67 +830,7 @@ void x264_mb_mc( x264_t *h ) { int i; for( i = 0; i < 4; i++ ) - { - const int x = 2*(i%2); - const int y = 2*(i/2); - switch( h->mb.i_sub_partition[i] ) - { - case D_L0_8x8: - x264_mb_mc_0xywh( h, x, y, 2, 2 ); - break; - case D_L0_8x4: - x264_mb_mc_0xywh( h, x, y+0, 2, 1 ); - x264_mb_mc_0xywh( h, x, y+1, 2, 1 ); - break; - case D_L0_4x8: - x264_mb_mc_0xywh( h, x+0, y, 1, 2 ); - x264_mb_mc_0xywh( h, x+1, y, 1, 2 ); - break; - case D_L0_4x4: - x264_mb_mc_0xywh( h, x+0, y+0, 1, 1 ); - x264_mb_mc_0xywh( h, x+1, y+0, 1, 1 ); - x264_mb_mc_0xywh( h, x+0, y+1, 1, 1 ); - x264_mb_mc_0xywh( h, x+1, y+1, 1, 1 ); - break; - case D_L1_8x8: - x264_mb_mc_1xywh( h, x, y, 2, 2 ); - break; - case D_L1_8x4: - x264_mb_mc_1xywh( h, x, y+0, 2, 1 ); - x264_mb_mc_1xywh( h, x, y+1, 2, 1 ); - break; - case D_L1_4x8: - x264_mb_mc_1xywh( h, x+0, y, 1, 2 ); - x264_mb_mc_1xywh( h, x+1, y, 1, 2 ); - break; - case D_L1_4x4: - x264_mb_mc_1xywh( h, x+0, y+0, 1, 1 ); - x264_mb_mc_1xywh( h, x+1, y+0, 1, 1 ); - x264_mb_mc_1xywh( h, x+0, y+1, 1, 1 ); - x264_mb_mc_1xywh( h, x+1, y+1, 1, 1 ); - break; - case D_BI_8x8: - x264_mb_mc_01xywh( h, x, y, 2, 2 ); - break; - case D_BI_8x4: - x264_mb_mc_01xywh( h, x, y+0, 2, 1 ); - x264_mb_mc_01xywh( h, x, y+1, 2, 1 ); - break; - case D_BI_4x8: - x264_mb_mc_01xywh( h, x+0, y, 1, 2 ); - x264_mb_mc_01xywh( h, x+1, y, 1, 2 ); - break; - case D_BI_4x4: - x264_mb_mc_01xywh( h, x+0, y+0, 1, 1 ); - x264_mb_mc_01xywh( h, x+1, y+0, 1, 1 ); - x264_mb_mc_01xywh( h, x+0, y+1, 1, 1 ); - x264_mb_mc_01xywh( h, x+1, y+1, 1, 1 ); - break; - case D_DIRECT_8x8: - x264_mb_mc_direct8x8( h, x, y ); - break; - } - } + x264_mb_mc_8x8( h, i ); } else if( h->mb.i_type == B_SKIP || h->mb.i_type == B_DIRECT ) { @@ -924,7 +881,7 @@ void x264_mb_mc( x264_t *h ) } } -void x264_macroblock_cache_init( x264_t *h ) +int x264_macroblock_cache_init( x264_t *h ) { int i, j; int i_mb_count = h->mb.i_mb_count; @@ -933,44 +890,57 @@ void x264_macroblock_cache_init( x264_t *h ) h->mb.i_b8_stride = h->sps->i_mb_width * 2; h->mb.i_b4_stride = h->sps->i_mb_width * 4; - h->mb.qp = x264_malloc( i_mb_count * sizeof(int8_t) ); - h->mb.cbp = x264_malloc( i_mb_count * sizeof(int16_t) ); - h->mb.skipbp = x264_malloc( i_mb_count * sizeof(int8_t) ); - h->mb.mb_transform_size = x264_malloc( i_mb_count * sizeof(int8_t) ); + h->mb.b_interlaced = h->param.b_interlaced; + + CHECKED_MALLOC( h->mb.qp, i_mb_count * sizeof(int8_t) ); + CHECKED_MALLOC( h->mb.cbp, i_mb_count * sizeof(int16_t) ); + CHECKED_MALLOC( h->mb.skipbp, i_mb_count * sizeof(int8_t) ); + CHECKED_MALLOC( h->mb.mb_transform_size, i_mb_count * sizeof(int8_t) ); /* 0 -> 3 top(4), 4 -> 6 : left(3) */ - h->mb.intra4x4_pred_mode = x264_malloc( i_mb_count * 7 * sizeof( int8_t ) ); + CHECKED_MALLOC( h->mb.intra4x4_pred_mode, i_mb_count * 7 * sizeof(int8_t) ); /* all coeffs */ - h->mb.non_zero_count = x264_malloc( i_mb_count * 24 * sizeof( uint8_t ) ); + CHECKED_MALLOC( h->mb.non_zero_count, i_mb_count * 24 * sizeof(uint8_t) ); + CHECKED_MALLOC( h->mb.nnz_backup, h->sps->i_mb_width * 4 * 16 * sizeof(uint8_t) ); if( 
h->param.b_cabac ) { - h->mb.chroma_pred_mode = x264_malloc( i_mb_count * sizeof( int8_t) ); - h->mb.mvd[0] = x264_malloc( 2*16 * i_mb_count * sizeof( int16_t ) ); - h->mb.mvd[1] = x264_malloc( 2*16 * i_mb_count * sizeof( int16_t ) ); + CHECKED_MALLOC( h->mb.chroma_pred_mode, i_mb_count * sizeof(int8_t) ); + CHECKED_MALLOC( h->mb.mvd[0], 2*16 * i_mb_count * sizeof(int16_t) ); + CHECKED_MALLOC( h->mb.mvd[1], 2*16 * i_mb_count * sizeof(int16_t) ); } for( i=0; i<2; i++ ) { - int i_refs = i ? 1 + h->param.b_bframe_pyramid : h->param.i_frame_reference; + int i_refs = X264_MIN(16, (i ? 1 : h->param.i_frame_reference) + h->param.b_bframe_pyramid) << h->param.b_interlaced; for( j=0; j < i_refs; j++ ) - h->mb.mvr[i][j] = x264_malloc( 2 * i_mb_count * sizeof( int16_t ) ); + CHECKED_MALLOC( h->mb.mvr[i][j], 2 * i_mb_count * sizeof(int16_t) ); } - /* init with not avaiable (for top right idx=7,15) */ + for( i=0; i<=h->param.b_interlaced; i++ ) + for( j=0; j<3; j++ ) + { + CHECKED_MALLOC( h->mb.intra_border_backup[i][j], h->fdec->i_stride[j] ); + h->mb.intra_border_backup[i][j] += 8; + } + + /* init with not available (for top right idx=7,15) */ memset( h->mb.cache.ref[0], -2, X264_SCAN8_SIZE * sizeof( int8_t ) ); memset( h->mb.cache.ref[1], -2, X264_SCAN8_SIZE * sizeof( int8_t ) ); + + return 0; +fail: return -1; } void x264_macroblock_cache_end( x264_t *h ) { int i, j; + for( i=0; i<=h->param.b_interlaced; i++ ) + for( j=0; j<3; j++ ) + x264_free( h->mb.intra_border_backup[i][j] - 8 ); for( i=0; i<2; i++ ) - { - int i_refs = i ? 1 + h->param.b_bframe_pyramid : h->param.i_frame_reference; - for( j=0; j < i_refs; j++ ) + for( j=0; j<32; j++ ) x264_free( h->mb.mvr[i][j] ); - } if( h->param.b_cabac ) { x264_free( h->mb.chroma_pred_mode ); @@ -979,6 +949,7 @@ void x264_macroblock_cache_end( x264_t *h ) } x264_free( h->mb.intra4x4_pred_mode ); x264_free( h->mb.non_zero_count ); + x264_free( h->mb.nnz_backup ); x264_free( h->mb.mb_transform_size ); x264_free( h->mb.skipbp ); x264_free( h->mb.cbp ); @@ -1019,56 +990,49 @@ void x264_macroblock_slice_init( x264_t *h ) } if( h->sh.i_type == SLICE_TYPE_P ) memset( h->mb.cache.skip, 0, X264_SCAN8_SIZE * sizeof( int8_t ) ); + + setup_inverse_delta_pocs( h ); } +void x264_prefetch_fenc( x264_t *h, x264_frame_t *fenc, int i_mb_x, int i_mb_y ) +{ + int stride_y = fenc->i_stride[0]; + int stride_uv = fenc->i_stride[1]; + int off_y = 16 * (i_mb_x + i_mb_y * stride_y); + int off_uv = 8 * (i_mb_x + i_mb_y * stride_uv); + h->mc.prefetch_fenc( fenc->plane[0]+off_y, stride_y, + fenc->plane[1+(i_mb_x&1)]+off_uv, stride_uv, i_mb_x ); +} void x264_macroblock_cache_load( x264_t *h, int i_mb_x, int i_mb_y ) { - const int i_mb_4x4 = 4*(i_mb_y * h->mb.i_b4_stride + i_mb_x); - const int i_mb_8x8 = 2*(i_mb_y * h->mb.i_b8_stride + i_mb_x); - int i_mb_xy = i_mb_y * h->mb.i_mb_stride + i_mb_x; - int i_top_xy = i_mb_xy - h->mb.i_mb_stride; + int i_mb_4x4 = 4*(i_mb_y * h->mb.i_b4_stride + i_mb_x); + int i_mb_8x8 = 2*(i_mb_y * h->mb.i_b8_stride + i_mb_x); + int i_top_y = i_mb_y - (1 << h->mb.b_interlaced); + int i_top_xy = i_top_y * h->mb.i_mb_stride + i_mb_x; + int i_top_4x4 = (4*i_top_y+3) * h->mb.i_b4_stride + 4*i_mb_x; + int i_top_8x8 = (2*i_top_y+1) * h->mb.i_b8_stride + 2*i_mb_x; int i_left_xy = -1; int i_top_type = -1; /* gcc warn */ int i_left_type= -1; int i; + assert( h->mb.i_b8_stride == 2*h->mb.i_mb_stride ); + assert( h->mb.i_b4_stride == 4*h->mb.i_mb_stride ); + /* init index */ h->mb.i_mb_x = i_mb_x; h->mb.i_mb_y = i_mb_y; h->mb.i_mb_xy = i_mb_xy; h->mb.i_b8_xy = i_mb_8x8; 
h->mb.i_b4_xy = i_mb_4x4;
+    h->mb.i_mb_top_xy = i_top_xy;
     h->mb.i_neighbour = 0;

-    /* load picture pointers */
-    for( i = 0; i < 3; i++ )
-    {
-        const int w = (i == 0 ? 16 : 8);
-        const int i_stride = h->fdec->i_stride[i];
-        int j;
-
-        h->mb.pic.i_stride[i] = i_stride;
-
-        h->mb.pic.p_fenc[i] = &h->fenc->plane[i][ w * ( i_mb_x + i_mb_y * i_stride )];
-        h->mb.pic.p_fdec[i] = &h->fdec->plane[i][ w * ( i_mb_x + i_mb_y * i_stride )];
-
-        for( j = 0; j < h->i_ref0; j++ )
-        {
-            h->mb.pic.p_fref[0][j][i==0 ? 0:i+3] = &h->fref0[j]->plane[i][ w * ( i_mb_x + i_mb_y * i_stride )];
-            h->mb.pic.p_fref[0][j][i+1] = &h->fref0[j]->filtered[i+1][ 16 * ( i_mb_x + i_mb_y * h->fdec->i_stride[0] )];
-        }
-        for( j = 0; j < h->i_ref1; j++ )
-        {
-            h->mb.pic.p_fref[1][j][i==0 ? 0:i+3] = &h->fref1[j]->plane[i][ w * ( i_mb_x + i_mb_y * i_stride )];
-            h->mb.pic.p_fref[1][j][i+1] = &h->fref1[j]->filtered[i+1][ 16 * ( i_mb_x + i_mb_y * h->fdec->i_stride[0] )];
-        }
-    }
-
     /* load cache */
-    if( i_mb_xy >= h->sh.i_first_mb + h->mb.i_mb_stride )
+    if( i_top_xy >= h->sh.i_first_mb )
     {
         h->mb.i_mb_type_top =
         i_top_type= h->mb.type[i_top_xy];
@@ -1176,14 +1140,88 @@ void x264_macroblock_cache_load( x264_t *h, int i_mb_x, int i_mb_y )
     else
         h->mb.i_mb_type_topleft = -1;

-    if( h->param.analyse.b_transform_8x8 )
+    if( h->pps->b_transform_8x8_mode )
+    {
+        h->mb.cache.i_neighbour_transform_size =
+            ( i_left_type >= 0 && h->mb.mb_transform_size[i_left_xy] )
+          + ( i_top_type >= 0 && h->mb.mb_transform_size[i_top_xy] );
+    }
+
+    if( h->sh.b_mbaff )
+    {
+        h->mb.pic.i_fref[0] = h->i_ref0 << h->mb.b_interlaced;
+        h->mb.pic.i_fref[1] = h->i_ref1 << h->mb.b_interlaced;
+        h->mb.cache.i_neighbour_interlaced =
+            !!(h->mb.i_neighbour & MB_LEFT)
+          + !!(h->mb.i_neighbour & MB_TOP);
+    }
+
+    /* fdec:      fenc:
+     * yyyyyyy
+     * yYYYY      YYYY
+     * yYYYY      YYYY
+     * yYYYY      YYYY
+     * yYYYY      YYYY
+     * uuu vvv    UUVV
+     * uUU vVV    UUVV
+     * uUU vVV
+     */
+    h->mb.pic.p_fenc[0] = h->mb.pic.fenc_buf;
+    h->mb.pic.p_fenc[1] = h->mb.pic.fenc_buf + 16*FENC_STRIDE;
+    h->mb.pic.p_fenc[2] = h->mb.pic.fenc_buf + 16*FENC_STRIDE + 8;
+    h->mb.pic.p_fdec[0] = h->mb.pic.fdec_buf + 2*FDEC_STRIDE;
+    h->mb.pic.p_fdec[1] = h->mb.pic.fdec_buf + 19*FDEC_STRIDE;
+    h->mb.pic.p_fdec[2] = h->mb.pic.fdec_buf + 19*FDEC_STRIDE + 16;
+
+    /* load picture pointers */
+    for( i = 0; i < 3; i++ )
+    {
+        const int w = (i == 0 ? 16 : 8);
+        const int i_stride = h->fdec->i_stride[i];
+        const int i_stride2 = i_stride << h->mb.b_interlaced;
+        const int i_pix_offset = h->mb.b_interlaced
+                               ? w * (i_mb_x + (i_mb_y&~1) * i_stride) + (i_mb_y&1) * i_stride
+                               : w * (i_mb_x + i_mb_y * i_stride);
+        int ref_pix_offset[2] = { i_pix_offset, i_pix_offset };
+        const uint8_t *plane_fdec = &h->fdec->plane[i][i_pix_offset];
+        const uint8_t *intra_fdec = &h->mb.intra_border_backup[i_mb_y & h->sh.b_mbaff][i][i_mb_x*16>>!!i];
+        x264_frame_t **fref[2] = { h->fref0, h->fref1 };
+        int j, k, l;
+
+        if( h->mb.b_interlaced )
+            ref_pix_offset[1] += (1-2*(i_mb_y&1)) * i_stride;
+
+        h->mb.pic.i_stride[i] = i_stride2;
+
+        h->mc.copy[i?PIXEL_8x8:PIXEL_16x16]( h->mb.pic.p_fenc[i], FENC_STRIDE,
+            &h->fenc->plane[i][i_pix_offset], i_stride2, w );
+        memcpy( &h->mb.pic.p_fdec[i][-1-FDEC_STRIDE], intra_fdec-1, w*3/2+1 );
+        for( j = 0; j < w; j++ )
+            h->mb.pic.p_fdec[i][-1+j*FDEC_STRIDE] = plane_fdec[-1+j*i_stride2];
+
+        for( l=0; l<2; l++ )
+        {
+            for( j=0; j<h->mb.pic.i_fref[l]; j++ )
+            {
+                h->mb.pic.p_fref[l][j][i==0 ? 
0:i+3] = &fref[l][j >> h->mb.b_interlaced]->plane[i][ref_pix_offset[j&1]]; + if( i == 0 ) + for( k = 1; k < 4; k++ ) + h->mb.pic.p_fref[l][j][k] = &fref[l][j >> h->mb.b_interlaced]->filtered[k][ref_pix_offset[j&1]]; + } + } + } + + if( h->fdec->integral ) { - h->mb.cache.transform_size[0] = (h->mb.i_neighbour&MB_LEFT) - && h->mb.mb_transform_size[i_left_xy]; - h->mb.cache.transform_size[1] = (h->mb.i_neighbour&MB_TOP) - && h->mb.mb_transform_size[i_top_xy]; + assert( !h->mb.b_interlaced ); + for( i = 0; i < h->mb.pic.i_fref[0]; i++ ) + h->mb.pic.p_integral[0][i] = &h->fref0[i]->integral[ 16 * ( i_mb_x + i_mb_y * h->fdec->i_stride[0] )]; + for( i = 0; i < h->mb.pic.i_fref[1]; i++ ) + h->mb.pic.p_integral[1][i] = &h->fref1[i]->integral[ 16 * ( i_mb_x + i_mb_y * h->fdec->i_stride[0] )]; } + x264_prefetch_fenc( h, h->fenc, i_mb_x, i_mb_y ); + /* load ref/mv/mvd */ if( h->sh.i_type != SLICE_TYPE_I ) { @@ -1203,8 +1241,8 @@ void x264_macroblock_cache_load( x264_t *h, int i_mb_x, int i_mb_y ) if( h->mb.i_neighbour & MB_TOPLEFT ) { const int i8 = x264_scan8[0] - 1 - 1*8; - const int ir = i_mb_8x8 - s8x8 - 1; - const int iv = i_mb_4x4 - s4x4 - 1; + const int ir = i_top_8x8 - 1; + const int iv = i_top_4x4 - 1; h->mb.cache.ref[i_list][i8] = h->mb.ref[i_list][ir]; h->mb.cache.mv[i_list][i8][0] = h->mb.mv[i_list][iv][0]; h->mb.cache.mv[i_list][i8][1] = h->mb.mv[i_list][iv][1]; @@ -1220,9 +1258,8 @@ void x264_macroblock_cache_load( x264_t *h, int i_mb_x, int i_mb_y ) if( h->mb.i_neighbour & MB_TOP ) { const int i8 = x264_scan8[0] - 8; - const int ir = i_mb_8x8 - s8x8; - const int iv = i_mb_4x4 - s4x4; - + const int ir = i_top_8x8; + const int iv = i_top_4x4; h->mb.cache.ref[i_list][i8+0] = h->mb.cache.ref[i_list][i8+1] = h->mb.ref[i_list][ir + 0]; h->mb.cache.ref[i_list][i8+2] = @@ -1248,9 +1285,8 @@ void x264_macroblock_cache_load( x264_t *h, int i_mb_x, int i_mb_y ) if( h->mb.i_neighbour & MB_TOPRIGHT ) { const int i8 = x264_scan8[0] + 4 - 1*8; - const int ir = i_mb_8x8 - s8x8 + 2; - const int iv = i_mb_4x4 - s4x4 + 4; - + const int ir = i_top_8x8 + 2; + const int iv = i_top_4x4 + 4; h->mb.cache.ref[i_list][i8] = h->mb.ref[i_list][ir]; h->mb.cache.mv[i_list][i8][0] = h->mb.mv[i_list][iv][0]; h->mb.cache.mv[i_list][i8][1] = h->mb.mv[i_list][iv][1]; @@ -1268,7 +1304,6 @@ void x264_macroblock_cache_load( x264_t *h, int i_mb_x, int i_mb_y ) const int i8 = x264_scan8[0] - 1; const int ir = i_mb_8x8 - 1; const int iv = i_mb_4x4 - 1; - h->mb.cache.ref[i_list][i8+0*8] = h->mb.cache.ref[i_list][i8+1*8] = h->mb.ref[i_list][ir + 0*s8x8]; h->mb.cache.ref[i_list][i8+2*8] = @@ -1296,7 +1331,7 @@ void x264_macroblock_cache_load( x264_t *h, int i_mb_x, int i_mb_y ) if( i_top_type >= 0 ) { const int i8 = x264_scan8[0] - 8; - const int iv = i_mb_4x4 - s4x4; + const int iv = i_top_4x4; for( i = 0; i < 4; i++ ) { h->mb.cache.mvd[i_list][i8+i][0] = h->mb.mvd[i_list][iv + i][0]; @@ -1350,10 +1385,11 @@ void x264_macroblock_cache_load( x264_t *h, int i_mb_x, int i_mb_y ) h->mb.cache.skip[x264_scan8[4] - 8] = h->mb.skipbp[i_top_xy] & 0x8; } } + + if( h->sh.i_type == SLICE_TYPE_P ) + x264_mb_predict_mv_pskip( h, h->mb.cache.pskip_mv ); } - // FIXME skip this if I_4x4 and I_8x8 are disabled? - // assumes MB_TOPRIGHT = MB_TOP<<1 h->mb.i_neighbour4[0] = h->mb.i_neighbour8[0] = (h->mb.i_neighbour & (MB_TOP|MB_LEFT|MB_TOPLEFT)) | ((h->mb.i_neighbour & MB_TOP) ? 
MB_TOPRIGHT : 0); @@ -1389,11 +1425,30 @@ void x264_macroblock_cache_save( x264_t *h ) int i; - if( IS_SKIP( h->mb.i_type ) ) - h->mb.qp[i_mb_xy] = h->mb.i_last_qp; + for( i = 0; i < 3; i++ ) + { + int w = i ? 8 : 16; + int i_stride = h->fdec->i_stride[i]; + int i_stride2 = i_stride << h->mb.b_interlaced; + int i_pix_offset = h->mb.b_interlaced + ? w * (h->mb.i_mb_x + (h->mb.i_mb_y&~1) * i_stride) + (h->mb.i_mb_y&1) * i_stride + : w * (h->mb.i_mb_x + h->mb.i_mb_y * i_stride); + h->mc.copy[i?PIXEL_8x8:PIXEL_16x16]( + &h->fdec->plane[i][i_pix_offset], i_stride2, + h->mb.pic.p_fdec[i], FDEC_STRIDE, w ); + } + + x264_prefetch_fenc( h, h->fdec, h->mb.i_mb_x, h->mb.i_mb_y ); + + h->mb.type[i_mb_xy] = i_mb_type; - h->mb.i_last_dqp = h->mb.qp[i_mb_xy] - h->mb.i_last_qp; - h->mb.i_last_qp = h->mb.qp[i_mb_xy]; + if( h->mb.i_type != I_16x16 && h->mb.i_cbp_luma == 0 && h->mb.i_cbp_chroma == 0 ) + h->mb.i_qp = h->mb.i_last_qp; + h->mb.qp[i_mb_xy] = h->mb.i_qp; + + h->mb.i_last_dqp = h->mb.i_qp - h->mb.i_last_qp; + h->mb.i_last_qp = h->mb.i_qp; + h->mb.i_mb_prev_xy = h->mb.i_mb_xy; /* save intra4x4 */ if( i_mb_type == I_4x4 ) @@ -1434,6 +1489,8 @@ void x264_macroblock_cache_save( x264_t *h ) } } + if( h->mb.i_cbp_luma == 0 && h->mb.i_type != I_8x8 ) + h->mb.b_transform_8x8 = 0; h->mb.mb_transform_size[i_mb_xy] = h->mb.b_transform_8x8; if( !IS_INTRA( i_mb_type ) ) @@ -1484,7 +1541,7 @@ void x264_macroblock_cache_save( x264_t *h ) if( h->param.b_cabac ) { if( i_mb_type == I_4x4 || i_mb_type == I_16x16 ) - h->mb.chroma_pred_mode[i_mb_xy] = h->mb.i_chroma_pred_mode; + h->mb.chroma_pred_mode[i_mb_xy] = x264_mb_pred_mode8x8c_fix[ h->mb.i_chroma_pred_mode ]; else h->mb.chroma_pred_mode[i_mb_xy] = I_PRED_CHROMA_DC; @@ -1569,4 +1626,10 @@ void x264_macroblock_bipred_init( x264_t *h ) h->mb.bipred_weight[i_ref0][i_ref1] = 32; } } + if( h->sh.b_mbaff ) + { + for( i_ref0 = 2*h->i_ref0-1; i_ref0 >= 0; i_ref0-- ) + for( i_ref1 = 2*h->i_ref1-1; i_ref1 >= 0; i_ref1-- ) + h->mb.bipred_weight[i_ref0][i_ref1] = h->mb.bipred_weight[i_ref0>>1][i_ref1>>1]; + } }
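
The rewritten x264_mb_transform_8x8_allowed() at the top of this patch replaces a chain of per-mb-type conditionals with a tri-state table: 0 means the 8x8 transform is never allowed (intra, skip), 1 means always allowed (large partitions), 2 means the answer depends on sub-partitions or direct_8x8_inference. A standalone sketch of that idiom; the type names and table here are illustrative, only the three-way classification is taken from the patch:

    #include <stdint.h>

    enum { MB_INTRA, MB_P16x16, MB_P8x8, MB_B_DIRECT, MB_TYPE_MAX };

    /* 0 = never, 1 = always, 2 = needs a per-subpartition check */
    static const uint8_t tf8x8_tab[MB_TYPE_MAX] = { 0, 1, 2, 2 };

    static int transform_8x8_allowed( int mb_type, int conditions_hold )
    {
        int p = tf8x8_tab[mb_type];
        if( p < 2 )
            return p;             /* one table load answers the common cases */
        return conditions_hold;   /* tri-state types fall back to a real check */
    }

Most macroblock types are decided by a single table load; only P_8x8, B_8x8 and B_DIRECT still pay for a check of the sub-partitions or the inference flag.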
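
In x264_mb_predict_mv_direct16x16_temporal() above, each colocated vector is scaled with mv_l0 = ( dist_scale_factor * mv_col + 128 ) >> 8 and the L1 vector covers the remainder, mv_l0 - mv_col. A self-contained sketch of that arithmetic, with dist_scale_factor computed from POC distances the way the H.264 spec defines it (helper names are illustrative, not x264's; td is assumed nonzero):

    #include <stdint.h>
    #include <stdlib.h>

    static int clip3( int v, int i_min, int i_max )
    {
        return v < i_min ? i_min : v > i_max ? i_max : v;
    }

    /* tb = POC(cur) - POC(ref0), td = POC(ref1) - POC(ref0);
     * tx ~= 16384/td, so (tb*tx + 32) >> 6 ~= 256*tb/td in Q8 */
    static int dist_scale_factor( int poc_cur, int poc_ref0, int poc_ref1 )
    {
        int tb = clip3( poc_cur - poc_ref0, -128, 127 );
        int td = clip3( poc_ref1 - poc_ref0, -128, 127 );
        int tx = ( 16384 + ( abs( td ) >> 1 ) ) / td;
        return clip3( ( tb * tx + 32 ) >> 6, -1024, 1023 );
    }

    /* scale the colocated L0 vector to the current distance; L1 points
     * the rest of the way, so mv_l0 - mv_l1 always equals mv_col */
    static void temporal_direct_mv( int dsf, const int16_t mv_col[2],
                                    int16_t mv_l0[2], int16_t mv_l1[2] )
    {
        int i;
        for( i = 0; i < 2; i++ )
        {
            mv_l0[i] = ( dsf * mv_col[i] + 128 ) >> 8;  /* Q8 round-to-nearest */
            mv_l1[i] = mv_l0[i] - mv_col[i];
        }
    }

For a B-frame halfway between its references (tb=2, td=4), dist_scale_factor comes out to 128, so a colocated vector component of 6 splits into mv_l0 = 3 and mv_l1 = -3.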
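
setup_inverse_delta_pocs() and the SET_TMVP macro above implement the comment "reduce an IDIV to an IMUL": instead of dividing every temporal MV candidate by its POC delta, one reciprocal is computed per reference frame, and each candidate then costs a multiply plus a divide by the constant FIXED_SCALE, which the compiler reduces to a shift. A hedged arithmetic sketch, function names illustrative:

    #define FIXED_SCALE 256

    /* one real division per reference: round(FIXED_SCALE / delta_poc) */
    static int inv_delta_poc( int delta )
    {
        return ( FIXED_SCALE + delta / 2 ) / delta;
    }

    /* per-candidate cost: mv * tb/td becomes mv * tb * (256/td) / 256 */
    static int scale_tmvp( int mv, int tb, int inv_td )
    {
        return mv * tb * inv_td / FIXED_SCALE;
    }

With a reference 4 POCs away, inv_delta_poc(4) = 64; a colocated vector of 12 and tb = 2 gives 12*2*64/256 = 6, exactly 12*2/4. The reciprocal is only approximate for deltas that don't divide 256, which is acceptable here: as the comment above x264_mb_predict_mv_ref16x16 notes, these are encoder-side search predictors, not part of the spec.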
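
The new mvy += (h->mb.i_mb_y & 1)*4 - 2 adjustments in the x264_mb_mc_*xywh() functions implement the comment "chroma is offset if MCing from a field of opposite parity": the chroma sample grids of the two fields are vertically offset, so a cross-parity reference needs a half-chroma-sample (2 quarter-pel) correction, upward on top-field rows and downward on bottom-field rows. A sketch of just that adjustment, assuming x264's interleaved field reference numbering in which h->mb.b_interlaced & i_ref selects opposite-parity references:

    /* (mb_y & 1)*4 - 2 evaluates to -2 on top-field rows and +2 on
     * bottom-field rows, shifting the chroma grid by half a sample */
    static int adjust_chroma_mvy( int mvy, int mb_y, int cross_parity )
    {
        if( cross_parity )
            mvy += ( mb_y & 1 ) * 4 - 2;
        return mvy;
    }

Note that the luma vector is left untouched; only the chroma MC calls see the adjusted mvy, matching the ordering in the diff, where the offset is applied after mc_luma.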
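
x264_macroblock_cache_init() now returns int and swaps every x264_malloc for CHECKED_MALLOC, funnelling any allocation failure to the shared fail: label that returns -1. The macro itself is defined outside this hunk; a minimal sketch of the goto-fail pattern it implies (macro shape assumed, not quoted from x264):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* assumed shape: allocate, bail out to a common cleanup label on failure */
    #define CHECKED_MALLOC( var, size ) \
        do { (var) = malloc( size ); if( !(var) ) goto fail; } while( 0 )

    typedef struct { int8_t *qp; int16_t *cbp; } mb_arrays_t;

    static int arrays_init( mb_arrays_t *m, int i_mb_count )
    {
        memset( m, 0, sizeof(*m) );  /* NULL everything so fail: may free unconditionally */
        CHECKED_MALLOC( m->qp,  i_mb_count * sizeof(int8_t) );
        CHECKED_MALLOC( m->cbp, i_mb_count * sizeof(int16_t) );
        return 0;
    fail:
        free( m->qp );               /* free(NULL) is a no-op */
        free( m->cbp );
        return -1;
    }

The caller must now check the return value, which is why the function's signature changes from void to int in this patch.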
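
The fdec/fenc ASCII diagram in cache_load describes per-macroblock scratch buffers with small compile-time strides: the current MB is copied in from the frame (plus a one-pixel border of reconstructed neighbours for fdec, restored from intra_border_backup), everything is predicted and reconstructed in the scratch area, and cache_save copies the result back out. Constant strides keep the hot loops free of per-frame stride registers and the whole working set cache-resident. A sketch of the layout, assuming FENC_STRIDE = 16 and FDEC_STRIDE = 32 (buffer sizes here are illustrative):

    #include <stdint.h>

    #define FENC_STRIDE 16
    #define FDEC_STRIDE 32

    typedef struct
    {
        uint8_t fenc_buf[24*FENC_STRIDE];  /* 16 rows of Y, then 8 rows of U|V */
        uint8_t fdec_buf[27*FDEC_STRIDE];  /* same plus room for the borders */
        uint8_t *p_fenc[3], *p_fdec[3];
    } mb_pic_t;

    /* plane pointers matching the offsets used in the patch: Y on top,
     * U and V side by side below, fdec shifted to leave border rows */
    static void mb_scratch_init( mb_pic_t *pic )
    {
        pic->p_fenc[0] = pic->fenc_buf;
        pic->p_fenc[1] = pic->fenc_buf + 16*FENC_STRIDE;
        pic->p_fenc[2] = pic->fenc_buf + 16*FENC_STRIDE + 8;
        pic->p_fdec[0] = pic->fdec_buf +  2*FDEC_STRIDE;
        pic->p_fdec[1] = pic->fdec_buf + 19*FDEC_STRIDE;
        pic->p_fdec[2] = pic->fdec_buf + 19*FDEC_STRIDE + 16;
    }

    /* the left border is not contiguous in the frame, so it is fetched
     * pixel by pixel, mirroring the loop in cache_load */
    static void load_left_border( uint8_t *p_fdec, const uint8_t *plane,
                                  int stride, int w )
    {
        int j;
        for( j = 0; j < w; j++ )
            p_fdec[-1 + j*FDEC_STRIDE] = plane[-1 + j*stride];
    }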