M32( mv ) = 0;
}
else
- {
x264_mb_predict_mv_16x16( h, 0, 0, mv );
- }
}
static int x264_mb_predict_mv_direct16x16_temporal( x264_t *h )
{
int i_mb_4x4 = 16 * h->mb.i_mb_stride * h->mb.i_mb_y + 4 * h->mb.i_mb_x;
int i_mb_8x8 = 4 * h->mb.i_mb_stride * h->mb.i_mb_y + 2 * h->mb.i_mb_x;
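/* top-left 4x4- and 8x8-block indices of this MB within the frame-wide block grids */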
- int i8;
const int type_col = h->fref1[0]->mb_type[h->mb.i_mb_xy];
const int partition_col = h->fref1[0]->mb_partition[h->mb.i_mb_xy];
int width = 4 >> ((D_16x16 - partition_col)&1);
int height = 4 >> ((D_16x16 - partition_col)>>1);
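/* relies on the enum order D_8x8, D_16x8, D_8x16, D_16x16: bit 0 of (D_16x16 - partition_col) halves the width and bit 1 halves the height, in 4x4-block units */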
- for( i8 = 0; i8 < max_i8; i8 += step )
+ for( int i8 = 0; i8 < max_i8; i8 += step )
{
- const int x8 = i8%2;
- const int y8 = i8/2;
- const int i_part_8x8 = i_mb_8x8 + x8 + y8 * h->mb.i_b8_stride;
- const int i_ref1_ref = h->fref1[0]->ref[0][i_part_8x8];
- const int i_ref = (map_col_to_list0(i_ref1_ref>>h->sh.b_mbaff) << h->sh.b_mbaff) + (i_ref1_ref&h->sh.b_mbaff);
+ int x8 = i8&1;
+ int y8 = i8>>1;
+ int i_part_8x8 = i_mb_8x8 + x8 + y8 * h->mb.i_b8_stride;
+ int i_ref1_ref = h->fref1[0]->ref[0][i_part_8x8];
+ int i_ref = (map_col_to_list0(i_ref1_ref>>h->sh.b_mbaff) << h->sh.b_mbaff) + (i_ref1_ref&h->sh.b_mbaff);
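+ /* MBAFF refs alternate top/bottom fields, so map the frame index (parity bit shifted out) through list0, then restore the field parity */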
if( i_ref >= 0 )
{
- const int dist_scale_factor = h->mb.dist_scale_factor[i_ref][0];
- const int16_t *mv_col = h->fref1[0]->mv[0][i_mb_4x4 + 3*x8 + 3*y8 * h->mb.i_b4_stride];
- const int l0x = ( dist_scale_factor * mv_col[0] + 128 ) >> 8;
- const int l0y = ( dist_scale_factor * mv_col[1] + 128 ) >> 8;
+ int dist_scale_factor = h->mb.dist_scale_factor[i_ref][0];
+ int16_t *mv_col = h->fref1[0]->mv[0][i_mb_4x4 + 3*x8 + 3*y8 * h->mb.i_b4_stride];
+ int l0x = ( dist_scale_factor * mv_col[0] + 128 ) >> 8;
+ int l0y = ( dist_scale_factor * mv_col[1] + 128 ) >> 8;
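+ /* dist_scale_factor is a Q8 POC-distance ratio; +128 rounds and >>8 rescales: the standard temporal-direct MV scaling */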
if( h->param.i_threads > 1 && (l0y > h->mb.mv_max_spel[1] || l0y-mv_col[1] > h->mb.mv_max_spel[1]) )
return 0;
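/* with frame-parallel threading only part of the reference frame is reconstructed; a scaled MV past mv_max_spel would reference unfinished rows, so direct prediction is declared unavailable */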
x264_macroblock_cache_ref( h, 2*x8, 2*y8, width, height, 0, i_ref );
{
int8_t ref[2];
ALIGNED_ARRAY_8( int16_t, mv,[2],[2] );
- int i_list, i8, i_ref;
const int8_t *l1ref0 = &h->fref1[0]->ref[0][h->mb.i_b8_xy];
const int8_t *l1ref1 = &h->fref1[0]->ref[1][h->mb.i_b8_xy];
const int16_t (*l1mv[2])[2] = { (const int16_t (*)[2]) &h->fref1[0]->mv[0][h->mb.i_b4_xy],
(const int16_t (*)[2]) &h->fref1[0]->mv[1][h->mb.i_b4_xy] };
h->mb.i_partition = partition_col;
- for( i_list = 0; i_list < 2; i_list++ )
+ for( int i_list = 0; i_list < 2; i_list++ )
{
int i_refa = h->mb.cache.ref[i_list][X264_SCAN8_0 - 1];
int16_t *mv_a = h->mb.cache.mv[i_list][X264_SCAN8_0 - 1];
mv_c = h->mb.cache.mv[i_list][X264_SCAN8_0 - 8 - 1];
}
- i_ref = X264_MIN3( (unsigned)i_refa, (unsigned)i_refb, (unsigned)i_refc );
+ int i_ref = X264_MIN3( (unsigned)i_refa, (unsigned)i_refb, (unsigned)i_refc );
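+ /* the unsigned casts make -1/-2 wrap to huge values, so MIN3 picks the smallest valid ref; the result is negative only if all three neighbours are unavailable or intra */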
if( i_ref < 0 )
{
i_ref = -1;
return 1;
}
- if( !M64( mv ) || IS_INTRA( type_col ) || (ref[0]&&ref[1]) )
- return 1;
-
if( h->param.i_threads > 1
&& ( mv[0][1] > h->mb.mv_max_spel[1]
|| mv[1][1] > h->mb.mv_max_spel[1] ) )
return 0;
}
+ if( !M64( mv ) || IS_INTRA( type_col ) || (ref[0]&&ref[1]) )
+ return 1;
+
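+ /* nothing below can change the result unless a list uses ref 0 with a nonzero MV and the colocated MB is inter, so everything else exits here */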
/* Don't do any checks other than the ones we have to, based
* on the size of the colocated partitions.
* Depends on the enum order: D_8x8, D_16x8, D_8x16, D_16x16 */
int height = 4 >> ((D_16x16 - partition_col)>>1);
/* col_zero_flag */
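/* spatial direct col_zero rule: colocated ref 0 with both MV components within +-1 quarter-pel forces the corresponding list's MV to zero when that list's ref is 0 */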
- for( i8 = 0; i8 < max_i8; i8 += step )
+ for( int i8 = 0; i8 < max_i8; i8 += step )
{
const int x8 = i8&1;
const int y8 = i8>>1;
/* cache ref & mv */
if( b_available )
- {
- int l;
- for( l = 0; l < 2; l++ )
+ for( int l = 0; l < 2; l++ )
{
CP32( h->mb.cache.direct_mv[l][0], h->mb.cache.mv[l][x264_scan8[ 0]] );
CP32( h->mb.cache.direct_mv[l][1], h->mb.cache.mv[l][x264_scan8[ 4]] );
h->mb.cache.direct_ref[l][3] = h->mb.cache.ref[l][x264_scan8[12]];
h->mb.cache.direct_partition = h->mb.i_partition;
}
- }
return b_available;
}
int16_t (*mvr)[2] = h->mb.mvr[i_list][i_ref];
int i = 0;
-#define SET_MVP(mvp) { \
+#define SET_MVP(mvp) \
+ { \
CP32( mvc[i], mvp ); \
i++; \
}
if( h->mb.i_neighbour & MB_LEFT )
{
int i_mb_l = h->mb.i_mb_xy - 1;
- /* skip MBs didn't go through the whole search process, so mvr is undefined */
- if( !IS_SKIP( h->mb.type[i_mb_l] ) )
- SET_MVP( mvr[i_mb_l] );
+ SET_MVP( mvr[i_mb_l] );
}
if( h->mb.i_neighbour & MB_TOP )
{
int i_mb_t = h->mb.i_mb_top_xy;
- if( !IS_SKIP( h->mb.type[i_mb_t] ) )
- SET_MVP( mvr[i_mb_t] );
+ SET_MVP( mvr[i_mb_t] );
- if( h->mb.i_neighbour & MB_TOPLEFT && !IS_SKIP( h->mb.type[i_mb_t - 1] ) )
+ if( h->mb.i_neighbour & MB_TOPLEFT )
SET_MVP( mvr[i_mb_t-1] );
- if( h->mb.i_mb_x < h->mb.i_mb_stride - 1 && !IS_SKIP( h->mb.type[i_mb_t + 1] ) )
+ if( h->mb.i_mb_x < h->mb.i_mb_stride - 1 )
SET_MVP( mvr[i_mb_t+1] );
}
#undef SET_MVP
if( h->fref0[0]->i_ref[0] > 0 )
{
x264_frame_t *l0 = h->fref0[0];
+ x264_frame_t **fref = i_list ? h->fref1 : h->fref0;
int field = h->mb.i_mb_y&1;
int curpoc = h->fdec->i_poc + field*h->sh.i_delta_poc_bottom;
- int refpoc = h->fref0[i_ref>>h->sh.b_mbaff]->i_poc;
+ int refpoc = fref[i_ref>>h->sh.b_mbaff]->i_poc;
if( h->sh.b_mbaff && field^(i_ref&1) )
refpoc += h->sh.i_delta_poc_bottom;
/* Set up a lookup table for delta pocs to reduce an IDIV to an IMUL */
static void setup_inverse_delta_pocs( x264_t *h )
{
- int i, field;
- for( field = 0; field <= h->sh.b_mbaff; field++ )
+ for( int field = 0; field <= h->sh.b_mbaff; field++ )
{
int curpoc = h->fdec->i_poc + field*h->sh.i_delta_poc_bottom;
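/* each entry presumably stores a rounded fixed-point reciprocal of the POC delta (curpoc - refpoc), so later scaling is a multiply+shift instead of a divide */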
- for( i = 0; i < (h->i_ref0<<h->sh.b_mbaff); i++ )
+ for( int i = 0; i < (h->i_ref0<<h->sh.b_mbaff); i++ )
{
int refpoc = h->fref0[i>>h->sh.b_mbaff]->i_poc;
if( h->sh.b_mbaff && field^(i&1) )
static NOINLINE void x264_mb_mc_0xywh( x264_t *h, int x, int y, int width, int height )
{
- const int i8 = x264_scan8[0]+x+8*y;
- const int i_ref = h->mb.cache.ref[0][i8];
- const int mvx = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
- int mvy = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
+ int i8 = x264_scan8[0]+x+8*y;
+ int i_ref = h->mb.cache.ref[0][i8];
+ int mvx = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
+ int mvy = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
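+ /* MVs are in quarter-pel units; adding 4*4*x/y (16 quarter-pels per 4x4 block) folds the block offset into the MV so mc_luma addresses from the MB origin */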
h->mc.mc_luma( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE,
h->mb.pic.p_fref[0][i_ref], h->mb.pic.i_stride[0],
}
static NOINLINE void x264_mb_mc_1xywh( x264_t *h, int x, int y, int width, int height )
{
- const int i8 = x264_scan8[0]+x+8*y;
- const int i_ref = h->mb.cache.ref[1][i8];
- const int mvx = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
- int mvy = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
+ int i8 = x264_scan8[0]+x+8*y;
+ int i_ref = h->mb.cache.ref[1][i8];
+ int mvx = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
+ int mvy = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
h->mc.mc_luma( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE,
h->mb.pic.p_fref[1][i_ref], h->mb.pic.i_stride[0],
static NOINLINE void x264_mb_mc_01xywh( x264_t *h, int x, int y, int width, int height )
{
- const int i8 = x264_scan8[0]+x+8*y;
- const int i_ref0 = h->mb.cache.ref[0][i8];
- const int i_ref1 = h->mb.cache.ref[1][i8];
- const int weight = h->mb.bipred_weight[i_ref0][i_ref1];
- const int mvx0 = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
- const int mvx1 = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
- int mvy0 = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
- int mvy1 = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
- int i_mode = x264_size2pixel[height][width];
- int i_stride0 = 16, i_stride1 = 16;
+ int i8 = x264_scan8[0]+x+8*y;
+ int i_ref0 = h->mb.cache.ref[0][i8];
+ int i_ref1 = h->mb.cache.ref[1][i8];
+ int weight = h->mb.bipred_weight[i_ref0][i_ref1];
+ int mvx0 = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
+ int mvx1 = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
+ int mvy0 = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
+ int mvy1 = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
+ int i_mode = x264_size2pixel[height][width];
+ int i_stride0 = 16, i_stride1 = 16;
ALIGNED_ARRAY_16( uint8_t, tmp0,[16*16] );
ALIGNED_ARRAY_16( uint8_t, tmp1,[16*16] );
uint8_t *src0, *src1;
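/* what follows is assumed to get_ref() each prediction into tmp0/tmp1 (or a direct pointer, updating i_stride0/1), then blend with h->mc.avg[i_mode] using the implicit bipred weight */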
void x264_mb_mc_8x8( x264_t *h, int i8 )
{
- const int x = 2*(i8&1);
- const int y = 2*(i8>>1);
+ int x = 2*(i8&1);
+ int y = 2*(i8>>1);
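+ /* i8 indexes the four 8x8 partitions in raster order; x and y are in 4x4-block units */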
if( h->sh.i_type == SLICE_TYPE_P )
{
}
else
{
- const int i8 = x264_scan8[0] + x + 8*y;
+ int scan8 = x264_scan8[0] + x + 8*y;
- if( h->mb.cache.ref[0][i8] >= 0 )
- if( h->mb.cache.ref[1][i8] >= 0 )
+ if( h->mb.cache.ref[0][scan8] >= 0 )
+ if( h->mb.cache.ref[1][scan8] >= 0 )
x264_mb_mc_01xywh( h, x, y, 2, 2 );
else
x264_mb_mc_0xywh( h, x, y, 2, 2 );
{
if( h->mb.i_partition == D_8x8 )
{
- int i;
- for( i = 0; i < 4; i++ )
+ for( int i = 0; i < 4; i++ )
x264_mb_mc_8x8( h, i );
}
else
{
- const int ref0a = h->mb.cache.ref[0][x264_scan8[ 0]];
- const int ref0b = h->mb.cache.ref[0][x264_scan8[12]];
- const int ref1a = h->mb.cache.ref[1][x264_scan8[ 0]];
- const int ref1b = h->mb.cache.ref[1][x264_scan8[12]];
+ int ref0a = h->mb.cache.ref[0][x264_scan8[ 0]];
+ int ref0b = h->mb.cache.ref[0][x264_scan8[12]];
+ int ref1a = h->mb.cache.ref[1][x264_scan8[ 0]];
+ int ref1b = h->mb.cache.ref[1][x264_scan8[12]];
if( h->mb.i_partition == D_16x16 )
{
int x264_macroblock_cache_init( x264_t *h )
{
- int i, j;
int i_mb_count = h->mb.i_mb_count;
h->mb.i_mb_stride = h->sps->i_mb_width;
if( h->param.b_cabac )
{
CHECKED_MALLOC( h->mb.chroma_pred_mode, i_mb_count * sizeof(int8_t) );
- CHECKED_MALLOC( h->mb.mvd[0], 2*16 * i_mb_count * sizeof(int16_t) );
- CHECKED_MALLOC( h->mb.mvd[1], 2*16 * i_mb_count * sizeof(int16_t) );
+ CHECKED_MALLOC( h->mb.mvd[0], i_mb_count * sizeof( **h->mb.mvd ) );
+ CHECKED_MALLOC( h->mb.mvd[1], i_mb_count * sizeof( **h->mb.mvd ) );
}
- for( i=0; i<2; i++ )
+ for( int i = 0; i < 2; i++ )
{
int i_refs = X264_MIN(16, (i ? 1 + !!h->param.i_bframe_pyramid : h->param.i_frame_reference) ) << h->param.b_interlaced;
if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
else if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_BLIND )
i_refs = X264_MIN(16, i_refs + 1); //blind weights add one duplicate frame
- for( j=0; j < i_refs; j++ )
+ for( int j = 0; j < i_refs; j++ )
CHECKED_MALLOC( h->mb.mvr[i][j], 2 * i_mb_count * sizeof(int16_t) );
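/* mvr keeps one 16x16 MV per (list, ref, MB), fed to the MV candidate list via SET_MVP */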
}
int i_padv = PADV << h->param.b_interlaced;
#define ALIGN(x,a) (((x)+((a)-1))&~((a)-1))
int align = h->param.cpu&X264_CPU_CACHELINE_64 ? 64 : h->param.cpu&X264_CPU_CACHELINE_32 ? 32 : 16;
- int i_stride, luma_plane_size;
+ int i_stride, luma_plane_size = 0;
int numweightbuf;
if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_FAKE )
numweightbuf = 1;
}
- for( i = 0; i < numweightbuf; i++ )
+ for( int i = 0; i < numweightbuf; i++ )
CHECKED_MALLOC( h->mb.p_weight_buf[i], luma_plane_size );
#undef ALIGN
}
- for( i=0; i<=h->param.b_interlaced; i++ )
- for( j=0; j<3; j++ )
+ for( int i = 0; i <= h->param.b_interlaced; i++ )
+ for( int j = 0; j < 3; j++ )
{
/* shouldn't really be initialized, just silences a valgrind false-positive in predict_8x8_filter_mmx */
CHECKED_MALLOCZERO( h->mb.intra_border_backup[i][j], (h->sps->i_mb_width*16+32)>>!!j );
}
void x264_macroblock_cache_end( x264_t *h )
{
- int i, j;
- for( i=0; i<=h->param.b_interlaced; i++ )
- for( j=0; j<3; j++ )
+ for( int i = 0; i <= h->param.b_interlaced; i++ )
+ for( int j = 0; j < 3; j++ )
x264_free( h->mb.intra_border_backup[i][j] - 8 );
- for( i=0; i<2; i++ )
- for( j=0; j<32; j++ )
+ for( int i = 0; i < 2; i++ )
+ for( int j = 0; j < 32; j++ )
x264_free( h->mb.mvr[i][j] );
- for( i=0; i<16; i++ )
+ for( int i = 0; i < 16; i++ )
x264_free( h->mb.p_weight_buf[i] );
if( h->param.b_cabac )
}
void x264_macroblock_slice_init( x264_t *h )
{
- int i, j;
-
h->mb.mv[0] = h->fdec->mv[0];
h->mb.mv[1] = h->fdec->mv[1];
h->mb.ref[0] = h->fdec->ref[0];
h->fdec->i_ref[0] = h->i_ref0;
h->fdec->i_ref[1] = h->i_ref1;
- for( i = 0; i < h->i_ref0; i++ )
+ for( int i = 0; i < h->i_ref0; i++ )
h->fdec->ref_poc[0][i] = h->fref0[i]->i_poc;
if( h->sh.i_type == SLICE_TYPE_B )
{
- for( i = 0; i < h->i_ref1; i++ )
+ for( int i = 0; i < h->i_ref1; i++ )
h->fdec->ref_poc[1][i] = h->fref1[i]->i_poc;
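/* temporal direct needs the colocated frame's list0 refs remapped to the current list0 by POC; -1 stays intra, -2 means no match */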
map_col_to_list0(-1) = -1;
map_col_to_list0(-2) = -2;
- for( i = 0; i < h->fref1[0]->i_ref[0]; i++ )
+ for( int i = 0; i < h->fref1[0]->i_ref[0]; i++ )
{
int poc = h->fref1[0]->ref_poc[0][i];
map_col_to_list0(i) = -2;
- for( j = 0; j < h->i_ref0; j++ )
+ for( int j = 0; j < h->i_ref0; j++ )
if( h->fref0[j]->i_poc == poc )
{
map_col_to_list0(i) = j;
break;
}
}
if( h->sh.i_type == SLICE_TYPE_P )
- memset( h->mb.cache.skip, 0, X264_SCAN8_SIZE * sizeof( int8_t ) );
+ memset( h->mb.cache.skip, 0, sizeof( h->mb.cache.skip ) );
/* init with not available (for top right idx=7,15) */
- memset( h->mb.cache.ref[0], -2, X264_SCAN8_SIZE * sizeof( int8_t ) );
- memset( h->mb.cache.ref[1], -2, X264_SCAN8_SIZE * sizeof( int8_t ) );
+ memset( h->mb.cache.ref, -2, sizeof( h->mb.cache.ref ) );
setup_inverse_delta_pocs( h );
void x264_macroblock_thread_init( x264_t *h )
{
+ h->mb.i_me_method = h->param.analyse.i_me_method;
+ h->mb.i_subpel_refine = h->param.analyse.i_subpel_refine;
+ if( h->sh.i_type == SLICE_TYPE_B && (h->mb.i_subpel_refine == 6 || h->mb.i_subpel_refine == 8) )
+ h->mb.i_subpel_refine--;
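+ /* subme 6 and 8 add RD stages that apply to I/P slices only (7 and 9 extend them to B), so B slices step down one level */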
+ h->mb.b_chroma_me = h->param.analyse.b_chroma_me && h->sh.i_type == SLICE_TYPE_P
+ && h->mb.i_subpel_refine >= 5;
+ h->mb.b_dct_decimate = h->sh.i_type == SLICE_TYPE_B ||
+ (h->param.analyse.b_dct_decimate && h->sh.i_type != SLICE_TYPE_I);
+
/* fdec: fenc:
* yyyyyyy
* yYYYY YYYY
static NOINLINE void copy_column8( uint8_t *dst, uint8_t *src )
{
// input pointers are offset by 4 rows because that's faster (smaller instruction size on x86)
- int i;
- for( i = -4; i < 4; i++ )
+ for( int i = -4; i < 4; i++ )
dst[i*FDEC_STRIDE] = src[i*FDEC_STRIDE];
}
&h->mb.intra_border_backup[i_mb_y & h->sh.b_mbaff][i][i_mb_x*16>>!!i];
int ref_pix_offset[2] = { i_pix_offset, i_pix_offset };
x264_frame_t **fref[2] = { h->fref0, h->fref1 };
- int j, k;
if( h->mb.b_interlaced )
ref_pix_offset[1] += (1-2*(i_mb_y&1)) * i_stride;
h->mb.pic.i_stride[i] = i_stride2;
else
memset( &h->mb.pic.p_fdec[i][-1-FDEC_STRIDE], 0, w*3/2+1 );
if( h->mb.b_interlaced || h->mb.b_reencode_mb )
- for( j = 0; j < w; j++ )
+ for( int j = 0; j < w; j++ )
h->mb.pic.p_fdec[i][-1+j*FDEC_STRIDE] = plane_fdec[-1+j*i_stride2];
- for( j = 0; j < h->mb.pic.i_fref[0]; j++ )
+ for( int j = 0; j < h->mb.pic.i_fref[0]; j++ )
{
h->mb.pic.p_fref[0][j][i==0 ? 0:i+3] = &fref[0][j >> h->mb.b_interlaced]->plane[i][ref_pix_offset[j&1]];
if( i == 0 )
{
- for( k = 1; k < 4; k++ )
+ for( int k = 1; k < 4; k++ )
h->mb.pic.p_fref[0][j][k] = &fref[0][j >> h->mb.b_interlaced]->filtered[k][ref_pix_offset[j&1]];
if( h->sh.weight[j][0].weightfn )
h->mb.pic.p_fref_w[j] = &h->fenc->weighted[j >> h->mb.b_interlaced][ref_pix_offset[j&1]];
}
}
if( h->sh.i_type == SLICE_TYPE_B )
- for( j = 0; j < h->mb.pic.i_fref[1]; j++ )
+ for( int j = 0; j < h->mb.pic.i_fref[1]; j++ )
{
h->mb.pic.p_fref[1][j][i==0 ? 0:i+3] = &fref[1][j >> h->mb.b_interlaced]->plane[i][ref_pix_offset[j&1]];
if( i == 0 )
- for( k = 1; k < 4; k++ )
+ for( int k = 1; k < 4; k++ )
h->mb.pic.p_fref[1][j][k] = &fref[1][j >> h->mb.b_interlaced]->filtered[k][ref_pix_offset[j&1]];
}
}
int i_top_type = -1; /* gcc warn */
int i_left_type = -1;
- int i;
-
/* init index */
h->mb.i_mb_x = i_mb_x;
h->mb.i_mb_y = i_mb_y;
if( h->fdec->integral )
{
assert( !h->mb.b_interlaced );
- for( i = 0; i < h->mb.pic.i_fref[0]; i++ )
+ for( int i = 0; i < h->mb.pic.i_fref[0]; i++ )
h->mb.pic.p_integral[0][i] = &h->fref0[i]->integral[ 16 * ( i_mb_x + i_mb_y * h->fdec->i_stride[0] )];
- for( i = 0; i < h->mb.pic.i_fref[1]; i++ )
+ for( int i = 0; i < h->mb.pic.i_fref[1]; i++ )
h->mb.pic.p_integral[1][i] = &h->fref1[i]->integral[ 16 * ( i_mb_x + i_mb_y * h->fdec->i_stride[0] )];
}
const int s8x8 = h->mb.i_b8_stride;
const int s4x4 = h->mb.i_b4_stride;
- int i_list;
-
- for( i_list = 0; i_list < (h->sh.i_type == SLICE_TYPE_B ? 2 : 1 ); i_list++ )
+ for( int i_list = 0; i_list < (h->sh.i_type == SLICE_TYPE_B ? 2 : 1 ); i_list++ )
{
/*
h->mb.cache.ref[i_list][x264_scan8[5 ]+1] =
{
const int i8 = x264_scan8[0] + 4 - 1*8;
h->mb.cache.ref[i_list][i8] = -2;
- M32( h->mb.cache.mv[i_list][i8] ) = 0;
}
if( h->mb.i_neighbour & MB_LEFT )
else
{
const int i8 = x264_scan8[0] - 1;
- for( i = 0; i < 4; i++ )
+ for( int i = 0; i < 4; i++ )
{
h->mb.cache.ref[i_list][i8+i*8] = -2;
M32( h->mb.cache.mv[i_list][i8+i*8] ) = 0;
if( h->param.b_cabac )
{
if( i_top_type >= 0 )
- {
- const int i8 = x264_scan8[0] - 8;
- const int iv = i_top_4x4;
- CP64( h->mb.cache.mvd[i_list][i8+0], h->mb.mvd[i_list][iv+0] );
- CP64( h->mb.cache.mvd[i_list][i8+2], h->mb.mvd[i_list][iv+2] );
- }
+ CP64( h->mb.cache.mvd[i_list][x264_scan8[0] - 8], h->mb.mvd[i_list][i_top_xy][0] );
else
- {
- const int i8 = x264_scan8[0] - 8;
- M64( h->mb.cache.mvd[i_list][i8+0] ) = 0;
- M64( h->mb.cache.mvd[i_list][i8+2] ) = 0;
- }
+ M64( h->mb.cache.mvd[i_list][x264_scan8[0] - 8] ) = 0;
if( i_left_type >= 0 )
{
- const int i8 = x264_scan8[0] - 1;
- const int iv = i_mb_4x4 - 1;
- CP32( h->mb.cache.mvd[i_list][i8+0*8], h->mb.mvd[i_list][iv + 0*s4x4] );
- CP32( h->mb.cache.mvd[i_list][i8+1*8], h->mb.mvd[i_list][iv + 1*s4x4] );
- CP32( h->mb.cache.mvd[i_list][i8+2*8], h->mb.mvd[i_list][iv + 2*s4x4] );
- CP32( h->mb.cache.mvd[i_list][i8+3*8], h->mb.mvd[i_list][iv + 3*s4x4] );
+ CP16( h->mb.cache.mvd[i_list][x264_scan8[0 ] - 1], h->mb.mvd[i_list][i_left_xy][4] );
+ CP16( h->mb.cache.mvd[i_list][x264_scan8[2 ] - 1], h->mb.mvd[i_list][i_left_xy][5] );
+ CP16( h->mb.cache.mvd[i_list][x264_scan8[8 ] - 1], h->mb.mvd[i_list][i_left_xy][6] );
+ CP16( h->mb.cache.mvd[i_list][x264_scan8[10] - 1], h->mb.mvd[i_list][i_left_xy][3] );
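+ /* mvd is stored compacted per MB: entries 0-3 hold the bottom row (blocks 10,11,14,15), 4-6 the right column (blocks 5,7,13); block 15 doubles as entry 3, hence the [3] above */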
}
else
- {
- const int i8 = x264_scan8[0] - 1;
- for( i = 0; i < 4; i++ )
- M32( h->mb.cache.mvd[i_list][i8+i*8] ) = 0;
- }
+ for( int i = 0; i < 4; i++ )
+ M16( h->mb.cache.mvd[i_list][x264_scan8[0]-1+i*8] ) = 0;
}
}
int i_pix_offset = h->mb.b_interlaced
? w * (h->mb.i_mb_x + (h->mb.i_mb_y&~1) * i_stride) + (h->mb.i_mb_y&1) * i_stride
: w * (h->mb.i_mb_x + h->mb.i_mb_y * i_stride);
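/* interlaced: the even MB row anchors the field pair, and the low bit of i_mb_y drops one line down to select the bottom field */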
- h->mc.copy[i?PIXEL_8x8:PIXEL_16x16](
- &h->fdec->plane[i][i_pix_offset], i_stride2,
- h->mb.pic.p_fdec[i], FDEC_STRIDE, w );
+ h->mc.copy[i?PIXEL_8x8:PIXEL_16x16]( &h->fdec->plane[i][i_pix_offset], i_stride2,
+ h->mb.pic.p_fdec[i], FDEC_STRIDE, w );
}
void x264_macroblock_cache_save( x264_t *h )
int8_t *intra4x4_pred_mode = h->mb.intra4x4_pred_mode[i_mb_xy];
uint8_t *non_zero_count = h->mb.non_zero_count[i_mb_xy];
- int y;
-
x264_macroblock_store_pic( h, 0 );
x264_macroblock_store_pic( h, 1 );
x264_macroblock_store_pic( h, 2 );
h->mb.i_cbp_luma = 0xf;
h->mb.cbp[i_mb_xy] = 0x72f; /* all set */
h->mb.b_transform_8x8 = 0;
- memset( non_zero_count, 16, 24 );
+ memset( non_zero_count, 16, sizeof( *h->mb.non_zero_count ) );
}
else
{
h->mb.ref[0][i_mb_8x8+1+0*s8x8] = h->mb.cache.ref[0][x264_scan8[4]];
h->mb.ref[0][i_mb_8x8+0+1*s8x8] = h->mb.cache.ref[0][x264_scan8[8]];
h->mb.ref[0][i_mb_8x8+1+1*s8x8] = h->mb.cache.ref[0][x264_scan8[12]];
- for( y = 0; y < 4; y++ )
+ for( int y = 0; y < 4; y++ )
{
CP64( h->mb.mv[0][i_mb_4x4+y*s4x4+0], h->mb.cache.mv[0][x264_scan8[0]+8*y+0] );
CP64( h->mb.mv[0][i_mb_4x4+y*s4x4+2], h->mb.cache.mv[0][x264_scan8[0]+8*y+2] );
h->mb.ref[1][i_mb_8x8+1+0*s8x8] = h->mb.cache.ref[1][x264_scan8[4]];
h->mb.ref[1][i_mb_8x8+0+1*s8x8] = h->mb.cache.ref[1][x264_scan8[8]];
h->mb.ref[1][i_mb_8x8+1+1*s8x8] = h->mb.cache.ref[1][x264_scan8[12]];
- for( y = 0; y < 4; y++ )
+ for( int y = 0; y < 4; y++ )
{
CP64( h->mb.mv[1][i_mb_4x4+y*s4x4+0], h->mb.cache.mv[1][x264_scan8[0]+8*y+0] );
CP64( h->mb.mv[1][i_mb_4x4+y*s4x4+2], h->mb.cache.mv[1][x264_scan8[0]+8*y+2] );
}
else
{
- int i_list;
- for( i_list = 0; i_list < (h->sh.i_type == SLICE_TYPE_B ? 2 : 1 ); i_list++ )
+ for( int i_list = 0; i_list < (h->sh.i_type == SLICE_TYPE_B ? 2 : 1 ); i_list++ )
{
M16( &h->mb.ref[i_list][i_mb_8x8+0*s8x8] ) = (uint8_t)(-1) * 0x0101;
M16( &h->mb.ref[i_list][i_mb_8x8+1*s8x8] ) = (uint8_t)(-1) * 0x0101;
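/* (uint8_t)(-1) * 0x0101 == 0xFFFF: one 16-bit store writes ref -1 into both 8x8 entries of the row */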
- for( y = 0; y < 4; y++ )
+ for( int y = 0; y < 4; y++ )
{
M64( h->mb.mv[i_list][i_mb_4x4+y*s4x4+0] ) = 0;
M64( h->mb.mv[i_list][i_mb_4x4+y*s4x4+2] ) = 0;
if( !IS_INTRA( i_mb_type ) && !IS_SKIP( i_mb_type ) && !IS_DIRECT( i_mb_type ) )
{
- for( y = 0; y < 4; y++ )
+ CP64( h->mb.mvd[0][i_mb_xy][0], h->mb.cache.mvd[0][x264_scan8[10]] );
+ CP16( h->mb.mvd[0][i_mb_xy][4], h->mb.cache.mvd[0][x264_scan8[5 ]] );
+ CP16( h->mb.mvd[0][i_mb_xy][5], h->mb.cache.mvd[0][x264_scan8[7 ]] );
+ CP16( h->mb.mvd[0][i_mb_xy][6], h->mb.cache.mvd[0][x264_scan8[13]] );
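+ /* only neighbour-visible mvds are kept: the CP64 packs the bottom row, the CP16s the right column, mirroring the unpack in cache_load */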
+ if( h->sh.i_type == SLICE_TYPE_B )
{
- CP64( h->mb.mvd[0][i_mb_4x4+y*s4x4+0], h->mb.cache.mvd[0][x264_scan8[0]+8*y+0] );
- CP64( h->mb.mvd[0][i_mb_4x4+y*s4x4+2], h->mb.cache.mvd[0][x264_scan8[0]+8*y+2] );
+ CP64( h->mb.mvd[1][i_mb_xy][0], h->mb.cache.mvd[1][x264_scan8[10]] );
+ CP16( h->mb.mvd[1][i_mb_xy][4], h->mb.cache.mvd[1][x264_scan8[5 ]] );
+ CP16( h->mb.mvd[1][i_mb_xy][5], h->mb.cache.mvd[1][x264_scan8[7 ]] );
+ CP16( h->mb.mvd[1][i_mb_xy][6], h->mb.cache.mvd[1][x264_scan8[13]] );
}
- if( h->sh.i_type == SLICE_TYPE_B )
- for( y = 0; y < 4; y++ )
- {
- CP64( h->mb.mvd[1][i_mb_4x4+y*s4x4+0], h->mb.cache.mvd[1][x264_scan8[0]+8*y+0] );
- CP64( h->mb.mvd[1][i_mb_4x4+y*s4x4+2], h->mb.cache.mvd[1][x264_scan8[0]+8*y+2] );
- }
}
else
{
- for( y = 0; y < 4; y++ )
+ M64( h->mb.mvd[0][i_mb_xy][0] ) = 0;
+ M64( h->mb.mvd[0][i_mb_xy][4] ) = 0;
+ if( h->sh.i_type == SLICE_TYPE_B )
{
- M64( h->mb.mvd[0][i_mb_4x4+y*s4x4+0] ) = 0;
- M64( h->mb.mvd[0][i_mb_4x4+y*s4x4+2] ) = 0;
+ M64( h->mb.mvd[1][i_mb_xy][0] ) = 0;
+ M64( h->mb.mvd[1][i_mb_xy][4] ) = 0;
}
- if( h->sh.i_type == SLICE_TYPE_B )
- for( y = 0; y < 4; y++ )
- {
- M64( h->mb.mvd[1][i_mb_4x4+y*s4x4+0] ) = 0;
- M64( h->mb.mvd[1][i_mb_4x4+y*s4x4+2] ) = 0;
- }
}
if( h->sh.i_type == SLICE_TYPE_B )
void x264_macroblock_bipred_init( x264_t *h )
{
- int i_ref0, i_ref1, field;
- for( field = 0; field <= h->sh.b_mbaff; field++ )
- for( i_ref0 = 0; i_ref0 < (h->i_ref0<<h->sh.b_mbaff); i_ref0++ )
+ for( int field = 0; field <= h->sh.b_mbaff; field++ )
+ for( int i_ref0 = 0; i_ref0 < (h->i_ref0<<h->sh.b_mbaff); i_ref0++ )
{
int poc0 = h->fref0[i_ref0>>h->sh.b_mbaff]->i_poc;
if( h->sh.b_mbaff && field^(i_ref0&1) )
poc0 += h->sh.i_delta_poc_bottom;
- for( i_ref1 = 0; i_ref1 < (h->i_ref1<<h->sh.b_mbaff); i_ref1++ )
+ for( int i_ref1 = 0; i_ref1 < (h->i_ref1<<h->sh.b_mbaff); i_ref1++ )
{
int dist_scale_factor;
int poc1 = h->fref1[i_ref1>>h->sh.b_mbaff]->i_poc;
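/* dist_scale_factor presumably follows the spec formula: td = clip3(-128,127,poc1-poc0), tb = clip3(-128,127,curpoc-poc0), tx = (16384+abs(td/2))/td, dist_scale_factor = clip3(-1024,1023,(tb*tx+32)>>6) */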