/*****************************************************************************
* macroblock.c: h264 encoder library
*****************************************************************************
- * Copyright (C) 2003 Laurent Aimar
- * $Id: macroblock.c,v 1.1 2004/06/03 19:27:06 fenrir Exp $
+ * Copyright (C) 2003-2008 x264 project
*
* Authors: Laurent Aimar <fenrir@via.ecp.fr>
+ * Loren Merritt <lorenm@u.washington.edu>
+ * Fiona Glaser <fiona@x264.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*****************************************************************************/
#include "common.h"
-
-int x264_mb_predict_intra4x4_mode( x264_t *h, int idx )
-{
- const int ma = h->mb.cache.intra4x4_pred_mode[x264_scan8[idx] - 1];
- const int mb = h->mb.cache.intra4x4_pred_mode[x264_scan8[idx] - 8];
- const int m = X264_MIN( x264_mb_pred_mode4x4_fix(ma),
- x264_mb_pred_mode4x4_fix(mb) );
-
- if( m < 0 )
- return I_PRED_4x4_DC;
-
- return m;
-}
-
-int x264_mb_predict_non_zero_code( x264_t *h, int idx )
-{
- const int za = h->mb.cache.non_zero_count[x264_scan8[idx] - 1];
- const int zb = h->mb.cache.non_zero_count[x264_scan8[idx] - 8];
-
- int i_ret = za + zb;
-
- if( i_ret < 0x80 )
- {
- i_ret = ( i_ret + 1 ) >> 1;
- }
- return i_ret & 0x7f;
-}
-
-int x264_mb_transform_8x8_allowed( x264_t *h )
-{
- // intra and skip are disallowed
- // large partitions are allowed
- // direct and 8x8 are conditional
- static const uint8_t partition_tab[X264_MBTYPE_MAX] = {
- 0,0,0,0,1,2,0,2,1,1,1,1,1,1,1,1,1,2,0,
- };
- int p, i;
-
- if( !h->pps->b_transform_8x8_mode )
- return 0;
- p = partition_tab[h->mb.i_type];
- if( p < 2 )
- return p;
- else if( h->mb.i_type == B_DIRECT )
- return h->sps->b_direct8x8_inference;
- else if( h->mb.i_type == P_8x8 )
- {
- if( !(h->param.analyse.inter & X264_ANALYSE_PSUB8x8) )
- return 1;
- for( i=0; i<4; i++ )
- if( h->mb.i_sub_partition[i] != D_L0_8x8 )
- return 0;
- return 1;
- }
- else // B_8x8
- {
- // x264 currently doesn't use sub-8x8 B partitions, so don't check for them
- if( h->sps->b_direct8x8_inference )
- return 1;
- for( i=0; i<4; i++ )
- if( h->mb.i_sub_partition[i] == D_DIRECT_8x8 )
- return 0;
- return 1;
- }
-}
+#include "encoder/me.h"
void x264_mb_predict_mv( x264_t *h, int i_list, int idx, int i_width, int16_t mvp[2] )
{
int i_refc = h->mb.cache.ref[i_list][i8 - 8 + i_width ];
int16_t *mv_c = h->mb.cache.mv[i_list][i8 - 8 + i_width];
- int i_count;
+ int i_count = 0;
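    /* i_width is in 4-pel units (4, 2 or 1), so the compacted test below is
     * equivalent to the old one for every caller: 4-wide blocks (i_width&1)
     * need (idx&3) == 3, 8-wide blocks accept (idx&3) == 2 or 3, and 16-wide
     * blocks never reach (idx&3) >= 2 in the first place. */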
- if( (idx&0x03) == 3 || ( i_width == 2 && (idx&0x3) == 2 )|| i_refc == -2 )
+ if( (idx&3) >= 2 + (i_width&1) || i_refc == -2 )
{
i_refc = h->mb.cache.ref[i_list][i8 - 8 - 1];
mv_c = h->mb.cache.mv[i_list][i8 - 8 - 1];
if( h->mb.i_partition == D_16x8 )
{
- if( idx == 0 && i_refb == i_ref )
+ if( idx == 0 )
{
- *(uint32_t*)mvp = *(uint32_t*)mv_b;
- return;
+ if( i_refb == i_ref )
+ {
+ *(uint32_t*)mvp = *(uint32_t*)mv_b;
+ return;
+ }
}
- else if( idx != 0 && i_refa == i_ref )
+ else
{
- *(uint32_t*)mvp = *(uint32_t*)mv_a;
- return;
+ if( i_refa == i_ref )
+ {
+ *(uint32_t*)mvp = *(uint32_t*)mv_a;
+ return;
+ }
}
}
else if( h->mb.i_partition == D_8x16 )
{
- if( idx == 0 && i_refa == i_ref )
+ if( idx == 0 )
{
- *(uint32_t*)mvp = *(uint32_t*)mv_a;
- return;
+ if( i_refa == i_ref )
+ {
+ *(uint32_t*)mvp = *(uint32_t*)mv_a;
+ return;
+ }
}
- else if( idx != 0 && i_refc == i_ref )
+ else
{
- *(uint32_t*)mvp = *(uint32_t*)mv_c;
- return;
+ if( i_refc == i_ref )
+ {
+ *(uint32_t*)mvp = *(uint32_t*)mv_c;
+ return;
+ }
}
}
- i_count = 0;
if( i_refa == i_ref ) i_count++;
if( i_refb == i_ref ) i_count++;
if( i_refc == i_ref ) i_count++;
if( i_count > 1 )
+ {
+median:
x264_median_mv( mvp, mv_a, mv_b, mv_c );
+ }
else if( i_count == 1 )
{
        if( i_refa == i_ref )
            *(uint32_t*)mvp = *(uint32_t*)mv_a;
        else if( i_refb == i_ref )
            *(uint32_t*)mvp = *(uint32_t*)mv_b;
        else
            *(uint32_t*)mvp = *(uint32_t*)mv_c;
    }
else if( i_refb == -2 && i_refc == -2 && i_refa != -2 )
*(uint32_t*)mvp = *(uint32_t*)mv_a;
else
- x264_median_mv( mvp, mv_a, mv_b, mv_c );
+ goto median;
}
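/* For reference, the prediction above is the H.264 component-wise median of
 * the left (A), top (B) and top-right (C) neighbours. A minimal branch-based
 * sketch of that rule -- `median` here is illustrative only; x264's real
 * x264_median is a branchless equivalent:
 *
 *     static inline int median( int a, int b, int c )
 *     {
 *         int lo = a < b ? a : b, hi = a < b ? b : a;
 *         return c < lo ? lo : (c > hi ? hi : c); // middle of the three
 *     }
 *
 * e.g. median(3,-1,7) == 3, so a single outlier neighbour can't skew the
 * predictor. */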
void x264_mb_predict_mv_16x16( x264_t *h, int i_list, int i_ref, int16_t mvp[2] )
int i_refc = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8 + 4];
int16_t *mv_c = h->mb.cache.mv[i_list][X264_SCAN8_0 - 8 + 4];
- int i_count;
+ int i_count = 0;
if( i_refc == -2 )
{
mv_c = h->mb.cache.mv[i_list][X264_SCAN8_0 - 8 - 1];
}
- i_count = 0;
if( i_refa == i_ref ) i_count++;
if( i_refb == i_ref ) i_count++;
if( i_refc == i_ref ) i_count++;
if( i_count > 1 )
+ {
+median:
x264_median_mv( mvp, mv_a, mv_b, mv_c );
+ }
else if( i_count == 1 )
{
        if( i_refa == i_ref )
            *(uint32_t*)mvp = *(uint32_t*)mv_a;
        else if( i_refb == i_ref )
            *(uint32_t*)mvp = *(uint32_t*)mv_b;
        else
            *(uint32_t*)mvp = *(uint32_t*)mv_c;
    }
else if( i_refb == -2 && i_refc == -2 && i_refa != -2 )
*(uint32_t*)mvp = *(uint32_t*)mv_a;
else
- x264_median_mv( mvp, mv_a, mv_b, mv_c );
+ goto median;
}
int16_t *mv_b = h->mb.cache.mv[0][X264_SCAN8_0 - 8];
if( i_refa == -2 || i_refb == -2 ||
- ( i_refa == 0 && *(uint32_t*)mv_a == 0 ) ||
- ( i_refb == 0 && *(uint32_t*)mv_b == 0 ) )
+ !( i_refa | *(uint32_t*)mv_a ) ||
+ !( i_refb | *(uint32_t*)mv_b ) )
{
*(uint32_t*)mv = 0;
}
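/* P_SKIP takes the zero MV whenever a neighbour is unavailable or is a
 * zero-MV block with ref 0; since the ref index and both packed MV
 * components are zero exactly when (i_ref | *(uint32_t*)mv) == 0, the
 * rewritten test folds the two compares into one. */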
{
int i_mb_4x4 = 16 * h->mb.i_mb_stride * h->mb.i_mb_y + 4 * h->mb.i_mb_x;
int i_mb_8x8 = 4 * h->mb.i_mb_stride * h->mb.i_mb_y + 2 * h->mb.i_mb_x;
- int i8, i4;
- int b8x8;
+ int i8;
const int type_col = h->fref1[0]->mb_type[ h->mb.i_mb_xy ];
-
+
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, 0 );
-
+
if( IS_INTRA( type_col ) )
{
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, 0 );
x264_macroblock_cache_mv( h, 0, 0, 4, 4, 1, 0 );
return 1;
}
- b8x8 = h->sps->b_direct8x8_inference ||
- (type_col != P_8x8 && type_col != B_SKIP && type_col != B_DIRECT && type_col != B_8x8);
for( i8 = 0; i8 < 4; i8++ )
{
if( i_ref >= 0 )
{
const int dist_scale_factor = h->mb.dist_scale_factor[i_ref][0];
-
+ const int16_t *mv_col = h->fref1[0]->mv[0][ i_mb_4x4 + 3*x8 + 3*y8 * h->mb.i_b4_stride];
+ const int l0x = ( dist_scale_factor * mv_col[0] + 128 ) >> 8;
+ const int l0y = ( dist_scale_factor * mv_col[1] + 128 ) >> 8;
+ if( h->param.i_threads > 1 && (l0y > h->mb.mv_max_spel[1] || l0y-mv_col[1] > h->mb.mv_max_spel[1]) )
+ return 0;
x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, i_ref );
-
- if( b8x8 )
- {
- const int16_t *mv_col = h->fref1[0]->mv[0][ i_mb_4x4 + 3*x8 + 3*y8 * h->mb.i_b4_stride];
- const int l0x = ( dist_scale_factor * mv_col[0] + 128 ) >> 8;
- const int l0y = ( dist_scale_factor * mv_col[1] + 128 ) >> 8;
- x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, pack16to32_mask(l0x, l0y) );
- x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 1, pack16to32_mask(l0x-mv_col[0], l0y-mv_col[1]) );
- }
- else
- {
- for( i4 = 0; i4 < 4; i4++ )
- {
- const int x4 = i4%2 + 2*x8;
- const int y4 = i4/2 + 2*y8;
- const int16_t *mv_col = h->fref1[0]->mv[0][ i_mb_4x4 + x4 + y4 * h->mb.i_b4_stride ];
- const int l0x = ( dist_scale_factor * mv_col[0] + 128 ) >> 8;
- const int l0y = ( dist_scale_factor * mv_col[1] + 128 ) >> 8;
- x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, pack16to32_mask(l0x, l0y) );
- x264_macroblock_cache_mv( h, x4, y4, 1, 1, 1, pack16to32_mask(l0x-mv_col[0], l0y-mv_col[1]) );
- }
- }
+ x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, pack16to32_mask(l0x, l0y) );
+ x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 1, pack16to32_mask(l0x-mv_col[0], l0y-mv_col[1]) );
}
else
{
}
}
- if( h->param.i_threads > 1 )
- {
- int di = b8x8 ? 4 : 1;
- for( i4=0; i4<16; i4+=di )
- {
- if( h->mb.cache.mv[0][x264_scan8[i4]][1] > h->mb.mv_max_spel[1]
- || h->mb.cache.mv[1][x264_scan8[i4]][1] > h->mb.mv_max_spel[1] )
- {
-#if 0
- fprintf(stderr, "direct_temporal: (%d,%d) (%d,%d) > %d \n",
- h->mb.cache.mv[0][x264_scan8[i4]][0],
- h->mb.cache.mv[0][x264_scan8[i4]][1],
- h->mb.cache.mv[1][x264_scan8[i4]][0],
- h->mb.cache.mv[1][x264_scan8[i4]][1],
- h->mb.mv_max_spel[1]);
-#endif
- return 0;
- }
- }
- }
-
return 1;
}
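/* A worked instance of the temporal-direct scaling above, assuming
 * dist_scale_factor ~ 256*tb/td (8.8 fixed point): with td=4, tb=1 the
 * factor is 64, so mv_col = (16,-8) gives
 *     l0 = ( (64*16+128)>>8, (64*-8+128)>>8 ) = (4,-2)
 *     l1 = l0 - mv_col = (-12,6)
 * i.e. L0 is mv_col scaled by tb/td and L1 covers the remaining distance
 * to the colocated reference. */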
static int x264_mb_predict_mv_direct16x16_spatial( x264_t *h )
{
int ref[2];
- DECLARE_ALIGNED_8( int16_t mv[2][2] );
+ ALIGNED_8( int16_t mv[2][2] );
int i_list;
- int i8, i4;
- int b8x8;
+ int i8;
const int8_t *l1ref0 = &h->fref1[0]->ref[0][ h->mb.i_b8_xy ];
const int8_t *l1ref1 = &h->fref1[0]->ref[1][ h->mb.i_b8_xy ];
const int16_t (*l1mv0)[2] = (const int16_t (*)[2]) &h->fref1[0]->mv[0][ h->mb.i_b4_xy ];
if( ref[0] < 0 && ref[1] < 0 )
{
- ref[0] =
- ref[1] = 0;
- *(uint64_t*)mv[0] = 0;
+ x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, 0 );
+ x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, 0 );
+ x264_macroblock_cache_mv( h, 0, 0, 4, 4, 0, 0 );
+ x264_macroblock_cache_mv( h, 0, 0, 4, 4, 1, 0 );
+ return 1;
}
+
+ if( ref[0] >= 0 )
+ x264_mb_predict_mv_16x16( h, 0, ref[0], mv[0] );
else
- {
- for( i_list=0; i_list<2; i_list++ )
- {
- if( ref[i_list] >= 0 )
- x264_mb_predict_mv_16x16( h, i_list, ref[i_list], mv[i_list] );
- else
- *(uint32_t*)mv[i_list] = 0;
- }
- }
+ *(uint32_t*)mv[0] = 0;
+ if( ref[1] >= 0 )
+ x264_mb_predict_mv_16x16( h, 1, ref[1], mv[1] );
+ else
+ *(uint32_t*)mv[1] = 0;
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, ref[0] );
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, ref[1] );
x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, mv[0] );
x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 1, mv[1] );
- if( IS_INTRA( type_col ) )
- return 1;
-
if( h->param.i_threads > 1
&& ( mv[0][1] > h->mb.mv_max_spel[1]
|| mv[1][1] > h->mb.mv_max_spel[1] ) )
return 0;
}
- b8x8 = h->sps->b_direct8x8_inference ||
- (type_col != P_8x8 && type_col != B_SKIP && type_col != B_DIRECT && type_col != B_8x8);
+ if( IS_INTRA( type_col ) || (ref[0]&&ref[1]) )
+ return 1;
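/* The early return above is safe because the col_zero loop below can only
 * zero out vectors for a list whose spatially-predicted ref is 0; once both
 * refs are nonzero (or the colocated MB is intra, where col_zero doesn't
 * apply), nothing in the loop could change the cache. */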
/* col_zero_flag */
for( i8=0; i8<4; i8++ )
const int x8 = i8%2;
const int y8 = i8/2;
const int o8 = x8 + y8 * h->mb.i_b8_stride;
- if( l1ref0[o8] == 0 || ( l1ref0[o8] < 0 && l1ref1[o8] == 0 ) )
+ const int o4 = 3*(x8 + y8 * h->mb.i_b4_stride);
+ if( l1ref0[o8] == 0 )
{
- const int16_t (*l1mv)[2] = (l1ref0[o8] == 0) ? l1mv0 : l1mv1;
- if( b8x8 )
+ if( abs( l1mv0[o4][0] ) <= 1 && abs( l1mv0[o4][1] ) <= 1 )
{
- const int16_t *mvcol = l1mv[3*x8 + 3*y8 * h->mb.i_b4_stride];
- if( abs( mvcol[0] ) <= 1 && abs( mvcol[1] ) <= 1 )
- {
- if( ref[0] == 0 )
- x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, 0 );
- if( ref[1] == 0 )
- x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 1, 0 );
- }
+ if( ref[0] == 0 ) x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, 0 );
+ if( ref[1] == 0 ) x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 1, 0 );
}
- else
+ }
+ else if( l1ref0[o8] < 0 && l1ref1[o8] == 0 )
+ {
+ if( abs( l1mv1[o4][0] ) <= 1 && abs( l1mv1[o4][1] ) <= 1 )
{
- for( i4=0; i4<4; i4++ )
- {
- const int x4 = i4%2 + 2*x8;
- const int y4 = i4/2 + 2*y8;
- const int16_t *mvcol = l1mv[x4 + y4 * h->mb.i_b4_stride];
- if( abs( mvcol[0] ) <= 1 && abs( mvcol[1] ) <= 1 )
- {
- if( ref[0] == 0 )
- x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, 0 );
- if( ref[1] == 0 )
- x264_macroblock_cache_mv( h, x4, y4, 1, 1, 1, 0 );
- }
- }
+ if( ref[0] == 0 ) x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, 0 );
+ if( ref[1] == 0 ) x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 1, 0 );
}
}
}
*(uint64_t*)h->mb.cache.direct_mv[1][x264_scan8[idx*4]+8];
}
-#define FIXED_SCALE 256
-
/* This just improves encoder performance, it's not part of the spec */
-void x264_mb_predict_mv_ref16x16( x264_t *h, int i_list, int i_ref, int16_t mvc[8][2], int *i_mvc )
+void x264_mb_predict_mv_ref16x16( x264_t *h, int i_list, int i_ref, int16_t mvc[9][2], int *i_mvc )
{
int16_t (*mvr)[2] = h->mb.mvr[i_list][i_ref];
int i = 0;
SET_MVP( h->mb.cache.mv[i_list][x264_scan8[12]] );
}
+ if( i_ref == 0 && h->frames.b_have_lowres )
+ {
+ int16_t (*lowres_mv)[2] = i_list ? h->fenc->lowres_mvs[1][h->fref1[0]->i_frame-h->fenc->i_frame-1]
+ : h->fenc->lowres_mvs[0][h->fenc->i_frame-h->fref0[0]->i_frame-1];
+ if( lowres_mv[0][0] != 0x7fff ) *(uint32_t*)mvc[i++] = (*(uint32_t*)lowres_mv[h->mb.i_mb_xy]*2)&0xfffeffff;
+ }
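/* The lowres predictor above packs two int16 MV components into one
 * uint32, with 0x7fff serving as the "not yet computed" marker.
 * Multiplying the packed pair by 2 (half-res to full-res) leaks exactly
 * one carry bit, from bit 15 of the low half into bit 16; the 0xfffeffff
 * mask clears that bit, leaving each half an exact int16 doubling. */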
+
/* spatial predictors */
if( h->mb.i_neighbour & MB_LEFT )
{
if( ref_col >= 0 ) \
{ \
int scale = (h->fdec->i_poc - h->fdec->ref_poc[0][i_ref]) * l0->inv_ref_poc[ref_col];\
- mvc[i][0] = l0->mv[0][i_b4][0] * scale / FIXED_SCALE; \
- mvc[i][1] = l0->mv[0][i_b4][1] * scale / FIXED_SCALE; \
+ mvc[i][0] = (l0->mv[0][i_b4][0]*scale + 128) >> 8;\
+ mvc[i][1] = (l0->mv[0][i_b4][1]*scale + 128) >> 8;\
i++; \
} \
}
#undef SET_TMVP
}
- if(i == 0)
- *(uint32_t*)mvc[i] = 0;
-
*i_mvc = i;
}
for( i = 0; i < h->i_ref0; i++ )
{
int delta = h->fdec->i_poc - h->fref0[i]->i_poc;
- h->fdec->inv_ref_poc[i] = (FIXED_SCALE + delta/2) / delta;
+ h->fdec->inv_ref_poc[i] = (256 + delta/2) / delta;
}
}
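/* inv_ref_poc caches round(256/delta) so SET_TMVP's per-MB scaling is a
 * multiply and a >>8 instead of a divide. E.g. delta = 2 gives
 * (256+1)/2 = 128; if the current block's own reference is 4 POCs away,
 * scale = 4*128 = 512 and (mv*512+128)>>8 == mv*2, matching the exact
 * ratio 4/2. */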
static inline void x264_mb_mc_01xywh( x264_t *h, int x, int y, int width, int height )
{
const int i8 = x264_scan8[0]+x+8*y;
-
+ const int i_ref0 = h->mb.cache.ref[0][i8];
const int i_ref1 = h->mb.cache.ref[1][i8];
+ const int weight = h->mb.bipred_weight[i_ref0][i_ref1];
+ const int mvx0 = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] );
const int mvx1 = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] );
+ int mvy0 = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] );
int mvy1 = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] );
- DECLARE_ALIGNED_16( uint8_t tmp[16*16] );
- int i_mode = x264_size2pixel[height][width];
-
- x264_mb_mc_0xywh( h, x, y, width, height );
-
- h->mc.mc_luma( tmp, 16, h->mb.pic.p_fref[1][i_ref1], h->mb.pic.i_stride[0],
- mvx1 + 4*4*x, mvy1 + 4*4*y, 4*width, 4*height );
-
+ int i_mode = x264_size2pixel[height][width];
+ int i_stride0 = 16, i_stride1 = 16;
+ ALIGNED_ARRAY_16( uint8_t, tmp0,[16*16] );
+ ALIGNED_ARRAY_16( uint8_t, tmp1,[16*16] );
+ uint8_t *src0, *src1;
+
+ src0 = h->mc.get_ref( tmp0, &i_stride0, h->mb.pic.p_fref[0][i_ref0], h->mb.pic.i_stride[0],
+ mvx0 + 4*4*x, mvy0 + 4*4*y, 4*width, 4*height );
+ src1 = h->mc.get_ref( tmp1, &i_stride1, h->mb.pic.p_fref[1][i_ref1], h->mb.pic.i_stride[0],
+ mvx1 + 4*4*x, mvy1 + 4*4*y, 4*width, 4*height );
+ h->mc.avg[i_mode]( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE,
+ src0, i_stride0, src1, i_stride1, weight );
+
+ if( h->mb.b_interlaced & i_ref0 )
+ mvy0 += (h->mb.i_mb_y & 1)*4 - 2;
if( h->mb.b_interlaced & i_ref1 )
mvy1 += (h->mb.i_mb_y & 1)*4 - 2;
- if( h->param.analyse.b_weighted_bipred )
- {
- const int i_ref0 = h->mb.cache.ref[0][i8];
- const int weight = h->mb.bipred_weight[i_ref0][i_ref1];
-
- h->mc.avg_weight[i_mode]( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, tmp, 16, weight );
-
- h->mc.mc_chroma( tmp, 16, &h->mb.pic.p_fref[1][i_ref1][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1],
- mvx1, mvy1, 2*width, 2*height );
- h->mc.avg_weight[i_mode+3]( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp, 16, weight );
-
- h->mc.mc_chroma( tmp, 16, &h->mb.pic.p_fref[1][i_ref1][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2],
- mvx1, mvy1, 2*width, 2*height );
- h->mc.avg_weight[i_mode+3]( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp, 16, weight );
- }
- else
- {
- h->mc.avg[i_mode]( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, tmp, 16 );
-
- h->mc.mc_chroma( tmp, 16, &h->mb.pic.p_fref[1][i_ref1][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1],
- mvx1, mvy1, 2*width, 2*height );
- h->mc.avg[i_mode+3]( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp, 16 );
-
- h->mc.mc_chroma( tmp, 16, &h->mb.pic.p_fref[1][i_ref1][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2],
- mvx1, mvy1, 2*width, 2*height );
- h->mc.avg[i_mode+3]( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp, 16 );
- }
+ h->mc.mc_chroma( tmp0, 16, &h->mb.pic.p_fref[0][i_ref0][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1],
+ mvx0, mvy0, 2*width, 2*height );
+ h->mc.mc_chroma( tmp1, 16, &h->mb.pic.p_fref[1][i_ref1][4][2*y*h->mb.pic.i_stride[1]+2*x], h->mb.pic.i_stride[1],
+ mvx1, mvy1, 2*width, 2*height );
+ h->mc.avg[i_mode+3]( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp0, 16, tmp1, 16, weight );
+ h->mc.mc_chroma( tmp0, 16, &h->mb.pic.p_fref[0][i_ref0][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2],
+ mvx0, mvy0, 2*width, 2*height );
+ h->mc.mc_chroma( tmp1, 16, &h->mb.pic.p_fref[1][i_ref1][5][2*y*h->mb.pic.i_stride[2]+2*x], h->mb.pic.i_stride[2],
+ mvx1, mvy1, 2*width, 2*height );
+ h->mc.avg[i_mode+3]( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp0, 16, tmp1, 16, weight );
}
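/* With the two-source avg above, bidirectional prediction is one blend per
 * plane, conceptually dst = ( w0*src0 + w1*src1 + 32 ) >> 6 with
 * w0 + w1 = 64; weight == 32 degenerates to a plain rounded average.
 * Which operand takes `weight` is a detail of h->mc.avg's convention. */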
static void x264_mb_mc_direct8x8( x264_t *h, int x, int y )
{
const int i8 = x264_scan8[0] + x + 8*y;
- /* FIXME: optimize based on current block size, not global settings? */
- if( h->sps->b_direct8x8_inference )
- {
- if( h->mb.cache.ref[0][i8] >= 0 )
- if( h->mb.cache.ref[1][i8] >= 0 )
- x264_mb_mc_01xywh( h, x, y, 2, 2 );
- else
- x264_mb_mc_0xywh( h, x, y, 2, 2 );
+ if( h->mb.cache.ref[0][i8] >= 0 )
+ if( h->mb.cache.ref[1][i8] >= 0 )
+ x264_mb_mc_01xywh( h, x, y, 2, 2 );
else
- x264_mb_mc_1xywh( h, x, y, 2, 2 );
- }
+ x264_mb_mc_0xywh( h, x, y, 2, 2 );
else
- {
- if( h->mb.cache.ref[0][i8] >= 0 )
- {
- if( h->mb.cache.ref[1][i8] >= 0 )
- {
- x264_mb_mc_01xywh( h, x+0, y+0, 1, 1 );
- x264_mb_mc_01xywh( h, x+1, y+0, 1, 1 );
- x264_mb_mc_01xywh( h, x+0, y+1, 1, 1 );
- x264_mb_mc_01xywh( h, x+1, y+1, 1, 1 );
- }
- else
- {
- x264_mb_mc_0xywh( h, x+0, y+0, 1, 1 );
- x264_mb_mc_0xywh( h, x+1, y+0, 1, 1 );
- x264_mb_mc_0xywh( h, x+0, y+1, 1, 1 );
- x264_mb_mc_0xywh( h, x+1, y+1, 1, 1 );
- }
- }
- else
- {
- x264_mb_mc_1xywh( h, x+0, y+0, 1, 1 );
- x264_mb_mc_1xywh( h, x+1, y+0, 1, 1 );
- x264_mb_mc_1xywh( h, x+0, y+1, 1, 1 );
- x264_mb_mc_1xywh( h, x+1, y+1, 1, 1 );
- }
- }
+ x264_mb_mc_1xywh( h, x, y, 2, 2 );
}
void x264_mb_mc_8x8( x264_t *h, int i8 )
case D_L1_8x8:
x264_mb_mc_1xywh( h, x, y, 2, 2 );
break;
- case D_L1_8x4:
- x264_mb_mc_1xywh( h, x, y+0, 2, 1 );
- x264_mb_mc_1xywh( h, x, y+1, 2, 1 );
- break;
- case D_L1_4x8:
- x264_mb_mc_1xywh( h, x+0, y, 1, 2 );
- x264_mb_mc_1xywh( h, x+1, y, 1, 2 );
- break;
- case D_L1_4x4:
- x264_mb_mc_1xywh( h, x+0, y+0, 1, 1 );
- x264_mb_mc_1xywh( h, x+1, y+0, 1, 1 );
- x264_mb_mc_1xywh( h, x+0, y+1, 1, 1 );
- x264_mb_mc_1xywh( h, x+1, y+1, 1, 1 );
- break;
case D_BI_8x8:
x264_mb_mc_01xywh( h, x, y, 2, 2 );
break;
- case D_BI_8x4:
- x264_mb_mc_01xywh( h, x, y+0, 2, 1 );
- x264_mb_mc_01xywh( h, x, y+1, 2, 1 );
- break;
- case D_BI_4x8:
- x264_mb_mc_01xywh( h, x+0, y, 1, 2 );
- x264_mb_mc_01xywh( h, x+1, y, 1, 2 );
- break;
- case D_BI_4x4:
- x264_mb_mc_01xywh( h, x+0, y+0, 1, 1 );
- x264_mb_mc_01xywh( h, x+1, y+0, 1, 1 );
- x264_mb_mc_01xywh( h, x+0, y+1, 1, 1 );
- x264_mb_mc_01xywh( h, x+1, y+1, 1, 1 );
- break;
case D_DIRECT_8x8:
x264_mb_mc_direct8x8( h, x, y );
break;
}
else /* B_*x* */
{
- int b_list0[2];
- int b_list1[2];
+ const uint8_t *b_list0 = x264_mb_type_list_table[h->mb.i_type][0];
+ const uint8_t *b_list1 = x264_mb_type_list_table[h->mb.i_type][1];
- int i;
-
- /* init ref list utilisations */
- for( i = 0; i < 2; i++ )
- {
- b_list0[i] = x264_mb_type_list0_table[h->mb.i_type][i];
- b_list1[i] = x264_mb_type_list1_table[h->mb.i_type][i];
- }
if( h->mb.i_partition == D_16x16 )
{
if( b_list0[0] && b_list1[0] ) x264_mb_mc_01xywh( h, 0, 0, 4, 4 );
for( i=0; i<=h->param.b_interlaced; i++ )
for( j=0; j<3; j++ )
{
- CHECKED_MALLOC( h->mb.intra_border_backup[i][j], h->fdec->i_stride[j] );
+ /* shouldn't really be initialized, just silences a valgrind false-positive in predict_8x8_filter_mmx */
+ CHECKED_MALLOCZERO( h->mb.intra_border_backup[i][j], (h->sps->i_mb_width*16+32)>>!!j );
h->mb.intra_border_backup[i][j] += 8;
}
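/* Each backup row caches the pixels above the current MB row for intra
 * prediction: i_mb_width*16 luma samples plus a 32-sample margin for the
 * top-left/top-right neighbours, halved (>>!!j) for each chroma plane. */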
memset( h->mb.cache.ref[0], -2, X264_SCAN8_SIZE * sizeof( int8_t ) );
memset( h->mb.cache.ref[1], -2, X264_SCAN8_SIZE * sizeof( int8_t ) );
+ /* fdec: fenc:
+ * yyyyyyy
+ * yYYYY YYYY
+ * yYYYY YYYY
+ * yYYYY YYYY
+ * yYYYY YYYY
+ * uuu vvv UUVV
+ * uUU vVV UUVV
+ * uUU vVV
+ */
+ h->mb.pic.p_fenc[0] = h->mb.pic.fenc_buf;
+ h->mb.pic.p_fenc[1] = h->mb.pic.fenc_buf + 16*FENC_STRIDE;
+ h->mb.pic.p_fenc[2] = h->mb.pic.fenc_buf + 16*FENC_STRIDE + 8;
+ h->mb.pic.p_fdec[0] = h->mb.pic.fdec_buf + 2*FDEC_STRIDE;
+ h->mb.pic.p_fdec[1] = h->mb.pic.fdec_buf + 19*FDEC_STRIDE;
+ h->mb.pic.p_fdec[2] = h->mb.pic.fdec_buf + 19*FDEC_STRIDE + 16;
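/* Offset arithmetic for the pointers above, assuming x264's
 * FENC_STRIDE == 16 and FDEC_STRIDE == 32: fenc packs the 16x16 Y block
 * on top of the side-by-side 8x8 U and V blocks, so U begins 16 rows in
 * and V 8 bytes to its right. fdec reserves a top/left margin for intra
 * neighbours, so Y starts at row 2 and the chroma pair at row 19
 * (2 + 16 Y rows + 1 margin row), with V 16 bytes right of U. */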
+
+ h->mb.i_neighbour4[6] =
+ h->mb.i_neighbour4[9] =
+ h->mb.i_neighbour4[12] =
+ h->mb.i_neighbour4[14] = MB_LEFT|MB_TOP|MB_TOPLEFT|MB_TOPRIGHT;
+ h->mb.i_neighbour4[3] =
+ h->mb.i_neighbour4[7] =
+ h->mb.i_neighbour4[11] =
+ h->mb.i_neighbour4[13] =
+ h->mb.i_neighbour4[15] =
+ h->mb.i_neighbour8[3] = MB_LEFT|MB_TOP|MB_TOPLEFT;
+
+ int buf_hpel = (h->param.i_width+48) * sizeof(int16_t);
+ int buf_ssim = h->param.analyse.b_ssim * 8 * (h->param.i_width/4+3) * sizeof(int);
+ int me_range = X264_MIN(h->param.analyse.i_me_range, h->param.analyse.i_mv_range);
+ int buf_tesa = (h->param.analyse.i_me_method >= X264_ME_ESA) *
+ ((me_range*2+18) * sizeof(int16_t) + (me_range+4) * (me_range+1) * 4 * sizeof(mvsad_t));
+ int buf_mbtree = h->param.rc.b_mb_tree * ((h->sps->i_mb_width+3)&~3) * sizeof(int);
+ CHECKED_MALLOC( h->scratch_buffer, X264_MAX4( buf_hpel, buf_ssim, buf_tesa, buf_mbtree ) );
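/* One scratch buffer sized with X264_MAX4 to the largest consumer (hpel
 * filtering, SSIM, exhaustive ME's candidate list, or mb_tree) replaces
 * separate per-use allocations; since those uses aren't live at the same
 * time, only the worst-case size matters. */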
+
return 0;
fail: return -1;
}
x264_free( h->mb.skipbp );
x264_free( h->mb.cbp );
x264_free( h->mb.qp );
+ x264_free( h->scratch_buffer );
}
void x264_macroblock_slice_init( x264_t *h )
{
static NOINLINE void copy_column8( uint8_t *dst, uint8_t *src )
{
+ // input pointers are offset by 4 rows because that's faster (smaller instruction size on x86)
int i;
- for(i=0; i<8; i++)
+ for( i = -4; i < 4; i++ )
dst[i*FDEC_STRIDE] = src[i*FDEC_STRIDE];
}
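/* Why -4..4: FDEC_STRIDE is 32, so i*FDEC_STRIDE here spans -128..96,
 * which fits x86's one-byte (disp8) addressing; the old 0..8 loop reached
 * offset 224 and forced four-byte disp32 encodings. The call sites
 * compensate by passing pointers advanced by 4*FDEC_STRIDE. */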
static void ALWAYS_INLINE x264_macroblock_load_pic_pointers( x264_t *h, int i_mb_x, int i_mb_y, int i)
{
const int w = (i == 0 ? 16 : 8);
- const int i_stride = h->fdec->i_stride[i];
+ const int i_stride = h->fdec->i_stride[!!i];
const int i_stride2 = i_stride << h->mb.b_interlaced;
const int i_pix_offset = h->mb.b_interlaced
? w * (i_mb_x + (i_mb_y&~1) * i_stride) + (i_mb_y&1) * i_stride
if( h->mb.b_interlaced )
ref_pix_offset[1] += (1-2*(i_mb_y&1)) * i_stride;
h->mb.pic.i_stride[i] = i_stride2;
+ h->mb.pic.p_fenc_plane[i] = &h->fenc->plane[i][i_pix_offset];
h->mc.copy[i?PIXEL_8x8:PIXEL_16x16]( h->mb.pic.p_fenc[i], FENC_STRIDE,
- &h->fenc->plane[i][i_pix_offset], i_stride2, w );
+ h->mb.pic.p_fenc_plane[i], i_stride2, w );
memcpy( &h->mb.pic.p_fdec[i][-1-FDEC_STRIDE], intra_fdec-1, w*3/2+1 );
- if( h->mb.b_interlaced )
+ if( h->mb.b_interlaced || h->mb.b_reencode_mb )
{
const uint8_t *plane_fdec = &h->fdec->plane[i][i_pix_offset];
for( j = 0; j < w; j++ )
int i;
- assert( h->mb.i_b8_stride == 2*h->mb.i_mb_stride );
- assert( h->mb.i_b4_stride == 4*h->mb.i_mb_stride );
-
/* init index */
h->mb.i_mb_x = i_mb_x;
h->mb.i_mb_y = i_mb_y;
{
h->mb.i_mb_type_top =
i_top_type= h->mb.type[i_top_xy];
+ h->mb.cache.i_cbp_top = h->mb.cbp[i_top_xy];
h->mb.i_neighbour |= MB_TOP;
else
{
h->mb.i_mb_type_top = -1;
+ h->mb.cache.i_cbp_top = -1;
/* load intra4x4 */
h->mb.cache.intra4x4_pred_mode[x264_scan8[0] - 8] =
i_left_xy = i_mb_xy - 1;
h->mb.i_mb_type_left =
i_left_type = h->mb.type[i_left_xy];
+ h->mb.cache.i_cbp_left = h->mb.cbp[h->mb.i_mb_xy - 1];
h->mb.i_neighbour |= MB_LEFT;
else
{
h->mb.i_mb_type_left = -1;
+ h->mb.cache.i_cbp_left = -1;
h->mb.cache.intra4x4_pred_mode[x264_scan8[0 ] - 1] =
h->mb.cache.intra4x4_pred_mode[x264_scan8[2 ] - 1] =
+ !!(h->mb.i_neighbour & MB_TOP);
}
- /* fdec: fenc:
- * yyyyyyy
- * yYYYY YYYY
- * yYYYY YYYY
- * yYYYY YYYY
- * yYYYY YYYY
- * uuu vvv UUVV
- * uUU vVV UUVV
- * uUU vVV
- */
- h->mb.pic.p_fenc[0] = h->mb.pic.fenc_buf;
- h->mb.pic.p_fenc[1] = h->mb.pic.fenc_buf + 16*FENC_STRIDE;
- h->mb.pic.p_fenc[2] = h->mb.pic.fenc_buf + 16*FENC_STRIDE + 8;
- h->mb.pic.p_fdec[0] = h->mb.pic.fdec_buf + 2*FDEC_STRIDE;
- h->mb.pic.p_fdec[1] = h->mb.pic.fdec_buf + 19*FDEC_STRIDE;
- h->mb.pic.p_fdec[2] = h->mb.pic.fdec_buf + 19*FDEC_STRIDE + 16;
-
- if( !h->mb.b_interlaced )
+ if( !h->mb.b_interlaced && !h->mb.b_reencode_mb )
{
- copy_column8( h->mb.pic.p_fdec[0]-1, h->mb.pic.p_fdec[0]+15 );
- copy_column8( h->mb.pic.p_fdec[0]-1+8*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+8*FDEC_STRIDE );
- copy_column8( h->mb.pic.p_fdec[1]-1, h->mb.pic.p_fdec[1]+7 );
- copy_column8( h->mb.pic.p_fdec[2]-1, h->mb.pic.p_fdec[2]+7 );
+ copy_column8( h->mb.pic.p_fdec[0]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+ 4*FDEC_STRIDE );
+ copy_column8( h->mb.pic.p_fdec[0]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+12*FDEC_STRIDE );
+ copy_column8( h->mb.pic.p_fdec[1]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+ 7+ 4*FDEC_STRIDE );
+ copy_column8( h->mb.pic.p_fdec[2]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+ 7+ 4*FDEC_STRIDE );
}
/* load picture pointers */
h->mb.cache.ref[i_list][i8+2*8] =
h->mb.cache.ref[i_list][i8+3*8] = h->mb.ref[i_list][ir + 1*s8x8];
- for( i = 0; i < 4; i++ )
- *(uint32_t*)h->mb.cache.mv[i_list][i8+i*8] = *(uint32_t*)h->mb.mv[i_list][iv + i*s4x4];
+ *(uint32_t*)h->mb.cache.mv[i_list][i8+0*8] = *(uint32_t*)h->mb.mv[i_list][iv + 0*s4x4];
+ *(uint32_t*)h->mb.cache.mv[i_list][i8+1*8] = *(uint32_t*)h->mb.mv[i_list][iv + 1*s4x4];
+ *(uint32_t*)h->mb.cache.mv[i_list][i8+2*8] = *(uint32_t*)h->mb.mv[i_list][iv + 2*s4x4];
+ *(uint32_t*)h->mb.cache.mv[i_list][i8+3*8] = *(uint32_t*)h->mb.mv[i_list][iv + 3*s4x4];
}
else
{
{
const int i8 = x264_scan8[0] - 1;
const int iv = i_mb_4x4 - 1;
- for( i = 0; i < 4; i++ )
- *(uint32_t*)h->mb.cache.mvd[i_list][i8+i*8] = *(uint32_t*)h->mb.mvd[i_list][iv + i*s4x4];
+ *(uint32_t*)h->mb.cache.mvd[i_list][i8+0*8] = *(uint32_t*)h->mb.mvd[i_list][iv + 0*s4x4];
+ *(uint32_t*)h->mb.cache.mvd[i_list][i8+1*8] = *(uint32_t*)h->mb.mvd[i_list][iv + 1*s4x4];
+ *(uint32_t*)h->mb.cache.mvd[i_list][i8+2*8] = *(uint32_t*)h->mb.mvd[i_list][iv + 2*s4x4];
+ *(uint32_t*)h->mb.cache.mvd[i_list][i8+3*8] = *(uint32_t*)h->mb.mvd[i_list][iv + 3*s4x4];
}
else
{
/* load skip */
if( h->sh.i_type == SLICE_TYPE_B && h->param.b_cabac )
{
- memset( h->mb.cache.skip, 0, X264_SCAN8_SIZE * sizeof( int8_t ) );
- if( i_left_type >= 0 )
- {
- uint8_t skipbp = h->mb.skipbp[i_left_xy];
- h->mb.cache.skip[x264_scan8[0] - 1] = skipbp & 0x2;
- h->mb.cache.skip[x264_scan8[8] - 1] = skipbp & 0x8;
- }
- if( i_top_type >= 0 )
- {
- uint8_t skipbp = h->mb.skipbp[i_top_xy];
- h->mb.cache.skip[x264_scan8[0] - 8] = skipbp & 0x4;
- h->mb.cache.skip[x264_scan8[4] - 8] = skipbp & 0x8;
- }
+ uint8_t skipbp;
+ x264_macroblock_cache_skip( h, 0, 0, 4, 4, 0 );
+ skipbp = i_left_type >= 0 ? h->mb.skipbp[i_left_xy] : 0;
+ h->mb.cache.skip[x264_scan8[0] - 1] = skipbp & 0x2;
+ h->mb.cache.skip[x264_scan8[8] - 1] = skipbp & 0x8;
+ skipbp = i_top_type >= 0 ? h->mb.skipbp[i_top_xy] : 0;
+ h->mb.cache.skip[x264_scan8[0] - 8] = skipbp & 0x4;
+ h->mb.cache.skip[x264_scan8[4] - 8] = skipbp & 0x8;
}
if( h->sh.i_type == SLICE_TYPE_P )
h->mb.i_neighbour4[8] =
h->mb.i_neighbour4[10] =
h->mb.i_neighbour8[2] = MB_TOP|MB_TOPRIGHT | ((h->mb.i_neighbour & MB_LEFT) ? (MB_LEFT|MB_TOPLEFT) : 0);
- h->mb.i_neighbour4[3] =
- h->mb.i_neighbour4[7] =
- h->mb.i_neighbour4[11] =
- h->mb.i_neighbour4[13] =
- h->mb.i_neighbour4[15] =
- h->mb.i_neighbour8[3] = MB_LEFT|MB_TOP|MB_TOPLEFT;
h->mb.i_neighbour4[5] =
h->mb.i_neighbour8[1] = MB_LEFT | (h->mb.i_neighbour & MB_TOPRIGHT)
| ((h->mb.i_neighbour & MB_TOP) ? MB_TOP|MB_TOPLEFT : 0);
- h->mb.i_neighbour4[6] =
- h->mb.i_neighbour4[9] =
- h->mb.i_neighbour4[12] =
- h->mb.i_neighbour4[14] = MB_LEFT|MB_TOP|MB_TOPLEFT|MB_TOPRIGHT;
}
static void ALWAYS_INLINE x264_macroblock_store_pic( x264_t *h, int i)
{
int w = i ? 8 : 16;
- int i_stride = h->fdec->i_stride[i];
+ int i_stride = h->fdec->i_stride[!!i];
int i_stride2 = i_stride << h->mb.b_interlaced;
int i_pix_offset = h->mb.b_interlaced
? w * (h->mb.i_mb_x + (h->mb.i_mb_y&~1) * i_stride) + (h->mb.i_mb_y&1) * i_stride
x264_prefetch_fenc( h, h->fdec, h->mb.i_mb_x, h->mb.i_mb_y );
h->mb.type[i_mb_xy] = i_mb_type;
-
- if( h->mb.i_type != I_16x16 && h->mb.i_cbp_luma == 0 && h->mb.i_cbp_chroma == 0 )
- h->mb.i_qp = h->mb.i_last_qp;
- h->mb.qp[i_mb_xy] = h->mb.i_qp;
-
- h->mb.i_last_dqp = h->mb.i_qp - h->mb.i_last_qp;
- h->mb.i_last_qp = h->mb.i_qp;
- h->mb.i_mb_prev_xy = h->mb.i_mb_xy;
+ h->mb.i_mb_prev_xy = i_mb_xy;
/* save intra4x4 */
if( i_mb_type == I_4x4 )
if( i_mb_type == I_PCM )
{
+ h->mb.qp[i_mb_xy] = 0;
+ h->mb.i_last_dqp = 0;
+ h->mb.i_cbp_chroma = 2;
+ h->mb.i_cbp_luma = 0xf;
h->mb.cbp[i_mb_xy] = 0x72f; /* all set */
+ h->mb.b_transform_8x8 = 0;
for( i = 0; i < 16 + 2*4; i++ )
non_zero_count[i] = 16;
}
else
{
/* save non zero count */
- for( y = 0; y < 4; y++ )
- *(uint32_t*)&non_zero_count[y*4] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[0]+y*8];
- for( y = 0; y < 4; y++ )
- *(uint16_t*)&non_zero_count[16+y*2] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[16+y*2]-1] >> 8;
-
+ *(uint32_t*)&non_zero_count[0*4] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[0]+0*8];
+ *(uint32_t*)&non_zero_count[1*4] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[0]+1*8];
+ *(uint32_t*)&non_zero_count[2*4] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[0]+2*8];
+ *(uint32_t*)&non_zero_count[3*4] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[0]+3*8];
+ *(uint16_t*)&non_zero_count[16+0*2] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[16+0*2]-1] >> 8;
+ *(uint16_t*)&non_zero_count[16+1*2] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[16+1*2]-1] >> 8;
+ *(uint16_t*)&non_zero_count[16+2*2] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[16+2*2]-1] >> 8;
+ *(uint16_t*)&non_zero_count[16+3*2] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[16+3*2]-1] >> 8;
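/* The chroma stores above rely on the two 4x4 blocks of each pair sitting
 * in adjacent scan8 cache slots: loading 4 bytes starting one slot early
 * and shifting right by 8 isolates exactly those two counts for a single
 * uint16 store (little-endian layout assumed). */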
+
+ if( h->mb.i_type != I_16x16 && h->mb.i_cbp_luma == 0 && h->mb.i_cbp_chroma == 0 )
+ h->mb.i_qp = h->mb.i_last_qp;
+ h->mb.qp[i_mb_xy] = h->mb.i_qp;
+ h->mb.i_last_dqp = h->mb.i_qp - h->mb.i_last_qp;
+ h->mb.i_last_qp = h->mb.i_qp;
}
if( h->mb.i_cbp_luma == 0 && h->mb.i_type != I_8x8 )
h->mb.b_transform_8x8 = 0;
h->mb.mb_transform_size[i_mb_xy] = h->mb.b_transform_8x8;
- if( !IS_INTRA( i_mb_type ) )
+ if( h->sh.i_type != SLICE_TYPE_I )
{
- h->mb.ref[0][i_mb_8x8+0+0*s8x8] = h->mb.cache.ref[0][x264_scan8[0]];
- h->mb.ref[0][i_mb_8x8+1+0*s8x8] = h->mb.cache.ref[0][x264_scan8[4]];
- h->mb.ref[0][i_mb_8x8+0+1*s8x8] = h->mb.cache.ref[0][x264_scan8[8]];
- h->mb.ref[0][i_mb_8x8+1+1*s8x8] = h->mb.cache.ref[0][x264_scan8[12]];
- for( y = 0; y < 4; y++ )
- {
- *(uint64_t*)h->mb.mv[0][i_mb_4x4+y*s4x4+0] = *(uint64_t*)h->mb.cache.mv[0][x264_scan8[0]+8*y+0];
- *(uint64_t*)h->mb.mv[0][i_mb_4x4+y*s4x4+2] = *(uint64_t*)h->mb.cache.mv[0][x264_scan8[0]+8*y+2];
- }
- if(h->sh.i_type == SLICE_TYPE_B)
+ if( !IS_INTRA( i_mb_type ) )
{
- h->mb.ref[1][i_mb_8x8+0+0*s8x8] = h->mb.cache.ref[1][x264_scan8[0]];
- h->mb.ref[1][i_mb_8x8+1+0*s8x8] = h->mb.cache.ref[1][x264_scan8[4]];
- h->mb.ref[1][i_mb_8x8+0+1*s8x8] = h->mb.cache.ref[1][x264_scan8[8]];
- h->mb.ref[1][i_mb_8x8+1+1*s8x8] = h->mb.cache.ref[1][x264_scan8[12]];
+ h->mb.ref[0][i_mb_8x8+0+0*s8x8] = h->mb.cache.ref[0][x264_scan8[0]];
+ h->mb.ref[0][i_mb_8x8+1+0*s8x8] = h->mb.cache.ref[0][x264_scan8[4]];
+ h->mb.ref[0][i_mb_8x8+0+1*s8x8] = h->mb.cache.ref[0][x264_scan8[8]];
+ h->mb.ref[0][i_mb_8x8+1+1*s8x8] = h->mb.cache.ref[0][x264_scan8[12]];
for( y = 0; y < 4; y++ )
{
- *(uint64_t*)h->mb.mv[1][i_mb_4x4+y*s4x4+0] = *(uint64_t*)h->mb.cache.mv[1][x264_scan8[0]+8*y+0];
- *(uint64_t*)h->mb.mv[1][i_mb_4x4+y*s4x4+2] = *(uint64_t*)h->mb.cache.mv[1][x264_scan8[0]+8*y+2];
+ *(uint64_t*)h->mb.mv[0][i_mb_4x4+y*s4x4+0] = *(uint64_t*)h->mb.cache.mv[0][x264_scan8[0]+8*y+0];
+ *(uint64_t*)h->mb.mv[0][i_mb_4x4+y*s4x4+2] = *(uint64_t*)h->mb.cache.mv[0][x264_scan8[0]+8*y+2];
+ }
+ if( h->sh.i_type == SLICE_TYPE_B )
+ {
+ h->mb.ref[1][i_mb_8x8+0+0*s8x8] = h->mb.cache.ref[1][x264_scan8[0]];
+ h->mb.ref[1][i_mb_8x8+1+0*s8x8] = h->mb.cache.ref[1][x264_scan8[4]];
+ h->mb.ref[1][i_mb_8x8+0+1*s8x8] = h->mb.cache.ref[1][x264_scan8[8]];
+ h->mb.ref[1][i_mb_8x8+1+1*s8x8] = h->mb.cache.ref[1][x264_scan8[12]];
+ for( y = 0; y < 4; y++ )
+ {
+ *(uint64_t*)h->mb.mv[1][i_mb_4x4+y*s4x4+0] = *(uint64_t*)h->mb.cache.mv[1][x264_scan8[0]+8*y+0];
+ *(uint64_t*)h->mb.mv[1][i_mb_4x4+y*s4x4+2] = *(uint64_t*)h->mb.cache.mv[1][x264_scan8[0]+8*y+2];
+ }
}
}
- }
- else
- {
- int i_list;
- for( i_list = 0; i_list < (h->sh.i_type == SLICE_TYPE_B ? 2 : 1 ); i_list++ )
+ else
{
- *(uint16_t*)&h->mb.ref[i_list][i_mb_8x8+0*s8x8] = (uint8_t)(-1) * 0x0101;
- *(uint16_t*)&h->mb.ref[i_list][i_mb_8x8+1*s8x8] = (uint8_t)(-1) * 0x0101;
- for( y = 0; y < 4; y++ )
+ int i_list;
+ for( i_list = 0; i_list < (h->sh.i_type == SLICE_TYPE_B ? 2 : 1 ); i_list++ )
{
- *(uint64_t*)h->mb.mv[i_list][i_mb_4x4+y*s4x4+0] = 0;
- *(uint64_t*)h->mb.mv[i_list][i_mb_4x4+y*s4x4+2] = 0;
+ *(uint16_t*)&h->mb.ref[i_list][i_mb_8x8+0*s8x8] = (uint8_t)(-1) * 0x0101;
+ *(uint16_t*)&h->mb.ref[i_list][i_mb_8x8+1*s8x8] = (uint8_t)(-1) * 0x0101;
+ for( y = 0; y < 4; y++ )
+ {
+ *(uint64_t*)h->mb.mv[i_list][i_mb_4x4+y*s4x4+0] = 0;
+ *(uint64_t*)h->mb.mv[i_list][i_mb_4x4+y*s4x4+2] = 0;
+ }
}
}
}
if( h->param.b_cabac )
{
- if( i_mb_type == I_4x4 || i_mb_type == I_16x16 )
+ if( IS_INTRA(i_mb_type) && i_mb_type != I_PCM )
h->mb.chroma_pred_mode[i_mb_xy] = x264_mb_pred_mode8x8c_fix[ h->mb.i_chroma_pred_mode ];
else
h->mb.chroma_pred_mode[i_mb_xy] = I_PRED_CHROMA_DC;
h->mb.skipbp[i_mb_xy] = 0xf;
else if( i_mb_type == B_8x8 )
{
- int skipbp = 0;
- for( i = 0; i < 4; i++ )
- skipbp |= ( h->mb.i_sub_partition[i] == D_DIRECT_8x8 ) << i;
+ int skipbp = ( h->mb.i_sub_partition[0] == D_DIRECT_8x8 ) << 0;
+ skipbp |= ( h->mb.i_sub_partition[1] == D_DIRECT_8x8 ) << 1;
+ skipbp |= ( h->mb.i_sub_partition[2] == D_DIRECT_8x8 ) << 2;
+ skipbp |= ( h->mb.i_sub_partition[3] == D_DIRECT_8x8 ) << 3;
h->mb.skipbp[i_mb_xy] = skipbp;
}
else
if( h->param.analyse.b_weighted_bipred
&& dist_scale_factor >= -64
&& dist_scale_factor <= 128 )
+ {
h->mb.bipred_weight[i_ref0][i_ref1] = 64 - dist_scale_factor;
+ // ssse3 implementation of biweight doesn't support the extrema.
+ // if we ever generate them, we'll have to drop that optimization.
+ assert( dist_scale_factor >= -63 && dist_scale_factor <= 127 );
+ }
else
h->mb.bipred_weight[i_ref0][i_ref1] = 32;
}
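/* A worked instance of the implicit-weight derivation above, taking
 * dist_scale_factor as roughly 64*tb/td (the spec's DistScaleFactor
 * scaled down 2 bits): equidistant references give tb/td = 1/2, factor
 * 32, weight 64-32 = 32 -- a plain average. A B-frame one POC past ref0
 * with ref1 four POCs later gives factor 16 and weights 48/16, biasing
 * the blend toward the temporally closer reference. */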