/*****************************************************************************
* mc-c.c: x86 motion compensation
*****************************************************************************
- * Copyright (C) 2003-2014 x264 project
+ * Copyright (C) 2003-2016 x264 project
*
* Authors: Laurent Aimar <fenrir@via.ecp.fr>
* Loren Merritt <lorenm@u.washington.edu>
/* Prototypes for the assembly plane-copy kernels.  The *_core_* variants
 * require SIMD-aligned widths (see the wrapper macros below, which round w
 * up for all but the last-in-memory row); the _c versions handle arbitrary
 * widths exactly. */
void x264_plane_copy_core_sse( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_core_avx( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
/* New in this change: copy a plane while swapping the two pixels within
 * each adjacent pair (dst[2i] = src[2i+1], dst[2i+1] = src[2i]);
 * presumably for U/V order conversion — verify against callers. */
+void x264_plane_copy_swap_core_ssse3( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
+void x264_plane_copy_swap_core_avx2 ( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
+void x264_plane_copy_swap_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
/* Interleave the separate srcu and srcv planes into a single dst plane. */
void x264_plane_copy_interleave_core_mmx2( pixel *dst, intptr_t i_dst,
pixel *srcu, intptr_t i_srcu,
pixel *srcv, intptr_t i_srcv, int w, int h );
PLANE_COPY(16, sse)
PLANE_COPY(32, avx)
+/* Generate x264_plane_copy_swap_<cpu>: copy a plane of w pixel *pairs*
+ * (the scalar tail loop indexes up to 2*w pixels), swapping the two
+ * pixels within each pair.  `align' is the asm kernel's register width
+ * in bytes, so the core requires w to be a multiple of
+ * (align/2)/sizeof(pixel) pairs.  The rounding scheme below implies the
+ * core may overread up to one granule past the end of a row —
+ * NOTE(review): confirm the exact access pattern in the asm source. */
+#define PLANE_COPY_SWAP(align, cpu)\
+static void x264_plane_copy_swap_##cpu( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h )\
+{\
+    /* c_w: core-kernel granularity minus one, in pixel pairs (a mask) */\
+ int c_w = (align>>1) / sizeof(pixel) - 1;\
+ if( !(w&c_w) )\
+    /* width already aligned: the core can process the whole plane */\
+ x264_plane_copy_swap_core_##cpu( dst, i_dst, src, i_src, w, h );\
+ else if( w > c_w )\
+ {\
+ if( --h > 0 )\
+ {\
+ if( i_src > 0 )\
+ {\
+            /* rows ascend in memory: run the core over rows 0..h-1 with\
+             * w rounded up (the overread lands in the following row,\
+             * which is harmless), then advance to the final row */\
+ x264_plane_copy_swap_core_##cpu( dst, i_dst, src, i_src, (w+c_w)&~c_w, h );\
+ dst += i_dst * h;\
+ src += i_src * h;\
+ }\
+ else\
+            /* rows descend in memory: row 0 sits at the highest address,\
+             * so the core covers rows 1..h and row 0 is finished below */\
+ x264_plane_copy_swap_core_##cpu( dst+i_dst, i_dst, src+i_src, i_src, (w+c_w)&~c_w, h );\
+ }\
+    /* last-in-memory row: aligned portion via the core (single row) ... */\
+ x264_plane_copy_swap_core_##cpu( dst, 0, src, 0, w&~c_w, 1 );\
+    /* ... then swap the remaining tail pairs in scalar C */\
+ for( int x = 2*(w&~c_w); x < 2*w; x += 2 )\
+ {\
+ dst[x] = src[x+1];\
+ dst[x+1] = src[x];\
+ }\
+ }\
+ else\
+    /* width below the core's granularity: pure C fallback */\
+ x264_plane_copy_swap_c( dst, i_dst, src, i_src, w, h );\
+}
+
+PLANE_COPY_SWAP(16, ssse3)
+PLANE_COPY_SWAP(32, avx2)
+
+
/* Generate x264_plane_copy_interleave_<cpu>: interleave the srcu and srcv
 * planes into dst (presumably dst[2i]=u[i], dst[2i+1]=v[i] — confirm in
 * the C fallback).  This hunk converts the wrapper to the same guard
 * pattern as the other PLANE_COPY wrappers: unaligned widths run the core
 * with w rounded up on every row except the one at the highest address,
 * which is redone exactly in C to avoid overreading past the buffer. */
#define PLANE_INTERLEAVE(cpu) \
static void x264_plane_copy_interleave_##cpu( pixel *dst, intptr_t i_dst,\
pixel *srcu, intptr_t i_srcu,\
pixel *srcv, intptr_t i_srcv, int w, int h )\
{\
-    if( !(w&15) ) {\
+    /* c_w: core-kernel width granularity minus one, in pixels */\
+ int c_w = 16 / sizeof(pixel) - 1;\
+ if( !(w&c_w) )\
x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
-    } else if( w < 16 || (i_srcu ^ i_srcv) ) {\
-    x264_plane_copy_interleave_c( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
-    } else if( i_srcu > 0 ) {\
-    x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, (w+15)&~15, h-1 );\
-    x264_plane_copy_interleave_c( dst+i_dst*(h-1), 0, srcu+i_srcu*(h-1), 0, srcv+i_srcv*(h-1), 0, w, 1 );\
-    } else {\
+ else if( w > c_w && (i_srcu ^ i_srcv) >= 0 ) /* only works correctly for strides with identical signs */\
+ {\
+ if( --h > 0 )\
+ {\
+ if( i_srcu > 0 )\
+ {\
+            /* ascending rows: core covers rows 0..h-1 with rounded-up\
+             * width; the final row is redone exactly in C below */\
+ x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, (w+c_w)&~c_w, h );\
+ dst += i_dst * h;\
+ srcu += i_srcu * h;\
+ srcv += i_srcv * h;\
+ }\
+ else\
+            /* descending rows: row 0 is highest in memory; the core\
+             * covers rows 1..h and row 0 is redone in C below */\
+ x264_plane_copy_interleave_core_##cpu( dst+i_dst, i_dst, srcu+i_srcu, i_srcu, srcv+i_srcv, i_srcv, (w+c_w)&~c_w, h );\
+ }\
+    /* the row at the highest address, done exactly (no overread) */\
x264_plane_copy_interleave_c( dst, 0, srcu, 0, srcv, 0, w, 1 );\
-    x264_plane_copy_interleave_core_##cpu( dst+i_dst, i_dst, srcu+i_srcu, i_srcu, srcv+i_srcv, i_srcv, (w+15)&~15, h-1 );\
}\
+ else\
+    /* tiny width or mixed-sign strides: pure C fallback */\
+ x264_plane_copy_interleave_c( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
}
PLANE_INTERLEAVE(mmx2)
#endif
#if HAVE_X86_INLINE_ASM
/* Saturating accumulate helpers used by the mbtree propagate-list code.
 * This hunk renames CLIP_ADD -> MC_CLIP_ADD and adds #undef guards,
 * presumably to avoid clashing with an identically-named macro in a
 * shared header — confirm against the rest of the tree.
 * NOTE(review): the inline-asm bodies appear elided in this excerpt
 * (`int temp; s = temp;` with no asm between, and a lone movd below);
 * do not take the visible statements as the full implementation. */
-#define CLIP_ADD(s,x)\
+#undef MC_CLIP_ADD
+#define MC_CLIP_ADD(s,x)\
do\
{\
int temp;\
s = temp;\
} while(0)
/* Two-element variant: saturating add of a pair of 16-bit values. */
-#define CLIP_ADD2(s,x)\
+#undef MC_CLIP_ADD2
+#define MC_CLIP_ADD2(s,x)\
do\
{\
asm("movd %0, %%xmm0 \n"\
:"m"(M32(x))\
);\
} while(0)
/* The portable (non-asm) CLIP_ADD/CLIP_ADD2 fallbacks are deleted by this
 * change; presumably they now live in a shared location together with
 * PROPAGATE_LIST — confirm where the non-x86-asm path gets them. */
-#else
-#define CLIP_ADD(s,x) (s) = X264_MIN((s)+(x),(1<<15)-1)
-#define CLIP_ADD2(s,x)\
-do\
-{\
-    CLIP_ADD((s)[0], (x)[0]);\
-    CLIP_ADD((s)[1], (x)[1]);\
-} while(0)
#endif
/* This change deletes the locally-defined PROPAGATE_LIST macro — the
 * wrapper that ran the asm mbtree_propagate_list_internal kernel and then
 * scattered its per-macroblock outputs into ref_costs with saturating
 * adds.  NOTE(review): the PROPAGATE_LIST(ssse3/avx) instantiations below
 * are kept, so the macro must now be provided by a shared header — confirm
 * where it moved and that the shared version matches this behavior. */
-#define PROPAGATE_LIST(cpu)\
-void x264_mbtree_propagate_list_internal_##cpu( int16_t (*mvs)[2], int16_t *propagate_amount,\
-    uint16_t *lowres_costs, int16_t *output,\
-    int bipred_weight, int mb_y, int len );\
-\
-static void x264_mbtree_propagate_list_##cpu( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],\
-    int16_t *propagate_amount, uint16_t *lowres_costs,\
-    int bipred_weight, int mb_y, int len, int list )\
-{\
-    int16_t *current = h->scratch_buffer2;\
-\
-    x264_mbtree_propagate_list_internal_##cpu( mvs, propagate_amount, lowres_costs,\
-        current, bipred_weight, mb_y, len );\
-\
-    unsigned stride = h->mb.i_mb_stride;\
-    unsigned width = h->mb.i_mb_width;\
-    unsigned height = h->mb.i_mb_height;\
-\
-    for( unsigned i = 0; i < len; current += 32 )\
-    {\
-        int end = X264_MIN( i+8, len );\
-        for( ; i < end; i++, current += 2 )\
-        {\
-            if( !(lowres_costs[i] & (1 << (list+LOWRES_COST_SHIFT))) )\
-                continue;\
-\
-            unsigned mbx = current[0];\
-            unsigned mby = current[1];\
-            unsigned idx0 = mbx + mby * stride;\
-            unsigned idx2 = idx0 + stride;\
-\
-            /* Shortcut for the simple/common case of zero MV */\
-            if( !M32( mvs[i] ) )\
-            {\
-                CLIP_ADD( ref_costs[idx0], current[16] );\
-                continue;\
-            }\
-\
-            if( mbx < width-1 && mby < height-1 )\
-            {\
-                CLIP_ADD2( ref_costs+idx0, current+16 );\
-                CLIP_ADD2( ref_costs+idx2, current+32 );\
-            }\
-            else\
-            {\
-                /* Note: this takes advantage of unsigned representation to\
-                 * catch negative mbx/mby. */\
-                if( mby < height )\
-                {\
-                    if( mbx < width )\
-                        CLIP_ADD( ref_costs[idx0+0], current[16] );\
-                    if( mbx+1 < width )\
-                        CLIP_ADD( ref_costs[idx0+1], current[17] );\
-                }\
-                if( mby+1 < height )\
-                {\
-                    if( mbx < width )\
-                        CLIP_ADD( ref_costs[idx2+0], current[32] );\
-                    if( mbx+1 < width )\
-                        CLIP_ADD( ref_costs[idx2+1], current[33] );\
-                }\
-            }\
-        }\
-    }\
-}
-
/* Instantiate the SIMD-accelerated mbtree propagate-list wrappers. */
PROPAGATE_LIST(ssse3)
PROPAGATE_LIST(avx)
/* The trailing #undefs go away because the macros are now MC_CLIP_ADD /
 * MC_CLIP_ADD2 with #undef guards at their definition site (see above). */
-#undef CLIP_ADD
-#undef CLIP_ADD2
/* Populate the motion-compensation function table with x86 SIMD
 * implementations according to the cpu feature flags.
 * NOTE(review): this excerpt is heavily elided — the bare `return;` just
 * inside the body would make everything after it unreachable as written,
 * and the two identical plane_copy_swap/ssse3 assignments presumably
 * belong to different bit-depth (or cpu-flag) branches in the full file.
 * Confirm against the complete source before drawing conclusions. */
void x264_mc_init_mmx( int cpu, x264_mc_functions_t *pf )
{
return;
pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
/* hook up the new swap copy added by this change (SSSE3 path) */
+ pf->plane_copy_swap = x264_plane_copy_swap_ssse3;
pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_ssse3;
pf->mbtree_propagate_list = x264_mbtree_propagate_list_ssse3;
pf->avg[PIXEL_4x8] = x264_pixel_avg_4x8_ssse3;
pf->avg[PIXEL_4x4] = x264_pixel_avg_4x4_ssse3;
pf->avg[PIXEL_4x2] = x264_pixel_avg_4x2_ssse3;
+ pf->plane_copy_swap = x264_plane_copy_swap_ssse3;
pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_ssse3;
pf->mbtree_propagate_list = x264_mbtree_propagate_list_ssse3;
/* everything below requires AVX2 */
if( !(cpu&X264_CPU_AVX2) )
return;
+ pf->plane_copy_swap = x264_plane_copy_swap_avx2;
pf->get_ref = get_ref_avx2;
pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx2;
}