-
-/* NASM functions */
-extern void x264_pixel_avg_16x16_sse2( uint8_t *, int, uint8_t *, int );
-extern void x264_pixel_avg_16x8_sse2( uint8_t *, int, uint8_t *, int );
-extern void x264_pixel_avg_16x16_mmxext( uint8_t *, int, uint8_t *, int );
-extern void x264_pixel_avg_16x8_mmxext( uint8_t *, int, uint8_t *, int );
-extern void x264_pixel_avg_8x16_mmxext( uint8_t *, int, uint8_t *, int );
-extern void x264_pixel_avg_8x8_mmxext( uint8_t *, int, uint8_t *, int );
-extern void x264_pixel_avg_8x4_mmxext( uint8_t *, int, uint8_t *, int );
-extern void x264_pixel_avg_4x8_mmxext( uint8_t *, int, uint8_t *, int );
-extern void x264_pixel_avg_4x4_mmxext( uint8_t *, int, uint8_t *, int );
-extern void x264_pixel_avg_4x2_mmxext( uint8_t *, int, uint8_t *, int );
-extern void x264_mc_copy_w4_mmx( uint8_t *, int, uint8_t *, int, int );
-extern void x264_mc_copy_w8_mmx( uint8_t *, int, uint8_t *, int, int );
-extern void x264_mc_copy_w16_mmx( uint8_t *, int, uint8_t *, int, int );
-extern void x264_mc_copy_w16_sse2( uint8_t *, int, uint8_t *, int, int );
-extern void x264_mc_copy_w16_sse3( uint8_t *, int, uint8_t *, int, int );
-extern void x264_mc_copy_w16_aligned_sse2( uint8_t *, int, uint8_t *, int, int );
-extern void x264_pixel_avg_weight_4x4_mmxext( uint8_t *, int, uint8_t *, int, int );
-extern void x264_pixel_avg_weight_w8_mmxext( uint8_t *, int, uint8_t *, int, int, int );
-extern void x264_pixel_avg_weight_w8_sse2( uint8_t *, int, uint8_t *, int, int, int );
-extern void x264_pixel_avg_weight_w16_sse2( uint8_t *, int, uint8_t *, int, int, int );
-extern void x264_pixel_avg_weight_w16_mmxext( uint8_t *, int, uint8_t *, int, int, int );
-extern void x264_prefetch_fenc_mmxext( uint8_t *, int, uint8_t *, int, int );
-extern void x264_prefetch_ref_mmxext( uint8_t *, int, int );
-extern void x264_mc_chroma_mmxext( uint8_t *src, int i_src_stride,
- uint8_t *dst, int i_dst_stride,
- int dx, int dy, int i_width, int i_height );
-extern void x264_mc_chroma_sse2( uint8_t *src, int i_src_stride,
- uint8_t *dst, int i_dst_stride,
- int dx, int dy, int i_width, int i_height );
-extern void x264_mc_chroma_ssse3( uint8_t *src, int i_src_stride,
- uint8_t *dst, int i_dst_stride,
- int dx, int dy, int i_width, int i_height );
-extern void x264_plane_copy_mmxext( uint8_t *, int, uint8_t *, int, int w, int h);
-extern void *x264_memcpy_aligned_mmx( void * dst, const void * src, size_t n );
-extern void *x264_memcpy_aligned_sse2( void * dst, const void * src, size_t n );
-extern void x264_memzero_aligned_mmx( void * dst, int n );
-extern void x264_memzero_aligned_sse2( void * dst, int n );
+#include "mc.h"
+
+#define DECL_SUF( func, args )\
+ void func##_mmx2 args;\
+ void func##_sse2 args;\
+ void func##_ssse3 args;
+
+DECL_SUF( x264_pixel_avg_16x16, ( pixel *, int, pixel *, int, pixel *, int, int ))
+DECL_SUF( x264_pixel_avg_16x8, ( pixel *, int, pixel *, int, pixel *, int, int ))
+DECL_SUF( x264_pixel_avg_8x16, ( pixel *, int, pixel *, int, pixel *, int, int ))
+DECL_SUF( x264_pixel_avg_8x8, ( pixel *, int, pixel *, int, pixel *, int, int ))
+DECL_SUF( x264_pixel_avg_8x4, ( pixel *, int, pixel *, int, pixel *, int, int ))
+DECL_SUF( x264_pixel_avg_4x16, ( pixel *, int, pixel *, int, pixel *, int, int ))
+DECL_SUF( x264_pixel_avg_4x8, ( pixel *, int, pixel *, int, pixel *, int, int ))
+DECL_SUF( x264_pixel_avg_4x4, ( pixel *, int, pixel *, int, pixel *, int, int ))
+DECL_SUF( x264_pixel_avg_4x2, ( pixel *, int, pixel *, int, pixel *, int, int ))
+
+#define MC_WEIGHT(w,type) \
+ void x264_mc_weight_w##w##_##type( pixel *,int, pixel *,int, const x264_weight_t *,int );
+
+#define MC_WEIGHT_OFFSET(w,type) \
+ void x264_mc_offsetadd_w##w##_##type( pixel *,int, pixel *,int, const x264_weight_t *,int ); \
+ void x264_mc_offsetsub_w##w##_##type( pixel *,int, pixel *,int, const x264_weight_t *,int ); \
+ MC_WEIGHT(w,type)
+
+MC_WEIGHT_OFFSET( 4, mmx2 )
+MC_WEIGHT_OFFSET( 8, mmx2 )
+MC_WEIGHT_OFFSET( 12, mmx2 )
+MC_WEIGHT_OFFSET( 16, mmx2 )
+MC_WEIGHT_OFFSET( 20, mmx2 )
+MC_WEIGHT_OFFSET( 12, sse2 )
+MC_WEIGHT_OFFSET( 16, sse2 )
+MC_WEIGHT_OFFSET( 20, sse2 )
+#if HIGH_BIT_DEPTH
+MC_WEIGHT_OFFSET( 8, sse2 )
+#endif
+MC_WEIGHT( 8, sse2 )
+MC_WEIGHT( 4, ssse3 )
+MC_WEIGHT( 8, ssse3 )
+MC_WEIGHT( 12, ssse3 )
+MC_WEIGHT( 16, ssse3 )
+MC_WEIGHT( 20, ssse3 )
+MC_WEIGHT( 4, avx )
+MC_WEIGHT( 8, avx )
+MC_WEIGHT( 12, avx )
+MC_WEIGHT( 16, avx )
+MC_WEIGHT( 20, avx )
+#undef MC_WEIGHT_OFFSET /* was "#undef MC_OFFSET": that name is never defined; this one is (and leaked) */
+#undef MC_WEIGHT
+
+void x264_mc_copy_w4_mmx( pixel *, int, pixel *, int, int );
+void x264_mc_copy_w8_mmx( pixel *, int, pixel *, int, int );
+void x264_mc_copy_w8_sse2( pixel *, int, pixel *, int, int );
+void x264_mc_copy_w16_mmx( pixel *, int, pixel *, int, int );
+void x264_mc_copy_w16_sse2( pixel *, int, pixel *, int, int );
+void x264_mc_copy_w16_aligned_sse2( pixel *, int, pixel *, int, int );
+void x264_prefetch_fenc_420_mmx2( pixel *, int, pixel *, int, int );
+void x264_prefetch_fenc_422_mmx2( pixel *, int, pixel *, int, int );
+void x264_prefetch_ref_mmx2( pixel *, int, int );
+void x264_plane_copy_core_mmx2( pixel *, int, pixel *, int, int w, int h);
+void x264_plane_copy_c( pixel *, int, pixel *, int, int w, int h );
+void x264_plane_copy_interleave_core_mmx2( pixel *dst, int i_dst,
+ pixel *srcu, int i_srcu,
+ pixel *srcv, int i_srcv, int w, int h );
+void x264_plane_copy_interleave_core_sse2( pixel *dst, int i_dst,
+ pixel *srcu, int i_srcu,
+ pixel *srcv, int i_srcv, int w, int h );
+void x264_plane_copy_interleave_core_avx( pixel *dst, int i_dst,
+ pixel *srcu, int i_srcu,
+ pixel *srcv, int i_srcv, int w, int h );
+void x264_plane_copy_interleave_c( pixel *dst, int i_dst,
+ pixel *srcu, int i_srcu,
+ pixel *srcv, int i_srcv, int w, int h );
+void x264_plane_copy_deinterleave_mmx( pixel *dstu, int i_dstu,
+ pixel *dstv, int i_dstv,
+ pixel *src, int i_src, int w, int h );
+void x264_plane_copy_deinterleave_sse2( pixel *dstu, int i_dstu,
+ pixel *dstv, int i_dstv,
+ pixel *src, int i_src, int w, int h );
+void x264_plane_copy_deinterleave_ssse3( uint8_t *dstu, int i_dstu,
+ uint8_t *dstv, int i_dstv,
+ uint8_t *src, int i_src, int w, int h );
+void x264_plane_copy_deinterleave_avx( uint16_t *dstu, int i_dstu,
+ uint16_t *dstv, int i_dstv,
+ uint16_t *src, int i_src, int w, int h );
+void x264_store_interleave_chroma_mmx2( pixel *dst, int i_dst, pixel *srcu, pixel *srcv, int height );
+void x264_store_interleave_chroma_sse2( pixel *dst, int i_dst, pixel *srcu, pixel *srcv, int height );
+void x264_store_interleave_chroma_avx( pixel *dst, int i_dst, pixel *srcu, pixel *srcv, int height );
+void x264_load_deinterleave_chroma_fenc_mmx( pixel *dst, pixel *src, int i_src, int height );
+void x264_load_deinterleave_chroma_fenc_sse2( pixel *dst, pixel *src, int i_src, int height );
+void x264_load_deinterleave_chroma_fenc_ssse3( uint8_t *dst, uint8_t *src, int i_src, int height );
+void x264_load_deinterleave_chroma_fenc_avx( uint16_t *dst, uint16_t *src, int i_src, int height );
+void x264_load_deinterleave_chroma_fdec_mmx( pixel *dst, pixel *src, int i_src, int height );
+void x264_load_deinterleave_chroma_fdec_sse2( pixel *dst, pixel *src, int i_src, int height );
+void x264_load_deinterleave_chroma_fdec_ssse3( uint8_t *dst, uint8_t *src, int i_src, int height );
+void x264_load_deinterleave_chroma_fdec_avx( uint16_t *dst, uint16_t *src, int i_src, int height );
+void *x264_memcpy_aligned_mmx( void * dst, const void * src, size_t n );
+void *x264_memcpy_aligned_sse2( void * dst, const void * src, size_t n );
+void x264_memzero_aligned_mmx( void * dst, int n );
+void x264_memzero_aligned_sse2( void * dst, int n );
+void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, int stride );
+void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, int stride );
+void x264_integral_init8h_avx ( uint16_t *sum, uint8_t *pix, int stride );
+void x264_integral_init4v_mmx( uint16_t *sum8, uint16_t *sum4, int stride );
+void x264_integral_init4v_sse2( uint16_t *sum8, uint16_t *sum4, int stride );
+void x264_integral_init8v_mmx( uint16_t *sum8, int stride );
+void x264_integral_init8v_sse2( uint16_t *sum8, int stride );
+void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, int stride );
+void x264_mbtree_propagate_cost_sse2( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
+ uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
+void x264_mbtree_propagate_cost_avx( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
+ uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
+void x264_mbtree_propagate_cost_fma4( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
+ uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
+
+#define MC_CHROMA(cpu)\
+void x264_mc_chroma_##cpu( pixel *dstu, pixel *dstv, int i_dst,\
+ pixel *src, int i_src,\
+ int dx, int dy, int i_width, int i_height );
+MC_CHROMA(mmx2)
+MC_CHROMA(sse2)
+MC_CHROMA(sse2_misalign)
+MC_CHROMA(ssse3)
+MC_CHROMA(ssse3_cache64)
+MC_CHROMA(avx)
+MC_CHROMA(avx_cache64)
+
+#define LOWRES(cpu)\
+void x264_frame_init_lowres_core_##cpu( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,\
+ int src_stride, int dst_stride, int width, int height );
+LOWRES(mmx2)
+LOWRES(cache32_mmx2)
+LOWRES(sse2)
+LOWRES(ssse3)