-extern void x264_mc_copy_w4_mmx( uint8_t *, int, uint8_t *, int, int );
-extern void x264_mc_copy_w8_mmx( uint8_t *, int, uint8_t *, int, int );
-extern void x264_mc_copy_w16_mmx( uint8_t *, int, uint8_t *, int, int );
-extern void x264_mc_copy_w16_sse2( uint8_t *, int, uint8_t *, int, int );
-extern void x264_mc_copy_w16_sse3( uint8_t *, int, uint8_t *, int, int );
-extern void x264_mc_copy_w16_aligned_sse2( uint8_t *, int, uint8_t *, int, int );
-extern void x264_prefetch_fenc_mmxext( uint8_t *, int, uint8_t *, int, int );
-extern void x264_prefetch_ref_mmxext( uint8_t *, int, int );
-extern void x264_mc_chroma_mmxext( uint8_t *src, int i_src_stride,
-                                   uint8_t *dst, int i_dst_stride,
-                                   int dx, int dy, int i_width, int i_height );
-extern void x264_mc_chroma_sse2( uint8_t *src, int i_src_stride,
-                                 uint8_t *dst, int i_dst_stride,
-                                 int dx, int dy, int i_width, int i_height );
-extern void x264_mc_chroma_ssse3( uint8_t *src, int i_src_stride,
-                                  uint8_t *dst, int i_dst_stride,
-                                  int dx, int dy, int i_width, int i_height );
-extern void x264_mc_chroma_ssse3_cache64( uint8_t *src, int i_src_stride,
-                                          uint8_t *dst, int i_dst_stride,
-                                          int dx, int dy, int i_width, int i_height );
-extern void x264_plane_copy_mmxext( uint8_t *, int, uint8_t *, int, int w, int h);
-extern void *x264_memcpy_aligned_mmx( void * dst, const void * src, size_t n );
-extern void *x264_memcpy_aligned_sse2( void * dst, const void * src, size_t n );
-extern void x264_memzero_aligned_mmx( void * dst, int n );
-extern void x264_memzero_aligned_sse2( void * dst, int n );
-extern void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, int stride );
-extern void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, int stride );
-extern void x264_integral_init4v_mmx( uint16_t *sum8, uint16_t *sum4, int stride );
-extern void x264_integral_init4v_sse2( uint16_t *sum8, uint16_t *sum4, int stride );
-extern void x264_integral_init8v_mmx( uint16_t *sum8, int stride );
-extern void x264_integral_init8v_sse2( uint16_t *sum8, int stride );
-extern void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, int stride );
-extern void x264_mbtree_propagate_cost_sse2( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
-                                             uint16_t *inter_costs, uint16_t *inv_qscales, int len );
-#define LOWRES(cpu) \
-extern void x264_frame_init_lowres_core_##cpu( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,\
-                                               int src_stride, int dst_stride, int width, int height );
-LOWRES(mmxext)
-LOWRES(cache32_mmxext)
+/* Prototypes for x86 SIMD helpers implemented in assembly.  The trailing
+ * _cpu suffix names the instruction-set variant selected at runtime.
+ * Strides are intptr_t (old code used int); pixel widens to uint16_t in
+ * high-bit-depth builds -- variants declared with explicit uint8_t/uint16_t
+ * presumably exist only for one bit depth (TODO confirm against the asm). */
+
+/* Block copies of a fixed width (the wN in the name), arbitrary height. */
+void x264_mc_copy_w4_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
+void x264_mc_copy_w8_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
+void x264_mc_copy_w8_sse ( pixel *, intptr_t, pixel *, intptr_t, int );
+void x264_mc_copy_w16_mmx( pixel *, intptr_t, pixel *, intptr_t, int );
+void x264_mc_copy_w16_sse( pixel *, intptr_t, pixel *, intptr_t, int );
+void x264_mc_copy_w16_aligned_sse( pixel *, intptr_t, pixel *, intptr_t, int );
+void x264_mc_copy_w16_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
+void x264_mc_copy_w16_aligned_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
+
+/* Cache prefetch of encode/reference frame rows; 420/422 variants differ
+ * by chroma subsampling of the frame layout -- behaviour lives in the asm. */
+void x264_prefetch_fenc_420_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
+void x264_prefetch_fenc_422_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
+void x264_prefetch_ref_mmx2( pixel *, intptr_t, int );
+
+/* Plane copy / byte-swap copy.  NOTE(review): the _core suffix suggests a
+ * raw kernel with alignment/overread constraints that a C wrapper (the _c
+ * fallback declared alongside) enforces -- confirm against the callers. */
+void x264_plane_copy_core_sse( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
+void x264_plane_copy_core_avx( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
+void x264_plane_copy_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
+void x264_plane_copy_swap_core_ssse3( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
+void x264_plane_copy_swap_core_avx2 ( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
+void x264_plane_copy_swap_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
+
+/* Interleave two chroma planes (U,V) into one packed plane. */
+void x264_plane_copy_interleave_core_mmx2( pixel *dst,  intptr_t i_dst,
+                                           pixel *srcu, intptr_t i_srcu,
+                                           pixel *srcv, intptr_t i_srcv, int w, int h );
+void x264_plane_copy_interleave_core_sse2( pixel *dst,  intptr_t i_dst,
+                                           pixel *srcu, intptr_t i_srcu,
+                                           pixel *srcv, intptr_t i_srcv, int w, int h );
+void x264_plane_copy_interleave_core_avx( pixel *dst,  intptr_t i_dst,
+                                          pixel *srcu, intptr_t i_srcu,
+                                          pixel *srcv, intptr_t i_srcv, int w, int h );
+void x264_plane_copy_interleave_c( pixel *dst,  intptr_t i_dst,
+                                   pixel *srcu, intptr_t i_srcu,
+                                   pixel *srcv, intptr_t i_srcv, int w, int h );
+
+/* Split a packed plane back into separate U/V (or R/G/B, or v210) planes. */
+void x264_plane_copy_deinterleave_mmx( pixel *dstu, intptr_t i_dstu,
+                                       pixel *dstv, intptr_t i_dstv,
+                                       pixel *src,  intptr_t i_src, int w, int h );
+void x264_plane_copy_deinterleave_sse2( pixel *dstu, intptr_t i_dstu,
+                                        pixel *dstv, intptr_t i_dstv,
+                                        pixel *src,  intptr_t i_src, int w, int h );
+void x264_plane_copy_deinterleave_ssse3( uint8_t *dstu, intptr_t i_dstu,
+                                         uint8_t *dstv, intptr_t i_dstv,
+                                         uint8_t *src,  intptr_t i_src, int w, int h );
+void x264_plane_copy_deinterleave_avx( uint16_t *dstu, intptr_t i_dstu,
+                                       uint16_t *dstv, intptr_t i_dstv,
+                                       uint16_t *src,  intptr_t i_src, int w, int h );
+void x264_plane_copy_deinterleave_rgb_sse2 ( pixel *dsta, intptr_t i_dsta,
+                                             pixel *dstb, intptr_t i_dstb,
+                                             pixel *dstc, intptr_t i_dstc,
+                                             pixel *src,  intptr_t i_src, int pw, int w, int h );
+void x264_plane_copy_deinterleave_rgb_ssse3( pixel *dsta, intptr_t i_dsta,
+                                             pixel *dstb, intptr_t i_dstb,
+                                             pixel *dstc, intptr_t i_dstc,
+                                             pixel *src,  intptr_t i_src, int pw, int w, int h );
+void x264_plane_copy_deinterleave_v210_ssse3( uint16_t *dstu, intptr_t i_dstu,
+                                              uint16_t *dstv, intptr_t i_dstv,
+                                              uint32_t *src,  intptr_t i_src, int w, int h );
+void x264_plane_copy_deinterleave_v210_avx  ( uint16_t *dstu, intptr_t i_dstu,
+                                              uint16_t *dstv, intptr_t i_dstv,
+                                              uint32_t *src,  intptr_t i_src, int w, int h );
+void x264_plane_copy_deinterleave_v210_avx2 ( uint16_t *dstu, intptr_t i_dstu,
+                                              uint16_t *dstv, intptr_t i_dstv,
+                                              uint32_t *src,  intptr_t i_src, int w, int h );
+
+/* Store/load of macroblock-sized chroma between planar (fenc/fdec buffers)
+ * and interleaved (frame) layouts. */
+void x264_store_interleave_chroma_mmx2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
+void x264_store_interleave_chroma_sse2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
+void x264_store_interleave_chroma_avx ( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
+void x264_load_deinterleave_chroma_fenc_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
+void x264_load_deinterleave_chroma_fenc_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
+void x264_load_deinterleave_chroma_fenc_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
+void x264_load_deinterleave_chroma_fenc_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
+void x264_load_deinterleave_chroma_fdec_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
+void x264_load_deinterleave_chroma_fdec_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
+void x264_load_deinterleave_chroma_fdec_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
+void x264_load_deinterleave_chroma_fdec_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
+
+/* memcpy/memset replacements; callers must guarantee the alignment the
+ * _aligned suffix promises (exact requirement defined by the asm). */
+void *x264_memcpy_aligned_mmx( void *dst, const void *src, size_t n );
+void *x264_memcpy_aligned_sse( void *dst, const void *src, size_t n );
+void x264_memzero_aligned_mmx( void *dst, size_t n );
+void x264_memzero_aligned_sse( void *dst, size_t n );
+void x264_memzero_aligned_avx( void *dst, size_t n );
+
+/* Integral-image (presumably summed-area table) row (h) and column (v)
+ * initialisation over 8-bit pixels into 16-bit sums. */
+void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
+void x264_integral_init4h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
+void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
+void x264_integral_init8h_avx ( uint16_t *sum, uint8_t *pix, intptr_t stride );
+void x264_integral_init8h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
+void x264_integral_init4v_mmx  ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
+void x264_integral_init4v_sse2 ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
+void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
+void x264_integral_init4v_avx2( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
+void x264_integral_init8v_mmx ( uint16_t *sum8, intptr_t stride );
+void x264_integral_init8v_sse2( uint16_t *sum8, intptr_t stride );
+void x264_integral_init8v_avx2( uint16_t *sum8, intptr_t stride );
+
+/* Macroblock-tree cost propagation (rate-control lookahead).  Note the dst
+ * type narrowed from int* to int16_t* and the added fps_factor parameter
+ * relative to the old sse2 prototype above. */
+void x264_mbtree_propagate_cost_sse2( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
+                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
+void x264_mbtree_propagate_cost_avx ( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
+                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
+void x264_mbtree_propagate_cost_fma4( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
+                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
+void x264_mbtree_propagate_cost_avx2( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
+                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
+
+/* Chroma motion compensation: one prototype per SIMD level.  New interface
+ * writes separate dstu/dstv planes from an interleaved src (the old
+ * prototypes above took a single dst). */
+#define MC_CHROMA(cpu)\
+void x264_mc_chroma_##cpu( pixel *dstu, pixel *dstv, intptr_t i_dst, pixel *src, intptr_t i_src,\
+                           int dx, int dy, int i_width, int i_height );
+MC_CHROMA(mmx2)
+MC_CHROMA(sse2)
+MC_CHROMA(ssse3)
+MC_CHROMA(ssse3_cache64)
+MC_CHROMA(avx)
+MC_CHROMA(avx2)
+
+/* Lowres (half-resolution full/h/v/c-pel) frame generation for the
+ * lookahead -- presumably downsamples src0 into the four dst planes. */
+#define LOWRES(cpu)\
+void x264_frame_init_lowres_core_##cpu( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,\
+                                        intptr_t src_stride, intptr_t dst_stride, int width, int height );
+LOWRES(mmx2)
+LOWRES(cache32_mmx2)