ALIGNED_16( dctcoef fenc_dct4[16][16] );
/* Psy RD SATD/SA8D scores cache */
/* Alignment widened from a fixed 16 bytes to ALIGNED_N so that wider
 * vector accesses to these caches (e.g. 32-byte AVX loads) stay aligned.
 * NOTE(review): assumes ALIGNED_N expands to the build's maximum native
 * SIMD alignment -- confirm against its definition elsewhere in the tree. */
- ALIGNED_16( uint64_t fenc_hadamard_cache[9] );
- ALIGNED_16( uint32_t fenc_satd_cache[32] );
+ ALIGNED_N( uint64_t fenc_hadamard_cache[9] );
+ ALIGNED_N( uint32_t fenc_satd_cache[32] );
/* pointer over mb of the frame to be compressed */
pixel *p_fenc[3]; /* y,u,v */
;-----------------------------------------------------------------------------
; void *memzero_aligned( void *dst, size_t n );
;-----------------------------------------------------------------------------
; MEMZERO now takes one argument: the number of vector stores unrolled per
; loop iteration, so each pass clears %1*mmsize bytes.  MMX/SSE keep the old
; unroll of 8; AVX uses 4 because ymm mmsize is 32, giving the same
; 128-byte stride per iteration (4*32) as SSE's 8*16.
; NOTE(review): the zeroing of m0 and the %if that pairs with the %endif
; below are elided from this hunk -- confirm against the full file.
-%macro MEMZERO 0
+%macro MEMZERO 1
cglobal memzero_aligned, 2,2
add r0, r1                ; r0 -> one past the end of the buffer
neg r1                    ; r1 = negative byte count, counts up toward 0
%endif
.loop:
%assign i 0
+%rep %1                  ; emit %1 aligned stores of the zeroed m0 register
-%rep 8
mova [r0 + r1 + i], m0
%assign i i+mmsize
%endrep
- add r1, mmsize*8
+ add r1, mmsize*%1       ; advance by exactly the bytes cleared this pass
jl .loop                  ; keep looping while r1 is still negative
RET
%endmacro
INIT_MMX mmx
-MEMZERO
+MEMZERO 8
INIT_XMM sse
-MEMZERO
-
-
+MEMZERO 8
+INIT_YMM avx
+MEMZERO 4
%if HIGH_BIT_DEPTH == 0
;-----------------------------------------------------------------------------
void *x264_memcpy_aligned_sse( void *dst, const void *src, size_t n );
void x264_memzero_aligned_mmx( void *dst, size_t n );
void x264_memzero_aligned_sse( void *dst, size_t n );
/* New 256-bit variant, selected at runtime when the CPU reports AVX
 * (the dispatch below assigns it to pf->memzero_aligned). */
+void x264_memzero_aligned_avx( void *dst, size_t n );
void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init4h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
if( !(cpu&X264_CPU_AVX) )
return;
/* Everything past this point requires AVX; upgrade memzero_aligned from
 * the SSE version to the new 256-bit AVX implementation. */
+ pf->memzero_aligned = x264_memzero_aligned_avx;
pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx;
if( cpu&X264_CPU_FMA4 )
uint64_t bcostrd = COST_MAX64;
uint16_t amvd;
/* each byte of visited represents 8 possible m1y positions, so a 4D array isn't needed */
/* NOTE(review): alignment widened from a fixed 16 bytes to ALIGNED_ARRAY_N
 * (native SIMD width), consistent with the ALIGNED_N changes in this patch. */
- ALIGNED_ARRAY_16( uint8_t, visited,[8],[8][8] );
+ ALIGNED_ARRAY_N( uint8_t, visited,[8],[8][8] );
/* all permutations of an offset in up to 2 of the dimensions */
ALIGNED_4( static const int8_t dia4d[33][4] ) =
{