b x264_var_end
.endfunc
+@ uint64_t x264_pixel_var_8x16_neon( uint8_t *pix, intptr_t stride )
+@ Computes the running sum and sum-of-squares of an 8x16 pixel block,
+@ then tail-jumps to the shared variance epilogue x264_var_end.
+@ In:  r0 = pix (rows 8-byte aligned, per the [r0,:64] hint), r1 = stride
+@ q0 holds the widened pixel sum; q1/q2 accumulate squares.
+@ NOTE(review): VAR_SQR_SUM is a macro defined elsewhere in this file --
+@ it appears to fold one loaded row into the sum/sum-of-squares
+@ accumulators (ping-ponging between q12-q15); confirm against its
+@ definition, which is not visible in this hunk.
+function x264_pixel_var_8x16_neon
+ vld1.64 {d16}, [r0,:64], r1 @ load row 0
+ vld1.64 {d18}, [r0,:64], r1 @ load row 1
+ vmull.u8 q1, d16, d16 @ q1 = row0 * row0 (u8 -> u16 squares)
+ vmovl.u8 q0, d16 @ q0 = row0 widened to u16 (sum accumulator)
+ vld1.64 {d20}, [r0,:64], r1 @ load row 2
+ vmull.u8 q2, d18, d18 @ q2 = row1 * row1
+ vaddw.u8 q0, q0, d18 @ sum += row1
+
+ mov ip, #12 @ 12 of the 16 rows remain for the loop below
+
+ vld1.64 {d22}, [r0,:64], r1 @ load row 3
+ VAR_SQR_SUM q1, q1, q14, d20, vpaddl.u16 @ fold in row 2
+ vld1.64 {d16}, [r0,:64], r1 @ load row 4 (preload for loop)
+ VAR_SQR_SUM q2, q2, q15, d22, vpaddl.u16 @ fold in row 3
+
+1: subs ip, ip, #4 @ 4 rows per iteration (3 + no preload on the last)
+ vld1.64 {d18}, [r0,:64], r1
+ VAR_SQR_SUM q1, q14, q12, d16 @ fold in row preloaded before branch
+ vld1.64 {d20}, [r0,:64], r1
+ VAR_SQR_SUM q2, q15, q13, d18
+ vld1.64 {d22}, [r0,:64], r1
+ VAR_SQR_SUM q1, q12, q14, d20
+ beq 2f @ last group: all 16 rows loaded, skip the extra preload
+ vld1.64 {d16}, [r0,:64], r1 @ preload first row of next group
+ VAR_SQR_SUM q2, q13, q15, d22
+ b 1b
+2:
+ VAR_SQR_SUM q2, q13, q15, d22 @ fold in final row (d22 loaded above)
+ b x264_var_end @ shared reduce-and-return epilogue
+.endfunc
+
function x264_pixel_var_16x16_neon
vld1.64 {d16-d17}, [r0,:128], r1
vmull.u8 q12, d16, d16
int x264_pixel_sa8d_16x16_neon( uint8_t *, intptr_t, uint8_t *, intptr_t );
uint64_t x264_pixel_var_8x8_neon ( uint8_t *, intptr_t );
+uint64_t x264_pixel_var_8x16_neon ( uint8_t *, intptr_t );
uint64_t x264_pixel_var_16x16_neon( uint8_t *, intptr_t );
int x264_pixel_var2_8x8_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, int * );