/*****************************************************************************
 * mc-c.c: aarch64 motion compensation
 *****************************************************************************
 * Copyright (C) 2009-2015 x264 project
 *
 * Authors: David Conrad <lessen42@gmail.com>
 *          Janne Grunau <janne-x264@jannau.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "mc.h"

void x264_prefetch_ref_aarch64( uint8_t *, intptr_t, int );
void x264_prefetch_fenc_420_aarch64( uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_prefetch_fenc_422_aarch64( uint8_t *, intptr_t, uint8_t *, intptr_t, int );

void *x264_memcpy_aligned_neon( void *dst, const void *src, size_t n );
void x264_memzero_aligned_neon( void *dst, size_t n );

void x264_pixel_avg_16x16_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_16x8_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_8x16_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_8x8_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_8x4_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_4x16_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_4x8_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_4x4_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_4x2_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );

void x264_pixel_avg2_w4_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int );
void x264_pixel_avg2_w8_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int );
void x264_pixel_avg2_w16_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int );
void x264_pixel_avg2_w20_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int );

void x264_plane_copy_neon( pixel *dst, intptr_t i_dst,
                           pixel *src, intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_neon( pixel *dstu, intptr_t i_dstu,
                                        pixel *dstv, intptr_t i_dstv,
                                        pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_rgb_neon( pixel *dsta, intptr_t i_dsta,
                                            pixel *dstb, intptr_t i_dstb,
                                            pixel *dstc, intptr_t i_dstc,
                                            pixel *src,  intptr_t i_src, int pw, int w, int h );
void x264_plane_copy_interleave_neon( pixel *dst,  intptr_t i_dst,
                                      pixel *srcu, intptr_t i_srcu,
                                      pixel *srcv, intptr_t i_srcv, int w, int h );

void x264_store_interleave_chroma_neon( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_load_deinterleave_chroma_fdec_neon( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_neon( pixel *dst, pixel *src, intptr_t i_src, int height );
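
/* Weighted-prediction wrappers: MC_WEIGHT() declares the asm routines for one
 * rounding variant and builds the width-indexed dispatch table used by
 * pf->weight/offsetadd/offsetsub and by the weight calls below. */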
#define MC_WEIGHT(func)\
void x264_mc_weight_w20##func##_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int );\
void x264_mc_weight_w16##func##_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int );\
void x264_mc_weight_w8##func##_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int );\
void x264_mc_weight_w4##func##_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int );\
\
static void (* x264_mc##func##_wtab_neon[6])( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int ) =\
{\
    x264_mc_weight_w4##func##_neon,\
    x264_mc_weight_w4##func##_neon,\
    x264_mc_weight_w8##func##_neon,\
    x264_mc_weight_w16##func##_neon,\
    x264_mc_weight_w16##func##_neon,\
    x264_mc_weight_w20##func##_neon,\
};

MC_WEIGHT()
MC_WEIGHT(_nodenom)
MC_WEIGHT(_offsetadd)
MC_WEIGHT(_offsetsub)

void x264_mc_copy_w4_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_mc_copy_w8_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_mc_copy_w16_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, int );

void x264_mc_chroma_neon( uint8_t *, uint8_t *, intptr_t, uint8_t *, intptr_t, int, int, int, int );
void x264_integral_init4h_neon( uint16_t *, uint8_t *, intptr_t );
void x264_integral_init4v_neon( uint16_t *, uint16_t *, intptr_t );
void x264_integral_init8h_neon( uint16_t *, uint8_t *, intptr_t );
void x264_integral_init8v_neon( uint16_t *, intptr_t );
void x264_frame_init_lowres_core_neon( uint8_t *, uint8_t *, uint8_t *, uint8_t *, uint8_t *, intptr_t, intptr_t, int, int );

void x264_mbtree_propagate_cost_neon( int16_t *, uint16_t *, uint16_t *, uint16_t *, uint16_t *, float *, int );

#if !HIGH_BIT_DEPTH
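/* Pick the weighting dispatch table for a given weight: a scale equal to
 * 1<<denom reduces to a plain offset add/subtract, a zero denominator skips
 * the rounding shift, and the general case uses the full weight kernel. */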
static void x264_weight_cache_neon( x264_t *h, x264_weight_t *w )
{
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
        {
            w->weightfn = x264_mc_offsetsub_wtab_neon;
            w->cachea[0] = -w->i_offset;
        }
        else
        {
            w->weightfn = x264_mc_offsetadd_wtab_neon;
            w->cachea[0] = w->i_offset;
        }
    }
    else if( !w->i_denom )
        w->weightfn = x264_mc_nodenom_wtab_neon;
    else
        w->weightfn = x264_mc_wtab_neon;
}
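
/* The avg2/copy tables below are indexed by i_width>>2 (4-, 8-, 12/16- and
 * 20-pixel-wide blocks); slots that are never used stay NULL. */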
static void (* const x264_pixel_avg_wtab_neon[6])( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int ) =
{
    NULL,
    x264_pixel_avg2_w4_neon,
    x264_pixel_avg2_w8_neon,
    x264_pixel_avg2_w16_neon,   // no slower than w12, so no point in a separate function
    x264_pixel_avg2_w16_neon,
    x264_pixel_avg2_w20_neon,
};
static void (* const x264_mc_copy_wtab_neon[5])( uint8_t *, intptr_t, uint8_t *, intptr_t, int ) =
{
    NULL,
    x264_mc_copy_w4_neon,
    x264_mc_copy_w8_neon,
    NULL,
    x264_mc_copy_w16_neon,
};
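
/* Luma MC from the four half-pel planes in src[]: qpel_idx packs the
 * fractional MV, and x264_hpel_ref0/ref1 select which plane(s) to average
 * for quarter-pel positions. */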
static void mc_luma_neon( uint8_t *dst,    intptr_t i_dst_stride,
                          uint8_t *src[4], intptr_t i_src_stride,
                          int mvx, int mvy,
                          int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
    if( (mvy&3) == 3 ) // explicit if() to force conditional add
        src1 += i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        x264_pixel_avg_wtab_neon[i_width>>2](
                dst, i_dst_stride, src1, i_src_stride,
                src2, i_height );
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
    }
    else if( weight->weightfn )
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
    else
        x264_mc_copy_wtab_neon[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, i_height );
}
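
/* Same as mc_luma_neon, except that when no interpolation or weighting is
 * required it returns a pointer straight into the reference plane (updating
 * the caller's stride) instead of copying the block. */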
static uint8_t *get_ref_neon( uint8_t *dst,    intptr_t *i_dst_stride,
                              uint8_t *src[4], intptr_t i_src_stride,
                              int mvx, int mvy,
                              int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
    if( (mvy&3) == 3 ) // explicit if() to force conditional add
        src1 += i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        x264_pixel_avg_wtab_neon[i_width>>2](
                dst, *i_dst_stride, src1, i_src_stride,
                src2, i_height );
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
        return dst;
    }
    else if( weight->weightfn )
    {
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
        return dst;
    }
    else
    {
        *i_dst_stride = i_src_stride;
        return src1;
    }
}

void x264_hpel_filter_neon( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
                            uint8_t *src, intptr_t stride, int width,
                            int height, int16_t *buf );
#endif // !HIGH_BIT_DEPTH
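
/* Saturating accumulation used when scattering mbtree propagate costs; sums
 * are clamped to 2^15-1 so they keep fitting in the 16-bit cost arrays. */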
#define CLIP_ADD(s,x) (s) = X264_MIN((s)+(x),(1<<15)-1)
#define CLIP_ADD2(s,x)\
do\
{\
    CLIP_ADD((s)[0], (x)[0]);\
    CLIP_ADD((s)[1], (x)[1]);\
} while(0)
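
/* The asm helper below fills h->scratch_buffer2 in groups of 8 macroblocks:
 * (mbx,mby) coordinate pairs followed by the propagate-cost contributions for
 * the 2x2 block of macroblocks each MV lands on (layout inferred from the
 * current[] index arithmetic in the loop).  The C loop then adds them into
 * ref_costs with saturation and handles frame-edge clipping. */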
void x264_mbtree_propagate_list_internal_neon( int16_t (*mvs)[2],
                                               int16_t *propagate_amount,
                                               uint16_t *lowres_costs,
                                               int16_t *output,
                                               int bipred_weight, int mb_y,
                                               int len );

static void x264_mbtree_propagate_list_neon( x264_t *h, uint16_t *ref_costs,
                                             int16_t (*mvs)[2],
                                             int16_t *propagate_amount,
                                             uint16_t *lowres_costs,
                                             int bipred_weight, int mb_y,
                                             int len, int list )
{
    int16_t *current = h->scratch_buffer2;

    x264_mbtree_propagate_list_internal_neon( mvs, propagate_amount,
                                              lowres_costs, current,
                                              bipred_weight, mb_y, len );

    unsigned stride = h->mb.i_mb_stride;
    unsigned width = h->mb.i_mb_width;
    unsigned height = h->mb.i_mb_height;

    for( unsigned i = 0; i < len; current += 32 )
    {
        int end = X264_MIN( i+8, len );
        for( ; i < end; i++, current += 2 )
        {
            if( !(lowres_costs[i] & (1 << (list+LOWRES_COST_SHIFT))) )
                continue;

            unsigned mbx = current[0];
            unsigned mby = current[1];
            unsigned idx0 = mbx + mby * stride;
            unsigned idx2 = idx0 + stride;

            /* Shortcut for the simple/common case of zero MV */
            if( !M32( mvs[i] ) )
            {
                CLIP_ADD( ref_costs[idx0], current[16] );
                continue;
            }

            if( mbx < width-1 && mby < height-1 )
            {
                CLIP_ADD2( ref_costs+idx0, current+16 );
                CLIP_ADD2( ref_costs+idx2, current+32 );
            }
            else
            {
                /* Note: this takes advantage of unsigned representation to
                 * catch negative mbx/mby. */
                if( mby < height )
                {
                    if( mbx < width )   CLIP_ADD( ref_costs[idx0+0], current[16] );
                    if( mbx+1 < width ) CLIP_ADD( ref_costs[idx0+1], current[17] );
                }
                if( mby+1 < height )
                {
                    if( mbx < width )   CLIP_ADD( ref_costs[idx2+0], current[32] );
                    if( mbx+1 < width ) CLIP_ADD( ref_costs[idx2+1], current[33] );
                }
            }
        }
    }
}
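
/* Populate the motion-compensation function table with the aarch64/NEON
 * implementations.  The prefetch hooks only need ARMv8; everything else
 * requires NEON and is currently provided for 8-bit depth only. */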
void x264_mc_init_aarch64( int cpu, x264_mc_functions_t *pf )
{
    if( cpu&X264_CPU_ARMV8 )
    {
        pf->prefetch_fenc_420 = x264_prefetch_fenc_420_aarch64;
        pf->prefetch_fenc_422 = x264_prefetch_fenc_422_aarch64;
        pf->prefetch_ref      = x264_prefetch_ref_aarch64;
    }

    if( !(cpu&X264_CPU_NEON) )
        return;

#if !HIGH_BIT_DEPTH
    pf->copy_16x16_unaligned = x264_mc_copy_w16_neon;
    pf->copy[PIXEL_16x16]    = x264_mc_copy_w16_neon;
    pf->copy[PIXEL_8x8]      = x264_mc_copy_w8_neon;
    pf->copy[PIXEL_4x4]      = x264_mc_copy_w4_neon;

    pf->plane_copy                  = x264_plane_copy_neon;
    pf->plane_copy_deinterleave     = x264_plane_copy_deinterleave_neon;
    pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_neon;
    pf->plane_copy_interleave       = x264_plane_copy_interleave_neon;

    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_neon;
    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_neon;
    pf->store_interleave_chroma       = x264_store_interleave_chroma_neon;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_neon;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_neon;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_neon;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_neon;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_neon;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_neon;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_neon;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_neon;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_neon;

    pf->weight       = x264_mc_wtab_neon;
    pf->offsetadd    = x264_mc_offsetadd_wtab_neon;
    pf->offsetsub    = x264_mc_offsetsub_wtab_neon;
    pf->weight_cache = x264_weight_cache_neon;

    pf->mc_chroma = x264_mc_chroma_neon;
    pf->mc_luma   = mc_luma_neon;
    pf->get_ref   = get_ref_neon;
    pf->hpel_filter = x264_hpel_filter_neon;
    pf->frame_init_lowres_core = x264_frame_init_lowres_core_neon;

    pf->integral_init4h = x264_integral_init4h_neon;
    pf->integral_init8h = x264_integral_init8h_neon;
    pf->integral_init4v = x264_integral_init4v_neon;
    pf->integral_init8v = x264_integral_init8v_neon;

    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_neon;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_neon;

    pf->memcpy_aligned  = x264_memcpy_aligned_neon;
    pf->memzero_aligned = x264_memzero_aligned_neon;
#endif // !HIGH_BIT_DEPTH
}