/*****************************************************************************
 * mc-c.c: aarch64 motion compensation
 *****************************************************************************
 * Copyright (C) 2009-2015 x264 project
 *
 * Authors: David Conrad <lessen42@gmail.com>
 *          Janne Grunau <janne-x264@jannau.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "mc.h"

void x264_prefetch_ref_aarch64( uint8_t *, intptr_t, int );
void x264_prefetch_fenc_420_aarch64( uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_prefetch_fenc_422_aarch64( uint8_t *, intptr_t, uint8_t *, intptr_t, int );

void *x264_memcpy_aligned_neon( void *dst, const void *src, size_t n );
void x264_memzero_aligned_neon( void *dst, size_t n );

void x264_pixel_avg_16x16_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_16x8_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_8x16_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_8x8_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_8x4_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_4x16_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_4x8_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_4x4_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );
void x264_pixel_avg_4x2_neon  ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, intptr_t, int );

void x264_pixel_avg2_w4_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int );
void x264_pixel_avg2_w8_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int );
void x264_pixel_avg2_w16_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int );
void x264_pixel_avg2_w20_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int );

void x264_plane_copy_neon( pixel *dst, intptr_t i_dst,
                           pixel *src, intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_neon(  pixel *dstu, intptr_t i_dstu,
                                         pixel *dstv, intptr_t i_dstv,
                                         pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_rgb_neon( pixel *dsta, intptr_t i_dsta,
                                            pixel *dstb, intptr_t i_dstb,
                                            pixel *dstc, intptr_t i_dstc,
                                            pixel *src,  intptr_t i_src, int pw, int w, int h );
void x264_plane_copy_interleave_neon( pixel *dst,  intptr_t i_dst,
                                      pixel *srcu, intptr_t i_srcu,
                                      pixel *srcv, intptr_t i_srcv, int w, int h );

void x264_store_interleave_chroma_neon( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_load_deinterleave_chroma_fdec_neon( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_neon( pixel *dst, pixel *src, intptr_t i_src, int height );

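/* The weight tables declared by MC_WEIGHT are indexed by block width in units
 * of 4 pixels (i_width>>2): entries 0..5 map widths 0/4/8/12/16/20 to the
 * w4/w4/w8/w16/w16/w20 kernels, so a width-12 call falls through to the w16
 * kernel.  The x264_pixel_avg2 and x264_mc_copy tables further down use the
 * same indexing scheme. */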
69 #define MC_WEIGHT(func)\
70 void x264_mc_weight_w20##func##_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int );\
71 void x264_mc_weight_w16##func##_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int );\
72 void x264_mc_weight_w8##func##_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int );\
73 void x264_mc_weight_w4##func##_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int );\
74 \
75 static void (* x264_mc##func##_wtab_neon[6])( uint8_t *, intptr_t, uint8_t *, intptr_t, const x264_weight_t *, int ) =\
76 {\
77     x264_mc_weight_w4##func##_neon,\
78     x264_mc_weight_w4##func##_neon,\
79     x264_mc_weight_w8##func##_neon,\
80     x264_mc_weight_w16##func##_neon,\
81     x264_mc_weight_w16##func##_neon,\
82     x264_mc_weight_w20##func##_neon,\
83 };
84
85 MC_WEIGHT()
86 MC_WEIGHT(_nodenom)
87 MC_WEIGHT(_offsetadd)
88 MC_WEIGHT(_offsetsub)
89
90 void x264_mc_copy_w4_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, int );
91 void x264_mc_copy_w8_neon ( uint8_t *, intptr_t, uint8_t *, intptr_t, int );
92 void x264_mc_copy_w16_neon( uint8_t *, intptr_t, uint8_t *, intptr_t, int );
93
94 void x264_mc_chroma_neon( uint8_t *, uint8_t *, intptr_t, uint8_t *, intptr_t, int, int, int, int );
95 void integral_init4h_neon( uint16_t *, uint8_t *, intptr_t );
96 void integral_init4v_neon( uint16_t *, uint16_t *, intptr_t );
97 void integral_init8h_neon( uint16_t *, uint8_t *, intptr_t );
98 void integral_init8v_neon( uint16_t *, intptr_t );
99 void x264_frame_init_lowres_core_neon( uint8_t *, uint8_t *, uint8_t *, uint8_t *, uint8_t *, intptr_t, intptr_t, int, int );
100
101 void x264_mbtree_propagate_cost_neon( int16_t *, uint16_t *, uint16_t *, uint16_t *, uint16_t *, float *, int );
102
103 #if !HIGH_BIT_DEPTH
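/* Pick the cheapest weighting path for the given parameters: when the scale
 * equals 1<<denom the weighted prediction degenerates to a pure offset (add
 * or subtract), a zero denom skips the rounding shift, and everything else
 * takes the full scale/shift/offset kernels. */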
104 static void x264_weight_cache_neon( x264_t *h, x264_weight_t *w )
105 {
106     if( w->i_scale == 1<<w->i_denom )
107     {
108         if( w->i_offset < 0 )
109         {
110             w->weightfn = x264_mc_offsetsub_wtab_neon;
111             w->cachea[0] = -w->i_offset;
112         }
113         else
114         {
115             w->weightfn = x264_mc_offsetadd_wtab_neon;
116             w->cachea[0] = w->i_offset;
117         }
118     }
119     else if( !w->i_denom )
120         w->weightfn = x264_mc_nodenom_wtab_neon;
121     else
122         w->weightfn = x264_mc_wtab_neon;
123 }
124
125 static void (* const x264_pixel_avg_wtab_neon[6])( uint8_t *, intptr_t, uint8_t *, intptr_t, uint8_t *, int ) =
126 {
127     NULL,
128     x264_pixel_avg2_w4_neon,
129     x264_pixel_avg2_w8_neon,
130     x264_pixel_avg2_w16_neon,   // no slower than w12, so no point in a separate function
131     x264_pixel_avg2_w16_neon,
132     x264_pixel_avg2_w20_neon,
133 };
134
135 static void (* const x264_mc_copy_wtab_neon[5])( uint8_t *, intptr_t, uint8_t *, intptr_t, int ) =
136 {
137     NULL,
138     x264_mc_copy_w4_neon,
139     x264_mc_copy_w8_neon,
140     NULL,
141     x264_mc_copy_w16_neon,
142 };
143
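/* Luma motion compensation: mvx/mvy are in quarter-pel units and qpel_idx
 * packs their fractional parts.  When either fractional component is odd
 * (qpel_idx & 5), the quarter-pel sample is formed by averaging two of the
 * precomputed half-pel planes; otherwise a plain copy of the selected plane
 * is enough.  Weighted prediction, if enabled, is applied afterwards. */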
static void mc_luma_neon( uint8_t *dst,    intptr_t i_dst_stride,
                          uint8_t *src[4], intptr_t i_src_stride,
                          int mvx, int mvy,
                          int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
    if( (mvy&3) == 3 )              // explicit if() to force conditional add
        src1 += i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        x264_pixel_avg_wtab_neon[i_width>>2](
                dst, i_dst_stride, src1, i_src_stride,
                src2, i_height );
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
    }
    else if( weight->weightfn )
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
    else
        x264_mc_copy_wtab_neon[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, i_height );
}

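/* Same dispatch as mc_luma_neon, but the destination buffer is only used when
 * work is actually required: if no interpolation or weighting is needed,
 * get_ref returns a pointer straight into the half-pel plane and reports the
 * source stride through *i_dst_stride instead of copying. */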
static uint8_t *get_ref_neon( uint8_t *dst,   intptr_t *i_dst_stride,
                              uint8_t *src[4], intptr_t i_src_stride,
                              int mvx, int mvy,
                              int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset;
    if( (mvy&3) == 3 )              // explicit if() to force conditional add
        src1 += i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        x264_pixel_avg_wtab_neon[i_width>>2](
                dst, *i_dst_stride, src1, i_src_stride,
                src2, i_height );
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
        return dst;
    }
    else if( weight->weightfn )
    {
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
        return dst;
    }
    else
    {
        *i_dst_stride = i_src_stride;
        return src1;
    }
}

void x264_hpel_filter_neon( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
                            uint8_t *src, intptr_t stride, int width,
                            int height, int16_t *buf );
#endif // !HIGH_BIT_DEPTH

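/* Saturating accumulation helpers for the mbtree propagate costs: additions
 * are clamped to (1<<15)-1 so the 16-bit cost buffers cannot overflow. */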
#define CLIP_ADD(s,x) (s) = X264_MIN((s)+(x),(1<<15)-1)
#define CLIP_ADD2(s,x)\
do\
{\
    CLIP_ADD((s)[0], (x)[0]);\
    CLIP_ADD((s)[1], (x)[1]);\
} while(0)

void x264_mbtree_propagate_list_internal_neon( int16_t (*mvs)[2],
                                               int16_t *propagate_amount,
                                               uint16_t *lowres_costs,
                                               int16_t *output,
                                               int bipred_weight, int mb_y,
                                               int len );

static void x264_mbtree_propagate_list_neon( x264_t *h, uint16_t *ref_costs,
                                             int16_t (*mvs)[2],
                                             int16_t *propagate_amount,
                                             uint16_t *lowres_costs,
                                             int bipred_weight, int mb_y,
                                             int len, int list )
{
    int16_t *current = h->scratch_buffer2;

    x264_mbtree_propagate_list_internal_neon( mvs, propagate_amount,
                                              lowres_costs, current,
                                              bipred_weight, mb_y, len );

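    /* The NEON helper processes macroblocks in batches of 8 and writes packed
     * results into the scratch buffer.  Judging from the indexing below, each
     * batch occupies 48 int16s: interleaved (mbx,mby) pairs first, then the
     * weight pairs for the upper row (idx0) 16 elements in, then the weight
     * pairs for the lower row (idx2) 32 elements in.  The scalar loop scatters
     * those weights into ref_costs with saturation. */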
    unsigned stride = h->mb.i_mb_stride;
    unsigned width = h->mb.i_mb_width;
    unsigned height = h->mb.i_mb_height;

    for( unsigned i = 0; i < len; current += 32 )
    {
        int end = X264_MIN( i+8, len );
        for( ; i < end; i++, current += 2 )
        {
            if( !(lowres_costs[i] & (1 << (list+LOWRES_COST_SHIFT))) )
                continue;

            unsigned mbx = current[0];
            unsigned mby = current[1];
            unsigned idx0 = mbx + mby * stride;
            unsigned idx2 = idx0 + stride;

            /* Shortcut for the simple/common case of zero MV */
            if( !M32( mvs[i] ) )
            {
                CLIP_ADD( ref_costs[idx0], current[16] );
                continue;
            }

            if( mbx < width-1 && mby < height-1 )
            {
                CLIP_ADD2( ref_costs+idx0, current+16 );
                CLIP_ADD2( ref_costs+idx2, current+32 );
            }
            else
            {
                /* Note: this takes advantage of unsigned representation to
                 * catch negative mbx/mby. */
                if( mby < height )
                {
                    if( mbx < width )
                        CLIP_ADD( ref_costs[idx0+0], current[16] );
                    if( mbx+1 < width )
                        CLIP_ADD( ref_costs[idx0+1], current[17] );
                }
                if( mby+1 < height )
                {
                    if( mbx < width )
                        CLIP_ADD( ref_costs[idx2+0], current[32] );
                    if( mbx+1 < width )
                        CLIP_ADD( ref_costs[idx2+1], current[33] );
                }
            }
        }
    }
}

#undef CLIP_ADD
#undef CLIP_ADD2

void x264_mc_init_aarch64( int cpu, x264_mc_functions_t *pf )
{
#if !HIGH_BIT_DEPTH
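    /* All assembly in this file is 8-bit only; HIGH_BIT_DEPTH builds keep the
     * C implementations.  The prefetch functions are gated on the ARMV8 flag
     * rather than NEON since they only need the base AArch64 ISA. */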
    if( cpu&X264_CPU_ARMV8 )
    {
        pf->prefetch_fenc_420 = x264_prefetch_fenc_420_aarch64;
        pf->prefetch_fenc_422 = x264_prefetch_fenc_422_aarch64;
        pf->prefetch_ref      = x264_prefetch_ref_aarch64;
    }

    if( !(cpu&X264_CPU_NEON) )
        return;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_neon;
    pf->copy[PIXEL_16x16]    = x264_mc_copy_w16_neon;
    pf->copy[PIXEL_8x8]      = x264_mc_copy_w8_neon;
    pf->copy[PIXEL_4x4]      = x264_mc_copy_w4_neon;

    pf->plane_copy                  = x264_plane_copy_neon;
    pf->plane_copy_deinterleave     = x264_plane_copy_deinterleave_neon;
    pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_neon;
    pf->plane_copy_interleave       = x264_plane_copy_interleave_neon;

    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_neon;
    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_neon;
    pf->store_interleave_chroma       = x264_store_interleave_chroma_neon;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_neon;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_neon;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_neon;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_neon;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_neon;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_neon;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_neon;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_neon;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_neon;

    pf->weight       = x264_mc_wtab_neon;
    pf->offsetadd    = x264_mc_offsetadd_wtab_neon;
    pf->offsetsub    = x264_mc_offsetsub_wtab_neon;
    pf->weight_cache = x264_weight_cache_neon;

    pf->mc_chroma = x264_mc_chroma_neon;
    pf->mc_luma = mc_luma_neon;
    pf->get_ref = get_ref_neon;
    pf->hpel_filter = x264_hpel_filter_neon;
    pf->frame_init_lowres_core = x264_frame_init_lowres_core_neon;

    pf->integral_init4h = integral_init4h_neon;
    pf->integral_init8h = integral_init8h_neon;
    pf->integral_init4v = integral_init4v_neon;
    pf->integral_init8v = integral_init8v_neon;

    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_neon;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_neon;

    pf->memcpy_aligned  = x264_memcpy_aligned_neon;
    pf->memzero_aligned = x264_memzero_aligned_neon;
#endif // !HIGH_BIT_DEPTH
}