/*****************************************************************************
 * mc-c.c: x86 motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2013 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "mc.h"

#define DECL_SUF( func, args )\
void func##_mmx2 args;\
void func##_sse2 args;\
void func##_ssse3 args;\
void func##_avx2 args;

DECL_SUF( x264_pixel_avg_16x16, ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_16x8,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x2,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))

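/* For reference, each DECL_SUF use above expands to one prototype per CPU
 * suffix, e.g. the x264_pixel_avg_16x16 line declares
 * x264_pixel_avg_16x16_mmx2, _sse2, _ssse3 and _avx2, all implemented in asm. */
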
#define MC_WEIGHT(w,type) \
    void x264_mc_weight_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int );

#define MC_WEIGHT_OFFSET(w,type) \
    void x264_mc_offsetadd_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
    void x264_mc_offsetsub_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
    MC_WEIGHT(w,type)

MC_WEIGHT_OFFSET( 4, mmx2 )
MC_WEIGHT_OFFSET( 8, mmx2 )
MC_WEIGHT_OFFSET( 12, mmx2 )
MC_WEIGHT_OFFSET( 16, mmx2 )
MC_WEIGHT_OFFSET( 20, mmx2 )
MC_WEIGHT_OFFSET( 12, sse2 )
MC_WEIGHT_OFFSET( 16, sse2 )
MC_WEIGHT_OFFSET( 20, sse2 )
#if HIGH_BIT_DEPTH
MC_WEIGHT_OFFSET( 8, sse2 )
#endif
MC_WEIGHT( 8, sse2 )
MC_WEIGHT( 4, ssse3 )
MC_WEIGHT( 8, ssse3 )
MC_WEIGHT( 12, ssse3 )
MC_WEIGHT( 16, ssse3 )
MC_WEIGHT( 20, ssse3 )
MC_WEIGHT( 8, avx2 )
MC_WEIGHT( 16, avx2 )
MC_WEIGHT( 20, avx2 )

void x264_mc_copy_w4_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w8_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w8_sse ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_mmx( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_sse( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_aligned_sse( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
void x264_mc_copy_w16_aligned_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
void x264_prefetch_fenc_420_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_fenc_422_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_ref_mmx2( pixel *, intptr_t, int );
void x264_plane_copy_core_mmx2( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_interleave_core_mmx2( pixel *dst,  intptr_t i_dst,
                                           pixel *srcu, intptr_t i_srcu,
                                           pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_core_sse2( pixel *dst,  intptr_t i_dst,
                                           pixel *srcu, intptr_t i_srcu,
                                           pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_core_avx( pixel *dst,  intptr_t i_dst,
                                          pixel *srcu, intptr_t i_srcu,
                                          pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_c( pixel *dst,  intptr_t i_dst,
                                   pixel *srcu, intptr_t i_srcu,
                                   pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_deinterleave_mmx( pixel *dstu, intptr_t i_dstu,
                                       pixel *dstv, intptr_t i_dstv,
                                       pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_sse2( pixel *dstu, intptr_t i_dstu,
                                        pixel *dstv, intptr_t i_dstv,
                                        pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_ssse3( uint8_t *dstu, intptr_t i_dstu,
                                         uint8_t *dstv, intptr_t i_dstv,
                                         uint8_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_avx( uint16_t *dstu, intptr_t i_dstu,
                                       uint16_t *dstv, intptr_t i_dstv,
                                       uint16_t *src,  intptr_t i_src, int w, int h );
void x264_store_interleave_chroma_mmx2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_store_interleave_chroma_sse2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_store_interleave_chroma_avx ( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_load_deinterleave_chroma_fenc_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void *x264_memcpy_aligned_mmx( void *dst, const void *src, size_t n );
void *x264_memcpy_aligned_sse( void *dst, const void *src, size_t n );
void x264_memzero_aligned_mmx( void *dst, size_t n );
void x264_memzero_aligned_sse( void *dst, size_t n );
void x264_memzero_aligned_avx( void *dst, size_t n );
void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init4h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_avx ( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init4v_mmx  ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_sse2 ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_avx2 ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init8v_mmx ( uint16_t *sum8, intptr_t stride );
void x264_integral_init8v_sse2( uint16_t *sum8, intptr_t stride );
void x264_integral_init8v_avx2( uint16_t *sum8, intptr_t stride );
void x264_mbtree_propagate_cost_sse2( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx ( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_fma4( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx2_fma3( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                           uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );

#define MC_CHROMA(cpu)\
void x264_mc_chroma_##cpu( pixel *dstu, pixel *dstv, intptr_t i_dst, pixel *src, intptr_t i_src,\
                           int dx, int dy, int i_width, int i_height );
MC_CHROMA(mmx2)
MC_CHROMA(sse2)
MC_CHROMA(sse2_misalign)
MC_CHROMA(ssse3)
MC_CHROMA(ssse3_cache64)
MC_CHROMA(avx)
MC_CHROMA(avx2)

#define LOWRES(cpu)\
void x264_frame_init_lowres_core_##cpu( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,\
                                        intptr_t src_stride, intptr_t dst_stride, int width, int height );
LOWRES(mmx2)
LOWRES(cache32_mmx2)
LOWRES(sse2)
LOWRES(ssse3)
LOWRES(avx)
LOWRES(xop)
LOWRES(avx2)

#define PIXEL_AVG_W(width,cpu)\
void x264_pixel_avg2_w##width##_##cpu( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t );
/* This declares some functions that don't exist, but that isn't a problem. */
#define PIXEL_AVG_WALL(cpu)\
PIXEL_AVG_W(4,cpu); PIXEL_AVG_W(8,cpu); PIXEL_AVG_W(10,cpu); PIXEL_AVG_W(12,cpu); PIXEL_AVG_W(16,cpu); PIXEL_AVG_W(18,cpu); PIXEL_AVG_W(20,cpu);

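/* For reference, PIXEL_AVG_WALL(sse2) expands to prototypes for
 * x264_pixel_avg2_w4_sse2 through x264_pixel_avg2_w20_sse2, including the
 * w10/w18 widths that only some CPU types implement; per the note above,
 * declaring a function that is never defined or called is harmless. */
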
PIXEL_AVG_WALL(mmx2)
PIXEL_AVG_WALL(cache32_mmx2)
PIXEL_AVG_WALL(cache64_mmx2)
PIXEL_AVG_WALL(cache64_sse2)
PIXEL_AVG_WALL(sse2)
PIXEL_AVG_WALL(sse2_misalign)
PIXEL_AVG_WALL(cache64_ssse3)
PIXEL_AVG_WALL(avx2)

#define PIXEL_AVG_WTAB(instr, name1, name2, name3, name4, name5)\
static void (* const x264_pixel_avg_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t ) =\
{\
    NULL,\
    x264_pixel_avg2_w4_##name1,\
    x264_pixel_avg2_w8_##name2,\
    x264_pixel_avg2_w12_##name3,\
    x264_pixel_avg2_w16_##name4,\
    x264_pixel_avg2_w20_##name5,\
};

#if HIGH_BIT_DEPTH
/* we can replace w12/w20 with w10/w18 as only 9/17 pixels in fact are important */
#define x264_pixel_avg2_w12_mmx2 x264_pixel_avg2_w10_mmx2
#define x264_pixel_avg2_w20_mmx2 x264_pixel_avg2_w18_mmx2
#define x264_pixel_avg2_w12_sse2 x264_pixel_avg2_w10_sse2
#define x264_pixel_avg2_w20_sse2 x264_pixel_avg2_w18_sse2
#define x264_pixel_avg2_w12_avx2 x264_pixel_avg2_w16_avx2
#define x264_pixel_avg2_w20_avx2 x264_pixel_avg2_w18_avx2
#else
/* w16 sse2 is faster than w12 mmx as long as the cacheline issue is resolved */
#define x264_pixel_avg2_w12_cache64_ssse3 x264_pixel_avg2_w16_cache64_ssse3
#define x264_pixel_avg2_w12_cache64_sse2  x264_pixel_avg2_w16_cache64_sse2
#define x264_pixel_avg2_w12_sse3          x264_pixel_avg2_w16_sse3
#define x264_pixel_avg2_w12_sse2          x264_pixel_avg2_w16_sse2
#endif // HIGH_BIT_DEPTH

PIXEL_AVG_WTAB(mmx2, mmx2, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
PIXEL_AVG_WTAB(sse2, mmx2, sse2, sse2, sse2, sse2)
PIXEL_AVG_WTAB(avx2, mmx2, sse2, avx2, avx2, avx2)
#else // !HIGH_BIT_DEPTH
#if ARCH_X86
PIXEL_AVG_WTAB(cache32_mmx2, mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2)
PIXEL_AVG_WTAB(cache64_mmx2, mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2)
#endif
PIXEL_AVG_WTAB(sse2, mmx2, mmx2, sse2, sse2, sse2)
PIXEL_AVG_WTAB(sse2_misalign, mmx2, mmx2, sse2, sse2, sse2_misalign)
PIXEL_AVG_WTAB(cache64_sse2, mmx2, cache64_mmx2, cache64_sse2, cache64_sse2, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3, mmx2, cache64_mmx2, cache64_ssse3, cache64_ssse3, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3_atom, mmx2, mmx2, cache64_ssse3, cache64_ssse3, sse2)
PIXEL_AVG_WTAB(avx2, mmx2, mmx2, sse2, sse2, avx2)
#endif // HIGH_BIT_DEPTH

#define MC_COPY_WTAB(instr, name1, name2, name3)\
static void (* const x264_mc_copy_wtab_##instr[5])( pixel *, intptr_t, pixel *, intptr_t, int ) =\
{\
    NULL,\
    x264_mc_copy_w4_##name1,\
    x264_mc_copy_w8_##name2,\
    NULL,\
    x264_mc_copy_w16_##name3,\
};

MC_COPY_WTAB(mmx,mmx,mmx,mmx)
#if HIGH_BIT_DEPTH
MC_COPY_WTAB(sse,mmx,sse,sse)
MC_COPY_WTAB(avx,mmx,sse,avx)
#else
MC_COPY_WTAB(sse,mmx,mmx,sse)
#endif

#define MC_WEIGHT_WTAB(function, instr, name1, name2, w12version)\
    static void (* x264_mc_##function##_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ) =\
{\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w8_##name2,\
    x264_mc_##function##_w##w12version##_##instr,\
    x264_mc_##function##_w16_##instr,\
    x264_mc_##function##_w20_##instr,\
};

#if HIGH_BIT_DEPTH
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,12)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,sse2,16)

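/* The weight_cache_* functions below precompute the constants the asm weight
 * kernels expect: w->cachea/w->cacheb are filled with the scale/offset terms
 * so the per-row weighting loop only has to load them, instead of rederiving
 * them from i_scale/i_denom/i_offset on every call. */
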
static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        for( int i = 0; i < 8; i++ )
            w->cachea[i] = abs(w->i_offset<<(BIT_DEPTH-8));
        return;
    }
    w->weightfn = h->mc.weight;
    int den1 = 1<<w->i_denom;
    int den2 = w->i_scale<<1;
    int den3 = 1+(w->i_offset<<(BIT_DEPTH-8+1));
    for( int i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = i&1 ? den3 : den2;
    }
}
#else // !HIGH_BIT_DEPTH
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(weight,ssse3,ssse3,ssse3,16)
MC_WEIGHT_WTAB(weight,avx2,ssse3,avx2,16)

static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    int i;
    int16_t den1;

    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        memset( w->cachea, abs(w->i_offset), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = 1 << (w->i_denom - 1) | w->i_offset << w->i_denom;
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = w->i_scale;
        w->cacheb[i] = den1;
    }
}

static void x264_weight_cache_ssse3( x264_t *h, x264_weight_t *w )
{
    int i, den1;
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;

        memset( w->cachea, abs( w->i_offset ), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = w->i_scale << (8 - w->i_denom);
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = w->i_offset;
    }
}
#endif // !HIGH_BIT_DEPTH

static const uint8_t hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const uint8_t hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};

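/* qpel_idx below packs the fractional MV as ((mvy&3)<<2) + (mvx&3); hpel_ref0/
 * hpel_ref1 map that index to the two half-pel planes in src[4] (x264's
 * filtered-plane layout: 0 = fullpel, 1 = horizontal, 2 = vertical, 3 = center)
 * whose average synthesizes the quarter-pel sample. Worked example: mvx=1,
 * mvy=2 gives qpel_idx 9, so src1 comes from plane hpel_ref0[9] = 3 and src2
 * from plane hpel_ref1[9] = 2; since 9&5 != 0, the two planes are averaged
 * rather than copied. */
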
#define MC_LUMA(name,instr1,instr2)\
static void mc_luma_##name( pixel *dst,    intptr_t i_dst_stride,\
                            pixel *src[4], intptr_t i_src_stride,\
                            int mvx, int mvy,\
                            int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##instr1[i_width>>2](\
                dst, i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );\
    }\
    else if( weight->weightfn )\
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );\
    else\
        x264_mc_copy_wtab_##instr2[i_width>>2](dst, i_dst_stride, src1, i_src_stride, i_height );\
}

MC_LUMA(mmx2,mmx2,mmx)
MC_LUMA(sse2,sse2,sse)
#if HIGH_BIT_DEPTH
MC_LUMA(avx2,avx2,avx)
#else // !HIGH_BIT_DEPTH
#if ARCH_X86
MC_LUMA(cache32_mmx2,cache32_mmx2,mmx)
MC_LUMA(cache64_mmx2,cache64_mmx2,mmx)
#endif
MC_LUMA(cache64_sse2,cache64_sse2,sse)
MC_LUMA(cache64_ssse3,cache64_ssse3,sse)
MC_LUMA(cache64_ssse3_atom,cache64_ssse3_atom,sse)
#endif // !HIGH_BIT_DEPTH

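/* get_ref_* is the zero-copy variant of mc_luma_*: for fullpel or pure
 * half-pel MVs with no weighting it returns a pointer straight into the
 * reference plane and rewrites *i_dst_stride to the source stride, so the
 * caller reads the prediction in place instead of through a copy. */
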
#define GET_REF(name)\
static pixel *get_ref_##name( pixel *dst,   intptr_t *i_dst_stride,\
                              pixel *src[4], intptr_t i_src_stride,\
                              int mvx, int mvy,\
                              int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##name[i_width>>2](\
                dst, *i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );\
        return dst;\
    }\
    else if( weight->weightfn )\
    {\
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );\
        return dst;\
    }\
    else\
    {\
        *i_dst_stride = i_src_stride;\
        return src1;\
    }\
}

GET_REF(mmx2)
GET_REF(sse2)
GET_REF(avx2)
#if !HIGH_BIT_DEPTH
#if ARCH_X86
GET_REF(cache32_mmx2)
GET_REF(cache64_mmx2)
#endif
GET_REF(sse2_misalign)
GET_REF(cache64_sse2)
GET_REF(cache64_ssse3)
GET_REF(cache64_ssse3_atom)
#endif // !HIGH_BIT_DEPTH

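/* HPEL() below wraps the per-row asm primitives: it rounds src down to the
 * requested alignment, widens the row by the same amount, and shifts the three
 * dst planes identically, so the v/c/h kernels always see aligned pointers;
 * the extra leading pixels land in the area just before each plane (frame
 * padding in practice). */
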
#define HPEL(align, cpu, cpuv, cpuc, cpuh)\
void x264_hpel_filter_v_##cpuv( pixel *dst, pixel *src, int16_t *buf, intptr_t stride, intptr_t width);\
void x264_hpel_filter_c_##cpuc( pixel *dst, int16_t *buf, intptr_t width );\
void x264_hpel_filter_h_##cpuh( pixel *dst, pixel *src, intptr_t width );\
static void x264_hpel_filter_##cpu( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,\
                                    intptr_t stride, int width, int height, int16_t *buf )\
{\
    intptr_t realign = (intptr_t)src & (align-1);\
    src  -= realign;\
    dstv -= realign;\
    dstc -= realign;\
    dsth -= realign;\
    width += realign;\
    while( height-- )\
    {\
        x264_hpel_filter_v_##cpuv( dstv, src, buf+16, stride, width );\
        x264_hpel_filter_c_##cpuc( dstc, buf+16, width );\
        x264_hpel_filter_h_##cpuh( dsth, src, width );\
        dsth += stride;\
        dstv += stride;\
        dstc += stride;\
        src  += stride;\
    }\
}

HPEL(8, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
HPEL(16, sse2, sse2, sse2, sse2)
#else // !HIGH_BIT_DEPTH
HPEL(16, sse2_amd, mmx2, mmx2, sse2)
#if ARCH_X86_64
void x264_hpel_filter_sse2 ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_ssse3( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_avx  ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
#else
HPEL(16, sse2, sse2, sse2, sse2)
HPEL(16, ssse3, ssse3, ssse3, ssse3)
HPEL(16, avx, avx, avx, avx)
#endif
HPEL(32, avx2, avx2, avx2, avx2)
HPEL(16, sse2_misalign, sse2, sse2_misalign, sse2)
#endif // HIGH_BIT_DEPTH

static void x264_plane_copy_mmx2( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h )
{
    int c_w = 16/sizeof(pixel) - 1;
    if( w < 256 ) { // tiny resolutions don't want non-temporal hints. dunno the exact threshold.
        x264_plane_copy_c( dst, i_dst, src, i_src, w, h );
    } else if( !(w&c_w) ) {
        x264_plane_copy_core_mmx2( dst, i_dst, src, i_src, w, h );
    } else if( i_src > 0 ) {
        // have to use plain memcpy on the last line (in memory order) to avoid overreading src
        x264_plane_copy_core_mmx2( dst, i_dst, src, i_src, (w+c_w)&~c_w, h-1 );
        memcpy( dst+i_dst*(h-1), src+i_src*(h-1), w*sizeof(pixel) );
    } else {
        memcpy( dst, src, w*sizeof(pixel) );
        x264_plane_copy_core_mmx2( dst+i_dst, i_dst, src+i_src, i_src, (w+c_w)&~c_w, h-1 );
    }
}

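/* Worked example of the rounding above for 8-bit pixels (c_w = 15): w = 100 is
 * rounded up to (100+15)&~15 = 112, so the SIMD copy may read up to 12 pixels
 * past the end of each row. That is safe everywhere except on the last row in
 * memory order, which is why that row is done with plain memcpy instead. */
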
#define PLANE_INTERLEAVE(cpu) \
static void x264_plane_copy_interleave_##cpu( pixel *dst,  intptr_t i_dst,\
                                              pixel *srcu, intptr_t i_srcu,\
                                              pixel *srcv, intptr_t i_srcv, int w, int h )\
{\
    if( !(w&15) ) {\
        x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
    } else if( w < 16 || (i_srcu ^ i_srcv) ) {\
        x264_plane_copy_interleave_c( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
    } else if( i_srcu > 0 ) {\
        x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, (w+15)&~15, h-1 );\
        x264_plane_copy_interleave_c( dst+i_dst*(h-1), 0, srcu+i_srcu*(h-1), 0, srcv+i_srcv*(h-1), 0, w, 1 );\
    } else {\
        x264_plane_copy_interleave_c( dst, 0, srcu, 0, srcv, 0, w, 1 );\
        x264_plane_copy_interleave_core_##cpu( dst+i_dst, i_dst, srcu+i_srcu, i_srcu, srcv+i_srcv, i_srcv, (w+15)&~15, h-1 );\
    }\
}

PLANE_INTERLEAVE(mmx2)
PLANE_INTERLEAVE(sse2)
#if HIGH_BIT_DEPTH
PLANE_INTERLEAVE(avx)
#endif

void x264_mc_init_mmx( int cpu, x264_mc_functions_t *pf )
{
    if( !(cpu&X264_CPU_MMX) )
        return;

    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_mmx;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_mmx;

    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_mmx;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_8x8]   = x264_mc_copy_w8_mmx;
    pf->copy[PIXEL_4x4]   = x264_mc_copy_w4_mmx;
    pf->memcpy_aligned  = x264_memcpy_aligned_mmx;
    pf->memzero_aligned = x264_memzero_aligned_mmx;
    pf->integral_init4v = x264_integral_init4v_mmx;
    pf->integral_init8v = x264_integral_init8v_mmx;

    if( !(cpu&X264_CPU_MMX2) )
        return;

    pf->prefetch_fenc_420 = x264_prefetch_fenc_420_mmx2;
    pf->prefetch_fenc_422 = x264_prefetch_fenc_422_mmx2;
    pf->prefetch_ref  = x264_prefetch_ref_mmx2;

    pf->plane_copy = x264_plane_copy_mmx2;
    pf->plane_copy_interleave = x264_plane_copy_interleave_mmx2;
    pf->store_interleave_chroma = x264_store_interleave_chroma_mmx2;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_mmx2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_mmx2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_mmx2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_mmx2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_mmx2;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_mmx2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_mmx2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_mmx2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_mmx2;

    pf->mc_luma = mc_luma_mmx2;
    pf->get_ref = get_ref_mmx2;
    pf->mc_chroma = x264_mc_chroma_mmx2;
    pf->hpel_filter = x264_hpel_filter_mmx2;
    pf->weight = x264_mc_weight_wtab_mmx2;
    pf->weight_cache = x264_weight_cache_mmx2;
    pf->offsetadd = x264_mc_offsetadd_wtab_mmx2;
    pf->offsetsub = x264_mc_offsetsub_wtab_mmx2;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_mmx2;

    if( cpu&X264_CPU_SSE )
    {
        pf->memcpy_aligned  = x264_memcpy_aligned_sse;
        pf->memzero_aligned = x264_memzero_aligned_sse;
    }

#if HIGH_BIT_DEPTH
#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&(X264_CPU_CACHELINE_32|X264_CPU_CACHELINE_64) )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;

    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;

    pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;

    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pf->get_ref = get_ref_sse2;
        pf->mc_luma = mc_luma_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
    }

    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
    pf->store_interleave_chroma = x264_store_interleave_chroma_sse2;
    pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
    pf->offsetsub = x264_mc_offsetsub_wtab_sse2;

    if( cpu&X264_CPU_SSE2_IS_SLOW )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_sse2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_sse2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_sse2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_sse2;

    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse;
    pf->weight = x264_mc_weight_wtab_sse2;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_sse2;

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;

    if( !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SLOW_ATOM|X264_CPU_SLOW_PALIGNR)) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_avx;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_avx;
    pf->plane_copy_interleave   = x264_plane_copy_interleave_avx;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_avx;
    pf->store_interleave_chroma = x264_store_interleave_chroma_avx;
    pf->copy[PIXEL_16x16]       = x264_mc_copy_w16_aligned_avx;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;

    if( cpu&X264_CPU_XOP )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;

    if( cpu&X264_CPU_AVX2 )
        pf->mc_luma = mc_luma_avx2;
#else // !HIGH_BIT_DEPTH

#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&X264_CPU_CACHELINE_32 )
    {
        pf->mc_luma = mc_luma_cache32_mmx2;
        pf->get_ref = get_ref_cache32_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
    else if( cpu&X264_CPU_CACHELINE_64 )
    {
        pf->mc_luma = mc_luma_cache64_mmx2;
        pf->get_ref = get_ref_cache64_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->hpel_filter = x264_hpel_filter_sse2_amd;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;

    if( !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        pf->weight = x264_mc_weight_wtab_sse2;
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
            pf->offsetsub = x264_mc_offsetsub_wtab_sse2;
        }

        pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse;
        pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
        pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
        pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
        pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
        pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
        if( cpu&X264_CPU_SSE_MISALIGN )
            pf->hpel_filter = x264_hpel_filter_sse2_misalign;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_sse2;

        if( cpu&X264_CPU_SSE2_IS_FAST )
        {
            pf->store_interleave_chroma = x264_store_interleave_chroma_sse2; // FIXME sse2fast? sse2medium?
            pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
            pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;
            pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
            pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;
            pf->mc_luma = mc_luma_sse2;
            pf->get_ref = get_ref_sse2;
            if( cpu&X264_CPU_CACHELINE_64 )
            {
                pf->mc_luma = mc_luma_cache64_sse2;
                pf->get_ref = get_ref_cache64_sse2;
            }
            if( cpu&X264_CPU_SSE_MISALIGN )
            {
                pf->get_ref = get_ref_sse2_misalign;
                if( !(cpu&X264_CPU_STACK_MOD4) )
                    pf->mc_chroma = x264_mc_chroma_sse2_misalign;
            }
        }
    }

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_ssse3;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_ssse3;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_ssse3;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_ssse3;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_ssse3;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_ssse3;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_ssse3;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_ssse3;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_ssse3;

    if( !(cpu&X264_CPU_SLOW_PSHUFB) )
    {
        pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_ssse3;
        pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_ssse3;
        pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_ssse3;
    }

    if( !(cpu&X264_CPU_SLOW_PALIGNR) )
    {
#if ARCH_X86_64
        if( !(cpu&X264_CPU_SLOW_ATOM) ) /* The 64-bit version is slower, but the 32-bit version is faster? */
#endif
            pf->hpel_filter = x264_hpel_filter_ssse3;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
    }
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_ssse3;

    if( cpu&X264_CPU_CACHELINE_64 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_ssse3_cache64;
        pf->mc_luma = mc_luma_cache64_ssse3;
        pf->get_ref = get_ref_cache64_ssse3;
        if( cpu&X264_CPU_SLOW_ATOM )
        {
            pf->mc_luma = mc_luma_cache64_ssse3_atom;
            pf->get_ref = get_ref_cache64_ssse3_atom;
        }
    }

    pf->weight_cache = x264_weight_cache_ssse3;
    pf->weight = x264_mc_weight_wtab_ssse3;

    if( !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SLOW_ATOM|X264_CPU_SLOW_PALIGNR)) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_SSE4) )
        return;

    pf->integral_init4h = x264_integral_init4h_sse4;
    pf->integral_init8h = x264_integral_init8h_sse4;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
    pf->integral_init8h = x264_integral_init8h_avx;
    pf->hpel_filter = x264_hpel_filter_avx;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;

    if( cpu&X264_CPU_XOP )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;

    if( cpu&X264_CPU_AVX2 )
    {
        pf->hpel_filter = x264_hpel_filter_avx2;
        pf->mc_chroma = x264_mc_chroma_avx2;
        pf->weight = x264_mc_weight_wtab_avx2;
        pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_avx2;
        pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_avx2;
        pf->integral_init8v = x264_integral_init8v_avx2;
        pf->integral_init4v = x264_integral_init4v_avx2;
        pf->integral_init8h = x264_integral_init8h_avx2;
        pf->integral_init4h = x264_integral_init4h_avx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx2;
    }
#endif // HIGH_BIT_DEPTH

    if( !(cpu&X264_CPU_AVX) )
        return;
    pf->memzero_aligned = x264_memzero_aligned_avx;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx;

    if( cpu&X264_CPU_FMA4 )
        pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_fma4;

    if( !(cpu&X264_CPU_AVX2) )
        return;
    pf->get_ref = get_ref_avx2;

    if( cpu&X264_CPU_FMA3 )
        pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx2_fma3;
}

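/* Presumably invoked from x264_mc_init() (common/mc.c) when x86 asm is
 * enabled: each stage above overwrites pf-> entries with progressively more
 * capable implementations, returning early as soon as a required CPU feature
 * flag is missing. */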