/*****************************************************************************
 * mc-c.c: x264 motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2016 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "common/common.h"
#include "mc.h"

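/* Prototypes for the assembly kernels (implemented in mc-a.asm and friends).
 * A variant is declared for each instruction set; only those actually
 * assembled for the configured bit depth are ever referenced. */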
#define DECL_SUF( func, args )\
    void func##_mmx2 args;\
    void func##_sse2 args;\
    void func##_ssse3 args;\
    void func##_avx2 args;

DECL_SUF( x264_pixel_avg_16x16, ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_16x8,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x2,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))

#define MC_WEIGHT(w,type) \
    void x264_mc_weight_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int );

#define MC_WEIGHT_OFFSET(w,type) \
    void x264_mc_offsetadd_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
    void x264_mc_offsetsub_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
    MC_WEIGHT(w,type)

MC_WEIGHT_OFFSET( 4, mmx2 )
MC_WEIGHT_OFFSET( 8, mmx2 )
MC_WEIGHT_OFFSET( 12, mmx2 )
MC_WEIGHT_OFFSET( 16, mmx2 )
MC_WEIGHT_OFFSET( 20, mmx2 )
MC_WEIGHT_OFFSET( 12, sse2 )
MC_WEIGHT_OFFSET( 16, sse2 )
MC_WEIGHT_OFFSET( 20, sse2 )
#if HIGH_BIT_DEPTH
MC_WEIGHT_OFFSET( 8, sse2 )
#endif
MC_WEIGHT( 8, sse2 )
MC_WEIGHT( 4, ssse3 )
MC_WEIGHT( 8, ssse3 )
MC_WEIGHT( 12, ssse3 )
MC_WEIGHT( 16, ssse3 )
MC_WEIGHT( 20, ssse3 )
MC_WEIGHT( 8, avx2 )
MC_WEIGHT( 16, avx2 )
MC_WEIGHT( 20, avx2 )
#undef MC_WEIGHT_OFFSET
#undef MC_WEIGHT

void x264_mc_copy_w4_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w8_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w8_sse ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_mmx( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_sse( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_aligned_sse( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
void x264_mc_copy_w16_aligned_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
void x264_prefetch_fenc_420_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_fenc_422_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_ref_mmx2( pixel *, intptr_t, int );
void x264_plane_copy_core_sse( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_core_avx( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_swap_core_ssse3( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_swap_core_avx2 ( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_swap_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_interleave_core_mmx2( pixel *dst,  intptr_t i_dst,
                                           pixel *srcu, intptr_t i_srcu,
                                           pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_core_sse2( pixel *dst,  intptr_t i_dst,
                                           pixel *srcu, intptr_t i_srcu,
                                           pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_core_avx( pixel *dst,  intptr_t i_dst,
                                          pixel *srcu, intptr_t i_srcu,
                                          pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_c( pixel *dst,  intptr_t i_dst,
                                   pixel *srcu, intptr_t i_srcu,
                                   pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_deinterleave_mmx( pixel *dstu, intptr_t i_dstu,
                                       pixel *dstv, intptr_t i_dstv,
                                       pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_sse2( pixel *dstu, intptr_t i_dstu,
                                        pixel *dstv, intptr_t i_dstv,
                                        pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_ssse3( uint8_t *dstu, intptr_t i_dstu,
                                         uint8_t *dstv, intptr_t i_dstv,
                                         uint8_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_avx( uint16_t *dstu, intptr_t i_dstu,
                                       uint16_t *dstv, intptr_t i_dstv,
                                       uint16_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_rgb_sse2 ( pixel *dsta, intptr_t i_dsta,
                                             pixel *dstb, intptr_t i_dstb,
                                             pixel *dstc, intptr_t i_dstc,
                                             pixel *src,  intptr_t i_src, int pw, int w, int h );
void x264_plane_copy_deinterleave_rgb_ssse3( pixel *dsta, intptr_t i_dsta,
                                             pixel *dstb, intptr_t i_dstb,
                                             pixel *dstc, intptr_t i_dstc,
                                             pixel *src,  intptr_t i_src, int pw, int w, int h );
void x264_plane_copy_deinterleave_v210_ssse3( uint16_t *dstu, intptr_t i_dstu,
                                              uint16_t *dstv, intptr_t i_dstv,
                                              uint32_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_v210_avx  ( uint16_t *dstu, intptr_t i_dstu,
                                              uint16_t *dstv, intptr_t i_dstv,
                                              uint32_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_v210_avx2 ( uint16_t *dstu, intptr_t i_dstu,
                                              uint16_t *dstv, intptr_t i_dstv,
                                              uint32_t *src,  intptr_t i_src, int w, int h );
void x264_store_interleave_chroma_mmx2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_store_interleave_chroma_sse2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_store_interleave_chroma_avx ( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_load_deinterleave_chroma_fenc_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void *x264_memcpy_aligned_mmx( void *dst, const void *src, size_t n );
void *x264_memcpy_aligned_sse( void *dst, const void *src, size_t n );
void x264_memzero_aligned_mmx( void *dst, size_t n );
void x264_memzero_aligned_sse( void *dst, size_t n );
void x264_memzero_aligned_avx( void *dst, size_t n );
void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init4h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_avx ( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init4v_mmx  ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_sse2 ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_avx2( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init8v_mmx ( uint16_t *sum8, intptr_t stride );
void x264_integral_init8v_sse2( uint16_t *sum8, intptr_t stride );
void x264_integral_init8v_avx2( uint16_t *sum8, intptr_t stride );
void x264_mbtree_propagate_cost_sse2( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx ( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_fma4( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx2( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
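/* The mbtree fix8 helpers convert between floats and a 16-bit fixed-point
 * format with 8 fractional bits, used when storing mbtree data in the 2-pass
 * stats file. A scalar sketch of what the asm is assumed to compute (the
 * byteswap it also performs for endian-independent stats files is omitted):
 *
 *     pack:   dst[i] = (uint16_t)(src[i] * 256.0f);
 *     unpack: dst[i] = (int16_t)src[i] * (1.0f/256.0f);
 */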
void x264_mbtree_fix8_pack_ssse3( uint16_t *dst, float *src, int count );
void x264_mbtree_fix8_pack_avx2 ( uint16_t *dst, float *src, int count );
void x264_mbtree_fix8_unpack_ssse3( float *dst, uint16_t *src, int count );
void x264_mbtree_fix8_unpack_avx2 ( float *dst, uint16_t *src, int count );

#define MC_CHROMA(cpu)\
void x264_mc_chroma_##cpu( pixel *dstu, pixel *dstv, intptr_t i_dst, pixel *src, intptr_t i_src,\
                           int dx, int dy, int i_width, int i_height );
MC_CHROMA(mmx2)
MC_CHROMA(sse2)
MC_CHROMA(ssse3)
MC_CHROMA(ssse3_cache64)
MC_CHROMA(avx)
MC_CHROMA(avx2)

#define LOWRES(cpu)\
void x264_frame_init_lowres_core_##cpu( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,\
                                        intptr_t src_stride, intptr_t dst_stride, int width, int height );
LOWRES(mmx2)
LOWRES(cache32_mmx2)
LOWRES(sse2)
LOWRES(ssse3)
LOWRES(avx)
LOWRES(xop)
LOWRES(avx2)

#define PIXEL_AVG_W(width,cpu)\
void x264_pixel_avg2_w##width##_##cpu( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t );
/* This declares some functions that don't exist, but that isn't a problem. */
#define PIXEL_AVG_WALL(cpu)\
PIXEL_AVG_W(4,cpu); PIXEL_AVG_W(8,cpu); PIXEL_AVG_W(10,cpu); PIXEL_AVG_W(12,cpu); PIXEL_AVG_W(16,cpu); PIXEL_AVG_W(18,cpu); PIXEL_AVG_W(20,cpu);

PIXEL_AVG_WALL(mmx2)
PIXEL_AVG_WALL(cache32_mmx2)
PIXEL_AVG_WALL(cache64_mmx2)
PIXEL_AVG_WALL(cache64_sse2)
PIXEL_AVG_WALL(sse2)
PIXEL_AVG_WALL(cache64_ssse3)
PIXEL_AVG_WALL(avx2)

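/* The function tables are indexed by width>>2: slot 1 serves width 4, slot 2
 * width 8, slot 3 width 12, slot 4 width 16 and slot 5 width 20. */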
#define PIXEL_AVG_WTAB(instr, name1, name2, name3, name4, name5)\
static void (* const x264_pixel_avg_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t ) =\
{\
    NULL,\
    x264_pixel_avg2_w4_##name1,\
    x264_pixel_avg2_w8_##name2,\
    x264_pixel_avg2_w12_##name3,\
    x264_pixel_avg2_w16_##name4,\
    x264_pixel_avg2_w20_##name5,\
};

#if HIGH_BIT_DEPTH
/* we can replace w12/w20 with w10/w18 as only 9/17 pixels in fact are important */
#define x264_pixel_avg2_w12_mmx2 x264_pixel_avg2_w10_mmx2
#define x264_pixel_avg2_w20_mmx2 x264_pixel_avg2_w18_mmx2
#define x264_pixel_avg2_w12_sse2 x264_pixel_avg2_w10_sse2
#define x264_pixel_avg2_w20_sse2 x264_pixel_avg2_w18_sse2
#define x264_pixel_avg2_w12_avx2 x264_pixel_avg2_w16_avx2
#define x264_pixel_avg2_w20_avx2 x264_pixel_avg2_w18_avx2
#else
/* w16 sse2 is faster than w12 mmx as long as the cacheline issue is resolved */
#define x264_pixel_avg2_w12_cache64_ssse3 x264_pixel_avg2_w16_cache64_ssse3
#define x264_pixel_avg2_w12_cache64_sse2  x264_pixel_avg2_w16_cache64_sse2
#define x264_pixel_avg2_w12_sse3          x264_pixel_avg2_w16_sse3
#define x264_pixel_avg2_w12_sse2          x264_pixel_avg2_w16_sse2
#endif // HIGH_BIT_DEPTH

PIXEL_AVG_WTAB(mmx2, mmx2, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
PIXEL_AVG_WTAB(sse2, mmx2, sse2, sse2, sse2, sse2)
PIXEL_AVG_WTAB(avx2, mmx2, sse2, avx2, avx2, avx2)
#else // !HIGH_BIT_DEPTH
#if ARCH_X86
PIXEL_AVG_WTAB(cache32_mmx2, mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2)
PIXEL_AVG_WTAB(cache64_mmx2, mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2)
#endif
PIXEL_AVG_WTAB(sse2, mmx2, mmx2, sse2, sse2, sse2)
PIXEL_AVG_WTAB(cache64_sse2, mmx2, cache64_mmx2, cache64_sse2, cache64_sse2, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3, mmx2, cache64_mmx2, cache64_ssse3, cache64_ssse3, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3_atom, mmx2, mmx2, cache64_ssse3, cache64_ssse3, sse2)
PIXEL_AVG_WTAB(avx2, mmx2, mmx2, sse2, sse2, avx2)
#endif // HIGH_BIT_DEPTH

#define MC_COPY_WTAB(instr, name1, name2, name3)\
static void (* const x264_mc_copy_wtab_##instr[5])( pixel *, intptr_t, pixel *, intptr_t, int ) =\
{\
    NULL,\
    x264_mc_copy_w4_##name1,\
    x264_mc_copy_w8_##name2,\
    NULL,\
    x264_mc_copy_w16_##name3,\
};

MC_COPY_WTAB(mmx,mmx,mmx,mmx)
#if HIGH_BIT_DEPTH
MC_COPY_WTAB(sse,mmx,sse,sse)
MC_COPY_WTAB(avx,mmx,sse,avx)
#else
MC_COPY_WTAB(sse,mmx,mmx,sse)
#endif

#define MC_WEIGHT_WTAB(function, instr, name1, name2, w12version)\
    static void (* x264_mc_##function##_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ) =\
{\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w8_##name2,\
    x264_mc_##function##_w##w12version##_##instr,\
    x264_mc_##function##_w16_##instr,\
    x264_mc_##function##_w20_##instr,\
};

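/* The weight_cache functions precompute the per-lane constants that the
 * weight kernels read from w->cachea/w->cacheb. When scale == 1<<denom the
 * weight degenerates to a pure offset, so the cheaper offsetadd/offsetsub
 * kernels are selected instead. */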
#if HIGH_BIT_DEPTH
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,12)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,sse2,16)

static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        for( int i = 0; i < 8; i++ )
            w->cachea[i] = abs(w->i_offset<<(BIT_DEPTH-8));
        return;
    }
    w->weightfn = h->mc.weight;
    int den1 = 1<<w->i_denom;
    int den2 = w->i_scale<<1;
    int den3 = 1+(w->i_offset<<(BIT_DEPTH-8+1));
    for( int i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = i&1 ? den3 : den2;
    }
}
#else
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(weight,ssse3,ssse3,ssse3,16)
MC_WEIGHT_WTAB(weight,avx2,ssse3,avx2,16)

static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    int i;
    int16_t den1;

    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        memset( w->cachea, abs(w->i_offset), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = (1 << (w->i_denom - 1)) | (w->i_offset << w->i_denom);
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = w->i_scale;
        w->cacheb[i] = den1;
    }
}

static void x264_weight_cache_ssse3( x264_t *h, x264_weight_t *w )
{
    int i, den1;
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;

        memset( w->cachea, abs( w->i_offset ), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = w->i_scale << (8 - w->i_denom);
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = w->i_offset;
    }
}
#endif // !HIGH_BIT_DEPTH

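/* mvx/mvy are in quarter-pel units. qpel_idx packs their fractional parts
 * into 0..15; x264_hpel_ref0/ref1 pick which of the four precomputed planes
 * (full-pel, h, v, c) to read from. (qpel_idx & 5) is nonzero iff either
 * fractional part is odd, i.e. the position requires an on-the-fly average
 * of two half-pel planes. */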
#define MC_LUMA(name,instr1,instr2)\
static void mc_luma_##name( pixel *dst,    intptr_t i_dst_stride,\
                            pixel *src[4], intptr_t i_src_stride,\
                            int mvx, int mvy,\
                            int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##instr1[i_width>>2](\
                dst, i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );\
    }\
    else if( weight->weightfn )\
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );\
    else\
        x264_mc_copy_wtab_##instr2[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, i_height );\
}

MC_LUMA(mmx2,mmx2,mmx)
MC_LUMA(sse2,sse2,sse)
#if HIGH_BIT_DEPTH
MC_LUMA(avx2,avx2,avx)
#else
#if ARCH_X86
MC_LUMA(cache32_mmx2,cache32_mmx2,mmx)
MC_LUMA(cache64_mmx2,cache64_mmx2,mmx)
#endif
MC_LUMA(cache64_sse2,cache64_sse2,sse)
MC_LUMA(cache64_ssse3,cache64_ssse3,sse)
MC_LUMA(cache64_ssse3_atom,cache64_ssse3_atom,sse)
#endif // !HIGH_BIT_DEPTH

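/* Like mc_luma, but when no interpolation or weighting is needed it returns
 * a pointer straight into the reference plane (updating *i_dst_stride)
 * instead of copying the block. */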
#define GET_REF(name)\
static pixel *get_ref_##name( pixel *dst,   intptr_t *i_dst_stride,\
                              pixel *src[4], intptr_t i_src_stride,\
                              int mvx, int mvy,\
                              int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##name[i_width>>2](\
                dst, *i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );\
        return dst;\
    }\
    else if( weight->weightfn )\
    {\
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );\
        return dst;\
    }\
    else\
    {\
        *i_dst_stride = i_src_stride;\
        return src1;\
    }\
}

GET_REF(mmx2)
GET_REF(sse2)
GET_REF(avx2)
#if !HIGH_BIT_DEPTH
#if ARCH_X86
GET_REF(cache32_mmx2)
GET_REF(cache64_mmx2)
#endif
GET_REF(cache64_sse2)
GET_REF(cache64_ssse3)
GET_REF(cache64_ssse3_atom)
#endif // !HIGH_BIT_DEPTH

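/* The asm kernels require an aligned source, so rather than handling an
 * unaligned head, round src (and the dsts, which are assumed to share its
 * alignment) down to an alignment boundary and widen the row accordingly;
 * the frame padding is assumed to make the extra pixels harmless to
 * compute. */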
#define HPEL(align, cpu, cpuv, cpuc, cpuh)\
void x264_hpel_filter_v_##cpuv( pixel *dst, pixel *src, int16_t *buf, intptr_t stride, intptr_t width );\
void x264_hpel_filter_c_##cpuc( pixel *dst, int16_t *buf, intptr_t width );\
void x264_hpel_filter_h_##cpuh( pixel *dst, pixel *src, intptr_t width );\
static void x264_hpel_filter_##cpu( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,\
                                    intptr_t stride, int width, int height, int16_t *buf )\
{\
    intptr_t realign = (intptr_t)src & (align-1);\
    src -= realign;\
    dstv -= realign;\
    dstc -= realign;\
    dsth -= realign;\
    width += realign;\
    while( height-- )\
    {\
        x264_hpel_filter_v_##cpuv( dstv, src, buf+16, stride, width );\
        x264_hpel_filter_c_##cpuc( dstc, buf+16, width );\
        x264_hpel_filter_h_##cpuh( dsth, src, width );\
        dsth += stride;\
        dstv += stride;\
        dstc += stride;\
        src  += stride;\
    }\
    x264_sfence();\
}

HPEL(8, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
HPEL(16, sse2, sse2, sse2, sse2)
#else // !HIGH_BIT_DEPTH
HPEL(16, sse2_amd, mmx2, mmx2, sse2)
#if ARCH_X86_64
void x264_hpel_filter_sse2 ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_ssse3( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_avx  ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_avx2 ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
#else
HPEL(16, sse2, sse2, sse2, sse2)
HPEL(16, ssse3, ssse3, ssse3, ssse3)
HPEL(16, avx, avx, avx, avx)
HPEL(32, avx2, avx2, avx2, avx2)
#endif
#endif // HIGH_BIT_DEPTH

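/* The core asm may overread the end of each row, so for widths that aren't a
 * multiple of the vector width, every row except the last one in memory
 * order is rounded up (the overread spills into the next row, which is valid
 * memory) and the final row is finished with a plain memcpy. */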
#define PLANE_COPY(align, cpu)\
static void x264_plane_copy_##cpu( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h )\
{\
    int c_w = (align) / sizeof(pixel) - 1;\
    if( w < 256 ) /* tiny resolutions don't want non-temporal hints. dunno the exact threshold. */\
        x264_plane_copy_c( dst, i_dst, src, i_src, w, h );\
    else if( !(w&c_w) )\
        x264_plane_copy_core_##cpu( dst, i_dst, src, i_src, w, h );\
    else\
    {\
        if( --h > 0 )\
        {\
            if( i_src > 0 )\
            {\
                x264_plane_copy_core_##cpu( dst, i_dst, src, i_src, (w+c_w)&~c_w, h );\
                dst += i_dst * h;\
                src += i_src * h;\
            }\
            else\
                x264_plane_copy_core_##cpu( dst+i_dst, i_dst, src+i_src, i_src, (w+c_w)&~c_w, h );\
        }\
        /* use plain memcpy on the last line (in memory order) to avoid overreading src. */\
        memcpy( dst, src, w*sizeof(pixel) );\
    }\
}

PLANE_COPY(16, sse)
PLANE_COPY(32, avx)

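/* Copies a plane while swapping each pair of adjacent pixels (w is measured
 * in pairs); same overread-avoidance scheme as PLANE_COPY, with a scalar
 * loop finishing the tail of the last row. */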
#define PLANE_COPY_SWAP(align, cpu)\
static void x264_plane_copy_swap_##cpu( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h )\
{\
    int c_w = (align>>1) / sizeof(pixel) - 1;\
    if( !(w&c_w) )\
        x264_plane_copy_swap_core_##cpu( dst, i_dst, src, i_src, w, h );\
    else if( w > c_w )\
    {\
        if( --h > 0 )\
        {\
            if( i_src > 0 )\
            {\
                x264_plane_copy_swap_core_##cpu( dst, i_dst, src, i_src, (w+c_w)&~c_w, h );\
                dst += i_dst * h;\
                src += i_src * h;\
            }\
            else\
                x264_plane_copy_swap_core_##cpu( dst+i_dst, i_dst, src+i_src, i_src, (w+c_w)&~c_w, h );\
        }\
        x264_plane_copy_swap_core_##cpu( dst, 0, src, 0, w&~c_w, 1 );\
        for( int x = 2*(w&~c_w); x < 2*w; x += 2 )\
        {\
            dst[x]   = src[x+1];\
            dst[x+1] = src[x];\
        }\
    }\
    else\
        x264_plane_copy_swap_c( dst, i_dst, src, i_src, w, h );\
}

PLANE_COPY_SWAP(16, ssse3)
PLANE_COPY_SWAP(32, avx2)

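/* Interleaves separate U and V planes into a packed UV plane, again rounding
 * rows up for the asm core; the last row, and any stride combination the
 * core can't handle, falls back to the C version. */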
#define PLANE_INTERLEAVE(cpu) \
static void x264_plane_copy_interleave_##cpu( pixel *dst,  intptr_t i_dst,\
                                              pixel *srcu, intptr_t i_srcu,\
                                              pixel *srcv, intptr_t i_srcv, int w, int h )\
{\
    int c_w = 16 / sizeof(pixel) - 1;\
    if( !(w&c_w) )\
        x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
    else if( w > c_w && (i_srcu ^ i_srcv) >= 0 ) /* only works correctly for strides with identical signs */\
    {\
        if( --h > 0 )\
        {\
            if( i_srcu > 0 )\
            {\
                x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, (w+c_w)&~c_w, h );\
                dst  += i_dst  * h;\
                srcu += i_srcu * h;\
                srcv += i_srcv * h;\
            }\
            else\
                x264_plane_copy_interleave_core_##cpu( dst+i_dst, i_dst, srcu+i_srcu, i_srcu, srcv+i_srcv, i_srcv, (w+c_w)&~c_w, h );\
        }\
        x264_plane_copy_interleave_c( dst, 0, srcu, 0, srcv, 0, w, 1 );\
    }\
    else\
        x264_plane_copy_interleave_c( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
}

PLANE_INTERLEAVE(mmx2)
PLANE_INTERLEAVE(sse2)
#if HIGH_BIT_DEPTH
PLANE_INTERLEAVE(avx)
#endif

#if HAVE_X86_INLINE_ASM
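/* Saturating 16-bit add implemented with paddsw: clamps s+x to
 * [-32768,32767] without branches. MC_CLIP_ADD2 performs two such adds at
 * once on a packed pair of int16s. */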
#undef MC_CLIP_ADD
#define MC_CLIP_ADD(s,x)\
do\
{\
    int temp;\
    asm("movd       %0, %%xmm0     \n"\
        "movd       %2, %%xmm1     \n"\
        "paddsw %%xmm1, %%xmm0     \n"\
        "movd   %%xmm0, %1         \n"\
        :"+m"(s), "=&r"(temp)\
        :"m"(x)\
    );\
    s = temp;\
} while(0)

#undef MC_CLIP_ADD2
#define MC_CLIP_ADD2(s,x)\
do\
{\
    asm("movd       %0, %%xmm0     \n"\
        "movd       %1, %%xmm1     \n"\
        "paddsw %%xmm1, %%xmm0     \n"\
        "movd   %%xmm0, %0         \n"\
        :"+m"(M32(s))\
        :"m"(M32(x))\
    );\
} while(0)
#endif

PROPAGATE_LIST(ssse3)
PROPAGATE_LIST(avx)

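/* Runtime CPU dispatch: start from the MMX baseline and progressively
 * overwrite the function pointers as each instruction set is detected,
 * returning as soon as a required feature is absent. */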
void x264_mc_init_mmx( int cpu, x264_mc_functions_t *pf )
{
    if( !(cpu&X264_CPU_MMX) )
        return;

    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_mmx;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_mmx;

    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_mmx;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_8x8]   = x264_mc_copy_w8_mmx;
    pf->copy[PIXEL_4x4]   = x264_mc_copy_w4_mmx;
    pf->memcpy_aligned  = x264_memcpy_aligned_mmx;
    pf->memzero_aligned = x264_memzero_aligned_mmx;
    pf->integral_init4v = x264_integral_init4v_mmx;
    pf->integral_init8v = x264_integral_init8v_mmx;

    if( !(cpu&X264_CPU_MMX2) )
        return;

    pf->prefetch_fenc_420 = x264_prefetch_fenc_420_mmx2;
    pf->prefetch_fenc_422 = x264_prefetch_fenc_422_mmx2;
    pf->prefetch_ref = x264_prefetch_ref_mmx2;

    pf->plane_copy_interleave = x264_plane_copy_interleave_mmx2;
    pf->store_interleave_chroma = x264_store_interleave_chroma_mmx2;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_mmx2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_mmx2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_mmx2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_mmx2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_mmx2;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_mmx2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_mmx2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_mmx2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_mmx2;

    pf->mc_luma = mc_luma_mmx2;
    pf->get_ref = get_ref_mmx2;
    pf->mc_chroma = x264_mc_chroma_mmx2;
    pf->hpel_filter = x264_hpel_filter_mmx2;
    pf->weight = x264_mc_weight_wtab_mmx2;
    pf->weight_cache = x264_weight_cache_mmx2;
    pf->offsetadd = x264_mc_offsetadd_wtab_mmx2;
    pf->offsetsub = x264_mc_offsetsub_wtab_mmx2;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_mmx2;

    if( cpu&X264_CPU_SSE )
    {
        pf->memcpy_aligned  = x264_memcpy_aligned_sse;
        pf->memzero_aligned = x264_memzero_aligned_sse;
        pf->plane_copy = x264_plane_copy_sse;
    }

#if HIGH_BIT_DEPTH
#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&(X264_CPU_CACHELINE_32|X264_CPU_CACHELINE_64) )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;

    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;

    pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;

    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pf->get_ref = get_ref_sse2;
        pf->mc_luma = mc_luma_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
    }

    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
    pf->store_interleave_chroma = x264_store_interleave_chroma_sse2;
    pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
    pf->offsetsub = x264_mc_offsetsub_wtab_sse2;

    if( cpu&X264_CPU_SSE2_IS_SLOW )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_sse2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_sse2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_sse2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_sse2;

    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse;
    pf->weight = x264_mc_weight_wtab_sse2;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_sse2;

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
    pf->plane_copy_swap = x264_plane_copy_swap_ssse3;
    pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_ssse3;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_ssse3;
    pf->mbtree_fix8_pack      = x264_mbtree_fix8_pack_ssse3;
    pf->mbtree_fix8_unpack    = x264_mbtree_fix8_unpack_ssse3;

    if( !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SLOW_ATOM|X264_CPU_SLOW_PALIGNR)) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_avx;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_avx;
    pf->plane_copy_interleave        = x264_plane_copy_interleave_avx;
    pf->plane_copy_deinterleave      = x264_plane_copy_deinterleave_avx;
    pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_avx;
    pf->store_interleave_chroma      = x264_store_interleave_chroma_avx;
    pf->copy[PIXEL_16x16]            = x264_mc_copy_w16_aligned_avx;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;

    if( cpu&X264_CPU_XOP )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;

    if( cpu&X264_CPU_AVX2 )
    {
        pf->mc_luma = mc_luma_avx2;
        pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_avx2;
    }
#else // !HIGH_BIT_DEPTH

#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&X264_CPU_CACHELINE_32 )
    {
        pf->mc_luma = mc_luma_cache32_mmx2;
        pf->get_ref = get_ref_cache32_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
    else if( cpu&X264_CPU_CACHELINE_64 )
    {
        pf->mc_luma = mc_luma_cache64_mmx2;
        pf->get_ref = get_ref_cache64_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->hpel_filter = x264_hpel_filter_sse2_amd;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
    pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_sse2;

    if( !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        pf->weight = x264_mc_weight_wtab_sse2;
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
            pf->offsetsub = x264_mc_offsetsub_wtab_sse2;
        }

        pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse;
        pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
        pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
        pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
        pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
        pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_sse2;

        if( cpu&X264_CPU_SSE2_IS_FAST )
        {
            pf->store_interleave_chroma = x264_store_interleave_chroma_sse2; // FIXME sse2fast? sse2medium?
            pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
            pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;
            pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
            pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;
            pf->mc_luma = mc_luma_sse2;
            pf->get_ref = get_ref_sse2;
            if( cpu&X264_CPU_CACHELINE_64 )
            {
                pf->mc_luma = mc_luma_cache64_sse2;
                pf->get_ref = get_ref_cache64_sse2;
            }
        }
    }

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_ssse3;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_ssse3;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_ssse3;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_ssse3;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_ssse3;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_ssse3;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_ssse3;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_ssse3;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_ssse3;
    pf->plane_copy_swap = x264_plane_copy_swap_ssse3;
    pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_ssse3;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_ssse3;
    pf->mbtree_fix8_pack      = x264_mbtree_fix8_pack_ssse3;
    pf->mbtree_fix8_unpack    = x264_mbtree_fix8_unpack_ssse3;

    if( !(cpu&X264_CPU_SLOW_PSHUFB) )
    {
        pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_ssse3;
        pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_ssse3;
        pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_ssse3;
    }

    if( !(cpu&X264_CPU_SLOW_PALIGNR) )
    {
#if ARCH_X86_64
        if( !(cpu&X264_CPU_SLOW_ATOM) ) /* The 64-bit version is slower, but the 32-bit version is faster? */
#endif
            pf->hpel_filter = x264_hpel_filter_ssse3;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
    }
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_ssse3;

    if( cpu&X264_CPU_CACHELINE_64 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_ssse3_cache64;
        pf->mc_luma = mc_luma_cache64_ssse3;
        pf->get_ref = get_ref_cache64_ssse3;
        if( cpu&X264_CPU_SLOW_ATOM )
        {
            pf->mc_luma = mc_luma_cache64_ssse3_atom;
            pf->get_ref = get_ref_cache64_ssse3_atom;
        }
    }

    pf->weight_cache = x264_weight_cache_ssse3;
    pf->weight = x264_mc_weight_wtab_ssse3;

    if( !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SLOW_ATOM|X264_CPU_SLOW_PALIGNR)) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_SSE4) )
        return;

    pf->integral_init4h = x264_integral_init4h_sse4;
    pf->integral_init8h = x264_integral_init8h_sse4;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
    pf->integral_init8h = x264_integral_init8h_avx;
    pf->hpel_filter = x264_hpel_filter_avx;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;

    if( cpu&X264_CPU_XOP )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;

    if( cpu&X264_CPU_AVX2 )
    {
        pf->hpel_filter = x264_hpel_filter_avx2;
        pf->mc_chroma = x264_mc_chroma_avx2;
        pf->weight = x264_mc_weight_wtab_avx2;
        pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_avx2;
        pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_avx2;
        pf->integral_init8v = x264_integral_init8v_avx2;
        pf->integral_init4v = x264_integral_init4v_avx2;
        pf->integral_init8h = x264_integral_init8h_avx2;
        pf->integral_init4h = x264_integral_init4h_avx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx2;
    }
#endif // HIGH_BIT_DEPTH

    if( !(cpu&X264_CPU_AVX) )
        return;
    pf->memzero_aligned = x264_memzero_aligned_avx;
    pf->plane_copy = x264_plane_copy_avx;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_avx;

    if( cpu&X264_CPU_FMA4 )
        pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_fma4;

    if( !(cpu&X264_CPU_AVX2) )
        return;
    pf->plane_copy_swap = x264_plane_copy_swap_avx2;
    pf->get_ref = get_ref_avx2;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx2;
    pf->mbtree_fix8_pack      = x264_mbtree_fix8_pack_avx2;
    pf->mbtree_fix8_unpack    = x264_mbtree_fix8_unpack_avx2;
}