/*****************************************************************************
 * mc-c.c: x86 motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2015 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "common/common.h"
#include "mc.h"

#define DECL_SUF( func, args )\
    void func##_mmx2 args;\
    void func##_sse2 args;\
    void func##_ssse3 args;\
    void func##_avx2 args;

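/* For example, DECL_SUF( x264_pixel_avg_16x16, (...) ) declares the assembly
 * implementations x264_pixel_avg_16x16_mmx2/_sse2/_ssse3/_avx2, which all
 * share one signature and are selected at runtime in x264_mc_init_mmx(). */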
DECL_SUF( x264_pixel_avg_16x16, ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_16x8,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x2,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))

#define MC_WEIGHT(w,type) \
    void x264_mc_weight_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int );

#define MC_WEIGHT_OFFSET(w,type) \
    void x264_mc_offsetadd_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
    void x264_mc_offsetsub_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
    MC_WEIGHT(w,type)

MC_WEIGHT_OFFSET( 4, mmx2 )
MC_WEIGHT_OFFSET( 8, mmx2 )
MC_WEIGHT_OFFSET( 12, mmx2 )
MC_WEIGHT_OFFSET( 16, mmx2 )
MC_WEIGHT_OFFSET( 20, mmx2 )
MC_WEIGHT_OFFSET( 12, sse2 )
MC_WEIGHT_OFFSET( 16, sse2 )
MC_WEIGHT_OFFSET( 20, sse2 )
#if HIGH_BIT_DEPTH
MC_WEIGHT_OFFSET( 8, sse2 )
#endif
MC_WEIGHT( 8, sse2 )
MC_WEIGHT( 4, ssse3 )
MC_WEIGHT( 8, ssse3 )
MC_WEIGHT( 12, ssse3 )
MC_WEIGHT( 16, ssse3 )
MC_WEIGHT( 20, ssse3 )
MC_WEIGHT( 8, avx2 )
MC_WEIGHT( 16, avx2 )
MC_WEIGHT( 20, avx2 )
#undef MC_WEIGHT_OFFSET
#undef MC_WEIGHT

void x264_mc_copy_w4_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w8_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w8_sse ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_mmx( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_sse( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_aligned_sse( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
void x264_mc_copy_w16_aligned_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
void x264_prefetch_fenc_420_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_fenc_422_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_ref_mmx2( pixel *, intptr_t, int );
void x264_plane_copy_core_sse( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_core_avx( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_interleave_core_mmx2( pixel *dst,  intptr_t i_dst,
                                           pixel *srcu, intptr_t i_srcu,
                                           pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_core_sse2( pixel *dst,  intptr_t i_dst,
                                           pixel *srcu, intptr_t i_srcu,
                                           pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_core_avx( pixel *dst,  intptr_t i_dst,
                                          pixel *srcu, intptr_t i_srcu,
                                          pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_c( pixel *dst,  intptr_t i_dst,
                                   pixel *srcu, intptr_t i_srcu,
                                   pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_deinterleave_mmx( pixel *dstu, intptr_t i_dstu,
                                       pixel *dstv, intptr_t i_dstv,
                                       pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_sse2( pixel *dstu, intptr_t i_dstu,
                                        pixel *dstv, intptr_t i_dstv,
                                        pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_ssse3( uint8_t *dstu, intptr_t i_dstu,
                                         uint8_t *dstv, intptr_t i_dstv,
                                         uint8_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_avx( uint16_t *dstu, intptr_t i_dstu,
                                       uint16_t *dstv, intptr_t i_dstv,
                                       uint16_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_rgb_sse2 ( pixel *dsta, intptr_t i_dsta,
                                             pixel *dstb, intptr_t i_dstb,
                                             pixel *dstc, intptr_t i_dstc,
                                             pixel *src,  intptr_t i_src, int pw, int w, int h );
void x264_plane_copy_deinterleave_rgb_ssse3( pixel *dsta, intptr_t i_dsta,
                                             pixel *dstb, intptr_t i_dstb,
                                             pixel *dstc, intptr_t i_dstc,
                                             pixel *src,  intptr_t i_src, int pw, int w, int h );
void x264_plane_copy_deinterleave_v210_ssse3( uint16_t *dstu, intptr_t i_dstu,
                                              uint16_t *dstv, intptr_t i_dstv,
                                              uint32_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_v210_avx  ( uint16_t *dstu, intptr_t i_dstu,
                                              uint16_t *dstv, intptr_t i_dstv,
                                              uint32_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_v210_avx2 ( uint16_t *dstu, intptr_t i_dstu,
                                              uint16_t *dstv, intptr_t i_dstv,
                                              uint32_t *src,  intptr_t i_src, int w, int h );
void x264_store_interleave_chroma_mmx2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_store_interleave_chroma_sse2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_store_interleave_chroma_avx ( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_load_deinterleave_chroma_fenc_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void *x264_memcpy_aligned_mmx( void *dst, const void *src, size_t n );
void *x264_memcpy_aligned_sse( void *dst, const void *src, size_t n );
void x264_memzero_aligned_mmx( void *dst, size_t n );
void x264_memzero_aligned_sse( void *dst, size_t n );
void x264_memzero_aligned_avx( void *dst, size_t n );
void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init4h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_avx ( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init4v_mmx  ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_sse2 ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_avx2 ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init8v_mmx ( uint16_t *sum8, intptr_t stride );
void x264_integral_init8v_sse2( uint16_t *sum8, intptr_t stride );
void x264_integral_init8v_avx2( uint16_t *sum8, intptr_t stride );
void x264_mbtree_propagate_cost_sse2( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx ( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_fma4( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx2( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );

#define MC_CHROMA(cpu)\
void x264_mc_chroma_##cpu( pixel *dstu, pixel *dstv, intptr_t i_dst, pixel *src, intptr_t i_src,\
                           int dx, int dy, int i_width, int i_height );
MC_CHROMA(mmx2)
MC_CHROMA(sse2)
MC_CHROMA(ssse3)
MC_CHROMA(ssse3_cache64)
MC_CHROMA(avx)
MC_CHROMA(avx2)

#define LOWRES(cpu)\
void x264_frame_init_lowres_core_##cpu( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,\
                                        intptr_t src_stride, intptr_t dst_stride, int width, int height );
LOWRES(mmx2)
LOWRES(cache32_mmx2)
LOWRES(sse2)
LOWRES(ssse3)
LOWRES(avx)
LOWRES(xop)
LOWRES(avx2)

#define PIXEL_AVG_W(width,cpu)\
void x264_pixel_avg2_w##width##_##cpu( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t );
/* This declares some functions that don't exist, but that isn't a problem:
 * a declaration is harmless as long as the function is never referenced. */
#define PIXEL_AVG_WALL(cpu)\
PIXEL_AVG_W(4,cpu); PIXEL_AVG_W(8,cpu); PIXEL_AVG_W(10,cpu); PIXEL_AVG_W(12,cpu); PIXEL_AVG_W(16,cpu); PIXEL_AVG_W(18,cpu); PIXEL_AVG_W(20,cpu);

PIXEL_AVG_WALL(mmx2)
PIXEL_AVG_WALL(cache32_mmx2)
PIXEL_AVG_WALL(cache64_mmx2)
PIXEL_AVG_WALL(cache64_sse2)
PIXEL_AVG_WALL(sse2)
PIXEL_AVG_WALL(cache64_ssse3)
PIXEL_AVG_WALL(avx2)

#define PIXEL_AVG_WTAB(instr, name1, name2, name3, name4, name5)\
static void (* const x264_pixel_avg_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t ) =\
{\
    NULL,\
    x264_pixel_avg2_w4_##name1,\
    x264_pixel_avg2_w8_##name2,\
    x264_pixel_avg2_w12_##name3,\
    x264_pixel_avg2_w16_##name4,\
    x264_pixel_avg2_w20_##name5,\
};
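/* The table is indexed by width>>2 (see mc_luma/get_ref below): entries 1-5
 * map to block widths 4, 8, 12, 16 and 20; entry 0 stays NULL since no
 * caller passes a width below 4. */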

#if HIGH_BIT_DEPTH
/* we can replace w12/w20 with w10/w18 as only 9/17 pixels in fact are important */
#define x264_pixel_avg2_w12_mmx2          x264_pixel_avg2_w10_mmx2
#define x264_pixel_avg2_w20_mmx2          x264_pixel_avg2_w18_mmx2
#define x264_pixel_avg2_w12_sse2          x264_pixel_avg2_w10_sse2
#define x264_pixel_avg2_w20_sse2          x264_pixel_avg2_w18_sse2
#define x264_pixel_avg2_w12_avx2          x264_pixel_avg2_w16_avx2
#define x264_pixel_avg2_w20_avx2          x264_pixel_avg2_w18_avx2
#else
/* w16 sse2 is faster than w12 mmx as long as the cacheline issue is resolved */
#define x264_pixel_avg2_w12_cache64_ssse3 x264_pixel_avg2_w16_cache64_ssse3
#define x264_pixel_avg2_w12_cache64_sse2  x264_pixel_avg2_w16_cache64_sse2
#define x264_pixel_avg2_w12_sse3          x264_pixel_avg2_w16_sse3
#define x264_pixel_avg2_w12_sse2          x264_pixel_avg2_w16_sse2
#endif // HIGH_BIT_DEPTH

PIXEL_AVG_WTAB(mmx2, mmx2, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
PIXEL_AVG_WTAB(sse2, mmx2, sse2, sse2, sse2, sse2)
PIXEL_AVG_WTAB(avx2, mmx2, sse2, avx2, avx2, avx2)
#else // !HIGH_BIT_DEPTH
#if ARCH_X86
PIXEL_AVG_WTAB(cache32_mmx2, mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2)
PIXEL_AVG_WTAB(cache64_mmx2, mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2)
#endif
PIXEL_AVG_WTAB(sse2, mmx2, mmx2, sse2, sse2, sse2)
PIXEL_AVG_WTAB(cache64_sse2, mmx2, cache64_mmx2, cache64_sse2, cache64_sse2, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3, mmx2, cache64_mmx2, cache64_ssse3, cache64_ssse3, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3_atom, mmx2, mmx2, cache64_ssse3, cache64_ssse3, sse2)
PIXEL_AVG_WTAB(avx2, mmx2, mmx2, sse2, sse2, avx2)
#endif // HIGH_BIT_DEPTH

#define MC_COPY_WTAB(instr, name1, name2, name3)\
static void (* const x264_mc_copy_wtab_##instr[5])( pixel *, intptr_t, pixel *, intptr_t, int ) =\
{\
    NULL,\
    x264_mc_copy_w4_##name1,\
    x264_mc_copy_w8_##name2,\
    NULL,\
    x264_mc_copy_w16_##name3,\
};
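/* Same width>>2 indexing as the avg table; entry 3 (width 12) is NULL here
 * because plain copies only ever happen for the 4-, 8- and 16-wide cases. */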

MC_COPY_WTAB(mmx,mmx,mmx,mmx)
#if HIGH_BIT_DEPTH
MC_COPY_WTAB(sse,mmx,sse,sse)
MC_COPY_WTAB(avx,mmx,sse,avx)
#else
MC_COPY_WTAB(sse,mmx,mmx,sse)
#endif

#define MC_WEIGHT_WTAB(function, instr, name1, name2, w12version)\
    static void (* x264_mc_##function##_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ) =\
{\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w8_##name2,\
    x264_mc_##function##_w##w12version##_##instr,\
    x264_mc_##function##_w16_##instr,\
    x264_mc_##function##_w20_##instr,\
};
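/* Also indexed by width>>2. Entry 0 duplicates the w4 function, presumably
 * so that an index of 0 (i.e. a width below 4) still resolves to a valid
 * function rather than a NULL pointer. */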

#if HIGH_BIT_DEPTH
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,12)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,sse2,16)

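/* A sketch of how the asm presumably consumes these constants (inferred from
 * the values below, not from the assembly itself): interleaving each pixel
 * with cachea's (1<<denom) and doing a pmaddwd against cacheb's
 * (2*scale, 2*offset+1) pairs -- with the offset prescaled to the coding bit
 * depth -- gives pix*2*scale + (1<<denom)*(2*offset+1); shifting right by
 * denom+1 then yields ((pix*scale + (1<<(denom-1))) >> denom) + offset. */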
static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        for( int i = 0; i < 8; i++ )
            w->cachea[i] = abs(w->i_offset<<(BIT_DEPTH-8));
        return;
    }
    w->weightfn = h->mc.weight;
    int den1 = 1<<w->i_denom;
    int den2 = w->i_scale<<1;
    int den3 = 1+(w->i_offset<<(BIT_DEPTH-8+1));
    for( int i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = i&1 ? den3 : den2;
    }
}
#else
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(weight,ssse3,ssse3,ssse3,16)
MC_WEIGHT_WTAB(weight,avx2,ssse3,avx2,16)

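/* 8-bit weighting constants: cachea holds the scale and cacheb holds
 * (1<<(denom-1)) | (offset<<denom), so a pmaddwd of (pix, 1) pairs against
 * (scale, cacheb) followed by >> denom presumably computes
 * ((pix*scale + (1<<(denom-1))) >> denom) + offset in a single step. */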
static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    int i;
    int16_t den1;

    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        memset( w->cachea, abs(w->i_offset), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = 1 << (w->i_denom - 1) | w->i_offset << w->i_denom;
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = w->i_scale;
        w->cacheb[i] = den1;
    }
}

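/* The SSSE3 variant presumably leans on pmulhrsw instead: with cachea holding
 * scale << (8-denom) and the pixel preshifted left by 7, pmulhrsw returns
 * (pix*scale + (1<<(denom-1))) >> denom, and cacheb's offset is added
 * afterwards. Again a sketch inferred from the constants, not from the asm. */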
static void x264_weight_cache_ssse3( x264_t *h, x264_weight_t *w )
{
    int i, den1;
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;

        memset( w->cachea, abs( w->i_offset ), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = w->i_scale << (8 - w->i_denom);
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = w->i_offset;
    }
}
#endif // !HIGH_BIT_DEPTH

#define MC_LUMA(name,instr1,instr2)\
static void mc_luma_##name( pixel *dst,    intptr_t i_dst_stride,\
                            pixel *src[4], intptr_t i_src_stride,\
                            int mvx, int mvy,\
                            int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##instr1[i_width>>2](\
                dst, i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );\
    }\
    else if( weight->weightfn )\
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );\
    else\
        x264_mc_copy_wtab_##instr2[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, i_height );\
}
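/* qpel_idx & 5 tests bit 0 (odd quarter-pel x) and bit 2 (odd quarter-pel y):
 * even fractional offsets (0 or 2) land exactly on one of the four
 * precomputed planes (full-pel, h, v, hv), so a simple copy suffices, while
 * odd offsets are interpolated by averaging the two nearest half-pel planes
 * selected via x264_hpel_ref0/x264_hpel_ref1. */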

MC_LUMA(mmx2,mmx2,mmx)
MC_LUMA(sse2,sse2,sse)
#if HIGH_BIT_DEPTH
MC_LUMA(avx2,avx2,avx)
#else
#if ARCH_X86
MC_LUMA(cache32_mmx2,cache32_mmx2,mmx)
MC_LUMA(cache64_mmx2,cache64_mmx2,mmx)
#endif
MC_LUMA(cache64_sse2,cache64_sse2,sse)
MC_LUMA(cache64_ssse3,cache64_ssse3,sse)
MC_LUMA(cache64_ssse3_atom,cache64_ssse3_atom,sse)
#endif // !HIGH_BIT_DEPTH

#define GET_REF(name)\
static pixel *get_ref_##name( pixel *dst,   intptr_t *i_dst_stride,\
                              pixel *src[4], intptr_t i_src_stride,\
                              int mvx, int mvy,\
                              int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##name[i_width>>2](\
                dst, *i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );\
        return dst;\
    }\
    else if( weight->weightfn )\
    {\
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );\
        return dst;\
    }\
    else\
    {\
        *i_dst_stride = i_src_stride;\
        return src1;\
    }\
}
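/* get_ref is mc_luma minus the copy: when no interpolation or weighting is
 * needed it returns a pointer directly into the reference plane and reports
 * the reference stride through *i_dst_stride instead of copying the block. */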

GET_REF(mmx2)
GET_REF(sse2)
GET_REF(avx2)
#if !HIGH_BIT_DEPTH
#if ARCH_X86
GET_REF(cache32_mmx2)
GET_REF(cache64_mmx2)
#endif
GET_REF(cache64_sse2)
GET_REF(cache64_ssse3)
GET_REF(cache64_ssse3_atom)
#endif // !HIGH_BIT_DEPTH

#define HPEL(align, cpu, cpuv, cpuc, cpuh)\
void x264_hpel_filter_v_##cpuv( pixel *dst, pixel *src, int16_t *buf, intptr_t stride, intptr_t width );\
void x264_hpel_filter_c_##cpuc( pixel *dst, int16_t *buf, intptr_t width );\
void x264_hpel_filter_h_##cpuh( pixel *dst, pixel *src, intptr_t width );\
static void x264_hpel_filter_##cpu( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,\
                                    intptr_t stride, int width, int height, int16_t *buf )\
{\
    intptr_t realign = (intptr_t)src & (align-1);\
    src  -= realign;\
    dstv -= realign;\
    dstc -= realign;\
    dsth -= realign;\
    width += realign;\
    while( height-- )\
    {\
        x264_hpel_filter_v_##cpuv( dstv, src, buf+16, stride, width );\
        x264_hpel_filter_c_##cpuc( dstc, buf+16, width );\
        x264_hpel_filter_h_##cpuh( dsth, src, width );\
        dsth += stride;\
        dstv += stride;\
        dstc += stride;\
        src  += stride;\
    }\
    x264_sfence();\
}
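/* The realign step rounds the pointers down to the vector alignment and
 * widens each row accordingly, letting the filters use aligned loads/stores
 * at the cost of recomputing a few leading pixels. The trailing
 * x264_sfence() presumably pairs with non-temporal stores in the asm. */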

HPEL(8, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
HPEL(16, sse2, sse2, sse2, sse2)
#else // !HIGH_BIT_DEPTH
HPEL(16, sse2_amd, mmx2, mmx2, sse2)
#if ARCH_X86_64
void x264_hpel_filter_sse2 ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_ssse3( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_avx  ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_avx2 ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
#else
HPEL(16, sse2, sse2, sse2, sse2)
HPEL(16, ssse3, ssse3, ssse3, ssse3)
HPEL(16, avx, avx, avx, avx)
HPEL(32, avx2, avx2, avx2, avx2)
#endif
#endif // HIGH_BIT_DEPTH

#define PLANE_COPY(align, cpu)\
static void x264_plane_copy_##cpu( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h )\
{\
    int c_w = (align) / sizeof(pixel) - 1;\
    if( w < 256 ) /* tiny resolutions don't want non-temporal hints. dunno the exact threshold. */\
        x264_plane_copy_c( dst, i_dst, src, i_src, w, h );\
    else if( !(w&c_w) )\
        x264_plane_copy_core_##cpu( dst, i_dst, src, i_src, w, h );\
    else\
    {\
        if( --h > 0 )\
        {\
            if( i_src > 0 )\
            {\
                x264_plane_copy_core_##cpu( dst, i_dst, src, i_src, (w+c_w)&~c_w, h );\
                dst += i_dst * h;\
                src += i_src * h;\
            }\
            else\
                x264_plane_copy_core_##cpu( dst+i_dst, i_dst, src+i_src, i_src, (w+c_w)&~c_w, h );\
        }\
        /* use plain memcpy on the last line (in memory order) to avoid overreading src. */\
        memcpy( dst, src, w*sizeof(pixel) );\
    }\
}
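/* The core function requires a width that's a multiple of the vector size.
 * For other widths, all rows but one are copied with the width rounded up --
 * the overread merely spills into the next row of the same plane -- and the
 * last row in memory order (selected by the i_src > 0 test, which also
 * handles negative strides) falls back to memcpy so the end of the buffer is
 * never overread. */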

PLANE_COPY(16, sse)
PLANE_COPY(32, avx)

#define PLANE_INTERLEAVE(cpu) \
static void x264_plane_copy_interleave_##cpu( pixel *dst,  intptr_t i_dst,\
                                              pixel *srcu, intptr_t i_srcu,\
                                              pixel *srcv, intptr_t i_srcv, int w, int h )\
{\
    int c_w = 16 / sizeof(pixel) - 1;\
    if( !(w&c_w) )\
        x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
    else if( w > c_w && (i_srcu ^ i_srcv) >= 0 ) /* only works correctly for strides with identical signs */\
    {\
        if( --h > 0 )\
        {\
            if( i_srcu > 0 )\
            {\
                x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, (w+c_w)&~c_w, h );\
                dst  += i_dst  * h;\
                srcu += i_srcu * h;\
                srcv += i_srcv * h;\
            }\
            else\
                x264_plane_copy_interleave_core_##cpu( dst+i_dst, i_dst, srcu+i_srcu, i_srcu, srcv+i_srcv, i_srcv, (w+c_w)&~c_w, h );\
        }\
        x264_plane_copy_interleave_c( dst, 0, srcu, 0, srcv, 0, w, 1 );\
    }\
    else\
        x264_plane_copy_interleave_c( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
}
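/* Same rounding trick as PLANE_COPY, plus one guard: (i_srcu ^ i_srcv) >= 0
 * is a sign test (the XOR of two same-signed integers is non-negative),
 * since the last-row fixup only finds the final row in memory order when
 * both source strides run in the same direction. */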

PLANE_INTERLEAVE(mmx2)
PLANE_INTERLEAVE(sse2)
#if HIGH_BIT_DEPTH
PLANE_INTERLEAVE(avx)
#endif

#if HAVE_X86_INLINE_ASM
#define CLIP_ADD(s,x)\
do\
{\
    int temp;\
    asm("movd       %0, %%xmm0     \n"\
        "movd       %2, %%xmm1     \n"\
        "paddsw %%xmm1, %%xmm0     \n"\
        "movd   %%xmm0, %1         \n"\
        :"+m"(s), "=&r"(temp)\
        :"m"(x)\
    );\
    s = temp;\
} while(0)

#define CLIP_ADD2(s,x)\
do\
{\
    asm("movd       %0, %%xmm0     \n"\
        "movd       %1, %%xmm1     \n"\
        "paddsw %%xmm1, %%xmm0     \n"\
        "movd   %%xmm0, %0         \n"\
        :"+m"(M32(s))\
        :"m"(M32(x))\
    );\
} while(0)
#else
#define CLIP_ADD(s,x) (s) = X264_MIN((s)+(x),(1<<15)-1)
#define CLIP_ADD2(s,x)\
do\
{\
    CLIP_ADD((s)[0], (x)[0]);\
    CLIP_ADD((s)[1], (x)[1]);\
} while(0)
#endif
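/* Both variants are saturating adds: paddsw clamps each 16-bit lane, which
 * matches the C fallback's clamp against (1<<15)-1 for the non-negative
 * costs involved. CLIP_ADD2 folds two adjacent int16 additions into one. */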

#define PROPAGATE_LIST(cpu)\
void x264_mbtree_propagate_list_internal_##cpu( int16_t (*mvs)[2], int16_t *propagate_amount,\
                                                uint16_t *lowres_costs, int16_t *output,\
                                                int bipred_weight, int mb_y, int len );\
\
static void x264_mbtree_propagate_list_##cpu( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],\
                                              int16_t *propagate_amount, uint16_t *lowres_costs,\
                                              int bipred_weight, int mb_y, int len, int list )\
{\
    int16_t *current = h->scratch_buffer2;\
\
    x264_mbtree_propagate_list_internal_##cpu( mvs, propagate_amount, lowres_costs,\
                                               current, bipred_weight, mb_y, len );\
\
    unsigned stride = h->mb.i_mb_stride;\
    unsigned width = h->mb.i_mb_width;\
    unsigned height = h->mb.i_mb_height;\
\
    for( unsigned i = 0; i < len; current += 32 )\
    {\
        int end = X264_MIN( i+8, len );\
        for( ; i < end; i++, current += 2 )\
        {\
            if( !(lowres_costs[i] & (1 << (list+LOWRES_COST_SHIFT))) )\
                continue;\
\
            unsigned mbx = current[0];\
            unsigned mby = current[1];\
            unsigned idx0 = mbx + mby * stride;\
            unsigned idx2 = idx0 + stride;\
\
            /* Shortcut for the simple/common case of zero MV */\
            if( !M32( mvs[i] ) )\
            {\
                CLIP_ADD( ref_costs[idx0], current[16] );\
                continue;\
            }\
\
            if( mbx < width-1 && mby < height-1 )\
            {\
                CLIP_ADD2( ref_costs+idx0, current+16 );\
                CLIP_ADD2( ref_costs+idx2, current+32 );\
            }\
            else\
            {\
                /* Note: this takes advantage of unsigned representation to\
                 * catch negative mbx/mby. */\
                if( mby < height )\
                {\
                    if( mbx < width )\
                        CLIP_ADD( ref_costs[idx0+0], current[16] );\
                    if( mbx+1 < width )\
                        CLIP_ADD( ref_costs[idx0+1], current[17] );\
                }\
                if( mby+1 < height )\
                {\
                    if( mbx < width )\
                        CLIP_ADD( ref_costs[idx2+0], current[32] );\
                    if( mbx+1 < width )\
                        CLIP_ADD( ref_costs[idx2+1], current[33] );\
                }\
            }\
        }\
    }\
}
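/* Scratch-buffer layout, inferred from the indexing above: the internal asm
 * emits batches of 8 MBs, each batch 48 int16s long -- interleaved
 * (mbx, mby) pairs at current[0..15], the bilinearly-weighted costs of the
 * top two neighbouring MBs at current[16..31] and of the bottom two at
 * current[32..47]. */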

PROPAGATE_LIST(ssse3)
PROPAGATE_LIST(avx)
#undef CLIP_ADD
#undef CLIP_ADD2

void x264_mc_init_mmx( int cpu, x264_mc_functions_t *pf )
{
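    /* Dispatch by successive refinement: pointers are assigned in increasing
     * order of CPU capability, each level overwriting the last, and the
     * function returns as soon as a required feature bit is missing. */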
    if( !(cpu&X264_CPU_MMX) )
        return;

    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_mmx;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_mmx;

    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_mmx;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_8x8]   = x264_mc_copy_w8_mmx;
    pf->copy[PIXEL_4x4]   = x264_mc_copy_w4_mmx;
    pf->memcpy_aligned  = x264_memcpy_aligned_mmx;
    pf->memzero_aligned = x264_memzero_aligned_mmx;
    pf->integral_init4v = x264_integral_init4v_mmx;
    pf->integral_init8v = x264_integral_init8v_mmx;

    if( !(cpu&X264_CPU_MMX2) )
        return;

    pf->prefetch_fenc_420 = x264_prefetch_fenc_420_mmx2;
    pf->prefetch_fenc_422 = x264_prefetch_fenc_422_mmx2;
    pf->prefetch_ref  = x264_prefetch_ref_mmx2;

    pf->plane_copy_interleave = x264_plane_copy_interleave_mmx2;
    pf->store_interleave_chroma = x264_store_interleave_chroma_mmx2;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_mmx2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_mmx2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_mmx2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_mmx2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_mmx2;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_mmx2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_mmx2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_mmx2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_mmx2;

    pf->mc_luma = mc_luma_mmx2;
    pf->get_ref = get_ref_mmx2;
    pf->mc_chroma = x264_mc_chroma_mmx2;
    pf->hpel_filter = x264_hpel_filter_mmx2;
    pf->weight = x264_mc_weight_wtab_mmx2;
    pf->weight_cache = x264_weight_cache_mmx2;
    pf->offsetadd = x264_mc_offsetadd_wtab_mmx2;
    pf->offsetsub = x264_mc_offsetsub_wtab_mmx2;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_mmx2;

    if( cpu&X264_CPU_SSE )
    {
        pf->memcpy_aligned  = x264_memcpy_aligned_sse;
        pf->memzero_aligned = x264_memzero_aligned_sse;
        pf->plane_copy = x264_plane_copy_sse;
    }

#if HIGH_BIT_DEPTH
#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&(X264_CPU_CACHELINE_32|X264_CPU_CACHELINE_64) )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;

    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;

    pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;

    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pf->get_ref = get_ref_sse2;
        pf->mc_luma = mc_luma_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
    }

    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
    pf->store_interleave_chroma = x264_store_interleave_chroma_sse2;
    pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
    pf->offsetsub = x264_mc_offsetsub_wtab_sse2;

    if( cpu&X264_CPU_SSE2_IS_SLOW )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_sse2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_sse2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_sse2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_sse2;

    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse;
    pf->weight = x264_mc_weight_wtab_sse2;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_sse2;

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
    pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_ssse3;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_ssse3;

    if( !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SLOW_ATOM|X264_CPU_SLOW_PALIGNR)) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_avx;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_avx;
    pf->plane_copy_interleave        = x264_plane_copy_interleave_avx;
    pf->plane_copy_deinterleave      = x264_plane_copy_deinterleave_avx;
    pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_avx;
    pf->store_interleave_chroma      = x264_store_interleave_chroma_avx;
    pf->copy[PIXEL_16x16]            = x264_mc_copy_w16_aligned_avx;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;

    if( cpu&X264_CPU_XOP )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;

    if( cpu&X264_CPU_AVX2 )
    {
        pf->mc_luma = mc_luma_avx2;
        pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_avx2;
    }
#else // !HIGH_BIT_DEPTH

#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&X264_CPU_CACHELINE_32 )
    {
        pf->mc_luma = mc_luma_cache32_mmx2;
        pf->get_ref = get_ref_cache32_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
    else if( cpu&X264_CPU_CACHELINE_64 )
    {
        pf->mc_luma = mc_luma_cache64_mmx2;
        pf->get_ref = get_ref_cache64_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->hpel_filter = x264_hpel_filter_sse2_amd;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
    pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_sse2;

    if( !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        pf->weight = x264_mc_weight_wtab_sse2;
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
            pf->offsetsub = x264_mc_offsetsub_wtab_sse2;
        }

        pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse;
        pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
        pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
        pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
        pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
        pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_sse2;

        if( cpu&X264_CPU_SSE2_IS_FAST )
        {
            pf->store_interleave_chroma = x264_store_interleave_chroma_sse2; // FIXME sse2fast? sse2medium?
            pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
            pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;
            pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
            pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;
            pf->mc_luma = mc_luma_sse2;
            pf->get_ref = get_ref_sse2;
            if( cpu&X264_CPU_CACHELINE_64 )
            {
                pf->mc_luma = mc_luma_cache64_sse2;
                pf->get_ref = get_ref_cache64_sse2;
            }
        }
    }

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_ssse3;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_ssse3;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_ssse3;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_ssse3;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_ssse3;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_ssse3;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_ssse3;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_ssse3;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_ssse3;
    pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_ssse3;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_ssse3;

    if( !(cpu&X264_CPU_SLOW_PSHUFB) )
    {
        pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_ssse3;
        pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_ssse3;
        pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_ssse3;
    }

    if( !(cpu&X264_CPU_SLOW_PALIGNR) )
    {
#if ARCH_X86_64
        if( !(cpu&X264_CPU_SLOW_ATOM) ) /* The 64-bit version is slower, but the 32-bit version is faster? */
#endif
            pf->hpel_filter = x264_hpel_filter_ssse3;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
    }
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_ssse3;

    if( cpu&X264_CPU_CACHELINE_64 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_ssse3_cache64;
        pf->mc_luma = mc_luma_cache64_ssse3;
        pf->get_ref = get_ref_cache64_ssse3;
        if( cpu&X264_CPU_SLOW_ATOM )
        {
            pf->mc_luma = mc_luma_cache64_ssse3_atom;
            pf->get_ref = get_ref_cache64_ssse3_atom;
        }
    }

    pf->weight_cache = x264_weight_cache_ssse3;
    pf->weight = x264_mc_weight_wtab_ssse3;

    if( !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SLOW_ATOM|X264_CPU_SLOW_PALIGNR)) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_SSE4) )
        return;

    pf->integral_init4h = x264_integral_init4h_sse4;
    pf->integral_init8h = x264_integral_init8h_sse4;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
    pf->integral_init8h = x264_integral_init8h_avx;
    pf->hpel_filter = x264_hpel_filter_avx;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;

    if( cpu&X264_CPU_XOP )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;

    if( cpu&X264_CPU_AVX2 )
    {
        pf->hpel_filter = x264_hpel_filter_avx2;
        pf->mc_chroma = x264_mc_chroma_avx2;
        pf->weight = x264_mc_weight_wtab_avx2;
        pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_avx2;
        pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_avx2;
        pf->integral_init8v = x264_integral_init8v_avx2;
        pf->integral_init4v = x264_integral_init4v_avx2;
        pf->integral_init8h = x264_integral_init8h_avx2;
        pf->integral_init4h = x264_integral_init4h_avx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx2;
    }
#endif // HIGH_BIT_DEPTH

    if( !(cpu&X264_CPU_AVX) )
        return;
    pf->memzero_aligned = x264_memzero_aligned_avx;
    pf->plane_copy = x264_plane_copy_avx;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx;
    pf->mbtree_propagate_list = x264_mbtree_propagate_list_avx;

    if( cpu&X264_CPU_FMA4 )
        pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_fma4;

    if( !(cpu&X264_CPU_AVX2) )
        return;
    pf->get_ref = get_ref_avx2;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx2;
}