/*****************************************************************************
 * mc-c.c: x86 motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2014 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "common/common.h"
#include "mc.h"

#define DECL_SUF( func, args )\
    void func##_mmx2 args;\
    void func##_sse2 args;\
    void func##_ssse3 args;\
    void func##_avx2 args;

DECL_SUF( x264_pixel_avg_16x16, ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_16x8,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_8x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x16,  ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x8,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x4,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))
DECL_SUF( x264_pixel_avg_4x2,   ( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t, int ))

#define MC_WEIGHT(w,type) \
    void x264_mc_weight_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int );

#define MC_WEIGHT_OFFSET(w,type) \
    void x264_mc_offsetadd_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
    void x264_mc_offsetsub_w##w##_##type( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ); \
    MC_WEIGHT(w,type)

MC_WEIGHT_OFFSET( 4, mmx2 )
MC_WEIGHT_OFFSET( 8, mmx2 )
MC_WEIGHT_OFFSET( 12, mmx2 )
MC_WEIGHT_OFFSET( 16, mmx2 )
MC_WEIGHT_OFFSET( 20, mmx2 )
MC_WEIGHT_OFFSET( 12, sse2 )
MC_WEIGHT_OFFSET( 16, sse2 )
MC_WEIGHT_OFFSET( 20, sse2 )
#if HIGH_BIT_DEPTH
MC_WEIGHT_OFFSET( 8, sse2 )
#endif
MC_WEIGHT( 8, sse2 )
MC_WEIGHT( 4, ssse3 )
MC_WEIGHT( 8, ssse3 )
MC_WEIGHT( 12, ssse3 )
MC_WEIGHT( 16, ssse3 )
MC_WEIGHT( 20, ssse3 )
MC_WEIGHT( 8, avx2 )
MC_WEIGHT( 16, avx2 )
MC_WEIGHT( 20, avx2 )
#undef MC_WEIGHT_OFFSET
#undef MC_WEIGHT

void x264_mc_copy_w4_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w8_mmx ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w8_sse ( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_mmx( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_sse( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_aligned_sse( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_mc_copy_w16_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
void x264_mc_copy_w16_aligned_avx( uint16_t *, intptr_t, uint16_t *, intptr_t, int );
void x264_prefetch_fenc_420_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_fenc_422_mmx2( pixel *, intptr_t, pixel *, intptr_t, int );
void x264_prefetch_ref_mmx2( pixel *, intptr_t, int );
void x264_plane_copy_core_mmx2( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
void x264_plane_copy_interleave_core_mmx2( pixel *dst,  intptr_t i_dst,
                                           pixel *srcu, intptr_t i_srcu,
                                           pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_core_sse2( pixel *dst,  intptr_t i_dst,
                                           pixel *srcu, intptr_t i_srcu,
                                           pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_core_avx( pixel *dst,  intptr_t i_dst,
                                          pixel *srcu, intptr_t i_srcu,
                                          pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_interleave_c( pixel *dst,  intptr_t i_dst,
                                   pixel *srcu, intptr_t i_srcu,
                                   pixel *srcv, intptr_t i_srcv, int w, int h );
void x264_plane_copy_deinterleave_mmx( pixel *dstu, intptr_t i_dstu,
                                       pixel *dstv, intptr_t i_dstv,
                                       pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_sse2( pixel *dstu, intptr_t i_dstu,
                                        pixel *dstv, intptr_t i_dstv,
                                        pixel *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_ssse3( uint8_t *dstu, intptr_t i_dstu,
                                         uint8_t *dstv, intptr_t i_dstv,
                                         uint8_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_avx( uint16_t *dstu, intptr_t i_dstu,
                                       uint16_t *dstv, intptr_t i_dstv,
                                       uint16_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_v210_ssse3( uint16_t *dstu, intptr_t i_dstu,
                                              uint16_t *dstv, intptr_t i_dstv,
                                              uint32_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_v210_avx  ( uint16_t *dstu, intptr_t i_dstu,
                                              uint16_t *dstv, intptr_t i_dstv,
                                              uint32_t *src,  intptr_t i_src, int w, int h );
void x264_plane_copy_deinterleave_v210_avx2 ( uint16_t *dstu, intptr_t i_dstu,
                                              uint16_t *dstv, intptr_t i_dstv,
                                              uint32_t *src,  intptr_t i_src, int w, int h );
void x264_store_interleave_chroma_mmx2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_store_interleave_chroma_sse2( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_store_interleave_chroma_avx ( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
void x264_load_deinterleave_chroma_fenc_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fenc_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_mmx ( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_sse2( pixel *dst, pixel *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_ssse3( uint8_t *dst, uint8_t *src, intptr_t i_src, int height );
void x264_load_deinterleave_chroma_fdec_avx( uint16_t *dst, uint16_t *src, intptr_t i_src, int height );
void *x264_memcpy_aligned_mmx( void *dst, const void *src, size_t n );
void *x264_memcpy_aligned_sse( void *dst, const void *src, size_t n );
void x264_memzero_aligned_mmx( void *dst, size_t n );
void x264_memzero_aligned_sse( void *dst, size_t n );
void x264_memzero_aligned_avx( void *dst, size_t n );
void x264_integral_init4h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init4h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_sse4( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_avx ( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init8h_avx2( uint16_t *sum, uint8_t *pix, intptr_t stride );
void x264_integral_init4v_mmx  ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_sse2 ( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_ssse3( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init4v_avx2( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
void x264_integral_init8v_mmx ( uint16_t *sum8, intptr_t stride );
void x264_integral_init8v_sse2( uint16_t *sum8, intptr_t stride );
void x264_integral_init8v_avx2( uint16_t *sum8, intptr_t stride );
void x264_mbtree_propagate_cost_sse2( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx ( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_fma4( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                      uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
void x264_mbtree_propagate_cost_avx2_fma3( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                           uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );

#define MC_CHROMA(cpu)\
void x264_mc_chroma_##cpu( pixel *dstu, pixel *dstv, intptr_t i_dst, pixel *src, intptr_t i_src,\
                           int dx, int dy, int i_width, int i_height );
MC_CHROMA(mmx2)
MC_CHROMA(sse2)
MC_CHROMA(ssse3)
MC_CHROMA(ssse3_cache64)
MC_CHROMA(avx)
MC_CHROMA(avx2)

#define LOWRES(cpu)\
void x264_frame_init_lowres_core_##cpu( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,\
                                        intptr_t src_stride, intptr_t dst_stride, int width, int height );
LOWRES(mmx2)
LOWRES(cache32_mmx2)
LOWRES(sse2)
LOWRES(ssse3)
LOWRES(avx)
LOWRES(xop)
LOWRES(avx2)

#define PIXEL_AVG_W(width,cpu)\
void x264_pixel_avg2_w##width##_##cpu( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t );
/* This declares some functions that don't exist, but that isn't a problem. */
#define PIXEL_AVG_WALL(cpu)\
PIXEL_AVG_W(4,cpu); PIXEL_AVG_W(8,cpu); PIXEL_AVG_W(10,cpu); PIXEL_AVG_W(12,cpu); PIXEL_AVG_W(16,cpu); PIXEL_AVG_W(18,cpu); PIXEL_AVG_W(20,cpu);

PIXEL_AVG_WALL(mmx2)
PIXEL_AVG_WALL(cache32_mmx2)
PIXEL_AVG_WALL(cache64_mmx2)
PIXEL_AVG_WALL(cache64_sse2)
PIXEL_AVG_WALL(sse2)
PIXEL_AVG_WALL(cache64_ssse3)
PIXEL_AVG_WALL(avx2)

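/* Function tables indexed by block width in units of 4 pixels (i_width>>2):
 * slots 1..5 hold the functions for widths 4/8/12/16/20; slot 0 is unused. */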
#define PIXEL_AVG_WTAB(instr, name1, name2, name3, name4, name5)\
static void (* const x264_pixel_avg_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, pixel *, intptr_t ) =\
{\
    NULL,\
    x264_pixel_avg2_w4_##name1,\
    x264_pixel_avg2_w8_##name2,\
    x264_pixel_avg2_w12_##name3,\
    x264_pixel_avg2_w16_##name4,\
    x264_pixel_avg2_w20_##name5,\
};

#if HIGH_BIT_DEPTH
/* we can replace w12/w20 with w10/w18, since only 9/17 of the pixels actually matter */
#define x264_pixel_avg2_w12_mmx2         x264_pixel_avg2_w10_mmx2
#define x264_pixel_avg2_w20_mmx2         x264_pixel_avg2_w18_mmx2
#define x264_pixel_avg2_w12_sse2         x264_pixel_avg2_w10_sse2
#define x264_pixel_avg2_w20_sse2         x264_pixel_avg2_w18_sse2
#define x264_pixel_avg2_w12_avx2         x264_pixel_avg2_w16_avx2
#define x264_pixel_avg2_w20_avx2         x264_pixel_avg2_w18_avx2
#else
/* w16 sse2 is faster than w12 mmx as long as the cacheline issue is resolved */
#define x264_pixel_avg2_w12_cache64_ssse3 x264_pixel_avg2_w16_cache64_ssse3
#define x264_pixel_avg2_w12_cache64_sse2  x264_pixel_avg2_w16_cache64_sse2
#define x264_pixel_avg2_w12_sse3          x264_pixel_avg2_w16_sse3
#define x264_pixel_avg2_w12_sse2          x264_pixel_avg2_w16_sse2
#endif // HIGH_BIT_DEPTH

PIXEL_AVG_WTAB(mmx2, mmx2, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
PIXEL_AVG_WTAB(sse2, mmx2, sse2, sse2, sse2, sse2)
PIXEL_AVG_WTAB(avx2, mmx2, sse2, avx2, avx2, avx2)
#else // !HIGH_BIT_DEPTH
#if ARCH_X86
PIXEL_AVG_WTAB(cache32_mmx2, mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2, cache32_mmx2)
PIXEL_AVG_WTAB(cache64_mmx2, mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2, cache64_mmx2)
#endif
PIXEL_AVG_WTAB(sse2, mmx2, mmx2, sse2, sse2, sse2)
PIXEL_AVG_WTAB(cache64_sse2, mmx2, cache64_mmx2, cache64_sse2, cache64_sse2, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3, mmx2, cache64_mmx2, cache64_ssse3, cache64_ssse3, cache64_sse2)
PIXEL_AVG_WTAB(cache64_ssse3_atom, mmx2, mmx2, cache64_ssse3, cache64_ssse3, sse2)
PIXEL_AVG_WTAB(avx2, mmx2, mmx2, sse2, sse2, avx2)
#endif // HIGH_BIT_DEPTH

#define MC_COPY_WTAB(instr, name1, name2, name3)\
static void (* const x264_mc_copy_wtab_##instr[5])( pixel *, intptr_t, pixel *, intptr_t, int ) =\
{\
    NULL,\
    x264_mc_copy_w4_##name1,\
    x264_mc_copy_w8_##name2,\
    NULL,\
    x264_mc_copy_w16_##name3,\
};

MC_COPY_WTAB(mmx,mmx,mmx,mmx)
#if HIGH_BIT_DEPTH
MC_COPY_WTAB(sse,mmx,sse,sse)
MC_COPY_WTAB(avx,mmx,sse,avx)
#else
MC_COPY_WTAB(sse,mmx,mmx,sse)
#endif

#define MC_WEIGHT_WTAB(function, instr, name1, name2, w12version)\
    static void (* x264_mc_##function##_wtab_##instr[6])( pixel *, intptr_t, pixel *, intptr_t, const x264_weight_t *, int ) =\
{\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w4_##name1,\
    x264_mc_##function##_w8_##name2,\
    x264_mc_##function##_w##w12version##_##instr,\
    x264_mc_##function##_w16_##instr,\
    x264_mc_##function##_w20_##instr,\
};

#if HIGH_BIT_DEPTH
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,12)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,sse2,16)

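/* Pack the constants so the asm can (presumably via pmaddwd) apply scale,
 * rounding and the bit-depth-scaled offset in one multiply-add per pixel:
 * with o = i_offset<<(BIT_DEPTH-8), the identity
 *   (2*scale*pix + (2*o+1)<<denom) >> (denom+1)
 *   == ((scale*pix + (1<<(denom-1))) >> denom) + o
 * is exact, i.e. the standard weighted-prediction formula with rounding. */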
static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        for( int i = 0; i < 8; i++ )
            w->cachea[i] = abs(w->i_offset<<(BIT_DEPTH-8));
        return;
    }
    w->weightfn = h->mc.weight;
    int den1 = 1<<w->i_denom;
    int den2 = w->i_scale<<1;
    int den3 = 1+(w->i_offset<<(BIT_DEPTH-8+1));
    for( int i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = i&1 ? den3 : den2;
    }
}
#else
MC_WEIGHT_WTAB(weight,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetadd,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(offsetsub,mmx2,mmx2,mmx2,12)
MC_WEIGHT_WTAB(weight,sse2,mmx2,sse2,16)
MC_WEIGHT_WTAB(offsetadd,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(offsetsub,sse2,mmx2,mmx2,16)
MC_WEIGHT_WTAB(weight,ssse3,ssse3,ssse3,16)
MC_WEIGHT_WTAB(weight,avx2,ssse3,avx2,16)

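/* den1 merges the rounding term and the offset into one 16-bit addend: the
 * low denom bits of i_offset<<denom are zero, so the OR below is the same as
 * addition, and presumably the asm computes (pix*scale + den1) >> denom
 * == ((pix*scale + (1<<(denom-1))) >> denom) + i_offset, which is exact. */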
static void x264_weight_cache_mmx2( x264_t *h, x264_weight_t *w )
{
    int i;
    int16_t den1;

    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;
        memset( w->cachea, abs(w->i_offset), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = 1 << (w->i_denom - 1) | w->i_offset << w->i_denom;
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = w->i_scale;
        w->cacheb[i] = den1;
    }
}

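/* Normalize the denominator to a fixed 8 by prescaling the weight:
 * (pix * (scale<<(8-denom))) >> 8 == (pix*scale) >> denom (denom is at most
 * 7 per the H.264 spec), so one shift amount serves every weight; the offset
 * in cacheb is added after the multiply. */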
static void x264_weight_cache_ssse3( x264_t *h, x264_weight_t *w )
{
    int i, den1;
    if( w->i_scale == 1<<w->i_denom )
    {
        if( w->i_offset < 0 )
            w->weightfn = h->mc.offsetsub;
        else
            w->weightfn = h->mc.offsetadd;

        memset( w->cachea, abs( w->i_offset ), sizeof(w->cachea) );
        return;
    }
    w->weightfn = h->mc.weight;
    den1 = w->i_scale << (8 - w->i_denom);
    for( i = 0; i < 8; i++ )
    {
        w->cachea[i] = den1;
        w->cacheb[i] = w->i_offset;
    }
}
#endif // !HIGH_BIT_DEPTH

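/* src[] holds the four filtered planes: 0 = fullpel, 1 = horizontal half-pel,
 * 2 = vertical half-pel, 3 = centre (diagonal) half-pel. For each of the 16
 * quarter-pel positions (qpel_idx = (mvy&3)*4 + (mvx&3)), hpel_ref0/hpel_ref1
 * give the two planes whose average yields that position; qpel_idx&5 is
 * nonzero iff either mv component has an odd quarter-pel phase, i.e. iff
 * averaging is actually required. The ((mv&3)==3) adjustments below step to
 * the adjacent row/column so the averaged pair straddles the true position. */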
static const uint8_t hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const uint8_t hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};

#define MC_LUMA(name,instr1,instr2)\
static void mc_luma_##name( pixel *dst,    intptr_t i_dst_stride,\
                            pixel *src[4], intptr_t i_src_stride,\
                            int mvx, int mvy,\
                            int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##instr1[i_width>>2](\
                dst, i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );\
    }\
    else if( weight->weightfn )\
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );\
    else\
        x264_mc_copy_wtab_##instr2[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, i_height );\
}

MC_LUMA(mmx2,mmx2,mmx)
MC_LUMA(sse2,sse2,sse)
#if HIGH_BIT_DEPTH
MC_LUMA(avx2,avx2,avx)
#else
#if ARCH_X86
MC_LUMA(cache32_mmx2,cache32_mmx2,mmx)
MC_LUMA(cache64_mmx2,cache64_mmx2,mmx)
#endif
MC_LUMA(cache64_sse2,cache64_sse2,sse)
MC_LUMA(cache64_ssse3,cache64_ssse3,sse)
MC_LUMA(cache64_ssse3_atom,cache64_ssse3_atom,sse)
#endif // !HIGH_BIT_DEPTH

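/* Like mc_luma, but when neither interpolation nor weighting is needed,
 * get_ref returns a pointer into the reference plane itself (setting
 * *i_dst_stride to the source stride) instead of copying. */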
#define GET_REF(name)\
static pixel *get_ref_##name( pixel *dst,   intptr_t *i_dst_stride,\
                              pixel *src[4], intptr_t i_src_stride,\
                              int mvx, int mvy,\
                              int i_width, int i_height, const x264_weight_t *weight )\
{\
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);\
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);\
    pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;\
    if( qpel_idx & 5 ) /* qpel interpolation needed */\
    {\
        pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);\
        x264_pixel_avg_wtab_##name[i_width>>2](\
                dst, *i_dst_stride, src1, i_src_stride,\
                src2, i_height );\
        if( weight->weightfn )\
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );\
        return dst;\
    }\
    else if( weight->weightfn )\
    {\
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );\
        return dst;\
    }\
    else\
    {\
        *i_dst_stride = i_src_stride;\
        return src1;\
    }\
}

GET_REF(mmx2)
GET_REF(sse2)
GET_REF(avx2)
#if !HIGH_BIT_DEPTH
#if ARCH_X86
GET_REF(cache32_mmx2)
GET_REF(cache64_mmx2)
#endif
GET_REF(cache64_sse2)
GET_REF(cache64_ssse3)
GET_REF(cache64_ssse3_atom)
#endif // !HIGH_BIT_DEPTH

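/* The asm requires aligned input, so round the pointers down by the source's
 * misalignment and widen the row accordingly: a few extra leading pixels get
 * filtered into the (identically shifted) destinations. This assumes the
 * planes carry enough left padding to absorb the shift, which x264's padded
 * frames provide. */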
#define HPEL(align, cpu, cpuv, cpuc, cpuh)\
void x264_hpel_filter_v_##cpuv( pixel *dst, pixel *src, int16_t *buf, intptr_t stride, intptr_t width );\
void x264_hpel_filter_c_##cpuc( pixel *dst, int16_t *buf, intptr_t width );\
void x264_hpel_filter_h_##cpuh( pixel *dst, pixel *src, intptr_t width );\
static void x264_hpel_filter_##cpu( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,\
                                    intptr_t stride, int width, int height, int16_t *buf )\
{\
    intptr_t realign = (intptr_t)src & (align-1);\
    src -= realign;\
    dstv -= realign;\
    dstc -= realign;\
    dsth -= realign;\
    width += realign;\
    while( height-- )\
    {\
        x264_hpel_filter_v_##cpuv( dstv, src, buf+16, stride, width );\
        x264_hpel_filter_c_##cpuc( dstc, buf+16, width );\
        x264_hpel_filter_h_##cpuh( dsth, src, width );\
        dsth += stride;\
        dstv += stride;\
        dstc += stride;\
        src  += stride;\
    }\
    x264_sfence();\
}

HPEL(8, mmx2, mmx2, mmx2, mmx2)
#if HIGH_BIT_DEPTH
HPEL(16, sse2, sse2, sse2, sse2)
#else // !HIGH_BIT_DEPTH
HPEL(16, sse2_amd, mmx2, mmx2, sse2)
#if ARCH_X86_64
void x264_hpel_filter_sse2 ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_ssse3( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_avx  ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
void x264_hpel_filter_avx2 ( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src, intptr_t stride, int width, int height, int16_t *buf );
#else
HPEL(16, sse2, sse2, sse2, sse2)
HPEL(16, ssse3, ssse3, ssse3, ssse3)
HPEL(16, avx, avx, avx, avx)
HPEL(32, avx2, avx2, avx2, avx2)
#endif
#endif // HIGH_BIT_DEPTH

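/* The mmx2 core requires w to be a multiple of 16 bytes and may overread each
 * row by up to c_w pixels, so round w up and copy the row that sits highest
 * in memory (row h-1, or row 0 when the stride is negative) with plain
 * memcpy, since there the overread could run past the end of the buffer. */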
static void x264_plane_copy_mmx2( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h )
{
    int c_w = 16/sizeof(pixel) - 1;
    if( w < 256 ) { // tiny resolutions don't want non-temporal hints. dunno the exact threshold.
        x264_plane_copy_c( dst, i_dst, src, i_src, w, h );
    } else if( !(w&c_w) ) {
        x264_plane_copy_core_mmx2( dst, i_dst, src, i_src, w, h );
    } else if( i_src > 0 ) {
        // have to use plain memcpy on the last line (in memory order) to avoid overreading src
        x264_plane_copy_core_mmx2( dst, i_dst, src, i_src, (w+c_w)&~c_w, h-1 );
        memcpy( dst+i_dst*(h-1), src+i_src*(h-1), w*sizeof(pixel) );
    } else {
        memcpy( dst, src, w*sizeof(pixel) );
        x264_plane_copy_core_mmx2( dst+i_dst, i_dst, src+i_src, i_src, (w+c_w)&~c_w, h-1 );
    }
}

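/* Same overread-avoidance scheme as plane_copy above, plus a C fallback when
 * the two source strides differ (the i_srcu ^ i_srcv test). */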
#define PLANE_INTERLEAVE(cpu) \
static void x264_plane_copy_interleave_##cpu( pixel *dst,  intptr_t i_dst,\
                                              pixel *srcu, intptr_t i_srcu,\
                                              pixel *srcv, intptr_t i_srcv, int w, int h )\
{\
    if( !(w&15) ) {\
        x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
    } else if( w < 16 || (i_srcu ^ i_srcv) ) {\
        x264_plane_copy_interleave_c( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
    } else if( i_srcu > 0 ) {\
        x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, (w+15)&~15, h-1 );\
        x264_plane_copy_interleave_c( dst+i_dst*(h-1), 0, srcu+i_srcu*(h-1), 0, srcv+i_srcv*(h-1), 0, w, 1 );\
    } else {\
        x264_plane_copy_interleave_c( dst, 0, srcu, 0, srcv, 0, w, 1 );\
        x264_plane_copy_interleave_core_##cpu( dst+i_dst, i_dst, srcu+i_srcu, i_srcu, srcv+i_srcv, i_srcv, (w+15)&~15, h-1 );\
    }\
}

PLANE_INTERLEAVE(mmx2)
PLANE_INTERLEAVE(sse2)
#if HIGH_BIT_DEPTH
PLANE_INTERLEAVE(avx)
#endif

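/* Dispatch in increasing order of capability: each stage overrides earlier,
 * more generic pointers, and returns as soon as a required CPU flag is
 * missing. */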
void x264_mc_init_mmx( int cpu, x264_mc_functions_t *pf )
{
    if( !(cpu&X264_CPU_MMX) )
        return;

    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_mmx;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_mmx;

    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_mmx;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_mmx;
    pf->copy[PIXEL_8x8]   = x264_mc_copy_w8_mmx;
    pf->copy[PIXEL_4x4]   = x264_mc_copy_w4_mmx;
    pf->memcpy_aligned  = x264_memcpy_aligned_mmx;
    pf->memzero_aligned = x264_memzero_aligned_mmx;
    pf->integral_init4v = x264_integral_init4v_mmx;
    pf->integral_init8v = x264_integral_init8v_mmx;

    if( !(cpu&X264_CPU_MMX2) )
        return;

    pf->prefetch_fenc_420 = x264_prefetch_fenc_420_mmx2;
    pf->prefetch_fenc_422 = x264_prefetch_fenc_422_mmx2;
    pf->prefetch_ref  = x264_prefetch_ref_mmx2;

    pf->plane_copy = x264_plane_copy_mmx2;
    pf->plane_copy_interleave = x264_plane_copy_interleave_mmx2;
    pf->store_interleave_chroma = x264_store_interleave_chroma_mmx2;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_mmx2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_mmx2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_mmx2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_mmx2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_mmx2;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_mmx2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_mmx2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_mmx2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_mmx2;

    pf->mc_luma = mc_luma_mmx2;
    pf->get_ref = get_ref_mmx2;
    pf->mc_chroma = x264_mc_chroma_mmx2;
    pf->hpel_filter = x264_hpel_filter_mmx2;
    pf->weight = x264_mc_weight_wtab_mmx2;
    pf->weight_cache = x264_weight_cache_mmx2;
    pf->offsetadd = x264_mc_offsetadd_wtab_mmx2;
    pf->offsetsub = x264_mc_offsetsub_wtab_mmx2;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_mmx2;

    if( cpu&X264_CPU_SSE )
    {
        pf->memcpy_aligned  = x264_memcpy_aligned_sse;
        pf->memzero_aligned = x264_memzero_aligned_sse;
    }

#if HIGH_BIT_DEPTH
#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&(X264_CPU_CACHELINE_32|X264_CPU_CACHELINE_64) )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;

    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;

    pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;

    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pf->get_ref = get_ref_sse2;
        pf->mc_luma = mc_luma_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
    }

    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;
    pf->store_interleave_chroma = x264_store_interleave_chroma_sse2;
    pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
    pf->offsetsub = x264_mc_offsetsub_wtab_sse2;

    if( cpu&X264_CPU_SSE2_IS_SLOW )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_sse2;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_sse2;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_sse2;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_sse2;

    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse;
    pf->weight = x264_mc_weight_wtab_sse2;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_sse2;

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
    pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_ssse3;

    if( !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SLOW_ATOM|X264_CPU_SLOW_PALIGNR)) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
    pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_avx;
    pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_avx;
    pf->plane_copy_interleave        = x264_plane_copy_interleave_avx;
    pf->plane_copy_deinterleave      = x264_plane_copy_deinterleave_avx;
    pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_avx;
    pf->store_interleave_chroma      = x264_store_interleave_chroma_avx;
    pf->copy[PIXEL_16x16]            = x264_mc_copy_w16_aligned_avx;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;

    if( cpu&X264_CPU_XOP )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;

    if( cpu&X264_CPU_AVX2 )
    {
        pf->mc_luma = mc_luma_avx2;
        pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_avx2;
    }
#else // !HIGH_BIT_DEPTH

#if ARCH_X86 // all x86_64 cpus with cacheline split issues use sse2 instead
    if( cpu&X264_CPU_CACHELINE_32 )
    {
        pf->mc_luma = mc_luma_cache32_mmx2;
        pf->get_ref = get_ref_cache32_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
    else if( cpu&X264_CPU_CACHELINE_64 )
    {
        pf->mc_luma = mc_luma_cache64_mmx2;
        pf->get_ref = get_ref_cache64_mmx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_cache32_mmx2;
    }
#endif

    if( !(cpu&X264_CPU_SSE2) )
        return;

    pf->integral_init4v = x264_integral_init4v_sse2;
    pf->integral_init8v = x264_integral_init8v_sse2;
    pf->hpel_filter = x264_hpel_filter_sse2_amd;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_sse2;

    if( !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        pf->weight = x264_mc_weight_wtab_sse2;
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            pf->offsetadd = x264_mc_offsetadd_wtab_sse2;
            pf->offsetsub = x264_mc_offsetsub_wtab_sse2;
        }

        pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_sse;
        pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_sse2;
        pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_sse2;
        pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_sse2;
        pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_sse2;
        pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_sse2;
        pf->hpel_filter = x264_hpel_filter_sse2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_sse2;
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_sse2;

        if( cpu&X264_CPU_SSE2_IS_FAST )
        {
            pf->store_interleave_chroma = x264_store_interleave_chroma_sse2; // FIXME sse2fast? sse2medium?
            pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_sse2;
            pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_sse2;
            pf->plane_copy_interleave   = x264_plane_copy_interleave_sse2;
            pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_sse2;
            pf->mc_luma = mc_luma_sse2;
            pf->get_ref = get_ref_sse2;
            if( cpu&X264_CPU_CACHELINE_64 )
            {
                pf->mc_luma = mc_luma_cache64_sse2;
                pf->get_ref = get_ref_cache64_sse2;
            }
        }
    }

    if( !(cpu&X264_CPU_SSSE3) )
        return;

    pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_ssse3;
    pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_ssse3;
    pf->avg[PIXEL_8x16]  = x264_pixel_avg_8x16_ssse3;
    pf->avg[PIXEL_8x8]   = x264_pixel_avg_8x8_ssse3;
    pf->avg[PIXEL_8x4]   = x264_pixel_avg_8x4_ssse3;
    pf->avg[PIXEL_4x16]  = x264_pixel_avg_4x16_ssse3;
    pf->avg[PIXEL_4x8]   = x264_pixel_avg_4x8_ssse3;
    pf->avg[PIXEL_4x4]   = x264_pixel_avg_4x4_ssse3;
    pf->avg[PIXEL_4x2]   = x264_pixel_avg_4x2_ssse3;

    if( !(cpu&X264_CPU_SLOW_PSHUFB) )
    {
        pf->load_deinterleave_chroma_fenc = x264_load_deinterleave_chroma_fenc_ssse3;
        pf->load_deinterleave_chroma_fdec = x264_load_deinterleave_chroma_fdec_ssse3;
        pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_ssse3;
    }

    if( !(cpu&X264_CPU_SLOW_PALIGNR) )
    {
#if ARCH_X86_64
        if( !(cpu&X264_CPU_SLOW_ATOM) ) /* The 64-bit version is slower, but the 32-bit version is faster? */
#endif
            pf->hpel_filter = x264_hpel_filter_ssse3;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_ssse3;
    }
    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_ssse3;

    if( cpu&X264_CPU_CACHELINE_64 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
            pf->mc_chroma = x264_mc_chroma_ssse3_cache64;
        pf->mc_luma = mc_luma_cache64_ssse3;
        pf->get_ref = get_ref_cache64_ssse3;
        if( cpu&X264_CPU_SLOW_ATOM )
        {
            pf->mc_luma = mc_luma_cache64_ssse3_atom;
            pf->get_ref = get_ref_cache64_ssse3_atom;
        }
    }

    pf->weight_cache = x264_weight_cache_ssse3;
    pf->weight = x264_mc_weight_wtab_ssse3;

    if( !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SLOW_ATOM|X264_CPU_SLOW_PALIGNR)) )
        pf->integral_init4v = x264_integral_init4v_ssse3;

    if( !(cpu&X264_CPU_SSE4) )
        return;

    pf->integral_init4h = x264_integral_init4h_sse4;
    pf->integral_init8h = x264_integral_init8h_sse4;

    if( !(cpu&X264_CPU_AVX) )
        return;

    pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx;
    pf->integral_init8h = x264_integral_init8h_avx;
    pf->hpel_filter = x264_hpel_filter_avx;

    if( !(cpu&X264_CPU_STACK_MOD4) )
        pf->mc_chroma = x264_mc_chroma_avx;

    if( cpu&X264_CPU_XOP )
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_xop;

    if( cpu&X264_CPU_AVX2 )
    {
        pf->hpel_filter = x264_hpel_filter_avx2;
        pf->mc_chroma = x264_mc_chroma_avx2;
        pf->weight = x264_mc_weight_wtab_avx2;
        pf->avg[PIXEL_16x16] = x264_pixel_avg_16x16_avx2;
        pf->avg[PIXEL_16x8]  = x264_pixel_avg_16x8_avx2;
        pf->integral_init8v = x264_integral_init8v_avx2;
        pf->integral_init4v = x264_integral_init4v_avx2;
        pf->integral_init8h = x264_integral_init8h_avx2;
        pf->integral_init4h = x264_integral_init4h_avx2;
        pf->frame_init_lowres_core = x264_frame_init_lowres_core_avx2;
    }
#endif // HIGH_BIT_DEPTH

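    /* The remaining pointers are shared by both bit depths. */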
    if( !(cpu&X264_CPU_AVX) )
        return;
    pf->memzero_aligned = x264_memzero_aligned_avx;
    pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx;

    if( cpu&X264_CPU_FMA4 )
        pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_fma4;

    if( !(cpu&X264_CPU_AVX2) )
        return;
    pf->get_ref = get_ref_avx2;

    if( cpu&X264_CPU_FMA3 )
        pf->mbtree_propagate_cost = x264_mbtree_propagate_cost_avx2_fma3;
}