1 /*****************************************************************************
2 * predict-c.c: intra prediction
3 *****************************************************************************
4 * Copyright (C) 2003-2011 x264 project
6 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
7 * Loren Merritt <lorenm@u.washington.edu>
8 * Fiona Glaser <fiona@x264.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
24 * This program is also available under a commercial proprietary license.
25 * For more information, contact us at licensing@x264.com.
26 *****************************************************************************/
28 #include "common/common.h"
/* 16x16 DC prediction glue: the C side accumulates the 16 left-neighbour
 * pixels (column src[-1 + y*FDEC_STRIDE], two rows per iteration) and hands
 * the running sum to the per-CPU asm core, which finishes the DC average and
 * fills the block.
 * NOTE(review): extraction dropped several original lines here (function
 * brace, the `dc` accumulator declaration, loop braces, closing brace) — the
 * macro body is incomplete as shown; restore from upstream before compiling.
 * The two trailing lines instantiate the wrapper for the mmx2 and sse2 cores. */
32 #define PREDICT_16x16_DC(name)\
33 void x264_predict_16x16_dc_##name( pixel *src )\
36 for( int i = 0; i < 16; i += 2 )\
38 dc += src[-1 + i * FDEC_STRIDE];\
39 dc += src[-1 + (i+1) * FDEC_STRIDE];\
41 x264_predict_16x16_dc_core_##name( src, dc );\
44 PREDICT_16x16_DC( mmx2 )
45 PREDICT_16x16_DC( sse2 )
/* 16x16 DC_LEFT prediction glue: sums only the 16 left neighbours and passes
 * the average (dc>>4) to the asm core, which splats it over the block.
 * NOTE(review): the `dc` initializer is missing from this extract — upstream
 * seeds it with a rounding bias before the loop (so dc>>4 rounds to nearest);
 * confirm against upstream before relying on the shown arithmetic.  Braces
 * around the function and loop bodies were also lost in extraction. */
47 #define PREDICT_16x16_DC_LEFT(name)\
48 static void x264_predict_16x16_dc_left_##name( pixel *src )\
51 for( int i = 0; i < 16; i += 2 )\
53 dc += src[-1 + i * FDEC_STRIDE];\
54 dc += src[-1 + (i+1) * FDEC_STRIDE];\
56 x264_predict_16x16_dc_left_core_##name( src, dc>>4 );\
59 PREDICT_16x16_DC_LEFT( mmx2 )
60 PREDICT_16x16_DC_LEFT( sse2 )
/* One weighted term of the plane-prediction gradients: adds
 *   i * (top[j+i]  - top[j-i])   to H (horizontal gradient, row above), and
 *   i * (left[j+i] - left[j-i])  to V (vertical gradient, column to the left).
 * Callers expand this for i = 1..4 (8x8c) or 1..8 (16x16) around a centre j,
 * matching the H.264 plane-mode gradient sums.  H and V must be declared and
 * zeroed by the caller (those lines are outside this macro). */
62 #define PREDICT_P_SUM(j,i)\
63 H += i * ( src[j+i - FDEC_STRIDE ] - src[j-i - FDEC_STRIDE ] );\
64 V += i * ( src[(j+i)*FDEC_STRIDE -1] - src[(j-i)*FDEC_STRIDE -1] );\
/* Weight vectors for computing the plane-prediction H gradient in one
 * multiply-accumulate: the pw_* (int16, 16-byte aligned) tables feed pmaddwd
 * in the SSE2 paths below; the pb_* (int8, 8-byte aligned) tables feed
 * pmaddubsw in the MMX/SSSE3 paths.  {1..8}/{-8..-1} pairs implement the
 * 16x16 weights; {-3..4} implements the 8-wide 8x8c weights. */
66 ALIGNED_16( static const int16_t pw_12345678[8] ) = {1,2,3,4,5,6,7,8};
67 ALIGNED_16( static const int16_t pw_m87654321[8] ) = {-8,-7,-6,-5,-4,-3,-2,-1};
68 ALIGNED_16( static const int16_t pw_m32101234[8] ) = {-3,-2,-1,0,1,2,3,4};
69 ALIGNED_8( static const int8_t pb_12345678[8] ) = {1,2,3,4,5,6,7,8};
70 ALIGNED_8( static const int8_t pb_m87654321[8] ) = {-8,-7,-6,-5,-4,-3,-2,-1};
71 ALIGNED_8( static const int8_t pb_m32101234[8] ) = {-3,-2,-1,0,1,2,3,4};
/* 16x16 plane-prediction glue: computes the plane parameters in C —
 *   a   = 16 * (bottom-left + top-right corner pixels)
 *   b,c = rounded, scaled H/V gradients ((5*grad + 32) >> 6)
 *   i00 = predicted value at (0,0): a - 7b - 7c + 16
 * — then calls the per-CPU asm core to rasterize the plane.
 * NOTE(review): the lines that declare H/V/a/b/c/i00 and accumulate H and V
 * (presumably via PREDICT_P_SUM, lines 76-88 of the original) were lost in
 * extraction; the macro is incomplete as shown.  The trailing `#endif`
 * suggests the instantiations sit inside a !HIGH_BIT_DEPTH region whose
 * opening #if is also missing — confirm against upstream. */
74 #define PREDICT_16x16_P(name)\
75 static void x264_predict_16x16_p_##name( pixel *src )\
89 a = 16 * ( src[15*FDEC_STRIDE -1] + src[15 - FDEC_STRIDE] );\
90 b = ( 5 * H + 32 ) >> 6;\
91 c = ( 5 * V + 32 ) >> 6;\
92 i00 = a - b * 7 - c * 7 + 16;\
93 x264_predict_16x16_p_core_##name( src, i00, b, c );\
96 PREDICT_16x16_P( mmx2 )
98 PREDICT_16x16_P( sse2 )
99 PREDICT_16x16_P( avx )
100 #endif //!HIGH_BIT_DEPTH
/* Inline-asm variants of 16x16 plane prediction: the horizontal gradient H is
 * computed with SIMD multiply-accumulate over the top row, V and the final
 * parameters in scalar C, then the SSE2 core rasterizes the plane.
 * NOTE(review): this extract is missing many original lines — the
 * #if HIGH_BIT_DEPTH / #else split between the two signatures below, the
 * asm() statement headers, output-operand lists, local declarations, and
 * closing braces.  Comments here describe only what the surviving lines show;
 * restore the full bodies from upstream before compiling. */
102 #if HAVE_X86_INLINE_ASM
/* Two alternate entry points (16-bit pixels via SSE2, 8-bit via SSSE3);
 * the preprocessor split that selects between them was dropped in extraction. */
104 static void x264_predict_16x16_p_sse2( uint16_t *src )
106 static void x264_predict_16x16_p_ssse3( uint8_t *src )
/* SSE2 H computation (16-bit pixels): pmaddwd the top row against the
 * {1..8}/{-8..-1} weights, then horizontally reduce xmm0 to a single sum. */
113 "movdqu %1, %%xmm1 \n"
114 "movdqa %2, %%xmm0 \n"
115 "pmaddwd %3, %%xmm0 \n"
116 "pmaddwd %4, %%xmm1 \n"
117 "paddd %%xmm1, %%xmm0 \n"
118 "movhlps %%xmm0, %%xmm1 \n"
119 "paddd %%xmm1, %%xmm0 \n"
120 "pshuflw $14, %%xmm0, %%xmm1 \n"
121 "paddd %%xmm1, %%xmm0 \n"
124 :"m"(src[-FDEC_STRIDE-1]), "m"(src[-FDEC_STRIDE+8]),
125 "m"(*pw_12345678), "m"(*pw_m87654321)
/* SSSE3 H computation (8-bit pixels): same reduction using pmaddubsw on
 * bytes, with palignr stitching the unaligned top-row halves. */
131 "palignr $7, %3, %%mm1 \n"
132 "pmaddubsw %4, %%mm0 \n"
133 "pmaddubsw %5, %%mm1 \n"
134 "paddw %%mm1, %%mm0 \n"
135 "pshufw $14, %%mm0, %%mm1 \n"
136 "paddw %%mm1, %%mm0 \n"
137 "pshufw $1, %%mm0, %%mm1 \n"
138 "paddw %%mm1, %%mm0 \n"
142 :"m"(src[-FDEC_STRIDE]), "m"(src[-FDEC_STRIDE+8]),
143 "m"(src[-FDEC_STRIDE-8]), "m"(*pb_12345678), "m"(*pb_m87654321)
/* V: vertical gradient over the left column, weights 8..1 mirrored around
 * the block centre, computed in plain C. */
146 V = 8 * ( src[15*FDEC_STRIDE-1] - src[-1*FDEC_STRIDE-1] )
147 + 7 * ( src[14*FDEC_STRIDE-1] - src[ 0*FDEC_STRIDE-1] )
148 + 6 * ( src[13*FDEC_STRIDE-1] - src[ 1*FDEC_STRIDE-1] )
149 + 5 * ( src[12*FDEC_STRIDE-1] - src[ 2*FDEC_STRIDE-1] )
150 + 4 * ( src[11*FDEC_STRIDE-1] - src[ 3*FDEC_STRIDE-1] )
151 + 3 * ( src[10*FDEC_STRIDE-1] - src[ 4*FDEC_STRIDE-1] )
152 + 2 * ( src[ 9*FDEC_STRIDE-1] - src[ 5*FDEC_STRIDE-1] )
153 + 1 * ( src[ 8*FDEC_STRIDE-1] - src[ 6*FDEC_STRIDE-1] );
/* Plane parameters: a from the corner pixels, b/c as rounded scaled
 * gradients, i00 as the value predicted at the top-left sample. */
154 a = 16 * ( src[15*FDEC_STRIDE -1] + src[15 - FDEC_STRIDE] );
155 b = ( 5 * H + 32 ) >> 6;
156 c = ( 5 * V + 32 ) >> 6;
157 i00 = a - b * 7 - c * 7 + 16;
158 /* b*15 + c*15 can overflow: it's easier to just branch away in this rare case
159 * than to try to consider it in the asm. */
160 if( BIT_DEPTH > 8 && (i00 > 0x7fff || abs(b) > 1092 || abs(c) > 1092) )
161 x264_predict_16x16_p_c( src );
/* NOTE(review): the `else` between the C fallback and the core call appears
 * to have been dropped in extraction (original line 162 missing). */
163 x264_predict_16x16_p_core_sse2( src, i00, b, c );
/* 8x8 chroma plane-prediction glue (8-bit pixels): computes the plane
 * parameters in C — a from the bottom-left/top-right corners, b/c as
 * (17*grad + 16) >> 5, i00 = a - 3b - 3c + 16 — then calls the asm core.
 * NOTE(review): the local declarations and the H/V gradient accumulation
 * (original lines 171-179, presumably PREDICT_P_SUM expansions) plus the
 * surrounding braces were lost in extraction; the macro is incomplete as
 * shown.  The `#endif //!HIGH_BIT_DEPTH` implies the whole region sits under
 * a !HIGH_BIT_DEPTH #if whose opener is also missing — confirm upstream. */
169 #define PREDICT_8x8_P(name)\
170 static void x264_predict_8x8c_p_##name( uint8_t *src )\
180 a = 16 * ( src[7*FDEC_STRIDE -1] + src[7 - FDEC_STRIDE] );\
181 b = ( 17 * H + 16 ) >> 5;\
182 c = ( 17 * V + 16 ) >> 5;\
183 i00 = a -3*b -3*c + 16;\
184 x264_predict_8x8c_p_core_##name( src, i00, b, c );\
187 PREDICT_8x8_P( mmx2 )
189 PREDICT_8x8_P( sse2 )
191 #endif //!HIGH_BIT_DEPTH
/* Inline-asm 8x8 chroma plane prediction.  PREDICT_8x8C_P_CORE holds the
 * shared scalar tail: V as a 1..4-weighted sum over the left column, the -4
 * corner correction folded into H, and the a/b/c plane parameters
 * ((17*grad + 16) >> 5, per the H.264 chroma plane formulas).  The
 * PREDICT_8x8_P2 macro wraps a SIMD H computation around that core for each
 * CPU flavour.
 * NOTE(review): asm() statement headers, output operands, braces, and the
 * preprocessor lines separating the two bit-depth variants were dropped in
 * extraction — both definitions below are incomplete as shown. */
193 #if HAVE_X86_INLINE_ASM
195 #define PREDICT_8x8C_P_CORE\
196 V = 1 * ( src[4*FDEC_STRIDE -1] - src[ 2*FDEC_STRIDE -1] )\
197 + 2 * ( src[5*FDEC_STRIDE -1] - src[ 1*FDEC_STRIDE -1] )\
198 + 3 * ( src[6*FDEC_STRIDE -1] - src[ 0*FDEC_STRIDE -1] )\
199 + 4 * ( src[7*FDEC_STRIDE -1] - src[-1*FDEC_STRIDE -1] );\
200 H += -4 * src[-1*FDEC_STRIDE -1];\
201 int a = 16 * ( src[7*FDEC_STRIDE -1] + src[7 - FDEC_STRIDE] );\
202 int b = ( 17 * H + 16 ) >> 5;\
203 int c = ( 17 * V + 16 ) >> 5;
/* 16-bit-pixel variant: H via pmaddwd of the top row against pw_m32101234,
 * horizontally reduced in xmm0; result lands in output operand %0. */
206 #define PREDICT_8x8_P2(cpu1, cpu2)\
207 static void x264_predict_8x8c_p_ ## cpu1( pixel *src )\
211 "movdqa %1, %%xmm0 \n"\
212 "pmaddwd %2, %%xmm0 \n"\
213 "movhlps %%xmm0, %%xmm1 \n"\
214 "paddd %%xmm1, %%xmm0 \n"\
215 "pshuflw $14, %%xmm0, %%xmm1 \n"\
216 "paddd %%xmm1, %%xmm0 \n"\
217 "movd %%xmm0, %0 \n"\
219 :"m"(src[-FDEC_STRIDE]), "m"(*pw_m32101234)\
222 x264_predict_8x8c_p_core_ ## cpu2( src, a, b, c );\
225 PREDICT_8x8_P2(sse2, sse2)
226 PREDICT_8x8_P2( avx, avx)
/* 8-bit-pixel variant: same H reduction using MMX pmaddubsw/pshufw on the
 * byte weights pb_m32101234; core takes the precomputed i00 as well. */
228 #else //!HIGH_BIT_DEPTH
229 #define PREDICT_8x8_P2(cpu1, cpu2)\
230 static void x264_predict_8x8c_p_ ## cpu1( pixel *src )\
235 "pmaddubsw %2, %%mm0 \n"\
236 "pshufw $14, %%mm0, %%mm1 \n"\
237 "paddw %%mm1, %%mm0 \n"\
238 "pshufw $1, %%mm0, %%mm1 \n"\
239 "paddw %%mm1, %%mm0 \n"\
243 :"m"(src[-FDEC_STRIDE]), "m"(*pb_m32101234)\
246 int i00 = a -3*b -3*c + 16;\
247 x264_predict_8x8c_p_core_ ## cpu2( src, i00, b, c );\
250 PREDICT_8x8_P2(ssse3, sse2)
251 PREDICT_8x8_P2( avx, avx)
255 #if ARCH_X86_64 && !HIGH_BIT_DEPTH
/* 8x8 chroma DC_LEFT: averages the top four and bottom four left-neighbour
 * pixels separately (s0, s1), rounds each ((s+2)>>2), and replicates the
 * result across all 8 bytes of a 64-bit word by multiplying with
 * 0x0101010101010101 — one store per row fills the block.
 * NOTE(review): the declarations of y/dc0/dc1, the braces, and the bodies of
 * the two store loops (the lines that actually write dc0/dc1 into src rows
 * 0-3 and 4-7) were lost in extraction — incomplete as shown. */
256 static void x264_predict_8x8c_dc_left( uint8_t *src )
259 uint32_t s0 = 0, s1 = 0;
262 for( y = 0; y < 4; y++ )
264 s0 += src[y * FDEC_STRIDE - 1];
265 s1 += src[(y+4) * FDEC_STRIDE - 1];
267 dc0 = (( s0 + 2 ) >> 2) * 0x0101010101010101ULL;
268 dc1 = (( s1 + 2 ) >> 2) * 0x0101010101010101ULL;
/* First loop presumably stores dc0 into rows 0-3, second stores dc1 into
 * rows 4-7 — bodies missing; verify against upstream. */
270 for( y = 0; y < 4; y++ )
275 for( y = 0; y < 4; y++ )
282 #endif // ARCH_X86_64 && !HIGH_BIT_DEPTH
284 /****************************************************************************
285 * Exported functions:
286 ****************************************************************************/
/* Fills the 16x16 intra-prediction function table with the fastest
 * implementation each CPU-flag level provides; later assignments overwrite
 * earlier ones as capability increases.
 * NOTE(review): in upstream x264 each `if( !(cpu&FLAG) )` is followed by
 * `return;` (an early-exit guard) — those lines, the braces, and the
 * #if HIGH_BIT_DEPTH / #else split were dropped in extraction, so the logic
 * reads inverted as shown.  Restore from upstream before use. */
287 void x264_predict_16x16_init_mmx( int cpu, x264_predict_t pf[7] )
289 if( !(cpu&X264_CPU_MMX2) )
291 pf[I_PRED_16x16_DC] = x264_predict_16x16_dc_mmx2;
292 pf[I_PRED_16x16_DC_TOP] = x264_predict_16x16_dc_top_mmx2;
293 pf[I_PRED_16x16_DC_LEFT] = x264_predict_16x16_dc_left_mmx2;
294 pf[I_PRED_16x16_V] = x264_predict_16x16_v_mmx2;
295 pf[I_PRED_16x16_H] = x264_predict_16x16_h_mmx2;
297 if( !(cpu&X264_CPU_SSE2) )
299 pf[I_PRED_16x16_DC] = x264_predict_16x16_dc_sse2;
300 pf[I_PRED_16x16_DC_TOP] = x264_predict_16x16_dc_top_sse2;
301 pf[I_PRED_16x16_DC_LEFT] = x264_predict_16x16_dc_left_sse2;
302 pf[I_PRED_16x16_V] = x264_predict_16x16_v_sse2;
303 pf[I_PRED_16x16_H] = x264_predict_16x16_h_sse2;
304 #if HAVE_X86_INLINE_ASM
305 pf[I_PRED_16x16_P] = x264_predict_16x16_p_sse2;
/* Below here appears to be the 8-bit (#else of HIGH_BIT_DEPTH) branch —
 * the preprocessor line itself is missing; TODO confirm. */
309 pf[I_PRED_16x16_P] = x264_predict_16x16_p_mmx2;
311 if( !(cpu&X264_CPU_SSE2) )
313 pf[I_PRED_16x16_DC] = x264_predict_16x16_dc_sse2;
314 pf[I_PRED_16x16_V] = x264_predict_16x16_v_sse2;
315 if( cpu&X264_CPU_SSE2_IS_SLOW )
317 pf[I_PRED_16x16_DC_TOP] = x264_predict_16x16_dc_top_sse2;
318 pf[I_PRED_16x16_DC_LEFT] = x264_predict_16x16_dc_left_sse2;
319 pf[I_PRED_16x16_P] = x264_predict_16x16_p_sse2;
320 if( !(cpu&X264_CPU_SSSE3) )
322 pf[I_PRED_16x16_H] = x264_predict_16x16_h_ssse3;
323 #if HAVE_X86_INLINE_ASM
324 pf[I_PRED_16x16_P] = x264_predict_16x16_p_ssse3;
326 if( !(cpu&X264_CPU_AVX) )
328 pf[I_PRED_16x16_P] = x264_predict_16x16_p_avx;
329 #endif // HIGH_BIT_DEPTH
/* Fills the 8x8 chroma intra-prediction table per CPU-flag level.
 * NOTE(review): as with the other init functions in this file, the
 * `return;` lines after each `if( !(cpu&FLAG) )` guard, the braces, the
 * #if HIGH_BIT_DEPTH / #else split, and the ARCH_X86_64 guard around the
 * dc_left assignment were dropped in extraction — restore from upstream. */
332 void x264_predict_8x8c_init_mmx( int cpu, x264_predict_t pf[7] )
334 if( !(cpu&X264_CPU_MMX) )
337 pf[I_PRED_CHROMA_V] = x264_predict_8x8c_v_mmx;
338 if( !(cpu&X264_CPU_MMX2) )
340 pf[I_PRED_CHROMA_DC] = x264_predict_8x8c_dc_mmx2;
341 pf[I_PRED_CHROMA_H] = x264_predict_8x8c_h_mmx2;
342 if( !(cpu&X264_CPU_SSE2) )
344 pf[I_PRED_CHROMA_V] = x264_predict_8x8c_v_sse2;
345 pf[I_PRED_CHROMA_DC] = x264_predict_8x8c_dc_sse2;
346 pf[I_PRED_CHROMA_DC_TOP] = x264_predict_8x8c_dc_top_sse2;
347 pf[I_PRED_CHROMA_H] = x264_predict_8x8c_h_sse2;
348 #if HAVE_X86_INLINE_ASM
349 pf[I_PRED_CHROMA_P] = x264_predict_8x8c_p_sse2;
350 if( !(cpu&X264_CPU_AVX) )
352 pf[I_PRED_CHROMA_P] = x264_predict_8x8c_p_avx;
/* 8-bit branch (original #else, line missing): dc_left here is the C
 * x86-64-only helper defined above in this file. */
356 pf[I_PRED_CHROMA_DC_LEFT] = x264_predict_8x8c_dc_left;
358 pf[I_PRED_CHROMA_V] = x264_predict_8x8c_v_mmx;
359 if( !(cpu&X264_CPU_MMX2) )
361 pf[I_PRED_CHROMA_DC_TOP] = x264_predict_8x8c_dc_top_mmx2;
362 pf[I_PRED_CHROMA_H] = x264_predict_8x8c_h_mmx2;
364 pf[I_PRED_CHROMA_P] = x264_predict_8x8c_p_mmx2;
366 pf[I_PRED_CHROMA_DC] = x264_predict_8x8c_dc_mmx2;
367 if( !(cpu&X264_CPU_SSE2) )
369 pf[I_PRED_CHROMA_P] = x264_predict_8x8c_p_sse2;
370 if( !(cpu&X264_CPU_SSSE3) )
372 pf[I_PRED_CHROMA_H] = x264_predict_8x8c_h_ssse3;
373 #if HAVE_X86_INLINE_ASM
374 pf[I_PRED_CHROMA_P] = x264_predict_8x8c_p_ssse3;
375 if( !(cpu&X264_CPU_AVX) )
377 pf[I_PRED_CHROMA_P] = x264_predict_8x8c_p_avx;
379 #endif // HIGH_BIT_DEPTH
/* Fills the 8x16 chroma (4:2:2) intra-prediction table per CPU-flag level.
 * NOTE(review): the `return;` guard lines, braces, and the bit-depth #if/#else
 * split were dropped in extraction — the shown conditions read inverted;
 * restore from upstream before use. */
382 void x264_predict_8x16c_init_mmx( int cpu, x264_predict_t pf[7] )
384 if( !(cpu&X264_CPU_MMX) )
387 if( !(cpu&X264_CPU_MMX2) )
389 pf[I_PRED_CHROMA_DC] = x264_predict_8x16c_dc_mmx2;
390 pf[I_PRED_CHROMA_H] = x264_predict_8x16c_h_mmx2;
391 if( !(cpu&X264_CPU_SSE2) )
393 pf[I_PRED_CHROMA_V] = x264_predict_8x16c_v_sse2;
394 pf[I_PRED_CHROMA_DC_TOP] = x264_predict_8x16c_dc_top_sse2;
395 pf[I_PRED_CHROMA_DC] = x264_predict_8x16c_dc_sse2;
396 pf[I_PRED_CHROMA_H] = x264_predict_8x16c_h_sse2;
/* 8-bit branch (original #else, line missing in extract). */
398 pf[I_PRED_CHROMA_V] = x264_predict_8x16c_v_mmx;
399 if( !(cpu&X264_CPU_MMX2) )
401 pf[I_PRED_CHROMA_DC_TOP] = x264_predict_8x16c_dc_top_mmx2;
402 pf[I_PRED_CHROMA_DC] = x264_predict_8x16c_dc_mmx2;
403 pf[I_PRED_CHROMA_H] = x264_predict_8x16c_h_mmx2;
404 if( !(cpu&X264_CPU_SSSE3) )
406 pf[I_PRED_CHROMA_H] = x264_predict_8x16c_h_ssse3;
407 #endif // HIGH_BIT_DEPTH
/* Fills the luma 8x8 intra-prediction table and the edge-filter pointer per
 * CPU-flag level; predict_8x8_filter prepares the filtered neighbour samples
 * the 8x8 modes consume.
 * NOTE(review): `return;` guard lines, braces, the HIGH_BIT_DEPTH #if/#else
 * split, and some ARCH_X86_64-conditional lines were dropped in extraction —
 * the shown `if( !(cpu&FLAG) )` conditions read inverted; restore upstream. */
410 void x264_predict_8x8_init_mmx( int cpu, x264_predict8x8_t pf[12], x264_predict_8x8_filter_t *predict_8x8_filter )
412 if( !(cpu&X264_CPU_MMX2) )
415 if( !(cpu&X264_CPU_SSE2) )
417 pf[I_PRED_8x8_V] = x264_predict_8x8_v_sse2;
418 pf[I_PRED_8x8_H] = x264_predict_8x8_h_sse2;
419 pf[I_PRED_8x8_DC] = x264_predict_8x8_dc_sse2;
420 pf[I_PRED_8x8_DC_TOP] = x264_predict_8x8_dc_top_sse2;
421 pf[I_PRED_8x8_DC_LEFT]= x264_predict_8x8_dc_left_sse2;
422 pf[I_PRED_8x8_DDL] = x264_predict_8x8_ddl_sse2;
423 pf[I_PRED_8x8_DDR] = x264_predict_8x8_ddr_sse2;
424 pf[I_PRED_8x8_VL] = x264_predict_8x8_vl_sse2;
425 pf[I_PRED_8x8_VR] = x264_predict_8x8_vr_sse2;
426 pf[I_PRED_8x8_HD] = x264_predict_8x8_hd_sse2;
427 pf[I_PRED_8x8_HU] = x264_predict_8x8_hu_sse2;
428 *predict_8x8_filter = x264_predict_8x8_filter_sse2;
429 if( !(cpu&X264_CPU_SSSE3) )
431 pf[I_PRED_8x8_DDL] = x264_predict_8x8_ddl_ssse3;
432 pf[I_PRED_8x8_DDR] = x264_predict_8x8_ddr_ssse3;
433 pf[I_PRED_8x8_HD] = x264_predict_8x8_hd_ssse3;
434 pf[I_PRED_8x8_HU] = x264_predict_8x8_hu_ssse3;
435 pf[I_PRED_8x8_VL] = x264_predict_8x8_vl_ssse3;
436 pf[I_PRED_8x8_VR] = x264_predict_8x8_vr_ssse3;
437 *predict_8x8_filter = x264_predict_8x8_filter_ssse3;
438 if( cpu&X264_CPU_CACHELINE_64 )
440 pf[I_PRED_8x8_DDL]= x264_predict_8x8_ddl_ssse3_cache64;
441 pf[I_PRED_8x8_DDR]= x264_predict_8x8_ddr_ssse3_cache64;
443 if( !(cpu&X264_CPU_AVX) )
445 pf[I_PRED_8x8_HD] = x264_predict_8x8_hd_avx;
446 pf[I_PRED_8x8_HU] = x264_predict_8x8_hu_avx;
447 pf[I_PRED_8x8_VL] = x264_predict_8x8_vl_avx;
448 pf[I_PRED_8x8_VR] = x264_predict_8x8_vr_avx;
449 *predict_8x8_filter = x264_predict_8x8_filter_avx;
/* 8-bit branch (original #else, line missing in extract). */
451 pf[I_PRED_8x8_V] = x264_predict_8x8_v_mmx2;
452 pf[I_PRED_8x8_H] = x264_predict_8x8_h_mmx2;
453 pf[I_PRED_8x8_DC] = x264_predict_8x8_dc_mmx2;
454 pf[I_PRED_8x8_DC_TOP] = x264_predict_8x8_dc_top_mmx2;
455 pf[I_PRED_8x8_DC_LEFT]= x264_predict_8x8_dc_left_mmx2;
456 pf[I_PRED_8x8_HD] = x264_predict_8x8_hd_mmx2;
457 pf[I_PRED_8x8_VL] = x264_predict_8x8_vl_mmx2;
458 *predict_8x8_filter = x264_predict_8x8_filter_mmx2;
460 pf[I_PRED_8x8_DDL] = x264_predict_8x8_ddl_mmx2;
461 pf[I_PRED_8x8_DDR] = x264_predict_8x8_ddr_mmx2;
462 pf[I_PRED_8x8_VR] = x264_predict_8x8_vr_mmx2;
463 pf[I_PRED_8x8_HU] = x264_predict_8x8_hu_mmx2;
465 if( !(cpu&X264_CPU_SSE2) )
467 pf[I_PRED_8x8_DDL] = x264_predict_8x8_ddl_sse2;
468 pf[I_PRED_8x8_VL] = x264_predict_8x8_vl_sse2;
469 pf[I_PRED_8x8_VR] = x264_predict_8x8_vr_sse2;
470 pf[I_PRED_8x8_DDR] = x264_predict_8x8_ddr_sse2;
471 pf[I_PRED_8x8_HD] = x264_predict_8x8_hd_sse2;
472 pf[I_PRED_8x8_HU] = x264_predict_8x8_hu_sse2;
473 if( !(cpu&X264_CPU_SSSE3) )
475 pf[I_PRED_8x8_DDL] = x264_predict_8x8_ddl_ssse3;
476 pf[I_PRED_8x8_VR] = x264_predict_8x8_vr_ssse3;
477 pf[I_PRED_8x8_HU] = x264_predict_8x8_hu_ssse3;
478 *predict_8x8_filter = x264_predict_8x8_filter_ssse3;
479 if( !(cpu&X264_CPU_AVX) )
481 pf[I_PRED_8x8_DDL] = x264_predict_8x8_ddl_avx;
482 pf[I_PRED_8x8_DDR] = x264_predict_8x8_ddr_avx;
483 pf[I_PRED_8x8_VL] = x264_predict_8x8_vl_avx;
484 pf[I_PRED_8x8_VR] = x264_predict_8x8_vr_avx;
485 pf[I_PRED_8x8_HD] = x264_predict_8x8_hd_avx;
486 #endif // HIGH_BIT_DEPTH
/* Fills the luma 4x4 intra-prediction table per CPU-flag level.
 * NOTE(review): `return;` guard lines, braces, and the HIGH_BIT_DEPTH
 * #if/#else split were dropped in extraction — the shown `if( !(cpu&FLAG) )`
 * conditions read inverted; restore from upstream before use. */
489 void x264_predict_4x4_init_mmx( int cpu, x264_predict_t pf[12] )
491 if( !(cpu&X264_CPU_MMX2) )
493 pf[I_PRED_4x4_DC] = x264_predict_4x4_dc_mmx2;
494 pf[I_PRED_4x4_DDL] = x264_predict_4x4_ddl_mmx2;
495 pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_mmx2;
496 pf[I_PRED_4x4_VL] = x264_predict_4x4_vl_mmx2;
497 pf[I_PRED_4x4_HD] = x264_predict_4x4_hd_mmx2;
498 pf[I_PRED_4x4_HU] = x264_predict_4x4_hu_mmx2;
500 if( !(cpu&X264_CPU_SSE2) )
502 pf[I_PRED_4x4_DDL] = x264_predict_4x4_ddl_sse2;
503 pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_sse2;
504 pf[I_PRED_4x4_HD] = x264_predict_4x4_hd_sse2;
505 pf[I_PRED_4x4_VL] = x264_predict_4x4_vl_sse2;
506 pf[I_PRED_4x4_VR] = x264_predict_4x4_vr_sse2;
507 if( !(cpu&X264_CPU_SSSE3) )
509 pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_ssse3;
510 pf[I_PRED_4x4_VR] = x264_predict_4x4_vr_ssse3;
511 pf[I_PRED_4x4_HD] = x264_predict_4x4_hd_ssse3;
512 if( !(cpu&X264_CPU_AVX) )
514 pf[I_PRED_4x4_DDL] = x264_predict_4x4_ddl_avx;
515 pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_avx;
516 pf[I_PRED_4x4_HD] = x264_predict_4x4_hd_avx;
517 pf[I_PRED_4x4_VL] = x264_predict_4x4_vl_avx;
518 pf[I_PRED_4x4_VR] = x264_predict_4x4_vr_avx;
/* 8-bit branch (original #else, line missing in extract). */
520 pf[I_PRED_4x4_VR] = x264_predict_4x4_vr_mmx2;
521 if( !(cpu&X264_CPU_SSSE3) )
523 pf[I_PRED_4x4_DDR] = x264_predict_4x4_ddr_ssse3;
524 pf[I_PRED_4x4_VR] = x264_predict_4x4_vr_ssse3;
525 pf[I_PRED_4x4_HD] = x264_predict_4x4_hd_ssse3;
526 if( cpu&X264_CPU_CACHELINE_64 )
527 pf[I_PRED_4x4_VR] = x264_predict_4x4_vr_ssse3_cache64;
528 #endif // HIGH_BIT_DEPTH