/*****************************************************************************
 * pixel.c: pixel metrics
 *****************************************************************************
 * Copyright (C) 2003-2011 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/
#include "common.h"

#if HAVE_MMX
#   include "x86/pixel.h"
#   include "x86/predict.h"
#endif
#if ARCH_PPC
#   include "ppc/pixel.h"
#endif
#if ARCH_ARM
#   include "arm/pixel.h"
#endif
#if ARCH_UltraSPARC
#   include "sparc/pixel.h"
#endif
/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                 \
    int i_sum = 0;                                \
    for( int y = 0; y < ly; y++ )                 \
    {                                             \
        for( int x = 0; x < lx; x++ )             \
            i_sum += abs( pix1[x] - pix2[x] );    \
        pix1 += i_stride_pix1;                    \
        pix2 += i_stride_pix2;                    \
    }                                             \
    return i_sum;                                 \
}

PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x16,   4, 16 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )
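/* Note: these plain-C versions are the reference implementations. x264_pixel_init()
 * at the bottom of this file stores them in an x264_pixel_function_t and then
 * overwrites individual entries with asm versions as CPU features allow, so a
 * typical caller goes through the table, e.g.
 *     int cost = h->pixf.sad[PIXEL_8x8]( fenc, FENC_STRIDE, fref, i_stride );
 * (illustrative call; the actual call sites live in the analysis/ME code). */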
/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                 \
    int i_sum = 0;                                \
    for( int y = 0; y < ly; y++ )                 \
    {                                             \
        for( int x = 0; x < lx; x++ )             \
        {                                         \
            int d = pix1[x] - pix2[x];            \
            i_sum += d*d;                         \
        }                                         \
        pix1 += i_stride_pix1;                    \
        pix2 += i_stride_pix2;                    \
    }                                             \
    return i_sum;                                 \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x16,   4, 16 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )
uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
{
    uint64_t i_ssd = 0;
    int y;
    int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        int x = 0;
        if( align )
            for( ; x < i_width-15; x += 16 )
                SSD(PIXEL_16x16);
        for( ; x < i_width-7; x += 8 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( int x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width & 7 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( int x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height & 7 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( int x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}
static void pixel_ssd_nv12_core( pixel *pixuv1, int stride1, pixel *pixuv2, int stride2, int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    *ssd_u = 0, *ssd_v = 0;
    for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
        for( int x = 0; x < width; x++ )
        {
            int du = pixuv1[2*x]   - pixuv2[2*x];
            int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
            *ssd_u += du*du;
            *ssd_v += dv*dv;
        }
}
void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height, ssd_u, ssd_v );
    if( i_width&7 )
    {
        uint64_t tmp[2];
        pixel_ssd_nv12_core( pix1+(i_width&~7), i_pix1, pix2+(i_width&~7), i_pix2, i_width&7, i_height, &tmp[0], &tmp[1] );
        *ssd_u += tmp[0];
        *ssd_v += tmp[1];
    }
}
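/* NV12 stores chroma interleaved (U0 V0 U1 V1 ...), which is why the core loop
 * reads pixuv[2*x] for U and pixuv[2*x+1] for V and why width counts UV pairs.
 * The wrapper runs the (possibly asm) core on the multiple-of-8 part of the
 * width and mops up any remaining columns with the C core above. */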
/****************************************************************************
 * pixel_var_wxh
 ****************************************************************************/
#define PIXEL_VAR_C( name, w, h ) \
static uint64_t name( pixel *pix, int i_stride ) \
{                                             \
    uint32_t sum = 0, sqr = 0;                \
    for( int y = 0; y < h; y++ )              \
    {                                         \
        for( int x = 0; x < w; x++ )          \
        {                                     \
            sum += pix[x];                    \
            sqr += pix[x] * pix[x];           \
        }                                     \
        pix += i_stride;                      \
    }                                         \
    return sum + ((uint64_t)sqr << 32);       \
}

PIXEL_VAR_C( x264_pixel_var_16x16, 16, 16 )
PIXEL_VAR_C( x264_pixel_var_8x16,   8, 16 )
PIXEL_VAR_C( x264_pixel_var_8x8,    8,  8 )
/****************************************************************************
 * pixel_var2_wxh
 ****************************************************************************/
#define PIXEL_VAR2_C( name, w, h, shift ) \
static int name( pixel *pix1, int i_stride1, pixel *pix2, int i_stride2, int *ssd ) \
{ \
    uint32_t var = 0, sum = 0, sqr = 0; \
    for( int y = 0; y < h; y++ ) \
    { \
        for( int x = 0; x < w; x++ ) \
        { \
            int diff = pix1[x] - pix2[x]; \
            sum += diff; \
            sqr += diff * diff; \
        } \
        pix1 += i_stride1; \
        pix2 += i_stride2; \
    } \
    sum = abs(sum); \
    var = sqr - ((uint64_t)sum * sum >> shift); \
    *ssd = sqr; \
    return var; \
}

PIXEL_VAR2_C( x264_pixel_var2_8x16, 8, 16, 7 )
PIXEL_VAR2_C( x264_pixel_var2_8x8,  8,  8, 6 )
#if BIT_DEPTH > 8
typedef uint32_t sum_t;
typedef uint64_t sum2_t;
#else
typedef uint16_t sum_t;
typedef uint32_t sum2_t;
#endif
#define BITS_PER_SUM (8 * sizeof(sum_t))
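/* The sum_t/sum2_t pair implements a poor man's SIMD: two sum_t-sized partial
 * results are packed into the low and high halves of one sum2_t and processed
 * with a single arithmetic stream, halving the work in the Hadamard loops
 * below. BITS_PER_SUM is the shift that separates the two lanes. */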
#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
    sum2_t t0 = s0 + s1;\
    sum2_t t1 = s0 - s1;\
    sum2_t t2 = s2 + s3;\
    sum2_t t3 = s2 - s3;\
    d0 = t0 + t2;\
    d2 = t0 - t2;\
    d1 = t1 + t3;\
    d3 = t1 - t3;\
}
// in: a pseudo-simd number of the form x+(y<<16)
// return: abs(x)+(abs(y)<<16)
static ALWAYS_INLINE sum2_t abs2( sum2_t a )
{
    sum2_t s = ((a>>(BITS_PER_SUM-1))&(((sum2_t)1<<BITS_PER_SUM)+1))*((sum_t)-1);
    return (a+s)^s;
}
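/* Worked example (assuming 8-bit depth, so BITS_PER_SUM == 16): packing x = -2
 * and y = 3 as a = x+(y<<16) gives a = 0x0002FFFE. The sign mask s evaluates to
 * 0x0000FFFF (all ones in the lane whose sign bit is set), so (a+s)^s =
 * 0x00030002 = abs(x)+(abs(y)<<16), with no branches and both lanes at once. */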
/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/
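/* Equivalently: if D is the 4x4 block of pixel differences and H4 is the order-4
 * Hadamard matrix, the score is sum(|H4 * D * H4^T|) >> 1; the final >>1 is the
 * normalization convention used by all the satd functions in this file. */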
static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[4][2];
    sum2_t a0, a1, a2, a3, b0, b1;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        tmp[i][0] = b0 + b1;
        tmp[i][1] = b0 - b1;
    }
    for( int i = 0; i < 2; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
        sum += ((sum_t)a0) + (a0>>BITS_PER_SUM);
    }
    return sum >> 1;
}
static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[4][4];
    sum2_t a0, a1, a2, a3;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
        a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
        a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
        a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    return (((sum_t)sum) + (sum>>BITS_PER_SUM)) >> 1;
}
#define PIXEL_SATD_C( w, h, sub )\
static int x264_pixel_satd_##w##x##h( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )\
{\
    int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
            + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
    if( w==16 )\
        sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
            + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
    if( h==16 )\
        sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
            + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
    if( w==16 && h==16 )\
        sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
            + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
    return sum;\
}
PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 16, 8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 4,  16, x264_pixel_satd_4x4 )
PIXEL_SATD_C( 4,  8,  x264_pixel_satd_4x4 )
static NOINLINE int sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[8][4];
    sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
    sum2_t sum = 0;
    for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        a4 = pix1[4] - pix2[4];
        a5 = pix1[5] - pix2[5];
        b2 = (a4+a5) + ((a4-a5)<<BITS_PER_SUM);
        a6 = pix1[6] - pix2[6];
        a7 = pix1[7] - pix2[7];
        b3 = (a6+a7) + ((a6-a7)<<BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
        b0  = abs2(a0+a4) + abs2(a0-a4);
        b0 += abs2(a1+a5) + abs2(a1-a5);
        b0 += abs2(a2+a6) + abs2(a2-a6);
        b0 += abs2(a3+a7) + abs2(a3-a7);
        sum += (sum_t)b0 + (b0>>BITS_PER_SUM);
    }
    return sum;
}
static int x264_pixel_sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
    return (sum+2)>>2;
}

static int x264_pixel_sa8d_16x16( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
            + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
            + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
            + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
    return (sum+2)>>2;
}
static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, int stride )
{
    sum2_t tmp[32];
    sum2_t a0, a1, a2, a3, dc;
    sum2_t sum4 = 0, sum8 = 0;
    for( int i = 0; i < 8; i++, pix+=stride )
    {
        sum2_t *t = tmp + (i&3) + (i&4)*4;
        a0 = (pix[0]+pix[1]) + ((sum2_t)(pix[0]-pix[1])<<BITS_PER_SUM);
        a1 = (pix[2]+pix[3]) + ((sum2_t)(pix[2]-pix[3])<<BITS_PER_SUM);
        t[0] = a0 + a1;
        t[4] = a0 - a1;
        a2 = (pix[4]+pix[5]) + ((sum2_t)(pix[4]-pix[5])<<BITS_PER_SUM);
        a3 = (pix[6]+pix[7]) + ((sum2_t)(pix[6]-pix[7])<<BITS_PER_SUM);
        t[8] = a2 + a3;
        t[12] = a2 - a3;
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
        tmp[i*4+0] = a0;
        tmp[i*4+1] = a1;
        tmp[i*4+2] = a2;
        tmp[i*4+3] = a3;
        sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
        sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    dc = (sum_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
    sum4 = (sum_t)sum4 + (sum4>>BITS_PER_SUM) - dc;
    sum8 = (sum_t)sum8 + (sum8>>BITS_PER_SUM) - dc;
    return ((uint64_t)sum8<<32) + sum4;
}
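/* The return value packs two related scores: the low 32 bits hold the sum of
 * absolute 4x4 Hadamard coefficients and the high 32 bits the sum of absolute
 * 8x8 Hadamard coefficients, each with the DC term subtracted (hence "ac").
 * The HADAMARD_AC wrapper below accumulates up to four 8x8 blocks and then
 * rescales both halves in a single shift/mask step. */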
#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, int stride )\
{\
    uint64_t sum = pixel_hadamard_ac( pix, stride );\
    if( w==16 )\
        sum += pixel_hadamard_ac( pix+8, stride );\
    if( h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride, stride );\
    if( w==16 && h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}

HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )
/****************************************************************************
 * pixel_sad_x4
 ****************************************************************************/
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )

#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif
#endif // !HIGH_BIT_DEPTH
/****************************************************************************
 * pixel_satd_x4
 * no faster than single satd, but needed for satd to be a drop-in replacement for sad
 ****************************************************************************/
#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )

SATD_X_DECL7()
#if HAVE_MMX
SATD_X_DECL7( _mmx2 )
#if !HIGH_BIT_DEPTH
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
SATD_X_DECL7( _avx )
SATD_X_DECL7( _xop )
#endif // !HIGH_BIT_DEPTH
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif
#endif // !HIGH_BIT_DEPTH
#define INTRA_MBCMP_8x8( mbcmp, cpu, cpu2 )\
void x264_intra_##mbcmp##_x3_8x8##cpu( pixel *fenc, pixel edge[36], int res[3] )\
{\
    ALIGNED_ARRAY_16( pixel, pix, [8*FDEC_STRIDE] );\
    x264_predict_8x8_v##cpu2( pix, edge );\
    res[0] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_h##cpu2( pix, edge );\
    res[1] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_dc##cpu2( pix, edge );\
    res[2] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP_8x8( sad,, _c )
INTRA_MBCMP_8x8(sa8d,, _c )
#if HIGH_BIT_DEPTH && HAVE_MMX
INTRA_MBCMP_8x8( sad, _mmx2, _c )
INTRA_MBCMP_8x8(sa8d, _sse2, _sse2 )
#endif
#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma, cpu, cpu2 )\
void x264_intra_##mbcmp##_x3_##size##chroma##cpu( pixel *fenc, pixel *fdec, int res[3] )\
{\
    x264_predict_##size##chroma##_##pred1##cpu2( fdec );\
    res[0] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##chroma##_##pred2##cpu2( fdec );\
    res[1] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##chroma##_##pred3##cpu2( fdec );\
    res[2] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP( sad,  4x4,   v, h, dc,  ,, _c )
INTRA_MBCMP(satd,  4x4,   v, h, dc,  ,, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c,, _c )
INTRA_MBCMP(satd,  8x8,  dc, h,  v, c,, _c )
INTRA_MBCMP( sad,  8x16, dc, h,  v, c,, _c )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c,, _c )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  ,, _c )
INTRA_MBCMP(satd, 16x16,  v, h, dc,  ,, _c )
#if HAVE_MMX
#if HIGH_BIT_DEPTH
INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _mmx2, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _mmx2, _c )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _mmx2, _mmx2 )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _sse2, _sse2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _sse2, _sse2 )
INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _ssse3, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _ssse3, _sse2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _ssse3, _sse2 )
#else
#define x264_predict_8x16c_v_mmx2 x264_predict_8x16c_v_mmx
INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _mmx2, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _mmx2, _mmx2 )
INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _sse2, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _sse2, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _ssse3, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _sse4, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _avx, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _xop, _mmx2 )
#endif
#endif
// No C implementation of intra_satd_x9. See checkasm for its behavior,
// or see x264_mb_analyse_intra for the entirely different algorithm we
// use when lacking an asm implementation of it.
/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
static void ssim_4x4x2_core( const pixel *pix1, int stride1,
                             const pixel *pix2, int stride2,
                             int sums[2][4] )
{
    for( int z = 0; z < 2; z++ )
    {
        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
        for( int y = 0; y < 4; y++ )
            for( int x = 0; x < 4; x++ )
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        pix1 += 4;
        pix2 += 4;
    }
}
static float ssim_end1( int s1, int s2, int ss, int s12 )
{
/* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
 * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
 * Maximum value for 9-bit is: ss*64 = (2^9-1)^2*16*4*64 = 1069551616, which will not overflow. */
#if BIT_DEPTH > 9
#define type float
    static const float ssim_c1 = .01*.01*PIXEL_MAX*PIXEL_MAX*64;
    static const float ssim_c2 = .03*.03*PIXEL_MAX*PIXEL_MAX*64*63;
#else
#define type int
    static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
#endif
    type fs1 = s1;
    type fs2 = s2;
    type fss = ss;
    type fs12 = s12;
    type vars = fss*64 - fs1*fs1 - fs2*fs2;
    type covar = fs12*64 - fs1*fs2;
    return (float)(2*fs1*fs2 + ssim_c1) * (float)(2*covar + ssim_c2)
         / ((float)(fs1*fs1 + fs2*fs2 + ssim_c1) * (float)(vars + ssim_c2));
#undef type
}
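/* This is one term of the usual SSIM formula,
 *   (2*mu1*mu2 + C1)*(2*cov + C2) / ((mu1^2 + mu2^2 + C1)*(var1 + var2 + C2)),
 * evaluated directly on raw sums over a group of four neighbouring 4x4 windows
 * (64 pixels): s1/s2 are the pixel sums, ss the combined sum of squares of both
 * planes, s12 the cross sum. The 64 and 64*63 factors folded into ssim_c1 and
 * ssim_c2 scale the constants to match these unnormalized sums. */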
static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    float ssim = 0.0;
    for( int i = 0; i < width; i++ )
        ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
                           sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
                           sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
                           sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
    return ssim;
}
float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           pixel *pix1, int stride1,
                           pixel *pix2, int stride2,
                           int width, int height, void *buf, int *cnt )
{
    int z = 0;
    float ssim = 0.0;
    int (*sum0)[4] = buf;
    int (*sum1)[4] = sum0 + (width >> 2) + 3;
    width >>= 2;
    height >>= 2;
    for( int y = 1; y < height; y++ )
    {
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( int x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        for( int x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    *cnt = (height-1) * (width-1);
    return ssim;
}
static int pixel_vsad( pixel *src, int stride, int height )
{
    int score = 0;
    for( int i = 1; i < height; i++, src += stride )
        for( int j = 0; j < 16; j++ )
            score += abs(src[j] - src[j+stride]);
    return score;
}
int x264_field_vsad( x264_t *h, int mb_x, int mb_y )
{
    int score_field, score_frame;
    int stride = h->fenc->i_stride[0];
    int mb_stride = h->mb.i_mb_stride;
    pixel *fenc = h->fenc->plane[0] + 16 * (mb_x + mb_y * stride);
    int mb_xy = mb_x + mb_y*mb_stride;

    /* We don't want to analyze pixels outside the frame, as it gives inaccurate results. */
    int mbpair_height = X264_MIN( h->param.i_height - mb_y * 16, 32 );
    score_frame  = h->pixf.vsad( fenc,          stride, mbpair_height );
    score_field  = h->pixf.vsad( fenc,        stride*2, mbpair_height >> 1 );
    score_field += h->pixf.vsad( fenc+stride, stride*2, mbpair_height >> 1 );

    if( mb_x > 0 )
        score_field += 512 - h->mb.field[mb_xy        -1]*1024;
    if( mb_y > 0 )
        score_field += 512 - h->mb.field[mb_xy-mb_stride]*1024;

    return (score_field < score_frame);
}
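/* Used by the MBAFF frame/field decision: vsad measures vertical gradient
 * energy, so if sampling every other line (field ordering) gives a lower score
 * than the frame ordering, field coding is preferred for this MB pair. The
 * +/-512 terms bias the choice toward whatever the left and top neighbouring
 * pairs already decided. */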
/****************************************************************************
 * successive elimination
 ****************************************************************************/
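/* ADS ("absolute differences of sums") successive elimination: enc_dc holds the
 * sums of the source block's sub-blocks and *sums points at precomputed sums for
 * each candidate position. Because |sum(a) - sum(b)| <= sum(|a-b|), ads is a
 * cheap lower bound on SAD (plus the MV cost), so any candidate whose bound
 * already reaches thresh can be skipped without computing a full SAD; the
 * surviving candidates' indices are compacted into mvs[] and their count
 * returned. Used by the exhaustive (ESA/TESA) motion search. */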
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}
/****************************************************************************
 * x264_pixel_init:
 ****************************************************************************/
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );
#define INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
    pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
    INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
    pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
    INIT4_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
    INIT5_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
    INIT6_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
#define INIT8_NAME( name1, name2, cpu ) \
    INIT7_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x16]  = x264_pixel_##name2##_4x16##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )
#define INIT8( name, cpu ) INIT8_NAME( name, name, cpu )

#define INIT_ADS( cpu ) \
    pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
    pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
    pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;
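    /* Fill every entry with the C reference version first, then let each of the
     * CPU-feature blocks below overwrite the entries it can accelerate; the last
     * applicable assignment wins, so the strongest available implementation ends
     * up in the dispatch table. */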
    INIT8( sad, );
    INIT8_NAME( sad_aligned, sad, );
    INIT7( sad_x3, );
    INIT7( sad_x4, );
    INIT8( ssd, );
    INIT8( satd, );
    INIT7( satd_x3, );
    INIT7( satd_x4, );
    INIT4( hadamard_ac, );
    INIT_ADS( );

    pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
    pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16;
    pixf->var[PIXEL_8x16]   = x264_pixel_var_8x16;
    pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8;
    pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16;
    pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8;

    pixf->ssd_nv12_core   = pixel_ssd_nv12_core;
    pixf->ssim_4x4x2_core = ssim_4x4x2_core;
    pixf->ssim_end4       = ssim_end4;
    pixf->vsad            = pixel_vsad;

    pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
    pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
    pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
    pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
    pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
    pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
    pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c;
    pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c;
    pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
    pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;
#if HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX2 )
    {
        INIT7( sad_x3, _mmx2 );
        INIT7( sad_x4, _mmx2 );
        INIT8( satd, _mmx2 );
        INIT7( satd_x3, _mmx2 );
        INIT7( satd_x4, _mmx2 );
        INIT4( hadamard_ac, _mmx2 );
        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_mmx2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
        pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_mmx2;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_mmx2;

        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
    }
    if( cpu&X264_CPU_SSE2 )
    {
        INIT4_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_sse2;
        pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_sse2;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_sse2;
        pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_sse2;
    }
    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }
        pixf->vsad = x264_pixel_vsad_sse2;
        pixf->intra_sad_x3_8x8   = x264_intra_sad_x3_8x8_sse2;
        pixf->intra_sad_x3_8x8c  = x264_intra_sad_x3_8x8c_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
    }
    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pixf->sad[PIXEL_8x16]    = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4]  = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4]  = x264_pixel_sad_x4_8x4_sse2;
    }
    if( cpu&X264_CPU_SSSE3 )
    {
        INIT4_NAME( sad_aligned, sad, _ssse3_aligned );
        INIT7( sad, _ssse3 );
        INIT7( sad_x3, _ssse3 );
        INIT7( sad_x4, _ssse3 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
        }
        pixf->vsad = x264_pixel_vsad_ssse3;
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
        pixf->intra_sad_x3_4x4   = x264_intra_sad_x3_4x4_ssse3;
        pixf->intra_sad_x3_8x8   = x264_intra_sad_x3_8x8_ssse3;
        pixf->intra_sad_x3_8x8c  = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_ssse3;
    }
    if( cpu&X264_CPU_SSE4 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
    }
    if( cpu&X264_CPU_AVX )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _avx );
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
        pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
    }
    if( cpu&X264_CPU_XOP )
    {
        pixf->vsad = x264_pixel_vsad_xop;
    }
#endif // HAVE_MMX
#else // !HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        INIT8( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMX2 )
    {
        INIT8( sad, _mmx2 );
        INIT8_NAME( sad_aligned, sad, _mmx2 );
        INIT7( sad_x3, _mmx2 );
        INIT7( sad_x4, _mmx2 );
        INIT8( satd, _mmx2 );
        INIT7( satd_x3, _mmx2 );
        INIT7( satd_x4, _mmx2 );
        INIT4( hadamard_ac, _mmx2 );
        INIT_ADS( _mmx2 );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
        pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_mmx2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmx2;
#if ARCH_X86
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmx2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmx2;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmx2;
        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_mmx2;
        pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8_mmx2;
        pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16_mmx2;
        pixf->vsad = x264_pixel_vsad_mmx2;

        if( cpu&X264_CPU_CACHELINE_32 )
        {
            INIT5( sad, _cache32_mmx2 );
            INIT4( sad_x3, _cache32_mmx2 );
            INIT4( sad_x4, _cache32_mmx2 );
        }
        else if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT5( sad, _cache64_mmx2 );
            INIT4( sad_x3, _cache64_mmx2 );
            INIT4( sad_x4, _cache64_mmx2 );
        }
#else
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmx2;
            pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmx2;
            pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmx2;
            pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmx2;
            pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmx2;
            pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmx2;
            pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmx2;
        }
#endif
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_mmx2;
        pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_mmx2;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
    }
    if( cpu&X264_CPU_SSE2 )
    {
        INIT5( ssd, _sse2slow );
        INIT2_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
        pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_sse2;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_sse2;
        pixf->vsad = x264_pixel_vsad_sse2;
    }

    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT6( satd, _sse2 );
        pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_sse2;
        INIT6( satd_x3, _sse2 );
        INIT6( satd_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }

        pixf->var[PIXEL_8x8]  = x264_pixel_var_8x8_sse2;
        pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_sse2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_sse2;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse2;
        pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( ssd, _sse2); /* faster for width 16 on p4 */
#if ARCH_X86
            INIT2( sad, _cache64_sse2 );
            INIT2( sad_x3, _cache64_sse2 );
            INIT2( sad_x4, _cache64_sse2 );
#endif
            if( cpu&X264_CPU_SSE2_IS_FAST )
            {
                pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
                pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
            }
        }

        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            INIT2( sad_x3, _sse2_misalign );
            INIT2( sad_x4, _sse2_misalign );
        }
    }

    if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
    {
        pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad[PIXEL_8x16]    = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4]  = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4]  = x264_pixel_sad_x4_8x4_sse2;
    }

    if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
    {
        INIT2( sad, _sse3 );
        INIT2( sad_x3, _sse3 );
        INIT2( sad_x4, _sse3 );
    }
    if( cpu&X264_CPU_SSSE3 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_ssse3;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_ssse3;
            pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_ssse3;
#if ARCH_X86_64
            pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_ssse3;
#endif
        }

        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            INIT8( ssd, _ssse3 );
            pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
            pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
            INIT8( satd, _ssse3 );
            INIT7( satd_x3, _ssse3 );
            INIT7( satd_x4, _ssse3 );
        }
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_ssse3;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_ssse3;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_ssse3;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( sad, _cache64_ssse3 );
            INIT2( sad_x3, _cache64_ssse3 );
            INIT2( sad_x4, _cache64_ssse3 );
        }
        if( cpu&X264_CPU_SLOW_ATOM || !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
        {
            INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
        }
    }
    if( cpu&X264_CPU_SSE4 )
    {
        INIT8( satd, _sse4 );
        INIT7( satd_x3, _sse4 );
        INIT7( satd_x4, _sse4 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_sse4;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_sse4;
            pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_sse4;
#if ARCH_X86_64
            pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_sse4;
#endif
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse4;
    }
    if( cpu&X264_CPU_AVX )
    {
        INIT8( satd, _avx );
        INIT7( satd_x3, _avx );
        INIT7( satd_x4, _avx );

        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _avx );
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_avx;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_avx;
            pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_avx;
#if ARCH_X86_64
            pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_avx;
#endif
        }

        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_avx;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
        pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_avx;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
        pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
    }
    if( cpu&X264_CPU_XOP )
    {
        INIT7( satd, _xop );
        INIT7( satd_x3, _xop );
        INIT7( satd_x4, _xop );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _xop );
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_xop;
        }

        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_xop;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_xop;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_xop;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_xop;
        pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_xop;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_xop;
        pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_xop;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_xop;
    }
#endif // HAVE_MMX
#if HAVE_ARMV6
    if( cpu&X264_CPU_ARMV6 )
    {
        pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
    }
    if( cpu&X264_CPU_NEON )
    {
        INIT5( sad, _neon );
        INIT5( sad_aligned, _neon );
        INIT7( sad_x3, _neon );
        INIT7( sad_x4, _neon );
        INIT7( ssd, _neon );
        INIT7( satd, _neon );
        INIT7( satd_x3, _neon );
        INIT7( satd_x4, _neon );
        INIT4( hadamard_ac, _neon );
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
        pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8_neon;

        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
        pixf->ssim_end4         = x264_pixel_ssim_end4_neon;

        if( cpu&X264_CPU_FAST_NEON_MRC )
        {
            pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
            pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
            pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
            pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
        }
        else    // really just scheduled for dual issue / A8
        {
            INIT5( sad_aligned, _neon_dual );
        }
    }
#endif
#endif // HIGH_BIT_DEPTH
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif

#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
    INIT4( sad, _vis );
    INIT4( sad_x3, _vis );
    INIT4( sad_x4, _vis );
#endif
#endif // !HIGH_BIT_DEPTH

    pixf->ads[PIXEL_8x16] =
    pixf->ads[PIXEL_8x4] =
    pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
    pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}