/*****************************************************************************
 * pixel.c: pixel metrics
 *****************************************************************************
 * Copyright (C) 2003-2011 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/
#include "common.h"

#if HAVE_MMX
#   include "x86/pixel.h"
#   include "x86/predict.h"
#endif
#if ARCH_PPC
#   include "ppc/pixel.h"
#endif
#if ARCH_ARM
#   include "arm/pixel.h"
#endif
#if ARCH_UltraSPARC
#   include "sparc/pixel.h"
#endif
/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                 \
    int i_sum = 0;                                \
    for( int y = 0; y < ly; y++ )                 \
    {                                             \
        for( int x = 0; x < lx; x++ )             \
        {                                         \
            i_sum += abs( pix1[x] - pix2[x] );    \
        }                                         \
        pix1 += i_stride_pix1;                    \
        pix2 += i_stride_pix2;                    \
    }                                             \
    return i_sum;                                 \
}

PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )

/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                 \
    int i_sum = 0;                                \
    for( int y = 0; y < ly; y++ )                 \
    {                                             \
        for( int x = 0; x < lx; x++ )             \
        {                                         \
            int d = pix1[x] - pix2[x];            \
            i_sum += d*d;                         \
        }                                         \
        pix1 += i_stride_pix1;                    \
        pix2 += i_stride_pix2;                    \
    }                                             \
    return i_sum;                                 \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )
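
/* Sum of squared differences over an arbitrary width x height region: the bulk
 * is covered with the largest SSD block that alignment permits (16x16 when all
 * pointers and strides are 16-byte aligned, 8-wide blocks otherwise), and the
 * leftover right/bottom edges fall back to a per-pixel loop. */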
uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
{
    uint64_t i_ssd = 0;
    int y;
    int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        int x = 0;
        if( align )
            for( ; x < i_width-15; x += 16 )
                SSD(PIXEL_16x16);
        for( ; x < i_width-7; x += 8 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( int x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width & 7 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( int x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height & 7 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( int x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}
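
/* SSD for interleaved NV12 chroma: width counts U/V sample pairs, so within a
 * pair the even offset is U and the odd offset is V, accumulated separately. */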
static void pixel_ssd_nv12_core( pixel *pixuv1, int stride1, pixel *pixuv2, int stride2, int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    *ssd_u = 0, *ssd_v = 0;
    for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
        for( int x = 0; x < width; x++ )
        {
            int du = pixuv1[2*x]   - pixuv2[2*x];
            int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
            *ssd_u += du*du;
            *ssd_v += dv*dv;
        }
}

void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height, ssd_u, ssd_v );
    if( i_width&7 )
    {
        uint64_t tmp[2];
        /* the remainder starts at pair index (i_width&~7), i.e. 2x that in interleaved samples */
        pixel_ssd_nv12_core( pix1+(i_width&~7)*2, i_pix1, pix2+(i_width&~7)*2, i_pix2, i_width&7, i_height, &tmp[0], &tmp[1] );
        *ssd_u += tmp[0];
        *ssd_v += tmp[1];
    }
}

/****************************************************************************
 * pixel_var_wxh
 ****************************************************************************/
#define PIXEL_VAR_C( name, w ) \
static uint64_t name( pixel *pix, int i_stride ) \
{                                             \
    uint32_t sum = 0, sqr = 0;                \
    for( int y = 0; y < w; y++ )              \
    {                                         \
        for( int x = 0; x < w; x++ )          \
        {                                     \
            sum += pix[x];                    \
            sqr += pix[x] * pix[x];           \
        }                                     \
        pix += i_stride;                      \
    }                                         \
    return sum + ((uint64_t)sqr << 32);       \
}

PIXEL_VAR_C( x264_pixel_var_16x16, 16 )
PIXEL_VAR_C( x264_pixel_var_8x8,    8 )

/****************************************************************************
 * pixel_var2_wxh
 ****************************************************************************/
static int pixel_var2_8x8( pixel *pix1, int i_stride1, pixel *pix2, int i_stride2, int *ssd )
{
    uint32_t var = 0, sum = 0, sqr = 0;
    for( int y = 0; y < 8; y++ )
    {
        for( int x = 0; x < 8; x++ )
        {
            int diff = pix1[x] - pix2[x];
            sum += diff;
            sqr += diff * diff;
        }
        pix1 += i_stride1;
        pix2 += i_stride2;
    }
    sum = abs(sum);
    var = sqr - ((uint64_t)sum * sum >> 6);
    *ssd = sqr;
    return var;
}

#if BIT_DEPTH > 8
typedef uint32_t sum_t;
typedef uint64_t sum2_t;
#else
typedef uint16_t sum_t;
typedef uint32_t sum2_t;
#endif
#define BITS_PER_SUM (8 * sizeof(sum_t))
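
/* Poor man's SIMD: a sum2_t carries two sum_t-sized lanes packed as
 * x + (y << BITS_PER_SUM), so a single scalar add or subtract operates on two
 * packed differences at once. */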

#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
    sum2_t t0 = s0 + s1;\
    sum2_t t1 = s0 - s1;\
    sum2_t t2 = s2 + s3;\
    sum2_t t3 = s2 - s3;\
    d0 = t0 + t2;\
    d2 = t0 - t2;\
    d1 = t1 + t3;\
    d3 = t1 - t3;\
}

// in: a pseudo-simd number of the form x+(y<<16)
// return: abs(x)+(abs(y)<<16)
static ALWAYS_INLINE sum2_t abs2( sum2_t a )
{
    sum2_t s = ((a>>(BITS_PER_SUM-1))&(((sum2_t)1<<BITS_PER_SUM)+1))*((sum_t)-1);
    return (a+s)^s;
}
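// e.g. with BITS_PER_SUM == 16: a = 0xFFFF0005 encodes x = 5, y = -1; s becomes
// 0xFFFF0000 (an all-ones mask in each negative lane), and (a+s)^s yields
// 0x00010005, i.e. abs(5) + (abs(-1)<<16) -- the branchless abs(x) = (x+s)^s
// identity applied to both lanes at once.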

/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/
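/* The C versions below use the packed sum2_t lanes: the first butterfly stores
 * each row's sum in the low lane and its difference in the high lane, the
 * vertical HADAMARD4 then operates on the packed words, and the lanes are
 * separated again when the absolute values are accumulated.  The final shift
 * is the usual SATD normalization of the unnormalized Hadamard transform. */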

static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[4][2];
    sum2_t a0, a1, a2, a3, b0, b1;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        tmp[i][0] = b0 + b1;
        tmp[i][1] = b0 - b1;
    }
    for( int i = 0; i < 2; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
        sum += ((sum_t)a0) + (a0>>BITS_PER_SUM);
    }
    return sum >> 1;
}

static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[4][4];
    sum2_t a0, a1, a2, a3;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
        a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
        a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
        a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    return (((sum_t)sum) + (sum>>BITS_PER_SUM)) >> 1;
}

#define PIXEL_SATD_C( w, h, sub )\
static int x264_pixel_satd_##w##x##h( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )\
{\
    int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
            + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
    if( w==16 )\
        sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
            + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
    if( h==16 )\
        sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
            + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
    if( w==16 && h==16 )\
        sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
            + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
    return sum;\
}
PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 16,  8, x264_pixel_satd_8x4 )
PIXEL_SATD_C(  8, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C(  8,  8, x264_pixel_satd_8x4 )
PIXEL_SATD_C(  4,  8, x264_pixel_satd_4x4 )

static NOINLINE int sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[8][4];
    sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
    sum2_t sum = 0;
    for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        a4 = pix1[4] - pix2[4];
        a5 = pix1[5] - pix2[5];
        b2 = (a4+a5) + ((a4-a5)<<BITS_PER_SUM);
        a6 = pix1[6] - pix2[6];
        a7 = pix1[7] - pix2[7];
        b3 = (a6+a7) + ((a6-a7)<<BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
        b0  = abs2(a0+a4) + abs2(a0-a4);
        b0 += abs2(a1+a5) + abs2(a1-a5);
        b0 += abs2(a2+a6) + abs2(a2-a6);
        b0 += abs2(a3+a7) + abs2(a3-a7);
        sum += (sum_t)b0 + (b0>>BITS_PER_SUM);
    }
    return sum;
}

static int x264_pixel_sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
    return (sum+2)>>2;
}

static int x264_pixel_sa8d_16x16( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
            + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
            + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
            + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
    return (sum+2)>>2;
}
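
/* pixel_hadamard_ac() transforms one 8x8 block and returns two packed sums:
 * the low 32 bits accumulate the absolute 4x4 Hadamard coefficients and the
 * high 32 bits the absolute 8x8 Hadamard coefficients, with the DC term
 * subtracted from both so that only AC energy remains.  HADAMARD_AC() below
 * adds up the 8x8 tiles of a block and rescales the two halves. */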
static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, int stride )
{
    sum2_t tmp[32];
    sum2_t a0, a1, a2, a3, dc;
    sum2_t sum4 = 0, sum8 = 0;
    for( int i = 0; i < 8; i++, pix+=stride )
    {
        sum2_t *t = tmp + (i&3) + (i&4)*4;
        a0 = (pix[0]+pix[1]) + ((sum2_t)(pix[0]-pix[1])<<BITS_PER_SUM);
        a1 = (pix[2]+pix[3]) + ((sum2_t)(pix[2]-pix[3])<<BITS_PER_SUM);
        t[0] = a0 + a1;
        t[4] = a0 - a1;
        a2 = (pix[4]+pix[5]) + ((sum2_t)(pix[4]-pix[5])<<BITS_PER_SUM);
        a3 = (pix[6]+pix[7]) + ((sum2_t)(pix[6]-pix[7])<<BITS_PER_SUM);
        t[8] = a2 + a3;
        t[12] = a2 - a3;
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
        tmp[i*4+0] = a0;
        tmp[i*4+1] = a1;
        tmp[i*4+2] = a2;
        tmp[i*4+3] = a3;
        sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
        sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    dc = (sum_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
    sum4 = (sum_t)sum4 + (sum4>>BITS_PER_SUM) - dc;
    sum8 = (sum_t)sum8 + (sum8>>BITS_PER_SUM) - dc;
    return ((uint64_t)sum8<<32) + sum4;
}

#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, int stride )\
{\
    uint64_t sum = pixel_hadamard_ac( pix, stride );\
    if( w==16 )\
        sum += pixel_hadamard_ac( pix+8, stride );\
    if( h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride, stride );\
    if( w==16 && h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )

/****************************************************************************
 * pixel_sad_x4
 ****************************************************************************/
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )

#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif
#endif // !HIGH_BIT_DEPTH

/****************************************************************************
 * pixel_satd_x4
 * no faster than single satd, but needed for satd to be a drop-in replacement for sad
 ****************************************************************************/

#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )

SATD_X_DECL7()
#if HAVE_MMX
SATD_X_DECL7( _mmx2 )
#if !HIGH_BIT_DEPTH
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
SATD_X_DECL7( _avx )
#endif // !HIGH_BIT_DEPTH
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif
#endif // !HIGH_BIT_DEPTH
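
/* Intra mbcmp helpers: each one runs the three candidate prediction modes for
 * a block size, reconstructs them into a temporary buffer (8x8 luma) or the
 * fdec buffer (other sizes), and scores each against the encoded block with
 * the chosen metric (sad, satd or sa8d), so mode decision gets all three
 * costs from one call. */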
#define INTRA_MBCMP_8x8( mbcmp, cpu, cpu2 )\
void x264_intra_##mbcmp##_x3_8x8##cpu( pixel *fenc, pixel edge[36], int res[3] )\
{\
    ALIGNED_ARRAY_16( pixel, pix, [8*FDEC_STRIDE] );\
    x264_predict_8x8_v##cpu2( pix, edge );\
    res[0] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_h##cpu2( pix, edge );\
    res[1] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_dc##cpu2( pix, edge );\
    res[2] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP_8x8( sad,, _c )
INTRA_MBCMP_8x8(sa8d,, _c )
#if HIGH_BIT_DEPTH && HAVE_MMX
INTRA_MBCMP_8x8( sad, _mmx2,  _c )
INTRA_MBCMP_8x8( sad, _sse2,  _sse2 )
INTRA_MBCMP_8x8( sad, _ssse3, _sse2 )
INTRA_MBCMP_8x8(sa8d, _sse2,  _sse2 )
#endif

#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma, cpu, cpu2 )\
void x264_intra_##mbcmp##_x3_##size##chroma##cpu( pixel *fenc, pixel *fdec, int res[3] )\
{\
    x264_predict_##size##chroma##_##pred1##cpu2( fdec );\
    res[0] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##chroma##_##pred2##cpu2( fdec );\
    res[1] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##chroma##_##pred3##cpu2( fdec );\
    res[2] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP( sad,  4x4,   v, h, dc,  ,, _c )
INTRA_MBCMP(satd,  4x4,   v, h, dc,  ,, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c,, _c )
INTRA_MBCMP(satd,  8x8,  dc, h,  v, c,, _c )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  ,, _c )
INTRA_MBCMP(satd, 16x16,  v, h, dc,  ,, _c )

#if HIGH_BIT_DEPTH && HAVE_MMX
INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _mmx2, _c )
INTRA_MBCMP(satd,  4x4,   v, h, dc,  , _mmx2, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _mmx2, _c )
INTRA_MBCMP(satd,  8x8,  dc, h,  v, c, _mmx2, _c )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _mmx2, _mmx2 )
INTRA_MBCMP(satd, 16x16,  v, h, dc,  , _mmx2, _mmx2 )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _sse2, _sse2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _sse2, _sse2 )
INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _ssse3, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _ssse3, _sse2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _ssse3, _sse2 )
#endif

/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
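/* SSIM is computed on a 4x4 grid: ssim_4x4x2_core() gathers per-4x4-block sums
 * (s1 = sum of pix1, s2 = sum of pix2, ss = sum of squares, s12 = cross term),
 * ssim_end1() combines four neighbouring blocks into one overlapped 8x8 window
 * and evaluates
 *   (2*mu1*mu2 + C1)*(2*cov + C2) / ((mu1^2 + mu2^2 + C1)*(var1 + var2 + C2))
 * in a scaled integer domain, and ssim_end4()/x264_pixel_ssim_wxh() accumulate
 * the per-window scores over the frame. */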
static void ssim_4x4x2_core( const pixel *pix1, int stride1,
                             const pixel *pix2, int stride2,
                             int sums[2][4] )
{
    for( int z = 0; z < 2; z++ )
    {
        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
        for( int y = 0; y < 4; y++ )
            for( int x = 0; x < 4; x++ )
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        pix1 += 4;
        pix2 += 4;
    }
}

static float ssim_end1( int s1, int s2, int ss, int s12 )
{
    /* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
     * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
     * Maximum value for 9-bit is: ss*64 = (2^9-1)^2*16*4*64 = 1069551616, which will not overflow. */
#if BIT_DEPTH > 9
#define type float
    static const float ssim_c1 = .01*.01*PIXEL_MAX*PIXEL_MAX*64;
    static const float ssim_c2 = .03*.03*PIXEL_MAX*PIXEL_MAX*64*63;
#else
#define type int
    static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
#endif
    type fs1 = s1;
    type fs2 = s2;
    type fss = ss;
    type fs12 = s12;
    type vars = fss*64 - fs1*fs1 - fs2*fs2;
    type covar = fs12*64 - fs1*fs2;
    return (float)(2*fs1*fs2 + ssim_c1) * (float)(2*covar + ssim_c2)
         / ((float)(fs1*fs1 + fs2*fs2 + ssim_c1) * (float)(vars + ssim_c2));
#undef type
}

static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    float ssim = 0.0;
    for( int i = 0; i < width; i++ )
        ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
                           sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
                           sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
                           sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
    return ssim;
}

float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           pixel *pix1, int stride1,
                           pixel *pix2, int stride2,
                           int width, int height, void *buf, int *cnt )
{
    int z = 0;
    float ssim = 0.0;
    int (*sum0)[4] = buf;
    int (*sum1)[4] = sum0 + (width >> 2) + 3;
    width >>= 2;
    height >>= 2;
    for( int y = 1; y < height; y++ )
    {
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( int x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        for( int x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    *cnt = (height-1) * (width-1);
    return ssim;
}

static int pixel_vsad( pixel *src, int stride, int height )
{
    int score = 0;
    for( int i = 1; i < height; i++, src += stride )
        for( int j = 0; j < 16; j++ )
            score += abs(src[j] - src[j+stride]);
    return score;
}
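
/* Interlace analysis: compare the vertical SAD of a 16x32 macroblock pair
 * coded as one frame against the two field halves coded separately, bias the
 * field score by the field/frame choice of the left and top neighbours, and
 * report whether field coding looks cheaper. */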
int x264_field_vsad( x264_t *h, int mb_x, int mb_y )
{
    int score_field, score_frame;
    int stride = h->fenc->i_stride[0];
    int mb_stride = h->mb.i_mb_stride;
    pixel *fenc = h->fenc->plane[0] + 16 * (mb_x + mb_y * stride);
    int mb_xy = mb_x + mb_y*mb_stride;

    /* We don't want to analyze pixels outside the frame, as it gives inaccurate results. */
    int mbpair_height = X264_MIN( h->param.i_height - mb_y * 16, 32 );
    score_frame  = h->pixf.vsad( fenc,          stride, mbpair_height );
    score_field  = h->pixf.vsad( fenc,        stride*2, mbpair_height >> 1 );
    score_field += h->pixf.vsad( fenc+stride, stride*2, mbpair_height >> 1 );

    if( mb_x > 0 )
        score_field += 512 - h->mb.field[mb_xy        -1]*1024;
    if( mb_y > 0 )
        score_field += 512 - h->mb.field[mb_xy-mb_stride]*1024;

    return (score_field < score_frame);
}

/****************************************************************************
 * successive elimination
 ****************************************************************************/
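/* "ads" = absolute differences of sums: enc_dc holds sub-block sums of the
 * encoded block, sums[] holds precomputed sums for each candidate reference
 * position.  The sum of their absolute differences plus the MV cost is a
 * cheap lower bound on SAD + cost, so candidates whose bound already exceeds
 * the threshold are skipped; only the indices of the survivors are written
 * to mvs[]. */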
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i<width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

/****************************************************************************
 * x264_pixel_init:
 ****************************************************************************/
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );

#define INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
    pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
    INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
    pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
    INIT4_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
    INIT5_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
    INIT6_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )

#define INIT_ADS( cpu ) \
    pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
    pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
    pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;
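
/* INIT<n> fills the first n entries of a size-indexed function table (in the
 * order 16x16, 16x8, 8x16, 8x8, 8x4, 4x8, 4x4) with the versions carrying the
 * given cpu suffix; INIT_ADS does the same for the three ads levels. */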
    INIT7( sad, );
    INIT7_NAME( sad_aligned, sad, );
    INIT7( sad_x3, );
    INIT7( sad_x4, );
    INIT7( ssd, );
    INIT7( satd, );
    INIT7( satd_x3, );
    INIT7( satd_x4, );
    INIT4( hadamard_ac, );
    INIT_ADS( );

    pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
    pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16;
    pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8;

    pixf->ssd_nv12_core = pixel_ssd_nv12_core;
    pixf->ssim_4x4x2_core = ssim_4x4x2_core;
    pixf->ssim_end4 = ssim_end4;
    pixf->var2_8x8 = pixel_var2_8x8;
    pixf->vsad = pixel_vsad;

    pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
    pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
    pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
    pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
    pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
    pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
    pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
    pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;

#if HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX2 )
    {
        INIT7( sad_x3, _mmx2 );
        INIT7( sad_x4, _mmx2 );
        INIT7( satd, _mmx2 );
        INIT7( satd_x3, _mmx2 );
        INIT7( satd_x4, _mmx2 );
        INIT4( hadamard_ac, _mmx2 );

        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_mmx2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
        pixf->var2_8x8 = x264_pixel_var2_8x8_mmx2;

        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
    }
    if( cpu&X264_CPU_SSE2 )
    {
        INIT4_NAME( sad_aligned, sad, _sse2_aligned );

        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;

        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;

        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4 = x264_pixel_ssim_end4_sse2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_sse2;
        pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
    }
    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );

        if( !(cpu&X264_CPU_STACK_MOD4) )
            INIT4( hadamard_ac, _sse2 );

        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_sse2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_sse2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_sse2;
    }
    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pixf->sad[PIXEL_8x16]    = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4]  = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4]  = x264_pixel_sad_x4_8x4_sse2;
    }
    if( cpu&X264_CPU_SSSE3 )
    {
        INIT4_NAME( sad_aligned, sad, _ssse3_aligned );
        INIT7( sad, _ssse3 );
        INIT7( sad_x3, _ssse3 );
        INIT7( sad_x4, _ssse3 );

        if( !(cpu&X264_CPU_STACK_MOD4) )
            INIT4( hadamard_ac, _ssse3 );

        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_ssse3;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_ssse3;
        pixf->intra_sad_x3_4x4   = x264_intra_sad_x3_4x4_ssse3;
        pixf->intra_sad_x3_8x8   = x264_intra_sad_x3_8x8_ssse3;
        pixf->intra_sad_x3_8x8c  = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_ssse3;
    }
    if( cpu&X264_CPU_SSE4 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
            INIT4( hadamard_ac, _sse4 );

        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse4;
    }
    if( cpu&X264_CPU_AVX )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
            INIT4( hadamard_ac, _avx );

        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_avx;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_avx;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_avx;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_avx;
        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_avx;
        pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_avx;
        pixf->ssim_end4 = x264_pixel_ssim_end4_avx;
    }
#endif // HAVE_MMX
#else // !HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        INIT7( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMX2 )
    {
        INIT7_NAME( sad_aligned, sad, _mmx2 );
        INIT7( sad_x3, _mmx2 );
        INIT7( sad_x4, _mmx2 );
        INIT7( satd, _mmx2 );
        INIT7( satd_x3, _mmx2 );
        INIT7( satd_x4, _mmx2 );
        INIT4( hadamard_ac, _mmx2 );

        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmx2;

        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmx2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmx2;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmx2;
        pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_mmx2;
        pixf->var2_8x8 = x264_pixel_var2_8x8_mmx2;
        pixf->vsad = x264_pixel_vsad_mmx2;

#if ARCH_X86
        if( cpu&X264_CPU_CACHELINE_32 )
        {
            INIT5( sad, _cache32_mmx2 );
            INIT4( sad_x3, _cache32_mmx2 );
            INIT4( sad_x4, _cache32_mmx2 );
        }
        else if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT5( sad, _cache64_mmx2 );
            INIT4( sad_x3, _cache64_mmx2 );
            INIT4( sad_x4, _cache64_mmx2 );
        }
#else
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmx2;
            pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmx2;
            pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmx2;
            pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmx2;
            pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmx2;
            pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmx2;
            pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmx2;
        }
#endif
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
    }

    if( cpu&X264_CPU_SSE2 )
    {
        INIT5( ssd, _sse2slow );
        INIT2_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
        pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
        pixf->vsad = x264_pixel_vsad_sse2;
    }

    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT6( satd, _sse2 );
        INIT6( satd_x3, _sse2 );
        INIT6( satd_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
            INIT4( hadamard_ac, _sse2 );
        pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( ssd, _sse2 ); /* faster for width 16 on p4 */
            INIT2( sad, _cache64_sse2 );
            INIT2( sad_x3, _cache64_sse2 );
            INIT2( sad_x4, _cache64_sse2 );
            if( cpu&X264_CPU_SSE2_IS_FAST )
            {
                pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
                pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
            }
        }
        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            INIT2( sad_x3, _sse2_misalign );
            INIT2( sad_x4, _sse2_misalign );
        }
    }

    if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
    {
        pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad[PIXEL_8x16]    = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4]  = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4]  = x264_pixel_sad_x4_8x4_sse2;
    }

    if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
    {
        INIT2( sad, _sse3 );
        INIT2( sad_x3, _sse3 );
        INIT2( sad_x4, _sse3 );
    }

    if( cpu&X264_CPU_SSSE3 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
            INIT4( hadamard_ac, _ssse3 );
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            INIT7( ssd, _ssse3 );
            pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_ssse3;
            pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_ssse3;
            INIT7( satd, _ssse3 );
            INIT7( satd_x3, _ssse3 );
            INIT7( satd_x4, _ssse3 );
        }
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_ssse3;
        pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8_ssse3;
        pixf->var2_8x8 = x264_pixel_var2_8x8_ssse3;
        if( cpu&X264_CPU_SHUFFLE_IS_FAST )
            pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_ssse3;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( sad, _cache64_ssse3 );
            INIT2( sad_x3, _cache64_ssse3 );
            INIT2( sad_x4, _cache64_ssse3 );
        }
        if( cpu&X264_CPU_SLOW_ATOM || !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
        {
            INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
        }
    }

    if( cpu&X264_CPU_SSE4 )
    {
        INIT7( satd, _sse4 );
        INIT7( satd_x3, _sse4 );
        INIT7( satd_x4, _sse4 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
            INIT4( hadamard_ac, _sse4 );
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse4;
        pixf->intra_sad_x3_4x4  = x264_intra_sad_x3_4x4_sse4;
    }

    if( cpu&X264_CPU_AVX )
    {
        INIT7( satd, _avx );
        INIT7( satd_x3, _avx );
        INIT7( satd_x4, _avx );
        if( !(cpu&X264_CPU_STACK_MOD4) )
            INIT4( hadamard_ac, _avx );
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_avx;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_avx;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_avx;
        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_avx;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
        pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_avx;
        pixf->ssim_end4 = x264_pixel_ssim_end4_avx;
        pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_avx;
        pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_avx;
    }
#endif // HAVE_MMX

#if HAVE_ARMV6
    if( cpu&X264_CPU_ARMV6 )
    {
        pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
    }
    if( cpu&X264_CPU_NEON )
    {
        INIT5( sad, _neon );
        INIT5( sad_aligned, _neon );
        INIT7( sad_x3, _neon );
        INIT7( sad_x4, _neon );
        INIT7( ssd, _neon );
        INIT7( satd, _neon );
        INIT7( satd_x3, _neon );
        INIT7( satd_x4, _neon );
        INIT4( hadamard_ac, _neon );
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
        pixf->var2_8x8          = x264_pixel_var2_8x8_neon;

        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
        pixf->ssim_end4         = x264_pixel_ssim_end4_neon;

        if( cpu&X264_CPU_FAST_NEON_MRC )
        {
            pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
            pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
            pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
            pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
        }
        else    // really just scheduled for dual issue / A8
        {
            INIT5( sad_aligned, _neon_dual );
        }
    }
#endif
#endif // HIGH_BIT_DEPTH
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif
#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
    INIT4( sad, _vis );
    INIT4( sad_x3, _vis );
    INIT4( sad_x4, _vis );
#endif
#endif // !HIGH_BIT_DEPTH

    pixf->ads[PIXEL_8x16] =
    pixf->ads[PIXEL_8x4] =
    pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
    pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}