/*****************************************************************************
 * pixel.c: pixel metrics
 *****************************************************************************
 * Copyright (C) 2003-2010 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/
#include "common.h"

#if HAVE_MMX
#   include "x86/pixel.h"
#endif
#if ARCH_PPC
#   include "ppc/pixel.h"
#endif
#if ARCH_ARM
#   include "arm/pixel.h"
#endif
#if ARCH_UltraSPARC
#   include "sparc/pixel.h"
#endif
/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
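/* Plain C reference implementations.  x264_pixel_init() below fills the
 * function table with these first, then overrides individual entries with
 * platform-specific assembly when the corresponding CPU features are
 * detected. */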
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                 \
    int i_sum = 0;                                \
    for( int y = 0; y < ly; y++ )                 \
    {                                             \
        for( int x = 0; x < lx; x++ )             \
        {                                         \
            i_sum += abs( pix1[x] - pix2[x] );    \
        }                                         \
        pix1 += i_stride_pix1;                    \
        pix2 += i_stride_pix2;                    \
    }                                             \
    return i_sum;                                 \
}
PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )
/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                 \
    int i_sum = 0;                                \
    for( int y = 0; y < ly; y++ )                 \
    {                                             \
        for( int x = 0; x < lx; x++ )             \
        {                                         \
            int d = pix1[x] - pix2[x];            \
            i_sum += d*d;                         \
        }                                         \
        pix1 += i_stride_pix1;                    \
        pix2 += i_stride_pix2;                    \
    }                                             \
    return i_sum;                                 \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )
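/* SSD of an arbitrary width x height region: the interior is covered with
 * 16x16 / 8x16 / 8x8 calls through the function table (so asm versions are
 * used when available), and any leftover right/bottom edge pixels are summed
 * directly. */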
uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
{
    uint64_t i_ssd = 0;
    int y;
    int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        int x = 0;
        if( align )
            for( ; x < i_width-15; x += 16 )
                SSD(PIXEL_16x16);
        for( ; x < i_width-7; x += 8 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( int x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width & 7 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( int x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height & 7 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( int x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}
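/* NV12 chroma: U and V samples are interleaved, so a single pass accumulates
 * the SSD of both planes at once (even offsets = U, odd offsets = V). */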
static void pixel_ssd_nv12_core( pixel *pixuv1, int stride1, pixel *pixuv2, int stride2, int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    *ssd_u = 0, *ssd_v = 0;
    for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
        for( int x = 0; x < width; x++ )
        {
            int du = pixuv1[2*x]   - pixuv2[2*x];
            int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
            *ssd_u += du*du;
            *ssd_v += dv*dv;
        }
}

void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height, ssd_u, ssd_v );
    if( i_width&7 )
    {
        uint64_t tmp[2];
        pixel_ssd_nv12_core( pix1+(i_width&~7), i_pix1, pix2+(i_width&~7), i_pix2, i_width&7, i_height, &tmp[0], &tmp[1] );
        *ssd_u += tmp[0];
        *ssd_v += tmp[1];
    }
}
/****************************************************************************
 * pixel_var_wxh
 ****************************************************************************/
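/* Returns the pixel sum in the low 32 bits and the sum of squares in the
 * high 32 bits; callers can then derive the variance as sqr - sum*sum/(w*w). */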
#define PIXEL_VAR_C( name, w ) \
static uint64_t name( pixel *pix, int i_stride ) \
{                                             \
    uint32_t sum = 0, sqr = 0;                \
    for( int y = 0; y < w; y++ )              \
    {                                         \
        for( int x = 0; x < w; x++ )          \
        {                                     \
            sum += pix[x];                    \
            sqr += pix[x] * pix[x];           \
        }                                     \
        pix += i_stride;                      \
    }                                         \
    return sum + ((uint64_t)sqr << 32);       \
}
PIXEL_VAR_C( x264_pixel_var_16x16, 16 )
PIXEL_VAR_C( x264_pixel_var_8x8,    8 )
/****************************************************************************
 * pixel_var2_wxh
 ****************************************************************************/
static int pixel_var2_8x8( pixel *pix1, int i_stride1, pixel *pix2, int i_stride2, int *ssd )
{
    uint32_t var = 0, sum = 0, sqr = 0;
    for( int y = 0; y < 8; y++ )
    {
        for( int x = 0; x < 8; x++ )
        {
            int diff = pix1[x] - pix2[x];
            sum += diff;
            sqr += diff * diff;
        }
        pix1 += i_stride1;
        pix2 += i_stride2;
    }
    sum = abs(sum);
    var = sqr - ((uint64_t)sum * sum >> 6);
    *ssd = sqr;
    return var;
}
#if BIT_DEPTH > 8
typedef uint32_t sum_t;
typedef uint64_t sum2_t;
#else
typedef uint16_t sum_t;
typedef uint32_t sum2_t;
#endif
#define BITS_PER_SUM (8 * sizeof(sum_t))
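/* The SATD/SA8D code below works in a pseudo-SIMD fashion: each sum2_t holds
 * two sum_t lanes (low and high half), so one HADAMARD4 butterfly and one
 * abs2() call operate on two packed values at once.  abs2() builds a mask
 * from the sign bit of each lane and uses the (a+s)^s identity to take
 * per-lane absolute values without branches. */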
#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
    sum2_t t0 = s0 + s1;\
    sum2_t t1 = s0 - s1;\
    sum2_t t2 = s2 + s3;\
    sum2_t t3 = s2 - s3;\
    d0 = t0 + t2;\
    d2 = t0 - t2;\
    d1 = t1 + t3;\
    d3 = t1 - t3;\
}

// in: a pseudo-simd number of the form x+(y<<16)
// return: abs(x)+(abs(y)<<16)
static ALWAYS_INLINE sum2_t abs2( sum2_t a )
{
    sum2_t s = ((a>>(BITS_PER_SUM-1))&(((sum2_t)1<<BITS_PER_SUM)+1))*((sum_t)-1);
    return (a+s)^s;
}
/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/
static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[4][2];
    sum2_t a0, a1, a2, a3, b0, b1;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        tmp[i][0] = b0 + b1;
        tmp[i][1] = b0 - b1;
    }
    for( int i = 0; i < 2; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
        sum += ((sum_t)a0) + (a0>>BITS_PER_SUM);
    }
    return sum >> 1;
}
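/* 8x4 SATD: the left and right 4x4 halves are packed into the low and high
 * lanes of each sum2_t, so one pass of transforms scores both 4x4 blocks. */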
static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[4][4];
    sum2_t a0, a1, a2, a3;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
        a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
        a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
        a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    return (((sum_t)sum) + (sum>>BITS_PER_SUM)) >> 1;
}
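/* Larger SATD sizes are built by tiling the 8x4 (or, for 4-wide blocks, the
 * 4x4) primitive across the block. */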
#define PIXEL_SATD_C( w, h, sub )\
static int x264_pixel_satd_##w##x##h( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )\
{\
    int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
            + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
    if( w==16 )\
        sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
            + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
    if( h==16 )\
        sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
            + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
    if( w==16 && h==16 )\
        sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
            + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
    return sum;\
}
PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 16,  8, x264_pixel_satd_8x4 )
PIXEL_SATD_C(  8, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C(  8,  8, x264_pixel_satd_8x4 )
PIXEL_SATD_C(  4,  8, x264_pixel_satd_4x4 )
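/* SA8D: sum of absolute values of an 8x8 Hadamard transform of the
 * differences.  The public wrappers scale the raw transform sum with
 * (sum+2)>>2 so the result stays roughly comparable to SATD. */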
static NOINLINE int sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[8][4];
    sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
    sum2_t sum = 0;
    for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        a4 = pix1[4] - pix2[4];
        a5 = pix1[5] - pix2[5];
        b2 = (a4+a5) + ((a4-a5)<<BITS_PER_SUM);
        a6 = pix1[6] - pix2[6];
        a7 = pix1[7] - pix2[7];
        b3 = (a6+a7) + ((a6-a7)<<BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
        b0  = abs2(a0+a4) + abs2(a0-a4);
        b0 += abs2(a1+a5) + abs2(a1-a5);
        b0 += abs2(a2+a6) + abs2(a2-a6);
        b0 += abs2(a3+a7) + abs2(a3-a7);
        sum += (sum_t)b0 + (b0>>BITS_PER_SUM);
    }
    return sum;
}

static int x264_pixel_sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
    return (sum+2)>>2;
}

static int x264_pixel_sa8d_16x16( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
            + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
            + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
            + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
    return (sum+2)>>2;
}
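/* Sum of AC Hadamard coefficients over one 8x8 block: the low 32 bits of the
 * return value hold the total for the four 4x4 transforms, the high 32 bits
 * the total for the 8x8 transform, each with the DC term subtracted.  This
 * metric is used by the psy-rd / psy-trellis heuristics. */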
static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, int stride )
{
    sum2_t tmp[32];
    sum2_t a0, a1, a2, a3, dc;
    sum2_t sum4 = 0, sum8 = 0;
    for( int i = 0; i < 8; i++, pix+=stride )
    {
        sum2_t *t = tmp + (i&3) + (i&4)*4;
        a0 = (pix[0]+pix[1]) + ((sum2_t)(pix[0]-pix[1])<<BITS_PER_SUM);
        a1 = (pix[2]+pix[3]) + ((sum2_t)(pix[2]-pix[3])<<BITS_PER_SUM);
        t[0] = a0 + a1;
        t[4] = a0 - a1;
        a2 = (pix[4]+pix[5]) + ((sum2_t)(pix[4]-pix[5])<<BITS_PER_SUM);
        a3 = (pix[6]+pix[7]) + ((sum2_t)(pix[6]-pix[7])<<BITS_PER_SUM);
        t[8] = a2 + a3;
        t[12] = a2 - a3;
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
        tmp[i*4+0] = a0;
        tmp[i*4+1] = a1;
        tmp[i*4+2] = a2;
        tmp[i*4+3] = a3;
        sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
        sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    dc = (sum_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
    sum4 = (sum_t)sum4 + (sum4>>BITS_PER_SUM) - dc;
    sum8 = (sum_t)sum8 + (sum8>>BITS_PER_SUM) - dc;
    return ((uint64_t)sum8<<32) + sum4;
}
#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, int stride )\
{\
    uint64_t sum = pixel_hadamard_ac( pix, stride );\
    if( w==16 )\
        sum += pixel_hadamard_ac( pix+8, stride );\
    if( h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride, stride );\
    if( w==16 && h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )
/****************************************************************************
 * pixel_sad_x4
 ****************************************************************************/
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )

#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif
#endif // !HIGH_BIT_DEPTH
/****************************************************************************
 * pixel_satd_x4
 * no faster than single satd, but needed for satd to be a drop-in replacement for sad
 ****************************************************************************/

#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}

#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )

SATD_X_DECL7()
#if HAVE_MMX
SATD_X_DECL7( _mmxext )
#if !HIGH_BIT_DEPTH
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
#endif // !HIGH_BIT_DEPTH
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif
#endif // !HIGH_BIT_DEPTH
#define INTRA_MBCMP_8x8( mbcmp, cpu )\
void x264_intra_##mbcmp##_x3_8x8##cpu( pixel *fenc, pixel edge[33], int res[3] )\
{\
    ALIGNED_ARRAY_16( pixel, pix, [8*FDEC_STRIDE] );\
    x264_predict_8x8_v_c( pix, edge );\
    res[0] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_h_c( pix, edge );\
    res[1] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_dc_c( pix, edge );\
    res[2] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP_8x8( sad, )
INTRA_MBCMP_8x8(sa8d, )
#if HIGH_BIT_DEPTH && HAVE_MMX
INTRA_MBCMP_8x8( sad, _mmxext)
INTRA_MBCMP_8x8( sad, _sse2  )
INTRA_MBCMP_8x8( sad, _ssse3 )
INTRA_MBCMP_8x8(sa8d, _sse2  )
#endif
#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma, cpu )\
void x264_intra_##mbcmp##_x3_##size##x##size##chroma##cpu( pixel *fenc, pixel *fdec, int res[3] )\
{\
    x264_predict_##size##x##size##chroma##_##pred1##_c( fdec );\
    res[0] = x264_pixel_##mbcmp##_##size##x##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##x##size##chroma##_##pred2##_c( fdec );\
    res[1] = x264_pixel_##mbcmp##_##size##x##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##x##size##chroma##_##pred3##_c( fdec );\
    res[2] = x264_pixel_##mbcmp##_##size##x##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP( sad,  4, v, h, dc,  , )
INTRA_MBCMP(satd,  4, v, h, dc,  , )
INTRA_MBCMP( sad,  8, dc, h, v, c, )
INTRA_MBCMP(satd,  8, dc, h, v, c, )
INTRA_MBCMP( sad, 16, v, h, dc,  , )
INTRA_MBCMP(satd, 16, v, h, dc,  , )

#if HIGH_BIT_DEPTH && HAVE_MMX
INTRA_MBCMP( sad,  4, v, h, dc,  , _mmxext)
INTRA_MBCMP(satd,  4, v, h, dc,  , _mmxext)
INTRA_MBCMP( sad,  8, dc, h, v, c, _mmxext)
INTRA_MBCMP(satd,  8, dc, h, v, c, _mmxext)
INTRA_MBCMP( sad, 16, v, h, dc,  , _mmxext)
INTRA_MBCMP(satd, 16, v, h, dc,  , _mmxext)
INTRA_MBCMP( sad,  8, dc, h, v, c, _sse2  )
INTRA_MBCMP( sad, 16, v, h, dc,  , _sse2  )
INTRA_MBCMP( sad,  8, dc, h, v, c, _ssse3 )
INTRA_MBCMP( sad, 16, v, h, dc,  , _ssse3 )
#endif
/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
static void ssim_4x4x2_core( const pixel *pix1, int stride1,
                             const pixel *pix2, int stride2,
                             int sums[2][4] )
{
    for( int z = 0; z < 2; z++ )
    {
        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
        for( int y = 0; y < 4; y++ )
            for( int x = 0; x < 4; x++ )
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        pix1 += 4;
        pix2 += 4;
    }
}
static float ssim_end1( int s1, int s2, int ss, int s12 )
{
    static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
    int vars = ss*64 - s1*s1 - s2*s2;
    int covar = s12*64 - s1*s2;
    return (float)(2*s1*s2 + ssim_c1) * (float)(2*covar + ssim_c2)
         / ((float)(s1*s1 + s2*s2 + ssim_c1) * (float)(vars + ssim_c2));
}

static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    float ssim = 0.0;
    for( int i = 0; i < width; i++ )
        ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
                           sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
                           sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
                           sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
    return ssim;
}
float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           pixel *pix1, int stride1,
                           pixel *pix2, int stride2,
                           int width, int height, void *buf )
{
    int z = 0;
    float ssim = 0.0;
    int (*sum0)[4] = buf;
    int (*sum1)[4] = sum0 + (width >> 2) + 3;
    width >>= 2;
    height >>= 2;
    for( int y = 1; y < height; y++ )
    {
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( int x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        for( int x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    return ssim / ((height-1) * (width-1));
}
/****************************************************************************
 * successive elimination
 ****************************************************************************/
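/* Successive elimination, used by the exhaustive motion searches: enc_dc[]
 * holds sub-block pixel sums of the source block and sums[] the corresponding
 * sums at each candidate position in the reference.  Their absolute
 * differences form a cheap lower bound on SAD; only candidates whose bound
 * (plus MV cost) is below thresh are written to mvs[], and the number of
 * survivors is returned. */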
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}
/****************************************************************************
 * x264_pixel_init:
 ****************************************************************************/
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );
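    /* Start from the portable C implementations, then overwrite entries with
     * progressively more specialized assembly versions for each CPU feature
     * flag that is set, so the fastest available routine wins. */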
#define INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
    pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
    INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
    pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
    INIT4_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
    INIT5_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
    INIT6_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )

#define INIT_ADS( cpu ) \
    pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
    pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
    pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;
    INIT7( sad, );
    INIT7_NAME( sad_aligned, sad, );
    INIT7( sad_x3, );
    INIT7( sad_x4, );
    INIT7( ssd, );
    INIT7( satd, );
    INIT7( satd_x3, );
    INIT7( satd_x4, );
    INIT4( hadamard_ac, );
    INIT_ADS( );

    pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
    pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16;
    pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8;

    pixf->ssd_nv12_core   = pixel_ssd_nv12_core;
    pixf->ssim_4x4x2_core = ssim_4x4x2_core;
    pixf->ssim_end4       = ssim_end4;
    pixf->var2_8x8        = pixel_var2_8x8;

    pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
    pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
    pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
    pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
    pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
    pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
    pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
    pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;

#if HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMXEXT )
    {
        INIT7( sad, _mmxext );
        INIT7( sad_x3, _mmxext );
        INIT7( sad_x4, _mmxext );
        INIT7( satd, _mmxext );
        INIT7( satd_x3, _mmxext );
        INIT7( satd_x4, _mmxext );
        INIT4( hadamard_ac, _mmxext );
        INIT7( ssd, _mmxext );

        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmxext;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmxext;
        pixf->var2_8x8         = x264_pixel_var2_8x8_mmxext;

        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmxext;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmxext;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmxext;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmxext;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmxext;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmxext;
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmxext;
    }
    if( cpu&X264_CPU_SSE2 )
    {
        INIT4_NAME( sad_aligned, sad, _sse2_aligned );

        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;

        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;

        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_sse2;
        pixf->var2_8x8         = x264_pixel_var2_8x8_sse2;
    }
    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }

        pixf->intra_sad_x3_8x8   = x264_intra_sad_x3_8x8_sse2;
        pixf->intra_sad_x3_8x8c  = x264_intra_sad_x3_8x8c_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
    }
    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pixf->sad[PIXEL_8x16]    = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4]  = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4]  = x264_pixel_sad_x4_8x4_sse2;
    }
    if( cpu&X264_CPU_SSSE3 )
    {
        INIT7( sad, _ssse3 );
        INIT7( sad_x3, _ssse3 );
        INIT7( sad_x4, _ssse3 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
        }

        pixf->sa8d[PIXEL_16x16]  = x264_pixel_sa8d_16x16_ssse3;
        pixf->sa8d[PIXEL_8x8]    = x264_pixel_sa8d_8x8_ssse3;
        pixf->intra_sad_x3_8x8   = x264_intra_sad_x3_8x8_ssse3;
        pixf->intra_sad_x3_8x8c  = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_ssse3;
    }
#endif // HAVE_MMX
#else // !HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        INIT7( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMXEXT )
    {
        INIT7( sad, _mmxext );
        INIT7_NAME( sad_aligned, sad, _mmxext );
        INIT7( sad_x3, _mmxext );
        INIT7( sad_x4, _mmxext );
        INIT7( satd, _mmxext );
        INIT7( satd_x3, _mmxext );
        INIT7( satd_x4, _mmxext );
        INIT4( hadamard_ac, _mmxext );
        INIT_ADS( _mmxext );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmxext;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmxext;
#if ARCH_X86
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmxext;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmxext;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmxext;
        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_mmxext;
        pixf->var2_8x8          = x264_pixel_var2_8x8_mmxext;

        if( cpu&X264_CPU_CACHELINE_32 )
        {
            INIT5( sad, _cache32_mmxext );
            INIT4( sad_x3, _cache32_mmxext );
            INIT4( sad_x4, _cache32_mmxext );
        }
        else if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT5( sad, _cache64_mmxext );
            INIT4( sad_x3, _cache64_mmxext );
            INIT4( sad_x4, _cache64_mmxext );
        }
#else
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pixf->sad[PIXEL_8x16]    = x264_pixel_sad_8x16_cache64_mmxext;
            pixf->sad[PIXEL_8x8]     = x264_pixel_sad_8x8_cache64_mmxext;
            pixf->sad[PIXEL_8x4]     = x264_pixel_sad_8x4_cache64_mmxext;
            pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmxext;
            pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmxext;
            pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmxext;
            pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmxext;
        }
#endif
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmxext;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmxext;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmxext;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmxext;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmxext;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmxext;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmxext;
    }
    if( cpu&X264_CPU_SSE2 )
    {
        INIT5( ssd, _sse2slow );
        INIT2_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_sse2;
        pixf->ssd_nv12_core     = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4         = x264_pixel_ssim_end4_sse2;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
    }
    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT6( satd, _sse2 );
        INIT6( satd_x3, _sse2 );
        INIT6( satd_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }
        INIT_ADS( _sse2 );
        pixf->var[PIXEL_8x8]     = x264_pixel_var_8x8_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( ssd, _sse2); /* faster for width 16 on p4 */
#if ARCH_X86
            INIT2( sad, _cache64_sse2 );
            INIT2( sad_x3, _cache64_sse2 );
            INIT2( sad_x4, _cache64_sse2 );
#endif
            if( cpu&X264_CPU_SSE2_IS_FAST )
            {
                pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
                pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
            }
        }

        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            INIT2( sad_x3, _sse2_misalign );
            INIT2( sad_x4, _sse2_misalign );
        }
    }
    if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
    {
        pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad[PIXEL_8x16]    = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4]  = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4]  = x264_pixel_sad_x4_8x4_sse2;
    }
    if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
    {
        INIT2( sad, _sse3 );
        INIT2( sad_x3, _sse3 );
        INIT2( sad_x4, _sse3 );
    }
    if( cpu&X264_CPU_SSSE3 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
        }
        INIT_ADS( _ssse3 );
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            INIT7( ssd, _ssse3 );
            pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_ssse3;
            pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_ssse3;
            INIT7( satd, _ssse3 );
            INIT7( satd_x3, _ssse3 );
            INIT7( satd_x4, _ssse3 );
        }
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_ssse3;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_ssse3;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( sad, _cache64_ssse3 );
            INIT2( sad_x3, _cache64_ssse3 );
            INIT2( sad_x4, _cache64_ssse3 );
        }
        if( cpu&X264_CPU_SLOW_ATOM || !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
        {
            INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
        }
    }
    if( cpu&X264_CPU_SSE4 )
    {
        INIT7( satd, _sse4 );
        INIT7( satd_x3, _sse4 );
        INIT7( satd_x4, _sse4 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
        }
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse4;
        pixf->intra_sad_x3_4x4  = x264_intra_sad_x3_4x4_sse4;
        /* Slower on Conroe, so only enable under SSE4 */
        pixf->intra_sad_x3_8x8  = x264_intra_sad_x3_8x8_ssse3;
    }
#endif // HAVE_MMX

#if HAVE_ARMV6
    if( cpu&X264_CPU_ARMV6 )
    {
        pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
    }

    if( cpu&X264_CPU_NEON )
    {
        INIT5( sad, _neon );
        INIT5( sad_aligned, _neon );
        INIT7( sad_x3, _neon );
        INIT7( sad_x4, _neon );
        INIT7( ssd, _neon );
        INIT7( satd, _neon );
        INIT7( satd_x3, _neon );
        INIT7( satd_x4, _neon );
        INIT4( hadamard_ac, _neon );
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
        pixf->var2_8x8          = x264_pixel_var2_8x8_neon;

        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
        pixf->ssim_end4         = x264_pixel_ssim_end4_neon;

        if( cpu&X264_CPU_FAST_NEON_MRC )
        {
            pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
            pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
            pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
            pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
        }
        else // really just scheduled for dual issue / A8
        {
            INIT5( sad_aligned, _neon_dual );
        }
    }
#endif
#endif // HIGH_BIT_DEPTH

#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif

#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
    INIT4( sad, _vis );
    INIT4( sad_x3, _vis );
    INIT4( sad_x4, _vis );
#endif
#endif // !HIGH_BIT_DEPTH
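    /* The remaining partition sizes have no dedicated ads routine; alias them
     * to the ads2 (16x8 slot) and ads1 (8x8 slot) entries set up above. */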
    pixf->ads[PIXEL_8x16] =
    pixf->ads[PIXEL_8x4] =
    pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
    pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}