/*****************************************************************************
 * pixel.c: pixel metrics
 *****************************************************************************
 * Copyright (C) 2003-2013 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"

#if HAVE_MMX
#   include "x86/pixel.h"
#   include "x86/predict.h"
#endif
#if ARCH_PPC
#   include "ppc/pixel.h"
#endif
#if ARCH_ARM
#   include "arm/pixel.h"
#endif
#if ARCH_UltraSPARC
#   include "sparc/pixel.h"
#endif


/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
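/* SAD (sum of absolute differences) is the cheapest of the block metrics:
 * score = sum over the block of abs(pix1[] - pix2[]). The macro below
 * stamps out a plain-C version for each block size; x264_pixel_init()
 * later overrides these with optimized versions where available. */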
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( pixel *pix1, intptr_t i_stride_pix1,  \
                 pixel *pix2, intptr_t i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            i_sum += abs( pix1[x] - pix2[x] );      \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}


PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x16,   4, 16 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )

/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
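/* SSD (sum of squared differences) is the metric underlying PSNR. As a
 * rough sketch of how such a sum is conventionally turned into a PSNR
 * figure (the actual reporting lives elsewhere in the encoder):
 *
 *   psnr_db = 10 * log10( (double)PIXEL_MAX*PIXEL_MAX * width*height / ssd );
 */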
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( pixel *pix1, intptr_t i_stride_pix1,  \
                 pixel *pix2, intptr_t i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            int d = pix1[x] - pix2[x];              \
            i_sum += d*d;                           \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x16,   4, 16 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )

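/* Whole-plane SSD: tile with the largest block size available (16x16 only
 * when every pointer and stride is 16-byte aligned, so implementations may
 * assume aligned access), then 8-wide columns, then mop up any width or
 * height remainder with plain scalar squares. */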
uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, intptr_t i_pix1,
                             pixel *pix2, intptr_t i_pix2, int i_width, int i_height )
{
    uint64_t i_ssd = 0;
    int y;
    int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        int x = 0;
        if( align )
            for( ; x < i_width-15; x += 16 )
                SSD(PIXEL_16x16);
        for( ; x < i_width-7; x += 8 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( int x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width & 7 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( int x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height & 7 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( int x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}

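/* NV12 stores chroma interleaved, U at even offsets and V at odd ones, so
 * one pass over the plane accumulates both SSDs at once. This C core also
 * mops up the right edge in x264_pixel_ssd_nv12() below when the asm core
 * only covers a width rounded down to a multiple of 8. */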
static void pixel_ssd_nv12_core( pixel *pixuv1, intptr_t stride1, pixel *pixuv2, intptr_t stride2,
                                 int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    *ssd_u = 0, *ssd_v = 0;
    for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
        for( int x = 0; x < width; x++ )
        {
            int du = pixuv1[2*x]   - pixuv2[2*x];
            int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
            *ssd_u += du*du;
            *ssd_v += dv*dv;
        }
}

void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2,
                          int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height, ssd_u, ssd_v );
    if( i_width&7 )
    {
        uint64_t tmp[2];
        pixel_ssd_nv12_core( pix1+(i_width&~7), i_pix1, pix2+(i_width&~7), i_pix2, i_width&7, i_height, &tmp[0], &tmp[1] );
        *ssd_u += tmp[0];
        *ssd_v += tmp[1];
    }
}

/****************************************************************************
 * pixel_var_wxh
 ****************************************************************************/
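/* Returns two accumulators packed into one value: the pixel sum in the low
 * 32 bits, the sum of squares in the high 32 bits. A caller can unpack and
 * derive the variance in the usual way; roughly, for an n-pixel block:
 *
 *   uint32_t sum = var;        // low half
 *   uint32_t sqr = var >> 32;  // high half
 *   // sqr - sum*sum/n is then n times the block variance
 */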
#define PIXEL_VAR_C( name, w, h ) \
static uint64_t name( pixel *pix, intptr_t i_stride ) \
{                                             \
    uint32_t sum = 0, sqr = 0;                \
    for( int y = 0; y < h; y++ )              \
    {                                         \
        for( int x = 0; x < w; x++ )          \
        {                                     \
            sum += pix[x];                    \
            sqr += pix[x] * pix[x];           \
        }                                     \
        pix += i_stride;                      \
    }                                         \
    return sum + ((uint64_t)sqr << 32);       \
}

PIXEL_VAR_C( x264_pixel_var_16x16, 16, 16 )
PIXEL_VAR_C( x264_pixel_var_8x16,   8, 16 )
PIXEL_VAR_C( x264_pixel_var_8x8,    8,  8 )

/****************************************************************************
 * pixel_var2_wxh
 ****************************************************************************/
#define PIXEL_VAR2_C( name, w, h, shift ) \
static int name( pixel *pix1, intptr_t i_stride1, pixel *pix2, intptr_t i_stride2, int *ssd ) \
{ \
    uint32_t var = 0, sum = 0, sqr = 0; \
    for( int y = 0; y < h; y++ ) \
    { \
        for( int x = 0; x < w; x++ ) \
        { \
            int diff = pix1[x] - pix2[x]; \
            sum += diff; \
            sqr += diff * diff; \
        } \
        pix1 += i_stride1; \
        pix2 += i_stride2; \
    } \
    sum = abs(sum); \
    var = sqr - ((uint64_t)sum * sum >> shift); \
    *ssd = sqr; \
    return var; \
}

PIXEL_VAR2_C( x264_pixel_var2_8x16, 8, 16, 7 )
PIXEL_VAR2_C( x264_pixel_var2_8x8,  8,  8, 6 )

#if BIT_DEPTH > 8
    typedef uint32_t sum_t;
    typedef uint64_t sum2_t;
#else
    typedef uint16_t sum_t;
    typedef uint32_t sum2_t;
#endif
#define BITS_PER_SUM (8 * sizeof(sum_t))
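/* The SATD/SA8D code below uses a poor man's SIMD ("SWAR"): two sum_t-wide
 * partial results travel packed in one sum2_t register as v = x + (y <<
 * BITS_PER_SUM), in modular arithmetic. A negative x simply borrows from
 * the y half; the extraction and abs2() code below stays exact under that
 * convention. */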

#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
    sum2_t t0 = s0 + s1;\
    sum2_t t1 = s0 - s1;\
    sum2_t t2 = s2 + s3;\
    sum2_t t3 = s2 - s3;\
    d0 = t0 + t2;\
    d2 = t0 - t2;\
    d1 = t1 + t3;\
    d3 = t1 - t3;\
}

// in: a pseudo-simd number of the form x+(y<<16)
// return: abs(x)+(abs(y)<<16)
static ALWAYS_INLINE sum2_t abs2( sum2_t a )
{
    sum2_t s = ((a>>(BITS_PER_SUM-1))&(((sum2_t)1<<BITS_PER_SUM)+1))*((sum_t)-1);
    return (a+s)^s;
}
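/* s extracts the sign bit of each packed half and smears it into a
 * per-half all-ones mask, so (a+s)^s conditionally negates each half: the
 * same identity as abs(x) = (x + (x>>31)) ^ (x>>31), applied to both
 * halves at once. A worked example at 8-bit depth (BITS_PER_SUM == 16),
 * encoding x = -3, y = 5:
 *
 *   a = (sum2_t)(-3) + (5<<16)   == 0x0004FFFD  (the -3 borrows from y)
 *   s = ((a>>15)&0x10001)*0xFFFF == 0x0000FFFF
 *   (a+s)^s                      == 0x00050003  == 3 + (5<<16)
 */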

/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/

static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
    sum2_t tmp[4][2];
    sum2_t a0, a1, a2, a3, b0, b1;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        tmp[i][0] = b0 + b1;
        tmp[i][1] = b0 - b1;
    }
    for( int i = 0; i < 2; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
        sum += ((sum_t)a0) + (a0>>BITS_PER_SUM);
    }
    return sum >> 1;
}
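/* The horizontal pass packs each row's sum/difference pairs two to a
 * register, the vertical pass is HADAMARD4, and abs2() plus the two-half
 * extraction add everything back together. The final >>1 compensates for
 * the unnormalized transform, keeping SATD on a scale comparable to SAD. */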

static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
    sum2_t tmp[4][4];
    sum2_t a0, a1, a2, a3;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
        a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
        a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
        a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    return (((sum_t)sum) + (sum>>BITS_PER_SUM)) >> 1;
}
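/* Same transform, but the packing now carries the two 4x4 halves of an
 * 8x4 block side by side: columns 0-3 ride in the low halves and columns
 * 4-7 in the high halves, so both 4x4 SATDs are computed in one pass and
 * summed at the end. */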

#define PIXEL_SATD_C( w, h, sub )\
static int x264_pixel_satd_##w##x##h( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )\
{\
    int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
            + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
    if( w==16 )\
        sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
            + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
    if( h==16 )\
        sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
            + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
    if( w==16 && h==16 )\
        sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
            + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
    return sum;\
}
PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 16, 8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 4,  16, x264_pixel_satd_4x4 )
PIXEL_SATD_C( 4,  8,  x264_pixel_satd_4x4 )

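/* SA8D applies the same idea with an 8x8 Hadamard transform, which tracks
 * the cost of the 8x8 transform more closely than a sum of 4x4 SATDs.
 * sa8d_8x8() builds the 8-point butterflies from two 4-point stages plus
 * the a0..a3 / a4..a7 cross terms; the public wrappers round with
 * (sum+2)>>2 to bring the result back to a SAD-comparable scale. */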
static NOINLINE int sa8d_8x8( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
    sum2_t tmp[8][4];
    sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
    sum2_t sum = 0;
    for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        a4 = pix1[4] - pix2[4];
        a5 = pix1[5] - pix2[5];
        b2 = (a4+a5) + ((a4-a5)<<BITS_PER_SUM);
        a6 = pix1[6] - pix2[6];
        a7 = pix1[7] - pix2[7];
        b3 = (a6+a7) + ((a6-a7)<<BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
        b0  = abs2(a0+a4) + abs2(a0-a4);
        b0 += abs2(a1+a5) + abs2(a1-a5);
        b0 += abs2(a2+a6) + abs2(a2-a6);
        b0 += abs2(a3+a7) + abs2(a3-a7);
        sum += (sum_t)b0 + (b0>>BITS_PER_SUM);
    }
    return sum;
}

static int x264_pixel_sa8d_8x8( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
    return (sum+2)>>2;
}

static int x264_pixel_sa8d_16x16( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
            + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
            + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
            + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
    return (sum+2)>>2;
}

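/* Unlike satd/sa8d, hadamard_ac transforms the source pixels themselves
 * rather than a difference, measuring the texture energy of a block: the
 * sums of absolute coefficients at 4x4 and 8x8 granularity, with DC
 * subtracted so only AC energy counts. The two sums are returned packed as
 * ((8x8 sum << 32) + 4x4 sum); psy-rd compares these energies between
 * source and reconstruction. */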
static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, intptr_t stride )
{
    sum2_t tmp[32];
    sum2_t a0, a1, a2, a3, dc;
    sum2_t sum4 = 0, sum8 = 0;
    for( int i = 0; i < 8; i++, pix+=stride )
    {
        sum2_t *t = tmp + (i&3) + (i&4)*4;
        a0 = (pix[0]+pix[1]) + ((sum2_t)(pix[0]-pix[1])<<BITS_PER_SUM);
        a1 = (pix[2]+pix[3]) + ((sum2_t)(pix[2]-pix[3])<<BITS_PER_SUM);
        t[0] = a0 + a1;
        t[4] = a0 - a1;
        a2 = (pix[4]+pix[5]) + ((sum2_t)(pix[4]-pix[5])<<BITS_PER_SUM);
        a3 = (pix[6]+pix[7]) + ((sum2_t)(pix[6]-pix[7])<<BITS_PER_SUM);
        t[8] = a2 + a3;
        t[12] = a2 - a3;
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
        tmp[i*4+0] = a0;
        tmp[i*4+1] = a1;
        tmp[i*4+2] = a2;
        tmp[i*4+3] = a3;
        sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
        sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    dc = (sum_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
    sum4 = (sum_t)sum4 + (sum4>>BITS_PER_SUM) - dc;
    sum8 = (sum_t)sum8 + (sum8>>BITS_PER_SUM) - dc;
    return ((uint64_t)sum8<<32) + sum4;
}

#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, intptr_t stride )\
{\
    uint64_t sum = pixel_hadamard_ac( pix, stride );\
    if( w==16 )\
        sum += pixel_hadamard_ac( pix+8, stride );\
    if( h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride, stride );\
    if( w==16 && h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
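/* The return line unpacks and renormalizes both packed sums at once:
 * sum>>34 is the 8x8 total divided by 4 and (uint32_t)sum>>1 is the 4x4
 * total divided by 2, matching the scales used by sa8d and satd. */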
HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )


/****************************************************************************
 * pixel_sad_x4
 ****************************************************************************/
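/* The _x3/_x4 variants score one encode-side block (always at FENC_STRIDE)
 * against 3 or 4 candidate positions sharing a stride. The C versions just
 * call the single-block SAD repeatedly; the interface exists so that asm
 * implementations can load fenc once and reuse it for every candidate. */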
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2,\
                                      intptr_t i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3,\
                                      intptr_t i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )

#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif
#endif // !HIGH_BIT_DEPTH

/****************************************************************************
 * pixel_satd_x4
 * no faster than single satd, but needed for satd to be a drop-in replacement for sad
 ****************************************************************************/

#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2,\
                                            intptr_t i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3,\
                                            intptr_t i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )

SATD_X_DECL7()
#if HAVE_MMX
SATD_X_DECL7( _mmx2 )
#if !HIGH_BIT_DEPTH
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
SATD_X_DECL7( _avx )
SATD_X_DECL7( _xop )
#endif // !HIGH_BIT_DEPTH
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif
#endif // !HIGH_BIT_DEPTH

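/* Intra mode pre-selection helpers: run the three cheap prediction modes
 * for a block size and score each reconstruction against fenc with the
 * given metric. The 8x8 variant predicts from an edge[] array into a local
 * buffer; the generic macro below it predicts directly into fdec. The
 * macros let the same shape be stamped out per metric/cpu pairing. */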
#define INTRA_MBCMP_8x8( mbcmp, cpu, cpu2 )\
void x264_intra_##mbcmp##_x3_8x8##cpu( pixel *fenc, pixel edge[36], int res[3] )\
{\
    ALIGNED_ARRAY_16( pixel, pix, [8*FDEC_STRIDE] );\
    x264_predict_8x8_v##cpu2( pix, edge );\
    res[0] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_h##cpu2( pix, edge );\
    res[1] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_dc##cpu2( pix, edge );\
    res[2] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP_8x8( sad,, _c )
INTRA_MBCMP_8x8(sa8d,, _c )
#if HIGH_BIT_DEPTH && HAVE_MMX
#define x264_predict_8x8_v_sse2 x264_predict_8x8_v_sse
INTRA_MBCMP_8x8( sad, _mmx2,  _c )
INTRA_MBCMP_8x8(sa8d, _sse2,  _sse2 )
#endif

#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma, cpu, cpu2 )\
void x264_intra_##mbcmp##_x3_##size##chroma##cpu( pixel *fenc, pixel *fdec, int res[3] )\
{\
    x264_predict_##size##chroma##_##pred1##cpu2( fdec );\
    res[0] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##chroma##_##pred2##cpu2( fdec );\
    res[1] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##chroma##_##pred3##cpu2( fdec );\
    res[2] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP( sad,  4x4,   v, h, dc,  ,, _c )
INTRA_MBCMP(satd,  4x4,   v, h, dc,  ,, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c,, _c )
INTRA_MBCMP(satd,  8x8,  dc, h,  v, c,, _c )
INTRA_MBCMP( sad,  8x16, dc, h,  v, c,, _c )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c,, _c )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  ,, _c )
INTRA_MBCMP(satd, 16x16,  v, h, dc,  ,, _c )

#if HAVE_MMX
#if HIGH_BIT_DEPTH
#define x264_predict_8x8c_v_sse2 x264_predict_8x8c_v_sse
#define x264_predict_8x16c_v_sse2 x264_predict_8x16c_v_sse
#define x264_predict_16x16_v_sse2 x264_predict_16x16_v_sse
INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _mmx2, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _mmx2, _c )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _mmx2, _mmx2 )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _sse2, _sse2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _sse2, _sse2 )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _ssse3, _sse2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _ssse3, _sse2 )
#else
#define x264_predict_8x16c_v_mmx2 x264_predict_8x16c_v_mmx
INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _mmx2, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _mmx2, _mmx2 )
INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _sse2, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _sse2, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _ssse3, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _sse4, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _avx, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _xop, _mmx2 )
#endif
#endif

// No C implementation of intra_satd_x9. See checkasm for its behavior,
// or see x264_mb_analyse_intra for the entirely different algorithm we
// use when lacking an asm implementation of it.



/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
static void ssim_4x4x2_core( const pixel *pix1, intptr_t stride1,
                             const pixel *pix2, intptr_t stride2,
                             int sums[2][4] )
{
    for( int z = 0; z < 2; z++ )
    {
        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
        for( int y = 0; y < 4; y++ )
            for( int x = 0; x < 4; x++ )
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        pix1 += 4;
        pix2 += 4;
    }
}

static float ssim_end1( int s1, int s2, int ss, int s12 )
{
/* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
 * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
 * Maximum value for 9-bit is: ss*64 = (2^9-1)^2*16*4*64 = 1069551616, which will not overflow. */
#if BIT_DEPTH > 9
#define type float
    static const float ssim_c1 = .01*.01*PIXEL_MAX*PIXEL_MAX*64;
    static const float ssim_c2 = .03*.03*PIXEL_MAX*PIXEL_MAX*64*63;
#else
#define type int
    static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
#endif
    type fs1 = s1;
    type fs2 = s2;
    type fss = ss;
    type fs12 = s12;
    type vars = fss*64 - fs1*fs1 - fs2*fs2;
    type covar = fs12*64 - fs1*fs2;
    return (float)(2*fs1*fs2 + ssim_c1) * (float)(2*covar + ssim_c2)
         / ((float)(fs1*fs1 + fs2*fs2 + ssim_c1) * (float)(vars + ssim_c2));
#undef type
}
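/* This is the standard SSIM kernel,
 *
 *   ssim = (2*mu1*mu2 + C1)*(2*cov + C2)
 *        / ((mu1^2 + mu2^2 + C1)*(var1 + var2 + C2))
 *
 * evaluated directly on integer sums over an 8x8 window (four 4x4 blocks,
 * 64 pixels), with C1/C2 pre-scaled into that fixed-point domain; hence
 * the *64 factors here and in the constants above. */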

static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    float ssim = 0.0;
    for( int i = 0; i < width; i++ )
        ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
                           sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
                           sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
                           sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
    return ssim;
}

float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           pixel *pix1, intptr_t stride1,
                           pixel *pix2, intptr_t stride2,
                           int width, int height, void *buf, int *cnt )
{
    int z = 0;
    float ssim = 0.0;
    int (*sum0)[4] = buf;
    int (*sum1)[4] = sum0 + (width >> 2) + 3;
    width >>= 2;
    height >>= 2;
    for( int y = 1; y < height; y++ )
    {
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( int x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        for( int x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    *cnt = (height-1) * (width-1);
    return ssim;
}
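/* SSIM is evaluated on a grid of overlapping 8x8 windows with a stride of
 * 4 pixels: sum0/sum1 hold per-4x4-block sums for two adjacent block rows,
 * and each window combines four of those sums. This is much cheaper than
 * the per-pixel Gaussian window of the original SSIM paper, at some cost
 * in precision; *cnt returns the number of windows so the caller can
 * average. */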

static int pixel_vsad( pixel *src, intptr_t stride, int height )
{
    int score = 0;
    for( int i = 1; i < height; i++, src += stride )
        for( int j = 0; j < 16; j++ )
            score += abs(src[j] - src[j+stride]);
    return score;
}

int x264_field_vsad( x264_t *h, int mb_x, int mb_y )
{
    int score_field, score_frame;
    int stride = h->fenc->i_stride[0];
    int mb_stride = h->mb.i_mb_stride;
    pixel *fenc = h->fenc->plane[0] + 16 * (mb_x + mb_y * stride);
    int mb_xy = mb_x + mb_y*mb_stride;

    /* We don't want to analyze pixels outside the frame, as it gives inaccurate results. */
    int mbpair_height = X264_MIN( h->param.i_height - mb_y * 16, 32 );
    score_frame  = h->pixf.vsad( fenc,          stride, mbpair_height );
    score_field  = h->pixf.vsad( fenc,        stride*2, mbpair_height >> 1 );
    score_field += h->pixf.vsad( fenc+stride, stride*2, mbpair_height >> 1 );

    if( mb_x > 0 )
        score_field += 512 - h->mb.field[mb_xy        -1]*1024;
    if( mb_y > 0 )
        score_field += 512 - h->mb.field[mb_xy-mb_stride]*1024;

    return (score_field < score_frame);
}
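/* Heuristic for the adaptive frame/field decision: vsad measures vertical
 * gradient energy, which interlaced motion inflates when a macroblock pair
 * is read in frame order. If sampling the pair as two fields scores lower
 * than sampling it as a frame, field coding is likely cheaper. The +-512
 * terms (half the 1024 weight) bias the decision toward agreeing with the
 * left and top neighbors' field flags. */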

static int pixel_asd8( pixel *pix1, intptr_t stride1, pixel *pix2, intptr_t stride2, int height )
{
    int sum = 0;
    for( int y = 0; y < height; y++, pix1 += stride1, pix2 += stride2 )
        for( int x = 0; x < 8; x++ )
            sum += pix1[x] - pix2[x];
    return abs( sum );
}

/****************************************************************************
 * successive elimination
 ****************************************************************************/
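/* Successive elimination (ADS) prunes motion-search candidates before any
 * SAD is computed. enc_dc[] holds sub-block pixel sums of the encode block
 * and sums[] walks a plane of precomputed sums at each candidate position,
 * so |enc_dc - sums| is a lower bound on that sub-block's SAD, since
 * |sum(A) - sum(B)| <= sum(|A - B|). Candidates whose bound plus MV cost
 * already reaches thresh cannot win and are skipped; the indices of the
 * survivors are written to mvs[] for full evaluation. */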
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}


/****************************************************************************
 * x264_pixel_init:
 ****************************************************************************/
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );

#define INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
    pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
    INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
    pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
    INIT4_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
    INIT5_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
    INIT6_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
#define INIT8_NAME( name1, name2, cpu ) \
    INIT7_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x16]  = x264_pixel_##name2##_4x16##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )
#define INIT8( name, cpu ) INIT8_NAME( name, name, cpu )
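/* INITn wires up n block sizes for one metric in one statement, largest
 * first (INIT2 covers 16x16 and 16x8; each larger INITn adds smaller
 * sizes, with INIT8 adding 4x16); the cpu argument is the suffix of the
 * symbol to install. Later, more specific assignments simply overwrite
 * earlier ones, so the fastest supported implementation wins. */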

#define INIT_ADS( cpu ) \
    pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
    pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
    pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;

    INIT8( sad, );
    INIT8_NAME( sad_aligned, sad, );
    INIT7( sad_x3, );
    INIT7( sad_x4, );
    INIT8( ssd, );
    INIT8( satd, );
    INIT7( satd_x3, );
    INIT7( satd_x4, );
    INIT4( hadamard_ac, );
    INIT_ADS( );

    pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
    pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
    pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16;
    pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8;
    pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16;
    pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8;

    pixf->ssd_nv12_core = pixel_ssd_nv12_core;
    pixf->ssim_4x4x2_core = ssim_4x4x2_core;
    pixf->ssim_end4 = ssim_end4;
    pixf->vsad = pixel_vsad;
    pixf->asd8 = pixel_asd8;

    pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
    pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
    pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
    pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
    pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
    pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
    pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c;
    pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c;
    pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
    pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;

#if HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX2 )
    {
        INIT7( sad, _mmx2 );
        INIT7( sad_x3, _mmx2 );
        INIT7( sad_x4, _mmx2 );
        INIT8( satd, _mmx2 );
        INIT7( satd_x3, _mmx2 );
        INIT7( satd_x4, _mmx2 );
        INIT4( hadamard_ac, _mmx2 );
        INIT8( ssd, _mmx2 );
        INIT_ADS( _mmx2 );

        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_mmx2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
#if ARCH_X86
        pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_mmx2;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_mmx2;
#endif

        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
    }
    if( cpu&X264_CPU_SSE2 )
    {
        INIT4_NAME( sad_aligned, sad, _sse2_aligned );
        INIT5( ssd, _sse2 );
        INIT6( satd, _sse2 );
        pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_sse2;

        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse2;
#endif
        pixf->intra_sad_x3_4x4  = x264_intra_sad_x3_4x4_sse2;
        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_sse2;
        pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_sse2;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_sse2;
        pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_sse2;
    }
    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT5( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT_ADS( _sse2 );

        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }
        pixf->vsad = x264_pixel_vsad_sse2;
        pixf->asd8 = x264_pixel_asd8_sse2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_sse2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_sse2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_sse2;
    }
    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4]  = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4]  = x264_pixel_sad_x4_8x4_sse2;
    }
    if( cpu&X264_CPU_SSSE3 )
    {
        INIT4_NAME( sad_aligned, sad, _ssse3_aligned );
        INIT7( sad, _ssse3 );
        INIT7( sad_x3, _ssse3 );
        INIT7( sad_x4, _ssse3 );
        INIT_ADS( _ssse3 );
        INIT6( satd, _ssse3 );
        pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_ssse3;

        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
        }
        pixf->vsad = x264_pixel_vsad_ssse3;
        pixf->asd8 = x264_pixel_asd8_ssse3;
        pixf->intra_sad_x3_4x4  = x264_intra_sad_x3_4x4_ssse3;
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_ssse3;
#endif
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_ssse3;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
    }
    if( cpu&X264_CPU_SSE4 )
    {
        INIT6( satd, _sse4 );
        pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_sse4;
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse4;
#endif
    }
    if( cpu&X264_CPU_AVX )
    {
        INIT_ADS( _avx );
        INIT6( satd, _avx );
        pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_avx;
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _avx );
        }
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_avx;
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
        pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_avx;
#endif
    }
    if( cpu&X264_CPU_XOP )
    {
        pixf->vsad = x264_pixel_vsad_xop;
        pixf->asd8 = x264_pixel_asd8_xop;
    }
#endif // HAVE_MMX
#else // !HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        INIT8( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMX2 )
    {
        INIT8( sad, _mmx2 );
        INIT8_NAME( sad_aligned, sad, _mmx2 );
        INIT7( sad_x3, _mmx2 );
        INIT7( sad_x4, _mmx2 );
        INIT8( satd, _mmx2 );
        INIT7( satd_x3, _mmx2 );
        INIT7( satd_x4, _mmx2 );
        INIT4( hadamard_ac, _mmx2 );
        INIT_ADS( _mmx2 );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
        pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_mmx2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmx2;
#if ARCH_X86
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmx2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmx2;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmx2;
        pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_mmx2;
        pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_mmx2;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_mmx2;
        pixf->vsad = x264_pixel_vsad_mmx2;

        if( cpu&X264_CPU_CACHELINE_32 )
        {
            INIT5( sad, _cache32_mmx2 );
            INIT4( sad_x3, _cache32_mmx2 );
            INIT4( sad_x4, _cache32_mmx2 );
        }
        else if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT5( sad, _cache64_mmx2 );
            INIT4( sad_x3, _cache64_mmx2 );
            INIT4( sad_x4, _cache64_mmx2 );
        }
#else
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmx2;
            pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmx2;
            pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmx2;
            pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmx2;
            pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmx2;
            pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmx2;
            pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmx2;
        }
#endif
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_mmx2;
        pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_mmx2;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
    }

    if( cpu&X264_CPU_SSE2 )
    {
        INIT5( ssd, _sse2slow );
        INIT2_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse2;
#endif
        pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8_sse2;
        pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16_sse2;
        pixf->vsad = x264_pixel_vsad_sse2;
        pixf->asd8 = x264_pixel_asd8_sse2;
    }

    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT6( satd, _sse2 );
        pixf->satd[PIXEL_4x16]   = x264_pixel_satd_4x16_sse2;
        INIT6( satd_x3, _sse2 );
        INIT6( satd_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }
        INIT_ADS( _sse2 );
        pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
        pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse2;
        pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( ssd, _sse2 ); /* faster for width 16 on p4 */
#if ARCH_X86
            INIT2( sad, _cache64_sse2 );
            INIT2( sad_x3, _cache64_sse2 );
            INIT2( sad_x4, _cache64_sse2 );
#endif
            if( cpu&X264_CPU_SSE2_IS_FAST )
            {
                pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
                pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
            }
        }

        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            INIT2( sad_x3, _sse2_misalign );
            INIT2( sad_x4, _sse2_misalign );
        }
    }

    if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
    {
        pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
    }

    if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
    {
        INIT2( sad, _sse3 );
        INIT2( sad_x3, _sse3 );
        INIT2( sad_x4, _sse3 );
    }

    if( cpu&X264_CPU_SSSE3 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_ssse3;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_ssse3;
            pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_ssse3;
#if ARCH_X86_64
            pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_ssse3;
#endif
        }
        INIT_ADS( _ssse3 );
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            INIT8( ssd, _ssse3 );
            pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
            pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
            INIT8( satd, _ssse3 );
            INIT7( satd_x3, _ssse3 );
            INIT7( satd_x4, _ssse3 );
        }
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_ssse3;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_ssse3;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_ssse3;
        pixf->asd8 = x264_pixel_asd8_ssse3;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_ssse3;
#endif
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( sad, _cache64_ssse3 );
            INIT2( sad_x3, _cache64_ssse3 );
            INIT2( sad_x4, _cache64_ssse3 );
        }
        if( cpu&X264_CPU_SLOW_ATOM || !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
        {
            INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
        }
    }

    if( cpu&X264_CPU_SSE4 )
    {
        INIT8( satd, _sse4 );
        INIT7( satd_x3, _sse4 );
        INIT7( satd_x4, _sse4 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_sse4;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_sse4;
            pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_sse4;
#if ARCH_X86_64
            pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_sse4;
#endif
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse4;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse4;
#endif
    }

    if( cpu&X264_CPU_AVX )
    {
        INIT8( satd, _avx );
        INIT7( satd_x3, _avx );
        INIT7( satd_x4, _avx );
        INIT_ADS( _avx );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _avx );
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_avx;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_avx;
            pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_avx;
#if ARCH_X86_64
            pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_avx;
#endif
        }
        INIT5( ssd, _avx );
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_avx;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
        pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_avx;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
        pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_avx;
#endif
    }

    if( cpu&X264_CPU_XOP )
    {
        INIT7( satd, _xop );
        INIT7( satd_x3, _xop );
        INIT7( satd_x4, _xop );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _xop );
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_xop;
        }
        INIT5( ssd, _xop );
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_xop;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_xop;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_xop;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_xop;
        pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_xop;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_xop;
        pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_xop;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_xop;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_xop;
#endif
    }
#endif //HAVE_MMX

#if HAVE_ARMV6
    if( cpu&X264_CPU_ARMV6 )
    {
        pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
    }
    if( cpu&X264_CPU_NEON )
    {
        INIT5( sad, _neon );
        INIT5( sad_aligned, _neon );
        INIT7( sad_x3, _neon );
        INIT7( sad_x4, _neon );
        INIT7( ssd, _neon );
        INIT7( satd, _neon );
        INIT7( satd_x3, _neon );
        INIT7( satd_x4, _neon );
        INIT4( hadamard_ac, _neon );
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
        pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8_neon;

        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
        pixf->ssim_end4         = x264_pixel_ssim_end4_neon;

        if( cpu&X264_CPU_FAST_NEON_MRC )
        {
            pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
            pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
            pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
            pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
        }
        else    // really just scheduled for dual issue / A8
        {
            INIT5( sad_aligned, _neon_dual );
        }
    }
#endif
#endif // HIGH_BIT_DEPTH
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif
#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
    INIT4( sad, _vis );
    INIT4( sad_x3, _vis );
    INIT4( sad_x4, _vis );
#endif
#endif // !HIGH_BIT_DEPTH

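    /* ads kernels exist only for the three shapes the main search uses;
     * the remaining partitions appear to reuse one with the same number of
     * sub-block sums, which still yields a valid lower bound. */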
    pixf->ads[PIXEL_8x16] =
    pixf->ads[PIXEL_8x4] =
    pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
    pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}