/*****************************************************************************
 * pixel.c: pixel metrics
 *****************************************************************************
 * Copyright (C) 2003-2014 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"

#if HAVE_MMX
#   include "x86/pixel.h"
#   include "x86/predict.h"
#endif
#if ARCH_PPC
#   include "ppc/pixel.h"
#endif
#if ARCH_ARM
#   include "arm/pixel.h"
#   include "arm/predict.h"
#endif
#if ARCH_UltraSPARC
#   include "sparc/pixel.h"
#endif


/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( pixel *pix1, intptr_t i_stride_pix1,  \
                 pixel *pix2, intptr_t i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            i_sum += abs( pix1[x] - pix2[x] );      \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}


PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x16,   4, 16 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )

/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( pixel *pix1, intptr_t i_stride_pix1,  \
                 pixel *pix2, intptr_t i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            int d = pix1[x] - pix2[x];              \
            i_sum += d*d;                           \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x16,   4, 16 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )

uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, intptr_t i_pix1,
                             pixel *pix2, intptr_t i_pix2, int i_width, int i_height )
{
    uint64_t i_ssd = 0;
    int y;
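    /* The 16x16 SSD is only used when both pointers and both strides are
     * 16-byte aligned, since the SIMD implementations may rely on aligned
     * loads; unaligned surfaces fall back to 8-pixel-wide blocks. */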
    int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        int x = 0;
        if( align )
            for( ; x < i_width-15; x += 16 )
                SSD(PIXEL_16x16);
        for( ; x < i_width-7; x += 8 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( int x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width & 7 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( int x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height & 7 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( int x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}

static void pixel_ssd_nv12_core( pixel *pixuv1, intptr_t stride1, pixel *pixuv2, intptr_t stride2,
                                 int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    *ssd_u = 0, *ssd_v = 0;
    for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
        for( int x = 0; x < width; x++ )
        {
            int du = pixuv1[2*x]   - pixuv2[2*x];
            int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
            *ssd_u += du*du;
            *ssd_v += dv*dv;
        }
}

void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2,
                          int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height, ssd_u, ssd_v );
    if( i_width&7 )
    {
        uint64_t tmp[2];
        pixel_ssd_nv12_core( pix1+(i_width&~7), i_pix1, pix2+(i_width&~7), i_pix2, i_width&7, i_height, &tmp[0], &tmp[1] );
        *ssd_u += tmp[0];
        *ssd_v += tmp[1];
    }
}

/****************************************************************************
 * pixel_var_wxh
 ****************************************************************************/
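/* Returns the pixel sum in the low 32 bits and the sum of squares in the high
 * 32 bits, packed into one uint64_t, so a caller can derive the variance as
 * sqr - sum*sum/(w*h) without a second pass. */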
#define PIXEL_VAR_C( name, w, h ) \
static uint64_t name( pixel *pix, intptr_t i_stride ) \
{                                             \
    uint32_t sum = 0, sqr = 0;                \
    for( int y = 0; y < h; y++ )              \
    {                                         \
        for( int x = 0; x < w; x++ )          \
        {                                     \
            sum += pix[x];                    \
            sqr += pix[x] * pix[x];           \
        }                                     \
        pix += i_stride;                      \
    }                                         \
    return sum + ((uint64_t)sqr << 32);       \
}

PIXEL_VAR_C( x264_pixel_var_16x16, 16, 16 )
PIXEL_VAR_C( x264_pixel_var_8x16,   8, 16 )
PIXEL_VAR_C( x264_pixel_var_8x8,    8,  8 )

/****************************************************************************
 * pixel_var2_wxh
 ****************************************************************************/
#define PIXEL_VAR2_C( name, w, h, shift ) \
static int name( pixel *pix1, intptr_t i_stride1, pixel *pix2, intptr_t i_stride2, int *ssd ) \
{ \
    uint32_t var = 0, sum = 0, sqr = 0; \
    for( int y = 0; y < h; y++ ) \
    { \
        for( int x = 0; x < w; x++ ) \
        { \
            int diff = pix1[x] - pix2[x]; \
            sum += diff; \
            sqr += diff * diff; \
        } \
        pix1 += i_stride1; \
        pix2 += i_stride2; \
    } \
    sum = abs(sum); \
    var = sqr - ((uint64_t)sum * sum >> shift); \
    *ssd = sqr; \
    return var; \
}

PIXEL_VAR2_C( x264_pixel_var2_8x16, 8, 16, 7 )
PIXEL_VAR2_C( x264_pixel_var2_8x8,  8,  8, 6 )

#if BIT_DEPTH > 8
    typedef uint32_t sum_t;
    typedef uint64_t sum2_t;
#else
    typedef uint16_t sum_t;
    typedef uint32_t sum2_t;
#endif
#define BITS_PER_SUM (8 * sizeof(sum_t))
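/* sum2_t packs two sum_t lanes side by side, so one scalar add or subtract
 * below acts on both lanes at once (as long as neither lane overflows). */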

#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
    sum2_t t0 = s0 + s1;\
    sum2_t t1 = s0 - s1;\
    sum2_t t2 = s2 + s3;\
    sum2_t t3 = s2 - s3;\
    d0 = t0 + t2;\
    d2 = t0 - t2;\
    d1 = t1 + t3;\
    d3 = t1 - t3;\
}
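
/* HADAMARD4 is the 4-point Hadamard butterfly:
 *   d0 = s0+s1+s2+s3
 *   d1 = s0-s1+s2-s3
 *   d2 = s0+s1-s2-s3
 *   d3 = s0-s1-s2+s3
 * Applying it to rows and then columns gives the 2D transform used by the
 * satd/sa8d functions below. */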

// in: a pseudo-simd number of the form x+(y<<16)
// return: abs(x)+(abs(y)<<16)
static ALWAYS_INLINE sum2_t abs2( sum2_t a )
{
    sum2_t s = ((a>>(BITS_PER_SUM-1))&(((sum2_t)1<<BITS_PER_SUM)+1))*((sum_t)-1);
    return (a+s)^s;
}
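
/* Worked example (8-bit depth, BITS_PER_SUM == 16): packing x = -3, y = 5
 * gives a = -3 + (5<<16) = 0x0004FFFD (the borrow from the negative low lane
 * is absorbed by the high lane).  a>>15 = 9; masking with 0x10001 keeps 1, so
 * s = 0x0000FFFF, and (a+s)^s = 0x00050003 = 3 + (5<<16), i.e. abs() of each
 * lane, with the carry out of the low lane restoring the borrowed high lane. */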

/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/

static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
    sum2_t tmp[4][2];
    sum2_t a0, a1, a2, a3, b0, b1;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        tmp[i][0] = b0 + b1;
        tmp[i][1] = b0 - b1;
    }
    for( int i = 0; i < 2; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
        sum += ((sum_t)a0) + (a0>>BITS_PER_SUM);
    }
    return sum >> 1;
}

static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
    sum2_t tmp[4][4];
    sum2_t a0, a1, a2, a3;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
        a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
        a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
        a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    return (((sum_t)sum) + (sum>>BITS_PER_SUM)) >> 1;
}

#define PIXEL_SATD_C( w, h, sub )\
static int x264_pixel_satd_##w##x##h( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )\
{\
    int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
            + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
    if( w==16 )\
        sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
            + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
    if( h==16 )\
        sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
            + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
    if( w==16 && h==16 )\
        sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
            + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
    return sum;\
}
PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 16, 8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 4,  16, x264_pixel_satd_4x4 )
PIXEL_SATD_C( 4,  8,  x264_pixel_satd_4x4 )

static NOINLINE int sa8d_8x8( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
    sum2_t tmp[8][4];
    sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
    sum2_t sum = 0;
    for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        a4 = pix1[4] - pix2[4];
        a5 = pix1[5] - pix2[5];
        b2 = (a4+a5) + ((a4-a5)<<BITS_PER_SUM);
        a6 = pix1[6] - pix2[6];
        a7 = pix1[7] - pix2[7];
        b3 = (a6+a7) + ((a6-a7)<<BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
        b0  = abs2(a0+a4) + abs2(a0-a4);
        b0 += abs2(a1+a5) + abs2(a1-a5);
        b0 += abs2(a2+a6) + abs2(a2-a6);
        b0 += abs2(a3+a7) + abs2(a3-a7);
        sum += (sum_t)b0 + (b0>>BITS_PER_SUM);
    }
    return sum;
}

static int x264_pixel_sa8d_8x8( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
    return (sum+2)>>2;
}

static int x264_pixel_sa8d_16x16( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
            + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
            + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
            + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
    return (sum+2)>>2;
}

static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, intptr_t stride )
{
    sum2_t tmp[32];
    sum2_t a0, a1, a2, a3, dc;
    sum2_t sum4 = 0, sum8 = 0;
    for( int i = 0; i < 8; i++, pix+=stride )
    {
        sum2_t *t = tmp + (i&3) + (i&4)*4;
        a0 = (pix[0]+pix[1]) + ((sum2_t)(pix[0]-pix[1])<<BITS_PER_SUM);
        a1 = (pix[2]+pix[3]) + ((sum2_t)(pix[2]-pix[3])<<BITS_PER_SUM);
        t[0] = a0 + a1;
        t[4] = a0 - a1;
        a2 = (pix[4]+pix[5]) + ((sum2_t)(pix[4]-pix[5])<<BITS_PER_SUM);
        a3 = (pix[6]+pix[7]) + ((sum2_t)(pix[6]-pix[7])<<BITS_PER_SUM);
        t[8] = a2 + a3;
        t[12] = a2 - a3;
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
        tmp[i*4+0] = a0;
        tmp[i*4+1] = a1;
        tmp[i*4+2] = a2;
        tmp[i*4+3] = a3;
        sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
        sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
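    /* The DC coefficient was counted by both the 4x4 and the 8x8 pass, so
     * subtract it from each sum: "ac" means DC is excluded. */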
    dc = (sum_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
    sum4 = (sum_t)sum4 + (sum4>>BITS_PER_SUM) - dc;
    sum8 = (sum_t)sum8 + (sum8>>BITS_PER_SUM) - dc;
    return ((uint64_t)sum8<<32) + sum4;
}

#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, intptr_t stride )\
{\
    uint64_t sum = pixel_hadamard_ac( pix, stride );\
    if( w==16 )\
        sum += pixel_hadamard_ac( pix+8, stride );\
    if( h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride, stride );\
    if( w==16 && h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
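    /* repack: the unnormalized 8x8 sums (high word) carry twice the gain of \
     * the 4x4 sums (low word), hence >>34 (i.e. /4) vs >>1 (i.e. /2) */\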
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )


/****************************************************************************
 * pixel_sad_x4
 ****************************************************************************/
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2,\
                                      intptr_t i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3,\
                                      intptr_t i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )

#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif
#endif // !HIGH_BIT_DEPTH

/****************************************************************************
 * pixel_satd_x4
 * no faster than single satd, but needed for satd to be a drop-in replacement for sad
 ****************************************************************************/

#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2,\
                                            intptr_t i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3,\
                                            intptr_t i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )

SATD_X_DECL7()
#if HAVE_MMX
SATD_X_DECL7( _mmx2 )
#if !HIGH_BIT_DEPTH
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL6( _ssse3_atom )
SATD_X_DECL7( _sse4 )
SATD_X_DECL7( _avx )
SATD_X_DECL7( _xop )
#endif // !HIGH_BIT_DEPTH
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif
#endif // !HIGH_BIT_DEPTH

#define INTRA_MBCMP_8x8( mbcmp, cpu, cpu2 )\
void x264_intra_##mbcmp##_x3_8x8##cpu( pixel *fenc, pixel edge[36], int res[3] )\
{\
    ALIGNED_ARRAY_16( pixel, pix, [8*FDEC_STRIDE] );\
    x264_predict_8x8_v##cpu2( pix, edge );\
    res[0] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_h##cpu2( pix, edge );\
    res[1] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_dc##cpu2( pix, edge );\
    res[2] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP_8x8( sad,, _c )
INTRA_MBCMP_8x8(sa8d,, _c )
#if HIGH_BIT_DEPTH && HAVE_MMX
#define x264_predict_8x8_v_sse2 x264_predict_8x8_v_sse
INTRA_MBCMP_8x8( sad, _mmx2,  _c )
INTRA_MBCMP_8x8(sa8d, _sse2,  _sse2 )
#endif
#if !HIGH_BIT_DEPTH && HAVE_ARMV6
INTRA_MBCMP_8x8( sad, _neon, _neon )
INTRA_MBCMP_8x8(sa8d, _neon, _neon )
#endif

#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma, cpu, cpu2 )\
void x264_intra_##mbcmp##_x3_##size##chroma##cpu( pixel *fenc, pixel *fdec, int res[3] )\
{\
    x264_predict_##size##chroma##_##pred1##cpu2( fdec );\
    res[0] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##chroma##_##pred2##cpu2( fdec );\
    res[1] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##chroma##_##pred3##cpu2( fdec );\
    res[2] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP( sad,  4x4,   v, h, dc,  ,, _c )
INTRA_MBCMP(satd,  4x4,   v, h, dc,  ,, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c,, _c )
INTRA_MBCMP(satd,  8x8,  dc, h,  v, c,, _c )
INTRA_MBCMP( sad,  8x16, dc, h,  v, c,, _c )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c,, _c )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  ,, _c )
INTRA_MBCMP(satd, 16x16,  v, h, dc,  ,, _c )

#if HAVE_MMX
#if HIGH_BIT_DEPTH
#define x264_predict_8x8c_v_mmx2 x264_predict_8x8c_v_mmx
#define x264_predict_8x16c_v_mmx2 x264_predict_8x16c_v_c
#define x264_predict_8x8c_v_sse2 x264_predict_8x8c_v_sse
#define x264_predict_8x16c_v_sse2 x264_predict_8x16c_v_sse
#define x264_predict_16x16_v_sse2 x264_predict_16x16_v_sse
INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _mmx2, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _mmx2, _mmx2 )
INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _mmx2, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _mmx2, _mmx2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _mmx2, _mmx2 )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _sse2, _sse2 )
INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _sse2, _sse2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _sse2, _sse2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _sse2, _sse2 )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _ssse3, _sse2 )
INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _ssse3, _sse2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _ssse3, _sse2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _ssse3, _sse2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _sse4, _sse2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _avx, _sse2 )
#else
#define x264_predict_8x16c_v_mmx2 x264_predict_8x16c_v_mmx
INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _mmx2, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _mmx2, _mmx2 )
INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _sse2, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _sse2, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _ssse3, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _sse4, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _avx, _mmx2 )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _xop, _mmx2 )
#endif
#endif
#if !HIGH_BIT_DEPTH && HAVE_ARMV6
INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _neon, _c )
INTRA_MBCMP(satd,  4x4,   v, h, dc,  , _neon, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _neon, _neon )
INTRA_MBCMP(satd,  8x8,  dc, h,  v, c, _neon, _neon )
INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _neon, _c )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _neon, _c )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _neon, _neon )
INTRA_MBCMP(satd, 16x16,  v, h, dc,  , _neon, _neon )
#endif

// No C implementation of intra_satd_x9. See checkasm for its behavior,
// or see x264_mb_analyse_intra for the entirely different algorithm we
// use when lacking an asm implementation of it.



/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
static void ssim_4x4x2_core( const pixel *pix1, intptr_t stride1,
                             const pixel *pix2, intptr_t stride2,
                             int sums[2][4] )
{
    for( int z = 0; z < 2; z++ )
    {
        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
        for( int y = 0; y < 4; y++ )
            for( int x = 0; x < 4; x++ )
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        pix1 += 4;
        pix2 += 4;
    }
}

static float ssim_end1( int s1, int s2, int ss, int s12 )
{
/* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
 * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
 * Maximum value for 9-bit is: ss*64 = (2^9-1)^2*16*4*64 = 1069551616, which will not overflow. */
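/* The ratio computed below is the standard SSIM formula
 *   (2*mu1*mu2 + C1)*(2*covar + C2) / ((mu1^2 + mu2^2 + C1)*(var1 + var2 + C2))
 * evaluated on raw sums over a 64-sample window (four 4x4 blocks): the *64
 * factors fold the sample count into the sums, and the 64*63 in ssim_c2
 * matches the n*(n-1) scaling of the variance estimate. */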
#if BIT_DEPTH > 9
#define type float
    static const float ssim_c1 = .01*.01*PIXEL_MAX*PIXEL_MAX*64;
    static const float ssim_c2 = .03*.03*PIXEL_MAX*PIXEL_MAX*64*63;
#else
#define type int
    static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
#endif
    type fs1 = s1;
    type fs2 = s2;
    type fss = ss;
    type fs12 = s12;
    type vars = fss*64 - fs1*fs1 - fs2*fs2;
    type covar = fs12*64 - fs1*fs2;
    return (float)(2*fs1*fs2 + ssim_c1) * (float)(2*covar + ssim_c2)
         / ((float)(fs1*fs1 + fs2*fs2 + ssim_c1) * (float)(vars + ssim_c2));
#undef type
}

static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    float ssim = 0.0;
    for( int i = 0; i < width; i++ )
        ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
                           sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
                           sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
                           sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
    return ssim;
}

float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           pixel *pix1, intptr_t stride1,
                           pixel *pix2, intptr_t stride2,
                           int width, int height, void *buf, int *cnt )
{
    int z = 0;
    float ssim = 0.0;
    int (*sum0)[4] = buf;
    int (*sum1)[4] = sum0 + (width >> 2) + 3;
    width >>= 2;
    height >>= 2;
    for( int y = 1; y < height; y++ )
    {
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( int x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        for( int x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    *cnt = (height-1) * (width-1);
    return ssim;
}

static int pixel_vsad( pixel *src, intptr_t stride, int height )
{
    int score = 0;
    for( int i = 1; i < height; i++, src += stride )
        for( int j = 0; j < 16; j++ )
            score += abs(src[j] - src[j+stride]);
    return score;
}

int x264_field_vsad( x264_t *h, int mb_x, int mb_y )
{
    int score_field, score_frame;
    int stride = h->fenc->i_stride[0];
    int mb_stride = h->mb.i_mb_stride;
    pixel *fenc = h->fenc->plane[0] + 16 * (mb_x + mb_y * stride);
    int mb_xy = mb_x + mb_y*mb_stride;

    /* We don't want to analyze pixels outside the frame, as it gives inaccurate results. */
    int mbpair_height = X264_MIN( h->param.i_height - mb_y * 16, 32 );
    score_frame  = h->pixf.vsad( fenc,          stride, mbpair_height );
    score_field  = h->pixf.vsad( fenc,        stride*2, mbpair_height >> 1 );
    score_field += h->pixf.vsad( fenc+stride, stride*2, mbpair_height >> 1 );

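    /* Bias towards the field/frame choice of the already-decided left and top
     * neighbors, so the interlacing decision stays spatially consistent. */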
    if( mb_x > 0 )
        score_field += 512 - h->mb.field[mb_xy        -1]*1024;
    if( mb_y > 0 )
        score_field += 512 - h->mb.field[mb_xy-mb_stride]*1024;

    return (score_field < score_frame);
}

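/* asd = absolute sum of (signed) differences: unlike SAD, opposite-signed
 * errors cancel, so this measures the DC offset between two blocks
 * (used e.g. when analysing weighted prediction). */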
static int pixel_asd8( pixel *pix1, intptr_t stride1, pixel *pix2, intptr_t stride2, int height )
{
    int sum = 0;
    for( int y = 0; y < height; y++, pix1 += stride1, pix2 += stride2 )
        for( int x = 0; x < 8; x++ )
            sum += pix1[x] - pix2[x];
    return abs( sum );
}

/****************************************************************************
 * successive elimination
 ****************************************************************************/
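/* Estimate SAD cheaply from precomputed subblock sums (enc_dc[] vs. the
 * sliding sums[] of the reference frame) plus the MV cost, and keep in mvs[]
 * only the candidates that beat the threshold; the motion search then visits
 * only those.  Since |sum(a) - sum(b)| <= sum|a - b| per subblock, the
 * estimate is a lower bound on the true SAD. */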
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}


/****************************************************************************
 * x264_pixel_init:
 ****************************************************************************/
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );

#define INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
    pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
    INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
    pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
    INIT4_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
    INIT5_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
    INIT6_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
#define INIT8_NAME( name1, name2, cpu ) \
    INIT7_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x16]  = x264_pixel_##name2##_4x16##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )
#define INIT8( name, cpu ) INIT8_NAME( name, name, cpu )
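/* e.g. INIT2( sad, _mmx2 ) expands to
 *   pixf->sad[PIXEL_16x16] = x264_pixel_sad_16x16_mmx2;
 *   pixf->sad[PIXEL_16x8]  = x264_pixel_sad_16x8_mmx2;
 * and INIT8 fills in all eight partition sizes down to 4x16. */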

#define INIT_ADS( cpu ) \
    pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
    pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
    pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;

    INIT8( sad, );
    INIT8_NAME( sad_aligned, sad, );
    INIT7( sad_x3, );
    INIT7( sad_x4, );
    INIT8( ssd, );
    INIT8( satd, );
    INIT7( satd_x3, );
    INIT7( satd_x4, );
    INIT4( hadamard_ac, );
    INIT_ADS( );

    pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
    pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
    pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16;
    pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8;
    pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16;
    pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8;

    pixf->ssd_nv12_core = pixel_ssd_nv12_core;
    pixf->ssim_4x4x2_core = ssim_4x4x2_core;
    pixf->ssim_end4 = ssim_end4;
    pixf->vsad = pixel_vsad;
    pixf->asd8 = pixel_asd8;

    pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
    pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
    pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
    pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
    pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
    pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
    pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c;
    pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c;
    pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
    pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;

#if HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX2 )
    {
        INIT7( sad, _mmx2 );
        INIT7_NAME( sad_aligned, sad, _mmx2 );
        INIT7( sad_x3, _mmx2 );
        INIT7( sad_x4, _mmx2 );
        INIT8( satd, _mmx2 );
        INIT7( satd_x3, _mmx2 );
        INIT7( satd_x4, _mmx2 );
        INIT4( hadamard_ac, _mmx2 );
        INIT8( ssd, _mmx2 );
        INIT_ADS( _mmx2 );

        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_mmx2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
#if ARCH_X86
        pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_mmx2;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_mmx2;
#endif

        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_mmx2;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_mmx2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
    }
    if( cpu&X264_CPU_SSE2 )
    {
        INIT4_NAME( sad_aligned, sad, _sse2_aligned );
        INIT5( ssd, _sse2 );
        INIT6( satd, _sse2 );
        pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_sse2;

        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse2;
#endif
        pixf->intra_sad_x3_4x4  = x264_intra_sad_x3_4x4_sse2;
        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_sse2;
        pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_sse2;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_sse2;
        pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_sse2;
    }
    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT5( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT_ADS( _sse2 );

        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }
        pixf->vsad = x264_pixel_vsad_sse2;
        pixf->asd8 = x264_pixel_asd8_sse2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_sse2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_sse2;
        pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_sse2;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_sse2;
    }
    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4]  = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4]  = x264_pixel_sad_x4_8x4_sse2;
    }
    if( cpu&X264_CPU_SSSE3 )
    {
        INIT4_NAME( sad_aligned, sad, _ssse3_aligned );
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_ssse3;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_ssse3;
        INIT7( sad, _ssse3 );
        INIT7( sad_x3, _ssse3 );
        INIT7( sad_x4, _ssse3 );
        INIT_ADS( _ssse3 );
        INIT6( satd, _ssse3 );
        pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_ssse3;

        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
        }
        pixf->vsad = x264_pixel_vsad_ssse3;
        pixf->asd8 = x264_pixel_asd8_ssse3;
        pixf->intra_sad_x3_4x4  = x264_intra_sad_x3_4x4_ssse3;
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_ssse3;
#endif
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_ssse3;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_ssse3;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
    }
    if( cpu&X264_CPU_SSE4 )
    {
        INIT6( satd, _sse4 );
        pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_sse4;
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse4;
#endif
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse4;
    }
    if( cpu&X264_CPU_AVX )
    {
        INIT5_NAME( sad_aligned, sad, _ssse3 ); /* AVX-capable CPUs don't benefit from an aligned version */
        INIT_ADS( _avx );
        INIT6( satd, _avx );
        pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_avx;
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _avx );
        }
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_avx;
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
        pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_avx;
#endif
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_avx;
    }
    if( cpu&X264_CPU_XOP )
    {
        pixf->vsad = x264_pixel_vsad_xop;
        pixf->asd8 = x264_pixel_asd8_xop;
    }
    if( cpu&X264_CPU_AVX2 )
    {
        INIT2( ssd, _avx2 );
        INIT2( sad, _avx2 );
        INIT2_NAME( sad_aligned, sad, _avx2 );
        INIT2( sad_x3, _avx2 );
        INIT2( sad_x4, _avx2 );
        pixf->vsad = x264_pixel_vsad_avx2;
        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_avx2;
        pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_avx2;
    }
#endif // HAVE_MMX
#else // !HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        INIT8( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMX2 )
    {
        INIT8( sad, _mmx2 );
        INIT8_NAME( sad_aligned, sad, _mmx2 );
        INIT7( sad_x3, _mmx2 );
        INIT7( sad_x4, _mmx2 );
        INIT8( satd, _mmx2 );
        INIT7( satd_x3, _mmx2 );
        INIT7( satd_x4, _mmx2 );
        INIT4( hadamard_ac, _mmx2 );
        INIT_ADS( _mmx2 );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
        pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_mmx2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmx2;
#if ARCH_X86
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmx2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmx2;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmx2;
        pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_mmx2;
        pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_mmx2;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_mmx2;
        pixf->vsad = x264_pixel_vsad_mmx2;

        if( cpu&X264_CPU_CACHELINE_32 )
        {
            INIT5( sad, _cache32_mmx2 );
            INIT4( sad_x3, _cache32_mmx2 );
            INIT4( sad_x4, _cache32_mmx2 );
        }
        else if( cpu&X264_CPU_CACHELINE_64 && !(cpu&X264_CPU_SLOW_ATOM) )
        {
            INIT5( sad, _cache64_mmx2 );
            INIT4( sad_x3, _cache64_mmx2 );
            INIT4( sad_x4, _cache64_mmx2 );
        }
#else
        if( cpu&X264_CPU_CACHELINE_64 && !(cpu&X264_CPU_SLOW_ATOM) )
        {
            pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmx2;
            pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmx2;
            pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmx2;
            pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmx2;
            pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmx2;
            pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmx2;
            pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmx2;
        }
#endif
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_mmx2;
        pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_mmx2;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
    }

    if( cpu&X264_CPU_SSE2 )
    {
        INIT5( ssd, _sse2slow );
        INIT2_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse2;
#endif
        pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8_sse2;
        pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16_sse2;
        pixf->vsad = x264_pixel_vsad_sse2;
        pixf->asd8 = x264_pixel_asd8_sse2;
    }

    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT6( satd, _sse2 );
        pixf->satd[PIXEL_4x16]   = x264_pixel_satd_4x16_sse2;
        INIT6( satd_x3, _sse2 );
        INIT6( satd_x4, _sse2 );
        INIT4( hadamard_ac, _sse2 );
        INIT_ADS( _sse2 );
        pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
        pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse2;
        pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( ssd, _sse2 ); /* faster for width 16 on P4 */
#if ARCH_X86
            INIT2( sad, _cache64_sse2 );
            INIT2( sad_x3, _cache64_sse2 );
            INIT2( sad_x4, _cache64_sse2 );
#endif
            if( cpu&X264_CPU_SSE2_IS_FAST )
            {
                pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
                pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
            }
        }
    }

    if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
    {
        pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
    }

    if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
    {
        INIT2( sad, _sse3 );
        INIT2( sad_x3, _sse3 );
        INIT2( sad_x4, _sse3 );
    }

    if( cpu&X264_CPU_SSSE3 )
    {
        INIT4( hadamard_ac, _ssse3 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_ssse3;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_ssse3;
            pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_ssse3;
#if ARCH_X86_64
            pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_ssse3;
#endif
        }
        INIT_ADS( _ssse3 );
        if( cpu&X264_CPU_SLOW_ATOM )
        {
            pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3_atom;
            pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3_atom;
            INIT6( satd, _ssse3_atom );
            pixf->satd[PIXEL_4x16]  = x264_pixel_satd_4x16_ssse3_atom;
            INIT6( satd_x3, _ssse3_atom );
            INIT6( satd_x4, _ssse3_atom );
            INIT4( hadamard_ac, _ssse3_atom );
#if ARCH_X86_64
            pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_ssse3_atom;
#endif
        }
        else
        {
            INIT8( ssd, _ssse3 );
            pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
            pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
            INIT8( satd, _ssse3 );
            INIT7( satd_x3, _ssse3 );
            INIT7( satd_x4, _ssse3 );
#if ARCH_X86_64
            pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_ssse3;
#endif
        }
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
        if( !(cpu&X264_CPU_SLOW_PSHUFB) )
            pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_ssse3;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_ssse3;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_ssse3;
        pixf->asd8 = x264_pixel_asd8_ssse3;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( sad, _cache64_ssse3 );
            INIT2( sad_x3, _cache64_ssse3 );
            INIT2( sad_x4, _cache64_ssse3 );
        }
        else
        {
            INIT2( sad_x3, _ssse3 );
            INIT5( sad_x4, _ssse3 );
        }
        if( (cpu&X264_CPU_SLOW_ATOM) || (cpu&X264_CPU_SLOW_SHUFFLE) )
        {
            INIT5( ssd, _sse2 ); /* on Conroe, SSE2 is faster for widths 8/16 */
        }
    }

    if( cpu&X264_CPU_SSE4 )
    {
        INIT8( satd, _sse4 );
        INIT7( satd_x3, _sse4 );
        INIT7( satd_x4, _sse4 );
        INIT4( hadamard_ac, _sse4 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_sse4;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_sse4;
            pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_sse4;
#if ARCH_X86_64
            pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_sse4;
#endif
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse4;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse4;
#endif
    }

    if( cpu&X264_CPU_AVX )
    {
        INIT2_NAME( sad_aligned, sad, _sse2 ); /* AVX-capable CPUs don't benefit from an aligned version */
        INIT2( sad_x3, _avx );
        INIT2( sad_x4, _avx );
        INIT8( satd, _avx );
        INIT7( satd_x3, _avx );
        INIT7( satd_x4, _avx );
        INIT_ADS( _avx );
        INIT4( hadamard_ac, _avx );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_avx;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_avx;
            pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_avx;
#if ARCH_X86_64
            pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_avx;
#endif
        }
        INIT5( ssd, _avx );
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_avx;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
        pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_avx;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
        pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_avx;
#endif
    }

    if( cpu&X264_CPU_XOP )
    {
        INIT7( satd, _xop );
        INIT7( satd_x3, _xop );
        INIT7( satd_x4, _xop );
        INIT4( hadamard_ac, _xop );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_xop;
        }
        INIT5( ssd, _xop );
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_xop;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_xop;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_xop;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_xop;
        pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_xop;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_xop;
        pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_xop;
        pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_xop;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_xop;
#endif
    }

    if( cpu&X264_CPU_AVX2 )
    {
        INIT2( ssd, _avx2 );
        INIT2( sad_x3, _avx2 );
        INIT2( sad_x4, _avx2 );
        INIT4( satd, _avx2 );
        INIT2( hadamard_ac, _avx2 );
        INIT_ADS( _avx2 );
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx2;
        pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16_avx2;
        pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8_avx2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_avx2;
        pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_avx2;
        pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_avx2;
        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_avx2;
#if ARCH_X86_64
        pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_avx2;
#endif
    }
#endif //HAVE_MMX

#if HAVE_ARMV6
    if( cpu&X264_CPU_ARMV6 )
    {
        pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
    }
    if( cpu&X264_CPU_NEON )
    {
        INIT5( sad, _neon );
        INIT5( sad_aligned, _neon );
        INIT7( sad_x3, _neon );
        INIT7( sad_x4, _neon );
        INIT7( ssd, _neon );
        INIT7( satd, _neon );
        INIT7( satd_x3, _neon );
        INIT7( satd_x4, _neon );
        INIT4( hadamard_ac, _neon );
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
        pixf->var[PIXEL_8x16]   = x264_pixel_var_8x16_neon;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
        pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8_neon;
        pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16_neon;

        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_neon;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_neon;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_neon;
        pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8_neon;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_neon;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_neon;
        pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_neon;
        pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_neon;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_neon;
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_neon;

        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
        pixf->ssim_end4         = x264_pixel_ssim_end4_neon;

        if( cpu&X264_CPU_FAST_NEON_MRC )
        {
            pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
            pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
            pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
            pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
        }
        else    // really just scheduled for dual issue / A8
        {
            INIT5( sad_aligned, _neon_dual );
        }
    }
#endif
#endif // HIGH_BIT_DEPTH
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif
#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
    INIT4( sad, _vis );
    INIT4( sad_x3, _vis );
    INIT4( sad_x4, _vis );
#endif
#endif // !HIGH_BIT_DEPTH

    pixf->ads[PIXEL_8x16] =
    pixf->ads[PIXEL_8x4] =
    pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
    pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}