1 /*****************************************************************************
2  * pixel.c: pixel metrics
3  *****************************************************************************
4  * Copyright (C) 2003-2014 x264 project
5  *
6  * Authors: Loren Merritt <lorenm@u.washington.edu>
7  *          Laurent Aimar <fenrir@via.ecp.fr>
8  *          Fiona Glaser <fiona@x264.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
23  *
24  * This program is also available under a commercial proprietary license.
25  * For more information, contact us at licensing@x264.com.
26  *****************************************************************************/
27
28 #include "common.h"
29
30 #if HAVE_MMX
31 #   include "x86/pixel.h"
32 #   include "x86/predict.h"
33 #endif
34 #if ARCH_PPC
35 #   include "ppc/pixel.h"
36 #endif
37 #if ARCH_ARM
38 #   include "arm/pixel.h"
39 #   include "arm/predict.h"
40 #endif
41
42
43 /****************************************************************************
44  * pixel_sad_WxH
45  ****************************************************************************/
46 #define PIXEL_SAD_C( name, lx, ly ) \
47 static int name( pixel *pix1, intptr_t i_stride_pix1,  \
48                  pixel *pix2, intptr_t i_stride_pix2 ) \
49 {                                                   \
50     int i_sum = 0;                                  \
51     for( int y = 0; y < ly; y++ )                   \
52     {                                               \
53         for( int x = 0; x < lx; x++ )               \
54         {                                           \
55             i_sum += abs( pix1[x] - pix2[x] );      \
56         }                                           \
57         pix1 += i_stride_pix1;                      \
58         pix2 += i_stride_pix2;                      \
59     }                                               \
60     return i_sum;                                   \
61 }
62
63
64 PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
65 PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
66 PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
67 PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
68 PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
69 PIXEL_SAD_C( x264_pixel_sad_4x16,   4, 16 )
70 PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
71 PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )
72
73 /****************************************************************************
74  * pixel_ssd_WxH
75  ****************************************************************************/
76 #define PIXEL_SSD_C( name, lx, ly ) \
77 static int name( pixel *pix1, intptr_t i_stride_pix1,  \
78                  pixel *pix2, intptr_t i_stride_pix2 ) \
79 {                                                   \
80     int i_sum = 0;                                  \
81     for( int y = 0; y < ly; y++ )                   \
82     {                                               \
83         for( int x = 0; x < lx; x++ )               \
84         {                                           \
85             int d = pix1[x] - pix2[x];              \
86             i_sum += d*d;                           \
87         }                                           \
88         pix1 += i_stride_pix1;                      \
89         pix2 += i_stride_pix2;                      \
90     }                                               \
91     return i_sum;                                   \
92 }
93
94 PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
95 PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
96 PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
97 PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
98 PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
99 PIXEL_SSD_C( x264_pixel_ssd_4x16,   4, 16 )
100 PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
101 PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )
102
103 uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, intptr_t i_pix1,
104                              pixel *pix2, intptr_t i_pix2, int i_width, int i_height )
105 {
106     uint64_t i_ssd = 0;
107     int y;
108     int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);
109
110 #define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
111                                           pix2 + y*i_pix2 + x, i_pix2 );
112     for( y = 0; y < i_height-15; y += 16 )
113     {
114         int x = 0;
115         if( align )
116             for( ; x < i_width-15; x += 16 )
117                 SSD(PIXEL_16x16);
118         for( ; x < i_width-7; x += 8 )
119             SSD(PIXEL_8x16);
120     }
121     if( y < i_height-7 )
122         for( int x = 0; x < i_width-7; x += 8 )
123             SSD(PIXEL_8x8);
124 #undef SSD
125
126 #define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
127     if( i_width & 7 )
128     {
129         for( y = 0; y < (i_height & ~7); y++ )
130             for( int x = i_width & ~7; x < i_width; x++ )
131                 SSD1;
132     }
133     if( i_height & 7 )
134     {
135         for( y = i_height & ~7; y < i_height; y++ )
136             for( int x = 0; x < i_width; x++ )
137                 SSD1;
138     }
139 #undef SSD1
140
141     return i_ssd;
142 }
143
144 static void pixel_ssd_nv12_core( pixel *pixuv1, intptr_t stride1, pixel *pixuv2, intptr_t stride2,
145                                  int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
146 {
147     *ssd_u = 0, *ssd_v = 0;
148     for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
149         for( int x = 0; x < width; x++ )
150         {
151             int du = pixuv1[2*x]   - pixuv2[2*x];
152             int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
153             *ssd_u += du*du;
154             *ssd_v += dv*dv;
155         }
156 }
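/* Note: NV12 chroma is stored interleaved (U at even offsets, V at odd ones),
 * which is why the U and V SSDs above are accumulated from 2*x and 2*x+1 in a
 * single pass over the plane. */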
157
158 void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2,
159                           int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v )
160 {
161     pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height, ssd_u, ssd_v );
162     if( i_width&7 )
163     {
164         uint64_t tmp[2];
165         pixel_ssd_nv12_core( pix1+(i_width&~7), i_pix1, pix2+(i_width&~7), i_pix2, i_width&7, i_height, &tmp[0], &tmp[1] );
166         *ssd_u += tmp[0];
167         *ssd_v += tmp[1];
168     }
169 }
170
171 /****************************************************************************
172  * pixel_var_wxh
173  ****************************************************************************/
174 #define PIXEL_VAR_C( name, w, h ) \
175 static uint64_t name( pixel *pix, intptr_t i_stride ) \
176 {                                             \
177     uint32_t sum = 0, sqr = 0;                \
178     for( int y = 0; y < h; y++ )              \
179     {                                         \
180         for( int x = 0; x < w; x++ )          \
181         {                                     \
182             sum += pix[x];                    \
183             sqr += pix[x] * pix[x];           \
184         }                                     \
185         pix += i_stride;                      \
186     }                                         \
187     return sum + ((uint64_t)sqr << 32);       \
188 }
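/* The 64-bit return value packs two partial results: the pixel sum in the low
 * 32 bits and the sum of squares in the high 32 bits, so one call gives the
 * caller everything needed to derive the block variance (e.g.
 * sqr - sum*sum/(w*h), computed by the caller). */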
189
190 PIXEL_VAR_C( x264_pixel_var_16x16, 16, 16 )
191 PIXEL_VAR_C( x264_pixel_var_8x16,   8, 16 )
192 PIXEL_VAR_C( x264_pixel_var_8x8,    8,  8 )
193
194 /****************************************************************************
195  * pixel_var2_wxh
196  ****************************************************************************/
197 #define PIXEL_VAR2_C( name, w, h, shift ) \
198 static int name( pixel *pix1, intptr_t i_stride1, pixel *pix2, intptr_t i_stride2, int *ssd ) \
199 { \
200     uint32_t var = 0, sum = 0, sqr = 0; \
201     for( int y = 0; y < h; y++ ) \
202     { \
203         for( int x = 0; x < w; x++ ) \
204         { \
205             int diff = pix1[x] - pix2[x]; \
206             sum += diff; \
207             sqr += diff * diff; \
208         } \
209         pix1 += i_stride1; \
210         pix2 += i_stride2; \
211     } \
212     sum = abs(sum); \
213     var = sqr - ((uint64_t)sum * sum >> shift); \
214     *ssd = sqr; \
215     return var; \
216 }
217
218 PIXEL_VAR2_C( x264_pixel_var2_8x16, 8, 16, 7 )
219 PIXEL_VAR2_C( x264_pixel_var2_8x8,  8,  8, 6 )
220
221 #if BIT_DEPTH > 8
222     typedef uint32_t sum_t;
223     typedef uint64_t sum2_t;
224 #else
225     typedef uint16_t sum_t;
226     typedef uint32_t sum2_t;
227 #endif
228 #define BITS_PER_SUM (8 * sizeof(sum_t))
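/* The SATD/SA8D code below uses a scalar pseudo-SIMD trick: two sum_t lanes
 * are packed into a single sum2_t (one lane at bit 0, the other at bit
 * BITS_PER_SUM), so each C operation processes two transform coefficients at
 * once.  The lane width follows the bit depth selected above. */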
229
230 #define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
231     sum2_t t0 = s0 + s1;\
232     sum2_t t1 = s0 - s1;\
233     sum2_t t2 = s2 + s3;\
234     sum2_t t3 = s2 - s3;\
235     d0 = t0 + t2;\
236     d2 = t0 - t2;\
237     d1 = t1 + t3;\
238     d3 = t1 - t3;\
239 }
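/* Worked example of HADAMARD4 with plain integers (ignoring the lane packing):
 * inputs (s0,s1,s2,s3) = (1,2,3,4) give t0=3, t1=-1, t2=7, t3=-1 and outputs
 * d0=10, d1=-2, d2=-4, d3=0, i.e. the four 4-point Hadamard coefficients of
 * (1,2,3,4). */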
240
241 // in: a pseudo-simd number of the form x+(y<<BITS_PER_SUM)
242 // return: abs(x)+(abs(y)<<BITS_PER_SUM)
243 static ALWAYS_INLINE sum2_t abs2( sum2_t a )
244 {
245     sum2_t s = ((a>>(BITS_PER_SUM-1))&(((sum2_t)1<<BITS_PER_SUM)+1))*((sum_t)-1);
246     return (a+s)^s;
247 }
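/* Worked example of abs2 at 8-bit depth (BITS_PER_SUM == 16): packing the pair
 * (3,-5) as 3 + ((sum2_t)-5 << 16) gives a = 0xFFFB0003; the computed sign
 * mask is s = 0xFFFF0000, and (a+s)^s = 0x00050003, i.e. 3 in the low lane and
 * 5 in the high lane, as expected. */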
248
249 /****************************************************************************
250  * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
251  ****************************************************************************/
252
253 static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
254 {
255     sum2_t tmp[4][2];
256     sum2_t a0, a1, a2, a3, b0, b1;
257     sum2_t sum = 0;
258     for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
259     {
260         a0 = pix1[0] - pix2[0];
261         a1 = pix1[1] - pix2[1];
262         b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
263         a2 = pix1[2] - pix2[2];
264         a3 = pix1[3] - pix2[3];
265         b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
266         tmp[i][0] = b0 + b1;
267         tmp[i][1] = b0 - b1;
268     }
269     for( int i = 0; i < 2; i++ )
270     {
271         HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
272         a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
273         sum += ((sum_t)a0) + (a0>>BITS_PER_SUM);
274     }
275     return sum >> 1;
276 }
277
278 static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
279 {
280     sum2_t tmp[4][4];
281     sum2_t a0, a1, a2, a3;
282     sum2_t sum = 0;
283     for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
284     {
285         a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
286         a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
287         a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
288         a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
289         HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
290     }
291     for( int i = 0; i < 4; i++ )
292     {
293         HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
294         sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
295     }
296     return (((sum_t)sum) + (sum>>BITS_PER_SUM)) >> 1;
297 }
298
299 #define PIXEL_SATD_C( w, h, sub )\
300 static int x264_pixel_satd_##w##x##h( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )\
301 {\
302     int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
303             + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
304     if( w==16 )\
305         sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
306             + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
307     if( h==16 )\
308         sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
309             + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
310     if( w==16 && h==16 )\
311         sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
312             + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
313     return sum;\
314 }
315 PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
316 PIXEL_SATD_C( 16, 8,  x264_pixel_satd_8x4 )
317 PIXEL_SATD_C( 8,  16, x264_pixel_satd_8x4 )
318 PIXEL_SATD_C( 8,  8,  x264_pixel_satd_8x4 )
319 PIXEL_SATD_C( 4,  16, x264_pixel_satd_4x4 )
320 PIXEL_SATD_C( 4,  8,  x264_pixel_satd_4x4 )
321
322 static NOINLINE int sa8d_8x8( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
323 {
324     sum2_t tmp[8][4];
325     sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
326     sum2_t sum = 0;
327     for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
328     {
329         a0 = pix1[0] - pix2[0];
330         a1 = pix1[1] - pix2[1];
331         b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
332         a2 = pix1[2] - pix2[2];
333         a3 = pix1[3] - pix2[3];
334         b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
335         a4 = pix1[4] - pix2[4];
336         a5 = pix1[5] - pix2[5];
337         b2 = (a4+a5) + ((a4-a5)<<BITS_PER_SUM);
338         a6 = pix1[6] - pix2[6];
339         a7 = pix1[7] - pix2[7];
340         b3 = (a6+a7) + ((a6-a7)<<BITS_PER_SUM);
341         HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
342     }
343     for( int i = 0; i < 4; i++ )
344     {
345         HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
346         HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
347         b0  = abs2(a0+a4) + abs2(a0-a4);
348         b0 += abs2(a1+a5) + abs2(a1-a5);
349         b0 += abs2(a2+a6) + abs2(a2-a6);
350         b0 += abs2(a3+a7) + abs2(a3-a7);
351         sum += (sum_t)b0 + (b0>>BITS_PER_SUM);
352     }
353     return sum;
354 }
355
356 static int x264_pixel_sa8d_8x8( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
357 {
358     int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
359     return (sum+2)>>2;
360 }
361
362 static int x264_pixel_sa8d_16x16( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
363 {
364     int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
365             + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
366             + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
367             + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
368     return (sum+2)>>2;
369 }
370
371 static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, intptr_t stride )
372 {
373     sum2_t tmp[32];
374     sum2_t a0, a1, a2, a3, dc;
375     sum2_t sum4 = 0, sum8 = 0;
376     for( int i = 0; i < 8; i++, pix+=stride )
377     {
378         sum2_t *t = tmp + (i&3) + (i&4)*4;
379         a0 = (pix[0]+pix[1]) + ((sum2_t)(pix[0]-pix[1])<<BITS_PER_SUM);
380         a1 = (pix[2]+pix[3]) + ((sum2_t)(pix[2]-pix[3])<<BITS_PER_SUM);
381         t[0] = a0 + a1;
382         t[4] = a0 - a1;
383         a2 = (pix[4]+pix[5]) + ((sum2_t)(pix[4]-pix[5])<<BITS_PER_SUM);
384         a3 = (pix[6]+pix[7]) + ((sum2_t)(pix[6]-pix[7])<<BITS_PER_SUM);
385         t[8] = a2 + a3;
386         t[12] = a2 - a3;
387     }
388     for( int i = 0; i < 8; i++ )
389     {
390         HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
391         tmp[i*4+0] = a0;
392         tmp[i*4+1] = a1;
393         tmp[i*4+2] = a2;
394         tmp[i*4+3] = a3;
395         sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
396     }
397     for( int i = 0; i < 8; i++ )
398     {
399         HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
400         sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
401     }
402     dc = (sum_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
403     sum4 = (sum_t)sum4 + (sum4>>BITS_PER_SUM) - dc;
404     sum8 = (sum_t)sum8 + (sum8>>BITS_PER_SUM) - dc;
405     return ((uint64_t)sum8<<32) + sum4;
406 }
407
408 #define HADAMARD_AC(w,h) \
409 static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, intptr_t stride )\
410 {\
411     uint64_t sum = pixel_hadamard_ac( pix, stride );\
412     if( w==16 )\
413         sum += pixel_hadamard_ac( pix+8, stride );\
414     if( h==16 )\
415         sum += pixel_hadamard_ac( pix+8*stride, stride );\
416     if( w==16 && h==16 )\
417         sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
418     return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
419 }
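/* In outline: pixel_hadamard_ac() packs two costs into its 64-bit return
 * value, the 8x8-transform cost in the high 32 bits and the sum of the four
 * 4x4-transform costs in the low 32 bits, with the block's DC term removed
 * from both halves.  The wrapper above accumulates up to four 8x8 blocks and
 * then normalizes each half separately: >>2 for the 8x8 part and >>1 for the
 * 4x4 part. */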
420 HADAMARD_AC( 16, 16 )
421 HADAMARD_AC( 16, 8 )
422 HADAMARD_AC( 8, 16 )
423 HADAMARD_AC( 8, 8 )
424
425
426 /****************************************************************************
427  * pixel_sad_x4
428  ****************************************************************************/
429 #define SAD_X( size ) \
430 static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2,\
431                                       intptr_t i_stride, int scores[3] )\
432 {\
433     scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
434     scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
435     scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
436 }\
437 static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1,pixel *pix2, pixel *pix3,\
438                                       intptr_t i_stride, int scores[4] )\
439 {\
440     scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
441     scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
442     scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
443     scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
444 }
445
446 SAD_X( 16x16 )
447 SAD_X( 16x8 )
448 SAD_X( 8x16 )
449 SAD_X( 8x8 )
450 SAD_X( 8x4 )
451 SAD_X( 4x8 )
452 SAD_X( 4x4 )
453
454 /****************************************************************************
455  * pixel_satd_x4
456  * no faster than single satd, but needed for satd to be a drop-in replacement for sad
457  ****************************************************************************/
458
459 #define SATD_X( size, cpu ) \
460 static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2,\
461                                             intptr_t i_stride, int scores[3] )\
462 {\
463     scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
464     scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
465     scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
466 }\
467 static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3,\
468                                             intptr_t i_stride, int scores[4] )\
469 {\
470     scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
471     scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
472     scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
473     scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
474 }
475 #define SATD_X_DECL6( cpu )\
476 SATD_X( 16x16, cpu )\
477 SATD_X( 16x8, cpu )\
478 SATD_X( 8x16, cpu )\
479 SATD_X( 8x8, cpu )\
480 SATD_X( 8x4, cpu )\
481 SATD_X( 4x8, cpu )
482 #define SATD_X_DECL7( cpu )\
483 SATD_X_DECL6( cpu )\
484 SATD_X( 4x4, cpu )
485
486 SATD_X_DECL7()
487 #if HAVE_MMX
488 SATD_X_DECL7( _mmx2 )
489 #if !HIGH_BIT_DEPTH
490 SATD_X_DECL6( _sse2 )
491 SATD_X_DECL7( _ssse3 )
492 SATD_X_DECL6( _ssse3_atom )
493 SATD_X_DECL7( _sse4 )
494 SATD_X_DECL7( _avx )
495 SATD_X_DECL7( _xop )
496 #endif // !HIGH_BIT_DEPTH
497 #endif
498
499 #if !HIGH_BIT_DEPTH
500 #if HAVE_ARMV6
501 SATD_X_DECL7( _neon )
502 #endif
503 #endif // !HIGH_BIT_DEPTH
504
505 #define INTRA_MBCMP_8x8( mbcmp, cpu, cpu2 )\
506 void x264_intra_##mbcmp##_x3_8x8##cpu( pixel *fenc, pixel edge[36], int res[3] )\
507 {\
508     ALIGNED_ARRAY_16( pixel, pix, [8*FDEC_STRIDE] );\
509     x264_predict_8x8_v##cpu2( pix, edge );\
510     res[0] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
511     x264_predict_8x8_h##cpu2( pix, edge );\
512     res[1] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
513     x264_predict_8x8_dc##cpu2( pix, edge );\
514     res[2] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
515 }
516
517 INTRA_MBCMP_8x8( sad,, _c )
518 INTRA_MBCMP_8x8(sa8d,, _c )
519 #if HIGH_BIT_DEPTH && HAVE_MMX
520 #define x264_predict_8x8_v_sse2 x264_predict_8x8_v_sse
521 INTRA_MBCMP_8x8( sad, _mmx2,  _c )
522 INTRA_MBCMP_8x8(sa8d, _sse2,  _sse2 )
523 #endif
524 #if !HIGH_BIT_DEPTH && HAVE_ARMV6
525 INTRA_MBCMP_8x8( sad, _neon, _neon )
526 INTRA_MBCMP_8x8(sa8d, _neon, _neon )
527 #endif
528
529 #define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma, cpu, cpu2 )\
530 void x264_intra_##mbcmp##_x3_##size##chroma##cpu( pixel *fenc, pixel *fdec, int res[3] )\
531 {\
532     x264_predict_##size##chroma##_##pred1##cpu2( fdec );\
533     res[0] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
534     x264_predict_##size##chroma##_##pred2##cpu2( fdec );\
535     res[1] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
536     x264_predict_##size##chroma##_##pred3##cpu2( fdec );\
537     res[2] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
538 }
539
540 INTRA_MBCMP( sad,  4x4,   v, h, dc,  ,, _c )
541 INTRA_MBCMP(satd,  4x4,   v, h, dc,  ,, _c )
542 INTRA_MBCMP( sad,  8x8,  dc, h,  v, c,, _c )
543 INTRA_MBCMP(satd,  8x8,  dc, h,  v, c,, _c )
544 INTRA_MBCMP( sad,  8x16, dc, h,  v, c,, _c )
545 INTRA_MBCMP(satd,  8x16, dc, h,  v, c,, _c )
546 INTRA_MBCMP( sad, 16x16,  v, h, dc,  ,, _c )
547 INTRA_MBCMP(satd, 16x16,  v, h, dc,  ,, _c )
548
549 #if HAVE_MMX
550 #if HIGH_BIT_DEPTH
551 #define x264_predict_8x8c_v_mmx2 x264_predict_8x8c_v_mmx
552 #define x264_predict_8x16c_v_mmx2 x264_predict_8x16c_v_c
553 #define x264_predict_8x8c_v_sse2 x264_predict_8x8c_v_sse
554 #define x264_predict_8x16c_v_sse2 x264_predict_8x16c_v_sse
555 #define x264_predict_16x16_v_sse2 x264_predict_16x16_v_sse
556 INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _mmx2, _c )
557 INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _mmx2, _mmx2 )
558 INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _mmx2, _mmx2 )
559 INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _mmx2, _mmx2 )
560 INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _mmx2, _mmx2 )
561 INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _sse2, _sse2 )
562 INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _sse2, _sse2 )
563 INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _sse2, _sse2 )
564 INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _sse2, _sse2 )
565 INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _ssse3, _sse2 )
566 INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _ssse3, _sse2 )
567 INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _ssse3, _sse2 )
568 INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _ssse3, _sse2 )
569 INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _sse4, _sse2 )
570 INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _avx, _sse2 )
571 #else
572 #define x264_predict_8x16c_v_mmx2 x264_predict_8x16c_v_mmx
573 INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _mmx2, _mmx2 )
574 INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _mmx2, _mmx2 )
575 INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _sse2, _mmx2 )
576 INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _sse2, _mmx2 )
577 INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _ssse3, _mmx2 )
578 INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _sse4, _mmx2 )
579 INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _avx, _mmx2 )
580 INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _xop, _mmx2 )
581 #endif
582 #endif
583 #if !HIGH_BIT_DEPTH && HAVE_ARMV6
584 INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _neon, _armv6 )
585 INTRA_MBCMP(satd,  4x4,   v, h, dc,  , _neon, _armv6 )
586 INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _neon, _neon )
587 INTRA_MBCMP(satd,  8x8,  dc, h,  v, c, _neon, _neon )
588 INTRA_MBCMP( sad,  8x16, dc, h,  v, c, _neon, _c )
589 INTRA_MBCMP(satd,  8x16, dc, h,  v, c, _neon, _c )
590 INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _neon, _neon )
591 INTRA_MBCMP(satd, 16x16,  v, h, dc,  , _neon, _neon )
592 #endif
593
594 // No C implementation of intra_satd_x9. See checkasm for its behavior,
595 // or see x264_mb_analyse_intra for the entirely different algorithm we
596 // use when lacking an asm implementation of it.
597
598
599
600 /****************************************************************************
601  * structural similarity metric
602  ****************************************************************************/
603 static void ssim_4x4x2_core( const pixel *pix1, intptr_t stride1,
604                              const pixel *pix2, intptr_t stride2,
605                              int sums[2][4] )
606 {
607     for( int z = 0; z < 2; z++ )
608     {
609         uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
610         for( int y = 0; y < 4; y++ )
611             for( int x = 0; x < 4; x++ )
612             {
613                 int a = pix1[x+y*stride1];
614                 int b = pix2[x+y*stride2];
615                 s1  += a;
616                 s2  += b;
617                 ss  += a*a;
618                 ss  += b*b;
619                 s12 += a*b;
620             }
621         sums[z][0] = s1;
622         sums[z][1] = s2;
623         sums[z][2] = ss;
624         sums[z][3] = s12;
625         pix1 += 4;
626         pix2 += 4;
627     }
628 }
629
630 static float ssim_end1( int s1, int s2, int ss, int s12 )
631 {
632 /* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
633  * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
634  * Maximum value for 9-bit is: ss*64 = (2^9-1)^2*16*4*64 = 1069551616, which will not overflow. */
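/* (4286582784 exceeds INT32_MAX = 2147483647, so signed 32-bit arithmetic
 * would overflow at 10-bit depth, while the 9-bit maximum of 1069551616 stays
 * below it; hence the int/float switch below.) */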
635 #if BIT_DEPTH > 9
636 #define type float
637     static const float ssim_c1 = .01*.01*PIXEL_MAX*PIXEL_MAX*64;
638     static const float ssim_c2 = .03*.03*PIXEL_MAX*PIXEL_MAX*64*63;
639 #else
640 #define type int
641     static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
642     static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
643 #endif
644     type fs1 = s1;
645     type fs2 = s2;
646     type fss = ss;
647     type fs12 = s12;
648     type vars = fss*64 - fs1*fs1 - fs2*fs2;
649     type covar = fs12*64 - fs1*fs2;
650     return (float)(2*fs1*fs2 + ssim_c1) * (float)(2*covar + ssim_c2)
651          / ((float)(fs1*fs1 + fs2*fs2 + ssim_c1) * (float)(vars + ssim_c2));
652 #undef type
653 }
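/* Roughly, ssim_end1() evaluates the usual SSIM expression
 *   (2*mu1*mu2 + C1)*(2*cov12 + C2) / ((mu1^2 + mu2^2 + C1)*(var1 + var2 + C2))
 * over an 8x8 window assembled from four 4x4 sum blocks (see ssim_end4),
 * keeping all terms in the unnormalized summed domain instead of dividing by
 * the pixel count. */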
654
655 static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
656 {
657     float ssim = 0.0;
658     for( int i = 0; i < width; i++ )
659         ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
660                            sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
661                            sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
662                            sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
663     return ssim;
664 }
665
666 float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
667                            pixel *pix1, intptr_t stride1,
668                            pixel *pix2, intptr_t stride2,
669                            int width, int height, void *buf, int *cnt )
670 {
671     int z = 0;
672     float ssim = 0.0;
673     int (*sum0)[4] = buf;
674     int (*sum1)[4] = sum0 + (width >> 2) + 3;
675     width >>= 2;
676     height >>= 2;
677     for( int y = 1; y < height; y++ )
678     {
679         for( ; z <= y; z++ )
680         {
681             XCHG( void*, sum0, sum1 );
682             for( int x = 0; x < width; x+=2 )
683                 pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
684         }
685         for( int x = 0; x < width-1; x += 4 )
686             ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
687     }
688     *cnt = (height-1) * (width-1);
689     return ssim;
690 }
691
692 static int pixel_vsad( pixel *src, intptr_t stride, int height )
693 {
694     int score = 0;
695     for( int i = 1; i < height; i++, src += stride )
696         for( int j = 0; j < 16; j++ )
697             score += abs(src[j] - src[j+stride]);
698     return score;
699 }
700
701 int x264_field_vsad( x264_t *h, int mb_x, int mb_y )
702 {
703     int score_field, score_frame;
704     int stride = h->fenc->i_stride[0];
705     int mb_stride = h->mb.i_mb_stride;
706     pixel *fenc = h->fenc->plane[0] + 16 * (mb_x + mb_y * stride);
707     int mb_xy = mb_x + mb_y*mb_stride;
708
709     /* We don't want to analyze pixels outside the frame, as it gives inaccurate results. */
710     int mbpair_height = X264_MIN( h->param.i_height - mb_y * 16, 32 );
711     score_frame  = h->pixf.vsad( fenc,          stride, mbpair_height );
712     score_field  = h->pixf.vsad( fenc,        stride*2, mbpair_height >> 1 );
713     score_field += h->pixf.vsad( fenc+stride, stride*2, mbpair_height >> 1 );
714
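    /* Bias the decision toward agreeing with the already-decided left and top
     * neighbours: if a neighbour was coded as a field pair (field[] == 1) the
     * field score is lowered by 512, making a field decision more likely here;
     * otherwise it is raised by 512. */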
715     if( mb_x > 0 )
716         score_field += 512 - h->mb.field[mb_xy        -1]*1024;
717     if( mb_y > 0 )
718         score_field += 512 - h->mb.field[mb_xy-mb_stride]*1024;
719
720     return (score_field < score_frame);
721 }
722
723 static int pixel_asd8( pixel *pix1, intptr_t stride1, pixel *pix2, intptr_t stride2, int height )
724 {
725     int sum = 0;
726     for( int y = 0; y < height; y++, pix1 += stride1, pix2 += stride2 )
727         for( int x = 0; x < 8; x++ )
728             sum += pix1[x] - pix2[x];
729     return abs( sum );
730 }
731
732 /****************************************************************************
733  * successive elimination
734  ****************************************************************************/
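/* In outline: these prune motion-search candidates using precomputed block
 * sums.  |sum(block1) - sum(block2)| is a lower bound on their SAD, so
 * comparing the candidate's sums against the encoded block's sums (enc_dc),
 * plus the MV cost, lets whole candidates be rejected without computing a full
 * SAD.  As used here, sums[0], sums[8], sums[delta] and sums[delta+8] cover
 * the four 8x8 quadrants of a 16x16 candidate (ads2/ads1 use fewer sums for
 * smaller partitions), and every position whose estimate stays below thresh
 * is appended to mvs. */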
735 static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
736                             uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
737 {
738     int nmv = 0;
739     for( int i = 0; i < width; i++, sums++ )
740     {
741         int ads = abs( enc_dc[0] - sums[0] )
742                 + abs( enc_dc[1] - sums[8] )
743                 + abs( enc_dc[2] - sums[delta] )
744                 + abs( enc_dc[3] - sums[delta+8] )
745                 + cost_mvx[i];
746         if( ads < thresh )
747             mvs[nmv++] = i;
748     }
749     return nmv;
750 }
751
752 static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
753                             uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
754 {
755     int nmv = 0;
756     for( int i = 0; i < width; i++, sums++ )
757     {
758         int ads = abs( enc_dc[0] - sums[0] )
759                 + abs( enc_dc[1] - sums[delta] )
760                 + cost_mvx[i];
761         if( ads < thresh )
762             mvs[nmv++] = i;
763     }
764     return nmv;
765 }
766
767 static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
768                             uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
769 {
770     int nmv = 0;
771     for( int i = 0; i<width; i++, sums++ )
772     {
773         int ads = abs( enc_dc[0] - sums[0] )
774                 + cost_mvx[i];
775         if( ads < thresh )
776             mvs[nmv++] = i;
777     }
778     return nmv;
779 }
780
781
782 /****************************************************************************
783  * x264_pixel_init:
784  ****************************************************************************/
785 void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
786 {
787     memset( pixf, 0, sizeof(*pixf) );
788
789 #define INIT2_NAME( name1, name2, cpu ) \
790     pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
791     pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
792 #define INIT4_NAME( name1, name2, cpu ) \
793     INIT2_NAME( name1, name2, cpu ) \
794     pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
795     pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
796 #define INIT5_NAME( name1, name2, cpu ) \
797     INIT4_NAME( name1, name2, cpu ) \
798     pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
799 #define INIT6_NAME( name1, name2, cpu ) \
800     INIT5_NAME( name1, name2, cpu ) \
801     pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
802 #define INIT7_NAME( name1, name2, cpu ) \
803     INIT6_NAME( name1, name2, cpu ) \
804     pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
805 #define INIT8_NAME( name1, name2, cpu ) \
806     INIT7_NAME( name1, name2, cpu ) \
807     pixf->name1[PIXEL_4x16]  = x264_pixel_##name2##_4x16##cpu;
808 #define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
809 #define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
810 #define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
811 #define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
812 #define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )
813 #define INIT8( name, cpu ) INIT8_NAME( name, name, cpu )
814
815 #define INIT_ADS( cpu ) \
816     pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
817     pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
818     pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;
819
820     INIT8( sad, );
821     INIT8_NAME( sad_aligned, sad, );
822     INIT7( sad_x3, );
823     INIT7( sad_x4, );
824     INIT8( ssd, );
825     INIT8( satd, );
826     INIT7( satd_x3, );
827     INIT7( satd_x4, );
828     INIT4( hadamard_ac, );
829     INIT_ADS( );
830
831     pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
832     pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
833     pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
834     pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16;
835     pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8;
836     pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16;
837     pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8;
838
839     pixf->ssd_nv12_core = pixel_ssd_nv12_core;
840     pixf->ssim_4x4x2_core = ssim_4x4x2_core;
841     pixf->ssim_end4 = ssim_end4;
842     pixf->vsad = pixel_vsad;
843     pixf->asd8 = pixel_asd8;
844
845     pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
846     pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
847     pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
848     pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
849     pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
850     pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
851     pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c;
852     pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c;
853     pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
854     pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;
855
856 #if HIGH_BIT_DEPTH
857 #if HAVE_MMX
858     if( cpu&X264_CPU_MMX2 )
859     {
860         INIT7( sad, _mmx2 );
861         INIT7_NAME( sad_aligned, sad, _mmx2 );
862         INIT7( sad_x3, _mmx2 );
863         INIT7( sad_x4, _mmx2 );
864         INIT8( satd, _mmx2 );
865         INIT7( satd_x3, _mmx2 );
866         INIT7( satd_x4, _mmx2 );
867         INIT4( hadamard_ac, _mmx2 );
868         INIT8( ssd, _mmx2 );
869         INIT_ADS( _mmx2 );
870
871         pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_mmx2;
872         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
873         pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
874 #if ARCH_X86
875         pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_mmx2;
876         pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_mmx2;
877 #endif
878
879         pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
880         pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
881         pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
882         pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
883         pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
884         pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_mmx2;
885         pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_mmx2;
886         pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
887         pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
888     }
889     if( cpu&X264_CPU_SSE2 )
890     {
891         INIT4_NAME( sad_aligned, sad, _sse2_aligned );
892         INIT5( ssd, _sse2 );
893         INIT6( satd, _sse2 );
894         pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_sse2;
895
896         pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
897         pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
898 #if ARCH_X86_64
899         pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
900         pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse2;
901 #endif
902         pixf->intra_sad_x3_4x4  = x264_intra_sad_x3_4x4_sse2;
903         pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_sse2;
904         pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
905         pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
906         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
907         pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_sse2;
908         pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_sse2;
909         pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_sse2;
910         pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_sse2;
911     }
912     if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
913     {
914         INIT5( sad, _sse2 );
915         INIT2( sad_x3, _sse2 );
916         INIT2( sad_x4, _sse2 );
917         INIT_ADS( _sse2 );
918
919         if( !(cpu&X264_CPU_STACK_MOD4) )
920         {
921             INIT4( hadamard_ac, _sse2 );
922         }
923         pixf->vsad = x264_pixel_vsad_sse2;
924         pixf->asd8 = x264_pixel_asd8_sse2;
925         pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_sse2;
926         pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_sse2;
927         pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_sse2;
928         pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse2;
929         pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_sse2;
930     }
931     if( cpu&X264_CPU_SSE2_IS_FAST )
932     {
933         pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
934         pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
935         pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_sse2;
936         pixf->sad_x3[PIXEL_8x4]  = x264_pixel_sad_x3_8x4_sse2;
937         pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
938         pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_sse2;
939         pixf->sad_x4[PIXEL_8x4]  = x264_pixel_sad_x4_8x4_sse2;
940     }
941     if( cpu&X264_CPU_SSSE3 )
942     {
943         INIT4_NAME( sad_aligned, sad, _ssse3_aligned );
944         pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_ssse3;
945         pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_ssse3;
946         INIT7( sad, _ssse3 );
947         INIT7( sad_x3, _ssse3 );
948         INIT7( sad_x4, _ssse3 );
949         INIT_ADS( _ssse3 );
950         INIT6( satd, _ssse3 );
951         pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_ssse3;
952
953         if( !(cpu&X264_CPU_STACK_MOD4) )
954         {
955             INIT4( hadamard_ac, _ssse3 );
956         }
957         pixf->vsad = x264_pixel_vsad_ssse3;
958         pixf->asd8 = x264_pixel_asd8_ssse3;
959         pixf->intra_sad_x3_4x4  = x264_intra_sad_x3_4x4_ssse3;
960         pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
961         pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
962 #if ARCH_X86_64
963         pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_ssse3;
964 #endif
965         pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_ssse3;
966         pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_ssse3;
967         pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
968         pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_ssse3;
969         pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_ssse3;
970         pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
971     }
972     if( cpu&X264_CPU_SSE4 )
973     {
974         INIT6( satd, _sse4 );
975         pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_sse4;
976         if( !(cpu&X264_CPU_STACK_MOD4) )
977         {
978             INIT4( hadamard_ac, _sse4 );
979         }
980         pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
981         pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
982 #if ARCH_X86_64
983         pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse4;
984 #endif
985         pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse4;
986     }
987     if( cpu&X264_CPU_AVX )
988     {
989         INIT5_NAME( sad_aligned, sad, _ssse3 ); /* AVX-capable CPUs don't benefit from an aligned version */
990         INIT_ADS( _avx );
991         INIT6( satd, _avx );
992         pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_avx;
993         if( !(cpu&X264_CPU_STACK_MOD4) )
994         {
995             INIT4( hadamard_ac, _avx );
996         }
997         pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_avx;
998         pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
999         pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
1000         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
1001         pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
1002         pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
1003         pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
1004         pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
1005 #if ARCH_X86_64
1006         pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_avx;
1007 #endif
1008         pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_avx;
1009     }
1010     if( cpu&X264_CPU_XOP )
1011     {
1012         INIT5( sad_x3, _xop );
1013         INIT5( sad_x4, _xop );
1014         pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_xop;
1015         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_xop;
1016         pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_xop;
1017         pixf->vsad = x264_pixel_vsad_xop;
1018         pixf->asd8 = x264_pixel_asd8_xop;
1019 #if ARCH_X86_64
1020         pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_xop;
1021 #endif
1022     }
1023     if( cpu&X264_CPU_AVX2 )
1024     {
1025         INIT2( ssd, _avx2 );
1026         INIT2( sad, _avx2 );
1027         INIT2_NAME( sad_aligned, sad, _avx2 );
1028         INIT2( sad_x3, _avx2 );
1029         INIT2( sad_x4, _avx2 );
1030         pixf->vsad = x264_pixel_vsad_avx2;
1031         pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_avx2;
1032         pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_avx2;
1033     }
1034 #endif // HAVE_MMX
1035 #else // !HIGH_BIT_DEPTH
1036 #if HAVE_MMX
1037     if( cpu&X264_CPU_MMX )
1038     {
1039         INIT8( ssd, _mmx );
1040     }
1041
1042     if( cpu&X264_CPU_MMX2 )
1043     {
1044         INIT8( sad, _mmx2 );
1045         INIT8_NAME( sad_aligned, sad, _mmx2 );
1046         INIT7( sad_x3, _mmx2 );
1047         INIT7( sad_x4, _mmx2 );
1048         INIT8( satd, _mmx2 );
1049         INIT7( satd_x3, _mmx2 );
1050         INIT7( satd_x4, _mmx2 );
1051         INIT4( hadamard_ac, _mmx2 );
1052         INIT_ADS( _mmx2 );
1053         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
1054         pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_mmx2;
1055         pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
1056         pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmx2;
1057 #if ARCH_X86
1058         pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmx2;
1059         pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmx2;
1060         pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmx2;
1061         pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_mmx2;
1062         pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_mmx2;
1063         pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_mmx2;
1064         pixf->vsad = x264_pixel_vsad_mmx2;
1065
1066         if( cpu&X264_CPU_CACHELINE_32 )
1067         {
1068             INIT5( sad, _cache32_mmx2 );
1069             INIT4( sad_x3, _cache32_mmx2 );
1070             INIT4( sad_x4, _cache32_mmx2 );
1071         }
1072         else if( cpu&X264_CPU_CACHELINE_64 && !(cpu&X264_CPU_SLOW_ATOM) )
1073         {
1074             INIT5( sad, _cache64_mmx2 );
1075             INIT4( sad_x3, _cache64_mmx2 );
1076             INIT4( sad_x4, _cache64_mmx2 );
1077         }
1078 #else
1079         if( cpu&X264_CPU_CACHELINE_64 && !(cpu&X264_CPU_SLOW_ATOM) )
1080         {
1081             pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmx2;
1082             pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmx2;
1083             pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmx2;
1084             pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmx2;
1085             pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmx2;
1086             pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmx2;
1087             pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmx2;
1088         }
1089 #endif
1090         pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
1091         pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
1092         pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_mmx2;
1093         pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_mmx2;
1094         pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
1095         pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
1096         pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
1097         pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
1098         pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
1099     }
1100
1101     if( cpu&X264_CPU_SSE2 )
1102     {
1103         INIT5( ssd, _sse2slow );
1104         INIT2_NAME( sad_aligned, sad, _sse2_aligned );
1105         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
1106         pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
1107         pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
1108         pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
1109         pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
1110         pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
1111 #if ARCH_X86_64
1112         pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
1113         pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse2;
1114 #endif
1115         pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8_sse2;
1116         pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16_sse2;
1117         pixf->vsad = x264_pixel_vsad_sse2;
1118         pixf->asd8 = x264_pixel_asd8_sse2;
1119     }
1120
1121     if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
1122     {
1123         INIT2( sad, _sse2 );
1124         INIT2( sad_x3, _sse2 );
1125         INIT2( sad_x4, _sse2 );
1126         INIT6( satd, _sse2 );
1127         pixf->satd[PIXEL_4x16]   = x264_pixel_satd_4x16_sse2;
1128         INIT6( satd_x3, _sse2 );
1129         INIT6( satd_x4, _sse2 );
1130         INIT4( hadamard_ac, _sse2 );
1131         INIT_ADS( _sse2 );
1132         pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
1133         pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_sse2;
1134         pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
1135         pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse2;
1136         pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_sse2;
1137         if( cpu&X264_CPU_CACHELINE_64 )
1138         {
1139             INIT2( ssd, _sse2); /* faster for width 16 on p4 */
1140 #if ARCH_X86
1141             INIT2( sad, _cache64_sse2 );
1142             INIT2( sad_x3, _cache64_sse2 );
1143             INIT2( sad_x4, _cache64_sse2 );
1144 #endif
1145             if( cpu&X264_CPU_SSE2_IS_FAST )
1146             {
1147                 pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
1148                 pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
1149             }
1150         }
1151     }
1152
1153     if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
1154     {
1155         pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
1156         pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
1157         pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
1158         pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
1159         pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
1160         pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
1161         pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
1162         pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
1163     }
1164
1165     if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
1166     {
1167         INIT2( sad, _sse3 );
1168         INIT2( sad_x3, _sse3 );
1169         INIT2( sad_x4, _sse3 );
1170     }
1171
1172     if( cpu&X264_CPU_SSSE3 )
1173     {
1174         INIT4( hadamard_ac, _ssse3 );
1175         if( !(cpu&X264_CPU_STACK_MOD4) )
1176         {
1177             pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_ssse3;
1178             pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_ssse3;
1179             pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_ssse3;
1180 #if ARCH_X86_64
1181             pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_ssse3;
1182 #endif
1183         }
1184         INIT_ADS( _ssse3 );
1185         if( cpu&X264_CPU_SLOW_ATOM )
1186         {
1187             pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3_atom;
1188             pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3_atom;
1189             INIT6( satd, _ssse3_atom );
1190             pixf->satd[PIXEL_4x16]  = x264_pixel_satd_4x16_ssse3_atom;
1191             INIT6( satd_x3, _ssse3_atom );
1192             INIT6( satd_x4, _ssse3_atom );
1193             INIT4( hadamard_ac, _ssse3_atom );
1194 #if ARCH_X86_64
1195             pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_ssse3_atom;
1196 #endif
1197         }
1198         else
1199         {
1200             INIT8( ssd, _ssse3 );
1201             pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
1202             pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
1203             INIT8( satd, _ssse3 );
1204             INIT7( satd_x3, _ssse3 );
1205             INIT7( satd_x4, _ssse3 );
1206 #if ARCH_X86_64
1207             pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_ssse3;
1208 #endif
1209         }
1210         pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
1211         if( !(cpu&X264_CPU_SLOW_PSHUFB) )
1212             pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
1213         pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_ssse3;
1214         pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
1215         pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
1216         pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_ssse3;
1217         pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_ssse3;
1218         pixf->asd8 = x264_pixel_asd8_ssse3;
1219         if( cpu&X264_CPU_CACHELINE_64 )
1220         {
1221             INIT2( sad, _cache64_ssse3 );
1222             INIT2( sad_x3, _cache64_ssse3 );
1223             INIT2( sad_x4, _cache64_ssse3 );
1224         }
1225         else
1226         {
1227             INIT2( sad_x3, _ssse3 );
1228             INIT5( sad_x4, _ssse3 );
1229         }
1230         if( (cpu&X264_CPU_SLOW_ATOM) || (cpu&X264_CPU_SLOW_SHUFFLE) )
1231         {
1232             INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
1233         }
1234     }
1235
1236     if( cpu&X264_CPU_SSE4 )
1237     {
1238         INIT8( satd, _sse4 );
1239         INIT7( satd_x3, _sse4 );
1240         INIT7( satd_x4, _sse4 );
1241         INIT4( hadamard_ac, _sse4 );
1242         if( !(cpu&X264_CPU_STACK_MOD4) )
1243         {
1244             pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_sse4;
1245             pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_sse4;
1246             pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_sse4;
1247 #if ARCH_X86_64
1248             pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_sse4;
1249 #endif
1250         }
1251         pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
1252         pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
1253         pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_sse4;
1254 #if ARCH_X86_64
1255         pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse4;
1256 #endif
1257     }
1258
1259     if( cpu&X264_CPU_AVX )
1260     {
1261         INIT2_NAME( sad_aligned, sad, _sse2 ); /* AVX-capable CPUs don't benefit from an aligned version */
1262         INIT2( sad_x3, _avx );
1263         INIT2( sad_x4, _avx );
1264         INIT8( satd, _avx );
1265         INIT7( satd_x3, _avx );
1266         INIT7( satd_x4, _avx );
1267         INIT_ADS( _avx );
1268         INIT4( hadamard_ac, _avx );
1269         if( !(cpu&X264_CPU_STACK_MOD4) )
1270         {
1271             pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_avx;
1272             pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_avx;
1273             pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_avx;
1274 #if ARCH_X86_64
1275             pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_avx;
1276 #endif
1277         }
1278         INIT5( ssd, _avx );
1279         pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
1280         pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
1281         pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_avx;
1282         pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
1283         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
1284         pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_avx;
1285         pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
1286         pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
1287         pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
1288 #if ARCH_X86_64
1289         pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_avx;
1290 #endif
1291     }
1292
1293     if( cpu&X264_CPU_XOP )
1294     {
1295         INIT7( satd, _xop );
1296         INIT7( satd_x3, _xop );
1297         INIT7( satd_x4, _xop );
1298         INIT4( hadamard_ac, _xop );
1299         if( !(cpu&X264_CPU_STACK_MOD4) )
1300         {
1301             pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_xop;
1302         }
1303         INIT5( ssd, _xop );
1304         pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_xop;
1305         pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_xop;
1306         pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_xop;
1307         pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_xop;
1308         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_xop;
1309         pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_xop;
1310         pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_xop;
1311         pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_xop;
1312         pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_xop;
1313 #if ARCH_X86_64
1314         pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_xop;
1315 #endif
1316     }
1317
1318     if( cpu&X264_CPU_AVX2 )
1319     {
1320         INIT2( ssd, _avx2 );
1321         INIT2( sad_x3, _avx2 );
1322         INIT2( sad_x4, _avx2 );
1323         INIT4( satd, _avx2 );
1324         INIT2( hadamard_ac, _avx2 );
1325         INIT_ADS( _avx2 );
1326         pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx2;
1327         pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx2;
1328         pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16_avx2;
1329         pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8_avx2;
1330         pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_avx2;
1331         pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_avx2;
1332         pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_avx2;
1333         pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_avx2;
1334 #if ARCH_X86_64
1335         pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_avx2;
1336 #endif
1337     }
1338 #endif //HAVE_MMX
1339
1340 #if HAVE_ARMV6
1341     if( cpu&X264_CPU_ARMV6 )
1342     {
1343         pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
1344         pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
1345         pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
1346         pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
1347     }
1348     if( cpu&X264_CPU_NEON )
1349     {
1350         INIT5( sad, _neon );
1351         INIT5( sad_aligned, _neon );
1352         INIT7( sad_x3, _neon );
1353         INIT7( sad_x4, _neon );
1354         INIT7( ssd, _neon );
1355         INIT7( satd, _neon );
1356         INIT7( satd_x3, _neon );
1357         INIT7( satd_x4, _neon );
1358         INIT4( hadamard_ac, _neon );
1359         pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
1360         pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
1361         pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
1362         pixf->var[PIXEL_8x16]   = x264_pixel_var_8x16_neon;
1363         pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
1364         pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8_neon;
1365         pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16_neon;
1366
1367         pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_neon;
1368         pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_neon;
1369         pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_neon;
1370         pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8_neon;
1371         pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_neon;
1372         pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_neon;
1373         pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c_neon;
1374         pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c_neon;
1375         pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_neon;
1376         pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_neon;
1377
1378         pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
1379         pixf->ssim_end4         = x264_pixel_ssim_end4_neon;
1380
1381         if( cpu&X264_CPU_FAST_NEON_MRC )
1382         {
1383             pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
1384             pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
1385             pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
1386             pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
1387         }
1388         else    // really just scheduled for dual issue / A8
1389         {
1390             INIT5( sad_aligned, _neon_dual );
1391         }
1392     }
1393 #endif
1394 #endif // HIGH_BIT_DEPTH
1395 #if HAVE_ALTIVEC
1396     if( cpu&X264_CPU_ALTIVEC )
1397     {
1398         x264_pixel_altivec_init( pixf );
1399     }
1400 #endif
1401
1402     pixf->ads[PIXEL_8x16] =
1403     pixf->ads[PIXEL_8x4] =
1404     pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
1405     pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
1406 }
1407