/*****************************************************************************
 * pixel.c: pixel metrics
 *****************************************************************************
 * Copyright (C) 2003-2011 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"

#if HAVE_MMX
#   include "x86/pixel.h"
#   include "x86/predict.h"
#endif
#if ARCH_PPC
#   include "ppc/pixel.h"
#endif
#if ARCH_ARM
#   include "arm/pixel.h"
#endif
#if ARCH_UltraSPARC
#   include "sparc/pixel.h"
#endif


/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            i_sum += abs( pix1[x] - pix2[x] );      \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}


PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x16,   4, 16 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )

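#if 0
/* Usage sketch (illustrative only, not built; fenc_plane, fref_plane, stride,
 * mb_x and mb_y are hypothetical names).  Every WxH metric in this file takes
 * the same (pix1, stride1, pix2, stride2) arguments, which is what lets the
 * encoder swap SAD/SATD/SSD interchangeably through the function tables
 * filled in by x264_pixel_init() at the bottom of this file. */
static int mb_sad_16x16( pixel *fenc_plane, pixel *fref_plane, int stride, int mb_x, int mb_y )
{
    pixel *fenc = fenc_plane + 16 * (mb_x + mb_y * stride);
    pixel *fref = fref_plane + 16 * (mb_x + mb_y * stride);
    return x264_pixel_sad_16x16( fenc, stride, fref, stride );
}
#endif
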
/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            int d = pix1[x] - pix2[x];              \
            i_sum += d*d;                           \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x16,   4, 16 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )

uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
{
    uint64_t i_ssd = 0;
    int y;
    int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        int x = 0;
        if( align )
            for( ; x < i_width-15; x += 16 )
                SSD(PIXEL_16x16);
        for( ; x < i_width-7; x += 8 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( int x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width & 7 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( int x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height & 7 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( int x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}

static void pixel_ssd_nv12_core( pixel *pixuv1, int stride1, pixel *pixuv2, int stride2, int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    *ssd_u = 0, *ssd_v = 0;
    for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
        for( int x = 0; x < width; x++ )
        {
            int du = pixuv1[2*x]   - pixuv2[2*x];
            int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
            *ssd_u += du*du;
            *ssd_v += dv*dv;
        }
}

void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height, ssd_u, ssd_v );
    if( i_width&7 )
    {
        uint64_t tmp[2];
        pixel_ssd_nv12_core( pix1+(i_width&~7)*2, i_pix1, pix2+(i_width&~7)*2, i_pix2, i_width&7, i_height, &tmp[0], &tmp[1] );
        *ssd_u += tmp[0];
        *ssd_v += tmp[1];
    }
}
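
/* NV12 stores U and V interleaved, so `width` above counts UV sample pairs
 * and each pair occupies two pixels in memory: the *2 in the remainder call
 * converts the (i_width&~7) pairs already handled by the (possibly asm) core
 * into a sample offset.  A usage sketch (illustrative; the chroma plane
 * names and dimensions are hypothetical):
 *
 *     uint64_t ssd_u, ssd_v;
 *     x264_pixel_ssd_nv12( &h->pixf, fdec_chroma, stride, fenc_chroma, stride,
 *                          chroma_width_in_pairs, chroma_height, &ssd_u, &ssd_v );
 */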

/****************************************************************************
 * pixel_var_wxh
 ****************************************************************************/
#define PIXEL_VAR_C( name, w, h ) \
static uint64_t name( pixel *pix, int i_stride ) \
{                                             \
    uint32_t sum = 0, sqr = 0;                \
    for( int y = 0; y < h; y++ )              \
    {                                         \
        for( int x = 0; x < w; x++ )          \
        {                                     \
            sum += pix[x];                    \
            sqr += pix[x] * pix[x];           \
        }                                     \
        pix += i_stride;                      \
    }                                         \
    return sum + ((uint64_t)sqr << 32);       \
}

PIXEL_VAR_C( x264_pixel_var_16x16, 16, 16 )
PIXEL_VAR_C( x264_pixel_var_8x16,   8, 16 )
PIXEL_VAR_C( x264_pixel_var_8x8,    8,  8 )

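#if 0
/* Unpacking sketch (illustrative, not built): var packs the pixel sum into
 * the low 32 bits and the sum of squares into the high 32 bits, so a caller
 * can derive the block's AC energy (the sum of squared deviations, i.e. the
 * pixel count times the variance) with one subtraction.  For a 16x16 block,
 * 256 pixels, hence the >>8: */
static uint32_t ac_energy_16x16( pixel *pix, int stride )
{
    uint64_t r   = x264_pixel_var_16x16( pix, stride );
    uint32_t sum = (uint32_t)r;         /* sum of the 256 pixels */
    uint32_t sqr = (uint32_t)(r >> 32); /* sum of their squares */
    return sqr - (uint32_t)(((uint64_t)sum * sum) >> 8);
}
#endif
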
/****************************************************************************
 * pixel_var2_wxh
 ****************************************************************************/
#define PIXEL_VAR2_C( name, w, h, shift ) \
static int name( pixel *pix1, int i_stride1, pixel *pix2, int i_stride2, int *ssd ) \
{ \
    uint32_t var = 0, sum = 0, sqr = 0; \
    for( int y = 0; y < h; y++ ) \
    { \
        for( int x = 0; x < w; x++ ) \
        { \
            int diff = pix1[x] - pix2[x]; \
            sum += diff; \
            sqr += diff * diff; \
        } \
        pix1 += i_stride1; \
        pix2 += i_stride2; \
    } \
    sum = abs(sum); \
    var = sqr - ((uint64_t)sum * sum >> shift); \
    *ssd = sqr; \
    return var; \
}

PIXEL_VAR2_C( x264_pixel_var2_8x16, 8, 16, 7 )
PIXEL_VAR2_C( x264_pixel_var2_8x8,  8,  8, 6 )

#if BIT_DEPTH > 8
    typedef uint32_t sum_t;
    typedef uint64_t sum2_t;
#else
    typedef uint16_t sum_t;
    typedef uint32_t sum2_t;
#endif
#define BITS_PER_SUM (8 * sizeof(sum_t))

#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
    sum2_t t0 = s0 + s1;\
    sum2_t t1 = s0 - s1;\
    sum2_t t2 = s2 + s3;\
    sum2_t t3 = s2 - s3;\
    d0 = t0 + t2;\
    d2 = t0 - t2;\
    d1 = t1 + t3;\
    d3 = t1 - t3;\
}

// in: a pseudo-simd number of the form x+(y<<BITS_PER_SUM)
// return: abs(x)+(abs(y)<<BITS_PER_SUM)
static ALWAYS_INLINE sum2_t abs2( sum2_t a )
{
    sum2_t s = ((a>>(BITS_PER_SUM-1))&(((sum2_t)1<<BITS_PER_SUM)+1))*((sum_t)-1);
    return (a+s)^s;
}
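
/* Worked example (8-bit build, so BITS_PER_SUM == 16): packing x = -3, y = 5
 * as x+(y<<16) stores a = 0x0004FFFD -- the borrow from the negative low lane
 * has decremented the high lane.  Then a>>15 = 9, masked with 0x10001 gives 1,
 * times 0xFFFF gives s = 0x0000FFFF; a+s = 0x0005FFFC (the carry restores the
 * high lane) and (a+s)^s = 0x00050003 = 3+(5<<16): each lane now holds its
 * absolute value. */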

/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/

static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[4][2];
    sum2_t a0, a1, a2, a3, b0, b1;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        tmp[i][0] = b0 + b1;
        tmp[i][1] = b0 - b1;
    }
    for( int i = 0; i < 2; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
        sum += ((sum_t)a0) + (a0>>BITS_PER_SUM);
    }
    return sum >> 1;
}
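
#if 0
/* Equivalent scalar reference (illustrative sketch, not built): satd_4x4 is
 * the sum of absolute values of the 2D 4x4 Hadamard transform of the
 * difference block, halved.  The packed version above computes exactly this,
 * carrying two transform lanes per machine word via abs2(). */
static int satd_4x4_ref( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int d[4][4], sum = 0;
    for( int y = 0; y < 4; y++, pix1 += i_pix1, pix2 += i_pix2 )
        for( int x = 0; x < 4; x++ )
            d[y][x] = pix1[x] - pix2[x];
    /* horizontal 4-point Hadamard transform of each row */
    for( int y = 0; y < 4; y++ )
    {
        int t0 = d[y][0] + d[y][1], t1 = d[y][0] - d[y][1];
        int t2 = d[y][2] + d[y][3], t3 = d[y][2] - d[y][3];
        d[y][0] = t0 + t2; d[y][2] = t0 - t2;
        d[y][1] = t1 + t3; d[y][3] = t1 - t3;
    }
    /* vertical transform of each column, summing |coefficients| */
    for( int x = 0; x < 4; x++ )
    {
        int t0 = d[0][x] + d[1][x], t1 = d[0][x] - d[1][x];
        int t2 = d[2][x] + d[3][x], t3 = d[2][x] - d[3][x];
        sum += abs(t0 + t2) + abs(t0 - t2) + abs(t1 + t3) + abs(t1 - t3);
    }
    return sum >> 1;
}
#endif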

static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[4][4];
    sum2_t a0, a1, a2, a3;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
        a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
        a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
        a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    return (((sum_t)sum) + (sum>>BITS_PER_SUM)) >> 1;
}

#define PIXEL_SATD_C( w, h, sub )\
static int x264_pixel_satd_##w##x##h( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )\
{\
    int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
            + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
    if( w==16 )\
        sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
            + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
    if( h==16 )\
        sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
            + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
    if( w==16 && h==16 )\
        sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
            + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
    return sum;\
}
PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 16, 8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 4,  16, x264_pixel_satd_4x4 )
PIXEL_SATD_C( 4,  8,  x264_pixel_satd_4x4 )

static NOINLINE int sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[8][4];
    sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
    sum2_t sum = 0;
    for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        a4 = pix1[4] - pix2[4];
        a5 = pix1[5] - pix2[5];
        b2 = (a4+a5) + ((a4-a5)<<BITS_PER_SUM);
        a6 = pix1[6] - pix2[6];
        a7 = pix1[7] - pix2[7];
        b3 = (a6+a7) + ((a6-a7)<<BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
        b0  = abs2(a0+a4) + abs2(a0-a4);
        b0 += abs2(a1+a5) + abs2(a1-a5);
        b0 += abs2(a2+a6) + abs2(a2-a6);
        b0 += abs2(a3+a7) + abs2(a3-a7);
        sum += (sum_t)b0 + (b0>>BITS_PER_SUM);
    }
    return sum;
}

static int x264_pixel_sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
    return (sum+2)>>2;
}

static int x264_pixel_sa8d_16x16( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
            + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
            + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
            + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
    return (sum+2)>>2;
}
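
/* The rounded (sum+2)>>2 here, versus satd's plain >>1, compensates for the
 * higher gain of the 8x8 transform, keeping sa8d scores roughly on the same
 * scale as satd scores for the same pixels. */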


static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, int stride )
{
    sum2_t tmp[32];
    sum2_t a0, a1, a2, a3, dc;
    sum2_t sum4 = 0, sum8 = 0;
    for( int i = 0; i < 8; i++, pix+=stride )
    {
        sum2_t *t = tmp + (i&3) + (i&4)*4;
        a0 = (pix[0]+pix[1]) + ((sum2_t)(pix[0]-pix[1])<<BITS_PER_SUM);
        a1 = (pix[2]+pix[3]) + ((sum2_t)(pix[2]-pix[3])<<BITS_PER_SUM);
        t[0] = a0 + a1;
        t[4] = a0 - a1;
        a2 = (pix[4]+pix[5]) + ((sum2_t)(pix[4]-pix[5])<<BITS_PER_SUM);
        a3 = (pix[6]+pix[7]) + ((sum2_t)(pix[6]-pix[7])<<BITS_PER_SUM);
        t[8] = a2 + a3;
        t[12] = a2 - a3;
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
        tmp[i*4+0] = a0;
        tmp[i*4+1] = a1;
        tmp[i*4+2] = a2;
        tmp[i*4+3] = a3;
        sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
        sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    dc = (sum_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
    sum4 = (sum_t)sum4 + (sum4>>BITS_PER_SUM) - dc;
    sum8 = (sum_t)sum8 + (sum8>>BITS_PER_SUM) - dc;
    return ((uint64_t)sum8<<32) + sum4;
}

#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, int stride )\
{\
    uint64_t sum = pixel_hadamard_ac( pix, stride );\
    if( w==16 )\
        sum += pixel_hadamard_ac( pix+8, stride );\
    if( h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride, stride );\
    if( w==16 && h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )
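
/* The packed layout survives the summation above: bits 0..31 accumulate the
 * 4x4 AC cost (sum4) and bits 32..63 the 8x8 AC cost (sum8), with the DC term
 * already subtracted per 8x8 block.  The final line of HADAMARD_AC therefore
 * applies both normalizations in one step: (sum>>34)<<32 divides the 8x8 half
 * by 4 (matching sa8d's >>2) and (uint32_t)sum>>1 divides the 4x4 half by 2
 * (matching satd's >>1). */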


/****************************************************************************
 * pixel_sad_x4
 ****************************************************************************/
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )
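
#if 0
/* Usage sketch (illustrative, not built; fenc, fref and stride are
 * hypothetical): one sad_x4 call scores four candidate positions in the same
 * reference plane against a single encode-order block (which always uses the
 * fixed FENC_STRIDE), so optimized versions can load fenc once and reuse it. */
static void score_four_candidates( pixel *fenc, pixel *fref, int stride, int scores[4] )
{
    x264_pixel_sad_x4_16x16( fenc, fref, fref+1, fref+stride, fref+stride+1,
                             stride, scores );
}
#endif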

#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif
#endif // !HIGH_BIT_DEPTH

/****************************************************************************
 * pixel_satd_x4
 * no faster than single satd, but needed for satd to be a drop-in replacement for sad
 ****************************************************************************/

#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )

SATD_X_DECL7()
#if HAVE_MMX
SATD_X_DECL7( _mmx2 )
#if !HIGH_BIT_DEPTH
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
SATD_X_DECL7( _avx )
SATD_X_DECL7( _xop )
#endif // !HIGH_BIT_DEPTH
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif
#endif // !HIGH_BIT_DEPTH

#define INTRA_MBCMP_8x8( mbcmp, cpu, cpu2 )\
void x264_intra_##mbcmp##_x3_8x8##cpu( pixel *fenc, pixel edge[36], int res[3] )\
{\
    ALIGNED_ARRAY_16( pixel, pix, [8*FDEC_STRIDE] );\
    x264_predict_8x8_v##cpu2( pix, edge );\
    res[0] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_h##cpu2( pix, edge );\
    res[1] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_dc##cpu2( pix, edge );\
    res[2] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP_8x8( sad,, _c )
INTRA_MBCMP_8x8(sa8d,, _c )
#if HIGH_BIT_DEPTH && HAVE_MMX
INTRA_MBCMP_8x8( sad, _mmx2,  _c )
INTRA_MBCMP_8x8( sad, _sse2,  _sse2 )
INTRA_MBCMP_8x8( sad, _ssse3, _sse2 )
INTRA_MBCMP_8x8(sa8d, _sse2,  _sse2 )
#endif

#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma, cpu, cpu2 )\
void x264_intra_##mbcmp##_x3_##size##chroma##cpu( pixel *fenc, pixel *fdec, int res[3] )\
{\
    x264_predict_##size##chroma##_##pred1##cpu2( fdec );\
    res[0] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##chroma##_##pred2##cpu2( fdec );\
    res[1] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##chroma##_##pred3##cpu2( fdec );\
    res[2] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP( sad,  4x4,   v, h, dc,  ,, _c )
INTRA_MBCMP(satd,  4x4,   v, h, dc,  ,, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c,, _c )
INTRA_MBCMP(satd,  8x8,  dc, h,  v, c,, _c )
INTRA_MBCMP( sad,  8x16, dc, h,  v, c,, _c )
INTRA_MBCMP(satd,  8x16, dc, h,  v, c,, _c )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  ,, _c )
INTRA_MBCMP(satd, 16x16,  v, h, dc,  ,, _c )

#if HIGH_BIT_DEPTH && HAVE_MMX
INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _mmx2, _c )
INTRA_MBCMP(satd,  4x4,   v, h, dc,  , _mmx2, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _mmx2, _c )
INTRA_MBCMP(satd,  8x8,  dc, h,  v, c, _mmx2, _c )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _mmx2, _mmx2 )
INTRA_MBCMP(satd, 16x16,  v, h, dc,  , _mmx2, _mmx2 )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _sse2, _sse2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _sse2, _sse2 )
INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _ssse3, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _ssse3, _sse2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _ssse3, _sse2 )
#endif

// No C implementation of intra_satd_x9. See checkasm for its behavior,
// or see x264_mb_analyse_intra for the entirely different algorithm we
// use when lacking an asm implementation of it.



/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
static void ssim_4x4x2_core( const pixel *pix1, int stride1,
                             const pixel *pix2, int stride2,
                             int sums[2][4])
{
    for( int z = 0; z < 2; z++ )
    {
        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
        for( int y = 0; y < 4; y++ )
            for( int x = 0; x < 4; x++ )
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        pix1 += 4;
        pix2 += 4;
    }
}

static float ssim_end1( int s1, int s2, int ss, int s12 )
{
/* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
 * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
 * Maximum value for 9-bit is: ss*64 = (2^9-1)^2*16*4*64 = 1069551616, which will not overflow. */
#if BIT_DEPTH > 9
#define type float
    static const float ssim_c1 = .01*.01*PIXEL_MAX*PIXEL_MAX*64;
    static const float ssim_c2 = .03*.03*PIXEL_MAX*PIXEL_MAX*64*63;
#else
#define type int
    static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
#endif
    type fs1 = s1;
    type fs2 = s2;
    type fss = ss;
    type fs12 = s12;
    type vars = fss*64 - fs1*fs1 - fs2*fs2;
    type covar = fs12*64 - fs1*fs2;
    return (float)(2*fs1*fs2 + ssim_c1) * (float)(2*covar + ssim_c2)
         / ((float)(fs1*fs1 + fs2*fs2 + ssim_c1) * (float)(vars + ssim_c2));
#undef type
}

static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    float ssim = 0.0;
    for( int i = 0; i < width; i++ )
        ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
                           sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
                           sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
                           sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
    return ssim;
}

float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           pixel *pix1, int stride1,
                           pixel *pix2, int stride2,
                           int width, int height, void *buf, int *cnt )
{
    int z = 0;
    float ssim = 0.0;
    int (*sum0)[4] = buf;
    int (*sum1)[4] = sum0 + (width >> 2) + 3;
    width >>= 2;
    height >>= 2;
    for( int y = 1; y < height; y++ )
    {
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( int x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        for( int x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    *cnt = (height-1) * (width-1);
    return ssim;
}
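
/* The return value is the sum of per-window SSIM scores over the overlapped
 * 8x8 windows that were evaluated (each window combines four 4x4 sub-sums
 * from the two row buffers sum0/sum1, which are swapped as y advances); *cnt
 * receives the number of such windows, (width/4-1)*(height/4-1) in the
 * original pixel dimensions, so a caller obtains the mean SSIM as ssim/cnt. */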

static int pixel_vsad( pixel *src, int stride, int height )
{
    int score = 0;
    for( int i = 1; i < height; i++, src += stride )
        for( int j = 0; j < 16; j++ )
            score += abs(src[j] - src[j+stride]);
    return score;
}

int x264_field_vsad( x264_t *h, int mb_x, int mb_y )
{
    int score_field, score_frame;
    int stride = h->fenc->i_stride[0];
    int mb_stride = h->mb.i_mb_stride;
    pixel *fenc = h->fenc->plane[0] + 16 * (mb_x + mb_y * stride);
    int mb_xy = mb_x + mb_y*mb_stride;

    /* We don't want to analyze pixels outside the frame, as it gives inaccurate results. */
    int mbpair_height = X264_MIN( h->param.i_height - mb_y * 16, 32 );
    score_frame  = h->pixf.vsad( fenc,          stride, mbpair_height );
    score_field  = h->pixf.vsad( fenc,        stride*2, mbpair_height >> 1 );
    score_field += h->pixf.vsad( fenc+stride, stride*2, mbpair_height >> 1 );

    if( mb_x > 0 )
        score_field += 512 - h->mb.field[mb_xy        -1]*1024;
    if( mb_y > 0 )
        score_field += 512 - h->mb.field[mb_xy-mb_stride]*1024;

    return (score_field < score_frame);
}
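
/* The +/-512 terms bias the frame/field decision toward agreeing with the
 * already-decided left and top macroblock pairs (h->mb.field[] is 1 for
 * field coding): each frame-coded neighbour adds 512 to the field score and
 * each field-coded neighbour subtracts 512, a hysteresis that discourages
 * isolated MBAFF frame/field flips. */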

/****************************************************************************
 * successive elimination
 ****************************************************************************/
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i<width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}
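
/* These prune motion vector candidates using a lower bound on SAD: for any
 * block, |sum(enc) - sum(ref)| <= SAD(enc, ref), so a candidate whose bound
 * (plus its MV cost from cost_mvx[]) already reaches thresh -- derived from
 * the best score found so far -- cannot win and is skipped.  ads4 tightens
 * the bound by comparing the four sub-block sums (sums[0], sums[8],
 * sums[delta], sums[delta+8]) against enc_dc[] separately; the positions of
 * the surviving candidates are compacted into mvs[]. */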


/****************************************************************************
 * x264_pixel_init:
 ****************************************************************************/
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );

#define INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
    pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
    INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
    pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
    INIT4_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
    INIT5_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
    INIT6_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
#define INIT8_NAME( name1, name2, cpu ) \
    INIT7_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x16]  = x264_pixel_##name2##_4x16##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )
#define INIT8( name, cpu ) INIT8_NAME( name, name, cpu )
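
/* For instance, INIT2( sad, _mmx2 ) expands to:
 *     pixf->sad[PIXEL_16x16] = x264_pixel_sad_16x16_mmx2;
 *     pixf->sad[PIXEL_16x8]  = x264_pixel_sad_16x8_mmx2;
 * and each larger INITn covers additional partition sizes on top of the
 * previous one, so a CPU flag can register exactly the sizes it implements. */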

#define INIT_ADS( cpu ) \
    pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
    pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
    pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;

    INIT8( sad, );
    INIT8_NAME( sad_aligned, sad, );
    INIT7( sad_x3, );
    INIT7( sad_x4, );
    INIT8( ssd, );
    INIT8( satd, );
    INIT7( satd_x3, );
    INIT7( satd_x4, );
    INIT4( hadamard_ac, );
    INIT_ADS( );

    pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
    pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
    pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16;
    pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8;
    pixf->var2[PIXEL_8x16]  = x264_pixel_var2_8x16;
    pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8;

    pixf->ssd_nv12_core = pixel_ssd_nv12_core;
    pixf->ssim_4x4x2_core = ssim_4x4x2_core;
    pixf->ssim_end4 = ssim_end4;
    pixf->vsad = pixel_vsad;

    pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
    pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
    pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
    pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
    pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
    pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
    pixf->intra_sad_x3_8x16c  = x264_intra_sad_x3_8x16c;
    pixf->intra_satd_x3_8x16c = x264_intra_satd_x3_8x16c;
    pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
    pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;

#if HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX2 )
    {
        INIT7( sad, _mmx2 );
        INIT7( sad_x3, _mmx2 );
        INIT7( sad_x4, _mmx2 );
        INIT7( satd, _mmx2 );
        INIT7( satd_x3, _mmx2 );
        INIT7( satd_x4, _mmx2 );
        INIT4( hadamard_ac, _mmx2 );
        INIT7( ssd, _mmx2 );
        INIT_ADS( _mmx2 );

        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_mmx2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
        pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_mmx2;

        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
    }
    if( cpu&X264_CPU_SSE2 )
    {
        INIT4_NAME( sad_aligned, sad, _sse2_aligned );
        INIT5( ssd, _sse2 );

        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_sse2;
        pixf->var2[PIXEL_8x8]  = x264_pixel_var2_8x8_sse2;
    }
    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT5( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT_ADS( _sse2 );

        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }

        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_sse2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_sse2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_sse2;
    }
    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4]  = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4]  = x264_pixel_sad_x4_8x4_sse2;
    }
    if( cpu&X264_CPU_SSSE3 )
    {
        INIT4_NAME( sad_aligned, sad, _ssse3_aligned );
        INIT7( sad, _ssse3 );
        INIT7( sad_x3, _ssse3 );
        INIT7( sad_x4, _ssse3 );
        INIT_ADS( _ssse3 );

        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
        }

        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_ssse3;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
    }
    if( cpu&X264_CPU_SSE4 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
    }
    if( cpu&X264_CPU_AVX )
    {
        INIT_ADS( _avx );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _avx );
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
        pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
    }
#endif // HAVE_MMX
#else // !HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        INIT8( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMX2 )
    {
        INIT8( sad, _mmx2 );
        INIT8_NAME( sad_aligned, sad, _mmx2 );
        INIT7( sad_x3, _mmx2 );
        INIT7( sad_x4, _mmx2 );
        INIT7( satd, _mmx2 );
        INIT7( satd_x3, _mmx2 );
        INIT7( satd_x4, _mmx2 );
        INIT4( hadamard_ac, _mmx2 );
        INIT_ADS( _mmx2 );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
        pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_mmx2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmx2;
#if ARCH_X86
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmx2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmx2;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmx2;
        pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_mmx2;
        pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_mmx2;
        pixf->vsad = x264_pixel_vsad_mmx2;

        if( cpu&X264_CPU_CACHELINE_32 )
        {
            INIT5( sad, _cache32_mmx2 );
            INIT4( sad_x3, _cache32_mmx2 );
            INIT4( sad_x4, _cache32_mmx2 );
        }
        else if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT5( sad, _cache64_mmx2 );
            INIT4( sad_x3, _cache64_mmx2 );
            INIT4( sad_x4, _cache64_mmx2 );
        }
#else
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmx2;
            pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmx2;
            pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmx2;
            pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmx2;
            pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmx2;
            pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmx2;
            pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmx2;
        }
#endif
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
    }

    if( cpu&X264_CPU_SSE2 )
    {
        INIT5( ssd, _sse2slow );
        INIT2_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
        pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8_sse2;
        pixf->vsad = x264_pixel_vsad_sse2;
    }

    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT6( satd, _sse2 );
        INIT6( satd_x3, _sse2 );
        INIT6( satd_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }
        INIT_ADS( _sse2 );
        pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
        pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( ssd, _sse2); /* faster for width 16 on p4 */
#if ARCH_X86
            INIT2( sad, _cache64_sse2 );
            INIT2( sad_x3, _cache64_sse2 );
            INIT2( sad_x4, _cache64_sse2 );
#endif
            if( cpu&X264_CPU_SSE2_IS_FAST )
            {
                pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
                pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
            }
        }

        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            INIT2( sad_x3, _sse2_misalign );
            INIT2( sad_x4, _sse2_misalign );
        }
    }

    if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
    {
        pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
    }

    if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
    {
        INIT2( sad, _sse3 );
        INIT2( sad_x3, _sse3 );
        INIT2( sad_x4, _sse3 );
    }

    if( cpu&X264_CPU_SSSE3 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_ssse3;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_ssse3;
            pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_ssse3;
#if ARCH_X86_64
            pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_ssse3;
#endif
        }
        INIT_ADS( _ssse3 );
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            INIT8( ssd, _ssse3 );
            pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
            pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
            INIT7( satd, _ssse3 );
            INIT7( satd_x3, _ssse3 );
            INIT7( satd_x4, _ssse3 );
        }
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_ssse3;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
#endif
        pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_ssse3;
        if( cpu&X264_CPU_SHUFFLE_IS_FAST )
            pixf->intra_sad_x3_8x8  = x264_intra_sad_x3_8x8_ssse3;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( sad, _cache64_ssse3 );
            INIT2( sad_x3, _cache64_ssse3 );
            INIT2( sad_x4, _cache64_ssse3 );
        }
        if( cpu&X264_CPU_SLOW_ATOM || !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
        {
            INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
        }
    }

    if( cpu&X264_CPU_SSE4 )
    {
        INIT7( satd, _sse4 );
        INIT7( satd_x3, _sse4 );
        INIT7( satd_x4, _sse4 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_sse4;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_sse4;
            pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_sse4;
#if ARCH_X86_64
            pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_sse4;
#endif
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
        pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_sse4;
    }

    if( cpu&X264_CPU_AVX )
    {
        INIT7( satd, _avx );
        INIT7( satd_x3, _avx );
        INIT7( satd_x4, _avx );
        INIT_ADS( _avx );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _avx );
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_avx;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_avx;
            pixf->intra_sad_x9_8x8  = x264_intra_sad_x9_8x8_avx;
#if ARCH_X86_64
            pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_avx;
#endif
        }
        INIT5( ssd, _avx );
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8= x264_intra_sa8d_x3_8x8_avx;
#endif
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
        pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_avx;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
        pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
        pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_avx;
        pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_avx;
    }

    if( cpu&X264_CPU_XOP )
    {
        INIT7( satd, _xop );
        INIT7( satd_x3, _xop );
        INIT7( satd_x4, _xop );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _xop );
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_xop;
        }
        INIT5( ssd, _xop );
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_xop;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_xop;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8= x264_intra_sa8d_x3_8x8_xop;
#endif
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_xop;
        pixf->var[PIXEL_8x16]  = x264_pixel_var_8x16_xop;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_xop;
        pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_xop;
    }
#endif //HAVE_MMX

#if HAVE_ARMV6
    if( cpu&X264_CPU_ARMV6 )
    {
        pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
    }
    if( cpu&X264_CPU_NEON )
    {
        INIT5( sad, _neon );
        INIT5( sad_aligned, _neon );
        INIT7( sad_x3, _neon );
        INIT7( sad_x4, _neon );
        INIT7( ssd, _neon );
        INIT7( satd, _neon );
        INIT7( satd_x3, _neon );
        INIT7( satd_x4, _neon );
        INIT4( hadamard_ac, _neon );
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
        pixf->var2[PIXEL_8x8]   = x264_pixel_var2_8x8_neon;

        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
        pixf->ssim_end4         = x264_pixel_ssim_end4_neon;

        if( cpu&X264_CPU_FAST_NEON_MRC )
        {
            pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
            pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
            pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
            pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
        }
        else    // really just scheduled for dual issue / A8
        {
            INIT5( sad_aligned, _neon_dual );
        }
    }
#endif
#endif // HIGH_BIT_DEPTH
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif
#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
    INIT4( sad, _vis );
    INIT4( sad_x3, _vis );
    INIT4( sad_x4, _vis );
#endif
#endif // !HIGH_BIT_DEPTH

    pixf->ads[PIXEL_8x16] =
    pixf->ads[PIXEL_8x4] =
    pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
    pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}