/*****************************************************************************
 * pixel.c: pixel metrics
 *****************************************************************************
 * Copyright (C) 2003-2011 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"

#if HAVE_MMX
#   include "x86/pixel.h"
#   include "x86/predict.h"
#endif
#if ARCH_PPC
#   include "ppc/pixel.h"
#endif
#if ARCH_ARM
#   include "arm/pixel.h"
#endif
#if ARCH_UltraSPARC
#   include "sparc/pixel.h"
#endif


/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            i_sum += abs( pix1[x] - pix2[x] );      \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}


PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )


/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            int d = pix1[x] - pix2[x];              \
            i_sum += d*d;                           \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )

uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
{
    uint64_t i_ssd = 0;
    int y;
    int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        int x = 0;
        if( align )
            for( ; x < i_width-15; x += 16 )
                SSD(PIXEL_16x16);
        for( ; x < i_width-7; x += 8 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( int x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width & 7 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( int x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height & 7 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( int x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}
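/* Dispatch note: the 16x16 SSD kernel is only used when both planes and both
 * strides are 16-byte aligned (so SIMD implementations can rely on aligned
 * loads); the rest of the plane is covered by 8-wide blocks, and any leftover
 * right/bottom edge narrower than 8 pixels is accumulated with the scalar
 * SSD1 fallback above. */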

static void pixel_ssd_nv12_core( pixel *pixuv1, int stride1, pixel *pixuv2, int stride2, int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    *ssd_u = 0, *ssd_v = 0;
    for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
        for( int x = 0; x < width; x++ )
        {
            int du = pixuv1[2*x]   - pixuv2[2*x];
            int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
            *ssd_u += du*du;
            *ssd_v += dv*dv;
        }
}

void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v )
{
    pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height, ssd_u, ssd_v );
    if( i_width&7 )
    {
        uint64_t tmp[2];
        /* width counts interleaved CbCr pairs, so the pointer offset to the
         * leftover columns is twice the pair index. */
        pixel_ssd_nv12_core( pix1+(i_width&~7)*2, i_pix1, pix2+(i_width&~7)*2, i_pix2, i_width&7, i_height, &tmp[0], &tmp[1] );
        *ssd_u += tmp[0];
        *ssd_v += tmp[1];
    }
}

/****************************************************************************
 * pixel_var_wxh
 ****************************************************************************/
#define PIXEL_VAR_C( name, w ) \
static uint64_t name( pixel *pix, int i_stride ) \
{                                             \
    uint32_t sum = 0, sqr = 0;                \
    for( int y = 0; y < w; y++ )              \
    {                                         \
        for( int x = 0; x < w; x++ )          \
        {                                     \
            sum += pix[x];                    \
            sqr += pix[x] * pix[x];           \
        }                                     \
        pix += i_stride;                      \
    }                                         \
    return sum + ((uint64_t)sqr << 32);       \
}
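/* The packed return value holds the pixel sum in its low 32 bits and the sum
 * of squares in its high 32 bits, so a caller can split them apart and derive
 * the block variance as sqr - sum*sum/N without a second pass. */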

PIXEL_VAR_C( x264_pixel_var_16x16, 16 )
PIXEL_VAR_C( x264_pixel_var_8x8,    8 )

/****************************************************************************
 * pixel_var2_wxh
 ****************************************************************************/
static int pixel_var2_8x8( pixel *pix1, int i_stride1, pixel *pix2, int i_stride2, int *ssd )
{
    uint32_t var = 0, sum = 0, sqr = 0;
    for( int y = 0; y < 8; y++ )
    {
        for( int x = 0; x < 8; x++ )
        {
            int diff = pix1[x] - pix2[x];
            sum += diff;
            sqr += diff * diff;
        }
        pix1 += i_stride1;
        pix2 += i_stride2;
    }
    sum = abs(sum);
    var = sqr - ((uint64_t)sum * sum >> 6);
    *ssd = sqr;
    return var;
}
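/* The >>6 divides (sum*sum) by 64, the pixel count of the 8x8 block, so
 * var = sum(d^2) - (sum(d))^2/64, i.e. 64 times the variance of the
 * differences; *ssd additionally returns the raw sum of squares. */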

#if BIT_DEPTH > 8
    typedef uint32_t sum_t;
    typedef uint64_t sum2_t;
#else
    typedef uint16_t sum_t;
    typedef uint32_t sum2_t;
#endif
#define BITS_PER_SUM (8 * sizeof(sum_t))

#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
    sum2_t t0 = s0 + s1;\
    sum2_t t1 = s0 - s1;\
    sum2_t t2 = s2 + s3;\
    sum2_t t3 = s2 - s3;\
    d0 = t0 + t2;\
    d2 = t0 - t2;\
    d1 = t1 + t3;\
    d3 = t1 - t3;\
}
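/* Pseudo-SIMD: the SATD/SA8D code below packs two sum_t lanes into one sum2_t
 * value of the form x+(y<<BITS_PER_SUM), so each HADAMARD4 butterfly
 * transforms two columns at once using plain integer arithmetic. */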

// in: a pseudo-simd number of the form x+(y<<BITS_PER_SUM)
// return: abs(x)+(abs(y)<<BITS_PER_SUM)
static ALWAYS_INLINE sum2_t abs2( sum2_t a )
{
    sum2_t s = ((a>>(BITS_PER_SUM-1))&(((sum2_t)1<<BITS_PER_SUM)+1))*((sum_t)-1);
    return (a+s)^s;
}
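/* Worked example (assuming 8-bit depth, so sum_t is 16-bit and
 * BITS_PER_SUM == 16): packing x = -2, y = 3 gives a = (3<<16) + 0xFFFE
 * = 0x0002FFFE. a>>15 = 5, masked with 0x10001 -> 1; s = 1 * 0xFFFF =
 * 0x0000FFFF. (a+s) = 0x0003FFFD (the carry out of the low lane restores y),
 * and (a+s)^s = 0x00030002 = (3<<16) + 2, i.e. abs(x) + (abs(y)<<16). */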

/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/

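/* Each satd function below applies an unnormalized Hadamard transform to the
 * difference block and sums the absolute coefficients; the final >>1 halves
 * the result to compensate for the transform's gain, keeping SATD roughly on
 * the same scale as SAD. */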
static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[4][2];
    sum2_t a0, a1, a2, a3, b0, b1;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        tmp[i][0] = b0 + b1;
        tmp[i][1] = b0 - b1;
    }
    for( int i = 0; i < 2; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
        sum += ((sum_t)a0) + (a0>>BITS_PER_SUM);
    }
    return sum >> 1;
}

static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[4][4];
    sum2_t a0, a1, a2, a3;
    sum2_t sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
        a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
        a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
        a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    return (((sum_t)sum) + (sum>>BITS_PER_SUM)) >> 1;
}

#define PIXEL_SATD_C( w, h, sub )\
static int x264_pixel_satd_##w##x##h( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )\
{\
    int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
            + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
    if( w==16 )\
        sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
            + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
    if( h==16 )\
        sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
            + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
    if( w==16 && h==16 )\
        sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
            + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
    return sum;\
}
PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 16, 8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 4,  8,  x264_pixel_satd_4x4 )


static NOINLINE int sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    sum2_t tmp[8][4];
    sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
    sum2_t sum = 0;
    for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
        a4 = pix1[4] - pix2[4];
        a5 = pix1[5] - pix2[5];
        b2 = (a4+a5) + ((a4-a5)<<BITS_PER_SUM);
        a6 = pix1[6] - pix2[6];
        a7 = pix1[7] - pix2[7];
        b3 = (a6+a7) + ((a6-a7)<<BITS_PER_SUM);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
        b0  = abs2(a0+a4) + abs2(a0-a4);
        b0 += abs2(a1+a5) + abs2(a1-a5);
        b0 += abs2(a2+a6) + abs2(a2-a6);
        b0 += abs2(a3+a7) + abs2(a3-a7);
        sum += (sum_t)b0 + (b0>>BITS_PER_SUM);
    }
    return sum;
}

static int x264_pixel_sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
    return (sum+2)>>2;
}

static int x264_pixel_sa8d_16x16( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
            + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
            + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
            + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
    return (sum+2)>>2;
}
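/* sa8d is the 8x8 analog of satd: the sum of absolute coefficients of an
 * unnormalized 8x8 Hadamard transform of the differences. The larger
 * transform has a larger gain, so the total is normalized with a rounded
 * divide by 4, (sum+2)>>2, rather than satd's >>1. */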


static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, int stride )
{
    sum2_t tmp[32];
    sum2_t a0, a1, a2, a3, dc;
    sum2_t sum4 = 0, sum8 = 0;
    for( int i = 0; i < 8; i++, pix+=stride )
    {
        sum2_t *t = tmp + (i&3) + (i&4)*4;
        a0 = (pix[0]+pix[1]) + ((sum2_t)(pix[0]-pix[1])<<BITS_PER_SUM);
        a1 = (pix[2]+pix[3]) + ((sum2_t)(pix[2]-pix[3])<<BITS_PER_SUM);
        t[0] = a0 + a1;
        t[4] = a0 - a1;
        a2 = (pix[4]+pix[5]) + ((sum2_t)(pix[4]-pix[5])<<BITS_PER_SUM);
        a3 = (pix[6]+pix[7]) + ((sum2_t)(pix[6]-pix[7])<<BITS_PER_SUM);
        t[8] = a2 + a3;
        t[12] = a2 - a3;
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
        tmp[i*4+0] = a0;
        tmp[i*4+1] = a1;
        tmp[i*4+2] = a2;
        tmp[i*4+3] = a3;
        sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
        sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    dc = (sum_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
    sum4 = (sum_t)sum4 + (sum4>>BITS_PER_SUM) - dc;
    sum8 = (sum_t)sum8 + (sum8>>BITS_PER_SUM) - dc;
    return ((uint64_t)sum8<<32) + sum4;
}

#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, int stride )\
{\
    uint64_t sum = pixel_hadamard_ac( pix, stride );\
    if( w==16 )\
        sum += pixel_hadamard_ac( pix+8, stride );\
    if( h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride, stride );\
    if( w==16 && h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
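/* pixel_hadamard_ac returns (sum8<<32) + sum4: the low half sums absolute 4x4
 * Hadamard coefficients, the high half 8x8 coefficients, each with the DC
 * term subtracted. The wrapper's ((sum>>34)<<32) + ((uint32_t)sum>>1) unpacks
 * the two halves while dividing them by 4 and 2 respectively, their
 * per-transform normalization factors. */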
HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )


/****************************************************************************
 * pixel_sad_x4
 ****************************************************************************/
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )

#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif
#endif // !HIGH_BIT_DEPTH

/****************************************************************************
 * pixel_satd_x4
 * no faster than single satd, but needed for satd to be a drop-in replacement for sad
 ****************************************************************************/

#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )

SATD_X_DECL7()
#if HAVE_MMX
SATD_X_DECL7( _mmx2 )
#if !HIGH_BIT_DEPTH
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
SATD_X_DECL7( _avx )
#endif // !HIGH_BIT_DEPTH
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif
#endif // !HIGH_BIT_DEPTH

#define INTRA_MBCMP_8x8( mbcmp, cpu, cpu2 )\
void x264_intra_##mbcmp##_x3_8x8##cpu( pixel *fenc, pixel edge[36], int res[3] )\
{\
    ALIGNED_ARRAY_16( pixel, pix, [8*FDEC_STRIDE] );\
    x264_predict_8x8_v##cpu2( pix, edge );\
    res[0] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_h##cpu2( pix, edge );\
    res[1] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_dc##cpu2( pix, edge );\
    res[2] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP_8x8( sad,, _c )
INTRA_MBCMP_8x8(sa8d,, _c )
#if HIGH_BIT_DEPTH && HAVE_MMX
INTRA_MBCMP_8x8( sad, _mmx2,  _c )
INTRA_MBCMP_8x8( sad, _sse2,  _sse2 )
INTRA_MBCMP_8x8( sad, _ssse3, _sse2 )
INTRA_MBCMP_8x8(sa8d, _sse2,  _sse2 )
#endif

#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma, cpu, cpu2 )\
void x264_intra_##mbcmp##_x3_##size##chroma##cpu( pixel *fenc, pixel *fdec, int res[3] )\
{\
    x264_predict_##size##chroma##_##pred1##cpu2( fdec );\
    res[0] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##chroma##_##pred2##cpu2( fdec );\
    res[1] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##chroma##_##pred3##cpu2( fdec );\
    res[2] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP( sad,  4x4,   v, h, dc,  ,, _c )
INTRA_MBCMP(satd,  4x4,   v, h, dc,  ,, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c,, _c )
INTRA_MBCMP(satd,  8x8,  dc, h,  v, c,, _c )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  ,, _c )
INTRA_MBCMP(satd, 16x16,  v, h, dc,  ,, _c )

#if HIGH_BIT_DEPTH && HAVE_MMX
INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _mmx2, _c )
INTRA_MBCMP(satd,  4x4,   v, h, dc,  , _mmx2, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _mmx2, _c )
INTRA_MBCMP(satd,  8x8,  dc, h,  v, c, _mmx2, _c )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _mmx2, _mmx2 )
INTRA_MBCMP(satd, 16x16,  v, h, dc,  , _mmx2, _mmx2 )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _sse2, _sse2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _sse2, _sse2 )
INTRA_MBCMP( sad,  4x4,   v, h, dc,  , _ssse3, _c )
INTRA_MBCMP( sad,  8x8,  dc, h,  v, c, _ssse3, _sse2 )
INTRA_MBCMP( sad, 16x16,  v, h, dc,  , _ssse3, _sse2 )
#endif

// No C implementation of intra_satd_x9. See checkasm for its behavior,
// or see x264_mb_analyse_intra for the entirely different algorithm we
// use when lacking an asm implementation of it.


/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
static void ssim_4x4x2_core( const pixel *pix1, int stride1,
                             const pixel *pix2, int stride2,
                             int sums[2][4])
{
    for( int z = 0; z < 2; z++ )
    {
        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
        for( int y = 0; y < 4; y++ )
            for( int x = 0; x < 4; x++ )
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        pix1 += 4;
        pix2 += 4;
    }
}

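/* Reference formula, evaluated here on raw integer sums:
 *   ssim = (2*mu1*mu2 + C1)*(2*cov + C2) / ((mu1^2 + mu2^2 + C1)*(var1 + var2 + C2))
 * vars/covar below carry the n*(n-1) = 64*63 scaling of the unbiased
 * (co)variance over the 64-pixel window, and ssim_c1/ssim_c2 are the usual
 * (K1*L)^2 and (K2*L)^2 constants pre-scaled to match this fixed-point
 * domain. */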
static float ssim_end1( int s1, int s2, int ss, int s12 )
{
/* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
 * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
 * Maximum value for 9-bit is: ss*64 = (2^9-1)^2*16*4*64 = 1069551616, which will not overflow. */
#if BIT_DEPTH > 9
#define type float
    static const float ssim_c1 = .01*.01*PIXEL_MAX*PIXEL_MAX*64;
    static const float ssim_c2 = .03*.03*PIXEL_MAX*PIXEL_MAX*64*63;
#else
#define type int
    static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
#endif
    type fs1 = s1;
    type fs2 = s2;
    type fss = ss;
    type fs12 = s12;
    type vars = fss*64 - fs1*fs1 - fs2*fs2;
    type covar = fs12*64 - fs1*fs2;
    return (float)(2*fs1*fs2 + ssim_c1) * (float)(2*covar + ssim_c2)
         / ((float)(fs1*fs1 + fs2*fs2 + ssim_c1) * (float)(vars + ssim_c2));
#undef type
}

static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    float ssim = 0.0;
    for( int i = 0; i < width; i++ )
        ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
                           sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
                           sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
                           sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
    return ssim;
}

float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           pixel *pix1, int stride1,
                           pixel *pix2, int stride2,
                           int width, int height, void *buf, int *cnt )
{
    int z = 0;
    float ssim = 0.0;
    int (*sum0)[4] = buf;
    int (*sum1)[4] = sum0 + (width >> 2) + 3;
    width >>= 2;
    height >>= 2;
    for( int y = 1; y < height; y++ )
    {
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( int x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        for( int x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    *cnt = (height-1) * (width-1);
    return ssim;
}

static int pixel_vsad( pixel *src, int stride, int height )
{
    int score = 0;
    for( int i = 1; i < height; i++, src += stride )
        for( int j = 0; j < 16; j++ )
            score += abs(src[j] - src[j+stride]);
    return score;
}

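/* pixel_vsad measures vertical high-frequency energy: the SAD between each
 * row and the row below it. x264_field_vsad compares this score for the
 * frame-coded MB pair against the two fields measured separately, since
 * interlaced content has far less vertical correlation per frame than per
 * field. It returns 1 when field coding looks cheaper, with a +/-512 bias per
 * already-decided neighboring MB pair so adjacent decisions tend to agree. */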
int x264_field_vsad( x264_t *h, int mb_x, int mb_y )
{
    int score_field, score_frame;
    int stride = h->fenc->i_stride[0];
    int mb_stride = h->mb.i_mb_stride;
    pixel *fenc = h->fenc->plane[0] + 16 * (mb_x + mb_y * stride);
    int mb_xy = mb_x + mb_y*mb_stride;

    /* We don't want to analyze pixels outside the frame, as it gives inaccurate results. */
    int mbpair_height = X264_MIN( h->param.i_height - mb_y * 16, 32 );
    score_frame  = h->pixf.vsad( fenc,          stride, mbpair_height );
    score_field  = h->pixf.vsad( fenc,        stride*2, mbpair_height >> 1 );
    score_field += h->pixf.vsad( fenc+stride, stride*2, mbpair_height >> 1 );

    if( mb_x > 0 )
        score_field += 512 - h->mb.field[mb_xy        -1]*1024;
    if( mb_y > 0 )
        score_field += 512 - h->mb.field[mb_xy-mb_stride]*1024;

    return (score_field < score_frame);
}

/****************************************************************************
 * successive elimination
 ****************************************************************************/
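/* sums[] holds precomputed sums (DC values) of candidate reference blocks at
 * successive motion-vector positions. By the triangle inequality,
 * |sum(enc) - sum(ref)| is a lower bound on the SAD between two blocks, so
 * any position whose ads score plus MV cost already reaches thresh can be
 * rejected without computing a full SAD; the indices of the surviving
 * positions are written to mvs[]. */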
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}


/****************************************************************************
 * x264_pixel_init:
 ****************************************************************************/
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );

#define INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
    pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
    INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
    pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
    INIT4_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
    INIT5_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
    INIT6_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )

#define INIT_ADS( cpu ) \
    pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
    pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
    pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;

    INIT7( sad, );
    INIT7_NAME( sad_aligned, sad, );
    INIT7( sad_x3, );
    INIT7( sad_x4, );
    INIT7( ssd, );
    INIT7( satd, );
    INIT7( satd_x3, );
    INIT7( satd_x4, );
    INIT4( hadamard_ac, );
    INIT_ADS( );

    pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
    pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
    pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8;

    pixf->ssd_nv12_core = pixel_ssd_nv12_core;
    pixf->ssim_4x4x2_core = ssim_4x4x2_core;
    pixf->ssim_end4 = ssim_end4;
    pixf->var2_8x8 = pixel_var2_8x8;
    pixf->vsad = pixel_vsad;

    pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
    pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
    pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
    pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
    pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
    pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
    pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
    pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;

#if HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX2 )
    {
        INIT7( sad, _mmx2 );
        INIT7( sad_x3, _mmx2 );
        INIT7( sad_x4, _mmx2 );
        INIT7( satd, _mmx2 );
        INIT7( satd_x3, _mmx2 );
        INIT7( satd_x4, _mmx2 );
        INIT4( hadamard_ac, _mmx2 );
        INIT7( ssd, _mmx2 );
        INIT_ADS( _mmx2 );

        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_mmx2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
        pixf->var2_8x8 = x264_pixel_var2_8x8_mmx2;

        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
    }
    if( cpu&X264_CPU_SSE2 )
    {
        INIT4_NAME( sad_aligned, sad, _sse2_aligned );
        INIT5( ssd, _sse2 );

        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
        pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
        pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
    }
    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT5( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT_ADS( _sse2 );

        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }

        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_sse2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_sse2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_sse2;
    }
    if( cpu&X264_CPU_SSE2_IS_FAST )
    {
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4]  = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4]  = x264_pixel_sad_x4_8x4_sse2;
    }
    if( cpu&X264_CPU_SSSE3 )
    {
        INIT4_NAME( sad_aligned, sad, _ssse3_aligned );
        INIT7( sad, _ssse3 );
        INIT7( sad_x3, _ssse3 );
        INIT7( sad_x4, _ssse3 );
        INIT_ADS( _ssse3 );

        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
        }

        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_ssse3;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
    }
    if( cpu&X264_CPU_SSE4 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
    }
    if( cpu&X264_CPU_AVX )
    {
        INIT_ADS( _avx );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _avx );
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
        pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
    }
#endif // HAVE_MMX
#else // !HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        INIT7( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMX2 )
    {
        INIT7( sad, _mmx2 );
        INIT7_NAME( sad_aligned, sad, _mmx2 );
        INIT7( sad_x3, _mmx2 );
        INIT7( sad_x4, _mmx2 );
        INIT7( satd, _mmx2 );
        INIT7( satd_x3, _mmx2 );
        INIT7( satd_x4, _mmx2 );
        INIT4( hadamard_ac, _mmx2 );
        INIT_ADS( _mmx2 );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmx2;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmx2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmx2;
#if ARCH_X86
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmx2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmx2;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmx2;
        pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_mmx2;
        pixf->var2_8x8 = x264_pixel_var2_8x8_mmx2;
        pixf->vsad = x264_pixel_vsad_mmx2;

        if( cpu&X264_CPU_CACHELINE_32 )
        {
            INIT5( sad, _cache32_mmx2 );
            INIT4( sad_x3, _cache32_mmx2 );
            INIT4( sad_x4, _cache32_mmx2 );
        }
        else if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT5( sad, _cache64_mmx2 );
            INIT4( sad_x3, _cache64_mmx2 );
            INIT4( sad_x4, _cache64_mmx2 );
        }
#else
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmx2;
            pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmx2;
            pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmx2;
            pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmx2;
            pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmx2;
            pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmx2;
            pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmx2;
        }
#endif
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmx2;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmx2;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmx2;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmx2;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmx2;
    }

    if( cpu&X264_CPU_SSE2 )
    {
        INIT5( ssd, _sse2slow );
        INIT2_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
        pixf->vsad = x264_pixel_vsad_sse2;
    }

    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT6( satd, _sse2 );
        INIT6( satd_x3, _sse2 );
        INIT6( satd_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }
        INIT_ADS( _sse2 );
        pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( ssd, _sse2); /* faster for width 16 on p4 */
#if ARCH_X86
            INIT2( sad, _cache64_sse2 );
            INIT2( sad_x3, _cache64_sse2 );
            INIT2( sad_x4, _cache64_sse2 );
#endif
            if( cpu&X264_CPU_SSE2_IS_FAST )
            {
                pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
                pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
            }
        }

        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            INIT2( sad_x3, _sse2_misalign );
            INIT2( sad_x4, _sse2_misalign );
        }
    }

    if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
    {
        pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
    }

    if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
    {
        INIT2( sad, _sse3 );
        INIT2( sad_x3, _sse3 );
        INIT2( sad_x4, _sse3 );
    }

    if( cpu&X264_CPU_SSSE3 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_ssse3;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_ssse3;
        }
        INIT_ADS( _ssse3 );
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            INIT7( ssd, _ssse3 );
            pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
            pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
            INIT7( satd, _ssse3 );
            INIT7( satd_x3, _ssse3 );
            INIT7( satd_x4, _ssse3 );
        }
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_ssse3;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_ssse3;
        if( cpu&X264_CPU_SHUFFLE_IS_FAST )
            pixf->intra_sad_x3_8x8  = x264_intra_sad_x3_8x8_ssse3;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( sad, _cache64_ssse3 );
            INIT2( sad_x3, _cache64_ssse3 );
            INIT2( sad_x4, _cache64_ssse3 );
        }
        if( cpu&X264_CPU_SLOW_ATOM || !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
        {
            INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
        }
    }

    if( cpu&X264_CPU_SSE4 )
    {
        INIT7( satd, _sse4 );
        INIT7( satd_x3, _sse4 );
        INIT7( satd_x4, _sse4 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_sse4;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_sse4;
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
        pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_sse4;
    }

    if( cpu&X264_CPU_AVX )
    {
        INIT7( satd, _avx );
        INIT7( satd_x3, _avx );
        INIT7( satd_x4, _avx );
        INIT_ADS( _avx );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _avx );
            pixf->intra_sad_x9_4x4  = x264_intra_sad_x9_4x4_avx;
            pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_avx;
        }
        INIT5( ssd, _avx );
#if ARCH_X86_64
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_avx;
        pixf->intra_sa8d_x3_8x8= x264_intra_sa8d_x3_8x8_avx;
#endif
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_avx;
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_avx;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_avx;
        pixf->ssim_end4        = x264_pixel_ssim_end4_avx;
        pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_avx;
        pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_avx;
    }
#endif //HAVE_MMX

#if HAVE_ARMV6
    if( cpu&X264_CPU_ARMV6 )
    {
        pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
    }
    if( cpu&X264_CPU_NEON )
    {
        INIT5( sad, _neon );
        INIT5( sad_aligned, _neon );
        INIT7( sad_x3, _neon );
        INIT7( sad_x4, _neon );
        INIT7( ssd, _neon );
        INIT7( satd, _neon );
        INIT7( satd_x3, _neon );
        INIT7( satd_x4, _neon );
        INIT4( hadamard_ac, _neon );
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
        pixf->var2_8x8          = x264_pixel_var2_8x8_neon;

        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
        pixf->ssim_end4         = x264_pixel_ssim_end4_neon;

        if( cpu&X264_CPU_FAST_NEON_MRC )
        {
            pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
            pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
            pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
            pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
        }
        else    // really just scheduled for dual issue / A8
        {
            INIT5( sad_aligned, _neon_dual );
        }
    }
#endif
#endif // HIGH_BIT_DEPTH
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif
#if !HIGH_BIT_DEPTH
#if ARCH_UltraSPARC
    INIT4( sad, _vis );
    INIT4( sad_x3, _vis );
    INIT4( sad_x4, _vis );
#endif
#endif // !HIGH_BIT_DEPTH

    pixf->ads[PIXEL_8x16] =
    pixf->ads[PIXEL_8x4] =
    pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
    pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}