/*****************************************************************************
 * pixel.c: pixel metrics
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#include "common.h"

#ifdef HAVE_MMX
#   include "x86/pixel.h"
#endif
#ifdef ARCH_PPC
#   include "ppc/pixel.h"
#endif
#ifdef ARCH_ARM
#   include "arm/pixel.h"
#endif
#ifdef ARCH_UltraSparc
#   include "sparc/pixel.h"
#endif


/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
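/* Reference C implementation of SAD (sum of absolute differences) over an
 * lx-by-ly block; the asm versions selected in x264_pixel_init() replace
 * these entries on supported CPUs. */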
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( uint8_t *pix1, int i_stride_pix1,  \
                 uint8_t *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    int x, y;                                       \
    for( y = 0; y < ly; y++ )                       \
    {                                               \
        for( x = 0; x < lx; x++ )                   \
        {                                           \
            i_sum += abs( pix1[x] - pix2[x] );      \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}


PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )


/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( uint8_t *pix1, int i_stride_pix1,  \
                 uint8_t *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    int x, y;                                       \
    for( y = 0; y < ly; y++ )                       \
    {                                               \
        for( x = 0; x < lx; x++ )                   \
        {                                           \
            int d = pix1[x] - pix2[x];              \
            i_sum += d*d;                           \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )

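/* SSD over an arbitrary width x height region (used e.g. for PSNR): areas that
 * are multiples of 16/8 go through the block ssd function pointers, and any
 * leftover right/bottom edge pixels are accumulated one at a time. */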
int64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2, int i_width, int i_height )
{
    int64_t i_ssd = 0;
    int x, y;
    int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        x = 0;
        if( align )
            for( ; x < i_width-15; x += 16 )
                SSD(PIXEL_16x16);
        for( ; x < i_width-7; x += 8 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width % 8 != 0 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height % 8 != 0 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}


/****************************************************************************
 * pixel_var_wxh
 ****************************************************************************/
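/* Returns the sum of squared deviations from the mean (i.e. N times the
 * variance) of a w x w block; `shift` must be log2 of the pixel count
 * (8 for 16x16, 6 for 8x8) so that sum*sum >> shift is sum^2/N. */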
#define PIXEL_VAR_C( name, w, shift ) \
static int name( uint8_t *pix, int i_stride ) \
{                                             \
    uint32_t var = 0, sum = 0, sqr = 0;       \
    int x, y;                                 \
    for( y = 0; y < w; y++ )                  \
    {                                         \
        for( x = 0; x < w; x++ )              \
        {                                     \
            sum += pix[x];                    \
            sqr += pix[x] * pix[x];           \
        }                                     \
        pix += i_stride;                      \
    }                                         \
    var = sqr - (sum * sum >> shift);         \
    return var;                               \
}

PIXEL_VAR_C( x264_pixel_var_16x16, 16, 8 )
PIXEL_VAR_C( x264_pixel_var_8x8,    8, 6 )

/****************************************************************************
 * pixel_var2_wxh
 ****************************************************************************/
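/* Same idea as above, but for the difference between two 8x8 blocks: returns
 * the variance-style measure of the residual and stores its plain SSD in *ssd. */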
static int pixel_var2_8x8( uint8_t *pix1, int i_stride1, uint8_t *pix2, int i_stride2, int *ssd )
{
    uint32_t var = 0, sum = 0, sqr = 0;
    int x, y;
    for( y = 0; y < 8; y++ )
    {
        for( x = 0; x < 8; x++ )
        {
            int diff = pix1[x] - pix2[x];
            sum += diff;
            sqr += diff * diff;
        }
        pix1 += i_stride1;
        pix2 += i_stride2;
    }
    sum = abs(sum);
    var = sqr - (sum * sum >> 6);
    *ssd = sqr;
    return var;
}

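/* 4-point Hadamard (butterfly) transform: two add/sub stages over four inputs.
 * The callers below feed it the packed pseudo-SIMD values described at abs2(),
 * so each invocation transforms two 16-bit lanes at once. */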
#define HADAMARD4(d0,d1,d2,d3,s0,s1,s2,s3) {\
    int t0 = s0 + s1;\
    int t1 = s0 - s1;\
    int t2 = s2 + s3;\
    int t3 = s2 - s3;\
    d0 = t0 + t2;\
    d2 = t0 - t2;\
    d1 = t1 + t3;\
    d3 = t1 - t3;\
}

// in: a pseudo-simd number of the form x+(y<<16)
// return: abs(x)+(abs(y)<<16)
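// (s is 0xffff in each 16-bit half whose sign bit is set, so (a+s)^s negates
// exactly the negative halves -- the usual (x+mask)^mask absolute-value trick)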
static ALWAYS_INLINE uint32_t abs2( uint32_t a )
{
    uint32_t s = ((a>>15)&0x10001)*0xffff;
    return (a+s)^s;
}

/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/

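/* The 4x4 SATD packs two 16-bit differences per 32-bit word (see abs2), so the
 * vertical Hadamard stage processes two coefficients per operation; the final
 * >>1 is x264's usual SATD normalization. */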
static NOINLINE int x264_pixel_satd_4x4( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
    uint32_t tmp[4][2];
    uint32_t a0,a1,a2,a3,b0,b1;
    int sum=0, i;
    for( i=0; i<4; i++, pix1+=i_pix1, pix2+=i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<16);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<16);
        tmp[i][0] = b0 + b1;
        tmp[i][1] = b0 - b1;
    }
    for( i=0; i<2; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
        sum += ((uint16_t)a0) + (a0>>16);
    }
    return sum >> 1;
}

static NOINLINE int x264_pixel_satd_8x4( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
    uint32_t tmp[4][4];
    uint32_t a0,a1,a2,a3;
    int sum=0, i;
    for( i=0; i<4; i++, pix1+=i_pix1, pix2+=i_pix2 )
    {
        a0 = (pix1[0] - pix2[0]) + ((pix1[4] - pix2[4]) << 16);
        a1 = (pix1[1] - pix2[1]) + ((pix1[5] - pix2[5]) << 16);
        a2 = (pix1[2] - pix2[2]) + ((pix1[6] - pix2[6]) << 16);
        a3 = (pix1[3] - pix2[3]) + ((pix1[7] - pix2[7]) << 16);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
    }
    for( i=0; i<4; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    return (((uint16_t)sum) + ((uint32_t)sum>>16)) >> 1;
}

#define PIXEL_SATD_C( w, h, sub )\
static int x264_pixel_satd_##w##x##h( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )\
{\
    int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
            + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
    if( w==16 )\
        sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
            + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
    if( h==16 )\
        sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
            + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
    if( w==16 && h==16 )\
        sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
            + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
    return sum;\
}
PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 16, 8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 4,  8,  x264_pixel_satd_4x4 )

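/****************************************************************************
 * pixel_sa8d_WxH: sum of 8x8 Hadamard transformed differences
 ****************************************************************************/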
static NOINLINE int sa8d_8x8( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
    uint32_t tmp[8][4];
    uint32_t a0,a1,a2,a3,a4,a5,a6,a7,b0,b1,b2,b3;
    int sum=0, i;
    for( i=0; i<8; i++, pix1+=i_pix1, pix2+=i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<16);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<16);
        a4 = pix1[4] - pix2[4];
        a5 = pix1[5] - pix2[5];
        b2 = (a4+a5) + ((a4-a5)<<16);
        a6 = pix1[6] - pix2[6];
        a7 = pix1[7] - pix2[7];
        b3 = (a6+a7) + ((a6-a7)<<16);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
    }
    for( i=0; i<4; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        HADAMARD4( a4,a5,a6,a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
        b0  = abs2(a0+a4) + abs2(a0-a4);
        b0 += abs2(a1+a5) + abs2(a1-a5);
        b0 += abs2(a2+a6) + abs2(a2-a6);
        b0 += abs2(a3+a7) + abs2(a3-a7);
        sum += (uint16_t)b0 + (b0>>16);
    }
    return sum;
}

static int x264_pixel_sa8d_8x8( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
    return (sum+2)>>2;
}

static int x264_pixel_sa8d_16x16( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
            + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
            + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
            + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
    return (sum+2)>>2;
}

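/****************************************************************************
 * pixel_hadamard_ac_WxH: sum of absolute Hadamard-transformed coefficients,
 * excluding the DC term.  The 8x8-transform sum is returned in the high 32
 * bits and the 4x4-transform sum in the low 32 bits (used for psy-rd).
 ****************************************************************************/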
static NOINLINE uint64_t pixel_hadamard_ac( uint8_t *pix, int stride )
{
    uint32_t tmp[32];
    uint32_t a0,a1,a2,a3,dc;
    int sum4=0, sum8=0, i;
    for( i=0; i<8; i++, pix+=stride )
    {
        uint32_t *t = tmp + (i&3) + (i&4)*4;
        a0 = (pix[0]+pix[1]) + ((pix[0]-pix[1])<<16);
        a1 = (pix[2]+pix[3]) + ((pix[2]-pix[3])<<16);
        t[0] = a0 + a1;
        t[4] = a0 - a1;
        a2 = (pix[4]+pix[5]) + ((pix[4]-pix[5])<<16);
        a3 = (pix[6]+pix[7]) + ((pix[6]-pix[7])<<16);
        t[8] = a2 + a3;
        t[12] = a2 - a3;
    }
    for( i=0; i<8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
        tmp[i*4+0] = a0;
        tmp[i*4+1] = a1;
        tmp[i*4+2] = a2;
        tmp[i*4+3] = a3;
        sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    for( i=0; i<8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
        sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    dc = (uint16_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
    sum4 = (uint16_t)sum4 + ((uint32_t)sum4>>16) - dc;
    sum8 = (uint16_t)sum8 + ((uint32_t)sum8>>16) - dc;
    return ((uint64_t)sum8<<32) + sum4;
}

#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( uint8_t *pix, int stride )\
{\
    uint64_t sum = pixel_hadamard_ac( pix, stride );\
    if( w==16 )\
        sum += pixel_hadamard_ac( pix+8, stride );\
    if( h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride, stride );\
    if( w==16 && h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )


/****************************************************************************
 * pixel_sad_x4
 ****************************************************************************/
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, uint8_t *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )

#ifdef ARCH_UltraSparc
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif

/****************************************************************************
 * pixel_satd_x4
 * no faster than single satd, but needed for satd to be a drop-in replacement for sad
 ****************************************************************************/

#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, uint8_t *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )

SATD_X_DECL7()
#ifdef HAVE_MMX
SATD_X_DECL7( _mmxext )
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
#endif

#ifdef HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif

/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
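/* SSIM here is evaluated on sums over 4x4 blocks; each ssim_end1() term
 * combines four neighbouring 4x4 blocks, i.e. an 8x8 window on a 4x4 grid,
 * rather than the 11x11 Gaussian window of the original SSIM paper. */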
static void ssim_4x4x2_core( const uint8_t *pix1, int stride1,
                             const uint8_t *pix2, int stride2,
                             int sums[2][4])
{
    int x, y, z;
    for(z=0; z<2; z++)
    {
        uint32_t s1=0, s2=0, ss=0, s12=0;
        for(y=0; y<4; y++)
            for(x=0; x<4; x++)
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        pix1 += 4;
        pix2 += 4;
    }
}

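/* C1 and C2 from the SSIM formula ((K1*L)^2 and (K2*L)^2 with K1=.01, K2=.03,
 * L=255), pre-scaled to match the un-normalized 64-pixel sums used below. */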
static float ssim_end1( int s1, int s2, int ss, int s12 )
{
    static const int ssim_c1 = (int)(.01*.01*255*255*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*255*255*64*63 + .5);
    int vars = ss*64 - s1*s1 - s2*s2;
    int covar = s12*64 - s1*s2;
    return (float)(2*s1*s2 + ssim_c1) * (float)(2*covar + ssim_c2)\
           / ((float)(s1*s1 + s2*s2 + ssim_c1) * (float)(vars + ssim_c2));
}

static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    int i;
    float ssim = 0.0;
    for( i = 0; i < width; i++ )
        ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
                           sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
                           sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
                           sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
    return ssim;
}

float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           uint8_t *pix1, int stride1,
                           uint8_t *pix2, int stride2,
                           int width, int height, void *buf )
{
    int x, y, z;
    float ssim = 0.0;
    int (*sum0)[4] = buf;
    int (*sum1)[4] = sum0 + width/4+3;
    width >>= 2;
    height >>= 2;
    z = 0;
    for( y = 1; y < height; y++ )
    {
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        for( x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    return ssim;
}


/****************************************************************************
 * successive elimination
 ****************************************************************************/
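/* ADS (absolute differences of sums): cheap lower bounds on SAD computed from
 * precalculated block sums, used to prune motion-vector candidates before the
 * full SAD is evaluated.  Candidates whose estimated cost stays below `thresh`
 * are appended to mvs[]; the return value is how many survived. */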
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv=0, i;
    for( i=0; i<width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv=0, i;
    for( i=0; i<width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv=0, i;
    for( i=0; i<width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}


/****************************************************************************
 * x264_pixel_init:
 ****************************************************************************/
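/*
 * Fills the function-pointer table with the C implementations above, then
 * overrides individual entries with platform-specific assembly according to
 * the cpu flag bits.  A minimal usage sketch (illustrative only; the variable
 * names here are not taken from this file):
 *
 *     x264_pixel_function_t pixf;
 *     x264_pixel_init( cpu_flags, &pixf );
 *     int cost = pixf.sad[PIXEL_16x16]( fenc, FENC_STRIDE, fref, i_ref_stride );
 */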
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );

#define INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
    pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
    INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
    pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
    INIT4_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
    INIT5_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
    INIT6_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )

#define INIT_ADS( cpu ) \
    pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
    pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
    pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;

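    /* INITn( name, cpu ) points the n largest block sizes of pixf->name at the
     * implementations with the given cpu suffix; an empty suffix selects the
     * plain C versions above.  Each later cpu block only overwrites the entries
     * it actually provides. */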
    INIT7( sad, );
    INIT7_NAME( sad_aligned, sad, );
    INIT7( sad_x3, );
    INIT7( sad_x4, );
    INIT7( ssd, );
    INIT7( satd, );
    INIT7( satd_x3, );
    INIT7( satd_x4, );
    INIT4( hadamard_ac, );
    INIT_ADS( );

    pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
    pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
    pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8;

    pixf->ssim_4x4x2_core = ssim_4x4x2_core;
    pixf->ssim_end4 = ssim_end4;
    pixf->var2_8x8 = pixel_var2_8x8;

#ifdef HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        INIT7( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMXEXT )
    {
        INIT7( sad, _mmxext );
        INIT7_NAME( sad_aligned, sad, _mmxext );
        INIT7( sad_x3, _mmxext );
        INIT7( sad_x4, _mmxext );
        INIT7( satd, _mmxext );
        INIT7( satd_x3, _mmxext );
        INIT7( satd_x4, _mmxext );
        INIT4( hadamard_ac, _mmxext );
        INIT_ADS( _mmxext );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmxext;
#ifdef ARCH_X86
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmxext;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmxext;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmxext;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_mmxext;
        pixf->var2_8x8 = x264_pixel_var2_8x8_mmxext;

        if( cpu&X264_CPU_CACHELINE_32 )
        {
            INIT5( sad, _cache32_mmxext );
            INIT4( sad_x3, _cache32_mmxext );
            INIT4( sad_x4, _cache32_mmxext );
        }
        else if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT5( sad, _cache64_mmxext );
            INIT4( sad_x3, _cache64_mmxext );
            INIT4( sad_x4, _cache64_mmxext );
        }
#else
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmxext;
            pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmxext;
            pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmxext;
            pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmxext;
            pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmxext;
            pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmxext;
            pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmxext;
        }
#endif
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmxext;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmxext;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmxext;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmxext;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmxext;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmxext;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmxext;
    }

    if( cpu&X264_CPU_SSE2 )
    {
        INIT5( ssd, _sse2slow );
        INIT2_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#ifdef ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
    }

    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT6( satd, _sse2 );
        INIT6( satd_x3, _sse2 );
        INIT6( satd_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }
        INIT_ADS( _sse2 );
        pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( ssd, _sse2); /* faster for width 16 on p4 */
#ifdef ARCH_X86
            INIT2( sad, _cache64_sse2 );
            INIT2( sad_x3, _cache64_sse2 );
            INIT2( sad_x4, _cache64_sse2 );
#endif
            if( cpu&X264_CPU_SSE2_IS_FAST )
            {
                pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
                pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
            }
        }

        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            INIT2( sad_x3, _sse2_misalign );
            INIT2( sad_x4, _sse2_misalign );
        }
    }

    if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
    {
        pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
    }

    if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
    {
        INIT2( sad, _sse3 );
        INIT2( sad_x3, _sse3 );
        INIT2( sad_x4, _sse3 );
    }

    if( cpu&X264_CPU_SSSE3 )
    {
        INIT7( ssd, _ssse3 );
        INIT7( satd, _ssse3 );
        INIT7( satd_x3, _ssse3 );
        INIT7( satd_x4, _ssse3 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
        }
        INIT_ADS( _ssse3 );
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_ssse3;
#ifdef ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_ssse3;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( sad, _cache64_ssse3 );
            INIT2( sad_x3, _cache64_ssse3 );
            INIT2( sad_x4, _cache64_ssse3 );
        }
        if( !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
        {
            INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
        }
    }

    if( cpu&X264_CPU_SSE4 )
    {
        INIT7( satd, _sse4 );
        INIT7( satd_x3, _sse4 );
        INIT7( satd_x4, _sse4 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
    }
#endif //HAVE_MMX

#ifdef HAVE_ARMV6
    if( cpu&X264_CPU_ARMV6 )
    {
        pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
    }
    if( cpu&X264_CPU_NEON )
    {
        INIT5( sad, _neon );
        INIT5( sad_aligned, _neon );
        INIT7( sad_x3, _neon );
        INIT7( sad_x4, _neon );
        INIT7( ssd, _neon );
        INIT7( satd, _neon );
        INIT7( satd_x3, _neon );
        INIT7( satd_x4, _neon );
        INIT4( hadamard_ac, _neon );
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
        pixf->var2_8x8          = x264_pixel_var2_8x8_neon;

        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
        pixf->ssim_end4         = x264_pixel_ssim_end4_neon;

        if( cpu&X264_CPU_FAST_NEON_MRC )
        {
            pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
            pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
            pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
            pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
        }
        else    // really just scheduled for dual issue / A8
        {
            INIT5( sad_aligned, _neon_dual );
        }
    }
#endif
#ifdef ARCH_PPC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif
#ifdef ARCH_UltraSparc
    INIT4( sad, _vis );
    INIT4( sad_x3, _vis );
    INIT4( sad_x4, _vis );
#endif

    pixf->ads[PIXEL_8x16] =
    pixf->ads[PIXEL_8x4] =
    pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
    pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}