/*****************************************************************************
 * pixel.c: pixel metrics
 *****************************************************************************
 * Copyright (C) 2003-2010 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"

#if HAVE_MMX
#   include "x86/pixel.h"
#endif
#if ARCH_PPC
#   include "ppc/pixel.h"
#endif
#if ARCH_ARM
#   include "arm/pixel.h"
#endif
#if ARCH_UltraSparc
#   include "sparc/pixel.h"
#endif


/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            i_sum += abs( pix1[x] - pix2[x] );      \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}


PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )
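
/* Illustrative usage sketch (not part of x264): each PIXEL_SAD_C line above
 * expands to an ordinary C function, so a caller can score a candidate
 * block with, e.g.,
 *
 *     int cost = x264_pixel_sad_8x8( fenc, FENC_STRIDE, fref, i_ref_stride );
 *
 * where fenc/fref and i_ref_stride are hypothetical source/reference
 * arguments. The SIMD versions wired up in x264_pixel_init() below share
 * this signature and act as drop-in replacements. */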


/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            int d = pix1[x] - pix2[x];              \
            i_sum += d*d;                           \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )

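/* Sum of squared differences over an arbitrary width x height region.
 * The region is covered with the largest available SIMD blocks first
 * (16x16 when both pointers and both strides are 16-byte aligned,
 * otherwise 8x16/8x8), and any leftover right/bottom edge narrower than
 * 8 pixels is finished with a scalar loop. */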
uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
{
    uint64_t i_ssd = 0;
    int y;
    int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        int x = 0;
        if( align )
            for( ; x < i_width-15; x += 16 )
                SSD(PIXEL_16x16);
        for( ; x < i_width-7; x += 8 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( int x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width & 7 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( int x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height & 7 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( int x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}

static uint64_t pixel_ssd_nv12_core( pixel *pixuv1, int stride1, pixel *pixuv2, int stride2, int width, int height )
{
    uint32_t ssd_u=0, ssd_v=0;
    for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
        for( int x = 0; x < width; x++ )
        {
            int du = pixuv1[2*x]   - pixuv2[2*x];
            int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
            ssd_u += du*du;
            ssd_v += dv*dv;
        }
    return ssd_u + ((uint64_t)ssd_v<<32);
}

// SSD in uint32 (i.e. packing two into uint64) can potentially overflow on
// image widths >= 11008 (or 6604 if interlaced), since this is called on blocks
// of height up to 12 (resp 20). Though it will probably take significantly more
// than that at sane distortion levels.
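// Rough arithmetic behind those bounds (assuming 8-bit pixels): the worst-case
// per-pixel term is 255^2 = 65025, and a uint32 holds 2^32-1, so about 66055
// such terms fit. At block height 12 that is ~5504 chroma pairs per row, i.e.
// an image width of about 2*5504 = 11008; at height 20 it is ~3302 pairs,
// i.e. ~6604.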
uint64_t x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
{
    uint64_t ssd = pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height );
    if( i_width&7 )
        ssd += pixel_ssd_nv12_core( pix1+(i_width&~7), i_pix1, pix2+(i_width&~7), i_pix2, i_width&7, i_height );
    return ssd;
}

/****************************************************************************
 * pixel_var_wxh
 ****************************************************************************/
#define PIXEL_VAR_C( name, w ) \
static uint64_t name( pixel *pix, int i_stride ) \
{                                             \
    uint32_t sum = 0, sqr = 0;                \
    for( int y = 0; y < w; y++ )              \
    {                                         \
        for( int x = 0; x < w; x++ )          \
        {                                     \
            sum += pix[x];                    \
            sqr += pix[x] * pix[x];           \
        }                                     \
        pix += i_stride;                      \
    }                                         \
    return sum + ((uint64_t)sqr << 32);       \
}

PIXEL_VAR_C( x264_pixel_var_16x16, 16 )
PIXEL_VAR_C( x264_pixel_var_8x8,    8 )
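
/* Sketch of how the packed return value can be consumed (illustrative,
 * assuming an 8x8 block; the actual use lives in the encoder's analysis
 * code):
 *
 *     uint64_t r   = x264_pixel_var_8x8( pix, stride );
 *     uint32_t sum = (uint32_t)r, sqr = r >> 32;
 *     uint32_t var = sqr - (sum * sum >> 6);   // sqr - sum^2/64
 *
 * Packing both moments into one uint64 lets a single pass over the block
 * feed both the mean and the variance. */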

/****************************************************************************
 * pixel_var2_wxh
 ****************************************************************************/
static int pixel_var2_8x8( pixel *pix1, int i_stride1, pixel *pix2, int i_stride2, int *ssd )
{
    uint32_t var = 0, sum = 0, sqr = 0;
    for( int y = 0; y < 8; y++ )
    {
        for( int x = 0; x < 8; x++ )
        {
            int diff = pix1[x] - pix2[x];
            sum += diff;
            sqr += diff * diff;
        }
        pix1 += i_stride1;
        pix2 += i_stride2;
    }
    sum = abs(sum);
    var = sqr - ((uint64_t)sum * sum >> 6);
    *ssd = sqr;
    return var;
}


#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
    int t0 = s0 + s1;\
    int t1 = s0 - s1;\
    int t2 = s2 + s3;\
    int t3 = s2 - s3;\
    d0 = t0 + t2;\
    d2 = t0 - t2;\
    d1 = t1 + t3;\
    d3 = t1 - t3;\
}
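
/* HADAMARD4 is the usual two-stage butterfly: stage one forms s0+-s1 and
 * s2+-s3, stage two combines those, so (d0,d1,d2,d3) is the unnormalized
 * 4-point Hadamard transform of (s0,s1,s2,s3). For example, an input of
 * (a,0,0,0) transforms to (a,a,a,a), and (a,a,a,a) back to (4a,0,0,0). */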

// in: a pseudo-simd number of the form x+(y<<16)
// return: abs(x)+(abs(y)<<16)
static ALWAYS_INLINE uint32_t abs2( uint32_t a )
{
    uint32_t s = ((a>>15)&0x10001)*0xffff;
    return (a+s)^s;
}
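
// Worked example (illustrative): the packing is additive, so with a negative
// x the high half actually holds y-1, and the carry out of (a+s) restores it.
// For x = -3, y = 5:
//     a = (uint32_t)-3 + (5u<<16);   // 0x0004FFFD
//     s = 0x0000FFFF;                // only the low half's sign bit is set
//     (a+s)^s == 0x00050003          // == 3 + (5<<16)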

/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/

static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    uint32_t tmp[4][2];
    uint32_t a0, a1, a2, a3, b0, b1;
    int sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<16);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<16);
        tmp[i][0] = b0 + b1;
        tmp[i][1] = b0 - b1;
    }
    for( int i = 0; i < 2; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
        sum += ((uint16_t)a0) + (a0>>16);
    }
    return sum >> 1;
}

static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    uint32_t tmp[4][4];
    uint32_t a0, a1, a2, a3;
    int sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = (pix1[0] - pix2[0]) + ((pix1[4] - pix2[4]) << 16);
        a1 = (pix1[1] - pix2[1]) + ((pix1[5] - pix2[5]) << 16);
        a2 = (pix1[2] - pix2[2]) + ((pix1[6] - pix2[6]) << 16);
        a3 = (pix1[3] - pix2[3]) + ((pix1[7] - pix2[7]) << 16);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    return (((uint16_t)sum) + ((uint32_t)sum>>16)) >> 1;
}

#define PIXEL_SATD_C( w, h, sub )\
static int x264_pixel_satd_##w##x##h( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )\
{\
    int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
            + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
    if( w==16 )\
        sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
            + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
    if( h==16 )\
        sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
            + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
    if( w==16 && h==16 )\
        sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
            + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
    return sum;\
}
PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 16, 8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 4,  8,  x264_pixel_satd_4x4 )


static NOINLINE int sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    uint32_t tmp[8][4];
    uint32_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
    int sum = 0;
    for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<16);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<16);
        a4 = pix1[4] - pix2[4];
        a5 = pix1[5] - pix2[5];
        b2 = (a4+a5) + ((a4-a5)<<16);
        a6 = pix1[6] - pix2[6];
        a7 = pix1[7] - pix2[7];
        b3 = (a6+a7) + ((a6-a7)<<16);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
        b0  = abs2(a0+a4) + abs2(a0-a4);
        b0 += abs2(a1+a5) + abs2(a1-a5);
        b0 += abs2(a2+a6) + abs2(a2-a6);
        b0 += abs2(a3+a7) + abs2(a3-a7);
        sum += (uint16_t)b0 + (b0>>16);
    }
    return sum;
}

static int x264_pixel_sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
    return (sum+2)>>2;
}

static int x264_pixel_sa8d_16x16( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
            + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
            + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
            + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
    return (sum+2)>>2;
}


static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, int stride )
{
    uint32_t tmp[32];
    uint32_t a0, a1, a2, a3, dc;
    int sum4 = 0, sum8 = 0;
    for( int i = 0; i < 8; i++, pix+=stride )
    {
        uint32_t *t = tmp + (i&3) + (i&4)*4;
        a0 = (pix[0]+pix[1]) + ((pix[0]-pix[1])<<16);
        a1 = (pix[2]+pix[3]) + ((pix[2]-pix[3])<<16);
        t[0] = a0 + a1;
        t[4] = a0 - a1;
        a2 = (pix[4]+pix[5]) + ((pix[4]-pix[5])<<16);
        a3 = (pix[6]+pix[7]) + ((pix[6]-pix[7])<<16);
        t[8] = a2 + a3;
        t[12] = a2 - a3;
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
        tmp[i*4+0] = a0;
        tmp[i*4+1] = a1;
        tmp[i*4+2] = a2;
        tmp[i*4+3] = a3;
        sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
        sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    dc = (uint16_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
    sum4 = (uint16_t)sum4 + ((uint32_t)sum4>>16) - dc;
    sum8 = (uint16_t)sum8 + ((uint32_t)sum8>>16) - dc;
    return ((uint64_t)sum8<<32) + sum4;
}

#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, int stride )\
{\
    uint64_t sum = pixel_hadamard_ac( pix, stride );\
    if( w==16 )\
        sum += pixel_hadamard_ac( pix+8, stride );\
    if( h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride, stride );\
    if( w==16 && h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )
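
/* Descriptive note on the packing above: pixel_hadamard_ac() returns
 * (sum8<<32) + sum4, with the DC term already subtracted from both. After up
 * to four 8x8 blocks are accumulated, ((sum>>34)<<32) + ((uint32_t)sum>>1)
 * unpacks and normalizes both scores at once: sum8/4 ends up in the high
 * 32 bits and sum4/2 in the low 32 bits. */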


/****************************************************************************
 * pixel_sad_x4
 ****************************************************************************/
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )

#if !X264_HIGH_BIT_DEPTH
#if ARCH_UltraSparc
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif
#endif // !X264_HIGH_BIT_DEPTH

/****************************************************************************
 * pixel_satd_x4
 * no faster than single satd, but needed for satd to be a drop-in replacement for sad
 ****************************************************************************/

#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )

SATD_X_DECL7()
#if !X264_HIGH_BIT_DEPTH
#if HAVE_MMX
SATD_X_DECL7( _mmxext )
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
#endif

#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif
#endif // !X264_HIGH_BIT_DEPTH

#define INTRA_MBCMP_8x8( mbcmp )\
void x264_intra_##mbcmp##_x3_8x8( pixel *fenc, pixel edge[33], int res[3] )\
{\
    pixel pix[8*FDEC_STRIDE];\
    x264_predict_8x8_v_c( pix, edge );\
    res[0] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_h_c( pix, edge );\
    res[1] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_dc_c( pix, edge );\
    res[2] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP_8x8(sad)
INTRA_MBCMP_8x8(sa8d)

#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma )\
void x264_intra_##mbcmp##_x3_##size##x##size##chroma( pixel *fenc, pixel *fdec, int res[3] )\
{\
    x264_predict_##size##x##size##chroma##_##pred1##_c( fdec );\
    res[0] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##x##size##chroma##_##pred2##_c( fdec );\
    res[1] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##x##size##chroma##_##pred3##_c( fdec );\
    res[2] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP(sad, 4, v, h, dc, )
INTRA_MBCMP(satd, 4, v, h, dc, )
INTRA_MBCMP(sad, 8, dc, h, v, c )
INTRA_MBCMP(satd, 8, dc, h, v, c )
INTRA_MBCMP(sad, 16, v, h, dc, )
INTRA_MBCMP(satd, 16, v, h, dc, )

/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
static void ssim_4x4x2_core( const pixel *pix1, int stride1,
                             const pixel *pix2, int stride2,
                             int sums[2][4])
{
    for( int z = 0; z < 2; z++ )
    {
        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
        for( int y = 0; y < 4; y++ )
            for( int x = 0; x < 4; x++ )
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        pix1 += 4;
        pix2 += 4;
    }
}

static float ssim_end1( int s1, int s2, int ss, int s12 )
{
    static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
    int vars = ss*64 - s1*s1 - s2*s2;
    int covar = s12*64 - s1*s2;
    return (float)(2*s1*s2 + ssim_c1) * (float)(2*covar + ssim_c2)
         / ((float)(s1*s1 + s2*s2 + ssim_c1) * (float)(vars + ssim_c2));
}
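
/* Descriptive note: this is the standard SSIM kernel
 *     (2*mu1*mu2 + C1)*(2*cov + C2) / ((mu1^2 + mu2^2 + C1)*(var1 + var2 + C2))
 * evaluated on raw integer sums over a 64-pixel window (four 4x4 blocks)
 * rather than on normalized means; vars/covar carry the corresponding scale
 * factors, and ssim_c1/ssim_c2 are the usual C1 = (0.01*L)^2 and
 * C2 = (0.03*L)^2 constants pre-multiplied to match the integer sums. */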

static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    float ssim = 0.0;
    for( int i = 0; i < width; i++ )
        ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
                           sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
                           sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
                           sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
    return ssim;
}

float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           pixel *pix1, int stride1,
                           pixel *pix2, int stride2,
                           int width, int height, void *buf )
{
    int z = 0;
    float ssim = 0.0;
    int (*sum0)[4] = buf;
    int (*sum1)[4] = sum0 + (width >> 2) + 3;
    width >>= 2;
    height >>= 2;
    for( int y = 1; y < height; y++ )
    {
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( int x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        for( int x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    return ssim;
}


/****************************************************************************
 * successive elimination
 ****************************************************************************/
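/* The "ads" (absolute differences of sums) functions below prune motion
 * vector candidates before any full SAD is computed: enc_dc holds sub-block
 * sums of the source block, and sums[] the precomputed sums at each candidate
 * position along a row. Since |sum(a) - sum(b)| <= SAD(a,b) by the triangle
 * inequality, ads plus the MV cost is a lower bound on the real cost, and
 * only candidates below thresh are kept in mvs[]. */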
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}


/****************************************************************************
 * x264_pixel_init:
 ****************************************************************************/
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );

#define INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
    pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
    INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
    pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
    INIT4_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
    INIT5_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
    INIT6_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )

#define INIT_ADS( cpu ) \
    pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
    pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
    pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;

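/* For reference, a single INIT2( sad, _mmxext ) goes through INIT2_NAME and
 * expands to plain assignments:
 *
 *     pixf->sad[PIXEL_16x16] = x264_pixel_sad_16x16_mmxext;
 *     pixf->sad[PIXEL_16x8]  = x264_pixel_sad_16x8_mmxext;
 *
 * so each cpu block below simply overwrites the C fallbacks installed
 * first. */
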
    INIT7( sad, );
    INIT7_NAME( sad_aligned, sad, );
    INIT7( sad_x3, );
    INIT7( sad_x4, );
    INIT7( ssd, );
    INIT7( satd, );
    INIT7( satd_x3, );
    INIT7( satd_x4, );
    INIT4( hadamard_ac, );
    INIT_ADS( );

    pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
    pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
    pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8;

    pixf->ssd_nv12_core = pixel_ssd_nv12_core;
    pixf->ssim_4x4x2_core = ssim_4x4x2_core;
    pixf->ssim_end4 = ssim_end4;
    pixf->var2_8x8 = pixel_var2_8x8;

    pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
    pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
    pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
    pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
    pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
    pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
    pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
    pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;

#if !X264_HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        INIT7( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMXEXT )
    {
        INIT7( sad, _mmxext );
        INIT7_NAME( sad_aligned, sad, _mmxext );
        INIT7( sad_x3, _mmxext );
        INIT7( sad_x4, _mmxext );
        INIT7( satd, _mmxext );
        INIT7( satd_x3, _mmxext );
        INIT7( satd_x4, _mmxext );
        INIT4( hadamard_ac, _mmxext );
        INIT_ADS( _mmxext );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmxext;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmxext;
#if ARCH_X86
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmxext;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmxext;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmxext;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_mmxext;
        pixf->var2_8x8 = x264_pixel_var2_8x8_mmxext;

        if( cpu&X264_CPU_CACHELINE_32 )
        {
            INIT5( sad, _cache32_mmxext );
            INIT4( sad_x3, _cache32_mmxext );
            INIT4( sad_x4, _cache32_mmxext );
        }
        else if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT5( sad, _cache64_mmxext );
            INIT4( sad_x3, _cache64_mmxext );
            INIT4( sad_x4, _cache64_mmxext );
        }
#else
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmxext;
            pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmxext;
            pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmxext;
            pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmxext;
            pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmxext;
            pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmxext;
            pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmxext;
        }
#endif
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmxext;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmxext;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmxext;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmxext;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmxext;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmxext;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmxext;
    }

    if( cpu&X264_CPU_SSE2 )
    {
        INIT5( ssd, _sse2slow );
        INIT2_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
    }

    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT6( satd, _sse2 );
        INIT6( satd_x3, _sse2 );
        INIT6( satd_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }
        INIT_ADS( _sse2 );
        pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( ssd, _sse2 ); /* faster for width 16 on p4 */
#if ARCH_X86
            INIT2( sad, _cache64_sse2 );
            INIT2( sad_x3, _cache64_sse2 );
            INIT2( sad_x4, _cache64_sse2 );
#endif
            if( cpu&X264_CPU_SSE2_IS_FAST )
            {
                pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
                pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
            }
        }

        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            INIT2( sad_x3, _sse2_misalign );
            INIT2( sad_x4, _sse2_misalign );
        }
    }

    if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
    {
        pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
    }

    if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
    {
        INIT2( sad, _sse3 );
        INIT2( sad_x3, _sse3 );
        INIT2( sad_x4, _sse3 );
    }

    if( cpu&X264_CPU_SSSE3 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
        }
        INIT_ADS( _ssse3 );
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            INIT7( ssd, _ssse3 );
            pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
            pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
            INIT7( satd, _ssse3 );
            INIT7( satd_x3, _ssse3 );
            INIT7( satd_x4, _ssse3 );
        }
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_ssse3;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_ssse3;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( sad, _cache64_ssse3 );
            INIT2( sad_x3, _cache64_ssse3 );
            INIT2( sad_x4, _cache64_ssse3 );
        }
        if( cpu&X264_CPU_SLOW_ATOM || !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
        {
            INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
        }
    }

    if( cpu&X264_CPU_SSE4 )
    {
        INIT7( satd, _sse4 );
        INIT7( satd_x3, _sse4 );
        INIT7( satd_x4, _sse4 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
        pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_sse4;
        /* Slower on Conroe, so only enable under SSE4 */
        pixf->intra_sad_x3_8x8  = x264_intra_sad_x3_8x8_ssse3;
    }
#endif //HAVE_MMX

#if HAVE_ARMV6
    if( cpu&X264_CPU_ARMV6 )
    {
        pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
    }
    if( cpu&X264_CPU_NEON )
    {
        INIT5( sad, _neon );
        INIT5( sad_aligned, _neon );
        INIT7( sad_x3, _neon );
        INIT7( sad_x4, _neon );
        INIT7( ssd, _neon );
        INIT7( satd, _neon );
        INIT7( satd_x3, _neon );
        INIT7( satd_x4, _neon );
        INIT4( hadamard_ac, _neon );
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
        pixf->var2_8x8          = x264_pixel_var2_8x8_neon;

        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
        pixf->ssim_end4         = x264_pixel_ssim_end4_neon;

        if( cpu&X264_CPU_FAST_NEON_MRC )
        {
            pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
            pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
            pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
            pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
        }
        else    // really just scheduled for dual issue / A8
        {
            INIT5( sad_aligned, _neon_dual );
        }
    }
#endif
#endif // !X264_HIGH_BIT_DEPTH
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif
#if !X264_HIGH_BIT_DEPTH
#if ARCH_UltraSparc
    INIT4( sad, _vis );
    INIT4( sad_x3, _vis );
    INIT4( sad_x4, _vis );
#endif
#endif // !X264_HIGH_BIT_DEPTH

    pixf->ads[PIXEL_8x16] =
    pixf->ads[PIXEL_8x4] =
    pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
    pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}