/*****************************************************************************
 * pixel.c: pixel metrics
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#include "common.h"

#if HAVE_MMX
#   include "x86/pixel.h"
#endif
#if ARCH_PPC
#   include "ppc/pixel.h"
#endif
#if ARCH_ARM
#   include "arm/pixel.h"
#endif
#if ARCH_UltraSparc
#   include "sparc/pixel.h"
#endif


/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            i_sum += abs( pix1[x] - pix2[x] );      \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}


PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )


/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( pixel *pix1, int i_stride_pix1,  \
                 pixel *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    for( int y = 0; y < ly; y++ )                   \
    {                                               \
        for( int x = 0; x < lx; x++ )               \
        {                                           \
            int d = pix1[x] - pix2[x];              \
            i_sum += d*d;                           \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )

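/* Compute SSD over an arbitrary width x height region by tiling it with the
 * block SSD functions and summing the ragged right/bottom edges per pixel.
 * 16x16 blocks are only used when both pointers and both strides are 16-byte
 * aligned, since SIMD implementations of pf->ssd may assume alignment. */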
uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
{
    uint64_t i_ssd = 0;
    int y;
    int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        int x = 0;
        if( align )
            for( ; x < i_width-15; x += 16 )
                SSD(PIXEL_16x16);
        for( ; x < i_width-7; x += 8 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( int x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width & 7 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( int x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height & 7 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( int x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}

static uint64_t pixel_ssd_nv12_core( pixel *pixuv1, int stride1, pixel *pixuv2, int stride2, int width, int height )
{
    uint32_t ssd_u=0, ssd_v=0;
    for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
        for( int x = 0; x < width; x++ )
        {
            int du = pixuv1[2*x]   - pixuv2[2*x];
            int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
            ssd_u += du*du;
            ssd_v += dv*dv;
        }
    return ssd_u + ((uint64_t)ssd_v<<32);
}

// SSD in uint32 (i.e. packing two into uint64) can potentially overflow on
// image widths >= 11008 (or 6604 if interlaced), since this is called on blocks
// of height up to 12 (resp. 20). Though it will probably take significantly more
// than that at sane distortion levels.
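// Worked example for 8-bit depth: each UV pair contributes at most 255^2 = 65025
// to a plane's uint32 accumulator per row, and 65025 * 12 rows * ~5504 pairs
// is roughly 2^32; ~5504 UV pairs span ~11008 luma columns (with 20 rows the
// bound falls to ~3302 pairs, i.e. ~6604 luma columns).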
uint64_t x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, int i_pix1, pixel *pix2, int i_pix2, int i_width, int i_height )
{
    uint64_t ssd = pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height );
    if( i_width&7 )
        /* i_width counts UV pairs, so skipping (i_width&~7) pairs advances twice as many pixels */
        ssd += pixel_ssd_nv12_core( pix1+(i_width&~7)*2, i_pix1, pix2+(i_width&~7)*2, i_pix2, i_width&7, i_height );
    return ssd;
}

/****************************************************************************
 * pixel_var_wxh
 ****************************************************************************/
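/* Returns the sum of pixels in the low 32 bits and the sum of squared pixels
 * in the high 32 bits; the caller derives the block variance from the pair. */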
#define PIXEL_VAR_C( name, w ) \
static uint64_t name( pixel *pix, int i_stride ) \
{                                             \
    uint32_t sum = 0, sqr = 0;                \
    for( int y = 0; y < w; y++ )              \
    {                                         \
        for( int x = 0; x < w; x++ )          \
        {                                     \
            sum += pix[x];                    \
            sqr += pix[x] * pix[x];           \
        }                                     \
        pix += i_stride;                      \
    }                                         \
    return sum + ((uint64_t)sqr << 32);       \
}

PIXEL_VAR_C( x264_pixel_var_16x16, 16 )
PIXEL_VAR_C( x264_pixel_var_8x8,    8 )

/****************************************************************************
 * pixel_var2_wxh
 ****************************************************************************/
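/* Variance of the 8x8 difference block pix1-pix2; the difference's SSD is
 * returned through *ssd as a byproduct. */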
static int pixel_var2_8x8( pixel *pix1, int i_stride1, pixel *pix2, int i_stride2, int *ssd )
{
    uint32_t var = 0, sum = 0, sqr = 0;
    for( int y = 0; y < 8; y++ )
    {
        for( int x = 0; x < 8; x++ )
        {
            int diff = pix1[x] - pix2[x];
            sum += diff;
            sqr += diff * diff;
        }
        pix1 += i_stride1;
        pix2 += i_stride2;
    }
    sum = abs(sum);
    var = sqr - ((uint64_t)sum * sum >> 6);
    *ssd = sqr;
    return var;
}


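/* One 4-point Hadamard transform as two butterfly stages, mapping
 * (s0,s1,s2,s3) to (s0+s1+s2+s3, s0-s1+s2-s3, s0+s1-s2-s3, s0-s1-s2+s3).
 * It also works elementwise on the packed pseudo-simd values used below. */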
#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
    int t0 = s0 + s1;\
    int t1 = s0 - s1;\
    int t2 = s2 + s3;\
    int t3 = s2 - s3;\
    d0 = t0 + t2;\
    d2 = t0 - t2;\
    d1 = t1 + t3;\
    d3 = t1 - t3;\
}

// in: a pseudo-simd number of the form x+(y<<16)
// return: abs(x)+(abs(y)<<16)
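// This is the usual branchless abs (abs(x) = (x+m)^m with m = x>>15) applied
// per 16-bit lane: s is 0xffff in each lane whose sign bit is set. The carry
// that the low lane's add may spill into the high lane exactly cancels the
// borrow incurred when a negative x was packed, so both lanes come out right.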
static ALWAYS_INLINE uint32_t abs2( uint32_t a )
{
    uint32_t s = ((a>>15)&0x10001)*0xffff;
    return (a+s)^s;
}

/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/

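/* The transforms below pack two 16-bit differences per uint32 ("pseudo-simd"),
 * letting each add/sub process two coefficients at once; the final >>1
 * rescales the unnormalized Hadamard sum. */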
static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    uint32_t tmp[4][2];
    uint32_t a0, a1, a2, a3, b0, b1;
    int sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<16);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<16);
        tmp[i][0] = b0 + b1;
        tmp[i][1] = b0 - b1;
    }
    for( int i = 0; i < 2; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
        sum += ((uint16_t)a0) + (a0>>16);
    }
    return sum >> 1;
}

static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    uint32_t tmp[4][4];
    uint32_t a0, a1, a2, a3;
    int sum = 0;
    for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = (pix1[0] - pix2[0]) + ((pix1[4] - pix2[4]) << 16);
        a1 = (pix1[1] - pix2[1]) + ((pix1[5] - pix2[5]) << 16);
        a2 = (pix1[2] - pix2[2]) + ((pix1[6] - pix2[6]) << 16);
        a3 = (pix1[3] - pix2[3]) + ((pix1[7] - pix2[7]) << 16);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    return (((uint16_t)sum) + ((uint32_t)sum>>16)) >> 1;
}

#define PIXEL_SATD_C( w, h, sub )\
static int x264_pixel_satd_##w##x##h( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )\
{\
    int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
            + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
    if( w==16 )\
        sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
            + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
    if( h==16 )\
        sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
            + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
    if( w==16 && h==16 )\
        sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
            + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
    return sum;\
}
PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 16, 8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  16, x264_pixel_satd_8x4 )
PIXEL_SATD_C( 8,  8,  x264_pixel_satd_8x4 )
PIXEL_SATD_C( 4,  8,  x264_pixel_satd_4x4 )


static NOINLINE int sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    uint32_t tmp[8][4];
    uint32_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
    int sum = 0;
    for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
    {
        a0 = pix1[0] - pix2[0];
        a1 = pix1[1] - pix2[1];
        b0 = (a0+a1) + ((a0-a1)<<16);
        a2 = pix1[2] - pix2[2];
        a3 = pix1[3] - pix2[3];
        b1 = (a2+a3) + ((a2-a3)<<16);
        a4 = pix1[4] - pix2[4];
        a5 = pix1[5] - pix2[5];
        b2 = (a4+a5) + ((a4-a5)<<16);
        a6 = pix1[6] - pix2[6];
        a7 = pix1[7] - pix2[7];
        b3 = (a6+a7) + ((a6-a7)<<16);
        HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
    }
    for( int i = 0; i < 4; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
        HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
        b0  = abs2(a0+a4) + abs2(a0-a4);
        b0 += abs2(a1+a5) + abs2(a1-a5);
        b0 += abs2(a2+a6) + abs2(a2-a6);
        b0 += abs2(a3+a7) + abs2(a3-a7);
        sum += (uint16_t)b0 + (b0>>16);
    }
    return sum;
}

static int x264_pixel_sa8d_8x8( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
    return (sum+2)>>2;
}

static int x264_pixel_sa8d_16x16( pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
            + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
            + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
            + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
    return (sum+2)>>2;
}


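/* Sum of absolute 4x4 and 8x8 Hadamard AC coefficients of one 8x8 block,
 * returned packed as (sum8<<32)+sum4 with the DC term subtracted from both;
 * x264 uses this as a measure of texture energy for psy-rd. */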
static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, int stride )
{
    uint32_t tmp[32];
    uint32_t a0, a1, a2, a3, dc;
    int sum4 = 0, sum8 = 0;
    for( int i = 0; i < 8; i++, pix+=stride )
    {
        uint32_t *t = tmp + (i&3) + (i&4)*4;
        a0 = (pix[0]+pix[1]) + ((pix[0]-pix[1])<<16);
        a1 = (pix[2]+pix[3]) + ((pix[2]-pix[3])<<16);
        t[0] = a0 + a1;
        t[4] = a0 - a1;
        a2 = (pix[4]+pix[5]) + ((pix[4]-pix[5])<<16);
        a3 = (pix[6]+pix[7]) + ((pix[6]-pix[7])<<16);
        t[8] = a2 + a3;
        t[12] = a2 - a3;
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
        tmp[i*4+0] = a0;
        tmp[i*4+1] = a1;
        tmp[i*4+2] = a2;
        tmp[i*4+3] = a3;
        sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    for( int i = 0; i < 8; i++ )
    {
        HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
        sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
    }
    dc = (uint16_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
    sum4 = (uint16_t)sum4 + ((uint32_t)sum4>>16) - dc;
    sum8 = (uint16_t)sum8 + ((uint32_t)sum8>>16) - dc;
    return ((uint64_t)sum8<<32) + sum4;
}

#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, int stride )\
{\
    uint64_t sum = pixel_hadamard_ac( pix, stride );\
    if( w==16 )\
        sum += pixel_hadamard_ac( pix+8, stride );\
    if( h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride, stride );\
    if( w==16 && h==16 )\
        sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )


/****************************************************************************
 * pixel_sad_x3 / pixel_sad_x4
 ****************************************************************************/
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )

#if !X264_HIGH_BIT_DEPTH
#if ARCH_UltraSparc
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif
#endif // !X264_HIGH_BIT_DEPTH

/****************************************************************************
 * pixel_satd_x3 / pixel_satd_x4
 * no faster than single satd, but needed for satd to be a drop-in replacement for sad
 ****************************************************************************/

#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )

SATD_X_DECL7()
#if !X264_HIGH_BIT_DEPTH
#if HAVE_MMX
SATD_X_DECL7( _mmxext )
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL7( _sse4 )
#endif

#if HAVE_ARMV6
SATD_X_DECL7( _neon )
#endif
#endif // !X264_HIGH_BIT_DEPTH

#define INTRA_MBCMP_8x8( mbcmp )\
void x264_intra_##mbcmp##_x3_8x8( pixel *fenc, pixel edge[33], int res[3] )\
{\
    pixel pix[8*FDEC_STRIDE];\
    x264_predict_8x8_v_c( pix, edge );\
    res[0] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_h_c( pix, edge );\
    res[1] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_dc_c( pix, edge );\
    res[2] = x264_pixel_##mbcmp##_8x8( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP_8x8(sad)
INTRA_MBCMP_8x8(sa8d)

#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma )\
void x264_intra_##mbcmp##_x3_##size##x##size##chroma( pixel *fenc, pixel *fdec, int res[3] )\
{\
    x264_predict_##size##x##size##chroma##_##pred1##_c( fdec );\
    res[0] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##x##size##chroma##_##pred2##_c( fdec );\
    res[1] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##x##size##chroma##_##pred3##_c( fdec );\
    res[2] = x264_pixel_##mbcmp##_##size##x##size( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP(sad, 4, v, h, dc, )
INTRA_MBCMP(satd, 4, v, h, dc, )
INTRA_MBCMP(sad, 8, dc, h, v, c )
INTRA_MBCMP(satd, 8, dc, h, v, c )
INTRA_MBCMP(sad, 16, v, h, dc, )
INTRA_MBCMP(satd, 16, v, h, dc, )

/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
static void ssim_4x4x2_core( const pixel *pix1, int stride1,
                             const pixel *pix2, int stride2,
                             int sums[2][4])
{
    for( int z = 0; z < 2; z++ )
    {
        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
        for( int y = 0; y < 4; y++ )
            for( int x = 0; x < 4; x++ )
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        pix1 += 4;
        pix2 += 4;
    }
}

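/* Combine four 4x4 block sums into one SSIM term for an 8x8 window (64
 * samples): vars and covar are the scaled variance and covariance, and
 * ssim_c1/ssim_c2 play the role of the stabilizing constants C1/C2 from the
 * SSIM definition, folded into the integer sum domain. */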
static float ssim_end1( int s1, int s2, int ss, int s12 )
{
    static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
    int vars = ss*64 - s1*s1 - s2*s2;
    int covar = s12*64 - s1*s2;
    return (float)(2*s1*s2 + ssim_c1) * (float)(2*covar + ssim_c2)
         / ((float)(s1*s1 + s2*s2 + ssim_c1) * (float)(vars + ssim_c2));
}

static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    float ssim = 0.0;
    for( int i = 0; i < width; i++ )
        ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
                           sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
                           sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
                           sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
    return ssim;
}

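/* Overall SSIM: 8x8 windows on a 4-pixel grid (overlapping by half), built
 * from per-4x4-block statistics. sum0 and sum1 hold the stats for two
 * consecutive rows of 4x4 blocks and are swapped as the loop advances one
 * block row at a time. */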
float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           pixel *pix1, int stride1,
                           pixel *pix2, int stride2,
                           int width, int height, void *buf )
{
    int z = 0;
    float ssim = 0.0;
    int (*sum0)[4] = buf;
    int (*sum1)[4] = sum0 + (width >> 2) + 3;
    width >>= 2;
    height >>= 2;
    for( int y = 1; y < height; y++ )
    {
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( int x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        for( int x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    return ssim;
}


/****************************************************************************
 * successive elimination
 ****************************************************************************/
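/* Cheap pre-filter for exhaustive motion search: ads lower-bounds the SAD by
 * |sum(enc) - sum(ref)| over 8x8 subblock DC sums, adds the mv cost, and keeps
 * only candidates under the threshold for full SAD evaluation. */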
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i < width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}

static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    for( int i = 0; i<width; i++, sums++ )
    {
        int ads = abs( enc_dc[0] - sums[0] )
                + cost_mvx[i];
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}


/****************************************************************************
 * x264_pixel_init: fill in the function pointer table for the given cpu flags
 ****************************************************************************/
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );

#define INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
    pixf->name1[PIXEL_16x8]  = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
    INIT2_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x16]  = x264_pixel_##name2##_8x16##cpu;\
    pixf->name1[PIXEL_8x8]   = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
    INIT4_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_8x4]   = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
    INIT5_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x8]   = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
    INIT6_NAME( name1, name2, cpu ) \
    pixf->name1[PIXEL_4x4]   = x264_pixel_##name2##_4x4##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )

#define INIT_ADS( cpu ) \
    pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
    pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
    pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;

    INIT7( sad, );
    INIT7_NAME( sad_aligned, sad, );
    INIT7( sad_x3, );
    INIT7( sad_x4, );
    INIT7( ssd, );
    INIT7( satd, );
    INIT7( satd_x3, );
    INIT7( satd_x4, );
    INIT4( hadamard_ac, );
    INIT_ADS( );

    pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8;
    pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
    pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8;

    pixf->ssd_nv12_core = pixel_ssd_nv12_core;
    pixf->ssim_4x4x2_core = ssim_4x4x2_core;
    pixf->ssim_end4 = ssim_end4;
    pixf->var2_8x8 = pixel_var2_8x8;

    pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4;
    pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4;
    pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8;
    pixf->intra_sa8d_x3_8x8   = x264_intra_sa8d_x3_8x8;
    pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c;
    pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c;
    pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16;
    pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16;

#if !X264_HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        INIT7( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMXEXT )
    {
        INIT7( sad, _mmxext );
        INIT7_NAME( sad_aligned, sad, _mmxext );
        INIT7( sad_x3, _mmxext );
        INIT7( sad_x4, _mmxext );
        INIT7( satd, _mmxext );
        INIT7( satd_x3, _mmxext );
        INIT7( satd_x4, _mmxext );
        INIT4( hadamard_ac, _mmxext );
        INIT_ADS( _mmxext );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
        pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_mmxext;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_mmxext;
#if ARCH_X86
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmxext;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmxext;
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmxext;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_mmxext;
        pixf->var2_8x8 = x264_pixel_var2_8x8_mmxext;

        if( cpu&X264_CPU_CACHELINE_32 )
        {
            INIT5( sad, _cache32_mmxext );
            INIT4( sad_x3, _cache32_mmxext );
            INIT4( sad_x4, _cache32_mmxext );
        }
        else if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT5( sad, _cache64_mmxext );
            INIT4( sad_x3, _cache64_mmxext );
            INIT4( sad_x4, _cache64_mmxext );
        }
#else
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmxext;
            pixf->sad[PIXEL_8x8]  = x264_pixel_sad_8x8_cache64_mmxext;
            pixf->sad[PIXEL_8x4]  = x264_pixel_sad_8x4_cache64_mmxext;
            pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmxext;
            pixf->sad_x3[PIXEL_8x8]  = x264_pixel_sad_x3_8x8_cache64_mmxext;
            pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmxext;
            pixf->sad_x4[PIXEL_8x8]  = x264_pixel_sad_x4_8x8_cache64_mmxext;
        }
#endif
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmxext;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_mmxext;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_mmxext;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_mmxext;
        pixf->intra_sad_x3_8x8    = x264_intra_sad_x3_8x8_mmxext;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_mmxext;
        pixf->intra_sad_x3_4x4    = x264_intra_sad_x3_4x4_mmxext;
    }

    if( cpu&X264_CPU_SSE2 )
    {
        INIT5( ssd, _sse2slow );
        INIT2_NAME( sad_aligned, sad, _sse2_aligned );
        pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
        pixf->ssd_nv12_core    = x264_pixel_ssd_nv12_core_sse2;
        pixf->ssim_4x4x2_core  = x264_pixel_ssim_4x4x2_core_sse2;
        pixf->ssim_end4        = x264_pixel_ssim_end4_sse2;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_sse2;
    }

    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        INIT2( sad, _sse2 );
        INIT2( sad_x3, _sse2 );
        INIT2( sad_x4, _sse2 );
        INIT6( satd, _sse2 );
        INIT6( satd_x3, _sse2 );
        INIT6( satd_x4, _sse2 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse2 );
        }
        INIT_ADS( _sse2 );
        pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
        pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( ssd, _sse2); /* faster for width 16 on p4 */
#if ARCH_X86
            INIT2( sad, _cache64_sse2 );
            INIT2( sad_x3, _cache64_sse2 );
            INIT2( sad_x4, _cache64_sse2 );
#endif
            if( cpu&X264_CPU_SSE2_IS_FAST )
            {
                pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
                pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
            }
        }

        if( cpu&X264_CPU_SSE_MISALIGN )
        {
            INIT2( sad_x3, _sse2_misalign );
            INIT2( sad_x4, _sse2_misalign );
        }
    }

    if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
    {
        pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
        pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
        pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
        pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
        pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
        pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
        pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
    }

    if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
    {
        INIT2( sad, _sse3 );
        INIT2( sad_x3, _sse3 );
        INIT2( sad_x4, _sse3 );
    }

    if( cpu&X264_CPU_SSSE3 )
    {
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _ssse3 );
        }
        INIT_ADS( _ssse3 );
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            INIT7( ssd, _ssse3 );
            pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
            pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_ssse3;
            INIT7( satd, _ssse3 );
            INIT7( satd_x3, _ssse3 );
            INIT7( satd_x4, _ssse3 );
        }
        pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
        pixf->intra_sad_x3_16x16  = x264_intra_sad_x3_16x16_ssse3;
        pixf->intra_satd_x3_8x8c  = x264_intra_satd_x3_8x8c_ssse3;
        pixf->intra_sad_x3_8x8c   = x264_intra_sad_x3_8x8c_ssse3;
        pixf->intra_satd_x3_4x4   = x264_intra_satd_x3_4x4_ssse3;
#if ARCH_X86_64
        pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
#endif
        pixf->var2_8x8 = x264_pixel_var2_8x8_ssse3;
        if( cpu&X264_CPU_CACHELINE_64 )
        {
            INIT2( sad, _cache64_ssse3 );
            INIT2( sad_x3, _cache64_ssse3 );
            INIT2( sad_x4, _cache64_ssse3 );
        }
        if( cpu&X264_CPU_SLOW_ATOM || !(cpu&X264_CPU_SHUFFLE_IS_FAST) )
        {
            INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
        }
    }

    if( cpu&X264_CPU_SSE4 )
    {
        INIT7( satd, _sse4 );
        INIT7( satd_x3, _sse4 );
        INIT7( satd_x4, _sse4 );
        if( !(cpu&X264_CPU_STACK_MOD4) )
        {
            INIT4( hadamard_ac, _sse4 );
        }
        pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
        pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8_sse4;
        pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_sse4;
        /* Slower on Conroe, so only enable under SSE4 */
        pixf->intra_sad_x3_8x8  = x264_intra_sad_x3_8x8_ssse3;
    }
#endif //HAVE_MMX

#if HAVE_ARMV6
    if( cpu&X264_CPU_ARMV6 )
    {
        pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
        pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
        pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
    }
    if( cpu&X264_CPU_NEON )
    {
        INIT5( sad, _neon );
        INIT5( sad_aligned, _neon );
        INIT7( sad_x3, _neon );
        INIT7( sad_x4, _neon );
        INIT7( ssd, _neon );
        INIT7( satd, _neon );
        INIT7( satd_x3, _neon );
        INIT7( satd_x4, _neon );
        INIT4( hadamard_ac, _neon );
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_neon;
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
        pixf->var[PIXEL_8x8]    = x264_pixel_var_8x8_neon;
        pixf->var[PIXEL_16x16]  = x264_pixel_var_16x16_neon;
        pixf->var2_8x8          = x264_pixel_var2_8x8_neon;

        pixf->ssim_4x4x2_core   = x264_pixel_ssim_4x4x2_core_neon;
        pixf->ssim_end4         = x264_pixel_ssim_end4_neon;

        if( cpu&X264_CPU_FAST_NEON_MRC )
        {
            pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
            pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
            pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
            pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
        }
        else    // really just scheduled for dual issue / A8
        {
            INIT5( sad_aligned, _neon_dual );
        }
    }
#endif
#endif // !X264_HIGH_BIT_DEPTH
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif
#if !X264_HIGH_BIT_DEPTH
#if ARCH_UltraSparc
    INIT4( sad, _vis );
    INIT4( sad_x3, _vis );
    INIT4( sad_x4, _vis );
#endif
#endif // !X264_HIGH_BIT_DEPTH

    pixf->ads[PIXEL_8x16] =
    pixf->ads[PIXEL_8x4] =
    pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
    pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}