/*****************************************************************************
 * pixel.c: h264 encoder
 *****************************************************************************
 * Copyright (C) 2003 Laurent Aimar
 * $Id: pixel.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111, USA.
 *****************************************************************************/

#include <string.h>

#include "common.h"
#include "clip1.h"

#ifdef HAVE_MMXEXT
#   include "i386/pixel.h"
#endif
#ifdef ARCH_PPC
#   include "ppc/pixel.h"
#endif
#ifdef ARCH_UltraSparc
#   include "sparc/pixel.h"
#endif
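
/* Added note: these architecture-specific headers declare the assembly/SIMD
 * variants (MMX/MMXEXT/SSE2, AltiVec, VIS) that x264_pixel_init() at the end
 * of this file installs into the dispatch table. */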


/****************************************************************************
 * pixel_sad_WxH
 ****************************************************************************/
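/* Added note: reference C implementations of the sum of absolute differences
 * (SAD) between two pixel blocks, the basic distortion metric used for motion
 * search.  x264_pixel_init() below replaces these with SIMD versions when the
 * CPU supports them. */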
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( uint8_t *pix1, int i_stride_pix1,  \
                 uint8_t *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    int x, y;                                       \
    for( y = 0; y < ly; y++ )                       \
    {                                               \
        for( x = 0; x < lx; x++ )                   \
        {                                           \
            i_sum += abs( pix1[x] - pix2[x] );      \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}


PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8,  16,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x16,   8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8,    8,  8 )
PIXEL_SAD_C( x264_pixel_sad_8x4,    8,  4 )
PIXEL_SAD_C( x264_pixel_sad_4x8,    4,  8 )
PIXEL_SAD_C( x264_pixel_sad_4x4,    4,  4 )


/****************************************************************************
 * pixel_ssd_WxH
 ****************************************************************************/
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( uint8_t *pix1, int i_stride_pix1,  \
                 uint8_t *pix2, int i_stride_pix2 ) \
{                                                   \
    int i_sum = 0;                                  \
    int x, y;                                       \
    for( y = 0; y < ly; y++ )                       \
    {                                               \
        for( x = 0; x < lx; x++ )                   \
        {                                           \
            int d = pix1[x] - pix2[x];              \
            i_sum += d*d;                           \
        }                                           \
        pix1 += i_stride_pix1;                      \
        pix2 += i_stride_pix2;                      \
    }                                               \
    return i_sum;                                   \
}

PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8,  16,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16,   8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8,    8,  8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4,    8,  4 )
PIXEL_SSD_C( x264_pixel_ssd_4x8,    4,  8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4,    4,  4 )

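/* Added note: SSD over an arbitrary width and height.  The plane is covered
 * with 16x16 blocks; a single column of 8x16 blocks and a bottom row of 8x8
 * blocks pick up widths/heights that are multiples of 8 but not 16, and any
 * remaining pixels on the right/bottom edges are accumulated one at a time. */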
int64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2, int i_width, int i_height )
{
    int64_t i_ssd = 0;
    int x, y;

#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
                                          pix2 + y*i_pix2 + x, i_pix2 );
    for( y = 0; y < i_height-15; y += 16 )
    {
        for( x = 0; x < i_width-15; x += 16 )
            SSD(PIXEL_16x16);
        if( x < i_width-7 )
            SSD(PIXEL_8x16);
    }
    if( y < i_height-7 )
        for( x = 0; x < i_width-7; x += 8 )
            SSD(PIXEL_8x8);
#undef SSD

#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
    if( i_width % 8 != 0 )
    {
        for( y = 0; y < (i_height & ~7); y++ )
            for( x = i_width & ~7; x < i_width; x++ )
                SSD1;
    }
    if( i_height % 8 != 0 )
    {
        for( y = i_height & ~7; y < i_height; y++ )
            for( x = 0; x < i_width; x++ )
                SSD1;
    }
#undef SSD1

    return i_ssd;
}
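
/* Illustrative sketch (not part of the original file): turning the SSD of an
 * 8-bit plane into PSNR.  x264_pixel_ssd_wxh() above does the block-wise
 * accumulation; the log10 conversion below is just the standard PSNR
 * definition, and the 99.99 cap for identical planes is an arbitrary choice
 * made for this example. */
#if 0
#include <math.h>
static double example_plane_psnr( x264_pixel_function_t *pf,
                                  uint8_t *pix1, int i_pix1,
                                  uint8_t *pix2, int i_pix2,
                                  int i_width, int i_height )
{
    int64_t i_ssd = x264_pixel_ssd_wxh( pf, pix1, i_pix1, pix2, i_pix2,
                                        i_width, i_height );
    if( i_ssd == 0 )
        return 99.99; /* planes are identical; avoid dividing by zero */
    return 10.0 * log10( 255.0 * 255.0 * (double)i_width * (double)i_height
                         / (double)i_ssd );
}
#endif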


static inline void pixel_sub_wxh( int16_t *diff, int i_size,
                                  uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
    int y, x;
    for( y = 0; y < i_size; y++ )
    {
        for( x = 0; x < i_size; x++ )
        {
            diff[x + y*i_size] = pix1[x] - pix2[x];
        }
        pix1 += i_pix1;
        pix2 += i_pix2;
    }
}


/****************************************************************************
 * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
 ****************************************************************************/
static int pixel_satd_wxh( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2, int i_width, int i_height )
{
    int16_t tmp[4][4];
    int16_t diff[4][4];
    int x, y;
    int i_satd = 0;

    for( y = 0; y < i_height; y += 4 )
    {
        for( x = 0; x < i_width; x += 4 )
        {
            int d;

            pixel_sub_wxh( (int16_t*)diff, 4, &pix1[x], i_pix1, &pix2[x], i_pix2 );

            for( d = 0; d < 4; d++ )
            {
                int s01, s23;
                int d01, d23;

                s01 = diff[d][0] + diff[d][1]; s23 = diff[d][2] + diff[d][3];
                d01 = diff[d][0] - diff[d][1]; d23 = diff[d][2] - diff[d][3];

                tmp[d][0] = s01 + s23;
                tmp[d][1] = s01 - s23;
                tmp[d][2] = d01 - d23;
                tmp[d][3] = d01 + d23;
            }
            for( d = 0; d < 4; d++ )
            {
                int s01, s23;
                int d01, d23;

                s01 = tmp[0][d] + tmp[1][d]; s23 = tmp[2][d] + tmp[3][d];
                d01 = tmp[0][d] - tmp[1][d]; d23 = tmp[2][d] - tmp[3][d];

                i_satd += abs( s01 + s23 ) + abs( s01 - s23 ) + abs( d01 - d23 ) + abs( d01 + d23 );
            }

        }
        pix1 += 4 * i_pix1;
        pix2 += 4 * i_pix2;
    }

    return i_satd / 2;
}
#define PIXEL_SATD_C( name, width, height ) \
static int name( uint8_t *pix1, int i_stride_pix1, \
                 uint8_t *pix2, int i_stride_pix2 ) \
{ \
    return pixel_satd_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height ); \
}
PIXEL_SATD_C( x264_pixel_satd_16x16, 16, 16 )
PIXEL_SATD_C( x264_pixel_satd_16x8,  16, 8 )
PIXEL_SATD_C( x264_pixel_satd_8x16,  8, 16 )
PIXEL_SATD_C( x264_pixel_satd_8x8,   8, 8 )
PIXEL_SATD_C( x264_pixel_satd_8x4,   8, 4 )
PIXEL_SATD_C( x264_pixel_satd_4x8,   4, 8 )
PIXEL_SATD_C( x264_pixel_satd_4x4,   4, 4 )
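
/* Added note: each 4x4 sub-block above is transformed by an unnormalized
 * 4-point Hadamard butterfly, first along rows and then along columns, and
 * the absolute transform coefficients are summed; the final /2 partially
 * compensates for the gain of the unnormalized transform.  A slow matrix-form
 * cross-check of the same per-block quantity (illustrative only, not part of
 * the original file) would look like this: */
#if 0
static int example_satd_4x4_matrix( const int16_t diff[4][4] )
{
    /* Order-4 Hadamard matrix with +/-1 entries. */
    static const int h[4][4] = {
        { 1,  1,  1,  1 },
        { 1, -1,  1, -1 },
        { 1,  1, -1, -1 },
        { 1, -1, -1,  1 },
    };
    int tmp[4][4];
    int i, j, k, sum = 0;

    /* tmp = H * diff */
    for( i = 0; i < 4; i++ )
        for( j = 0; j < 4; j++ )
        {
            tmp[i][j] = 0;
            for( k = 0; k < 4; k++ )
                tmp[i][j] += h[i][k] * diff[k][j];
        }
    /* sum |(tmp * H)[i][j]|, i.e. sum |(H * diff * H)[i][j]| */
    for( i = 0; i < 4; i++ )
        for( j = 0; j < 4; j++ )
        {
            int coef = 0;
            for( k = 0; k < 4; k++ )
                coef += tmp[i][k] * h[k][j];
            sum += abs( coef );
        }
    return sum / 2;
}
#endif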


/****************************************************************************
 * pixel_sa8d_WxH: sum of 8x8 Hadamard transformed differences
 ****************************************************************************/
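/* Added note: SA8D extends SATD to an 8x8 Hadamard transform.  SA8D_1D is one
 * unnormalized 8-point butterfly pass, applied first to the rows and then to
 * the columns of the 8x8 difference block, and the absolute transform
 * coefficients are summed.  The ( + 2 ) >> 2 in PIXEL_SA8D_C is a rounded
 * division by 4 that brings the result down to roughly the same scale as the
 * 4x4 SATD above. */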
#define SA8D_1D {\
    const int a0 = SRC(0) + SRC(4);\
    const int a4 = SRC(0) - SRC(4);\
    const int a1 = SRC(1) + SRC(5);\
    const int a5 = SRC(1) - SRC(5);\
    const int a2 = SRC(2) + SRC(6);\
    const int a6 = SRC(2) - SRC(6);\
    const int a3 = SRC(3) + SRC(7);\
    const int a7 = SRC(3) - SRC(7);\
    const int b0 = a0 + a2;\
    const int b2 = a0 - a2;\
    const int b1 = a1 + a3;\
    const int b3 = a1 - a3;\
    const int b4 = a4 + a6;\
    const int b6 = a4 - a6;\
    const int b5 = a5 + a7;\
    const int b7 = a5 - a7;\
    DST(0, b0 + b1);\
    DST(1, b0 - b1);\
    DST(2, b2 + b3);\
    DST(3, b2 - b3);\
    DST(4, b4 + b5);\
    DST(5, b4 - b5);\
    DST(6, b6 + b7);\
    DST(7, b6 - b7);\
}

static inline int pixel_sa8d_wxh( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2,
                                  int i_width, int i_height )
{
    int16_t diff[8][8];
    int i_satd = 0;
    int x, y;

    for( y = 0; y < i_height; y += 8 )
    {
        for( x = 0; x < i_width; x += 8 )
        {
            int i;
            pixel_sub_wxh( (int16_t*)diff, 8, pix1+x, i_pix1, pix2+x, i_pix2 );

#define SRC(x)     diff[i][x]
#define DST(x,rhs) diff[i][x] = (rhs)
            for( i = 0; i < 8; i++ )
                SA8D_1D
#undef SRC
#undef DST

#define SRC(x)     diff[x][i]
#define DST(x,rhs) i_satd += abs(rhs)
            for( i = 0; i < 8; i++ )
                SA8D_1D
#undef SRC
#undef DST
        }
        pix1 += 8 * i_pix1;
        pix2 += 8 * i_pix2;
    }

    return i_satd;
}

#define PIXEL_SA8D_C( width, height ) \
static int x264_pixel_sa8d_##width##x##height( uint8_t *pix1, int i_stride_pix1, \
                                               uint8_t *pix2, int i_stride_pix2 ) \
{ \
    return ( pixel_sa8d_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height ) + 2 ) >> 2; \
}
PIXEL_SA8D_C( 16, 16 )
PIXEL_SA8D_C( 16, 8 )
PIXEL_SA8D_C( 8, 16 )
PIXEL_SA8D_C( 8, 8 )

#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, int i_stride, int scores[3] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, uint8_t *pix3, int i_stride, int scores[4] )\
{\
    scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}

SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )
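
/* Added note: the sad_x3/sad_x4 variants score one source block (laid out
 * with the fixed FENC_STRIDE used for the encoder's cached macroblock)
 * against three or four candidate positions sharing one reference stride,
 * which lets the SIMD versions reuse the loaded source rows.  Illustrative
 * caller (hypothetical, not part of the original file): */
#if 0
static int example_best_of_3( x264_pixel_function_t *pixf, uint8_t *fenc,
                              uint8_t *p0, uint8_t *p1, uint8_t *p2,
                              int i_stride )
{
    int scores[3];
    pixf->sad_x3[PIXEL_16x16]( fenc, p0, p1, p2, i_stride, scores );
    if( scores[0] <= scores[1] && scores[0] <= scores[2] )
        return 0;
    return scores[1] <= scores[2] ? 1 : 2;
}
#endif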

#ifdef ARCH_UltraSparc
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif

/****************************************************************************
 * x264_pixel_init:
 ****************************************************************************/
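/* Added note: fills the dispatch table.  The portable C functions are
 * installed first, then overridden with MMX/MMXEXT/SSE2, AltiVec or VIS
 * versions selected by the cpu flags (the VIS overrides are compile-time
 * under ARCH_UltraSparc). */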
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
    memset( pixf, 0, sizeof(*pixf) );

#define INIT( name, cpu ) \
    pixf->name[PIXEL_16x16] = x264_pixel_##name##_16x16##cpu;\
    pixf->name[PIXEL_16x8]  = x264_pixel_##name##_16x8##cpu;\
    pixf->name[PIXEL_8x16]  = x264_pixel_##name##_8x16##cpu;\
    pixf->name[PIXEL_8x8]   = x264_pixel_##name##_8x8##cpu;\
    pixf->name[PIXEL_8x4]   = x264_pixel_##name##_8x4##cpu;\
    pixf->name[PIXEL_4x8]   = x264_pixel_##name##_4x8##cpu;\
    pixf->name[PIXEL_4x4]   = x264_pixel_##name##_4x4##cpu;

    INIT( sad, );
    INIT( sad_x3, );
    INIT( sad_x4, );
    INIT( ssd, );
    INIT( satd, );

    pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16;
    pixf->sa8d[PIXEL_16x8] = x264_pixel_sa8d_16x8;
    pixf->sa8d[PIXEL_8x16] = x264_pixel_sa8d_8x16;
    pixf->sa8d[PIXEL_8x8]  = x264_pixel_sa8d_8x8;

#ifdef HAVE_MMXEXT
    if( cpu&X264_CPU_MMX )
    {
        INIT( ssd, _mmx );
    }

    if( cpu&X264_CPU_MMXEXT )
    {
        INIT( sad, _mmxext );
        INIT( sad_x3, _mmxext );
        INIT( sad_x4, _mmxext );
        INIT( satd, _mmxext );

        pixf->sad_pde[PIXEL_16x16] = x264_pixel_sad_pde_16x16_mmxext;
        pixf->sad_pde[PIXEL_16x8 ] = x264_pixel_sad_pde_16x8_mmxext;
        pixf->sad_pde[PIXEL_8x16 ] = x264_pixel_sad_pde_8x16_mmxext;

#ifdef ARCH_X86
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmxext;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_mmxext;
#endif
    }
#endif

#ifdef HAVE_SSE2
    // disable on AMD processors since it is slower
    if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_3DNOW) )
    {
        pixf->sad[PIXEL_16x16] = x264_pixel_sad_16x16_sse2;
        pixf->sad[PIXEL_16x8 ] = x264_pixel_sad_16x8_sse2;

        pixf->satd[PIXEL_16x16]= x264_pixel_satd_16x16_sse2;
        pixf->satd[PIXEL_16x8] = x264_pixel_satd_16x8_sse2;
        pixf->satd[PIXEL_8x16] = x264_pixel_satd_8x16_sse2;
        pixf->satd[PIXEL_8x8]  = x264_pixel_satd_8x8_sse2;
        pixf->satd[PIXEL_8x4]  = x264_pixel_satd_8x4_sse2;

#ifdef ARCH_X86
        pixf->sad_x3[PIXEL_16x16] = x264_pixel_sad_x3_16x16_sse2;
        pixf->sad_x3[PIXEL_16x8 ] = x264_pixel_sad_x3_16x8_sse2;

        pixf->sad_x4[PIXEL_16x16] = x264_pixel_sad_x4_16x16_sse2;
        pixf->sad_x4[PIXEL_16x8 ] = x264_pixel_sad_x4_16x8_sse2;
#endif
    }
    // these are faster on both Intel and AMD
    if( cpu&X264_CPU_SSE2 )
    {
        pixf->ssd[PIXEL_16x16] = x264_pixel_ssd_16x16_sse2;
        pixf->ssd[PIXEL_16x8]  = x264_pixel_ssd_16x8_sse2;

#ifdef ARCH_X86_64
        pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
        pixf->sa8d[PIXEL_8x8]   = x264_pixel_sa8d_8x8_sse2;
#endif
    }
#endif

#ifdef ARCH_PPC
    if( cpu&X264_CPU_ALTIVEC )
    {
        x264_pixel_altivec_init( pixf );
    }
#endif
#ifdef ARCH_UltraSparc
    pixf->sad[PIXEL_8x8]   = x264_pixel_sad_8x8_vis;
    pixf->sad[PIXEL_8x16]  = x264_pixel_sad_8x16_vis;
    pixf->sad[PIXEL_16x8]  = x264_pixel_sad_16x8_vis;
    pixf->sad[PIXEL_16x16] = x264_pixel_sad_16x16_vis;

    pixf->sad_x3[PIXEL_8x8]   = x264_pixel_sad_x3_8x8_vis;
    pixf->sad_x3[PIXEL_8x16]  = x264_pixel_sad_x3_8x16_vis;
    pixf->sad_x3[PIXEL_16x8]  = x264_pixel_sad_x3_16x8_vis;
    pixf->sad_x3[PIXEL_16x16] = x264_pixel_sad_x3_16x16_vis;

    pixf->sad_x4[PIXEL_8x8]   = x264_pixel_sad_x4_8x8_vis;
    pixf->sad_x4[PIXEL_8x16]  = x264_pixel_sad_x4_8x16_vis;
    pixf->sad_x4[PIXEL_16x8]  = x264_pixel_sad_x4_16x8_vis;
    pixf->sad_x4[PIXEL_16x16] = x264_pixel_sad_x4_16x16_vis;
#endif
}
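
/* Illustrative usage (not part of the original file): fill a local dispatch
 * table and call one metric through it, assuming x264's runtime CPU detection
 * helper x264_cpu_detect() from common/cpu.c. */
#if 0
static int example_sad_16x16( uint8_t *fenc, int i_fenc,
                              uint8_t *fref, int i_fref )
{
    x264_pixel_function_t pixf;
    x264_pixel_init( x264_cpu_detect(), &pixf );
    return pixf.sad[PIXEL_16x16]( fenc, i_fenc, fref, i_fref );
}
#endif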