1 /*****************************************************************************
2 * pixel.c: ppc pixel metrics
3 *****************************************************************************
4 * Copyright (C) 2003-2016 x264 project
6 * Authors: Eric Petit <eric.petit@lapsus.org>
7 * Guillaume Poirier <gpoirier@mplayerhq.hu>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
23 * This program is also available under a commercial proprietary license.
24 * For more information, contact us at licensing@x264.com.
25 *****************************************************************************/
27 #include "common/common.h"
28 #include "ppccommon.h"
/***********************************************************************
 * SAD routines
 **********************************************************************/
35 #define PIXEL_SAD_ALTIVEC( name, lx, ly, a, b ) \
static int name( uint8_t *pix1, intptr_t i_pix1,     \
                 uint8_t *pix2, intptr_t i_pix2 )    \
{                                                     \
    ALIGNED_16( int sum );                            \
                                                      \
    LOAD_ZERO;                                        \
    PREP_LOAD;                                        \
    PREP_LOAD_SRC( pix1 );                            \
    vec_u8_t  pix1v, pix2v;                           \
    vec_s32_t sumv = zero_s32v;                       \
    for( int y = 0; y < ly; y++ )                     \
    {                                                 \
        VEC_LOAD_G( pix1, pix1v, lx, vec_u8_t );      \
        VEC_LOAD_G( pix2, pix2v, lx, vec_u8_t );      \
        sumv = (vec_s32_t) vec_sum4s(                 \
                   vec_sub( vec_max( pix1v, pix2v ),  \
                            vec_min( pix1v, pix2v ) ),\
                   (vec_u32_t) sumv );                \
        pix1 += i_pix1;                               \
        pix2 += i_pix2;                               \
    }                                                 \
    sumv = vec_sum##a( sumv, zero_s32v );             \
    sumv = vec_splat( sumv, b );                      \
    vec_ste( sumv, 0, &sum );                         \
    return sum;                                       \
}
62 PIXEL_SAD_ALTIVEC( pixel_sad_16x16_altivec, 16, 16, s, 3 )
63 PIXEL_SAD_ALTIVEC( pixel_sad_8x16_altivec, 8, 16, 2s, 1 )
64 PIXEL_SAD_ALTIVEC( pixel_sad_16x8_altivec, 16, 8, s, 3 )
65 PIXEL_SAD_ALTIVEC( pixel_sad_8x8_altivec, 8, 8, 2s, 1 )
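/* For reference, each kernel generated above computes a plain sum of absolute
 * differences over an lx x ly block.  The scalar sketch below is illustrative
 * only: the function name is ours, it is not referenced anywhere in x264, and
 * it is kept out of the build by the #if 0 guard. */
#if 0
static int pixel_sad_wxh_scalar( uint8_t *pix1, intptr_t i_pix1,
                                 uint8_t *pix2, intptr_t i_pix2,
                                 int lx, int ly )
{
    int sum = 0;
    for( int y = 0; y < ly; y++ )
    {
        for( int x = 0; x < lx; x++ )
        {
            int d = pix1[x] - pix2[x];
            sum += d < 0 ? -d : d;
        }
        pix1 += i_pix1;
        pix2 += i_pix2;
    }
    return sum;
}
#endif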
/***********************************************************************
 * SATD routines
 **********************************************************************/
/***********************************************************************
 * VEC_HADAMAR
 ***********************************************************************
76 * b[0] = a[0] + a[1] + a[2] + a[3]
77 * b[1] = a[0] + a[1] - a[2] - a[3]
78 * b[2] = a[0] - a[1] - a[2] + a[3]
79 * b[3] = a[0] - a[1] + a[2] - a[3]
80 **********************************************************************/
81 #define VEC_HADAMAR(a0,a1,a2,a3,b0,b1,b2,b3) \
82 b2 = vec_add( a0, a1 ); \
83 b3 = vec_add( a2, a3 ); \
84 a0 = vec_sub( a0, a1 ); \
85 a2 = vec_sub( a2, a3 ); \
86 b0 = vec_add( b2, b3 ); \
87 b1 = vec_sub( b2, b3 ); \
88 b2 = vec_sub( a0, a2 ); \
89 b3 = vec_add( a0, a2 )
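
/* Per-lane scalar view of the butterfly above (illustrative only):
 *     s01 = a0 + a1;    d01 = a0 - a1;
 *     s23 = a2 + a3;    d23 = a2 - a3;
 *     b0  = s01 + s23;  b1  = s01 - s23;
 *     b2  = d01 - d23;  b3  = d01 + d23;
 * i.e. two levels of add/subtract pairs give the four Hadamard sums listed
 * in the comment block above. */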
/***********************************************************************
 * VEC_ABS
 ***********************************************************************
 * a = abs(a)
 *
98 * Call vec_sub()/vec_max() instead of vec_abs() because vec_abs()
99 * actually also calls vec_splat(0), but we already have a null vector.
100 **********************************************************************/
#define VEC_ABS(a) \
    a = vec_max( a, vec_sub( zero_s16v, a ) );
104 #define VEC_ABSOLUTE(a) (vec_u16_t)vec_max( a, vec_sub( zero_s16v, a ) )
/***********************************************************************
 * VEC_ADD_ABS
 ***********************************************************************
 * c[i] = abs(a[2*i]) + abs(a[2*i+1]) + b[i]
113 **********************************************************************/
#define VEC_ADD_ABS(a,b,c) \
    VEC_ABS( a );          \
    c = vec_sum4s( a, b )
/***********************************************************************
 * pixel_satd_4x4
 **********************************************************************/
121 static int pixel_satd_4x4_altivec( uint8_t *pix1, intptr_t i_pix1,
122 uint8_t *pix2, intptr_t i_pix2 )
124 ALIGNED_16( int i_satd );
127 PREP_LOAD_SRC( pix1 );
128 vec_s16_t diff0v, diff1v, diff2v, diff3v;
129 vec_s16_t temp0v, temp1v, temp2v, temp3v;
132 vec_u8_t _offset1v_ = vec_lvsl(0, pix2);
133 vec_u8_t _offset2v_ = vec_lvsl(0, pix2 + i_pix2);
136 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, diff0v, offset1v );
137 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, diff1v, offset2v );
138 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, diff2v, offset1v );
139 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, diff3v, offset2v );
142 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
143 temp0v, temp1v, temp2v, temp3v );
145 VEC_TRANSPOSE_4( temp0v, temp1v, temp2v, temp3v,
146 diff0v, diff1v, diff2v, diff3v );
148 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
149 temp0v, temp1v, temp2v, temp3v );
151 VEC_ADD_ABS( temp0v, zero_s32v, satdv );
152 VEC_ADD_ABS( temp1v, satdv, satdv );
153 VEC_ADD_ABS( temp2v, satdv, satdv );
154 VEC_ADD_ABS( temp3v, satdv, satdv );
156 satdv = vec_sum2s( satdv, zero_s32v );
157 satdv = vec_splat( satdv, 1 );
158 vec_ste( satdv, 0, &i_satd );
/***********************************************************************
 * pixel_satd_4x8
 **********************************************************************/
166 static int pixel_satd_4x8_altivec( uint8_t *pix1, intptr_t i_pix1,
167 uint8_t *pix2, intptr_t i_pix2 )
169 ALIGNED_16( int i_satd );
172 vec_s16_t diff0v, diff1v, diff2v, diff3v;
173 vec_s16_t temp0v, temp1v, temp2v, temp3v;
176 PREP_LOAD_SRC( pix1 );
177 vec_u8_t _offset1v_ = vec_lvsl(0, pix2);
178 vec_u8_t _offset2v_ = vec_lvsl(0, pix2 + i_pix2);
180 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, diff0v, offset1v );
181 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, diff1v, offset2v );
182 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, diff2v, offset1v );
183 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, diff3v, offset2v );
184 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
185 temp0v, temp1v, temp2v, temp3v );
186 VEC_TRANSPOSE_4( temp0v, temp1v, temp2v, temp3v,
187 diff0v, diff1v, diff2v, diff3v );
188 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
189 temp0v, temp1v, temp2v, temp3v );
190 VEC_ADD_ABS( temp0v, zero_s32v, satdv );
191 VEC_ADD_ABS( temp1v, satdv, satdv );
192 VEC_ADD_ABS( temp2v, satdv, satdv );
193 VEC_ADD_ABS( temp3v, satdv, satdv );
195 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, diff0v, offset1v );
196 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, diff1v, offset2v );
197 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, diff2v, offset1v );
198 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 4, diff3v, offset2v );
199 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
200 temp0v, temp1v, temp2v, temp3v );
201 VEC_TRANSPOSE_4( temp0v, temp1v, temp2v, temp3v,
202 diff0v, diff1v, diff2v, diff3v );
203 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
204 temp0v, temp1v, temp2v, temp3v );
205 VEC_ADD_ABS( temp0v, satdv, satdv );
206 VEC_ADD_ABS( temp1v, satdv, satdv );
207 VEC_ADD_ABS( temp2v, satdv, satdv );
208 VEC_ADD_ABS( temp3v, satdv, satdv );
210 satdv = vec_sum2s( satdv, zero_s32v );
211 satdv = vec_splat( satdv, 1 );
212 vec_ste( satdv, 0, &i_satd );
/***********************************************************************
 * pixel_satd_8x4
 **********************************************************************/
220 static int pixel_satd_8x4_altivec( uint8_t *pix1, intptr_t i_pix1,
221 uint8_t *pix2, intptr_t i_pix2 )
223 ALIGNED_16( int i_satd );
226 vec_s16_t diff0v, diff1v, diff2v, diff3v,
227 diff4v, diff5v, diff6v, diff7v;
228 vec_s16_t temp0v, temp1v, temp2v, temp3v,
229 temp4v, temp5v, temp6v, temp7v;
233 PREP_LOAD_SRC( pix1 );
234 vec_u8_t _offset1v_ = vec_lvsl(0, pix2);
235 vec_u8_t _offset2v_ = vec_lvsl(0, pix2 + i_pix2);
237 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff0v, offset1v );
238 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff1v, offset2v );
239 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff2v, offset1v );
240 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff3v, offset2v );
242 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
243 temp0v, temp1v, temp2v, temp3v );
    /* This causes warnings because temp4v...temp7v haven't been set,
       but we don't care */
246 VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
247 temp4v, temp5v, temp6v, temp7v,
248 diff0v, diff1v, diff2v, diff3v,
249 diff4v, diff5v, diff6v, diff7v );
250 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
251 temp0v, temp1v, temp2v, temp3v );
252 VEC_HADAMAR( diff4v, diff5v, diff6v, diff7v,
253 temp4v, temp5v, temp6v, temp7v );
255 VEC_ADD_ABS( temp0v, zero_s32v, satdv );
256 VEC_ADD_ABS( temp1v, satdv, satdv );
257 VEC_ADD_ABS( temp2v, satdv, satdv );
258 VEC_ADD_ABS( temp3v, satdv, satdv );
259 VEC_ADD_ABS( temp4v, satdv, satdv );
260 VEC_ADD_ABS( temp5v, satdv, satdv );
261 VEC_ADD_ABS( temp6v, satdv, satdv );
262 VEC_ADD_ABS( temp7v, satdv, satdv );
264 satdv = vec_sum2s( satdv, zero_s32v );
265 satdv = vec_splat( satdv, 1 );
266 vec_ste( satdv, 0, &i_satd );
/***********************************************************************
 * pixel_satd_8x8
 **********************************************************************/
274 static int pixel_satd_8x8_altivec( uint8_t *pix1, intptr_t i_pix1,
275 uint8_t *pix2, intptr_t i_pix2 )
277 ALIGNED_16( int i_satd );
280 vec_s16_t diff0v, diff1v, diff2v, diff3v,
281 diff4v, diff5v, diff6v, diff7v;
282 vec_s16_t temp0v, temp1v, temp2v, temp3v,
283 temp4v, temp5v, temp6v, temp7v;
286 vec_u8_t _offset1_1v_ = vec_lvsl(0, pix1);
287 vec_u8_t _offset1_2v_ = vec_lvsl(0, pix1 + i_pix1);
288 vec_u8_t _offset2_1v_ = vec_lvsl(0, pix2);
289 vec_u8_t _offset2_2v_ = vec_lvsl(0, pix2 + i_pix2);
291 VEC_DIFF_H_OFFSET( pix1, i_pix1, pix2, i_pix2, 8, diff0v, offset1_1v, offset2_1v );
292 VEC_DIFF_H_OFFSET( pix1, i_pix1, pix2, i_pix2, 8, diff1v, offset1_2v, offset2_2v );
293 VEC_DIFF_H_OFFSET( pix1, i_pix1, pix2, i_pix2, 8, diff2v, offset1_1v, offset2_1v );
294 VEC_DIFF_H_OFFSET( pix1, i_pix1, pix2, i_pix2, 8, diff3v, offset1_2v, offset2_2v );
295 VEC_DIFF_H_OFFSET( pix1, i_pix1, pix2, i_pix2, 8, diff4v, offset1_1v, offset2_1v );
296 VEC_DIFF_H_OFFSET( pix1, i_pix1, pix2, i_pix2, 8, diff5v, offset1_2v, offset2_2v );
297 VEC_DIFF_H_OFFSET( pix1, i_pix1, pix2, i_pix2, 8, diff6v, offset1_1v, offset2_1v );
298 VEC_DIFF_H_OFFSET( pix1, i_pix1, pix2, i_pix2, 8, diff7v, offset1_2v, offset2_2v );
300 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
301 temp0v, temp1v, temp2v, temp3v );
302 VEC_HADAMAR( diff4v, diff5v, diff6v, diff7v,
303 temp4v, temp5v, temp6v, temp7v );
305 VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
306 temp4v, temp5v, temp6v, temp7v,
307 diff0v, diff1v, diff2v, diff3v,
308 diff4v, diff5v, diff6v, diff7v );
310 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
311 temp0v, temp1v, temp2v, temp3v );
312 VEC_HADAMAR( diff4v, diff5v, diff6v, diff7v,
313 temp4v, temp5v, temp6v, temp7v );
315 VEC_ADD_ABS( temp0v, zero_s32v, satdv );
316 VEC_ADD_ABS( temp1v, satdv, satdv );
317 VEC_ADD_ABS( temp2v, satdv, satdv );
318 VEC_ADD_ABS( temp3v, satdv, satdv );
319 VEC_ADD_ABS( temp4v, satdv, satdv );
320 VEC_ADD_ABS( temp5v, satdv, satdv );
321 VEC_ADD_ABS( temp6v, satdv, satdv );
322 VEC_ADD_ABS( temp7v, satdv, satdv );
324 satdv = vec_sums( satdv, zero_s32v );
325 satdv = vec_splat( satdv, 3 );
326 vec_ste( satdv, 0, &i_satd );
/***********************************************************************
 * pixel_satd_8x16
 **********************************************************************/
334 static int pixel_satd_8x16_altivec( uint8_t *pix1, intptr_t i_pix1,
335 uint8_t *pix2, intptr_t i_pix2 )
337 ALIGNED_16( int i_satd );
340 vec_s16_t diff0v, diff1v, diff2v, diff3v,
341 diff4v, diff5v, diff6v, diff7v;
342 vec_s16_t temp0v, temp1v, temp2v, temp3v,
343 temp4v, temp5v, temp6v, temp7v;
346 PREP_LOAD_SRC( pix1 );
347 vec_u8_t _offset1v_ = vec_lvsl(0, pix2);
348 vec_u8_t _offset2v_ = vec_lvsl(0, pix2 + i_pix2);
350 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff0v, offset1v );
351 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff1v, offset2v );
352 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff2v, offset1v );
353 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff3v, offset2v );
354 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff4v, offset1v );
355 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff5v, offset2v );
    VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff6v, offset1v );
357 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff7v, offset2v );
358 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
359 temp0v, temp1v, temp2v, temp3v );
360 VEC_HADAMAR( diff4v, diff5v, diff6v, diff7v,
361 temp4v, temp5v, temp6v, temp7v );
362 VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
363 temp4v, temp5v, temp6v, temp7v,
364 diff0v, diff1v, diff2v, diff3v,
365 diff4v, diff5v, diff6v, diff7v );
366 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
367 temp0v, temp1v, temp2v, temp3v );
368 VEC_HADAMAR( diff4v, diff5v, diff6v, diff7v,
369 temp4v, temp5v, temp6v, temp7v );
370 VEC_ADD_ABS( temp0v, zero_s32v, satdv );
371 VEC_ADD_ABS( temp1v, satdv, satdv );
372 VEC_ADD_ABS( temp2v, satdv, satdv );
373 VEC_ADD_ABS( temp3v, satdv, satdv );
374 VEC_ADD_ABS( temp4v, satdv, satdv );
375 VEC_ADD_ABS( temp5v, satdv, satdv );
376 VEC_ADD_ABS( temp6v, satdv, satdv );
377 VEC_ADD_ABS( temp7v, satdv, satdv );
379 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff0v, offset1v );
380 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff1v, offset2v );
381 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff2v, offset1v );
382 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff3v, offset2v );
383 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff4v, offset1v );
384 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff5v, offset2v );
385 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff6v, offset1v );
386 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff7v, offset2v );
387 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
388 temp0v, temp1v, temp2v, temp3v );
389 VEC_HADAMAR( diff4v, diff5v, diff6v, diff7v,
390 temp4v, temp5v, temp6v, temp7v );
391 VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
392 temp4v, temp5v, temp6v, temp7v,
393 diff0v, diff1v, diff2v, diff3v,
394 diff4v, diff5v, diff6v, diff7v );
395 VEC_HADAMAR( diff0v, diff1v, diff2v, diff3v,
396 temp0v, temp1v, temp2v, temp3v );
397 VEC_HADAMAR( diff4v, diff5v, diff6v, diff7v,
398 temp4v, temp5v, temp6v, temp7v );
399 VEC_ADD_ABS( temp0v, satdv, satdv );
400 VEC_ADD_ABS( temp1v, satdv, satdv );
401 VEC_ADD_ABS( temp2v, satdv, satdv );
402 VEC_ADD_ABS( temp3v, satdv, satdv );
403 VEC_ADD_ABS( temp4v, satdv, satdv );
404 VEC_ADD_ABS( temp5v, satdv, satdv );
405 VEC_ADD_ABS( temp6v, satdv, satdv );
406 VEC_ADD_ABS( temp7v, satdv, satdv );
408 satdv = vec_sums( satdv, zero_s32v );
409 satdv = vec_splat( satdv, 3 );
410 vec_ste( satdv, 0, &i_satd );
/***********************************************************************
 * pixel_satd_16x8
 **********************************************************************/
418 static int pixel_satd_16x8_altivec( uint8_t *pix1, intptr_t i_pix1,
419 uint8_t *pix2, intptr_t i_pix2 )
421 ALIGNED_16( int i_satd );
425 PREP_LOAD_SRC( pix2 );
427 vec_s16_t pix1v, pix2v;
428 vec_s16_t diffh0v, diffh1v, diffh2v, diffh3v,
429 diffh4v, diffh5v, diffh6v, diffh7v;
430 vec_s16_t diffl0v, diffl1v, diffl2v, diffl3v,
431 diffl4v, diffl5v, diffl6v, diffl7v;
432 vec_s16_t temp0v, temp1v, temp2v, temp3v,
433 temp4v, temp5v, temp6v, temp7v;
435 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh0v, diffl0v );
436 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh1v, diffl1v );
437 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh2v, diffl2v );
438 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh3v, diffl3v );
439 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh4v, diffl4v );
440 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh5v, diffl5v );
441 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh6v, diffl6v );
442 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh7v, diffl7v );
444 VEC_HADAMAR( diffh0v, diffh1v, diffh2v, diffh3v,
445 temp0v, temp1v, temp2v, temp3v );
446 VEC_HADAMAR( diffh4v, diffh5v, diffh6v, diffh7v,
447 temp4v, temp5v, temp6v, temp7v );
449 VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
450 temp4v, temp5v, temp6v, temp7v,
451 diffh0v, diffh1v, diffh2v, diffh3v,
452 diffh4v, diffh5v, diffh6v, diffh7v );
454 VEC_HADAMAR( diffh0v, diffh1v, diffh2v, diffh3v,
455 temp0v, temp1v, temp2v, temp3v );
456 VEC_HADAMAR( diffh4v, diffh5v, diffh6v, diffh7v,
457 temp4v, temp5v, temp6v, temp7v );
459 VEC_ADD_ABS( temp0v, zero_s32v, satdv );
460 VEC_ADD_ABS( temp1v, satdv, satdv );
461 VEC_ADD_ABS( temp2v, satdv, satdv );
462 VEC_ADD_ABS( temp3v, satdv, satdv );
463 VEC_ADD_ABS( temp4v, satdv, satdv );
464 VEC_ADD_ABS( temp5v, satdv, satdv );
465 VEC_ADD_ABS( temp6v, satdv, satdv );
466 VEC_ADD_ABS( temp7v, satdv, satdv );
468 VEC_HADAMAR( diffl0v, diffl1v, diffl2v, diffl3v,
469 temp0v, temp1v, temp2v, temp3v );
470 VEC_HADAMAR( diffl4v, diffl5v, diffl6v, diffl7v,
471 temp4v, temp5v, temp6v, temp7v );
473 VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
474 temp4v, temp5v, temp6v, temp7v,
475 diffl0v, diffl1v, diffl2v, diffl3v,
476 diffl4v, diffl5v, diffl6v, diffl7v );
478 VEC_HADAMAR( diffl0v, diffl1v, diffl2v, diffl3v,
479 temp0v, temp1v, temp2v, temp3v );
480 VEC_HADAMAR( diffl4v, diffl5v, diffl6v, diffl7v,
481 temp4v, temp5v, temp6v, temp7v );
483 VEC_ADD_ABS( temp0v, satdv, satdv );
484 VEC_ADD_ABS( temp1v, satdv, satdv );
485 VEC_ADD_ABS( temp2v, satdv, satdv );
486 VEC_ADD_ABS( temp3v, satdv, satdv );
487 VEC_ADD_ABS( temp4v, satdv, satdv );
488 VEC_ADD_ABS( temp5v, satdv, satdv );
489 VEC_ADD_ABS( temp6v, satdv, satdv );
490 VEC_ADD_ABS( temp7v, satdv, satdv );
492 satdv = vec_sums( satdv, zero_s32v );
493 satdv = vec_splat( satdv, 3 );
494 vec_ste( satdv, 0, &i_satd );
/***********************************************************************
 * pixel_satd_16x16
 **********************************************************************/
502 static int pixel_satd_16x16_altivec( uint8_t *pix1, intptr_t i_pix1,
503 uint8_t *pix2, intptr_t i_pix2 )
505 ALIGNED_16( int i_satd );
510 vec_s16_t pix1v, pix2v;
511 vec_s16_t diffh0v, diffh1v, diffh2v, diffh3v,
512 diffh4v, diffh5v, diffh6v, diffh7v;
513 vec_s16_t diffl0v, diffl1v, diffl2v, diffl3v,
514 diffl4v, diffl5v, diffl6v, diffl7v;
515 vec_s16_t temp0v, temp1v, temp2v, temp3v,
516 temp4v, temp5v, temp6v, temp7v;
517 PREP_LOAD_SRC( pix2 );
520 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh0v, diffl0v );
521 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh1v, diffl1v );
522 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh2v, diffl2v );
523 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh3v, diffl3v );
524 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh4v, diffl4v );
525 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh5v, diffl5v );
526 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh6v, diffl6v );
527 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh7v, diffl7v );
528 VEC_HADAMAR( diffh0v, diffh1v, diffh2v, diffh3v,
529 temp0v, temp1v, temp2v, temp3v );
530 VEC_HADAMAR( diffh4v, diffh5v, diffh6v, diffh7v,
531 temp4v, temp5v, temp6v, temp7v );
532 VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
533 temp4v, temp5v, temp6v, temp7v,
534 diffh0v, diffh1v, diffh2v, diffh3v,
535 diffh4v, diffh5v, diffh6v, diffh7v );
536 VEC_HADAMAR( diffh0v, diffh1v, diffh2v, diffh3v,
537 temp0v, temp1v, temp2v, temp3v );
538 VEC_HADAMAR( diffh4v, diffh5v, diffh6v, diffh7v,
539 temp4v, temp5v, temp6v, temp7v );
540 VEC_ADD_ABS( temp0v, zero_s32v, satdv );
541 VEC_ADD_ABS( temp1v, satdv, satdv );
542 VEC_ADD_ABS( temp2v, satdv, satdv );
543 VEC_ADD_ABS( temp3v, satdv, satdv );
544 VEC_ADD_ABS( temp4v, satdv, satdv );
545 VEC_ADD_ABS( temp5v, satdv, satdv );
546 VEC_ADD_ABS( temp6v, satdv, satdv );
547 VEC_ADD_ABS( temp7v, satdv, satdv );
548 VEC_HADAMAR( diffl0v, diffl1v, diffl2v, diffl3v,
549 temp0v, temp1v, temp2v, temp3v );
550 VEC_HADAMAR( diffl4v, diffl5v, diffl6v, diffl7v,
551 temp4v, temp5v, temp6v, temp7v );
552 VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
553 temp4v, temp5v, temp6v, temp7v,
554 diffl0v, diffl1v, diffl2v, diffl3v,
555 diffl4v, diffl5v, diffl6v, diffl7v );
556 VEC_HADAMAR( diffl0v, diffl1v, diffl2v, diffl3v,
557 temp0v, temp1v, temp2v, temp3v );
558 VEC_HADAMAR( diffl4v, diffl5v, diffl6v, diffl7v,
559 temp4v, temp5v, temp6v, temp7v );
560 VEC_ADD_ABS( temp0v, satdv, satdv );
561 VEC_ADD_ABS( temp1v, satdv, satdv );
562 VEC_ADD_ABS( temp2v, satdv, satdv );
563 VEC_ADD_ABS( temp3v, satdv, satdv );
564 VEC_ADD_ABS( temp4v, satdv, satdv );
565 VEC_ADD_ABS( temp5v, satdv, satdv );
566 VEC_ADD_ABS( temp6v, satdv, satdv );
567 VEC_ADD_ABS( temp7v, satdv, satdv );
569 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh0v, diffl0v );
570 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh1v, diffl1v );
571 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh2v, diffl2v );
572 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh3v, diffl3v );
573 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh4v, diffl4v );
574 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh5v, diffl5v );
575 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh6v, diffl6v );
576 VEC_DIFF_HL( pix1, i_pix1, pix2, i_pix2, diffh7v, diffl7v );
577 VEC_HADAMAR( diffh0v, diffh1v, diffh2v, diffh3v,
578 temp0v, temp1v, temp2v, temp3v );
579 VEC_HADAMAR( diffh4v, diffh5v, diffh6v, diffh7v,
580 temp4v, temp5v, temp6v, temp7v );
581 VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
582 temp4v, temp5v, temp6v, temp7v,
583 diffh0v, diffh1v, diffh2v, diffh3v,
584 diffh4v, diffh5v, diffh6v, diffh7v );
585 VEC_HADAMAR( diffh0v, diffh1v, diffh2v, diffh3v,
586 temp0v, temp1v, temp2v, temp3v );
587 VEC_HADAMAR( diffh4v, diffh5v, diffh6v, diffh7v,
588 temp4v, temp5v, temp6v, temp7v );
589 VEC_ADD_ABS( temp0v, satdv, satdv );
590 VEC_ADD_ABS( temp1v, satdv, satdv );
591 VEC_ADD_ABS( temp2v, satdv, satdv );
592 VEC_ADD_ABS( temp3v, satdv, satdv );
593 VEC_ADD_ABS( temp4v, satdv, satdv );
594 VEC_ADD_ABS( temp5v, satdv, satdv );
595 VEC_ADD_ABS( temp6v, satdv, satdv );
596 VEC_ADD_ABS( temp7v, satdv, satdv );
597 VEC_HADAMAR( diffl0v, diffl1v, diffl2v, diffl3v,
598 temp0v, temp1v, temp2v, temp3v );
599 VEC_HADAMAR( diffl4v, diffl5v, diffl6v, diffl7v,
600 temp4v, temp5v, temp6v, temp7v );
601 VEC_TRANSPOSE_8( temp0v, temp1v, temp2v, temp3v,
602 temp4v, temp5v, temp6v, temp7v,
603 diffl0v, diffl1v, diffl2v, diffl3v,
604 diffl4v, diffl5v, diffl6v, diffl7v );
605 VEC_HADAMAR( diffl0v, diffl1v, diffl2v, diffl3v,
606 temp0v, temp1v, temp2v, temp3v );
607 VEC_HADAMAR( diffl4v, diffl5v, diffl6v, diffl7v,
608 temp4v, temp5v, temp6v, temp7v );
609 VEC_ADD_ABS( temp0v, satdv, satdv );
610 VEC_ADD_ABS( temp1v, satdv, satdv );
611 VEC_ADD_ABS( temp2v, satdv, satdv );
612 VEC_ADD_ABS( temp3v, satdv, satdv );
613 VEC_ADD_ABS( temp4v, satdv, satdv );
614 VEC_ADD_ABS( temp5v, satdv, satdv );
615 VEC_ADD_ABS( temp6v, satdv, satdv );
616 VEC_ADD_ABS( temp7v, satdv, satdv );
618 satdv = vec_sums( satdv, zero_s32v );
619 satdv = vec_splat( satdv, 3 );
620 vec_ste( satdv, 0, &i_satd );
627 /***********************************************************************
628 * Interleaved SAD routines
629 **********************************************************************/
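/* Each pixel_sad_xN routine below compares one encoded block (fenc, laid out
 * with FENC_STRIDE) against N candidate blocks that share the stride
 * i_stride, and returns the N SAD values through scores[].  Interleaving the
 * candidates keeps the loads and partial sums for all of them in flight at
 * once instead of running N independent SAD passes. */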
631 static void pixel_sad_x4_16x16_altivec( uint8_t *fenc,
632 uint8_t *pix0, uint8_t *pix1,
633 uint8_t *pix2, uint8_t *pix3,
634 intptr_t i_stride, int scores[4] )
636 ALIGNED_16( int sum0 );
637 ALIGNED_16( int sum1 );
638 ALIGNED_16( int sum2 );
639 ALIGNED_16( int sum3 );
642 vec_u8_t temp_lv, temp_hv;
643 vec_u8_t fencv, pix0v, pix1v, pix2v, pix3v;
644 //vec_u8_t perm0v, perm1v, perm2v, perm3v;
645 vec_u8_t perm0vA, perm1vA, perm2vA, perm3vA, perm0vB, perm1vB, perm2vB, perm3vB;
647 vec_s32_t sum0v, sum1v, sum2v, sum3v;
649 sum0v = vec_splat_s32(0);
650 sum1v = vec_splat_s32(0);
651 sum2v = vec_splat_s32(0);
652 sum3v = vec_splat_s32(0);
654 perm0vA = vec_lvsl(0, pix0);
655 perm1vA = vec_lvsl(0, pix1);
656 perm2vA = vec_lvsl(0, pix2);
657 perm3vA = vec_lvsl(0, pix3);
659 perm0vB = vec_lvsl(0, pix0 + i_stride);
660 perm1vB = vec_lvsl(0, pix1 + i_stride);
661 perm2vB = vec_lvsl(0, pix2 + i_stride);
662 perm3vB = vec_lvsl(0, pix3 + i_stride);
664 for( int y = 0; y < 8; y++ )
666 temp_lv = vec_ld(0, pix0);
667 temp_hv = vec_ld(16, pix0);
668 pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
671 temp_lv = vec_ld(0, pix1);
672 temp_hv = vec_ld(16, pix1);
673 pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
676 fencv = vec_ld(0, fenc);
679 temp_lv = vec_ld(0, pix2);
680 temp_hv = vec_ld(16, pix2);
681 pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
684 temp_lv = vec_ld(0, pix3);
685 temp_hv = vec_ld(16, pix3);
686 pix3v = vec_perm(temp_lv, temp_hv, perm3vA);
689 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
690 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
691 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
692 sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
694 temp_lv = vec_ld(0, pix0);
695 temp_hv = vec_ld(16, pix0);
696 pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
699 temp_lv = vec_ld(0, pix1);
700 temp_hv = vec_ld(16, pix1);
701 pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
704 fencv = vec_ld(0, fenc);
707 temp_lv = vec_ld(0, pix2);
708 temp_hv = vec_ld(16, pix2);
709 pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
712 temp_lv = vec_ld(0, pix3);
713 temp_hv = vec_ld(16, pix3);
714 pix3v = vec_perm(temp_lv, temp_hv, perm3vB);
717 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
718 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
719 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
720 sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
723 sum0v = vec_sums( sum0v, zero_s32v );
724 sum1v = vec_sums( sum1v, zero_s32v );
725 sum2v = vec_sums( sum2v, zero_s32v );
726 sum3v = vec_sums( sum3v, zero_s32v );
728 sum0v = vec_splat( sum0v, 3 );
729 sum1v = vec_splat( sum1v, 3 );
730 sum2v = vec_splat( sum2v, 3 );
731 sum3v = vec_splat( sum3v, 3 );
733 vec_ste( sum0v, 0, &sum0);
734 vec_ste( sum1v, 0, &sum1);
735 vec_ste( sum2v, 0, &sum2);
736 vec_ste( sum3v, 0, &sum3);
744 static void pixel_sad_x3_16x16_altivec( uint8_t *fenc, uint8_t *pix0,
745 uint8_t *pix1, uint8_t *pix2,
746 intptr_t i_stride, int scores[3] )
748 ALIGNED_16( int sum0 );
749 ALIGNED_16( int sum1 );
750 ALIGNED_16( int sum2 );
753 vec_u8_t temp_lv, temp_hv; // temporary load vectors
754 vec_u8_t fencv, pix0v, pix1v, pix2v;
755 vec_u8_t perm0vA, perm1vA, perm2vA, perm0vB, perm1vB, perm2vB;
757 vec_s32_t sum0v, sum1v, sum2v;
759 sum0v = vec_splat_s32(0);
760 sum1v = vec_splat_s32(0);
761 sum2v = vec_splat_s32(0);
763 perm0vA = vec_lvsl(0, pix0);
764 perm1vA = vec_lvsl(0, pix1);
765 perm2vA = vec_lvsl(0, pix2);
767 perm0vB = vec_lvsl(0, pix0 + i_stride);
768 perm1vB = vec_lvsl(0, pix1 + i_stride);
769 perm2vB = vec_lvsl(0, pix2 + i_stride);
771 for( int y = 0; y < 8; y++ )
773 temp_lv = vec_ld(0, pix0);
774 temp_hv = vec_ld(16, pix0);
775 pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
778 temp_lv = vec_ld(0, pix1);
779 temp_hv = vec_ld(16, pix1);
780 pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
783 fencv = vec_ld(0, fenc);
786 temp_lv = vec_ld(0, pix2);
787 temp_hv = vec_ld(16, pix2);
788 pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
791 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
792 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
793 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
795 temp_lv = vec_ld(0, pix0);
796 temp_hv = vec_ld(16, pix0);
797 pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
801 temp_lv = vec_ld(0, pix1);
802 temp_hv = vec_ld(16, pix1);
803 pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
806 fencv = vec_ld(0, fenc);
809 temp_lv = vec_ld(0, pix2);
810 temp_hv = vec_ld(16, pix2);
811 pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
814 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
815 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
816 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
819 sum0v = vec_sums( sum0v, zero_s32v );
820 sum1v = vec_sums( sum1v, zero_s32v );
821 sum2v = vec_sums( sum2v, zero_s32v );
823 sum0v = vec_splat( sum0v, 3 );
824 sum1v = vec_splat( sum1v, 3 );
825 sum2v = vec_splat( sum2v, 3 );
827 vec_ste( sum0v, 0, &sum0);
828 vec_ste( sum1v, 0, &sum1);
829 vec_ste( sum2v, 0, &sum2);
836 static void pixel_sad_x4_16x8_altivec( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2,
837 uint8_t *pix3, intptr_t i_stride, int scores[4] )
839 ALIGNED_16( int sum0 );
840 ALIGNED_16( int sum1 );
841 ALIGNED_16( int sum2 );
842 ALIGNED_16( int sum3 );
845 vec_u8_t temp_lv, temp_hv;
846 vec_u8_t fencv, pix0v, pix1v, pix2v, pix3v;
847 vec_u8_t perm0vA, perm1vA, perm2vA, perm3vA, perm0vB, perm1vB, perm2vB, perm3vB;
849 vec_s32_t sum0v, sum1v, sum2v, sum3v;
851 sum0v = vec_splat_s32(0);
852 sum1v = vec_splat_s32(0);
853 sum2v = vec_splat_s32(0);
854 sum3v = vec_splat_s32(0);
856 perm0vA = vec_lvsl(0, pix0);
857 perm1vA = vec_lvsl(0, pix1);
858 perm2vA = vec_lvsl(0, pix2);
859 perm3vA = vec_lvsl(0, pix3);
861 perm0vB = vec_lvsl(0, pix0 + i_stride);
862 perm1vB = vec_lvsl(0, pix1 + i_stride);
863 perm2vB = vec_lvsl(0, pix2 + i_stride);
864 perm3vB = vec_lvsl(0, pix3 + i_stride);
866 for( int y = 0; y < 4; y++ )
868 temp_lv = vec_ld(0, pix0);
869 temp_hv = vec_ld(16, pix0);
870 pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
873 temp_lv = vec_ld(0, pix1);
874 temp_hv = vec_ld(16, pix1);
875 pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
878 fencv = vec_ld(0, fenc);
881 temp_lv = vec_ld(0, pix2);
882 temp_hv = vec_ld(16, pix2);
883 pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
886 temp_lv = vec_ld(0, pix3);
887 temp_hv = vec_ld(16, pix3);
888 pix3v = vec_perm(temp_lv, temp_hv, perm3vA);
891 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
892 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
893 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
894 sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
896 temp_lv = vec_ld(0, pix0);
897 temp_hv = vec_ld(16, pix0);
898 pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
901 temp_lv = vec_ld(0, pix1);
902 temp_hv = vec_ld(16, pix1);
903 pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
906 fencv = vec_ld(0, fenc);
909 temp_lv = vec_ld(0, pix2);
910 temp_hv = vec_ld(16, pix2);
911 pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
914 temp_lv = vec_ld(0, pix3);
915 temp_hv = vec_ld(16, pix3);
916 pix3v = vec_perm(temp_lv, temp_hv, perm3vB);
919 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
920 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
921 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
922 sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
925 sum0v = vec_sums( sum0v, zero_s32v );
926 sum1v = vec_sums( sum1v, zero_s32v );
927 sum2v = vec_sums( sum2v, zero_s32v );
928 sum3v = vec_sums( sum3v, zero_s32v );
930 sum0v = vec_splat( sum0v, 3 );
931 sum1v = vec_splat( sum1v, 3 );
932 sum2v = vec_splat( sum2v, 3 );
933 sum3v = vec_splat( sum3v, 3 );
935 vec_ste( sum0v, 0, &sum0);
936 vec_ste( sum1v, 0, &sum1);
937 vec_ste( sum2v, 0, &sum2);
938 vec_ste( sum3v, 0, &sum3);
946 static void pixel_sad_x3_16x8_altivec( uint8_t *fenc, uint8_t *pix0,
947 uint8_t *pix1, uint8_t *pix2,
948 intptr_t i_stride, int scores[3] )
950 ALIGNED_16( int sum0 );
951 ALIGNED_16( int sum1 );
952 ALIGNED_16( int sum2 );
955 vec_u8_t temp_lv, temp_hv;
956 vec_u8_t fencv, pix0v, pix1v, pix2v;
957 vec_u8_t perm0vA, perm1vA, perm2vA, perm0vB, perm1vB, perm2vB;
959 vec_s32_t sum0v, sum1v, sum2v;
961 sum0v = vec_splat_s32(0);
962 sum1v = vec_splat_s32(0);
963 sum2v = vec_splat_s32(0);
965 perm0vA = vec_lvsl(0, pix0);
966 perm1vA = vec_lvsl(0, pix1);
967 perm2vA = vec_lvsl(0, pix2);
969 perm0vB = vec_lvsl(0, pix0 + i_stride);
970 perm1vB = vec_lvsl(0, pix1 + i_stride);
971 perm2vB = vec_lvsl(0, pix2 + i_stride);
973 for( int y = 0; y < 4; y++ )
975 temp_lv = vec_ld(0, pix0);
976 temp_hv = vec_ld(16, pix0);
977 pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
980 temp_lv = vec_ld(0, pix1);
981 temp_hv = vec_ld(16, pix1);
982 pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
985 fencv = vec_ld(0, fenc);
988 temp_lv = vec_ld(0, pix2);
989 temp_hv = vec_ld(16, pix2);
990 pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
993 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
994 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
995 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
997 temp_lv = vec_ld(0, pix0);
998 temp_hv = vec_ld(16, pix0);
999 pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
1002 temp_lv = vec_ld(0, pix1);
1003 temp_hv = vec_ld(16, pix1);
1004 pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
1007 fencv = vec_ld(0, fenc);
1008 fenc += FENC_STRIDE;
1010 temp_lv = vec_ld(0, pix2);
1011 temp_hv = vec_ld(16, pix2);
1012 pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
1015 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
1016 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
1017 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
1020 sum0v = vec_sums( sum0v, zero_s32v );
1021 sum1v = vec_sums( sum1v, zero_s32v );
1022 sum2v = vec_sums( sum2v, zero_s32v );
1024 sum0v = vec_splat( sum0v, 3 );
1025 sum1v = vec_splat( sum1v, 3 );
1026 sum2v = vec_splat( sum2v, 3 );
1028 vec_ste( sum0v, 0, &sum0);
1029 vec_ste( sum1v, 0, &sum1);
1030 vec_ste( sum2v, 0, &sum2);
1038 static void pixel_sad_x4_8x16_altivec( uint8_t *fenc,
1039 uint8_t *pix0, uint8_t *pix1,
1040 uint8_t *pix2, uint8_t *pix3,
1041 intptr_t i_stride, int scores[4] )
1043 ALIGNED_16( int sum0 );
1044 ALIGNED_16( int sum1 );
1045 ALIGNED_16( int sum2 );
1046 ALIGNED_16( int sum3 );
1049 vec_u8_t temp_lv, temp_hv;
1050 vec_u8_t fencv, pix0v, pix1v, pix2v, pix3v;
1051 vec_u8_t perm0vA, perm1vA, perm2vA, perm3vA, perm0vB, perm1vB, perm2vB, perm3vB, permEncv;
1053 vec_s32_t sum0v, sum1v, sum2v, sum3v;
1055 sum0v = vec_splat_s32(0);
1056 sum1v = vec_splat_s32(0);
1057 sum2v = vec_splat_s32(0);
1058 sum3v = vec_splat_s32(0);
1060 permEncv = vec_lvsl(0, fenc);
1061 perm0vA = vec_lvsl(0, pix0);
1062 perm1vA = vec_lvsl(0, pix1);
1063 perm2vA = vec_lvsl(0, pix2);
1064 perm3vA = vec_lvsl(0, pix3);
1066 perm0vB = vec_lvsl(0, pix0 + i_stride);
1067 perm1vB = vec_lvsl(0, pix1 + i_stride);
1068 perm2vB = vec_lvsl(0, pix2 + i_stride);
1069 perm3vB = vec_lvsl(0, pix3 + i_stride);
1071 for( int y = 0; y < 8; y++ )
1073 temp_lv = vec_ld(0, pix0);
1074 temp_hv = vec_ld(16, pix0);
1075 pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
1078 temp_lv = vec_ld(0, pix1);
1079 temp_hv = vec_ld(16, pix1);
1080 pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
1083 temp_lv = vec_ld(0, fenc);
1084 fencv = vec_perm(temp_lv, temp_hv, permEncv);
1085 fenc += FENC_STRIDE;
1087 temp_lv = vec_ld(0, pix2);
1088 temp_hv = vec_ld(16, pix2);
1089 pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
1092 temp_lv = vec_ld(0, pix3);
1093 temp_hv = vec_ld(16, pix3);
1094 pix3v = vec_perm(temp_lv, temp_hv, perm3vA);
1097 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
1098 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
1099 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
1100 sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
1102 temp_lv = vec_ld(0, pix0);
1103 temp_hv = vec_ld(16, pix0);
1104 pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
1107 temp_lv = vec_ld(0, pix1);
1108 temp_hv = vec_ld(16, pix1);
1109 pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
1112 temp_lv = vec_ld(0, fenc);
1113 fencv = vec_perm(temp_lv, temp_hv, permEncv);
1114 fenc += FENC_STRIDE;
1116 temp_lv = vec_ld(0, pix2);
1117 temp_hv = vec_ld(16, pix2);
1118 pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
1121 temp_lv = vec_ld(0, pix3);
1122 temp_hv = vec_ld(16, pix3);
1123 pix3v = vec_perm(temp_lv, temp_hv, perm3vB);
1126 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
1127 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
1128 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
1129 sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
1132 sum0v = vec_sum2s( sum0v, zero_s32v );
1133 sum1v = vec_sum2s( sum1v, zero_s32v );
1134 sum2v = vec_sum2s( sum2v, zero_s32v );
1135 sum3v = vec_sum2s( sum3v, zero_s32v );
1137 sum0v = vec_splat( sum0v, 1 );
1138 sum1v = vec_splat( sum1v, 1 );
1139 sum2v = vec_splat( sum2v, 1 );
1140 sum3v = vec_splat( sum3v, 1 );
1142 vec_ste( sum0v, 0, &sum0);
1143 vec_ste( sum1v, 0, &sum1);
1144 vec_ste( sum2v, 0, &sum2);
1145 vec_ste( sum3v, 0, &sum3);
1153 static void pixel_sad_x3_8x16_altivec( uint8_t *fenc, uint8_t *pix0,
1154 uint8_t *pix1, uint8_t *pix2,
1155 intptr_t i_stride, int scores[3] )
1157 ALIGNED_16( int sum0 );
1158 ALIGNED_16( int sum1 );
1159 ALIGNED_16( int sum2 );
1162 vec_u8_t temp_lv, temp_hv;
1163 vec_u8_t fencv, pix0v, pix1v, pix2v;
    vec_u8_t perm0vA, perm1vA, perm2vA, perm0vB, perm1vB, perm2vB, permEncv;
1166 vec_s32_t sum0v, sum1v, sum2v;
1168 sum0v = vec_splat_s32(0);
1169 sum1v = vec_splat_s32(0);
1170 sum2v = vec_splat_s32(0);
1172 permEncv = vec_lvsl(0, fenc);
1173 perm0vA = vec_lvsl(0, pix0);
1174 perm1vA = vec_lvsl(0, pix1);
1175 perm2vA = vec_lvsl(0, pix2);
1177 perm0vB = vec_lvsl(0, pix0 + i_stride);
1178 perm1vB = vec_lvsl(0, pix1 + i_stride);
1179 perm2vB = vec_lvsl(0, pix2 + i_stride);
1181 for( int y = 0; y < 8; y++ )
1183 temp_lv = vec_ld(0, pix0);
1184 temp_hv = vec_ld(16, pix0);
1185 pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
1188 temp_lv = vec_ld(0, pix1);
1189 temp_hv = vec_ld(16, pix1);
1190 pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
1193 temp_lv = vec_ld(0, fenc);
1194 fencv = vec_perm(temp_lv, temp_hv, permEncv);
1195 fenc += FENC_STRIDE;
1197 temp_lv = vec_ld(0, pix2);
1198 temp_hv = vec_ld(16, pix2);
1199 pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
1202 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
1203 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
1204 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
1206 temp_lv = vec_ld(0, pix0);
1207 temp_hv = vec_ld(16, pix0);
1208 pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
1211 temp_lv = vec_ld(0, pix1);
1212 temp_hv = vec_ld(16, pix1);
1213 pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
1216 temp_lv = vec_ld(0, fenc);
1217 fencv = vec_perm(temp_lv, temp_hv, permEncv);
1218 fenc += FENC_STRIDE;
1220 temp_lv = vec_ld(0, pix2);
1221 temp_hv = vec_ld(16, pix2);
1222 pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
1225 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
1226 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
1227 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
1230 sum0v = vec_sum2s( sum0v, zero_s32v );
1231 sum1v = vec_sum2s( sum1v, zero_s32v );
1232 sum2v = vec_sum2s( sum2v, zero_s32v );
1234 sum0v = vec_splat( sum0v, 1 );
1235 sum1v = vec_splat( sum1v, 1 );
1236 sum2v = vec_splat( sum2v, 1 );
1238 vec_ste( sum0v, 0, &sum0);
1239 vec_ste( sum1v, 0, &sum1);
1240 vec_ste( sum2v, 0, &sum2);
1247 static void pixel_sad_x4_8x8_altivec( uint8_t *fenc,
1248 uint8_t *pix0, uint8_t *pix1,
1249 uint8_t *pix2, uint8_t *pix3,
1250 intptr_t i_stride, int scores[4] )
1252 ALIGNED_16( int sum0 );
1253 ALIGNED_16( int sum1 );
1254 ALIGNED_16( int sum2 );
1255 ALIGNED_16( int sum3 );
1258 vec_u8_t temp_lv, temp_hv;
1259 vec_u8_t fencv, pix0v, pix1v, pix2v, pix3v;
1260 vec_u8_t perm0vA, perm1vA, perm2vA, perm3vA, perm0vB, perm1vB, perm2vB, perm3vB, permEncv;
1262 vec_s32_t sum0v, sum1v, sum2v, sum3v;
1264 sum0v = vec_splat_s32(0);
1265 sum1v = vec_splat_s32(0);
1266 sum2v = vec_splat_s32(0);
1267 sum3v = vec_splat_s32(0);
1269 permEncv = vec_lvsl(0, fenc);
1270 perm0vA = vec_lvsl(0, pix0);
1271 perm1vA = vec_lvsl(0, pix1);
1272 perm2vA = vec_lvsl(0, pix2);
1273 perm3vA = vec_lvsl(0, pix3);
1275 perm0vB = vec_lvsl(0, pix0 + i_stride);
1276 perm1vB = vec_lvsl(0, pix1 + i_stride);
1277 perm2vB = vec_lvsl(0, pix2 + i_stride);
1278 perm3vB = vec_lvsl(0, pix3 + i_stride);
1280 for( int y = 0; y < 4; y++ )
1282 temp_lv = vec_ld(0, pix0);
1283 temp_hv = vec_ld(16, pix0);
1284 pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
1287 temp_lv = vec_ld(0, pix1);
1288 temp_hv = vec_ld(16, pix1);
1289 pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
1292 temp_lv = vec_ld(0, fenc);
1293 fencv = vec_perm(temp_lv, temp_hv, permEncv);
1294 fenc += FENC_STRIDE;
1296 temp_lv = vec_ld(0, pix2);
1297 temp_hv = vec_ld(16, pix2);
1298 pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
1301 temp_lv = vec_ld(0, pix3);
1302 temp_hv = vec_ld(16, pix3);
1303 pix3v = vec_perm(temp_lv, temp_hv, perm3vA);
1306 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
1307 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
1308 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
1309 sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
1311 temp_lv = vec_ld(0, pix0);
1312 temp_hv = vec_ld(16, pix0);
1313 pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
1316 temp_lv = vec_ld(0, pix1);
1317 temp_hv = vec_ld(16, pix1);
1318 pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
1321 temp_lv = vec_ld(0, fenc);
1322 fencv = vec_perm(temp_lv, temp_hv, permEncv);
1323 fenc += FENC_STRIDE;
1325 temp_lv = vec_ld(0, pix2);
1326 temp_hv = vec_ld(16, pix2);
1327 pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
1330 temp_lv = vec_ld(0, pix3);
1331 temp_hv = vec_ld(16, pix3);
1332 pix3v = vec_perm(temp_lv, temp_hv, perm3vB);
1335 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
1336 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
1337 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
1338 sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
1341 sum0v = vec_sum2s( sum0v, zero_s32v );
1342 sum1v = vec_sum2s( sum1v, zero_s32v );
1343 sum2v = vec_sum2s( sum2v, zero_s32v );
1344 sum3v = vec_sum2s( sum3v, zero_s32v );
1346 sum0v = vec_splat( sum0v, 1 );
1347 sum1v = vec_splat( sum1v, 1 );
1348 sum2v = vec_splat( sum2v, 1 );
1349 sum3v = vec_splat( sum3v, 1 );
1351 vec_ste( sum0v, 0, &sum0);
1352 vec_ste( sum1v, 0, &sum1);
1353 vec_ste( sum2v, 0, &sum2);
1354 vec_ste( sum3v, 0, &sum3);
1362 static void pixel_sad_x3_8x8_altivec( uint8_t *fenc, uint8_t *pix0,
1363 uint8_t *pix1, uint8_t *pix2,
1364 intptr_t i_stride, int scores[3] )
1366 ALIGNED_16( int sum0 );
1367 ALIGNED_16( int sum1 );
1368 ALIGNED_16( int sum2 );
1371 vec_u8_t temp_lv, temp_hv;
1372 vec_u8_t fencv, pix0v, pix1v, pix2v;
1373 vec_u8_t perm0vA, perm1vA, perm2vA, perm0vB, perm1vB, perm2vB, permEncv;
1375 vec_s32_t sum0v, sum1v, sum2v;
1377 sum0v = vec_splat_s32(0);
1378 sum1v = vec_splat_s32(0);
1379 sum2v = vec_splat_s32(0);
1381 permEncv = vec_lvsl(0, fenc);
1382 perm0vA = vec_lvsl(0, pix0);
1383 perm1vA = vec_lvsl(0, pix1);
1384 perm2vA = vec_lvsl(0, pix2);
1386 perm0vB = vec_lvsl(0, pix0 + i_stride);
1387 perm1vB = vec_lvsl(0, pix1 + i_stride);
1388 perm2vB = vec_lvsl(0, pix2 + i_stride);
1390 for( int y = 0; y < 4; y++ )
1392 temp_lv = vec_ld(0, pix0);
1393 temp_hv = vec_ld(16, pix0);
1394 pix0v = vec_perm(temp_lv, temp_hv, perm0vA);
1397 temp_lv = vec_ld(0, pix1);
1398 temp_hv = vec_ld(16, pix1);
1399 pix1v = vec_perm(temp_lv, temp_hv, perm1vA);
1402 temp_lv = vec_ld(0, fenc);
1403 fencv = vec_perm(temp_lv, temp_hv, permEncv);
1404 fenc += FENC_STRIDE;
1406 temp_lv = vec_ld(0, pix2);
1407 temp_hv = vec_ld(16, pix2);
1408 pix2v = vec_perm(temp_lv, temp_hv, perm2vA);
1411 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
1412 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
1413 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
1415 temp_lv = vec_ld(0, pix0);
1416 temp_hv = vec_ld(16, pix0);
1417 pix0v = vec_perm(temp_lv, temp_hv, perm0vB);
1420 temp_lv = vec_ld(0, pix1);
1421 temp_hv = vec_ld(16, pix1);
1422 pix1v = vec_perm(temp_lv, temp_hv, perm1vB);
1425 temp_lv = vec_ld(0, fenc);
1426 fencv = vec_perm(temp_lv, temp_hv, permEncv);
1427 fenc += FENC_STRIDE;
1429 temp_lv = vec_ld(0, pix2);
1430 temp_hv = vec_ld(16, pix2);
1431 pix2v = vec_perm(temp_lv, temp_hv, perm2vB);
1434 sum0v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix0v ), vec_min( fencv, pix0v ) ), (vec_u32_t) sum0v );
1435 sum1v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix1v ), vec_min( fencv, pix1v ) ), (vec_u32_t) sum1v );
1436 sum2v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix2v ), vec_min( fencv, pix2v ) ), (vec_u32_t) sum2v );
1439 sum0v = vec_sum2s( sum0v, zero_s32v );
1440 sum1v = vec_sum2s( sum1v, zero_s32v );
1441 sum2v = vec_sum2s( sum2v, zero_s32v );
1443 sum0v = vec_splat( sum0v, 1 );
1444 sum1v = vec_splat( sum1v, 1 );
1445 sum2v = vec_splat( sum2v, 1 );
1447 vec_ste( sum0v, 0, &sum0);
1448 vec_ste( sum1v, 0, &sum1);
1449 vec_ste( sum2v, 0, &sum2);
/***********************************************************************
 * SSD routines
 **********************************************************************/
1460 static int pixel_ssd_16x16_altivec ( uint8_t *pix1, intptr_t i_stride_pix1,
1461 uint8_t *pix2, intptr_t i_stride_pix2 )
1463 ALIGNED_16( int sum );
1466 vec_u8_t pix1vA, pix2vA, pix1vB, pix2vB;
1468 vec_u8_t maxA, minA, diffA, maxB, minB, diffB;
1469 vec_u8_t temp_lv, temp_hv;
1470 vec_u8_t permA, permB;
1472 sumv = vec_splat_u32(0);
1474 permA = vec_lvsl(0, pix2);
1475 permB = vec_lvsl(0, pix2 + i_stride_pix2);
1477 temp_lv = vec_ld(0, pix2);
1478 temp_hv = vec_ld(16, pix2);
1479 pix2vA = vec_perm(temp_lv, temp_hv, permA);
1480 pix1vA = vec_ld(0, pix1);
1482 for( int y = 0; y < 7; y++ )
1484 pix1 += i_stride_pix1;
1485 pix2 += i_stride_pix2;
1487 maxA = vec_max(pix1vA, pix2vA);
1488 minA = vec_min(pix1vA, pix2vA);
1490 temp_lv = vec_ld(0, pix2);
1491 temp_hv = vec_ld(16, pix2);
1492 pix2vB = vec_perm(temp_lv, temp_hv, permB);
1493 pix1vB = vec_ld(0, pix1);
1495 diffA = vec_sub(maxA, minA);
1496 sumv = vec_msum(diffA, diffA, sumv);
1498 pix1 += i_stride_pix1;
1499 pix2 += i_stride_pix2;
1501 maxB = vec_max(pix1vB, pix2vB);
1502 minB = vec_min(pix1vB, pix2vB);
1504 temp_lv = vec_ld(0, pix2);
1505 temp_hv = vec_ld(16, pix2);
1506 pix2vA = vec_perm(temp_lv, temp_hv, permA);
1507 pix1vA = vec_ld(0, pix1);
1509 diffB = vec_sub(maxB, minB);
1510 sumv = vec_msum(diffB, diffB, sumv);
1513 pix1 += i_stride_pix1;
1514 pix2 += i_stride_pix2;
1516 temp_lv = vec_ld(0, pix2);
1517 temp_hv = vec_ld(16, pix2);
1518 pix2vB = vec_perm(temp_lv, temp_hv, permB);
1519 pix1vB = vec_ld(0, pix1);
1521 maxA = vec_max(pix1vA, pix2vA);
1522 minA = vec_min(pix1vA, pix2vA);
1524 maxB = vec_max(pix1vB, pix2vB);
1525 minB = vec_min(pix1vB, pix2vB);
1527 diffA = vec_sub(maxA, minA);
1528 sumv = vec_msum(diffA, diffA, sumv);
1530 diffB = vec_sub(maxB, minB);
1531 sumv = vec_msum(diffB, diffB, sumv);
1533 sumv = (vec_u32_t) vec_sums((vec_s32_t) sumv, zero_s32v);
1534 sumv = vec_splat(sumv, 3);
1535 vec_ste((vec_s32_t) sumv, 0, &sum);
1539 static int pixel_ssd_8x8_altivec ( uint8_t *pix1, intptr_t i_stride_pix1,
1540 uint8_t *pix2, intptr_t i_stride_pix2 )
1542 ALIGNED_16( int sum );
1545 vec_u8_t pix1v, pix2v;
1547 vec_u8_t maxv, minv, diffv;
1548 vec_u8_t temp_lv, temp_hv;
1549 vec_u8_t perm1v, perm2v;
1551 const vec_u32_t sel = (vec_u32_t)CV(-1,-1,0,0);
1553 sumv = vec_splat_u32(0);
1555 perm1v = vec_lvsl(0, pix1);
1556 perm2v = vec_lvsl(0, pix2);
1558 for( int y = 0; y < 8; y++ )
1560 temp_hv = vec_ld(0, pix1);
1561 temp_lv = vec_ld(7, pix1);
1562 pix1v = vec_perm(temp_hv, temp_lv, perm1v);
1564 temp_hv = vec_ld(0, pix2);
1565 temp_lv = vec_ld(7, pix2);
1566 pix2v = vec_perm(temp_hv, temp_lv, perm2v);
1568 maxv = vec_max(pix1v, pix2v);
1569 minv = vec_min(pix1v, pix2v);
1571 diffv = vec_sub(maxv, minv);
1572 sumv = vec_msum(diffv, diffv, sumv);
1574 pix1 += i_stride_pix1;
1575 pix2 += i_stride_pix2;
1578 sumv = vec_sel( zero_u32v, sumv, sel );
1580 sumv = (vec_u32_t) vec_sums((vec_s32_t) sumv, zero_s32v);
1581 sumv = vec_splat(sumv, 3);
1582 vec_ste((vec_s32_t) sumv, 0, &sum);
/****************************************************************************
 * variance
 ****************************************************************************/
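/* Note: the two helpers below do not return the variance itself.  Each packs
 * its two 32-bit accumulators into one uint64_t, the pixel sum in the low
 * half and the sum of squares in the high half (see the final
 * "sum + ((uint64_t)sqr<<32)"), and leaves the variance computation to the
 * caller. */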
1591 static uint64_t x264_pixel_var_16x16_altivec( uint8_t *pix, intptr_t i_stride )
1593 ALIGNED_16(uint32_t sum_tab[4]);
1594 ALIGNED_16(uint32_t sqr_tab[4]);
1597 vec_u32_t sqr_v = zero_u32v;
1598 vec_u32_t sum_v = zero_u32v;
1600 for( int y = 0; y < 16; y++ )
1602 vec_u8_t pix0_v = vec_ld(0, pix);
1603 sum_v = vec_sum4s(pix0_v, sum_v);
1604 sqr_v = vec_msum(pix0_v, pix0_v, sqr_v);
1608 sum_v = (vec_u32_t)vec_sums( (vec_s32_t)sum_v, zero_s32v );
1609 sqr_v = (vec_u32_t)vec_sums( (vec_s32_t)sqr_v, zero_s32v );
1610 vec_ste(sum_v, 12, sum_tab);
1611 vec_ste(sqr_v, 12, sqr_tab);
1613 uint32_t sum = sum_tab[3];
1614 uint32_t sqr = sqr_tab[3];
1615 return sum + ((uint64_t)sqr<<32);
1618 static uint64_t x264_pixel_var_8x8_altivec( uint8_t *pix, intptr_t i_stride )
1620 ALIGNED_16(uint32_t sum_tab[4]);
1621 ALIGNED_16(uint32_t sqr_tab[4]);
1624 vec_u32_t sqr_v = zero_u32v;
1625 vec_u32_t sum_v = zero_u32v;
    static const vec_u8_t perm_tab[] =
    {
        CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,  /* pix=mod16, i_stride=mod16 */
           0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17),
        CV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,  /* pix=mod8, i_stride=mod16 */
           0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F),
    };
1634 vec_u8_t perm = perm_tab[ ((uintptr_t)pix & 8) >> 3 ];
1636 for( int y = 0; y < 4; y++ )
1638 vec_u8_t pix0_v = vec_ld(0, pix);
1639 vec_u8_t pix1_v = vec_ld(i_stride, pix);
1640 vec_u8_t pix_v = vec_perm(pix0_v, pix1_v, perm);
1641 sum_v = vec_sum4s(pix_v, sum_v);
1642 sqr_v = vec_msum(pix_v, pix_v, sqr_v);
1646 sum_v = (vec_u32_t)vec_sums( (vec_s32_t)sum_v, zero_s32v );
1647 sqr_v = (vec_u32_t)vec_sums( (vec_s32_t)sqr_v, zero_s32v );
1648 vec_ste(sum_v, 12, sum_tab);
1649 vec_ste(sqr_v, 12, sqr_tab);
1651 uint32_t sum = sum_tab[3];
1652 uint32_t sqr = sqr_tab[3];
1653 return sum + ((uint64_t)sqr<<32);
1657 /**********************************************************************
1658 * SA8D routines: sum of 8x8 Hadamard transformed differences
1659 **********************************************************************/
1660 /* SA8D_1D unrolled by 8 in Altivec */
1661 #define SA8D_1D_ALTIVEC( sa8d0v, sa8d1v, sa8d2v, sa8d3v, \
                         sa8d4v, sa8d5v, sa8d6v, sa8d7v ) \
{                                                          \
1664 /* int a0 = SRC(0) + SRC(4) */ \
1665 vec_s16_t a0v = vec_add(sa8d0v, sa8d4v); \
1666 /* int a4 = SRC(0) - SRC(4) */ \
1667 vec_s16_t a4v = vec_sub(sa8d0v, sa8d4v); \
1668 /* int a1 = SRC(1) + SRC(5) */ \
1669 vec_s16_t a1v = vec_add(sa8d1v, sa8d5v); \
1670 /* int a5 = SRC(1) - SRC(5) */ \
1671 vec_s16_t a5v = vec_sub(sa8d1v, sa8d5v); \
1672 /* int a2 = SRC(2) + SRC(6) */ \
1673 vec_s16_t a2v = vec_add(sa8d2v, sa8d6v); \
1674 /* int a6 = SRC(2) - SRC(6) */ \
1675 vec_s16_t a6v = vec_sub(sa8d2v, sa8d6v); \
1676 /* int a3 = SRC(3) + SRC(7) */ \
1677 vec_s16_t a3v = vec_add(sa8d3v, sa8d7v); \
1678 /* int a7 = SRC(3) - SRC(7) */ \
1679 vec_s16_t a7v = vec_sub(sa8d3v, sa8d7v); \
1681 /* int b0 = a0 + a2 */ \
1682 vec_s16_t b0v = vec_add(a0v, a2v); \
1683 /* int b2 = a0 - a2; */ \
1684 vec_s16_t b2v = vec_sub(a0v, a2v); \
1685 /* int b1 = a1 + a3; */ \
1686 vec_s16_t b1v = vec_add(a1v, a3v); \
1687 /* int b3 = a1 - a3; */ \
1688 vec_s16_t b3v = vec_sub(a1v, a3v); \
1689 /* int b4 = a4 + a6; */ \
1690 vec_s16_t b4v = vec_add(a4v, a6v); \
1691 /* int b6 = a4 - a6; */ \
1692 vec_s16_t b6v = vec_sub(a4v, a6v); \
1693 /* int b5 = a5 + a7; */ \
1694 vec_s16_t b5v = vec_add(a5v, a7v); \
1695 /* int b7 = a5 - a7; */ \
1696 vec_s16_t b7v = vec_sub(a5v, a7v); \
1698 /* DST(0, b0 + b1) */ \
1699 sa8d0v = vec_add(b0v, b1v); \
1700 /* DST(1, b0 - b1) */ \
1701 sa8d1v = vec_sub(b0v, b1v); \
1702 /* DST(2, b2 + b3) */ \
1703 sa8d2v = vec_add(b2v, b3v); \
1704 /* DST(3, b2 - b3) */ \
1705 sa8d3v = vec_sub(b2v, b3v); \
1706 /* DST(4, b4 + b5) */ \
1707 sa8d4v = vec_add(b4v, b5v); \
1708 /* DST(5, b4 - b5) */ \
1709 sa8d5v = vec_sub(b4v, b5v); \
1710 /* DST(6, b6 + b7) */ \
1711 sa8d6v = vec_add(b6v, b7v); \
1712 /* DST(7, b6 - b7) */ \
    sa8d7v = vec_sub(b6v, b7v);                            \
}
1716 static int pixel_sa8d_8x8_core_altivec( uint8_t *pix1, intptr_t i_pix1,
1717 uint8_t *pix2, intptr_t i_pix2 )
1722 PREP_LOAD_SRC( pix1 );
1723 PREP_LOAD_SRC( pix2 );
1725 vec_s16_t diff0v, diff1v, diff2v, diff3v, diff4v, diff5v, diff6v, diff7v;
1727 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff0v, pix2 );
1728 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff1v, pix2 );
1729 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff2v, pix2 );
1730 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff3v, pix2 );
1732 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff4v, pix2 );
1733 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff5v, pix2 );
1734 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff6v, pix2 );
1735 VEC_DIFF_H( pix1, i_pix1, pix2, i_pix2, 8, diff7v, pix2 );
1737 vec_s16_t sa8d0v, sa8d1v, sa8d2v, sa8d3v, sa8d4v, sa8d5v, sa8d6v, sa8d7v;
1739 SA8D_1D_ALTIVEC(diff0v, diff1v, diff2v, diff3v,
1740 diff4v, diff5v, diff6v, diff7v);
1742 VEC_TRANSPOSE_8(diff0v, diff1v, diff2v, diff3v,
1743 diff4v, diff5v, diff6v, diff7v,
1744 sa8d0v, sa8d1v, sa8d2v, sa8d3v,
1745 sa8d4v, sa8d5v, sa8d6v, sa8d7v );
1747 SA8D_1D_ALTIVEC(sa8d0v, sa8d1v, sa8d2v, sa8d3v,
1748 sa8d4v, sa8d5v, sa8d6v, sa8d7v );
    /* accumulation of the absolute value of all elements of the resulting block */
1751 vec_s16_t abs0v = VEC_ABS(sa8d0v);
1752 vec_s16_t abs1v = VEC_ABS(sa8d1v);
1753 vec_s16_t sum01v = vec_add(abs0v, abs1v);
1755 vec_s16_t abs2v = VEC_ABS(sa8d2v);
1756 vec_s16_t abs3v = VEC_ABS(sa8d3v);
1757 vec_s16_t sum23v = vec_add(abs2v, abs3v);
1759 vec_s16_t abs4v = VEC_ABS(sa8d4v);
1760 vec_s16_t abs5v = VEC_ABS(sa8d5v);
1761 vec_s16_t sum45v = vec_add(abs4v, abs5v);
1763 vec_s16_t abs6v = VEC_ABS(sa8d6v);
1764 vec_s16_t abs7v = VEC_ABS(sa8d7v);
1765 vec_s16_t sum67v = vec_add(abs6v, abs7v);
1767 vec_s16_t sum0123v = vec_add(sum01v, sum23v);
1768 vec_s16_t sum4567v = vec_add(sum45v, sum67v);
1772 sumblocv = vec_sum4s(sum0123v, (vec_s32_t)zerov );
1773 sumblocv = vec_sum4s(sum4567v, sumblocv );
1775 sumblocv = vec_sums(sumblocv, (vec_s32_t)zerov );
1777 sumblocv = vec_splat(sumblocv, 3);
1779 vec_ste(sumblocv, 0, &i_satd);
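
/* The core routine returns the raw sum of absolute transform
 * coefficients; the SA8D score itself is that sum divided by 4 with
 * rounding, i.e. (sum + 2) >> 2, applied by the wrappers below. */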
static int pixel_sa8d_8x8_altivec( uint8_t *pix1, intptr_t i_pix1,
                                   uint8_t *pix2, intptr_t i_pix2 )
{
    int32_t i_satd;
    i_satd = (pixel_sa8d_8x8_core_altivec( pix1, i_pix1, pix2, i_pix2 )+2)>>2;
    return i_satd;
}

static int pixel_sa8d_16x16_altivec( uint8_t *pix1, intptr_t i_pix1,
                                     uint8_t *pix2, intptr_t i_pix2 )
{
    int32_t i_satd;
    i_satd = (pixel_sa8d_8x8_core_altivec( &pix1[0],            i_pix1, &pix2[0],            i_pix2 )
            + pixel_sa8d_8x8_core_altivec( &pix1[8],            i_pix1, &pix2[8],            i_pix2 )
            + pixel_sa8d_8x8_core_altivec( &pix1[8*i_pix1],     i_pix1, &pix2[8*i_pix2],     i_pix2 )
            + pixel_sa8d_8x8_core_altivec( &pix1[8*i_pix1+8],   i_pix1, &pix2[8*i_pix2+8],   i_pix2 ) +2)>>2;
    return i_satd;
}
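
/* 4-point Hadamard butterfly, applied lane-wise to four vectors of
 * eight 16-bit values:
 *   d0 = (s0+s1) + (s2+s3)    d1 = (s0-s1) + (s2-s3)
 *   d2 = (s0+s1) - (s2+s3)    d3 = (s0-s1) - (s2-s3)  */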
#define HADAMARD4_ALTIVEC(d0,d1,d2,d3,s0,s1,s2,s3) {\
    vec_s16_t t0 = vec_add(s0, s1); \
    vec_s16_t t1 = vec_sub(s0, s1); \
    vec_s16_t t2 = vec_add(s2, s3); \
    vec_s16_t t3 = vec_sub(s2, s3); \
    d0 = vec_add(t0, t2); \
    d2 = vec_sub(t0, t2); \
    d1 = vec_add(t1, t3); \
    d3 = vec_sub(t1, t3); \
}
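
/* Load the 16-byte aligned line holding row 'num' of 'pix', pick out
 * its 8 pixels via 'perm' and widen them to 16-bit lanes by
 * interleaving with zero bytes; also declares the matching
 * pix16_d##num destination vector for the transform. */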
#define VEC_LOAD_HIGH( p, num ) \
    vec_u8_t pix8_##num = vec_ld( stride*num, p ); \
    vec_s16_t pix16_s##num = (vec_s16_t)vec_perm(pix8_##num, zero_u8v, perm); \
    vec_s16_t pix16_d##num;
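
/***********************************************************************
 * pixel_hadamard_ac_altivec
 ***********************************************************************
 * Hadamard AC energy of one 8x8 block.  The low 32 bits of the return
 * value hold the sum of absolute 4x4 transform coefficients, the high
 * 32 bits the sum of absolute 8x8 coefficients; the DC term is
 * subtracted from both sums before they are packed.
 **********************************************************************/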
static uint64_t pixel_hadamard_ac_altivec( uint8_t *pix, intptr_t stride, const vec_u8_t perm )
{
    ALIGNED_16( int32_t sum4_tab[4] );
    ALIGNED_16( int32_t sum8_tab[4] );
    LOAD_ZERO;

    VEC_LOAD_HIGH( pix, 0 );
    VEC_LOAD_HIGH( pix, 1 );
    VEC_LOAD_HIGH( pix, 2 );
    VEC_LOAD_HIGH( pix, 3 );
    HADAMARD4_ALTIVEC(pix16_d0,pix16_d1,pix16_d2,pix16_d3,
                      pix16_s0,pix16_s1,pix16_s2,pix16_s3);

    VEC_LOAD_HIGH( pix, 4 );
    VEC_LOAD_HIGH( pix, 5 );
    VEC_LOAD_HIGH( pix, 6 );
    VEC_LOAD_HIGH( pix, 7 );
    HADAMARD4_ALTIVEC(pix16_d4,pix16_d5,pix16_d6,pix16_d7,
                      pix16_s4,pix16_s5,pix16_s6,pix16_s7);

    VEC_TRANSPOSE_8(pix16_d0, pix16_d1, pix16_d2, pix16_d3,
                    pix16_d4, pix16_d5, pix16_d6, pix16_d7,
                    pix16_s0, pix16_s1, pix16_s2, pix16_s3,
                    pix16_s4, pix16_s5, pix16_s6, pix16_s7);

    HADAMARD4_ALTIVEC(pix16_d0,pix16_d1,pix16_d2,pix16_d3,
                      pix16_s0,pix16_s1,pix16_s2,pix16_s3);

    HADAMARD4_ALTIVEC(pix16_d4,pix16_d5,pix16_d6,pix16_d7,
                      pix16_s4,pix16_s5,pix16_s6,pix16_s7);

    vec_u16_t addabs01 = vec_add( VEC_ABSOLUTE(pix16_d0), VEC_ABSOLUTE(pix16_d1) );
    vec_u16_t addabs23 = vec_add( VEC_ABSOLUTE(pix16_d2), VEC_ABSOLUTE(pix16_d3) );
    vec_u16_t addabs45 = vec_add( VEC_ABSOLUTE(pix16_d4), VEC_ABSOLUTE(pix16_d5) );
    vec_u16_t addabs67 = vec_add( VEC_ABSOLUTE(pix16_d6), VEC_ABSOLUTE(pix16_d7) );

    vec_u16_t sum4_v = vec_add(vec_add(addabs01, addabs23), vec_add(addabs45, addabs67));
    vec_ste(vec_sums(vec_sum4s((vec_s16_t)sum4_v, zero_s32v), zero_s32v), 12, sum4_tab);

    vec_s16_t tmpi0 = vec_add(pix16_d0, pix16_d4);
    vec_s16_t tmpi4 = vec_sub(pix16_d0, pix16_d4);
    vec_s16_t tmpi1 = vec_add(pix16_d1, pix16_d5);
    vec_s16_t tmpi5 = vec_sub(pix16_d1, pix16_d5);
    vec_s16_t tmpi2 = vec_add(pix16_d2, pix16_d6);
    vec_s16_t tmpi6 = vec_sub(pix16_d2, pix16_d6);
    vec_s16_t tmpi3 = vec_add(pix16_d3, pix16_d7);
    vec_s16_t tmpi7 = vec_sub(pix16_d3, pix16_d7);

    int sum4 = sum4_tab[3];

    VEC_TRANSPOSE_8(tmpi0, tmpi1, tmpi2, tmpi3,
                    tmpi4, tmpi5, tmpi6, tmpi7,
                    pix16_d0, pix16_d1, pix16_d2, pix16_d3,
                    pix16_d4, pix16_d5, pix16_d6, pix16_d7);

    vec_u16_t addsum04 = vec_add( VEC_ABSOLUTE( vec_add(pix16_d0, pix16_d4) ),
                                  VEC_ABSOLUTE( vec_sub(pix16_d0, pix16_d4) ) );
    vec_u16_t addsum15 = vec_add( VEC_ABSOLUTE( vec_add(pix16_d1, pix16_d5) ),
                                  VEC_ABSOLUTE( vec_sub(pix16_d1, pix16_d5) ) );
    vec_u16_t addsum26 = vec_add( VEC_ABSOLUTE( vec_add(pix16_d2, pix16_d6) ),
                                  VEC_ABSOLUTE( vec_sub(pix16_d2, pix16_d6) ) );
    vec_u16_t addsum37 = vec_add( VEC_ABSOLUTE( vec_add(pix16_d3, pix16_d7) ),
                                  VEC_ABSOLUTE( vec_sub(pix16_d3, pix16_d7) ) );

    vec_u16_t sum8_v = vec_add( vec_add(addsum04, addsum15), vec_add(addsum26, addsum37) );
    vec_ste(vec_sums(vec_sum4s((vec_s16_t)sum8_v, zero_s32v), zero_s32v), 12, sum8_tab);

    int sum8 = sum8_tab[3];

    ALIGNED_16( int16_t tmp0_4_tab[8] );
    vec_ste(vec_add(pix16_d0, pix16_d4), 0, tmp0_4_tab);

    sum4 -= tmp0_4_tab[0];
    sum8 -= tmp0_4_tab[0];
    return ((uint64_t)sum8<<32) + sum4;
}
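
/* Permutation vectors for VEC_LOAD_HIGH: vec_ld() always fetches the
 * 16-byte aligned line containing 'pix', so the table selects either
 * the low or the high 8 bytes of that line (depending on whether pix
 * is 16- or only 8-byte aligned) and interleaves them with zero bytes
 * to widen the pixels to 16 bits. */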
static const vec_u8_t hadamard_permtab[] =
{
    CV(0x10,0x00,0x11,0x01, 0x12,0x02,0x13,0x03, /* pix = mod16 */
       0x14,0x04,0x15,0x05, 0x16,0x06,0x17,0x07 ),
    CV(0x18,0x08,0x19,0x09, 0x1A,0x0A,0x1B,0x0B, /* pix = mod8 */
       0x1C,0x0C,0x1D,0x0D, 0x1E,0x0E,0x1F,0x0F )
};
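
/* Each 8x8 call packs the 8x8 sum in the high 32 bits of its return
 * value and the 4x4 sum in the low 32 bits.  After accumulating the
 * per-block results, the wrappers normalise the totals: the 8x8 sum
 * is divided by 4 (>>34 on the packed value) and the 4x4 sum by 2
 * (>>1), the same normalisation as the C reference implementation. */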
static uint64_t x264_pixel_hadamard_ac_16x16_altivec( uint8_t *pix, intptr_t stride )
{
    int idx = ((uintptr_t)pix & 8) >> 3;
    vec_u8_t permh = hadamard_permtab[idx];
    vec_u8_t perml = hadamard_permtab[!idx];
    uint64_t sum = pixel_hadamard_ac_altivec( pix, stride, permh );
    sum += pixel_hadamard_ac_altivec( pix+8, stride, perml );
    sum += pixel_hadamard_ac_altivec( pix+8*stride, stride, permh );
    sum += pixel_hadamard_ac_altivec( pix+8*stride+8, stride, perml );
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);
}

static uint64_t x264_pixel_hadamard_ac_16x8_altivec( uint8_t *pix, intptr_t stride )
{
    int idx = ((uintptr_t)pix & 8) >> 3;
    vec_u8_t permh = hadamard_permtab[idx];
    vec_u8_t perml = hadamard_permtab[!idx];
    uint64_t sum = pixel_hadamard_ac_altivec( pix, stride, permh );
    sum += pixel_hadamard_ac_altivec( pix+8, stride, perml );
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);
}

static uint64_t x264_pixel_hadamard_ac_8x16_altivec( uint8_t *pix, intptr_t stride )
{
    vec_u8_t perm = hadamard_permtab[ (((uintptr_t)pix & 8) >> 3) ];
    uint64_t sum = pixel_hadamard_ac_altivec( pix, stride, perm );
    sum += pixel_hadamard_ac_altivec( pix+8*stride, stride, perm );
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);
}

static uint64_t x264_pixel_hadamard_ac_8x8_altivec( uint8_t *pix, intptr_t stride )
{
    vec_u8_t perm = hadamard_permtab[ (((uintptr_t)pix & 8) >> 3) ];
    uint64_t sum = pixel_hadamard_ac_altivec( pix, stride, perm );
    return ((sum>>34)<<32) + ((uint32_t)sum>>1);
}
/****************************************************************************
 * structural similarity metric
 ****************************************************************************/
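/* Accumulates, for two horizontally adjacent 4x4 blocks at once, the
 * four sums SSIM needs: sum of pix1, sum of pix2, sum of squares
 * (pix1*pix1 + pix2*pix2) and the cross product pix1*pix2.
 * sums[i][0..3] = { s1, s2, ss, s12 } for block i. */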
static void ssim_4x4x2_core_altivec( const uint8_t *pix1, intptr_t stride1,
                                     const uint8_t *pix2, intptr_t stride2,
                                     int sums[2][4] )
{
    ALIGNED_16( int temp[4] );

    vec_u8_t pix1v, pix2v;
    vec_u32_t s1v, s2v, ssv, s12v;
    PREP_LOAD;
    PREP_LOAD_SRC (pix1);
    PREP_LOAD_SRC (pix2);
    LOAD_ZERO;

    s1v = s2v = ssv = s12v = zero_u32v;

    for( int y = 0; y < 4; y++ )
    {
        VEC_LOAD( &pix1[y*stride1], pix1v, 16, vec_u8_t, pix1 );
        VEC_LOAD( &pix2[y*stride2], pix2v, 16, vec_u8_t, pix2 );

        s1v = vec_sum4s( pix1v, s1v );
        s2v = vec_sum4s( pix2v, s2v );
        ssv = vec_msum( pix1v, pix1v, ssv );
        ssv = vec_msum( pix2v, pix2v, ssv );
        s12v = vec_msum( pix1v, pix2v, s12v );
    }

    vec_st( (vec_s32_t)s1v, 0, temp );
    sums[0][0] = temp[0];
    sums[1][0] = temp[1];
    vec_st( (vec_s32_t)s2v, 0, temp );
    sums[0][1] = temp[0];
    sums[1][1] = temp[1];
    vec_st( (vec_s32_t)ssv, 0, temp );
    sums[0][2] = temp[0];
    sums[1][2] = temp[1];
    vec_st( (vec_s32_t)s12v, 0, temp );
    sums[0][3] = temp[0];
    sums[1][3] = temp[1];
}
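
/* Batch SATD helpers: score one encoded block (fenc, FENC_STRIDE)
 * against three or four candidate blocks that share a stride, by
 * calling the single-block pixel_satd_*_altivec routines. */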
#define SATD_X( size ) \
static void pixel_satd_x3_##size##_altivec( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2,\
                                            intptr_t i_stride, int scores[3] )\
{\
    scores[0] = pixel_satd_##size##_altivec( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = pixel_satd_##size##_altivec( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = pixel_satd_##size##_altivec( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void pixel_satd_x4_##size##_altivec( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2,\
                                            uint8_t *pix3, intptr_t i_stride, int scores[4] )\
{\
    scores[0] = pixel_satd_##size##_altivec( fenc, FENC_STRIDE, pix0, i_stride );\
    scores[1] = pixel_satd_##size##_altivec( fenc, FENC_STRIDE, pix1, i_stride );\
    scores[2] = pixel_satd_##size##_altivec( fenc, FENC_STRIDE, pix2, i_stride );\
    scores[3] = pixel_satd_##size##_altivec( fenc, FENC_STRIDE, pix3, i_stride );\
}

SATD_X( 16x16 )
SATD_X( 16x8 )
SATD_X( 8x16 )
SATD_X( 8x8 )
SATD_X( 8x4 )
SATD_X( 4x8 )
SATD_X( 4x4 )
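
/* Intra 8x8 cost helpers: run the C vertical, horizontal and DC
 * predictors from the edge[] samples into a local buffer and score
 * each prediction against fenc with the selected metric. */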
#define INTRA_MBCMP_8x8( mbcmp )\
void intra_##mbcmp##_x3_8x8_altivec( uint8_t *fenc, uint8_t edge[36], int res[3] )\
{\
    ALIGNED_8( uint8_t pix[8*FDEC_STRIDE] );\
    x264_predict_8x8_v_c( pix, edge );\
    res[0] = pixel_##mbcmp##_8x8_altivec( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_h_c( pix, edge );\
    res[1] = pixel_##mbcmp##_8x8_altivec( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_8x8_dc_c( pix, edge );\
    res[2] = pixel_##mbcmp##_8x8_altivec( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP_8x8(sad)
INTRA_MBCMP_8x8(sa8d)
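
/* Same pattern for the other intra sizes: predict directly into fdec
 * with the three listed C predictors and score each against fenc; the
 * trailing 'chroma' argument selects the 8x8c predictors where
 * needed. */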
#define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma )\
void intra_##mbcmp##_x3_##size##x##size##chroma##_altivec( uint8_t *fenc, uint8_t *fdec, int res[3] )\
{\
    x264_predict_##size##x##size##chroma##_##pred1##_c( fdec );\
    res[0] = pixel_##mbcmp##_##size##x##size##_altivec( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##x##size##chroma##_##pred2##_c( fdec );\
    res[1] = pixel_##mbcmp##_##size##x##size##_altivec( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
    x264_predict_##size##x##size##chroma##_##pred3##_c( fdec );\
    res[2] = pixel_##mbcmp##_##size##x##size##_altivec( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
}

INTRA_MBCMP(satd, 4, v, h, dc, )
INTRA_MBCMP(sad, 8, dc, h, v, c )
INTRA_MBCMP(satd, 8, dc, h, v, c )
INTRA_MBCMP(sad, 16, v, h, dc, )
INTRA_MBCMP(satd, 16, v, h, dc, )
#endif // !HIGH_BIT_DEPTH
/****************************************************************************
 * x264_pixel_altivec_init:
 ****************************************************************************/
void x264_pixel_altivec_init( x264_pixel_function_t *pixf )
{
#if !HIGH_BIT_DEPTH
    pixf->sad[PIXEL_16x16]  = pixel_sad_16x16_altivec;
    pixf->sad[PIXEL_8x16]   = pixel_sad_8x16_altivec;
    pixf->sad[PIXEL_16x8]   = pixel_sad_16x8_altivec;
    pixf->sad[PIXEL_8x8]    = pixel_sad_8x8_altivec;

    pixf->sad_x3[PIXEL_16x16] = pixel_sad_x3_16x16_altivec;
    pixf->sad_x3[PIXEL_8x16]  = pixel_sad_x3_8x16_altivec;
    pixf->sad_x3[PIXEL_16x8]  = pixel_sad_x3_16x8_altivec;
    pixf->sad_x3[PIXEL_8x8]   = pixel_sad_x3_8x8_altivec;

    pixf->sad_x4[PIXEL_16x16] = pixel_sad_x4_16x16_altivec;
    pixf->sad_x4[PIXEL_8x16]  = pixel_sad_x4_8x16_altivec;
    pixf->sad_x4[PIXEL_16x8]  = pixel_sad_x4_16x8_altivec;
    pixf->sad_x4[PIXEL_8x8]   = pixel_sad_x4_8x8_altivec;

    pixf->satd[PIXEL_16x16] = pixel_satd_16x16_altivec;
    pixf->satd[PIXEL_8x16]  = pixel_satd_8x16_altivec;
    pixf->satd[PIXEL_16x8]  = pixel_satd_16x8_altivec;
    pixf->satd[PIXEL_8x8]   = pixel_satd_8x8_altivec;
    pixf->satd[PIXEL_8x4]   = pixel_satd_8x4_altivec;
    pixf->satd[PIXEL_4x8]   = pixel_satd_4x8_altivec;
    pixf->satd[PIXEL_4x4]   = pixel_satd_4x4_altivec;

    pixf->satd_x3[PIXEL_16x16] = pixel_satd_x3_16x16_altivec;
    pixf->satd_x3[PIXEL_8x16]  = pixel_satd_x3_8x16_altivec;
    pixf->satd_x3[PIXEL_16x8]  = pixel_satd_x3_16x8_altivec;
    pixf->satd_x3[PIXEL_8x8]   = pixel_satd_x3_8x8_altivec;
    pixf->satd_x3[PIXEL_8x4]   = pixel_satd_x3_8x4_altivec;
    pixf->satd_x3[PIXEL_4x8]   = pixel_satd_x3_4x8_altivec;
    pixf->satd_x3[PIXEL_4x4]   = pixel_satd_x3_4x4_altivec;

    pixf->satd_x4[PIXEL_16x16] = pixel_satd_x4_16x16_altivec;
    pixf->satd_x4[PIXEL_8x16]  = pixel_satd_x4_8x16_altivec;
    pixf->satd_x4[PIXEL_16x8]  = pixel_satd_x4_16x8_altivec;
    pixf->satd_x4[PIXEL_8x8]   = pixel_satd_x4_8x8_altivec;
    pixf->satd_x4[PIXEL_8x4]   = pixel_satd_x4_8x4_altivec;
    pixf->satd_x4[PIXEL_4x8]   = pixel_satd_x4_4x8_altivec;
    pixf->satd_x4[PIXEL_4x4]   = pixel_satd_x4_4x4_altivec;

    pixf->intra_sad_x3_8x8    = intra_sad_x3_8x8_altivec;
    pixf->intra_sad_x3_8x8c   = intra_sad_x3_8x8c_altivec;
    pixf->intra_sad_x3_16x16  = intra_sad_x3_16x16_altivec;

    pixf->intra_satd_x3_4x4   = intra_satd_x3_4x4_altivec;
    pixf->intra_satd_x3_8x8c  = intra_satd_x3_8x8c_altivec;
    pixf->intra_satd_x3_16x16 = intra_satd_x3_16x16_altivec;

    pixf->ssd[PIXEL_16x16] = pixel_ssd_16x16_altivec;
    pixf->ssd[PIXEL_8x8]   = pixel_ssd_8x8_altivec;

    pixf->sa8d[PIXEL_16x16] = pixel_sa8d_16x16_altivec;
    pixf->sa8d[PIXEL_8x8]   = pixel_sa8d_8x8_altivec;

    pixf->intra_sa8d_x3_8x8 = intra_sa8d_x3_8x8_altivec;

    pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_altivec;
    pixf->var[PIXEL_8x8]   = x264_pixel_var_8x8_altivec;

    pixf->hadamard_ac[PIXEL_16x16] = x264_pixel_hadamard_ac_16x16_altivec;
    pixf->hadamard_ac[PIXEL_16x8]  = x264_pixel_hadamard_ac_16x8_altivec;
    pixf->hadamard_ac[PIXEL_8x16]  = x264_pixel_hadamard_ac_8x16_altivec;
    pixf->hadamard_ac[PIXEL_8x8]   = x264_pixel_hadamard_ac_8x8_altivec;

    pixf->ssim_4x4x2_core = ssim_4x4x2_core_altivec;
#endif // !HIGH_BIT_DEPTH
}