/*****************************************************************************
 * mc.c: h264 encoder library (Motion Compensation)
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Eric Petit <eric.petit@lapsus.org>
 *          Guillaume Poirier <gpoirier@mplayerhq.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdarg.h>

#include "x264.h"
#include "common/common.h"
#include "common/mc.h"
#include "mc.h"
#include "ppccommon.h"

typedef void (*pf_mc_t)( uint8_t *src, int i_src,
                         uint8_t *dst, int i_dst, int i_height );

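/* Half-pel plane selection tables, indexed by qpel_idx = ((mvy&3)<<2) + (mvx&3).
 * Plane indices into src[4]: 0 = full-pel, 1 = horizontal half-pel,
 * 2 = vertical half-pel, 3 = center half-pel. Quarter-pel positions are
 * produced by averaging the two planes selected by ref0 and ref1. */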
static const int hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const int hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};

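/* H.264 6-tap half-pel interpolation filter: taps (1,-5,20,20,-5,1) */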
static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
{
    return pix[-2*i_pix_next] - 5*pix[-1*i_pix_next] + 20*(pix[0] +
           pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] +
           pix[ 3*i_pix_next];
}

static inline int x264_tapfilter1( uint8_t *pix )
{
    return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] +
           pix[ 3];
}

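/* pixel_avg2: dst = rounded average of two source blocks, used for
 * quarter-pel positions. The w4 version stays scalar (too narrow to
 * benefit from vectors); w8/w16 use vec_avg. */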
static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst,  int i_dst,
                                               uint8_t *src1, int i_src1,
                                               uint8_t *src2, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < 4; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst,  int i_dst,
                                               uint8_t *src1, int i_src1,
                                               uint8_t *src2, int i_height )
{
    vec_u8_t src1v, src2v;
    PREP_LOAD;
    PREP_STORE8;
    PREP_LOAD_SRC( src1 );
    PREP_LOAD_SRC( src2 );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src1, src1v, 8, vec_u8_t, src1 );
        VEC_LOAD( src2, src2v, 8, vec_u8_t, src2 );
        src1v = vec_avg( src1v, src2v );
        VEC_STORE8( src1v, dst );

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst,  int i_dst,
                                                uint8_t *src1, int i_src1,
                                                uint8_t *src2, int i_height )
{
    vec_u8_t src1v, src2v;
    PREP_LOAD;
    PREP_LOAD_SRC( src1 );
    PREP_LOAD_SRC( src2 );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src1, src1v, 16, vec_u8_t, src1 );
        VEC_LOAD( src2, src2v, 16, vec_u8_t, src2 );
        src1v = vec_avg( src1v, src2v );
        vec_st( src1v, 0, dst );

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst,  int i_dst,
                                                uint8_t *src1, int i_src1,
                                                uint8_t *src2, int i_height )
{
    x264_pixel_avg2_w16_altivec( dst,    i_dst, src1,    i_src1, src2,    i_height );
    x264_pixel_avg2_w4_altivec ( dst+16, i_dst, src1+16, i_src1, src2+16, i_height );
}

/* mc_copy: plain c */

#define MC_COPY( name, a )                                \
static void name( uint8_t *dst, int i_dst,                \
                  uint8_t *src, int i_src, int i_height ) \
{                                                         \
    int y;                                                \
    for( y = 0; y < i_height; y++ )                       \
    {                                                     \
        memcpy( dst, src, a );                            \
        src += i_src;                                     \
        dst += i_dst;                                     \
    }                                                     \
}
MC_COPY( x264_mc_copy_w4_altivec,  4  )
MC_COPY( x264_mc_copy_w8_altivec,  8  )

static void x264_mc_copy_w16_altivec( uint8_t *dst, int i_dst,
                                      uint8_t *src, int i_src, int i_height )
{
    vec_u8_t cpyV;
    PREP_LOAD;
    PREP_LOAD_SRC( src );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src, cpyV, 16, vec_u8_t, src );
        vec_st( cpyV, 0, dst );

        src += i_src;
        dst += i_dst;
    }
}

static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, int i_dst,
                                              uint8_t *src, int i_src, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        vec_u8_t cpyV = vec_ld( 0, src );
        vec_st( cpyV, 0, dst );

        src += i_src;
        dst += i_dst;
    }
}

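/* Luma motion compensation: pick the half-pel plane(s) for the fractional
 * MV, average two planes for quarter-pel positions, then apply weighted
 * prediction if requested. */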
static void mc_luma_altivec( uint8_t *dst,    int i_dst_stride,
                             uint8_t *src[4], int i_src_stride,
                             int mvx, int mvy,
                             int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);

        switch( i_width )
        {
            case 4:
                x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 8:
                x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 16:
            default:
                x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
        }
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
    }
    else if( weight->weightfn )
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
    else
    {
        switch( i_width )
        {
            case 4:
                x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
            case 8:
                x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
            case 16:
                x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
        }
    }
}

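/* Same plane selection as mc_luma, but when no interpolation or weighting
 * is needed it returns a pointer directly into the reference plane instead
 * of copying. */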
static uint8_t *get_ref_altivec( uint8_t *dst,    int *i_dst_stride,
                                 uint8_t *src[4], int i_src_stride,
                                 int mvx, int mvy,
                                 int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        switch( i_width )
        {
            case 4:
                x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 8:
                x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 12:
            case 16:
            default:
                x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 20:
                x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
        }
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
        return dst;
    }
    else if( weight->weightfn )
    {
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
        return dst;
    }
    else
    {
        *i_dst_stride = i_src_stride;
        return src1;
    }
}

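/* Plain C bilinear chroma interpolation for width 2. The four 1/8-pel
 * weights cA..cD sum to 64, hence the +32 rounding term and >>6. */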
static void mc_chroma_2xh( uint8_t *dst, int i_dst_stride,
                           uint8_t *src, int i_src_stride,
                           int mvx, int mvy,
                           int i_height )
{
    uint8_t *srcp;
    int d8x = mvx&0x07;
    int d8y = mvy&0x07;

    int cA = (8-d8x)*(8-d8y);
    int cB = d8x    *(8-d8y);
    int cC = (8-d8x)*d8y;
    int cD = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3);
    srcp = &src[i_src_stride];

    for( int y = 0; y < i_height; y++ )
    {
        dst[0] = ( cA*src[0] + cB*src[0+1] + cC*srcp[0] + cD*srcp[0+1] + 32 ) >> 6;
        dst[1] = ( cA*src[1] + cB*src[1+1] + cC*srcp[1] + cD*srcp[1+1] + 32 ) >> 6;

        src  += i_src_stride;
        srcp += i_src_stride;
        dst  += i_dst_stride;
    }
}

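/* One bilinear multiply-accumulate step (dst += src * coeff) for the A and
 * B row pipelines; each loop iteration below produces two output rows. */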
#define DO_PROCESS_W4( a )  \
    dstv_16A = vec_mladd( src##a##v_16A, coeff##a##v, dstv_16A );   \
    dstv_16B = vec_mladd( src##a##v_16B, coeff##a##v, dstv_16B )

static void mc_chroma_altivec_4xh( uint8_t *dst, int i_dst_stride,
                                   uint8_t *src, int i_src_stride,
                                   int mvx, int mvy,
                                   int i_height )
{
    uint8_t *srcp;
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    ALIGNED_16( uint16_t coeff[4] );
    coeff[0] = (8-d8x)*(8-d8y);
    coeff[1] = d8x    *(8-d8y);
    coeff[2] = (8-d8x)*d8y;
    coeff[3] = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3);
    srcp = &src[i_src_stride];

    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
    vec_u8_t    src2v_8A, dstv_8A;
    vec_u8_t    src2v_8B, dstv_8B;
    vec_u16_t   src0v_16A, src1v_16A, src2v_16A, src3v_16A, dstv_16A;
    vec_u16_t   src0v_16B, src1v_16B, src2v_16B, src3v_16B, dstv_16B;
    vec_u16_t   shiftv, k32v;

    coeff0v = vec_ld( 0, coeff );
    coeff3v = vec_splat( coeff0v, 3 );
    coeff2v = vec_splat( coeff0v, 2 );
    coeff1v = vec_splat( coeff0v, 1 );
    coeff0v = vec_splat( coeff0v, 0 );
    k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
    shiftv  = vec_splat_u16( 6 );

    VEC_LOAD( src, src2v_8B, 5, vec_u8_t, src );
    src2v_16B = vec_u8_to_u16( src2v_8B );
    src3v_16B = vec_sld( src2v_16B, src2v_16B, 2 );

    for( int y = 0; y < i_height; y += 2 )
    {
        src0v_16A = src2v_16B;
        src1v_16A = src3v_16B;

        VEC_LOAD_G( srcp, src2v_8A, 5, vec_u8_t );
        srcp += i_src_stride;
        VEC_LOAD_G( srcp, src2v_8B, 5, vec_u8_t );
        srcp += i_src_stride;
        src2v_16A = vec_u8_to_u16( src2v_8A );
        src2v_16B = vec_u8_to_u16( src2v_8B );
        src3v_16A = vec_sld( src2v_16A, src2v_16A, 2 );
        src3v_16B = vec_sld( src2v_16B, src2v_16B, 2 );

        src0v_16B = src2v_16A;
        src1v_16B = src3v_16A;

        dstv_16A = dstv_16B = k32v;
        DO_PROCESS_W4( 0 );
        DO_PROCESS_W4( 1 );
        DO_PROCESS_W4( 2 );
        DO_PROCESS_W4( 3 );

        dstv_16A = vec_sr( dstv_16A, shiftv );
        dstv_16B = vec_sr( dstv_16B, shiftv );
        dstv_8A  = vec_u16_to_u8( dstv_16A );
        dstv_8B  = vec_u16_to_u8( dstv_16B );
        vec_ste( vec_splat( (vec_u32_t) dstv_8A, 0 ), 0, (uint32_t*) dst );
        dst += i_dst_stride;
        vec_ste( vec_splat( (vec_u32_t) dstv_8B, 0 ), 0, (uint32_t*) dst );
        dst += i_dst_stride;
    }
}

#define DO_PROCESS_W8( a )  \
    src##a##v_16A = vec_u8_to_u16( src##a##v_8A );  \
    src##a##v_16B = vec_u8_to_u16( src##a##v_8B );  \
    dstv_16A = vec_mladd( src##a##v_16A, coeff##a##v, dstv_16A );   \
    dstv_16B = vec_mladd( src##a##v_16B, coeff##a##v, dstv_16B )

static void mc_chroma_altivec_8xh( uint8_t *dst, int i_dst_stride,
                                   uint8_t *src, int i_src_stride,
                                   int mvx, int mvy,
                                   int i_height )
{
    uint8_t *srcp;
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    ALIGNED_16( uint16_t coeff[4] );
    coeff[0] = (8-d8x)*(8-d8y);
    coeff[1] = d8x    *(8-d8y);
    coeff[2] = (8-d8x)*d8y;
    coeff[3] = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3);
    srcp = &src[i_src_stride];

    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    PREP_STORE8;
    vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
    vec_u8_t    src0v_8A, src1v_8A, src2v_8A, src3v_8A, dstv_8A;
    vec_u8_t    src0v_8B, src1v_8B, src2v_8B, src3v_8B, dstv_8B;
    vec_u16_t   src0v_16A, src1v_16A, src2v_16A, src3v_16A, dstv_16A;
    vec_u16_t   src0v_16B, src1v_16B, src2v_16B, src3v_16B, dstv_16B;
    vec_u16_t   shiftv, k32v;

    coeff0v = vec_ld( 0, coeff );
    coeff3v = vec_splat( coeff0v, 3 );
    coeff2v = vec_splat( coeff0v, 2 );
    coeff1v = vec_splat( coeff0v, 1 );
    coeff0v = vec_splat( coeff0v, 0 );
    k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
    shiftv  = vec_splat_u16( 6 );

    VEC_LOAD( src, src2v_8B, 9, vec_u8_t, src );
    src3v_8B = vec_sld( src2v_8B, src2v_8B, 1 );

    for( int y = 0; y < i_height; y += 2 )
    {
        src0v_8A = src2v_8B;
        src1v_8A = src3v_8B;

        VEC_LOAD_G( srcp, src2v_8A, 9, vec_u8_t );
        srcp += i_src_stride;
        VEC_LOAD_G( srcp, src2v_8B, 9, vec_u8_t );
        srcp += i_src_stride;
        src3v_8A = vec_sld( src2v_8A, src2v_8A, 1 );
        src3v_8B = vec_sld( src2v_8B, src2v_8B, 1 );

        src0v_8B = src2v_8A;
        src1v_8B = src3v_8A;
        dstv_16A = dstv_16B = k32v;
        DO_PROCESS_W8( 0 );
        DO_PROCESS_W8( 1 );
        DO_PROCESS_W8( 2 );
        DO_PROCESS_W8( 3 );

        dstv_16A = vec_sr( dstv_16A, shiftv );
        dstv_16B = vec_sr( dstv_16B, shiftv );
        dstv_8A  = vec_u16_to_u8( dstv_16A );
        dstv_8B  = vec_u16_to_u8( dstv_16B );
        VEC_STORE8( dstv_8A, dst );
        dst += i_dst_stride;
        VEC_STORE8( dstv_8B, dst );
        dst += i_dst_stride;
    }
}

static void mc_chroma_altivec( uint8_t *dst, int i_dst_stride,
                               uint8_t *src, int i_src_stride,
                               int mvx, int mvy,
                               int i_width, int i_height )
{
    if( i_width == 8 )
        mc_chroma_altivec_8xh( dst, i_dst_stride, src, i_src_stride,
                               mvx, mvy, i_height );
    else if( i_width == 4 )
        mc_chroma_altivec_4xh( dst, i_dst_stride, src, i_src_stride,
                               mvx, mvy, i_height );
    else
        mc_chroma_2xh( dst, i_dst_stride, src, i_src_stride,
                       mvx, mvy, i_height );
}

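/* HPEL_FILTER_1 computes the 6-tap sum a-5*b+20*c on 16-bit lanes, where
 * a, b, c are the sums of the outer, middle, and inner tap pairs.
 * HPEL_FILTER_2 computes (a-5*b+20*c)/16 stepwise to avoid 16-bit overflow;
 * it is used by the center filter, which runs the horizontal 6-tap over
 * vertically filtered 16-bit intermediates. */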
#define HPEL_FILTER_1( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );   /* (a-b) */          \
    t2v = vec_sub( t2v, t3v );   /* (b-c) */          \
    t2v = vec_sl(  t2v, twov );  /* (b-c)*4 */        \
    t1v = vec_sub( t1v, t2v );   /* a-5*b+4*c */      \
    t3v = vec_sl(  t3v, fourv ); /* 16*c */           \
    t1v = vec_add( t1v, t3v );   /* a-5*b+20*c */     \
}

#define HPEL_FILTER_2( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );  /* (a-b) */           \
    t1v = vec_sra( t1v, twov ); /* (a-b)/4 */         \
    t1v = vec_sub( t1v, t2v );  /* (a-b)/4-b */       \
    t1v = vec_add( t1v, t3v );  /* (a-b)/4-b+c */     \
    t1v = vec_sra( t1v, twov ); /* ((a-b)/4-b+c)/4 */ \
    t1v = vec_add( t1v, t3v );  /* ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16 */ \
}

#define HPEL_FILTER_HORIZONTAL()                             \
{                                                            \
    VEC_LOAD_G( &src[x- 2+i_stride*y], src1v, 16, vec_u8_t); \
    VEC_LOAD_G( &src[x+14+i_stride*y], src6v, 16, vec_u8_t); \
                                                             \
    src2v = vec_sld( src1v, src6v,  1 );                     \
    src3v = vec_sld( src1v, src6v,  2 );                     \
    src4v = vec_sld( src1v, src6v,  3 );                     \
    src5v = vec_sld( src1v, src6v,  4 );                     \
    src6v = vec_sld( src1v, src6v,  5 );                     \
                                                             \
    temp1v = vec_u8_to_s16_h( src1v );                       \
    temp2v = vec_u8_to_s16_h( src2v );                       \
    temp3v = vec_u8_to_s16_h( src3v );                       \
    temp4v = vec_u8_to_s16_h( src4v );                       \
    temp5v = vec_u8_to_s16_h( src5v );                       \
    temp6v = vec_u8_to_s16_h( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest1v = vec_add( temp1v, sixteenv );                    \
    dest1v = vec_sra( dest1v, fivev );                       \
                                                             \
    temp1v = vec_u8_to_s16_l( src1v );                       \
    temp2v = vec_u8_to_s16_l( src2v );                       \
    temp3v = vec_u8_to_s16_l( src3v );                       \
    temp4v = vec_u8_to_s16_l( src4v );                       \
    temp5v = vec_u8_to_s16_l( src5v );                       \
    temp6v = vec_u8_to_s16_l( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest2v = vec_add( temp1v, sixteenv );                    \
    dest2v = vec_sra( dest2v, fivev );                       \
                                                             \
    destv = vec_packsu( dest1v, dest2v );                    \
                                                             \
    VEC_STORE16( destv, &dsth[x+i_stride*y], dsth );         \
}

#define HPEL_FILTER_VERTICAL()                                    \
{                                                                 \
    VEC_LOAD( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src ); \
                                                                  \
    temp1v = vec_u8_to_s16_h( src1v );                            \
    temp2v = vec_u8_to_s16_h( src2v );                            \
    temp3v = vec_u8_to_s16_h( src3v );                            \
    temp4v = vec_u8_to_s16_h( src4v );                            \
    temp5v = vec_u8_to_s16_h( src5v );                            \
    temp6v = vec_u8_to_s16_h( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                        \
                   temp4v, temp5v, temp6v );                      \
                                                                  \
    dest1v = vec_add( temp1v, sixteenv );                         \
    dest1v = vec_sra( dest1v, fivev );                            \
                                                                  \
    temp4v = vec_u8_to_s16_l( src1v );                            \
    temp5v = vec_u8_to_s16_l( src2v );                            \
    temp6v = vec_u8_to_s16_l( src3v );                            \
    temp7v = vec_u8_to_s16_l( src4v );                            \
    temp8v = vec_u8_to_s16_l( src5v );                            \
    temp9v = vec_u8_to_s16_l( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp4v, temp5v, temp6v,                        \
                   temp7v, temp8v, temp9v );                      \
                                                                  \
    dest2v = vec_add( temp4v, sixteenv );                         \
    dest2v = vec_sra( dest2v, fivev );                            \
                                                                  \
    destv = vec_packsu( dest1v, dest2v );                         \
                                                                  \
    VEC_STORE16( destv, &dstv[x+i_stride*y], dsth );              \
}

#define HPEL_FILTER_CENTRAL()                           \
{                                                       \
    temp1v = vec_sld( tempav, tempbv, 12 );             \
    temp2v = vec_sld( tempav, tempbv, 14 );             \
    temp3v = tempbv;                                    \
    temp4v = vec_sld( tempbv, tempcv,  2 );             \
    temp5v = vec_sld( tempbv, tempcv,  4 );             \
    temp6v = vec_sld( tempbv, tempcv,  6 );             \
                                                        \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                   temp4v, temp5v, temp6v );            \
                                                        \
    dest1v = vec_add( temp1v, thirtytwov );             \
    dest1v = vec_sra( dest1v, sixv );                   \
                                                        \
    temp1v = vec_sld( tempbv, tempcv, 12 );             \
    temp2v = vec_sld( tempbv, tempcv, 14 );             \
    temp3v = tempcv;                                    \
    temp4v = vec_sld( tempcv, tempdv,  2 );             \
    temp5v = vec_sld( tempcv, tempdv,  4 );             \
    temp6v = vec_sld( tempcv, tempdv,  6 );             \
                                                        \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                   temp4v, temp5v, temp6v );            \
                                                        \
    dest2v = vec_add( temp1v, thirtytwov );             \
    dest2v = vec_sra( dest2v, sixv );                   \
                                                        \
    destv = vec_packsu( dest1v, dest2v );               \
                                                        \
    VEC_STORE16( destv, &dstc[x-16+i_stride*y], dsth ); \
}

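/* Generate the three half-pel planes (dsth = horizontal, dstv = vertical,
 * dstc = center) in a single pass over the frame, 16 pixels at a time. */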
void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
                               int i_stride, int i_width, int i_height, int16_t *buf )
{
    vec_u8_t destv;
    vec_u8_t src1v, src2v, src3v, src4v, src5v, src6v;
    vec_s16_t dest1v, dest2v;
    vec_s16_t temp1v, temp2v, temp3v, temp4v, temp5v, temp6v, temp7v, temp8v, temp9v;
    vec_s16_t tempav, tempbv, tempcv, tempdv, tempev;

    PREP_LOAD;
    PREP_LOAD_SRC( src );
    PREP_STORE16;
    PREP_STORE16_DST( dsth );
    LOAD_ZERO;

    vec_u16_t twov, fourv, fivev, sixv;
    vec_s16_t sixteenv, thirtytwov;
    vec_u16_u temp_u;

    temp_u.s[0] = 2;
    twov = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 4;
    fourv = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 5;
    fivev = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 6;
    sixv = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 16;
    sixteenv = (vec_s16_t)vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 32;
    thirtytwov = (vec_s16_t)vec_splat( temp_u.v, 0 );

    for( int y = 0; y < i_height; y++ )
    {
        int x = 0;

        /* horizontal_filter */
        HPEL_FILTER_HORIZONTAL();

        /* vertical_filter */
        HPEL_FILTER_VERTICAL();

        /* central_filter */
        tempav = tempcv;
        tempbv = tempdv;
        tempcv = vec_splat( temp1v, 0 ); /* first only */
        tempdv = temp1v;
        tempev = temp4v;

        for( x = 16; x < i_width; x += 16 )
        {
            /* horizontal_filter */
            HPEL_FILTER_HORIZONTAL();

            /* vertical_filter */
            HPEL_FILTER_VERTICAL();

            /* central_filter */
            tempav = tempcv;
            tempbv = tempdv;
            tempcv = tempev;
            tempdv = temp1v;
            tempev = temp4v;

            HPEL_FILTER_CENTRAL();
        }

        /* Partial vertical filter */
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src );

        temp1v = vec_u8_to_s16_h( src1v );
        temp2v = vec_u8_to_s16_h( src2v );
        temp3v = vec_u8_to_s16_h( src3v );
        temp4v = vec_u8_to_s16_h( src4v );
        temp5v = vec_u8_to_s16_h( src5v );
        temp6v = vec_u8_to_s16_h( src6v );

        HPEL_FILTER_1( temp1v, temp2v, temp3v, temp4v, temp5v, temp6v );

        /* central_filter */
        tempav = tempcv;
        tempbv = tempdv;
        tempcv = tempev;
        tempdv = temp1v;
        /* tempev is not used */

        HPEL_FILTER_CENTRAL();
    }
}

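/* Build the four half-resolution lookahead planes: dst0 plus the variants
 * offset by half a pel horizontally (dsth), vertically (dstv), and both
 * (dstc). Each output pixel is an average of neighbouring source pixels;
 * the perm/pack pair splits the interleaved averages into even and odd
 * phase bytes. */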
static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
                                            int src_stride, int dst_stride, int width, int height )
{
    int w = width/16;
    int end = (width & 15);
    vec_u8_t src0v, src1v, src2v;
    vec_u8_t lv, hv, src1p1v;
    vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
    static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E);

    for( int y = 0; y < height; y++ )
    {
        int x;
        uint8_t *src1 = src0+src_stride;
        uint8_t *src2 = src1+src_stride;

        src0v = vec_ld(0, src0);
        src1v = vec_ld(0, src1);
        src2v = vec_ld(0, src2);

        avg0v = vec_avg(src0v, src1v);
        avg1v = vec_avg(src1v, src2v);

        for( x = 0; x < w; x++ )
        {
            lv = vec_ld(16*(x*2+1), src0);
            src1v = vec_ld(16*(x*2+1), src1);
            avghv = vec_avg(lv, src1v);

            lv = vec_ld(16*(x*2+2), src0);
            src1p1v = vec_ld(16*(x*2+2), src1);
            avghp1v = vec_avg(lv, src1p1v);

            avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
            avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);

            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);

            avg0v = avghp1v;

            hv = vec_ld(16*(x*2+1), src2);
            avghv = vec_avg(src1v, hv);

            hv = vec_ld(16*(x*2+2), src2);
            avghp1v = vec_avg(src1p1v, hv);

            avgleftv = vec_avg(vec_sld(avg1v, avghv, 1), avg1v);
            avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);

            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);

            avg1v = avghp1v;
        }
        if( end )
        {
            lv = vec_ld(16*(x*2+1), src0);
            src1v = vec_ld(16*(x*2+1), src1);
            avghv = vec_avg(lv, src1v);

            lv = vec_ld(16*(x*2+1), src2);
            avghp1v = vec_avg(src1v, lv);

            avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
            avgrightv = vec_avg(vec_sld(avg1v, avghp1v, 1), avg1v);

            lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
            hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);

            vec_ste((vec_u32_t)lv, 16*x,   (uint32_t*)dst0);
            vec_ste((vec_u32_t)lv, 16*x+4, (uint32_t*)dst0);
            vec_ste((vec_u32_t)hv, 16*x,   (uint32_t*)dsth);
            vec_ste((vec_u32_t)hv, 16*x+4, (uint32_t*)dsth);

            lv = vec_sld(lv, lv, 8);
            hv = vec_sld(hv, hv, 8);

            vec_ste((vec_u32_t)lv, 16*x,   (uint32_t*)dstv);
            vec_ste((vec_u32_t)lv, 16*x+4, (uint32_t*)dstv);
            vec_ste((vec_u32_t)hv, 16*x,   (uint32_t*)dstc);
            vec_ste((vec_u32_t)hv, 16*x+4, (uint32_t*)dstc);
        }

        src0 += src_stride*2;
        dst0 += dst_stride;
        dsth += dst_stride;
        dstv += dst_stride;
        dstc += dst_stride;
    }
}

void x264_mc_altivec_init( x264_mc_functions_t *pf )
{
    pf->mc_luma   = mc_luma_altivec;
    pf->get_ref   = get_ref_altivec;
    pf->mc_chroma = mc_chroma_altivec;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_altivec;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_altivec;

    pf->hpel_filter = x264_hpel_filter_altivec;
    pf->frame_init_lowres_core = frame_init_lowres_core_altivec;
}