/* x264 common/ppc/mc.c — retrieved from the git.sesse.net web view
 * ("Convert x264 to use NV12 pixel format internally" revision). */
1 /*****************************************************************************
2  * mc.c: h264 encoder library (Motion Compensation)
3  *****************************************************************************
4  * Copyright (C) 2003-2008 x264 project
5  *
6  * Authors: Eric Petit <eric.petit@lapsus.org>
7  *          Guillaume Poirier <gpoirier@mplayerhq.hu>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
22  *****************************************************************************/
23
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <string.h>
27 #include <stdint.h>
28 #include <stdarg.h>
29
30 #include "x264.h"
31 #include "common/common.h"
32 #include "common/mc.h"
33 #include "mc.h"
34 #include "ppccommon.h"
35
36 #if !X264_HIGH_BIT_DEPTH
/* Function-pointer type for simple motion-compensation kernels:
 * process i_height rows from src (stride i_src) into dst (stride i_dst). */
typedef void (*pf_mc_t)( uint8_t *src, int i_src,
                         uint8_t *dst, int i_dst, int i_height );


/* Map a 4-bit qpel index (((mvy&3)<<2) + (mvx&3)) to the pair of
 * half-pel planes whose average gives the quarter-pel prediction, as
 * used by mc_luma_altivec/get_ref_altivec below.
 * NOTE(review): plane indices presumably follow the usual x264 layout
 * (0 = full-pel, 1 = h, 2 = v, 3 = hv) — confirm against common/mc.c. */
static const uint8_t hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const uint8_t hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};
43
44
/* 6-tap H.264 half-pel filter (1,-5,20,20,-5,1) sampled along a
 * stride of i_pix_next bytes, centred between pix[0] and pix[stride].
 * Returns the unscaled, unrounded filter sum. */
static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
{
    const int stride = i_pix_next;
    int acc = pix[-2*stride] + pix[3*stride];
    acc -= 5 * ( pix[-stride] + pix[2*stride] );
    acc += 20 * ( pix[0] + pix[stride] );
    return acc;
}
/* Horizontal 6-tap half-pel filter (1,-5,20,20,-5,1): the unit-stride
 * specialisation of x264_tapfilter.  Returns the unscaled sum. */
static inline int x264_tapfilter1( uint8_t *pix )
{
    return ( pix[-2] + pix[3] )
         - 5  * ( pix[-1] + pix[2] )
         + 20 * ( pix[0]  + pix[1] );
}
56
57
/* Scalar fallback: rounding average (a+b+1)>>1 of two 4-pixel-wide
 * reference rows into dst.  Both sources advance by i_src1 per row
 * (the references share a stride). */
static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst,  int i_dst,
                                               uint8_t *src1, int i_src1,
                                               uint8_t *src2, int i_height )
{
    while( i_height-- > 0 )
    {
        dst[0] = ( src1[0] + src2[0] + 1 ) >> 1;
        dst[1] = ( src1[1] + src2[1] + 1 ) >> 1;
        dst[2] = ( src1[2] + src2[2] + 1 ) >> 1;
        dst[3] = ( src1[3] + src2[3] + 1 ) >> 1;
        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}
71
/* Rounding average of two 8-pixel-wide reference rows using vec_avg
 * (per-byte (a+b+1)>>1).  Only the low 8 bytes of each averaged
 * vector are written (VEC_STORE8).  Both sources advance by i_src1
 * per row — the two references share a stride. */
static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst,  int i_dst,
                                               uint8_t *src1, int i_src1,
                                               uint8_t *src2, int i_height )
{
    vec_u8_t src1v, src2v;
    /* PREP_* (ppccommon.h) set up permute masks for the unaligned
     * loads/stores below. */
    PREP_LOAD;
    PREP_STORE8;
    PREP_LOAD_SRC( src1 );
    PREP_LOAD_SRC( src2 );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src1, src1v, 8, vec_u8_t, src1 );
        VEC_LOAD( src2, src2v, 8, vec_u8_t, src2 );
        src1v = vec_avg( src1v, src2v );
        VEC_STORE8( src1v, dst );

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}
94
/* Rounding average of two 16-pixel-wide reference rows.  Sources may
 * be unaligned (VEC_LOAD with permute masks); the result is stored
 * with a plain vec_st, which ignores the low address bits, so dst is
 * assumed 16-byte aligned — presumably guaranteed by the callers;
 * confirm. */
static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst,  int i_dst,
                                                uint8_t *src1, int i_src1,
                                                uint8_t *src2, int i_height )
{
    vec_u8_t src1v, src2v;
    PREP_LOAD;
    PREP_LOAD_SRC( src1 );
    PREP_LOAD_SRC( src2 );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src1, src1v, 16, vec_u8_t, src1 );
        VEC_LOAD( src2, src2v, 16, vec_u8_t, src2 );
        /* vec_avg: per-byte (a+b+1)>>1 */
        src1v = vec_avg( src1v, src2v );
        vec_st(src1v, 0, dst);

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}
116
/* 20-wide rounding average, composed of a scalar 4-wide pass on
 * columns 16..19 and a vector 16-wide pass on columns 0..15.  The two
 * column ranges are disjoint, so the passes may run in either order. */
static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst,  int i_dst,
                                                uint8_t *src1, int i_src1,
                                                uint8_t *src2, int i_height )
{
    x264_pixel_avg2_w4_altivec( dst + 16, i_dst, src1 + 16, i_src1, src2 + 16, i_height );
    x264_pixel_avg2_w16_altivec( dst, i_dst, src1, i_src1, src2, i_height );
}
124
/* mc_copy: plain C row-by-row block copy.  MC_COPY(name, a) defines a
 * kernel `name` that copies `a` bytes per row for i_height rows. */

#define MC_COPY( name, a )                                \
static void name( uint8_t *dst, int i_dst,                \
                  uint8_t *src, int i_src, int i_height ) \
{                                                         \
    for( int y = 0; y < i_height; y++ )                   \
    {                                                     \
        memcpy( dst, src, a );                            \
        dst += i_dst;                                     \
        src += i_src;                                     \
    }                                                     \
}
MC_COPY( x264_mc_copy_w4_altivec,  4  )
MC_COPY( x264_mc_copy_w8_altivec,  8  )
141
/* Copy a 16-pixel-wide block row by row: unaligned vector load,
 * 16-byte vector store.  vec_st ignores the low address bits, so dst
 * is assumed 16-byte aligned — presumably guaranteed by the caller
 * (a separate fully-aligned variant exists for when src is aligned
 * too); confirm. */
static void x264_mc_copy_w16_altivec( uint8_t *dst, int i_dst,
                                      uint8_t *src, int i_src, int i_height )
{
    vec_u8_t cpyV;
    PREP_LOAD;
    PREP_LOAD_SRC( src );

    for( int y = 0; y < i_height; y++)
    {
        VEC_LOAD( src, cpyV, 16, vec_u8_t, src );
        vec_st(cpyV, 0, dst);

        src += i_src;
        dst += i_dst;
    }
}
158
159
/* Copy a 16-pixel-wide block when BOTH src and dst are 16-byte
 * aligned: one aligned vector load + store per row. */
static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, int i_dst,
                                              uint8_t *src, int i_src, int i_height )
{
    for( int line = 0; line < i_height; line++ )
    {
        vec_st( vec_ld( 0, src ), 0, dst );
        src += i_src;
        dst += i_dst;
    }
}
172
173
174 static void mc_luma_altivec( uint8_t *dst,    int i_dst_stride,
175                              uint8_t *src[4], int i_src_stride,
176                              int mvx, int mvy,
177                              int i_width, int i_height, const x264_weight_t *weight )
178 {
179     int qpel_idx = ((mvy&3)<<2) + (mvx&3);
180     int offset = (mvy>>2)*i_src_stride + (mvx>>2);
181     uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
182     if( qpel_idx & 5 ) /* qpel interpolation needed */
183     {
184         uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
185
186         switch( i_width )
187         {
188             case 4:
189                 x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
190                 break;
191             case 8:
192                 x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
193                 break;
194             case 16:
195             default:
196                 x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
197         }
198         if( weight->weightfn )
199             weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
200     }
201     else if( weight->weightfn )
202         weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
203     else
204     {
205         switch( i_width )
206         {
207             case 4:
208                 x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
209                 break;
210             case 8:
211                 x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
212                 break;
213             case 16:
214                 x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
215                 break;
216         }
217     }
218 }
219
220
221
222 static uint8_t *get_ref_altivec( uint8_t *dst,   int *i_dst_stride,
223                                  uint8_t *src[4], int i_src_stride,
224                                  int mvx, int mvy,
225                                  int i_width, int i_height, const x264_weight_t *weight )
226 {
227     int qpel_idx = ((mvy&3)<<2) + (mvx&3);
228     int offset = (mvy>>2)*i_src_stride + (mvx>>2);
229     uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
230     if( qpel_idx & 5 ) /* qpel interpolation needed */
231     {
232         uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
233         switch( i_width )
234         {
235             case 4:
236                 x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
237                 break;
238             case 8:
239                 x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
240                 break;
241             case 12:
242             case 16:
243             default:
244                 x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
245                 break;
246             case 20:
247                 x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
248                 break;
249         }
250         if( weight->weightfn )
251             weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
252         return dst;
253     }
254     else if( weight->weightfn )
255     {
256         weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
257         return dst;
258     }
259     else
260     {
261         *i_dst_stride = i_src_stride;
262         return src1;
263     }
264 }
265
/* Plain-C 2-pixel-wide chroma motion compensation: bilinear
 * interpolation with a 1/8-pel motion vector.  The integer MV part
 * selects the source origin; the fractional part forms four weights
 * that always sum to 64, so each output is (sum + 32) >> 6. */
static void mc_chroma_2xh( uint8_t *dst, int i_dst_stride,
                           uint8_t *src, int i_src_stride,
                           int mvx, int mvy,
                           int i_height )
{
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    /* Bilinear weights: cA+cB+cC+cD == 64. */
    int cA = (8-d8x)*(8-d8y);
    int cB = d8x    *(8-d8y);
    int cC = (8-d8x)*d8y;
    int cD = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3);
    uint8_t *srcp = src + i_src_stride; /* row below src */

    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < 2; x++ )
            dst[x] = ( cA*src[x] + cB*src[x+1] + cC*srcp[x] + cD*srcp[x+1] + 32 ) >> 6;

        src  += i_src_stride;
        srcp += i_src_stride;
        dst  += i_dst_stride;
    }
}
293
294
/* Multiply-accumulate source row `a` (a = 0..3: the four bilinear
 * taps) into both row accumulators: _A holds the even output row,
 * _B the odd one. */
#define DO_PROCESS_W4( a ) \
    dstv_16A = vec_mladd( src##a##v_16A, coeff##a##v, dstv_16A ); \
    dstv_16B = vec_mladd( src##a##v_16B, coeff##a##v, dstv_16B )

/* 4-pixel-wide chroma motion compensation, 1/8-pel bilinear
 * interpolation.  coeff[0..3] are the bilinear weights (they sum to
 * 64); each output pixel is (weighted sum + 32) >> 6.  Two output
 * rows are produced per loop iteration, so i_height is assumed even
 * — presumably guaranteed by the caller; confirm. */
static void mc_chroma_altivec_4xh( uint8_t *dst, int i_dst_stride,
                                   uint8_t *src, int i_src_stride,
                                   int mvx, int mvy,
                                   int i_height )
{
    uint8_t *srcp;
    /* 1/8-pel fractional MV components. */
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    ALIGNED_16( uint16_t coeff[4] );
    coeff[0] = (8-d8x)*(8-d8y);
    coeff[1] = d8x    *(8-d8y);
    coeff[2] = (8-d8x)*d8y;
    coeff[3] = d8x    *d8y;

    /* Integer MV part selects the source origin; srcp is the row
     * below src. */
    src += (mvy >> 3) * i_src_stride + (mvx >> 3);
    srcp = &src[i_src_stride];

    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
    vec_u8_t    src2v_8A, dstv_8A;
    vec_u8_t    src2v_8B, dstv_8B;
    vec_u16_t   src0v_16A, src1v_16A, src2v_16A, src3v_16A, dstv_16A;
    vec_u16_t   src0v_16B, src1v_16B, src2v_16B, src3v_16B, dstv_16B;
    vec_u16_t   shiftv, k32v;

    /* Splat each of the four weights into its own vector. */
    coeff0v = vec_ld( 0, coeff );
    coeff3v = vec_splat( coeff0v, 3 );
    coeff2v = vec_splat( coeff0v, 2 );
    coeff1v = vec_splat( coeff0v, 1 );
    coeff0v = vec_splat( coeff0v, 0 );
    k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) ); /* rounding bias 32 */
    shiftv  = vec_splat_u16( 6 );

    /* Prime the pipeline: load 5 pixels of the first row; the x+1
     * taps are the same row shifted by one 16-bit element (vec_sld). */
    VEC_LOAD( src, src2v_8B, 5, vec_u8_t, src );
    src2v_16B = vec_u8_to_u16( src2v_8B );
    src3v_16B = vec_sld( src2v_16B, src2v_16B, 2 );

    for( int y = 0; y < i_height; y += 2 )
    {
        /* Reuse the previous iteration's bottom row as this top row. */
        src0v_16A = src2v_16B;
        src1v_16A = src3v_16B;

        VEC_LOAD_G( srcp, src2v_8A, 5, vec_u8_t );
        srcp += i_src_stride;
        VEC_LOAD_G( srcp, src2v_8B, 5, vec_u8_t );
        srcp += i_src_stride;
        src2v_16A = vec_u8_to_u16( src2v_8A );
        src2v_16B = vec_u8_to_u16( src2v_8B );
        src3v_16A = vec_sld( src2v_16A, src2v_16A, 2 );
        src3v_16B = vec_sld( src2v_16B, src2v_16B, 2 );

        src0v_16B = src2v_16A;
        src1v_16B = src3v_16A;

        dstv_16A = dstv_16B = k32v;
        DO_PROCESS_W4( 0 );
        DO_PROCESS_W4( 1 );
        DO_PROCESS_W4( 2 );
        DO_PROCESS_W4( 3 );

        /* Normalize, narrow to bytes, and store 4 bytes per row via a
         * single-element 32-bit store. */
        dstv_16A = vec_sr( dstv_16A, shiftv );
        dstv_16B = vec_sr( dstv_16B, shiftv );
        dstv_8A  = vec_u16_to_u8( dstv_16A );
        dstv_8B  = vec_u16_to_u8( dstv_16B );
        vec_ste( vec_splat( (vec_u32_t) dstv_8A, 0 ), 0, (uint32_t*) dst );
        dst += i_dst_stride;
        vec_ste( vec_splat( (vec_u32_t) dstv_8B, 0 ), 0, (uint32_t*) dst );
        dst += i_dst_stride;
    }
}
372
/* Widen source row `a` (a = 0..3: the four bilinear taps) to 16 bits
 * and multiply-accumulate it into both row accumulators: _A is the
 * even output row, _B the odd one. */
#define DO_PROCESS_W8( a ) \
    src##a##v_16A = vec_u8_to_u16( src##a##v_8A ); \
    src##a##v_16B = vec_u8_to_u16( src##a##v_8B ); \
    dstv_16A = vec_mladd( src##a##v_16A, coeff##a##v, dstv_16A ); \
    dstv_16B = vec_mladd( src##a##v_16B, coeff##a##v, dstv_16B )

/* 8-pixel-wide chroma motion compensation, 1/8-pel bilinear
 * interpolation; same scheme as the 4-wide version but keeps source
 * rows as bytes until DO_PROCESS_W8 widens them, and stores 8 bytes
 * per row with VEC_STORE8.  Two rows per iteration: i_height is
 * assumed even — presumably guaranteed by the caller; confirm. */
static void mc_chroma_altivec_8xh( uint8_t *dst, int i_dst_stride,
                                   uint8_t *src, int i_src_stride,
                                   int mvx, int mvy,
                                   int i_height )
{
    uint8_t *srcp;
    /* 1/8-pel fractional MV components. */
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    ALIGNED_16( uint16_t coeff[4] );
    coeff[0] = (8-d8x)*(8-d8y);
    coeff[1] = d8x    *(8-d8y);
    coeff[2] = (8-d8x)*d8y;
    coeff[3] = d8x    *d8y;

    /* Integer MV part selects the source origin; srcp is the row
     * below src. */
    src += (mvy >> 3) * i_src_stride + (mvx >> 3);
    srcp = &src[i_src_stride];

    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    PREP_STORE8;
    vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
    vec_u8_t    src0v_8A, src1v_8A, src2v_8A, src3v_8A, dstv_8A;
    vec_u8_t    src0v_8B, src1v_8B, src2v_8B, src3v_8B, dstv_8B;
    vec_u16_t   src0v_16A, src1v_16A, src2v_16A, src3v_16A, dstv_16A;
    vec_u16_t   src0v_16B, src1v_16B, src2v_16B, src3v_16B, dstv_16B;
    vec_u16_t   shiftv, k32v;

    /* Splat each of the four weights into its own vector. */
    coeff0v = vec_ld( 0, coeff );
    coeff3v = vec_splat( coeff0v, 3 );
    coeff2v = vec_splat( coeff0v, 2 );
    coeff1v = vec_splat( coeff0v, 1 );
    coeff0v = vec_splat( coeff0v, 0 );
    k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) ); /* rounding bias 32 */
    shiftv  = vec_splat_u16( 6 );

    /* Prime the pipeline: 9 pixels of the first row; x+1 taps come
     * from a one-byte shift of the same row. */
    VEC_LOAD( src, src2v_8B, 9, vec_u8_t, src );
    src3v_8B = vec_sld( src2v_8B, src2v_8B, 1 );

    for( int y = 0; y < i_height; y+=2 )
    {
        /* Reuse the previous iteration's bottom row as this top row. */
        src0v_8A = src2v_8B;
        src1v_8A = src3v_8B;

        VEC_LOAD_G( srcp, src2v_8A, 9, vec_u8_t );
        srcp += i_src_stride;
        VEC_LOAD_G( srcp, src2v_8B, 9, vec_u8_t );
        srcp += i_src_stride;
        src3v_8A = vec_sld( src2v_8A, src2v_8A, 1 );
        src3v_8B = vec_sld( src2v_8B, src2v_8B, 1 );

        src0v_8B = src2v_8A;
        src1v_8B = src3v_8A;
        dstv_16A = dstv_16B = k32v;
        DO_PROCESS_W8( 0 );
        DO_PROCESS_W8( 1 );
        DO_PROCESS_W8( 2 );
        DO_PROCESS_W8( 3 );

        /* Normalize, narrow to bytes, and store 8 bytes per row. */
        dstv_16A = vec_sr( dstv_16A, shiftv );
        dstv_16B = vec_sr( dstv_16B, shiftv );
        dstv_8A  = vec_u16_to_u8( dstv_16A );
        dstv_8B  = vec_u16_to_u8( dstv_16B );
        VEC_STORE8( dstv_8A, dst );
        dst += i_dst_stride;
        VEC_STORE8( dstv_8B, dst );
        dst += i_dst_stride;
    }
}
449
/* Chroma MC dispatcher: pick the kernel for the block width.  Widths
 * other than 8 and 4 (i.e. 2) take the scalar fallback. */
static void mc_chroma_altivec( uint8_t *dst, int i_dst_stride,
                               uint8_t *src, int i_src_stride,
                               int mvx, int mvy,
                               int i_width, int i_height )
{
    switch( i_width )
    {
        case 8:
            mc_chroma_altivec_8xh( dst, i_dst_stride, src, i_src_stride,
                                   mvx, mvy, i_height );
            break;
        case 4:
            mc_chroma_altivec_4xh( dst, i_dst_stride, src, i_src_stride,
                                   mvx, mvy, i_height );
            break;
        default:
            mc_chroma_2xh( dst, i_dst_stride, src, i_src_stride,
                           mvx, mvy, i_height );
            break;
    }
}
465
/* One step of the 6-tap half-pel filter (taps 1,-5,20,20,-5,1) on
 * 16-bit lanes.  t1v..t6v hold the six taps; on exit
 * t1v = a - 5*b + 20*c with a = t1+t6, b = t2+t5, c = t3+t4.
 * Requires the shift vectors `twov` and `fourv` in scope. */
#define HPEL_FILTER_1( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );   /* (a-b) */          \
    t2v = vec_sub( t2v, t3v );   /* (b-c) */          \
    t2v = vec_sl(  t2v, twov );  /* (b-c)*4 */        \
    t1v = vec_sub( t1v, t2v );   /* a-5*b+4*c */      \
    t3v = vec_sl(  t3v, fourv ); /* 16*c */           \
    t1v = vec_add( t1v, t3v );   /* a-5*b+20*c */     \
}

/* Second-stage 6-tap filter for the central plane, evaluated as
 * ((a-b)/4 - b + c)/4 + c = (a - 5*b + 20*c)/16 using arithmetic
 * shifts so the 16-bit intermediates cannot overflow.  Requires
 * `twov` in scope. */
#define HPEL_FILTER_2( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );  /* (a-b) */           \
    t1v = vec_sra( t1v, twov ); /* (a-b)/4 */         \
    t1v = vec_sub( t1v, t2v );  /* (a-b)/4-b */       \
    t1v = vec_add( t1v, t3v );  /* (a-b)/4-b+c */     \
    t1v = vec_sra( t1v, twov ); /* ((a-b)/4-b+c)/4 */ \
    t1v = vec_add( t1v, t3v );  /* ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16 */ \
}
493
/* Horizontally filter 16 pixels at (x, y) of src into dsth: load
 * pixels x-2 .. x+18, build the six tap vectors with byte shifts
 * (vec_sld), widen to 16 bits, filter each half with HPEL_FILTER_1,
 * round (+16, >>5), pack with unsigned saturation and store.
 * Expects x, y, src, dsth, i_stride and the vector temporaries of
 * x264_hpel_filter_altivec to be in scope. */
#define HPEL_FILTER_HORIZONTAL()                             \
{                                                            \
    VEC_LOAD_G( &src[x- 2+i_stride*y], src1v, 16, vec_u8_t); \
    VEC_LOAD_G( &src[x+14+i_stride*y], src6v, 16, vec_u8_t); \
                                                             \
    src2v = vec_sld( src1v, src6v,  1 );                     \
    src3v = vec_sld( src1v, src6v,  2 );                     \
    src4v = vec_sld( src1v, src6v,  3 );                     \
    src5v = vec_sld( src1v, src6v,  4 );                     \
    src6v = vec_sld( src1v, src6v,  5 );                     \
                                                             \
    temp1v = vec_u8_to_s16_h( src1v );                       \
    temp2v = vec_u8_to_s16_h( src2v );                       \
    temp3v = vec_u8_to_s16_h( src3v );                       \
    temp4v = vec_u8_to_s16_h( src4v );                       \
    temp5v = vec_u8_to_s16_h( src5v );                       \
    temp6v = vec_u8_to_s16_h( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest1v = vec_add( temp1v, sixteenv );                    \
    dest1v = vec_sra( dest1v, fivev );                       \
                                                             \
    temp1v = vec_u8_to_s16_l( src1v );                       \
    temp2v = vec_u8_to_s16_l( src2v );                       \
    temp3v = vec_u8_to_s16_l( src3v );                       \
    temp4v = vec_u8_to_s16_l( src4v );                       \
    temp5v = vec_u8_to_s16_l( src5v );                       \
    temp6v = vec_u8_to_s16_l( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest2v = vec_add( temp1v, sixteenv );                    \
    dest2v = vec_sra( dest2v, fivev );                       \
                                                             \
    destv = vec_packsu( dest1v, dest2v );                    \
                                                             \
    VEC_STORE16( destv, &dsth[x+i_stride*y], dsth );         \
}
535
/* Vertically filter 16 pixels at (x, y) across rows y-2 .. y+3 into
 * dstv.  On exit temp1v (high half) and temp4v (low half) still hold
 * the unrounded 16-bit column sums, which the caller feeds into the
 * central filter.  The store to &dstv[...] reuses the dsth alignment
 * setup (third VEC_STORE16 argument) — NOTE(review): assumes dstv
 * and dsth share 16-byte alignment; confirm in the frame allocator. */
#define HPEL_FILTER_VERTICAL()                                    \
{                                                                 \
    VEC_LOAD( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src ); \
                                                                  \
    temp1v = vec_u8_to_s16_h( src1v );                            \
    temp2v = vec_u8_to_s16_h( src2v );                            \
    temp3v = vec_u8_to_s16_h( src3v );                            \
    temp4v = vec_u8_to_s16_h( src4v );                            \
    temp5v = vec_u8_to_s16_h( src5v );                            \
    temp6v = vec_u8_to_s16_h( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                        \
                   temp4v, temp5v, temp6v );                      \
                                                                  \
    dest1v = vec_add( temp1v, sixteenv );                         \
    dest1v = vec_sra( dest1v, fivev );                            \
                                                                  \
    temp4v = vec_u8_to_s16_l( src1v );                            \
    temp5v = vec_u8_to_s16_l( src2v );                            \
    temp6v = vec_u8_to_s16_l( src3v );                            \
    temp7v = vec_u8_to_s16_l( src4v );                            \
    temp8v = vec_u8_to_s16_l( src5v );                            \
    temp9v = vec_u8_to_s16_l( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp4v, temp5v, temp6v,                        \
                   temp7v, temp8v, temp9v );                      \
                                                                  \
    dest2v = vec_add( temp4v, sixteenv );                         \
    dest2v = vec_sra( dest2v, fivev );                            \
                                                                  \
    destv = vec_packsu( dest1v, dest2v );                         \
                                                                  \
    VEC_STORE16( destv, &dstv[x+i_stride*y], dsth );              \
}
575
/* Centre (hv) filter: horizontally re-filter the 16-bit vertical sums
 * held in the sliding window tempav..tempdv using HPEL_FILTER_2
 * (which folds in the /16 scaling), round (+32, >>6), pack and store
 * to dstc at column x-16 — the output lags the current column by one
 * 16-pixel chunk. */
#define HPEL_FILTER_CENTRAL()                           \
{                                                       \
    temp1v = vec_sld( tempav, tempbv, 12 );             \
    temp2v = vec_sld( tempav, tempbv, 14 );             \
    temp3v = tempbv;                                    \
    temp4v = vec_sld( tempbv, tempcv,  2 );             \
    temp5v = vec_sld( tempbv, tempcv,  4 );             \
    temp6v = vec_sld( tempbv, tempcv,  6 );             \
                                                        \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                   temp4v, temp5v, temp6v );            \
                                                        \
    dest1v = vec_add( temp1v, thirtytwov );             \
    dest1v = vec_sra( dest1v, sixv );                   \
                                                        \
    temp1v = vec_sld( tempbv, tempcv, 12 );             \
    temp2v = vec_sld( tempbv, tempcv, 14 );             \
    temp3v = tempcv;                                    \
    temp4v = vec_sld( tempcv, tempdv,  2 );             \
    temp5v = vec_sld( tempcv, tempdv,  4 );             \
    temp6v = vec_sld( tempcv, tempdv,  6 );             \
                                                        \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                   temp4v, temp5v, temp6v );            \
                                                        \
    dest2v = vec_add( temp1v, thirtytwov );             \
    dest2v = vec_sra( dest2v, sixv );                   \
                                                        \
    destv = vec_packsu( dest1v, dest2v );               \
                                                        \
    VEC_STORE16( destv, &dstc[x-16+i_stride*y], dsth ); \
}
608
/* Generate the three half-pel planes for one luma plane: dsth
 * (horizontal), dstv (vertical) and dstc (centre/hv).  Works in
 * 16-pixel column chunks per row; the central filter consumes the
 * 16-bit vertical sums (temp1v/temp4v) left behind by the vertical
 * pass, one chunk behind the current column.
 * NOTE(review): i_width appears to be assumed a multiple of 16, and
 * the `buf` scratch argument is never used here — confirm against
 * the C reference implementation in common/mc.c. */
void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
                               int i_stride, int i_width, int i_height, int16_t *buf )
{
    vec_u8_t destv;
    vec_u8_t src1v, src2v, src3v, src4v, src5v, src6v;
    vec_s16_t dest1v, dest2v;
    vec_s16_t temp1v, temp2v, temp3v, temp4v, temp5v, temp6v, temp7v, temp8v, temp9v;
    vec_s16_t tempav, tempbv, tempcv, tempdv, tempev;

    PREP_LOAD;
    PREP_LOAD_SRC( src);
    PREP_STORE16;
    PREP_STORE16_DST( dsth );
    LOAD_ZERO;

    /* Splat the filter constants: shift amounts and rounding biases. */
    vec_u16_t twov, fourv, fivev, sixv;
    vec_s16_t sixteenv, thirtytwov;
    vec_u16_u temp_u;

    temp_u.s[0]=2;
    twov = vec_splat( temp_u.v, 0 );
    temp_u.s[0]=4;
    fourv = vec_splat( temp_u.v, 0 );
    temp_u.s[0]=5;
    fivev = vec_splat( temp_u.v, 0 );
    temp_u.s[0]=6;
    sixv = vec_splat( temp_u.v, 0 );
    temp_u.s[0]=16;
    sixteenv = (vec_s16_t)vec_splat( temp_u.v, 0 );
    temp_u.s[0]=32;
    thirtytwov = (vec_s16_t)vec_splat( temp_u.v, 0 );

    for( int y = 0; y < i_height; y++ )
    {
        int x = 0;

        /* First chunk of the row: run the h and v filters and prime
         * the central filter's sliding window (tempav..tempev)
         * without emitting any central output yet. */
        /* horizontal_filter */
        HPEL_FILTER_HORIZONTAL();

        /* vertical_filter */
        HPEL_FILTER_VERTICAL();

        /* central_filter */
        tempav = tempcv;
        tempbv = tempdv;
        tempcv = vec_splat( temp1v, 0 ); /* first only */
        tempdv = temp1v;
        tempev = temp4v;

        for( x = 16; x < i_width; x+=16 )
        {
            /* horizontal_filter */
            HPEL_FILTER_HORIZONTAL();

            /* vertical_filter */
            HPEL_FILTER_VERTICAL();

            /* central_filter: slide the window forward and emit the
             * central chunk at column x-16. */
            tempav = tempcv;
            tempbv = tempdv;
            tempcv = tempev;
            tempdv = temp1v;
            tempev = temp4v;

            HPEL_FILTER_CENTRAL();
        }

        /* Partial vertical filter: compute one more set of 16-bit
         * column sums past the right edge so the final central chunk
         * has its look-ahead taps. */
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src );

        temp1v = vec_u8_to_s16_h( src1v );
        temp2v = vec_u8_to_s16_h( src2v );
        temp3v = vec_u8_to_s16_h( src3v );
        temp4v = vec_u8_to_s16_h( src4v );
        temp5v = vec_u8_to_s16_h( src5v );
        temp6v = vec_u8_to_s16_h( src6v );

        HPEL_FILTER_1( temp1v, temp2v, temp3v, temp4v, temp5v, temp6v );

        /* central_filter: final chunk of the row. */
        tempav = tempcv;
        tempbv = tempdv;
        tempcv = tempev;
        tempdv = temp1v;
        /* tempev is not used */

        HPEL_FILTER_CENTRAL();
    }
}
703
/* Downscale one plane by 2 in each direction into four phase-shifted
 * half-resolution planes (dst0, dsth, dstv, dstc) for lookahead.
 * Each output pixel is built from rounding averages (vec_avg) of a
 * 2x2 source neighbourhood; inverse_bridge_shuffle selects the even
 * bytes of an averaged vector pair while vec_pack takes the odd ones.
 * Processes 16 output pixels per inner iteration; `end` handles a
 * trailing partial chunk with 4-byte element stores.
 * NOTE(review): the partial-chunk path writes exactly 8 bytes per
 * plane — presumably width&15 is at most 8 here; confirm against the
 * callers' lowres dimensions. */
static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
                                           int src_stride, int dst_stride, int width, int height )
{
    int w = width >> 4;      /* full 16-pixel output chunks */
    int end = (width & 15);  /* leftover output pixels, if any */
    vec_u8_t src0v, src1v, src2v;
    vec_u8_t lv, hv, src1p1v;
    vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
    static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E );

    for( int y = 0; y < height; y++ )
    {
        int x;
        /* Three consecutive source rows feed one output row. */
        uint8_t *src1 = src0+src_stride;
        uint8_t *src2 = src1+src_stride;

        src0v = vec_ld(0, src0);
        src1v = vec_ld(0, src1);
        src2v = vec_ld(0, src2);

        /* Vertical averages for the top (rows 0+1) and bottom
         * (rows 1+2) pixel pairs. */
        avg0v = vec_avg(src0v, src1v);
        avg1v = vec_avg(src1v, src2v);

        for( x = 0; x < w; x++ )
        {
            lv = vec_ld(16*(x*2+1), src0);
            src1v = vec_ld(16*(x*2+1), src1);
            avghv = vec_avg(lv, src1v);

            lv = vec_ld(16*(x*2+2), src0);
            src1p1v = vec_ld(16*(x*2+2), src1);
            avghp1v = vec_avg(lv, src1p1v);

            /* Horizontal averages of neighbouring vertical averages. */
            avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
            avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);

            /* Even bytes -> dst0, odd bytes -> dsth. */
            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);

            avg0v = avghp1v;

            /* Same scheme for the bottom pair -> dstv / dstc. */
            hv = vec_ld(16*(x*2+1), src2);
            avghv = vec_avg(src1v, hv);

            hv = vec_ld(16*(x*2+2), src2);
            avghp1v = vec_avg(src1p1v, hv);

            avgleftv = vec_avg(vec_sld(avg1v, avghv, 1), avg1v);
            avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);

            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);

            avg1v = avghp1v;

        }
        if( end )
        {
            /* Trailing partial chunk: compute one more vector's worth
             * and store only 8 bytes per plane via element stores. */
            lv = vec_ld(16*(x*2+1), src0);
            src1v = vec_ld(16*(x*2+1), src1);
            avghv = vec_avg(lv, src1v);

            lv = vec_ld(16*(x*2+1), src2);
            avghp1v = vec_avg(src1v, lv);

            avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
            avgrightv = vec_avg(vec_sld(avg1v, avghp1v, 1), avg1v);

            lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
            hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);

            vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dst0);
            vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dst0);
            vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dsth);
            vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dsth);

            lv = vec_sld(lv, lv, 8);
            hv = vec_sld(hv, hv, 8);

            vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dstv);
            vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dstv);
            vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dstc);
            vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dstc);
        }

        /* Two source rows consumed per output row. */
        src0 += src_stride*2;
        dst0 += dst_stride;
        dsth += dst_stride;
        dstv += dst_stride;
        dstc += dst_stride;
    }
}
796 #endif // !X264_HIGH_BIT_DEPTH
797
/* Register the AltiVec motion-compensation kernels in the function
 * table.  AltiVec paths exist only for 8-bit depth; at high bit depth
 * this is a no-op.  mc_chroma is left disabled (commented out) —
 * NOTE(review): presumably pending an update of the chroma kernels to
 * the NV12 pixel layout; confirm before enabling. */
void x264_mc_altivec_init( x264_mc_functions_t *pf )
{
#if !X264_HIGH_BIT_DEPTH
    pf->mc_luma   = mc_luma_altivec;
    pf->get_ref   = get_ref_altivec;
//  pf->mc_chroma = mc_chroma_altivec;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_altivec;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_altivec;

    pf->hpel_filter = x264_hpel_filter_altivec;
    pf->frame_init_lowres_core = frame_init_lowres_core_altivec;
#endif // !X264_HIGH_BIT_DEPTH
}