1 /*****************************************************************************
2 * ppccommon.h: ppc utility macros
3 *****************************************************************************
4 * Copyright (C) 2003-2015 x264 project
6 * Authors: Eric Petit <eric.petit@lapsus.org>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
22 * This program is also available under a commercial proprietary license.
23 * For more information, contact us at licensing@x264.com.
24 *****************************************************************************/
30 /***********************************************************************
31 * For constant vectors, use parentheses on OS X and braces on Linux
32 **********************************************************************/
/* CV(...) wraps a constant-vector initializer: old Apple GCC (< 4) required
 * parentheses around vector literals, everything else uses braces.
 * Restored: the original #if had no body and no matching #endif, and CV is
 * required by PREP_STORE4 below. */
#if defined(__APPLE__) && __GNUC__ < 4
#define CV(a...) (a)
#else
#define CV(a...) {a}
#endif
/***********************************************************************
 * Vector types
 **********************************************************************/
/* Shorthand aliases for the AltiVec vector types used throughout this file. */
#define vec_u8_t vector unsigned char
#define vec_s8_t vector signed char
#define vec_u16_t vector unsigned short
#define vec_s16_t vector signed short
#define vec_u32_t vector unsigned int
#define vec_s32_t vector signed int
/***********************************************************************
 * Null vector
 **********************************************************************/
/* Declares one local all-zero vector; the zero_*v names below reinterpret
 * it as each element type so no extra register is needed.  LOAD_ZERO must
 * appear in a scope before any macro that references a zero_*v alias. */
#define LOAD_ZERO const vec_u8_t zerov = vec_splat_u8( 0 )

#define zero_u8v (vec_u8_t) zerov
#define zero_s8v (vec_s8_t) zerov
#define zero_u16v (vec_u16_t) zerov
#define zero_s16v (vec_s16_t) zerov
#define zero_u32v (vec_u32_t) zerov
#define zero_s32v (vec_s32_t) zerov
81 /***********************************************************************
82 * 8 <-> 16 bits conversions
83 **********************************************************************/
/* Widen u8 -> u16/s16 by interleaving with the zero vector.  The zero bytes
 * must land on the opposite side depending on endianness so the value bytes
 * end up in the numerically correct lanes.
 * Restored: the two groups of definitions were merged without the #else /
 * #endif, leaving an unterminated #ifdef and duplicate definitions. */
#ifdef WORDS_BIGENDIAN
#define vec_u8_to_u16_h(v) (vec_u16_t) vec_mergeh( zero_u8v, (vec_u8_t) v )
#define vec_u8_to_u16_l(v) (vec_u16_t) vec_mergel( zero_u8v, (vec_u8_t) v )
#define vec_u8_to_s16_h(v) (vec_s16_t) vec_mergeh( zero_u8v, (vec_u8_t) v )
#define vec_u8_to_s16_l(v) (vec_s16_t) vec_mergel( zero_u8v, (vec_u8_t) v )
#else
#define vec_u8_to_u16_h(v) (vec_u16_t) vec_mergeh( (vec_u8_t) v, zero_u8v )
#define vec_u8_to_u16_l(v) (vec_u16_t) vec_mergel( (vec_u8_t) v, zero_u8v )
#define vec_u8_to_s16_h(v) (vec_s16_t) vec_mergeh( (vec_u8_t) v, zero_u8v )
#define vec_u8_to_s16_l(v) (vec_s16_t) vec_mergel( (vec_u8_t) v, zero_u8v )
#endif

/* Default conversions operate on the high half of the source vector. */
#define vec_u8_to_u16(v) vec_u8_to_u16_h(v)
#define vec_u8_to_s16(v) vec_u8_to_s16_h(v)

/* Narrow 16 -> 8, packing against a zero vector in the low half. */
#define vec_u16_to_u8(v) vec_pack( v, zero_u16v )
#define vec_s16_to_u8(v) vec_packsu( v, zero_s16v )
103 /***********************************************************************
104 * 16 <-> 32 bits conversions
105 **********************************************************************/
/* Widen u16 -> u32/s32 by interleaving with the zero vector, mirroring the
 * 8 <-> 16 conversions above.
 * Restored: the two groups of definitions were merged without the #else /
 * #endif, leaving an unterminated #ifdef and duplicate definitions. */
#ifdef WORDS_BIGENDIAN
#define vec_u16_to_u32_h(v) (vec_u32_t) vec_mergeh( zero_u16v, (vec_u16_t) v )
#define vec_u16_to_u32_l(v) (vec_u32_t) vec_mergel( zero_u16v, (vec_u16_t) v )
#define vec_u16_to_s32_h(v) (vec_s32_t) vec_mergeh( zero_u16v, (vec_u16_t) v )
#define vec_u16_to_s32_l(v) (vec_s32_t) vec_mergel( zero_u16v, (vec_u16_t) v )
#else
#define vec_u16_to_u32_h(v) (vec_u32_t) vec_mergeh( (vec_u16_t) v, zero_u16v )
#define vec_u16_to_u32_l(v) (vec_u32_t) vec_mergel( (vec_u16_t) v, zero_u16v )
#define vec_u16_to_s32_h(v) (vec_s32_t) vec_mergeh( (vec_u16_t) v, zero_u16v )
#define vec_u16_to_s32_l(v) (vec_s32_t) vec_mergel( (vec_u16_t) v, zero_u16v )
#endif

/* Default conversions operate on the high half of the source vector. */
#define vec_u16_to_u32(v) vec_u16_to_u32_h(v)
#define vec_u16_to_s32(v) vec_u16_to_s32_h(v)

/* Narrow 32 -> 16, packing against a zero vector in the low half. */
#define vec_u32_to_u16(v) vec_pack( v, zero_u32v )
#define vec_s32_to_u16(v) vec_packsu( v, zero_s32v )
/***********************************************************************
 * PREP_LOAD: declares two vectors required to perform unaligned loads
 * VEC_LOAD: loads n bytes from u8 * p into vector v of type t where o is from original src offset
 * VEC_LOAD_G: loads n bytes from u8 * p into vector v of type t - use when offset is not known
 * VEC_LOAD_OFFSET: as above, but with offset vector known in advance
 **********************************************************************/
/* Declares the two scratch vectors (_hv = high/aligned part, _lv = low part)
 * required by every VEC_LOAD* macro below.
 * Restored: the comment above documents PREP_LOAD and VEC_LOAD_G/VEC_LOAD
 * reference _hv/_lv, but the macro itself was missing. */
#define PREP_LOAD \
    vec_u8_t _hv, _lv

/* Declares the permute vector for unaligned loads from src; consumed by
 * VEC_LOAD / VEC_LOAD_PARTIAL through the _##g##_ token paste. */
#define PREP_LOAD_SRC( src ) \
    vec_u8_t _##src##_ = vec_lvsl(0, src)
/* Unaligned load when the source offset is not known in advance: computes
 * the permute vector with vec_lvsl at use time (reusing v as temporary),
 * then merges the two aligned vectors covering p..p+n-1.
 * Clobbers _hv/_lv (declared by PREP_LOAD). */
#define VEC_LOAD_G( p, v, n, t ) \
    _hv = vec_ld( 0, p ); \
    v = (t) vec_lvsl( 0, p ); \
    _lv = vec_ld( n - 1, p ); \
    v = (t) vec_perm( _hv, _lv, (vec_u8_t) v )
/* Unaligned load using the permute vector _##g##_ precomputed by
 * PREP_LOAD_SRC( g ).  Loads the two aligned vectors covering p..p+n-1
 * and permutes the wanted n bytes to the front.
 * Clobbers _hv/_lv (declared by PREP_LOAD). */
#define VEC_LOAD( p, v, n, t, g ) \
    _hv = vec_ld( 0, p ); \
    _lv = vec_ld( n - 1, p ); \
    v = (t) vec_perm( _hv, _lv, (vec_u8_t) _##g##_ )
/* Same as VEC_LOAD, but the permute/offset vector o is passed directly
 * instead of being derived from a PREP_LOAD_SRC name. */
#define VEC_LOAD_OFFSET( p, v, n, t, o ) \
    _hv = vec_ld( 0, p); \
    _lv = vec_ld( n - 1, p ); \
    v = (t) vec_perm( _hv, _lv, (vec_u8_t) o )
/* Single-aligned-vector variant: performs only one vec_ld and permutes
 * within it.  NOTE(review): this only yields the right bytes when the n
 * requested bytes do not cross past the first aligned 16-byte vector at
 * p — caller must guarantee this; confirm against call sites. */
#define VEC_LOAD_PARTIAL( p, v, n, t, g) \
    _hv = vec_ld( 0, p); \
    v = (t) vec_perm( _hv, _hv, (vec_u8_t) _##g##_ )
158 /***********************************************************************
159 * PREP_STORE##n: declares required vectors to store n bytes to a
160 * potentially unaligned address
161 * VEC_STORE##n: stores n bytes from vector v to address p
162 **********************************************************************/
/* Declares the scratch vector used by VEC_STORE16 (and, via PREP_STORE4,
 * by VEC_STORE4).
 * Restored: the original had a dangling trailing backslash with no body,
 * which would have spliced the next #define into this macro's replacement
 * list and left PREP_STORE16_DST undefined. */
#define PREP_STORE16 \
    vec_u8_t _tmp1v

/* Declares the left/right permute vectors for unaligned stores to dst;
 * consumed by VEC_STORE16 through the _##o##l_ / _##o##r_ token pastes. */
#define PREP_STORE16_DST( dst ) \
    vec_u8_t _##dst##l_ = vec_lvsl(0, dst); \
    vec_u8_t _##dst##r_ = vec_lvsr(0, dst);
/* Stores 16 bytes from v to the potentially unaligned address p.
 * Reads the two aligned vectors covering p..p+15, merges v into them with
 * the permute vectors declared by PREP_STORE16_DST( o ), and writes both
 * aligned vectors back, preserving the surrounding bytes.
 * Clobbers _hv/_lv (PREP_LOAD) and _tmp1v (PREP_STORE16). */
#define VEC_STORE16( v, p, o ) \
    _hv = vec_ld( 0, p ); \
    _lv = vec_ld( 15, p ); \
    _tmp1v = vec_perm( _lv, _hv, _##o##l_ ); \
    _lv = vec_perm( (vec_u8_t) v, _tmp1v, _##o##r_ ); \
    vec_st( _lv, 15, (uint8_t *) p ); \
    _hv = vec_perm( _tmp1v, (vec_u8_t) v, _##o##r_ ); \
    vec_st( _hv, 0, (uint8_t *) p )
/* Declares the scratch vector used by VEC_STORE8.
 * Restored: the original had a dangling trailing backslash with no body,
 * which would have spliced the VEC_STORE8 #define into this macro. */
#define PREP_STORE8 \
    vec_u8_t _tmp3v

/* Stores the first 8 bytes of v to p as two 4-byte element stores.
 * NOTE(review): vec_ste stores to a 4-byte-aligned address derived from
 * p — assumes p is at least 4-byte aligned; confirm against callers. */
#define VEC_STORE8( v, p ) \
    _tmp3v = vec_lvsl(0, p); \
    v = vec_perm(v, v, _tmp3v); \
    vec_ste((vec_u32_t)v,0,(uint32_t*)p); \
    vec_ste((vec_u32_t)v,4,(uint32_t*)p)
/* Declares everything VEC_STORE4 needs: _tmp1v (via PREP_STORE16), the
 * extra scratch vectors, and the byte-select mask for the first 4 lanes.
 * Restored: the PREP_STORE16; line was missing, leaving _tmp1v (used by
 * VEC_STORE4) undeclared. */
#define PREP_STORE4 \
    PREP_STORE16; \
    vec_u8_t _tmp2v, _tmp3v; \
    const vec_u8_t sel = \
        (vec_u8_t) CV(-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0)
/* Stores the first 4 bytes of v to the potentially unaligned address p,
 * using vec_sel with the sel mask (PREP_STORE4) so surrounding bytes in
 * the two covering aligned vectors are preserved.
 * Fixed: the final vec_st of _hv was missing (the computed low-side merge
 * was never written back) and the last line's trailing backslash spliced
 * the following comment into the macro. */
#define VEC_STORE4( v, p ) \
    _tmp3v = vec_lvsr( 0, p ); \
    v = vec_perm( v, v, _tmp3v ); \
    _lv = vec_ld( 3, p ); \
    _tmp1v = vec_perm( sel, zero_u8v, _tmp3v ); \
    _lv = vec_sel( _lv, v, _tmp1v ); \
    vec_st( _lv, 3, p ); \
    _hv = vec_ld( 0, p ); \
    _tmp2v = vec_perm( zero_u8v, sel, _tmp3v ); \
    _hv = vec_sel( _hv, v, _tmp2v ); \
    vec_st( _hv, 0, p )
/***********************************************************************
 * VEC_TRANSPOSE_8
 ***********************************************************************
 * Transposes a 8x8 matrix of s16 vectors
 **********************************************************************/
/* Three rounds of mergeh/mergel interleaves; a0..a7 are the input rows
 * (clobbered as intermediates in round 2), b0..b7 receive the transposed
 * rows.  a and b names must be distinct variables. */
#define VEC_TRANSPOSE_8(a0,a1,a2,a3,a4,a5,a6,a7,b0,b1,b2,b3,b4,b5,b6,b7) \
    /* round 1: interleave rows 4 apart */ \
    b0 = vec_mergeh( a0, a4 ); \
    b1 = vec_mergel( a0, a4 ); \
    b2 = vec_mergeh( a1, a5 ); \
    b3 = vec_mergel( a1, a5 ); \
    b4 = vec_mergeh( a2, a6 ); \
    b5 = vec_mergel( a2, a6 ); \
    b6 = vec_mergeh( a3, a7 ); \
    b7 = vec_mergel( a3, a7 ); \
    /* round 2: interleave the intermediates back into a0..a7 */ \
    a0 = vec_mergeh( b0, b4 ); \
    a1 = vec_mergel( b0, b4 ); \
    a2 = vec_mergeh( b1, b5 ); \
    a3 = vec_mergel( b1, b5 ); \
    a4 = vec_mergeh( b2, b6 ); \
    a5 = vec_mergel( b2, b6 ); \
    a6 = vec_mergeh( b3, b7 ); \
    a7 = vec_mergel( b3, b7 ); \
    /* round 3: final interleave produces the transposed rows in b0..b7 */ \
    b0 = vec_mergeh( a0, a4 ); \
    b1 = vec_mergel( a0, a4 ); \
    b2 = vec_mergeh( a1, a5 ); \
    b3 = vec_mergel( a1, a5 ); \
    b4 = vec_mergeh( a2, a6 ); \
    b5 = vec_mergel( a2, a6 ); \
    b6 = vec_mergeh( a3, a7 ); \
    b7 = vec_mergel( a3, a7 )
/***********************************************************************
 * VEC_TRANSPOSE_4
 ***********************************************************************
 * Transposes a 4x4 matrix of s16 vectors.
 * Actually source and destination are 8x4. The low elements of the
 * source are discarded and the low elements of the destination mustn't
 * be used.
 **********************************************************************/
/* Two-and-a-half rounds of mergeh/mergel; only the high halves of a0..a3
 * are used, and only the high halves of b0..b3 are meaningful afterwards
 * (see comment block above).  a0..a3 are clobbered as intermediates. */
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    /* round 1: duplicate/interleave the high halves (a0 is a don't-care
     * second operand for b1..b3) */ \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    /* round 2 */ \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    /* round 3: transposed rows land in the high halves of b0..b3 */ \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )
/***********************************************************************
 * VEC_DIFF_H
 ***********************************************************************
 * p1, p2:    u8 *
 * i1, i2, n: int
 * d:         s16v
 *
 * Loads n bytes from p1 and p2, do the diff of the high elements into
 * d, increments p1 and p2 by i1 and i2 into known offset g
 **********************************************************************/
/* Declares everything the VEC_DIFF_H* macros need: the zero vector (used
 * inside vec_u8_to_s16), the _hv/_lv load scratch vectors (used by
 * VEC_LOAD*), and the two s16 pixel vectors.
 * Restored: only the bare pix1v/pix2v declaration survived here, which as
 * a stray file-scope statement would not even compile. */
#define PREP_DIFF \
    LOAD_ZERO; \
    vec_u8_t _hv, _lv; \
    vec_s16_t pix1v, pix2v;
/* Loads n bytes from p1 and p2 (p2 through the precomputed permute vector
 * g), widens both to s16, and puts the difference in d.
 * Fixed: the p1/p2 pointer increments promised by the comment above were
 * missing, and the last line's trailing backslash would have spliced the
 * next #define into this macro. */
#define VEC_DIFF_H(p1,i1,p2,i2,n,d,g) \
    VEC_LOAD_PARTIAL( p1, pix1v, n, vec_s16_t, p1); \
    pix1v = vec_u8_to_s16( pix1v ); \
    VEC_LOAD( p2, pix2v, n, vec_s16_t, g); \
    pix2v = vec_u8_to_s16( pix2v ); \
    d = vec_sub( pix1v, pix2v ); \
    p1 += i1; \
    p2 += i2
/* As VEC_DIFF_H, but p1 is loaded with its own precomputed permute vector
 * g1 (single aligned load) while p2 uses g2.
 * Fixed: missing p1/p2 increments and dangling trailing backslash that
 * would have spliced the following comment into the macro. */
#define VEC_DIFF_H_OFFSET(p1,i1,p2,i2,n,d,g1,g2) \
    pix1v = (vec_s16_t)vec_perm( vec_ld( 0, p1 ), zero_u8v, _##g1##_ );\
    pix1v = vec_u8_to_s16( pix1v ); \
    VEC_LOAD( p2, pix2v, n, vec_s16_t, g2); \
    pix2v = vec_u8_to_s16( pix2v ); \
    d = vec_sub( pix1v, pix2v ); \
    p1 += i1; \
    p2 += i2
/***********************************************************************
 * VEC_DIFF_HL
 ***********************************************************************
 * Loads 16 bytes from p1 and p2, do the diff of the high elements into
 * dh, the diff of the low elements into dl, increments p1 and p2 by i1
 * and i2
 **********************************************************************/
/* Loads 16 bytes from p1 (assumed aligned: plain vec_ld) and p2 (unaligned,
 * via p2's precomputed permute vector), widens to s16 and diffs: high 8
 * elements into dh, low 8 into dl.
 * Requires temp0v..temp3v declared by the caller.
 * Fixed: the p1/p2 increments promised by the comment above were missing
 * and the last line's trailing backslash would have spliced the following
 * comment into the macro. */
#define VEC_DIFF_HL(p1,i1,p2,i2,dh,dl) \
    pix1v = (vec_s16_t)vec_ld(0, p1); \
    temp0v = vec_u8_to_s16_h( pix1v ); \
    temp1v = vec_u8_to_s16_l( pix1v ); \
    VEC_LOAD( p2, pix2v, 16, vec_s16_t, p2); \
    temp2v = vec_u8_to_s16_h( pix2v ); \
    temp3v = vec_u8_to_s16_l( pix2v ); \
    dh = vec_sub( temp0v, temp2v ); \
    dl = vec_sub( temp1v, temp3v ); \
    p1 += i1; \
    p2 += i2
319 /***********************************************************************
320 * VEC_DIFF_H_8BYTE_ALIGNED
321 ***********************************************************************
326 * Loads n bytes from p1 and p2, do the diff of the high elements into
327 * d, increments p1 and p2 by i1 and i2
328 * Slightly faster when we know we are loading/diffing 8bytes which
329 * are 8 byte aligned. Reduces need for two loads and two vec_lvsl()'s
330 **********************************************************************/
/* Declarations and permute-vector setup for VEC_DIFF_H_8BYTE_ALIGNED;
 * computes the permute vectors once up front (see comment above: avoids
 * per-iteration vec_lvsl calls).  Expects pix1/pix2 pointers in scope.
 * Fixed: the LOAD_ZERO; line was missing (VEC_DIFF_H_8BYTE_ALIGNED uses
 * zero_u8v), and the dangling backslash after the last line would have
 * spliced the next #define into this macro. */
#define PREP_DIFF_8BYTEALIGNED \
    LOAD_ZERO; \
    vec_s16_t pix1v, pix2v; \
    vec_u8_t pix1v8, pix2v8; \
    vec_u8_t permPix1, permPix2; \
    permPix1 = vec_lvsl(0, pix1); \
    permPix2 = vec_lvsl(0, pix2);
339 #define VEC_DIFF_H_8BYTE_ALIGNED(p1,i1,p2,i2,n,d) \
340 pix1v8 = vec_perm(vec_ld(0,p1), zero_u8v, permPix1); \
341 pix2v8 = vec_perm(vec_ld(0, p2), zero_u8v, permPix2); \
342 pix1v = vec_u8_to_s16( pix1v8 ); \
343 pix2v = vec_u8_to_s16( pix2v8 ); \
344 d = vec_sub( pix1v, pix2v); \