1 /*****************************************************************************
2 * ppccommon.h: h264 encoder
3 *****************************************************************************
4 * Copyright (C) 2003 Eric Petit <titer@m0k.org>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
19 *****************************************************************************/
21 /***********************************************************************
22 * For constant vectors, use parentheses on OS X and braces on Linux
23 **********************************************************************/
/***********************************************************************
 * Vector types
 **********************************************************************/
/* Short aliases for the verbose AltiVec vector element types.
 * These are plain textual macros (not typedefs) so they can be used
 * both in declarations and in cast expressions like (vec_u8_t) below. */
#define vec_u8_t vector unsigned char
#define vec_s8_t vector signed char
#define vec_u16_t vector unsigned short
#define vec_s16_t vector signed short
#define vec_u32_t vector unsigned int
#define vec_s32_t vector signed int
42 vector unsigned int v;
47 vector unsigned short v;
52 vector signed short v;
/***********************************************************************
 * Null vector
 **********************************************************************/
/* LOAD_ZERO declares a local constant all-zero vector named `zerov`.
 * It must appear in a function scope before any use of the zero_*v
 * macros below, which are simply `zerov` reinterpreted (cast) to the
 * various element types — no extra registers or loads are involved. */
#define LOAD_ZERO const vec_u8_t zerov = vec_splat_u8( 0 )
#define zero_u8v (vec_u8_t) zerov
#define zero_s8v (vec_s8_t) zerov
#define zero_u16v (vec_u16_t) zerov
#define zero_s16v (vec_s16_t) zerov
#define zero_u32v (vec_u32_t) zerov
#define zero_s32v (vec_s32_t) zerov
67 /***********************************************************************
68 * 8 <-> 16 bits conversions
69 **********************************************************************/
/* Widening: interleaving the zero vector with the data via vec_mergeh /
 * vec_mergel places a zero byte in front of each data byte (big-endian
 * lane order), i.e. it zero-extends u8 lanes to 16-bit lanes.
 * _h widens the high (first) 8 bytes, _l the low (last) 8 bytes.
 * The s16 variants produce the same bit pattern; values 0..255 are
 * non-negative, so zero extension is also correct sign extension here.
 * All of these require LOAD_ZERO earlier in the enclosing scope. */
#define vec_u8_to_u16_h(v) (vec_u16_t) vec_mergeh( zero_u8v, (vec_u8_t) v )
#define vec_u8_to_u16_l(v) (vec_u16_t) vec_mergel( zero_u8v, (vec_u8_t) v )
#define vec_u8_to_s16_h(v) (vec_s16_t) vec_mergeh( zero_u8v, (vec_u8_t) v )
#define vec_u8_to_s16_l(v) (vec_s16_t) vec_mergel( zero_u8v, (vec_u8_t) v )
/* Default widening uses the high half only. */
#define vec_u8_to_u16(v) vec_u8_to_u16_h(v)
#define vec_u8_to_s16(v) vec_u8_to_s16_h(v)
/* Narrowing back to u8 in the high half of the result:
 * vec_pack truncates each u16 lane; vec_packsu saturates s16 lanes to
 * the 0..255 range. The zero vector fills the unused low half. */
#define vec_u16_to_u8(v) vec_pack( v, zero_u16v )
#define vec_s16_to_u8(v) vec_packsu( v, zero_s16v )
81 /***********************************************************************
82 * PREP_LOAD: declares two vectors required to perform unaligned loads
83 * VEC_LOAD: loads n bytes from u8 * p into vector v of type t
84 **********************************************************************/
/* VEC_LOAD: unaligned load of n bytes (1 <= n <= 16) from u8 *p into
 * vector v of type t, using the classic AltiVec two-load-and-permute
 * sequence: load the two aligned 16-byte blocks covering p..p+n-1,
 * then shift the wanted bytes into place with a vec_lvsl permute mask.
 * v is reused to hold the permute mask between the two loads, so no
 * extra temporary is needed for it.
 * Requires the _hv/_lv temporaries declared by PREP_LOAD — presumably
 * defined just above this macro; not visible in this chunk, TODO confirm. */
#define VEC_LOAD( p, v, n, t ) \
_hv = vec_ld( 0, p ); \
v = (t) vec_lvsl( 0, p ); \
_lv = vec_ld( n - 1, p ); \
v = (t) vec_perm( _hv, _lv, (vec_u8_t) v )
94 /***********************************************************************
95 * PREP_STORE##n: declares required vectors to store n bytes to a
96 * potentially unaligned address
97 * VEC_STORE##n: stores n bytes from vector v to address p
98 **********************************************************************/
/* PREP_STORE16: declares the two scratch vectors used by VEC_STORE16
 * (in addition to _hv/_lv from PREP_LOAD). Invoke as `PREP_STORE16;` —
 * the macro body deliberately has no trailing semicolon.
 * NOTE(review): the trailing backslash continues the macro onto the
 * next source line — this relies on a blank line following the macro;
 * confirm one is present in the actual file. */
#define PREP_STORE16 \
vec_u8_t _tmp1v, _tmp2v \
/* VEC_STORE16: stores 16 bytes from vector v to a potentially
 * unaligned address p, as a read-modify-write of the two aligned
 * 16-byte blocks that the destination straddles:
 *   1. load both aligned blocks (_hv at offset 0, _lv at offset 15);
 *   2. with the vec_lvsl mask, gather into _tmp1v the neighbouring
 *      bytes that must be preserved around the destination;
 *   3. with the vec_lvsr mask, merge v with those preserved bytes and
 *      store both aligned blocks back.
 * Requires _hv/_lv (PREP_LOAD) and _tmp1v/_tmp2v (PREP_STORE16).
 * Not atomic: bytes outside p..p+15 are rewritten with their old
 * values, so concurrent writers to adjacent memory would race. */
#define VEC_STORE16( v, p ) \
_hv = vec_ld( 0, p ); \
_tmp2v = vec_lvsl( 0, p ); \
_lv = vec_ld( 15, p ); \
_tmp1v = vec_perm( _lv, _hv, _tmp2v ); \
_tmp2v = vec_lvsr( 0, p ); \
_lv = vec_perm( (vec_u8_t) v, _tmp1v, _tmp2v ); \
vec_st( _lv, 15, (uint8_t *) p ); \
_hv = vec_perm( _tmp1v, (vec_u8_t) v, _tmp2v ); \
vec_st( _hv, 0, (uint8_t *) p )
113 #define PREP_STORE8 \
115 vec_u8_t _tmp3v, _tmp4v; \
116 const vec_u8_t sel_h = \
117 (vec_u8_t) CV(-1,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0)
119 #define PREP_STORE8_HL \
121 const vec_u8_t sel_l = \
122 (vec_u8_t) CV(0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1)
124 #define VEC_STORE8 VEC_STORE8_H
/* VEC_STORE8_H: stores the high (first) 8 bytes of vector v to a
 * potentially unaligned address p. The vec_lvsr mask rotates v so its
 * bytes line up with p; sel_h (0xFF in the first 8 lanes, from
 * PREP_STORE8) is rotated the same way to build per-block byte-select
 * masks, and each of the two aligned 16-byte blocks covering p..p+7 is
 * loaded, merged with v via vec_sel, and stored back.
 * Requires _hv/_lv (PREP_LOAD), _tmp1v/_tmp2v (PREP_STORE16) and
 * _tmp3v/_tmp4v/sel_h (PREP_STORE8). Read-modify-write, not atomic. */
#define VEC_STORE8_H( v, p ) \
_tmp3v = vec_lvsr( 0, (uint8_t *) p ); \
_tmp4v = vec_perm( (vec_u8_t) v, (vec_u8_t) v, _tmp3v ); \
_lv = vec_ld( 7, (uint8_t *) p ); \
_tmp1v = vec_perm( sel_h, zero_u8v, _tmp3v ); \
_lv = vec_sel( _lv, _tmp4v, _tmp1v ); \
vec_st( _lv, 7, (uint8_t *) p ); \
_hv = vec_ld( 0, (uint8_t *) p ); \
_tmp2v = vec_perm( zero_u8v, sel_h, _tmp3v ); \
_hv = vec_sel( _hv, _tmp4v, _tmp2v ); \
vec_st( _hv, 0, (uint8_t *) p )
/* VEC_STORE8_L: same read-modify-write scheme as VEC_STORE8_H, but
 * stores the low (last) 8 bytes of vector v to p. The vec_lvsr offset
 * of 8 and the sel_l mask (0xFF in the last 8 lanes, from
 * PREP_STORE8_HL) select the low half of v instead of the high half.
 * Requires everything VEC_STORE8_H does, plus sel_l. */
#define VEC_STORE8_L( v, p ) \
_tmp3v = vec_lvsr( 8, (uint8_t *) p ); \
_tmp4v = vec_perm( (vec_u8_t) v, (vec_u8_t) v, _tmp3v ); \
_lv = vec_ld( 7, (uint8_t *) p ); \
_tmp1v = vec_perm( sel_l, zero_u8v, _tmp3v ); \
_lv = vec_sel( _lv, _tmp4v, _tmp1v ); \
vec_st( _lv, 7, (uint8_t *) p ); \
_hv = vec_ld( 0, (uint8_t *) p ); \
_tmp2v = vec_perm( zero_u8v, sel_l, _tmp3v ); \
_hv = vec_sel( _hv, _tmp4v, _tmp2v ); \
vec_st( _hv, 0, (uint8_t *) p )
150 #define PREP_STORE4 \
153 const vec_u8_t sel = \
154 (vec_u8_t) CV(-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0)
156 #define VEC_STORE4( v, p ) \
157 _tmp3v = vec_lvsr( 0, p ); \
158 v = vec_perm( v, v, _tmp3v ); \
159 _lv = vec_ld( 3, p ); \
160 _tmp1v = vec_perm( sel, zero_u8v, _tmp3v ); \
161 _lv = vec_sel( _lv, v, _tmp1v ); \
162 vec_st( _lv, 3, p ); \
163 _hv = vec_ld( 0, p ); \
164 _tmp2v = vec_perm( zero_u8v, sel, _tmp3v ); \
165 _hv = vec_sel( _hv, v, _tmp2v ); \
/***********************************************************************
 * VEC_TRANSPOSE_8
 ***********************************************************************
 * Transposes a 8x8 matrix of s16 vectors
 **********************************************************************/
/* Transposes the 8x8 s16 matrix held in rows a0..a7 into b0..b7 using
 * three rounds of high/low merges (a perfect-shuffle network: each
 * round interleaves element pairs at twice the previous granularity).
 * a0..a7 are clobbered (they hold the intermediate round); the final
 * result lands in b0..b7. In-place use (b == a) is NOT supported. */
#define VEC_TRANSPOSE_8(a0,a1,a2,a3,a4,a5,a6,a7,b0,b1,b2,b3,b4,b5,b6,b7) \
b0 = vec_mergeh( a0, a4 ); \
b1 = vec_mergel( a0, a4 ); \
b2 = vec_mergeh( a1, a5 ); \
b3 = vec_mergel( a1, a5 ); \
b4 = vec_mergeh( a2, a6 ); \
b5 = vec_mergel( a2, a6 ); \
b6 = vec_mergeh( a3, a7 ); \
b7 = vec_mergel( a3, a7 ); \
a0 = vec_mergeh( b0, b4 ); \
a1 = vec_mergel( b0, b4 ); \
a2 = vec_mergeh( b1, b5 ); \
a3 = vec_mergel( b1, b5 ); \
a4 = vec_mergeh( b2, b6 ); \
a5 = vec_mergel( b2, b6 ); \
a6 = vec_mergeh( b3, b7 ); \
a7 = vec_mergel( b3, b7 ); \
b0 = vec_mergeh( a0, a4 ); \
b1 = vec_mergel( a0, a4 ); \
b2 = vec_mergeh( a1, a5 ); \
b3 = vec_mergel( a1, a5 ); \
b4 = vec_mergeh( a2, a6 ); \
b5 = vec_mergel( a2, a6 ); \
b6 = vec_mergeh( a3, a7 ); \
b7 = vec_mergel( a3, a7 )
/***********************************************************************
 * VEC_TRANSPOSE_4
 ***********************************************************************
 * Transposes a 4x4 matrix of s16 vectors.
 * Actually source and destination are 8x4. The low elements of the
 * source are discarded and the low elements of the destination mustn't
 * be modified.
 **********************************************************************/
/* Transposes the 4x4 s16 matrix held in the high halves of a0..a3 into
 * the high halves of b0..b3, via two merge rounds. In the first round
 * the second merge operand (a0) only supplies don't-care filler lanes
 * that end up in the unused low halves. a0..a3 are clobbered; the low
 * halves of the results are garbage and must be ignored by the caller.
 * In-place use (b == a) is NOT supported. */
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
b0 = vec_mergeh( a0, a0 ); \
b1 = vec_mergeh( a1, a0 ); \
b2 = vec_mergeh( a2, a0 ); \
b3 = vec_mergeh( a3, a0 ); \
a0 = vec_mergeh( b0, b2 ); \
a1 = vec_mergel( b0, b2 ); \
a2 = vec_mergeh( b1, b3 ); \
a3 = vec_mergel( b1, b3 ); \
b0 = vec_mergeh( a0, a2 ); \
b1 = vec_mergel( a0, a2 ); \
b2 = vec_mergeh( a1, a3 ); \
b3 = vec_mergel( a1, a3 )
/***********************************************************************
 * VEC_DIFF_H
 ***********************************************************************
 * Loads n bytes from p1 and p2, does the diff of the high elements
 * into d, and increments p1 and p2 by i1 and i2
 **********************************************************************/
234 vec_s16_t pix1v, pix2v;
236 #define VEC_DIFF_H(p1,i1,p2,i2,n,d) \
237 VEC_LOAD( p1, pix1v, n, vec_s16_t ); \
238 pix1v = vec_u8_to_s16( pix1v ); \
239 VEC_LOAD( p2, pix2v, n, vec_s16_t ); \
240 pix2v = vec_u8_to_s16( pix2v ); \
241 d = vec_sub( pix1v, pix2v ); \
/***********************************************************************
 * VEC_DIFF_HL
 ***********************************************************************
 * Loads 16 bytes from p1 and p2, does the diff of the high elements
 * into dh and the diff of the low elements into dl, and increments p1
 * and p2 by i1 and i2
 **********************************************************************/
256 #define VEC_DIFF_HL(p1,i1,p2,i2,dh,dl) \
257 VEC_LOAD( p1, pix1v, 16, vec_s16_t ); \
258 temp0v = vec_u8_to_s16_h( pix1v ); \
259 temp1v = vec_u8_to_s16_l( pix1v ); \
260 VEC_LOAD( p2, pix2v, 16, vec_s16_t ); \
261 temp2v = vec_u8_to_s16_h( pix2v ); \
262 temp3v = vec_u8_to_s16_l( pix2v ); \
263 dh = vec_sub( temp0v, temp2v ); \
264 dl = vec_sub( temp1v, temp3v ); \
268 /***********************************************************************
269 * VEC_DIFF_H_8BYTE_ALIGNED
270 ***********************************************************************
275 * Loads n bytes from p1 and p2, do the diff of the high elements into
276 * d, increments p1 and p2 by i1 and i2
277 * Slightly faster when we know we are loading/diffing 8bytes which
278 * are 8 byte aligned. Reduces need for two loads and two vec_lvsl()'s
279 **********************************************************************/
280 #define PREP_DIFF_8BYTEALIGNED \
282 vec_s16_t pix1v, pix2v; \
283 vec_u8_t pix1v8, pix2v8; \
284 vec_u8_t permPix1, permPix2; \
285 permPix1 = vec_lvsl(0, pix1); \
286 permPix2 = vec_lvsl(0, pix2); \
288 #define VEC_DIFF_H_8BYTE_ALIGNED(p1,i1,p2,i2,n,d) \
289 pix1v8 = vec_perm(vec_ld(0,p1), zero_u8v, permPix1); \
290 pix2v8 = vec_perm(vec_ld(0, p2), zero_u8v, permPix2); \
291 pix1v = vec_u8_to_s16( pix1v8 ); \
292 pix2v = vec_u8_to_s16( pix2v8 ); \
293 d = vec_sub( pix1v, pix2v); \