1 /*****************************************************************************
2 * ppccommon.h: h264 encoder
3 *****************************************************************************
4 * Copyright (C) 2003 Laurent Aimar
5 * $Id: ppccommon.h,v 1.1 2004/06/03 19:27:07 fenrir Exp $
7 * Authors: Eric Petit <titer@m0k.org>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
22 *****************************************************************************/
/***********************************************************************
 * For constant vectors, use parentheses on OS X and braces on Linux
 * NOTE(review): the platform-conditional CV(...) wrapper this comment
 * refers to (used by PREP_STORE8/PREP_STORE4 below) is not defined
 * anywhere in this chunk — confirm it exists, or restore it here.
 **********************************************************************/
/***********************************************************************
 * Vector types: short aliases for the AltiVec "vector" element types
 **********************************************************************/
#define vec_u8_t vector unsigned char
#define vec_s8_t vector signed char
#define vec_u16_t vector unsigned short
#define vec_s16_t vector signed short
#define vec_u32_t vector unsigned int
#define vec_s32_t vector signed int
/***********************************************************************
 * Null vector: LOAD_ZERO declares `zerov`, an all-zero byte vector;
 * the zero_*v aliases are typed reinterpretations of it
 **********************************************************************/
#define LOAD_ZERO const vec_u8_t zerov = vec_splat_u8( 0 )

/* Typed views of zerov (casts only — no extra registers used) */
#define zero_u8v (vec_u8_t) zerov
#define zero_s8v (vec_s8_t) zerov
#define zero_u16v (vec_u16_t) zerov
#define zero_s16v (vec_s16_t) zerov
#define zero_u32v (vec_u32_t) zerov
#define zero_s32v (vec_s32_t) zerov
/***********************************************************************
 * 8 <-> 16 bits conversions
 **********************************************************************/
/* Widen bytes to 16-bit lanes by interleaving a zero byte in front of
 * each data byte (_h = high 8 bytes of v, _l = low 8 bytes).  On
 * big-endian AltiVec the zero byte lands in the most significant
 * position, i.e. this is zero-extension.  LOAD_ZERO must be in scope. */
#define vec_u8_to_u16_h(v) (vec_u16_t) vec_mergeh( zero_u8v, (vec_u8_t) v )
#define vec_u8_to_u16_l(v) (vec_u16_t) vec_mergel( zero_u8v, (vec_u8_t) v )
#define vec_u8_to_s16_h(v) (vec_s16_t) vec_mergeh( zero_u8v, (vec_u8_t) v )
#define vec_u8_to_s16_l(v) (vec_s16_t) vec_mergel( zero_u8v, (vec_u8_t) v )

/* Default conversion: widen the high (first) 8 bytes only */
#define vec_u8_to_u16(v) vec_u8_to_u16_h(v)
#define vec_u8_to_s16(v) vec_u8_to_s16_h(v)

/* Narrow 16-bit lanes back to bytes.  vec_pack truncates (modulo),
 * it does NOT saturate — callers must ensure values fit in 8 bits. */
#define vec_u16_to_u8(v) vec_pack( v, zero_u16v )
#define vec_s16_to_u8(v) vec_pack( v, zero_u16v )
/***********************************************************************
 * PREP_LOAD: declares two vectors required to perform unaligned loads
 * VEC_LOAD: loads n bytes from address p into vector v
 **********************************************************************/
/* PREP_LOAD declares the scratch vectors _hv/_lv used by VEC_LOAD and
 * by the VEC_STORE* macros below; it was referenced by this comment
 * (and required by every user of these macros) but its definition had
 * been lost. */
#define PREP_LOAD \
    vec_u8_t _hv, _lv

/* Unaligned load: fetch the two aligned 16-byte blocks that cover
 * p .. p+n-1, then extract the wanted bytes with vec_perm, whose
 * control vector (built by vec_lvsl from the alignment of p) is
 * temporarily parked in v itself. */
#define VEC_LOAD( p, v, n ) \
    _hv = vec_ld( 0, p ); \
    v = vec_lvsl( 0, p ); \
    _lv = vec_ld( n - 1, p ); \
    v = vec_perm( _hv, _lv, v )
82 /***********************************************************************
83 * PREP_STORE##n: declares required vectors to store n bytes to a
84 * potentially unaligned address
85 * VEC_STORE##n: stores n bytes from vector v to address p
86 **********************************************************************/
/* PREP_STORE16: scratch vectors for a 16-byte unaligned store.
 * _hv/_lv additionally come from PREP_LOAD, which must also be in
 * scope.  (The stray trailing backslash that spliced the next #define
 * into this macro has been removed.) */
#define PREP_STORE16 \
    vec_u8_t _tmp1v, _tmp2v

/* VEC_STORE16: stores the 16 bytes of v to a possibly unaligned
 * address p.  Reads the two aligned blocks covering p .. p+15, merges
 * v into them so the surrounding bytes are preserved, and writes both
 * blocks back.  The original was truncated after computing _hv: the
 * final vec_st of the low aligned block was missing, so the first
 * bytes of the destination were never written — restored here. */
#define VEC_STORE16( v, p ) \
    _hv = vec_ld( 0, p ); \
    _tmp2v = vec_lvsl( 0, p ); \
    _lv = vec_ld( 15, p ); \
    _tmp1v = vec_perm( _lv, _hv, _tmp2v ); \
    _tmp2v = vec_lvsr( 0, p ); \
    _lv = vec_perm( v, _tmp1v, _tmp2v ); \
    vec_st( _lv, 15, p ); \
    _hv = vec_perm( _tmp1v, v, _tmp2v ); \
    vec_st( _hv, 0, p )
/* PREP_STORE8: scratch for an 8-byte unaligned store.  The original
 * declared only `sel` although VEC_STORE8 also uses _tmp1v/_tmp2v
 * (via PREP_STORE16) and _tmp3v — restored.  `sel` selects the first
 * 8 byte lanes (CV() is the platform constant-vector wrapper). */
#define PREP_STORE8 \
    PREP_STORE16; \
    vec_u8_t _tmp3v; \
    const vec_u8_t sel = \
        (vec_u8_t) CV(-1,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0)

/* VEC_STORE8: stores the first 8 bytes of v to a possibly unaligned
 * address p, preserving the neighbouring bytes via vec_sel with the
 * rotated `sel` mask.  As with VEC_STORE16, the final vec_st of the
 * low aligned block had been truncated (trailing backslash with no
 * continuation) — restored, otherwise the merged low block computed
 * into _hv was never written. */
#define VEC_STORE8( v, p ) \
    _tmp3v = vec_lvsr( 0, p ); \
    v = vec_perm( v, v, _tmp3v ); \
    _lv = vec_ld( 7, p ); \
    _tmp1v = vec_perm( sel, zero_u8v, _tmp3v ); \
    _lv = vec_sel( _lv, v, _tmp1v ); \
    vec_st( _lv, 7, p ); \
    _hv = vec_ld( 0, p ); \
    _tmp2v = vec_perm( zero_u8v, sel, _tmp3v ); \
    _hv = vec_sel( _hv, v, _tmp2v ); \
    vec_st( _hv, 0, p )
/* PREP_STORE4: scratch for a 4-byte unaligned store.  Same fix as
 * PREP_STORE8: the declarations of _tmp1v/_tmp2v (via PREP_STORE16)
 * and _tmp3v were missing.  `sel` selects the first 4 byte lanes. */
#define PREP_STORE4 \
    PREP_STORE16; \
    vec_u8_t _tmp3v; \
    const vec_u8_t sel = \
        (vec_u8_t) CV(-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0)

/* VEC_STORE4: stores the first 4 bytes of v to a possibly unaligned
 * address p, preserving the neighbouring bytes.  The truncated final
 * vec_st of the low aligned block is restored (same defect as
 * VEC_STORE16/VEC_STORE8). */
#define VEC_STORE4( v, p ) \
    _tmp3v = vec_lvsr( 0, p ); \
    v = vec_perm( v, v, _tmp3v ); \
    _lv = vec_ld( 3, p ); \
    _tmp1v = vec_perm( sel, zero_u8v, _tmp3v ); \
    _lv = vec_sel( _lv, v, _tmp1v ); \
    vec_st( _lv, 3, p ); \
    _hv = vec_ld( 0, p ); \
    _tmp2v = vec_perm( zero_u8v, sel, _tmp3v ); \
    _hv = vec_sel( _hv, v, _tmp2v ); \
    vec_st( _hv, 0, p )
/***********************************************************************
 * 8x8 matrix transpose (rows a0..a7 in, transposed rows b0..b7 out)
 ***********************************************************************
 * Classic three-round merge network: each round interleaves pairs of
 * vectors with vec_mergeh/vec_mergel, halving the stride (4, 2, 1).
 * a0..a7 are clobbered (used as intermediates in round 2).
 * NOTE(review): with 8 vectors of 8 elements each, this presumably
 * operates on 16-bit lanes (vec_s16_t) — confirm at call sites.
 **********************************************************************/
#define vec_transpose8x8(a0,a1,a2,a3,a4,a5,a6,a7,b0,b1,b2,b3,b4,b5,b6,b7) \
    b0 = vec_mergeh( a0, a4 ); \
    b1 = vec_mergel( a0, a4 ); \
    b2 = vec_mergeh( a1, a5 ); \
    b3 = vec_mergel( a1, a5 ); \
    b4 = vec_mergeh( a2, a6 ); \
    b5 = vec_mergel( a2, a6 ); \
    b6 = vec_mergeh( a3, a7 ); \
    b7 = vec_mergel( a3, a7 ); \
    a0 = vec_mergeh( b0, b4 ); \
    a1 = vec_mergel( b0, b4 ); \
    a2 = vec_mergeh( b1, b5 ); \
    a3 = vec_mergel( b1, b5 ); \
    a4 = vec_mergeh( b2, b6 ); \
    a5 = vec_mergel( b2, b6 ); \
    a6 = vec_mergeh( b3, b7 ); \
    a7 = vec_mergel( b3, b7 ); \
    b0 = vec_mergeh( a0, a4 ); \
    b1 = vec_mergel( a0, a4 ); \
    b2 = vec_mergeh( a1, a5 ); \
    b3 = vec_mergel( a1, a5 ); \
    b4 = vec_mergeh( a2, a6 ); \
    b5 = vec_mergel( a2, a6 ); \
    b6 = vec_mergeh( a3, a7 ); \
    b7 = vec_mergel( a3, a7 )
/***********************************************************************
 * 4x4 matrix transpose (rows a0..a3 in, transposed rows b0..b3 out)
 ***********************************************************************
 * Only the high (first) half of each input vector is significant: the
 * first round uses vec_mergeh exclusively, discarding the low halves
 * (b0 pairs a0 with itself; b1..b3 pad with a0's lanes as filler).
 * Two further merge rounds complete the transpose.  a0..a3 are
 * clobbered.  NOTE(review): presumably 16-bit lanes, with the 4x4
 * block in the first four elements of each vector — confirm at
 * call sites.
 **********************************************************************/
#define vec_transpose4x4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )
/***********************************************************************
 * 4-point Hadamard transform ("hadamar" spelling kept for existing
 * callers), one butterfly stage per element lane:
 ***********************************************************************
 * b[0] = a[0] + a[1] + a[2] + a[3]
 * b[1] = a[0] + a[1] - a[2] - a[3]
 * b[2] = a[0] - a[1] - a[2] + a[3]
 * b[3] = a[0] - a[1] + a[2] - a[3]
 ***********************************************************************
 * Implemented as two butterfly levels; b2/b3 double as temporaries
 * for the first-level sums, and a0/a2 are clobbered (first-level
 * differences).  a1/a3 are left untouched.
 **********************************************************************/
#define vec_hadamar(a0,a1,a2,a3,b0,b1,b2,b3) \
    b2 = vec_add( a0, a1 ); \
    b3 = vec_add( a2, a3 ); \
    a0 = vec_sub( a0, a1 ); \
    a2 = vec_sub( a2, a3 ); \
    b0 = vec_add( b2, b3 ); \
    b1 = vec_sub( b2, b3 ); \
    b2 = vec_sub( a0, a2 ); \
    b3 = vec_add( a0, a2 )