/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * Contains misc utility macros and inline functions.
 */
#ifndef AVUTIL_PPC_UTIL_ALTIVEC_H
#define AVUTIL_PPC_UTIL_ALTIVEC_H

#include <stdint.h>

#include "config.h"

#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "types_altivec.h"
// used to build register permutation vectors (vcprm)
// the 's' entries select words from the _s_econd vector
#define WORD_0 0x00,0x01,0x02,0x03
#define WORD_1 0x04,0x05,0x06,0x07
#define WORD_2 0x08,0x09,0x0a,0x0b
#define WORD_3 0x0c,0x0d,0x0e,0x0f
#define WORD_s0 0x10,0x11,0x12,0x13
#define WORD_s1 0x14,0x15,0x16,0x17
#define WORD_s2 0x18,0x19,0x1a,0x1b
#define WORD_s3 0x1c,0x1d,0x1e,0x1f

#define vcprm(a,b,c,d) (const vector unsigned char){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d}
#define vcii(a,b,c,d) (const vector float){FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d}
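
/* Illustrative sketch, not part of the original API: vcprm() builds the
 * byte-permute mask that vec_perm() consumes. vcprm(0, 1, s0, s1) selects
 * words 0-1 of the first operand followed by words 0-1 of the second,
 * merging the two upper halves into a single vector. */
static inline vector float vcprm_example(vector float v1, vector float v2)
{
    return vec_perm(v1, v2, vcprm(0, 1, s0, s1));
}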
// vcprmle is used to keep the same indices as in the SSE version.
// It is the same as vcprm, with the indices reversed
// ('le' is Little Endian).
#define vcprmle(a,b,c,d) vcprm(d,c,b,a)
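
/* A hedged example (helper name assumed): the SSE shuffle
 * _MM_SHUFFLE(3, 2, 1, 0) is the identity, and so is vcprmle(3, 2, 1, 0),
 * because vcprmle() reverses the little-endian-style indices back into the
 * big-endian word order that vcprm() expects. */
static inline vector float vcprmle_identity_example(vector float v)
{
    return vec_perm(v, v, vcprmle(3, 2, 1, 0));
}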
// used to build inverse/identity vectors (vcii)
// n is _n_egative, p is _p_ositive
#define FLOAT_n -1.
#define FLOAT_p 1.
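
/* Sketch of vcii() in use (example function, not from the original header):
 * vcii(p, p, n, n) expands to (const vector float){1., 1., -1., -1.}, a
 * sign mask handy for butterfly-style add/subtract passes. vec_madd()
 * multiplies element-wise, so the last two elements come out negated. */
static inline vector float vcii_negate_example(vector float v)
{
    // (vector float)vec_splat_u32(0) is an all-zero float vector
    return vec_madd(v, vcii(p, p, n, n), (vector float)vec_splat_u32(0));
}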
// Transpose an 8x8 matrix of 16-bit elements (in-place)
#define TRANSPOSE8(a,b,c,d,e,f,g,h) \
do { \
    vector signed short A1, B1, C1, D1, E1, F1, G1, H1; \
    vector signed short A2, B2, C2, D2, E2, F2, G2, H2; \
 \
    /* Pass 1: interleave rows four apart (a/e, b/f, c/g, d/h). */ \
    A1 = vec_mergeh (a, e); \
    B1 = vec_mergel (a, e); \
    C1 = vec_mergeh (b, f); \
    D1 = vec_mergel (b, f); \
    E1 = vec_mergeh (c, g); \
    F1 = vec_mergel (c, g); \
    G1 = vec_mergeh (d, h); \
    H1 = vec_mergel (d, h); \
 \
    /* Pass 2: interleave the intermediate rows two apart. */ \
    A2 = vec_mergeh (A1, E1); \
    B2 = vec_mergel (A1, E1); \
    C2 = vec_mergeh (B1, F1); \
    D2 = vec_mergel (B1, F1); \
    E2 = vec_mergeh (C1, G1); \
    F2 = vec_mergel (C1, G1); \
    G2 = vec_mergeh (D1, H1); \
    H2 = vec_mergel (D1, H1); \
 \
    /* Pass 3: final merge writes the transposed rows back. */ \
    a = vec_mergeh (A2, E2); \
    b = vec_mergel (A2, E2); \
    c = vec_mergeh (B2, F2); \
    d = vec_mergel (B2, F2); \
    e = vec_mergeh (C2, G2); \
    f = vec_mergel (C2, G2); \
    g = vec_mergeh (D2, H2); \
    h = vec_mergel (D2, H2); \
} while (0)
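
/* Usage sketch (assumed helper, not part of the original header): transpose
 * an 8x8 block of 16-bit coefficients held in eight row vectors, e.g. the
 * working registers of an IDCT. Each macro argument must be an lvalue,
 * since the macro writes the transposed rows back into its arguments. */
static inline void transpose8_example(vector signed short rows[8])
{
    TRANSPOSE8(rows[0], rows[1], rows[2], rows[3],
               rows[4], rows[5], rows[6], rows[7]);
}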
/** @brief loads unaligned vector @a *src with offset @a offset
 *         and returns it */
static inline vector unsigned char unaligned_load(int offset, uint8_t *src)
{
    register vector unsigned char first = vec_ld(offset, src);
    register vector unsigned char second = vec_ld(offset+15, src);
    register vector unsigned char mask = vec_lvsl(offset, src);
    return vec_perm(first, second, mask);
}
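
/* Example sketch (assumed helper): copy 16 bytes from an arbitrarily
 * aligned source to a 16-byte-aligned destination. vec_ld() silently
 * rounds addresses down to a 16-byte boundary, which is why
 * unaligned_load() combines two loads through a vec_lvsl() mask; the
 * second load uses offset+15 so that an already-aligned source never
 * touches the 16-byte block after the last byte actually needed. */
static inline void copy16_example(uint8_t *aligned_dst, uint8_t *src)
{
    vec_st(unaligned_load(0, src), 0, aligned_dst);
}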
/**
 * loads a vector with known misalignment
 * @param perm_vec the align permute vector to combine the two loads from lvsl
 */
static inline vec_u8 load_with_perm_vec(int offset, uint8_t *src, vec_u8 perm_vec)
{
    vec_u8 a = vec_ld(offset, src);
    vec_u8 b = vec_ld(offset+15, src);
    return vec_perm(a, b, perm_vec);
}
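
/* Sketch of the intended pattern (assumed example): when several loads
 * share one misalignment, e.g. rows separated by a stride that is a
 * multiple of 16, compute the permute vector once with vec_lvsl() and
 * reuse it, instead of re-deriving it per load as unaligned_load() does. */
static inline vec_u8 average_two_rows_example(uint8_t *src, int stride)
{
    vec_u8 perm = vec_lvsl(0, src); // both rows share this misalignment
    vec_u8 row0 = load_with_perm_vec(0, src, perm);
    vec_u8 row1 = load_with_perm_vec(stride, src, perm);
    return vec_avg(row0, row1);
}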
#endif /* AVUTIL_PPC_UTIL_ALTIVEC_H */