/*
 * AltiVec-enhanced yuv2yuvX
 *
 * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 * based on the equivalent C code in swscale.c
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <inttypes.h>

#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"
#include "libavutil/cpu.h"
#include "yuv2rgb_altivec.h"

#define vzero vec_splat_s32(0)
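
/*
 * Pack 32-bit accumulators into 8-bit pixels: every value is shifted right
 * by 19 and clipped to 0..255, mirroring the scalar output code in
 * swscale.c.  vec_splat_u32() only accepts a 5-bit immediate (-16..15),
 * so the shift count of 19 is built below as vec_splat_u32(10) +
 * vec_splat_u32(9).  Stores to dest are kept 16-byte aligned; head and
 * tail pixels that do not fill a whole vector go through scalar loops.
 */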
static inline void altivec_packIntArrayToCharArray(int *val, uint8_t *dest,
                                                    int dstW)
{
    register int i;
    vector unsigned int altivec_vectorShiftInt19 =
        vec_add(vec_splat_u32(10), vec_splat_u32(9));
    if ((uintptr_t)dest % 16) {
        /* badly aligned store, we force store alignment */
        /* and will handle load misalignment on val w/ vec_perm */
        vector unsigned char perm1;
        vector signed int v1;
        for (i = 0; (i < dstW) &&
             (((uintptr_t)dest + i) % 16); i++) {
            int t = val[i] >> 19;
            dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
        }
        perm1 = vec_lvsl(i << 2, val);
        v1    = vec_ld(i << 2, val);
        for (; i < (dstW - 15); i += 16) {
            int offset = i << 2;
            vector signed int v2 = vec_ld(offset + 16, val);
            vector signed int v3 = vec_ld(offset + 32, val);
            vector signed int v4 = vec_ld(offset + 48, val);
            vector signed int v5 = vec_ld(offset + 64, val);
            vector signed int v12 = vec_perm(v1, v2, perm1);
            vector signed int v23 = vec_perm(v2, v3, perm1);
            vector signed int v34 = vec_perm(v3, v4, perm1);
            vector signed int v45 = vec_perm(v4, v5, perm1);

            vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
            vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
            vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19);
            vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19);
            vector unsigned short vs1 = vec_packsu(vA, vB);
            vector unsigned short vs2 = vec_packsu(vC, vD);
            vector unsigned char vf = vec_packsu(vs1, vs2);
            vec_st(vf, i, dest);
            v1 = v5; // carry the last aligned load into the next iteration
        }
    } else { // dest is properly aligned, great
        for (i = 0; i < (dstW - 15); i += 16) {
            int offset = i << 2;
            vector signed int v1 = vec_ld(offset, val);
            vector signed int v2 = vec_ld(offset + 16, val);
            vector signed int v3 = vec_ld(offset + 32, val);
            vector signed int v4 = vec_ld(offset + 48, val);
            vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19);
            vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19);
            vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19);
            vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19);
            vector unsigned short vs1 = vec_packsu(v5, v6);
            vector unsigned short vs2 = vec_packsu(v7, v8);
            vector unsigned char vf = vec_packsu(vs1, vs2);
            vec_st(vf, i, dest);
        }
    }
    for (; i < dstW; i++) {
        int t = val[i] >> 19;
        dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
    }
}
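
/*
 * Vertical scaling and output: val[i] starts at dither[(i + offset) & 7] << 12
 * and accumulates src[j][i] * filter[j] over all filter taps, eight pixels per
 * vector iteration.  vec_mule()/vec_mulo() return the 32-bit products of the
 * even/odd 16-bit lanes, so vec_mergeh()/vec_mergel() are used to put the
 * products back into pixel order before adding them to the accumulators.
 * The packed, clipped bytes are produced by altivec_packIntArrayToCharArray().
 */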
// FIXME remove the usage of scratch buffers.
static void yuv2planeX_altivec(const int16_t *filter, int filterSize,
                               const int16_t **src, uint8_t *dest, int dstW,
                               const uint8_t *dither, int offset)
{
    register int i, j;
    DECLARE_ALIGNED(16, int, val)[dstW];

    for (i = 0; i < dstW; i++)
        val[i] = dither[(i + offset) & 7] << 12;

    for (j = 0; j < filterSize; j++) {
        vector signed short l1, vLumFilter = vec_ld(j << 1, filter);
        vector unsigned char perm, perm0 = vec_lvsl(j << 1, filter);
        vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
        vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter

        perm = vec_lvsl(0, src[j]);
        l1 = vec_ld(0, src[j]);

        for (i = 0; i < (dstW - 7); i += 8) {
            int offset = i << 2; // byte offset into val (shadows the dithering 'offset' parameter)
            vector signed short l2 = vec_ld((i << 1) + 16, src[j]);

            vector signed int v1 = vec_ld(offset, val);
            vector signed int v2 = vec_ld(offset + 16, val);

            vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7]

            vector signed int i1 = vec_mule(vLumFilter, ls);
            vector signed int i2 = vec_mulo(vLumFilter, ls);

            vector signed int vf1 = vec_mergeh(i1, i2);
            vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... lumSrc[j][i+7] * lumFilter[j]

            vector signed int vo1 = vec_add(v1, vf1);
            vector signed int vo2 = vec_add(v2, vf2);

            vec_st(vo1, offset, val);
            vec_st(vo2, offset + 16, val);

            l1 = l2; // carry the last load for the next unaligned vec_perm
        }
        for (; i < dstW; i++)
            val[i] += src[j][i] * filter[j];
    }
    altivec_packIntArrayToCharArray(val, dest, dstW);
}
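
/*
 * Horizontal scaling: dst[i] = min((sum of src[filterPos[i] + j] *
 * filter[i * filterSize + j]) >> 7, 32767).  Dedicated vector loops handle
 * filterSize 4, 8 and 16; other sizes that are a multiple of 4 take the
 * generic default path, and everything else falls back to plain C.
 * The vector paths rely on vec_msums() for the multiply-accumulate and on
 * vec_sums(), which folds the four partial sums into element 3 of its
 * result, hence the scalar value is read back from tempo[3].
 */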
static void hScale_altivec_real(SwsContext *c, int16_t *dst, int dstW,
                                const uint8_t *src, const int16_t *filter,
                                const int32_t *filterPos, int filterSize)
{
    register int i;
    DECLARE_ALIGNED(16, int, tempo)[4];

    if (filterSize % 4) {
        for (i = 0; i < dstW; i++) {
            register int j;
            register int srcPos = filterPos[i];
            register int val = 0;
            for (j = 0; j < filterSize; j++)
                val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
            dst[i] = FFMIN(val >> 7, (1 << 15) - 1);
        }
    } else
        switch (filterSize) {
        case 4:
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];

                vector unsigned char src_v0 = vec_ld(srcPos, src);
                vector unsigned char src_v1, src_vF;
                vector signed short src_v, filter_v;
                vector signed int val_vEven, val_s;
                if ((((uintptr_t)src + srcPos) % 16) > 12) {
                    src_v1 = vec_ld(srcPos + 16, src);
                }
                src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));

                src_v = // vec_unpackh sign-extends...
                        (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
                // now put our elements in the even slots
                src_v = vec_mergeh(src_v, (vector signed short)vzero);

                filter_v = vec_ld(i << 3, filter);
                // The 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2).

                // The neat trick: We only care for half the elements,
                // high or low depending on (i<<3)%16 (it's 0 or 8 here),
                // and we're going to use vec_mule, so we choose
                // carefully how to "unpack" the elements into the even slots.
                if ((i << 3) % 16)
                    filter_v = vec_mergel(filter_v, (vector signed short)vzero);
                else
                    filter_v = vec_mergeh(filter_v, (vector signed short)vzero);

                val_vEven = vec_mule(src_v, filter_v);
                val_s = vec_sums(val_vEven, vzero);
                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
            break;
        case 8:
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];

                vector unsigned char src_v0 = vec_ld(srcPos, src);
                vector unsigned char src_v1, src_vF;
                vector signed short src_v, filter_v;
                vector signed int val_v, val_s;
                if ((((uintptr_t)src + srcPos) % 16) > 8) {
                    src_v1 = vec_ld(srcPos + 16, src);
                }
                src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));

                src_v = // vec_unpackh sign-extends...
                        (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
                filter_v = vec_ld(i << 4, filter);
                // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2)

                val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
                val_s = vec_sums(val_v, vzero);
                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
            break;
        case 16:
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];

                vector unsigned char src_v0 = vec_ld(srcPos, src);
                vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
                vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));

                vector signed short src_vA = // vec_unpackh sign-extends...
                                             (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
                vector signed short src_vB = // vec_unpackh sign-extends...
                                             (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));

                vector signed short filter_v0 = vec_ld(i << 5, filter);
                vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
                // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2)

                vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
                vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);

                vector signed int val_s = vec_sums(val_v, vzero);

                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
            break;
        default:
            for (i = 0; i < dstW; i++) {
                register int j;
                register int srcPos = filterPos[i];

                vector signed int val_s, val_v = (vector signed int)vzero;
                vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
                vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter);

                vector unsigned char src_v0 = vec_ld(srcPos, src);
                vector unsigned char permS = vec_lvsl(srcPos, src);

                for (j = 0; j < filterSize - 15; j += 16) {
                    vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
                    vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);

                    vector signed short src_vA = // vec_unpackh sign-extends...
                                                 (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
                    vector signed short src_vB = // vec_unpackh sign-extends...
                                                 (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));

                    vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
                    vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
                    vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF);
                    vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF);

                    vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
                    val_v = vec_msums(src_vB, filter_v1, val_acc);

                    filter_v0R = filter_v2R; // slide the filter window for the next 16 taps
                    src_v0 = src_v1;         // slide the source window alongside it
                }

                if (j < filterSize - 7) {
                    // loading src_v0 is useless, it's already done above
                    // vector unsigned char src_v0 = vec_ld(srcPos + j, src);
                    vector unsigned char src_v1, src_vF;
                    vector signed short src_v, filter_v1R, filter_v;
                    if ((((uintptr_t)src + srcPos) % 16) > 8) {
                        src_v1 = vec_ld(srcPos + j + 16, src);
                    }
                    src_vF = vec_perm(src_v0, src_v1, permS);

                    src_v = // vec_unpackh sign-extends...
                            (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
                    // loading filter_v0R is useless, it's already done above
                    // vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
                    filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
                    filter_v = vec_perm(filter_v0R, filter_v1R, permF);

                    val_v = vec_msums(src_v, filter_v, val_v);
                }

                val_s = vec_sums(val_v, vzero);

                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
        }
}
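
/*
 * Plug the AltiVec code paths into the SwsContext at init time.  The planar
 * output path is only used for 8-bit destinations (no 9/10/16 bpc, no
 * NV12/NV21) without an alpha plane; the packed RGB writers below have to
 * stay in sync with what ff_yuv2packedX_altivec() actually supports.
 */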
void ff_sws_init_swScale_altivec(SwsContext *c)
{
    enum PixelFormat dstFormat = c->dstFormat;

    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return;

    if (c->srcBpc == 8 && c->dstBpc <= 10) {
        c->hyScale = c->hcScale = hScale_altivec_real;
    }
    if (!is16BPS(dstFormat) && !is9_OR_10BPS(dstFormat) &&
        dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21 &&
        !c->alpPixBuf) {
        c->yuv2planeX = yuv2planeX_altivec;
    }

    /* The following list of supported dstFormat values should
     * match what's found in the body of ff_yuv2packedX_altivec() */
    if (!(c->flags & (SWS_BITEXACT | SWS_FULL_CHR_H_INT)) && !c->alpPixBuf) {
        switch (c->dstFormat) {
        case PIX_FMT_ABGR:
            c->yuv2packedX = ff_yuv2abgr_X_altivec;
            break;
        case PIX_FMT_BGRA:
            c->yuv2packedX = ff_yuv2bgra_X_altivec;
            break;
        case PIX_FMT_ARGB:
            c->yuv2packedX = ff_yuv2argb_X_altivec;
            break;
        case PIX_FMT_RGBA:
            c->yuv2packedX = ff_yuv2rgba_X_altivec;
            break;
        case PIX_FMT_BGR24:
            c->yuv2packedX = ff_yuv2bgr24_X_altivec;
            break;
        case PIX_FMT_RGB24:
            c->yuv2packedX = ff_yuv2rgb24_X_altivec;
            break;
        }
    }
}