/*
 * AltiVec-enhanced yuv2yuvX
 *
 * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 * based on the equivalent C code in swscale.c
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
27 #include "libswscale/swscale.h"
28 #include "libswscale/swscale_internal.h"
29 #include "libavutil/attributes.h"
30 #include "libavutil/cpu.h"
31 #include "yuv2rgb_altivec.h"
32 #include "libavutil/ppc/util_altivec.h"
35 #define vzero vec_splat_s32(0)
/* Save the previously loaded sample vector in ls and fetch the next
 * 8 int16 samples (16 bytes past sample index b) with an unaligned
 * VSX load.  NOTE(review): the ls = a save line was missing from the
 * corrupted source; restored to match the template's use of ls below. */
#define GET_LS(a,b,c,s) {\
    ls = a;\
    a = vec_vsx_ld(((b) << 1) + 16, s);\
}
/* Accumulate one filter tap into two 32-bit sum vectors:
 * load 8 input samples via GET_LS, multiply by the splatted filter
 * coefficient (even/odd 16x16->32 products), re-interleave the products
 * back into element order and add them onto the accumulators d1/d2. */
#define yuv2planeX_8(d1, d2, l1, src, x, perm, filter) do {\
    vector signed short ls;\
    GET_LS(l1, x, perm, src);\
    vector signed int i1 = vec_mule(filter, ls);\
    vector signed int i2 = vec_mulo(filter, ls);\
    vector signed int vf1, vf2;\
    vf1 = vec_mergeh(i1, i2);\
    vf2 = vec_mergel(i1, i2);\
    d1 = vec_add(d1, vf1);\
    d2 = vec_add(d2, vf2);\
} while (0)
/* Load 8 filter coefficients at byte offset joffset (a local variable in
 * the including template, swscale_ppc_template.c). */
#define LOAD_FILTER(vf,f) {\
    vf = vec_vsx_ld(joffset, f);\
}
/* Load 8 source samples at byte offset xoffset (a local variable in the
 * including template). */
#define LOAD_L1(ll1,s,p){\
    ll1 = vec_vsx_ld(xoffset, s);\
}
// The 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2).

// The neat trick: We only care for half the elements,
// high or low depending on (i<<3)%16 (it's 0 or 8 here),
// and we're going to use vec_mule, so we choose
// carefully how to "unpack" the elements into the even slots.
/* Load 4 filter coefficients for output sample a (filterSize == 4 path)
 * and spread them into the even 16-bit slots, zeros in the odd slots,
 * ready for vec_mule. */
#define GET_VF4(a, vf, f) {\
    vf = (vector signed short)vec_vsx_ld(a << 3, f);\
    vf = vec_mergeh(vf, (vector signed short)vzero);\
}
/* No-ops on the VSX (little-endian) path: unaligned vec_vsx_ld needs no
 * permute-vector setup or pointer fixups, unlike the AltiVec path. */
#define FIRST_LOAD(sv, pos, s, per) {}
#define UPDATE_PTR(s0, d0, s1, d1) {}
/* Plain unaligned load of source samples at byte offset pos + a. */
#define LOAD_SRCV(pos, a, s, per, v0, v1, vf) {\
    vf = vec_vsx_ld(pos + a, s);\
}
/* The 8-sample variant is identical on VSX. */
#define LOAD_SRCV8(pos, a, s, per, v0, v1, vf) LOAD_SRCV(pos, a, s, per, v0, v1, vf)
/* Load 8 filter coefficients for output sample a, tap offset b
 * (off selects the high/low half; filterSize is a template local). */
#define GET_VFD(a, b, f, vf0, vf1, per, vf, off) {\
    vf = vec_vsx_ld((a * 2 * filterSize) + (b * 2) + off, f);\
}
82 #define FUNC(name) name ## _vsx
83 #include "swscale_ppc_template.c"
88 #endif /* !HAVE_BIGENDIAN */
/**
 * Scalar fallback for the 8-bit plane writer: add the ordered dither,
 * shift the 15-bit intermediate down to 8 bits and clip to [0,255].
 * Used by the VSX version for the unaligned head ([start, dstW) with
 * start == 0) and the remainder tail.
 */
static void yuv2plane1_8_u(const int16_t *src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset, int start)
{
    int i;

    for (i = start; i < dstW; i++) {
        int val = (src[i] + dither[(i + offset) & 7]) >> 7;
        dest[i] = av_clip_uint8(val);
    }
}
100 static void yuv2plane1_8_vsx(const int16_t *src, uint8_t *dest, int dstW,
101 const uint8_t *dither, int offset)
103 const int dst_u = -(uintptr_t)dest & 15;
105 LOCAL_ALIGNED(16, int16_t, val, [16]);
106 const vector uint16_t shifts = (vector uint16_t) {7, 7, 7, 7, 7, 7, 7, 7};
107 vector int16_t vi, vileft, ditherleft, ditherright;
110 for (j = 0; j < 16; j++) {
111 val[j] = dither[(dst_u + offset + j) & 7];
114 ditherleft = vec_ld(0, val);
115 ditherright = vec_ld(0, &val[8]);
117 yuv2plane1_8_u(src, dest, dst_u, dither, offset, 0);
119 for (i = dst_u; i < dstW - 15; i += 16) {
121 vi = vec_vsx_ld(0, &src[i]);
122 vi = vec_adds(ditherleft, vi);
123 vileft = vec_sra(vi, shifts);
125 vi = vec_vsx_ld(0, &src[i + 8]);
126 vi = vec_adds(ditherright, vi);
127 vi = vec_sra(vi, shifts);
129 vd = vec_packsu(vileft, vi);
130 vec_st(vd, 0, &dest[i]);
133 yuv2plane1_8_u(src, dest, dstW, dither, offset, i);
/* Store one output sample clipped to output_bits, byte-swapped for the
 * requested endianness (big_endian/shift/output_bits are locals of the
 * enclosing function). */
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    }
/**
 * Scalar 9..14-bit plane writer: round (add half the shift), downshift
 * from the 15-bit intermediate to output_bits and store via output_pixel.
 * Handles the head/tail pixels for the VSX version.
 */
static void yuv2plane1_nbps_u(const int16_t *src, uint16_t *dest, int dstW,
                              int big_endian, int output_bits, int start)
{
    int i;
    int shift = 15 - output_bits;

    for (i = start; i < dstW; i++) {
        int val = src[i] + (1 << (shift - 1));
        output_pixel(&dest[i], val);
    }
}
157 static void yuv2plane1_nbps_vsx(const int16_t *src, uint16_t *dest, int dstW,
158 int big_endian, int output_bits)
160 const int dst_u = -(uintptr_t)dest & 7;
161 const int shift = 15 - output_bits;
162 const int add = (1 << (shift - 1));
163 const int clip = (1 << output_bits) - 1;
164 const vector uint16_t vadd = (vector uint16_t) {add, add, add, add, add, add, add, add};
165 const vector uint16_t vswap = (vector uint16_t) vec_splat_u16(big_endian ? 8 : 0);
166 const vector uint16_t vshift = (vector uint16_t) vec_splat_u16(shift);
167 const vector uint16_t vlargest = (vector uint16_t) {clip, clip, clip, clip, clip, clip, clip, clip};
171 yuv2plane1_nbps_u(src, dest, dst_u, big_endian, output_bits, 0);
173 for (i = dst_u; i < dstW - 7; i += 8) {
174 v = vec_vsx_ld(0, (const uint16_t *) &src[i]);
175 v = vec_add(v, vadd);
176 v = vec_sr(v, vshift);
177 v = vec_min(v, vlargest);
178 v = vec_rl(v, vswap);
179 vec_st(v, 0, &dest[i]);
182 yuv2plane1_nbps_u(src, dest, dstW, big_endian, output_bits, i);
/**
 * Scalar 9..14-bit multi-tap plane writer: accumulate filterSize taps per
 * output sample in 32 bits, then round/shift/store via output_pixel.
 * Handles the head/tail pixels for the VSX version.
 */
static void yuv2planeX_nbps_u(const int16_t *filter, int filterSize,
                              const int16_t **src, uint16_t *dest, int dstW,
                              int big_endian, int output_bits, int start)
{
    int i;
    int shift = 11 + 16 - output_bits;

    for (i = start; i < dstW; i++) {
        int val = 1 << (shift - 1); /* rounding bias */
        int j;

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];

        output_pixel(&dest[i], val);
    }
}
203 static void yuv2planeX_nbps_vsx(const int16_t *filter, int filterSize,
204 const int16_t **src, uint16_t *dest, int dstW,
205 int big_endian, int output_bits)
207 const int dst_u = -(uintptr_t)dest & 7;
208 const int shift = 11 + 16 - output_bits;
209 const int add = (1 << (shift - 1));
210 const int clip = (1 << output_bits) - 1;
211 const uint16_t swap = big_endian ? 8 : 0;
212 const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
213 const vector uint32_t vshift = (vector uint32_t) {shift, shift, shift, shift};
214 const vector uint16_t vswap = (vector uint16_t) {swap, swap, swap, swap, swap, swap, swap, swap};
215 const vector uint16_t vlargest = (vector uint16_t) {clip, clip, clip, clip, clip, clip, clip, clip};
216 const vector int16_t vzero = vec_splat_s16(0);
217 const vector uint8_t vperm = (vector uint8_t) {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};
218 vector int16_t vfilter[MAX_FILTER_SIZE], vin;
220 vector uint32_t vleft, vright, vtmp;
223 for (i = 0; i < filterSize; i++) {
224 vfilter[i] = (vector int16_t) {filter[i], filter[i], filter[i], filter[i],
225 filter[i], filter[i], filter[i], filter[i]};
228 yuv2planeX_nbps_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);
230 for (i = dst_u; i < dstW - 7; i += 8) {
231 vleft = vright = vadd;
233 for (j = 0; j < filterSize; j++) {
234 vin = vec_vsx_ld(0, &src[j][i]);
235 vtmp = (vector uint32_t) vec_mule(vin, vfilter[j]);
236 vleft = vec_add(vleft, vtmp);
237 vtmp = (vector uint32_t) vec_mulo(vin, vfilter[j]);
238 vright = vec_add(vright, vtmp);
241 vleft = vec_sra(vleft, vshift);
242 vright = vec_sra(vright, vshift);
243 v = vec_packsu(vleft, vright);
244 v = (vector uint16_t) vec_max((vector int16_t) v, vzero);
245 v = vec_min(v, vlargest);
246 v = vec_rl(v, vswap);
247 v = vec_perm(v, v, vperm);
248 vec_st(v, 0, &dest[i]);
251 yuv2planeX_nbps_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
/* 16-bit variant: clip to a signed/unsigned 16-bit range, add the bias
 * (re-centering for the signed path) and store with the requested
 * endianness. */
#define output_pixel(pos, val, bias, signedness) \
    if (big_endian) { \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    } else { \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    }
264 static void yuv2plane1_16_u(const int32_t *src, uint16_t *dest, int dstW,
265 int big_endian, int output_bits, int start)
270 for (i = start; i < dstW; i++) {
271 int val = src[i] + (1 << (shift - 1));
272 output_pixel(&dest[i], val, 0, uint);
276 static void yuv2plane1_16_vsx(const int32_t *src, uint16_t *dest, int dstW,
277 int big_endian, int output_bits)
279 const int dst_u = -(uintptr_t)dest & 7;
281 const int add = (1 << (shift - 1));
282 const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
283 const vector uint16_t vswap = (vector uint16_t) vec_splat_u16(big_endian ? 8 : 0);
284 const vector uint32_t vshift = (vector uint32_t) vec_splat_u32(shift);
285 vector uint32_t v, v2;
289 yuv2plane1_16_u(src, dest, dst_u, big_endian, output_bits, 0);
291 for (i = dst_u; i < dstW - 7; i += 8) {
292 v = vec_vsx_ld(0, (const uint32_t *) &src[i]);
293 v = vec_add(v, vadd);
294 v = vec_sr(v, vshift);
296 v2 = vec_vsx_ld(0, (const uint32_t *) &src[i + 4]);
297 v2 = vec_add(v2, vadd);
298 v2 = vec_sr(v2, vshift);
300 vd = vec_packsu(v, v2);
301 vd = vec_rl(vd, vswap);
303 vec_st(vd, 0, &dest[i]);
306 yuv2plane1_16_u(src, dest, dstW, big_endian, output_bits, i);
311 static void yuv2planeX_16_u(const int16_t *filter, int filterSize,
312 const int32_t **src, uint16_t *dest, int dstW,
313 int big_endian, int output_bits, int start)
318 for (i = start; i < dstW; i++) {
319 int val = 1 << (shift - 1);
322 /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
323 * filters (or anything with negative coeffs, the range can be slightly
324 * wider in both directions. To account for this overflow, we subtract
325 * a constant so it always fits in the signed range (assuming a
326 * reasonable filterSize), and re-add that at the end. */
328 for (j = 0; j < filterSize; j++)
329 val += src[j][i] * (unsigned)filter[j];
331 output_pixel(&dest[i], val, 0x8000, int);
335 static void yuv2planeX_16_vsx(const int16_t *filter, int filterSize,
336 const int32_t **src, uint16_t *dest, int dstW,
337 int big_endian, int output_bits)
339 const int dst_u = -(uintptr_t)dest & 7;
340 const int shift = 15;
341 const int bias = 0x8000;
342 const int add = (1 << (shift - 1)) - 0x40000000;
343 const uint16_t swap = big_endian ? 8 : 0;
344 const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
345 const vector uint32_t vshift = (vector uint32_t) {shift, shift, shift, shift};
346 const vector uint16_t vswap = (vector uint16_t) {swap, swap, swap, swap, swap, swap, swap, swap};
347 const vector uint16_t vbias = (vector uint16_t) {bias, bias, bias, bias, bias, bias, bias, bias};
348 vector int32_t vfilter[MAX_FILTER_SIZE];
350 vector uint32_t vleft, vright, vtmp;
351 vector int32_t vin32l, vin32r;
354 for (i = 0; i < filterSize; i++) {
355 vfilter[i] = (vector int32_t) {filter[i], filter[i], filter[i], filter[i]};
358 yuv2planeX_16_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);
360 for (i = dst_u; i < dstW - 7; i += 8) {
361 vleft = vright = vadd;
363 for (j = 0; j < filterSize; j++) {
364 vin32l = vec_vsx_ld(0, &src[j][i]);
365 vin32r = vec_vsx_ld(0, &src[j][i + 4]);
367 vtmp = (vector uint32_t) vec_mul(vin32l, vfilter[j]);
368 vleft = vec_add(vleft, vtmp);
369 vtmp = (vector uint32_t) vec_mul(vin32r, vfilter[j]);
370 vright = vec_add(vright, vtmp);
373 vleft = vec_sra(vleft, vshift);
374 vright = vec_sra(vright, vshift);
375 v = (vector uint16_t) vec_packs((vector int32_t) vleft, (vector int32_t) vright);
376 v = vec_add(v, vbias);
377 v = vec_rl(v, vswap);
378 vec_st(v, 0, &dest[i]);
381 yuv2planeX_16_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
384 #endif /* HAVE_POWER8 */
/* Expand both the single-tap (plane1) and multi-tap (planeX) wrappers. */
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
    yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
    yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t)

/* Wrapper binding a bit depth and endianness to yuv2plane1_*_vsx. */
#define yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _vsx(const int16_t *src, \
                             uint8_t *dest, int dstW, \
                             const uint8_t *dither, int offset) \
{ \
    yuv2plane1_ ## template_size ## _vsx((const typeX_t *) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}

/* Wrapper binding a bit depth and endianness to yuv2planeX_*_vsx. */
#define yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2planeX_ ## bits ## BE_LE ## _vsx(const int16_t *filter, int filterSize, \
                              const int16_t **src, uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2planeX_## template_size ## _vsx(filter, \
                          filterSize, (const typeX_t **) src, \
                          (uint16_t *) dest, dstW, is_be, bits); \
}
409 yuv2NBPS( 9, BE, 1, nbps, int16_t)
410 yuv2NBPS( 9, LE, 0, nbps, int16_t)
411 yuv2NBPS(10, BE, 1, nbps, int16_t)
412 yuv2NBPS(10, LE, 0, nbps, int16_t)
413 yuv2NBPS(12, BE, 1, nbps, int16_t)
414 yuv2NBPS(12, LE, 0, nbps, int16_t)
415 yuv2NBPS(14, BE, 1, nbps, int16_t)
416 yuv2NBPS(14, LE, 0, nbps, int16_t)
418 yuv2NBPS1(16, BE, 1, 16, int32_t)
419 yuv2NBPS1(16, LE, 0, 16, int32_t)
421 yuv2NBPSX(16, BE, 1, 16, int32_t)
422 yuv2NBPSX(16, LE, 0, 16, int32_t)
425 #endif /* !HAVE_BIGENDIAN */
427 #endif /* HAVE_VSX */
429 av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
432 enum AVPixelFormat dstFormat = c->dstFormat;
433 const int cpu_flags = av_get_cpu_flags();
435 if (!(cpu_flags & AV_CPU_FLAG_VSX))
439 if (c->srcBpc == 8 && c->dstBpc <= 14) {
440 c->hyScale = c->hcScale = hScale_real_vsx;
442 if (!is16BPS(dstFormat) && !isNBPS(dstFormat) &&
443 dstFormat != AV_PIX_FMT_NV12 && dstFormat != AV_PIX_FMT_NV21 &&
444 dstFormat != AV_PIX_FMT_GRAYF32BE && dstFormat != AV_PIX_FMT_GRAYF32LE &&
446 c->yuv2planeX = yuv2planeX_vsx;
450 if (!(c->flags & (SWS_BITEXACT | SWS_FULL_CHR_H_INT)) && !c->needAlpha) {
453 c->yuv2plane1 = yuv2plane1_8_vsx;
457 c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_vsx : yuv2plane1_9LE_vsx;
458 c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_vsx : yuv2planeX_9LE_vsx;
461 c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_vsx : yuv2plane1_10LE_vsx;
462 c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_vsx : yuv2planeX_10LE_vsx;
465 c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_vsx : yuv2plane1_12LE_vsx;
466 c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_vsx : yuv2planeX_12LE_vsx;
469 c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_vsx : yuv2plane1_14LE_vsx;
470 c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_vsx : yuv2planeX_14LE_vsx;
473 c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_vsx : yuv2plane1_16LE_vsx;
475 if (cpu_flags & AV_CPU_FLAG_POWER8) {
476 c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_vsx : yuv2planeX_16LE_vsx;
478 #endif /* HAVE_POWER8 */
480 #endif /* !HAVE_BIGENDIAN */
483 #endif /* HAVE_VSX */