/*
 * AltiVec-enhanced yuv2yuvX
 *
 * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 * based on the equivalent C code in swscale.c
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "yuv2rgb_altivec.h"
#include "libavutil/ppc/util_altivec.h"

#if HAVE_VSX
#define vzero vec_splat_s32(0)

#if !HAVE_BIGENDIAN
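/*
 * Little-endian VSX replacements for the load helpers used by
 * swscale_ppc_template.c (included below with FUNC(name) = name ## _vsx).
 * vec_vsx_ld copes with unaligned addresses directly, so the permute-based
 * alignment handling of the plain AltiVec version is unnecessary and
 * FIRST_LOAD/UPDATE_PTR degenerate to no-ops.
 */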
#define GET_LS(a,b,c,s) {\
        ls = a;\
        a = vec_vsx_ld(((b) << 1) + 16, s);\
    }

#define yuv2planeX_8(d1, d2, l1, src, x, perm, filter) do {\
        vector signed short ls;\
        vector signed int vf1, vf2, i1, i2;\
        GET_LS(l1, x, perm, src);\
        i1 = vec_mule(filter, ls);\
        i2 = vec_mulo(filter, ls);\
        vf1 = vec_mergeh(i1, i2);\
        vf2 = vec_mergel(i1, i2);\
        d1 = vec_add(d1, vf1);\
        d2 = vec_add(d2, vf2);\
    } while (0)

#define LOAD_FILTER(vf,f) {\
        vf = vec_vsx_ld(joffset, f);\
    }
#define LOAD_L1(ll1,s,p){\
        ll1 = vec_vsx_ld(xoffset, s);\
    }
// The '<< 3' below is 2 (filterSize == 4) + 1 (sizeof(short) == 2).

// The neat trick: We only care for half the elements,
// high or low depending on (i<<3)%16 (it's 0 or 8 here),
// and we're going to use vec_mule, so we choose
// carefully how to "unpack" the elements into the even slots.
#define GET_VF4(a, vf, f) {\
    vf = (vector signed short) vec_vsx_ld(a << 3, f);\
    vf = vec_mergeh(vf, (vector signed short) vzero);\
}
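/*
 * Example for filterSize == 4: GET_VF4(i, vf, filter) loads the four 16-bit
 * taps of output pixel i (8 bytes at byte offset i << 3) and spreads them
 * into the even lanes, vf = {f0, 0, f1, 0, f2, 0, f3, 0}, so that a later
 * vec_mule in the shared template yields 32-bit products from exactly those
 * lanes -- which is what the comment above is referring to.
 */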
#define FIRST_LOAD(sv, pos, s, per) {}
#define UPDATE_PTR(s0, d0, s1, d1) {}
#define LOAD_SRCV(pos, a, s, per, v0, v1, vf) {\
        vf = vec_vsx_ld(pos + a, s);\
    }
#define LOAD_SRCV8(pos, a, s, per, v0, v1, vf) LOAD_SRCV(pos, a, s, per, v0, v1, vf)
#define GET_VFD(a, b, f, vf0, vf1, per, vf, off) {\
        vf = vec_vsx_ld((a * 2 * filterSize) + (b * 2) + off, f);\
    }
#define FUNC(name) name ## _vsx
#include "swscale_ppc_template.c"

#endif /* !HAVE_BIGENDIAN */
static void yuv2plane1_8_u(const int16_t *src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset, int start)
{
    int i;

    for (i = start; i < dstW; i++) {
        int val = (src[i] + dither[(i + offset) & 7]) >> 7;
        dest[i] = av_clip_uint8(val);
    }
}
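/*
 * Vector version of the scalar loop above: the start of the destination is
 * brought up to 16-byte alignment (head and tail go through the scalar
 * helper), then 16 pixels per iteration are dithered, shifted right by 7 and
 * packed to bytes with unsigned saturation.
 */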
static void yuv2plane1_8_vsx(const int16_t *src, uint8_t *dest, int dstW,
                             const uint8_t *dither, int offset)
{
    const int dst_u = -(uintptr_t)dest & 15;
    int i, j;
    LOCAL_ALIGNED(16, int16_t, val, [16]);
    const vector uint16_t shifts = (vector uint16_t) {7, 7, 7, 7, 7, 7, 7, 7};
    vector int16_t vi, vileft, ditherleft, ditherright;
    vector uint8_t vd;

    for (j = 0; j < 16; j++) {
        val[j] = dither[(dst_u + offset + j) & 7];
    }

    ditherleft = vec_ld(0, val);
    ditherright = vec_ld(0, &val[8]);

    yuv2plane1_8_u(src, dest, dst_u, dither, offset, 0);

    for (i = dst_u; i < dstW - 15; i += 16) {
        vi = vec_vsx_ld(0, &src[i]);
        vi = vec_adds(ditherleft, vi);
        vileft = vec_sra(vi, shifts);

        vi = vec_vsx_ld(0, &src[i + 8]);
        vi = vec_adds(ditherright, vi);
        vi = vec_sra(vi, shifts);

        vd = vec_packsu(vileft, vi);
        vec_st(vd, 0, &dest[i]);
    }

    yuv2plane1_8_u(src, dest, dstW, dither, offset, i);
}
#if !HAVE_BIGENDIAN

#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    }
static void yuv2plane1_nbps_u(const int16_t *src, uint16_t *dest, int dstW,
                              int big_endian, int output_bits, int start)
{
    int i;
    int shift = 15 - output_bits;

    for (i = start; i < dstW; i++) {
        int val = src[i] + (1 << (shift - 1));
        output_pixel(&dest[i], val);
    }
}
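/*
 * 9- to 14-bit single-plane output: 8 pixels per iteration get the rounding
 * constant added, are shifted right by (15 - output_bits) and clamped to
 * (1 << output_bits) - 1; vec_rl by 8 byte-swaps each 16-bit element when
 * big-endian output is requested.
 */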
static void yuv2plane1_nbps_vsx(const int16_t *src, uint16_t *dest, int dstW,
                                int big_endian, int output_bits)
{
    const int dst_u = -(uintptr_t)dest & 7;
    const int shift = 15 - output_bits;
    const int add = (1 << (shift - 1));
    const int clip = (1 << output_bits) - 1;
    const vector uint16_t vadd = (vector uint16_t) {add, add, add, add, add, add, add, add};
    const vector uint16_t vswap = (vector uint16_t) vec_splat_u16(big_endian ? 8 : 0);
    const vector uint16_t vshift = (vector uint16_t) vec_splat_u16(shift);
    const vector uint16_t vlargest = (vector uint16_t) {clip, clip, clip, clip, clip, clip, clip, clip};
    vector uint16_t v;
    int i;

    yuv2plane1_nbps_u(src, dest, dst_u, big_endian, output_bits, 0);

    for (i = dst_u; i < dstW - 7; i += 8) {
        v = vec_vsx_ld(0, (const uint16_t *) &src[i]);
        v = vec_add(v, vadd);
        v = vec_sr(v, vshift);
        v = vec_min(v, vlargest);
        v = vec_rl(v, vswap);
        vec_st(v, 0, &dest[i]);
    }

    yuv2plane1_nbps_u(src, dest, dstW, big_endian, output_bits, i);
}
static void yuv2planeX_nbps_u(const int16_t *filter, int filterSize,
                              const int16_t **src, uint16_t *dest, int dstW,
                              int big_endian, int output_bits, int start)
{
    int i;
    int shift = 11 + 16 - output_bits;

    for (i = start; i < dstW; i++) {
        int val = 1 << (shift - 1);
        int j;

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];

        output_pixel(&dest[i], val);
    }
}
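/*
 * Vectorised multi-tap version of the loop above.  vec_mule/vec_mulo return
 * the 32-bit products of the even and odd 16-bit lanes, so the two
 * accumulators hold pixels {0,2,4,6} and {1,3,5,7}; after packing back to
 * 16 bits, the vec_perm with {0,1, 8,9, 2,3, 10,11, ...} restores the
 * natural pixel order.
 */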
static void yuv2planeX_nbps_vsx(const int16_t *filter, int filterSize,
                                const int16_t **src, uint16_t *dest, int dstW,
                                int big_endian, int output_bits)
{
    const int dst_u = -(uintptr_t)dest & 7;
    const int shift = 11 + 16 - output_bits;
    const int add = (1 << (shift - 1));
    const int clip = (1 << output_bits) - 1;
    const uint16_t swap = big_endian ? 8 : 0;
    const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
    const vector uint32_t vshift = (vector uint32_t) {shift, shift, shift, shift};
    const vector uint16_t vswap = (vector uint16_t) {swap, swap, swap, swap, swap, swap, swap, swap};
    const vector uint16_t vlargest = (vector uint16_t) {clip, clip, clip, clip, clip, clip, clip, clip};
    const vector int16_t vzero = vec_splat_s16(0);
    const vector uint8_t vperm = (vector uint8_t) {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};
    vector int16_t vfilter[MAX_FILTER_SIZE], vin;
    vector uint16_t v;
    vector uint32_t vleft, vright, vtmp;
    int i, j;

    for (i = 0; i < filterSize; i++) {
        vfilter[i] = (vector int16_t) {filter[i], filter[i], filter[i], filter[i],
                                       filter[i], filter[i], filter[i], filter[i]};
    }

    yuv2planeX_nbps_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);

    for (i = dst_u; i < dstW - 7; i += 8) {
        vleft = vright = vadd;

        for (j = 0; j < filterSize; j++) {
            vin = vec_vsx_ld(0, &src[j][i]);
            vtmp = (vector uint32_t) vec_mule(vin, vfilter[j]);
            vleft = vec_add(vleft, vtmp);
            vtmp = (vector uint32_t) vec_mulo(vin, vfilter[j]);
            vright = vec_add(vright, vtmp);
        }

        vleft = vec_sra(vleft, vshift);
        vright = vec_sra(vright, vshift);
        v = vec_packsu(vleft, vright);
        v = (vector uint16_t) vec_max((vector int16_t) v, vzero);
        v = vec_min(v, vlargest);
        v = vec_rl(v, vswap);
        v = vec_perm(v, v, vperm);
        vec_st(v, 0, &dest[i]);
    }

    yuv2planeX_nbps_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
}
#undef output_pixel

#define output_pixel(pos, val, bias, signedness) \
    if (big_endian) { \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    } else { \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    }
static void yuv2plane1_16_u(const int32_t *src, uint16_t *dest, int dstW,
                            int big_endian, int output_bits, int start)
{
    int i;
    int shift = 3;

    for (i = start; i < dstW; i++) {
        int val = src[i] + (1 << (shift - 1));
        output_pixel(&dest[i], val, 0, uint);
    }
}
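/*
 * 16-bit single-plane output from 32-bit input: add the rounding constant,
 * shift right, pack two 4-element vectors to 8 pixels with unsigned
 * saturation and byte-swap via vec_rl when big-endian output is requested.
 */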
static void yuv2plane1_16_vsx(const int32_t *src, uint16_t *dest, int dstW,
                              int big_endian, int output_bits)
{
    const int dst_u = -(uintptr_t)dest & 7;
    const int shift = 3;
    const int add = (1 << (shift - 1));
    const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
    const vector uint16_t vswap = (vector uint16_t) vec_splat_u16(big_endian ? 8 : 0);
    const vector uint32_t vshift = (vector uint32_t) vec_splat_u32(shift);
    vector uint32_t v, v2;
    vector uint16_t vd;
    int i;

    yuv2plane1_16_u(src, dest, dst_u, big_endian, output_bits, 0);

    for (i = dst_u; i < dstW - 7; i += 8) {
        v = vec_vsx_ld(0, (const uint32_t *) &src[i]);
        v = vec_add(v, vadd);
        v = vec_sr(v, vshift);

        v2 = vec_vsx_ld(0, (const uint32_t *) &src[i + 4]);
        v2 = vec_add(v2, vadd);
        v2 = vec_sr(v2, vshift);

        vd = vec_packsu(v, v2);
        vd = vec_rl(vd, vswap);

        vec_st(vd, 0, &dest[i]);
    }

    yuv2plane1_16_u(src, dest, dstW, big_endian, output_bits, i);
}
#if HAVE_POWER8

static void yuv2planeX_16_u(const int16_t *filter, int filterSize,
                            const int32_t **src, uint16_t *dest, int dstW,
                            int big_endian, int output_bits, int start)
{
    int i;
    int shift = 15;

    for (i = start; i < dstW; i++) {
        int val = 1 << (shift - 1);
        int j;

        /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
         * filters (or anything with negative coeffs), the range can be slightly
         * wider in both directions. To account for this overflow, we subtract
         * a constant so it always fits in the signed range (assuming a
         * reasonable filterSize), and re-add that at the end. */
        val -= 0x40000000;
        for (j = 0; j < filterSize; j++)
            val += src[j][i] * (unsigned)filter[j];

        output_pixel(&dest[i], val, 0x8000, int);
    }
}
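/*
 * Note on the bias: 0x40000000 >> 15 == 0x8000, so the constant subtracted
 * before accumulation is exactly re-added by the 0x8000 bias that
 * output_pixel applies after the shift.
 */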
static void yuv2planeX_16_vsx(const int16_t *filter, int filterSize,
                              const int32_t **src, uint16_t *dest, int dstW,
                              int big_endian, int output_bits)
{
    const int dst_u = -(uintptr_t)dest & 7;
    const int shift = 15;
    const int bias = 0x8000;
    const int add = (1 << (shift - 1)) - 0x40000000;
    const uint16_t swap = big_endian ? 8 : 0;
    const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
    const vector uint32_t vshift = (vector uint32_t) {shift, shift, shift, shift};
    const vector uint16_t vswap = (vector uint16_t) {swap, swap, swap, swap, swap, swap, swap, swap};
    const vector uint16_t vbias = (vector uint16_t) {bias, bias, bias, bias, bias, bias, bias, bias};
    vector int32_t vfilter[MAX_FILTER_SIZE];
    vector uint16_t v;
    vector uint32_t vleft, vright, vtmp;
    vector int32_t vin32l, vin32r;
    int i, j;

    for (i = 0; i < filterSize; i++) {
        vfilter[i] = (vector int32_t) {filter[i], filter[i], filter[i], filter[i]};
    }

    yuv2planeX_16_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);

    for (i = dst_u; i < dstW - 7; i += 8) {
        vleft = vright = vadd;

        for (j = 0; j < filterSize; j++) {
            vin32l = vec_vsx_ld(0, &src[j][i]);
            vin32r = vec_vsx_ld(0, &src[j][i + 4]);

            vtmp = (vector uint32_t) vec_mul(vin32l, vfilter[j]);
            vleft = vec_add(vleft, vtmp);
            vtmp = (vector uint32_t) vec_mul(vin32r, vfilter[j]);
            vright = vec_add(vright, vtmp);
        }

        vleft = vec_sra(vleft, vshift);
        vright = vec_sra(vright, vshift);
        v = (vector uint16_t) vec_packs((vector int32_t) vleft, (vector int32_t) vright);
        v = vec_add(v, vbias);
        v = vec_rl(v, vswap);
        vec_st(v, 0, &dest[i]);
    }

    yuv2planeX_16_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
}
#endif /* HAVE_POWER8 */

#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
    yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
    yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t)

#define yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _vsx(const int16_t *src, \
                                                 uint8_t *dest, int dstW, \
                                                 const uint8_t *dither, int offset) \
{ \
    yuv2plane1_ ## template_size ## _vsx((const typeX_t *) src, \
                                         (uint16_t *) dest, dstW, is_be, bits); \
}

#define yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2planeX_ ## bits ## BE_LE ## _vsx(const int16_t *filter, int filterSize, \
                                                 const int16_t **src, uint8_t *dest, int dstW, \
                                                 const uint8_t *dither, int offset) \
{ \
    yuv2planeX_ ## template_size ## _vsx(filter, \
                                         filterSize, (const typeX_t **) src, \
                                         (uint16_t *) dest, dstW, is_be, bits); \
}
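/*
 * For example, yuv2NBPS(10, LE, 0, nbps, int16_t) expands to
 * yuv2plane1_10LE_vsx() and yuv2planeX_10LE_vsx(), thin wrappers that call
 * the nbps templates above with is_be = 0 and bits = 10.
 */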
yuv2NBPS( 9, BE, 1, nbps, int16_t)
yuv2NBPS( 9, LE, 0, nbps, int16_t)
yuv2NBPS(10, BE, 1, nbps, int16_t)
yuv2NBPS(10, LE, 0, nbps, int16_t)
yuv2NBPS(12, BE, 1, nbps, int16_t)
yuv2NBPS(12, LE, 0, nbps, int16_t)
yuv2NBPS(14, BE, 1, nbps, int16_t)
yuv2NBPS(14, LE, 0, nbps, int16_t)

yuv2NBPS1(16, BE, 1, 16, int32_t)
yuv2NBPS1(16, LE, 0, 16, int32_t)
yuv2NBPSX(16, BE, 1, 16, int32_t)
yuv2NBPSX(16, LE, 0, 16, int32_t)
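/*
 * Full-chroma-interpolated RGB output from a single input scanline.  Luma is
 * widened to 32 bits and scaled by 4; chroma is centred around 128 (<< 7 when
 * a single chroma line is used, << 8 when two lines are summed) and widened
 * with vec_mule/vec_mulo; each channel then gets the SwsContext yuv2rgb
 * offset and coefficients applied, is clipped to [0, 1 << 30] and shifted
 * right by 22 before being packed down to bytes.
 */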
static av_always_inline void
yuv2rgb_full_1_vsx_template(SwsContext *c, const int16_t *buf0,
                            const int16_t *ubuf[2], const int16_t *vbuf[2],
                            const int16_t *abuf0, uint8_t *dest, int dstW,
                            int uvalpha, int y, enum AVPixelFormat target,
                            int hasAlpha)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
    vector int16_t vy, vu, vv, A = vec_splat_s16(0), tmp16;
    vector int32_t vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
    vector int32_t R_l, R_r, G_l, G_r, B_l, B_r;
    vector uint16_t rd16, gd16, bd16;
    vector uint8_t rd, bd, gd, ad, out0, out1, tmp8;
    const vector uint16_t zero16 = vec_splat_u16(0);
    const vector int32_t y_offset = vec_splats(c->yuv2rgb_y_offset);
    const vector int32_t y_coeff = vec_splats(c->yuv2rgb_y_coeff);
    const vector int32_t y_add = vec_splats(1 << 21);
    const vector int32_t v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
    const vector int32_t v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
    const vector int32_t u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
    const vector int32_t u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
    const vector int32_t rgbclip = vec_splats(1 << 30);
    const vector int32_t zero32 = vec_splat_s32(0);
    const vector uint32_t shift2 = vec_splat_u32(2);
    const vector uint32_t shift22 = vec_splats(22U);
    const vector uint16_t sub7 = vec_splats((uint16_t) (128 << 7));
    const vector uint16_t sub8 = vec_splats((uint16_t) (128 << 8));
    const vector int16_t mul4 = vec_splat_s16(4);
    const vector int16_t mul8 = vec_splat_s16(8);
    const vector int16_t add64 = vec_splat_s16(64);
    const vector uint16_t shift7 = vec_splat_u16(7);
    const vector int16_t max255 = vec_splat_s16(255);
    int i;
    // Various permutations; lanes beyond the 24 packed RGB bytes are padding.
    const vector uint8_t perm3rg0 = (vector uint8_t) {0x0, 0x10, 0,
                                                      0x1, 0x11, 0,
                                                      0x2, 0x12, 0,
                                                      0x3, 0x13, 0,
                                                      0x4, 0x14, 0,
                                                      0x5 };
    const vector uint8_t perm3rg1 = (vector uint8_t) { 0x15, 0,
                                                      0x6, 0x16, 0,
                                                      0x7, 0x17, 0 };
    const vector uint8_t perm3tb0 = (vector uint8_t) {0x0, 0x1, 0x10,
                                                      0x3, 0x4, 0x11,
                                                      0x6, 0x7, 0x12,
                                                      0x9, 0xa, 0x13,
                                                      0xc, 0xd, 0x14,
                                                      0xf };
    const vector uint8_t perm3tb1 = (vector uint8_t) { 0x0, 0x15,
                                                      0x2, 0x3, 0x16,
                                                      0x5, 0x6, 0x17 };
    for (i = 0; i < dstW; i += 8) { // The x86 asm also overwrites padding bytes.
        vy = vec_ld(0, &buf0[i]);
        vy32_l = vec_unpackh(vy);
        vy32_r = vec_unpackl(vy);
        vy32_l = vec_sl(vy32_l, shift2);
        vy32_r = vec_sl(vy32_r, shift2);

        vu = vec_ld(0, &ubuf0[i]);
        vv = vec_ld(0, &vbuf0[i]);
        if (uvalpha < 2048) {
            vu = (vector int16_t) vec_sub((vector uint16_t) vu, sub7);
            vv = (vector int16_t) vec_sub((vector uint16_t) vv, sub7);

            tmp32 = vec_mule(vu, mul4);
            tmp32_2 = vec_mulo(vu, mul4);
            vu32_l = vec_mergeh(tmp32, tmp32_2);
            vu32_r = vec_mergel(tmp32, tmp32_2);
            tmp32 = vec_mule(vv, mul4);
            tmp32_2 = vec_mulo(vv, mul4);
            vv32_l = vec_mergeh(tmp32, tmp32_2);
            vv32_r = vec_mergel(tmp32, tmp32_2);
        } else {
            tmp16 = vec_ld(0, &ubuf1[i]);
            vu = vec_add(vu, tmp16);
            vu = (vector int16_t) vec_sub((vector uint16_t) vu, sub8);
            tmp16 = vec_ld(0, &vbuf1[i]);
            vv = vec_add(vv, tmp16);
            vv = (vector int16_t) vec_sub((vector uint16_t) vv, sub8);

            vu32_l = vec_mule(vu, mul8);
            vu32_r = vec_mulo(vu, mul8);
            vv32_l = vec_mule(vv, mul8);
            vv32_r = vec_mulo(vv, mul8);
        }

        if (hasAlpha) {
            A = vec_ld(0, &abuf0[i]);
            A = vec_add(A, add64);
            A = vec_sr(A, shift7);
            A = vec_max(A, max255);
            ad = vec_packsu(A, (vector int16_t) zero16);
        } else {
            ad = vec_splats((uint8_t) 255);
        }
        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        R_l = vec_mul(vv32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vv32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vv32_l, v2g_coeff);
        tmp32 = vec_mul(vu32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vv32_r, v2g_coeff);
        tmp32 = vec_mul(vu32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);

        B_l = vec_mul(vu32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vu32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);

        R_l = vec_max(R_l, zero32);
        R_r = vec_max(R_r, zero32);
        G_l = vec_max(G_l, zero32);
        G_r = vec_max(G_r, zero32);
        B_l = vec_max(B_l, zero32);
        B_r = vec_max(B_r, zero32);

        R_l = vec_min(R_l, rgbclip);
        R_r = vec_min(R_r, rgbclip);
        G_l = vec_min(G_l, rgbclip);
        G_r = vec_min(G_r, rgbclip);
        B_l = vec_min(B_l, rgbclip);
        B_r = vec_min(B_r, rgbclip);

        R_l = vec_sr(R_l, shift22);
        R_r = vec_sr(R_r, shift22);
        G_l = vec_sr(G_l, shift22);
        G_r = vec_sr(G_r, shift22);
        B_l = vec_sr(B_l, shift22);
        B_r = vec_sr(B_r, shift22);

        rd16 = vec_packsu(R_l, R_r);
        gd16 = vec_packsu(G_l, G_r);
        bd16 = vec_packsu(B_l, B_r);
        rd = vec_packsu(rd16, zero16);
        gd = vec_packsu(gd16, zero16);
        bd = vec_packsu(bd16, zero16);
        switch (target) {
        case AV_PIX_FMT_RGB24:
            out0 = vec_perm(rd, gd, perm3rg0);
            out0 = vec_perm(out0, bd, perm3tb0);
            out1 = vec_perm(rd, gd, perm3rg1);
            out1 = vec_perm(out1, bd, perm3tb1);

            vec_vsx_st(out0, 0, dest);
            vec_vsx_st(out1, 16, dest);

            dest += 24;
            break;
        case AV_PIX_FMT_BGR24:
            out0 = vec_perm(bd, gd, perm3rg0);
            out0 = vec_perm(out0, rd, perm3tb0);
            out1 = vec_perm(bd, gd, perm3rg1);
            out1 = vec_perm(out1, rd, perm3tb1);

            vec_vsx_st(out0, 0, dest);
            vec_vsx_st(out1, 16, dest);

            dest += 24;
            break;
        case AV_PIX_FMT_BGRA:
            out0 = vec_mergeh(bd, gd);
            out1 = vec_mergeh(rd, ad);

            tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1);
            vec_vsx_st(tmp8, 0, dest);
            tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1);
            vec_vsx_st(tmp8, 16, dest);

            dest += 32;
            break;
        case AV_PIX_FMT_RGBA:
            out0 = vec_mergeh(rd, gd);
            out1 = vec_mergeh(bd, ad);

            tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1);
            vec_vsx_st(tmp8, 0, dest);
            tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1);
            vec_vsx_st(tmp8, 16, dest);

            dest += 32;
            break;
        case AV_PIX_FMT_ARGB:
            out0 = vec_mergeh(ad, rd);
            out1 = vec_mergeh(gd, bd);

            tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1);
            vec_vsx_st(tmp8, 0, dest);
            tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1);
            vec_vsx_st(tmp8, 16, dest);

            dest += 32;
            break;
        case AV_PIX_FMT_ABGR:
            out0 = vec_mergeh(ad, bd);
            out1 = vec_mergeh(gd, rd);

            tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1);
            vec_vsx_st(tmp8, 0, dest);
            tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1);
            vec_vsx_st(tmp8, 16, dest);

            dest += 32;
            break;
        }
    }
}
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf0, uint8_t *dest, int dstW, \
                                  int uvalpha, int y) \
{ \
    name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                    dstW, uvalpha, y, fmt, hasAlpha); \
}

YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)

YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
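/*
 * write422() packs two luma and two chroma vectors down to bytes and
 * interleaves them into 16 YUYV/YVYU/UYVY pixels (32 output bytes) using the
 * precomputed permute masks.
 */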
static av_always_inline void
write422(const vector int16_t vy1, const vector int16_t vy2,
         const vector int16_t vu, const vector int16_t vv,
         uint8_t *dest, const enum AVPixelFormat target)
{
    vector uint8_t vd1, vd2, tmp;
    const vector uint8_t yuyv1 = (vector uint8_t) {
                                 0x0, 0x10, 0x1, 0x18,
                                 0x2, 0x11, 0x3, 0x19,
                                 0x4, 0x12, 0x5, 0x1a,
                                 0x6, 0x13, 0x7, 0x1b };
    const vector uint8_t yuyv2 = (vector uint8_t) {
                                 0x8, 0x14, 0x9, 0x1c,
                                 0xa, 0x15, 0xb, 0x1d,
                                 0xc, 0x16, 0xd, 0x1e,
                                 0xe, 0x17, 0xf, 0x1f };
    const vector uint8_t yvyu1 = (vector uint8_t) {
                                 0x0, 0x18, 0x1, 0x10,
                                 0x2, 0x19, 0x3, 0x11,
                                 0x4, 0x1a, 0x5, 0x12,
                                 0x6, 0x1b, 0x7, 0x13 };
    const vector uint8_t yvyu2 = (vector uint8_t) {
                                 0x8, 0x1c, 0x9, 0x14,
                                 0xa, 0x1d, 0xb, 0x15,
                                 0xc, 0x1e, 0xd, 0x16,
                                 0xe, 0x1f, 0xf, 0x17 };
    const vector uint8_t uyvy1 = (vector uint8_t) {
                                 0x10, 0x0, 0x18, 0x1,
                                 0x11, 0x2, 0x19, 0x3,
                                 0x12, 0x4, 0x1a, 0x5,
                                 0x13, 0x6, 0x1b, 0x7 };
    const vector uint8_t uyvy2 = (vector uint8_t) {
                                 0x14, 0x8, 0x1c, 0x9,
                                 0x15, 0xa, 0x1d, 0xb,
                                 0x16, 0xc, 0x1e, 0xd,
                                 0x17, 0xe, 0x1f, 0xf };

    vd1 = vec_packsu(vy1, vy2);
    vd2 = vec_packsu(vu, vv);

    switch (target) {
    case AV_PIX_FMT_YUYV422:
        tmp = vec_perm(vd1, vd2, yuyv1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, yuyv2);
        vec_st(tmp, 16, dest);
        break;
    case AV_PIX_FMT_YVYU422:
        tmp = vec_perm(vd1, vd2, yvyu1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, yvyu2);
        vec_st(tmp, 16, dest);
        break;
    case AV_PIX_FMT_UYVY422:
        tmp = vec_perm(vd1, vd2, uyvy1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, uyvy2);
        vec_st(tmp, 16, dest);
        break;
    }
}
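/*
 * Single-scanline ("_1") 4:2:2 packer.  With uvalpha < 2048 only the first
 * chroma buffer contributes and every component is (x + 64) >> 7; otherwise
 * the two chroma buffers are summed and reduced as (u0 + u1 + 128) >> 8,
 * while luma is still (y + 64) >> 7.
 */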
static av_always_inline void
yuv2422_1_vsx_template(SwsContext *c, const int16_t *buf0,
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf0, uint8_t *dest, int dstW,
                       int uvalpha, int y, enum AVPixelFormat target)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    vector int16_t vy1, vy2, vu, vv, tmp;
    const vector int16_t add64 = vec_splats((int16_t) 64);
    const vector int16_t add128 = vec_splats((int16_t) 128);
    const vector uint16_t shift7 = vec_splat_u16(7);
    const vector uint16_t shift8 = vec_splat_u16(8);
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
            vy1 = vec_ld(0, &buf0[i * 2]);
            vy2 = vec_ld(0, &buf0[(i + 4) * 2]);
            vu = vec_ld(0, &ubuf0[i]);
            vv = vec_ld(0, &vbuf0[i]);

            vy1 = vec_add(vy1, add64);
            vy2 = vec_add(vy2, add64);
            vu = vec_add(vu, add64);
            vv = vec_add(vv, add64);

            vy1 = vec_sra(vy1, shift7);
            vy2 = vec_sra(vy2, shift7);
            vu = vec_sra(vu, shift7);
            vv = vec_sra(vv, shift7);

            write422(vy1, vy2, vu, vv, &dest[i * 4], target);
        }
    } else {
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
            vy1 = vec_ld(0, &buf0[i * 2]);
            vy2 = vec_ld(0, &buf0[(i + 4) * 2]);
            vu = vec_ld(0, &ubuf0[i]);
            tmp = vec_ld(0, &ubuf1[i]);
            vu = vec_adds(vu, tmp);
            vv = vec_ld(0, &vbuf0[i]);
            tmp = vec_ld(0, &vbuf1[i]);
            vv = vec_adds(vv, tmp);

            vy1 = vec_add(vy1, add64);
            vy2 = vec_add(vy2, add64);
            vu = vec_adds(vu, add128);
            vv = vec_adds(vv, add128);

            vy1 = vec_sra(vy1, shift7);
            vy2 = vec_sra(vy2, shift7);
            vu = vec_sra(vu, shift8);
            vv = vec_sra(vv, shift8);

            write422(vy1, vy2, vu, vv, &dest[i * 4], target);
        }
    }
}
#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf0, uint8_t *dest, int dstW, \
                                  int uvalpha, int y) \
{ \
    name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, \
                                    abuf0, dest, dstW, uvalpha, \
                                    y, fmt); \
}

YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)

#endif /* !HAVE_BIGENDIAN */

#endif /* HAVE_VSX */
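/*
 * Runtime initialisation: install the VSX code paths above according to
 * av_get_cpu_flags() and the destination format.  Most of the assignments
 * are only compiled on little-endian builds, and a few additionally require
 * POWER8.
 */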
av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
{
#if HAVE_VSX
    enum AVPixelFormat dstFormat = c->dstFormat;
    const int cpu_flags = av_get_cpu_flags();

    if (!(cpu_flags & AV_CPU_FLAG_VSX))
        return;

#if !HAVE_BIGENDIAN
    if (c->srcBpc == 8 && c->dstBpc <= 14) {
        c->hyScale = c->hcScale = hScale_real_vsx;
    }
    if (!is16BPS(dstFormat) && !isNBPS(dstFormat) &&
        dstFormat != AV_PIX_FMT_NV12 && dstFormat != AV_PIX_FMT_NV21 &&
        dstFormat != AV_PIX_FMT_GRAYF32BE && dstFormat != AV_PIX_FMT_GRAYF32LE &&
        !c->needAlpha) {
        c->yuv2planeX = yuv2planeX_vsx;
    }
    if (!(c->flags & (SWS_BITEXACT | SWS_FULL_CHR_H_INT)) && !c->needAlpha) {
        switch (c->dstBpc) {
        case 8:
            c->yuv2plane1 = yuv2plane1_8_vsx;
            break;
        case 9:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_vsx : yuv2plane1_9LE_vsx;
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_vsx : yuv2planeX_9LE_vsx;
            break;
        case 10:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_vsx : yuv2plane1_10LE_vsx;
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_vsx : yuv2planeX_10LE_vsx;
            break;
        case 12:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_vsx : yuv2plane1_12LE_vsx;
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_vsx : yuv2planeX_12LE_vsx;
            break;
        case 14:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_vsx : yuv2plane1_14LE_vsx;
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_vsx : yuv2planeX_14LE_vsx;
            break;
        case 16:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_vsx : yuv2plane1_16LE_vsx;
#if HAVE_POWER8
            if (cpu_flags & AV_CPU_FLAG_POWER8) {
                c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_vsx : yuv2planeX_16LE_vsx;
            }
#endif /* HAVE_POWER8 */
            break;
        }
    }
#endif /* !HAVE_BIGENDIAN */

    if (c->flags & SWS_BITEXACT)
        return;

#if !HAVE_BIGENDIAN
    if (c->flags & SWS_FULL_CHR_H_INT) {
        switch (dstFormat) {
        case AV_PIX_FMT_RGB24:
            if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
                c->yuv2packed1 = yuv2rgb24_full_1_vsx;
            }
            break;
        case AV_PIX_FMT_BGR24:
            if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
                c->yuv2packed1 = yuv2bgr24_full_1_vsx;
            }
            break;
        case AV_PIX_FMT_BGRA:
            if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2bgrx32_full_1_vsx;
                }
            }
            break;
        case AV_PIX_FMT_RGBA:
            if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2rgbx32_full_1_vsx;
                }
            }
            break;
        case AV_PIX_FMT_ARGB:
            if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2xrgb32_full_1_vsx;
                }
            }
            break;
        case AV_PIX_FMT_ABGR:
            if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2xbgr32_full_1_vsx;
                }
            }
            break;
        }
    } else { /* !SWS_FULL_CHR_H_INT */
        switch (dstFormat) {
        case AV_PIX_FMT_YUYV422:
            c->yuv2packed1 = yuv2yuyv422_1_vsx;
            break;
        case AV_PIX_FMT_YVYU422:
            c->yuv2packed1 = yuv2yvyu422_1_vsx;
            break;
        case AV_PIX_FMT_UYVY422:
            c->yuv2packed1 = yuv2uyvy422_1_vsx;
            break;
        }
    }
#endif /* !HAVE_BIGENDIAN */
#endif /* HAVE_VSX */
}