/*
 * AltiVec acceleration for colorspace conversion
 *
 * copyright (C) 2004 Marc Hoffman <marc.hoffman@analog.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Converts I420/YV12 to RGB in various formats.
 * It rejects images that are not in a 420 format,
 * images whose width is not a multiple of 16,
 * and images whose height is not a multiple of 2.
 * Rejected images fall back to the C code.
 *
 * Lots of optimizations to be done here.
 *
 * 1. Need to fix saturation code. I just couldn't get it to fly with packs
 * and adds, so we currently use max/min to clip.
 *
 * 2. The inefficient use of chroma loading needs a bit of brushing up.
 *
 * 3. Analysis of pipeline stalls needs to be done. Use shark to identify
 * pipeline stalls.
 *
 *
 * MODIFIED to calculate coeffs from currently selected color space.
 * MODIFIED core to be a macro where you specify the output format.
 * ADDED UYVY conversion which is never called due to something in swscale.
 * CORRECTED algorithm selection to be strict on input formats.
 * ADDED runtime detection of AltiVec.
 *
 * ADDED altivec_yuv2packedX vertical scale + RGB converter
 *
 * March 27, 2004
 * PERFORMANCE ANALYSIS
 *
 * The C version uses 25% of the processor or ~250Mips for the D1 rawvideo
 * used as a test.
 * The AltiVec version uses 10% of the processor or ~100Mips for D1 video,
 * same sequence.
 *
 * 720 * 480 * 30  ~10MPS
 *
 * So we have roughly 10 clocks per pixel. This is too high, something has
 * to be wrong.
 *
 * OPTIMIZED clip codes to utilize vec_max and vec_packs, removing the
 * need for vec_min.
 *
 * OPTIMIZED DST OUTPUT cache/DMA controls. We are pretty much guaranteed to
 * have the input video frame, it was just decompressed so it probably resides
 * in L1 caches. However, we are creating the output video stream. This needs
 * to use the DSTST instruction to optimize for the cache. We couple this with
 * the fact that we are not going to be visiting the input buffer again, so we
 * mark it Least Recently Used. This shaves 25% of the processor cycles off.
 *
 * Now memcpy is the largest mips consumer in the system, probably due
 * to the inefficient X11 stuff.
 *
 * GL libraries seem to be very slow on this machine, a 1.33GHz PB running
 * Jaguar; this is not the case for my 1GHz PB. I thought it might be
 * a versioning issue, however I have libGL.1.2.dylib for both
 * machines. (We need to figure this out now.)
 *
 * GL2 libraries work now with patch for RGB32.
 *
 * NOTE: quartz vo driver ARGB32_to_RGB24 consumes 30% of the processor.
 *
 * Integrated luma prescaling adjustment for saturation/contrast/brightness
 * adjustment.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <assert.h>

#include "config.h"
#include "libswscale/rgb2rgb.h"
#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/pixdesc.h"
#include "yuv2rgb_altivec.h"

#if HAVE_ALTIVEC

#undef PROFILE_THE_BEAST
#undef INC_SCALING

typedef unsigned char ubyte;
typedef signed char   sbyte;

/* RGB interleaver: 16 planar pels, 8-bit samples per channel, held in the
 * homogeneous vector registers x0, x1, x2, are interleaved with the
 * following technique:
 *
 *    o0 = vec_mergeh(x0, x1);
 *    o1 = vec_perm(o0, x2, perm_rgb_0);
 *    o2 = vec_perm(o0, x2, perm_rgb_1);
 *    o3 = vec_mergel(x0, x1);
 *    o4 = vec_perm(o3, o2, perm_rgb_2);
 *    o5 = vec_perm(o3, o2, perm_rgb_3);
 *
 * perm_rgb_0:   o0(RG).h v1(B) --> o1*
 *            0   1  2   3   4
 *           rgbr|gbrg|brgb|rgbr
 *           0010 0100 1001 0010
 *           0102 3145 2673 894A
 *
 * perm_rgb_1:   o0(RG).h v1(B) --> o2
 *            0   1  2   3   4
 *           gbrg|brgb|bbbb|bbbb
 *           0100 1001 1111 1111
 *           B5CD 6EF7 89AB CDEF
 *
 * perm_rgb_2:   o3(RG).l o2(rgbB.l) --> o4*
 *            0   1  2   3   4
 *           gbrg|brgb|rgbr|gbrg
 *           1111 1111 0010 0100
 *           89AB CDEF 0182 3945
 *
 * perm_rgb_3:   o3(RG).l o2(rgbB.l) ---> o5*
 *            0   1  2   3   4
 *           brgb|rgbr|gbrg|brgb
 *           1001 0010 0100 1001
 *           a67b 89cA BdCD eEFf
 */
static const vector unsigned char
    perm_rgb_0 = { 0x00, 0x01, 0x10, 0x02, 0x03, 0x11, 0x04, 0x05,
                   0x12, 0x06, 0x07, 0x13, 0x08, 0x09, 0x14, 0x0a },
    perm_rgb_1 = { 0x0b, 0x15, 0x0c, 0x0d, 0x16, 0x0e, 0x0f, 0x17,
                   0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
    perm_rgb_2 = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                   0x00, 0x01, 0x18, 0x02, 0x03, 0x19, 0x04, 0x05 },
    perm_rgb_3 = { 0x1a, 0x06, 0x07, 0x1b, 0x08, 0x09, 0x1c, 0x0a,
                   0x0b, 0x1d, 0x0c, 0x0d, 0x1e, 0x0e, 0x0f, 0x1f };
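
/* For reference, a rough scalar sketch of what the interleave above produces
 * (illustrative only, not compiled). Given 16 R, G and B bytes it emits 48
 * interleaved bytes; vec_merge3() below builds the same result with two
 * merges and four permutes instead of a byte loop:
 *
 *     for (i = 0; i < 16; i++) {
 *         out[3 * i + 0] = r[i];
 *         out[3 * i + 1] = g[i];
 *         out[3 * i + 2] = b[i];
 *     }
 */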

#define vec_merge3(x2, x1, x0, y0, y1, y2)     \
    do {                                       \
        __typeof__(x0) o0, o2, o3;             \
        o0 = vec_mergeh(x0, x1);               \
        y0 = vec_perm(o0, x2, perm_rgb_0);     \
        o2 = vec_perm(o0, x2, perm_rgb_1);     \
        o3 = vec_mergel(x0, x1);               \
        y1 = vec_perm(o3, o2, perm_rgb_2);     \
        y2 = vec_perm(o3, o2, perm_rgb_3);     \
    } while (0)

#define vec_mstbgr24(x0, x1, x2, ptr)          \
    do {                                       \
        __typeof__(x0) _0, _1, _2;             \
        vec_merge3(x0, x1, x2, _0, _1, _2);    \
        vec_st(_0, 0, ptr++);                  \
        vec_st(_1, 0, ptr++);                  \
        vec_st(_2, 0, ptr++);                  \
    } while (0)

#define vec_mstrgb24(x0, x1, x2, ptr)          \
    do {                                       \
        __typeof__(x0) _0, _1, _2;             \
        vec_merge3(x2, x1, x0, _0, _1, _2);    \
        vec_st(_0, 0, ptr++);                  \
        vec_st(_1, 0, ptr++);                  \
        vec_st(_2, 0, ptr++);                  \
    } while (0)

/* pack the pixels in rgb0 format
 * msb R
 * lsb 0
 */
#define vec_mstrgb32(T, x0, x1, x2, x3, ptr)                            \
    do {                                                                \
        T _0, _1, _2, _3;                                               \
        _0 = vec_mergeh(x0, x1);                                        \
        _1 = vec_mergeh(x2, x3);                                        \
        _2 = (T) vec_mergeh((vector unsigned short) _0,                 \
                            (vector unsigned short) _1);                \
        _3 = (T) vec_mergel((vector unsigned short) _0,                 \
                            (vector unsigned short) _1);                \
        vec_st(_2, 0 * 16, (T *) ptr);                                  \
        vec_st(_3, 1 * 16, (T *) ptr);                                  \
        _0 = vec_mergel(x0, x1);                                        \
        _1 = vec_mergel(x2, x3);                                        \
        _2 = (T) vec_mergeh((vector unsigned short) _0,                 \
                            (vector unsigned short) _1);                \
        _3 = (T) vec_mergel((vector unsigned short) _0,                 \
                            (vector unsigned short) _1);                \
        vec_st(_2, 2 * 16, (T *) ptr);                                  \
        vec_st(_3, 3 * 16, (T *) ptr);                                  \
        ptr += 4;                                                       \
    } while (0)
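
/* Note on vec_mstrgb32(): two rounds of 8-bit and 16-bit merges interleave
 * the four byte planes so that, in big-endian element order, pixel i is
 * stored as the bytes x0[i] x1[i] x2[i] x3[i]. The out_* wrappers further
 * down simply choose the argument order (and a constant 255 plane for the
 * alpha byte) to match each packed output format. */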

/*
 * 1     0       1.4021   | | Y |
 * 1    -0.3441 -0.7142   |x| Cb|
 * 1     1.7718  0        | | Cr|
 *
 *
 * Y:      [-128 127]
 * Cb/Cr : [-128 127]
 *
 * Typical YUV conversion works on Y: 0-255; this version has been
 * optimized for JPEG decoding.
 */
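
/* Scalar reference for the matrix above (illustrative only, not compiled),
 * with Y, Cb and Cr already in the signed ranges stated above:
 *
 *     R = Y + 1.4021 * Cr
 *     G = Y - 0.3441 * Cb - 0.7142 * Cr
 *     B = Y + 1.7718 * Cb
 *
 * The vector code performs the same math in Q15 fixed point through
 * vec_mradds(), using coefficients set up in ff_yuv2rgb_init_tables_ppc(). */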

#if HAVE_BIGENDIAN
#define vec_unh(x)                                                      \
    (vector signed short)                                               \
        vec_perm(x, (__typeof__(x)) { 0 },                              \
                 ((vector unsigned char) {                              \
                     0x10, 0x00, 0x10, 0x01, 0x10, 0x02, 0x10, 0x03,    \
                     0x10, 0x04, 0x10, 0x05, 0x10, 0x06, 0x10, 0x07 }))

#define vec_unl(x)                                                      \
    (vector signed short)                                               \
        vec_perm(x, (__typeof__(x)) { 0 },                              \
                 ((vector unsigned char) {                              \
                     0x10, 0x08, 0x10, 0x09, 0x10, 0x0A, 0x10, 0x0B,    \
                     0x10, 0x0C, 0x10, 0x0D, 0x10, 0x0E, 0x10, 0x0F }))
#else
#define vec_unh(x) (vector signed short) vec_mergeh(x, (__typeof__(x)) { 0 })
#define vec_unl(x) (vector signed short) vec_mergel(x, (__typeof__(x)) { 0 })
#endif

#define vec_clip_s16(x)                                                 \
    vec_max(vec_min(x, ((vector signed short) {                         \
                    235, 235, 235, 235, 235, 235, 235, 235 })),         \
            ((vector signed short) { 16, 16, 16, 16, 16, 16, 16, 16 }))

#define vec_packclp(x, y)                                               \
    (vector unsigned char)                                              \
        vec_packs((vector unsigned short)                               \
                      vec_max(x, ((vector signed short) { 0 })),        \
                  (vector unsigned short)                               \
                      vec_max(y, ((vector signed short) { 0 })))
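
/* vec_clip_s16() clamps each signed 16-bit element to the CCIR range
 * [16, 235]. vec_packclp() clamps two vectors of signed 16-bit components
 * to [0, 255] and packs them into 16 unsigned bytes: vec_max() against zero
 * removes negative values, and reinterpreting the results as unsigned lets
 * vec_packs() saturate the high side at 255 without a separate vec_min(). */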

static inline void cvtyuvtoRGB(SwsContext *c, vector signed short Y,
                               vector signed short U, vector signed short V,
                               vector signed short *R, vector signed short *G,
                               vector signed short *B)
{
    vector signed short vx, ux, uvx;

    Y = vec_mradds(Y, c->CY, c->OY);
    U = vec_sub(U, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));
    V = vec_sub(V, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));

    // ux  = (CBU * (u << c->CSHIFT) + 0x4000) >> 15;
    ux = vec_sl(U, c->CSHIFT);
    *B = vec_mradds(ux, c->CBU, Y);

    // vx  = (CRV * (v << c->CSHIFT) + 0x4000) >> 15;
    vx = vec_sl(V, c->CSHIFT);
    *R = vec_mradds(vx, c->CRV, Y);

    // uvx = ((CGU * u) + (CGV * v)) >> 15;
    uvx = vec_mradds(U, c->CGU, Y);
    *G  = vec_mradds(V, c->CGV, uvx);
}
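
/* The fixed-point kernel above relies on vec_mradds(a, b, c), which computes
 * saturate(((a * b + 0x4000) >> 15) + c) per 16-bit element. A rough scalar
 * sketch of the blue channel (illustrative only, not compiled):
 *
 *     y = ((Y * CY + 0x4000) >> 15) + OY;
 *     b = ((((U - 128) << CSHIFT) * CBU + 0x4000) >> 15) + y;
 */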

/*
 * ------------------------------------------------------------------------------
 * CS converters
 * ------------------------------------------------------------------------------
 */

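/* DEFCSP420_CVT() expands to a complete 4:2:0 slice converter for one packed
 * output format: each outer iteration consumes two luma rows and one row of
 * U and V, the inner loop converts 16 pixels at a time, and the even/odd
 * output rows are written through oute/outo. vec_dstst() issues a
 * data-stream-touch-for-store hint so the output rows are prefetched into
 * the cache ahead of the stores. */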
#define DEFCSP420_CVT(name, out_pixels)                                       \
static int altivec_ ## name(SwsContext *c, const unsigned char **in,          \
                            int *instrides, int srcSliceY, int srcSliceH,     \
                            unsigned char **oplanes, int *outstrides)         \
{                                                                             \
    int w = c->srcW;                                                          \
    int h = srcSliceH;                                                        \
    int i, j;                                                                 \
    int instrides_scl[3];                                                     \
    vector unsigned char y0, y1;                                              \
                                                                              \
    vector signed char u, v;                                                  \
                                                                              \
    vector signed short Y0, Y1, Y2, Y3;                                       \
    vector signed short U, V;                                                 \
    vector signed short vx, ux, uvx;                                          \
    vector signed short vx0, ux0, uvx0;                                       \
    vector signed short vx1, ux1, uvx1;                                       \
    vector signed short R0, G0, B0;                                           \
    vector signed short R1, G1, B1;                                           \
    vector unsigned char R, G, B;                                             \
                                                                              \
    const vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP;                  \
    vector unsigned char align_perm;                                          \
                                                                              \
    vector signed short lCY       = c->CY;                                    \
    vector signed short lOY       = c->OY;                                    \
    vector signed short lCRV      = c->CRV;                                   \
    vector signed short lCBU      = c->CBU;                                   \
    vector signed short lCGU      = c->CGU;                                   \
    vector signed short lCGV      = c->CGV;                                   \
    vector unsigned short lCSHIFT = c->CSHIFT;                                \
                                                                              \
    const ubyte *y1i = in[0];                                                 \
    const ubyte *y2i = in[0] + instrides[0];                                  \
    const ubyte *ui  = in[1];                                                 \
    const ubyte *vi  = in[2];                                                 \
                                                                              \
    vector unsigned char *oute, *outo;                                        \
                                                                              \
    /* loop moves y{1, 2}i by w */                                            \
    instrides_scl[0] = instrides[0] * 2 - w;                                  \
    /* loop moves ui by w / 2 */                                              \
    instrides_scl[1] = instrides[1] - w / 2;                                  \
    /* loop moves vi by w / 2 */                                              \
    instrides_scl[2] = instrides[2] - w / 2;                                  \
                                                                              \
    for (i = 0; i < h / 2; i++) {                                             \
        oute = (vector unsigned char *)(oplanes[0] + outstrides[0] *          \
                                        (srcSliceY + i * 2));                 \
        outo = oute + (outstrides[0] >> 4);                                   \
        vec_dstst(outo, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 0);       \
        vec_dstst(oute, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 1);       \
                                                                              \
        for (j = 0; j < w / 16; j++) {                                        \
            y1ivP = (const vector unsigned char *) y1i;                       \
            y2ivP = (const vector unsigned char *) y2i;                       \
            uivP  = (const vector unsigned char *) ui;                        \
            vivP  = (const vector unsigned char *) vi;                        \
                                                                              \
            align_perm = vec_lvsl(0, y1i);                                    \
            y0 = (vector unsigned char)                                       \
                     vec_perm(y1ivP[0], y1ivP[1], align_perm);                \
                                                                              \
            align_perm = vec_lvsl(0, y2i);                                    \
            y1 = (vector unsigned char)                                       \
                     vec_perm(y2ivP[0], y2ivP[1], align_perm);                \
                                                                              \
            align_perm = vec_lvsl(0, ui);                                     \
            u = (vector signed char)                                          \
                    vec_perm(uivP[0], uivP[1], align_perm);                   \
                                                                              \
            align_perm = vec_lvsl(0, vi);                                     \
            v = (vector signed char)                                          \
                    vec_perm(vivP[0], vivP[1], align_perm);                   \
                                                                              \
            u = (vector signed char)                                          \
                    vec_sub(u,                                                \
                            (vector signed char)                              \
                                vec_splat((vector signed char) { 128 }, 0));  \
            v = (vector signed char)                                          \
                    vec_sub(v,                                                \
                            (vector signed char)                              \
                                vec_splat((vector signed char) { 128 }, 0));  \
                                                                              \
            U = vec_unpackh(u);                                               \
            V = vec_unpackh(v);                                               \
                                                                              \
            Y0 = vec_unh(y0);                                                 \
            Y1 = vec_unl(y0);                                                 \
            Y2 = vec_unh(y1);                                                 \
            Y3 = vec_unl(y1);                                                 \
                                                                              \
            Y0 = vec_mradds(Y0, lCY, lOY);                                    \
            Y1 = vec_mradds(Y1, lCY, lOY);                                    \
            Y2 = vec_mradds(Y2, lCY, lOY);                                    \
            Y3 = vec_mradds(Y3, lCY, lOY);                                    \
                                                                              \
            /* ux  = (CBU * (u << CSHIFT) + 0x4000) >> 15 */                  \
            ux  = vec_sl(U, lCSHIFT);                                         \
            ux  = vec_mradds(ux, lCBU, (vector signed short) { 0 });          \
            ux0 = vec_mergeh(ux, ux);                                         \
            ux1 = vec_mergel(ux, ux);                                         \
                                                                              \
            /* vx  = (CRV * (v << CSHIFT) + 0x4000) >> 15; */                 \
            vx  = vec_sl(V, lCSHIFT);                                         \
            vx  = vec_mradds(vx, lCRV, (vector signed short) { 0 });          \
            vx0 = vec_mergeh(vx, vx);                                         \
            vx1 = vec_mergel(vx, vx);                                         \
                                                                              \
            /* uvx = ((CGU * u) + (CGV * v)) >> 15 */                         \
            uvx  = vec_mradds(U, lCGU, (vector signed short) { 0 });          \
            uvx  = vec_mradds(V, lCGV, uvx);                                  \
            uvx0 = vec_mergeh(uvx, uvx);                                      \
            uvx1 = vec_mergel(uvx, uvx);                                      \
                                                                              \
            R0 = vec_add(Y0, vx0);                                            \
            G0 = vec_add(Y0, uvx0);                                           \
            B0 = vec_add(Y0, ux0);                                            \
            R1 = vec_add(Y1, vx1);                                            \
            G1 = vec_add(Y1, uvx1);                                           \
            B1 = vec_add(Y1, ux1);                                            \
                                                                              \
            R = vec_packclp(R0, R1);                                          \
            G = vec_packclp(G0, G1);                                          \
            B = vec_packclp(B0, B1);                                          \
                                                                              \
            out_pixels(R, G, B, oute);                                        \
                                                                              \
            R0 = vec_add(Y2, vx0);                                            \
            G0 = vec_add(Y2, uvx0);                                           \
            B0 = vec_add(Y2, ux0);                                            \
            R1 = vec_add(Y3, vx1);                                            \
            G1 = vec_add(Y3, uvx1);                                           \
            B1 = vec_add(Y3, ux1);                                            \
            R  = vec_packclp(R0, R1);                                         \
            G  = vec_packclp(G0, G1);                                         \
            B  = vec_packclp(B0, B1);                                         \
                                                                              \
                                                                              \
            out_pixels(R, G, B, outo);                                        \
                                                                              \
            y1i += 16;                                                        \
            y2i += 16;                                                        \
            ui  += 8;                                                         \
            vi  += 8;                                                         \
        }                                                                     \
                                                                              \
        ui  += instrides_scl[1];                                              \
        vi  += instrides_scl[2];                                              \
        y1i += instrides_scl[0];                                              \
        y2i += instrides_scl[0];                                              \
    }                                                                         \
    return srcSliceH;                                                         \
}

#define out_abgr(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), c, b, a, ptr)
#define out_bgra(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), c, b, a, ((__typeof__(a)) { 255 }), ptr)
#define out_rgba(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), a, b, c, ((__typeof__(a)) { 255 }), ptr)
#define out_argb(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), a, b, c, ptr)
#define out_rgb24(a, b, c, ptr) vec_mstrgb24(a, b, c, ptr)
#define out_bgr24(a, b, c, ptr) vec_mstbgr24(a, b, c, ptr)

DEFCSP420_CVT(yuv2_abgr,  out_abgr)
DEFCSP420_CVT(yuv2_bgra,  out_bgra)
DEFCSP420_CVT(yuv2_rgba,  out_rgba)
DEFCSP420_CVT(yuv2_argb,  out_argb)
DEFCSP420_CVT(yuv2_rgb24, out_rgb24)
DEFCSP420_CVT(yuv2_bgr24, out_bgr24)
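
/* The instantiations above generate altivec_yuv2_abgr() through
 * altivec_yuv2_bgr24(); ff_yuv2rgb_init_ppc() below hands them out as the
 * unscaled conversion function for the matching destination format. */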

// uyvy|uyvy|uyvy|uyvy
// 0123 4567 89ab cdef
static const vector unsigned char
    demux_u = { 0x10, 0x00, 0x10, 0x00,
                0x10, 0x04, 0x10, 0x04,
                0x10, 0x08, 0x10, 0x08,
                0x10, 0x0c, 0x10, 0x0c },
    demux_v = { 0x10, 0x02, 0x10, 0x02,
                0x10, 0x06, 0x10, 0x06,
                0x10, 0x0A, 0x10, 0x0A,
                0x10, 0x0E, 0x10, 0x0E },
    demux_y = { 0x10, 0x01, 0x10, 0x03,
                0x10, 0x05, 0x10, 0x07,
                0x10, 0x09, 0x10, 0x0B,
                0x10, 0x0D, 0x10, 0x0F };
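
/* Each demux_* table drives a vec_perm() against an all-zero second operand
 * (index 0x10 and up selects a zero byte), yielding zero-extended 16-bit
 * lanes: demux_y extracts the eight luma samples of a 16-byte UYVY block,
 * while demux_u/demux_v extract the four U/V samples and duplicate each one
 * into two adjacent lanes, one per luma sample that shares it. */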

/*
 * this is so I can play live CCIR raw video
 */
static int altivec_uyvy_rgb32(SwsContext *c, const unsigned char **in,
                              int *instrides, int srcSliceY, int srcSliceH,
                              unsigned char **oplanes, int *outstrides)
{
    int w = c->srcW;
    int h = srcSliceH;
    int i, j;
    vector unsigned char uyvy;
    vector signed short Y, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;
    vector unsigned char R, G, B;
    vector unsigned char *out;
    const ubyte *img;

    img = in[0];
    out = (vector unsigned char *) (oplanes[0] + srcSliceY * outstrides[0]);

    for (i = 0; i < h; i++)
        for (j = 0; j < w / 16; j++) {
            uyvy = vec_ld(0, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R0, &G0, &B0);

            uyvy = vec_ld(16, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R1, &G1, &B1);

            R = vec_packclp(R0, R1);
            G = vec_packclp(G0, G1);
            B = vec_packclp(B0, B1);

            // vec_mstbgr24 (R,G,B, out);
            out_rgba(R, G, B, out);

            img += 32;
        }
    return srcSliceH;
}

#endif /* HAVE_ALTIVEC */

/* OK, currently the acceleration routine only supports
 * inputs with a width that is a multiple of 16
 * and a height that is a multiple of 2.
 *
 * So we just fall back to the C code for anything else.
 */
av_cold SwsFunc ff_yuv2rgb_init_ppc(SwsContext *c)
{
#if HAVE_ALTIVEC
    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return NULL;

    /*
     * This seems not to matter too much; I tried a bunch of
     * videos with abnormal widths and MPlayer crashes elsewhere.
     * mplayer -vo x11 -rawvideo on:w=350:h=240 raw-350x240.eyuv
     * boom with X11 bad match.
     */
    if ((c->srcW & 0xf) != 0)
        return NULL;

    switch (c->srcFormat) {
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV420P:
    /*case IMGFMT_CLPL:        ??? */
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_NV12:
    case AV_PIX_FMT_NV21:
        if ((c->srcH & 0x1) != 0)
            return NULL;

        switch (c->dstFormat) {
        case AV_PIX_FMT_RGB24:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGB24\n");
            return altivec_yuv2_rgb24;
        case AV_PIX_FMT_BGR24:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGR24\n");
            return altivec_yuv2_bgr24;
        case AV_PIX_FMT_ARGB:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ARGB\n");
            return altivec_yuv2_argb;
        case AV_PIX_FMT_ABGR:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ABGR\n");
            return altivec_yuv2_abgr;
        case AV_PIX_FMT_RGBA:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGBA\n");
            return altivec_yuv2_rgba;
        case AV_PIX_FMT_BGRA:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGRA\n");
            return altivec_yuv2_bgra;
        default: return NULL;
        }
        break;

    case AV_PIX_FMT_UYVY422:
        switch (c->dstFormat) {
        case AV_PIX_FMT_BGR32:
            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space UYVY -> RGB32\n");
            return altivec_uyvy_rgb32;
        default: return NULL;
        }
        break;
    }
#endif /* HAVE_ALTIVEC */

    return NULL;
}

av_cold void ff_yuv2rgb_init_tables_ppc(SwsContext *c,
                                        const int inv_table[4],
                                        int brightness,
                                        int contrast,
                                        int saturation)
{
#if HAVE_ALTIVEC
    union {
        DECLARE_ALIGNED(16, signed short, tmp)[8];
        vector signed short vec;
    } buf;

    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return;

    buf.tmp[0] = ((0xffffLL) * contrast >> 8) >> 9;                               // cy
    buf.tmp[1] = -256 * brightness;                                               // oy
    buf.tmp[2] =   (inv_table[0] >> 3) * (contrast >> 16) * (saturation >> 16);   // crv
    buf.tmp[3] =   (inv_table[1] >> 3) * (contrast >> 16) * (saturation >> 16);   // cbu
    buf.tmp[4] = -((inv_table[2] >> 1) * (contrast >> 16) * (saturation >> 16));  // cgu
    buf.tmp[5] = -((inv_table[3] >> 1) * (contrast >> 16) * (saturation >> 16));  // cgv

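    /* These are Q15 fixed-point coefficients: the conversion code feeds them
     * to vec_mradds(), i.e. ((a * b + 0x4000) >> 15) + c per element, and the
     * blue/red chroma terms are additionally pre-shifted left by CSHIFT
     * before the multiply. Below they are splatted across vector registers. */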
    c->CSHIFT = (vector unsigned short) vec_splat_u16(2);
    c->CY     = vec_splat((vector signed short) buf.vec, 0);
    c->OY     = vec_splat((vector signed short) buf.vec, 1);
    c->CRV    = vec_splat((vector signed short) buf.vec, 2);
    c->CBU    = vec_splat((vector signed short) buf.vec, 3);
    c->CGU    = vec_splat((vector signed short) buf.vec, 4);
    c->CGV    = vec_splat((vector signed short) buf.vec, 5);
    return;
#endif /* HAVE_ALTIVEC */
}

#if HAVE_ALTIVEC

static av_always_inline void yuv2packedX_altivec(SwsContext *c,
                                                 const int16_t *lumFilter,
                                                 const int16_t **lumSrc,
                                                 int lumFilterSize,
                                                 const int16_t *chrFilter,
                                                 const int16_t **chrUSrc,
                                                 const int16_t **chrVSrc,
                                                 int chrFilterSize,
                                                 const int16_t **alpSrc,
                                                 uint8_t *dest,
                                                 int dstW, int dstY,
                                                 enum AVPixelFormat target)
{
    int i, j;
    vector signed short X, X0, X1, Y0, U0, V0, Y1, U1, V1, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;

    vector unsigned char R, G, B;
    vector unsigned char *out, *nout;

    vector signed short RND   = vec_splat_s16(1 << 3);
    vector unsigned short SCL = vec_splat_u16(4);
    DECLARE_ALIGNED(16, unsigned int, scratch)[16];

    vector signed short *YCoeffs, *CCoeffs;

    YCoeffs = c->vYCoeffsBank + dstY * lumFilterSize;
    CCoeffs = c->vCCoeffsBank + dstY * chrFilterSize;
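
    /* vYCoeffsBank / vCCoeffsBank are filled at init time elsewhere in
     * swscale with the vertical filter coefficients splatted into vectors,
     * one entry per (output line, filter tap), so the loops below are a
     * plain multiply-accumulate over the taps with vec_mradds(). */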

    out = (vector unsigned char *) dest;

    for (i = 0; i < dstW; i += 16) {
        Y0 = RND;
        Y1 = RND;
        /* extract 16 coeffs from lumSrc */
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0, &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }

        U = RND;
        V = RND;
        /* extract 8 coeffs from U,V */
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }

        /* scale and clip signals */
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);

        /* now we have
         * Y0 = y0 y1 y2 y3 y4 y5 y6 y7    Y1 = y8 y9 y10 y11 y12 y13 y14 y15
         * U  = u0 u1 u2 u3 u4 u5 u6 u7    V  = v0 v1 v2 v3 v4 v5 v6 v7
         *
         * Y0 = y0 y1 y2 y3 y4 y5 y6 y7    Y1 = y8 y9 y10 y11 y12 y13 y14 y15
         * U0 = u0 u0 u1 u1 u2 u2 u3 u3    U1 = u4 u4 u5 u5 u6 u6 u7 u7
         * V0 = v0 v0 v1 v1 v2 v2 v3 v3    V1 = v4 v4 v5 v5 v6 v6 v7 v7
         */

        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);

        switch (target) {
        case AV_PIX_FMT_ABGR:
            out_abgr(R, G, B, out);
            break;
        case AV_PIX_FMT_BGRA:
            out_bgra(R, G, B, out);
            break;
        case AV_PIX_FMT_RGBA:
            out_rgba(R, G, B, out);
            break;
        case AV_PIX_FMT_ARGB:
            out_argb(R, G, B, out);
            break;
        case AV_PIX_FMT_RGB24:
            out_rgb24(R, G, B, out);
            break;
        case AV_PIX_FMT_BGR24:
            out_bgr24(R, G, B, out);
            break;
        default:
        {
            /* If this is reached, the caller should have called yuv2packedXinC
             * instead. */
            static int printed_error_message;
            if (!printed_error_message) {
                av_log(c, AV_LOG_ERROR,
                       "altivec_yuv2packedX doesn't support %s output\n",
                       av_get_pix_fmt_name(c->dstFormat));
                printed_error_message = 1;
            }
            return;
        }
        }
    }

    if (i < dstW) {
        i -= 16;

        Y0 = RND;
        Y1 = RND;
        /* extract 16 coeffs from lumSrc */
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0, &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }

        U = RND;
        V = RND;
        /* extract 8 coeffs from U,V */
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }

        /* scale and clip signals */
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);

        /* now we have
         * Y0 = y0 y1 y2 y3 y4 y5 y6 y7    Y1 = y8 y9 y10 y11 y12 y13 y14 y15
         * U  = u0 u1 u2 u3 u4 u5 u6 u7    V  = v0 v1 v2 v3 v4 v5 v6 v7
         *
         * Y0 = y0 y1 y2 y3 y4 y5 y6 y7    Y1 = y8 y9 y10 y11 y12 y13 y14 y15
         * U0 = u0 u0 u1 u1 u2 u2 u3 u3    U1 = u4 u4 u5 u5 u6 u6 u7 u7
         * V0 = v0 v0 v1 v1 v2 v2 v3 v3    V1 = v4 v4 v5 v5 v6 v6 v7 v7
         */

        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);

        nout = (vector unsigned char *) scratch;
        switch (target) {
        case AV_PIX_FMT_ABGR:
            out_abgr(R, G, B, nout);
            break;
        case AV_PIX_FMT_BGRA:
            out_bgra(R, G, B, nout);
            break;
        case AV_PIX_FMT_RGBA:
            out_rgba(R, G, B, nout);
            break;
        case AV_PIX_FMT_ARGB:
            out_argb(R, G, B, nout);
            break;
        case AV_PIX_FMT_RGB24:
            out_rgb24(R, G, B, nout);
            break;
        case AV_PIX_FMT_BGR24:
            out_bgr24(R, G, B, nout);
            break;
        default:
            /* Unreachable, I think. */
            av_log(c, AV_LOG_ERROR,
                   "altivec_yuv2packedX doesn't support %s output\n",
                   av_get_pix_fmt_name(c->dstFormat));
            return;
        }

        memcpy(&((uint32_t *) dest)[i], scratch, (dstW - i) / 4);
    }
}

#define YUV2PACKEDX_WRAPPER(suffix, pixfmt)                             \
void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c,                     \
                                     const int16_t *lumFilter,          \
                                     const int16_t **lumSrc,            \
                                     int lumFilterSize,                 \
                                     const int16_t *chrFilter,          \
                                     const int16_t **chrUSrc,           \
                                     const int16_t **chrVSrc,           \
                                     int chrFilterSize,                 \
                                     const int16_t **alpSrc,            \
                                     uint8_t *dest, int dstW, int dstY) \
{                                                                       \
    yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,            \
                        chrFilter, chrUSrc, chrVSrc,                    \
                        chrFilterSize, alpSrc,                          \
                        dest, dstW, dstY, pixfmt);                      \
}

YUV2PACKEDX_WRAPPER(abgr,  AV_PIX_FMT_ABGR);
YUV2PACKEDX_WRAPPER(bgra,  AV_PIX_FMT_BGRA);
YUV2PACKEDX_WRAPPER(argb,  AV_PIX_FMT_ARGB);
YUV2PACKEDX_WRAPPER(rgba,  AV_PIX_FMT_RGBA);
YUV2PACKEDX_WRAPPER(rgb24, AV_PIX_FMT_RGB24);
YUV2PACKEDX_WRAPPER(bgr24, AV_PIX_FMT_BGR24);

#endif /* HAVE_ALTIVEC */