/*
 * VP8 compatible video decoder
 *
 * Copyright (C) 2010 David Conrad
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/util_altivec.h"

#include "libavcodec/vp8dsp.h"

#include "hpeldsp_altivec.h"

#if HAVE_ALTIVEC && HAVE_BIGENDIAN
#define REPT4(...) { __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__ }

// h subpel filter uses msum to multiply+add 4 pixel taps at once
static const vec_s8 h_subpel_filters_inner[7] =
{
    REPT4( -6, 123,  12,  -1),
    REPT4(-11, 108,  36,  -8),
    REPT4( -9,  93,  50,  -6),
    REPT4(-16,  77,  77, -16),
    REPT4( -6,  50,  93,  -9),
    REPT4( -8,  36, 108, -11),
    REPT4( -1,  12, 123,  -6),
};
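
// Each entry holds the middle four taps of one of VP8's subpel filters,
// repeated so a single vec_msum covers four output pixels. Index i is
// subpel position mx = i+1; positions 1, 3, 5 and 7 have zero outer taps
// and take the 4-tap path, the rest also need the outer taps below.
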
// for 6tap filters, these are the outer two taps
// The zeros mask off pixels 4-7 when filtering 0-3
// and vice-versa
static const vec_s8 h_subpel_filters_outer[3] =
{
    REPT4(0, 0, 2, 1),
    REPT4(0, 0, 3, 3),
    REPT4(0, 0, 1, 2),
};

#define LOAD_H_SUBPEL_FILTER(i) \
    vec_s8 filter_inner  = h_subpel_filters_inner[i]; \
    vec_s8 filter_outerh = h_subpel_filters_outer[(i)>>1]; \
    vec_s8 filter_outerl = vec_sld(filter_outerh, filter_outerh, 2)
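
// For output pixel n, each 4-byte msum group of the permuted "outer"
// vector is { n+4, n+9, n, n+5 } (indices into the loaded row), so
// filter_outerh ({ 0, 0, t0, t5 } per group) applies the outer taps for
// pixels 0-3 while the rotated filter_outerl ({ t0, t5, 0, 0 }) applies
// them for pixels 4-7 from the very same vector.
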
#define FILTER_H(dstv, off) \
    a = vec_ld((off)-is6tap-1,    src); \
    b = vec_ld((off)-is6tap-1+15, src); \
\
    pixh  = vec_perm(a, b, permh##off); \
    pixl  = vec_perm(a, b, perml##off); \
    filth = vec_msum(filter_inner, pixh, c64); \
    filtl = vec_msum(filter_inner, pixl, c64); \
\
    if (is6tap) { \
        outer = vec_perm(a, b, perm_6tap##off); \
        filth = vec_msum(filter_outerh, outer, filth); \
        filtl = vec_msum(filter_outerl, outer, filtl); \
    } \
    if (w == 4) \
        filtl = filth; /* discard pixels 4-7 */ \
    dstv  = vec_packs(filth, filtl); \
    dstv  = vec_sra(dstv, c7)
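
// Scalar equivalent for one 6-tap output pixel, for reference:
//   dst[x] = clip_uint8((f[0]*src[x-2] + f[1]*src[x-1] + f[2]*src[x  ]
//                      + f[3]*src[x+1] + f[4]*src[x+2] + f[5]*src[x+3]
//                      + 64) >> 7)
// c64 seeds the vec_msum accumulator with the +64 rounding bias,
// vec_packs saturates the 32-bit sums to 16 bits, vec_sra shifts,
// and the caller's vec_packsu does the final clip to 0-255.
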
static av_always_inline
void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
                                 uint8_t *src, ptrdiff_t src_stride,
                                 int h, int mx, int w, int is6tap)
{
    LOAD_H_SUBPEL_FILTER(mx-1);
    vec_u8 align_vec0, align_vec8, permh0, permh8, filt;
    vec_u8 perm_6tap0, perm_6tap8, perml0, perml8;
    vec_u8 a, b, pixh, pixl, outer;
    vec_s16 f16h, f16l;
    vec_s32 filth, filtl;

    vec_u8 perm_inner6 = { 1,2,3,4, 2,3,4,5, 3,4,5,6, 4,5,6,7 };
    vec_u8 perm_inner4 = { 0,1,2,3, 1,2,3,4, 2,3,4,5, 3,4,5,6 };
    vec_u8 perm_inner  = is6tap ? perm_inner6 : perm_inner4;
    vec_u8 perm_outer  = { 4,9, 0,5, 5,10, 1,6, 6,11, 2,7, 7,12, 3,8 };
    vec_s32 c64 = vec_sl(vec_splat_s32(1), vec_splat_u32(6));
    vec_u16 c7  = vec_splat_u16(7);

    align_vec0 = vec_lvsl( -is6tap-1, src);
    align_vec8 = vec_lvsl(8-is6tap-1, src);

    permh0     = vec_perm(align_vec0, align_vec0, perm_inner);
    permh8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_inner = vec_add(perm_inner, vec_splat_u8(4));
    perml0     = vec_perm(align_vec0, align_vec0, perm_inner);
    perml8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_6tap0 = vec_perm(align_vec0, align_vec0, perm_outer);
    perm_6tap8 = vec_perm(align_vec8, align_vec8, perm_outer);

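    // each output row now needs just two aligned loads: the lvsl
    // alignment permute has been folded into the pixel permutes above,
    // so FILTER_H goes straight from raw loads to vec_msum inputs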
    while (h-- > 0) {
        FILTER_H(f16h, 0);

        if (w == 16) {
            FILTER_H(f16l, 8);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
            if (w == 8)
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
        }
        src += src_stride;
        dst += dst_stride;
    }
}

// v subpel filter does a simple vertical multiply + add
static const vec_u8 v_subpel_filters[7] =
{
    { 0,  6, 123,  12,  1,  0 },
    { 2, 11, 108,  36,  8,  1 },
    { 0,  9,  93,  50,  6,  0 },
    { 3, 16,  77,  77, 16,  3 },
    { 0,  6,  50,  93,  9,  0 },
    { 1,  8,  36, 108, 11,  2 },
    { 0,  1,  12, 123,  6,  0 },
};
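
// Unlike the horizontal tables, these are unsigned magnitudes; FILTER_V
// restores the filter's sign pattern (+ - + + - +) by subtracting the
// tap 1 and tap 4 products and adding the rest.
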
#define LOAD_V_SUBPEL_FILTER(i) \
    vec_u8 subpel_filter = v_subpel_filters[i]; \
    vec_u8 f0 = vec_splat(subpel_filter, 0); \
    vec_u8 f1 = vec_splat(subpel_filter, 1); \
    vec_u8 f2 = vec_splat(subpel_filter, 2); \
    vec_u8 f3 = vec_splat(subpel_filter, 3); \
    vec_u8 f4 = vec_splat(subpel_filter, 4); \
    vec_u8 f5 = vec_splat(subpel_filter, 5)

#define FILTER_V(dstv, vec_mul) \
    s1f = (vec_s16)vec_mul(s1, f1); \
    s2f = (vec_s16)vec_mul(s2, f2); \
    s3f = (vec_s16)vec_mul(s3, f3); \
    s4f = (vec_s16)vec_mul(s4, f4); \
    s2f = vec_subs(s2f, s1f); \
    s3f = vec_subs(s3f, s4f); \
    if (is6tap) { \
        s0f = (vec_s16)vec_mul(s0, f0); \
        s5f = (vec_s16)vec_mul(s5, f5); \
        s2f = vec_adds(s2f, s0f); \
        s3f = vec_adds(s3f, s5f); \
    } \
    dstv = vec_adds(s2f, s3f); \
    dstv = vec_adds(dstv, c64); \
    dstv = vec_sra(dstv, c7)
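
// Note on the saturating adds/subs: with VP8's coefficients the
// intermediate 16-bit sums can only saturate in cases where the exact
// result would be clipped to 0 or 255 by the final vec_packsu anyway,
// so the shortcut does not change the output.
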
static av_always_inline
void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
                                 uint8_t *src, ptrdiff_t src_stride,
                                 int h, int my, int w, int is6tap)
{
    LOAD_V_SUBPEL_FILTER(my-1);
    vec_u8 s0, s1, s2, s3, s4, s5, filt, align_vech, perm_vec, align_vecl;
    vec_s16 s0f, s1f, s2f, s3f, s4f, s5f, f16h, f16l;
    vec_s16 c64 = vec_sl(vec_splat_s16(1), vec_splat_u16(6));
    vec_u16 c7  = vec_splat_u16(7);

    // we want pixels 0-7 to be in the even positions and 8-15 in the odd,
    // so combine this permute with the alignment permute vector
    align_vech = vec_lvsl(0, src);
    align_vecl = vec_sld(align_vech, align_vech, 8);
    if (w == 16)
        perm_vec = vec_mergeh(align_vech, align_vecl);
    else
        perm_vec = vec_mergeh(align_vech, align_vech);

    if (is6tap)
        s0 = load_with_perm_vec(-2*src_stride, src, perm_vec);
    s1 = load_with_perm_vec(-1*src_stride, src, perm_vec);
    s2 = load_with_perm_vec( 0*src_stride, src, perm_vec);
    s3 = load_with_perm_vec( 1*src_stride, src, perm_vec);
    if (is6tap)
        s4 = load_with_perm_vec( 2*src_stride, src, perm_vec);

    src += (2+is6tap)*src_stride;

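    // the loop keeps a sliding window of source rows (s0-s5 for 6-tap,
    // s1-s4 for 4-tap), so each output row costs a single new load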
    while (h-- > 0) {
        if (is6tap)
            s5 = load_with_perm_vec(0, src, perm_vec);
        else
            s4 = load_with_perm_vec(0, src, perm_vec);

        FILTER_V(f16h, vec_mule);

        if (w == 16) {
            FILTER_V(f16l, vec_mulo);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            if (w == 4)
                filt = (vec_u8)vec_splat((vec_u32)filt, 0);
            else
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
        }

        if (is6tap)
            s0 = s1;
        s1 = s2;
        s2 = s3;
        s3 = s4;
        if (is6tap)
            s4 = s5;

        dst += dst_stride;
        src += src_stride;
    }
}

#define EPEL_FUNCS(WIDTH, TAPS) \
static av_noinline \
void put_vp8_epel ## WIDTH ## _h ## TAPS ## _altivec(uint8_t *dst, ptrdiff_t dst_stride, uint8_t *src, ptrdiff_t src_stride, int h, int mx, int my) \
{ \
    put_vp8_epel_h_altivec_core(dst, dst_stride, src, src_stride, h, mx, WIDTH, TAPS == 6); \
} \
\
static av_noinline \
void put_vp8_epel ## WIDTH ## _v ## TAPS ## _altivec(uint8_t *dst, ptrdiff_t dst_stride, uint8_t *src, ptrdiff_t src_stride, int h, int mx, int my) \
{ \
    put_vp8_epel_v_altivec_core(dst, dst_stride, src, src_stride, h, my, WIDTH, TAPS == 6); \
}

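// The h+v cases filter horizontally into an aligned temp buffer, then
// vertically from there; the horizontal pass also covers the extra rows
// the vertical filter reads above and below the block (two above and
// three below for 6-tap), hence the h+5/h+4 heights and the negative
// source offsets below.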
#define EPEL_HV(WIDTH, HTAPS, VTAPS) \
static void put_vp8_epel ## WIDTH ## _h ## HTAPS ## v ## VTAPS ## _altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
{ \
    DECLARE_ALIGNED(16, uint8_t, tmp)[(2*WIDTH+5)*16]; \
    if (VTAPS == 6) { \
        put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16,      src-2*sstride, sstride, h+5, mx, my); \
        put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, dstride, tmp+2*16,      16,      h,   mx, my); \
    } else { \
        put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16,      src-sstride,   sstride, h+4, mx, my); \
        put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, dstride, tmp+16,        16,      h,   mx, my); \
    } \
}

EPEL_FUNCS(16, 6)
EPEL_FUNCS(8,  6)
EPEL_FUNCS(8,  4)
EPEL_FUNCS(4,  6)
EPEL_FUNCS(4,  4)

EPEL_HV(16, 6, 6)
EPEL_HV(8,  6, 6)
EPEL_HV(8,  4, 6)
EPEL_HV(8,  6, 4)
EPEL_HV(8,  4, 4)
EPEL_HV(4,  6, 6)
EPEL_HV(4,  4, 6)
EPEL_HV(4,  6, 4)
EPEL_HV(4,  4, 4)

static void put_vp8_pixels16_altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my)
{
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, src);
    int i;
    register ptrdiff_t dstride2 = dstride << 1, sstride2 = sstride << 1;
    register ptrdiff_t dstride3 = dstride2 + dstride, sstride3 = sstride + sstride2;
    register ptrdiff_t dstride4 = dstride << 2, sstride4 = sstride << 2;

    // hand-unrolling the loop by 4 gains about 15%
    // minimum execution time goes from 74 to 60 cycles
    // it's faster than -funroll-loops, but using
    // -funroll-loops w/ this is bad - 74 cycles again.
    // all this is on a 7450, tuning for the 7450
    for (i = 0; i < h; i += 4) {
        pixelsv1  = vec_ld( 0, src);
        pixelsv2  = vec_ld(15, src);
        pixelsv1B = vec_ld(sstride, src);
        pixelsv2B = vec_ld(15 + sstride, src);
        pixelsv1C = vec_ld(sstride2, src);
        pixelsv2C = vec_ld(15 + sstride2, src);
        pixelsv1D = vec_ld(sstride3, src);
        pixelsv2D = vec_ld(15 + sstride3, src);
        vec_st(vec_perm(pixelsv1,  pixelsv2,  perm),
               0, (unsigned char*)dst);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               dstride, (unsigned char*)dst);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               dstride2, (unsigned char*)dst);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               dstride3, (unsigned char*)dst);
        src += sstride4;
        dst += dstride4;
    }
}

#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */

av_cold void ff_vp78dsp_init_ppc(VP8DSPContext *c)
{
#if HAVE_ALTIVEC && HAVE_BIGENDIAN
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

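    // tab indexing is [width][v subpel type][h subpel type]: width
    // 0/1/2 = 16/8/4 pixels wide, filter type 1/2 = 4-tap/6-tap,
    // as the function names below spell out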
    c->put_vp8_epel_pixels_tab[0][0][0] = put_vp8_pixels16_altivec;
    c->put_vp8_epel_pixels_tab[0][0][2] = put_vp8_epel16_h6_altivec;
    c->put_vp8_epel_pixels_tab[0][2][0] = put_vp8_epel16_v6_altivec;
    c->put_vp8_epel_pixels_tab[0][2][2] = put_vp8_epel16_h6v6_altivec;

    c->put_vp8_epel_pixels_tab[1][0][2] = put_vp8_epel8_h6_altivec;
    c->put_vp8_epel_pixels_tab[1][2][0] = put_vp8_epel8_v6_altivec;
    c->put_vp8_epel_pixels_tab[1][0][1] = put_vp8_epel8_h4_altivec;
    c->put_vp8_epel_pixels_tab[1][1][0] = put_vp8_epel8_v4_altivec;

    c->put_vp8_epel_pixels_tab[1][2][2] = put_vp8_epel8_h6v6_altivec;
    c->put_vp8_epel_pixels_tab[1][1][1] = put_vp8_epel8_h4v4_altivec;
    c->put_vp8_epel_pixels_tab[1][1][2] = put_vp8_epel8_h6v4_altivec;
    c->put_vp8_epel_pixels_tab[1][2][1] = put_vp8_epel8_h4v6_altivec;

    c->put_vp8_epel_pixels_tab[2][0][2] = put_vp8_epel4_h6_altivec;
    c->put_vp8_epel_pixels_tab[2][2][0] = put_vp8_epel4_v6_altivec;
    c->put_vp8_epel_pixels_tab[2][0][1] = put_vp8_epel4_h4_altivec;
    c->put_vp8_epel_pixels_tab[2][1][0] = put_vp8_epel4_v4_altivec;

    c->put_vp8_epel_pixels_tab[2][2][2] = put_vp8_epel4_h6v6_altivec;
    c->put_vp8_epel_pixels_tab[2][1][1] = put_vp8_epel4_h4v4_altivec;
    c->put_vp8_epel_pixels_tab[2][1][2] = put_vp8_epel4_h6v4_altivec;
    c->put_vp8_epel_pixels_tab[2][2][1] = put_vp8_epel4_h4v6_altivec;
#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
}