/*
 * Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * miscellaneous integer operations
 */
#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavutil/attributes.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavcodec/dsputil.h"
#include "dsputil_altivec.h"

static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
                                     int size)
{
    int i, size16 = size >> 4;
    vector signed char vpix1;
    vector signed short vpix2, vdiff, vpix1l, vpix1h;
    union {
        vector signed int vscore;
        int32_t score[4];
    } u = { .vscore = vec_splat_s32(0) };
    // XXX lazy way, fix it later

#define vec_unaligned_load(b) \
    vec_perm(vec_ld(0, b), vec_ld(15, b), vec_lvsl(0, b))
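    /*
     * How vec_unaligned_load works: vec_ld() ignores the low four bits of
     * the effective address, so vec_ld(0, b) and vec_ld(15, b) fetch the
     * two aligned 16-byte blocks covering b .. b+15. vec_lvsl(0, b) builds
     * a permute control vector from the misalignment of b, and vec_perm()
     * then selects the 16 bytes starting exactly at b. Offset 15 (rather
     * than 16) is used so that an already aligned pointer does not read one
     * block past the data it needs.
     */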
    while (size16) {
        // score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
        // load pix1 and the first batch of pix2
        vpix1 = vec_unaligned_load(pix1);
        vpix2 = vec_unaligned_load(pix2);
        pix2 += 8;
        // sign-extend the bytes of pix1 to shorts, then accumulate the
        // squared differences with vec_msum()
        vpix1h   = vec_unpackh(vpix1);
        vdiff    = vec_sub(vpix1h, vpix2);
        vpix1l   = vec_unpackl(vpix1);
        // load another batch from pix2
        vpix2    = vec_unaligned_load(pix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        vdiff    = vec_sub(vpix1l, vpix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        pix1    += 16;
        pix2    += 8;
        size16--;
    }
    // vec_sums() leaves the horizontal sum in the last element
    u.vscore = vec_sums(u.vscore, vec_splat_s32(0));

    // handle the remaining size % 16 elements in scalar code
    size %= 16;
    for (i = 0; i < size; i++)
        u.score[3] += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);

    return u.score[3];
}
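/*
 * Scalar reference for ssd_int8_vs_int16_altivec() above (illustrative
 * sketch, not part of the original code): the function returns the sum of
 * squared differences between an int8 array and an int16 array.
 *
 *     int score = 0;
 *     for (i = 0; i < size; i++)
 *         score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
 *     return score;
 */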

static int32_t scalarproduct_int16_altivec(const int16_t *v1, const int16_t *v2,
                                           int order)
{
    int i;
    LOAD_ZERO;
    register vec_s16 vec1;
    register vec_s32 res = vec_splat_s32(0), t;
    int32_t ires;

    for (i = 0; i < order; i += 8) {
        vec1 = vec_unaligned_load(v1);
        t    = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
        res  = vec_sums(t, res);
        v1  += 8;
        v2  += 8;
    }
    // the accumulated sum sits in the last element after vec_sums()
    res = vec_splat(res, 3);
    vec_ste(res, 0, &ires);

    return ires;
}
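/*
 * Scalar reference for scalarproduct_int16_altivec() above (illustrative
 * sketch, not part of the original code):
 *
 *     int32_t res = 0;
 *     for (i = 0; i < order; i++)
 *         res += v1[i] * v2[i];
 *     return res;
 *
 * The vector loop consumes 8 elements per iteration with no scalar tail,
 * so order is expected to be a multiple of 8; v2 is read with vec_ld(0, v2),
 * so it is expected to be 16-byte aligned.
 */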

static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1,
                                                    const int16_t *v2,
                                                    const int16_t *v3,
                                                    int order, int mul)
{
    LOAD_ZERO;
    vec_s16 *pv1 = (vec_s16 *) v1;
    register vec_s16 muls = { mul, mul, mul, mul, mul, mul, mul, mul };
    register vec_s16 t0, t1, i0, i1, i4;
    register vec_s16 i2 = vec_ld(0, v2), i3 = vec_ld(0, v3);
    register vec_s32 res = zero_s32v;
    register vec_u8 align = vec_lvsl(0, v2);
    int32_t ires;

    order >>= 4;
    do {
        // realign 16 elements of v2 with the permute vector from its address
        i1 = vec_ld(16, v2);
        t0 = vec_perm(i2, i1, align);
        i2 = vec_ld(32, v2);
        t1 = vec_perm(i1, i2, align);
        i0 = pv1[0];
        i1 = pv1[1];
        res = vec_msum(t0, i0, res);
        res = vec_msum(t1, i1, res);
        // same realignment for v3, then write back v1 + v3 * mul
        i4 = vec_ld(16, v3);
        t0 = vec_perm(i3, i4, align);
        i3 = vec_ld(32, v3);
        t1 = vec_perm(i4, i3, align);
        pv1[0] = vec_mladd(t0, muls, i0);
        pv1[1] = vec_mladd(t1, muls, i1);
        pv1 += 2;
        v2  += 16;
        v3  += 16;
    } while (--order);
    res = vec_splat(vec_sums(res, zero_s32v), 3);
    vec_ste(res, 0, &ires);

    return ires;
}
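/*
 * Scalar reference for scalarproduct_and_madd_int16_altivec() above
 * (illustrative sketch, not part of the original code): the function
 * returns the dot product of v1 and v2 while updating v1 in place.
 *
 *     int32_t res = 0;
 *     for (i = 0; i < order; i++) {
 *         res   += v1[i] * v2[i];
 *         v1[i] += v3[i] * mul;
 *     }
 *     return res;
 *
 * The do/while loop consumes 16 elements per pass (order >>= 4), so order
 * is expected to be a multiple of 16; v1 is accessed through an aligned
 * vector pointer, and the same vec_lvsl() permute vector is reused for v3,
 * so v2 and v3 are expected to share the same (mis)alignment.
 */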

av_cold void ff_int_init_altivec(DSPContext *c, AVCodecContext *avctx)
{
    c->ssd_int8_vs_int16 = ssd_int8_vs_int16_altivec;

    c->scalarproduct_int16 = scalarproduct_int16_altivec;

    c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_altivec;
}
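
/*
 * Usage sketch (illustrative, not part of the original code): callers do
 * not invoke these functions directly; they go through the DSPContext
 * function pointers filled in above, once the generic dsputil init code has
 * selected the AltiVec versions, e.g.
 *
 *     int32_t dot = c->scalarproduct_int16(v1, v2, len);  // len: multiple of 8
 */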