/*
 * Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavcodec/ppc/int_altivec.c
 * miscellaneous integer operations
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavcodec/dsputil.h"

#include "dsputil_altivec.h"
#include "types_altivec.h"
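
/**
 * Sum of squared differences between an int8_t and an int16_t buffer:
 * score = sum over size elements of (pix1[i] - pix2[i])^2.
 */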
static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
                                     int size)
{
    int i, size16;
    vector signed char vpix1;
    vector signed short vpix2, vdiff, vpix1l, vpix1h;
    union {
        vector signed int vscore;
        int32_t score[4];
    } u;
    u.vscore = vec_splat_s32(0);

    // XXX lazy way, fix it later
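    /* Classic AltiVec unaligned load: fetch the two aligned 16-byte
     * blocks that straddle b, then merge the 16 wanted bytes with
     * vec_perm using the permute vector produced by vec_lvsl. */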
#define vec_unaligned_load(b) \
    vec_perm(vec_ld(0, b), vec_ld(15, b), vec_lvsl(0, b))

    size16 = size >> 4;
    while (size16) {
        // score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
        // load pix1 and the first batch of pix2
        vpix1 = vec_unaligned_load(pix1);
        vpix2 = vec_unaligned_load(pix2);
        pix2 += 8;
        // unpack the high half of pix1 to 16 bits and diff against pix2
        vpix1h = vec_unpackh(vpix1);
        vdiff  = vec_sub(vpix1h, vpix2);
        vpix1l = vec_unpackl(vpix1);
        // load another batch from pix2
        vpix2 = vec_unaligned_load(pix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        vdiff  = vec_sub(vpix1l, vpix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        pix1 += 16;
        pix2 += 8;
        size16--;
    }
    u.vscore = vec_sums(u.vscore, vec_splat_s32(0));

    // handle the tail scalarly; vec_sums left the vector total in element 3
    size %= 16;
    for (i = 0; i < size; i++) {
        u.score[3] += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    }

    return u.score[3];
}
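
/**
 * v1 += v2. As written, v1 must be 16-byte aligned (vec_ld/vec_st round
 * the address down), v2 may be unaligned, and order is assumed to be a
 * multiple of 8.
 */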
static void add_int16_altivec(int16_t *v1, int16_t *v2, int order)
{
    int i;
    register vec_s16 vec, *pv;
    for (i = 0; i < order; i += 8) {
        pv  = (vec_s16 *)v2;
        vec = vec_perm(pv[0], pv[1], vec_lvsl(0, v2));
        vec_st(vec_add(vec_ld(0, v1), vec), 0, v1);
        v1 += 8;
        v2 += 8;
    }
}
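
/** v1 -= v2. Same alignment and size requirements as add_int16_altivec(). */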
static void sub_int16_altivec(int16_t *v1, int16_t *v2, int order)
{
    int i;
    register vec_s16 vec, *pv;
    for (i = 0; i < order; i += 8) {
        pv  = (vec_s16 *)v2;
        vec = vec_perm(pv[0], pv[1], vec_lvsl(0, v2));
        vec_st(vec_sub(vec_ld(0, v1), vec), 0, v1);
        v1 += 8;
        v2 += 8;
    }
}
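
/**
 * Scalar product of two int16_t vectors. Note that the right shift is
 * applied to each pair-sum produced by vec_msum rather than to the
 * individual products, and that v2 must be 16-byte aligned while v1
 * may be unaligned.
 */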
static int32_t scalarproduct_int16_altivec(int16_t *v1, int16_t *v2, int order,
                                           const int shift)
{
    int i;
    LOAD_ZERO;
    register vec_s16 vec1, *pv;
    register vec_s32 res = vec_splat_s32(0), t;
    register vec_u32 shifts;
    DECLARE_ALIGNED_16(int32_t, ires);

    shifts = zero_u32v;
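    /* vec_splat_u32() only takes a 5-bit immediate, so the runtime shift
     * count is assembled bit by bit, with 16 produced as 8 << 1. */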
    if (shift & 0x10) shifts = vec_add(shifts, vec_sl(vec_splat_u32(0x08), vec_splat_u32(0x1)));
    if (shift & 0x08) shifts = vec_add(shifts, vec_splat_u32(0x08));
    if (shift & 0x04) shifts = vec_add(shifts, vec_splat_u32(0x04));
    if (shift & 0x02) shifts = vec_add(shifts, vec_splat_u32(0x02));
    if (shift & 0x01) shifts = vec_add(shifts, vec_splat_u32(0x01));

    for (i = 0; i < order; i += 8) {
        pv   = (vec_s16 *)v1;
        vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));
        t    = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
        t    = vec_sr(t, shifts);
        res  = vec_sums(t, res);
        v1 += 8;
        v2 += 8;
    }
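    /* vec_sums() leaves the running total in the last 32-bit element;
     * splat it across the vector so that the single element vec_ste
     * stores into the aligned ires holds the total. */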
    res = vec_splat(res, 3);
    vec_ste(res, 0, &ires);

    return ires;
}

void int_init_altivec(DSPContext *c, AVCodecContext *avctx)
{
    c->ssd_int8_vs_int16   = ssd_int8_vs_int16_altivec;
    c->add_int16           = add_int16_altivec;
    c->sub_int16           = sub_int16_altivec;
    c->scalarproduct_int16 = scalarproduct_int16_altivec;
}