/*
 * GMC (Global Motion Compensation)
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "../dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"
/*
  AltiVec-enhanced gmc1. At the moment this code assumes that stride is a
  multiple of 8, to preserve proper dst alignment.
*/
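/*
  Background for the code below: gmc1 is a bilinear interpolation at a
  1/16-pel offset (x16, y16). The four pixel weights

      A = (16-x16)*(16-y16)      B = (   x16)*(16-y16)
      C = (16-x16)*(   y16)      D = (   x16)*(   y16)

  always sum to 16*16 = 256, so the weighted sum of the four neighboring
  source pixels plus the rounder is normalized by the final >>8.
*/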
#define GMC1_PERF_COND (h==8)
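/*
  Processes a block 8 pixels wide and h lines high: each dst pixel is the
  weighted average of the four src pixels around its 1/16-pel position.
  dst must be 8-byte aligned, src may be arbitrarily aligned; x16 and y16
  are the fractional parts of the global motion vector (assumed 0..15 here).
*/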
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */, int stride, int h, int x16, int y16, int rounder)
{
POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    const int A = (16-x16)*(16-y16);
    const int B = (   x16)*(16-y16);
    const int C = (16-x16)*(   y16);
    const int D = (   x16)*(   y16);
    int i;

POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);

    for (i = 0; i < h; i++) {
        dst[0] = (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder) >> 8;
        dst[1] = (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder) >> 8;
        dst[2] = (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder) >> 8;
        dst[3] = (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder) >> 8;
        dst[4] = (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder) >> 8;
        dst[5] = (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder) >> 8;
        dst[6] = (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder) >> 8;
        dst[7] = (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder) >> 8;
        dst += stride;
        src += stride;
    }

POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    const unsigned short __attribute__ ((aligned(16))) rounder_a[8] =
        {rounder, rounder, rounder, rounder,
         rounder, rounder, rounder, rounder};
    const unsigned short __attribute__ ((aligned(16))) ABCD[8] =
        {
            (16-x16)*(16-y16), /* A */
            (   x16)*(16-y16), /* B */
            (16-x16)*(   y16), /* C */
            (   x16)*(   y16), /* D */
            0, 0, 0, 0         /* padding */
        };
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vcsr8 = (const_vector unsigned short)vec_splat_u16(8);
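    /* vczero provides the zero bytes used both to widen pixels to 16 bits
       (vec_mergeh below) and to pad the final vec_pack; vcsr8 is the shift
       count for the >>8 that normalizes the 256-weighted sum. */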
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;

POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);

    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);
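    // Each vec_splat broadcasts one 16-bit weight into all eight lanes,
    // so a single vec_mladd below applies that weight to 8 pixels at once.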

    rounderV = vec_ld(0, (unsigned short*)rounder_a);

    // From those 32 bytes we'll be able to pick up the 9 source bytes
    // (src[0..8]) needed for one line of 8 output pixels. We load the first
    // batch here, as inside the loop we can reuse 'src + stride' from one
    // iteration as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));
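    // The vec_lvsl/vec_perm pair is the usual AltiVec unaligned-load idiom:
    // srcvA now starts at src[0] regardless of src's 16-byte alignment.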
    if (src_really_odd != 0x0000000F) {
        // if src & 0xF == 0xF, then (src+1) is properly aligned
        // on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);
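    // Merging with the zero vector zero-extends the top 8 pixel bytes into
    // eight unsigned shorts, the element type vec_mladd works on.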

    for (i = 0; i < h; i++) {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        // From those 32 bytes we'll be able to pick up the 9 source bytes
        // at src + stride; the two resulting vectors srcvC and srcvD are
        // then reused as the next iteration's srcvA and srcvB.
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
        if (src_really_odd != 0x0000000F) {
            // if (src+stride) & 0xF == 0xF, then (src+stride+1) is
            // properly aligned on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        // OK, now we (finally) do the math :-)
        // These four instructions replace 32 integer multiplies
        // and 32 integer adds. Isn't AltiVec nice?
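        // Each vec_mladd lane computes src*weight + accumulator modulo
        // 2^16; since the weights sum to 256 and pixels are <= 255, the
        // chained sum A*s00 + B*s01 + C*s10 + D*s11 + rounder never
        // overflows 16 bits.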
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);

        // the current bottom line becomes the next iteration's top line
        srcvA = srcvC;
        srcvB = srcvD;

        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);

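        // The 8 result bytes are in the low half of dstv2. dst is only
        // guaranteed 8-byte aligned, so depending on dst_odd they must end
        // up in one half or the other of the 16-byte store; the other half
        // is refilled from the previously loaded dstv.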
        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));
        }

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }

POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);

#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}