/*
 * Alpha optimized DSP utils
 * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "asm.h"
#include "../dsputil.h"
void get_pixels_mvi(DCTELEM *restrict block,
                    const uint8_t *restrict pixels, int line_size)
{
    int h = 8;

    do {
        uint64_t p = ldq(pixels);

        stq(unpkbw(p),       block);
        stq(unpkbw(p >> 32), block + 4);

        pixels += line_size;
        block  += 8;
    } while (--h);
}
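
/* unpkbw zero-extends the four low bytes of its operand into four
   16-bit words, so the two stq's above widen one row of eight pixels
   into eight DCTELEMs. A hypothetical scalar reference, not part of
   the build (assumes DCTELEM is a 16-bit type, as ../dsputil.h
   defines it): */
#if 0
static void get_pixels_ref(DCTELEM *restrict block,
                           const uint8_t *restrict pixels, int line_size)
{
    int i, h;

    for (h = 0; h < 8; h++) {
        for (i = 0; i < 8; i++)
            block[i] = pixels[i];
        pixels += line_size;
        block  += 8;
    }
}
#endif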
void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2,
                     int stride)
{
    int h = 8;
    uint64_t mask = 0x4040;

    mask |= mask << 16;
    mask |= mask << 32;
    do {
        uint64_t x, y, c, d, a;
        uint64_t signs;

        x = ldq(s1);
        y = ldq(s2);
        c = cmpbge(x, y);
        d = x - y;
        a = zap(mask, c);       /* We use 0x4040404040404040 here...  */
        d += 4 * a;             /* ...so we can use s4addq here.      */
        signs = zap(-1, c);

        stq(unpkbw(d)       | (unpkbw(signs)       << 8), block);
        stq(unpkbw(d >> 32) | (unpkbw(signs >> 32) << 8), block + 4);

        s1    += stride;
        s2    += stride;
        block += 8;
    } while (--h);
}
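
/* Why one 64-bit subtraction plus the zap/s4addq pair above is enough
   (a sketch; the reference below is hypothetical and not part of the
   build): when byte lane i of x - y underflows, it borrows 1 from
   lane i + 1. c = cmpbge(x, y) sets bit i exactly when lane i did NOT
   borrow, so a = zap(mask, c) keeps 0x40 in every borrowing lane and
   d += 4 * a (a single s4addq) adds 0x100 there, returning the borrow
   to the lane above. Each lane of d then holds (s1[i] - s2[i]) & 0xff,
   and signs = zap(-1, c) supplies the 0x00/0xff high byte of the
   16-bit difference. */
#if 0
static void diff_pixels_ref(DCTELEM *block, const uint8_t *s1,
                            const uint8_t *s2, int stride)
{
    int i, h;

    for (h = 0; h < 8; h++) {
        for (i = 0; i < 8; i++)
            block[i] = s1[i] - s2[i];
        s1    += stride;
        s2    += stride;
        block += 8;
    }
}
#endif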
static inline uint64_t avg2(uint64_t a, uint64_t b)
{
    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}
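
/* avg2() is the usual SWAR rounding-up average: a + b equals
   2 * (a & b) + (a ^ b), hence ceil((a + b) / 2) = (a | b) - ((a ^ b) >> 1)
   per byte. The BYTE_VEC(0xfe) mask clears each byte's low bit before
   the shift so nothing leaks into the lane below. Hypothetical
   per-byte reference, not part of the build: */
#if 0
static inline uint8_t avg2_ref(uint8_t a, uint8_t b)
{
    return (a + b + 1) >> 1;
}
#endif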
static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2)
                + ((l2 & ~BYTE_VEC(0x03)) >> 2)
                + ((l3 & ~BYTE_VEC(0x03)) >> 2)
                + ((l4 & ~BYTE_VEC(0x03)) >> 2);
    uint64_t r2 = ((  (l1 & BYTE_VEC(0x03))
                    + (l2 & BYTE_VEC(0x03))
                    + (l3 & BYTE_VEC(0x03))
                    + (l4 & BYTE_VEC(0x03))
                    + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);
    return r1 + r2;
}
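
/* avg4() keeps the four-way sum from carrying across byte lanes by
   splitting each byte: the top six bits are pre-shifted (four summands
   of at most 0x3f sum to at most 0xfc), and the low two bits are
   summed with the rounding constant 2 separately. Per byte this is
   simply (l1 + l2 + l3 + l4 + 2) >> 2. Hypothetical reference, not
   part of the build: */
#if 0
static inline uint8_t avg4_ref(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
{
    return (a + b + c + d + 2) >> 2;
}
#endif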
int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    if ((size_t) pix2 & 0x7) {
        /* works only when pix2 is actually unaligned */
        do {                    /* do 8 pixels at a time */
            uint64_t p1, p2;

            p1 = ldq(pix1);
            p2 = uldq(pix2);
            result += perr(p1, p2);

            pix1 += line_size;
            pix2 += line_size;
        } while (--h);
    } else {
        do {
            uint64_t p1, p2;

            p1 = ldq(pix1);
            p2 = ldq(pix2);
            result += perr(p1, p2);

            pix1 += line_size;
            pix2 += line_size;
        } while (--h);
    }

    return result;
}
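
/* perr() is the MVI sum-of-absolute-differences instruction: it adds
   up the eight per-byte absolute differences of its operands, so each
   iteration above accumulates the SAD of one 8-pixel row. Hypothetical
   scalar reference, not part of the build: */
#if 0
static int pix_abs8x8_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, result = 0;

    while (h--) {
        for (i = 0; i < 8; i++) {
            int d = pix1[i] - pix2[i];
            result += d < 0 ? -d : d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return result;
}
#endif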
#if 0                           /* now done in assembly */
int pix_abs16x16_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
{
    int result = 0;
    int h = 16;

    if ((size_t) pix2 & 0x7) {
        /* works only when pix2 is actually unaligned */
        do {                    /* do 16 pixels at a time */
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t t;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            t     = ldq_u(pix2 + 8);
            p2_l  = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
            p2_r  = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
    } else {
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            p2_l  = ldq(pix2);
            p2_r  = ldq(pix2 + 8);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
    }

    return result;
}
#endif
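
/* The ldq_u/extql/extqh combination above is the standard Alpha
   unaligned-load idiom: ldq_u fetches the aligned quadword containing
   the address, and extql/extqh shift the two covering quadwords so
   that OR-ing them reassembles the unaligned 8 bytes; reusing t for
   the middle quadword saves one load per 16-byte row. The idiom in
   isolation (a hypothetical helper, not part of the build; p + 7
   stays within the same quadword when p is already aligned, in which
   case extqh contributes 0): */
#if 0
static inline uint64_t uldq_sketch(const uint8_t *p)
{
    return extql(ldq_u(p), p) | extqh(ldq_u(p + 7), p);
}
#endif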
int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;
    uint64_t disalign = (size_t) pix2 & 0x7;

    switch (disalign) {
    case 0:
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq(pix2);
            r    = ldq(pix2 + 8);
            p2_l = avg2(l, (l >> 8) | ((uint64_t) r << 56));
            p2_r = avg2(r, (r >> 8) | ((uint64_t) pix2[16] << 56));
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    case 7:
        /* |.......l|lllllllr|rrrrrrr*|
           This case is special because disalign1 would be 8, which
           gets treated as 0 by extqh. At least it is a bit faster
           that way :)  */
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, m, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq_u(pix2);
            m    = ldq_u(pix2 + 8);
            r    = ldq_u(pix2 + 16);
            p2_l = avg2(extql(l, disalign) | extqh(m, disalign), m);
            p2_r = avg2(extql(m, disalign) | extqh(r, disalign), r);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    default:
        do {
            uint64_t disalign1 = disalign + 1;
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, m, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq_u(pix2);
            m    = ldq_u(pix2 + 8);
            r    = ldq_u(pix2 + 16);
            p2_l = avg2(extql(l, disalign)  | extqh(m, disalign),
                        extql(l, disalign1) | extqh(m, disalign1));
            p2_r = avg2(extql(m, disalign)  | extqh(r, disalign),
                        extql(m, disalign1) | extqh(r, disalign1));
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    }
    return result;
}
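
/* All three cases above compute the same thing: the SAD against the
   horizontally half-pel-shifted reference, i.e. each pix2 byte
   averaged with its right neighbour. On little-endian Alpha, shifting
   a packed quadword right by 8 moves every byte's neighbour into its
   lane; the byte vacated at the top is refilled from the next
   quadword (or from pix2[16] at the end of the row). Hypothetical
   scalar reference, not part of the build: */
#if 0
static int pix_abs16x16_x2_ref(uint8_t *pix1, uint8_t *pix2,
                               int line_size, int h)
{
    int i, result = 0;

    while (h--) {
        for (i = 0; i < 16; i++) {
            int d = pix1[i] - ((pix2[i] + pix2[i + 1] + 1) >> 1);
            result += d < 0 ? -d : d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return result;
}
#endif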
int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    if ((size_t) pix2 & 0x7) {
        uint64_t t, p2_l, p2_r;
        t    = ldq_u(pix2 + 8);
        p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
        p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);

        do {
            uint64_t p1_l, p1_r, np2_l, np2_r;
            uint64_t t;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            pix2 += line_size;
            t     = ldq_u(pix2 + 8);
            np2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
            np2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);

            result += perr(p1_l, avg2(p2_l, np2_l))
                    + perr(p1_r, avg2(p2_r, np2_r));

            pix1 += line_size;
            p2_l  = np2_l;
            p2_r  = np2_r;
        } while (--h);
    } else {
        uint64_t p2_l, p2_r;
        p2_l = ldq(pix2);
        p2_r = ldq(pix2 + 8);

        do {
            uint64_t p1_l, p1_r, np2_l, np2_r;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            pix2 += line_size;
            np2_l = ldq(pix2);
            np2_r = ldq(pix2 + 8);

            result += perr(p1_l, avg2(p2_l, np2_l))
                    + perr(p1_r, avg2(p2_r, np2_r));

            pix1 += line_size;
            p2_l  = np2_l;
            p2_r  = np2_r;
        } while (--h);
    }

    return result;
}
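
/* Vertical half-pel SAD: each reference byte is averaged with the
   byte one line below. The loop carries the current line over in
   p2_l/p2_r, so every line of pix2 is loaded exactly once.
   Hypothetical scalar reference, not part of the build: */
#if 0
static int pix_abs16x16_y2_ref(uint8_t *pix1, uint8_t *pix2,
                               int line_size, int h)
{
    int i, result = 0;

    while (h--) {
        for (i = 0; i < 16; i++) {
            int d = pix1[i] - ((pix2[i] + pix2[i + line_size] + 1) >> 1);
            result += d < 0 ? -d : d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return result;
}
#endif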
int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    uint64_t p1_l, p1_r;
    uint64_t p2_l, p2_r, p2_x;

    p1_l = ldq(pix1);
    p1_r = ldq(pix1 + 8);

    if ((size_t) pix2 & 0x7) { /* could be optimized a lot */
        p2_l = uldq(pix2);
        p2_r = uldq(pix2 + 8);
        p2_x = (uint64_t) pix2[16] << 56;
    } else {
        p2_l = ldq(pix2);
        p2_r = ldq(pix2 + 8);
        p2_x = ldq(pix2 + 16) << 56;
    }

    do {
        uint64_t np1_l, np1_r;
        uint64_t np2_l, np2_r, np2_x;

        pix1 += line_size;
        pix2 += line_size;

        np1_l = ldq(pix1);
        np1_r = ldq(pix1 + 8);

        if ((size_t) pix2 & 0x7) { /* could be optimized a lot */
            np2_l = uldq(pix2);
            np2_r = uldq(pix2 + 8);
            np2_x = (uint64_t) pix2[16] << 56;
        } else {
            np2_l = ldq(pix2);
            np2_r = ldq(pix2 + 8);
            np2_x = ldq(pix2 + 16) << 56;
        }

        result += perr(p1_l,
                       avg4( p2_l, ( p2_l >> 8) | ((uint64_t) p2_r << 56),
                            np2_l, (np2_l >> 8) | ((uint64_t) np2_r << 56)))
                + perr(p1_r,
                       avg4( p2_r, ( p2_r >> 8) | ((uint64_t) p2_x),
                            np2_r, (np2_r >> 8) | ((uint64_t) np2_x)));

        p1_l = np1_l;
        p1_r = np1_r;
        p2_l = np2_l;
        p2_r = np2_r;
        p2_x = np2_x;
    } while (--h);

    return result;
}
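
/* Diagonal half-pel SAD: every reference byte is the avg4() of its
   2x2 neighbourhood. The loop keeps the current line (p2_*) and loads
   only the next one (np2_*); p2_x/np2_x carry each line's 17th byte
   pre-shifted into the top lane so the right half can be shifted just
   like the left. Hypothetical scalar reference, not part of the
   build: */
#if 0
static int pix_abs16x16_xy2_ref(uint8_t *pix1, uint8_t *pix2,
                                int line_size, int h)
{
    int i, result = 0;

    while (h--) {
        for (i = 0; i < 16; i++) {
            int avg = (pix2[i] + pix2[i + 1]
                     + pix2[i + line_size] + pix2[i + line_size + 1] + 2) >> 2;
            int d = pix1[i] - avg;
            result += d < 0 ? -d : d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return result;
}
#endif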