;*****************************************************************************
;* MMX optimized DSP utils
;*****************************************************************************
;* Copyright (c) 2000, 2001 Fabrice Bellard
;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************
%include "libavutil/x86/x86inc.asm"
%include "libavutil/x86/x86util.asm"

SECTION .text
; %1=dst, %2=tmp, %3=pix1 address, %4=pix2 address
; loads pixels from both sources and leaves their signed word differences
; in %1; the interleave trick avoids a zero register (see the sketch below)
%macro DIFF_PIXELS_1 4
    movh            %1, %3
    movh            %2, %4
    punpcklbw       %1, %2   ; words: pix1[i] + (pix2[i] << 8)
    punpcklbw       %2, %2   ; words: pix2[i] + (pix2[i] << 8)
    psubw           %1, %2   ; words: pix1[i] - pix2[i]
%endmacro
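; Roughly equivalent C, for illustration only (not part of the build): the
; high bytes contributed by the interleave cancel in the subtraction, so no
; zero register or explicit sign extension is needed:
;
;   void diff_pixels_1(int16_t dst[8], const uint8_t *pix1, const uint8_t *pix2)
;   {
;       for (int i = 0; i < 8; i++) {
;           int a = pix1[i] + (pix2[i] << 8);  // punpcklbw %1, %2
;           int b = pix2[i] + (pix2[i] << 8);  // punpcklbw %2, %2
;           dst[i] = a - b;                    // psubw: pix1[i] - pix2[i]
;       }
;   }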
; %1=uint8_t *pix1, %2=uint8_t *pix2, %3=static offset, %4=stride, %5=stride*3
; %6=temporary storage location
; this macro requires mmsize bytes of aligned stack space at %6
; (except on SSE+x86-64, where m8 serves as the spill register)
%macro DIFF_PIXELS_8 6
    DIFF_PIXELS_1   m0, m7, [%1     +%3], [%2     +%3]
    DIFF_PIXELS_1   m1, m7, [%1+%4  +%3], [%2+%4  +%3]
    DIFF_PIXELS_1   m2, m7, [%1+%4*2+%3], [%2+%4*2+%3]
    add             %1, %5
    add             %2, %5
    DIFF_PIXELS_1   m3, m7, [%1     +%3], [%2     +%3]
    DIFF_PIXELS_1   m4, m7, [%1+%4  +%3], [%2+%4  +%3]
    DIFF_PIXELS_1   m5, m7, [%1+%4*2+%3], [%2+%4*2+%3]
    DIFF_PIXELS_1   m6, m7, [%1+%5  +%3], [%2+%5  +%3]
%ifdef m8
    DIFF_PIXELS_1   m7, m8, [%1+%4*4+%3], [%2+%4*4+%3]
%else
    mova          [%6], m0
    DIFF_PIXELS_1   m7, m0, [%1+%4*4+%3], [%2+%4*4+%3]
    mova            m0, [%6]
%endif
    sub             %1, %5
    sub             %2, %5
%endmacro
%macro HADAMARD8 0
    SUMSUB_BADC w, 0, 1, 2, 3
    SUMSUB_BADC w, 4, 5, 6, 7
    SUMSUB_BADC w, 0, 2, 1, 3
    SUMSUB_BADC w, 4, 6, 5, 7
    SUMSUB_BADC w, 0, 4, 1, 5
    SUMSUB_BADC w, 2, 6, 3, 7
%endmacro
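; Each SUMSUB_BADC performs two butterflies (a+b, a-b); the three passes
; above combine registers at distance 1, 2 and 4, which is the classic
; log2(8) = 3 stage fast Walsh-Hadamard decomposition. A scalar sketch,
; for illustration only (the register form may permute outputs and flip
; signs, which the absolute sum taken afterwards doesn't care about):
;
;   void hadamard8(int16_t v[8])
;   {
;       for (int dist = 1; dist < 8; dist <<= 1)        // 3 passes
;           for (int i = 0; i < 8; i += dist << 1)
;               for (int j = i; j < i + dist; j++) {
;                   int16_t a = v[j], b = v[j + dist];
;                   v[j]        = a + b;                // sum
;                   v[j + dist] = a - b;                // difference
;               }
;   }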
%macro ABS1_SUM 3
    ABS1            %1, %2
    paddusw         %3, %1
%endmacro

%macro ABS2_SUM 6
    ABS2            %1, %2, %3, %4
    paddusw         %5, %1
    paddusw         %6, %2
%endmacro

%macro ABS_SUM_8x8_64 1
    ABS2            m0, m1, m8, m9
    ABS2_SUM        m2, m3, m8, m9, m0, m1
    ABS2_SUM        m4, m5, m8, m9, m0, m1
    ABS2_SUM        m6, m7, m8, m9, m0, m1
    paddusw         m0, m1
%endmacro
%macro ABS_SUM_8x8_32 1
    mova          [%1], m7
    ABS1            m0, m7
    ABS1            m1, m7
    ABS1_SUM        m2, m7, m0
    ABS1_SUM        m3, m7, m1
    ABS1_SUM        m4, m7, m0
    ABS1_SUM        m5, m7, m1
    ABS1_SUM        m6, m7, m0
    mova            m7, [%1]
    ABS1_SUM        m7, m6, m1
    paddusw         m0, m1
%endmacro
; FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get
; up to about 100k on extreme inputs. But that's very unlikely to occur in
; natural video, and it's even more unlikely that no alternative mvs/modes
; with a lower cost would exist.
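; A rough illustration of those numbers: pixel differences fit in +/-255 and
; the unnormalized 8-point transform applied along rows and then columns
; scales values by up to 8 each time, so a single |coefficient| can reach
; 255 * 64 = 16320 and the sum over 64 coefficients can pass 16 bits (the
; ~100k above). paddusw clips at 65535 instead of wrapping, so an overflow
; merely understates an already-huge cost rather than corrupting it.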
; horizontal sum of words into a GPR; %1=src, %2=tmp, %3=output GPR
%macro HSUM_MMX 3
    mova            %2, %1
    psrlq           %1, 32
    paddusw         %1, %2
    mova            %2, %1
    psrlq           %1, 16
    paddusw         %1, %2
    movd            %3, %1
    and             %3, 0xFFFF
%endmacro

%macro HSUM_MMX2 3
    pshufw          %2, %1, 0xE
    paddusw         %1, %2
    pshufw          %2, %1, 0x1
    paddusw         %1, %2
    movd            %3, %1
    and             %3, 0xFFFF
%endmacro

%macro HSUM_SSE2 3
    movhlps         %2, %1
    paddusw         %1, %2
    pshuflw         %2, %1, 0xE
    paddusw         %1, %2
    pshuflw         %2, %1, 0x1
    paddusw         %1, %2
    movd            %3, %1
    and             %3, 0xFFFF
%endmacro

%macro STORE4 5
    mova [%1+mmsize*0], %2
    mova [%1+mmsize*1], %3
    mova [%1+mmsize*2], %4
    mova [%1+mmsize*3], %5
%endmacro
%macro LOAD4 5
    mova %2, [%1+mmsize*0]
    mova %3, [%1+mmsize*1]
    mova %4, [%1+mmsize*2]
    mova %5, [%1+mmsize*3]
%endmacro
%macro hadamard8_16_wrapper 3
cglobal hadamard8_diff_%1, 4, 4, %2
%ifndef m8
    %assign pad %3*mmsize-(4+stack_offset&(mmsize-1))
    SUB             rsp, pad
%endif
    call hadamard8x8_diff_%1
%ifndef m8
    ADD             rsp, pad
%endif
    RET
cglobal hadamard8_diff16_%1, 5, 6, %2
%ifndef m8
    %assign pad %3*mmsize-(4+stack_offset&(mmsize-1))
    SUB             rsp, pad
%endif

    call hadamard8x8_diff_%1
    mov             r5d, eax

    add             r1, 8
    add             r2, 8
    call hadamard8x8_diff_%1
    add             r5d, eax

    cmp             r4d, 16
    jne .done

    lea             r1, [r1+r3*8-8]
    lea             r2, [r2+r3*8-8]
    call hadamard8x8_diff_%1
    add             r5d, eax

    add             r1, 8
    add             r2, 8
    call hadamard8x8_diff_%1
    add             r5d, eax

.done:
    mov             eax, r5d
%ifndef m8
    ADD             rsp, pad
%endif
    RET
%endmacro
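; The 16x16 variant accumulates four calls to the 8x8 core: adding 8 to the
; pixel pointers moves one block to the right, and the lea by stride*8 - 8
; moves down 8 rows while stepping back to the left column, so the blocks
; are visited top-left, top-right, bottom-left, bottom-right. When h == 8
; only the top half is summed (the jne .done above).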
%macro HADAMARD8_DIFF_MMX 1
ALIGN 16
; int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2,
;                          int stride, int h)
; r0 = void *s = unused, int h = unused (always 8)
; note how r1, r2 and r3 are not clobbered in this function, so 16x16
; can simply call this 2x2 times (and that's why we access rsp+gprsize
; everywhere, which is the rsp of the calling function)
hadamard8x8_diff_%1:
    lea                          r0, [r3*3]
    ; first 4x8 pixels
    DIFF_PIXELS_8                r1, r2,  0, r3, r0, rsp+gprsize+0x60
    HADAMARD8
    mova         [rsp+gprsize+0x60], m7
    TRANSPOSE4x4W                 0,  1,  2,  3,  7
    STORE4              rsp+gprsize, m0, m1, m2, m3
    mova                         m7, [rsp+gprsize+0x60]
    TRANSPOSE4x4W                 4,  5,  6,  7,  0
    STORE4         rsp+gprsize+0x40, m4, m5, m6, m7
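; Scratch layout relative to rsp+gprsize (each STORE4 block is 4*mmsize =
; 0x20 bytes in the MMX path): +0x00 holds m0-m3 of the first (offset 0)
; pass after transposition, +0x40 holds its m4-m7, +0x20 holds m0-m3 of the
; second (offset 4) pass, and +0x60 doubles as the spill slot for m7 and
; for the intermediate ABS_SUM_8x8_32 total.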
    ; second 4x8 pixels
    DIFF_PIXELS_8                r1, r2,  4, r3, r0, rsp+gprsize+0x60
    HADAMARD8
    mova         [rsp+gprsize+0x60], m7
    TRANSPOSE4x4W                 0,  1,  2,  3,  7
    STORE4         rsp+gprsize+0x20, m0, m1, m2, m3
    mova                         m7, [rsp+gprsize+0x60]
    TRANSPOSE4x4W                 4,  5,  6,  7,  0
    LOAD4          rsp+gprsize+0x40, m0, m1, m2, m3
    HADAMARD8
    ABS_SUM_8x8_32 rsp+gprsize+0x60
    mova         [rsp+gprsize+0x60], m0

    LOAD4          rsp+gprsize     , m0, m1, m2, m3
    LOAD4          rsp+gprsize+0x20, m4, m5, m6, m7
    HADAMARD8
    ABS_SUM_8x8_32 rsp+gprsize
    paddusw                      m0, [rsp+gprsize+0x60]
    HSUM                         m0, m1, eax
    ret

hadamard8_16_wrapper %1, 0, 14
%endmacro
%macro HADAMARD8_DIFF_SSE2 2
hadamard8x8_diff_%1:
    lea                          r0, [r3*3]
    DIFF_PIXELS_8                r1, r2,  0, r3, r0, rsp+gprsize
    HADAMARD8
%ifdef m8
    TRANSPOSE8x8W                 0,  1,  2,  3,  4,  5,  6,  7,  8
%else
    TRANSPOSE8x8W                 0,  1,  2,  3,  4,  5,  6,  7, [rsp+gprsize], [rsp+mmsize+gprsize]
%endif
    HADAMARD8
    ABS_SUM_8x8         rsp+gprsize
    HSUM_SSE2                    m0, m1, eax
    ret
hadamard8_16_wrapper %1, %2, 3
%endmacro
INIT_MMX
%define ABS1 ABS1_MMX
%define HSUM HSUM_MMX
HADAMARD8_DIFF_MMX mmx

%define ABS1 ABS1_MMX2
%define HSUM HSUM_MMX2
HADAMARD8_DIFF_MMX mmx2
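; Per-CPU dispatch works by redefining ABS1/ABS2/HSUM/ABS_SUM_8x8 to the
; fastest primitive available before each instantiation, so the same macro
; body assembles into hadamard8_diff_mmx, _mmx2, _sse2 and _ssse3 variants.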
INIT_XMM
%define ABS2 ABS2_MMX2
%ifdef ARCH_X86_64
%define ABS_SUM_8x8 ABS_SUM_8x8_64
%else
%define ABS_SUM_8x8 ABS_SUM_8x8_32
%endif
HADAMARD8_DIFF_SSE2 sse2, 10
%define ABS2        ABS2_SSSE3
%define ABS_SUM_8x8 ABS_SUM_8x8_64
HADAMARD8_DIFF_SSE2 ssse3, 9
; int sse16_sse2(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
cglobal sse16_sse2, 5, 5, 8
    shr      r4d, 1          ; the loop below handles 2 lines per iteration
    pxor      m0, m0         ; m0 = 0, for byte -> word unpacking
    pxor      m7, m7         ; m7 holds the running sum of squares
.next2lines: ; FIXME why are these unaligned movs? pix1[] is aligned
    movu      m1, [r1   ]    ; m1 = pix1[0][0-15]
    movu      m2, [r2   ]    ; m2 = pix2[0][0-15]
    movu      m3, [r1+r3]    ; m3 = pix1[1][0-15]
    movu      m4, [r2+r3]    ; m4 = pix2[1][0-15]
    ; goal: |pix1 - pix2| for both line pairs
    ; algo: subtract m1 from m2 with unsigned saturation and vice versa,
    ;       then OR the two results to get the absolute difference
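    ; Roughly equivalent C, for illustration only: one of the two saturating
    ; subtractions is always 0 and the other is the absolute difference:
    ;
    ;   static inline uint8_t absdiff(uint8_t a, uint8_t b)
    ;   {
    ;       uint8_t d1 = a > b ? a - b : 0;   // psubusb a, b
    ;       uint8_t d2 = b > a ? b - a : 0;   // psubusb b, a
    ;       return d1 | d2;                   // por
    ;   }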
    mova      m5, m1
    mova      m6, m3
    psubusb   m1, m2
    psubusb   m3, m4
    psubusb   m2, m5
    psubusb   m4, m6

    por       m2, m1         ; m2 = |pix1[0] - pix2[0]|
    por       m4, m3         ; m4 = |pix1[1] - pix2[1]|

    ; now convert to 16-bit vectors so we can square them
    mova      m1, m2
    mova      m3, m4

    punpckhbw m2, m0         ; high 8 differences of line pair 0, as words
    punpckhbw m4, m0         ; high 8 differences of line pair 1, as words
    punpcklbw m1, m0         ; low 8 differences of line pair 0, as words
    punpcklbw m3, m0         ; low 8 differences of line pair 1, as words
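    ; pmaddwd multiplies adjacent word pairs and adds each pair, so squaring
    ; a register against itself yields d0^2 + d1^2 per dword lane:
    ;
    ;   // one dword lane of pmaddwd m1, m1 (illustration)
    ;   int32_t lane = (int32_t)d0 * d0 + (int32_t)d1 * d1;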
    pmaddwd   m2, m2
    pmaddwd   m4, m4
    pmaddwd   m1, m1
    pmaddwd   m3, m3

    lea       r1, [r1+r3*2]  ; pix1 += 2*line_size
    lea       r2, [r2+r3*2]  ; pix2 += 2*line_size

    paddd     m1, m2
    paddd     m3, m4
    paddd     m7, m1
    paddd     m7, m3

    dec       r4
    jnz .next2lines
    mova      m1, m7
    psrldq    m7, 8          ; shift hi qword to lo
    paddd     m7, m1
    mova      m1, m7
    psrldq    m7, 4          ; shift hi dword to lo
    paddd     m7, m1
    movd     eax, m7         ; return value
    RET
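; Reference C for the whole function, for illustration only (the name
; sse16_c and the parameter names are taken from the prototype comment):
;
;   int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
;   {
;       int sum = 0;
;       for (int y = 0; y < h; y++) {
;           for (int x = 0; x < 16; x++) {
;               int d = pix1[x] - pix2[x];
;               sum += d * d;                  // squared error per pixel
;           }
;           pix1 += line_size;
;           pix2 += line_size;
;       }
;       return sum;
;   }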