1 ;******************************************************************************
2 ;* Pixel utilities SIMD
4 ;* Copyright (C) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 ;* Copyright (C) 2014 Clément Bœsch <u pkh me>
7 ;* This file is part of FFmpeg.
9 ;* FFmpeg is free software; you can redistribute it and/or
10 ;* modify it under the terms of the GNU Lesser General Public
11 ;* License as published by the Free Software Foundation; either
12 ;* version 2.1 of the License, or (at your option) any later version.
14 ;* FFmpeg is distributed in the hope that it will be useful,
15 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
16 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 ;* Lesser General Public License for more details.
19 ;* You should have received a copy of the GNU Lesser General Public
20 ;* License along with FFmpeg; if not, write to the Free Software
21 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 ;******************************************************************************
24 %include "x86util.asm"
28 ;-------------------------------------------------------------------------------
29 ; int ff_pixelutils_sad_8x8_mmx(const uint8_t *src1, ptrdiff_t stride1,
30 ; const uint8_t *src2, ptrdiff_t stride2);
31 ;-------------------------------------------------------------------------------
;; NOTE(review): this view is a non-contiguous sample -- the per-row SAD
;; arithmetic, loop control and return sequence of this function live on
;; elided lines; confirm against the full file before editing.
;; MMX variant: processes the 8x8 block one 8-byte row pair at a time,
;; using aligned loads (mova).
33 cglobal pixelutils_sad_8x8, 4,4,0, src1, stride1, src2, stride2
38 mova m2, [src1q + stride1q]       ; aligned load: next row of src1
40 mova m3, [src2q + stride2q]       ; aligned load: next row of src2
55 lea src1q, [src1q + 2*stride1q]   ; advance src1 by two rows (flags preserved)
56 lea src2q, [src2q + 2*stride2q]   ; advance src2 by two rows
66 ;-------------------------------------------------------------------------------
67 ; int ff_pixelutils_sad_8x8_mmxext(const uint8_t *src1, ptrdiff_t stride1,
68 ; const uint8_t *src2, ptrdiff_t stride2);
69 ;-------------------------------------------------------------------------------
;; NOTE(review): fragment only -- accumulation of the psadbw results and the
;; return sequence are on elided lines.
;; MMXEXT variant: psadbw computes the sum of absolute byte differences of a
;; full 8-byte row in a single instruction, replacing the manual MMX sequence.
71 cglobal pixelutils_sad_8x8, 4,4,0, src1, stride1, src2, stride2
75 mova m1, [src1q + stride1q]       ; aligned load: next row of src1
77 psadbw m1, [src2q + stride2q]     ; m1 = SAD(src1 row, src2 row)
80 lea src1q, [src1q + 2*stride1q]   ; advance both sources by two rows
81 lea src2q, [src2q + 2*stride2q]
86 ;-------------------------------------------------------------------------------
87 ; int ff_pixelutils_sad_16x16_mmxext(const uint8_t *src1, ptrdiff_t stride1,
88 ; const uint8_t *src2, ptrdiff_t stride2);
89 ;-------------------------------------------------------------------------------
;; NOTE(review): fragment only -- the lower-half psadbw, accumulation, loop and
;; return are on elided lines. A 16-byte row is handled as two 8-byte halves
;; because MMX registers are 64 bits wide.
91 cglobal pixelutils_sad_16x16, 4,4,0, src1, stride1, src2, stride2
97 psadbw m1, [src2q + 8]            ; SAD of the upper 8-byte half of the row
106 ;-------------------------------------------------------------------------------
107 ; int ff_pixelutils_sad_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
108 ; const uint8_t *src2, ptrdiff_t stride2);
109 ;-------------------------------------------------------------------------------
;; NOTE(review): fragment only -- the psadbw/accumulate steps between these
;; loads, the loop control and the return are on elided lines.
;; SSE2 variant: one full 16-byte row per xmm load; movu is used because no
;; alignment is guaranteed for the caller's buffers (see the _a/_u variants
;; below for the alignment-specialized versions).
111 cglobal pixelutils_sad_16x16, 4,4,5, src1, stride1, src2, stride2
114 movu m1, [src1q + stride1q]      ; unaligned load: row n+1 of src1
115 movu m3, [src2q + stride2q]      ; unaligned load: row n+1 of src2
120 lea src1q, [src1q + 2*stride1q]  ; advance both sources by two rows
121 lea src2q, [src2q + 2*stride2q]
124 movu m1, [src1q + stride1q]      ; next pair of rows
125 movu m3, [src2q + stride2q]
136 ;-------------------------------------------------------------------------------
137 ; int ff_pixelutils_sad_[au]_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
138 ; const uint8_t *src2, ptrdiff_t stride2);
139 ;-------------------------------------------------------------------------------
;; Emits pixelutils_sad_a_16x16 / pixelutils_sad_u_16x16.
;; %1 is the alignment suffix: 'a' expands mov%1 to mova (aligned loads),
;; 'u' to movu (unaligned). Only src2 is loaded via mov%1; the src1 operand
;; is a memory operand of psadbw.
;; NOTE(review): the %endmacro, accumulation and return are on elided lines.
140 %macro SAD_XMM_16x16 1
142 cglobal pixelutils_sad_%1_16x16, 4,4,3, src1, stride1, src2, stride2
145 mov%1 m1, [src2q + stride2q]     ; load row per %1 alignment variant
146 psadbw m1, [src1q + stride1q]    ; m1 = SAD of one 16-byte row
149 lea src1q, [src1q + 2*stride1q]  ; advance both sources by two rows
150 lea src2q, [src2q + 2*stride2q]
153 mov%1 m1, [src2q + stride2q]     ; same pattern for the next row pair
154 psadbw m1, [src1q + stride1q]
;; NOTE(review): macro body entirely elided from this view -- per its name it
;; presumably processes 4 rows of a 32-wide SAD with unaligned loads; confirm
;; against the full file.
168 %macro PROCESS_SAD_32x4_U 0
;; NOTE(review): macro body entirely elided from this view -- per its name it
;; presumably processes 4 rows of a 32-wide SAD, with %1 selecting the
;; aligned/unaligned load form as elsewhere in this file; confirm against the
;; full file.
214 %macro PROCESS_SAD_32x4 1
252 ;-----------------------------------------------------------------------------
253 ; int ff_pixelutils_sad_32x32_sse2(const uint8_t *src1, ptrdiff_t stride1,
254 ; const uint8_t *src2, ptrdiff_t stride2);
255 ;-----------------------------------------------------------------------------
;; SSE2 32x32 SAD. 4 args, 5 GPRs, 5 xmm registers.
;; NOTE(review): entire body elided from this view.
257 cglobal pixelutils_sad_32x32, 4,5,5, src1, stride1, src2, stride2
271 ;-------------------------------------------------------------------------------
272 ; int ff_pixelutils_sad_[au]_32x32_sse2(const uint8_t *src1, ptrdiff_t stride1,
273 ; const uint8_t *src2, ptrdiff_t stride2);
274 ;-------------------------------------------------------------------------------
;; Emits pixelutils_sad_a_32x32 / pixelutils_sad_u_32x32; %1 is the 'a'/'u'
;; alignment suffix, consistent with SAD_XMM_16x16 above.
;; NOTE(review): body and %endmacro elided from this view.
275 %macro SAD_XMM_32x32 1
277 cglobal pixelutils_sad_%1_32x32, 4,5,3, src1, stride1, src2, stride2
295 %if HAVE_AVX2_EXTERNAL
296 ;-------------------------------------------------------------------------------
297 ; int ff_pixelutils_sad_32x32_avx2(const uint8_t *src1, ptrdiff_t stride1,
298 ; const uint8_t *src2, ptrdiff_t stride2);
299 ;-------------------------------------------------------------------------------
;; AVX2 variant: one full 32-byte row per ymm load, four rows per iteration.
;; NOTE(review): the psadbw/accumulate steps between the loads, the loop
;; control and the final horizontal reduction/return are on elided lines.
301 cglobal pixelutils_sad_32x32, 4,7,5, src1, stride1, src2, stride2
304 lea r5, [stride1q * 3]           ; r5 = 3*stride1, addresses row 3 of src1
305 lea r6, [stride2q * 3]           ; r6 = 3*stride2, addresses row 3 of src2
308 movu m1, [src1q] ; row 0 of pix0
309 movu m2, [src2q] ; row 0 of pix1
310 movu m3, [src1q + stride1q] ; row 1 of pix0
311 movu m4, [src2q + stride2q] ; row 1 of pix1
318 movu m1, [src1q + 2 * stride1q] ; row 2 of pix0
319 movu m2, [src2q + 2 * stride2q] ; row 2 of pix1
320 movu m3, [src1q + r5] ; row 3 of pix0
321 movu m4, [src2q + r6] ; row 3 of pix1
328 lea src2q, [src2q + 4 * stride2q] ; advance both sources by four rows
329 lea src1q, [src1q + 4 * stride1q]
334 vextracti128 xm1, m0, 1          ; high 128 bits of ymm accumulator -> xm1
341 ;-------------------------------------------------------------------------------
342 ; int ff_pixelutils_sad_[au]_32x32_avx2(const uint8_t *src1, ptrdiff_t stride1,
343 ; const uint8_t *src2, ptrdiff_t stride2);
344 ;-------------------------------------------------------------------------------
;; Emits pixelutils_sad_a_32x32 / pixelutils_sad_u_32x32 for AVX2; %1 is the
;; 'a'/'u' alignment suffix expanding mov%1 to mova/movu, consistent with the
;; SSE2 macros above. Four 32-byte rows per iteration; src1 rows are memory
;; operands of psadbw.
;; NOTE(review): accumulation between the psadbw pairs, loop control,
;; final reduction/return and %endmacro are on elided lines.
345 %macro SAD_AVX2_32x32 1
347 cglobal pixelutils_sad_%1_32x32, 4,7,3, src1, stride1, src2, stride2
350 lea r5, [stride1q * 3]           ; r5 = 3*stride1, addresses row 3 of src1
351 lea r6, [stride2q * 3]           ; r6 = 3*stride2, addresses row 3 of src2
354 mov%1 m1, [src2q] ; row 0 of pix1
356 mov%1 m2, [src2q + stride2q] ; row 1 of pix1
357 psadbw m2, [src1q + stride1q]    ; m2 = SAD(row 1)
362 mov%1 m1, [src2q + 2 * stride2q] ; row 2 of pix1
363 psadbw m1, [src1q + 2 * stride1q] ; m1 = SAD(row 2)
364 mov%1 m2, [src2q + r6] ; row 3 of pix1
365 psadbw m2, [src1q + r5]          ; m2 = SAD(row 3)
370 lea src2q, [src2q + 4 * stride2q] ; advance both sources by four rows
371 lea src1q, [src1q + 4 * stride1q]
376 vextracti128 xm1, m0, 1          ; high 128 bits of ymm accumulator -> xm1