;*****************************************************************************
;* x86util.asm
;*****************************************************************************
;* Copyright (C) 2008 Loren Merritt <lorenm@u.washington.edu>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*****************************************************************************

; SBUTTERFLY is the basic interleave step used by all the transposes below:
; it splits m%2/m%3 into their punpckl/punpckh halves at granularity %1
; (wd/dq/qdq), using m%4 as scratch.
%macro SBUTTERFLY 4
    mova      m%4, m%2
    punpckl%1 m%2, m%3
    punpckh%1 m%4, m%3
    SWAP %3, %4
%endmacro

%macro TRANSPOSE4x4W 5
    SBUTTERFLY wd, %1, %2, %5
    SBUTTERFLY wd, %3, %4, %5
    SBUTTERFLY dq, %1, %3, %5
    SBUTTERFLY dq, %2, %4, %5
    SWAP %2, %3
%endmacro

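; Usage sketch (register numbers are illustrative, not from the original):
;     TRANSPOSE4x4W 0, 1, 2, 3, 4
; transposes the 4x4 matrix of words in m0..m3 with m4 as scratch: the wd
; butterflies interleave rows 0/1 and 2/3, the dq butterflies merge those
; pairs, and the final SWAP restores natural row order.
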
%macro TRANSPOSE2x4x4W 5
    SBUTTERFLY wd,  %1, %2, %5
    SBUTTERFLY wd,  %3, %4, %5
    SBUTTERFLY dq,  %1, %3, %5
    SBUTTERFLY dq,  %2, %4, %5
    SBUTTERFLY qdq, %1, %2, %5
    SBUTTERFLY qdq, %3, %4, %5
%endmacro

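; Illustrative: with two 4x4 word matrices packed into m0..m3 (one row of
; each matrix per register half), TRANSPOSE2x4x4W 0, 1, 2, 3, 4 transposes
; both matrices at once; the extra qdq stage keeps each matrix inside its
; own 64-bit half instead of undoing the interleave with a SWAP.
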
%macro TRANSPOSE4x4D 5
    SBUTTERFLY dq,  %1, %2, %5
    SBUTTERFLY dq,  %3, %4, %5
    SBUTTERFLY qdq, %1, %3, %5
    SBUTTERFLY qdq, %2, %4, %5
    SWAP %2, %3
%endmacro

%macro TRANSPOSE8x8W 9-11
%ifdef ARCH_X86_64
    SBUTTERFLY wd,  %1, %2, %9
    SBUTTERFLY wd,  %3, %4, %9
    SBUTTERFLY wd,  %5, %6, %9
    SBUTTERFLY wd,  %7, %8, %9
    SBUTTERFLY dq,  %1, %3, %9
    SBUTTERFLY dq,  %2, %4, %9
    SBUTTERFLY dq,  %5, %7, %9
    SBUTTERFLY dq,  %6, %8, %9
    SBUTTERFLY qdq, %1, %5, %9
    SBUTTERFLY qdq, %2, %6, %9
    SBUTTERFLY qdq, %3, %7, %9
    SBUTTERFLY qdq, %4, %8, %9
    SWAP %2, %5
    SWAP %4, %7
%else
; in:  m0..m7, unless %11 in which case m6 is in %9
; out: m0..m7, unless %11 in which case m4 is in %10
; spills into %9 and %10
%if %0<11
    movdqa %9, m%7
%endif
    SBUTTERFLY wd,  %1, %2, %7
    movdqa %10, m%2
    movdqa m%7, %9
    SBUTTERFLY wd,  %3, %4, %2
    SBUTTERFLY wd,  %5, %6, %2
    SBUTTERFLY wd,  %7, %8, %2
    SBUTTERFLY dq,  %1, %3, %2
    movdqa %9, m%3
    movdqa m%2, %10
    SBUTTERFLY dq,  %2, %4, %3
    SBUTTERFLY dq,  %5, %7, %3
    SBUTTERFLY dq,  %6, %8, %3
    SBUTTERFLY qdq, %1, %5, %3
    SBUTTERFLY qdq, %2, %6, %3
    movdqa %10, m%2
    movdqa m%3, %9
    SBUTTERFLY qdq, %3, %7, %2
    SBUTTERFLY qdq, %4, %8, %2
    SWAP %2, %5
    SWAP %4, %7
%if %0<11
    movdqa m%5, %10
%endif
%endif
%endmacro

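; Usage sketch: on x86_64, TRANSPOSE8x8W 0,1,2,3,4,5,6,7, 8 transposes an
; 8x8 word matrix in m0..m7 with m8 as scratch. On x86_32 there are no
; spare xmm registers, so %9/%10 name two 16-byte spill slots in memory
; (caller-chosen addresses, e.g. stack space; illustrative, not a fixed API).
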
%macro ABS1_MMX 2    ; a, tmp
    pxor    %2, %2
    psubw   %2, %1
    pmaxsw  %1, %2
%endmacro

%macro ABS2_MMX 4    ; a, b, tmp0, tmp1
    pxor    %3, %3
    pxor    %4, %4
    psubw   %3, %1
    psubw   %4, %2
    pmaxsw  %1, %3
    pmaxsw  %2, %4
%endmacro

%define ABS1 ABS1_MMX
%define ABS2 ABS2_MMX

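; ABS1 computes a packed-word absolute value without a dedicated abs
; instruction: 0-a is formed in the temp, then pmaxsw keeps max(a, -a).
; Illustrative: ABS1 m0, m1 leaves |m0| in m0 and clobbers m1. (SSSE3's
; pabsw does the same in one instruction, which is what the ABS1/ABS2
; indirection is for.)
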
%macro SPLATB_MMX 3
    movd      %1, [%2-3] ;to avoid crossing a cacheline
    punpcklbw %1, %1
%if mmsize==16
    pshuflw   %1, %1, 0xff
    punpcklqdq %1, %1
%else
    pshufw    %1, %1, 0xff
%endif
%endmacro

%macro SPLATB_SSSE3 3
    movd      %1, [%2-3]
    pshufb    %1, %3
%endmacro

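; Both variants broadcast the byte at [%2] to every lane. The dword load
; starts 3 bytes early so the byte of interest lands in the top byte of the
; dword and the access never reads past [%2] (hence never crosses a
; cacheline to the right); the 0xff shuffle then replicates that byte's
; word. SPLATB_SSSE3 instead expects %3 preloaded with a pshufb mask of 3s.
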
%macro PALIGNR_SSSE3 4
    palignr %1, %2, %3
%endmacro

%macro DEINTB 5 ; mask, reg1, mask, reg2, optional src to fill masks from
%ifnum %5
    mova   m%1, m%5
    mova   m%3, m%5
%else
    mova   m%1, %5
    mova   m%3, m%1
%endif
    pand   m%1, m%2 ; dst .. y6 .. y4
    pand   m%3, m%4 ; src .. y6 .. y4
    psrlw  m%2, 8   ; dst .. y7 .. y5
    psrlw  m%4, 8   ; src .. y7 .. y5
%endmacro

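; Deinterleaves even and odd bytes into words. Illustrative lane trace for
; one register, with m%2 = y7 y6 y5 y4 y3 y2 y1 y0 (bytes) and the mask
; filled with 0x00ff words:
;     pand  -> m%1 = 0:y6 0:y4 0:y2 0:y0  (even bytes, zero-extended)
;     psrlw -> m%2 = 0:y7 0:y5 0:y3 0:y1  (odd bytes)
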
%macro SUMSUB_BA 2-3
%if %0==2
    paddw   %1, %2
    paddw   %2, %2
    psubw   %2, %1
%else
    mova    %3, %1
    paddw   %1, %2
    psubw   %2, %3
%endif
%endmacro

%macro SUMSUB_BADC 4-5
%if %0==5
    SUMSUB_BA %1, %2, %5
    SUMSUB_BA %3, %4, %5
%else
    paddw   %1, %2
    paddw   %3, %4
    paddw   %2, %2
    paddw   %4, %4
    psubw   %2, %1
    psubw   %4, %3
%endif
%endmacro

%macro HADAMARD4_V 4+
    SUMSUB_BADC %1, %2, %3, %4
    SUMSUB_BADC %1, %3, %2, %4
%endmacro

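; Two SUMSUB_BADC passes form a 4-point Hadamard butterfly per word lane:
; from (a,b,c,d) the first pass yields (a+b, b-a, c+d, d-c) and the second
; combines the pairs into the four +-a +-b +-c +-d sums, up to SUMSUB_BA's
; sign and ordering conventions.
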
%macro HADAMARD8_V 8+
    SUMSUB_BADC %1, %2, %3, %4
    SUMSUB_BADC %5, %6, %7, %8
    SUMSUB_BADC %1, %3, %2, %4
    SUMSUB_BADC %5, %7, %6, %8
    SUMSUB_BADC %1, %5, %2, %6
    SUMSUB_BADC %3, %7, %4, %8
%endmacro

%macro TRANS_SSE2 5-6
; TRANSPOSE2x2
; %1: transpose width (d/q) - use SBUTTERFLY qdq for dq
; %2: ord/unord (for compat with sse4, unused)
; %3/%4: source regs
; %5/%6: tmp regs
%ifidn %1, d
%define mask [mask_10 GLOBAL]
%define shift 16
%elifidn %1, q
%define mask [mask_1100 GLOBAL]
%define shift 32
%endif
%if %0==6 ; less dependency if we have two tmp
    mova   m%5, mask   ; ff00
    mova   m%6, m%4    ; x5x4
    psll%1 m%4, shift  ; x4..
    pand   m%6, m%5    ; x5..
    pandn  m%5, m%3    ; ..x0
    psrl%1 m%3, shift  ; ..x1
    por    m%4, m%5    ; x4x0
    por    m%3, m%6    ; x5x1
%else ; more dependency, one insn less. sometimes faster, sometimes not
    mova   m%5, m%4    ; x5x4
    psll%1 m%4, shift  ; x4..
    pxor   m%4, m%3    ; (x4^x1)x0
    pand   m%4, mask   ; (x4^x1)..
    pxor   m%3, m%4    ; x4x0
    psrl%1 m%4, shift  ; ..(x1^x4)
    pxor   m%5, m%4    ; x5x1
    SWAP   %4, %5
%endif
%endmacro

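; Worked example for the d case (m%3 = x1:x0 word pairs, m%4 = x5:x4,
; shift = 16): the goal is the 2x2 transpose that exchanges x1 and x4
; between the registers. The two-tmp path isolates halves with the ff00
; mask and recombines them with por; the one-tmp path reaches the same
; pairing by xor-ing the differing halves through the mask.
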
%macro TRANS_SSE4 5-6 ; see above
%ifidn %1, d
    mova   m%5, m%3
%ifidn %2, ord
    psrl%1 m%3, 16
%endif
    pblendw m%3, m%4, 10101010b
    psll%1  m%4, 16
%ifidn %2, ord
    pblendw m%4, m%5, 01010101b
%else
    psrl%1 m%5, 16
    por    m%4, m%5
%endif
%elifidn %1, q
    mova   m%5, m%3
    shufps m%3, m%4, 10001000b
    shufps m%5, m%4, 11011101b
    SWAP   %4, %5
%endif
%endmacro

%macro HADAMARD 5-6
; %1=distance in words (0 for vertical pass, 1/2/4 for horizontal passes)
; %2=sumsub/max/amax (sum and diff / maximum / maximum of absolutes)
; %3/%4: regs
; %5(%6): tmpregs
%if %1!=0 ; have to reorder stuff for horizontal op
    %ifidn %2, sumsub
        %define ORDER ord
        ; sumsub needs order because a-b != b-a unless a=b
    %else
        %define ORDER unord
        ; if we just max, order doesn't matter (allows pblendw+or in sse4)
    %endif
    %if %1==1
        TRANS d, ORDER, %3, %4, %5, %6
    %elif %1==2
        %if mmsize==8
            SBUTTERFLY dq, %3, %4, %5
        %else
            TRANS q, ORDER, %3, %4, %5, %6
        %endif
    %elif %1==4
        SBUTTERFLY qdq, %3, %4, %5
    %endif
%endif
%ifidn %2, sumsub
    SUMSUB_BA m%3, m%4, m%5
%else
    %ifidn %2, amax
        %if %0==6
            ABS2 m%3, m%4, m%5, m%6
        %else
            ABS1 m%3, m%5
            ABS1 m%4, m%5
        %endif
    %endif
    pmaxsw m%3, m%4
%endif
%endmacro

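; Illustrative: HADAMARD 0, sumsub, 0, 1, 2 is a vertical sum/diff of
; m0/m1; HADAMARD 1, amax, 0, 1, 2, 3 first swaps adjacent words between
; the two registers (via TRANS) so the op works horizontally, then keeps
; max(|a|,|b|) - the building block of SATD.
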
%macro HADAMARD2_2D 6-7 sumsub
    HADAMARD 0, sumsub, %1, %2, %5
    HADAMARD 0, sumsub, %3, %4, %5
    SBUTTERFLY %6, %1, %2, %5
%ifnum %7
    HADAMARD 0, amax, %1, %2, %5, %7
%else
    HADAMARD 0, %7, %1, %2, %5
%endif
    SBUTTERFLY %6, %3, %4, %5
%ifnum %7
    HADAMARD 0, amax, %3, %4, %5, %7
%else
    HADAMARD 0, %7, %3, %4, %5
%endif
%endmacro

%macro HADAMARD4_2D 5-6 sumsub
    HADAMARD2_2D %1, %2, %3, %4, %5, wd
    HADAMARD2_2D %1, %3, %2, %4, %5, dq, %6
    SWAP %2, %3
%endmacro

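; Illustrative: HADAMARD4_2D 0, 1, 2, 3, 4 runs the full 2D 4x4 Hadamard
; on words in m0..m3 (vertical pass, transpose via the butterfly widths,
; horizontal pass). Passing a spare register number as the optional 6th
; argument switches the last pass to amax, folding in the absolute values.
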
%macro HADAMARD4_2D_SSE 5-6 sumsub
    HADAMARD  0, sumsub, %1, %2, %5 ; 1st V row 0 + 1
    HADAMARD  0, sumsub, %3, %4, %5 ; 1st V row 2 + 3
    SBUTTERFLY   wd, %1, %2, %5     ; %1: m0 1+0 %2: m1 1+0
    SBUTTERFLY   wd, %3, %4, %5     ; %3: m0 3+2 %4: m1 3+2
    HADAMARD2_2D %1, %3, %2, %4, %5, dq
    SBUTTERFLY  qdq, %1, %2, %5
    HADAMARD  0, %6, %1, %2, %5     ; 2nd H m1/m0 row 0+1
    SBUTTERFLY  qdq, %3, %4, %5
    HADAMARD  0, %6, %3, %4, %5     ; 2nd H m1/m0 row 2+3
%endmacro

%macro HADAMARD8_2D 9-10 sumsub
    HADAMARD2_2D %1, %2, %3, %4, %9, wd
    HADAMARD2_2D %5, %6, %7, %8, %9, wd
    HADAMARD2_2D %1, %3, %2, %4, %9, dq
    HADAMARD2_2D %5, %7, %6, %8, %9, dq
    HADAMARD2_2D %1, %5, %3, %7, %9, qdq, %10
    HADAMARD2_2D %2, %6, %4, %8, %9, qdq, %10
%ifnidn %10, amax
    SWAP %2, %5
    SWAP %4, %7
%endif
%endmacro

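; Same scheme for an 8x8 word block in m0..m7: the wd/dq/qdq butterfly
; widths implement the transpose between the two 1D passes. The final
; SWAPs restore row order and are skipped for amax, since the caller is
; about to sum absolute values and doesn't care about ordering.
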
%macro SUMSUB2_AB 3
    mova    %3, %1
    paddw   %1, %1
    paddw   %1, %2
    psubw   %3, %2
    psubw   %3, %2
%endmacro

%macro SUMSUBD2_AB 4
    mova    %4, %1
    mova    %3, %2
    psraw   %2, 1
    psraw   %1, 1
    paddw   %2, %4
    psubw   %1, %3
%endmacro

%macro DCT4_1D 5
%ifnum %5
    SUMSUB_BADC m%4, m%1, m%3, m%2; m%5
    SUMSUB_BA   m%3, m%4, m%5
    SUMSUB2_AB  m%1, m%2, m%5
    SWAP %1, %3, %4, %5, %2
%else
    SUMSUB_BADC m%4, m%1, m%3, m%2
    SUMSUB_BA   m%3, m%4
    mova       [%5], m%2
    SUMSUB2_AB  m%1, [%5], m%2
    SWAP %1, %3
    SWAP %2, %4
%endif
%endmacro

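; One 1D stage of the H.264 4x4 integer DCT: the SUMSUB passes build the
; even part (s03+s12, s03-s12) and SUMSUB2_AB the odd part (2*d03+d12,
; d03-2*d12). A register number in %5 keeps everything in registers; a
; memory operand selects the spill variant for register-starved x86_32.
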
%macro IDCT4_1D 5-6
%ifnum %5
    SUMSUBD2_AB m%2, m%4, m%6, m%5
    SUMSUB_BA   m%3, m%1, m%6
    SUMSUB_BADC m%4, m%3, m%2, m%1, m%6
%else
    SUMSUBD2_AB m%2, m%4, [%5], [%5+16]
    SUMSUB_BA   m%3, m%1
    SUMSUB_BADC m%4, m%3, m%2, m%1
%endif
    SWAP %1, %4, %3
%endmacro

%macro LOAD_DIFF 5
%ifidn %3, none
    movh       %1, %4
    movh       %2, %5
    punpcklbw  %1, %2
    punpcklbw  %2, %2
    psubw      %1, %2
%else
    movh       %1, %4
    punpcklbw  %1, %3
    movh       %2, %5
    punpcklbw  %2, %3
    psubw      %1, %2
%endif
%endmacro

%macro LOAD_DIFF8x4_SSE2 8
    LOAD_DIFF m%1, m%5, m%6, [%7+%1*FENC_STRIDE], [%8+%1*FDEC_STRIDE]
    LOAD_DIFF m%2, m%5, m%6, [%7+%2*FENC_STRIDE], [%8+%2*FDEC_STRIDE]
    LOAD_DIFF m%3, m%5, m%6, [%7+%3*FENC_STRIDE], [%8+%3*FDEC_STRIDE]
    LOAD_DIFF m%4, m%5, m%6, [%7+%4*FENC_STRIDE], [%8+%4*FDEC_STRIDE]
%endmacro

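; Note how %1..%4 do double duty as row numbers and destination registers,
; so LOAD_DIFF8x4_SSE2 0, 1, 2, 3, 8, 9, r0, r2 (illustrative) diffs rows
; 0-3 of fenc/fdec into m0..m3. In LOAD_DIFF itself, %3 is either a zeroed
; register used to unpack bytes to words, or 'none', in which case
; psubw((dec:enc) - (dec:dec)) = enc-dec makes the zero register unneeded.
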
%macro LOAD_DIFF8x4_SSSE3 8 ; 4x dst, 1x tmp, 1x mul, 2x ptr
    movh       m%2, [%8+%1*FDEC_STRIDE]
    movh       m%1, [%7+%1*FENC_STRIDE]
    punpcklbw  m%1, m%2
    movh       m%3, [%8+%2*FDEC_STRIDE]
    movh       m%2, [%7+%2*FENC_STRIDE]
    punpcklbw  m%2, m%3
    movh       m%4, [%8+%3*FDEC_STRIDE]
    movh       m%3, [%7+%3*FENC_STRIDE]
    punpcklbw  m%3, m%4
    movh       m%5, [%8+%4*FDEC_STRIDE]
    movh       m%4, [%7+%4*FENC_STRIDE]
    punpcklbw  m%4, m%5
    pmaddubsw  m%1, m%6
    pmaddubsw  m%2, m%6
    pmaddubsw  m%3, m%6
    pmaddubsw  m%4, m%6
%endmacro

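; The SSSE3 variant skips the byte->word unpack entirely: fenc and fdec
; bytes are interleaved with punpcklbw, and one pmaddubsw against the +-1
; byte multiplier preloaded in m%6 yields the signed word differences.
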
%macro STORE_DCT 6
    movq   [%5+%6+ 0], m%1
    movq   [%5+%6+ 8], m%2
    movq   [%5+%6+16], m%3
    movq   [%5+%6+24], m%4
    movhps [%5+%6+32], m%1
    movhps [%5+%6+40], m%2
    movhps [%5+%6+48], m%3
    movhps [%5+%6+56], m%4
%endmacro

%macro STORE_IDCT 4
    movhps [r0-4*FDEC_STRIDE], %1
    movh   [r0-3*FDEC_STRIDE], %1
    movhps [r0-2*FDEC_STRIDE], %2
    movh   [r0-1*FDEC_STRIDE], %2
    movhps [r0+0*FDEC_STRIDE], %3
    movh   [r0+1*FDEC_STRIDE], %3
    movhps [r0+2*FDEC_STRIDE], %4
    movh   [r0+3*FDEC_STRIDE], %4
%endmacro

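; Each register holds two 8-byte rows; movhps/movh write the high and low
; halves to adjacent FDEC_STRIDE lines, covering rows -4..+3 around r0.
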
%macro LOAD_DIFF_8x4P 7-10 r0,r2,0 ; 4x dest, 2x temp, 2x pointer, increment?
    LOAD_DIFF m%1, m%5, m%7, [%8],      [%9]
    LOAD_DIFF m%2, m%6, m%7, [%8+r1],   [%9+r3]
    LOAD_DIFF m%3, m%5, m%7, [%8+2*r1], [%9+2*r3]
    LOAD_DIFF m%4, m%6, m%7, [%8+r4],   [%9+r5]
%if %10
    lea %8, [%8+4*r1]
    lea %9, [%9+4*r3]
%endif
%endmacro

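; Generic-stride variant: r1/r3 hold the two strides and r4/r5 are assumed
; to hold 3*stride (a caller-established convention; illustrative), so the
; four rows come from offsets 0, stride, 2*stride, 3*stride. Passing 1 as
; the increment argument advances both pointers by 4 rows for the next call.
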