;******************************************************************************
;* VP9 loop filter SIMD optimizations
;*
;* Copyright (C) 2013-2014 Clément Bœsch <u pkh me>
;* Copyright (C) 2014 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

pb_4:   times 16 db 0x04
pb_10:  times 16 db 0x10
pb_40:  times 16 db 0x40
pb_81:  times 16 db 0x81
pb_f8:  times 16 db 0xf8
pb_fe:  times 16 db 0xfe
pb_ff:  times 16 db 0xff
; with the mix functions, two 8-bit thresholds are packed into one 16-bit
; value; the following mask is used to splat both halves into the same
; register
mask_mix:   times 8 db 0

mask_mix84: times 8 db 0xff

mask_mix48: times 8 db 0x00
%macro ABSSUB 4 ; dst, src1 (RO), src2 (RO), tmp

%macro CMP_GT 2-3 ; src/dst, cmp, pb_80

; %1 = abs(%2-%3) > %4
%macro ABSSUB_GT 5-6 [pb_80] ; dst, src1, src2, cmp, tmp, [pb_80]
    ABSSUB      %1, %2, %3, %5          ; dst = abs(src1-src2)
    CMP_GT      %1, %4, %6              ; dst > cmp
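; in scalar terms, ABSSUB_GT builds a per-byte boolean mask:
;   dst[i] = (abs(src1[i] - src2[i]) > cmp[i]) ? 0xff : 0x00
; the optional pb_80 operand is the usual 0x80 bias that turns the signed
; byte compare inside CMP_GT into an unsigned one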
%macro MASK_APPLY 4 ; %1=new_data/dst %2=old_data %3=mask %4=tmp
    pand        %1, %3                  ; new &= mask
    pandn       %4, %3, %2              ; tmp = ~mask & old
    por         %1, %4                  ; new&mask | old&~mask

    punpck%1bw  %2, %3, %4

%macro FILTER_SUBx2_ADDx2 11 ; %1=dst %2=h/l %3=cache %4=stack_off %5=sub1 %6=sub2 %7=add1
                             ; %8=add2 %9=rshift, [unpack], [unpack_is_mem_on_x86_32]
    psubw       %3, [rsp+%4+%5*mmsize*2]
    psubw       %3, [rsp+%4+%6*mmsize*2]
    paddw       %3, [rsp+%4+%7*mmsize*2]

    punpck%2bw  %1, %10, m0

    UNPACK      %2, %1, %10, m0

    mova        [rsp+%4+%8*mmsize*2], %1

    paddw       %3, [rsp+%4+%8*mmsize*2]

; FIXME interleave l/h better (for instruction pairing)
%macro FILTER_INIT 9 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, filterid, mask, source
    FILTER%7_INIT %1, l, %3, %6 + 0
    FILTER%7_INIT %2, h, %4, %6 + mmsize

    MASK_APPLY  %1, %9, %8, %2

%macro FILTER_UPDATE 12-16 "", "", "", 0 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, -, -, +, +, rshift,
                                         ; mask, [source], [unpack + src], [unpack_is_mem_on_x86_32]
; FIXME interleave this properly with the subx2/addx2
%if %16 == 0 || ARCH_X86_64

    FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0,      %7, %8, %9, %10, %11, %14, %16
    FILTER_SUBx2_ADDx2 %2, h, %4, %6 + mmsize, %7, %8, %9, %10, %11, %14, %16

    MASK_APPLY  %1, %13, %12, %2

    MASK_APPLY  %1, %5, %12, %2

%macro SRSHIFT3B_2X 4 ; reg1, reg2, [pb_10], tmp

%macro EXTRACT_POS_NEG 3 ; i8, neg, pos

    pcmpgtb     %3, %1                  ; i8 < 0 mask
    psubb       %2, %1                  ; -i8 (only the originally negative values are kept)
    pand        %2, %3                  ; negative values of i8 (but stored as +)
    pandn       %3, %1                  ; positive values of i8

%macro SIGN_ADD 4 ; dst, u8, i8, tmp1
    EXTRACT_POS_NEG %3, %4, %1
    paddusb     %1, %2                  ; add the positives
    psubusb     %1, %4                  ; sub the negatives

%macro SIGN_SUB 4 ; dst, u8, i8, tmp1
    EXTRACT_POS_NEG %3, %1, %4
    paddusb     %1, %2                  ; add the negatives
    psubusb     %1, %4                  ; sub the positives
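; SIGN_ADD/SIGN_SUB add/subtract a signed byte to/from an unsigned byte with
; unsigned saturation; since there is no mixed-sign byte op, the signed
; operand is first split into its positive and negative parts. Scalar sketch,
; with clip_u8 denoting clamping to [0,255]:
;   SIGN_ADD: dst = clip_u8(u8 + i8) = sat_sub(sat_add(u8, pos(i8)), neg(i8))
;   SIGN_SUB: dst = clip_u8(u8 - i8) = sat_sub(sat_add(u8, neg(i8)), pos(i8))
; where pos()/neg() are the split parts, both stored as non-negative bytes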
%macro FILTER6_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off
    UNPACK      %2, %1, rp3, m0         ; p3: B->W
    mova        [rsp+%4+0*mmsize*2], %1
    paddw       %3, %1, %1              ; p3*2

    punpck%2bw  %1, m1, m0              ; p2: B->W
    mova        [rsp+%4+1*mmsize*2], %1
    paddw       %3, %1                  ; p3*3 + p2
    paddw       %3, %1                  ; p3*3 + p2*2
    UNPACK      %2, %1, rp1, m0         ; p1: B->W
    mova        [rsp+%4+2*mmsize*2], %1
    paddw       %3, %1                  ; p3*3 + p2*2 + p1
    UNPACK      %2, %1, rp0, m0         ; p0: B->W
    mova        [rsp+%4+3*mmsize*2], %1
    paddw       %3, %1                  ; p3*3 + p2*2 + p1 + p0
    UNPACK      %2, %1, rq0, m0         ; q0: B->W
    mova        [rsp+%4+4*mmsize*2], %1
    paddw       %3, %1                  ; p3*3 + p2*2 + p1 + p0 + q0
    paddw       %3, [pw_4]              ; p3*3 + p2*2 + p1 + p0 + q0 + 4
    psraw       %1, %3, 3               ; (p3*3 + p2*2 + p1 + p0 + q0 + 4) >> 3
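; this is the first flat8 output of the VP9 spec,
;   op2 = (p3*3 + p2*2 + p1 + p0 + q0 + 4) >> 3,
; with the unpacked words cached on the stack; FILTER_UPDATE then slides the
; window over the cached running sum, e.g. for the next tap
;   sum += -p3 - p2 + p1 + q1;  op1 = sum >> 3;
; matching the -/+ annotations on the FILTER_UPDATE invocations below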
%macro FILTER14_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off
    punpck%2bw  %1, m2, m0              ; p7: B->W
    mova        [rsp+%4+ 8*mmsize*2], %1
    psllw       %3, %1, 3               ; p7*8

    punpck%2bw  %1, m3, m0              ; p6: B->W
    mova        [rsp+%4+ 9*mmsize*2], %1
    paddw       %3, %1                  ; p7*7 + p6
    paddw       %3, %1                  ; p7*7 + p6*2
    UNPACK      %2, %1, rp5, m0         ; p5: B->W
    mova        [rsp+%4+10*mmsize*2], %1
    paddw       %3, %1                  ; p7*7 + p6*2 + p5
    UNPACK      %2, %1, rp4, m0         ; p4: B->W
    mova        [rsp+%4+11*mmsize*2], %1
    paddw       %3, %1                  ; p7*7 + p6*2 + p5 + p4
    paddw       %3, [rsp+%4+ 0*mmsize*2] ; p7*7 + p6*2 + p5 + p4 + p3
    paddw       %3, [rsp+%4+ 1*mmsize*2] ; p7*7 + p6*2 + p5 + .. + p2
    paddw       %3, [rsp+%4+ 2*mmsize*2] ; p7*7 + p6*2 + p5 + .. + p1
    paddw       %3, [rsp+%4+ 3*mmsize*2] ; p7*7 + p6*2 + p5 + .. + p0
    paddw       %3, [rsp+%4+ 4*mmsize*2] ; p7*7 + p6*2 + p5 + .. + p0 + q0
    paddw       %3, [pw_8]              ; p7*7 + p6*2 + p5 + .. + p0 + q0 + 8
    psraw       %1, %3, 4               ; (p7*7 + p6*2 + p5 + .. + p0 + q0 + 8) >> 4
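; same running-sum scheme for the 16-sample (flat16) filter: the first output
; is op6 = (p7*7 + p6*2 + p5 + p4 + p3 + p2 + p1 + p0 + q0 + 8) >> 4, and each
; subsequent tap subtracts/adds two samples before shifting by 4 again, as
; laid out in the table further down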
%macro TRANSPOSE16x16B 17

    SBUTTERFLY  bw,  %1,  %2, %16
    SBUTTERFLY  bw,  %3,  %4, %16
    SBUTTERFLY  bw,  %5,  %6, %16
    SBUTTERFLY  bw,  %7,  %8, %16
    SBUTTERFLY  bw,  %9, %10, %16
    SBUTTERFLY  bw, %11, %12, %16
    SBUTTERFLY  bw, %13, %14, %16

    SBUTTERFLY  bw, %15, %16, %14
    SBUTTERFLY  wd,  %1,  %3, %14
    SBUTTERFLY  wd,  %2,  %4, %14
    SBUTTERFLY  wd,  %5,  %7, %14
    SBUTTERFLY  wd,  %6,  %8, %14
    SBUTTERFLY  wd,  %9, %11, %14
    SBUTTERFLY  wd, %10, %12, %14
    SBUTTERFLY  wd, %13, %15, %14

    SBUTTERFLY  wd, %14, %16, %12
    SBUTTERFLY  dq,  %1,  %5, %12
    SBUTTERFLY  dq,  %2,  %6, %12
    SBUTTERFLY  dq,  %3,  %7, %12
    SBUTTERFLY  dq,  %4,  %8, %12
    SBUTTERFLY  dq,  %9, %13, %12
    SBUTTERFLY  dq, %10, %14, %12
    SBUTTERFLY  dq, %11, %15, %12

    SBUTTERFLY  dq, %12, %16, %8
    SBUTTERFLY  qdq, %1,  %9, %8
    SBUTTERFLY  qdq, %2, %10, %8
    SBUTTERFLY  qdq, %3, %11, %8
    SBUTTERFLY  qdq, %4, %12, %8
    SBUTTERFLY  qdq, %5, %13, %8
    SBUTTERFLY  qdq, %6, %14, %8
    SBUTTERFLY  qdq, %7, %15, %8

    SBUTTERFLY  qdq, %8, %16, %1

%macro TRANSPOSE8x8B 13
    SBUTTERFLY  bw, %1, %2, %7

    SBUTTERFLY  bw, %3, %4, %2
    SBUTTERFLY  bw, %5, %6, %2
    SBUTTERFLY  bw, %7, %8, %2
    SBUTTERFLY  wd, %1, %3, %2

    SBUTTERFLY  wd, %2, %4, %3
    SBUTTERFLY  wd, %5, %7, %3
    SBUTTERFLY  wd, %6, %8, %3
    SBUTTERFLY  dq, %1, %5, %3
    SBUTTERFLY  dq, %2, %6, %3

    SBUTTERFLY  dq, %3, %7, %2
    SBUTTERFLY  dq, %4, %8, %2

%macro DEFINE_REAL_P7_TO_Q7 0-1 0
%define P7 dstq  + 4*mstrideq + %1
%define P6 dstq  +   mstride3q + %1
%define P5 dstq  + 2*mstrideq + %1
%define P4 dstq  +   mstrideq + %1

%define P2 dstq  +   strideq + %1
%define P1 dstq  + 2*strideq + %1
%define P0 dstq  +   stride3q + %1
%define Q0 dstq  + 4*strideq + %1
%define Q1 dst2q +   mstride3q + %1
%define Q2 dst2q + 2*mstrideq + %1
%define Q3 dst2q +   mstrideq + %1
%define Q4 dst2q + %1
%define Q5 dst2q +   strideq + %1
%define Q6 dst2q + 2*strideq + %1
%define Q7 dst2q +   stride3q + %1
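; note: from a single base pointer, stride3q/4*strideq only reach four rows
; in either direction, so a second base register (dst2q, set up in LOOPFILTER
; below as dstq + 8*strideq) is needed to address Q1..Q7 in the 16-row case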
%macro DEFINE_TRANSPOSED_P7_TO_Q7 0-1 0
%define P3 rsp +  0*mmsize + %1
%define P2 rsp +  1*mmsize + %1
%define P1 rsp +  2*mmsize + %1
%define P0 rsp +  3*mmsize + %1
%define Q0 rsp +  4*mmsize + %1
%define Q1 rsp +  5*mmsize + %1
%define Q2 rsp +  6*mmsize + %1
%define Q3 rsp +  7*mmsize + %1

%define P7 rsp +  8*mmsize + %1
%define P6 rsp +  9*mmsize + %1
%define P5 rsp + 10*mmsize + %1
%define P4 rsp + 11*mmsize + %1
%define Q4 rsp + 12*mmsize + %1
%define Q5 rsp + 13*mmsize + %1
%define Q6 rsp + 14*mmsize + %1
%define Q7 rsp + 15*mmsize + %1
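; transposed layout: the pixel rows live on the stack, with P3..Q3 in the
; first 8 mmsize slots (used by every filter size) and the outer rows
; P7..P4/Q4..Q7 in slots 8..15, which only the 16-pixel filter touches
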
; ..............AB -> AAAAAAAABBBBBBBB
%macro SPLATB_MIX 1-2 [mask_mix]

%macro LOOPFILTER 5 ; %1=v/h %2=size1 %3+%4=stack, %5=mmx/32bit stack only

%if ARCH_X86_32 || mmsize == 8

cglobal vp9_loop_filter_%1_%2_ %+ mmsize, 5, 9, 16, %3 + %4 + %%ext, dst, stride, E, I, H, mstride, dst2, stride3, mstride3

cglobal vp9_loop_filter_%1_%2_ %+ mmsize, 4, 8, 16, %3 + %4 + %%ext, dst, stride, E, I, mstride, dst2, stride3, mstride3

cglobal vp9_loop_filter_%1_%2_ %+ mmsize, 2, 6, 16, %3 + %4 + %%ext, dst, stride, mstride, dst2, stride3, mstride3

    mov         mstrideq, strideq

    lea         stride3q, [strideq*3]
    lea         mstride3q, [mstrideq*3]

    lea         dstq, [dstq + 4*strideq - 4]

    lea         dstq, [dstq + 4*strideq - 8]    ; go from top center (h pos) to center left (v pos)

    lea         dstq, [dstq + 4*mstrideq]
    ; FIXME we shouldn't need two dst registers if mmsize == 8
    lea         dst2q, [dstq + 8*strideq]

%if (ARCH_X86_64 && mmsize == 16) || %2 > 16

    DEFINE_TRANSPOSED_P7_TO_Q7

    TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]

%else ; %2 == 44/48/84/88

    TRANSPOSE8x8W 0, 2, 4, 6, 8, 10, 12, 14, 15

    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [P1], u, [rsp+%3+%4], [rsp+64], [rsp+80]
    DEFINE_TRANSPOSED_P7_TO_Q7

    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [Q6], u, [rsp+%3+%4], [rsp+72], [rsp+88]
    DEFINE_TRANSPOSED_P7_TO_Q7 8

    DEFINE_TRANSPOSED_P7_TO_Q7
%elif %2 > 16 ; %2 == 44/48/84/88

    DEFINE_TRANSPOSED_P7_TO_Q7
    TRANSPOSE8x8W 0, 2, 4, 6, 1, 5, 7, 3, [rsp], [Q0], 1

%else ; %2 == 4 || %2 == 8
    SBUTTERFLY  bw, 0, 1, 6
    SBUTTERFLY  bw, 2, 3, 6
    SBUTTERFLY  bw, 4, 5, 6
    mova        [rsp+4*mmsize], m5

    SBUTTERFLY  bw, 6, 7, 5
    DEFINE_TRANSPOSED_P7_TO_Q7
    TRANSPOSE4x4W 0, 2, 4, 6, 5

    mova        m5, [rsp+4*mmsize]
    TRANSPOSE4x4W 1, 3, 5, 7, 0

%if %2 == 16 || mmsize == 8

    SPLATB_REG  m2, I, m0               ; I I I I ...
    SPLATB_REG  m3, E, m0               ; E E E E ...

    ; In case of horizontal, P3..Q3 are already present in some registers due
    ; to the previous transpose, so we just swap registers.

    ABSSUB_GT   m5, rp3, rp2, m2, m7, m0 ; m5 = abs(p3-p2) <= I
    ABSSUB_GT   m1, rp2, rp1, m2, m7, m0 ; m1 = abs(p2-p1) <= I

    ABSSUB_GT   m1, rp1, rp0, m2, m7, m0 ; m1 = abs(p1-p0) <= I

    ABSSUB_GT   m1, rq0, rq1, m2, m7, m0 ; m1 = abs(q1-q0) <= I

    ABSSUB_GT   m1, rq1, rq2, m2, m7, m0 ; m1 = abs(q2-q1) <= I

    ABSSUB_GT   m1, rq2, rq3, m2, m7, m0 ; m1 = abs(q3-q2) <= I

    ABSSUB      m1, rp0, rq0, m7        ; abs(p0-q0)
    paddusb     m1, m1                  ; abs(p0-q0) * 2
    ABSSUB      m2, rp1, rq1, m7        ; abs(p1-q1)
    pand        m2, [pb_fe]             ; drop lsb so shift can work
    psrlq       m2, 1                   ; abs(p1-q1)/2
    paddusb     m1, m2                  ; abs(p0-q0)*2 + abs(p1-q1)/2
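    ; per the VP9 spec, the edge condition assembled here is
    ;   abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= E
    ; with E splatted across a register above; combined with the
    ; abs(x - y) <= I neighbour checks accumulated in m1/m5, this
    ; produces the fm (filter mask) value below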
    por         m1, m5                  ; fm final value

    ; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3)
    ; calc flat8in (if not 44_16) and hev masks
%if %2 != 44 && %2 != 4
    mova        m6, [pb_81]             ; [1 1 1 1 ...] ^ 0x80
    ABSSUB_GT   m2, rp3, rp0, m6, m5    ; abs(p3 - p0) <= 1

    ABSSUB_GT   m1, rp2, rp0, m6, m5, rb80 ; abs(p2 - p0) <= 1

    ABSSUB      m4, rp1, rp0, m5        ; abs(p1 - p0)

    SPLATB_REG  m7, H, m0               ; H H H H ...

    pcmpgtb     m0, m4, m7              ; abs(p1 - p0) > H (1/2 hev condition)
    CMP_GT      m4, m6                  ; abs(p1 - p0) <= 1
    por         m2, m4                  ; (flat8in)
    ABSSUB      m4, rq1, rq0, m1        ; abs(q1 - q0)

    pcmpgtb     m5, m4, m7              ; abs(q1 - q0) > H (2/2 hev condition)
    por         m0, m5                  ; hev final value
    CMP_GT      m4, m6                  ; abs(q1 - q0) <= 1
    por         m2, m4                  ; (flat8in)
    ABSSUB_GT   m1, rq2, rq0, m6, m5, rb80 ; abs(q2 - q0) <= 1

    ABSSUB_GT   m1, rq3, rq0, m6, m5, rb80 ; abs(q3 - q0) <= 1
    por         m2, m1                  ; flat8in final value

%if %2 == 84 || %2 == 48
    pand        m2, [mask_mix%2]

    SPLATB_REG  m7, H, m0               ; H H H H ...

    ABSSUB      m4, rp1, rp0, m1        ; abs(p1 - p0)

    pcmpgtb     m0, m4, m7              ; abs(p1 - p0) > H (1/2 hev condition)
    ABSSUB      m4, rq1, rq0, m1        ; abs(q1 - q0)

    pcmpgtb     m5, m4, m7              ; abs(q1 - q0) > H (2/2 hev condition)
    por         m0, m5                  ; hev final value

    ; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3)

    ABSSUB_GT   m1, rp7, rp0, m6, m5    ; abs(p7 - p0) <= 1
    ABSSUB_GT   m7, rp6, rp0, m6, m5    ; abs(p6 - p0) <= 1

    ABSSUB_GT   m7, rp5, rp0, m6, m5    ; abs(p5 - p0) <= 1

    ABSSUB_GT   m7, rp4, rp0, m6, m5    ; abs(p4 - p0) <= 1

    ABSSUB_GT   m7, rq4, rq0, m6, m5    ; abs(q4 - q0) <= 1

    ABSSUB_GT   m7, rq5, rq0, m6, m5    ; abs(q5 - q0) <= 1
    ABSSUB_GT   m7, rq6, rq0, m6, m5    ; abs(q6 - q0) <= 1

    ABSSUB_GT   m7, rq7, rq0, m6, m5    ; abs(q7 - q0) <= 1
    por         m1, m7                  ; flat8out final value

    ; if (out && in) filter_14()
    ; else if (in)   filter_6()
    ; else if (hev)  filter_2()

    ; f6: fm & ~f14 & in => fm & ~(out & in) & in => fm & ~out & in
    ; f2: fm & ~f14 & ~f6 & hev => fm & ~(out & in) & ~(~out & in) & hev => fm & ~in & hev
    ; f4: fm & ~f14 & ~f6 & ~f2 => fm & ~(out & in) & ~(~out & in) & ~(~in & hev) => fm & ~in & ~hev
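    ; equivalent per-pixel selection, as a scalar sketch:
    ;   if (fm) {
    ;       if (flat8out && flat8in) filter_14();
    ;       else if (flat8in)        filter_6();
    ;       else if (hev)            filter_2();
    ;       else                     filter_4();
    ;   }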
    ; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m8..15: p5 p4 p1 p0 q0 q1 q6 q7)

%if %2 != 44 && %2 != 4
    mova        m6, [pb_80]             ; already in m6 if 44_16
    SCRATCH      2, 15, rsp+%3+%4

    SCRATCH      1,  8, rsp+%3+%4+16

    pxor        m2, m6, rq0             ; q0 ^ 0x80
    pxor        m4, m6, rp0             ; p0 ^ 0x80
    psubsb      m2, m4                  ; (signed) q0 - p0
    pxor        m4, m6, rp1             ; p1 ^ 0x80
    pxor        m5, m6, rq1             ; q1 ^ 0x80
    psubsb      m4, m5                  ; (signed) p1 - q1
    paddsb      m4, m2                  ; (q0 - p0) + (p1 - q1)
    paddsb      m4, m2                  ; 2*(q0 - p0) + (p1 - q1)
    paddsb      m4, m2                  ; 3*(q0 - p0) + (p1 - q1)
    paddsb      m6, m4, [pb_4]          ; m6: f1 = clip(f + 4, 127)
    paddsb      m4, [pb_3]              ; m4: f2 = clip(f + 3, 127)

    mova        m14, [pb_10]            ; will be reused in filter4()

    SRSHIFT3B_2X m6, m4, rb10, m7       ; f1 and f2 sign byte shift by 3
    SIGN_SUB    m7, rq0, m6, m5         ; m7 = q0 - f1
    SIGN_ADD    m1, rp0, m4, m5         ; m1 = p0 + f2
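    ; scalar form of the math above (clip_i8/clip_u8 denote signed/unsigned
    ; byte saturation; the 0x80 xors move pixels into signed range):
    ;   f   = clip_i8(clip_i8(p1 - q1) + 3 * (q0 - p0));
    ;   f1  = clip_i8(f + 4) >> 3;  f2 = clip_i8(f + 3) >> 3;
    ;   q0' = clip_u8(q0 - f1);     p0' = clip_u8(p0 + f2);
    ; this is filter_2(), masked below so it is only written where
    ; fm && !flat8in && hev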
%if %2 != 44 && %2 != 4

    pandn       m6, m15, m3             ; ~mask(in) & mask(fm)

    pand        m6, m0                  ; (~mask(in) & mask(fm)) & mask(hev)

    MASK_APPLY  m7, rq0, m6, m5         ; m7 = filter2(q0) & mask / we write it in filter4()
    MASK_APPLY  m1, rp0, m6, m5         ; m1 = filter2(p0) & mask / we write it in filter4()

    ; (m0: hev, m1: p0', m2: q0-p0, m3: fm, m7: q0', [m8: flat8out], m10..13: p1 p0 q0 q1, m14: pb_10, [m15: flat8in])

    paddsb      m2, m4                  ; 2 * (q0 - p0)
    paddsb      m2, m4                  ; 3 * (q0 - p0)
    paddsb      m6, m2, [pb_4]          ; m6: f1 = clip(f + 4, 127)
    paddsb      m2, [pb_3]              ; m2: f2 = clip(f + 3, 127)
    SRSHIFT3B_2X m6, m2, rb10, m4       ; f1 and f2 sign byte shift by 3
%if %2 != 44 && %2 != 4

    pandn       m5, m15, m3             ; ~mask(in) & mask(fm)

    pandn       m0, m5                  ; ~mask(hev) & (~mask(in) & mask(fm))

    SIGN_SUB    m5, rq0, m6, m4         ; q0 - f1
    MASK_APPLY  m5, m7, m0, m4          ; filter4(q0) & mask

    SIGN_ADD    m7, rp0, m2, m4         ; p0 + f2
    MASK_APPLY  m7, m1, m0, m4          ; filter4(p0) & mask

    pxor        m1, m1                  ; f=(f1+1)>>1

    SIGN_ADD    m1, rp1, m6, m2         ; p1 + f
    SIGN_SUB    m4, rq1, m6, m2         ; q1 - f
    MASK_APPLY  m1, rp1, m0, m2         ; m1 = filter4(p1)
    MASK_APPLY  m4, rq1, m0, m2         ; m4 = filter4(q1)
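    ; filter_4() recomputes f without the (p1 - q1) term (hev pixels were
    ; already handled by filter_2 above) and also adjusts p1/q1; scalar
    ; sketch of the p1/q1 step, applied only where fm && !flat8in && !hev:
    ;   f   = (f1 + 1) >> 1;        // rounded halving, cf. f=(f1+1)>>1 above
    ;   p1' = clip_u8(p1 + f);      q1' = clip_u8(q1 - f);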
%if %2 != 44 && %2 != 4
    UNSCRATCH    2, 15, rsp+%3+%4

    ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1)

%if %2 != 44 && %2 != 4

    pand        m2, m3                  ; mask(fm) & mask(in)

    pandn       m3, m8, m2              ; ~mask(out) & (mask(fm) & mask(in))

    mova        m3, [rsp+%3+%4+16]

    FILTER_INIT    m4, m5, m6, m7, [P2], %4, 6, m3, m1                         ; [p2]

    FILTER_UPDATE  m4, m5, m6, m7, [P1], %4, 0, 1, 2, 5, 3, m3, "", rq1, "", 1 ; [p1] -p3 -p2 +p1 +q1
    FILTER_UPDATE  m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m1         ; [p0] -p3 -p1 +p0 +q2
    FILTER_UPDATE  m4, m5, m6, m7, [Q0], %4, 0, 3, 4, 7, 3, m3, "", rq3, "", 1 ; [q0] -p3 -p0 +q0 +q3
    FILTER_UPDATE  m4, m5, m6, m7, [Q1], %4, 1, 4, 5, 7, 3, m3, ""             ; [q1] -p2 -q0 +q1 +q3
    FILTER_UPDATE  m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m1             ; [q2] -p1 -q1 +q2 +q3

    UNSCRATCH    1,  8, rsp+%3+%4+16

    ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2)

    ;        m2  m3  m8  m9  m14 m15 m10 m11 m12 m13

    ;                q2  q3  p3  p2  p1  p0  q0  q1
    ; p6 -7  p7  p6  p5  p4  .   .   .   .   .
    ; p5 -6  -p7 -p6 +p5 +q1 .   .   .   .
    ; p4 -5  -p7 -p5 +p4 +q2 .   .   .   q2
    ; p3 -4  -p7 -p4 +p3 +q3 .   .   .   q3
    ; p2 -3  -p7 -p3 +p2 +q4 .   .   .   q4
    ; p1 -2  -p7 -p2 +p1 +q5 .   .   .   q5
    ; p0 -1  -p7 -p1 +p0 +q6 .   .   .   q6
    ; q0 +0  -p7 -p0 +q0 +q7 .   .   .   q7
    ; q1 +1  -p6 -q0 +q1 +q7 q1  .   .   .
    ; q2 +2  -p5 -q1 +q2 +q7 .   q2  .   .
    ; q3 +3  -p4 -q2 +q3 +q7 .   q3  .   .
    ; q4 +4  -p3 -q3 +q4 +q7 .   q4  .   .
    ; q5 +5  -p2 -q4 +q5 +q7 .   q5  .   .
    ; q6 +6  -p1 -q5 +q6 +q7 .   q6  .   .

    pand        m1, m2                  ; mask(out) & (mask(fm) & mask(in))

    FILTER_INIT    m4, m5, m6, m7, [P6], %4, 14, m1, m3                              ; [p6]
    FILTER_UPDATE  m4, m5, m6, m7, [P5], %4,  8,  9, 10,  5, 4, m1, rp5s             ; [p5] -p7 -p6 +p5 +q1
    FILTER_UPDATE  m4, m5, m6, m7, [P4], %4,  8, 10, 11,  6, 4, m1, rp4s             ; [p4] -p7 -p5 +p4 +q2
    FILTER_UPDATE  m4, m5, m6, m7, [P3], %4,  8, 11,  0,  7, 4, m1, rp3s             ; [p3] -p7 -p4 +p3 +q3
    FILTER_UPDATE  m4, m5, m6, m7, [P2], %4,  8,  0,  1, 12, 4, m1, "", rq4, [Q4], 1 ; [p2] -p7 -p3 +p2 +q4
    FILTER_UPDATE  m4, m5, m6, m7, [P1], %4,  8,  1,  2, 13, 4, m1, "", rq5, [Q5], 1 ; [p1] -p7 -p2 +p1 +q5
    FILTER_UPDATE  m4, m5, m6, m7, [P0], %4,  8,  2,  3, 14, 4, m1, "", rq6, [Q6], 1 ; [p0] -p7 -p1 +p0 +q6
    FILTER_UPDATE  m4, m5, m6, m7, [Q0], %4,  8,  3,  4, 15, 4, m1, "", rq7, [Q7], 1 ; [q0] -p7 -p0 +q0 +q7
    FILTER_UPDATE  m4, m5, m6, m7, [Q1], %4,  9,  4,  5, 15, 4, m1, ""               ; [q1] -p6 -q0 +q1 +q7
    FILTER_UPDATE  m4, m5, m6, m7, [Q2], %4, 10,  5,  6, 15, 4, m1, ""               ; [q2] -p5 -q1 +q2 +q7
    FILTER_UPDATE  m4, m5, m6, m7, [Q3], %4, 11,  6,  7, 15, 4, m1, ""               ; [q3] -p4 -q2 +q3 +q7
    FILTER_UPDATE  m4, m5, m6, m7, [Q4], %4,  0,  7, 12, 15, 4, m1, rq4s             ; [q4] -p3 -q3 +q4 +q7
    FILTER_UPDATE  m4, m5, m6, m7, [Q5], %4,  1, 12, 13, 15, 4, m1, rq5s             ; [q5] -p2 -q4 +q5 +q7
    FILTER_UPDATE  m4, m5, m6, m7, [Q6], %4,  2, 13, 14, 15, 4, m1, rq6s             ; [q6] -p1 -q5 +q6 +q7

    TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]

    DEFINE_REAL_P7_TO_Q7
    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [rsp+32],  a, [rsp+%3+%4], [Q0], [Q1]

    DEFINE_TRANSPOSED_P7_TO_Q7

    DEFINE_REAL_P7_TO_Q7 8
    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [rsp+224], a, [rsp+%3+%4], [Q0], [Q1]

%elif %2 == 44 || %2 == 4

    DEFINE_REAL_P7_TO_Q7 2
    SBUTTERFLY  bw, 0, 1, 4
    SBUTTERFLY  bw, 2, 3, 4
    SBUTTERFLY  wd, 0, 2, 4
    SBUTTERFLY  wd, 1, 3, 4
    ; the following code does a transpose of 8 full lines to 16 half
    ; lines (high part); it is inlined to avoid the need for a staging area
    DEFINE_REAL_P7_TO_Q7

    SBUTTERFLY  bw, 0, 1, 8
    SBUTTERFLY  bw, 2, 3, 8
    SBUTTERFLY  bw, 4, 5, 8
    SBUTTERFLY  bw, 6, 7, 8
    SBUTTERFLY  wd, 0, 2, 8
    SBUTTERFLY  wd, 1, 3, 8
    SBUTTERFLY  wd, 4, 6, 8
    SBUTTERFLY  wd, 5, 7, 8
    SBUTTERFLY  dq, 0, 4, 8
    SBUTTERFLY  dq, 1, 5, 8
    SBUTTERFLY  dq, 2, 6, 8
    SBUTTERFLY  dq, 3, 7, 8

    SBUTTERFLY  bw, 0, 1, 6
    mova        [rsp+mmsize*4], m1
    mova        m6, [rsp+mmsize*6]
    SBUTTERFLY  bw, 2, 3, 1
    SBUTTERFLY  bw, 4, 5, 1
    SBUTTERFLY  bw, 6, 7, 1
    SBUTTERFLY  wd, 0, 2, 1
    mova        [rsp+mmsize*6], m2
    mova        m1, [rsp+mmsize*4]
    SBUTTERFLY  wd, 1, 3, 2
    SBUTTERFLY  wd, 4, 6, 2
    SBUTTERFLY  wd, 5, 7, 2
    SBUTTERFLY  dq, 0, 4, 2
    SBUTTERFLY  dq, 1, 5, 2

    mova        m2, [rsp+mmsize*6]
    SBUTTERFLY  dq, 2, 6, 1
    SBUTTERFLY  dq, 3, 7, 1

    LOOPFILTER v, %1, %2,  0, %4
    LOOPFILTER h, %1, %2, %3, %4

%macro LPF_16_VH_ALL_OPTS 4
    LPF_16_VH %1, %2, %3, %4, sse2
    LPF_16_VH %1, %2, %3, %4, ssse3
    LPF_16_VH %1, %2, %3, %4, avx

LPF_16_VH_ALL_OPTS 16, 512, 256, 32
LPF_16_VH_ALL_OPTS 44,   0, 128,  0
LPF_16_VH_ALL_OPTS 48, 256, 128, 16
LPF_16_VH_ALL_OPTS 84, 256, 128, 16
LPF_16_VH_ALL_OPTS 88, 256, 128, 16

LOOPFILTER v, 4,   0,  0, 0
LOOPFILTER h, 4,   0, 64, 0
LOOPFILTER v, 8, 128,  0, 8
LOOPFILTER h, 8, 128, 64, 8