;******************************************************************************
;* VP9 motion compensation SIMD optimizations
;*
;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
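
; x86util.asm (which pulls in x86inc.asm) provides the cglobal calling
; convention wrapper and the helper macros used throughout this file
; (mova/movh/movu, SBUTTERFLY, the m0..m15 register aliases, mmsize, ...)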
%macro F8_SSSE3_TAPS 8
%macro F8_16BPP_TAPS 8
const filters_%1 ; smooth
    F8_TAPS -3, -1, 32, 64, 38, 1, -3, 0
    F8_TAPS -2, -2, 29, 63, 41, 2, -3, 0
    F8_TAPS -2, -2, 26, 63, 43, 4, -4, 0
    F8_TAPS -2, -3, 24, 62, 46, 5, -4, 0
    F8_TAPS -2, -3, 21, 60, 49, 7, -4, 0
    F8_TAPS -1, -4, 18, 59, 51, 9, -4, 0
    F8_TAPS -1, -4, 16, 57, 53, 12, -4, -1
    F8_TAPS -1, -4, 14, 55, 55, 14, -4, -1
    F8_TAPS -1, -4, 12, 53, 57, 16, -4, -1
    F8_TAPS 0, -4, 9, 51, 59, 18, -4, -1
    F8_TAPS 0, -4, 7, 49, 60, 21, -3, -2
    F8_TAPS 0, -4, 5, 46, 62, 24, -3, -2
    F8_TAPS 0, -4, 4, 43, 63, 26, -2, -2
    F8_TAPS 0, -3, 2, 41, 63, 29, -2, -2
    F8_TAPS 0, -3, 1, 38, 64, 32, -1, -3
    ; regular
    F8_TAPS 0, 1, -5, 126, 8, -3, 1, 0
    F8_TAPS -1, 3, -10, 122, 18, -6, 2, 0
    F8_TAPS -1, 4, -13, 118, 27, -9, 3, -1
    F8_TAPS -1, 4, -16, 112, 37, -11, 4, -1
    F8_TAPS -1, 5, -18, 105, 48, -14, 4, -1
    F8_TAPS -1, 5, -19, 97, 58, -16, 5, -1
    F8_TAPS -1, 6, -19, 88, 68, -18, 5, -1
    F8_TAPS -1, 6, -19, 78, 78, -19, 6, -1
    F8_TAPS -1, 5, -18, 68, 88, -19, 6, -1
    F8_TAPS -1, 5, -16, 58, 97, -19, 5, -1
    F8_TAPS -1, 4, -14, 48, 105, -18, 5, -1
    F8_TAPS -1, 4, -11, 37, 112, -16, 4, -1
    F8_TAPS -1, 3, -9, 27, 118, -13, 4, -1
    F8_TAPS 0, 2, -6, 18, 122, -10, 3, -1
    F8_TAPS 0, 1, -3, 8, 126, -5, 1, 0
    ; sharp
    F8_TAPS -1, 3, -7, 127, 8, -3, 1, 0
    F8_TAPS -2, 5, -13, 125, 17, -6, 3, -1
    F8_TAPS -3, 7, -17, 121, 27, -10, 5, -2
    F8_TAPS -4, 9, -20, 115, 37, -13, 6, -2
    F8_TAPS -4, 10, -23, 108, 48, -16, 8, -3
    F8_TAPS -4, 10, -24, 100, 59, -19, 9, -3
    F8_TAPS -4, 11, -24, 90, 70, -21, 10, -4
    F8_TAPS -4, 11, -23, 80, 80, -23, 11, -4
    F8_TAPS -4, 10, -21, 70, 90, -24, 11, -4
    F8_TAPS -3, 9, -19, 59, 100, -24, 10, -4
    F8_TAPS -3, 8, -16, 48, 108, -23, 10, -4
    F8_TAPS -2, 6, -13, 37, 115, -20, 9, -4
    F8_TAPS -2, 5, -10, 27, 121, -17, 7, -3
    F8_TAPS -1, 3, -6, 17, 125, -13, 5, -2
    F8_TAPS 0, 1, -3, 8, 127, -7, 3, -1
%define F8_TAPS F8_SSSE3_TAPS
; int8_t ff_filters_ssse3[3][15][4][32]
%define F8_TAPS F8_SSE2_TAPS
; int16_t ff_filters_sse2[3][15][8][8]
%define F8_TAPS F8_16BPP_TAPS
; int16_t ff_filters_16bpp[3][15][4][16]
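
; As a rough scalar reference for what the SIMD filters below compute (an
; illustrative sketch, not part of the build; all names in it are made up),
; each sub-pel output sample is an 8-tap convolution with one of the 15
; phases above, rounded back from the 7-bit tap scale and clipped:
;
;   for (x = 0; x < w; x++) {
;       int sum = 64;                          // rounding bias for the >> 7
;       for (k = 0; k < 8; k++)
;           sum += filter[k] * src[x + k - 3]; // taps cover src[x-3]..src[x+4]
;       dst[x] = av_clip_uint8(sum >> 7);      // 8 bpp; 16 bpp clips wider
;   }
;
; The three tables above hold the same smooth/regular/sharp tap sets, only
; pre-arranged for how each code path consumes them (byte pairs for
; pmaddubsw, broadcast words for pmullw, word pairs for the 16 bpp filters).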
%macro filter_sse2_h_fn 1
%assign %%px mmsize/2
cglobal vp9_%1_8tap_1d_h_ %+ %%px %+ _8, 6, 6, 15, dst, dstride, src, sstride, h, filtery
    mova m7, [filteryq+ 0]
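    ; the eight tap vectors for this phase occupy 128 bytes at filteryq; on
    ; 64-bit builds (with mmsize > 8) they are all kept live in m7-m14,
    ; otherwise only m7 stays cached and the rest are re-read as pmullw
    ; memory operands inside the loop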
%if ARCH_X86_64 && mmsize > 8
    mova m8, [filteryq+ 16]
    mova m9, [filteryq+ 32]
    mova m10, [filteryq+ 48]
    mova m11, [filteryq+ 64]
    mova m12, [filteryq+ 80]
    mova m13, [filteryq+ 96]
    mova m14, [filteryq+112]
%if ARCH_X86_64 && mmsize > 8
    pmullw m1, [filteryq+ 16]
    pmullw m2, [filteryq+ 32]
    pmullw m3, [filteryq+ 48]
    pmullw m4, [filteryq+ 64]
%if ARCH_X86_64 && mmsize > 8
    pmullw m1, [filteryq+ 80]
    pmullw m3, [filteryq+ 96]
    pmullw m4, [filteryq+112]
%assign %%px mmsize/2
cglobal vp9_%1_8tap_1d_h_ %+ %%px %+ _8, 6, 6, 11, dst, dstride, src, sstride, h, filtery
    mova m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
    mova m8, [filteryq+32]
    mova m9, [filteryq+64]
    mova m10, [filteryq+96]
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw m2, [filteryq+32]
    pmaddubsw m4, [filteryq+64]
    pmaddubsw m1, [filteryq+96]
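    ; pmaddubsw multiplies the unsigned source bytes by the signed int8 taps
    ; and sums each horizontally adjacent pair of products into a signed
    ; word, which is why ff_filters_ssse3 stores the taps as interleaved
    ; byte pairs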
%macro filter_hx2_fn 1
cglobal vp9_%1_8tap_1d_h_ %+ %%px %+ _8, 6, 6, 14, dst, dstride, src, sstride, h, filtery
    mova m8, [filteryq+ 0]
    mova m9, [filteryq+32]
    mova m10, [filteryq+64]
    mova m11, [filteryq+96]
    SBUTTERFLY bw, 0, 1, 12
    SBUTTERFLY bw, 2, 3, 12
    SBUTTERFLY bw, 4, 5, 12
    SBUTTERFLY bw, 6, 7, 12
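    ; SBUTTERFLY bw, a, b, t (from x86util) interleaves the byte lanes of
    ; m<a> and m<b> via punpcklbw/punpckhbw, using m<t> as scratch, leaving
    ; the low-half interleave in m<a> and the high-half interleave in m<b>;
    ; this pairs, per output pixel, the two source bytes that each tap pair
    ; applies to, so pmaddubsw can evaluate two taps at once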
%if HAVE_AVX2_EXTERNAL
%macro filter_sse2_v_fn 1
%assign %%px mmsize/2
cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _8, 6, 8, 15, dst, dstride, src, sstride, h, filtery, src4, sstride3
cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _8, 4, 7, 15, dst, dstride, src, sstride, filtery, src4, sstride3
    lea sstride3q, [sstrideq*3]
    lea src4q, [srcq+sstrideq]
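    ; srcq is rebased three rows up while src4q points one row down, so the
    ; two base pointers together address the eight source rows (t-3 .. t+4)
    ; that one output row of the vertical filter needs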
    mova m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
    mova m8, [filteryq+ 16]
    mova m9, [filteryq+ 32]
    mova m10, [filteryq+ 48]
    mova m11, [filteryq+ 64]
    mova m12, [filteryq+ 80]
    mova m13, [filteryq+ 96]
    mova m14, [filteryq+112]
    ; FIXME maybe reuse loads from previous rows, or just
    ; more generally unroll this to prevent multiple loads of
    ; the same data?
    movh m1, [srcq+sstrideq]
    movh m2, [srcq+sstrideq*2]
    movh m3, [srcq+sstride3q]
%if ARCH_X86_64 && mmsize > 8
    pmullw m1, [filteryq+ 16]
    pmullw m2, [filteryq+ 32]
    pmullw m3, [filteryq+ 48]
    pmullw m4, [filteryq+ 64]
    movh m1, [src4q+sstrideq]
    movh m3, [src4q+sstrideq*2]
    movh m4, [src4q+sstride3q]
%if ARCH_X86_64 && mmsize > 8
    pmullw m1, [filteryq+ 80]
    pmullw m3, [filteryq+ 96]
    pmullw m4, [filteryq+112]
%assign %%px mmsize/2
cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _8, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _8, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
    lea sstride3q, [sstrideq*3]
    lea src4q, [srcq+sstrideq]
    mova m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
    mova m8, [filteryq+32]
    mova m9, [filteryq+64]
    mova m10, [filteryq+96]
    ; FIXME maybe reuse loads from previous rows, or just more generally
    ; unroll this to prevent multiple loads of the same data?
    movh m1, [srcq+sstrideq]
    movh m2, [srcq+sstrideq*2]
    movh m3, [srcq+sstride3q]
    movh m5, [src4q+sstrideq]
    movh m1, [src4q+sstrideq*2]
    movh m3, [src4q+sstride3q]
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw m2, [filteryq+32]
    pmaddubsw m4, [filteryq+64]
    pmaddubsw m1, [filteryq+96]
%macro filter_vx2_fn 1
cglobal vp9_%1_8tap_1d_v_ %+ %%px %+ _8, 6, 8, 14, dst, dstride, src, sstride, h, filtery, src4, sstride3
    lea sstride3q, [sstrideq*3]
    lea src4q, [srcq+sstrideq]
    mova m8, [filteryq+ 0]
    mova m9, [filteryq+32]
    mova m10, [filteryq+64]
    mova m11, [filteryq+96]
    ; FIXME maybe reuse loads from previous rows, or just
    ; more generally unroll this to prevent multiple loads of
    ; the same data?
    movu m1, [srcq+sstrideq]
    movu m2, [srcq+sstrideq*2]
    movu m3, [srcq+sstride3q]
    movu m5, [src4q+sstrideq]
    movu m6, [src4q+sstrideq*2]
    movu m7, [src4q+sstride3q]
    SBUTTERFLY bw, 0, 1, 12
    SBUTTERFLY bw, 2, 3, 12
    SBUTTERFLY bw, 4, 5, 12
    SBUTTERFLY bw, 6, 7, 12
%if HAVE_AVX2_EXTERNAL
%macro fpel_fn 6-8 0, 4
cglobal vp9_%1%2 %+ %%szsuf, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
    lea sstride3q, [sstrideq*3]
    lea dstride3q, [dstrideq*3]
cglobal vp9_%1%2 %+ %%szsuf, 5, 5, %8, dst, dstride, src, sstride, h
    %%srcfn m1, [srcq+s%3]
    %%srcfn m2, [srcq+s%4]
    %%srcfn m3, [srcq+s%5]
    %%srcfn m4, [srcq+mmsize*4]
    %%srcfn m5, [srcq+mmsize*5]
    %%srcfn m6, [srcq+mmsize*6]
    %%srcfn m7, [srcq+mmsize*7]
    lea srcq, [srcq+sstrideq*%6]
    %%pavg m1, [dstq+d%3]
    %%pavg m2, [dstq+d%4]
    %%pavg m3, [dstq+d%5]
    %%pavg m4, [dstq+mmsize*4]
    %%pavg m5, [dstq+mmsize*5]
    %%pavg m6, [dstq+mmsize*6]
    %%pavg m7, [dstq+mmsize*7]
    %%dstfn [dstq+d%3], m1
    %%dstfn [dstq+d%4], m2
    %%dstfn [dstq+d%5], m3
    %%dstfn [dstq+mmsize*4], m4
    %%dstfn [dstq+mmsize*5], m5
    %%dstfn [dstq+mmsize*6], m6
    %%dstfn [dstq+mmsize*7], m7
    lea dstq, [dstq+dstrideq*%6]
fpel_fn put,   4, strideq, strideq*2, stride3q, 4
fpel_fn put,   8, strideq, strideq*2, stride3q, 4
fpel_fn avg,   4, strideq, strideq*2, stride3q, 4, 8
fpel_fn avg,   8, strideq, strideq*2, stride3q, 4, 8
fpel_fn put,  16, strideq, strideq*2, stride3q, 4
fpel_fn put,  32, mmsize, strideq, strideq+mmsize, 2
fpel_fn put,  64, mmsize, mmsize*2, mmsize*3, 1
fpel_fn put, 128, mmsize, mmsize*2, mmsize*3, 1, 0, 8
fpel_fn avg,  16, strideq, strideq*2, stride3q, 4, 8
fpel_fn avg,  32, mmsize, strideq, strideq+mmsize, 2, 8
fpel_fn avg,  64, mmsize, mmsize*2, mmsize*3, 1, 8
fpel_fn put,  32, strideq, strideq*2, stride3q, 4
fpel_fn put,  64, mmsize, strideq, strideq+mmsize, 2
fpel_fn put, 128, mmsize, mmsize*2, mmsize*3, 1
%if HAVE_AVX2_EXTERNAL
fpel_fn avg,  32, strideq, strideq*2, stride3q, 4, 8
fpel_fn avg,  64, mmsize, strideq, strideq+mmsize, 2, 8
fpel_fn avg,   8, strideq, strideq*2, stride3q, 4, 16
fpel_fn avg,  16, strideq, strideq*2, stride3q, 4, 16
fpel_fn avg,  32, mmsize, strideq, strideq+mmsize, 2, 16
fpel_fn avg,  64, mmsize, mmsize*2, mmsize*3, 1, 16
fpel_fn avg, 128, mmsize, mmsize*2, mmsize*3, 1, 16, 8
%if HAVE_AVX2_EXTERNAL
fpel_fn avg,  32, strideq, strideq*2, stride3q, 4, 16
fpel_fn avg,  64, mmsize, strideq, strideq+mmsize, 2, 16
fpel_fn avg, 128, mmsize, mmsize*2, mmsize*3, 1, 16
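
; The full-pel (fpel) helpers above either copy the block unchanged (put) or
; average it into what is already in the destination (avg). A rough scalar
; equivalent (illustrative sketch only; 8 bpp shown, the 16 bpp variants do
; the same on 16-bit pixels with pavgw instead of pavgb):
;
;   for (y = 0; y < h; y++) {
;       for (x = 0; x < w; x++)
;           dst[x] = is_avg ? (dst[x] + src[x] + 1) >> 1   // pavgb rounding
;                           : src[x];
;       dst += dstride;
;       src += sstride;
;   }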