;******************************************************************************
;* VP9 MC SIMD optimizations
;*
;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

SECTION_RODATA 32
%macro F8_SSSE3_TAPS 8
times 16 db %1, %2
times 16 db %3, %4
times 16 db %5, %6
times 16 db %7, %8
%endmacro

%macro F8_SSE2_TAPS 8
times 8 dw %1
times 8 dw %2
times 8 dw %3
times 8 dw %4
times 8 dw %5
times 8 dw %6
times 8 dw %7
times 8 dw %8
%endmacro

%macro FILTER 1
const filters_%1 ; smooth
                    F8_TAPS -3, -1,  32,  64,  38,   1, -3,  0
                    F8_TAPS -2, -2,  29,  63,  41,   2, -3,  0
                    F8_TAPS -2, -2,  26,  63,  43,   4, -4,  0
                    F8_TAPS -2, -3,  24,  62,  46,   5, -4,  0
                    F8_TAPS -2, -3,  21,  60,  49,   7, -4,  0
                    F8_TAPS -1, -4,  18,  59,  51,   9, -4,  0
                    F8_TAPS -1, -4,  16,  57,  53,  12, -4, -1
                    F8_TAPS -1, -4,  14,  55,  55,  14, -4, -1
                    F8_TAPS -1, -4,  12,  53,  57,  16, -4, -1
                    F8_TAPS  0, -4,   9,  51,  59,  18, -4, -1
                    F8_TAPS  0, -4,   7,  49,  60,  21, -3, -2
                    F8_TAPS  0, -4,   5,  46,  62,  24, -3, -2
                    F8_TAPS  0, -4,   4,  43,  63,  26, -2, -2
                    F8_TAPS  0, -3,   2,  41,  63,  29, -2, -2
                    F8_TAPS  0, -3,   1,  38,  64,  32, -1, -3
; regular
                    F8_TAPS  0,  1,  -5, 126,   8,  -3,  1,  0
                    F8_TAPS -1,  3, -10, 122,  18,  -6,  2,  0
                    F8_TAPS -1,  4, -13, 118,  27,  -9,  3, -1
                    F8_TAPS -1,  4, -16, 112,  37, -11,  4, -1
                    F8_TAPS -1,  5, -18, 105,  48, -14,  4, -1
                    F8_TAPS -1,  5, -19,  97,  58, -16,  5, -1
                    F8_TAPS -1,  6, -19,  88,  68, -18,  5, -1
                    F8_TAPS -1,  6, -19,  78,  78, -19,  6, -1
                    F8_TAPS -1,  5, -18,  68,  88, -19,  6, -1
                    F8_TAPS -1,  5, -16,  58,  97, -19,  5, -1
                    F8_TAPS -1,  4, -14,  48, 105, -18,  5, -1
                    F8_TAPS -1,  4, -11,  37, 112, -16,  4, -1
                    F8_TAPS -1,  3,  -9,  27, 118, -13,  4, -1
                    F8_TAPS  0,  2,  -6,  18, 122, -10,  3, -1
                    F8_TAPS  0,  1,  -3,   8, 126,  -5,  1,  0
; sharp
                    F8_TAPS -1,  3,  -7, 127,   8,  -3,  1,  0
                    F8_TAPS -2,  5, -13, 125,  17,  -6,  3, -1
                    F8_TAPS -3,  7, -17, 121,  27, -10,  5, -2
                    F8_TAPS -4,  9, -20, 115,  37, -13,  6, -2
                    F8_TAPS -4, 10, -23, 108,  48, -16,  8, -3
                    F8_TAPS -4, 10, -24, 100,  59, -19,  9, -3
                    F8_TAPS -4, 11, -24,  90,  70, -21, 10, -4
                    F8_TAPS -4, 11, -23,  80,  80, -23, 11, -4
                    F8_TAPS -4, 10, -21,  70,  90, -24, 11, -4
                    F8_TAPS -3,  9, -19,  59, 100, -24, 10, -4
                    F8_TAPS -3,  8, -16,  48, 108, -23, 10, -4
                    F8_TAPS -2,  6, -13,  37, 115, -20,  9, -4
                    F8_TAPS -2,  5, -10,  27, 121, -17,  7, -3
                    F8_TAPS -1,  3,  -6,  17, 125, -13,  5, -2
                    F8_TAPS  0,  1,  -3,   8, 127,  -7,  3, -1
%endmacro
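; Note: within each bank every row of eight taps sums to 128 (1 << 7), so a
; filtered pixel is normalized by rounding with 64 and shifting right by 7.
; Illustrative C reference for one horizontally filtered pixel (not part of
; this file; f[] is one tap row above):
;   dst[x] = av_clip_uint8((f[0]*src[x-3] + f[1]*src[x-2] + f[2]*src[x-1] +
;                           f[3]*src[x+0] + f[4]*src[x+1] + f[5]*src[x+2] +
;                           f[6]*src[x+3] + f[7]*src[x+4] + 64) >> 7);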
%define F8_TAPS F8_SSSE3_TAPS
; int8_t ff_filters_ssse3[3][15][4][32]
FILTER ssse3
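; Layout note (ssse3): each of the four adjacent tap pairs is stored as its
; two int8 coefficients repeated 16 times (32 bytes), so pmaddubsw can apply
; two taps per byte lane directly; 3 filter banks x 15 subpel phases.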
%define F8_TAPS F8_SSE2_TAPS
; int16_t ff_filters_sse2[3][15][8][8]
FILTER sse2
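; Layout note (sse2): without pmaddubsw each tap is instead splatted as
; eight int16 words, to be applied with pmullw/paddw on unpacked words.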
SECTION .text

%macro filter_sse2_h_fn 1
%assign %%px mmsize/2
cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 15, dst, dstride, src, sstride, h, filtery
    mova        m7, [filteryq+  0]
%if ARCH_X86_64 && mmsize > 8
    mova        m8, [filteryq+ 16]
    mova        m9, [filteryq+ 32]
    mova       m10, [filteryq+ 48]
    mova       m11, [filteryq+ 64]
    mova       m12, [filteryq+ 80]
    mova       m13, [filteryq+ 96]
    mova       m14, [filteryq+112]
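    ; On x86-64 all eight tap rows stay resident in m7-m14 across the loop;
    ; x86-32 only has eight XMM registers, so the taps are re-read from
    ; [filteryq] inside the loop (the memory-operand pmullw forms below).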
%if ARCH_X86_64 && mmsize > 8
    pmullw      m1, [filteryq+ 16]
    pmullw      m2, [filteryq+ 32]
    pmullw      m3, [filteryq+ 48]
    pmullw      m4, [filteryq+ 64]
%if ARCH_X86_64 && mmsize > 8
    pmullw      m1, [filteryq+ 80]
    pmullw      m3, [filteryq+ 96]
    pmullw      m4, [filteryq+112]
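    ; The word products for taps 4..7 mirror taps 0..3 above; the sums are
    ; then rounded with pw_64 and shifted right by 7 before packing back to
    ; unsigned bytes.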
%macro filter_h_fn 1
%assign %%px mmsize/2
cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, dstride, src, sstride, h, filtery
    mova        m6, [pw_256]
    mova        m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
    mova        m8, [filteryq+32]
    mova        m9, [filteryq+64]
    mova       m10, [filteryq+96]
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw   m2, [filteryq+32]
    pmaddubsw   m4, [filteryq+64]
    pmaddubsw   m1, [filteryq+96]
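    ; pmaddubsw multiplies unsigned source bytes by signed tap bytes and
    ; sums horizontal pairs into signed words, i.e. two taps per lane; the
    ; rounded >>7 is later recovered with pmulhrsw against pw_256, since
    ; (x*256 + 16384) >> 15 == (x + 64) >> 7.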
%macro filter_hx2_fn 1
%assign %%px mmsize
cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, dstride, src, sstride, h, filtery
    mova        m8, [filteryq+ 0]
    mova        m9, [filteryq+32]
    mova       m10, [filteryq+64]
    mova       m11, [filteryq+96]
    SBUTTERFLY  bw, 0, 1, 12
    SBUTTERFLY  bw, 2, 3, 12
    SBUTTERFLY  bw, 4, 5, 12
    SBUTTERFLY  bw, 6, 7, 12
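    ; SBUTTERFLY bw expands to punpcklbw/punpckhbw (m12 as scratch),
    ; interleaving the bytes of each register pair so neighbouring pixels
    ; land side by side in the lanes consumed by pmaddubsw.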
%if HAVE_AVX2_EXTERNAL
%macro filter_sse2_v_fn 1
%assign %%px mmsize/2
%if ARCH_X86_64
cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 15, dst, dstride, src, sstride, h, filtery, src4, sstride3
%else
cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 15, dst, dstride, src, sstride, filtery, src4, sstride3
%endif
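; On x86-32 there are not enough GPRs for all six arguments plus the two
; helper pointers, so the 32-bit prototype takes only four register
; arguments; h and filtery then have to be picked up from the stack.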
    lea  sstride3q, [sstrideq*3]
    lea      src4q, [srcq+sstrideq]
    sub       srcq, sstride3q
    mova        m7, [filteryq+  0]
%if ARCH_X86_64 && mmsize > 8
    mova        m8, [filteryq+ 16]
    mova        m9, [filteryq+ 32]
    mova       m10, [filteryq+ 48]
    mova       m11, [filteryq+ 64]
    mova       m12, [filteryq+ 80]
    mova       m13, [filteryq+ 96]
    mova       m14, [filteryq+112]
    ; FIXME maybe reuse loads from previous rows, or just
    ; more generally unroll this to prevent multiple loads of
    ; the same data
    movh        m0, [srcq]
    movh        m1, [srcq+sstrideq]
    movh        m2, [srcq+sstrideq*2]
    movh        m3, [srcq+sstride3q]
%if ARCH_X86_64 && mmsize > 8
    pmullw      m1, [filteryq+ 16]
    pmullw      m2, [filteryq+ 32]
    pmullw      m3, [filteryq+ 48]
    pmullw      m4, [filteryq+ 64]
    movh        m1, [src4q+sstrideq]
    movh        m3, [src4q+sstrideq*2]
    movh        m4, [src4q+sstride3q]
%if ARCH_X86_64 && mmsize > 8
    pmullw      m1, [filteryq+ 80]
    pmullw      m3, [filteryq+ 96]
    pmullw      m4, [filteryq+112]
%macro filter_v_fn 1
%assign %%px mmsize/2
%if ARCH_X86_64
cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
%else
cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
%endif
    lea  sstride3q, [sstrideq*3]
    lea      src4q, [srcq+sstrideq]
    sub       srcq, sstride3q
    mova        m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
    mova        m8, [filteryq+32]
    mova        m9, [filteryq+64]
    mova       m10, [filteryq+96]
    ; FIXME maybe reuse loads from previous rows, or just
    ; more generally unroll this to prevent multiple loads of
    ; the same data
    movh        m0, [srcq]
    movh        m1, [srcq+sstrideq]
    movh        m2, [srcq+sstrideq*2]
    movh        m3, [srcq+sstride3q]
    movh        m4, [src4q]
    movh        m5, [src4q+sstrideq]
    punpcklbw   m0, m1
    punpcklbw   m2, m3
    movh        m1, [src4q+sstrideq*2]
    movh        m3, [src4q+sstride3q]
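    ; The punpcklbw pairs interleave vertically adjacent rows byte by byte,
    ; matching the paired-tap layout expected by the pmaddubsw below; m1 and
    ; m3 are reused for the later rows once their first contents have been
    ; interleaved away.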
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw   m2, [filteryq+32]
    pmaddubsw   m4, [filteryq+64]
    pmaddubsw   m1, [filteryq+96]
%macro filter_vx2_fn 1
%assign %%px mmsize
cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, dstride, src, sstride, h, filtery, src4, sstride3
    lea  sstride3q, [sstrideq*3]
    lea      src4q, [srcq+sstrideq]
    sub       srcq, sstride3q
    mova        m8, [filteryq+ 0]
    mova        m9, [filteryq+32]
    mova       m10, [filteryq+64]
    mova       m11, [filteryq+96]
    ; FIXME maybe reuse loads from previous rows, or just
    ; more generally unroll this to prevent multiple loads of
    ; the same data
    movu        m0, [srcq]
    movu        m1, [srcq+sstrideq]
    movu        m2, [srcq+sstrideq*2]
    movu        m3, [srcq+sstride3q]
    movu        m4, [src4q]
    movu        m5, [src4q+sstrideq]
    movu        m6, [src4q+sstrideq*2]
    movu        m7, [src4q+sstride3q]
    SBUTTERFLY  bw, 0, 1, 12
    SBUTTERFLY  bw, 2, 3, 12
    SBUTTERFLY  bw, 4, 5, 12
    SBUTTERFLY  bw, 6, 7, 12
%if HAVE_AVX2_EXTERNAL
%if %2 <= mmsize
cglobal vp9_%1%2, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
    lea  sstride3q, [sstrideq*3]
    lea  dstride3q, [dstrideq*3]
%else
cglobal vp9_%1%2, 5, 5, 4, dst, dstride, src, sstride, h
%endif
.loop:
    %%srcfn     m0, [srcq]
    %%srcfn     m1, [srcq+s%3]
    %%srcfn     m2, [srcq+s%4]
    %%srcfn     m3, [srcq+s%5]
    lea       srcq, [srcq+sstrideq*%6]
%ifidn %1, avg
    pavgb       m0, [dstq]
    pavgb       m1, [dstq+d%3]
    pavgb       m2, [dstq+d%4]
    pavgb       m3, [dstq+d%5]
%endif
    %%dstfn [dstq], m0
    %%dstfn [dstq+d%3], m1
    %%dstfn [dstq+d%4], m2
    %%dstfn [dstq+d%5], m3
    lea       dstq, [dstq+dstrideq*%6]
    sub         hd, %6
    jnz .loop
    RET
%endmacro
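; fpel_fn emits one full-pel put/avg loop: %1 is put or avg, %2 the block
; width, %3-%5 the offsets of the three extra loads/stores per iteration,
; and %6 the number of rows stepped per iteration. The s/d prefixes pasted
; onto %3-%5 select source or destination terms: they resolve either to
; sstrideq/dstrideq and friends or, via the s16/d16/s32/d32 defines below,
; to a plain byte offset within a row.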
%define d16 16
%define s16 16
%define d32 32
%define s32 32
INIT_MMX mmx
fpel_fn put, 4,  strideq, strideq*2, stride3q, 4
fpel_fn put, 8,  strideq, strideq*2, stride3q, 4
INIT_MMX mmxext
fpel_fn avg, 4,  strideq, strideq*2, stride3q, 4
fpel_fn avg, 8,  strideq, strideq*2, stride3q, 4
INIT_XMM sse
fpel_fn put, 16, strideq, strideq*2, stride3q, 4
fpel_fn put, 32, mmsize,  strideq,   strideq+mmsize, 2
fpel_fn put, 64, mmsize,  mmsize*2,  mmsize*3, 1
INIT_XMM sse2
fpel_fn avg, 16, strideq, strideq*2, stride3q, 4
fpel_fn avg, 32, mmsize,  strideq,   strideq+mmsize, 2
fpel_fn avg, 64, mmsize,  mmsize*2,  mmsize*3, 1
INIT_YMM avx
fpel_fn put, 32, strideq, strideq*2, stride3q, 4
fpel_fn put, 64, mmsize,  strideq,   strideq+mmsize, 2
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
fpel_fn avg, 32, strideq, strideq*2, stride3q, 4
fpel_fn avg, 64, mmsize,  strideq,   strideq+mmsize, 2
%endif
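; The avg variants rely on pavgb, whose 256-bit form only exists in AVX2;
; hence avg32/avg64 sit behind HAVE_AVX2_EXTERNAL while the put variants
; get by with plain AVX loads and stores.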