;******************************************************************************
;* VP9 MC SIMD optimizations
;*
;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

; FIXME share with vp8dsp.asm
pw_256:   times 8 dw 256
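
; F8_TAPS is an assumed reconstruction: each invocation must emit four
; 16-byte rows (one adjacent tap pair splatted as eight s8 byte pairs
; per row) to match the [3][15][4][16] layout documented next and the
; 16-byte tap loads at [filteryq+0..48] further down.
%macro F8_TAPS 8
times 8 db %1, %2
times 8 db %3, %4
times 8 db %5, %6
times 8 db %7, %8
%endmacro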

; int8_t ff_filters_ssse3[3][15][4][16]
const filters_ssse3 ; smooth
F8_TAPS -3, -1,  32,  64,  38,   1, -3,  0
F8_TAPS -2, -2,  29,  63,  41,   2, -3,  0
F8_TAPS -2, -2,  26,  63,  43,   4, -4,  0
F8_TAPS -2, -3,  24,  62,  46,   5, -4,  0
F8_TAPS -2, -3,  21,  60,  49,   7, -4,  0
F8_TAPS -1, -4,  18,  59,  51,   9, -4,  0
F8_TAPS -1, -4,  16,  57,  53,  12, -4, -1
F8_TAPS -1, -4,  14,  55,  55,  14, -4, -1
F8_TAPS -1, -4,  12,  53,  57,  16, -4, -1
F8_TAPS  0, -4,   9,  51,  59,  18, -4, -1
F8_TAPS  0, -4,   7,  49,  60,  21, -3, -2
F8_TAPS  0, -4,   5,  46,  62,  24, -3, -2
F8_TAPS  0, -4,   4,  43,  63,  26, -2, -2
F8_TAPS  0, -3,   2,  41,  63,  29, -2, -2
F8_TAPS  0, -3,   1,  38,  64,  32, -1, -3
; regular
F8_TAPS  0,  1,  -5, 126,   8,  -3,  1,  0
F8_TAPS -1,  3, -10, 122,  18,  -6,  2,  0
F8_TAPS -1,  4, -13, 118,  27,  -9,  3, -1
F8_TAPS -1,  4, -16, 112,  37, -11,  4, -1
F8_TAPS -1,  5, -18, 105,  48, -14,  4, -1
F8_TAPS -1,  5, -19,  97,  58, -16,  5, -1
F8_TAPS -1,  6, -19,  88,  68, -18,  5, -1
F8_TAPS -1,  6, -19,  78,  78, -19,  6, -1
F8_TAPS -1,  5, -18,  68,  88, -19,  6, -1
F8_TAPS -1,  5, -16,  58,  97, -19,  5, -1
F8_TAPS -1,  4, -14,  48, 105, -18,  5, -1
F8_TAPS -1,  4, -11,  37, 112, -16,  4, -1
F8_TAPS -1,  3,  -9,  27, 118, -13,  4, -1
F8_TAPS  0,  2,  -6,  18, 122, -10,  3, -1
F8_TAPS  0,  1,  -3,   8, 126,  -5,  1,  0
; sharp
F8_TAPS -1,  3,  -7, 127,   8,  -3,  1,  0
F8_TAPS -2,  5, -13, 125,  17,  -6,  3, -1
F8_TAPS -3,  7, -17, 121,  27, -10,  5, -2
F8_TAPS -4,  9, -20, 115,  37, -13,  6, -2
F8_TAPS -4, 10, -23, 108,  48, -16,  8, -3
F8_TAPS -4, 10, -24, 100,  59, -19,  9, -3
F8_TAPS -4, 11, -24,  90,  70, -21, 10, -4
F8_TAPS -4, 11, -23,  80,  80, -23, 11, -4
F8_TAPS -4, 10, -21,  70,  90, -24, 11, -4
F8_TAPS -3,  9, -19,  59, 100, -24, 10, -4
F8_TAPS -3,  8, -16,  48, 108, -23, 10, -4
F8_TAPS -2,  6, -13,  37, 115, -20,  9, -4
F8_TAPS -2,  5, -10,  27, 121, -17,  7, -3
F8_TAPS -1,  3,  -6,  17, 125, -13,  5, -2
F8_TAPS  0,  1,  -3,   8, 127,  -7,  3, -1
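
; Each 16-byte tap row pairs two adjacent coefficients, so pmaddubsw
; against bytewise-interleaved pixels yields per-pixel s16 sums of two
; taps at a time; the four rows at [filteryq+0..48] apply all eight taps.
; Every row of each filter sums to 128, i.e. unity gain at FILTER_BITS == 7.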

%macro filter_h_fn 1
%assign %%px mmsize/2
cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, dstride, src, sstride, h, filtery
    mova        m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
    mova        m8, [filteryq+16]
    mova        m9, [filteryq+32]
    mova       m10, [filteryq+48]
%endif
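; on x86-64 all four tap rows stay cached in m7-m10 for the whole loop;
; the eight-xmm x86-32 build rereads [filteryq+16..48] directly in each
; pmaddubsw instead (see the %else branch below)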
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw   m2, m8
    pmaddubsw   m4, m9
    pmaddubsw   m1, m10
%else
    pmaddubsw   m2, [filteryq+16]
    pmaddubsw   m4, [filteryq+32]
    pmaddubsw   m1, [filteryq+48]
%endif
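; the loop tail presumably follows the usual SSSE3 pattern: sum the four
; s16 partial products with paddsw (saturating, matching libvpx), round
; with pmulhrsw against pw_256 -- (x*256 + 0x4000) >> 15 == (x + 64) >> 7,
; the FILTER_BITS rounding -- then packuswb to clip to u8 and store; the
; avg variant additionally pavgb's the result with the existing dst pixels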

%macro filter_hx2_fn 1
%assign %%px mmsize
cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, dstride, src, sstride, h, filtery
    mova        m8, [filteryq+ 0]
    mova        m9, [filteryq+16]
    mova       m10, [filteryq+32]
    mova       m11, [filteryq+48]
    SBUTTERFLY  bw, 0, 1, 12
    SBUTTERFLY  bw, 2, 3, 12
    SBUTTERFLY  bw, 4, 5, 12
    SBUTTERFLY  bw, 6, 7, 12
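; SBUTTERFLY bw, x, y, t (from x86util) expands to punpcklbw/punpckhbw
; with mt as scratch: the low byte-interleave of mx/my lands in mx and
; the high half in my, exactly the pairing pmaddubsw consumes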

%macro filter_v_fn 1
%assign %%px mmsize/2
%if ARCH_X86_64 && mmsize > 8
cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
%else
cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
%endif
    lea  sstride3q, [sstrideq*3]
    lea      src4q, [srcq+sstrideq]
    sub       srcq, sstride3q      ; srcq covers rows -3..0, src4q rows +1..+4
    mova        m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
    mova        m8, [filteryq+16]
    mova        m9, [filteryq+32]
    mova       m10, [filteryq+48]
%endif
    ; FIXME maybe reuse loads from previous rows, or just
    ; more generally unroll this to prevent multiple loads of
    ; the same data
    movh        m0, [srcq]
    movh        m1, [srcq+sstrideq]
    movh        m2, [srcq+sstrideq*2]
    movh        m3, [srcq+sstride3q]
    movh        m4, [src4q]
    movh        m5, [src4q+sstrideq]
    punpcklbw   m0, m1
    punpcklbw   m2, m3
    movh        m1, [src4q+sstrideq*2]
    movh        m3, [src4q+sstride3q]
    punpcklbw   m4, m5
    punpcklbw   m1, m3
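; each punpcklbw pairs two vertically adjacent rows byte by byte, so one
; pmaddubsw against a single tap row applies two of the eight filter
; taps to every column at once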
    pmaddubsw   m0, m7
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw   m2, m8
    pmaddubsw   m4, m9
    pmaddubsw   m1, m10
%else
    pmaddubsw   m2, [filteryq+16]
    pmaddubsw   m4, [filteryq+32]
    pmaddubsw   m1, [filteryq+48]
%endif

%macro filter_vx2_fn 1
%assign %%px mmsize
cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, dstride, src, sstride, h, filtery, src4, sstride3
    lea  sstride3q, [sstrideq*3]
    lea      src4q, [srcq+sstrideq]
    sub       srcq, sstride3q      ; srcq covers rows -3..0, src4q rows +1..+4
    mova        m8, [filteryq+ 0]
    mova        m9, [filteryq+16]
    mova       m10, [filteryq+32]
    mova       m11, [filteryq+48]
    ; FIXME maybe reuse loads from previous rows, or just
    ; more generally unroll this to prevent multiple loads of
    ; the same data
    movu        m0, [srcq]
    movu        m1, [srcq+sstrideq]
    movu        m2, [srcq+sstrideq*2]
    movu        m3, [srcq+sstride3q]
    movu        m4, [src4q]
    movu        m5, [src4q+sstrideq]
    movu        m6, [src4q+sstrideq*2]
    movu        m7, [src4q+sstride3q]
    SBUTTERFLY  bw, 0, 1, 12
    SBUTTERFLY  bw, 2, 3, 12
    SBUTTERFLY  bw, 4, 5, 12
    SBUTTERFLY  bw, 6, 7, 12
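
; The fullpel put/avg functions below are generated from one macro whose
; opening is reconstructed here as a sketch, assuming the six arguments
; visible in the calls at the end of the file (op, px, three row
; offsets, rows per iteration); %%srcfn/%%dstfn pick width-matched
; load/store ops (movh is 4 bytes under INIT_MMX, 8 under INIT_XMM):
%macro fpel_fn 6
%if %2 == 4
%define %%srcfn movh
%define %%dstfn movh
%else
%define %%srcfn movu
%define %%dstfn mova
%endif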
%if %2 <= 16
cglobal vp9_%1%2, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
    lea  sstride3q, [sstrideq*3]
    lea  dstride3q, [dstrideq*3]
%else
cglobal vp9_%1%2, 5, 5, 4, dst, dstride, src, sstride, h
%endif
.loop:
    %%srcfn     m0, [srcq]
    %%srcfn     m1, [srcq+s%3]
    %%srcfn     m2, [srcq+s%4]
    %%srcfn     m3, [srcq+s%5]
    lea       srcq, [srcq+sstrideq*%6]
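; for the avg variant, the loaded rows are presumably averaged with the
; existing destination pixels before the stores that follow:
%ifidn %1, avg
    pavgb       m0, [dstq]
    pavgb       m1, [dstq+d%3]
    pavgb       m2, [dstq+d%4]
    pavgb       m3, [dstq+d%5]
%endif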
    %%dstfn [dstq],     m0
    %%dstfn [dstq+d%3], m1
    %%dstfn [dstq+d%4], m2
    %%dstfn [dstq+d%5], m3
    lea       dstq, [dstq+dstrideq*%6]
    sub         hd, %6
    jg .loop
    RET
%endmacro

; s16/d16 must expand to the plain offset 16: the 32/64-px calls pass
; mmsize (== 16 here), and the macro body pastes it onto the s/d prefixes.
; ISA picks follow the instructions used: plain moves for put (mmx/sse),
; pavgb for avg (mmxext/sse2).
%define d16 16
%define s16 16
INIT_MMX mmx
fpel_fn put, 4,  strideq, strideq*2, stride3q, 4
fpel_fn put, 8,  strideq, strideq*2, stride3q, 4
INIT_MMX mmxext
fpel_fn avg, 4,  strideq, strideq*2, stride3q, 4
fpel_fn avg, 8,  strideq, strideq*2, stride3q, 4
INIT_XMM sse
fpel_fn put, 16, strideq, strideq*2, stride3q, 4
fpel_fn put, 32, mmsize,  strideq,   strideq+mmsize, 2
fpel_fn put, 64, mmsize,  mmsize*2,  mmsize*3, 1
INIT_XMM sse2
fpel_fn avg, 16, strideq, strideq*2, stride3q, 4
fpel_fn avg, 32, mmsize,  strideq,   strideq+mmsize, 2
fpel_fn avg, 64, mmsize,  mmsize*2,  mmsize*3, 1