;******************************************************************************
;* VP9 SIMD optimizations
;*
;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

; FIXME share with vp8dsp.asm
pw_256: times 8 dw 256

%macro F8_TAPS 8
times 8 db %1, %2
times 8 db %3, %4
times 8 db %5, %6
times 8 db %7, %8
%endmacro

; int8_t ff_filters_ssse3[3][15][4][16]
const filters_ssse3 ; smooth
                    F8_TAPS -3, -1,  32,  64,  38,   1, -3,  0
                    F8_TAPS -2, -2,  29,  63,  41,   2, -3,  0
                    F8_TAPS -2, -2,  26,  63,  43,   4, -4,  0
                    F8_TAPS -2, -3,  24,  62,  46,   5, -4,  0
                    F8_TAPS -2, -3,  21,  60,  49,   7, -4,  0
                    F8_TAPS -1, -4,  18,  59,  51,   9, -4,  0
                    F8_TAPS -1, -4,  16,  57,  53,  12, -4, -1
                    F8_TAPS -1, -4,  14,  55,  55,  14, -4, -1
                    F8_TAPS -1, -4,  12,  53,  57,  16, -4, -1
                    F8_TAPS  0, -4,   9,  51,  59,  18, -4, -1
                    F8_TAPS  0, -4,   7,  49,  60,  21, -3, -2
                    F8_TAPS  0, -4,   5,  46,  62,  24, -3, -2
                    F8_TAPS  0, -4,   4,  43,  63,  26, -2, -2
                    F8_TAPS  0, -3,   2,  41,  63,  29, -2, -2
                    F8_TAPS  0, -3,   1,  38,  64,  32, -1, -3
                    ; regular
                    F8_TAPS  0,  1,  -5, 126,   8,  -3,  1,  0
                    F8_TAPS -1,  3, -10, 122,  18,  -6,  2,  0
                    F8_TAPS -1,  4, -13, 118,  27,  -9,  3, -1
                    F8_TAPS -1,  4, -16, 112,  37, -11,  4, -1
                    F8_TAPS -1,  5, -18, 105,  48, -14,  4, -1
                    F8_TAPS -1,  5, -19,  97,  58, -16,  5, -1
                    F8_TAPS -1,  6, -19,  88,  68, -18,  5, -1
                    F8_TAPS -1,  6, -19,  78,  78, -19,  6, -1
                    F8_TAPS -1,  5, -18,  68,  88, -19,  6, -1
                    F8_TAPS -1,  5, -16,  58,  97, -19,  5, -1
                    F8_TAPS -1,  4, -14,  48, 105, -18,  5, -1
                    F8_TAPS -1,  4, -11,  37, 112, -16,  4, -1
                    F8_TAPS -1,  3,  -9,  27, 118, -13,  4, -1
                    F8_TAPS  0,  2,  -6,  18, 122, -10,  3, -1
                    F8_TAPS  0,  1,  -3,   8, 126,  -5,  1,  0
                    ; sharp
                    F8_TAPS -1,  3,  -7, 127,   8,  -3,  1,  0
                    F8_TAPS -2,  5, -13, 125,  17,  -6,  3, -1
                    F8_TAPS -3,  7, -17, 121,  27, -10,  5, -2
                    F8_TAPS -4,  9, -20, 115,  37, -13,  6, -2
                    F8_TAPS -4, 10, -23, 108,  48, -16,  8, -3
                    F8_TAPS -4, 10, -24, 100,  59, -19,  9, -3
                    F8_TAPS -4, 11, -24,  90,  70, -21, 10, -4
                    F8_TAPS -4, 11, -23,  80,  80, -23, 11, -4
                    F8_TAPS -4, 10, -21,  70,  90, -24, 11, -4
                    F8_TAPS -3,  9, -19,  59, 100, -24, 10, -4
                    F8_TAPS -3,  8, -16,  48, 108, -23, 10, -4
                    F8_TAPS -2,  6, -13,  37, 115, -20,  9, -4
                    F8_TAPS -2,  5, -10,  27, 121, -17,  7, -3
                    F8_TAPS -1,  3,  -6,  17, 125, -13,  5, -2
                    F8_TAPS  0,  1,  -3,   8, 127,  -7,  3, -1
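
; How the table is consumed (a sketch following the int8_t[3][15][4][16]
; layout): one filter entry is 4 16-byte vectors, each repeating one adjacent
; tap pair 8 times, so the 8tap loops below can load [filteryq+ 0], [+16],
; [+32] and [+48] and use pmaddubsw, which multiply-adds each interleaved
; (pixel, pixel) byte pair against its (tap, tap) pair in one instruction.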

pw_11585x2: times 8 dw 23170
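
; Why the doubled constant: pmulhrsw computes (x*c + (1<<14)) >> 15, so with
; c = 2*11585 it returns round(x*11585 / 2^14), i.e. multiplication by
; cos(pi/4) in Q14 (11585 = round(16384*cos(pi/4))).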

%macro VP9_IDCT_COEFFS 2
pw_m%1_%2: dw -%1, %2, -%1, %2, -%1, %2, -%1, %2
pw_%2_%1: dw %2, %1, %2, %1, %2, %1, %2, %1
%endmacro

%macro VP9_IDCT_COEFFS_ALL 2
pw_%1x2: times 8 dw %1*2
pw_%2x2: times 8 dw %2*2
VP9_IDCT_COEFFS %1, %2
%endmacro

VP9_IDCT_COEFFS_ALL 15137, 6270
VP9_IDCT_COEFFS_ALL 16069, 3196
VP9_IDCT_COEFFS 9102, 13623
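
; The pairs above are the Q14 trig constants of the VP9 idct (cospi_N_64 =
; round(16384*cos(N*pi/64))): 15137/6270 = cospi_8/cospi_24,
; 16069/3196 = cospi_4/cospi_28 and 9102/13623 = cospi_20/cospi_12.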

pd_8192: times 4 dd 8192
pw_2048: times 8 dw 2048
pw_1024: times 8 dw 1024

SECTION .text

; (a*x + b*y + round) >> shift
%macro VP9_MULSUB_2W_2X 6 ; dst1, dst2, src (unchanged), round, coefs1, coefs2
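; Sketch of the computation (the macro body is elided in this excerpt): the
; src register holds interleaved (y, x) word pairs and the two coefficient
; rows are (-c1, c2, ...) and (c2, c1, ...), so pmaddwd produces per dword
; lane
;     dst1 = c2*x - c1*y        dst2 = c1*x + c2*y
; which the round constant (m7 = pd_8192 = 1<<13 at the call sites) plus a
; 14-bit right shift then rounds back to words.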

%macro VP9_UNPACK_MULSUB_2W_4X 4 ; dst1, dst2, coef1, coef2
    punpckhwd m6, m%2, m%1
    VP9_MULSUB_2W_2X 4, 5, 6, 7, [pw_m%3_%4], [pw_%4_%3]
    punpcklwd m%2, m%1
    VP9_MULSUB_2W_2X %1, 6, %2, 7, [pw_m%3_%4], [pw_%4_%3]
%endmacro

%macro VP9_STORE_2X 2
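; What this does (body partly elided here): add the two rows of idct output
; words %1 and %2 to two rows of dst pixels -- load, unpack bytes to words
; against the caller-zeroed m4, paddw, then repack with unsigned saturation
; and store; m7 serves as the scratch register seen below.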
    movh m7, [dstq+strideq]
    movh [dstq+strideq], m7
%endmacro

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IDCT4_1D_FINALIZE 0
    SUMSUB_BA w, 3, 2, 4 ; m3=t3+t0, m2=-t3+t0
    SUMSUB_BA w, 1, 0, 4 ; m1=t2+t1, m0=-t2+t1
    SWAP 0, 3 ; 3102 -> 0132
    SWAP 3, 2 ; 0132 -> 0123
%endmacro

%macro VP9_IDCT4_1D 0
    SUMSUB_BA w, 2, 0, 4 ; m2=IN(0)+IN(2) m0=IN(0)-IN(2)
    mova m4, [pw_11585x2]
    pmulhrsw m2, m4 ; m2=t0
    pmulhrsw m0, m4 ; m0=t1
    VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270 ; m1=t2, m3=t3
    VP9_IDCT4_1D_FINALIZE
%endmacro

; 2x2 top-left corner
%macro VP9_IDCT4_2x2_1D 0
    pmulhrsw m0, m5 ; m0=t1
    pmulhrsw m1, m6 ; m1=t2
    pmulhrsw m3, m7 ; m3=t3
    VP9_IDCT4_1D_FINALIZE
%endmacro
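
; Why the 2x2 case collapses to plain multiplies (the m5/m6/m7 loads happen
; at the call site): with only the top-left 2x2 coefficients set, IN(2) and
; IN(3) of each 1D pass are zero, so t0 = t1 = IN(0)*11585, t2 = IN(1)*6270
; and t3 = IN(1)*15137, all in Q14 via pmulhrsw with the doubled constants.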

%macro VP9_IDCT4_WRITEOUT 0
    mova m5, [pw_2048]
    pmulhrsw m0, m5 ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
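    ; derivation: pmulhrsw computes (x*c + (1<<14)) >> 15; with c = 2048 =
    ; 1<<11 this is (x*2^11 + 8*2^11) >> 15 = (x+8) >> 4, the final rounded
    ; >>4 scale of the 4x4 idct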
    lea dstq, [dstq+2*strideq]
%endmacro

cglobal vp9_idct_idct_4x4_add, 4, 4, 0, dst, stride, block, eob
    cmp eobd, 4 ; 2x2 or smaller
    cmp eobd, 1 ; faster path for when only DC is set
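    ; note: in VP9's default scan order the first 4 coefficients are exactly
    ; the top-left 2x2, so eob <= 4 means a 2x2 idct suffices, and eob == 1
    ; means only DC is set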
    mova m5, [pw_11585x2]
    mova m5, [pw_2048]
    pmulhrsw m0, m5 ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
    lea dstq, [dstq+2*strideq]

; faster path for when only the top-left 2x2 block is set
    mova m5, [pw_11585x2]
    mova m7, [pw_15137x2]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

.idctfull: ; generic full 4x4 idct/idct
    mova m7, [pd_8192] ; rounding
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------
%if ARCH_X86_64 ; TODO: 32-bit? (32-bit is limited to 8 xmm registers; we use 13 here)

%macro VP9_IDCT8_1D_FINALIZE 0
    SUMSUB_BA w, 3, 10, 4 ; m3=t0+t7, m10=t0-t7
    SUMSUB_BA w, 1, 2, 4 ; m1=t1+t6, m2=t1-t6
    SUMSUB_BA w, 11, 0, 4 ; m11=t2+t5, m0=t2-t5
    SUMSUB_BA w, 9, 8, 4 ; m9=t3+t4, m8=t3-t4
%endmacro

%macro VP9_IDCT8_1D 0
    SUMSUB_BA w, 8, 0, 4 ; m8=IN(0)+IN(4) m0=IN(0)-IN(4)
    pmulhrsw m8, m12 ; m8=t0a
    pmulhrsw m0, m12 ; m0=t1a
    VP9_UNPACK_MULSUB_2W_4X 2, 10, 15137, 6270 ; m2=t2a, m10=t3a
    VP9_UNPACK_MULSUB_2W_4X 1, 11, 16069, 3196 ; m1=t4a, m11=t7a
    VP9_UNPACK_MULSUB_2W_4X 9, 3, 9102, 13623 ; m9=t5a, m3=t6a
    SUMSUB_BA w, 10, 8, 4 ; m10=t0a+t3a (t0), m8=t0a-t3a (t3)
    SUMSUB_BA w, 2, 0, 4 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    SUMSUB_BA w, 9, 1, 4 ; m9=t4a+t5a (t4), m1=t4a-t5a (t5a)
    SUMSUB_BA w, 3, 11, 4 ; m3=t7a+t6a (t7), m11=t7a-t6a (t6a)
    SUMSUB_BA w, 1, 11, 4 ; m1=t6a+t5a (t6), m11=t6a-t5a (t5)
    pmulhrsw m1, m12 ; m1=t6
    pmulhrsw m11, m12 ; m11=t5
    VP9_IDCT8_1D_FINALIZE
%endmacro

; TODO: many of the t* copies can probably be removed and merged into the
; SUMSUBs that follow in VP9_IDCT8_1D_FINALIZE once AVX 3-operand forms are used
%macro VP9_IDCT8_2x2_1D 0
    pmulhrsw m0, m12 ; m0=t0
    pmulhrsw m1, m6 ; m1=t4
    pmulhrsw m3, m7 ; m3=t7
    mova m10, m0 ; m10=t2
    mova m11, m3 ; t5 = t7a ...
    mova m9, m3 ; t6 = t7a ...
    psubw m11, m1 ; t5 = t7a - t4a
    paddw m9, m1 ; t6 = t7a + t4a
    pmulhrsw m11, m12 ; m11=t5
    pmulhrsw m9, m12 ; m9=t6
    VP9_IDCT8_1D_FINALIZE
%endmacro

%macro VP9_IDCT8_WRITEOUT 0
    mova m5, [pw_1024]
    pmulhrsw m0, m5 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
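    ; same derivation as in VP9_IDCT4_WRITEOUT: c = 1024 = 1<<10 gives
    ; (x*2^10 + 16*2^10) >> 15 = (x+16) >> 5, the 8x8 idct's final scale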
    lea dstq, [dstq+2*strideq]
    lea dstq, [dstq+2*strideq]
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X m10, m11
%endmacro

cglobal vp9_idct_idct_8x8_add, 4, 4, 13, dst, stride, block, eob
    mova m12, [pw_11585x2] ; often used
    cmp eobd, 3 ; top-left corner or less
    cmp eobd, 1 ; faster path for when only DC is set
    jne .idcttopleftcorner
    mova m5, [pw_1024]
    pmulhrsw m0, m5 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    lea dstq, [dstq+2*strideq]
    lea dstq, [dstq+2*strideq]
    lea dstq, [dstq+2*strideq]

; faster path for when only the top-left corner is set (3 inputs: DC, the one
; to the right of DC, and the one below DC). Note: this also works on a 2x2
; block.
.idcttopleftcorner:
    mova m7, [pw_16069x2]
    TRANSPOSE8x8W 0, 1, 2, 3, 8, 9, 10, 11, 4
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

.idctfull: ; generic full 8x8 idct/idct
    mova m0, [blockq+  0] ; IN(0)
    mova m1, [blockq+ 16] ; IN(1)
    mova m2, [blockq+ 32] ; IN(2)
    mova m3, [blockq+ 48] ; IN(3)
    mova m8, [blockq+ 64] ; IN(4)
    mova m9, [blockq+ 80] ; IN(5)
    mova m10, [blockq+ 96] ; IN(6)
    mova m11, [blockq+112] ; IN(7)
    mova m7, [pd_8192] ; rounding
    TRANSPOSE8x8W 0, 1, 2, 3, 8, 9, 10, 11, 4
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
    mova [blockq+ 16], m4
    mova [blockq+ 32], m4
    mova [blockq+ 48], m4
    mova [blockq+ 64], m4
    mova [blockq+ 80], m4
    mova [blockq+ 96], m4
    mova [blockq+112], m4
%endif

%macro filter_h_fn 1
%assign %%px mmsize/2
cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, dstride, src, sstride, h, filtery
    mova m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
    mova m8, [filteryq+16]
    mova m9, [filteryq+32]
    mova m10, [filteryq+48]
%endif
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw m2, m8
    pmaddubsw m4, m9
    pmaddubsw m1, m10
%else
    pmaddubsw m2, [filteryq+16]
    pmaddubsw m4, [filteryq+32]
    pmaddubsw m1, [filteryq+48]
%endif
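
; Normalization sketch (the adds between the pmaddubsw results are elided in
; this excerpt): the four pair-products are summed into the full 8-tap sum,
; then scaled back to pixel range with pmulhrsw against pw_256:
;     (x*256 + (1<<14)) >> 15 = (x + 64) >> 7
; i.e. round(x/128), matching the 128 that every F8_TAPS row above sums to.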

%macro filter_v_fn 1
%assign %%px mmsize/2
%if ARCH_X86_64
cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
%else
cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
%endif
    lea sstride3q, [sstrideq*3]
    mova m7, [filteryq+ 0]
    lea src4q, [srcq+sstrideq*4]
%if ARCH_X86_64 && mmsize > 8
    mova m8, [filteryq+16]
    mova m9, [filteryq+32]
    mova m10, [filteryq+48]
%endif
    ; FIXME maybe reuse loads from previous rows, or just
    ; more generally unroll this to prevent multiple loads of
    ; the same data
    movh m1, [srcq+sstrideq]
    movh m2, [srcq+sstrideq*2]
    movh m3, [srcq+sstride3q]
    movh m5, [src4q+sstrideq]
    movh m1, [src4q+sstrideq*2]
    movh m3, [src4q+sstride3q]
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw m2, m8
    pmaddubsw m4, m9
    pmaddubsw m1, m10
%else
    pmaddubsw m2, [filteryq+16]
    pmaddubsw m4, [filteryq+32]
    pmaddubsw m1, [filteryq+48]
%endif
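    ; (the summing and pw_256 normalization that follow are the same as in
    ; the horizontal filter above)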

%macro fpel_fn 6 ; type, width, offset 2, offset 3, offset 4, rows per iteration
%if %2 <= 16
cglobal %1%2, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
    lea sstride3q, [sstrideq*3]
    lea dstride3q, [dstrideq*3]
%else
cglobal %1%2, 5, 5, 4, dst, dstride, src, sstride, h
%endif
    %%srcfn m1, [srcq+s%3]
    %%srcfn m2, [srcq+s%4]
    %%srcfn m3, [srcq+s%5]
    lea srcq, [srcq+sstrideq*%6]
    %%dstfn [dstq+d%3], m1
    %%dstfn [dstq+d%4], m2
    %%dstfn [dstq+d%5], m3
    lea dstq, [dstq+dstrideq*%6]
%endmacro
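
; Instantiation parameters (a reading of the macro above): %1 = put/avg,
; %2 = block width, %3-%5 = offsets of the 2nd-4th load/store; the s/d
; prefixes in s%3/d%3 paste the source/dest prefix onto stride-based offset
; names (sstrideq vs dstrideq), while constant offsets like mmsize rely on
; matching %defines elsewhere in the file. %6 = rows advanced per iteration:
; widths <= 16 copy 4 rows per pass, width 32 copies 2 rows as two
; mmsize-wide halves, width 64 copies 1 row as four mmsize-wide quarters.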

fpel_fn put, 4, strideq, strideq*2, stride3q, 4
fpel_fn put, 8, strideq, strideq*2, stride3q, 4

fpel_fn avg, 4, strideq, strideq*2, stride3q, 4
fpel_fn avg, 8, strideq, strideq*2, stride3q, 4

fpel_fn put, 16, strideq, strideq*2, stride3q, 4
fpel_fn put, 32, mmsize, strideq, strideq+mmsize, 2
fpel_fn put, 64, mmsize, mmsize*2, mmsize*3, 1

fpel_fn avg, 16, strideq, strideq*2, stride3q, 4
fpel_fn avg, 32, mmsize, strideq, strideq+mmsize, 2
fpel_fn avg, 64, mmsize, mmsize*2, mmsize*3, 1