;******************************************************************************
;* VP9 IDCT SIMD optimizations
;* Copyright (C) 2013 Clément Bœsch <u pkh me>
;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
;* This file is part of FFmpeg.
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
pw_11585x2: times 8 dw 23170
pw_m11585x2: times 8 dw -23170
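; the idct coefficients below follow the fixed-point convention
; round(2^14 * cos(k*pi/64)) used by the VP9 spec (e.g. 11585 is
; round(2^14 * cos(pi/4))). The "x2" variants are stored doubled because
; pmulhrsw computes (x * c + (1 << 14)) >> 15, so multiplying by 2*c
; gives exactly the spec's (x * c + (1 << 13)) >> 14 rounding in a
; single instruction.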
%macro VP9_IDCT_COEFFS 2-3 0
pw_%1x2: times 8 dw %1*2
pw_m%1x2: times 8 dw -%1*2
pw_%2x2: times 8 dw %2*2
pw_m%2x2: times 8 dw -%2*2
pw_m%1_%2: times 4 dw -%1, %2
pw_%2_%1: times 4 dw %2, %1
pw_m%2_m%1: times 4 dw -%2, -%1
pw_m%2_%1: times 4 dw -%2, %1
pw_%1_%2: times 4 dw %1, %2
VP9_IDCT_COEFFS 15137, 6270, 1
VP9_IDCT_COEFFS 16069, 3196, 1
VP9_IDCT_COEFFS 9102, 13623, 1
VP9_IDCT_COEFFS 16305, 1606
VP9_IDCT_COEFFS 10394, 12665
VP9_IDCT_COEFFS 14449, 7723
VP9_IDCT_COEFFS 4756, 15679
VP9_IDCT_COEFFS 16364, 804
VP9_IDCT_COEFFS 11003, 12140
VP9_IDCT_COEFFS 14811, 7005
VP9_IDCT_COEFFS 5520, 15426
VP9_IDCT_COEFFS 15893, 3981
VP9_IDCT_COEFFS 8423, 14053
VP9_IDCT_COEFFS 13160, 9760
VP9_IDCT_COEFFS 2404, 16207
pw_5283_13377: times 4 dw 5283, 13377
pw_9929_13377: times 4 dw 9929, 13377
pw_15212_m13377: times 4 dw 15212, -13377
pw_15212_9929: times 4 dw 15212, 9929
pw_m5283_m15212: times 4 dw -5283, -15212
pw_13377x2: times 8 dw 13377*2
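; the interleaved constants above are the iadst4 sinpi values,
; approximately round(2^14 * (2*sqrt(2)/3) * sin(k*pi/9)) for k=1..4:
; 5283, 9929, 13377, 15212. Note that 5283 + 9929 == 15212 holds
; exactly, which is what allows out3 to be formed directly from the
; interleaved pmaddwd products below.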
pd_8192: times 4 dd 8192
pw_2048: times 8 dw 2048
pw_1024: times 8 dw 1024
pw_512: times 8 dw 512
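; final-rounding multipliers; through pmulhrsw ((x*c + (1 << 14)) >> 15)
; these implement:
;   pw_2048 -> (x +  8) >> 4   (4x4)
;   pw_1024 -> (x + 16) >> 5   (8x8)
;   pw_512  -> (x + 32) >> 6   (16x16 and 32x32)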
; (a*x + b*y + round) >> shift
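; in C terms, each output pair of the pmaddwd butterflies below is
; (illustrative sketch only; a = coef1, b = coef2, x/y = the two inputs):
;   dst1 = (b * x - a * y + 8192) >> 14;   // via [pw_m<a>_<b>]
;   dst2 = (a * x + b * y + 8192) >> 14;   // via [pw_<b>_<a>]
; i.e. a rotation by the angle encoded in the coefficient pair.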
%macro VP9_MULSUB_2W_2X 5 ; dst1, dst2/src, round, coefs1, coefs2
%macro VP9_MULSUB_2W_4X 7 ; dst1, dst2, coef1, coef2, rnd, tmp1/src, tmp2
VP9_MULSUB_2W_2X %7, %6, %5, [pw_m%3_%4], [pw_%4_%3]
VP9_MULSUB_2W_2X %1, %2, %5, [pw_m%3_%4], [pw_%4_%3]
%macro VP9_UNPACK_MULSUB_2W_4X 7-9 ; dst1, dst2, (src1, src2,) coef1, coef2, rnd, tmp1, tmp2
punpckhwd m%6, m%2, m%1
VP9_MULSUB_2W_4X %1, %2, %3, %4, %5, %6, %7
punpckhwd m%8, m%4, m%3
punpcklwd m%2, m%4, m%3
VP9_MULSUB_2W_4X %1, %2, %5, %6, %7, %8, %9
%macro VP9_UNPACK_MULSUB_2D_4X 6 ; dst1 [src1], dst2 [src2], dst3, dst4, mul1, mul2
punpckhwd m%4, m%2, m%1
pmaddwd m%3, m%4, [pw_m%5_%6]
pmaddwd m%4, [pw_%6_%5]
pmaddwd m%1, m%2, [pw_m%5_%6]
pmaddwd m%2, [pw_%6_%5]
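; in essence (hedged summary): take the two 32-bit unpacked results a
; and b and produce the rounded word outputs
;   dst1 = (a + b + 8192) >> 14
;   dst2 = (a - b + 8192) >> 14
; with the round argument normally being [pd_8192] (1 << 13).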
%macro VP9_RND_SH_SUMSUB_BA 6 ; dst1 [src1], dst2 [src2], src3, src4, tmp, round
SUMSUB_BA d, %1, %2, %5
SUMSUB_BA d, %3, %4, %5
%macro VP9_STORE_2X 5-6 dstq ; reg1, reg2, tmp1, tmp2, zero, dst
movh m%4, [%6+strideq]
movh [%6+strideq], m%4
%macro ZERO_BLOCK 4 ; mem, stride, nnzcpl, zero_reg
mova [%1+%%y+%%x], %4
%assign %%x (%%x+mmsize)
;-------------------------------------------------------------------------------------------
; void vp9_iwht_iwht_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------
%macro VP9_IWHT4_1D 0
cglobal vp9_iwht_iwht_4x4_add, 3, 3, 0, dst, stride, block, eob
mova m0, [blockq+0*8]
mova m1, [blockq+1*8]
mova m2, [blockq+2*8]
mova m3, [blockq+3*8]
TRANSPOSE4x4W 0, 1, 2, 3, 4
VP9_STORE_2X 0, 1, 5, 6, 4
lea dstq, [dstq+strideq*2]
VP9_STORE_2X 2, 3, 5, 6, 4
ZERO_BLOCK blockq, 8, 4, m4
;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------
%macro VP9_IDCT4_1D_FINALIZE 0
SUMSUB_BA w, 3, 2, 4 ; m3=t3+t0, m2=-t3+t0
SUMSUB_BA w, 1, 0, 4 ; m1=t2+t1, m0=-t2+t1
SWAP 0, 3, 2 ; 3102 -> 0123
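; scalar reference for the 1-D idct4 below (hedged sketch, with
; round14(v) = (v + (1 << 13)) >> 14):
;   t0 = round14((in0 + in2) * 11585);
;   t1 = round14((in0 - in2) * 11585);
;   t2 = round14(in1 *  6270 - in3 * 15137);
;   t3 = round14(in1 * 15137 + in3 *  6270);
;   out0 = t0 + t3; out1 = t1 + t2; out2 = t1 - t2; out3 = t0 - t3;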
%macro VP9_IDCT4_1D 0
SUMSUB_BA w, 2, 0, 4 ; m2=IN(0)+IN(2) m0=IN(0)-IN(2)
pmulhrsw m2, m6 ; m2=t0
pmulhrsw m0, m6 ; m0=t1
VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270, m7, 4, 5 ; m1=t2, m3=t3
VP9_IDCT4_1D_FINALIZE
; 2x2 top left corner
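; with IN(2) == IN(3) == 0 the butterflies collapse into single pmulhrsw
; multiplies: t0 = t1 = round14(in0 * 11585), t2 = round14(in1 * 6270)
; and t3 = round14(in1 * 15137), using the pre-doubled x2 constants
; loaded into m5-m7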
%macro VP9_IDCT4_2x2_1D 0
pmulhrsw m0, m5 ; m0=t1
pmulhrsw m1, m6 ; m1=t2
pmulhrsw m3, m7 ; m3=t3
VP9_IDCT4_1D_FINALIZE
%macro VP9_IDCT4_WRITEOUT 0
pmulhrsw m0, m5 ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
VP9_STORE_2X 0, 1, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 2, 3, 6, 7, 4
cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
cmp eobd, 4 ; 2x2 or smaller
cmp eobd, 1 ; faster path for when only DC is set
mova m5, [pw_11585x2]
pmulhrsw m0, [pw_2048] ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
VP9_STORE_2X 0, 0, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 0, 0, 6, 7, 4
; faster path for when only top left 2x2 block is set
mova m5, [pw_11585x2]
mova m7, [pw_15137x2]
TRANSPOSE4x4W 0, 1, 2, 3, 4
pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
.idctfull: ; generic full 4x4 idct/idct
mova m6, [pw_11585x2]
mova m7, [pd_8192] ; rounding
TRANSPOSE4x4W 0, 1, 2, 3, 4
pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
;-------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------
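; scalar reference for the 1-D iadst4 below (hedged sketch; sin1..sin4
; are the sinpi constants 5283/9929/13377/15212):
;   out0 = round14(sin1*in0 + sin3*in1 + sin4*in2 + sin2*in3)
;   out1 = round14(sin2*in0 + sin3*in1 - sin1*in2 - sin4*in3)
;   out2 = round14(sin3 * (in0 - in2 + in3))
;   out3 = round14(sin4*in0 - sin3*in1 + sin2*in2 - sin1*in3)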
%macro VP9_IADST4_1D 0
pmaddwd xmm1, xmm0, [pw_5283_13377]
pmaddwd xmm4, xmm0, [pw_9929_13377]
pmaddwd xmm0, [pw_15212_m13377]
pmaddwd xmm3, xmm2, [pw_15212_9929]
pmaddwd xmm2, [pw_m5283_m15212]
paddd xmm3, [pd_8192]
paddd xmm2, [pd_8192]
pmulhrsw m3, [pw_13377x2] ; out2
movdq2q m0, xmm0 ; out3
movdq2q m1, xmm1 ; out0
movdq2q m2, xmm4 ; out1
cglobal vp9_%1_%3_4x4_add, 3, 3, 8, dst, stride, block, eob
mova m6, [pw_11585x2]
mova m7, [pd_8192] ; rounding
TRANSPOSE4x4W 0, 1, 2, 3, 4
pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
IADST4_FN idct, IDCT4, iadst, IADST4, ssse3
IADST4_FN iadst, IADST4, idct, IDCT4, ssse3
IADST4_FN iadst, IADST4, iadst, IADST4, ssse3
%if ARCH_X86_64 ; TODO: 32-bit? (32-bit is limited to 8 xmm registers; we use more)
;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------
%macro VP9_IDCT8_1D_FINALIZE 0
SUMSUB_BA w, 3, 10, 4 ; m3=t0+t7, m10=t0-t7
SUMSUB_BA w, 1, 2, 4 ; m1=t1+t6, m2=t1-t6
SUMSUB_BA w, 11, 0, 4 ; m11=t2+t5, m0=t2-t5
SUMSUB_BA w, 9, 8, 4 ; m9=t3+t4, m8=t3-t4
%macro VP9_IDCT8_1D 0
SUMSUB_BA w, 8, 0, 4 ; m8=IN(0)+IN(4) m0=IN(0)-IN(4)
pmulhrsw m8, m12 ; m8=t0a
pmulhrsw m0, m12 ; m0=t1a
VP9_UNPACK_MULSUB_2W_4X 2, 10, 15137, 6270, m7, 4, 5 ; m2=t2a, m10=t3a
VP9_UNPACK_MULSUB_2W_4X 1, 11, 16069, 3196, m7, 4, 5 ; m1=t4a, m11=t7a
VP9_UNPACK_MULSUB_2W_4X 9, 3, 9102, 13623, m7, 4, 5 ; m9=t5a, m3=t6a
SUMSUB_BA w, 10, 8, 4 ; m10=t0a+t3a (t0), m8=t0a-t3a (t3)
SUMSUB_BA w, 2, 0, 4 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
SUMSUB_BA w, 9, 1, 4 ; m9=t4a+t5a (t4), m1=t4a-t5a (t5a)
SUMSUB_BA w, 3, 11, 4 ; m3=t7a+t6a (t7), m11=t7a-t6a (t6a)
SUMSUB_BA w, 1, 11, 4 ; m1=t6a+t5a (t6), m11=t6a-t5a (t5)
pmulhrsw m1, m12 ; m1=t6
pmulhrsw m11, m12 ; m11=t5
VP9_IDCT8_1D_FINALIZE
%macro VP9_IDCT8_4x4_1D 0
pmulhrsw m0, m12 ; m0=t1a/t0a
pmulhrsw m10, m2, [pw_15137x2] ; m10=t3a
pmulhrsw m2, [pw_6270x2] ; m2=t2a
pmulhrsw m11, m1, [pw_16069x2] ; m11=t7a
pmulhrsw m1, [pw_3196x2] ; m1=t4a
pmulhrsw m9, m3, [pw_9102x2] ; m9=-t5a
pmulhrsw m3, [pw_13623x2] ; m3=t6a
psubw m8, m0, m10 ; m8=t0a-t3a (t3)
paddw m10, m0 ; m10=t0a+t3a (t0)
SUMSUB_BA w, 2, 0, 4 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
SUMSUB_BA w, 9, 1, 4 ; m1=t4a+t5a (t4), m9=t4a-t5a (t5a)
SUMSUB_BA w, 3, 11, 4 ; m3=t7a+t6a (t7), m11=t7a-t6a (t6a)
SUMSUB_BA w, 1, 11, 4 ; m1=t6a+t5a (t6), m11=t6a-t5a (t5)
pmulhrsw m1, m12 ; m1=t6
pmulhrsw m11, m12 ; m11=t5
VP9_IDCT8_1D_FINALIZE
; TODO: a lot of the t* copies can probably be removed and merged with the
; following SUMSUBs from VP9_IDCT8_1D_FINALIZE when using AVX
%macro VP9_IDCT8_2x2_1D 0
pmulhrsw m0, m12 ; m0=t0
pmulhrsw m1, m6 ; m1=t4
pmulhrsw m3, m7 ; m3=t7
mova m10, m0 ; m10=t2
mova m11, m3 ; t5 = t7a ...
mova m9, m3 ; t6 = t7a ...
psubw m11, m1 ; t5 = t7a - t4a
paddw m9, m1 ; t6 = t7a + t4a
pmulhrsw m11, m12 ; m11=t5
pmulhrsw m9, m12 ; m9=t6
VP9_IDCT8_1D_FINALIZE
%macro VP9_IDCT8_WRITEOUT 0
pmulhrsw m0, m5 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
VP9_STORE_2X 0, 1, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 2, 3, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 8, 9, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 10, 11, 6, 7, 4
%macro VP9_IDCT_IDCT_8x8_ADD_XMM 1
cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob
mova m12, [pw_11585x2] ; often used
cmp eobd, 12 ; top left half or less
cmp eobd, 3 ; top left corner or less
cmp eobd, 1 ; faster path for when only DC is set
jne .idcttopleftcorner
pmulhrsw m0, m5 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
VP9_STORE_2X 0, 0, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 0, 0, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 0, 0, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X 0, 0, 6, 7, 4
; faster path for when only the top-left corner is set (3 inputs: DC, the
; coefficient to the right of DC, and the coefficient below DC).
; Note: this also works for a 2x2 block
mova m7, [pw_16069x2]
TRANSPOSE8x8W 0, 1, 2, 3, 8, 9, 10, 11, 4
pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
movh m0, [blockq + 0]
movh m1, [blockq +16]
movh m2, [blockq +32]
movh m3, [blockq +48]
TRANSPOSE8x8W 0, 1, 2, 3, 8, 9, 10, 11, 4
.idctfull: ; generic full 8x8 idct/idct
mova m0, [blockq+ 0] ; IN(0)
mova m1, [blockq+ 16] ; IN(1)
mova m2, [blockq+ 32] ; IN(2)
mova m3, [blockq+ 48] ; IN(3)
mova m8, [blockq+ 64] ; IN(4)
mova m9, [blockq+ 80] ; IN(5)
mova m10, [blockq+ 96] ; IN(6)
mova m11, [blockq+112] ; IN(7)
mova m7, [pd_8192] ; rounding
TRANSPOSE8x8W 0, 1, 2, 3, 8, 9, 10, 11, 4
pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
ZERO_BLOCK blockq, 16, 8, m4
VP9_IDCT_IDCT_8x8_ADD_XMM ssse3
VP9_IDCT_IDCT_8x8_ADD_XMM avx
;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------
%macro VP9_IADST8_1D 0 ; input/output=m0/1/2/3/8/9/10/11
VP9_UNPACK_MULSUB_2D_4X 11, 0, 4, 5, 16305, 1606 ; m11/4=t1[d], m0/5=t0[d]
VP9_UNPACK_MULSUB_2D_4X 3, 8, 6, 13, 10394, 12665 ; m3/6=t5[d], m8/13=t4[d]
VP9_RND_SH_SUMSUB_BA 8, 0, 13, 5, 14, m7 ; m8=t0[w], m0=t4[w]
VP9_RND_SH_SUMSUB_BA 3, 11, 6, 4, 14, m7 ; m3=t1[w], m11=t5[w]
VP9_UNPACK_MULSUB_2D_4X 9, 2, 4, 5, 14449, 7723 ; m9/4=t3[d], m2/5=t2[d]
VP9_UNPACK_MULSUB_2D_4X 1, 10, 6, 13, 4756, 15679 ; m1/6=t7[d], m10/13=t6[d]
VP9_RND_SH_SUMSUB_BA 10, 2, 13, 5, 14, m7 ; m10=t2[w], m2=t6[w]
VP9_RND_SH_SUMSUB_BA 1, 9, 6, 4, 14, m7 ; m1=t3[w], m9=t7[w]
; m8=t0, m3=t1, m10=t2, m1=t3, m0=t4, m11=t5, m2=t6, m9=t7
VP9_UNPACK_MULSUB_2D_4X 0, 11, 4, 5, 15137, 6270 ; m0/4=t5[d], m11/5=t4[d]
VP9_UNPACK_MULSUB_2D_4X 9, 2, 6, 13, 6270, 15137 ; m9/6=t6[d], m2/13=t7[d]
VP9_RND_SH_SUMSUB_BA 9, 11, 6, 5, 14, m7
psignw m9, [pw_m1] ; m9=out1[w], m11=t6[w]
VP9_RND_SH_SUMSUB_BA 2, 0, 13, 4, 14, m7 ; m2=out6[w], m0=t7[w]
SUMSUB_BA w, 10, 8, 14 ; m10=out0[w], m8=t2[w]
SUMSUB_BA w, 1, 3, 14
psignw m1, [pw_m1] ; m1=out7[w], m3=t3[w]
; m10=out0, m9=out1, m8=t2, m3=t3, m11=t6, m0=t7, m2=out6, m1=out7
SUMSUB_BA w, 0, 11, 5
pmulhrsw m8, m12 ; out4
pmulhrsw m0, m12 ; out2
psignw m3, [pw_m1] ; out3
psignw m11, [pw_m1] ; out5
; m10=out0, m9=out1, m0=out2, m3=out3, m8=out4, m11=out5, m2=out6, m1=out7
cglobal vp9_%1_%3_8x8_add, 3, 3, 15, dst, stride, block, eob
mova m0, [blockq+ 0] ; IN(0)
mova m1, [blockq+ 16] ; IN(1)
mova m2, [blockq+ 32] ; IN(2)
mova m3, [blockq+ 48] ; IN(3)
mova m8, [blockq+ 64] ; IN(4)
mova m9, [blockq+ 80] ; IN(5)
mova m10, [blockq+ 96] ; IN(6)
mova m11, [blockq+112] ; IN(7)
mova m12, [pw_11585x2] ; often used
mova m7, [pd_8192] ; rounding
TRANSPOSE8x8W 0, 1, 2, 3, 8, 9, 10, 11, 4
pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
ZERO_BLOCK blockq, 16, 8, m4
IADST8_FN idct, IDCT8, iadst, IADST8, ssse3
IADST8_FN idct, IDCT8, iadst, IADST8, avx
IADST8_FN iadst, IADST8, idct, IDCT8, ssse3
IADST8_FN iadst, IADST8, idct, IDCT8, avx
IADST8_FN iadst, IADST8, iadst, IADST8, ssse3
IADST8_FN iadst, IADST8, iadst, IADST8, avx
;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------
; at the end of this macro, m7 (t7) is stored in stack_scratch
; everything else (t0-6 and t8-15) is stored in m0-6 and m8-15
; the following sumsubs have not been done yet:
; SUMSUB_BA w, 6, 9, 15 ; t6, t9
; SUMSUB_BA w, 7, 8, 15 ; t7, t8
%macro VP9_IDCT16_1D_START 4 ; src, nnzc, stride, stack_scratch
mova m3, [%1+ 1*%3] ; IN(1)
mova m12, [%1+ 2*%3] ; IN(2)
mova m0, [%1+ 3*%3] ; IN(3)
pmulhrsw m15, m12, [pw_16069x2] ; t6-7
pmulhrsw m12, [pw_3196x2] ; t4-5
pmulhrsw m4, m3, [pw_16305x2] ; t14-15
pmulhrsw m3, [pw_1606x2] ; t8-9
pmulhrsw m7, m0, [pw_m4756x2] ; t10-11
pmulhrsw m0, [pw_15679x2] ; t12-13
; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
pmulhrsw m13, [pw_11585x2] ; t5
pmulhrsw m14, [pw_11585x2] ; t6
VP9_UNPACK_MULSUB_2W_4X 2, 5, 4, 3, 15137, 6270, [pd_8192], 10, 11 ; t9, t14
VP9_UNPACK_MULSUB_2W_4X 6, 1, 0, 7, 6270, m15137, [pd_8192], 10, 11 ; t10, t13
; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
mova m5, [%1+ 1*%3] ; IN(1)
mova m14, [%1+ 2*%3] ; IN(2)
mova m6, [%1+ 3*%3] ; IN(3)
mova m9, [%1+ 4*%3] ; IN(4)
mova m7, [%1+ 5*%3] ; IN(5)
mova m15, [%1+ 6*%3] ; IN(6)
mova m4, [%1+ 7*%3] ; IN(7)
pmulhrsw m8, m9, [pw_15137x2] ; t3
pmulhrsw m9, [pw_6270x2] ; t2
pmulhrsw m13, m14, [pw_16069x2] ; t7
pmulhrsw m14, [pw_3196x2] ; t4
pmulhrsw m12, m15, [pw_m9102x2] ; t5
pmulhrsw m15, [pw_13623x2] ; t6
pmulhrsw m2, m5, [pw_16305x2] ; t15
pmulhrsw m5, [pw_1606x2] ; t8
pmulhrsw m3, m4, [pw_m10394x2] ; t9
pmulhrsw m4, [pw_12665x2] ; t14
pmulhrsw m0, m7, [pw_14449x2] ; t13
pmulhrsw m7, [pw_7723x2] ; t10
pmulhrsw m1, m6, [pw_m4756x2] ; t11
pmulhrsw m6, [pw_15679x2] ; t12
mova m3, [%1+ 9*%3] ; IN(9)
mova m12, [%1+10*%3] ; IN(10)
mova m0, [%1+11*%3] ; IN(11)
mova m8, [%1+12*%3] ; IN(12)
mova m1, [%1+13*%3] ; IN(13)
mova m13, [%1+14*%3] ; IN(14)
mova m2, [%1+15*%3] ; IN(15)
; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
; m11=in8, m3=in9, m12=in10, m0=in11, m8=in12, m1=in13, m13=in14, m2=in15
VP9_UNPACK_MULSUB_2W_4X 9, 8, 15137, 6270, [pd_8192], 10, 11 ; t2, t3
VP9_UNPACK_MULSUB_2W_4X 14, 13, 16069, 3196, [pd_8192], 10, 11 ; t4, t7
VP9_UNPACK_MULSUB_2W_4X 12, 15, 9102, 13623, [pd_8192], 10, 11 ; t5, t6
VP9_UNPACK_MULSUB_2W_4X 5, 2, 16305, 1606, [pd_8192], 10, 11 ; t8, t15
VP9_UNPACK_MULSUB_2W_4X 3, 4, 10394, 12665, [pd_8192], 10, 11 ; t9, t14
VP9_UNPACK_MULSUB_2W_4X 7, 0, 14449, 7723, [pd_8192], 10, 11 ; t10, t13
VP9_UNPACK_MULSUB_2W_4X 1, 6, 4756, 15679, [pd_8192], 10, 11 ; t11, t12
; m11=t0, m10=t1, m9=t2, m8=t3, m14=t4, m12=t5, m15=t6, m13=t7
; m5=t8, m3=t9, m7=t10, m1=t11, m6=t12, m0=t13, m4=t14, m2=t15
SUMSUB_BA w, 12, 14, 10 ; t4, t5
SUMSUB_BA w, 15, 13, 10 ; t7, t6
SUMSUB_BA w, 3, 5, 10 ; t8, t9
SUMSUB_BA w, 7, 1, 10 ; t11, t10
SUMSUB_BA w, 0, 6, 10 ; t12, t13
SUMSUB_BA w, 4, 2, 10 ; t15, t14
; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
SUMSUB_BA w, 14, 13, 10
pmulhrsw m13, [pw_11585x2] ; t5
pmulhrsw m14, [pw_11585x2] ; t6
VP9_UNPACK_MULSUB_2W_4X 2, 5, 15137, 6270, [pd_8192], 10, 11 ; t9, t14
VP9_UNPACK_MULSUB_2W_4X 6, 1, 6270, m15137, [pd_8192], 10, 11 ; t10, t13
; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m13=t5, m14=t6, m15=t7
; m3=t8, m2=t9, m6=t10, m7=t11, m0=t12, m1=t13, m5=t14, m4=t15
SUMSUB_BA w, 7, 3, 10 ; t8, t11
SUMSUB_BA w, 6, 2, 10 ; t9, t10
SUMSUB_BA w, 0, 4, 10 ; t15, t12
SUMSUB_BA w, 1, 5, 10 ; t14, t13
; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
SUMSUB_BA w, 2, 5, 10
SUMSUB_BA w, 3, 4, 10
pmulhrsw m5, [pw_11585x2] ; t10
pmulhrsw m4, [pw_11585x2] ; t11
pmulhrsw m3, [pw_11585x2] ; t12
pmulhrsw m2, [pw_11585x2] ; t13
; backup first register
; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
; m7=t8, m6=t9, m5=t10, m4=t11, m3=t12, m2=t13, m1=t14, m0=t15
mova m11, [%1+ 0*%3] ; IN(0)
pmulhrsw m11, [pw_11585x2] ; t0-t3
mova m10, [%1+ 0*%3] ; IN(0)
pmulhrsw m10, [pw_11585x2] ; t0 and t1
mova m11, [%1+ 8*%3] ; IN(8)
SUMSUB_BA w, 11, 10, 7
pmulhrsw m11, [pw_11585x2] ; t0
pmulhrsw m10, [pw_11585x2] ; t1
SUMSUB_BA w, 8, 11, 7 ; t0, t3
SUMSUB_BA w, 9, 10, 7 ; t1, t2
SUMSUB_BA w, 15, 8, 7 ; t0, t7
SUMSUB_BA w, 14, 9, 7 ; t1, t6
SUMSUB_BA w, 13, 10, 7 ; t2, t5
SUMSUB_BA w, 12, 11, 7 ; t3, t4
SUMSUB_BA w, 0, 15, 7 ; t0, t15
SUMSUB_BA w, 1, 14, 7 ; t1, t14
SUMSUB_BA w, 2, 13, 7 ; t2, t13
SUMSUB_BA w, 3, 12, 7 ; t3, t12
SUMSUB_BA w, 4, 11, 7 ; t4, t11
SUMSUB_BA w, 5, 10, 7 ; t5, t10
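; per-pass wrapper: in pass 1 (%2 == 1) the 1-D result is transposed and
; spilled to the temporary buffer; in pass 2 it gets the final
; (x + 32) >> 6 rounding and is accumulated into dst. nnzc caps how many
; input rows are treated as nonzero, so low-eob blocks skip most of the
; arithmetic.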
%macro VP9_IDCT16_1D 2-3 16 ; src, pass, nnzc
VP9_IDCT16_1D_START %1, %3, 32, tmpq+32
; backup a different register
SUMSUB_BA w, 6, 9, 15 ; t6, t9
SUMSUB_BA w, 7, 8, 15 ; t7, t8
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 15
TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
; backup more registers
pmulhrsw m0, [pw_512]
pmulhrsw m1, [pw_512]
VP9_STORE_2X 0, 1, 8, 9, 7
lea dstq, [dstq+strideq*2]
pmulhrsw m2, [pw_512]
pmulhrsw m3, [pw_512]
VP9_STORE_2X 2, 3, 8, 9, 7
lea dstq, [dstq+strideq*2]
pmulhrsw m4, [pw_512]
pmulhrsw m5, [pw_512]
VP9_STORE_2X 4, 5, 8, 9, 7
lea dstq, [dstq+strideq*2]
SWAP 0, 7 ; move zero from m7 to m0
SUMSUB_BA w, 6, 9, 1 ; t6, t9
SUMSUB_BA w, 7, 8, 1 ; t7, t8
pmulhrsw m6, [pw_512]
pmulhrsw m7, [pw_512]
VP9_STORE_2X 6, 7, 1, 2, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m8, [pw_512]
pmulhrsw m9, [pw_512]
VP9_STORE_2X 8, 9, 1, 2, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m10, [pw_512]
pmulhrsw m11, [pw_512]
VP9_STORE_2X 10, 11, 1, 2, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m12, [pw_512]
pmulhrsw m13, [pw_512]
VP9_STORE_2X 12, 13, 1, 2, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m14, [pw_512]
pmulhrsw m15, [pw_512]
VP9_STORE_2X 14, 15, 1, 2, 0
%macro VP9_STORE_2XFULL 6-7 strideq ; dc, tmp1, tmp2, tmp3, tmp4, zero, stride
punpcklbw m%2, m%3, m%6
punpcklbw m%4, m%5, m%6
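; the eob == 1 (DC-only) shortcut below is, in scalar terms
; (illustrative sketch):
;   dc = (in[0] * 11585 + (1 << 13)) >> 14;   // 1-D row pass on the DC
;   dc = (dc    * 11585 + (1 << 13)) >> 14;   // 1-D column pass
;   dc = (dc + 32) >> 6;                      // final rounding
;   dst[x] += dc for every pixel, saturated to [0,255] (packuswb)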
%macro VP9_IDCT_IDCT_16x16_ADD_XMM 1
cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob
; max eob when only the top-left corner is nonzero: 3 for a 2x2 corner, 10 for 4x4
cmp eobd, 1 ; faster path for when only DC is set
mova m1, [pw_11585x2]
pmulhrsw m0, [pw_512]
VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
lea dstq, [dstq+2*strideq]
VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
DEFINE_ARGS dst, stride, block, cnt, dst_bak, tmp
VP9_IDCT16_1D blockq, 1, 8
VP9_IDCT16_1D tmpq, 2, 8
lea dstq, [dst_bakq+8]
; at the end of the loop, m0 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 32, 8, m0
VP9_IDCT16_1D blockq, 1
VP9_IDCT16_1D tmpq, 2
lea dstq, [dst_bakq+8]
; at the end of the loop, m0 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 32, 16, m0
VP9_IDCT_IDCT_16x16_ADD_XMM ssse3
VP9_IDCT_IDCT_16x16_ADD_XMM avx
;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------
%macro VP9_IADST16_1D 2 ; src, pass
mova m0, [%1+ 0*32] ; in0
mova m1, [%1+15*32] ; in15
mova m8, [%1+ 7*32] ; in7
mova m9, [%1+ 8*32] ; in8
VP9_UNPACK_MULSUB_2D_4X 1, 0, 2, 3, 16364, 804 ; m1/2=t1[d], m0/3=t0[d]
VP9_UNPACK_MULSUB_2D_4X 8, 9, 11, 10, 11003, 12140 ; m8/11=t9[d], m9/10=t8[d]
VP9_RND_SH_SUMSUB_BA 9, 0, 10, 3, 4, [pd_8192] ; m9=t0[w], m0=t8[w]
VP9_RND_SH_SUMSUB_BA 8, 1, 11, 2, 4, [pd_8192] ; m8=t1[w], m1=t9[w]
mova m11, [%1+ 2*32] ; in2
mova m10, [%1+13*32] ; in13
mova m3, [%1+ 5*32] ; in5
mova m2, [%1+10*32] ; in10
VP9_UNPACK_MULSUB_2D_4X 10, 11, 6, 7, 15893, 3981 ; m10/6=t3[d], m11/7=t2[d]
VP9_UNPACK_MULSUB_2D_4X 3, 2, 4, 5, 8423, 14053 ; m3/4=t11[d], m2/5=t10[d]
VP9_RND_SH_SUMSUB_BA 2, 11, 5, 7, 12, [pd_8192] ; m2=t2[w], m11=t10[w]
VP9_RND_SH_SUMSUB_BA 3, 10, 4, 6, 12, [pd_8192] ; m3=t3[w], m10=t11[w]
mova [tmpq+ 0*%%str], m9 ; make some scratch space (t0:m9->r0)
mova m4, [%1+ 4*32] ; in4
mova m5, [%1+11*32] ; in11
mova m12, [%1+ 3*32] ; in3
mova m13, [%1+12*32] ; in12
VP9_UNPACK_MULSUB_2D_4X 5, 4, 7, 6, 14811, 7005 ; m5/7=t5[d], m4/6=t4[d]
VP9_UNPACK_MULSUB_2D_4X 12, 13, 14, 15, 5520, 15426 ; m12/14=t13[d], m13/15=t12[d]
VP9_RND_SH_SUMSUB_BA 13, 4, 15, 6, 9, [pd_8192] ; m13=t4[w], m4=t12[w]
VP9_RND_SH_SUMSUB_BA 12, 5, 14, 7, 9, [pd_8192] ; m12=t5[w], m5=t13[w]
mova [tmpq+ 2*%%str], m8 ; t1:m8->r2
mova [tmpq+ 3*%%str], m2 ; t2:m2->r3
mova [tmpq+ 4*%%str], m3 ; t3:m3->r4
mova [tmpq+ 5*%%str], m13 ; t4:m13->r5
mova m2, [%1+ 6*32] ; in6
mova m3, [%1+ 9*32] ; in9
mova m8, [%1+ 1*32] ; in1
mova m9, [%1+14*32] ; in14
VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 13160, 9760 ; m3/7=t7[d], m2/6=t6[d]
VP9_UNPACK_MULSUB_2D_4X 8, 9, 13, 14, 2404, 16207 ; m8/13=t15[d], m9/14=t14[d]
VP9_RND_SH_SUMSUB_BA 9, 2, 14, 6, 15, [pd_8192] ; m9=t6[w], m2=t14[w]
VP9_RND_SH_SUMSUB_BA 8, 3, 13, 7, 15, [pd_8192] ; m8=t7[w], m3=t15[w]
; r0=t0, r2=t1, r3=t2, r4=t3, r5=t4, m12=t5, m9=t6, m8=t7
; m0=t8, m1=t9, m11=t10, m10=t11, m4=t12, m5=t13, m2=t14, m3=t15
; handle t8-15 first
VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 16069, 3196 ; m1/7=t8[d], m0/6=t9[d]
VP9_UNPACK_MULSUB_2D_4X 5, 4, 13, 14, 3196, 16069 ; m5/13=t12[d], m4/14=t13[d]
VP9_RND_SH_SUMSUB_BA 5, 1, 13, 7, 15, [pd_8192] ; m5=t8[w], m1=t12[w]
VP9_RND_SH_SUMSUB_BA 4, 0, 14, 6, 15, [pd_8192] ; m4=t9[w], m0=t13[w]
VP9_UNPACK_MULSUB_2D_4X 11, 10, 6, 7, 9102, 13623 ; m11/6=t11[d], m10/7=t10[d]
VP9_UNPACK_MULSUB_2D_4X 3, 2, 13, 14, 13623, 9102 ; m3/13=t14[d], m2/14=t15[d]
VP9_RND_SH_SUMSUB_BA 3, 10, 13, 7, 15, [pd_8192] ; m3=t10[w], m10=t14[w]
VP9_RND_SH_SUMSUB_BA 2, 11, 14, 6, 15, [pd_8192] ; m2=t11[w], m11=t15[w]
; m5=t8, m4=t9, m3=t10, m2=t11, m1=t12, m0=t13, m10=t14, m11=t15
VP9_UNPACK_MULSUB_2D_4X 1, 0, 6, 7, 15137, 6270 ; m1/6=t13[d], m0/7=t12[d]
VP9_UNPACK_MULSUB_2D_4X 11, 10, 13, 14, 6270, 15137 ; m11/13=t14[d], m10/14=t15[d]
VP9_RND_SH_SUMSUB_BA 11, 0, 13, 7, 15, [pd_8192] ; m11=out2[w], m0=t14[w]
VP9_RND_SH_SUMSUB_BA 10, 1, 14, 6, 15, [pd_8192]
psignw m10, [pw_m1] ; m10=out13[w], m1=t15[w]
SUMSUB_BA w, 3, 5, 15
psignw m3, [pw_m1] ; m3=out1[w], m5=t10[w]
SUMSUB_BA w, 2, 4, 15 ; m2=out14[w], m4=t11[w]
SUMSUB_BA w, 5, 4, 15
pmulhrsw m5, [pw_11585x2] ; m5=out6[w]
pmulhrsw m4, [pw_11585x2] ; m4=out9[w]
SUMSUB_BA w, 1, 0, 15
pmulhrsw m1, [pw_m11585x2] ; m1=out5[w]
pmulhrsw m0, [pw_11585x2] ; m0=out10[w]
; m3=out1, m11=out2, m1=out5, m5=out6, m4=out9, m0=out10, m10=out13, m2=out14
mova m6, [tmpq+ 0*%%str]
mova m7, [tmpq+ 2*%%str]
mova m13, [tmpq+ 3*%%str]
mova m14, [tmpq+ 4*%%str]
mova m15, [tmpq+ 5*%%str]
mova [tmpq+ 8*%%str], m5
mova [tmpq+ 9*%%str], m4
mova [tmpq+10*%%str], m0
mova [tmpq+11*%%str], m10
mova [tmpq+12*%%str], m2
; m6=t0, m7=t1, m13=t2, m14=t3, m15=t4, m12=t5, m9=t6, m8=t7
; m3=out1, m11=out2, m1=out5, r8=out6, r9=out9, r10=out10, r11=out13, r12=out14
SUMSUB_BA w, 15, 6, 0 ; m15=t0[w], m6=t4[w]
SUMSUB_BA w, 12, 7, 0 ; m12=t1[w], m7=t5[w]
SUMSUB_BA w, 9, 13, 0 ; m9=t2[w], m13=t6[w]
SUMSUB_BA w, 8, 14, 0 ; m8=t3[w], m14=t7[w]
VP9_UNPACK_MULSUB_2D_4X 6, 7, 0, 2, 15137, 6270 ; m6/0=t5[d], m7/2=t4[d]
VP9_UNPACK_MULSUB_2D_4X 14, 13, 4, 5, 6270, 15137 ; m14/4=t6[d], m13/5=t7[d]
VP9_RND_SH_SUMSUB_BA 14, 7, 4, 2, 10, [pd_8192]
psignw m14, [pw_m1] ; m14=out3[w], m7=t6[w]
VP9_RND_SH_SUMSUB_BA 13, 6, 5, 0, 10, [pd_8192] ; m13=out12[w], m6=t7[w]
SUMSUB_BA w, 9, 15, 10 ; m9=out0[w], m15=t2[w]
SUMSUB_BA w, 8, 12, 10
psignw m8, [pw_m1] ; m8=out15[w], m12=t3[w]
SUMSUB_BA w, 12, 15, 10
pmulhrsw m12, [pw_m11585x2] ; m12=out7[w]
pmulhrsw m15, [pw_11585x2] ; m15=out8[w]
SUMSUB_BA w, 7, 6, 10
pmulhrsw m7, [pw_11585x2] ; m7=out4[w]
pmulhrsw m6, [pw_11585x2] ; m6=out11[w]
; m9=out0, m14=out3, m7=out4, m12=out7, m15=out8, m6=out11, m13=out12, m8=out15
; m3=out1, m11=out2, m1=out5, r8=out6, r9=out9, r10=out10, r11=out13, r12=out14
mova m0, [tmpq+ 8*%%str]
TRANSPOSE8x8W 9, 3, 11, 14, 7, 1, 0, 12, 2
mova [tmpq+ 0*16], m9
mova [tmpq+ 2*16], m3
mova [tmpq+ 4*16], m11
mova [tmpq+ 6*16], m14
mova m9, [tmpq+ 9*%%str]
mova m3, [tmpq+10*%%str]
mova m11, [tmpq+11*%%str]
mova m14, [tmpq+12*%%str]
mova [tmpq+ 8*16], m7
mova [tmpq+10*16], m1
mova [tmpq+12*16], m0
mova [tmpq+14*16], m12
TRANSPOSE8x8W 15, 9, 3, 6, 13, 11, 14, 8, 2
mova [tmpq+ 1*16], m15
mova [tmpq+ 3*16], m9
mova [tmpq+ 5*16], m3
mova [tmpq+ 7*16], m6
mova [tmpq+ 9*16], m13
mova [tmpq+11*16], m11
mova [tmpq+13*16], m14
mova [tmpq+15*16], m8
mova m5, [tmpq+ 8*%%str]
pmulhrsw m9, [pw_512]
pmulhrsw m3, [pw_512]
VP9_STORE_2X 9, 3, 2, 4, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m11, [pw_512]
pmulhrsw m14, [pw_512]
VP9_STORE_2X 11, 14, 2, 4, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m7, [pw_512]
pmulhrsw m1, [pw_512]
VP9_STORE_2X 7, 1, 2, 4, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m5, [pw_512]
pmulhrsw m12, [pw_512]
VP9_STORE_2X 5, 12, 2, 4, 0
lea dstq, [dstq+strideq*2]
mova m9, [tmpq+ 9*%%str]
mova m3, [tmpq+10*%%str]
mova m11, [tmpq+11*%%str]
mova m14, [tmpq+12*%%str]
pmulhrsw m15, [pw_512]
pmulhrsw m9, [pw_512]
VP9_STORE_2X 15, 9, 2, 4, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m3, [pw_512]
pmulhrsw m6, [pw_512]
VP9_STORE_2X 3, 6, 2, 4, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m13, [pw_512]
pmulhrsw m11, [pw_512]
VP9_STORE_2X 13, 11, 2, 4, 0
lea dstq, [dstq+strideq*2]
pmulhrsw m14, [pw_512]
pmulhrsw m8, [pw_512]
VP9_STORE_2X 14, 8, 2, 4, 0
cglobal vp9_%1_%3_16x16_add, 3, 6, 16, 512, dst, stride, block, cnt, dst_bak, tmp
lea dstq, [dst_bakq+8]
; at the end of the loop, m0 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 32, 16, m0
IADST16_FN idct, IDCT16, iadst, IADST16, ssse3
IADST16_FN idct, IDCT16, iadst, IADST16, avx
IADST16_FN iadst, IADST16, idct, IDCT16, ssse3
IADST16_FN iadst, IADST16, idct, IDCT16, avx
IADST16_FN iadst, IADST16, iadst, IADST16, ssse3
IADST16_FN iadst, IADST16, iadst, IADST16, avx
;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_32x32_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------
%macro VP9_IDCT32_1D 2-3 32 ; src, pass, nnzc
%assign %%str 16*%2*%2
; first do t0-15; this can be done identically to the 16x16 idct
VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq+ 4*%%str
; backup a different register
mova [tmpq+30*%%str], m15 ; t15
mova m7, [tmpq+ 4*%%str]
SUMSUB_BA w, 6, 9, 15 ; t6, t9
SUMSUB_BA w, 7, 8, 15 ; t7, t8
; store everything on stack to make space available for t16-31
; we store interleaved with the output of the second half (t16-31)
; so we don't need to allocate extra stack space
mova [tmpq+ 0*%%str], m0 ; t0
mova [tmpq+ 4*%%str], m1 ; t1
mova [tmpq+ 8*%%str], m2 ; t2
mova [tmpq+12*%%str], m3 ; t3
mova [tmpq+16*%%str], m4 ; t4
mova [tmpq+20*%%str], m5 ; t5
mova [tmpq+24*%%str], m6 ; t6
mova [tmpq+28*%%str], m7 ; t7
mova [tmpq+ 2*%%str], m8 ; t8
mova [tmpq+ 6*%%str], m9 ; t9
mova [tmpq+10*%%str], m10 ; t10
mova [tmpq+14*%%str], m11 ; t11
mova [tmpq+18*%%str], m12 ; t12
mova [tmpq+22*%%str], m13 ; t13
mova [tmpq+26*%%str], m14 ; t14
; then do t16-31
pmulhrsw m11, m4, [pw_16364x2] ; t31
pmulhrsw m4, [pw_804x2] ; t16
pmulhrsw m8, m7, [pw_m5520x2] ; t19
pmulhrsw m7, [pw_15426x2] ; t28
pmulhrsw m15, m0, [pw_15893x2] ; t27
pmulhrsw m0, [pw_3981x2] ; t20
pmulhrsw m12, m3, [pw_m2404x2] ; t23
pmulhrsw m3, [pw_16207x2] ; t24
; m4=t16/17, m8=t18/19, m0=t20/21, m12=t22/23,
; m3=t24/25, m15=t26/27, m7=t28/29, m11=t30/31
VP9_UNPACK_MULSUB_2W_4X 5, 10, 11, 4, 16069, 3196, [pd_8192], 6, 9 ; t17, t30
VP9_UNPACK_MULSUB_2W_4X 9, 6, 7, 8, 3196, m16069, [pd_8192], 1, 14 ; t18, t29
; from 1 stage forward
SUMSUB_BA w, 8, 4, 1
mova [tmpq+17*%%str], m8 ; t16
mova [tmpq+21*%%str], m4 ; t19
VP9_UNPACK_MULSUB_2W_4X 1, 14, 15, 0, 9102, 13623, [pd_8192], 4, 8 ; t21, t26
VP9_UNPACK_MULSUB_2W_4X 13, 2, 3, 12, 13623, m9102, [pd_8192], 4, 8 ; t22, t25
; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31
mova m10, [%1+ 1*64]
mova m13, [%1+ 3*64]
mova m14, [%1+ 5*64]
mova m15, [%1+11*64]
mova m12, [%1+13*64]
mova m11, [%1+15*64]
pmulhrsw m5, m10, [pw_16364x2]
pmulhrsw m10, [pw_804x2]
pmulhrsw m4, m11, [pw_m11003x2]
pmulhrsw m11, [pw_12140x2]
pmulhrsw m7, m8, [pw_14811x2]
pmulhrsw m8, [pw_7005x2]
pmulhrsw m6, m9, [pw_m5520x2]
pmulhrsw m9, [pw_15426x2]
pmulhrsw m1, m14, [pw_15893x2]
pmulhrsw m14, [pw_3981x2]
pmulhrsw m0, m15, [pw_m8423x2]
pmulhrsw m15, [pw_14053x2]
; m10=in1, m4=in17, m8=in9, m6=in25, m14=in5, m0=in21, m12=in13, m2=in29,
; m13=in3, m3=in19, m15=in11, m1=in27, m9=in7, m7=in23, m11=in15, m5=in31
VP9_UNPACK_MULSUB_2W_4X 10, 5, 16364, 804, [pd_8192], 2, 3 ; t16, t31
VP9_UNPACK_MULSUB_2W_4X 4, 11, 11003, 12140, [pd_8192], 2, 3 ; t17, t30
VP9_UNPACK_MULSUB_2W_4X 8, 7, 14811, 7005, [pd_8192], 2, 3 ; t18, t29
VP9_UNPACK_MULSUB_2W_4X 6, 9, 5520, 15426, [pd_8192], 2, 3 ; t19, t28
VP9_UNPACK_MULSUB_2W_4X 14, 1, 15893, 3981, [pd_8192], 2, 3 ; t20, t27
VP9_UNPACK_MULSUB_2W_4X 0, 15, 8423, 14053, [pd_8192], 2, 3 ; t21, t26
; from 1 stage forward
SUMSUB_BA w, 4, 10, 2
SUMSUB_BA w, 8, 6, 2
; from 2 stages forward
SUMSUB_BA w, 8, 4, 2
mova [tmpq+17*%%str], m8 ; t16
mova [tmpq+21*%%str], m4 ; t19
pmulhrsw m3, m12, [pw_13160x2]
pmulhrsw m12, [pw_9760x2]
pmulhrsw m2, m13, [pw_m2404x2]
pmulhrsw m13, [pw_16207x2]
VP9_UNPACK_MULSUB_2W_4X 12, 3, 13160, 9760, [pd_8192], 4, 8 ; t22, t25
VP9_UNPACK_MULSUB_2W_4X 2, 13, 2404, 16207, [pd_8192], 4, 8 ; t23, t24
; m10=t16, m4=t17, m8=t18, m6=t19, m14=t20, m0=t21, m12=t22, m2=t23,
; m13=t24, m3=t25, m15=t26, m1=t27, m9=t28, m7=t29, m11=t30, m5=t31
SUMSUB_BA w, 0, 14, 4
SUMSUB_BA w, 12, 2, 4
SUMSUB_BA w, 3, 13, 4
SUMSUB_BA w, 15, 1, 4
SUMSUB_BA w, 7, 9, 4
SUMSUB_BA w, 11, 5, 4
; m4=t16, m10=t17, m6=t18, m8=t19, m0=t20, m14=t21, m2=t22, m12=t23,
; m3=t24, m13=t25, m1=t26, m15=t27, m7=t28, m9=t29, m5=t30, m11=t31
VP9_UNPACK_MULSUB_2W_4X 5, 10, 16069, 3196, [pd_8192], 4, 8 ; t17, t30
VP9_UNPACK_MULSUB_2W_4X 9, 6, 3196, m16069, [pd_8192], 4, 8 ; t18, t29
VP9_UNPACK_MULSUB_2W_4X 1, 14, 9102, 13623, [pd_8192], 4, 8 ; t21, t26
VP9_UNPACK_MULSUB_2W_4X 13, 2, 13623, m9102, [pd_8192], 4, 8 ; t22, t25
; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31
SUMSUB_BA w, 9, 5, 4
SUMSUB_BA w, 1, 13, 4
SUMSUB_BA w, 0, 12, 4
SUMSUB_BA w, 15, 3, 4
SUMSUB_BA w, 14, 2, 4
SUMSUB_BA w, 6, 10, 4
SUMSUB_BA w, 7, 11, 4
; m8[s]=t16, m9=t17, m5=t18, m4[s]=t19, m12=t20, m13=t21, m1=t22, m0=t23,
; m15=t24, m14=t25, m2=t26, m3=t27, m11=t28, m10=t29, m6=t30, m7=t31
mova m8, [tmpq+17*%%str] ; t16
; from 2 stages forward
SUMSUB_BA w, 0, 8, 4
SUMSUB_BA w, 15, 7, 4
; from 3 stages forward
SUMSUB_BA w, 8, 7, 4
pmulhrsw m7, [pw_11585x2]
pmulhrsw m8, [pw_11585x2]
mova [tmpq+ 1*%%str], m0 ; t16
mova [tmpq+29*%%str], m7 ; t23
mova m4, [tmpq+21*%%str] ; t19
VP9_UNPACK_MULSUB_2W_4X 10, 5, 15137, 6270, [pd_8192], 0, 7 ; t18, t29
VP9_UNPACK_MULSUB_2W_4X 11, 4, 15137, 6270, [pd_8192], 0, 7 ; t19, t28
VP9_UNPACK_MULSUB_2W_4X 3, 12, 6270, m15137, [pd_8192], 0, 7 ; t20, t27
VP9_UNPACK_MULSUB_2W_4X 2, 13, 6270, m15137, [pd_8192], 0, 7 ; t21, t26
; m8=t16, m9=t17, m10=t18, m11=t19, m3=t20, m2=t21, m1=t22, m0=t23,
; m15=t24, m14=t25, m13=t26, m12=t27, m4=t28, m5=t29, m6=t30, m7=t31
SUMSUB_BA w, 1, 9, 0
SUMSUB_BA w, 2, 10, 0
SUMSUB_BA w, 3, 11, 0
SUMSUB_BA w, 12, 4, 0
SUMSUB_BA w, 13, 5, 0
SUMSUB_BA w, 14, 6, 0
; m0=t16, m1=t17, m2=t18, m3=t19, m11=t20, m10=t21, m9=t22, m8=t23,
; m7=t24, m6=t25, m5=t26, m4=t27, m12=t28, m13=t29, m14=t30, m15=t31
SUMSUB_BA w, 9, 6, 0
SUMSUB_BA w, 10, 5, 0
SUMSUB_BA w, 11, 4, 0
pmulhrsw m6, [pw_11585x2]
pmulhrsw m9, [pw_11585x2]
pmulhrsw m5, [pw_11585x2]
pmulhrsw m10, [pw_11585x2]
pmulhrsw m4, [pw_11585x2]
pmulhrsw m11, [pw_11585x2]
; m0=t16, m1=t17, m2=t18, m3=t19, m4=t20, m5=t21, m6=t22, m7=t23,
; m8=t24, m9=t25, m10=t26, m11=t27, m12=t28, m13=t29, m14=t30, m15=t31
; store t17-19 (and t20-22 for pass 1) - keep t24-31 in registers for
; final sumsub in pass 1, or keep t20-22 and t24-31 in registers for
; final sumsub of pass 2
mova [tmpq+ 5*%%str], m1 ; t17
mova [tmpq+ 9*%%str], m2 ; t18
mova [tmpq+13*%%str], m3 ; t19
; then do final pass to sumsub+store the two halves
mova [tmpq+17*%%str], m4 ; t20
mova [tmpq+21*%%str], m5 ; t21
mova [tmpq+25*%%str], m6 ; t22
mova m0, [tmpq+ 0*%%str] ; t0
mova m1, [tmpq+ 4*%%str] ; t1
mova m2, [tmpq+ 8*%%str] ; t2
mova m3, [tmpq+12*%%str] ; t3
mova m4, [tmpq+16*%%str] ; t4
mova m5, [tmpq+20*%%str] ; t5
mova m6, [tmpq+24*%%str] ; t6
SUMSUB_BA w, 15, 0, 7
mova [tmpq+ 3*%%str], m0 ; t15
mova m7, [tmpq+28*%%str] ; t7
SUMSUB_BA w, 14, 1, 0
SUMSUB_BA w, 13, 2, 0
SUMSUB_BA w, 12, 3, 0
SUMSUB_BA w, 11, 4, 0
SUMSUB_BA w, 10, 5, 0
SUMSUB_BA w, 9, 6, 0
SUMSUB_BA w, 8, 7, 0
TRANSPOSE8x8W 15, 14, 13, 12, 11, 10, 9, 8, 0
mova [tmpq+ 0*%%str], m15
mova [tmpq+ 4*%%str], m14
mova [tmpq+ 8*%%str], m13
mova [tmpq+12*%%str], m12
mova [tmpq+16*%%str], m11
mova [tmpq+20*%%str], m10
mova [tmpq+24*%%str], m9
mova [tmpq+28*%%str], m8
mova m0, [tmpq+ 3*%%str] ; t15
TRANSPOSE8x8W 7, 6, 5, 4, 3, 2, 1, 0, 8
mova [tmpq+ 3*%%str], m7
mova [tmpq+ 7*%%str], m6
mova [tmpq+11*%%str], m5
mova [tmpq+15*%%str], m4
mova [tmpq+19*%%str], m3
mova [tmpq+23*%%str], m2
mova [tmpq+27*%%str], m1
mova [tmpq+31*%%str], m0
mova m15, [tmpq+ 2*%%str] ; t8
mova m14, [tmpq+ 6*%%str] ; t9
mova m13, [tmpq+10*%%str] ; t10
mova m12, [tmpq+14*%%str] ; t11
mova m11, [tmpq+18*%%str] ; t12
mova m10, [tmpq+22*%%str] ; t13
mova m9, [tmpq+26*%%str] ; t14
mova m8, [tmpq+30*%%str] ; t15
mova m7, [tmpq+ 1*%%str] ; t16
mova m6, [tmpq+ 5*%%str] ; t17
mova m5, [tmpq+ 9*%%str] ; t18
mova m4, [tmpq+13*%%str] ; t19
mova m3, [tmpq+17*%%str] ; t20
mova m2, [tmpq+21*%%str] ; t21
mova m1, [tmpq+25*%%str] ; t22
SUMSUB_BA w, 7, 8, 0
mova [tmpq+ 2*%%str], m8
mova m0, [tmpq+29*%%str] ; t23
SUMSUB_BA w, 6, 9, 8
SUMSUB_BA w, 5, 10, 8
SUMSUB_BA w, 4, 11, 8
SUMSUB_BA w, 3, 12, 8
SUMSUB_BA w, 2, 13, 8
SUMSUB_BA w, 1, 14, 8
SUMSUB_BA w, 0, 15, 8
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
mova [tmpq+ 1*%%str], m0
mova [tmpq+ 5*%%str], m1
mova [tmpq+ 9*%%str], m2
mova [tmpq+13*%%str], m3
mova [tmpq+17*%%str], m4
mova [tmpq+21*%%str], m5
mova [tmpq+25*%%str], m6
mova [tmpq+29*%%str], m7
mova m8, [tmpq+ 2*%%str]
TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
mova [tmpq+ 2*%%str], m8
mova [tmpq+ 6*%%str], m9
mova [tmpq+10*%%str], m10
mova [tmpq+14*%%str], m11
mova [tmpq+18*%%str], m12
mova [tmpq+22*%%str], m13
mova [tmpq+26*%%str], m14
mova [tmpq+30*%%str], m15
; t0-7 are in [tmpq+{0,4,8,12,16,20,24,28}*%%str]
; t8-15 are in [tmpq+{2,6,10,14,18,22,26,30}*%%str]
; t16-19 and t23 are in [tmpq+{1,5,9,13,29}*%%str]
; t24-31 are in m8-15
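; the final stage is mirrored: out[n] = t[n] + t[31-n] and
; out[31-n] = t[n] - t[31-n], so %%STORE_2X2 below sums one pair, applies
; the final rounding and writes two rows at the top (dstq) and two at the
; bottom (dst_endq) of the destination in a single step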
%macro %%STORE_2X2 7-8 1 ; src[1-4], tmp[1-2], zero, inc_dst_ptrs
SUMSUB_BA w, %4, %1, %5
SUMSUB_BA w, %3, %2, %5
pmulhrsw m%4, [pw_512]
pmulhrsw m%3, [pw_512]
VP9_STORE_2X %4, %3, %5, %6, %7
pmulhrsw m%2, [pw_512]
pmulhrsw m%1, [pw_512]
VP9_STORE_2X %2, %1, %5, %6, %7, dst_endq
sub dst_endq, stride2q
; store t0-1 and t30-31
mova m0, [tmpq+ 0*%%str]
mova m1, [tmpq+ 4*%%str]
%%STORE_2X2 0, 1, 14, 15, 2, 3, 7
; store t2-3 and t28-29
mova m0, [tmpq+ 8*%%str]
mova m1, [tmpq+12*%%str]
%%STORE_2X2 0, 1, 12, 13, 2, 3, 7
; store t4-5 and t26-27
mova m0, [tmpq+16*%%str]
mova m1, [tmpq+20*%%str]
%%STORE_2X2 0, 1, 10, 11, 2, 3, 7
; store t6-7 and t24-25
mova m0, [tmpq+24*%%str]
mova m1, [tmpq+28*%%str]
%%STORE_2X2 0, 1, 8, 9, 2, 3, 7
; store t8-9 and t22-23
mova m0, [tmpq+ 2*%%str]
mova m1, [tmpq+ 6*%%str]
mova m8, [tmpq+29*%%str]
%%STORE_2X2 0, 1, 6, 8, 2, 3, 7
; store t10-11 and t20-21
mova m0, [tmpq+10*%%str]
mova m1, [tmpq+14*%%str]
%%STORE_2X2 0, 1, 4, 5, 2, 3, 7
; store t12-13 and t18-19
mova m0, [tmpq+18*%%str]
mova m1, [tmpq+22*%%str]
mova m5, [tmpq+13*%%str]
mova m4, [tmpq+ 9*%%str]
%%STORE_2X2 0, 1, 4, 5, 2, 3, 7
; store t14-15 and t16-17
mova m0, [tmpq+26*%%str]
mova m1, [tmpq+30*%%str]
mova m5, [tmpq+ 5*%%str]
mova m4, [tmpq+ 1*%%str]
%%STORE_2X2 0, 1, 4, 5, 2, 3, 7, 0
%macro VP9_IDCT_IDCT_32x32_ADD_XMM 1
cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
mova m1, [pw_11585x2]
SPLATW m0, m0, q0000
pmulhrsw m0, [pw_512]
DEFINE_ARGS dst, stride, block, cnt
VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize
VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize
DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
VP9_IDCT32_1D blockq, 1, 8
mov stride30q, strideq ; stride
lea stride2q, [strideq*2] ; stride*2
shl stride30q, 5 ; stride*32
sub stride30q, stride2q ; stride*30
lea dst_endq, [dst_bakq+stride30q]
VP9_IDCT32_1D tmpq, 2, 8
; at the end of the loop, m7 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 64, 8, m7
VP9_IDCT32_1D blockq, 1, 16
mov stride30q, strideq ; stride
lea stride2q, [strideq*2] ; stride*2
shl stride30q, 5 ; stride*32
sub stride30q, stride2q ; stride*30
lea dst_endq, [dst_bakq+stride30q]
VP9_IDCT32_1D tmpq, 2, 16
; at the end of the loop, m7 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 64, 16, m7
VP9_IDCT32_1D blockq, 1
mov stride30q, strideq ; stride
lea stride2q, [strideq*2] ; stride*2
shl stride30q, 5 ; stride*32
sub stride30q, stride2q ; stride*30
lea dst_endq, [dst_bakq+stride30q]
VP9_IDCT32_1D tmpq, 2
; at the end of the loop, m7 should still be zero
; use that to zero out block coefficients
ZERO_BLOCK blockq, 64, 32, m7
VP9_IDCT_IDCT_32x32_ADD_XMM ssse3
VP9_IDCT_IDCT_32x32_ADD_XMM avx