;******************************************************************************
;* VP9 IDCT SIMD optimizations
;* Copyright (C) 2013 Clément Bœsch <u pkh me>
;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
;* This file is part of FFmpeg.
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

pw_11585x2:      times 8 dw 23170
pw_m11585x2:     times 8 dw -23170
pw_m11585_11585: times 4 dw -11585, 11585
pw_11585_11585:  times 8 dw 11585

%macro VP9_IDCT_COEFFS 2-3 0
pw_%1x2:    times 8 dw  %1*2
pw_m%1x2:   times 8 dw -%1*2
pw_%2x2:    times 8 dw  %2*2
pw_m%2x2:   times 8 dw -%2*2
pw_m%1_%2:  times 4 dw -%1,  %2
pw_%2_%1:   times 4 dw  %2,  %1
pw_m%2_m%1: times 4 dw -%2, -%1
pw_m%2_%1:  times 4 dw -%2,  %1
pw_%1_%2:   times 4 dw  %1,  %2

VP9_IDCT_COEFFS 15137,  6270, 1
VP9_IDCT_COEFFS 16069,  3196, 1
VP9_IDCT_COEFFS  9102, 13623, 1
VP9_IDCT_COEFFS 16305,  1606
VP9_IDCT_COEFFS 10394, 12665
VP9_IDCT_COEFFS 14449,  7723
VP9_IDCT_COEFFS  4756, 15679
VP9_IDCT_COEFFS 16364,   804
VP9_IDCT_COEFFS 11003, 12140
VP9_IDCT_COEFFS 14811,  7005
VP9_IDCT_COEFFS  5520, 15426
VP9_IDCT_COEFFS 15893,  3981
VP9_IDCT_COEFFS  8423, 14053
VP9_IDCT_COEFFS 13160,  9760
VP9_IDCT_COEFFS  2404, 16207

pw_5283_13377:   times 4 dw  5283, 13377
pw_9929_13377:   times 4 dw  9929, 13377
pw_15212_m13377: times 4 dw 15212, -13377
pw_15212_9929:   times 4 dw 15212, 9929
pw_m5283_m15212: times 4 dw -5283, -15212
pw_13377x2:      times 8 dw 13377*2
pw_13377_m13377: times 4 dw 13377, -13377

pd_8192: times 4 dd 8192
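
; All coefficients above are VP9's fixed-point DCT constants: cospi_N_64 =
; round(16384 * cos(N*pi/64)), e.g. 11585 = cospi_16_64 = 16384/sqrt(2).
; The "x2" tables hold the doubled constant so a single pmulhrsw reproduces
; the reference rounding. A minimal scalar sketch of the identity (the helper
; name follows the scalar decoders and is illustrative, not an FFmpeg API):
;
;   static inline int dct_const_round_shift(int x)
;   {
;       return (x + 8192) >> 14;  /* pd_8192 rounding, DCT_CONST_BITS == 14 */
;   }
;
;   /* pmulhrsw(x, c) == (x*c + (1 << 14)) >> 15, so for 16-bit x:  */
;   /* pmulhrsw(x, 2*coef) == dct_const_round_shift(x * coef)       */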
; (a*x + b*y + round) >> shift
%macro VP9_MULSUB_2W_2X 5 ; dst1, dst2/src, round, coefs1, coefs2

%macro VP9_MULSUB_2W_4X 7 ; dst1, dst2, coef1, coef2, rnd, tmp1/src, tmp2
    VP9_MULSUB_2W_2X %7, %6, %5, [pw_m%3_%4], [pw_%4_%3]
    VP9_MULSUB_2W_2X %1, %2, %5, [pw_m%3_%4], [pw_%4_%3]

%macro VP9_UNPACK_MULSUB_2W_4X 7-9 ; dst1, dst2, (src1, src2,) coef1, coef2, rnd, tmp1, tmp2
    punpckhwd m%6, m%2, m%1
    VP9_MULSUB_2W_4X %1, %2, %3, %4, %5, %6, %7
    punpckhwd m%8, m%4, m%3
    punpcklwd m%2, m%4, m%3
    VP9_MULSUB_2W_4X %1, %2, %5, %6, %7, %8, %9
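
; The MULSUB macros above are variants of the same 2-point rotation used by
; every idct butterfly: words are interleaved and widened to dwords so the
; 16x16->32-bit products can be summed and rounded exactly. Per output pair,
; in scalar terms (a sketch; a/b are the two inputs, c1/c2 the coefficients):
;
;   dst1 = dct_const_round_shift(a * c2 - b * c1);  /* pmaddwd by [pw_m%3_%4] */
;   dst2 = dct_const_round_shift(a * c1 + b * c2);  /* pmaddwd by [pw_%4_%3]  */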
%macro VP9_UNPACK_MULSUB_2D_4X 6 ; dst1 [src1], dst2 [src2], dst3, dst4, mul1, mul2
    punpckhwd m%4, m%2, m%1
    pmaddwd m%3, m%4, [pw_m%5_%6]
    pmaddwd m%4, [pw_%6_%5]
    pmaddwd m%1, m%2, [pw_m%5_%6]
    pmaddwd m%2, [pw_%6_%5]

%macro VP9_RND_SH_SUMSUB_BA 6 ; dst1 [src1], dst2 [src2], src3, src4, tmp, round
    SUMSUB_BA d, %1, %2, %5
    SUMSUB_BA d, %3, %4, %5
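
; VP9_RND_SH_SUMSUB_BA combines the add/sub butterfly with the rounding shift
; on the raw dword products left by VP9_UNPACK_MULSUB_2D_4X. Roughly, per
; lane (a sketch):
;
;   int32_t sum  = a + b;                    /* SUMSUB_BA d on dword halves */
;   int32_t diff = a - b;
;   out1 = (int16_t)((sum  + 8192) >> 14);   /* rounded, packed back to words */
;   out2 = (int16_t)((diff + 8192) >> 14);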
%macro VP9_STORE_2X 5-6 dstq ; reg1, reg2, tmp1, tmp2, zero, dst
    movh m%4, [%6+strideq]
    movh [%6+strideq], m%4
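
; VP9_STORE_2X adds two rows of word residuals to the byte destination with
; unsigned saturation: dst bytes are unpacked to words against the zero
; register, summed, and repacked with packuswb. Per pixel (a sketch):
;
;   dst[x] = av_clip_uint8(dst[x] + residual[x]);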
%macro ZERO_BLOCK 4 ; mem, stride, nnzcpl, zero_reg
    mova [%1+%%y+%%x], %4
%assign %%x (%%x+mmsize)
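
; ZERO_BLOCK clears the consumed coefficients, which the decoder contract
; requires so the buffer is all-zero for the next block. C equivalent
; (a sketch):
;
;   for (int y = 0; y < nnzcpl; y++)
;       memset((char *) mem + y * stride, 0, stride);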

;-------------------------------------------------------------------------------------------
; void vp9_iwht_iwht_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------
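
; The inverse WHT needs no multiplies: each 1-D pass is a handful of adds,
; subtracts and one shift. For orientation, the per-row butterfly of the
; scalar reference looks roughly like this (modelled on libvpx's
; vp9_iwht4x4_16_add_c; the >>2 prescale is applied once, on load):
;
;   a = in[0] >> 2;  c = in[1] >> 2;  d = in[2] >> 2;  b = in[3] >> 2;
;   a += c;
;   d -= b;
;   e  = (a - d) >> 1;
;   b  = e - b;
;   c  = e - c;
;   a -= b;
;   d += c;
;   out[0] = a;  out[1] = b;  out[2] = c;  out[3] = d;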
%macro VP9_IWHT4_1D 0

cglobal vp9_iwht_iwht_4x4_add, 3, 3, 0, dst, stride, block, eob
    mova m0, [blockq+0*8]
    mova m1, [blockq+1*8]
    mova m2, [blockq+2*8]
    mova m3, [blockq+3*8]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    VP9_STORE_2X 0, 1, 5, 6, 4
    lea dstq, [dstq+strideq*2]
    VP9_STORE_2X 2, 3, 5, 6, 4
    ZERO_BLOCK blockq, 8, 4, m4

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IDCT4_1D_FINALIZE 0
    SUMSUB_BA w, 3, 2, 4 ; m3=t3+t0, m2=-t3+t0
    SUMSUB_BA w, 1, 0, 4 ; m1=t2+t1, m0=-t2+t1
    SWAP 0, 3, 2         ; 3102 -> 0123

%macro VP9_IDCT4_1D 0
    SUMSUB_BA w, 2, 0, 4 ; m2=IN(0)+IN(2) m0=IN(0)-IN(2)
    pmulhrsw m2, m6      ; m2=t0
    pmulhrsw m0, m6      ; m0=t1
    VP9_UNPACK_MULSUB_2W_4X 0, 2, 11585, 11585, m7, 4, 5 ; m0=t1, m2=t0
    VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270, m7, 4, 5 ; m1=t2, m3=t3
    VP9_IDCT4_1D_FINALIZE
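
; For reference, one 4-point idct row in scalar form (a sketch following the
; libvpx idct4_c structure; 11585/15137/6270 are cospi_16/8/24_64):
;
;   t0 = dct_const_round_shift((in[0] + in[2]) * 11585);
;   t1 = dct_const_round_shift((in[0] - in[2]) * 11585);
;   t2 = dct_const_round_shift(in[1] *  6270 - in[3] * 15137);
;   t3 = dct_const_round_shift(in[1] * 15137 + in[3] *  6270);
;   out[0] = t0 + t3;   out[1] = t1 + t2;
;   out[2] = t1 - t2;   out[3] = t0 - t3;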

; 2x2 top left corner
%macro VP9_IDCT4_2x2_1D 0
    pmulhrsw m0, m5 ; m0=t1
    pmulhrsw m1, m6 ; m1=t2
    pmulhrsw m3, m7 ; m3=t3
    VP9_IDCT4_1D_FINALIZE
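
; With eob <= 4 only the top-left 2x2 coefficients can be nonzero, so
; in[2] == in[3] == 0 and each rotation above collapses to two plain
; multiplies (a sketch):
;
;   t2 = dct_const_round_shift(in[1] *  6270);   /* pmulhrsw by pw_6270x2  */
;   t3 = dct_const_round_shift(in[1] * 15137);   /* pmulhrsw by pw_15137x2 */
;
; which is why this variant needs no pmaddwd-based butterflies at all.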

%macro VP9_IDCT4_WRITEOUT 0
    pmulhrsw m0, m5 ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
    VP9_STORE_2X 0, 1, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 2, 3, 6, 7, 4

cglobal vp9_idct_idct_4x4_add, 4, 4, 0, dst, stride, block, eob
    cmp eobd, 4 ; 2x2 or smaller
    cmp eobd, 1 ; faster path for when only DC is set
    mova m5, [pw_11585x2]
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (8 << 14) + 8192
    pmulhrsw m0, [pw_2048] ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
    VP9_STORE_2X 0, 0, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 0, 0, 6, 7, 4
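
; The DC-only case reduces the whole 2-D transform to scalar math on one
; coefficient: two multiplications by cospi_16_64 with intermediate rounding,
; then the final-stage (x + 8) >> 4. The (8 << 14) + 8192 constant above
; merges the second rounding and the output bias so one shift suffices.
; In C (a sketch):
;
;   dc = (dc * 11585 + 8192) >> 14;              /* pass 1            */
;   dc = (dc * 11585 + (8 << 14) + 8192) >> 18;  /* pass 2 + (x+8)>>4 */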

; faster path for when only top left 2x2 block is set
    mova m5, [pw_11585x2]
    mova m7, [pw_15137x2]
    ; partial 2x4 transpose
    SBUTTERFLY dq, 0, 2, 1
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

.idctfull: ; generic full 4x4 idct/idct
    mova m6, [pw_11585x2]
    mova m7, [pd_8192] ; rounding
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

;-------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IADST4_1D 0
    paddw xmm6, xmm3, xmm0
    pmaddwd xmm1, xmm0, [pw_5283_13377]
    pmaddwd xmm4, xmm0, [pw_9929_13377]
    pmaddwd xmm0, [pw_15212_m13377]
    pmaddwd xmm3, xmm2, [pw_15212_9929]
    pmaddwd xmm2, [pw_m5283_m15212]
    pmaddwd xmm6, [pw_13377_m13377]
%if notcpuflag(ssse3)
    pmulhrsw m3, [pw_13377x2] ; out2
%if notcpuflag(ssse3)
    movdq2q m0, xmm0 ; out3
    movdq2q m1, xmm1 ; out0
    movdq2q m2, xmm4 ; out1
%if notcpuflag(ssse3)
    movdq2q m3, xmm6 ; out2
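
; For reference, the 4-point ADST in scalar form (a sketch following libvpx's
; iadst4_c; 5283/9929/13377/15212 are sinpi_{1,2,3,4}_9, matching the
; constant pairs above):
;
;   out[0] = dct_const_round_shift( 5283*x0 + 13377*x1 + 15212*x2 +  9929*x3);
;   out[1] = dct_const_round_shift( 9929*x0 + 13377*x1 -  5283*x2 - 15212*x3);
;   out[2] = dct_const_round_shift(13377 * (x0 - x2 + x3));
;   out[3] = dct_const_round_shift(15212*x0 - 13377*x1 +  9929*x2 -  5283*x3);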

cglobal vp9_%1_%3_4x4_add, 3, 3, 6 + notcpuflag(ssse3), dst, stride, block, eob
%if WIN64 && notcpuflag(ssse3)
    movdqa xmm5, [pd_8192]
    mova m6, [pw_11585x2]
%ifnidn %1%3, iadstiadst
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

IADST4_FN idct,  IDCT4,  iadst, IADST4, sse2
IADST4_FN iadst, IADST4, idct,  IDCT4,  sse2
IADST4_FN iadst, IADST4, iadst, IADST4, sse2
IADST4_FN idct,  IDCT4,  iadst, IADST4, ssse3
IADST4_FN iadst, IADST4, idct,  IDCT4,  ssse3
IADST4_FN iadst, IADST4, iadst, IADST4, ssse3

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IDCT8_1D_FINALIZE 0
    SUMSUB_BA w, 3, 6, 5 ; m3=t0+t7, m6=t0-t7
    SUMSUB_BA w, 1, 2, 5 ; m1=t1+t6, m2=t1-t6
    SUMSUB_BA w, 7, 0, 5 ; m7=t2+t5, m0=t2-t5
    UNSCRATCH 5, 8, blockq+ 0
    SCRATCH 2, 8, blockq+ 0
    SUMSUB_BA w, 5, 4, 2 ; m5=t3+t4, m4=t3-t4

; - in: m0/m4 is in mem
; - out: m6 is in mem
; - everything is in registers (m0-7)
%macro VP9_IDCT8_1D 0
    VP9_UNPACK_MULSUB_2W_4X 5, 3, 9102, 13623, D_8192_REG, 0, 4 ; m5=t5a, m3=t6a
    VP9_UNPACK_MULSUB_2W_4X 1, 7, 16069, 3196, D_8192_REG, 0, 4 ; m1=t4a, m7=t7a
    SUMSUB_BA w, 5, 1, 0 ; m5=t4a+t5a (t4), m1=t4a-t5a (t5a)
    SUMSUB_BA w, 3, 7, 0 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
    SUMSUB_BA w, 1, 7, 0 ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
    pmulhrsw m1, W_11585x2_REG ; m1=t6
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    VP9_UNPACK_MULSUB_2W_4X 7, 1, 11585, 11585, D_8192_REG, 0, 4
    VP9_UNPACK_MULSUB_2W_4X 2, 6, 15137, 6270, D_8192_REG, 0, 4 ; m2=t2a, m6=t3a
    UNSCRATCH 0, 8, blockq+ 0 ; IN(0)
    UNSCRATCH 4, 9, blockq+64 ; IN(4)
    SCRATCH 5, 8, blockq+ 0
    SUMSUB_BA w, 4, 0, 5 ; m4=IN(0)+IN(4) m0=IN(0)-IN(4)
    pmulhrsw m4, W_11585x2_REG ; m4=t0a
    pmulhrsw m0, W_11585x2_REG ; m0=t1a
    SCRATCH 7, 9, blockq+64
    VP9_UNPACK_MULSUB_2W_4X 0, 4, 11585, 11585, D_8192_REG, 5, 7
    UNSCRATCH 7, 9, blockq+64
    SUMSUB_BA w, 6, 4, 5 ; m6=t0a+t3a (t0), m4=t0a-t3a (t3)
    SUMSUB_BA w, 2, 0, 5 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    VP9_IDCT8_1D_FINALIZE

%macro VP9_IDCT8_4x4_1D 0
    pmulhrsw m0, W_11585x2_REG ; m0=t1a/t0a
    pmulhrsw m6, m2, [pw_15137x2] ; m6=t3a
    pmulhrsw m2, [pw_6270x2] ; m2=t2a
    pmulhrsw m7, m1, [pw_16069x2] ; m7=t7a
    pmulhrsw m1, [pw_3196x2] ; m1=t4a
    pmulhrsw m5, m3, [pw_9102x2] ; m5=-t5a
    pmulhrsw m3, [pw_13623x2] ; m3=t6a
    SUMSUB_BA w, 5, 1, 4 ; m1=t4a+t5a (t4), m5=t4a-t5a (t5a)
    SUMSUB_BA w, 3, 7, 4 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
    SUMSUB_BA w, 1, 7, 4 ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
    pmulhrsw m1, W_11585x2_REG ; m1=t6
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    psubw m4, m0, m6 ; m4=t0a-t3a (t3)
    paddw m6, m0 ; m6=t0a+t3a (t0)
    SCRATCH 5, 8, blockq+ 0
    SUMSUB_BA w, 2, 0, 5 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    VP9_IDCT8_1D_FINALIZE

%macro VP9_IDCT8_2x2_1D 1
    pmulhrsw m0, W_11585x2_REG ; m0=t0
    pmulhrsw m3, m1, W_16069x2_REG ; m3=t7
    pmulhrsw m1, W_3196x2_REG ; m1=t4
    psubw m7, m3, m1 ; t5 = t7a - t4a
    paddw m5, m3, m1 ; t6 = t7a + t4a
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    pmulhrsw m5, W_11585x2_REG ; m5=t6
    ; merged VP9_IDCT8_1D_FINALIZE to make register-sharing w/ avx easier
    psubw m6, m0, m3 ; m6=t0-t7
    paddw m3, m0 ; m3=t0+t7
    psubw m2, m0, m1 ; m2=t1-t6
    paddw m1, m0 ; m1=t1+t6
%define SCRATCH_REG 1
%define SCRATCH_REG 2
%define SCRATCH_REG 8
    psubw m4, m0, m5 ; m4=t3-t4
    paddw m5, m0 ; m5=t3+t4
    SUMSUB_BA w, 7, 0, SCRATCH_REG ; m7=t2+t5, m0=t2-t5

%macro VP9_IDCT8_WRITEx2 6-8 5 ; line1, line2, tmp1, tmp2, zero, pw_1024/pw_16, shift
    pmulhrsw m%1, %6 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    VP9_STORE_2X %1, %2, %3, %4, %5
    VP9_STORE_2X %1, %2, %3, %4, %5, %8

; - m8 holds m6 (SWAP)
%macro VP9_IDCT8_WRITEOUT 0
%define ROUND_REG [pw_1024]
%define ROUND_REG [pw_16]
    SCRATCH 5, 10, blockq+16
    SCRATCH 7, 11, blockq+32
    VP9_IDCT8_WRITEx2 0, 1, 5, 7, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2 2, 3, 5, 7, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    UNSCRATCH 5, 10, blockq+16
    UNSCRATCH 7, 11, blockq+32
    VP9_IDCT8_WRITEx2 4, 5, 0, 1, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    UNSCRATCH 5, 8, blockq+ 0
    VP9_IDCT8_WRITEx2 5, 7, 0, 1, 6, ROUND_REG

%macro VP9_IDCT_IDCT_8x8_ADD_XMM 2
cglobal vp9_idct_idct_8x8_add, 4, 4, %2, dst, stride, block, eob
    mova m12, [pw_11585x2] ; often used
%define W_11585x2_REG m12
%define W_11585x2_REG [pw_11585x2]
    cmp eobd, 12 ; top left half or less
    cmp eobd, 3 ; top left corner or less
    cmp eobd, 1 ; faster path for when only DC is set
    jne .idcttopleftcorner
    pmulhrsw m0, W_11585x2_REG
    pmulhrsw m0, W_11585x2_REG
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (16 << 14) + 8192
    pmulhrsw m0, [pw_1024] ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    VP9_STORE_2X 0, 0, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 0, 0, 6, 7, 4

; faster path for when only the top-left corner is set (3 inputs: DC, the
; coefficient right of DC, and the one below DC). Note: this also works for a 2x2 block
    mova m10, [pw_3196x2]
    mova m11, [pw_16069x2]
%define W_3196x2_REG m10
%define W_16069x2_REG m11
%define W_3196x2_REG [pw_3196x2]
%define W_16069x2_REG [pw_16069x2]
    ; partial 2x8 transpose
    ; punpcklwd m0, m1 already done inside idct
    SBUTTERFLY qdq, 0, 4, 1
    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
    movh m0, [blockq + 0]
    movh m1, [blockq +16]
    movh m2, [blockq +32]
    movh m3, [blockq +48]
    ; partial 4x8 transpose
    SBUTTERFLY dq, 0, 2, 1
    SBUTTERFLY dq, 4, 6, 5
    SBUTTERFLY qdq, 0, 4, 1
    SBUTTERFLY qdq, 2, 6, 5

.idctfull: ; generic full 8x8 idct/idct
    mova m0, [blockq+ 0] ; IN(0)
    mova m1, [blockq+ 16] ; IN(1)
    mova m2, [blockq+ 32] ; IN(2)
    mova m3, [blockq+ 48] ; IN(3)
    mova m4, [blockq+ 64] ; IN(4)
    mova m5, [blockq+ 80] ; IN(5)
    mova m6, [blockq+ 96] ; IN(6)
    mova m7, [blockq+112] ; IN(7)
    mova m11, [pd_8192] ; rounding
%define D_8192_REG m11
%define D_8192_REG [pd_8192]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
    ZERO_BLOCK blockq, 16, 8, m6

VP9_IDCT_IDCT_8x8_ADD_XMM sse2, 12
VP9_IDCT_IDCT_8x8_ADD_XMM ssse3, 13
VP9_IDCT_IDCT_8x8_ADD_XMM avx, 13

;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

; - in: m0/3/4/7 are in mem [blockq+N*16]
; - out: m6 is in mem [blockq+0]
; - everything is in registers
%macro VP9_IADST8_1D 0 ; input/output=m0/1/2/3/4/5/6/7
    VP9_UNPACK_MULSUB_2D_4X 5, 2, 0, 3, 14449, 7723 ; m5/2=t3[d], m2/4=t2[d]
    VP9_UNPACK_MULSUB_2D_4X 1, 6, 4, 7, 4756, 15679 ; m1/4=t7[d], m6/7=t6[d]
    SCRATCH 4, 12, blockq+1*16
    VP9_RND_SH_SUMSUB_BA 6, 2, 7, 3, 4, D_8192_REG ; m6=t2[w], m2=t6[w]
    UNSCRATCH 4, 12, blockq+1*16
    VP9_RND_SH_SUMSUB_BA 1, 5, 4, 0, 3, D_8192_REG ; m1=t3[w], m5=t7[w]
    UNSCRATCH 0, 8, blockq+16*0
    UNSCRATCH 3, 9, blockq+16*3
    UNSCRATCH 4, 10, blockq+16*4
    UNSCRATCH 7, 11, blockq+16*7
    SCRATCH 1, 8, blockq+16*1
    SCRATCH 2, 9, blockq+16*2
    SCRATCH 5, 10, blockq+16*5
    SCRATCH 6, 11, blockq+16*6
    VP9_UNPACK_MULSUB_2D_4X 7, 0, 1, 2, 16305, 1606 ; m7/1=t1[d], m0/2=t0[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 4, 5, 6, 10394, 12665 ; m3/5=t5[d], m4/6=t4[d]
    SCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 4, 0, 6, 2, 1, D_8192_REG ; m4=t0[w], m0=t4[w]
    UNSCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 3, 7, 5, 1, 2, D_8192_REG ; m3=t1[w], m7=t5[w]
    UNSCRATCH 2, 9, blockq+16*2
    UNSCRATCH 5, 10, blockq+16*5
    SCRATCH 3, 9, blockq+16*3
    SCRATCH 4, 10, blockq+16*4
    ; m4=t0, m3=t1, m6=t2, m1=t3, m0=t4, m7=t5, m2=t6, m5=t7
    VP9_UNPACK_MULSUB_2D_4X 0, 7, 1, 3, 15137, 6270 ; m0/1=t5[d], m7/3=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 5, 2, 4, 6, 6270, 15137 ; m5/4=t6[d], m2/6=t7[d]
    SCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 5, 7, 4, 3, 1, D_8192_REG
    UNSCRATCH 1, 12, blockq+ 0*16
    PSIGNW m5, W_M1_REG ; m5=out1[w], m7=t6[w]
    VP9_RND_SH_SUMSUB_BA 2, 0, 6, 1, 3, D_8192_REG ; m2=out6[w], m0=t7[w]
    UNSCRATCH 1, 8, blockq+16*1
    UNSCRATCH 3, 9, blockq+16*3
    UNSCRATCH 4, 10, blockq+16*4
    UNSCRATCH 6, 11, blockq+16*6
    SCRATCH 2, 8, blockq+16*0
    SUMSUB_BA w, 6, 4, 2 ; m6=out0[w], m4=t2[w]
    PSIGNW m1, W_M1_REG ; m1=out7[w], m3=t3[w]
    ; m6=out0, m5=out1, m4=t2, m3=t3, m7=t6, m0=t7, m2=out6, m1=out7
    pmulhrsw m3, W_11585x2_REG
    pmulhrsw m7, W_11585x2_REG
    pmulhrsw m4, W_11585x2_REG ; out4
    pmulhrsw m0, W_11585x2_REG ; out2
    SCRATCH 5, 9, blockq+16*1
    VP9_UNPACK_MULSUB_2W_4X 4, 3, 11585, 11585, D_8192_REG, 2, 5
    VP9_UNPACK_MULSUB_2W_4X 7, 0, 11585, 11585, D_8192_REG, 2, 5
    UNSCRATCH 5, 9, blockq+16*1
    PSIGNW m3, W_M1_REG ; out3
    PSIGNW m7, W_M1_REG ; out5
    ; m6=out0, m5=out1, m0=out2, m3=out3, m4=out4, m7=out5, m2=out6, m1=out7

cglobal vp9_%1_%3_8x8_add, 3, 3, %6, dst, stride, block, eob
%define first_is_idct 1
%define first_is_idct 0
%define second_is_idct 1
%define second_is_idct 0
    mova m0, [blockq+ 0] ; IN(0)
    mova m1, [blockq+ 16] ; IN(1)
    mova m2, [blockq+ 32] ; IN(2)
%if ARCH_X86_64 || first_is_idct
    mova m3, [blockq+ 48] ; IN(3)
    mova m4, [blockq+ 64] ; IN(4)
    mova m5, [blockq+ 80] ; IN(5)
    mova m6, [blockq+ 96] ; IN(6)
%if ARCH_X86_64 || first_is_idct
    mova m7, [blockq+112] ; IN(7)
    mova m15, [pw_11585x2] ; often used
    mova m13, [pd_8192] ; rounding
%define W_11585x2_REG m15
%define D_8192_REG m13
%define W_11585x2_REG [pw_11585x2]
%define D_8192_REG [pd_8192]
%define W_M1_REG [pw_m1]
    ; note different calling conventions for idct8 vs. iadst8 on x86-32
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
%if second_is_idct == 0
    mova [blockq+ 48], m3
    mova [blockq+112], m7
    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
    ZERO_BLOCK blockq, 16, 8, m6
%undef second_is_idct

%define PSIGNW PSIGNW_MMX
IADST8_FN idct,  IDCT8,  iadst, IADST8, sse2, 15
IADST8_FN iadst, IADST8, idct,  IDCT8,  sse2, 15
IADST8_FN iadst, IADST8, iadst, IADST8, sse2, 15
%define PSIGNW PSIGNW_SSSE3
IADST8_FN idct,  IDCT8,  iadst, IADST8, ssse3, 16
IADST8_FN idct,  IDCT8,  iadst, IADST8, avx,   16
IADST8_FN iadst, IADST8, idct,  IDCT8,  ssse3, 16
IADST8_FN iadst, IADST8, idct,  IDCT8,  avx,   16
IADST8_FN iadst, IADST8, iadst, IADST8, ssse3, 16
IADST8_FN iadst, IADST8, iadst, IADST8, avx,   16

;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

; at the end of this macro, m7 is stored in [%4+15*%5]
; everything else (t0-6 and t8-15) is stored in m0-6 and m8-15
; the following sumsubs have not been done yet:
; SUMSUB_BA w, 6, 9, 15 ; t6, t9
; SUMSUB_BA w, 7, 8, 15 ; t7, t8
; or (x86-32) t0-t5 are in m0-m5, t10-t15 are in x11/9/7/5/3/1,
; and the following sumsubs have not been done yet:
; SUMSUB_BA w, x13, x14, 7 ; t6, t9
; SUMSUB_BA w, x15, x12, 7 ; t7, t8

%macro VP9_IDCT16_1D_START 5 ; src, nnzc, stride, scratch, scratch_stride
    mova m3, [%1+ 1*%3] ; IN(1)
    mova m0, [%1+ 3*%3] ; IN(3)
    pmulhrsw m4, m3, [pw_16305x2] ; t14-15
    pmulhrsw m3, [pw_1606x2] ; t8-9
    pmulhrsw m7, m0, [pw_m4756x2] ; t10-11
    pmulhrsw m0, [pw_15679x2] ; t12-13
    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
    ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
    VP9_UNPACK_MULSUB_2W_4X 2, 5, 4, 3, 15137, 6270, [pd_8192], 1, 6 ; t9, t14
    SCRATCH 4, 10, %4+ 1*%5
    SCRATCH 5, 11, %4+ 7*%5
    VP9_UNPACK_MULSUB_2W_4X 6, 1, 0, 7, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
    UNSCRATCH 5, 11, %4+ 7*%5
    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
    mova m5, [%1+ 1*%3] ; IN(1)
    mova m4, [%1+ 7*%3] ; IN(7)
    pmulhrsw m2, m5, [pw_16305x2] ; t15
    pmulhrsw m5, [pw_1606x2] ; t8
    pmulhrsw m3, m4, [pw_m10394x2] ; t9
    pmulhrsw m4, [pw_12665x2] ; t14
    mova m3, [%1+ 9*%3] ; IN(9)
    mova m2, [%1+15*%3] ; IN(15)
    ; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
    ; m11=in8, m3=in9, m12=in10 m0=in11, m8=in12, m1=in13, m13=in14, m2=in15
    VP9_UNPACK_MULSUB_2W_4X 5, 2, 16305, 1606, [pd_8192], 0, 1 ; t8, t15
    VP9_UNPACK_MULSUB_2W_4X 3, 4, 10394, 12665, [pd_8192], 0, 1 ; t9, t14
    SUMSUB_BA w, 3, 5, 0 ; t8, t9
    SUMSUB_BA w, 4, 2, 0 ; t15, t14
    VP9_UNPACK_MULSUB_2W_4X 2, 5, 15137, 6270, [pd_8192], 0, 1 ; t9, t14
    SCRATCH 4, 10, %4+ 1*%5
    SCRATCH 5, 11, %4+ 7*%5
    mova m6, [%1+ 3*%3] ; IN(3)
    mova m7, [%1+ 5*%3] ; IN(5)
    pmulhrsw m0, m7, [pw_14449x2] ; t13
    pmulhrsw m7, [pw_7723x2] ; t10
    pmulhrsw m1, m6, [pw_m4756x2] ; t11
    pmulhrsw m6, [pw_15679x2] ; t12
    mova m0, [%1+11*%3] ; IN(11)
    mova m1, [%1+13*%3] ; IN(13)
    VP9_UNPACK_MULSUB_2W_4X 7, 0, 14449, 7723, [pd_8192], 4, 5 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X 1, 6, 4756, 15679, [pd_8192], 4, 5 ; t11, t12
    ; m11=t0, m10=t1, m9=t2, m8=t3, m14=t4, m12=t5, m15=t6, m13=t7
    ; m5=t8, m3=t9, m7=t10, m1=t11, m6=t12, m0=t13, m4=t14, m2=t15
    SUMSUB_BA w, 7, 1, 4 ; t11, t10
    SUMSUB_BA w, 0, 6, 4 ; t12, t13
    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
    ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
    VP9_UNPACK_MULSUB_2W_4X 6, 1, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
    UNSCRATCH 5, 11, %4+ 7*%5
    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m13=t5, m14=t6, m15=t7
    ; m3=t8, m2=t9, m6=t10, m7=t11, m0=t12, m1=t13, m5=t14, m4=t15
    SUMSUB_BA w, 7, 3, 4 ; t8, t11
    ; backup first register
    SUMSUB_BA w, 6, 2, 7 ; t9, t10
    UNSCRATCH 4, 10, %4+ 1*%5
    SUMSUB_BA w, 0, 4, 7 ; t15, t12
    SUMSUB_BA w, 1, 5, 7 ; t14, t13
    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
    SUMSUB_BA w, 2, 5, 7
    SUMSUB_BA w, 3, 4, 7
    pmulhrsw m5, [pw_11585x2] ; t10
    pmulhrsw m4, [pw_11585x2] ; t11
    pmulhrsw m3, [pw_11585x2] ; t12
    pmulhrsw m2, [pw_11585x2] ; t13
    SCRATCH 6, 10, %4+ 1*%5
    VP9_UNPACK_MULSUB_2W_4X 5, 2, 11585, 11585, [pd_8192], 6, 7 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X 4, 3, 11585, 11585, [pd_8192], 6, 7 ; t11, t12
    UNSCRATCH 6, 10, %4+ 1*%5
    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m5=t10, m4=t11, m3=t12, m2=t13, m1=t14, m0=t15
    SCRATCH 0, 8, %4+ 1*%5
    SCRATCH 1, 9, %4+ 3*%5
    SCRATCH 2, 10, %4+ 5*%5
    SCRATCH 3, 11, %4+ 7*%5
    SCRATCH 4, 12, %4+ 9*%5
    SCRATCH 5, 13, %4+11*%5
    SCRATCH 6, 14, %4+13*%5
    mova m3, [%1+ 0*%3] ; IN(0)
    mova m4, [%1+ 2*%3] ; IN(2)
    pmulhrsw m3, [pw_11585x2] ; t0-t3
    pmulhrsw m7, m4, [pw_16069x2] ; t6-7
    pmulhrsw m4, [pw_3196x2] ; t4-5
    pmulhrsw m5, [pw_11585x2] ; t5
    pmulhrsw m6, [pw_11585x2] ; t6
    SCRATCH 7, 15, %4+12*%5
    mova m6, [%1+ 2*%3] ; IN(2)
    mova m1, [%1+ 4*%3] ; IN(4)
    mova m7, [%1+ 6*%3] ; IN(6)
    pmulhrsw m0, m1, [pw_15137x2] ; t3
    pmulhrsw m1, [pw_6270x2] ; t2
    pmulhrsw m5, m6, [pw_16069x2] ; t7
    pmulhrsw m6, [pw_3196x2] ; t4
    pmulhrsw m4, m7, [pw_m9102x2] ; t5
    pmulhrsw m7, [pw_13623x2] ; t6
    mova m4, [%1+10*%3] ; IN(10)
    mova m0, [%1+12*%3] ; IN(12)
    mova m5, [%1+14*%3] ; IN(14)
    VP9_UNPACK_MULSUB_2W_4X 1, 0, 15137, 6270, [pd_8192], 2, 3 ; t2, t3
    VP9_UNPACK_MULSUB_2W_4X 6, 5, 16069, 3196, [pd_8192], 2, 3 ; t4, t7
    VP9_UNPACK_MULSUB_2W_4X 4, 7, 9102, 13623, [pd_8192], 2, 3 ; t5, t6
    SUMSUB_BA w, 4, 6, 2 ; t4, t5
    SUMSUB_BA w, 7, 5, 2 ; t7, t6
    SUMSUB_BA w, 6, 5, 2
    pmulhrsw m5, [pw_11585x2] ; t5
    pmulhrsw m6, [pw_11585x2] ; t6
    VP9_UNPACK_MULSUB_2W_4X 5, 6, 11585, 11585, [pd_8192], 2, 3 ; t5, t6
    SCRATCH 5, 15, %4+10*%5
    mova m2, [%1+ 0*%3] ; IN(0)
    pmulhrsw m2, [pw_11585x2] ; t0 and t1
    SUMSUB_BA w, 7, 0, 5 ; t0, t7
    mova m3, [%1+ 8*%3] ; IN(8)
    ; from 3 stages back
    SUMSUB_BA w, 3, 2, 5
    pmulhrsw m3, [pw_11585x2] ; t0
    pmulhrsw m2, [pw_11585x2] ; t1
    VP9_UNPACK_MULSUB_2W_4X 2, 3, 11585, 11585, [pd_8192], 5, 0 ; t0, t1
    ; from 2 stages back
    SUMSUB_BA w, 0, 3, 5 ; t0, t3
    SUMSUB_BA w, 7, 0, 5 ; t0, t7
    UNSCRATCH 5, 15, %4+10*%5
    SCRATCH 7, 15, %4+12*%5
    SUMSUB_BA w, 1, 2, 7 ; t1, t2
    SUMSUB_BA w, 6, 1, 7 ; t1, t6
    SUMSUB_BA w, 5, 2, 7 ; t2, t5
    SUMSUB_BA w, 4, 3, 7 ; t3, t4
    SUMSUB_BA w, 0, 15, 7 ; t0, t15
    SUMSUB_BA w, 1, 14, 7 ; t1, t14
    SUMSUB_BA w, 2, 13, 7 ; t2, t13
    SUMSUB_BA w, 3, 12, 7 ; t3, t12
    SUMSUB_BA w, 4, 11, 7 ; t4, t11
    SUMSUB_BA w, 5, 10, 7 ; t5, t10

%macro %%SUMSUB_BA_STORE 5 ; reg, from_mem, to_mem, scratch, scratch_stride
    SUMSUB_BA w, 6, %1, 7

    %%SUMSUB_BA_STORE 0, 1, 1, %4, %5 ; t0, t15
    %%SUMSUB_BA_STORE 1, 3, 3, %4, %5 ; t1, t14
    %%SUMSUB_BA_STORE 2, 5, 5, %4, %5 ; t2, t13
    %%SUMSUB_BA_STORE 3, 7, 7, %4, %5 ; t3, t12
    %%SUMSUB_BA_STORE 4, 9, 9, %4, %5 ; t4, t11
    %%SUMSUB_BA_STORE 5, 11, 11, %4, %5 ; t5, t10

%macro VP9_IDCT16_1D 2-3 16 ; src, pass, nnzc
    VP9_IDCT16_1D_START %1, %3, 32, tmpq, 16
    ; backup a different register
    mova m7, [tmpq+15*16]
    mova [tmpq+ 1*16], m15
    SUMSUB_BA w, 6, 9, 15 ; t6, t9
    SUMSUB_BA w, 7, 8, 15 ; t7, t8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 15
    mova m15, [tmpq+ 1*16]
    TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
    mova [tmpq+ 80], m10
    mova [tmpq+112], m11
    mova [tmpq+144], m12
    mova [tmpq+176], m13
    mova [tmpq+208], m14
    mova [tmpq+240], m15
    mova m6, [tmpq+13*16]
    mova m7, [tmpq+14*16]
    SUMSUB_BA w, 6, 7 ; t6, t9
    mova [tmpq+14*16], m6
    mova [tmpq+13*16], m7
    mova m7, [tmpq+15*16]
    mova m6, [tmpq+12*16]
    SUMSUB_BA w, 7, 6 ; t7, t8
    mova [tmpq+15*16], m6
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+14*16], [tmpq+ 8*16], 1
    mova [tmpq+ 0*16], m0
    mova [tmpq+ 2*16], m1
    mova [tmpq+ 4*16], m2
    mova [tmpq+ 6*16], m3
    mova [tmpq+10*16], m5
    mova [tmpq+12*16], m6
    mova [tmpq+14*16], m7
    mova m0, [tmpq+15*16]
    mova m1, [tmpq+13*16]
    mova m2, [tmpq+11*16]
    mova m3, [tmpq+ 9*16]
    mova m4, [tmpq+ 7*16]
    mova m5, [tmpq+ 5*16]
    mova m7, [tmpq+ 1*16]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+ 3*16], [tmpq+ 9*16], 1
    mova [tmpq+ 1*16], m0
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m2
    mova [tmpq+ 7*16], m3
    mova [tmpq+11*16], m5
    mova [tmpq+13*16], m6
    mova [tmpq+15*16], m7
    VP9_IDCT16_1D_START %1, %3, 32, %1, 32
%define ROUND_REG [pw_512]
%define ROUND_REG [pw_32]
    ; backup more registers
    VP9_IDCT8_WRITEx2 0, 1, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 2, 3, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 4, 5, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    ; restore from cache
    SWAP 0, 7 ; move zero from m7 to m0
    SUMSUB_BA w, 6, 9, 3 ; t6, t9
    SUMSUB_BA w, 7, 8, 3 ; t7, t8
    VP9_IDCT8_WRITEx2 6, 7, 3, 4, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 8, 9, 3, 4, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 10, 11, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 12, 13, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 14, 15, 1, 2, 0, ROUND_REG, 6
    mova [tmpq+ 0*32], m5
    VP9_IDCT8_WRITEx2 0, 1, 5, 6, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 2, 3, 5, 6, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    SWAP 0, 7 ; move zero from m7 to m0
    mova m5, [tmpq+ 0*32]
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m4, [tmpq+13*32]
    mova m7, [tmpq+14*32]
    mova m5, [tmpq+15*32]
    mova m6, [tmpq+12*32]
    SUMSUB_BADC w, 4, 7, 5, 6, 1
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m4, [tmpq+11*32]
    mova m5, [tmpq+ 9*32]
    mova m6, [tmpq+ 7*32]
    mova m7, [tmpq+ 5*32]
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m4, [tmpq+ 3*32]
    mova m5, [tmpq+ 1*32]
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]

%macro VP9_STORE_2XFULL 6-7 strideq ; dc, tmp1, tmp2, tmp3, tmp4, zero, stride
    punpcklbw m%2, m%3, m%6
    punpcklbw m%4, m%5, m%6

%macro VP9_IDCT_IDCT_16x16_ADD_XMM 1
cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob
    ; 2x2=eob=3, 4x4=eob=10
    cmp eobd, 1 ; faster path for when only DC is set
    cmp eobd, 1 ; faster path for when only DC is set
    mova m1, [pw_11585x2]
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (32 << 14) + 8192
    SPLATW m0, m0, q0000
    pmulhrsw m0, [pw_512]
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
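
; The 16x16 transform runs as two passes over a 512-byte stack buffer, each
; pass handling the block in two 8-wide slices (one xmm register holds 8
; words). Schematically (a sketch; the helper names are illustrative):
;
;   for (int slice = 0; slice < 2; slice++)       /* pass 1: columns    */
;       idct16_1d_cols(block, slice, tmp);
;   for (int slice = 0; slice < 2; slice++)       /* pass 2: rows + add */
;       idct16_1d_rows_add(tmp, slice, dst, stride);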
    DEFINE_ARGS dst, stride, block, cnt, dst_bak, tmp
    VP9_IDCT16_1D blockq, 1, 8
    VP9_IDCT16_1D tmpq, 2, 8
    lea dstq, [dst_bakq+8]
    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 8, m0
    VP9_IDCT16_1D blockq, 1
    VP9_IDCT16_1D tmpq, 2
    lea dstq, [dst_bakq+8]
    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 16, m0

VP9_IDCT_IDCT_16x16_ADD_XMM sse2
VP9_IDCT_IDCT_16x16_ADD_XMM ssse3
VP9_IDCT_IDCT_16x16_ADD_XMM avx

;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IADST16_1D 2 ; src, pass
    mova m0, [%1+ 0*32] ; in0
    mova m1, [%1+15*32] ; in15
    mova m2, [%1+ 7*32] ; in7
    mova m3, [%1+ 8*32] ; in8
    VP9_UNPACK_MULSUB_2D_4X 1, 0, 4, 5, 16364, 804 ; m1/4=t1[d], m0/5=t0[d]
    VP9_UNPACK_MULSUB_2D_4X 2, 3, 7, 6, 11003, 12140 ; m2/7=t9[d], m3/6=t8[d]
    SCRATCH 4, 8, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 3, 0, 6, 5, 4, [pd_8192] ; m3=t0[w], m0=t8[w]
    UNSCRATCH 4, 8, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 2, 1, 7, 4, 5, [pd_8192] ; m2=t1[w], m1=t9[w]
    SCRATCH 0, 10, tmpq+ 0*%%str
    SCRATCH 1, 11, tmpq+15*%%str
    mova [tmpq+ 7*%%str], m2
    mova [tmpq+ 8*%%str], m3
    mova m1, [%1+ 2*32] ; in2
    mova m0, [%1+13*32] ; in13
    mova m3, [%1+ 5*32] ; in5
    mova m2, [%1+10*32] ; in10
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 15893, 3981 ; m0/6=t3[d], m1/7=t2[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 4, 5, 8423, 14053 ; m3/4=t11[d], m2/5=t10[d]
    SCRATCH 4, 12, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 2, 1, 5, 7, 4, [pd_8192] ; m2=t2[w], m1=t10[w]
    UNSCRATCH 4, 12, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 3, 0, 4, 6, 5, [pd_8192] ; m3=t3[w], m0=t11[w]
    SCRATCH 0, 12, tmpq+ 2*%%str
    SCRATCH 1, 13, tmpq+13*%%str
    mova [tmpq+ 5*%%str], m2
    mova [tmpq+10*%%str], m3
    mova m2, [%1+ 4*32] ; in4
    mova m3, [%1+11*32] ; in11
    mova m0, [%1+ 3*32] ; in3
    mova m1, [%1+12*32] ; in12
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 14811, 7005 ; m3/7=t5[d], m2/6=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 4, 5, 5520, 15426 ; m0/4=t13[d], m1/5=t12[d]
    SCRATCH 4, 9, tmpq+ 4*%%str
    VP9_RND_SH_SUMSUB_BA 1, 2, 5, 6, 4, [pd_8192] ; m1=t4[w], m2=t12[w]
    UNSCRATCH 4, 9, tmpq+ 4*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 4, 7, 6, [pd_8192] ; m0=t5[w], m3=t13[w]
    SCRATCH 0, 8, tmpq+ 4*%%str
    mova [tmpq+11*%%str], m1 ; t4:m1->r11
    UNSCRATCH 0, 10, tmpq+ 0*%%str
    UNSCRATCH 1, 11, tmpq+15*%%str
    ; round 2 interleaved part 1
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 16069, 3196 ; m1/7=t8[d], m0/6=t9[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 5, 4, 3196, 16069 ; m3/5=t12[d], m2/4=t13[d]
    SCRATCH 4, 9, tmpq+ 3*%%str
    VP9_RND_SH_SUMSUB_BA 3, 1, 5, 7, 4, [pd_8192] ; m3=t8[w], m1=t12[w]
    UNSCRATCH 4, 9, tmpq+ 3*%%str
    VP9_RND_SH_SUMSUB_BA 2, 0, 4, 6, 5, [pd_8192] ; m2=t9[w], m0=t13[w]
    SCRATCH 0, 10, tmpq+ 0*%%str
    SCRATCH 1, 11, tmpq+15*%%str
    SCRATCH 2, 14, tmpq+ 3*%%str
    SCRATCH 3, 15, tmpq+12*%%str
    mova m2, [%1+ 6*32] ; in6
    mova m3, [%1+ 9*32] ; in9
    mova m0, [%1+ 1*32] ; in1
    mova m1, [%1+14*32] ; in14
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 13160, 9760 ; m3/7=t7[d], m2/6=t6[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 4, 5, 2404, 16207 ; m0/4=t15[d], m1/5=t14[d]
    SCRATCH 4, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 1, 2, 5, 6, 4, [pd_8192] ; m1=t6[w], m2=t14[w]
    UNSCRATCH 4, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 4, 7, 6, [pd_8192] ; m0=t7[w], m3=t15[w]
    ; r8=t0, r7=t1, r5=t2, r10=t3, r11=t4, m8|r4=t5, m1=t6, m0=t7
    ; m10|r0=t8, m11|r15=t9, m13|r13=t10, m12|r2=t11, m14|r3=t12, m15|r12=t13, m2=t14, m3=t15
    UNSCRATCH 4, 12, tmpq+ 2*%%str
    UNSCRATCH 5, 13, tmpq+13*%%str
    SCRATCH 0, 12, tmpq+ 1*%%str
    SCRATCH 1, 13, tmpq+14*%%str
    ; remainder of round 2 (rest of t8-15)
    VP9_UNPACK_MULSUB_2D_4X 5, 4, 6, 7, 9102, 13623 ; m5/6=t11[d], m4/7=t10[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 1, 0, 13623, 9102 ; m3/1=t14[d], m2/0=t15[d]
    SCRATCH 0, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 3, 4, 1, 7, 0, [pd_8192] ; m3=t10[w], m4=t14[w]
    UNSCRATCH 0, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 2, 5, 0, 6, 1, [pd_8192] ; m2=t11[w], m5=t15[w]
    ; m15|r12=t8, m14|r3=t9, m3=t10, m2=t11, m11|r15=t12, m10|r0=t13, m4=t14, m5=t15
    UNSCRATCH 6, 14, tmpq+ 3*%%str
    UNSCRATCH 7, 15, tmpq+12*%%str
    SUMSUB_BA w, 3, 7, 1
    PSIGNW m3, [pw_m1] ; m3=out1[w], m7=t10[w]
    SUMSUB_BA w, 2, 6, 1 ; m2=out14[w], m6=t11[w]
    SUMSUB_BA w, 7, 6, 1
    pmulhrsw m7, [pw_11585x2] ; m7=out6[w]
    pmulhrsw m6, [pw_11585x2] ; m6=out9[w]
    VP9_UNPACK_MULSUB_2W_4X 6, 7, 11585, 11585, [pd_8192], 1, 0
    mova [tmpq+ 3*%%str], m6
    mova [tmpq+ 6*%%str], m7
    UNSCRATCH 6, 10, tmpq+ 0*%%str
    UNSCRATCH 7, 11, tmpq+15*%%str
    mova [tmpq+13*%%str], m2
    SCRATCH 3, 11, tmpq+ 9*%%str
    VP9_UNPACK_MULSUB_2D_4X 7, 6, 2, 3, 15137, 6270 ; m6/3=t13[d], m7/2=t12[d]
    VP9_UNPACK_MULSUB_2D_4X 5, 4, 1, 0, 6270, 15137 ; m5/1=t14[d], m4/0=t15[d]
    SCRATCH 0, 9, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 5, 6, 1, 3, 0, [pd_8192] ; m5=out2[w], m6=t14[w]
    UNSCRATCH 0, 9, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 4, 7, 0, 2, 1, [pd_8192]
    PSIGNW m4, [pw_m1] ; m4=out13[w], m7=t15[w]
    SUMSUB_BA w, 7, 6, 1
    pmulhrsw m7, [pw_m11585x2] ; m7=out5[w]
    pmulhrsw m6, [pw_11585x2] ; m6=out10[w]
    VP9_UNPACK_MULSUB_2W_4X 7, 6, 11585, 11585, [pd_8192], 1, 0
    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, m6=out10, m4=out13, r2=out14
    mova m2, [tmpq+ 8*%%str]
    mova m3, [tmpq+ 7*%%str]
    mova m1, [tmpq+11*%%str]
    mova [tmpq+ 7*%%str], m6
    mova [tmpq+11*%%str], m4
    mova m4, [tmpq+ 5*%%str]
    SCRATCH 5, 14, tmpq+ 5*%%str
    SCRATCH 7, 15, tmpq+ 8*%%str
    UNSCRATCH 6, 8, tmpq+ 4*%%str
    UNSCRATCH 5, 12, tmpq+ 1*%%str
    UNSCRATCH 7, 13, tmpq+14*%%str
    ; m2=t0, m3=t1, m9=t2, m0=t3, m1=t4, m8=t5, m13=t6, m12=t7
    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14
    SUMSUB_BA w, 1, 2, 0 ; m1=t0[w], m2=t4[w]
    mova m0, [tmpq+10*%%str]
    SCRATCH 1, 12, tmpq+ 1*%%str
    SUMSUB_BA w, 6, 3, 1 ; m8=t1[w], m3=t5[w]
    SCRATCH 6, 13, tmpq+ 4*%%str
    SUMSUB_BA w, 7, 4, 1 ; m13=t2[w], m9=t6[w]
    SCRATCH 7, 8, tmpq+10*%%str
    SUMSUB_BA w, 5, 0, 1 ; m12=t3[w], m0=t7[w]
    SCRATCH 5, 9, tmpq+14*%%str
    VP9_UNPACK_MULSUB_2D_4X 2, 3, 7, 5, 15137, 6270 ; m2/6=t5[d], m3/10=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 4, 1, 6, 6270, 15137 ; m0/14=t6[d], m9/15=t7[d]
    SCRATCH 6, 10, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 1, 5, 6, [pd_8192]
    UNSCRATCH 6, 10, tmpq+ 0*%%str
    PSIGNW m0, [pw_m1] ; m0=out3[w], m3=t6[w]
    VP9_RND_SH_SUMSUB_BA 4, 2, 6, 7, 5, [pd_8192] ; m9=out12[w], m2=t7[w]
    UNSCRATCH 1, 8, tmpq+10*%%str
    UNSCRATCH 5, 9, tmpq+14*%%str
    UNSCRATCH 6, 12, tmpq+ 1*%%str
    UNSCRATCH 7, 13, tmpq+ 4*%%str
    SCRATCH 4, 9, tmpq+14*%%str
    SUMSUB_BA w, 1, 6, 4 ; m13=out0[w], m1=t2[w]
    SUMSUB_BA w, 5, 7, 4
    PSIGNW m5, [pw_m1] ; m12=out15[w], m8=t3[w]
    SUMSUB_BA w, 7, 6, 4
    pmulhrsw m7, [pw_m11585x2] ; m8=out7[w]
    pmulhrsw m6, [pw_11585x2] ; m1=out8[w]
    SUMSUB_BA w, 3, 2, 4
    pmulhrsw m3, [pw_11585x2] ; m3=out4[w]
    pmulhrsw m2, [pw_11585x2] ; m2=out11[w]
    SCRATCH 5, 8, tmpq+10*%%str
    VP9_UNPACK_MULSUB_2W_4X 7, 6, 11585, 11585, [pd_8192], 5, 4
    VP9_UNPACK_MULSUB_2W_4X 2, 3, 11585, 11585, [pd_8192], 5, 4
    UNSCRATCH 5, 8, tmpq+10*%%str
    ; m13=out0, m0=out3, m3=out4, m8=out7, m1=out8, m2=out11, m9=out12, m12=out15
    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14
    mova m13, [tmpq+ 6*%%str]
    TRANSPOSE8x8W 1, 11, 14, 0, 3, 15, 13, 7, 10
    mova [tmpq+ 0*16], m1
    mova [tmpq+ 2*16], m11
    mova [tmpq+ 4*16], m14
    mova [tmpq+ 6*16], m0
    mova m1, [tmpq+ 3*%%str]
    mova m11, [tmpq+ 7*%%str]
    mova m14, [tmpq+11*%%str]
    mova m0, [tmpq+13*%%str]
    mova [tmpq+ 8*16], m3
    mova [tmpq+10*16], m15
    mova [tmpq+12*16], m13
    mova [tmpq+14*16], m7
    TRANSPOSE8x8W 6, 1, 11, 2, 9, 14, 0, 5, 10
    mova [tmpq+ 1*16], m6
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m11
    mova [tmpq+ 7*16], m2
    mova [tmpq+ 9*16], m9
    mova [tmpq+11*16], m14
    mova [tmpq+13*16], m0
    mova [tmpq+15*16], m5
    mova [tmpq+12*%%str], m2
    mova [tmpq+ 1*%%str], m5
    mova [tmpq+15*%%str], m6
    mova m2, [tmpq+ 9*%%str]
    mova m5, [tmpq+ 5*%%str]
    mova m6, [tmpq+ 8*%%str]
    TRANSPOSE8x8W 1, 2, 5, 0, 3, 6, 4, 7, [tmpq+ 6*%%str], [tmpq+ 8*%%str], 1
    mova [tmpq+ 0*16], m1
    mova [tmpq+ 2*16], m2
    mova [tmpq+ 4*16], m5
    mova [tmpq+ 6*16], m0
    mova [tmpq+10*16], m6
    mova m3, [tmpq+12*%%str]
    mova [tmpq+12*16], m4
    mova m4, [tmpq+14*%%str]
    mova [tmpq+14*16], m7
    mova m0, [tmpq+15*%%str]
    mova m1, [tmpq+ 3*%%str]
    mova m2, [tmpq+ 7*%%str]
    mova m5, [tmpq+11*%%str]
    mova m7, [tmpq+ 1*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+13*%%str], [tmpq+ 9*%%str], 1
    mova [tmpq+ 1*16], m0
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m2
    mova [tmpq+ 7*16], m3
    mova [tmpq+11*16], m5
    mova [tmpq+13*16], m6
    mova [tmpq+15*16], m7
%define ROUND_REG [pw_512]
%define ROUND_REG [pw_32]
    mova m12, [tmpq+ 6*%%str]
    VP9_IDCT8_WRITEx2 1, 11, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 14, 0, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 3, 15, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 12, 7, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m1, [tmpq+ 3*%%str]
    mova m11, [tmpq+ 7*%%str]
    mova m14, [tmpq+11*%%str]
    mova m0, [tmpq+13*%%str]
    VP9_IDCT8_WRITEx2 6, 1, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 11, 2, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 9, 14, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 0, 5, 10, 8, 4, ROUND_REG, 6
    mova [tmpq+ 0*%%str], m2
    mova [tmpq+ 1*%%str], m5
    mova [tmpq+ 2*%%str], m6
    mova m2, [tmpq+ 9*%%str]
    VP9_IDCT8_WRITEx2 1, 2, 5, 6, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 5*%%str]
    VP9_IDCT8_WRITEx2 5, 0, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 8*%%str]
    VP9_IDCT8_WRITEx2 3, 5, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 6*%%str]
    VP9_IDCT8_WRITEx2 5, 7, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+ 2*%%str]
    mova m3, [tmpq+ 3*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+ 7*%%str]
    mova m3, [tmpq+ 0*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+14*%%str]
    mova m3, [tmpq+11*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+13*%%str]
    mova m3, [tmpq+ 1*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6

cglobal vp9_%1_%3_16x16_add, 3, 6, 16, 512, dst, stride, block, cnt, dst_bak, tmp
    lea dstq, [dst_bakq+8]
    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 16, m0

%define PSIGNW PSIGNW_MMX
IADST16_FN idct,  IDCT16,  iadst, IADST16, sse2
IADST16_FN iadst, IADST16, idct,  IDCT16,  sse2
IADST16_FN iadst, IADST16, iadst, IADST16, sse2
%define PSIGNW PSIGNW_SSSE3
IADST16_FN idct,  IDCT16,  iadst, IADST16, ssse3
IADST16_FN iadst, IADST16, idct,  IDCT16,  ssse3
IADST16_FN iadst, IADST16, iadst, IADST16, ssse3
IADST16_FN idct,  IDCT16,  iadst, IADST16, avx
IADST16_FN iadst, IADST16, idct,  IDCT16,  avx
IADST16_FN iadst, IADST16, iadst, IADST16, avx

;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_32x32_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IDCT32_1D 2-3 32 ; src, pass, nnzc
%assign %%str 16*%2*%2
    ; first do t0-15; this can be done identically to the 16x16 idct
    VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq, 2*%%str
    ; store everything on stack to make space available for t16-31
    ; we store interleaved with the output of the second half (t16-31)
    ; so we don't need to allocate extra stack space
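    ; Conceptually, the 32-point idct is an even half, which is exactly the
    ; 16-point idct applied to the even-indexed inputs (hence the shared START
    ; macro above), plus an odd half computing t16-t31, joined by a final
    ; sumsub stage (a sketch):
    ;
    ;   idct16(in[0], in[2], ..., in[30], t);        /* t[0..15]  */
    ;   odd_half(in[1], in[3], ..., in[31], t + 16); /* t[16..31] */
    ;   for (int i = 0; i < 16; i++) {
    ;       out[i]      = t[i] + t[31 - i];
    ;       out[31 - i] = t[i] - t[31 - i];
    ;   }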
    mova [tmpq+ 0*%%str], m0 ; t0
    mova [tmpq+ 4*%%str], m1 ; t1
    mova [tmpq+ 8*%%str], m2 ; t2
    mova [tmpq+12*%%str], m3 ; t3
    mova [tmpq+16*%%str], m4 ; t4
    mova [tmpq+20*%%str], m5 ; t5
    mova [tmpq+22*%%str], m10 ; t10
    mova [tmpq+18*%%str], m11 ; t11
    mova [tmpq+14*%%str], m12 ; t12
    mova [tmpq+10*%%str], m13 ; t13
    mova [tmpq+ 6*%%str], m14 ; t14
    mova [tmpq+ 2*%%str], m15 ; t15
    mova m0, [tmpq+30*%%str]
    UNSCRATCH 1, 6, tmpq+26*%%str
    UNSCRATCH 2, 8, tmpq+24*%%str
    UNSCRATCH 3, 9, tmpq+28*%%str
    SUMSUB_BA w, 1, 3, 4 ; t6, t9
    SUMSUB_BA w, 0, 2, 4 ; t7, t8
    mova [tmpq+24*%%str], m1 ; t6
    mova [tmpq+28*%%str], m0 ; t7
    mova [tmpq+30*%%str], m2 ; t8
    mova [tmpq+26*%%str], m3 ; t9
    ; then, secondly, do t16-31
    pmulhrsw m1, m4, [pw_16364x2] ; t31
    pmulhrsw m4, [pw_804x2] ; t16
    VP9_UNPACK_MULSUB_2W_4X 5, 0, 1, 4, 16069, 3196, [pd_8192], 6, 2 ; t17, t30
    pmulhrsw m3, m7, [pw_m5520x2] ; t19
    pmulhrsw m7, [pw_15426x2] ; t28
    SCRATCH 4, 13, tmpq+ 1*%%str
    SCRATCH 5, 12, tmpq+15*%%str
    VP9_UNPACK_MULSUB_2W_4X 2, 6, 7, 3, 3196, m16069, [pd_8192], 4, 5 ; t18, t29
    pmulhrsw m5, m0, [pw_16364x2]
    pmulhrsw m0, [pw_804x2]
    pmulhrsw m4, m1, [pw_m11003x2]
    pmulhrsw m1, [pw_12140x2]
    VP9_UNPACK_MULSUB_2W_4X 0, 5, 16364, 804, [pd_8192], 2, 3 ; t16, t31
    VP9_UNPACK_MULSUB_2W_4X 4, 1, 11003, 12140, [pd_8192], 2, 3 ; t17, t30
    SUMSUB_BA w, 4, 0, 2
    SUMSUB_BA w, 1, 5, 2
    VP9_UNPACK_MULSUB_2W_4X 5, 0, 16069, 3196, [pd_8192], 2, 3 ; t17, t30
    SCRATCH 4, 13, tmpq+ 1*%%str
    SCRATCH 5, 12, tmpq+15*%%str
    pmulhrsw m7, m3, [pw_14811x2]
    pmulhrsw m3, [pw_7005x2]
    pmulhrsw m6, m2, [pw_m5520x2]
    pmulhrsw m2, [pw_15426x2]
    VP9_UNPACK_MULSUB_2W_4X 3, 7, 14811, 7005, [pd_8192], 4, 5 ; t18, t29
    VP9_UNPACK_MULSUB_2W_4X 6, 2, 5520, 15426, [pd_8192], 4, 5 ; t19, t28
    SUMSUB_BA w, 3, 6, 4
    SUMSUB_BA w, 7, 2, 4
    VP9_UNPACK_MULSUB_2W_4X 2, 6, 3196, m16069, [pd_8192], 4, 5 ; t18, t29
    UNSCRATCH 5, 12, tmpq+15*%%str
    SUMSUB_BA w, 6, 0, 4
    mova [tmpq+25*%%str], m6 ; t19
    UNSCRATCH 4, 13, tmpq+ 1*%%str
    SUMSUB_BA w, 7, 1, 6
    SUMSUB_BA w, 3, 4, 6
    mova [tmpq+23*%%str], m3 ; t16
    SUMSUB_BA w, 2, 5, 6
    VP9_UNPACK_MULSUB_2W_4X 0, 5, 15137, 6270, [pd_8192], 6, 3 ; t18, t29
    VP9_UNPACK_MULSUB_2W_4X 1, 4, 15137, 6270, [pd_8192], 6, 3 ; t19, t28
    SCRATCH 0, 10, tmpq+ 1*%%str
    SCRATCH 1, 11, tmpq+ 7*%%str
    SCRATCH 2, 9, tmpq+ 9*%%str
    SCRATCH 4, 14, tmpq+15*%%str
    SCRATCH 5, 15, tmpq+17*%%str
    SCRATCH 7, 13, tmpq+31*%%str
    pmulhrsw m5, m0, [pw_15893x2] ; t27
    pmulhrsw m0, [pw_3981x2] ; t20
    VP9_UNPACK_MULSUB_2W_4X 1, 4, 5, 0, 9102, 13623, [pd_8192], 7, 2 ; t21, t26
    pmulhrsw m6, m3, [pw_m2404x2] ; t23
    pmulhrsw m3, [pw_16207x2] ; t24
    SCRATCH 5, 8, tmpq+ 5*%%str
    SCRATCH 4, 12, tmpq+11*%%str
    VP9_UNPACK_MULSUB_2W_4X 7, 2, 3, 6, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
    pmulhrsw m1, m4, [pw_15893x2]
    pmulhrsw m4, [pw_3981x2]
    pmulhrsw m0, m5, [pw_m8423x2]
    pmulhrsw m5, [pw_14053x2]
    VP9_UNPACK_MULSUB_2W_4X 4, 1, 15893, 3981, [pd_8192], 2, 3 ; t20, t27
    VP9_UNPACK_MULSUB_2W_4X 0, 5, 8423, 14053, [pd_8192], 2, 3 ; t21, t26
    SUMSUB_BA w, 0, 4, 2
    SUMSUB_BA w, 5, 1, 2
    VP9_UNPACK_MULSUB_2W_4X 1, 4, 9102, 13623, [pd_8192], 2, 3 ; t21, t26
    SCRATCH 5, 8, tmpq+ 5*%%str
    SCRATCH 4, 12, tmpq+11*%%str
    pmulhrsw m3, m6, [pw_13160x2]
    pmulhrsw m6, [pw_9760x2]
    pmulhrsw m2, m7, [pw_m2404x2]
    pmulhrsw m7, [pw_16207x2]
    VP9_UNPACK_MULSUB_2W_4X 6, 3, 13160, 9760, [pd_8192], 4, 5 ; t22, t25
    VP9_UNPACK_MULSUB_2W_4X 2, 7, 2404, 16207, [pd_8192], 4, 5 ; t23, t24
    SUMSUB_BA w, 6, 2, 4
    SUMSUB_BA w, 3, 7, 4
    VP9_UNPACK_MULSUB_2W_4X 7, 2, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
    ; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
    ; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31
    UNSCRATCH 4, 12, tmpq+11*%%str
    SUMSUB_BA w, 0, 6, 5
    SUMSUB_BA w, 4, 2, 5
    UNSCRATCH 5, 8, tmpq+ 5*%%str
    SCRATCH 4, 8, tmpq+11*%%str
    SUMSUB_BA w, 1, 7, 4
    SUMSUB_BA w, 5, 3, 4
    SCRATCH 5, 12, tmpq+ 5*%%str
    VP9_UNPACK_MULSUB_2W_4X 3, 6, 6270, m15137, [pd_8192], 4, 5 ; t20, t27
    VP9_UNPACK_MULSUB_2W_4X 2, 7, 6270, m15137, [pd_8192], 4, 5 ; t21, t26
    ; m8[s]=t16, m9=t17, m5=t18, m4[s]=t19, m12=t20, m13=t21, m1=t22, m0=t23,
    ; m15=t24, m14=t25, m2=t26, m3=t27, m11=t28, m10=t29, m6=t30, m7=t31
    UNSCRATCH 5, 9, tmpq+ 9*%%str
    mova m4, [tmpq+23*%%str] ; t16
    SUMSUB_BA w, 1, 5, 9
    SUMSUB_BA w, 0, 4, 9
    SUMSUB_BADC w, 1, 5, 0, 4
    mova [tmpq+29*%%str], m1 ; t17
    mova [tmpq+21*%%str], m0 ; t16
    UNSCRATCH 0, 10, tmpq+ 1*%%str
    UNSCRATCH 1, 11, tmpq+ 7*%%str
    SUMSUB_BA w, 2, 0, 9
    SUMSUB_BA w, 3, 1, 9
    SUMSUB_BADC w, 2, 0, 3, 1
    mova [tmpq+ 9*%%str], m2 ; t18
    mova [tmpq+13*%%str], m3 ; t19
    SCRATCH 0, 10, tmpq+23*%%str
    SCRATCH 1, 11, tmpq+27*%%str
    UNSCRATCH 2, 14, tmpq+15*%%str
    UNSCRATCH 3, 15, tmpq+17*%%str
    SUMSUB_BA w, 6, 2, 0
    SUMSUB_BA w, 7, 3, 0
    SCRATCH 6, 14, tmpq+ 3*%%str
    SCRATCH 7, 15, tmpq+ 7*%%str
    UNSCRATCH 0, 8, tmpq+11*%%str
    mova m1, [tmpq+25*%%str] ; t19
    UNSCRATCH 6, 12, tmpq+ 5*%%str
    UNSCRATCH 7, 13, tmpq+31*%%str
    SUMSUB_BA w, 0, 1, 9
    SUMSUB_BA w, 6, 7, 9
    SUMSUB_BADC w, 0, 1, 6, 7
    ; m0=t16, m1=t17, m2=t18, m3=t19, m11=t20, m10=t21, m9=t22, m8=t23,
    ; m7=t24, m6=t25, m5=t26, m4=t27, m12=t28, m13=t29, m14=t30, m15=t31
    SUMSUB_BA w, 4, 7, 8
    SUMSUB_BA w, 5, 1, 8
    SUMSUB_BADC w, 4, 7, 5, 1
    pmulhrsw m7, [pw_11585x2]
    pmulhrsw m4, [pw_11585x2]
    pmulhrsw m1, [pw_11585x2]
    pmulhrsw m5, [pw_11585x2]
    mova [tmpq+ 5*%%str], m7 ; t23
    SCRATCH 1, 13, tmpq+25*%%str
    UNSCRATCH 7, 10, tmpq+23*%%str
    UNSCRATCH 1, 11, tmpq+27*%%str
    SUMSUB_BA w, 7, 3, 10
    SUMSUB_BA w, 1, 2, 10
    SUMSUB_BADC w, 7, 3, 1, 2
    pmulhrsw m3, [pw_11585x2]
    pmulhrsw m7, [pw_11585x2]
    pmulhrsw m2, [pw_11585x2]
    pmulhrsw m1, [pw_11585x2]
    SCRATCH 0, 8, tmpq+15*%%str
    SCRATCH 6, 9, tmpq+17*%%str
    VP9_UNPACK_MULSUB_2W_4X 7, 4, 11585, 11585, [pd_8192], 0, 6
    mova [tmpq+ 5*%%str], m7 ; t23
    UNSCRATCH 7, 10, tmpq+23*%%str
    VP9_UNPACK_MULSUB_2W_4X 1, 5, 11585, 11585, [pd_8192], 0, 6
    SCRATCH 1, 13, tmpq+25*%%str
    UNSCRATCH 1, 11, tmpq+27*%%str
    VP9_UNPACK_MULSUB_2W_4X 3, 7, 11585, 11585, [pd_8192], 0, 6
    VP9_UNPACK_MULSUB_2W_4X 2, 1, 11585, 11585, [pd_8192], 0, 6
    UNSCRATCH 0, 8, tmpq+15*%%str
    UNSCRATCH 6, 9, tmpq+17*%%str
    ; m0=t16, m1=t17, m2=t18, m3=t19, m4=t20, m5=t21, m6=t22, m7=t23,
    ; m8=t24, m9=t25, m10=t26, m11=t27, m12=t28, m13=t29, m14=t30, m15=t31
    ; then do final pass to sumsub+store the two halves
    mova [tmpq+17*%%str], m2 ; t20
    mova [tmpq+ 1*%%str], m3 ; t21
    mova [tmpq+25*%%str], m13 ; t22
    mova m8, [tmpq+ 0*%%str] ; t0
    mova m9, [tmpq+ 4*%%str] ; t1
    mova m12, [tmpq+ 8*%%str] ; t2
    mova m11, [tmpq+12*%%str] ; t3
    mova m2, [tmpq+16*%%str] ; t4
    mova m3, [tmpq+20*%%str] ; t5
    mova m13, [tmpq+24*%%str] ; t6
    SUMSUB_BA w, 6, 8, 10
    mova [tmpq+ 3*%%str], m8 ; t15
    mova m10, [tmpq+28*%%str] ; t7
    SUMSUB_BA w, 0, 9, 8
    SUMSUB_BA w, 15, 12, 8
    SUMSUB_BA w, 14, 11, 8
    SUMSUB_BA w, 1, 2, 8
    SUMSUB_BA w, 7, 3, 8
    SUMSUB_BA w, 5, 13, 8
    SUMSUB_BA w, 4, 10, 8
    TRANSPOSE8x8W 6, 0, 15, 14, 1, 7, 5, 4, 8
    mova [tmpq+ 0*%%str], m6
    mova [tmpq+ 4*%%str], m0
    mova [tmpq+ 8*%%str], m15
    mova [tmpq+12*%%str], m14
    mova [tmpq+16*%%str], m1
    mova [tmpq+20*%%str], m7
    mova [tmpq+24*%%str], m5
    mova [tmpq+28*%%str], m4
    mova m8, [tmpq+ 3*%%str] ; t15
    TRANSPOSE8x8W 10, 13, 3, 2, 11, 12, 9, 8, 0
    mova [tmpq+ 3*%%str], m10
    mova [tmpq+ 7*%%str], m13
    mova [tmpq+11*%%str], m3
    mova [tmpq+15*%%str], m2
    mova [tmpq+19*%%str], m11
    mova [tmpq+23*%%str], m12
    mova [tmpq+27*%%str], m9
    mova [tmpq+31*%%str], m8
    mova m15, [tmpq+30*%%str] ; t8
    mova m14, [tmpq+26*%%str] ; t9
    mova m13, [tmpq+22*%%str] ; t10
    mova m12, [tmpq+18*%%str] ; t11
    mova m11, [tmpq+14*%%str] ; t12
    mova m10, [tmpq+10*%%str] ; t13
    mova m9, [tmpq+ 6*%%str] ; t14
    mova m8, [tmpq+ 2*%%str] ; t15
    mova m7, [tmpq+21*%%str] ; t16
    mova m6, [tmpq+29*%%str] ; t17
    mova m5, [tmpq+ 9*%%str] ; t18
    mova m4, [tmpq+13*%%str] ; t19
    mova m3, [tmpq+17*%%str] ; t20
    mova m2, [tmpq+ 1*%%str] ; t21
    mova m1, [tmpq+25*%%str] ; t22
    SUMSUB_BA w, 7, 8, 0
    mova [tmpq+ 2*%%str], m8
    mova m0, [tmpq+ 5*%%str] ; t23
    SUMSUB_BA w, 6, 9, 8
    SUMSUB_BA w, 5, 10, 8
    SUMSUB_BA w, 4, 11, 8
    SUMSUB_BA w, 3, 12, 8
    SUMSUB_BA w, 2, 13, 8
    SUMSUB_BA w, 1, 14, 8
    SUMSUB_BA w, 0, 15, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    mova [tmpq+ 1*%%str], m0
    mova [tmpq+ 5*%%str], m1
    mova [tmpq+ 9*%%str], m2
    mova [tmpq+13*%%str], m3
    mova [tmpq+17*%%str], m4
    mova [tmpq+21*%%str], m5
    mova [tmpq+25*%%str], m6
    mova [tmpq+29*%%str], m7
    mova m8, [tmpq+ 2*%%str]
    TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
    mova [tmpq+ 2*%%str], m8
    mova [tmpq+ 6*%%str], m9
    mova [tmpq+10*%%str], m10
    mova [tmpq+14*%%str], m11
    mova [tmpq+18*%%str], m12
    mova [tmpq+22*%%str], m13
    mova [tmpq+26*%%str], m14
    mova [tmpq+30*%%str], m15
    mova m2, [tmpq+24*%%str] ; t6
    mova m3, [tmpq+28*%%str] ; t7
    SUMSUB_BADC w, 5, 2, 4, 3
    mova [tmpq+24*%%str], m5
    mova [tmpq+23*%%str], m2
    mova [tmpq+28*%%str], m4
    mova [tmpq+19*%%str], m3
    mova m2, [tmpq+16*%%str] ; t4
    mova m3, [tmpq+20*%%str] ; t5
    SUMSUB_BA w, 1, 2, 5
    SUMSUB_BA w, 7, 3, 5
    mova [tmpq+15*%%str], m2
    mova [tmpq+11*%%str], m3
    mova m2, [tmpq+ 0*%%str] ; t0
    mova m3, [tmpq+ 4*%%str] ; t1
    SUMSUB_BA w, 6, 2, 5
    SUMSUB_BA w, 0, 3, 5
    mova [tmpq+31*%%str], m2
    mova [tmpq+27*%%str], m3
    mova m2, [tmpq+ 8*%%str] ; t2
    mova m3, [tmpq+12*%%str] ; t3
    mova m5, [tmpq+ 7*%%str]
    mova m4, [tmpq+ 3*%%str]
    SUMSUB_BADC w, 5, 2, 4, 3
    mova [tmpq+ 7*%%str], m2
    mova [tmpq+ 3*%%str], m3
    mova m3, [tmpq+28*%%str]
    TRANSPOSE8x8W 6, 0, 5, 4, 1, 7, 2, 3, [tmpq+24*%%str], [tmpq+16*%%str], 1
    mova [tmpq+ 0*%%str], m6
    mova [tmpq+ 4*%%str], m0
    mova [tmpq+ 8*%%str], m5
    mova [tmpq+12*%%str], m4
    mova [tmpq+20*%%str], m7
    mova [tmpq+24*%%str], m2
    mova [tmpq+28*%%str], m3
    mova m6, [tmpq+19*%%str]
    mova m0, [tmpq+23*%%str]
    mova m5, [tmpq+11*%%str]
    mova m4, [tmpq+15*%%str]
    mova m1, [tmpq+ 3*%%str]
    mova m7, [tmpq+ 7*%%str]
    mova m3, [tmpq+31*%%str]
    TRANSPOSE8x8W 6, 0, 5, 4, 1, 7, 2, 3, [tmpq+27*%%str], [tmpq+19*%%str], 1
    mova [tmpq+ 3*%%str], m6
    mova [tmpq+ 7*%%str], m0
    mova [tmpq+11*%%str], m5
    mova [tmpq+15*%%str], m4
    mova [tmpq+23*%%str], m7
    mova [tmpq+27*%%str], m2
    mova [tmpq+31*%%str], m3
    mova m1, [tmpq+ 6*%%str] ; t14
    mova m0, [tmpq+ 2*%%str] ; t15
    mova m7, [tmpq+21*%%str] ; t16
    mova m6, [tmpq+29*%%str] ; t17
    SUMSUB_BA w, 7, 0, 2
    SUMSUB_BA w, 6, 1, 2
    mova [tmpq+29*%%str], m7
    mova [tmpq+ 2*%%str], m0
    mova [tmpq+21*%%str], m6
    mova [tmpq+ 6*%%str], m1
    mova m1, [tmpq+14*%%str] ; t12
    mova m0, [tmpq+10*%%str] ; t13
    mova m5, [tmpq+ 9*%%str] ; t18
    mova m4, [tmpq+13*%%str] ; t19
    SUMSUB_BA w, 5, 0, 2
    SUMSUB_BA w, 4, 1, 2
    mova [tmpq+10*%%str], m0
    mova [tmpq+14*%%str], m1
    mova m1, [tmpq+22*%%str] ; t10
    mova m0, [tmpq+18*%%str] ; t11
    mova m3, [tmpq+17*%%str] ; t20
    mova m2, [tmpq+ 1*%%str] ; t21
    SUMSUB_BA w, 3, 0, 6
    SUMSUB_BA w, 2, 1, 6
    mova [tmpq+18*%%str], m0
    mova [tmpq+22*%%str], m1
    mova m7, [tmpq+30*%%str] ; t8
    mova m6, [tmpq+26*%%str] ; t9
    mova m1, [tmpq+25*%%str] ; t22
    mova m0, [tmpq+ 5*%%str] ; t23
    SUMSUB_BADC w, 1, 6, 0, 7
    mova [tmpq+26*%%str], m6
    mova [tmpq+30*%%str], m7
    mova m7, [tmpq+29*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+21*%%str], [tmpq+17*%%str], 1
    mova [tmpq+ 1*%%str], m0
    mova [tmpq+ 5*%%str], m1
    mova [tmpq+ 9*%%str], m2
    mova [tmpq+13*%%str], m3
    mova [tmpq+21*%%str], m5
    mova [tmpq+25*%%str], m6
    mova [tmpq+29*%%str], m7
    mova m0, [tmpq+ 2*%%str]
    mova m1, [tmpq+ 6*%%str]
    mova m2, [tmpq+10*%%str]
    mova m3, [tmpq+14*%%str]
    mova m4, [tmpq+18*%%str]
    mova m5, [tmpq+22*%%str]
    mova m7, [tmpq+30*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+26*%%str], [tmpq+18*%%str], 1
    mova [tmpq+ 2*%%str], m0
    mova [tmpq+ 6*%%str], m1
    mova [tmpq+10*%%str], m2
    mova [tmpq+14*%%str], m3
    mova [tmpq+22*%%str], m5
    mova [tmpq+26*%%str], m6
    mova [tmpq+30*%%str], m7
    ; t0-7 are in [tmpq+{0,4,8,12,16,20,24,28}*%%str]
    ; t8-15 are in [tmpq+{2,6,10,14,18,22,26,30}*%%str]
    ; t16-19 and t23 are in [tmpq+{1,5,9,13,29}*%%str]
    ; t24-31 are in m8-15
%define ROUND_REG [pw_512]
%define ROUND_REG [pw_32]

%macro %%STORE_2X2 7-8 1 ; src[1-4], tmp[1-2], zero, inc_dst_ptrs
    SUMSUB_BA w, %4, %1, %5
    SUMSUB_BA w, %3, %2, %5
    VP9_IDCT8_WRITEx2 %4, %3, %5, %6, %7, ROUND_REG, 6
    VP9_IDCT8_WRITEx2 %2, %1, %5, %6, %7, ROUND_REG, 6, dst_endq
    sub dst_endq, stride2q

    ; store t0-1 and t30-31
    mova m8, [tmpq+ 0*%%str]
    mova m9, [tmpq+ 4*%%str]
    %%STORE_2X2 8, 9, 0, 6, 12, 11, 10
    ; store t2-3 and t28-29
    mova m8, [tmpq+ 8*%%str]
    mova m9, [tmpq+12*%%str]
    %%STORE_2X2 8, 9, 14, 15, 12, 11, 10
    ; store t4-5 and t26-27
    mova m8, [tmpq+16*%%str]
    mova m9, [tmpq+20*%%str]
    %%STORE_2X2 8, 9, 7, 1, 12, 11, 10
    ; store t6-7 and t24-25
    mova m8, [tmpq+24*%%str]
    mova m9, [tmpq+28*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10
    ; store t8-9 and t22-23
    mova m8, [tmpq+30*%%str]
    mova m9, [tmpq+26*%%str]
    mova m0, [tmpq+ 5*%%str]
    %%STORE_2X2 8, 9, 13, 0, 12, 11, 10
    ; store t10-11 and t20-21
    mova m8, [tmpq+22*%%str]
    mova m9, [tmpq+18*%%str]
    %%STORE_2X2 8, 9, 2, 3, 12, 11, 10
    ; store t12-13 and t18-19
    mova m8, [tmpq+14*%%str]
    mova m9, [tmpq+10*%%str]
    mova m5, [tmpq+13*%%str]
    mova m4, [tmpq+ 9*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10
    mova m8, [tmpq+ 6*%%str]
    mova m9, [tmpq+ 2*%%str]
    mova m5, [tmpq+29*%%str]
    mova m4, [tmpq+21*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10, 0
    mova [tmpq+ 1*%%str], m1
    mova [tmpq+11*%%str], m2
    mova [tmpq+15*%%str], m3
    mova [tmpq+17*%%str], m4
    mova [tmpq+19*%%str], m5
    ; store t0-1 and t30-31
    mova m2, [tmpq+ 0*%%str]
    mova m3, [tmpq+ 4*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
    ; store t2-3 and t28-29
    mova m2, [tmpq+ 8*%%str]
    mova m3, [tmpq+12*%%str]
    mova m0, [tmpq+ 3*%%str]
    mova m6, [tmpq+ 7*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
    ; store t4-5 and t26-27
    mova m2, [tmpq+16*%%str]
    mova m3, [tmpq+20*%%str]
    mova m0, [tmpq+ 1*%%str]
    %%STORE_2X2 2, 3, 7, 0, 4, 5, 1
    ; store t6-7 and t24-25
    mova m2, [tmpq+24*%%str]
    mova m3, [tmpq+28*%%str]
    mova m0, [tmpq+17*%%str]
    mova m6, [tmpq+19*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
    ; store t8-9 and t22-23
    mova m2, [tmpq+30*%%str]
    mova m3, [tmpq+26*%%str]
    mova m0, [tmpq+25*%%str]
    mova m6, [tmpq+ 5*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
    ; store t10-11 and t20-21
    mova m2, [tmpq+22*%%str]
    mova m3, [tmpq+18*%%str]
    mova m0, [tmpq+11*%%str]
    mova m6, [tmpq+15*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
    ; store t12-13 and t18-19
    mova m2, [tmpq+14*%%str]
    mova m3, [tmpq+10*%%str]
    mova m6, [tmpq+13*%%str]
    mova m0, [tmpq+ 9*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
    mova m2, [tmpq+ 6*%%str]
    mova m3, [tmpq+ 2*%%str]
    mova m6, [tmpq+29*%%str]
    mova m0, [tmpq+21*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1, 0

%macro VP9_IDCT_IDCT_32x32_ADD_XMM 1
cglobal vp9_idct_idct_32x32_add, 0, 6 + ARCH_X86_64 * 3, 16, 2048, dst, stride, block, eob
    movifnidn eobd, dword eobm
    movifnidn blockq, blockmp
    movifnidn dstq, dstmp
    movifnidn strideq, stridemp
    mova m1, [pw_11585x2]
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (32 << 14) + 8192
    SPLATW m0, m0, q0000
    pmulhrsw m0, [pw_512]
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize
    DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
%define dst_bakq r0mp
    DEFINE_ARGS block, u1, u2, u3, u4, tmp
    VP9_IDCT32_1D blockq, 1, 8
    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
%define cntd dword r3m
    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32
    sub stride30q, stride2q ; stride*30
    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2, 8
    ; at the end of the loop, m7 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 64, 8, m1
    DEFINE_ARGS block, tmp, cnt
    VP9_IDCT32_1D blockq, 1, 16
    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
%define cntd dword r3m
    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32
    sub stride30q, stride2q ; stride*30
    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2, 16
    ; at the end of the loop, m7 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 64, 16, m1
    DEFINE_ARGS block, tmp, cnt
    VP9_IDCT32_1D blockq, 1
    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
%define cntd dword r3m
    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32
    sub stride30q, stride2q ; stride*30
    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2
    ; at the end of the loop, m7 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 64, 32, m1

VP9_IDCT_IDCT_32x32_ADD_XMM sse2
VP9_IDCT_IDCT_32x32_ADD_XMM ssse3
VP9_IDCT_IDCT_32x32_ADD_XMM avx