;******************************************************************************
;* VP9 IDCT SIMD optimizations
;* Copyright (C) 2013 Clément Bœsch <u pkh me>
;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
;* This file is part of FFmpeg.
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
pw_11585x2: times 8 dw 23170
pw_m11585x2: times 8 dw -23170
pw_m11585_11585: times 4 dw -11585, 11585
pw_11585_11585: times 8 dw 11585
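; NOTE: 11585 = round(16384 * cos(pi/4)), i.e. 1/sqrt(2) in Q14. The doubled
; (x2) constants exist because pmulhrsw computes (a*b + (1<<14)) >> 15, a
; Q15 product, so Q14 coefficients are pre-scaled by 2 to compensate.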
%macro VP9_IDCT_COEFFS 2-3 0
pw_%1x2: times 8 dw %1*2
pw_m%1x2: times 8 dw -%1*2
pw_%2x2: times 8 dw %2*2
pw_m%2x2: times 8 dw -%2*2
pw_m%1_%2: times 4 dw -%1, %2
pw_%2_%1: times 4 dw %2, %1
pw_m%2_m%1: times 4 dw -%2, -%1
pw_m%2_%1: times 4 dw -%2, %1
pw_%1_%2: times 4 dw %1, %2
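; NOTE: each invocation below emits the tables for one cos/sin pair of the
; vp9 idct in Q14: %1 ~= round(16384*cos(a)), %2 ~= round(16384*sin(a)),
; e.g. 15137/6270 is cos/sin(pi/8) and 16069/3196 is cos/sin(pi/16).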
VP9_IDCT_COEFFS 15137, 6270, 1
VP9_IDCT_COEFFS 16069, 3196, 1
VP9_IDCT_COEFFS 9102, 13623, 1
VP9_IDCT_COEFFS 16305, 1606
VP9_IDCT_COEFFS 10394, 12665
VP9_IDCT_COEFFS 14449, 7723
VP9_IDCT_COEFFS 4756, 15679
VP9_IDCT_COEFFS 16364, 804
VP9_IDCT_COEFFS 11003, 12140
VP9_IDCT_COEFFS 14811, 7005
VP9_IDCT_COEFFS 5520, 15426
VP9_IDCT_COEFFS 15893, 3981
VP9_IDCT_COEFFS 8423, 14053
VP9_IDCT_COEFFS 13160, 9760
VP9_IDCT_COEFFS 2404, 16207
pw_5283_13377: times 4 dw 5283, 13377
pw_9929_13377: times 4 dw 9929, 13377
pw_15212_m13377: times 4 dw 15212, -13377
pw_15212_9929: times 4 dw 15212, 9929
pw_m5283_m15212: times 4 dw -5283, -15212
pw_13377x2: times 8 dw 13377*2
pw_13377_m13377: times 4 dw 13377, -13377
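; NOTE: 5283/9929/13377/15212 above are the 4-point adst kernel constants,
; round(16384 * sin(k*pi/9) * 2*sqrt(2)/3) for k = 1..4.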
pd_8192: times 4 dd 8192
; (a*x + b*y + round) >> shift
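; NOTE: scalar reference for the multiply-sub primitives below, with
; round = pd_8192 = 1<<13 and shift = 14 to match the Q14 coefficients.
; For a coefficient pair (c1, c2) and word-interleaved inputs x/y, each
; 32-bit pmaddwd lane computes one of:
;   dst1[i] = (-c1*x[i] + c2*y[i] + 8192) >> 14
;   dst2[i] = ( c2*x[i] + c1*y[i] + 8192) >> 14
; using the pw_m<c1>_<c2> and pw_<c2>_<c1> tables above.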
%macro VP9_MULSUB_2W_2X 5 ; dst1, dst2/src, round, coefs1, coefs2
%macro VP9_MULSUB_2W_4X 7 ; dst1, dst2, coef1, coef2, rnd, tmp1/src, tmp2
    VP9_MULSUB_2W_2X %7, %6, %5, [pw_m%3_%4], [pw_%4_%3]
    VP9_MULSUB_2W_2X %1, %2, %5, [pw_m%3_%4], [pw_%4_%3]
%macro VP9_UNPACK_MULSUB_2W_4X 7-9 ; dst1, dst2, (src1, src2,) coef1, coef2, rnd, tmp1, tmp2
    punpckhwd m%6, m%2, m%1
    VP9_MULSUB_2W_4X %1, %2, %3, %4, %5, %6, %7
    punpckhwd m%8, m%4, m%3
    punpcklwd m%2, m%4, m%3
    VP9_MULSUB_2W_4X %1, %2, %5, %6, %7, %8, %9
%macro VP9_UNPACK_MULSUB_2D_4X 6 ; dst1 [src1], dst2 [src2], dst3, dst4, mul1, mul2
    punpckhwd m%4, m%2, m%1
    pmaddwd m%3, m%4, [pw_m%5_%6]
    pmaddwd m%4, [pw_%6_%5]
    pmaddwd m%1, m%2, [pw_m%5_%6]
    pmaddwd m%2, [pw_%6_%5]
%macro VP9_RND_SH_SUMSUB_BA 6 ; dst1 [src1], dst2 [src2], src3, src4, tmp, round
    SUMSUB_BA d, %1, %2, %5
    SUMSUB_BA d, %3, %4, %5
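; NOTE: judging by the macro name (RND_SH = round + shift), the dword
; sums/differences formed here are then rounded with %6 and shifted back
; down to words; the [d]/[w] suffixes in comments throughout this file
; distinguish such dword intermediates from packed word results.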
%macro VP9_STORE_2X 5-6 dstq ; reg1, reg2, tmp1, tmp2, zero, dst
    movh m%4, [%6+strideq]
    movh [%6+strideq], m%4
%macro ZERO_BLOCK 4 ; mem, stride, nnzcpl, zero_reg
    mova [%1+%%y+%%x], %4
%assign %%x (%%x+mmsize)
;-------------------------------------------------------------------------------------------
; void vp9_iwht_iwht_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------
%macro VP9_IWHT4_1D 0
cglobal vp9_iwht_iwht_4x4_add, 3, 3, 0, dst, stride, block, eob
    mova m0, [blockq+0*8]
    mova m1, [blockq+1*8]
    mova m2, [blockq+2*8]
    mova m3, [blockq+3*8]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    VP9_STORE_2X 0, 1, 5, 6, 4
    lea dstq, [dstq+strideq*2]
    VP9_STORE_2X 2, 3, 5, 6, 4
    ZERO_BLOCK blockq, 8, 4, m4
;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------
%macro VP9_IDCT4_1D_FINALIZE 0
    SUMSUB_BA w, 3, 2, 4 ; m3=t3+t0, m2=-t3+t0
    SUMSUB_BA w, 1, 0, 4 ; m1=t2+t1, m0=-t2+t1
    SWAP 0, 3, 2 ; 3102 -> 0123
%macro VP9_IDCT4_1D 0
    SUMSUB_BA w, 2, 0, 4 ; m2=IN(0)+IN(2) m0=IN(0)-IN(2)
    pmulhrsw m2, m6 ; m2=t0
    pmulhrsw m0, m6 ; m0=t1
    VP9_UNPACK_MULSUB_2W_4X 0, 2, 11585, 11585, m7, 4, 5 ; m0=t1, m2=t0
    VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270, m7, 4, 5 ; m1=t2, m3=t3
    VP9_IDCT4_1D_FINALIZE
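; NOTE: scalar shape of the 4-point idct above (the +8192 Q14 rounding
; before each >>14 is left implicit):
;   t0 = (in0 + in2) * 11585 >> 14      t1 = (in0 - in2) * 11585 >> 14
;   t2 = (in1*6270 - in3*15137) >> 14   t3 = (in1*15137 + in3*6270) >> 14
;   out = { t0+t3, t1+t2, t1-t2, t0-t3 }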
; 2x2 top left corner
%macro VP9_IDCT4_2x2_1D 0
    pmulhrsw m0, m5 ; m0=t1
    pmulhrsw m1, m6 ; m1=t2
    pmulhrsw m3, m7 ; m3=t3
    VP9_IDCT4_1D_FINALIZE
%macro VP9_IDCT4_WRITEOUT 0
    pmulhrsw m0, m5 ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
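    ; NOTE: the identity holds because pmulhrsw computes
    ; (a*b + (1<<14)) >> 15; with b = 2048 = 1<<11 this is
    ; ((x<<11) + (1<<14)) >> 15 = (x+8) >> 4, the rounded 4-bit
    ; down-shift of the final 4x4 stage.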
    VP9_STORE_2X 0, 1, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 2, 3, 6, 7, 4
cglobal vp9_idct_idct_4x4_add, 4, 4, 0, dst, stride, block, eob
    cmp eobd, 4 ; 2x2 or smaller
    cmp eobd, 1 ; faster path for when only DC is set
    mova m5, [pw_11585x2]
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (8 << 14) + 8192
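    ; NOTE: this constant folds two rounding steps into one final shift:
    ; 8192 = 1<<13 is the Q14 rounding of the second 11585 multiply, and
    ; 8<<14 pre-adds the +8 of the closing (x+8)>>4, since
    ; (x + (8<<14) + 8192) >> 18 == (((x + 8192) >> 14) + 8) >> 4.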
    pmulhrsw m0, [pw_2048] ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
    VP9_STORE_2X 0, 0, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 0, 0, 6, 7, 4
; faster path for when only top left 2x2 block is set
    mova m5, [pw_11585x2]
    mova m7, [pw_15137x2]
; partial 2x4 transpose
    SBUTTERFLY dq, 0, 2, 1
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
.idctfull: ; generic full 4x4 idct/idct
    mova m6, [pw_11585x2]
    mova m7, [pd_8192] ; rounding
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
;-------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------
%macro VP9_IADST4_1D 0
    paddw xmm6, xmm3, xmm0
    pmaddwd xmm1, xmm0, [pw_5283_13377]
    pmaddwd xmm4, xmm0, [pw_9929_13377]
    pmaddwd xmm0, [pw_15212_m13377]
    pmaddwd xmm3, xmm2, [pw_15212_9929]
    pmaddwd xmm2, [pw_m5283_m15212]
    pmaddwd xmm6, [pw_13377_m13377]
%if notcpuflag(ssse3)
    pmulhrsw m3, [pw_13377x2] ; out2
%if notcpuflag(ssse3)
    movdq2q m0, xmm0 ; out3
    movdq2q m1, xmm1 ; out0
    movdq2q m2, xmm4 ; out1
%if notcpuflag(ssse3)
    movdq2q m3, xmm6 ; out2
cglobal vp9_%1_%3_4x4_add, 3, 3, 6 + notcpuflag(ssse3), dst, stride, block, eob
    movdqa xmm5, [pd_8192]
    mova m6, [pw_11585x2]
%ifnidn %1%3, iadstiadst
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X
IADST4_FN idct, IDCT4, iadst, IADST4, sse2
IADST4_FN iadst, IADST4, idct, IDCT4, sse2
IADST4_FN iadst, IADST4, iadst, IADST4, sse2
IADST4_FN idct, IDCT4, iadst, IADST4, ssse3
IADST4_FN iadst, IADST4, idct, IDCT4, ssse3
IADST4_FN iadst, IADST4, iadst, IADST4, ssse3
;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------
%macro VP9_IDCT8_1D_FINALIZE 0
    SUMSUB_BA w, 3, 6, 5 ; m3=t0+t7, m6=t0-t7
    SUMSUB_BA w, 1, 2, 5 ; m1=t1+t6, m2=t1-t6
    SUMSUB_BA w, 7, 0, 5 ; m7=t2+t5, m0=t2-t5
    UNSCRATCH 5, 8, blockq+ 0
    SCRATCH 2, 8, blockq+ 0
    SUMSUB_BA w, 5, 4, 2 ; m5=t3+t4, m4=t3-t4
; - in: m0/m4 is in mem
; - out: m6 is in mem
; - everything is in registers (m0-7)
%macro VP9_IDCT8_1D 0
    VP9_UNPACK_MULSUB_2W_4X 5, 3, 9102, 13623, D_8192_REG, 0, 4 ; m5=t5a, m3=t6a
    VP9_UNPACK_MULSUB_2W_4X 1, 7, 16069, 3196, D_8192_REG, 0, 4 ; m1=t4a, m7=t7a
    SUMSUB_BA w, 5, 1, 0 ; m5=t4a+t5a (t4), m1=t4a-t5a (t5a)
    SUMSUB_BA w, 3, 7, 0 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
    SUMSUB_BA w, 1, 7, 0 ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
    pmulhrsw m1, W_11585x2_REG ; m1=t6
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    VP9_UNPACK_MULSUB_2W_4X 7, 1, 11585, 11585, D_8192_REG, 0, 4
    VP9_UNPACK_MULSUB_2W_4X 2, 6, 15137, 6270, D_8192_REG, 0, 4 ; m2=t2a, m6=t3a
    UNSCRATCH 0, 8, blockq+ 0 ; IN(0)
    UNSCRATCH 4, 9, blockq+64 ; IN(4)
    SCRATCH 5, 8, blockq+ 0
    SUMSUB_BA w, 4, 0, 5 ; m4=IN(0)+IN(4) m0=IN(0)-IN(4)
    pmulhrsw m4, W_11585x2_REG ; m4=t0a
    pmulhrsw m0, W_11585x2_REG ; m0=t1a
    SCRATCH 7, 9, blockq+64
    VP9_UNPACK_MULSUB_2W_4X 0, 4, 11585, 11585, D_8192_REG, 5, 7
    UNSCRATCH 7, 9, blockq+64
    SUMSUB_BA w, 6, 4, 5 ; m6=t0a+t3a (t0), m4=t0a-t3a (t3)
    SUMSUB_BA w, 2, 0, 5 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    VP9_IDCT8_1D_FINALIZE
%macro VP9_IDCT8_4x4_1D 0
    pmulhrsw m0, W_11585x2_REG ; m0=t1a/t0a
    pmulhrsw m6, m2, [pw_15137x2] ; m6=t3a
    pmulhrsw m2, [pw_6270x2] ; m2=t2a
    pmulhrsw m7, m1, [pw_16069x2] ; m7=t7a
    pmulhrsw m1, [pw_3196x2] ; m1=t4a
    pmulhrsw m5, m3, [pw_9102x2] ; m5=-t5a
    pmulhrsw m3, [pw_13623x2] ; m3=t6a
    SUMSUB_BA w, 5, 1, 4 ; m1=t4a+t5a (t4), m5=t4a-t5a (t5a)
    SUMSUB_BA w, 3, 7, 4 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
    SUMSUB_BA w, 1, 7, 4 ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
    pmulhrsw m1, W_11585x2_REG ; m1=t6
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    psubw m4, m0, m6 ; m4=t0a-t3a (t3)
    paddw m6, m0 ; m6=t0a+t3a (t0)
    SCRATCH 5, 8, blockq+ 0
    SUMSUB_BA w, 2, 0, 5 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    VP9_IDCT8_1D_FINALIZE
%macro VP9_IDCT8_2x2_1D 1
    pmulhrsw m0, W_11585x2_REG ; m0=t0
    pmulhrsw m3, m1, W_16069x2_REG ; m3=t7
    pmulhrsw m1, W_3196x2_REG ; m1=t4
    psubw m7, m3, m1 ; t5 = t7a - t4a
    paddw m5, m3, m1 ; t6 = t7a + t4a
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    pmulhrsw m5, W_11585x2_REG ; m5=t6
; merged VP9_IDCT8_1D_FINALIZE to make register-sharing w/ avx easier
    psubw m6, m0, m3 ; m6=t0-t7
    paddw m3, m0 ; m3=t0+t7
    psubw m2, m0, m1 ; m2=t1-t6
    paddw m1, m0 ; m1=t1+t6
%define SCRATCH_REG 1
%define SCRATCH_REG 2
%define SCRATCH_REG 8
    psubw m4, m0, m5 ; m4=t3-t4
    paddw m5, m0 ; m5=t3+t4
    SUMSUB_BA w, 7, 0, SCRATCH_REG ; m7=t2+t5, m0=t2-t5
%macro VP9_IDCT8_WRITEx2 6-8 5 ; line1, line2, tmp1, tmp2, zero, pw_1024/pw_16, shift
    pmulhrsw m%1, %6 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    VP9_STORE_2X %1, %2, %3, %4, %5
    VP9_STORE_2X %1, %2, %3, %4, %5, %8
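; NOTE: %6 selects the rounding style: the ssse3/avx versions pass pw_1024
; and use the pmulhrsw identity above, while the sse2 version presumably
; passes pw_16 and rounds with a plain add followed by an arithmetic
; right-shift by %7 (default 5) before storing.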
; - m8 holds m6 (SWAP)
%macro VP9_IDCT8_WRITEOUT 0
%define ROUND_REG [pw_1024]
%define ROUND_REG [pw_16]
    SCRATCH 5, 10, blockq+16
    SCRATCH 7, 11, blockq+32
    VP9_IDCT8_WRITEx2 0, 1, 5, 7, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2 2, 3, 5, 7, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    UNSCRATCH 5, 10, blockq+16
    UNSCRATCH 7, 11, blockq+32
    VP9_IDCT8_WRITEx2 4, 5, 0, 1, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    UNSCRATCH 5, 8, blockq+ 0
    VP9_IDCT8_WRITEx2 5, 7, 0, 1, 6, ROUND_REG
%macro VP9_IDCT_IDCT_8x8_ADD_XMM 2
cglobal vp9_idct_idct_8x8_add, 4, 4, %2, dst, stride, block, eob
    mova m12, [pw_11585x2] ; often used
%define W_11585x2_REG m12
%define W_11585x2_REG [pw_11585x2]
    cmp eobd, 12 ; top left half or less
    cmp eobd, 3 ; top left corner or less
    cmp eobd, 1 ; faster path for when only DC is set
    jne .idcttopleftcorner
    pmulhrsw m0, W_11585x2_REG
    pmulhrsw m0, W_11585x2_REG
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (16 << 14) + 8192
    pmulhrsw m0, [pw_1024] ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    VP9_STORE_2X 0, 0, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 0, 0, 6, 7, 4
; faster path for when only the top-left corner is set (3 inputs: DC, the
; coefficient right of DC, and the one below it). Note: this also works for
; a 2x2 block
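; NOTE: coefficients are indexed in scan order, so an eob this small can
; only cover the DC and its two immediate neighbours, which is why three
; inputs suffice here.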
    mova m10, [pw_3196x2]
    mova m11, [pw_16069x2]
%define W_3196x2_REG m10
%define W_16069x2_REG m11
%define W_3196x2_REG [pw_3196x2]
%define W_16069x2_REG [pw_16069x2]
; partial 2x8 transpose
; punpcklwd m0, m1 already done inside idct
    SBUTTERFLY qdq, 0, 4, 1
    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
    movh m0, [blockq + 0]
    movh m1, [blockq +16]
    movh m2, [blockq +32]
    movh m3, [blockq +48]
; partial 4x8 transpose
    SBUTTERFLY dq, 0, 2, 1
    SBUTTERFLY dq, 4, 6, 5
    SBUTTERFLY qdq, 0, 4, 1
    SBUTTERFLY qdq, 2, 6, 5
.idctfull: ; generic full 8x8 idct/idct
    mova m0, [blockq+ 0] ; IN(0)
    mova m1, [blockq+ 16] ; IN(1)
    mova m2, [blockq+ 32] ; IN(2)
    mova m3, [blockq+ 48] ; IN(3)
    mova m4, [blockq+ 64] ; IN(4)
    mova m5, [blockq+ 80] ; IN(5)
    mova m6, [blockq+ 96] ; IN(6)
    mova m7, [blockq+112] ; IN(7)
    mova m11, [pd_8192] ; rounding
%define D_8192_REG m11
%define D_8192_REG [pd_8192]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
    ZERO_BLOCK blockq, 16, 8, m6
VP9_IDCT_IDCT_8x8_ADD_XMM sse2, 12
VP9_IDCT_IDCT_8x8_ADD_XMM ssse3, 13
VP9_IDCT_IDCT_8x8_ADD_XMM avx, 13
;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------
; - in: m0/3/4/7 are in mem [blockq+N*16]
; - out: m6 is in mem [blockq+0]
; - everything is in registers
%macro VP9_IADST8_1D 0 ; input/output=m0/1/2/3/4/5/6/7
    VP9_UNPACK_MULSUB_2D_4X 5, 2, 0, 3, 14449, 7723 ; m5/2=t3[d], m2/4=t2[d]
    VP9_UNPACK_MULSUB_2D_4X 1, 6, 4, 7, 4756, 15679 ; m1/4=t7[d], m6/7=t6[d]
    SCRATCH 4, 12, blockq+1*16
    VP9_RND_SH_SUMSUB_BA 6, 2, 7, 3, 4, D_8192_REG ; m6=t2[w], m2=t6[w]
    UNSCRATCH 4, 12, blockq+1*16
    VP9_RND_SH_SUMSUB_BA 1, 5, 4, 0, 3, D_8192_REG ; m1=t3[w], m5=t7[w]
    UNSCRATCH 0, 8, blockq+16*0
    UNSCRATCH 3, 9, blockq+16*3
    UNSCRATCH 4, 10, blockq+16*4
    UNSCRATCH 7, 11, blockq+16*7
    SCRATCH 1, 8, blockq+16*1
    SCRATCH 2, 9, blockq+16*2
    SCRATCH 5, 10, blockq+16*5
    SCRATCH 6, 11, blockq+16*6
    VP9_UNPACK_MULSUB_2D_4X 7, 0, 1, 2, 16305, 1606 ; m7/1=t1[d], m0/2=t0[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 4, 5, 6, 10394, 12665 ; m3/5=t5[d], m4/6=t4[d]
    SCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 4, 0, 6, 2, 1, D_8192_REG ; m4=t0[w], m0=t4[w]
    UNSCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 3, 7, 5, 1, 2, D_8192_REG ; m3=t1[w], m7=t5[w]
    UNSCRATCH 2, 9, blockq+16*2
    UNSCRATCH 5, 10, blockq+16*5
    SCRATCH 3, 9, blockq+16*3
    SCRATCH 4, 10, blockq+16*4
; m4=t0, m3=t1, m6=t2, m1=t3, m0=t4, m7=t5, m2=t6, m5=t7
    VP9_UNPACK_MULSUB_2D_4X 0, 7, 1, 3, 15137, 6270 ; m0/1=t5[d], m7/3=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 5, 2, 4, 6, 6270, 15137 ; m5/4=t6[d], m2/6=t7[d]
    SCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 5, 7, 4, 3, 1, D_8192_REG
    UNSCRATCH 1, 12, blockq+ 0*16
    PSIGNW m5, W_M1_REG ; m5=out1[w], m7=t6[w]
    VP9_RND_SH_SUMSUB_BA 2, 0, 6, 1, 3, D_8192_REG ; m2=out6[w], m0=t7[w]
    UNSCRATCH 1, 8, blockq+16*1
    UNSCRATCH 3, 9, blockq+16*3
    UNSCRATCH 4, 10, blockq+16*4
    UNSCRATCH 6, 11, blockq+16*6
    SCRATCH 2, 8, blockq+16*0
    SUMSUB_BA w, 6, 4, 2 ; m6=out0[w], m4=t2[w]
    PSIGNW m1, W_M1_REG ; m1=out7[w], m3=t3[w]
; m6=out0, m5=out1, m4=t2, m3=t3, m7=t6, m0=t7, m2=out6, m1=out7
    pmulhrsw m3, W_11585x2_REG
    pmulhrsw m7, W_11585x2_REG
    pmulhrsw m4, W_11585x2_REG ; out4
    pmulhrsw m0, W_11585x2_REG ; out2
    SCRATCH 5, 9, blockq+16*1
    VP9_UNPACK_MULSUB_2W_4X 4, 3, 11585, 11585, D_8192_REG, 2, 5
    VP9_UNPACK_MULSUB_2W_4X 7, 0, 11585, 11585, D_8192_REG, 2, 5
    UNSCRATCH 5, 9, blockq+16*1
    PSIGNW m3, W_M1_REG ; out3
    PSIGNW m7, W_M1_REG ; out5
; m6=out0, m5=out1, m0=out2, m3=out3, m4=out4, m7=out5, m2=out6, m1=out7
cglobal vp9_%1_%3_8x8_add, 3, 3, %6, dst, stride, block, eob
%define first_is_idct 1
%define first_is_idct 0
%define second_is_idct 1
%define second_is_idct 0
    mova m0, [blockq+ 0] ; IN(0)
    mova m1, [blockq+ 16] ; IN(1)
    mova m2, [blockq+ 32] ; IN(2)
%if ARCH_X86_64 || first_is_idct
    mova m3, [blockq+ 48] ; IN(3)
    mova m4, [blockq+ 64] ; IN(4)
    mova m5, [blockq+ 80] ; IN(5)
    mova m6, [blockq+ 96] ; IN(6)
%if ARCH_X86_64 || first_is_idct
    mova m7, [blockq+112] ; IN(7)
    mova m15, [pw_11585x2] ; often used
    mova m13, [pd_8192] ; rounding
%define W_11585x2_REG m15
%define D_8192_REG m13
%define W_11585x2_REG [pw_11585x2]
%define D_8192_REG [pd_8192]
%define W_M1_REG [pw_m1]
; note different calling conventions for idct8 vs. iadst8 on x86-32
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
%if second_is_idct == 0
    mova [blockq+ 48], m3
    mova [blockq+112], m7
    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
    ZERO_BLOCK blockq, 16, 8, m6
%undef second_is_idct
%define PSIGNW PSIGNW_MMX
IADST8_FN idct, IDCT8, iadst, IADST8, sse2, 15
IADST8_FN iadst, IADST8, idct, IDCT8, sse2, 15
IADST8_FN iadst, IADST8, iadst, IADST8, sse2, 15
%define PSIGNW PSIGNW_SSSE3
IADST8_FN idct, IDCT8, iadst, IADST8, ssse3, 16
IADST8_FN idct, IDCT8, iadst, IADST8, avx, 16
IADST8_FN iadst, IADST8, idct, IDCT8, ssse3, 16
IADST8_FN iadst, IADST8, idct, IDCT8, avx, 16
IADST8_FN iadst, IADST8, iadst, IADST8, ssse3, 16
IADST8_FN iadst, IADST8, iadst, IADST8, avx, 16
;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------
; at the end of this macro, m7 is stored in [%4+15*%5]
; everything else (t0-6 and t8-15) is stored in m0-6 and m8-15
; the following sumsubs have not been done yet:
; SUMSUB_BA w, 6, 9, 15 ; t6, t9
; SUMSUB_BA w, 7, 8, 15 ; t7, t8
; or (x86-32) t0-t5 are in m0-m5, t10-t15 are in x11/9/7/5/3/1,
; and the following sumsubs have not been done yet:
; SUMSUB_BA w, x13, x14, 7 ; t6, t9
; SUMSUB_BA w, x15, x12, 7 ; t7, t8
%macro VP9_IDCT16_1D_START 5 ; src, nnzc, stride, scratch, scratch_stride
    mova m3, [%1+ 1*%3] ; IN(1)
    mova m0, [%1+ 3*%3] ; IN(3)
    pmulhrsw m4, m3, [pw_16305x2] ; t14-15
    pmulhrsw m3, [pw_1606x2] ; t8-9
    pmulhrsw m7, m0, [pw_m4756x2] ; t10-11
    pmulhrsw m0, [pw_15679x2] ; t12-13
; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
    VP9_UNPACK_MULSUB_2W_4X 2, 5, 4, 3, 15137, 6270, [pd_8192], 1, 6 ; t9, t14
    SCRATCH 4, 10, %4+ 1*%5
    SCRATCH 5, 11, %4+ 7*%5
    VP9_UNPACK_MULSUB_2W_4X 6, 1, 0, 7, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
    UNSCRATCH 5, 11, %4+ 7*%5
; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
    mova m5, [%1+ 1*%3] ; IN(1)
    mova m4, [%1+ 7*%3] ; IN(7)
    pmulhrsw m2, m5, [pw_16305x2] ; t15
    pmulhrsw m5, [pw_1606x2] ; t8
    pmulhrsw m3, m4, [pw_m10394x2] ; t9
    pmulhrsw m4, [pw_12665x2] ; t14
    mova m3, [%1+ 9*%3] ; IN(9)
    mova m2, [%1+15*%3] ; IN(15)
; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
; m11=in8, m3=in9, m12=in10 m0=in11, m8=in12, m1=in13, m13=in14, m2=in15
    VP9_UNPACK_MULSUB_2W_4X 5, 2, 16305, 1606, [pd_8192], 0, 1 ; t8, t15
    VP9_UNPACK_MULSUB_2W_4X 3, 4, 10394, 12665, [pd_8192], 0, 1 ; t9, t14
    SUMSUB_BA w, 3, 5, 0 ; t8, t9
    SUMSUB_BA w, 4, 2, 0 ; t15, t14
    VP9_UNPACK_MULSUB_2W_4X 2, 5, 15137, 6270, [pd_8192], 0, 1 ; t9, t14
    SCRATCH 4, 10, %4+ 1*%5
    SCRATCH 5, 11, %4+ 7*%5
    mova m6, [%1+ 3*%3] ; IN(3)
    mova m7, [%1+ 5*%3] ; IN(5)
    pmulhrsw m0, m7, [pw_14449x2] ; t13
    pmulhrsw m7, [pw_7723x2] ; t10
    pmulhrsw m1, m6, [pw_m4756x2] ; t11
    pmulhrsw m6, [pw_15679x2] ; t12
    mova m0, [%1+11*%3] ; IN(11)
    mova m1, [%1+13*%3] ; IN(13)
    VP9_UNPACK_MULSUB_2W_4X 7, 0, 14449, 7723, [pd_8192], 4, 5 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X 1, 6, 4756, 15679, [pd_8192], 4, 5 ; t11, t12
; m11=t0, m10=t1, m9=t2, m8=t3, m14=t4, m12=t5, m15=t6, m13=t7
; m5=t8, m3=t9, m7=t10, m1=t11, m6=t12, m0=t13, m4=t14, m2=t15
    SUMSUB_BA w, 7, 1, 4 ; t11, t10
    SUMSUB_BA w, 0, 6, 4 ; t12, t13
; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
    VP9_UNPACK_MULSUB_2W_4X 6, 1, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
    UNSCRATCH 5, 11, %4+ 7*%5
; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m13=t5, m14=t6, m15=t7
; m3=t8, m2=t9, m6=t10, m7=t11, m0=t12, m1=t13, m5=t14, m4=t15
    SUMSUB_BA w, 7, 3, 4 ; t8, t11
; backup first register
    SUMSUB_BA w, 6, 2, 7 ; t9, t10
    UNSCRATCH 4, 10, %4+ 1*%5
    SUMSUB_BA w, 0, 4, 7 ; t15, t12
    SUMSUB_BA w, 1, 5, 7 ; t14, t13
; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
    SUMSUB_BA w, 2, 5, 7
    SUMSUB_BA w, 3, 4, 7
    pmulhrsw m5, [pw_11585x2] ; t10
    pmulhrsw m4, [pw_11585x2] ; t11
    pmulhrsw m3, [pw_11585x2] ; t12
    pmulhrsw m2, [pw_11585x2] ; t13
    SCRATCH 6, 10, %4+ 1*%5
    VP9_UNPACK_MULSUB_2W_4X 5, 2, 11585, 11585, [pd_8192], 6, 7 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X 4, 3, 11585, 11585, [pd_8192], 6, 7 ; t11, t12
    UNSCRATCH 6, 10, %4+ 1*%5
; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
; m7=t8, m6=t9, m5=t10, m4=t11, m3=t12, m2=t13, m1=t14, m0=t15
    SCRATCH 0, 8, %4+ 1*%5
    SCRATCH 1, 9, %4+ 3*%5
    SCRATCH 2, 10, %4+ 5*%5
    SCRATCH 3, 11, %4+ 7*%5
    SCRATCH 4, 12, %4+ 9*%5
    SCRATCH 5, 13, %4+11*%5
    SCRATCH 6, 14, %4+13*%5
    mova m3, [%1+ 0*%3] ; IN(0)
    mova m4, [%1+ 2*%3] ; IN(2)
    pmulhrsw m3, [pw_11585x2] ; t0-t3
    pmulhrsw m7, m4, [pw_16069x2] ; t6-7
    pmulhrsw m4, [pw_3196x2] ; t4-5
    pmulhrsw m5, [pw_11585x2] ; t5
    pmulhrsw m6, [pw_11585x2] ; t6
    SCRATCH 7, 15, %4+12*%5
    mova m6, [%1+ 2*%3] ; IN(2)
    mova m1, [%1+ 4*%3] ; IN(4)
    mova m7, [%1+ 6*%3] ; IN(6)
    pmulhrsw m0, m1, [pw_15137x2] ; t3
    pmulhrsw m1, [pw_6270x2] ; t2
    pmulhrsw m5, m6, [pw_16069x2] ; t7
    pmulhrsw m6, [pw_3196x2] ; t4
    pmulhrsw m4, m7, [pw_m9102x2] ; t5
    pmulhrsw m7, [pw_13623x2] ; t6
    mova m4, [%1+10*%3] ; IN(10)
    mova m0, [%1+12*%3] ; IN(12)
    mova m5, [%1+14*%3] ; IN(14)
    VP9_UNPACK_MULSUB_2W_4X 1, 0, 15137, 6270, [pd_8192], 2, 3 ; t2, t3
    VP9_UNPACK_MULSUB_2W_4X 6, 5, 16069, 3196, [pd_8192], 2, 3 ; t4, t7
    VP9_UNPACK_MULSUB_2W_4X 4, 7, 9102, 13623, [pd_8192], 2, 3 ; t5, t6
    SUMSUB_BA w, 4, 6, 2 ; t4, t5
    SUMSUB_BA w, 7, 5, 2 ; t7, t6
    SUMSUB_BA w, 6, 5, 2
    pmulhrsw m5, [pw_11585x2] ; t5
    pmulhrsw m6, [pw_11585x2] ; t6
    VP9_UNPACK_MULSUB_2W_4X 5, 6, 11585, 11585, [pd_8192], 2, 3 ; t5, t6
    SCRATCH 5, 15, %4+10*%5
    mova m2, [%1+ 0*%3] ; IN(0)
    pmulhrsw m2, [pw_11585x2] ; t0 and t1
    SUMSUB_BA w, 7, 0, 5 ; t0, t7
    mova m3, [%1+ 8*%3] ; IN(8)
; from 3 stages back
    SUMSUB_BA w, 3, 2, 5
    pmulhrsw m3, [pw_11585x2] ; t0
    pmulhrsw m2, [pw_11585x2] ; t1
    VP9_UNPACK_MULSUB_2W_4X 2, 3, 11585, 11585, [pd_8192], 5, 0 ; t0, t1
; from 2 stages back
    SUMSUB_BA w, 0, 3, 5 ; t0, t3
    SUMSUB_BA w, 7, 0, 5 ; t0, t7
    UNSCRATCH 5, 15, %4+10*%5
    SCRATCH 7, 15, %4+12*%5
    SUMSUB_BA w, 1, 2, 7 ; t1, t2
    SUMSUB_BA w, 6, 1, 7 ; t1, t6
    SUMSUB_BA w, 5, 2, 7 ; t2, t5
    SUMSUB_BA w, 4, 3, 7 ; t3, t4
    SUMSUB_BA w, 0, 15, 7 ; t0, t15
    SUMSUB_BA w, 1, 14, 7 ; t1, t14
    SUMSUB_BA w, 2, 13, 7 ; t2, t13
    SUMSUB_BA w, 3, 12, 7 ; t3, t12
    SUMSUB_BA w, 4, 11, 7 ; t4, t11
    SUMSUB_BA w, 5, 10, 7 ; t5, t10
%macro %%SUMSUB_BA_STORE 5 ; reg, from_mem, to_mem, scratch, scratch_stride
    SUMSUB_BA w, 6, %1, 7
    %%SUMSUB_BA_STORE 0, 1, 1, %4, %5 ; t0, t15
    %%SUMSUB_BA_STORE 1, 3, 3, %4, %5 ; t1, t14
    %%SUMSUB_BA_STORE 2, 5, 5, %4, %5 ; t2, t13
    %%SUMSUB_BA_STORE 3, 7, 7, %4, %5 ; t3, t12
    %%SUMSUB_BA_STORE 4, 9, 9, %4, %5 ; t4, t11
    %%SUMSUB_BA_STORE 5, 11, 11, %4, %5 ; t5, t10
%macro VP9_IDCT16_1D 2-3 16 ; src, pass, nnzc
    VP9_IDCT16_1D_START %1, %3, 32, tmpq, 16
; backup a different register
    mova m7, [tmpq+15*16]
    mova [tmpq+ 1*16], m15
    SUMSUB_BA w, 6, 9, 15 ; t6, t9
    SUMSUB_BA w, 7, 8, 15 ; t7, t8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 15
    mova m15, [tmpq+ 1*16]
    TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
    mova [tmpq+ 80], m10
    mova [tmpq+112], m11
    mova [tmpq+144], m12
    mova [tmpq+176], m13
    mova [tmpq+208], m14
    mova [tmpq+240], m15
    mova m6, [tmpq+13*16]
    mova m7, [tmpq+14*16]
    SUMSUB_BA w, 6, 7 ; t6, t9
    mova [tmpq+14*16], m6
    mova [tmpq+13*16], m7
    mova m7, [tmpq+15*16]
    mova m6, [tmpq+12*16]
    SUMSUB_BA w, 7, 6 ; t7, t8
    mova [tmpq+15*16], m6
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+14*16], [tmpq+ 8*16], 1
    mova [tmpq+ 0*16], m0
    mova [tmpq+ 2*16], m1
    mova [tmpq+ 4*16], m2
    mova [tmpq+ 6*16], m3
    mova [tmpq+10*16], m5
    mova [tmpq+12*16], m6
    mova [tmpq+14*16], m7
    mova m0, [tmpq+15*16]
    mova m1, [tmpq+13*16]
    mova m2, [tmpq+11*16]
    mova m3, [tmpq+ 9*16]
    mova m4, [tmpq+ 7*16]
    mova m5, [tmpq+ 5*16]
    mova m7, [tmpq+ 1*16]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+ 3*16], [tmpq+ 9*16], 1
    mova [tmpq+ 1*16], m0
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m2
    mova [tmpq+ 7*16], m3
    mova [tmpq+11*16], m5
    mova [tmpq+13*16], m6
    mova [tmpq+15*16], m7
    VP9_IDCT16_1D_START %1, %3, 32, %1, 32
%define ROUND_REG [pw_512]
%define ROUND_REG [pw_32]
; backup more registers
    VP9_IDCT8_WRITEx2 0, 1, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 2, 3, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 4, 5, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
; restore from cache
    SWAP 0, 7 ; move zero from m7 to m0
    SUMSUB_BA w, 6, 9, 3 ; t6, t9
    SUMSUB_BA w, 7, 8, 3 ; t7, t8
    VP9_IDCT8_WRITEx2 6, 7, 3, 4, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 8, 9, 3, 4, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 10, 11, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 12, 13, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 14, 15, 1, 2, 0, ROUND_REG, 6
    mova [tmpq+ 0*32], m5
    VP9_IDCT8_WRITEx2 0, 1, 5, 6, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 2, 3, 5, 6, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    SWAP 0, 7 ; move zero from m7 to m0
    mova m5, [tmpq+ 0*32]
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m4, [tmpq+13*32]
    mova m7, [tmpq+14*32]
    mova m5, [tmpq+15*32]
    mova m6, [tmpq+12*32]
    SUMSUB_BADC w, 4, 7, 5, 6, 1
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m4, [tmpq+11*32]
    mova m5, [tmpq+ 9*32]
    mova m6, [tmpq+ 7*32]
    mova m7, [tmpq+ 5*32]
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m4, [tmpq+ 3*32]
    mova m5, [tmpq+ 1*32]
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
%macro VP9_STORE_2XFULL 6-7 strideq; dc, tmp1, tmp2, tmp3, tmp4, zero, stride
    punpcklbw m%2, m%3, m%6
    punpcklbw m%4, m%5, m%6
%macro VP9_IDCT_IDCT_16x16_ADD_XMM 1
cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob
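; NOTE: the 512 bytes of stack reserved above hold one 16x16 block of
; int16 intermediates (16*16*2 bytes) between the column and row passes.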
; a 2x2 corner corresponds to eob <= 3, a 4x4 corner to eob <= 10
    cmp eobd, 1 ; faster path for when only DC is set
    cmp eobd, 1 ; faster path for when only DC is set
    mova m1, [pw_11585x2]
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (32 << 14) + 8192
    SPLATW m0, m0, q0000
    pmulhrsw m0, [pw_512]
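    ; NOTE: (x*512 + (1<<14))>>15 <=> (x+32)>>6, the 6-bit output rounding
    ; of the 16x16 transform.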
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
    DEFINE_ARGS dst, stride, block, cnt, dst_bak, tmp
    VP9_IDCT16_1D blockq, 1, 8
    VP9_IDCT16_1D tmpq, 2, 8
    lea dstq, [dst_bakq+8]
; at the end of the loop, m0 should still be zero
; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 8, m0
    VP9_IDCT16_1D blockq, 1
    VP9_IDCT16_1D tmpq, 2
    lea dstq, [dst_bakq+8]
; at the end of the loop, m0 should still be zero
; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 16, m0
VP9_IDCT_IDCT_16x16_ADD_XMM sse2
VP9_IDCT_IDCT_16x16_ADD_XMM ssse3
VP9_IDCT_IDCT_16x16_ADD_XMM avx
;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------
%macro VP9_IADST16_1D 2 ; src, pass
    mova m0, [%1+ 0*32] ; in0
    mova m1, [%1+15*32] ; in15
    mova m2, [%1+ 7*32] ; in7
    mova m3, [%1+ 8*32] ; in8
    VP9_UNPACK_MULSUB_2D_4X 1, 0, 4, 5, 16364, 804 ; m1/4=t1[d], m0/5=t0[d]
    VP9_UNPACK_MULSUB_2D_4X 2, 3, 7, 6, 11003, 12140 ; m2/7=t9[d], m3/6=t8[d]
    SCRATCH 4, 8, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 3, 0, 6, 5, 4, [pd_8192] ; m3=t0[w], m0=t8[w]
    UNSCRATCH 4, 8, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 2, 1, 7, 4, 5, [pd_8192] ; m2=t1[w], m1=t9[w]
    SCRATCH 0, 10, tmpq+ 0*%%str
    SCRATCH 1, 11, tmpq+15*%%str
    mova [tmpq+ 7*%%str], m2
    mova [tmpq+ 8*%%str], m3
    mova m1, [%1+ 2*32] ; in2
    mova m0, [%1+13*32] ; in13
    mova m3, [%1+ 5*32] ; in5
    mova m2, [%1+10*32] ; in10
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 15893, 3981 ; m0/6=t3[d], m1/7=t2[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 4, 5, 8423, 14053 ; m3/4=t11[d], m2/5=t10[d]
    SCRATCH 4, 12, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 2, 1, 5, 7, 4, [pd_8192] ; m2=t2[w], m1=t10[w]
    UNSCRATCH 4, 12, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 3, 0, 4, 6, 5, [pd_8192] ; m3=t3[w], m0=t11[w]
    SCRATCH 0, 12, tmpq+ 2*%%str
    SCRATCH 1, 13, tmpq+13*%%str
    mova [tmpq+ 5*%%str], m2
    mova [tmpq+10*%%str], m3
    mova m2, [%1+ 4*32] ; in4
    mova m3, [%1+11*32] ; in11
    mova m0, [%1+ 3*32] ; in3
    mova m1, [%1+12*32] ; in12
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 14811, 7005 ; m3/7=t5[d], m2/6=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 4, 5, 5520, 15426 ; m0/4=t13[d], m1/5=t12[d]
    SCRATCH 4, 9, tmpq+ 4*%%str
    VP9_RND_SH_SUMSUB_BA 1, 2, 5, 6, 4, [pd_8192] ; m1=t4[w], m2=t12[w]
    UNSCRATCH 4, 9, tmpq+ 4*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 4, 7, 6, [pd_8192] ; m0=t5[w], m3=t13[w]
    SCRATCH 0, 8, tmpq+ 4*%%str
    mova [tmpq+11*%%str], m1 ; t4:m1->r11
    UNSCRATCH 0, 10, tmpq+ 0*%%str
    UNSCRATCH 1, 11, tmpq+15*%%str
; round 2 interleaved part 1
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 16069, 3196 ; m1/7=t8[d], m0/6=t9[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 5, 4, 3196, 16069 ; m3/5=t12[d], m2/4=t13[d]
    SCRATCH 4, 9, tmpq+ 3*%%str
    VP9_RND_SH_SUMSUB_BA 3, 1, 5, 7, 4, [pd_8192] ; m3=t8[w], m1=t12[w]
    UNSCRATCH 4, 9, tmpq+ 3*%%str
    VP9_RND_SH_SUMSUB_BA 2, 0, 4, 6, 5, [pd_8192] ; m2=t9[w], m0=t13[w]
    SCRATCH 0, 10, tmpq+ 0*%%str
    SCRATCH 1, 11, tmpq+15*%%str
    SCRATCH 2, 14, tmpq+ 3*%%str
    SCRATCH 3, 15, tmpq+12*%%str
    mova m2, [%1+ 6*32] ; in6
    mova m3, [%1+ 9*32] ; in9
    mova m0, [%1+ 1*32] ; in1
    mova m1, [%1+14*32] ; in14
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 13160, 9760 ; m3/7=t7[d], m2/6=t6[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 4, 5, 2404, 16207 ; m0/4=t15[d], m1/5=t14[d]
    SCRATCH 4, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 1, 2, 5, 6, 4, [pd_8192] ; m1=t6[w], m2=t14[w]
    UNSCRATCH 4, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 4, 7, 6, [pd_8192] ; m0=t7[w], m3=t15[w]
; r8=t0, r7=t1, r5=t2, r10=t3, r11=t4, m8|r4=t5, m1=t6, m0=t7
; m10|r0=t8, m11|r15=t9, m13|r13=t10, m12|r2=t11, m14|r3=t12, m15|r12=t13, m2=t14, m3=t15
    UNSCRATCH 4, 12, tmpq+ 2*%%str
    UNSCRATCH 5, 13, tmpq+13*%%str
    SCRATCH 0, 12, tmpq+ 1*%%str
    SCRATCH 1, 13, tmpq+14*%%str
; remainder of round 2 (rest of t8-15)
    VP9_UNPACK_MULSUB_2D_4X 5, 4, 6, 7, 9102, 13623 ; m5/6=t11[d], m4/7=t10[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 1, 0, 13623, 9102 ; m3/1=t14[d], m2/0=t15[d]
    SCRATCH 0, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 3, 4, 1, 7, 0, [pd_8192] ; m3=t10[w], m4=t14[w]
    UNSCRATCH 0, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 2, 5, 0, 6, 1, [pd_8192] ; m2=t11[w], m5=t15[w]
; m15|r12=t8, m14|r3=t9, m3=t10, m2=t11, m11|r15=t12, m10|r0=t13, m4=t14, m5=t15
    UNSCRATCH 6, 14, tmpq+ 3*%%str
    UNSCRATCH 7, 15, tmpq+12*%%str
    SUMSUB_BA w, 3, 7, 1
    PSIGNW m3, [pw_m1] ; m3=out1[w], m7=t10[w]
    SUMSUB_BA w, 2, 6, 1 ; m2=out14[w], m6=t11[w]
    SUMSUB_BA w, 7, 6, 1
    pmulhrsw m7, [pw_11585x2] ; m7=out6[w]
    pmulhrsw m6, [pw_11585x2] ; m6=out9[w]
    VP9_UNPACK_MULSUB_2W_4X 6, 7, 11585, 11585, [pd_8192], 1, 0
    mova [tmpq+ 3*%%str], m6
    mova [tmpq+ 6*%%str], m7
    UNSCRATCH 6, 10, tmpq+ 0*%%str
    UNSCRATCH 7, 11, tmpq+15*%%str
    mova [tmpq+13*%%str], m2
    SCRATCH 3, 11, tmpq+ 9*%%str
    VP9_UNPACK_MULSUB_2D_4X 7, 6, 2, 3, 15137, 6270 ; m6/3=t13[d], m7/2=t12[d]
    VP9_UNPACK_MULSUB_2D_4X 5, 4, 1, 0, 6270, 15137 ; m5/1=t14[d], m4/0=t15[d]
    SCRATCH 0, 9, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 5, 6, 1, 3, 0, [pd_8192] ; m5=out2[w], m6=t14[w]
    UNSCRATCH 0, 9, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 4, 7, 0, 2, 1, [pd_8192]
    PSIGNW m4, [pw_m1] ; m4=out13[w], m7=t15[w]
    SUMSUB_BA w, 7, 6, 1
    pmulhrsw m7, [pw_m11585x2] ; m7=out5[w]
    pmulhrsw m6, [pw_11585x2] ; m6=out10[w]
    VP9_UNPACK_MULSUB_2W_4X 7, 6, 11585, 11585, [pd_8192], 1, 0
; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, m6=out10, m4=out13, r2=out14
    mova m2, [tmpq+ 8*%%str]
    mova m3, [tmpq+ 7*%%str]
    mova m1, [tmpq+11*%%str]
    mova [tmpq+ 7*%%str], m6
    mova [tmpq+11*%%str], m4
    mova m4, [tmpq+ 5*%%str]
    SCRATCH 5, 14, tmpq+ 5*%%str
    SCRATCH 7, 15, tmpq+ 8*%%str
    UNSCRATCH 6, 8, tmpq+ 4*%%str
    UNSCRATCH 5, 12, tmpq+ 1*%%str
    UNSCRATCH 7, 13, tmpq+14*%%str
; m2=t0, m3=t1, m9=t2, m0=t3, m1=t4, m8=t5, m13=t6, m12=t7
; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14
    SUMSUB_BA w, 1, 2, 0 ; m1=t0[w], m2=t4[w]
    mova m0, [tmpq+10*%%str]
    SCRATCH 1, 12, tmpq+ 1*%%str
    SUMSUB_BA w, 6, 3, 1 ; m8=t1[w], m3=t5[w]
    SCRATCH 6, 13, tmpq+ 4*%%str
    SUMSUB_BA w, 7, 4, 1 ; m13=t2[w], m9=t6[w]
    SCRATCH 7, 8, tmpq+10*%%str
    SUMSUB_BA w, 5, 0, 1 ; m12=t3[w], m0=t7[w]
    SCRATCH 5, 9, tmpq+14*%%str
    VP9_UNPACK_MULSUB_2D_4X 2, 3, 7, 5, 15137, 6270 ; m2/6=t5[d], m3/10=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 4, 1, 6, 6270, 15137 ; m0/14=t6[d], m9/15=t7[d]
    SCRATCH 6, 10, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 1, 5, 6, [pd_8192]
    UNSCRATCH 6, 10, tmpq+ 0*%%str
    PSIGNW m0, [pw_m1] ; m0=out3[w], m3=t6[w]
    VP9_RND_SH_SUMSUB_BA 4, 2, 6, 7, 5, [pd_8192] ; m9=out12[w], m2=t7[w]
    UNSCRATCH 1, 8, tmpq+10*%%str
    UNSCRATCH 5, 9, tmpq+14*%%str
    UNSCRATCH 6, 12, tmpq+ 1*%%str
    UNSCRATCH 7, 13, tmpq+ 4*%%str
    SCRATCH 4, 9, tmpq+14*%%str
    SUMSUB_BA w, 1, 6, 4 ; m13=out0[w], m1=t2[w]
    SUMSUB_BA w, 5, 7, 4
    PSIGNW m5, [pw_m1] ; m12=out15[w], m8=t3[w]
    SUMSUB_BA w, 7, 6, 4
    pmulhrsw m7, [pw_m11585x2] ; m8=out7[w]
    pmulhrsw m6, [pw_11585x2] ; m1=out8[w]
    SUMSUB_BA w, 3, 2, 4
    pmulhrsw m3, [pw_11585x2] ; m3=out4[w]
    pmulhrsw m2, [pw_11585x2] ; m2=out11[w]
    SCRATCH 5, 8, tmpq+10*%%str
    VP9_UNPACK_MULSUB_2W_4X 7, 6, 11585, 11585, [pd_8192], 5, 4
    VP9_UNPACK_MULSUB_2W_4X 2, 3, 11585, 11585, [pd_8192], 5, 4
    UNSCRATCH 5, 8, tmpq+10*%%str
; m13=out0, m0=out3, m3=out4, m8=out7, m1=out8, m2=out11, m9=out12, m12=out15
; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14
    mova m13, [tmpq+ 6*%%str]
    TRANSPOSE8x8W 1, 11, 14, 0, 3, 15, 13, 7, 10
    mova [tmpq+ 0*16], m1
    mova [tmpq+ 2*16], m11
    mova [tmpq+ 4*16], m14
    mova [tmpq+ 6*16], m0
    mova m1, [tmpq+ 3*%%str]
    mova m11, [tmpq+ 7*%%str]
    mova m14, [tmpq+11*%%str]
    mova m0, [tmpq+13*%%str]
    mova [tmpq+ 8*16], m3
    mova [tmpq+10*16], m15
    mova [tmpq+12*16], m13
    mova [tmpq+14*16], m7
    TRANSPOSE8x8W 6, 1, 11, 2, 9, 14, 0, 5, 10
    mova [tmpq+ 1*16], m6
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m11
    mova [tmpq+ 7*16], m2
    mova [tmpq+ 9*16], m9
    mova [tmpq+11*16], m14
    mova [tmpq+13*16], m0
    mova [tmpq+15*16], m5
    mova [tmpq+12*%%str], m2
    mova [tmpq+ 1*%%str], m5
    mova [tmpq+15*%%str], m6
    mova m2, [tmpq+ 9*%%str]
    mova m5, [tmpq+ 5*%%str]
    mova m6, [tmpq+ 8*%%str]
    TRANSPOSE8x8W 1, 2, 5, 0, 3, 6, 4, 7, [tmpq+ 6*%%str], [tmpq+ 8*%%str], 1
    mova [tmpq+ 0*16], m1
    mova [tmpq+ 2*16], m2
    mova [tmpq+ 4*16], m5
    mova [tmpq+ 6*16], m0
    mova [tmpq+10*16], m6
    mova m3, [tmpq+12*%%str]
    mova [tmpq+12*16], m4
    mova m4, [tmpq+14*%%str]
    mova [tmpq+14*16], m7
    mova m0, [tmpq+15*%%str]
    mova m1, [tmpq+ 3*%%str]
    mova m2, [tmpq+ 7*%%str]
    mova m5, [tmpq+11*%%str]
    mova m7, [tmpq+ 1*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+13*%%str], [tmpq+ 9*%%str], 1
    mova [tmpq+ 1*16], m0
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m2
    mova [tmpq+ 7*16], m3
    mova [tmpq+11*16], m5
    mova [tmpq+13*16], m6
    mova [tmpq+15*16], m7
%define ROUND_REG [pw_512]
%define ROUND_REG [pw_32]
    mova m12, [tmpq+ 6*%%str]
    VP9_IDCT8_WRITEx2 1, 11, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 14, 0, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 3, 15, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 12, 7, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m1, [tmpq+ 3*%%str]
    mova m11, [tmpq+ 7*%%str]
    mova m14, [tmpq+11*%%str]
    mova m0, [tmpq+13*%%str]
    VP9_IDCT8_WRITEx2 6, 1, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 11, 2, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 9, 14, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 0, 5, 10, 8, 4, ROUND_REG, 6
    mova [tmpq+ 0*%%str], m2
    mova [tmpq+ 1*%%str], m5
    mova [tmpq+ 2*%%str], m6
    mova m2, [tmpq+ 9*%%str]
    VP9_IDCT8_WRITEx2 1, 2, 5, 6, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 5*%%str]
    VP9_IDCT8_WRITEx2 5, 0, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 8*%%str]
    VP9_IDCT8_WRITEx2 3, 5, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 6*%%str]
    VP9_IDCT8_WRITEx2 5, 7, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+ 2*%%str]
    mova m3, [tmpq+ 3*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+ 7*%%str]
    mova m3, [tmpq+ 0*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+14*%%str]
    mova m3, [tmpq+11*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+13*%%str]
    mova m3, [tmpq+ 1*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
cglobal vp9_%1_%3_16x16_add, 3, 6, 16, 512, dst, stride, block, cnt, dst_bak, tmp
    lea dstq, [dst_bakq+8]
; at the end of the loop, m0 should still be zero
; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 16, m0
%define PSIGNW PSIGNW_MMX
IADST16_FN idct, IDCT16, iadst, IADST16, sse2
IADST16_FN iadst, IADST16, idct, IDCT16, sse2
IADST16_FN iadst, IADST16, iadst, IADST16, sse2
%define PSIGNW PSIGNW_SSSE3
IADST16_FN idct, IDCT16, iadst, IADST16, ssse3
IADST16_FN iadst, IADST16, idct, IDCT16, ssse3
IADST16_FN iadst, IADST16, iadst, IADST16, ssse3
IADST16_FN idct, IDCT16, iadst, IADST16, avx
IADST16_FN iadst, IADST16, idct, IDCT16, avx
IADST16_FN iadst, IADST16, iadst, IADST16, avx
;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_32x32_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------
%macro VP9_IDCT32_1D 2-3 32 ; src, pass, nnzc
%assign %%str 16*%2*%2
; first do t0-15; this can be done identically to the 16x16 idct
    VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq, 2*%%str
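    ; NOTE: the doubled input stride (64*2) feeds only the even-numbered
    ; input rows into the shared 16-point first half; the odd rows are
    ; consumed by the t16-31 computation further down.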
; store everything on stack to make space available for t16-31
; we store interleaved with the output of the second half (t16-31)
; so we don't need to allocate extra stack space
    mova [tmpq+ 0*%%str], m0 ; t0
    mova [tmpq+ 4*%%str], m1 ; t1
    mova [tmpq+ 8*%%str], m2 ; t2
    mova [tmpq+12*%%str], m3 ; t3
    mova [tmpq+16*%%str], m4 ; t4
    mova [tmpq+20*%%str], m5 ; t5
    mova [tmpq+22*%%str], m10 ; t10
    mova [tmpq+18*%%str], m11 ; t11
    mova [tmpq+14*%%str], m12 ; t12
    mova [tmpq+10*%%str], m13 ; t13
    mova [tmpq+ 6*%%str], m14 ; t14
    mova [tmpq+ 2*%%str], m15 ; t15
    mova m0, [tmpq+ 30*%%str]
    UNSCRATCH 1, 6, tmpq+26*%%str
    UNSCRATCH 2, 8, tmpq+24*%%str
    UNSCRATCH 3, 9, tmpq+28*%%str
    SUMSUB_BA w, 1, 3, 4 ; t6, t9
    SUMSUB_BA w, 0, 2, 4 ; t7, t8
    mova [tmpq+24*%%str], m1 ; t6
    mova [tmpq+28*%%str], m0 ; t7
    mova [tmpq+30*%%str], m2 ; t8
    mova [tmpq+26*%%str], m3 ; t9
; second, do t16-31
    pmulhrsw m1, m4, [pw_16364x2] ; t31
    pmulhrsw m4, [pw_804x2] ; t16
    VP9_UNPACK_MULSUB_2W_4X 5, 0, 1, 4, 16069, 3196, [pd_8192], 6, 2 ; t17, t30
    pmulhrsw m3, m7, [pw_m5520x2] ; t19
    pmulhrsw m7, [pw_15426x2] ; t28
    SCRATCH 4, 13, tmpq+ 1*%%str
    SCRATCH 5, 12, tmpq+15*%%str
    VP9_UNPACK_MULSUB_2W_4X 2, 6, 7, 3, 3196, m16069, [pd_8192], 4, 5 ; t18, t29
    pmulhrsw m5, m0, [pw_16364x2]
    pmulhrsw m0, [pw_804x2]
    pmulhrsw m4, m1, [pw_m11003x2]
    pmulhrsw m1, [pw_12140x2]
    VP9_UNPACK_MULSUB_2W_4X 0, 5, 16364, 804, [pd_8192], 2, 3 ; t16, t31
    VP9_UNPACK_MULSUB_2W_4X 4, 1, 11003, 12140, [pd_8192], 2, 3 ; t17, t30
    SUMSUB_BA w, 4, 0, 2
    SUMSUB_BA w, 1, 5, 2
    VP9_UNPACK_MULSUB_2W_4X 5, 0, 16069, 3196, [pd_8192], 2, 3 ; t17, t30
    SCRATCH 4, 13, tmpq+ 1*%%str
    SCRATCH 5, 12, tmpq+15*%%str
    pmulhrsw m7, m3, [pw_14811x2]
    pmulhrsw m3, [pw_7005x2]
    pmulhrsw m6, m2, [pw_m5520x2]
    pmulhrsw m2, [pw_15426x2]
    VP9_UNPACK_MULSUB_2W_4X 3, 7, 14811, 7005, [pd_8192], 4, 5 ; t18, t29
    VP9_UNPACK_MULSUB_2W_4X 6, 2, 5520, 15426, [pd_8192], 4, 5 ; t19, t28
    SUMSUB_BA w, 3, 6, 4
    SUMSUB_BA w, 7, 2, 4
    VP9_UNPACK_MULSUB_2W_4X 2, 6, 3196, m16069, [pd_8192], 4, 5 ; t18, t29
    UNSCRATCH 5, 12, tmpq+15*%%str
    SUMSUB_BA w, 6, 0, 4
    mova [tmpq+25*%%str], m6 ; t19
    UNSCRATCH 4, 13, tmpq+ 1*%%str
    SUMSUB_BA w, 7, 1, 6
    SUMSUB_BA w, 3, 4, 6
    mova [tmpq+23*%%str], m3 ; t16
    SUMSUB_BA w, 2, 5, 6
    VP9_UNPACK_MULSUB_2W_4X 0, 5, 15137, 6270, [pd_8192], 6, 3 ; t18, t29
    VP9_UNPACK_MULSUB_2W_4X 1, 4, 15137, 6270, [pd_8192], 6, 3 ; t19, t28
    SCRATCH 0, 10, tmpq+ 1*%%str
    SCRATCH 1, 11, tmpq+ 7*%%str
    SCRATCH 2, 9, tmpq+ 9*%%str
    SCRATCH 4, 14, tmpq+15*%%str
    SCRATCH 5, 15, tmpq+17*%%str
    SCRATCH 7, 13, tmpq+31*%%str
    pmulhrsw m5, m0, [pw_15893x2] ; t27
    pmulhrsw m0, [pw_3981x2] ; t20
    VP9_UNPACK_MULSUB_2W_4X 1, 4, 5, 0, 9102, 13623, [pd_8192], 7, 2 ; t21, t26
    pmulhrsw m6, m3, [pw_m2404x2] ; t23
    pmulhrsw m3, [pw_16207x2] ; t24
    SCRATCH 5, 8, tmpq+ 5*%%str
    SCRATCH 4, 12, tmpq+11*%%str
    VP9_UNPACK_MULSUB_2W_4X 7, 2, 3, 6, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
    pmulhrsw m1, m4, [pw_15893x2]
    pmulhrsw m4, [pw_3981x2]
    pmulhrsw m0, m5, [pw_m8423x2]
    pmulhrsw m5, [pw_14053x2]
    VP9_UNPACK_MULSUB_2W_4X 4, 1, 15893, 3981, [pd_8192], 2, 3 ; t20, t27
    VP9_UNPACK_MULSUB_2W_4X 0, 5, 8423, 14053, [pd_8192], 2, 3 ; t21, t26
    SUMSUB_BA w, 0, 4, 2
    SUMSUB_BA w, 5, 1, 2
    VP9_UNPACK_MULSUB_2W_4X 1, 4, 9102, 13623, [pd_8192], 2, 3 ; t21, t26
    SCRATCH 5, 8, tmpq+ 5*%%str
    SCRATCH 4, 12, tmpq+11*%%str
    pmulhrsw m3, m6, [pw_13160x2]
    pmulhrsw m6, [pw_9760x2]
    pmulhrsw m2, m7, [pw_m2404x2]
    pmulhrsw m7, [pw_16207x2]
    VP9_UNPACK_MULSUB_2W_4X 6, 3, 13160, 9760, [pd_8192], 4, 5 ; t22, t25
    VP9_UNPACK_MULSUB_2W_4X 2, 7, 2404, 16207, [pd_8192], 4, 5 ; t23, t24
    SUMSUB_BA w, 6, 2, 4
    SUMSUB_BA w, 3, 7, 4
    VP9_UNPACK_MULSUB_2W_4X 7, 2, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31
    UNSCRATCH 4, 12, tmpq+11*%%str
    SUMSUB_BA w, 0, 6, 5
    SUMSUB_BA w, 4, 2, 5
    UNSCRATCH 5, 8, tmpq+ 5*%%str
    SCRATCH 4, 8, tmpq+11*%%str
    SUMSUB_BA w, 1, 7, 4
    SUMSUB_BA w, 5, 3, 4
    SCRATCH 5, 12, tmpq+ 5*%%str
    VP9_UNPACK_MULSUB_2W_4X 3, 6, 6270, m15137, [pd_8192], 4, 5 ; t20, t27
    VP9_UNPACK_MULSUB_2W_4X 2, 7, 6270, m15137, [pd_8192], 4, 5 ; t21, t26
; m8[s]=t16, m9=t17, m5=t18, m4[s]=t19, m12=t20, m13=t21, m1=t22, m0=t23,
; m15=t24, m14=t25, m2=t26, m3=t27, m11=t28, m10=t29, m6=t30, m7=t31
    UNSCRATCH 5, 9, tmpq+ 9*%%str
    mova m4, [tmpq+23*%%str] ; t16
    SUMSUB_BA w, 1, 5, 9
    SUMSUB_BA w, 0, 4, 9
    SUMSUB_BADC w, 1, 5, 0, 4
    mova [tmpq+29*%%str], m1 ; t17
    mova [tmpq+21*%%str], m0 ; t16
    UNSCRATCH 0, 10, tmpq+ 1*%%str
    UNSCRATCH 1, 11, tmpq+ 7*%%str
    SUMSUB_BA w, 2, 0, 9
    SUMSUB_BA w, 3, 1, 9
    SUMSUB_BADC w, 2, 0, 3, 1
    mova [tmpq+ 9*%%str], m2 ; t18
    mova [tmpq+13*%%str], m3 ; t19
    SCRATCH 0, 10, tmpq+23*%%str
    SCRATCH 1, 11, tmpq+27*%%str
    UNSCRATCH 2, 14, tmpq+15*%%str
    UNSCRATCH 3, 15, tmpq+17*%%str
    SUMSUB_BA w, 6, 2, 0
    SUMSUB_BA w, 7, 3, 0
    SCRATCH 6, 14, tmpq+ 3*%%str
    SCRATCH 7, 15, tmpq+ 7*%%str
    UNSCRATCH 0, 8, tmpq+11*%%str
    mova m1, [tmpq+25*%%str] ; t19
    UNSCRATCH 6, 12, tmpq+ 5*%%str
    UNSCRATCH 7, 13, tmpq+31*%%str
    SUMSUB_BA w, 0, 1, 9
    SUMSUB_BA w, 6, 7, 9
    SUMSUB_BADC w, 0, 1, 6, 7
; m0=t16, m1=t17, m2=t18, m3=t19, m11=t20, m10=t21, m9=t22, m8=t23,
; m7=t24, m6=t25, m5=t26, m4=t27, m12=t28, m13=t29, m14=t30, m15=t31
    SUMSUB_BA w, 4, 7, 8
    SUMSUB_BA w, 5, 1, 8
    SUMSUB_BADC w, 4, 7, 5, 1
    pmulhrsw m7, [pw_11585x2]
    pmulhrsw m4, [pw_11585x2]
    pmulhrsw m1, [pw_11585x2]
    pmulhrsw m5, [pw_11585x2]
    mova [tmpq+ 5*%%str], m7 ; t23
    SCRATCH 1, 13, tmpq+25*%%str
    UNSCRATCH 7, 10, tmpq+23*%%str
    UNSCRATCH 1, 11, tmpq+27*%%str
    SUMSUB_BA w, 7, 3, 10
    SUMSUB_BA w, 1, 2, 10
    SUMSUB_BADC w, 7, 3, 1, 2
    pmulhrsw m3, [pw_11585x2]
    pmulhrsw m7, [pw_11585x2]
    pmulhrsw m2, [pw_11585x2]
    pmulhrsw m1, [pw_11585x2]
    SCRATCH 0, 8, tmpq+15*%%str
    SCRATCH 6, 9, tmpq+17*%%str
    VP9_UNPACK_MULSUB_2W_4X 7, 4, 11585, 11585, [pd_8192], 0, 6
    mova [tmpq+ 5*%%str], m7 ; t23
    UNSCRATCH 7, 10, tmpq+23*%%str
    VP9_UNPACK_MULSUB_2W_4X 1, 5, 11585, 11585, [pd_8192], 0, 6
    SCRATCH 1, 13, tmpq+25*%%str
    UNSCRATCH 1, 11, tmpq+27*%%str
    VP9_UNPACK_MULSUB_2W_4X 3, 7, 11585, 11585, [pd_8192], 0, 6
    VP9_UNPACK_MULSUB_2W_4X 2, 1, 11585, 11585, [pd_8192], 0, 6
    UNSCRATCH 0, 8, tmpq+15*%%str
    UNSCRATCH 6, 9, tmpq+17*%%str
; m0=t16, m1=t17, m2=t18, m3=t19, m4=t20, m5=t21, m6=t22, m7=t23,
; m8=t24, m9=t25, m10=t26, m11=t27, m12=t28, m13=t29, m14=t30, m15=t31
; then do final pass to sumsub+store the two halves
    mova [tmpq+17*%%str], m2 ; t20
    mova [tmpq+ 1*%%str], m3 ; t21
    mova [tmpq+25*%%str], m13 ; t22
    mova m8, [tmpq+ 0*%%str] ; t0
    mova m9, [tmpq+ 4*%%str] ; t1
    mova m12, [tmpq+ 8*%%str] ; t2
    mova m11, [tmpq+12*%%str] ; t3
    mova m2, [tmpq+16*%%str] ; t4
    mova m3, [tmpq+20*%%str] ; t5
    mova m13, [tmpq+24*%%str] ; t6
    SUMSUB_BA w, 6, 8, 10
    mova [tmpq+ 3*%%str], m8 ; t15
    mova m10, [tmpq+28*%%str] ; t7
    SUMSUB_BA w, 0, 9, 8
    SUMSUB_BA w, 15, 12, 8
    SUMSUB_BA w, 14, 11, 8
    SUMSUB_BA w, 1, 2, 8
    SUMSUB_BA w, 7, 3, 8
    SUMSUB_BA w, 5, 13, 8
    SUMSUB_BA w, 4, 10, 8
    TRANSPOSE8x8W 6, 0, 15, 14, 1, 7, 5, 4, 8
    mova [tmpq+ 0*%%str], m6
    mova [tmpq+ 4*%%str], m0
    mova [tmpq+ 8*%%str], m15
    mova [tmpq+12*%%str], m14
    mova [tmpq+16*%%str], m1
    mova [tmpq+20*%%str], m7
    mova [tmpq+24*%%str], m5
    mova [tmpq+28*%%str], m4
    mova m8, [tmpq+ 3*%%str] ; t15
    TRANSPOSE8x8W 10, 13, 3, 2, 11, 12, 9, 8, 0
    mova [tmpq+ 3*%%str], m10
    mova [tmpq+ 7*%%str], m13
    mova [tmpq+11*%%str], m3
    mova [tmpq+15*%%str], m2
    mova [tmpq+19*%%str], m11
    mova [tmpq+23*%%str], m12
    mova [tmpq+27*%%str], m9
    mova [tmpq+31*%%str], m8
    mova m15, [tmpq+30*%%str] ; t8
    mova m14, [tmpq+26*%%str] ; t9
    mova m13, [tmpq+22*%%str] ; t10
    mova m12, [tmpq+18*%%str] ; t11
    mova m11, [tmpq+14*%%str] ; t12
    mova m10, [tmpq+10*%%str] ; t13
    mova m9, [tmpq+ 6*%%str] ; t14
    mova m8, [tmpq+ 2*%%str] ; t15
    mova m7, [tmpq+21*%%str] ; t16
    mova m6, [tmpq+29*%%str] ; t17
    mova m5, [tmpq+ 9*%%str] ; t18
    mova m4, [tmpq+13*%%str] ; t19
    mova m3, [tmpq+17*%%str] ; t20
    mova m2, [tmpq+ 1*%%str] ; t21
    mova m1, [tmpq+25*%%str] ; t22
    SUMSUB_BA w, 7, 8, 0
    mova [tmpq+ 2*%%str], m8
    mova m0, [tmpq+ 5*%%str] ; t23
    SUMSUB_BA w, 6, 9, 8
    SUMSUB_BA w, 5, 10, 8
    SUMSUB_BA w, 4, 11, 8
    SUMSUB_BA w, 3, 12, 8
    SUMSUB_BA w, 2, 13, 8
    SUMSUB_BA w, 1, 14, 8
    SUMSUB_BA w, 0, 15, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    mova [tmpq+ 1*%%str], m0
    mova [tmpq+ 5*%%str], m1
    mova [tmpq+ 9*%%str], m2
    mova [tmpq+13*%%str], m3
    mova [tmpq+17*%%str], m4
    mova [tmpq+21*%%str], m5
    mova [tmpq+25*%%str], m6
    mova [tmpq+29*%%str], m7
    mova m8, [tmpq+ 2*%%str]
    TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
    mova [tmpq+ 2*%%str], m8
    mova [tmpq+ 6*%%str], m9
    mova [tmpq+10*%%str], m10
    mova [tmpq+14*%%str], m11
    mova [tmpq+18*%%str], m12
    mova [tmpq+22*%%str], m13
    mova [tmpq+26*%%str], m14
    mova [tmpq+30*%%str], m15
    mova m2, [tmpq+24*%%str] ; t6
    mova m3, [tmpq+28*%%str] ; t7
    SUMSUB_BADC w, 5, 2, 4, 3
    mova [tmpq+24*%%str], m5
    mova [tmpq+23*%%str], m2
    mova [tmpq+28*%%str], m4
    mova [tmpq+19*%%str], m3
    mova m2, [tmpq+16*%%str] ; t4
    mova m3, [tmpq+20*%%str] ; t5
    SUMSUB_BA w, 1, 2, 5
    SUMSUB_BA w, 7, 3, 5
    mova [tmpq+15*%%str], m2
    mova [tmpq+11*%%str], m3
    mova m2, [tmpq+ 0*%%str] ; t0
    mova m3, [tmpq+ 4*%%str] ; t1
    SUMSUB_BA w, 6, 2, 5
    SUMSUB_BA w, 0, 3, 5
    mova [tmpq+31*%%str], m2
    mova [tmpq+27*%%str], m3
    mova m2, [tmpq+ 8*%%str] ; t2
    mova m3, [tmpq+12*%%str] ; t3
    mova m5, [tmpq+ 7*%%str]
    mova m4, [tmpq+ 3*%%str]
    SUMSUB_BADC w, 5, 2, 4, 3
    mova [tmpq+ 7*%%str], m2
    mova [tmpq+ 3*%%str], m3
    mova m3, [tmpq+28*%%str]
    TRANSPOSE8x8W 6, 0, 5, 4, 1, 7, 2, 3, [tmpq+24*%%str], [tmpq+16*%%str], 1
    mova [tmpq+ 0*%%str], m6
    mova [tmpq+ 4*%%str], m0
    mova [tmpq+ 8*%%str], m5
    mova [tmpq+12*%%str], m4
    mova [tmpq+20*%%str], m7
    mova [tmpq+24*%%str], m2
    mova [tmpq+28*%%str], m3
    mova m6, [tmpq+19*%%str]
    mova m0, [tmpq+23*%%str]
    mova m5, [tmpq+11*%%str]
    mova m4, [tmpq+15*%%str]
    mova m1, [tmpq+ 3*%%str]
    mova m7, [tmpq+ 7*%%str]
    mova m3, [tmpq+31*%%str]
    TRANSPOSE8x8W 6, 0, 5, 4, 1, 7, 2, 3, [tmpq+27*%%str], [tmpq+19*%%str], 1
    mova [tmpq+ 3*%%str], m6
    mova [tmpq+ 7*%%str], m0
    mova [tmpq+11*%%str], m5
    mova [tmpq+15*%%str], m4
    mova [tmpq+23*%%str], m7
    mova [tmpq+27*%%str], m2
    mova [tmpq+31*%%str], m3
    mova m1, [tmpq+ 6*%%str] ; t14
    mova m0, [tmpq+ 2*%%str] ; t15
    mova m7, [tmpq+21*%%str] ; t16
    mova m6, [tmpq+29*%%str] ; t17
    SUMSUB_BA w, 7, 0, 2
    SUMSUB_BA w, 6, 1, 2
    mova [tmpq+29*%%str], m7
    mova [tmpq+ 2*%%str], m0
    mova [tmpq+21*%%str], m6
    mova [tmpq+ 6*%%str], m1
    mova m1, [tmpq+14*%%str] ; t12
    mova m0, [tmpq+10*%%str] ; t13
    mova m5, [tmpq+ 9*%%str] ; t18
    mova m4, [tmpq+13*%%str] ; t19
    SUMSUB_BA w, 5, 0, 2
    SUMSUB_BA w, 4, 1, 2
    mova [tmpq+10*%%str], m0
    mova [tmpq+14*%%str], m1
    mova m1, [tmpq+22*%%str] ; t10
    mova m0, [tmpq+18*%%str] ; t11
    mova m3, [tmpq+17*%%str] ; t20
    mova m2, [tmpq+ 1*%%str] ; t21
    SUMSUB_BA w, 3, 0, 6
    SUMSUB_BA w, 2, 1, 6
    mova [tmpq+18*%%str], m0
    mova [tmpq+22*%%str], m1
    mova m7, [tmpq+30*%%str] ; t8
    mova m6, [tmpq+26*%%str] ; t9
    mova m1, [tmpq+25*%%str] ; t22
    mova m0, [tmpq+ 5*%%str] ; t23
    SUMSUB_BADC w, 1, 6, 0, 7
    mova [tmpq+26*%%str], m6
    mova [tmpq+30*%%str], m7
    mova m7, [tmpq+29*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+21*%%str], [tmpq+17*%%str], 1
    mova [tmpq+ 1*%%str], m0
    mova [tmpq+ 5*%%str], m1
    mova [tmpq+ 9*%%str], m2
    mova [tmpq+13*%%str], m3
    mova [tmpq+21*%%str], m5
    mova [tmpq+25*%%str], m6
    mova [tmpq+29*%%str], m7
    mova m0, [tmpq+ 2*%%str]
    mova m1, [tmpq+ 6*%%str]
    mova m2, [tmpq+10*%%str]
    mova m3, [tmpq+14*%%str]
    mova m4, [tmpq+18*%%str]
    mova m5, [tmpq+22*%%str]
    mova m7, [tmpq+30*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+26*%%str], [tmpq+18*%%str], 1
    mova [tmpq+ 2*%%str], m0
    mova [tmpq+ 6*%%str], m1
    mova [tmpq+10*%%str], m2
    mova [tmpq+14*%%str], m3
    mova [tmpq+22*%%str], m5
    mova [tmpq+26*%%str], m6
    mova [tmpq+30*%%str], m7
; t0-7 are in [tmpq+{0,4,8,12,16,20,24,28}*%%str]
; t8-15 are in [tmpq+{2,6,10,14,18,22,26,30}*%%str]
; t16-19 and t23 are in [tmpq+{1,5,9,13,29}*%%str]
; t24-31 are in m8-15
%define ROUND_REG [pw_512]
%define ROUND_REG [pw_32]
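; NOTE: the final 32x32 stage is a mirrored sumsub: output row n is
; t[n] + t[31-n] and output row 31-n is t[n] - t[31-n], so each %%STORE_2X2
; below writes two rows forward from dstq and two rows backward from
; dst_endq.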
%macro %%STORE_2X2 7-8 1 ; src[1-4], tmp[1-2], zero, inc_dst_ptrs
    SUMSUB_BA w, %4, %1, %5
    SUMSUB_BA w, %3, %2, %5
    VP9_IDCT8_WRITEx2 %4, %3, %5, %6, %7, ROUND_REG, 6
    VP9_IDCT8_WRITEx2 %2, %1, %5, %6, %7, ROUND_REG, 6, dst_endq
    sub dst_endq, stride2q
; store t0-1 and t30-31
    mova m8, [tmpq+ 0*%%str]
    mova m9, [tmpq+ 4*%%str]
    %%STORE_2X2 8, 9, 0, 6, 12, 11, 10
; store t2-3 and t28-29
    mova m8, [tmpq+ 8*%%str]
    mova m9, [tmpq+12*%%str]
    %%STORE_2X2 8, 9, 14, 15, 12, 11, 10
; store t4-5 and t26-27
    mova m8, [tmpq+16*%%str]
    mova m9, [tmpq+20*%%str]
    %%STORE_2X2 8, 9, 7, 1, 12, 11, 10
; store t6-7 and t24-25
    mova m8, [tmpq+24*%%str]
    mova m9, [tmpq+28*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10
; store t8-9 and t22-23
    mova m8, [tmpq+30*%%str]
    mova m9, [tmpq+26*%%str]
    mova m0, [tmpq+ 5*%%str]
    %%STORE_2X2 8, 9, 13, 0, 12, 11, 10
; store t10-11 and t20-21
    mova m8, [tmpq+22*%%str]
    mova m9, [tmpq+18*%%str]
    %%STORE_2X2 8, 9, 2, 3, 12, 11, 10
; store t12-13 and t18-19
    mova m8, [tmpq+14*%%str]
    mova m9, [tmpq+10*%%str]
    mova m5, [tmpq+13*%%str]
    mova m4, [tmpq+ 9*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10
    mova m8, [tmpq+ 6*%%str]
    mova m9, [tmpq+ 2*%%str]
    mova m5, [tmpq+29*%%str]
    mova m4, [tmpq+21*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10, 0
    mova [tmpq+ 1*%%str], m1
    mova [tmpq+11*%%str], m2
    mova [tmpq+15*%%str], m3
    mova [tmpq+17*%%str], m4
    mova [tmpq+19*%%str], m5
; store t0-1 and t30-31
    mova m2, [tmpq+ 0*%%str]
    mova m3, [tmpq+ 4*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
; store t2-3 and t28-29
    mova m2, [tmpq+ 8*%%str]
    mova m3, [tmpq+12*%%str]
    mova m0, [tmpq+ 3*%%str]
    mova m6, [tmpq+ 7*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
; store t4-5 and t26-27
    mova m2, [tmpq+16*%%str]
    mova m3, [tmpq+20*%%str]
    mova m0, [tmpq+ 1*%%str]
    %%STORE_2X2 2, 3, 7, 0, 4, 5, 1
; store t6-7 and t24-25
    mova m2, [tmpq+24*%%str]
    mova m3, [tmpq+28*%%str]
    mova m0, [tmpq+17*%%str]
    mova m6, [tmpq+19*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
; store t8-9 and t22-23
    mova m2, [tmpq+30*%%str]
    mova m3, [tmpq+26*%%str]
    mova m0, [tmpq+25*%%str]
    mova m6, [tmpq+ 5*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
; store t10-11 and t20-21
    mova m2, [tmpq+22*%%str]
    mova m3, [tmpq+18*%%str]
    mova m0, [tmpq+11*%%str]
    mova m6, [tmpq+15*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
; store t12-13 and t18-19
    mova m2, [tmpq+14*%%str]
    mova m3, [tmpq+10*%%str]
    mova m6, [tmpq+13*%%str]
    mova m0, [tmpq+ 9*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
    mova m2, [tmpq+ 6*%%str]
    mova m3, [tmpq+ 2*%%str]
    mova m6, [tmpq+29*%%str]
    mova m0, [tmpq+21*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1, 0
%macro VP9_IDCT_IDCT_32x32_ADD_XMM 1
cglobal vp9_idct_idct_32x32_add, 0, 6 + ARCH_X86_64 * 3, 16, 2048, dst, stride, block, eob
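    ; NOTE: the 2048-byte stack buffer holds one 32x32 block of int16
    ; intermediates (32*32*2 bytes) between the two passes.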
    movifnidn eobd, dword eobm
    movifnidn blockq, blockmp
    movifnidn dstq, dstmp
    movifnidn strideq, stridemp
    mova m1, [pw_11585x2]
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (32 << 14) + 8192
    SPLATW m0, m0, q0000
    pmulhrsw m0, [pw_512]
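    ; NOTE: same identity as the 16x16 DC path:
    ; (x*512 + (1<<14))>>15 <=> (x+32)>>6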
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize
    DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
%define dst_bakq r0mp
    DEFINE_ARGS block, u1, u2, u3, u4, tmp
    VP9_IDCT32_1D blockq, 1, 8
    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
%define cntd dword r3m
    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32
    sub stride30q, stride2q ; stride*30
    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2, 8
; at the end of the loop, m7 should still be zero
; use that to zero out block coefficients
    ZERO_BLOCK blockq, 64, 8, m1
    DEFINE_ARGS block, tmp, cnt
    VP9_IDCT32_1D blockq, 1, 16
    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
%define cntd dword r3m
    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32
    sub stride30q, stride2q ; stride*30
    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2, 16
; at the end of the loop, m7 should still be zero
; use that to zero out block coefficients
    ZERO_BLOCK blockq, 64, 16, m1
    DEFINE_ARGS block, tmp, cnt
    VP9_IDCT32_1D blockq, 1
    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
%define cntd dword r3m
    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32
    sub stride30q, stride2q ; stride*30
    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2
; at the end of the loop, m7 should still be zero
; use that to zero out block coefficients
    ZERO_BLOCK blockq, 64, 32, m1
VP9_IDCT_IDCT_32x32_ADD_XMM sse2
VP9_IDCT_IDCT_32x32_ADD_XMM ssse3
VP9_IDCT_IDCT_32x32_ADD_XMM avx