;******************************************************************************
;* VP9 IDCT SIMD optimizations
;*
;* Copyright (C) 2013 Clément Bœsch <u pkh me>
;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

pw_11585x2: times 8 dw 23170
pw_m11585x2: times 8 dw -23170
pw_m11585_11585: times 4 dw -11585, 11585
pw_11585_11585: times 8 dw 11585
pw_m11585_m11585: times 8 dw -11585
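
; The transform coefficients are round(16384 * cos(k*pi/64)) fixed-point
; constants, e.g. 11585 ~= 16384 * cos(pi/4). The *x2 variants are pre-doubled
; so that pmulhrsw, which computes (a*b + 0x4000) >> 15, yields exactly the
; same result as the reference (a*coef + 0x2000) >> 14 rounding multiply.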
%macro VP9_IDCT_COEFFS 2-3 0
pw_%1x2: times 8 dw %1*2
pw_m%1x2: times 8 dw -%1*2
pw_%2x2: times 8 dw %2*2
pw_m%2x2: times 8 dw -%2*2
pw_m%1_%2: times 4 dw -%1, %2
pw_%2_%1: times 4 dw %2, %1
pw_m%2_m%1: times 4 dw -%2, -%1
%if %3 == 1
pw_m%2_%1: times 4 dw -%2, %1
pw_%1_%2: times 4 dw %1, %2
%endif
%endmacro

VP9_IDCT_COEFFS 15137, 6270, 1
VP9_IDCT_COEFFS 16069, 3196, 1
VP9_IDCT_COEFFS 9102, 13623, 1
VP9_IDCT_COEFFS 16305, 1606
VP9_IDCT_COEFFS 10394, 12665
VP9_IDCT_COEFFS 14449, 7723
VP9_IDCT_COEFFS 4756, 15679
VP9_IDCT_COEFFS 16364, 804
VP9_IDCT_COEFFS 11003, 12140
VP9_IDCT_COEFFS 14811, 7005
VP9_IDCT_COEFFS 5520, 15426
VP9_IDCT_COEFFS 15893, 3981
VP9_IDCT_COEFFS 8423, 14053
VP9_IDCT_COEFFS 13160, 9760
VP9_IDCT_COEFFS 2404, 16207

pw_5283_13377: times 4 dw 5283, 13377
pw_9929_13377: times 4 dw 9929, 13377
pw_15212_m13377: times 4 dw 15212, -13377
pw_15212_9929: times 4 dw 15212, 9929
pw_m5283_m15212: times 4 dw -5283, -15212
pw_13377x2: times 8 dw 13377*2
pw_m13377_13377: times 4 dw -13377, 13377
pw_13377_0: times 4 dw 13377, 0

pd_8192: times 4 dd 8192
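
; 8192 = 1 << 13, the rounding bias of the exact 14-bit fixed-point path:
; out = (a*c1 + b*c2 + 8192) >> 14 in the pmaddwd-based macros below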
SECTION .text

; (a*x + b*y + round) >> shift
%macro VP9_MULSUB_2W_2X 5 ; dst1, dst2/src, round, coefs1, coefs2

%macro VP9_MULSUB_2W_4X 7 ; dst1, dst2, coef1, coef2, rnd, tmp1/src, tmp2
    VP9_MULSUB_2W_2X %7, %6, %5, [pw_m%3_%4], [pw_%4_%3]
    VP9_MULSUB_2W_2X %1, %2, %5, [pw_m%3_%4], [pw_%4_%3]
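
; Per output lane, with input word pairs (a, b), these multiply-sum macros
; implement the rotation butterfly used throughout the vp9 idct/iadst:
;   dst1 = (a*coef2 - b*coef1 + 8192) >> 14
;   dst2 = (a*coef1 + b*coef2 + 8192) >> 14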
%macro VP9_UNPACK_MULSUB_2W_4X 7-9 ; dst1, dst2, (src1, src2,) coef1, coef2, rnd, tmp1, tmp2
%if %0 == 7
    punpckhwd m%6, m%2, m%1
    punpcklwd m%2, m%1
    VP9_MULSUB_2W_4X %1, %2, %3, %4, %5, %6, %7
%else
    punpckhwd m%8, m%4, m%3
    punpcklwd m%2, m%4, m%3
    VP9_MULSUB_2W_4X %1, %2, %5, %6, %7, %8, %9
%endif
%endmacro

%macro VP9_UNPACK_MULSUB_2D_4X 6 ; dst1 [src1], dst2 [src2], dst3, dst4, mul1, mul2
    punpckhwd m%4, m%2, m%1
    punpcklwd m%2, m%1
    pmaddwd m%3, m%4, [pw_m%5_%6]
    pmaddwd m%4, [pw_%6_%5]
    pmaddwd m%1, m%2, [pw_m%5_%6]
    pmaddwd m%2, [pw_%6_%5]
%endmacro

%macro VP9_RND_SH_SUMSUB_BA 6 ; dst1 [src1], dst2 [src2], src3, src4, tmp, round
    SUMSUB_BA d, %1, %2, %5
    SUMSUB_BA d, %3, %4, %5
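
; rounds and narrows the 32-bit butterfly results back to words:
;   dst1 = packssdw((A + B + round) >> 14), dst2 = packssdw((A - B + round) >> 14)
; where A is split over src1/src3 and B over src2/src4 as dword halves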
%macro VP9_STORE_2X 5-6 dstq ; reg1, reg2, tmp1, tmp2, zero, dst
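; load two rows of %6 (dst), widen the bytes against %5 (zero), add the
; residual words from reg1/reg2, then repack with unsigned saturation and store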
    movh m%4, [%6+strideq]

    movh [%6+strideq], m%4

%macro ZERO_BLOCK 4 ; mem, stride, nnzcpl, zero_reg
    mova [%1+%%y+%%x], %4
%assign %%x (%%x+mmsize)

;-------------------------------------------------------------------------------------------
; void vp9_iwht_iwht_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------
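
; the inverse Walsh-Hadamard transform is used instead of the dct for lossless blocks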
%macro VP9_IWHT4_1D 0

cglobal vp9_iwht_iwht_4x4_add, 3, 3, 0, dst, stride, block, eob
    mova m0, [blockq+0*8]
    mova m1, [blockq+1*8]
    mova m2, [blockq+2*8]
    mova m3, [blockq+3*8]

    TRANSPOSE4x4W 0, 1, 2, 3, 4

    VP9_STORE_2X 0, 1, 5, 6, 4
    lea dstq, [dstq+strideq*2]
    VP9_STORE_2X 2, 3, 5, 6, 4
    ZERO_BLOCK blockq, 8, 4, m4

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IDCT4_1D_FINALIZE 0
    SUMSUB_BA w, 3, 2, 4 ; m3=t3+t0, m2=-t3+t0
    SUMSUB_BA w, 1, 0, 4 ; m1=t2+t1, m0=-t2+t1
    SWAP 0, 3, 2 ; 3102 -> 0123

%macro VP9_IDCT4_1D 0
    SUMSUB_BA w, 2, 0, 4 ; m2=IN(0)+IN(2) m0=IN(0)-IN(2)
    pmulhrsw m2, m6 ; m2=t0
    pmulhrsw m0, m6 ; m0=t1
    VP9_UNPACK_MULSUB_2W_4X 0, 2, 11585, 11585, m7, 4, 5 ; m0=t1, m2=t0
    VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270, m7, 4, 5 ; m1=t2, m3=t3
    VP9_IDCT4_1D_FINALIZE

; 2x2 top left corner
%macro VP9_IDCT4_2x2_1D 0
    pmulhrsw m0, m5 ; m0=t1

    pmulhrsw m1, m6 ; m1=t2
    pmulhrsw m3, m7 ; m3=t3
    VP9_IDCT4_1D_FINALIZE

%macro VP9_IDCT4_WRITEOUT 0

    pmulhrsw m0, m5 ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4

    VP9_STORE_2X 0, 1, 6, 7, 4
    lea dstq, [dstq+2*strideq]

    VP9_STORE_2X 2, 3, 6, 7, 4

cglobal vp9_idct_idct_4x4_add, 4, 4, 0, dst, stride, block, eob

    cmp eobd, 4 ; 2x2 or smaller

    cmp eobd, 1 ; faster path for when only DC is set

    mova m5, [pw_11585x2]

    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]

    add coefd, (8 << 14) + 8192
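; a lone DC is scaled by (dc*11585 + 8192) >> 14 once per pass; the bias above
; merges the second pass' rounding (8192) with the final (x+8)>>4 output
; rounding (8 << 14) so a single shift finishes the job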
    pmulhrsw m0, [pw_2048] ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4

    VP9_STORE_2X 0, 0, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 0, 0, 6, 7, 4

; faster path for when only top left 2x2 block is set

    mova m5, [pw_11585x2]

    mova m7, [pw_15137x2]

; partial 2x4 transpose

    SBUTTERFLY dq, 0, 2, 1

    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

.idctfull: ; generic full 4x4 idct/idct

    mova m6, [pw_11585x2]

    mova m7, [pd_8192] ; rounding

    TRANSPOSE4x4W 0, 1, 2, 3, 4

    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

;-------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IADST4_1D 0

    pmaddwd xmm1, xmm0, [pw_5283_13377]
    pmaddwd xmm4, xmm0, [pw_9929_13377]
%if notcpuflag(ssse3)
    pmaddwd xmm6, xmm0, [pw_13377_0]
%endif
    pmaddwd xmm0, [pw_15212_m13377]
    pmaddwd xmm3, xmm2, [pw_15212_9929]
%if notcpuflag(ssse3)
    pmaddwd xmm7, xmm2, [pw_m13377_13377]
%endif
    pmaddwd xmm2, [pw_m5283_m15212]

%if notcpuflag(ssse3)

    pmulhrsw m3, [pw_13377x2] ; out2

%if notcpuflag(ssse3)

    movdq2q m0, xmm0 ; out3
    movdq2q m1, xmm1 ; out0
    movdq2q m2, xmm4 ; out1
%if notcpuflag(ssse3)
    movdq2q m3, xmm6 ; out2

cglobal vp9_%1_%3_4x4_add, 3, 3, 0, dst, stride, block, eob
%if WIN64 && notcpuflag(ssse3)

    movdqa xmm5, [pd_8192]

    mova m6, [pw_11585x2]

%ifnidn %1%3, iadstiadst

    TRANSPOSE4x4W 0, 1, 2, 3, 4

    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

IADST4_FN idct, IDCT4, iadst, IADST4, sse2
IADST4_FN iadst, IADST4, idct, IDCT4, sse2
IADST4_FN iadst, IADST4, iadst, IADST4, sse2

IADST4_FN idct, IDCT4, iadst, IADST4, ssse3
IADST4_FN iadst, IADST4, idct, IDCT4, ssse3
IADST4_FN iadst, IADST4, iadst, IADST4, ssse3

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IDCT8_1D_FINALIZE 0
    SUMSUB_BA w, 3, 6, 5 ; m3=t0+t7, m6=t0-t7
    SUMSUB_BA w, 1, 2, 5 ; m1=t1+t6, m2=t1-t6
    SUMSUB_BA w, 7, 0, 5 ; m7=t2+t5, m0=t2-t5

    UNSCRATCH 5, 8, blockq+ 0
    SCRATCH 2, 8, blockq+ 0

    SUMSUB_BA w, 5, 4, 2 ; m5=t3+t4, m4=t3-t4

; x86-32:
; - in: m0/m4 is in mem
; - out: m6 is in mem
; x86-64:
; - everything is in registers (m0-7)
%macro VP9_IDCT8_1D 0

    VP9_UNPACK_MULSUB_2W_4X 5, 3, 9102, 13623, D_8192_REG, 0, 4 ; m5=t5a, m3=t6a
    VP9_UNPACK_MULSUB_2W_4X 1, 7, 16069, 3196, D_8192_REG, 0, 4 ; m1=t4a, m7=t7a
    SUMSUB_BA w, 5, 1, 0 ; m5=t4a+t5a (t4), m1=t4a-t5a (t5a)
    SUMSUB_BA w, 3, 7, 0 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)

    SUMSUB_BA w, 1, 7, 0 ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
    pmulhrsw m1, W_11585x2_REG ; m1=t6
    pmulhrsw m7, W_11585x2_REG ; m7=t5

    VP9_UNPACK_MULSUB_2W_4X 7, 1, 11585, 11585, D_8192_REG, 0, 4

    VP9_UNPACK_MULSUB_2W_4X 2, 6, 15137, 6270, D_8192_REG, 0, 4 ; m2=t2a, m6=t3a

    UNSCRATCH 0, 8, blockq+ 0 ; IN(0)
    UNSCRATCH 4, 9, blockq+64 ; IN(4)
    SCRATCH 5, 8, blockq+ 0

    SUMSUB_BA w, 4, 0, 5 ; m4=IN(0)+IN(4) m0=IN(0)-IN(4)
    pmulhrsw m4, W_11585x2_REG ; m4=t0a
    pmulhrsw m0, W_11585x2_REG ; m0=t1a

    SCRATCH 7, 9, blockq+64
    VP9_UNPACK_MULSUB_2W_4X 0, 4, 11585, 11585, D_8192_REG, 5, 7
    UNSCRATCH 7, 9, blockq+64

    SUMSUB_BA w, 6, 4, 5 ; m6=t0a+t3a (t0), m4=t0a-t3a (t3)
    SUMSUB_BA w, 2, 0, 5 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)

    VP9_IDCT8_1D_FINALIZE

%macro VP9_IDCT8_4x4_1D 0
    pmulhrsw m0, W_11585x2_REG ; m0=t1a/t0a
    pmulhrsw m6, m2, [pw_15137x2] ; m6=t3a
    pmulhrsw m2, [pw_6270x2] ; m2=t2a
    pmulhrsw m7, m1, [pw_16069x2] ; m7=t7a
    pmulhrsw m1, [pw_3196x2] ; m1=t4a
    pmulhrsw m5, m3, [pw_m9102x2] ; m5=t5a
    pmulhrsw m3, [pw_13623x2] ; m3=t6a
    SUMSUB_BA w, 5, 1, 4 ; m1=t4a+t5a (t4), m5=t4a-t5a (t5a)
    SUMSUB_BA w, 3, 7, 4 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
    SUMSUB_BA w, 1, 7, 4 ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
    pmulhrsw m1, W_11585x2_REG ; m1=t6
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    psubw m4, m0, m6 ; m4=t0a-t3a (t3)
    paddw m6, m0 ; m6=t0a+t3a (t0)
    SCRATCH 5, 8, blockq+ 0
    SUMSUB_BA w, 2, 0, 5 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    VP9_IDCT8_1D_FINALIZE

%macro VP9_IDCT8_2x2_1D 1
    pmulhrsw m0, W_11585x2_REG ; m0=t0
    pmulhrsw m3, m1, W_16069x2_REG ; m3=t7
    pmulhrsw m1, W_3196x2_REG ; m1=t4
    psubw m7, m3, m1 ; t5 = t7a - t4a
    paddw m5, m3, m1 ; t6 = t7a + t4a
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    pmulhrsw m5, W_11585x2_REG ; m5=t6

; merged VP9_IDCT8_1D_FINALIZE to make register-sharing w/ avx easier
    psubw m6, m0, m3 ; m6=t0-t7
    paddw m3, m0 ; m3=t0+t7
    psubw m2, m0, m1 ; m2=t1-t6
    paddw m1, m0 ; m1=t1+t6

%define SCRATCH_REG 1

%define SCRATCH_REG 2

%define SCRATCH_REG 8

    psubw m4, m0, m5 ; m4=t3-t4
    paddw m5, m0 ; m5=t3+t4
    SUMSUB_BA w, 7, 0, SCRATCH_REG ; m7=t2+t5, m0=t2-t5

%macro VP9_IDCT8_WRITEx2 6-8 5 ; line1, line2, tmp1, tmp2, zero, pw_1024/pw_16, shift

    pmulhrsw m%1, %6 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5

    VP9_STORE_2X %1, %2, %3, %4, %5

    VP9_STORE_2X %1, %2, %3, %4, %5, %8

; - m8 holds m6 (SWAP)

%macro VP9_IDCT8_WRITEOUT 0

%define ROUND_REG [pw_1024]

%define ROUND_REG [pw_16]
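
; ssse3 uses pmulhrsw with pw_1024, i.e. (x*1024 + (1<<14)) >> 15 == (x+16) >> 5;
; the sse2 fallback adds pw_16 and shifts right by 5 explicitly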
    SCRATCH 5, 10, blockq+16
    SCRATCH 7, 11, blockq+32
    VP9_IDCT8_WRITEx2 0, 1, 5, 7, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2 2, 3, 5, 7, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    UNSCRATCH 5, 10, blockq+16
    UNSCRATCH 7, 11, blockq+32
    VP9_IDCT8_WRITEx2 4, 5, 0, 1, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    UNSCRATCH 5, 8, blockq+ 0
    VP9_IDCT8_WRITEx2 5, 7, 0, 1, 6, ROUND_REG

%macro VP9_IDCT_IDCT_8x8_ADD_XMM 2

cglobal vp9_idct_idct_8x8_add, 4, 4, %2, dst, stride, block, eob

    mova m12, [pw_11585x2] ; often used
%define W_11585x2_REG m12

%define W_11585x2_REG [pw_11585x2]

    cmp eobd, 12 ; top left half or less

    cmp eobd, 3 ; top left corner or less

    cmp eobd, 1 ; faster path for when only DC is set
    jne .idcttopleftcorner

    pmulhrsw m0, W_11585x2_REG
    pmulhrsw m0, W_11585x2_REG

    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]

    add coefd, (16 << 14) + 8192
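; same folding as in the 4x4 DC path, but with the 8x8 output rounding
; (x+16)>>5, hence the 16 << 14 bias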
    pmulhrsw m0, [pw_1024] ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5

    VP9_STORE_2X 0, 0, 6, 7, 4
    lea dstq, [dstq+2*strideq]

    VP9_STORE_2X 0, 0, 6, 7, 4
; faster path for when only the top-left corner is set (3 inputs: DC, the
; coefficient right of DC, and the one below it). Note: also works with a 2x2 block
    mova m10, [pw_3196x2]
    mova m11, [pw_16069x2]
%define W_3196x2_REG m10
%define W_16069x2_REG m11

%define W_3196x2_REG [pw_3196x2]
%define W_16069x2_REG [pw_16069x2]

; partial 2x8 transpose
; punpcklwd m0, m1 already done inside idct

    SBUTTERFLY qdq, 0, 4, 1

    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X

    movh m0, [blockq + 0]
    movh m1, [blockq +16]
    movh m2, [blockq +32]
    movh m3, [blockq +48]

; partial 4x8 transpose

    SBUTTERFLY dq, 0, 2, 1
    SBUTTERFLY dq, 4, 6, 5
    SBUTTERFLY qdq, 0, 4, 1
    SBUTTERFLY qdq, 2, 6, 5

.idctfull: ; generic full 8x8 idct/idct

    mova m0, [blockq+ 0] ; IN(0)

    mova m1, [blockq+ 16] ; IN(1)
    mova m2, [blockq+ 32] ; IN(2)
    mova m3, [blockq+ 48] ; IN(3)

    mova m4, [blockq+ 64] ; IN(4)

    mova m5, [blockq+ 80] ; IN(5)
    mova m6, [blockq+ 96] ; IN(6)
    mova m7, [blockq+112] ; IN(7)

    mova m11, [pd_8192] ; rounding
%define D_8192_REG m11

%define D_8192_REG [pd_8192]

    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8

    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1

    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X

    ZERO_BLOCK blockq, 16, 8, m6

VP9_IDCT_IDCT_8x8_ADD_XMM sse2, 12
VP9_IDCT_IDCT_8x8_ADD_XMM ssse3, 13
VP9_IDCT_IDCT_8x8_ADD_XMM avx, 13

;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

; x86-32:
; - in: m0/3/4/7 are in mem [blockq+N*16]
; - out: m6 is in mem [blockq+0]
; x86-64:
; - everything is in registers
%macro VP9_IADST8_1D 0 ; input/output=m0/1/2/3/4/5/6/7

    VP9_UNPACK_MULSUB_2D_4X 5, 2, 0, 3, 14449, 7723 ; m5/2=t3[d], m2/4=t2[d]
    VP9_UNPACK_MULSUB_2D_4X 1, 6, 4, 7, 4756, 15679 ; m1/4=t7[d], m6/7=t6[d]
    SCRATCH 4, 12, blockq+1*16
    VP9_RND_SH_SUMSUB_BA 6, 2, 7, 3, 4, D_8192_REG ; m6=t2[w], m2=t6[w]
    UNSCRATCH 4, 12, blockq+1*16
    VP9_RND_SH_SUMSUB_BA 1, 5, 4, 0, 3, D_8192_REG ; m1=t3[w], m5=t7[w]

    UNSCRATCH 0, 8, blockq+16*0
    UNSCRATCH 3, 9, blockq+16*3
    UNSCRATCH 4, 10, blockq+16*4
    UNSCRATCH 7, 11, blockq+16*7
    SCRATCH 1, 8, blockq+16*1
    SCRATCH 2, 9, blockq+16*2
    SCRATCH 5, 10, blockq+16*5
    SCRATCH 6, 11, blockq+16*6

    VP9_UNPACK_MULSUB_2D_4X 7, 0, 1, 2, 16305, 1606 ; m7/1=t1[d], m0/2=t0[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 4, 5, 6, 10394, 12665 ; m3/5=t5[d], m4/6=t4[d]
    SCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 4, 0, 6, 2, 1, D_8192_REG ; m4=t0[w], m0=t4[w]
    UNSCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 3, 7, 5, 1, 2, D_8192_REG ; m3=t1[w], m7=t5[w]

    UNSCRATCH 2, 9, blockq+16*2
    UNSCRATCH 5, 10, blockq+16*5
    SCRATCH 3, 9, blockq+16*3
    SCRATCH 4, 10, blockq+16*4

; m4=t0, m3=t1, m6=t2, m1=t3, m0=t4, m7=t5, m2=t6, m5=t7

    VP9_UNPACK_MULSUB_2D_4X 0, 7, 1, 3, 15137, 6270 ; m0/1=t5[d], m7/3=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 5, 2, 4, 6, 6270, 15137 ; m5/4=t6[d], m2/6=t7[d]
    SCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 5, 7, 4, 3, 1, D_8192_REG
    UNSCRATCH 1, 12, blockq+ 0*16
    PSIGNW m5, W_M1_REG ; m5=out1[w], m7=t6[w]
    VP9_RND_SH_SUMSUB_BA 2, 0, 6, 1, 3, D_8192_REG ; m2=out6[w], m0=t7[w]

    UNSCRATCH 1, 8, blockq+16*1
    UNSCRATCH 3, 9, blockq+16*3
    UNSCRATCH 4, 10, blockq+16*4
    UNSCRATCH 6, 11, blockq+16*6
    SCRATCH 2, 8, blockq+16*0

    SUMSUB_BA w, 6, 4, 2 ; m6=out0[w], m4=t2[w]

    PSIGNW m1, W_M1_REG ; m1=out7[w], m3=t3[w]

; m6=out0, m5=out1, m4=t2, m3=t3, m7=t6, m0=t7, m2=out6, m1=out7

; unfortunately, the code below overflows in some cases
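; (the word-level SUMSUB_BA can wrap around int16 before the pmulhrsw scaling,
; so the exact 32-bit VP9_UNPACK_MULSUB_2W_4X variant is used instead)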
%if 0; cpuflag(ssse3)

    pmulhrsw m3, W_11585x2_REG
    pmulhrsw m7, W_11585x2_REG
    pmulhrsw m4, W_11585x2_REG ; out4
    pmulhrsw m0, W_11585x2_REG ; out2

    SCRATCH 5, 9, blockq+16*1
    VP9_UNPACK_MULSUB_2W_4X 4, 3, 11585, 11585, D_8192_REG, 2, 5
    VP9_UNPACK_MULSUB_2W_4X 7, 0, 11585, 11585, D_8192_REG, 2, 5
    UNSCRATCH 5, 9, blockq+16*1

    PSIGNW m3, W_M1_REG ; out3
    PSIGNW m7, W_M1_REG ; out5

; m6=out0, m5=out1, m0=out2, m3=out3, m4=out4, m7=out5, m2=out6, m1=out7

cglobal vp9_%1_%3_8x8_add, 3, 3, %6, dst, stride, block, eob

%define first_is_idct 1

%define first_is_idct 0

%define second_is_idct 1

%define second_is_idct 0

    mova m0, [blockq+ 0] ; IN(0)

    mova m1, [blockq+ 16] ; IN(1)
    mova m2, [blockq+ 32] ; IN(2)
%if ARCH_X86_64 || first_is_idct
    mova m3, [blockq+ 48] ; IN(3)

    mova m4, [blockq+ 64] ; IN(4)

    mova m5, [blockq+ 80] ; IN(5)
    mova m6, [blockq+ 96] ; IN(6)
%if ARCH_X86_64 || first_is_idct
    mova m7, [blockq+112] ; IN(7)

    mova m15, [pw_11585x2] ; often used

    mova m13, [pd_8192] ; rounding

%define W_11585x2_REG m15
%define D_8192_REG m13

%define W_11585x2_REG [pw_11585x2]
%define D_8192_REG [pd_8192]
%define W_M1_REG [pw_m1]

; note different calling conventions for idct8 vs. iadst8 on x86-32

    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8

    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1

%if second_is_idct == 0
    mova [blockq+ 48], m3
    mova [blockq+112], m7

    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X

    ZERO_BLOCK blockq, 16, 8, m6

%undef second_is_idct

IADST8_FN idct, IDCT8, iadst, IADST8, sse2, 15
IADST8_FN iadst, IADST8, idct, IDCT8, sse2, 15
IADST8_FN iadst, IADST8, iadst, IADST8, sse2, 15
IADST8_FN idct, IDCT8, iadst, IADST8, ssse3, 16
IADST8_FN idct, IDCT8, iadst, IADST8, avx, 16
IADST8_FN iadst, IADST8, idct, IDCT8, ssse3, 16
IADST8_FN iadst, IADST8, idct, IDCT8, avx, 16
IADST8_FN iadst, IADST8, iadst, IADST8, ssse3, 16
IADST8_FN iadst, IADST8, iadst, IADST8, avx, 16

;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

; at the end of this macro, m7 is stored in [%4+15*%5]
; everything else (t0-6 and t8-15) is stored in m0-6 and m8-15
; the following sumsubs have not been done yet:
; SUMSUB_BA w, 6, 9, 15 ; t6, t9
; SUMSUB_BA w, 7, 8, 15 ; t7, t8
; or (x86-32) t0-t5 are in m0-m5, t10-t15 are in x11/9/7/5/3/1,
; and the following sumsubs have not been done yet:
; SUMSUB_BA w, x13, x14, 7 ; t6, t9
; SUMSUB_BA w, x15, x12, 7 ; t7, t8

%macro VP9_IDCT16_1D_START 6 ; src, nnzc, stride, scratch, scratch_stride, is_iadst

    mova m3, [%1+ 1*%3] ; IN(1)
    mova m0, [%1+ 3*%3] ; IN(3)

    pmulhrsw m4, m3, [pw_16305x2] ; t14-15
    pmulhrsw m3, [pw_1606x2] ; t8-9
    pmulhrsw m7, m0, [pw_m4756x2] ; t10-11
    pmulhrsw m0, [pw_15679x2] ; t12-13

; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15

    VP9_UNPACK_MULSUB_2W_4X 2, 5, 4, 3, 15137, 6270, [pd_8192], 1, 6 ; t9, t14
    SCRATCH 4, 10, %4+ 1*%5
    SCRATCH 5, 11, %4+ 7*%5
    VP9_UNPACK_MULSUB_2W_4X 6, 1, 0, 7, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
    UNSCRATCH 5, 11, %4+ 7*%5

; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15

    mova m5, [%1+ 1*%3] ; IN(1)
    mova m4, [%1+ 7*%3] ; IN(7)

    pmulhrsw m2, m5, [pw_16305x2] ; t15
    pmulhrsw m5, [pw_1606x2] ; t8
    pmulhrsw m3, m4, [pw_m10394x2] ; t9
    pmulhrsw m4, [pw_12665x2] ; t14

    mova m3, [%1+ 9*%3] ; IN(9)
    mova m2, [%1+15*%3] ; IN(15)

; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
; m11=in8, m3=in9, m12=in10 m0=in11, m8=in12, m1=in13, m13=in14, m2=in15

    VP9_UNPACK_MULSUB_2W_4X 5, 2, 16305, 1606, [pd_8192], 0, 1 ; t8, t15
    VP9_UNPACK_MULSUB_2W_4X 3, 4, 10394, 12665, [pd_8192], 0, 1 ; t9, t14

    SUMSUB_BA w, 3, 5, 0 ; t8, t9
    SUMSUB_BA w, 4, 2, 0 ; t15, t14

    VP9_UNPACK_MULSUB_2W_4X 2, 5, 15137, 6270, [pd_8192], 0, 1 ; t9, t14

    SCRATCH 4, 10, %4+ 1*%5
    SCRATCH 5, 11, %4+ 7*%5

    mova m6, [%1+ 3*%3] ; IN(3)
    mova m7, [%1+ 5*%3] ; IN(5)

    pmulhrsw m0, m7, [pw_14449x2] ; t13
    pmulhrsw m7, [pw_7723x2] ; t10
    pmulhrsw m1, m6, [pw_m4756x2] ; t11
    pmulhrsw m6, [pw_15679x2] ; t12

    mova m0, [%1+11*%3] ; IN(11)
    mova m1, [%1+13*%3] ; IN(13)

    VP9_UNPACK_MULSUB_2W_4X 7, 0, 14449, 7723, [pd_8192], 4, 5 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X 1, 6, 4756, 15679, [pd_8192], 4, 5 ; t11, t12

; m11=t0, m10=t1, m9=t2, m8=t3, m14=t4, m12=t5, m15=t6, m13=t7
; m5=t8, m3=t9, m7=t10, m1=t11, m6=t12, m0=t13, m4=t14, m2=t15

    SUMSUB_BA w, 7, 1, 4 ; t11, t10
    SUMSUB_BA w, 0, 6, 4 ; t12, t13

; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15

    VP9_UNPACK_MULSUB_2W_4X 6, 1, 6270, m15137, [pd_8192], 4, 5 ; t10, t13

    UNSCRATCH 5, 11, %4+ 7*%5

; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m13=t5, m14=t6, m15=t7
; m3=t8, m2=t9, m6=t10, m7=t11, m0=t12, m1=t13, m5=t14, m4=t15

    SUMSUB_BA w, 7, 3, 4 ; t8, t11

; backup first register

    SUMSUB_BA w, 6, 2, 7 ; t9, t10
    UNSCRATCH 4, 10, %4+ 1*%5
    SUMSUB_BA w, 0, 4, 7 ; t15, t12
    SUMSUB_BA w, 1, 5, 7 ; t14, t13
; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15

%if cpuflag(ssse3) && %6 == 0
    SUMSUB_BA w, 2, 5, 7
    SUMSUB_BA w, 3, 4, 7
    pmulhrsw m5, [pw_11585x2] ; t10
    pmulhrsw m4, [pw_11585x2] ; t11
    pmulhrsw m3, [pw_11585x2] ; t12
    pmulhrsw m2, [pw_11585x2] ; t13

    SCRATCH 6, 10, %4+ 1*%5
    VP9_UNPACK_MULSUB_2W_4X 5, 2, 11585, 11585, [pd_8192], 6, 7 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X 4, 3, 11585, 11585, [pd_8192], 6, 7 ; t11, t12
    UNSCRATCH 6, 10, %4+ 1*%5

; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
; m7=t8, m6=t9, m5=t10, m4=t11, m3=t12, m2=t13, m1=t14, m0=t15

    SCRATCH 0, 8, %4+ 1*%5
    SCRATCH 1, 9, %4+ 3*%5
    SCRATCH 2, 10, %4+ 5*%5
    SCRATCH 3, 11, %4+ 7*%5
    SCRATCH 4, 12, %4+ 9*%5
    SCRATCH 5, 13, %4+11*%5
    SCRATCH 6, 14, %4+13*%5

    mova m3, [%1+ 0*%3] ; IN(0)
    mova m4, [%1+ 2*%3] ; IN(2)

    pmulhrsw m3, [pw_11585x2] ; t0-t3
    pmulhrsw m7, m4, [pw_16069x2] ; t6-7
    pmulhrsw m4, [pw_3196x2] ; t4-5

%if 0 ; overflows :(

    pmulhrsw m5, [pw_11585x2] ; t5
    pmulhrsw m6, [pw_11585x2] ; t6

    VP9_UNPACK_MULSUB_2W_4X 5, 6, 7, 4, 11585, 11585, [pd_8192], 0, 1 ; t5, t6

    SCRATCH 7, 15, %4+12*%5

    mova m6, [%1+ 2*%3] ; IN(2)
    mova m1, [%1+ 4*%3] ; IN(4)
    mova m7, [%1+ 6*%3] ; IN(6)

    pmulhrsw m0, m1, [pw_15137x2] ; t3
    pmulhrsw m1, [pw_6270x2] ; t2
    pmulhrsw m5, m6, [pw_16069x2] ; t7
    pmulhrsw m6, [pw_3196x2] ; t4
    pmulhrsw m4, m7, [pw_m9102x2] ; t5
    pmulhrsw m7, [pw_13623x2] ; t6

    mova m4, [%1+10*%3] ; IN(10)
    mova m0, [%1+12*%3] ; IN(12)
    mova m5, [%1+14*%3] ; IN(14)

    VP9_UNPACK_MULSUB_2W_4X 1, 0, 15137, 6270, [pd_8192], 2, 3 ; t2, t3
    VP9_UNPACK_MULSUB_2W_4X 6, 5, 16069, 3196, [pd_8192], 2, 3 ; t4, t7
    VP9_UNPACK_MULSUB_2W_4X 4, 7, 9102, 13623, [pd_8192], 2, 3 ; t5, t6

    SUMSUB_BA w, 4, 6, 2 ; t4, t5
    SUMSUB_BA w, 7, 5, 2 ; t7, t6

%if cpuflag(ssse3) && %6 == 0
    SUMSUB_BA w, 6, 5, 2
    pmulhrsw m5, [pw_11585x2] ; t5
    pmulhrsw m6, [pw_11585x2] ; t6

    VP9_UNPACK_MULSUB_2W_4X 5, 6, 11585, 11585, [pd_8192], 2, 3 ; t5, t6

    SCRATCH 5, 15, %4+10*%5
    mova m2, [%1+ 0*%3] ; IN(0)

    pmulhrsw m2, [pw_11585x2] ; t0 and t1

    SUMSUB_BA w, 7, 0, 5 ; t0, t7

    mova m3, [%1+ 8*%3] ; IN(8)

; from 3 stages back
%if cpuflag(ssse3) && %6 == 0
    SUMSUB_BA w, 3, 2, 5
    pmulhrsw m3, [pw_11585x2] ; t0
    pmulhrsw m2, [pw_11585x2] ; t1

    VP9_UNPACK_MULSUB_2W_4X 2, 3, 11585, 11585, [pd_8192], 5, 0 ; t0, t1

; from 2 stages back
    SUMSUB_BA w, 0, 3, 5 ; t0, t3

    SUMSUB_BA w, 7, 0, 5 ; t0, t7

    UNSCRATCH 5, 15, %4+10*%5

    SCRATCH 7, 15, %4+12*%5
    SUMSUB_BA w, 1, 2, 7 ; t1, t2

    SUMSUB_BA w, 6, 1, 7 ; t1, t6
    SUMSUB_BA w, 5, 2, 7 ; t2, t5

    SUMSUB_BA w, 4, 3, 7 ; t3, t4

    SUMSUB_BA w, 0, 15, 7 ; t0, t15
    SUMSUB_BA w, 1, 14, 7 ; t1, t14
    SUMSUB_BA w, 2, 13, 7 ; t2, t13
    SUMSUB_BA w, 3, 12, 7 ; t3, t12
    SUMSUB_BA w, 4, 11, 7 ; t4, t11
    SUMSUB_BA w, 5, 10, 7 ; t5, t10

%macro %%SUMSUB_BA_STORE 5 ; reg, from_mem, to_mem, scratch, scratch_stride

    SUMSUB_BA w, 6, %1, 7

%%SUMSUB_BA_STORE 0, 1, 1, %4, %5 ; t0, t15
%%SUMSUB_BA_STORE 1, 3, 3, %4, %5 ; t1, t14
%%SUMSUB_BA_STORE 2, 5, 5, %4, %5 ; t2, t13
%%SUMSUB_BA_STORE 3, 7, 7, %4, %5 ; t3, t12
%%SUMSUB_BA_STORE 4, 9, 9, %4, %5 ; t4, t11
%%SUMSUB_BA_STORE 5, 11, 11, %4, %5 ; t5, t10

%macro VP9_IDCT16_1D 2-4 16, 1 ; src, pass, nnzc, is_iadst

    VP9_IDCT16_1D_START %1, %3, 32, tmpq, 16, %4

; backup a different register
    mova m7, [tmpq+15*16]
    mova [tmpq+ 1*16], m15

    SUMSUB_BA w, 6, 9, 15 ; t6, t9
    SUMSUB_BA w, 7, 8, 15 ; t7, t8

    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 15

    mova m15, [tmpq+ 1*16]
    TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0

    mova [tmpq+ 80], m10
    mova [tmpq+112], m11
    mova [tmpq+144], m12
    mova [tmpq+176], m13
    mova [tmpq+208], m14
    mova [tmpq+240], m15

    mova m6, [tmpq+13*16]
    mova m7, [tmpq+14*16]
    SUMSUB_BA w, 6, 7 ; t6, t9
    mova [tmpq+14*16], m6
    mova [tmpq+13*16], m7
    mova m7, [tmpq+15*16]
    mova m6, [tmpq+12*16]
    SUMSUB_BA w, 7, 6 ; t7, t8
    mova [tmpq+15*16], m6

    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+14*16], [tmpq+ 8*16], 1
    mova [tmpq+ 0*16], m0
    mova [tmpq+ 2*16], m1
    mova [tmpq+ 4*16], m2
    mova [tmpq+ 6*16], m3
    mova [tmpq+10*16], m5
    mova [tmpq+12*16], m6
    mova [tmpq+14*16], m7

    mova m0, [tmpq+15*16]
    mova m1, [tmpq+13*16]
    mova m2, [tmpq+11*16]
    mova m3, [tmpq+ 9*16]
    mova m4, [tmpq+ 7*16]
    mova m5, [tmpq+ 5*16]
    mova m7, [tmpq+ 1*16]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+ 3*16], [tmpq+ 9*16], 1
    mova [tmpq+ 1*16], m0
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m2
    mova [tmpq+ 7*16], m3
    mova [tmpq+11*16], m5
    mova [tmpq+13*16], m6
    mova [tmpq+15*16], m7

    VP9_IDCT16_1D_START %1, %3, 32, %1, 32, %4

%define ROUND_REG [pw_512]

%define ROUND_REG [pw_32]

; backup more registers

    VP9_IDCT8_WRITEx2 0, 1, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 2, 3, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 4, 5, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]

; restore from cache
    SWAP 0, 7 ; move zero from m7 to m0

    SUMSUB_BA w, 6, 9, 3 ; t6, t9
    SUMSUB_BA w, 7, 8, 3 ; t7, t8

    VP9_IDCT8_WRITEx2 6, 7, 3, 4, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 8, 9, 3, 4, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 10, 11, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 12, 13, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 14, 15, 1, 2, 0, ROUND_REG, 6

    mova [tmpq+ 0*32], m5

    VP9_IDCT8_WRITEx2 0, 1, 5, 6, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 2, 3, 5, 6, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]

    SWAP 0, 7 ; move zero from m7 to m0
    mova m5, [tmpq+ 0*32]

    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]

    mova m4, [tmpq+13*32]
    mova m7, [tmpq+14*32]
    mova m5, [tmpq+15*32]
    mova m6, [tmpq+12*32]
    SUMSUB_BADC w, 4, 7, 5, 6, 1

    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]

    mova m4, [tmpq+11*32]
    mova m5, [tmpq+ 9*32]
    mova m6, [tmpq+ 7*32]
    mova m7, [tmpq+ 5*32]

    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]

    mova m4, [tmpq+ 3*32]
    mova m5, [tmpq+ 1*32]

    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]

%macro VP9_STORE_2XFULL 6-7 strideq ; dc, tmp1, tmp2, tmp3, tmp4, zero, stride

    punpcklbw m%2, m%3, m%6

    punpcklbw m%4, m%5, m%6

%macro VP9_IDCT_IDCT_16x16_ADD_XMM 1

cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob

; 2x2=eob=3, 4x4=eob=10

    cmp eobd, 1 ; faster path for when only DC is set

    cmp eobd, 1 ; faster path for when only DC is set

    mova m1, [pw_11585x2]

    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]

    add coefd, (32 << 14) + 8192
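; the 16x16 output rounding is (x+32)>>6 (pmulhrsw with pw_512), hence the
; 32 << 14 bias merged with the second pass' 8192 rounding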
    SPLATW m0, m0, q0000

    pmulhrsw m0, [pw_512]

    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
    lea dstq, [dstq+2*strideq]

    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5

    DEFINE_ARGS dst, stride, block, cnt, dst_bak, tmp

    VP9_IDCT16_1D blockq, 1, 8, 0

    VP9_IDCT16_1D tmpq, 2, 8, 0
    lea dstq, [dst_bakq+8]

; at the end of the loop, m0 should still be zero
; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 8, m0

    VP9_IDCT16_1D blockq, 1, 16, 0

    VP9_IDCT16_1D tmpq, 2, 16, 0
    lea dstq, [dst_bakq+8]

; at the end of the loop, m0 should still be zero
; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 16, m0

VP9_IDCT_IDCT_16x16_ADD_XMM sse2
VP9_IDCT_IDCT_16x16_ADD_XMM ssse3
VP9_IDCT_IDCT_16x16_ADD_XMM avx

;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IADST16_1D 2 ; src, pass

    mova m0, [%1+ 0*32] ; in0
    mova m1, [%1+15*32] ; in15
    mova m2, [%1+ 7*32] ; in7
    mova m3, [%1+ 8*32] ; in8

    VP9_UNPACK_MULSUB_2D_4X 1, 0, 4, 5, 16364, 804 ; m1/4=t1[d], m0/5=t0[d]
    VP9_UNPACK_MULSUB_2D_4X 2, 3, 7, 6, 11003, 12140 ; m2/7=t9[d], m3/6=t8[d]
    SCRATCH 4, 8, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 3, 0, 6, 5, 4, [pd_8192] ; m3=t0[w], m0=t8[w]
    UNSCRATCH 4, 8, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 2, 1, 7, 4, 5, [pd_8192] ; m2=t1[w], m1=t9[w]

    SCRATCH 0, 10, tmpq+ 0*%%str
    SCRATCH 1, 11, tmpq+15*%%str
    mova [tmpq+ 7*%%str], m2
    mova [tmpq+ 8*%%str], m3

    mova m1, [%1+ 2*32] ; in2
    mova m0, [%1+13*32] ; in13
    mova m3, [%1+ 5*32] ; in5
    mova m2, [%1+10*32] ; in10

    VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 15893, 3981 ; m0/6=t3[d], m1/7=t2[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 4, 5, 8423, 14053 ; m3/4=t11[d], m2/5=t10[d]
    SCRATCH 4, 12, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 2, 1, 5, 7, 4, [pd_8192] ; m2=t2[w], m1=t10[w]
    UNSCRATCH 4, 12, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 3, 0, 4, 6, 5, [pd_8192] ; m3=t3[w], m0=t11[w]

    SCRATCH 0, 12, tmpq+ 2*%%str
    SCRATCH 1, 13, tmpq+13*%%str
    mova [tmpq+ 5*%%str], m2
    mova [tmpq+10*%%str], m3

    mova m2, [%1+ 4*32] ; in4
    mova m3, [%1+11*32] ; in11
    mova m0, [%1+ 3*32] ; in3
    mova m1, [%1+12*32] ; in12

    VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 14811, 7005 ; m3/7=t5[d], m2/6=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 4, 5, 5520, 15426 ; m0/4=t13[d], m1/5=t12[d]
    SCRATCH 4, 9, tmpq+ 4*%%str
    VP9_RND_SH_SUMSUB_BA 1, 2, 5, 6, 4, [pd_8192] ; m1=t4[w], m2=t12[w]
    UNSCRATCH 4, 9, tmpq+ 4*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 4, 7, 6, [pd_8192] ; m0=t5[w], m3=t13[w]

    SCRATCH 0, 8, tmpq+ 4*%%str
    mova [tmpq+11*%%str], m1 ; t4:m1->r11
    UNSCRATCH 0, 10, tmpq+ 0*%%str
    UNSCRATCH 1, 11, tmpq+15*%%str

; round 2 interleaved part 1
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 16069, 3196 ; m1/7=t8[d], m0/6=t9[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 5, 4, 3196, 16069 ; m3/5=t12[d], m2/4=t13[d]
    SCRATCH 4, 9, tmpq+ 3*%%str
    VP9_RND_SH_SUMSUB_BA 3, 1, 5, 7, 4, [pd_8192] ; m3=t8[w], m1=t12[w]
    UNSCRATCH 4, 9, tmpq+ 3*%%str
    VP9_RND_SH_SUMSUB_BA 2, 0, 4, 6, 5, [pd_8192] ; m2=t9[w], m0=t13[w]

    SCRATCH 0, 10, tmpq+ 0*%%str
    SCRATCH 1, 11, tmpq+15*%%str
    SCRATCH 2, 14, tmpq+ 3*%%str
    SCRATCH 3, 15, tmpq+12*%%str

    mova m2, [%1+ 6*32] ; in6
    mova m3, [%1+ 9*32] ; in9
    mova m0, [%1+ 1*32] ; in1
    mova m1, [%1+14*32] ; in14

    VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 13160, 9760 ; m3/7=t7[d], m2/6=t6[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 4, 5, 2404, 16207 ; m0/4=t15[d], m1/5=t14[d]
    SCRATCH 4, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 1, 2, 5, 6, 4, [pd_8192] ; m1=t6[w], m2=t14[w]
    UNSCRATCH 4, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 4, 7, 6, [pd_8192] ; m0=t7[w], m3=t15[w]

; r8=t0, r7=t1, r5=t2, r10=t3, r11=t4, m8|r4=t5, m1=t6, m0=t7
; m10|r0=t8, m11|r15=t9, m13|r13=t10, m12|r2=t11, m14|r3=t12, m15|r12=t13, m2=t14, m3=t15

    UNSCRATCH 4, 12, tmpq+ 2*%%str
    UNSCRATCH 5, 13, tmpq+13*%%str
    SCRATCH 0, 12, tmpq+ 1*%%str
    SCRATCH 1, 13, tmpq+14*%%str

; remainder of round 2 (rest of t8-15)
    VP9_UNPACK_MULSUB_2D_4X 5, 4, 6, 7, 9102, 13623 ; m5/6=t11[d], m4/7=t10[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 1, 0, 13623, 9102 ; m3/1=t14[d], m2/0=t15[d]
    SCRATCH 0, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 3, 4, 1, 7, 0, [pd_8192] ; m3=t10[w], m4=t14[w]
    UNSCRATCH 0, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 2, 5, 0, 6, 1, [pd_8192] ; m2=t11[w], m5=t15[w]

; m15|r12=t8, m14|r3=t9, m3=t10, m2=t11, m11|r15=t12, m10|r0=t13, m4=t14, m5=t15

    UNSCRATCH 6, 14, tmpq+ 3*%%str
    UNSCRATCH 7, 15, tmpq+12*%%str

    SUMSUB_BA w, 3, 7, 1
    PSIGNW m3, [pw_m1] ; m3=out1[w], m7=t10[w]
    SUMSUB_BA w, 2, 6, 1 ; m2=out14[w], m6=t11[w]

; unfortunately, the code below overflows in some cases, e.g.
; http://downloads.webmproject.org/test_data/libvpx/vp90-2-14-resize-fp-tiles-16-8.webm
%if 0; cpuflag(ssse3)
    SUMSUB_BA w, 7, 6, 1
    pmulhrsw m7, [pw_11585x2] ; m7=out6[w]
    pmulhrsw m6, [pw_11585x2] ; m6=out9[w]

    VP9_UNPACK_MULSUB_2W_4X 6, 7, 11585, 11585, [pd_8192], 1, 0

    mova [tmpq+ 3*%%str], m6
    mova [tmpq+ 6*%%str], m7
    UNSCRATCH 6, 10, tmpq+ 0*%%str
    UNSCRATCH 7, 11, tmpq+15*%%str
    mova [tmpq+13*%%str], m2
    SCRATCH 3, 11, tmpq+ 9*%%str

    VP9_UNPACK_MULSUB_2D_4X 7, 6, 2, 3, 15137, 6270 ; m6/3=t13[d], m7/2=t12[d]
    VP9_UNPACK_MULSUB_2D_4X 5, 4, 1, 0, 6270, 15137 ; m5/1=t14[d], m4/0=t15[d]
    SCRATCH 0, 9, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 5, 6, 1, 3, 0, [pd_8192] ; m5=out2[w], m6=t14[w]
    UNSCRATCH 0, 9, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 4, 7, 0, 2, 1, [pd_8192]
    PSIGNW m4, [pw_m1] ; m4=out13[w], m7=t15[w]

; unfortunately, the code below overflows in some cases
%if 0; cpuflag(ssse3)
    SUMSUB_BA w, 7, 6, 1
    pmulhrsw m7, [pw_m11585x2] ; m7=out5[w]
    pmulhrsw m6, [pw_11585x2] ; m6=out10[w]

    VP9_UNPACK_MULSUB_2W_4X 7, 6, 11585, 11585, [pd_8192], 1, 0

; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, m6=out10, m4=out13, r2=out14

    mova m2, [tmpq+ 8*%%str]
    mova m3, [tmpq+ 7*%%str]
    mova m1, [tmpq+11*%%str]
    mova [tmpq+ 7*%%str], m6
    mova [tmpq+11*%%str], m4
    mova m4, [tmpq+ 5*%%str]
    SCRATCH 5, 14, tmpq+ 5*%%str
    SCRATCH 7, 15, tmpq+ 8*%%str
    UNSCRATCH 6, 8, tmpq+ 4*%%str
    UNSCRATCH 5, 12, tmpq+ 1*%%str
    UNSCRATCH 7, 13, tmpq+14*%%str

; m2=t0, m3=t1, m9=t2, m0=t3, m1=t4, m8=t5, m13=t6, m12=t7
; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14

    SUMSUB_BA w, 1, 2, 0 ; m1=t0[w], m2=t4[w]
    mova m0, [tmpq+10*%%str]
    SCRATCH 1, 12, tmpq+ 1*%%str
    SUMSUB_BA w, 6, 3, 1 ; m8=t1[w], m3=t5[w]
    SCRATCH 6, 13, tmpq+ 4*%%str
    SUMSUB_BA w, 7, 4, 1 ; m13=t2[w], m9=t6[w]
    SCRATCH 7, 8, tmpq+10*%%str
    SUMSUB_BA w, 5, 0, 1 ; m12=t3[w], m0=t7[w]
    SCRATCH 5, 9, tmpq+14*%%str

    VP9_UNPACK_MULSUB_2D_4X 2, 3, 7, 5, 15137, 6270 ; m2/6=t5[d], m3/10=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 4, 1, 6, 6270, 15137 ; m0/14=t6[d], m9/15=t7[d]
    SCRATCH 6, 10, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 1, 5, 6, [pd_8192]
    UNSCRATCH 6, 10, tmpq+ 0*%%str
    PSIGNW m0, [pw_m1] ; m0=out3[w], m3=t6[w]
    VP9_RND_SH_SUMSUB_BA 4, 2, 6, 7, 5, [pd_8192] ; m9=out12[w], m2=t7[w]

    UNSCRATCH 1, 8, tmpq+10*%%str
    UNSCRATCH 5, 9, tmpq+14*%%str
    UNSCRATCH 6, 12, tmpq+ 1*%%str
    UNSCRATCH 7, 13, tmpq+ 4*%%str
    SCRATCH 4, 9, tmpq+14*%%str

    SUMSUB_BA w, 1, 6, 4 ; m13=out0[w], m1=t2[w]
    SUMSUB_BA w, 5, 7, 4
    PSIGNW m5, [pw_m1] ; m12=out15[w], m8=t3[w]

; unfortunately, the code below overflows in some cases, e.g.
; http://downloads.webmproject.org/test_data/libvpx/vp90-2-14-resize-fp-tiles-16-8-4-2-1.webm
%if 0 ; cpuflag(ssse3)
    SUMSUB_BA w, 7, 6, 4
    pmulhrsw m7, [pw_m11585x2] ; m8=out7[w]
    pmulhrsw m6, [pw_11585x2] ; m1=out8[w]

    SUMSUB_BA w, 3, 2, 4
    pmulhrsw m3, [pw_11585x2] ; m3=out4[w]
    pmulhrsw m2, [pw_11585x2] ; m2=out11[w]

    SCRATCH 5, 8, tmpq+10*%%str
    VP9_UNPACK_MULSUB_2W_4X 6, 7, 11585, m11585, [pd_8192], 5, 4
    VP9_UNPACK_MULSUB_2W_4X 2, 3, 11585, 11585, [pd_8192], 5, 4
    UNSCRATCH 5, 8, tmpq+10*%%str

; m13=out0, m0=out3, m3=out4, m8=out7, m1=out8, m2=out11, m9=out12, m12=out15
; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14

    mova m13, [tmpq+ 6*%%str]
    TRANSPOSE8x8W 1, 11, 14, 0, 3, 15, 13, 6, 10
    mova [tmpq+ 0*16], m1
    mova [tmpq+ 2*16], m11
    mova [tmpq+ 4*16], m14
    mova [tmpq+ 6*16], m0
    mova m1, [tmpq+ 3*%%str]
    mova m11, [tmpq+ 7*%%str]
    mova m14, [tmpq+11*%%str]
    mova m0, [tmpq+13*%%str]
    mova [tmpq+ 8*16], m3
    mova [tmpq+10*16], m15
    mova [tmpq+12*16], m13
    mova [tmpq+14*16], m6

    TRANSPOSE8x8W 7, 1, 11, 2, 9, 14, 0, 5, 10
    mova [tmpq+ 1*16], m7
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m11
    mova [tmpq+ 7*16], m2
    mova [tmpq+ 9*16], m9
    mova [tmpq+11*16], m14
    mova [tmpq+13*16], m0
    mova [tmpq+15*16], m5

    mova [tmpq+12*%%str], m2
    mova [tmpq+ 1*%%str], m5
    mova [tmpq+15*%%str], m7
    mova m2, [tmpq+ 9*%%str]
    mova m5, [tmpq+ 5*%%str]
    mova m7, [tmpq+ 8*%%str]
    TRANSPOSE8x8W 1, 2, 5, 0, 3, 7, 4, 6, [tmpq+ 6*%%str], [tmpq+ 8*%%str], 1
    mova [tmpq+ 0*16], m1
    mova [tmpq+ 2*16], m2
    mova [tmpq+ 4*16], m5
    mova [tmpq+ 6*16], m0
    mova [tmpq+10*16], m7
    mova m3, [tmpq+12*%%str]
    mova [tmpq+12*16], m4
    mova m4, [tmpq+14*%%str]
    mova [tmpq+14*16], m6

    mova m0, [tmpq+15*%%str]
    mova m1, [tmpq+ 3*%%str]
    mova m2, [tmpq+ 7*%%str]
    mova m5, [tmpq+11*%%str]
    mova m7, [tmpq+ 1*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+13*%%str], [tmpq+ 9*%%str], 1
    mova [tmpq+ 1*16], m0
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m2
    mova [tmpq+ 7*16], m3
    mova [tmpq+11*16], m5
    mova [tmpq+13*16], m6
    mova [tmpq+15*16], m7

%define ROUND_REG [pw_512]

%define ROUND_REG [pw_32]

    mova m12, [tmpq+ 6*%%str]
    VP9_IDCT8_WRITEx2 1, 11, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 14, 0, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 3, 15, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 12, 6, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]

    mova m1, [tmpq+ 3*%%str]
    mova m11, [tmpq+ 7*%%str]
    mova m14, [tmpq+11*%%str]
    mova m0, [tmpq+13*%%str]

    VP9_IDCT8_WRITEx2 7, 1, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 11, 2, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 9, 14, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 0, 5, 10, 8, 4, ROUND_REG, 6

    mova [tmpq+ 0*%%str], m2
    mova [tmpq+ 1*%%str], m5
    mova [tmpq+ 2*%%str], m7
    mova m2, [tmpq+ 9*%%str]
    VP9_IDCT8_WRITEx2 1, 2, 5, 7, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 5*%%str]
    VP9_IDCT8_WRITEx2 5, 0, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 8*%%str]
    VP9_IDCT8_WRITEx2 3, 5, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 6*%%str]
    VP9_IDCT8_WRITEx2 5, 6, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]

    mova m0, [tmpq+ 2*%%str]
    mova m3, [tmpq+ 3*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+ 7*%%str]
    mova m3, [tmpq+ 0*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+14*%%str]
    mova m3, [tmpq+11*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+13*%%str]
    mova m3, [tmpq+ 1*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6

cglobal vp9_%1_%3_16x16_add, 3, 6, 16, 512, dst, stride, block, cnt, dst_bak, tmp

    lea dstq, [dst_bakq+8]

; at the end of the loop, m0 should still be zero
; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 16, m0

IADST16_FN idct, IDCT16, iadst, IADST16, sse2
IADST16_FN iadst, IADST16, idct, IDCT16, sse2
IADST16_FN iadst, IADST16, iadst, IADST16, sse2
IADST16_FN idct, IDCT16, iadst, IADST16, ssse3
IADST16_FN iadst, IADST16, idct, IDCT16, ssse3
IADST16_FN iadst, IADST16, iadst, IADST16, ssse3
IADST16_FN idct, IDCT16, iadst, IADST16, avx
IADST16_FN iadst, IADST16, idct, IDCT16, avx
IADST16_FN iadst, IADST16, iadst, IADST16, avx

;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_32x32_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IDCT32_1D 2-3 32 ; src, pass, nnzc
%assign %%str 16*%2*%2
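; distance between rows in the tmp buffer: 16 bytes in pass 1, 64 in pass 2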
; first do t0-15, this can be done identical to idct16x16
    VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq, 2*%%str, 1

; store everything on stack to make space available for t16-31
; we store interleaved with the output of the second half (t16-31)
; so we don't need to allocate extra stack space
    mova [tmpq+ 0*%%str], m0 ; t0
    mova [tmpq+ 4*%%str], m1 ; t1
    mova [tmpq+ 8*%%str], m2 ; t2
    mova [tmpq+12*%%str], m3 ; t3
    mova [tmpq+16*%%str], m4 ; t4
    mova [tmpq+20*%%str], m5 ; t5

    mova [tmpq+22*%%str], m10 ; t10
    mova [tmpq+18*%%str], m11 ; t11
    mova [tmpq+14*%%str], m12 ; t12
    mova [tmpq+10*%%str], m13 ; t13
    mova [tmpq+ 6*%%str], m14 ; t14
    mova [tmpq+ 2*%%str], m15 ; t15

    mova m0, [tmpq+ 30*%%str]
    UNSCRATCH 1, 6, tmpq+26*%%str
    UNSCRATCH 2, 8, tmpq+24*%%str
    UNSCRATCH 3, 9, tmpq+28*%%str
    SUMSUB_BA w, 1, 3, 4 ; t6, t9
    SUMSUB_BA w, 0, 2, 4 ; t7, t8

    mova [tmpq+24*%%str], m1 ; t6
    mova [tmpq+28*%%str], m0 ; t7
    mova [tmpq+30*%%str], m2 ; t8
    mova [tmpq+26*%%str], m3 ; t9

; then, secondly, do t16-31

    pmulhrsw m1, m4, [pw_16364x2] ;t31
    pmulhrsw m4, [pw_804x2] ;t16

    VP9_UNPACK_MULSUB_2W_4X 5, 0, 1, 4, 16069, 3196, [pd_8192], 6, 2 ; t17, t30

    pmulhrsw m3, m7, [pw_m5520x2] ;t19
    pmulhrsw m7, [pw_15426x2] ;t28

    SCRATCH 4, 13, tmpq+ 1*%%str
    SCRATCH 5, 12, tmpq+15*%%str

    VP9_UNPACK_MULSUB_2W_4X 2, 6, 7, 3, 3196, m16069, [pd_8192], 4, 5 ; t18, t29

    pmulhrsw m5, m0, [pw_16364x2]
    pmulhrsw m0, [pw_804x2]
    pmulhrsw m4, m1, [pw_m11003x2]
    pmulhrsw m1, [pw_12140x2]

    VP9_UNPACK_MULSUB_2W_4X 0, 5, 16364, 804, [pd_8192], 2, 3 ; t16, t31
    VP9_UNPACK_MULSUB_2W_4X 4, 1, 11003, 12140, [pd_8192], 2, 3 ; t17, t30

    SUMSUB_BA w, 4, 0, 2
    SUMSUB_BA w, 1, 5, 2

    VP9_UNPACK_MULSUB_2W_4X 5, 0, 16069, 3196, [pd_8192], 2, 3 ; t17, t30

    SCRATCH 4, 13, tmpq+ 1*%%str
    SCRATCH 5, 12, tmpq+15*%%str

    pmulhrsw m7, m3, [pw_14811x2]
    pmulhrsw m3, [pw_7005x2]
    pmulhrsw m6, m2, [pw_m5520x2]
    pmulhrsw m2, [pw_15426x2]

    VP9_UNPACK_MULSUB_2W_4X 3, 7, 14811, 7005, [pd_8192], 4, 5 ; t18, t29
    VP9_UNPACK_MULSUB_2W_4X 6, 2, 5520, 15426, [pd_8192], 4, 5 ; t19, t28

    SUMSUB_BA w, 3, 6, 4
    SUMSUB_BA w, 7, 2, 4

    VP9_UNPACK_MULSUB_2W_4X 2, 6, 3196, m16069, [pd_8192], 4, 5 ; t18, t29

    UNSCRATCH 5, 12, tmpq+15*%%str
    SUMSUB_BA w, 6, 0, 4
    mova [tmpq+25*%%str], m6 ; t19
    UNSCRATCH 4, 13, tmpq+ 1*%%str
    SUMSUB_BA w, 7, 1, 6
    SUMSUB_BA w, 3, 4, 6
    mova [tmpq+23*%%str], m3 ; t16
    SUMSUB_BA w, 2, 5, 6

    VP9_UNPACK_MULSUB_2W_4X 0, 5, 15137, 6270, [pd_8192], 6, 3 ; t18, t29
    VP9_UNPACK_MULSUB_2W_4X 1, 4, 15137, 6270, [pd_8192], 6, 3 ; t19, t28

    SCRATCH 0, 10, tmpq+ 1*%%str
    SCRATCH 1, 11, tmpq+ 7*%%str
    SCRATCH 2, 9, tmpq+ 9*%%str
    SCRATCH 4, 14, tmpq+15*%%str
    SCRATCH 5, 15, tmpq+17*%%str
    SCRATCH 7, 13, tmpq+31*%%str

    pmulhrsw m5, m0, [pw_15893x2] ;t27
    pmulhrsw m0, [pw_3981x2] ;t20

    VP9_UNPACK_MULSUB_2W_4X 1, 4, 5, 0, 9102, 13623, [pd_8192], 7, 2 ; t21, t26

    pmulhrsw m6, m3, [pw_m2404x2] ;t23
    pmulhrsw m3, [pw_16207x2] ;t24

    SCRATCH 5, 8, tmpq+ 5*%%str
    SCRATCH 4, 12, tmpq+11*%%str

    VP9_UNPACK_MULSUB_2W_4X 7, 2, 3, 6, 13623, m9102, [pd_8192], 4, 5 ; t22, t25

    pmulhrsw m1, m4, [pw_15893x2]
    pmulhrsw m4, [pw_3981x2]
    pmulhrsw m0, m5, [pw_m8423x2]
    pmulhrsw m5, [pw_14053x2]

    VP9_UNPACK_MULSUB_2W_4X 4, 1, 15893, 3981, [pd_8192], 2, 3 ; t20, t27
    VP9_UNPACK_MULSUB_2W_4X 0, 5, 8423, 14053, [pd_8192], 2, 3 ; t21, t26

    SUMSUB_BA w, 0, 4, 2
    SUMSUB_BA w, 5, 1, 2

    VP9_UNPACK_MULSUB_2W_4X 1, 4, 9102, 13623, [pd_8192], 2, 3 ; t21, t26

    SCRATCH 5, 8, tmpq+ 5*%%str
    SCRATCH 4, 12, tmpq+11*%%str

    pmulhrsw m3, m6, [pw_13160x2]
    pmulhrsw m6, [pw_9760x2]
    pmulhrsw m2, m7, [pw_m2404x2]
    pmulhrsw m7, [pw_16207x2]

    VP9_UNPACK_MULSUB_2W_4X 6, 3, 13160, 9760, [pd_8192], 4, 5 ; t22, t25
    VP9_UNPACK_MULSUB_2W_4X 2, 7, 2404, 16207, [pd_8192], 4, 5 ; t23, t24

    SUMSUB_BA w, 6, 2, 4
    SUMSUB_BA w, 3, 7, 4

    VP9_UNPACK_MULSUB_2W_4X 7, 2, 13623, m9102, [pd_8192], 4, 5 ; t22, t25

; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31

    UNSCRATCH 4, 12, tmpq+11*%%str
    SUMSUB_BA w, 0, 6, 5
    SUMSUB_BA w, 4, 2, 5
    UNSCRATCH 5, 8, tmpq+ 5*%%str
    SCRATCH 4, 8, tmpq+11*%%str
    SUMSUB_BA w, 1, 7, 4
    SUMSUB_BA w, 5, 3, 4
    SCRATCH 5, 12, tmpq+ 5*%%str

    VP9_UNPACK_MULSUB_2W_4X 3, 6, 6270, m15137, [pd_8192], 4, 5 ; t20, t27
    VP9_UNPACK_MULSUB_2W_4X 2, 7, 6270, m15137, [pd_8192], 4, 5 ; t21, t26

; m8[s]=t16, m9=t17, m5=t18, m4[s]=t19, m12=t20, m13=t21, m1=t22, m0=t23,
; m15=t24, m14=t25, m2=t26, m3=t27, m11=t28, m10=t29, m6=t30, m7=t31

    UNSCRATCH 5, 9, tmpq+ 9*%%str
    mova m4, [tmpq+23*%%str] ; t16

    SUMSUB_BA w, 1, 5, 9
    SUMSUB_BA w, 0, 4, 9

    SUMSUB_BADC w, 1, 5, 0, 4

    mova [tmpq+29*%%str], m1 ; t17
    mova [tmpq+21*%%str], m0 ; t16
    UNSCRATCH 0, 10, tmpq+ 1*%%str
    UNSCRATCH 1, 11, tmpq+ 7*%%str

    SUMSUB_BA w, 2, 0, 9
    SUMSUB_BA w, 3, 1, 9

    SUMSUB_BADC w, 2, 0, 3, 1

    mova [tmpq+ 9*%%str], m2 ; t18
    mova [tmpq+13*%%str], m3 ; t19
    SCRATCH 0, 10, tmpq+23*%%str
    SCRATCH 1, 11, tmpq+27*%%str

    UNSCRATCH 2, 14, tmpq+15*%%str
    UNSCRATCH 3, 15, tmpq+17*%%str
    SUMSUB_BA w, 6, 2, 0
    SUMSUB_BA w, 7, 3, 0
    SCRATCH 6, 14, tmpq+ 3*%%str
    SCRATCH 7, 15, tmpq+ 7*%%str

    UNSCRATCH 0, 8, tmpq+11*%%str
    mova m1, [tmpq+25*%%str] ; t19
    UNSCRATCH 6, 12, tmpq+ 5*%%str
    UNSCRATCH 7, 13, tmpq+31*%%str

    SUMSUB_BA w, 0, 1, 9
    SUMSUB_BA w, 6, 7, 9

    SUMSUB_BADC w, 0, 1, 6, 7

; m0=t16, m1=t17, m2=t18, m3=t19, m11=t20, m10=t21, m9=t22, m8=t23,
; m7=t24, m6=t25, m5=t26, m4=t27, m12=t28, m13=t29, m14=t30, m15=t31

%if 0; cpuflag(ssse3)

    SUMSUB_BA w, 4, 7, 8
    SUMSUB_BA w, 5, 1, 8

    SUMSUB_BADC w, 4, 7, 5, 1

    pmulhrsw m7, [pw_11585x2]
    pmulhrsw m4, [pw_11585x2]
    pmulhrsw m1, [pw_11585x2]
    pmulhrsw m5, [pw_11585x2]

    mova [tmpq+ 5*%%str], m7 ; t23
    SCRATCH 1, 13, tmpq+25*%%str
    UNSCRATCH 7, 10, tmpq+23*%%str
    UNSCRATCH 1, 11, tmpq+27*%%str

    SUMSUB_BA w, 7, 3, 10
    SUMSUB_BA w, 1, 2, 10

    SUMSUB_BADC w, 7, 3, 1, 2

    pmulhrsw m3, [pw_11585x2]
    pmulhrsw m7, [pw_11585x2]
    pmulhrsw m2, [pw_11585x2]
    pmulhrsw m1, [pw_11585x2]

    SCRATCH 0, 8, tmpq+15*%%str
    SCRATCH 6, 9, tmpq+17*%%str
    VP9_UNPACK_MULSUB_2W_4X 7, 4, 11585, 11585, [pd_8192], 0, 6
    mova [tmpq+ 5*%%str], m7 ; t23
    UNSCRATCH 7, 10, tmpq+23*%%str
    VP9_UNPACK_MULSUB_2W_4X 1, 5, 11585, 11585, [pd_8192], 0, 6
    SCRATCH 1, 13, tmpq+25*%%str
    UNSCRATCH 1, 11, tmpq+27*%%str
    VP9_UNPACK_MULSUB_2W_4X 3, 7, 11585, 11585, [pd_8192], 0, 6
    VP9_UNPACK_MULSUB_2W_4X 2, 1, 11585, 11585, [pd_8192], 0, 6
    UNSCRATCH 0, 8, tmpq+15*%%str
    UNSCRATCH 6, 9, tmpq+17*%%str

; m0=t16, m1=t17, m2=t18, m3=t19, m4=t20, m5=t21, m6=t22, m7=t23,
; m8=t24, m9=t25, m10=t26, m11=t27, m12=t28, m13=t29, m14=t30, m15=t31

; then do final pass to sumsub+store the two halves

    mova [tmpq+17*%%str], m2 ; t20
    mova [tmpq+ 1*%%str], m3 ; t21

    mova [tmpq+25*%%str], m13 ; t22

    mova m8, [tmpq+ 0*%%str] ; t0
    mova m9, [tmpq+ 4*%%str] ; t1
    mova m12, [tmpq+ 8*%%str] ; t2
    mova m11, [tmpq+12*%%str] ; t3
    mova m2, [tmpq+16*%%str] ; t4
    mova m3, [tmpq+20*%%str] ; t5
    mova m13, [tmpq+24*%%str] ; t6

    SUMSUB_BA w, 6, 8, 10
    mova [tmpq+ 3*%%str], m8 ; t15
    mova m10, [tmpq+28*%%str] ; t7
    SUMSUB_BA w, 0, 9, 8
    SUMSUB_BA w, 15, 12, 8
    SUMSUB_BA w, 14, 11, 8
    SUMSUB_BA w, 1, 2, 8
    SUMSUB_BA w, 7, 3, 8
    SUMSUB_BA w, 5, 13, 8
    SUMSUB_BA w, 4, 10, 8

    TRANSPOSE8x8W 6, 0, 15, 14, 1, 7, 5, 4, 8
    mova [tmpq+ 0*%%str], m6
    mova [tmpq+ 4*%%str], m0
    mova [tmpq+ 8*%%str], m15
    mova [tmpq+12*%%str], m14
    mova [tmpq+16*%%str], m1
    mova [tmpq+20*%%str], m7
    mova [tmpq+24*%%str], m5
    mova [tmpq+28*%%str], m4

    mova m8, [tmpq+ 3*%%str] ; t15
    TRANSPOSE8x8W 10, 13, 3, 2, 11, 12, 9, 8, 0
    mova [tmpq+ 3*%%str], m10
    mova [tmpq+ 7*%%str], m13
    mova [tmpq+11*%%str], m3
    mova [tmpq+15*%%str], m2
    mova [tmpq+19*%%str], m11
    mova [tmpq+23*%%str], m12
    mova [tmpq+27*%%str], m9
    mova [tmpq+31*%%str], m8

    mova m15, [tmpq+30*%%str] ; t8
    mova m14, [tmpq+26*%%str] ; t9
    mova m13, [tmpq+22*%%str] ; t10
    mova m12, [tmpq+18*%%str] ; t11
    mova m11, [tmpq+14*%%str] ; t12
    mova m10, [tmpq+10*%%str] ; t13
    mova m9, [tmpq+ 6*%%str] ; t14
    mova m8, [tmpq+ 2*%%str] ; t15
    mova m7, [tmpq+21*%%str] ; t16
    mova m6, [tmpq+29*%%str] ; t17
    mova m5, [tmpq+ 9*%%str] ; t18
    mova m4, [tmpq+13*%%str] ; t19
    mova m3, [tmpq+17*%%str] ; t20
    mova m2, [tmpq+ 1*%%str] ; t21
    mova m1, [tmpq+25*%%str] ; t22

    SUMSUB_BA w, 7, 8, 0
    mova [tmpq+ 2*%%str], m8
    mova m0, [tmpq+ 5*%%str] ; t23
    SUMSUB_BA w, 6, 9, 8
    SUMSUB_BA w, 5, 10, 8
    SUMSUB_BA w, 4, 11, 8
    SUMSUB_BA w, 3, 12, 8
    SUMSUB_BA w, 2, 13, 8
    SUMSUB_BA w, 1, 14, 8
    SUMSUB_BA w, 0, 15, 8

    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    mova [tmpq+ 1*%%str], m0
    mova [tmpq+ 5*%%str], m1
    mova [tmpq+ 9*%%str], m2
    mova [tmpq+13*%%str], m3
    mova [tmpq+17*%%str], m4
    mova [tmpq+21*%%str], m5
    mova [tmpq+25*%%str], m6
    mova [tmpq+29*%%str], m7

    mova m8, [tmpq+ 2*%%str]
    TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
    mova [tmpq+ 2*%%str], m8
    mova [tmpq+ 6*%%str], m9
    mova [tmpq+10*%%str], m10
    mova [tmpq+14*%%str], m11
    mova [tmpq+18*%%str], m12
    mova [tmpq+22*%%str], m13
    mova [tmpq+26*%%str], m14
    mova [tmpq+30*%%str], m15

    mova m2, [tmpq+24*%%str] ; t6
    mova m3, [tmpq+28*%%str] ; t7
    SUMSUB_BADC w, 5, 2, 4, 3
    mova [tmpq+24*%%str], m5
    mova [tmpq+23*%%str], m2
    mova [tmpq+28*%%str], m4
    mova [tmpq+19*%%str], m3

    mova m2, [tmpq+16*%%str] ; t4
    mova m3, [tmpq+20*%%str] ; t5
    SUMSUB_BA w, 1, 2, 5
    SUMSUB_BA w, 7, 3, 5
    mova [tmpq+15*%%str], m2
    mova [tmpq+11*%%str], m3

    mova m2, [tmpq+ 0*%%str] ; t0
    mova m3, [tmpq+ 4*%%str] ; t1
    SUMSUB_BA w, 6, 2, 5
    SUMSUB_BA w, 0, 3, 5
    mova [tmpq+31*%%str], m2
    mova [tmpq+27*%%str], m3

    mova m2, [tmpq+ 8*%%str] ; t2
    mova m3, [tmpq+12*%%str] ; t3
    mova m5, [tmpq+ 7*%%str]
    mova m4, [tmpq+ 3*%%str]
    SUMSUB_BADC w, 5, 2, 4, 3
    mova [tmpq+ 7*%%str], m2
    mova [tmpq+ 3*%%str], m3

    mova m3, [tmpq+28*%%str]
    TRANSPOSE8x8W 6, 0, 5, 4, 1, 7, 2, 3, [tmpq+24*%%str], [tmpq+16*%%str], 1
    mova [tmpq+ 0*%%str], m6
    mova [tmpq+ 4*%%str], m0
    mova [tmpq+ 8*%%str], m5
    mova [tmpq+12*%%str], m4
    mova [tmpq+20*%%str], m7
    mova [tmpq+24*%%str], m2
    mova [tmpq+28*%%str], m3

    mova m6, [tmpq+19*%%str]
    mova m0, [tmpq+23*%%str]
    mova m5, [tmpq+11*%%str]
    mova m4, [tmpq+15*%%str]
    mova m1, [tmpq+ 3*%%str]
    mova m7, [tmpq+ 7*%%str]
    mova m3, [tmpq+31*%%str]
    TRANSPOSE8x8W 6, 0, 5, 4, 1, 7, 2, 3, [tmpq+27*%%str], [tmpq+19*%%str], 1
    mova [tmpq+ 3*%%str], m6
    mova [tmpq+ 7*%%str], m0
    mova [tmpq+11*%%str], m5
    mova [tmpq+15*%%str], m4
    mova [tmpq+23*%%str], m7
    mova [tmpq+27*%%str], m2
    mova [tmpq+31*%%str], m3

    mova m1, [tmpq+ 6*%%str] ; t14
    mova m0, [tmpq+ 2*%%str] ; t15
    mova m7, [tmpq+21*%%str] ; t16
    mova m6, [tmpq+29*%%str] ; t17
    SUMSUB_BA w, 7, 0, 2
    SUMSUB_BA w, 6, 1, 2
    mova [tmpq+29*%%str], m7
    mova [tmpq+ 2*%%str], m0
    mova [tmpq+21*%%str], m6
    mova [tmpq+ 6*%%str], m1

    mova m1, [tmpq+14*%%str] ; t12
    mova m0, [tmpq+10*%%str] ; t13
    mova m5, [tmpq+ 9*%%str] ; t18
    mova m4, [tmpq+13*%%str] ; t19
    SUMSUB_BA w, 5, 0, 2
    SUMSUB_BA w, 4, 1, 2
    mova [tmpq+10*%%str], m0
    mova [tmpq+14*%%str], m1

    mova m1, [tmpq+22*%%str] ; t10
    mova m0, [tmpq+18*%%str] ; t11
    mova m3, [tmpq+17*%%str] ; t20
    mova m2, [tmpq+ 1*%%str] ; t21
    SUMSUB_BA w, 3, 0, 6
    SUMSUB_BA w, 2, 1, 6
    mova [tmpq+18*%%str], m0
    mova [tmpq+22*%%str], m1

    mova m7, [tmpq+30*%%str] ; t8
    mova m6, [tmpq+26*%%str] ; t9
    mova m1, [tmpq+25*%%str] ; t22
    mova m0, [tmpq+ 5*%%str] ; t23
    SUMSUB_BADC w, 1, 6, 0, 7
    mova [tmpq+26*%%str], m6
    mova [tmpq+30*%%str], m7

    mova m7, [tmpq+29*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+21*%%str], [tmpq+17*%%str], 1
    mova [tmpq+ 1*%%str], m0
    mova [tmpq+ 5*%%str], m1
    mova [tmpq+ 9*%%str], m2
    mova [tmpq+13*%%str], m3
    mova [tmpq+21*%%str], m5
    mova [tmpq+25*%%str], m6
    mova [tmpq+29*%%str], m7

    mova m0, [tmpq+ 2*%%str]
    mova m1, [tmpq+ 6*%%str]
    mova m2, [tmpq+10*%%str]
    mova m3, [tmpq+14*%%str]
    mova m4, [tmpq+18*%%str]
    mova m5, [tmpq+22*%%str]
    mova m7, [tmpq+30*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+26*%%str], [tmpq+18*%%str], 1
    mova [tmpq+ 2*%%str], m0
    mova [tmpq+ 6*%%str], m1
    mova [tmpq+10*%%str], m2
    mova [tmpq+14*%%str], m3
    mova [tmpq+22*%%str], m5
    mova [tmpq+26*%%str], m6
    mova [tmpq+30*%%str], m7

; t0-7 is in [tmpq+{0,4,8,12,16,20,24,28}*%%str]
; t8-15 is in [tmpq+{2,6,10,14,18,22,26,30}*%%str]
; t16-19 and t23 is in [tmpq+{1,5,9,13,29}*%%str]

; t24-31 is in m8-15

%define ROUND_REG [pw_512]

%define ROUND_REG [pw_32]

%macro %%STORE_2X2 7-8 1 ; src[1-4], tmp[1-2], zero, inc_dst_ptrs
    SUMSUB_BA w, %4, %1, %5
    SUMSUB_BA w, %3, %2, %5
    VP9_IDCT8_WRITEx2 %4, %3, %5, %6, %7, ROUND_REG, 6

    VP9_IDCT8_WRITEx2 %2, %1, %5, %6, %7, ROUND_REG, 6, dst_endq

    sub dst_endq, stride2q
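
; %%STORE_2X2 adds/subtracts mirrored coefficient rows (t[N] +/- t[31-N]) and
; writes the sums forward from dstq, the differences backward from dst_endq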
; store t0-1 and t30-31
    mova m8, [tmpq+ 0*%%str]
    mova m9, [tmpq+ 4*%%str]
    %%STORE_2X2 8, 9, 0, 6, 12, 11, 10

; store t2-3 and t28-29
    mova m8, [tmpq+ 8*%%str]
    mova m9, [tmpq+12*%%str]
    %%STORE_2X2 8, 9, 14, 15, 12, 11, 10

; store t4-5 and t26-27
    mova m8, [tmpq+16*%%str]
    mova m9, [tmpq+20*%%str]
    %%STORE_2X2 8, 9, 7, 1, 12, 11, 10

; store t6-7 and t24-25
    mova m8, [tmpq+24*%%str]
    mova m9, [tmpq+28*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10

; store t8-9 and t22-23
    mova m8, [tmpq+30*%%str]
    mova m9, [tmpq+26*%%str]
    mova m0, [tmpq+ 5*%%str]
    %%STORE_2X2 8, 9, 13, 0, 12, 11, 10

; store t10-11 and t20-21
    mova m8, [tmpq+22*%%str]
    mova m9, [tmpq+18*%%str]
    %%STORE_2X2 8, 9, 2, 3, 12, 11, 10

; store t12-13 and t18-19
    mova m8, [tmpq+14*%%str]
    mova m9, [tmpq+10*%%str]
    mova m5, [tmpq+13*%%str]
    mova m4, [tmpq+ 9*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10

    mova m8, [tmpq+ 6*%%str]
    mova m9, [tmpq+ 2*%%str]
    mova m5, [tmpq+29*%%str]
    mova m4, [tmpq+21*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10, 0

    mova [tmpq+ 1*%%str], m1
    mova [tmpq+11*%%str], m2
    mova [tmpq+15*%%str], m3
    mova [tmpq+17*%%str], m4
    mova [tmpq+19*%%str], m5

; store t0-1 and t30-31
    mova m2, [tmpq+ 0*%%str]
    mova m3, [tmpq+ 4*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

; store t2-3 and t28-29
    mova m2, [tmpq+ 8*%%str]
    mova m3, [tmpq+12*%%str]
    mova m0, [tmpq+ 3*%%str]
    mova m6, [tmpq+ 7*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

; store t4-5 and t26-27
    mova m2, [tmpq+16*%%str]
    mova m3, [tmpq+20*%%str]
    mova m0, [tmpq+ 1*%%str]
    %%STORE_2X2 2, 3, 7, 0, 4, 5, 1

; store t6-7 and t24-25
    mova m2, [tmpq+24*%%str]
    mova m3, [tmpq+28*%%str]
    mova m0, [tmpq+17*%%str]
    mova m6, [tmpq+19*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

; store t8-9 and t22-23
    mova m2, [tmpq+30*%%str]
    mova m3, [tmpq+26*%%str]
    mova m0, [tmpq+25*%%str]
    mova m6, [tmpq+ 5*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

; store t10-11 and t20-21
    mova m2, [tmpq+22*%%str]
    mova m3, [tmpq+18*%%str]
    mova m0, [tmpq+11*%%str]
    mova m6, [tmpq+15*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

; store t12-13 and t18-19
    mova m2, [tmpq+14*%%str]
    mova m3, [tmpq+10*%%str]
    mova m6, [tmpq+13*%%str]
    mova m0, [tmpq+ 9*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

    mova m2, [tmpq+ 6*%%str]
    mova m3, [tmpq+ 2*%%str]
    mova m6, [tmpq+29*%%str]
    mova m0, [tmpq+21*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1, 0

%macro VP9_IDCT_IDCT_32x32_ADD_XMM 1

cglobal vp9_idct_idct_32x32_add, 0, 6 + ARCH_X86_64 * 3, 16, 2048, dst, stride, block, eob
    movifnidn eobd, dword eobm

    movifnidn blockq, blockmp
    movifnidn dstq, dstmp
    movifnidn strideq, stridemp

    mova m1, [pw_11585x2]

    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]

    add coefd, (32 << 14) + 8192

    SPLATW m0, m0, q0000

    pmulhrsw m0, [pw_512]

    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize

    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize

    DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp

%define dst_bakq r0mp

    DEFINE_ARGS block, u1, u2, u3, u4, tmp

    VP9_IDCT32_1D blockq, 1, 8

    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp

%define cntd dword r3m

    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32

    sub stride30q, stride2q ; stride*30

    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2, 8

; at the end of the loop, m7 should still be zero
; use that to zero out block coefficients

    ZERO_BLOCK blockq, 64, 8, m1

    DEFINE_ARGS block, tmp, cnt

    VP9_IDCT32_1D blockq, 1, 16

    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp

%define cntd dword r3m

    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32

    sub stride30q, stride2q ; stride*30

    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2, 16

; at the end of the loop, m7 should still be zero
; use that to zero out block coefficients

    ZERO_BLOCK blockq, 64, 16, m1

    DEFINE_ARGS block, tmp, cnt

    VP9_IDCT32_1D blockq, 1

    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp

%define cntd dword r3m

    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32

    sub stride30q, stride2q ; stride*30

    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2

; at the end of the loop, m7 should still be zero
; use that to zero out block coefficients

    ZERO_BLOCK blockq, 64, 32, m1

VP9_IDCT_IDCT_32x32_ADD_XMM sse2
VP9_IDCT_IDCT_32x32_ADD_XMM ssse3
VP9_IDCT_IDCT_32x32_ADD_XMM avx