;******************************************************************************
;* VP9 IDCT SIMD optimizations
;* Copyright (C) 2013 Clément Bœsch <u pkh me>
;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
;* This file is part of FFmpeg.
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"
%include "vp9itxfm_template.asm"

%macro VP9_IDCT_COEFFS 2-3 0
pw_m%1x2: times 16 dw -%1*2
pw_%1x2: times 16 dw %1*2
pw_%2x2: times 16 dw %2*2

VP9_IDCT_COEFFS 16364, 804
VP9_IDCT_COEFFS 16305, 1606
VP9_IDCT_COEFFS 16069, 3196, 1
VP9_IDCT_COEFFS 15893, 3981
VP9_IDCT_COEFFS 15137, 6270, 1
VP9_IDCT_COEFFS 14811, 7005
VP9_IDCT_COEFFS 14449, 7723
VP9_IDCT_COEFFS 13160, 9760
VP9_IDCT_COEFFS 11585, 11585, 1
VP9_IDCT_COEFFS 11003, 12140
VP9_IDCT_COEFFS 10394, 12665
VP9_IDCT_COEFFS 9102, 13623, 1
VP9_IDCT_COEFFS 8423, 14053
VP9_IDCT_COEFFS 5520, 15426
VP9_IDCT_COEFFS 4756, 15679
VP9_IDCT_COEFFS 2404, 16207

times 4 dw 5283, 13377
times 4 dw 9929, 13377
times 4 dw 15212, -13377
times 4 dw 15212, 9929
times 4 dw -5283, -15212
times 4 dw -13377, 13377
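; all pw_NNNNx2 constants above are the VP9 14-bit trig coefficients doubled:
; pmulhrsw computes (x*y + 0x4000) >> 15, so with y = 2*NNNN it yields
; (x*NNNN + 8192) >> 14, i.e. VP9's ROUND_POWER_OF_TWO(x*NNNN, 14), in a
; single instruction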
%macro VP9_UNPACK_MULSUB_2D_4X 6 ; dst1 [src1], dst2 [src2], dst3, dst4, mul1, mul2
    punpckhwd m%4, m%2, m%1
    pmaddwd m%3, m%4, [pw_m%5_%6]
    pmaddwd m%4, [pw_%6_%5]
    pmaddwd m%1, m%2, [pw_m%5_%6]
    pmaddwd m%2, [pw_%6_%5]
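    ; the interleave+pmaddwd pairs above implement one 2x2 rotation in full
    ; 32-bit precision: {m%2,m%4} = src1*mul1 + src2*mul2 and
    ; {m%1,m%3} = src1*mul2 - src2*mul1 (low/high dword halves); rounding and
    ; narrowing back to words is left to VP9_RND_SH_SUMSUB_BA below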
%macro VP9_RND_SH_SUMSUB_BA 6 ; dst1 [src1], dst2 [src2], src3, src4, tmp, round
    SUMSUB_BA d, %1, %2, %5
    SUMSUB_BA d, %3, %4, %5
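    ; (elided here: the dword sums/differences are subsequently rounded with
    ; %6, shifted right by 14 and packed back into words, completing the
    ; butterfly started by VP9_UNPACK_MULSUB_2D_4X)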
%macro VP9_STORE_2X 5-6 dstq ; reg1, reg2, tmp1, tmp2, zero, dst
    pmovzxbw m%4, [%6+strideq]
    movh m%4, [%6+strideq]
    vpermq m%3, m%3, q3120
    vextracti128 [%6+strideq], m%3, 1
    movhps [%6+strideq], m%3
    movh [%6+strideq], m%4

%macro ZERO_BLOCK 4 ; mem, stride, nnzcpl, zero_reg
    mova [%1+%%y+%%x], %4
%assign %%x (%%x+mmsize)
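; (elided here: ZERO_BLOCK steps %%y over %3 rows of %2 bytes each, clearing
; mmsize bytes per mova; it relies on the caller passing an already-zeroed
; register as %4)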
;-------------------------------------------------------------------------------------------
; void vp9_iwht_iwht_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

cglobal vp9_iwht_iwht_4x4_add, 3, 3, 0, dst, stride, block, eob
    mova m0, [blockq+0*8]
    mova m1, [blockq+1*8]
    mova m2, [blockq+2*8]
    mova m3, [blockq+3*8]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    VP9_STORE_2X 0, 1, 5, 6, 4
    lea dstq, [dstq+strideq*2]
    VP9_STORE_2X 2, 3, 5, 6, 4
    ZERO_BLOCK blockq, 8, 4, m4

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

; 2x2 top left corner
%macro VP9_IDCT4_2x2_1D 0
    pmulhrsw m0, m5 ; m0=t1
    pmulhrsw m1, m6 ; m1=t2
    pmulhrsw m3, m7 ; m3=t3
    VP9_IDCT4_1D_FINALIZE

%macro VP9_IDCT4_WRITEOUT 0
    pmulhrsw m0, m5 ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
    VP9_STORE_2X 0, 1, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 2, 3, 6, 7, 4

cglobal vp9_idct_idct_4x4_add, 4, 4, 0, dst, stride, block, eob
    cmp eobd, 4 ; 2x2 or smaller
    cmp eobd, 1 ; faster path for when only DC is set
    mova m5, [pw_11585x2]
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (8 << 14) + 8192
    pmulhrsw m0, [pw_2048] ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
    VP9_STORE_2X 0, 0, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 0, 0, 6, 7, 4
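    ; (DC-only shortcut: the DC coefficient is scaled by 11585/16384, i.e.
    ; 1/sqrt(2), once per pass, then written out with (x+8)>>4 over the whole
    ; 4x4 block; on the scalar path the (8 << 14) + 8192 constant folds the
    ; final +8 rounding and the second multiply's +8192 into one shift)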
; faster path for when only top left 2x2 block is set
    mova m5, [pw_11585x2]
    mova m7, [pw_15137x2]
    ; partial 2x4 transpose
    SBUTTERFLY dq, 0, 2, 1
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

.idctfull: ; generic full 4x4 idct/idct
    mova m6, [pw_11585x2]
    mova m7, [pd_8192] ; rounding
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

;-------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

cglobal vp9_%1_%3_4x4_add, 3, 3, 0, dst, stride, block, eob
%if WIN64 && notcpuflag(ssse3)
    movdqa xmm5, [pd_8192]
    mova m6, [pw_11585x2]
%ifnidn %1%3, iadstiadst
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

IADST4_FN idct, IDCT4, iadst, IADST4, sse2
IADST4_FN iadst, IADST4, idct, IDCT4, sse2
IADST4_FN iadst, IADST4, iadst, IADST4, sse2
IADST4_FN idct, IDCT4, iadst, IADST4, ssse3
IADST4_FN iadst, IADST4, idct, IDCT4, ssse3
IADST4_FN iadst, IADST4, iadst, IADST4, ssse3

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IDCT8_1D_FINALIZE 0
    SUMSUB_BA w, 3, 6, 5 ; m3=t0+t7, m6=t0-t7
    SUMSUB_BA w, 1, 2, 5 ; m1=t1+t6, m2=t1-t6
    SUMSUB_BA w, 7, 0, 5 ; m7=t2+t5, m0=t2-t5
    UNSCRATCH 5, 8, blockq+ 0
    SCRATCH 2, 8, blockq+ 0
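    ; (SCRATCH/UNSCRATCH hide the register-count difference between targets:
    ; on x86-64 they SWAP with a high register (m8 and up), on x86-32 they
    ; spill to / reload from the given address, which is why blockq doubles
    ; as scratch space here)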
    SUMSUB_BA w, 5, 4, 2 ; m5=t3+t4, m4=t3-t4

; - in: m0/m4 is in mem
; - out: m6 is in mem
; - everything is in registers (m0-7)
%macro VP9_IDCT8_1D 0
    VP9_UNPACK_MULSUB_2W_4X 5, 3, 9102, 13623, D_8192_REG, 0, 4 ; m5=t5a, m3=t6a
    VP9_UNPACK_MULSUB_2W_4X 1, 7, 16069, 3196, D_8192_REG, 0, 4 ; m1=t4a, m7=t7a
    SUMSUB_BA w, 5, 1, 0 ; m5=t4a+t5a (t4), m1=t4a-t5a (t5a)
    SUMSUB_BA w, 3, 7, 0 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
    SUMSUB_BA w, 1, 7, 0 ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
    pmulhrsw m1, W_11585x2_REG ; m1=t6
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    VP9_UNPACK_MULSUB_2W_4X 7, 1, 11585, 11585, D_8192_REG, 0, 4
    VP9_UNPACK_MULSUB_2W_4X 2, 6, 15137, 6270, D_8192_REG, 0, 4 ; m2=t2a, m6=t3a
    UNSCRATCH 0, 8, blockq+ 0 ; IN(0)
    UNSCRATCH 4, 9, blockq+64 ; IN(4)
    SCRATCH 5, 8, blockq+ 0
    SUMSUB_BA w, 4, 0, 5 ; m4=IN(0)+IN(4) m0=IN(0)-IN(4)
    pmulhrsw m4, W_11585x2_REG ; m4=t0a
    pmulhrsw m0, W_11585x2_REG ; m0=t1a
    SCRATCH 7, 9, blockq+64
    VP9_UNPACK_MULSUB_2W_4X 0, 4, 11585, 11585, D_8192_REG, 5, 7
    UNSCRATCH 7, 9, blockq+64
    SUMSUB_BA w, 6, 4, 5 ; m6=t0a+t3a (t0), m4=t0a-t3a (t3)
    SUMSUB_BA w, 2, 0, 5 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    VP9_IDCT8_1D_FINALIZE
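    ; (standard 8-point even/odd decomposition: IN(0)/IN(4) and IN(2)/IN(6)
    ; produce t0-t3 on the even side, IN(1)/IN(3)/IN(5)/IN(7) produce t4-t7
    ; on the odd side, and VP9_IDCT8_1D_FINALIZE sums the halves into
    ; out0-out7)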
%macro VP9_IDCT8_4x4_1D 0
    pmulhrsw m0, W_11585x2_REG ; m0=t1a/t0a
    pmulhrsw m6, m2, [pw_15137x2] ; m6=t3a
    pmulhrsw m2, [pw_6270x2] ; m2=t2a
    pmulhrsw m7, m1, [pw_16069x2] ; m7=t7a
    pmulhrsw m1, [pw_3196x2] ; m1=t4a
    pmulhrsw m5, m3, [pw_m9102x2] ; m5=t5a
    pmulhrsw m3, [pw_13623x2] ; m3=t6a
    SUMSUB_BA w, 5, 1, 4 ; m1=t4a+t5a (t4), m5=t4a-t5a (t5a)
    SUMSUB_BA w, 3, 7, 4 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
    SUMSUB_BA w, 1, 7, 4 ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
    pmulhrsw m1, W_11585x2_REG ; m1=t6
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    psubw m4, m0, m6 ; m4=t0a-t3a (t3)
    paddw m6, m0 ; m6=t0a+t3a (t0)
    SCRATCH 5, 8, blockq+ 0
    SUMSUB_BA w, 2, 0, 5 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    VP9_IDCT8_1D_FINALIZE

%macro VP9_IDCT8_2x2_1D 1
    pmulhrsw m0, W_11585x2_REG ; m0=t0
    pmulhrsw m3, m1, W_16069x2_REG ; m3=t7
    pmulhrsw m1, W_3196x2_REG ; m1=t4
    psubw m7, m3, m1 ; t5 = t7a - t4a
    paddw m5, m3, m1 ; t6 = t7a + t4a
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    pmulhrsw m5, W_11585x2_REG ; m5=t6
    ; merged VP9_IDCT8_1D_FINALIZE to make register-sharing w/ avx easier
    psubw m6, m0, m3 ; m6=t0-t7
    paddw m3, m0 ; m3=t0+t7
    psubw m2, m0, m1 ; m2=t1-t6
    paddw m1, m0 ; m1=t1+t6
%define SCRATCH_REG 1
%define SCRATCH_REG 2
%define SCRATCH_REG 8
    psubw m4, m0, m5 ; m4=t3-t4
    paddw m5, m0 ; m5=t3+t4
    SUMSUB_BA w, 7, 0, SCRATCH_REG ; m7=t2+t5, m0=t2-t5
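    ; (this 2x2 variant is reached when eob <= 3, so only IN(0) and IN(1) can
    ; be nonzero; every butterfly involving the other six inputs collapses,
    ; which is why the odd side reduces to the two pmulhrsw above)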
%macro VP9_IDCT8_WRITEx2 6-8 5 ; line1, line2, tmp1, tmp2, zero, pw_1024/pw_16, shift
    pmulhrsw m%1, %6 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    VP9_STORE_2X %1, %2, %3, %4, %5
    VP9_STORE_2X %1, %2, %3, %4, %5, %8

; - m8 holds m6 (SWAP)
%macro VP9_IDCT8_WRITEOUT 0
%define ROUND_REG [pw_1024]
%define ROUND_REG [pw_16]
    SCRATCH 5, 10, blockq+16
    SCRATCH 7, 11, blockq+32
    VP9_IDCT8_WRITEx2 0, 1, 5, 7, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2 2, 3, 5, 7, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    UNSCRATCH 5, 10, blockq+16
    UNSCRATCH 7, 11, blockq+32
    VP9_IDCT8_WRITEx2 4, 5, 0, 1, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    UNSCRATCH 5, 8, blockq+ 0
    VP9_IDCT8_WRITEx2 5, 7, 0, 1, 6, ROUND_REG

%macro VP9_IDCT_IDCT_8x8_ADD_XMM 2
cglobal vp9_idct_idct_8x8_add, 4, 4, %2, dst, stride, block, eob
    mova m12, [pw_11585x2] ; often used
%define W_11585x2_REG m12
%define W_11585x2_REG [pw_11585x2]
    cmp eobd, 12 ; top left half or less
    cmp eobd, 3 ; top left corner or less
    cmp eobd, 1 ; faster path for when only DC is set
    jne .idcttopleftcorner
    pmulhrsw m0, W_11585x2_REG
    pmulhrsw m0, W_11585x2_REG
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (16 << 14) + 8192
    pmulhrsw m0, [pw_1024] ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    VP9_STORE_2X 0, 0, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 0, 0, 6, 7, 4
; faster path for when only the top-left corner is set (3 inputs: DC, the
; coefficient right of DC, and the one below it). Note: this also works for
; a 2x2 block
    mova m10, [pw_3196x2]
    mova m11, [pw_16069x2]
%define W_3196x2_REG m10
%define W_16069x2_REG m11
%define W_3196x2_REG [pw_3196x2]
%define W_16069x2_REG [pw_16069x2]
    ; partial 2x8 transpose
    ; punpcklwd m0, m1 already done inside idct
    SBUTTERFLY qdq, 0, 4, 1
    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X

    movh m0, [blockq + 0]
    movh m1, [blockq +16]
    movh m2, [blockq +32]
    movh m3, [blockq +48]
    ; partial 4x8 transpose
    SBUTTERFLY dq, 0, 2, 1
    SBUTTERFLY dq, 4, 6, 5
    SBUTTERFLY qdq, 0, 4, 1
    SBUTTERFLY qdq, 2, 6, 5

.idctfull: ; generic full 8x8 idct/idct
    mova m0, [blockq+ 0] ; IN(0)
    mova m1, [blockq+ 16] ; IN(1)
    mova m2, [blockq+ 32] ; IN(2)
    mova m3, [blockq+ 48] ; IN(3)
    mova m4, [blockq+ 64] ; IN(4)
    mova m5, [blockq+ 80] ; IN(5)
    mova m6, [blockq+ 96] ; IN(6)
    mova m7, [blockq+112] ; IN(7)
    mova m11, [pd_8192] ; rounding
%define D_8192_REG m11
%define D_8192_REG [pd_8192]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
    ZERO_BLOCK blockq, 16, 8, m6

VP9_IDCT_IDCT_8x8_ADD_XMM sse2, 12
VP9_IDCT_IDCT_8x8_ADD_XMM ssse3, 13
VP9_IDCT_IDCT_8x8_ADD_XMM avx, 13

;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

; - in: m0/3/4/7 are in mem [blockq+N*16]
; - out: m6 is in mem [blockq+0]
; - everything is in registers
%macro VP9_IADST8_1D 0 ; input/output=m0/1/2/3/4/5/6/7
    VP9_UNPACK_MULSUB_2D_4X 5, 2, 0, 3, 14449, 7723 ; m5/2=t3[d], m2/4=t2[d]
    VP9_UNPACK_MULSUB_2D_4X 1, 6, 4, 7, 4756, 15679 ; m1/4=t7[d], m6/7=t6[d]
    SCRATCH 4, 12, blockq+1*16
    VP9_RND_SH_SUMSUB_BA 6, 2, 7, 3, 4, D_8192_REG ; m6=t2[w], m2=t6[w]
    UNSCRATCH 4, 12, blockq+1*16
    VP9_RND_SH_SUMSUB_BA 1, 5, 4, 0, 3, D_8192_REG ; m1=t3[w], m5=t7[w]
    UNSCRATCH 0, 8, blockq+16*0
    UNSCRATCH 3, 9, blockq+16*3
    UNSCRATCH 4, 10, blockq+16*4
    UNSCRATCH 7, 11, blockq+16*7
    SCRATCH 1, 8, blockq+16*1
    SCRATCH 2, 9, blockq+16*2
    SCRATCH 5, 10, blockq+16*5
    SCRATCH 6, 11, blockq+16*6
    VP9_UNPACK_MULSUB_2D_4X 7, 0, 1, 2, 16305, 1606 ; m7/1=t1[d], m0/2=t0[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 4, 5, 6, 10394, 12665 ; m3/5=t5[d], m4/6=t4[d]
    SCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 4, 0, 6, 2, 1, D_8192_REG ; m4=t0[w], m0=t4[w]
    UNSCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 3, 7, 5, 1, 2, D_8192_REG ; m3=t1[w], m7=t5[w]
    UNSCRATCH 2, 9, blockq+16*2
    UNSCRATCH 5, 10, blockq+16*5
    SCRATCH 3, 9, blockq+16*3
    SCRATCH 4, 10, blockq+16*4
    ; m4=t0, m3=t1, m6=t2, m1=t3, m0=t4, m7=t5, m2=t6, m5=t7
    VP9_UNPACK_MULSUB_2D_4X 0, 7, 1, 3, 15137, 6270 ; m0/1=t5[d], m7/3=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 5, 2, 4, 6, 6270, 15137 ; m5/4=t6[d], m2/6=t7[d]
    SCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 5, 7, 4, 3, 1, D_8192_REG
    UNSCRATCH 1, 12, blockq+ 0*16
    PSIGNW m5, W_M1_REG ; m5=out1[w], m7=t6[w]
    VP9_RND_SH_SUMSUB_BA 2, 0, 6, 1, 3, D_8192_REG ; m2=out6[w], m0=t7[w]
    UNSCRATCH 1, 8, blockq+16*1
    UNSCRATCH 3, 9, blockq+16*3
    UNSCRATCH 4, 10, blockq+16*4
    UNSCRATCH 6, 11, blockq+16*6
    SCRATCH 2, 8, blockq+16*0
    SUMSUB_BA w, 6, 4, 2 ; m6=out0[w], m4=t2[w]
    PSIGNW m1, W_M1_REG ; m1=out7[w], m3=t3[w]
    ; m6=out0, m5=out1, m4=t2, m3=t3, m7=t6, m0=t7, m2=out6, m1=out7
    ; unfortunately, the code below overflows in some cases
%if 0; cpuflag(ssse3)
    pmulhrsw m3, W_11585x2_REG
    pmulhrsw m7, W_11585x2_REG
    pmulhrsw m4, W_11585x2_REG ; out4
    pmulhrsw m0, W_11585x2_REG ; out2
    SCRATCH 5, 9, blockq+16*1
    VP9_UNPACK_MULSUB_2W_4X 4, 3, 11585, 11585, D_8192_REG, 2, 5
    VP9_UNPACK_MULSUB_2W_4X 7, 0, 11585, 11585, D_8192_REG, 2, 5
    UNSCRATCH 5, 9, blockq+16*1
    PSIGNW m3, W_M1_REG ; out3
    PSIGNW m7, W_M1_REG ; out5
    ; m6=out0, m5=out1, m0=out2, m3=out3, m4=out4, m7=out5, m2=out6, m1=out7
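    ; (the disabled pmulhrsw variant above is kept for reference: summing the
    ; t-pairs before the 1/sqrt(2) scaling can overflow int16 for valid
    ; streams, so the exact 32-bit VP9_UNPACK_MULSUB_2W_4X path is used
    ; instead)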
cglobal vp9_%1_%3_8x8_add, 3, 3, %6, dst, stride, block, eob
%define first_is_idct 1
%define first_is_idct 0
%define second_is_idct 1
%define second_is_idct 0
    mova m0, [blockq+ 0] ; IN(0)
    mova m1, [blockq+ 16] ; IN(1)
    mova m2, [blockq+ 32] ; IN(2)
%if ARCH_X86_64 || first_is_idct
    mova m3, [blockq+ 48] ; IN(3)
    mova m4, [blockq+ 64] ; IN(4)
    mova m5, [blockq+ 80] ; IN(5)
    mova m6, [blockq+ 96] ; IN(6)
%if ARCH_X86_64 || first_is_idct
    mova m7, [blockq+112] ; IN(7)
    mova m15, [pw_11585x2] ; often used
    mova m13, [pd_8192] ; rounding
%define W_11585x2_REG m15
%define D_8192_REG m13
%define W_11585x2_REG [pw_11585x2]
%define D_8192_REG [pd_8192]
%define W_M1_REG [pw_m1]
    ; note different calling conventions for idct8 vs. iadst8 on x86-32
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
%if second_is_idct == 0
    mova [blockq+ 48], m3
    mova [blockq+112], m7
    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
    ZERO_BLOCK blockq, 16, 8, m6
%undef second_is_idct

IADST8_FN idct, IDCT8, iadst, IADST8, sse2, 15
IADST8_FN iadst, IADST8, idct, IDCT8, sse2, 15
IADST8_FN iadst, IADST8, iadst, IADST8, sse2, 15
IADST8_FN idct, IDCT8, iadst, IADST8, ssse3, 16
IADST8_FN idct, IDCT8, iadst, IADST8, avx, 16
IADST8_FN iadst, IADST8, idct, IDCT8, ssse3, 16
IADST8_FN iadst, IADST8, idct, IDCT8, avx, 16
IADST8_FN iadst, IADST8, iadst, IADST8, ssse3, 16
IADST8_FN iadst, IADST8, iadst, IADST8, avx, 16

;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

; at the end of this macro, m7 is stored in [%4+15*%5]
; everything else (t0-6 and t8-15) is stored in m0-6 and m8-15
; the following sumsubs have not been done yet:
; SUMSUB_BA w, 6, 9, 15 ; t6, t9
; SUMSUB_BA w, 7, 8, 15 ; t7, t8
; or (x86-32) t0-t5 are in m0-m5, t10-t15 are in x11/9/7/5/3/1,
; and the following sumsubs have not been done yet:
; SUMSUB_BA w, x13, x14, 7 ; t6, t9
; SUMSUB_BA w, x15, x12, 7 ; t7, t8
%macro VP9_IDCT16_1D_START 6 ; src, nnzc, stride, scratch, scratch_stride, is_iadst
    mova m3, [%1+ 1*%3] ; IN(1)
    mova m0, [%1+ 3*%3] ; IN(3)
    pmulhrsw m4, m3, [pw_16305x2] ; t14-15
    pmulhrsw m3, [pw_1606x2] ; t8-9
    pmulhrsw m7, m0, [pw_m4756x2] ; t10-11
    pmulhrsw m0, [pw_15679x2] ; t12-13
    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
    ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
    VP9_UNPACK_MULSUB_2W_4X 2, 5, 4, 3, 15137, 6270, [pd_8192], 1, 6 ; t9, t14
    SCRATCH 4, 10, %4+ 1*%5
    SCRATCH 5, 11, %4+ 7*%5
    VP9_UNPACK_MULSUB_2W_4X 6, 1, 0, 7, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
    UNSCRATCH 5, 11, %4+ 7*%5
    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
    mova m5, [%1+ 1*%3] ; IN(1)
    mova m4, [%1+ 7*%3] ; IN(7)
    pmulhrsw m2, m5, [pw_16305x2] ; t15
    pmulhrsw m5, [pw_1606x2] ; t8
    pmulhrsw m3, m4, [pw_m10394x2] ; t9
    pmulhrsw m4, [pw_12665x2] ; t14
    mova m3, [%1+ 9*%3] ; IN(9)
    mova m2, [%1+15*%3] ; IN(15)
    ; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
    ; m11=in8, m3=in9, m12=in10 m0=in11, m8=in12, m1=in13, m13=in14, m2=in15
    VP9_UNPACK_MULSUB_2W_4X 5, 2, 16305, 1606, [pd_8192], 0, 1 ; t8, t15
    VP9_UNPACK_MULSUB_2W_4X 3, 4, 10394, 12665, [pd_8192], 0, 1 ; t9, t14
    SUMSUB_BA w, 3, 5, 0 ; t8, t9
    SUMSUB_BA w, 4, 2, 0 ; t15, t14
    VP9_UNPACK_MULSUB_2W_4X 2, 5, 15137, 6270, [pd_8192], 0, 1 ; t9, t14
    SCRATCH 4, 10, %4+ 1*%5
    SCRATCH 5, 11, %4+ 7*%5
    mova m6, [%1+ 3*%3] ; IN(3)
    mova m7, [%1+ 5*%3] ; IN(5)
    pmulhrsw m0, m7, [pw_14449x2] ; t13
    pmulhrsw m7, [pw_7723x2] ; t10
    pmulhrsw m1, m6, [pw_m4756x2] ; t11
    pmulhrsw m6, [pw_15679x2] ; t12
    mova m0, [%1+11*%3] ; IN(11)
    mova m1, [%1+13*%3] ; IN(13)
    VP9_UNPACK_MULSUB_2W_4X 7, 0, 14449, 7723, [pd_8192], 4, 5 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X 1, 6, 4756, 15679, [pd_8192], 4, 5 ; t11, t12
    ; m11=t0, m10=t1, m9=t2, m8=t3, m14=t4, m12=t5, m15=t6, m13=t7
    ; m5=t8, m3=t9, m7=t10, m1=t11, m6=t12, m0=t13, m4=t14, m2=t15
    SUMSUB_BA w, 7, 1, 4 ; t11, t10
    SUMSUB_BA w, 0, 6, 4 ; t12, t13
    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
    ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
    VP9_UNPACK_MULSUB_2W_4X 6, 1, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
    UNSCRATCH 5, 11, %4+ 7*%5
    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m13=t5, m14=t6, m15=t7
    ; m3=t8, m2=t9, m6=t10, m7=t11, m0=t12, m1=t13, m5=t14, m4=t15
    SUMSUB_BA w, 7, 3, 4 ; t8, t11
    ; backup first register
    SUMSUB_BA w, 6, 2, 7 ; t9, t10
    UNSCRATCH 4, 10, %4+ 1*%5
    SUMSUB_BA w, 0, 4, 7 ; t15, t12
    SUMSUB_BA w, 1, 5, 7 ; t14, t13
    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
%if cpuflag(ssse3) && %6 == 0
    SUMSUB_BA w, 2, 5, 7
    SUMSUB_BA w, 3, 4, 7
    pmulhrsw m5, [pw_11585x2] ; t10
    pmulhrsw m4, [pw_11585x2] ; t11
    pmulhrsw m3, [pw_11585x2] ; t12
    pmulhrsw m2, [pw_11585x2] ; t13
    SCRATCH 6, 10, %4+ 1*%5
    VP9_UNPACK_MULSUB_2W_4X 5, 2, 11585, 11585, [pd_8192], 6, 7 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X 4, 3, 11585, 11585, [pd_8192], 6, 7 ; t11, t12
    UNSCRATCH 6, 10, %4+ 1*%5
    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m5=t10, m4=t11, m3=t12, m2=t13, m1=t14, m0=t15
    SCRATCH 0, 8, %4+ 1*%5
    SCRATCH 1, 9, %4+ 3*%5
    SCRATCH 2, 10, %4+ 5*%5
    SCRATCH 3, 11, %4+ 7*%5
    SCRATCH 4, 12, %4+ 9*%5
    SCRATCH 5, 13, %4+11*%5
    SCRATCH 6, 14, %4+13*%5
    mova m3, [%1+ 0*%3] ; IN(0)
    mova m4, [%1+ 2*%3] ; IN(2)
    pmulhrsw m3, [pw_11585x2] ; t0-t3
    pmulhrsw m7, m4, [pw_16069x2] ; t6-7
    pmulhrsw m4, [pw_3196x2] ; t4-5
%if 0 ; overflows :(
    pmulhrsw m5, [pw_11585x2] ; t5
    pmulhrsw m6, [pw_11585x2] ; t6
    VP9_UNPACK_MULSUB_2W_4X 5, 6, 7, 4, 11585, 11585, [pd_8192], 0, 1 ; t5, t6
    SCRATCH 7, 15, %4+12*%5
    mova m6, [%1+ 2*%3] ; IN(2)
    mova m1, [%1+ 4*%3] ; IN(4)
    mova m7, [%1+ 6*%3] ; IN(6)
    pmulhrsw m0, m1, [pw_15137x2] ; t3
    pmulhrsw m1, [pw_6270x2] ; t2
    pmulhrsw m5, m6, [pw_16069x2] ; t7
    pmulhrsw m6, [pw_3196x2] ; t4
    pmulhrsw m4, m7, [pw_m9102x2] ; t5
    pmulhrsw m7, [pw_13623x2] ; t6
    mova m4, [%1+10*%3] ; IN(10)
    mova m0, [%1+12*%3] ; IN(12)
    mova m5, [%1+14*%3] ; IN(14)
    VP9_UNPACK_MULSUB_2W_4X 1, 0, 15137, 6270, [pd_8192], 2, 3 ; t2, t3
    VP9_UNPACK_MULSUB_2W_4X 6, 5, 16069, 3196, [pd_8192], 2, 3 ; t4, t7
    VP9_UNPACK_MULSUB_2W_4X 4, 7, 9102, 13623, [pd_8192], 2, 3 ; t5, t6
    SUMSUB_BA w, 4, 6, 2 ; t4, t5
    SUMSUB_BA w, 7, 5, 2 ; t7, t6
%if cpuflag(ssse3) && %6 == 0
    SUMSUB_BA w, 6, 5, 2
    pmulhrsw m5, [pw_11585x2] ; t5
    pmulhrsw m6, [pw_11585x2] ; t6
    VP9_UNPACK_MULSUB_2W_4X 5, 6, 11585, 11585, [pd_8192], 2, 3 ; t5, t6
    SCRATCH 5, 15, %4+10*%5
    mova m2, [%1+ 0*%3] ; IN(0)
    pmulhrsw m2, [pw_11585x2] ; t0 and t1
    SUMSUB_BA w, 7, 0, 5 ; t0, t7
    mova m3, [%1+ 8*%3] ; IN(8)
    ; from 3 stages back
%if cpuflag(ssse3) && %6 == 0
    SUMSUB_BA w, 3, 2, 5
    pmulhrsw m3, [pw_11585x2] ; t0
    pmulhrsw m2, [pw_11585x2] ; t1
    VP9_UNPACK_MULSUB_2W_4X 2, 3, 11585, 11585, [pd_8192], 5, 0 ; t0, t1
    ; from 2 stages back
    SUMSUB_BA w, 0, 3, 5 ; t0, t3
    SUMSUB_BA w, 7, 0, 5 ; t0, t7
    UNSCRATCH 5, 15, %4+10*%5
    SCRATCH 7, 15, %4+12*%5
    SUMSUB_BA w, 1, 2, 7 ; t1, t2
    SUMSUB_BA w, 6, 1, 7 ; t1, t6
    SUMSUB_BA w, 5, 2, 7 ; t2, t5
    SUMSUB_BA w, 4, 3, 7 ; t3, t4
    SUMSUB_BA w, 0, 15, 7 ; t0, t15
    SUMSUB_BA w, 1, 14, 7 ; t1, t14
    SUMSUB_BA w, 2, 13, 7 ; t2, t13
    SUMSUB_BA w, 3, 12, 7 ; t3, t12
    SUMSUB_BA w, 4, 11, 7 ; t4, t11
    SUMSUB_BA w, 5, 10, 7 ; t5, t10
%macro %%SUMSUB_BA_STORE 5 ; reg, from_mem, to_mem, scratch, scratch_stride
    SUMSUB_BA w, 6, %1, 7
    %%SUMSUB_BA_STORE 0, 1, 1, %4, %5 ; t0, t15
    %%SUMSUB_BA_STORE 1, 3, 3, %4, %5 ; t1, t14
    %%SUMSUB_BA_STORE 2, 5, 5, %4, %5 ; t2, t13
    %%SUMSUB_BA_STORE 3, 7, 7, %4, %5 ; t3, t12
    %%SUMSUB_BA_STORE 4, 9, 9, %4, %5 ; t4, t11
    %%SUMSUB_BA_STORE 5, 11, 11, %4, %5 ; t5, t10

%macro VP9_IDCT16_1D 2-4 16, 1 ; src, pass, nnzc, is_iadst
    VP9_IDCT16_1D_START %1, %3, 32, tmpq, 16, %4
    ; backup a different register
    mova m7, [tmpq+15*16]
    mova [tmpq+ 1*16], m15
    SUMSUB_BA w, 6, 9, 15 ; t6, t9
    SUMSUB_BA w, 7, 8, 15 ; t7, t8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 15
    mova m15, [tmpq+ 1*16]
    TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
    mova [tmpq+ 80], m10
    mova [tmpq+112], m11
    mova [tmpq+144], m12
    mova [tmpq+176], m13
    mova [tmpq+208], m14
    mova [tmpq+240], m15
    mova m6, [tmpq+13*16]
    mova m7, [tmpq+14*16]
    SUMSUB_BA w, 6, 7 ; t6, t9
    mova [tmpq+14*16], m6
    mova [tmpq+13*16], m7
    mova m7, [tmpq+15*16]
    mova m6, [tmpq+12*16]
    SUMSUB_BA w, 7, 6 ; t7, t8
    mova [tmpq+15*16], m6
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+14*16], [tmpq+ 8*16], 1
    mova [tmpq+ 0*16], m0
    mova [tmpq+ 2*16], m1
    mova [tmpq+ 4*16], m2
    mova [tmpq+ 6*16], m3
    mova [tmpq+10*16], m5
    mova [tmpq+12*16], m6
    mova [tmpq+14*16], m7
    mova m0, [tmpq+15*16]
    mova m1, [tmpq+13*16]
    mova m2, [tmpq+11*16]
    mova m3, [tmpq+ 9*16]
    mova m4, [tmpq+ 7*16]
    mova m5, [tmpq+ 5*16]
    mova m7, [tmpq+ 1*16]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+ 3*16], [tmpq+ 9*16], 1
    mova [tmpq+ 1*16], m0
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m2
    mova [tmpq+ 7*16], m3
    mova [tmpq+11*16], m5
    mova [tmpq+13*16], m6
    mova [tmpq+15*16], m7
    VP9_IDCT16_1D_START %1, %3, 32, %1, 32, %4
%define ROUND_REG [pw_512]
%define ROUND_REG [pw_32]
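    ; (pass-2 write-out rounding: pmulhrsw with pw_512 computes
    ; (x*512 + 0x4000) >> 15, which equals (x+32)>>6; the non-SSSE3 fallback
    ; adds pw_32 and shifts right by 6 instead)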
    ; backup more registers
    VP9_IDCT8_WRITEx2 0, 1, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 2, 3, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 4, 5, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    ; restore from cache
    SWAP 0, 7 ; move zero from m7 to m0
    SUMSUB_BA w, 6, 9, 3 ; t6, t9
    SUMSUB_BA w, 7, 8, 3 ; t7, t8
    VP9_IDCT8_WRITEx2 6, 7, 3, 4, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 8, 9, 3, 4, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 10, 11, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 12, 13, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 14, 15, 1, 2, 0, ROUND_REG, 6
    mova [tmpq+ 0*32], m5
    VP9_IDCT8_WRITEx2 0, 1, 5, 6, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 2, 3, 5, 6, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    SWAP 0, 7 ; move zero from m7 to m0
    mova m5, [tmpq+ 0*32]
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m4, [tmpq+13*32]
    mova m7, [tmpq+14*32]
    mova m5, [tmpq+15*32]
    mova m6, [tmpq+12*32]
    SUMSUB_BADC w, 4, 7, 5, 6, 1
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m4, [tmpq+11*32]
    mova m5, [tmpq+ 9*32]
    mova m6, [tmpq+ 7*32]
    mova m7, [tmpq+ 5*32]
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m4, [tmpq+ 3*32]
    mova m5, [tmpq+ 1*32]
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]

%macro VP9_STORE_2XFULL 6-7 strideq; dc, tmp1, tmp2, tmp3, tmp4, zero, stride
    punpcklbw m%2, m%3, m%6
    punpcklbw m%4, m%5, m%6
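    ; (elided remainder: the macro widens two full 16-pixel rows of dst to
    ; words, adds the broadcast DC from m%1 to both, then packs back with
    ; unsigned saturation and stores the two rows)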
%macro VP9_IDCT_IDCT_16x16_ADD_XMM 1
cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob
    ; a 2x2 nonzero region implies eob <= 3, a 4x4 region eob <= 10
    cmp eobd, 1 ; faster path for when only DC is set
    cmp eobd, 1 ; faster path for when only DC is set
    mova m1, [pw_11585x2]
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (32 << 14) + 8192
    SPLATW m0, m0, q0000
    pmulhrsw m0, [pw_512]
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
    DEFINE_ARGS dst, stride, block, cnt, dst_bak, tmp
    VP9_IDCT16_1D blockq, 1, 8, 0
    VP9_IDCT16_1D tmpq, 2, 8, 0
    lea dstq, [dst_bakq+8]
    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 8, m0
    VP9_IDCT16_1D blockq, 1, 16, 0
    VP9_IDCT16_1D tmpq, 2, 16, 0
    lea dstq, [dst_bakq+8]
    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 16, m0

VP9_IDCT_IDCT_16x16_ADD_XMM sse2
VP9_IDCT_IDCT_16x16_ADD_XMM ssse3
VP9_IDCT_IDCT_16x16_ADD_XMM avx

%macro VP9_IDCT16_YMM_1D 0
    VP9_UNPACK_MULSUB_2W_4X 1, 15, 16305, 1606, [pd_8192], 0, 4 ; t8, t15
    VP9_UNPACK_MULSUB_2W_4X 9, 7, 10394, 12665, [pd_8192], 0, 4 ; t9, t14
    SUMSUB_BA w, 9, 1, 0 ; t8, t9
    SUMSUB_BA w, 7, 15, 0 ; t15, t14
    VP9_UNPACK_MULSUB_2W_4X 15, 1, 15137, 6270, [pd_8192], 0, 4 ; t9, t14
    VP9_UNPACK_MULSUB_2W_4X 5, 11, 14449, 7723, [pd_8192], 0, 4 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X 13, 3, 4756, 15679, [pd_8192], 0, 4 ; t11, t12
    SUMSUB_BA w, 5, 13, 0 ; t11, t10
    SUMSUB_BA w, 11, 3, 0 ; t12, t13
    VP9_UNPACK_MULSUB_2W_4X 3, 13, 6270, m15137, [pd_8192], 0, 4 ; t10, t13
    SUMSUB_BA w, 5, 9, 0 ; t8, t11
    SUMSUB_BA w, 3, 15, 0 ; t9, t10
    SUMSUB_BA w, 11, 7, 0 ; t15, t12
    SUMSUB_BA w, 13, 1, 0 ; t14, t13
    SUMSUB_BA w, 15, 1, 0
    SUMSUB_BA w, 9, 7, 0
    pmulhrsw m1, [pw_11585x2] ; t10
    pmulhrsw m7, [pw_11585x2] ; t11
    pmulhrsw m9, [pw_11585x2] ; t12
    pmulhrsw m15, [pw_11585x2] ; t13
    mova m4, [blockq+128]
    mova [blockq+128], m5
    VP9_UNPACK_MULSUB_2W_4X 4, 12, 15137, 6270, [pd_8192], 0, 5 ; t2, t3
    VP9_UNPACK_MULSUB_2W_4X 2, 14, 16069, 3196, [pd_8192], 0, 5 ; t4, t7
    VP9_UNPACK_MULSUB_2W_4X 10, 6, 9102, 13623, [pd_8192], 0, 5 ; t5, t6
    mova m0, [blockq+ 0]
    SUMSUB_BA w, 8, 0, 5
    pmulhrsw m8, [pw_11585x2] ; t0
    pmulhrsw m0, [pw_11585x2] ; t1
    SUMSUB_BA w, 10, 2, 5 ; t4, t5
    SUMSUB_BA w, 6, 14, 5 ; t7, t6
    SUMSUB_BA w, 12, 8, 5 ; t0, t3
    SUMSUB_BA w, 4, 0, 5 ; t1, t2
    SUMSUB_BA w, 2, 14, 5
    pmulhrsw m14, [pw_11585x2] ; t5
    pmulhrsw m2, [pw_11585x2] ; t6
    SUMSUB_BA w, 6, 12, 5 ; t0, t7
    SUMSUB_BA w, 2, 4, 5 ; t1, t6
    SUMSUB_BA w, 14, 0, 5 ; t2, t5
    SUMSUB_BA w, 10, 8, 5 ; t3, t4
    SUMSUB_BA w, 11, 6, 5 ; out0, out15
    SUMSUB_BA w, 13, 2, 5 ; out1, out14
    SUMSUB_BA w, 15, 14, 5 ; out2, out13
    SUMSUB_BA w, 9, 10, 5 ; out3, out12
    SUMSUB_BA w, 7, 8, 5 ; out4, out11
    SUMSUB_BA w, 1, 0, 5 ; out5, out10
    SUMSUB_BA w, 3, 4, 5 ; out6, out9
    mova m5, [blockq+128]
    mova [blockq+192], m3
    SUMSUB_BA w, 5, 12, 3 ; out7, out8
    SWAP 0, 11, 8, 12, 10
    SWAP 1, 13, 14, 2, 15, 6, 3, 9, 4, 7, 5
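    ; (the two SWAP chains above only rename registers, presumably so that
    ; out0..out15 end up in m0..m15 in order for the transpose and write-out
    ; code that follows)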
; this is almost identical to VP9_STORE_2X, but it does two rows
; for slightly improved interleaving, and it omits vpermq since the
; input is DC so all values are identical
%macro VP9_STORE_YMM_DC_4X 6 ; reg, tmp1, tmp2, tmp3, tmp4, zero
    mova xm%4, [dstq+strideq*2]
    vinserti128 m%2, m%2, [dstq+strideq], 1
    vinserti128 m%4, m%4, [dstq+stride3q], 1
    punpckhbw m%3, m%2, m%6
    punpckhbw m%5, m%4, m%6
    mova [dstq+strideq*2], xm%4
    vextracti128 [dstq+strideq], m%2, 1
    vextracti128 [dstq+stride3q], m%4, 1

%if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
cglobal vp9_idct_idct_16x16_add, 4, 4, 16, dst, stride, block, eob
    cmp eobd, 1 ; faster path for when only DC is set
    mova m1, [pw_11585x2]
    vpbroadcastw m0, xm0
    pmulhrsw m0, [pw_512]
    DEFINE_ARGS dst, stride, stride3, cnt
    lea stride3q, [strideq*3]
    VP9_STORE_YMM_DC_4X 0, 1, 2, 3, 4, 5
    lea dstq, [dstq+4*strideq]
    DEFINE_ARGS dst, stride, block, eob
    mova m1, [blockq+ 32]
    mova m2, [blockq+ 64]
    mova m3, [blockq+ 96]
    mova m5, [blockq+160]
    mova m6, [blockq+192]
    mova m7, [blockq+224]
    mova m8, [blockq+256]
    mova m9, [blockq+288]
    mova m10, [blockq+320]
    mova m11, [blockq+352]
    mova m12, [blockq+384]
    mova m13, [blockq+416]
    mova m14, [blockq+448]
    mova m15, [blockq+480]
    TRANSPOSE16x16W 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \
                    [blockq+192], [blockq+128], 1
    mova [blockq+ 0], m0
    mova [blockq+224], m7
    mova [blockq+480], m15
    VP9_IDCT8_WRITEx2 0, 1, 6, 7, 15, [pw_512], 6
    lea dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2 2, 3, 6, 7, 15, [pw_512], 6
    lea dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2 4, 5, 6, 7, 15, [pw_512], 6
    lea dstq, [dstq+2*strideq]
    mova m6, [blockq+192]
    mova m7, [blockq+224]
    mova m15, [blockq+480]
    VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, [pw_512], 6
    lea dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2 8, 9, 1, 2, 0, [pw_512], 6
    lea dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2 10, 11, 1, 2, 0, [pw_512], 6
    lea dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2 12, 13, 1, 2, 0, [pw_512], 6
    lea dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2 14, 15, 1, 2, 0, [pw_512], 6
    lea dstq, [dstq+2*strideq]
    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 16, m0

;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IADST16_1D 2 ; src, pass
    mova m0, [%1+ 0*32] ; in0
    mova m1, [%1+15*32] ; in15
    mova m2, [%1+ 7*32] ; in7
    mova m3, [%1+ 8*32] ; in8
    VP9_UNPACK_MULSUB_2D_4X 1, 0, 4, 5, 16364, 804 ; m1/4=t1[d], m0/5=t0[d]
    VP9_UNPACK_MULSUB_2D_4X 2, 3, 7, 6, 11003, 12140 ; m2/7=t9[d], m3/6=t8[d]
    SCRATCH 4, 8, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 3, 0, 6, 5, 4, [pd_8192] ; m3=t0[w], m0=t8[w]
    UNSCRATCH 4, 8, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 2, 1, 7, 4, 5, [pd_8192] ; m2=t1[w], m1=t9[w]
    SCRATCH 0, 10, tmpq+ 0*%%str
    SCRATCH 1, 11, tmpq+15*%%str
    mova [tmpq+ 7*%%str], m2
    mova [tmpq+ 8*%%str], m3
    mova m1, [%1+ 2*32] ; in2
    mova m0, [%1+13*32] ; in13
    mova m3, [%1+ 5*32] ; in5
    mova m2, [%1+10*32] ; in10
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 15893, 3981 ; m0/6=t3[d], m1/7=t2[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 4, 5, 8423, 14053 ; m3/4=t11[d], m2/5=t10[d]
    SCRATCH 4, 12, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 2, 1, 5, 7, 4, [pd_8192] ; m2=t2[w], m1=t10[w]
    UNSCRATCH 4, 12, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 3, 0, 4, 6, 5, [pd_8192] ; m3=t3[w], m0=t11[w]
    SCRATCH 0, 12, tmpq+ 2*%%str
    SCRATCH 1, 13, tmpq+13*%%str
    mova [tmpq+ 5*%%str], m2
    mova [tmpq+10*%%str], m3
    mova m2, [%1+ 4*32] ; in4
    mova m3, [%1+11*32] ; in11
    mova m0, [%1+ 3*32] ; in3
    mova m1, [%1+12*32] ; in12
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 14811, 7005 ; m3/7=t5[d], m2/6=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 4, 5, 5520, 15426 ; m0/4=t13[d], m1/5=t12[d]
    SCRATCH 4, 9, tmpq+ 4*%%str
    VP9_RND_SH_SUMSUB_BA 1, 2, 5, 6, 4, [pd_8192] ; m1=t4[w], m2=t12[w]
    UNSCRATCH 4, 9, tmpq+ 4*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 4, 7, 6, [pd_8192] ; m0=t5[w], m3=t13[w]
    SCRATCH 0, 8, tmpq+ 4*%%str
    mova [tmpq+11*%%str], m1 ; t4:m1->r11
    UNSCRATCH 0, 10, tmpq+ 0*%%str
    UNSCRATCH 1, 11, tmpq+15*%%str
    ; round 2 interleaved part 1
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 16069, 3196 ; m1/7=t8[d], m0/6=t9[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 5, 4, 3196, 16069 ; m3/5=t12[d], m2/4=t13[d]
    SCRATCH 4, 9, tmpq+ 3*%%str
    VP9_RND_SH_SUMSUB_BA 3, 1, 5, 7, 4, [pd_8192] ; m3=t8[w], m1=t12[w]
    UNSCRATCH 4, 9, tmpq+ 3*%%str
    VP9_RND_SH_SUMSUB_BA 2, 0, 4, 6, 5, [pd_8192] ; m2=t9[w], m0=t13[w]
    SCRATCH 0, 10, tmpq+ 0*%%str
    SCRATCH 1, 11, tmpq+15*%%str
    SCRATCH 2, 14, tmpq+ 3*%%str
    SCRATCH 3, 15, tmpq+12*%%str
    mova m2, [%1+ 6*32] ; in6
    mova m3, [%1+ 9*32] ; in9
    mova m0, [%1+ 1*32] ; in1
    mova m1, [%1+14*32] ; in14
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 13160, 9760 ; m3/7=t7[d], m2/6=t6[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 4, 5, 2404, 16207 ; m0/4=t15[d], m1/5=t14[d]
    SCRATCH 4, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 1, 2, 5, 6, 4, [pd_8192] ; m1=t6[w], m2=t14[w]
    UNSCRATCH 4, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 4, 7, 6, [pd_8192] ; m0=t7[w], m3=t15[w]
    ; r8=t0, r7=t1, r5=t2, r10=t3, r11=t4, m8|r4=t5, m1=t6, m0=t7
    ; m10|r0=t8, m11|r15=t9, m13|r13=t10, m12|r2=t11, m14|r3=t12, m15|r12=t13, m2=t14, m3=t15
    UNSCRATCH 4, 12, tmpq+ 2*%%str
    UNSCRATCH 5, 13, tmpq+13*%%str
    SCRATCH 0, 12, tmpq+ 1*%%str
    SCRATCH 1, 13, tmpq+14*%%str
    ; remainder of round 2 (rest of t8-15)
    VP9_UNPACK_MULSUB_2D_4X 5, 4, 6, 7, 9102, 13623 ; m5/6=t11[d], m4/7=t10[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 1, 0, 13623, 9102 ; m3/1=t14[d], m2/0=t15[d]
    SCRATCH 0, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 3, 4, 1, 7, 0, [pd_8192] ; m3=t10[w], m4=t14[w]
    UNSCRATCH 0, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 2, 5, 0, 6, 1, [pd_8192] ; m2=t11[w], m5=t15[w]
    ; m15|r12=t8, m14|r3=t9, m3=t10, m2=t11, m11|r15=t12, m10|r0=t13, m4=t14, m5=t15
    UNSCRATCH 6, 14, tmpq+ 3*%%str
    UNSCRATCH 7, 15, tmpq+12*%%str
    SUMSUB_BA w, 3, 7, 1
    PSIGNW m3, [pw_m1] ; m3=out1[w], m7=t10[w]
    SUMSUB_BA w, 2, 6, 1 ; m2=out14[w], m6=t11[w]
    ; unfortunately, the code below overflows in some cases, e.g.
    ; http://downloads.webmproject.org/test_data/libvpx/vp90-2-14-resize-fp-tiles-16-8.webm
%if 0; cpuflag(ssse3)
    SUMSUB_BA w, 7, 6, 1
    pmulhrsw m7, [pw_11585x2] ; m7=out6[w]
    pmulhrsw m6, [pw_11585x2] ; m6=out9[w]
    VP9_UNPACK_MULSUB_2W_4X 6, 7, 11585, 11585, [pd_8192], 1, 0
    mova [tmpq+ 3*%%str], m6
    mova [tmpq+ 6*%%str], m7
    UNSCRATCH 6, 10, tmpq+ 0*%%str
    UNSCRATCH 7, 11, tmpq+15*%%str
    mova [tmpq+13*%%str], m2
    SCRATCH 3, 11, tmpq+ 9*%%str
    VP9_UNPACK_MULSUB_2D_4X 7, 6, 2, 3, 15137, 6270 ; m6/3=t13[d], m7/2=t12[d]
    VP9_UNPACK_MULSUB_2D_4X 5, 4, 1, 0, 6270, 15137 ; m5/1=t14[d], m4/0=t15[d]
    SCRATCH 0, 9, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 5, 6, 1, 3, 0, [pd_8192] ; m5=out2[w], m6=t14[w]
    UNSCRATCH 0, 9, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 4, 7, 0, 2, 1, [pd_8192]
    PSIGNW m4, [pw_m1] ; m4=out13[w], m7=t15[w]
    ; unfortunately, the code below overflows in some cases
%if 0; cpuflag(ssse3)
    SUMSUB_BA w, 7, 6, 1
    pmulhrsw m7, [pw_m11585x2] ; m7=out5[w]
    pmulhrsw m6, [pw_11585x2] ; m6=out10[w]
    VP9_UNPACK_MULSUB_2W_4X 7, 6, 11585, 11585, [pd_8192], 1, 0
    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, m6=out10, m4=out13, r2=out14
    mova m2, [tmpq+ 8*%%str]
    mova m3, [tmpq+ 7*%%str]
    mova m1, [tmpq+11*%%str]
    mova [tmpq+ 7*%%str], m6
    mova [tmpq+11*%%str], m4
    mova m4, [tmpq+ 5*%%str]
    SCRATCH 5, 14, tmpq+ 5*%%str
    SCRATCH 7, 15, tmpq+ 8*%%str
    UNSCRATCH 6, 8, tmpq+ 4*%%str
    UNSCRATCH 5, 12, tmpq+ 1*%%str
    UNSCRATCH 7, 13, tmpq+14*%%str
    ; m2=t0, m3=t1, m9=t2, m0=t3, m1=t4, m8=t5, m13=t6, m12=t7
    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14
    SUMSUB_BA w, 1, 2, 0 ; m1=t0[w], m2=t4[w]
    mova m0, [tmpq+10*%%str]
    SCRATCH 1, 12, tmpq+ 1*%%str
    SUMSUB_BA w, 6, 3, 1 ; m8=t1[w], m3=t5[w]
    SCRATCH 6, 13, tmpq+ 4*%%str
    SUMSUB_BA w, 7, 4, 1 ; m13=t2[w], m9=t6[w]
    SCRATCH 7, 8, tmpq+10*%%str
    SUMSUB_BA w, 5, 0, 1 ; m12=t3[w], m0=t7[w]
    SCRATCH 5, 9, tmpq+14*%%str
    VP9_UNPACK_MULSUB_2D_4X 2, 3, 7, 5, 15137, 6270 ; m2/6=t5[d], m3/10=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 4, 1, 6, 6270, 15137 ; m0/14=t6[d], m9/15=t7[d]
    SCRATCH 6, 10, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 1, 5, 6, [pd_8192]
    UNSCRATCH 6, 10, tmpq+ 0*%%str
    PSIGNW m0, [pw_m1] ; m0=out3[w], m3=t6[w]
    VP9_RND_SH_SUMSUB_BA 4, 2, 6, 7, 5, [pd_8192] ; m9=out12[w], m2=t7[w]
    UNSCRATCH 1, 8, tmpq+10*%%str
    UNSCRATCH 5, 9, tmpq+14*%%str
    UNSCRATCH 6, 12, tmpq+ 1*%%str
    UNSCRATCH 7, 13, tmpq+ 4*%%str
    SCRATCH 4, 9, tmpq+14*%%str
    SUMSUB_BA w, 1, 6, 4 ; m13=out0[w], m1=t2[w]
    SUMSUB_BA w, 5, 7, 4
    PSIGNW m5, [pw_m1] ; m12=out15[w], m8=t3[w]
    ; unfortunately, the code below overflows in some cases, e.g.
    ; http://downloads.webmproject.org/test_data/libvpx/vp90-2-14-resize-fp-tiles-16-8-4-2-1.webm
%if 0 ; cpuflag(ssse3)
    SUMSUB_BA w, 7, 6, 4
    pmulhrsw m7, [pw_m11585x2] ; m8=out7[w]
    pmulhrsw m6, [pw_11585x2] ; m1=out8[w]
    SUMSUB_BA w, 3, 2, 4
    pmulhrsw m3, [pw_11585x2] ; m3=out4[w]
    pmulhrsw m2, [pw_11585x2] ; m2=out11[w]
    SCRATCH 5, 8, tmpq+10*%%str
    VP9_UNPACK_MULSUB_2W_4X 6, 7, 11585, m11585, [pd_8192], 5, 4
    VP9_UNPACK_MULSUB_2W_4X 2, 3, 11585, 11585, [pd_8192], 5, 4
    UNSCRATCH 5, 8, tmpq+10*%%str
    ; m13=out0, m0=out3, m3=out4, m8=out7, m1=out8, m2=out11, m9=out12, m12=out15
    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14
    mova m13, [tmpq+ 6*%%str]
    TRANSPOSE8x8W 1, 11, 14, 0, 3, 15, 13, 6, 10
    mova [tmpq+ 0*16], m1
    mova [tmpq+ 2*16], m11
    mova [tmpq+ 4*16], m14
    mova [tmpq+ 6*16], m0
    mova m1, [tmpq+ 3*%%str]
    mova m11, [tmpq+ 7*%%str]
    mova m14, [tmpq+11*%%str]
    mova m0, [tmpq+13*%%str]
    mova [tmpq+ 8*16], m3
    mova [tmpq+10*16], m15
    mova [tmpq+12*16], m13
    mova [tmpq+14*16], m6
    TRANSPOSE8x8W 7, 1, 11, 2, 9, 14, 0, 5, 10
    mova [tmpq+ 1*16], m7
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m11
    mova [tmpq+ 7*16], m2
    mova [tmpq+ 9*16], m9
    mova [tmpq+11*16], m14
    mova [tmpq+13*16], m0
    mova [tmpq+15*16], m5
    mova [tmpq+12*%%str], m2
    mova [tmpq+ 1*%%str], m5
    mova [tmpq+15*%%str], m7
    mova m2, [tmpq+ 9*%%str]
    mova m5, [tmpq+ 5*%%str]
    mova m7, [tmpq+ 8*%%str]
    TRANSPOSE8x8W 1, 2, 5, 0, 3, 7, 4, 6, [tmpq+ 6*%%str], [tmpq+ 8*%%str], 1
    mova [tmpq+ 0*16], m1
    mova [tmpq+ 2*16], m2
    mova [tmpq+ 4*16], m5
    mova [tmpq+ 6*16], m0
    mova [tmpq+10*16], m7
    mova m3, [tmpq+12*%%str]
    mova [tmpq+12*16], m4
    mova m4, [tmpq+14*%%str]
    mova [tmpq+14*16], m6
    mova m0, [tmpq+15*%%str]
    mova m1, [tmpq+ 3*%%str]
    mova m2, [tmpq+ 7*%%str]
    mova m5, [tmpq+11*%%str]
    mova m7, [tmpq+ 1*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+13*%%str], [tmpq+ 9*%%str], 1
    mova [tmpq+ 1*16], m0
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m2
    mova [tmpq+ 7*16], m3
    mova [tmpq+11*16], m5
    mova [tmpq+13*16], m6
    mova [tmpq+15*16], m7
%define ROUND_REG [pw_512]
%define ROUND_REG [pw_32]
    mova m12, [tmpq+ 6*%%str]
    VP9_IDCT8_WRITEx2 1, 11, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 14, 0, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 3, 15, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 12, 6, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m1, [tmpq+ 3*%%str]
    mova m11, [tmpq+ 7*%%str]
    mova m14, [tmpq+11*%%str]
    mova m0, [tmpq+13*%%str]
    VP9_IDCT8_WRITEx2 7, 1, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 11, 2, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 9, 14, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 0, 5, 10, 8, 4, ROUND_REG, 6
    mova [tmpq+ 0*%%str], m2
    mova [tmpq+ 1*%%str], m5
    mova [tmpq+ 2*%%str], m7
    mova m2, [tmpq+ 9*%%str]
    VP9_IDCT8_WRITEx2 1, 2, 5, 7, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 5*%%str]
    VP9_IDCT8_WRITEx2 5, 0, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 8*%%str]
    VP9_IDCT8_WRITEx2 3, 5, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 6*%%str]
    VP9_IDCT8_WRITEx2 5, 6, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+ 2*%%str]
    mova m3, [tmpq+ 3*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+ 7*%%str]
    mova m3, [tmpq+ 0*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+14*%%str]
    mova m3, [tmpq+11*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+13*%%str]
    mova m3, [tmpq+ 1*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6

cglobal vp9_%1_%3_16x16_add, 3, 6, 16, 512, dst, stride, block, cnt, dst_bak, tmp
    lea dstq, [dst_bakq+8]
    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 16, m0

IADST16_FN idct, IDCT16, iadst, IADST16, sse2
IADST16_FN iadst, IADST16, idct, IDCT16, sse2
IADST16_FN iadst, IADST16, iadst, IADST16, sse2
IADST16_FN idct, IDCT16, iadst, IADST16, ssse3
IADST16_FN iadst, IADST16, idct, IDCT16, ssse3
IADST16_FN iadst, IADST16, iadst, IADST16, ssse3
IADST16_FN idct, IDCT16, iadst, IADST16, avx
IADST16_FN iadst, IADST16, idct, IDCT16, avx
IADST16_FN iadst, IADST16, iadst, IADST16, avx

;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_32x32_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IDCT32_1D 2-3 32 ; src, pass, nnzc
%assign %%str 16*%2*%2
    ; first do t0-15; this can be done identically to the 16x16 idct
    VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq, 2*%%str, 1
    ; store everything on stack to make space available for t16-31
    ; we store interleaved with the output of the second half (t16-31)
    ; so we don't need to allocate extra stack space
    mova [tmpq+ 0*%%str], m0 ; t0
    mova [tmpq+ 4*%%str], m1 ; t1
    mova [tmpq+ 8*%%str], m2 ; t2
    mova [tmpq+12*%%str], m3 ; t3
    mova [tmpq+16*%%str], m4 ; t4
    mova [tmpq+20*%%str], m5 ; t5
    mova [tmpq+22*%%str], m10 ; t10
    mova [tmpq+18*%%str], m11 ; t11
    mova [tmpq+14*%%str], m12 ; t12
    mova [tmpq+10*%%str], m13 ; t13
    mova [tmpq+ 6*%%str], m14 ; t14
    mova [tmpq+ 2*%%str], m15 ; t15
    mova m0, [tmpq+ 30*%%str]
    UNSCRATCH 1, 6, tmpq+26*%%str
    UNSCRATCH 2, 8, tmpq+24*%%str
    UNSCRATCH 3, 9, tmpq+28*%%str
    SUMSUB_BA w, 1, 3, 4 ; t6, t9
    SUMSUB_BA w, 0, 2, 4 ; t7, t8
    mova [tmpq+24*%%str], m1 ; t6
    mova [tmpq+28*%%str], m0 ; t7
    mova [tmpq+30*%%str], m2 ; t8
    mova [tmpq+26*%%str], m3 ; t9
    ; then do the second half, t16-31
2035 pmulhrsw m1, m4, [pw_16364x2] ;t31
2036 pmulhrsw m4, [pw_804x2] ;t16
2038 VP9_UNPACK_MULSUB_2W_4X 5, 0, 1, 4, 16069, 3196, [pd_8192], 6, 2 ; t17, t30
2040 pmulhrsw m3, m7, [pw_m5520x2] ;t19
2041 pmulhrsw m7, [pw_15426x2] ;t28
2043 SCRATCH 4, 13, tmpq+ 1*%%str
2044 SCRATCH 5, 12, tmpq+15*%%str
2046 VP9_UNPACK_MULSUB_2W_4X 2, 6, 7, 3, 3196, m16069, [pd_8192], 4, 5 ; t18, t29
2051 pmulhrsw m5, m0, [pw_16364x2]
2052 pmulhrsw m0, [pw_804x2]
2053 pmulhrsw m4, m1, [pw_m11003x2]
2054 pmulhrsw m1, [pw_12140x2]
2059 VP9_UNPACK_MULSUB_2W_4X 0, 5, 16364, 804, [pd_8192], 2, 3 ; t16, t31
2060 VP9_UNPACK_MULSUB_2W_4X 4, 1, 11003, 12140, [pd_8192], 2, 3 ; t17, t30
2062 SUMSUB_BA w, 4, 0, 2
2063 SUMSUB_BA w, 1, 5, 2
2065 VP9_UNPACK_MULSUB_2W_4X 5, 0, 16069, 3196, [pd_8192], 2, 3 ; t17, t30
2067 SCRATCH 4, 13, tmpq+ 1*%%str
2068 SCRATCH 5, 12, tmpq+15*%%str
2073 pmulhrsw m7, m3, [pw_14811x2]
2074 pmulhrsw m3, [pw_7005x2]
2075 pmulhrsw m6, m2, [pw_m5520x2]
2076 pmulhrsw m2, [pw_15426x2]
2081 VP9_UNPACK_MULSUB_2W_4X 3, 7, 14811, 7005, [pd_8192], 4, 5 ; t18, t29
2082 VP9_UNPACK_MULSUB_2W_4X 6, 2, 5520, 15426, [pd_8192], 4, 5 ; t19, t28
2084 SUMSUB_BA w, 3, 6, 4
2085 SUMSUB_BA w, 7, 2, 4
2087 VP9_UNPACK_MULSUB_2W_4X 2, 6, 3196, m16069, [pd_8192], 4, 5 ; t18, t29
2090 UNSCRATCH 5, 12, tmpq+15*%%str
2091 SUMSUB_BA w, 6, 0, 4
2092 mova [tmpq+25*%%str], m6 ; t19
2093 UNSCRATCH 4, 13, tmpq+ 1*%%str
2094 SUMSUB_BA w, 7, 1, 6
2095 SUMSUB_BA w, 3, 4, 6
2096 mova [tmpq+23*%%str], m3 ; t16
2097 SUMSUB_BA w, 2, 5, 6
2099 VP9_UNPACK_MULSUB_2W_4X 0, 5, 15137, 6270, [pd_8192], 6, 3 ; t18, t29
2100 VP9_UNPACK_MULSUB_2W_4X 1, 4, 15137, 6270, [pd_8192], 6, 3 ; t19, t28
2102 SCRATCH 0, 10, tmpq+ 1*%%str
2103 SCRATCH 1, 11, tmpq+ 7*%%str
2104 SCRATCH 2, 9, tmpq+ 9*%%str
2105 SCRATCH 4, 14, tmpq+15*%%str
2106 SCRATCH 5, 15, tmpq+17*%%str
2107 SCRATCH 7, 13, tmpq+31*%%str
2113 pmulhrsw m5, m0, [pw_15893x2] ;t27
2114 pmulhrsw m0, [pw_3981x2] ;t20
2116 VP9_UNPACK_MULSUB_2W_4X 1, 4, 5, 0, 9102, 13623, [pd_8192], 7, 2 ; t21, t26
2118 pmulhrsw m6, m3, [pw_m2404x2] ;t23
2119 pmulhrsw m3, [pw_16207x2] ;t24
2121 SCRATCH 5, 8, tmpq+ 5*%%str
2122 SCRATCH 4, 12, tmpq+11*%%str
2124 VP9_UNPACK_MULSUB_2W_4X 7, 2, 3, 6, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
2129 pmulhrsw m1, m4, [pw_15893x2]
2130 pmulhrsw m4, [pw_3981x2]
2131 pmulhrsw m0, m5, [pw_m8423x2]
2132 pmulhrsw m5, [pw_14053x2]
2137 VP9_UNPACK_MULSUB_2W_4X 4, 1, 15893, 3981, [pd_8192], 2, 3 ; t20, t27
2138 VP9_UNPACK_MULSUB_2W_4X 0, 5, 8423, 14053, [pd_8192], 2, 3 ; t21, t26
2140 SUMSUB_BA w, 0, 4, 2
2141 SUMSUB_BA w, 5, 1, 2
2143 VP9_UNPACK_MULSUB_2W_4X 1, 4, 9102, 13623, [pd_8192], 2, 3 ; t21, t26
2145 SCRATCH 5, 8, tmpq+ 5*%%str
2146 SCRATCH 4, 12, tmpq+11*%%str
2151 pmulhrsw m3, m6, [pw_13160x2]
2152 pmulhrsw m6, [pw_9760x2]
2153 pmulhrsw m2, m7, [pw_m2404x2]
2154 pmulhrsw m7, [pw_16207x2]
2158 VP9_UNPACK_MULSUB_2W_4X 6, 3, 13160, 9760, [pd_8192], 4, 5 ; t22, t25
2159 VP9_UNPACK_MULSUB_2W_4X 2, 7, 2404, 16207, [pd_8192], 4, 5 ; t23, t24
2161 SUMSUB_BA w, 6, 2, 4
2162 SUMSUB_BA w, 3, 7, 4
2164 VP9_UNPACK_MULSUB_2W_4X 7, 2, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
2167 ; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
2168 ; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31
2170 UNSCRATCH 4, 12, tmpq+11*%%str
2171 SUMSUB_BA w, 0, 6, 5
2172 SUMSUB_BA w, 4, 2, 5
2173 UNSCRATCH 5, 8, tmpq+ 5*%%str
2174 SCRATCH 4, 8, tmpq+11*%%str
2175 SUMSUB_BA w, 1, 7, 4
2176 SUMSUB_BA w, 5, 3, 4
2177 SCRATCH 5, 12, tmpq+ 5*%%str
2179 VP9_UNPACK_MULSUB_2W_4X 3, 6, 6270, m15137, [pd_8192], 4, 5 ; t20, t27
2180 VP9_UNPACK_MULSUB_2W_4X 2, 7, 6270, m15137, [pd_8192], 4, 5 ; t21, t26
2182 ; m8[s]=t16, m9=t17, m5=t18, m4[s]=t19, m12=t20, m13=t21, m1=t22, m0=t23,
2183 ; m15=t24, m14=t25, m2=t26, m3=t27, m11=t28, m10=t29, m6=t30, m7=t31
2185 UNSCRATCH 5, 9, tmpq+ 9*%%str
2186 mova m4, [tmpq+23*%%str] ; t16
2188 SUMSUB_BA w, 1, 5, 9
2189 SUMSUB_BA w, 0, 4, 9
2191 SUMSUB_BADC w, 1, 5, 0, 4
2193 mova [tmpq+29*%%str], m1 ; t17
2194 mova [tmpq+21*%%str], m0 ; t16
2195 UNSCRATCH 0, 10, tmpq+ 1*%%str
2196 UNSCRATCH 1, 11, tmpq+ 7*%%str
2198 SUMSUB_BA w, 2, 0, 9
2199 SUMSUB_BA w, 3, 1, 9
2201 SUMSUB_BADC w, 2, 0, 3, 1
2203 mova [tmpq+ 9*%%str], m2 ; t18
2204 mova [tmpq+13*%%str], m3 ; t19
2205 SCRATCH 0, 10, tmpq+23*%%str
2206 SCRATCH 1, 11, tmpq+27*%%str
2208 UNSCRATCH 2, 14, tmpq+15*%%str
2209 UNSCRATCH 3, 15, tmpq+17*%%str
2210 SUMSUB_BA w, 6, 2, 0
2211 SUMSUB_BA w, 7, 3, 0
2212 SCRATCH 6, 14, tmpq+ 3*%%str
2213 SCRATCH 7, 15, tmpq+ 7*%%str
2215 UNSCRATCH 0, 8, tmpq+11*%%str
2216 mova m1, [tmpq+25*%%str] ; t19
2217 UNSCRATCH 6, 12, tmpq+ 5*%%str
2218 UNSCRATCH 7, 13, tmpq+31*%%str
2220 SUMSUB_BA w, 0, 1, 9
2221 SUMSUB_BA w, 6, 7, 9
2223 SUMSUB_BADC w, 0, 1, 6, 7
2226 ; m0=t16, m1=t17, m2=t18, m3=t19, m11=t20, m10=t21, m9=t22, m8=t23,
2227 ; m7=t24, m6=t25, m5=t26, m4=t27, m12=t28, m13=t29, m14=t30, m15=t31
2229 %if 0; cpuflag(ssse3)
2231 SUMSUB_BA w, 4, 7, 8
2232 SUMSUB_BA w, 5, 1, 8
2234 SUMSUB_BADC w, 4, 7, 5, 1
2237 pmulhrsw m7, [pw_11585x2]
2238 pmulhrsw m4, [pw_11585x2]
2239 pmulhrsw m1, [pw_11585x2]
2240 pmulhrsw m5, [pw_11585x2]
2242 mova [tmpq+ 5*%%str], m7 ; t23
2243 SCRATCH 1, 13, tmpq+25*%%str
2244 UNSCRATCH 7, 10, tmpq+23*%%str
2245 UNSCRATCH 1, 11, tmpq+27*%%str
2248 SUMSUB_BA w, 7, 3, 10
2249 SUMSUB_BA w, 1, 2, 10
2251 SUMSUB_BADC w, 7, 3, 1, 2
2254 pmulhrsw m3, [pw_11585x2]
2255 pmulhrsw m7, [pw_11585x2]
2256 pmulhrsw m2, [pw_11585x2]
2257 pmulhrsw m1, [pw_11585x2]
2259 SCRATCH 0, 8, tmpq+15*%%str
2260 SCRATCH 6, 9, tmpq+17*%%str
2261 VP9_UNPACK_MULSUB_2W_4X 7, 4, 11585, 11585, [pd_8192], 0, 6
2262 mova [tmpq+ 5*%%str], m7 ; t23
2263 UNSCRATCH 7, 10, tmpq+23*%%str
2264 VP9_UNPACK_MULSUB_2W_4X 1, 5, 11585, 11585, [pd_8192], 0, 6
2265 SCRATCH 1, 13, tmpq+25*%%str
2266 UNSCRATCH 1, 11, tmpq+27*%%str
2267 VP9_UNPACK_MULSUB_2W_4X 3, 7, 11585, 11585, [pd_8192], 0, 6
2268 VP9_UNPACK_MULSUB_2W_4X 2, 1, 11585, 11585, [pd_8192], 0, 6
2269 UNSCRATCH 0, 8, tmpq+15*%%str
2270 UNSCRATCH 6, 9, tmpq+17*%%str
2273 ; m0=t16, m1=t17, m2=t18, m3=t19, m4=t20, m5=t21, m6=t22, m7=t23,
2274 ; m8=t24, m9=t25, m10=t26, m11=t27, m12=t28, m13=t29, m14=t30, m15=t31
2276 ; then do final pass to sumsub+store the two halves
2278 mova [tmpq+17*%%str], m2 ; t20
2279 mova [tmpq+ 1*%%str], m3 ; t21
2281 mova [tmpq+25*%%str], m13 ; t22
2283 mova m8, [tmpq+ 0*%%str] ; t0
2284 mova m9, [tmpq+ 4*%%str] ; t1
2285 mova m12, [tmpq+ 8*%%str] ; t2
2286 mova m11, [tmpq+12*%%str] ; t3
2287 mova m2, [tmpq+16*%%str] ; t4
2288 mova m3, [tmpq+20*%%str] ; t5
2289 mova m13, [tmpq+24*%%str] ; t6
2291 SUMSUB_BA w, 6, 8, 10
2292 mova [tmpq+ 3*%%str], m8 ; t15
2293 mova m10, [tmpq+28*%%str] ; t7
2294 SUMSUB_BA w, 0, 9, 8
2295 SUMSUB_BA w, 15, 12, 8
2296 SUMSUB_BA w, 14, 11, 8
2297 SUMSUB_BA w, 1, 2, 8
2298 SUMSUB_BA w, 7, 3, 8
2299 SUMSUB_BA w, 5, 13, 8
2300 SUMSUB_BA w, 4, 10, 8
2302 TRANSPOSE8x8W 6, 0, 15, 14, 1, 7, 5, 4, 8
2303 mova [tmpq+ 0*%%str], m6
2304 mova [tmpq+ 4*%%str], m0
2305 mova [tmpq+ 8*%%str], m15
2306 mova [tmpq+12*%%str], m14
2307 mova [tmpq+16*%%str], m1
2308 mova [tmpq+20*%%str], m7
2309 mova [tmpq+24*%%str], m5
2310 mova [tmpq+28*%%str], m4
2312 mova m8, [tmpq+ 3*%%str] ; t15
2313 TRANSPOSE8x8W 10, 13, 3, 2, 11, 12, 9, 8, 0
2314 mova [tmpq+ 3*%%str], m10
2315 mova [tmpq+ 7*%%str], m13
2316 mova [tmpq+11*%%str], m3
2317 mova [tmpq+15*%%str], m2
2318 mova [tmpq+19*%%str], m11
2319 mova [tmpq+23*%%str], m12
2320 mova [tmpq+27*%%str], m9
2321 mova [tmpq+31*%%str], m8
2323 mova m15, [tmpq+30*%%str] ; t8
2324 mova m14, [tmpq+26*%%str] ; t9
2325 mova m13, [tmpq+22*%%str] ; t10
2326 mova m12, [tmpq+18*%%str] ; t11
2327 mova m11, [tmpq+14*%%str] ; t12
2328 mova m10, [tmpq+10*%%str] ; t13
2329 mova m9, [tmpq+ 6*%%str] ; t14
2330 mova m8, [tmpq+ 2*%%str] ; t15
2331 mova m7, [tmpq+21*%%str] ; t16
2332 mova m6, [tmpq+29*%%str] ; t17
2333 mova m5, [tmpq+ 9*%%str] ; t18
2334 mova m4, [tmpq+13*%%str] ; t19
2335 mova m3, [tmpq+17*%%str] ; t20
2336 mova m2, [tmpq+ 1*%%str] ; t21
2337 mova m1, [tmpq+25*%%str] ; t22
2339 SUMSUB_BA w, 7, 8, 0
2340 mova [tmpq+ 2*%%str], m8
2341 mova m0, [tmpq+ 5*%%str] ; t23
2342 SUMSUB_BA w, 6, 9, 8
2343 SUMSUB_BA w, 5, 10, 8
2344 SUMSUB_BA w, 4, 11, 8
2345 SUMSUB_BA w, 3, 12, 8
2346 SUMSUB_BA w, 2, 13, 8
2347 SUMSUB_BA w, 1, 14, 8
2348 SUMSUB_BA w, 0, 15, 8
2350 TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
2351 mova [tmpq+ 1*%%str], m0
2352 mova [tmpq+ 5*%%str], m1
2353 mova [tmpq+ 9*%%str], m2
2354 mova [tmpq+13*%%str], m3
2355 mova [tmpq+17*%%str], m4
2356 mova [tmpq+21*%%str], m5
2357 mova [tmpq+25*%%str], m6
2358 mova [tmpq+29*%%str], m7
2360 mova m8, [tmpq+ 2*%%str]
2361 TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
2362 mova [tmpq+ 2*%%str], m8
2363 mova [tmpq+ 6*%%str], m9
2364 mova [tmpq+10*%%str], m10
2365 mova [tmpq+14*%%str], m11
2366 mova [tmpq+18*%%str], m12
2367 mova [tmpq+22*%%str], m13
2368 mova [tmpq+26*%%str], m14
2369 mova [tmpq+30*%%str], m15
    mova m2, [tmpq+24*%%str] ; t6
    mova m3, [tmpq+28*%%str] ; t7
    SUMSUB_BADC w, 5, 2, 4, 3
    mova [tmpq+24*%%str], m5
    mova [tmpq+23*%%str], m2
    mova [tmpq+28*%%str], m4
    mova [tmpq+19*%%str], m3

    mova m2, [tmpq+16*%%str] ; t4
    mova m3, [tmpq+20*%%str] ; t5
    SUMSUB_BA w, 1, 2, 5
    SUMSUB_BA w, 7, 3, 5
    mova [tmpq+15*%%str], m2
    mova [tmpq+11*%%str], m3

    mova m2, [tmpq+ 0*%%str] ; t0
    mova m3, [tmpq+ 4*%%str] ; t1
    SUMSUB_BA w, 6, 2, 5
    SUMSUB_BA w, 0, 3, 5
    mova [tmpq+31*%%str], m2
    mova [tmpq+27*%%str], m3

    mova m2, [tmpq+ 8*%%str] ; t2
    mova m3, [tmpq+12*%%str] ; t3
    mova m5, [tmpq+ 7*%%str]
    mova m4, [tmpq+ 3*%%str]
    SUMSUB_BADC w, 5, 2, 4, 3
    mova [tmpq+ 7*%%str], m2
    mova [tmpq+ 3*%%str], m3

    mova m3, [tmpq+28*%%str]
    TRANSPOSE8x8W 6, 0, 5, 4, 1, 7, 2, 3, [tmpq+24*%%str], [tmpq+16*%%str], 1
    mova [tmpq+ 0*%%str], m6
    mova [tmpq+ 4*%%str], m0
    mova [tmpq+ 8*%%str], m5
    mova [tmpq+12*%%str], m4
    mova [tmpq+20*%%str], m7
    mova [tmpq+24*%%str], m2
    mova [tmpq+28*%%str], m3

    mova m6, [tmpq+19*%%str]
    mova m0, [tmpq+23*%%str]
    mova m5, [tmpq+11*%%str]
    mova m4, [tmpq+15*%%str]
    mova m1, [tmpq+ 3*%%str]
    mova m7, [tmpq+ 7*%%str]
    mova m3, [tmpq+31*%%str]
    TRANSPOSE8x8W 6, 0, 5, 4, 1, 7, 2, 3, [tmpq+27*%%str], [tmpq+19*%%str], 1
    mova [tmpq+ 3*%%str], m6
    mova [tmpq+ 7*%%str], m0
    mova [tmpq+11*%%str], m5
    mova [tmpq+15*%%str], m4
    mova [tmpq+23*%%str], m7
    mova [tmpq+27*%%str], m2
    mova [tmpq+31*%%str], m3

    mova m1, [tmpq+ 6*%%str] ; t14
    mova m0, [tmpq+ 2*%%str] ; t15
    mova m7, [tmpq+21*%%str] ; t16
    mova m6, [tmpq+29*%%str] ; t17
    SUMSUB_BA w, 7, 0, 2
    SUMSUB_BA w, 6, 1, 2
    mova [tmpq+29*%%str], m7
    mova [tmpq+ 2*%%str], m0
    mova [tmpq+21*%%str], m6
    mova [tmpq+ 6*%%str], m1

    mova m1, [tmpq+14*%%str] ; t12
    mova m0, [tmpq+10*%%str] ; t13
    mova m5, [tmpq+ 9*%%str] ; t18
    mova m4, [tmpq+13*%%str] ; t19
    SUMSUB_BA w, 5, 0, 2
    SUMSUB_BA w, 4, 1, 2
    mova [tmpq+10*%%str], m0
    mova [tmpq+14*%%str], m1

    mova m1, [tmpq+22*%%str] ; t10
    mova m0, [tmpq+18*%%str] ; t11
    mova m3, [tmpq+17*%%str] ; t20
    mova m2, [tmpq+ 1*%%str] ; t21
    SUMSUB_BA w, 3, 0, 6
    SUMSUB_BA w, 2, 1, 6
    mova [tmpq+18*%%str], m0
    mova [tmpq+22*%%str], m1

    mova m7, [tmpq+30*%%str] ; t8
    mova m6, [tmpq+26*%%str] ; t9
    mova m1, [tmpq+25*%%str] ; t22
    mova m0, [tmpq+ 5*%%str] ; t23
    SUMSUB_BADC w, 1, 6, 0, 7
    mova [tmpq+26*%%str], m6
    mova [tmpq+30*%%str], m7

    mova m7, [tmpq+29*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+21*%%str], [tmpq+17*%%str], 1
    mova [tmpq+ 1*%%str], m0
    mova [tmpq+ 5*%%str], m1
    mova [tmpq+ 9*%%str], m2
    mova [tmpq+13*%%str], m3
    mova [tmpq+21*%%str], m5
    mova [tmpq+25*%%str], m6
    mova [tmpq+29*%%str], m7

    mova m0, [tmpq+ 2*%%str]
    mova m1, [tmpq+ 6*%%str]
    mova m2, [tmpq+10*%%str]
    mova m3, [tmpq+14*%%str]
    mova m4, [tmpq+18*%%str]
    mova m5, [tmpq+22*%%str]
    mova m7, [tmpq+30*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+26*%%str], [tmpq+18*%%str], 1
    mova [tmpq+ 2*%%str], m0
    mova [tmpq+ 6*%%str], m1
    mova [tmpq+10*%%str], m2
    mova [tmpq+14*%%str], m3
    mova [tmpq+22*%%str], m5
    mova [tmpq+26*%%str], m6
    mova [tmpq+30*%%str], m7
    ; t0-7 are in [tmpq+{0,4,8,12,16,20,24,28}*%%str]
    ; t8-15 are in [tmpq+{2,6,10,14,18,22,26,30}*%%str]
    ; t16-19 and t23 are in [tmpq+{1,5,9,13,29}*%%str]
    ; t24-31 are in m8-15
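    ; the store stage below consumes these values in mirrored pairs:
    ; output row n is t<n> + t<31-n> and output row 31-n is t<n> - t<31-n>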
%if cpuflag(ssse3)
%define ROUND_REG [pw_512]
%else
%define ROUND_REG [pw_32]
%endif
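    ; both constants implement the final round-to-nearest >>6 of the write
    ; stage: pmulhrsw with pw_512 computes (x*512 + 0x4000) >> 15, which is
    ; exactly (x + 32) >> 6, while the pre-ssse3 path adds pw_32 and then
    ; shifts right by 6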
%macro %%STORE_2X2 7-8 1 ; src[1-4], tmp[1-2], zero, inc_dst_ptrs
    SUMSUB_BA w, %4, %1, %5
    SUMSUB_BA w, %3, %2, %5
    VP9_IDCT8_WRITEx2 %4, %3, %5, %6, %7, ROUND_REG, 6
%if %8 == 1
    add dstq, stride2q
%endif
    VP9_IDCT8_WRITEx2 %2, %1, %5, %6, %7, ROUND_REG, 6, dst_endq
%if %8 == 1
    sub dst_endq, stride2q
%endif
%endmacro
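    ; each %%STORE_2X2 call finishes two rows at dstq (moving down) and the
    ; two mirrored rows at dst_endq (moving up), so the 32 output rows are
    ; filled from both ends toward the middle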
%if ARCH_X86_64
    ; store t0-1 and t30-31
    mova m8, [tmpq+ 0*%%str]
    mova m9, [tmpq+ 4*%%str]
    %%STORE_2X2 8, 9, 0, 6, 12, 11, 10

    ; store t2-3 and t28-29
    mova m8, [tmpq+ 8*%%str]
    mova m9, [tmpq+12*%%str]
    %%STORE_2X2 8, 9, 14, 15, 12, 11, 10

    ; store t4-5 and t26-27
    mova m8, [tmpq+16*%%str]
    mova m9, [tmpq+20*%%str]
    %%STORE_2X2 8, 9, 7, 1, 12, 11, 10

    ; store t6-7 and t24-25
    mova m8, [tmpq+24*%%str]
    mova m9, [tmpq+28*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10

    ; store t8-9 and t22-23
    mova m8, [tmpq+30*%%str]
    mova m9, [tmpq+26*%%str]
    mova m0, [tmpq+ 5*%%str]
    %%STORE_2X2 8, 9, 13, 0, 12, 11, 10

    ; store t10-11 and t20-21
    mova m8, [tmpq+22*%%str]
    mova m9, [tmpq+18*%%str]
    %%STORE_2X2 8, 9, 2, 3, 12, 11, 10

    ; store t12-13 and t18-19
    mova m8, [tmpq+14*%%str]
    mova m9, [tmpq+10*%%str]
    mova m5, [tmpq+13*%%str]
    mova m4, [tmpq+ 9*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10

    ; store t14-15 and t16-17
    mova m8, [tmpq+ 6*%%str]
    mova m9, [tmpq+ 2*%%str]
    mova m5, [tmpq+29*%%str]
    mova m4, [tmpq+21*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10, 0
%else
    mova [tmpq+ 1*%%str], m1
    mova [tmpq+11*%%str], m2
    mova [tmpq+15*%%str], m3
    mova [tmpq+17*%%str], m4
    mova [tmpq+19*%%str], m5
    ; store t0-1 and t30-31
    mova m2, [tmpq+ 0*%%str]
    mova m3, [tmpq+ 4*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

    ; store t2-3 and t28-29
    mova m2, [tmpq+ 8*%%str]
    mova m3, [tmpq+12*%%str]
    mova m0, [tmpq+ 3*%%str]
    mova m6, [tmpq+ 7*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

    ; store t4-5 and t26-27
    mova m2, [tmpq+16*%%str]
    mova m3, [tmpq+20*%%str]
    mova m0, [tmpq+ 1*%%str]
    %%STORE_2X2 2, 3, 7, 0, 4, 5, 1

    ; store t6-7 and t24-25
    mova m2, [tmpq+24*%%str]
    mova m3, [tmpq+28*%%str]
    mova m0, [tmpq+17*%%str]
    mova m6, [tmpq+19*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

    ; store t8-9 and t22-23
    mova m2, [tmpq+30*%%str]
    mova m3, [tmpq+26*%%str]
    mova m0, [tmpq+25*%%str]
    mova m6, [tmpq+ 5*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

    ; store t10-11 and t20-21
    mova m2, [tmpq+22*%%str]
    mova m3, [tmpq+18*%%str]
    mova m0, [tmpq+11*%%str]
    mova m6, [tmpq+15*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

    ; store t12-13 and t18-19
    mova m2, [tmpq+14*%%str]
    mova m3, [tmpq+10*%%str]
    mova m6, [tmpq+13*%%str]
    mova m0, [tmpq+ 9*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1
    ; store t14-15 and t16-17
    mova m2, [tmpq+ 6*%%str]
    mova m3, [tmpq+ 2*%%str]
    mova m6, [tmpq+29*%%str]
    mova m0, [tmpq+21*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1, 0
%endif
%undef ROUND_REG
%endmacro
%macro VP9_IDCT_IDCT_32x32_ADD_XMM 1
INIT_XMM %1
cglobal vp9_idct_idct_32x32_add, 0, 6 + ARCH_X86_64 * 3, 16, 2048, dst, stride, block, eob
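    ; the 2048 bytes of stack hold the 32x32 int16 intermediate block that
    ; the row pass writes out transposed and the column pass reads back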
    movifnidn eobd, dword eobm
    movifnidn blockq, blockmp
    movifnidn dstq, dstmp
    movifnidn strideq, stridemp
    mova m1, [pw_11585x2]

DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (32 << 14) + 8192
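    ; one constant carries both rounding biases of the scalar DC path: 8192
    ; rounds the >>14 of the second 11585 multiply, and 32 << 14 becomes the
    ; +32 of the final >>6, since (x*11585 + 8192 + (32 << 14)) >> (14 + 6)
    ; equals ((((x*11585 + 8192) >> 14) + 32) >> 6)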
    SPLATW m0, m0, q0000
    pmulhrsw m0, [pw_512]
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize
DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
%define dst_bakq r0mp
DEFINE_ARGS block, u1, u2, u3, u4, tmp
    VP9_IDCT32_1D blockq, 1, 8
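    ; row pass limited to the first 8 coefficients per line: this path is
    ; taken when eob indicates that all nonzero coefficients fit in the
    ; top-left 8x8 corner of the 32x32 block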
DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
%define cntd dword r3m
    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32
    sub stride30q, stride2q ; stride*30
    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2, 8
    ; at the end of the loop, m1 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 64, 8, m1
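    ; only the 8x8 corner of coefficients consumed by this path is cleared;
    ; the rest of the 32x32 block is already zero for this eob range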
DEFINE_ARGS block, tmp, cnt
    VP9_IDCT32_1D blockq, 1, 16

DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
%define cntd dword r3m
    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32
    sub stride30q, stride2q ; stride*30
    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2, 16
    ; at the end of the loop, m1 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 64, 16, m1
DEFINE_ARGS block, tmp, cnt
    VP9_IDCT32_1D blockq, 1

DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
%define cntd dword r3m
    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32
    sub stride30q, stride2q ; stride*30
    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2
    ; at the end of the loop, m1 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 64, 32, m1
VP9_IDCT_IDCT_32x32_ADD_XMM sse2
VP9_IDCT_IDCT_32x32_ADD_XMM ssse3
VP9_IDCT_IDCT_32x32_ADD_XMM avx