;******************************************************************************
;* VP9 IDCT SIMD optimizations
;* Copyright (C) 2013 Clément Bœsch <u pkh me>
;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
;* This file is part of FFmpeg.
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"
%include "vp9itxfm_template.asm"

%macro VP9_IDCT_COEFFS 2-3 0
pw_m%1x2: times 8 dw -%1*2
pw_%1x2: times 8 dw %1*2
pw_%2x2: times 8 dw %2*2

VP9_IDCT_COEFFS 16364, 804
VP9_IDCT_COEFFS 16305, 1606
VP9_IDCT_COEFFS 16069, 3196, 1
VP9_IDCT_COEFFS 15893, 3981
VP9_IDCT_COEFFS 15137, 6270, 1
VP9_IDCT_COEFFS 14811, 7005
VP9_IDCT_COEFFS 14449, 7723
VP9_IDCT_COEFFS 13160, 9760
VP9_IDCT_COEFFS 11585, 11585, 1
VP9_IDCT_COEFFS 11003, 12140
VP9_IDCT_COEFFS 10394, 12665
VP9_IDCT_COEFFS 9102, 13623, 1
VP9_IDCT_COEFFS 8423, 14053
VP9_IDCT_COEFFS 5520, 15426
VP9_IDCT_COEFFS 4756, 15679
VP9_IDCT_COEFFS 2404, 16207
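; The coefficient pairs above appear to be the standard VP9 "cospi" constants
; in Q14: round(16384*cos(m*pi/64)) paired with round(16384*sin(m*pi/64)),
; e.g. 16364 = 16384*cos(pi/64) and 804 = 16384*sin(pi/64); 11585 is
; round(16384/sqrt(2)), i.e. cos(pi/4). The x2 variants are doubled so that
; pmulhrsw's (a*b + (1<<14)) >> 15 behaves as a rounded Q14 multiply.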
times 4 dw 5283, 13377
times 4 dw 9929, 13377
times 4 dw 15212, -13377
times 4 dw 15212, 9929
times 4 dw -5283, -15212
times 4 dw -13377, 13377
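; These interleaved word pairs are the iadst4 "sinpi" constants; they appear
; to follow sinpi_k_9 = round(16384 * sqrt(2) * (2.0/3.0) * sin(k*pi/9)):
; 5283 (k=1), 9929 (k=2), 13377 (k=3), 15212 (k=4).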
%macro VP9_UNPACK_MULSUB_2D_4X 6 ; dst1 [src1], dst2 [src2], dst3, dst4, mul1, mul2
    punpckhwd m%4, m%2, m%1
    pmaddwd m%3, m%4, [pw_m%5_%6]
    pmaddwd m%4, [pw_%6_%5]
    pmaddwd m%1, m%2, [pw_m%5_%6]
    pmaddwd m%2, [pw_%6_%5]
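    ; A C sketch of the butterfly this macro performs, assuming pw_m%5_%6 and
    ; pw_%6_%5 hold interleaved (-mul1, mul2) and (mul2, mul1) word pairs:
    ; for each lane i,
    ;     lo32[i] = src1[i] * mul2 - src2[i] * mul1;
    ;     hi32[i] = src1[i] * mul1 + src2[i] * mul2;
    ; i.e. a fixed-point rotation whose results stay at dword precision
    ; (rounding and the >>14 happen later, in VP9_RND_SH_SUMSUB_BA).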
%macro VP9_RND_SH_SUMSUB_BA 6 ; dst1 [src1], dst2 [src2], src3, src4, tmp, round
    SUMSUB_BA d, %1, %2, %5
    SUMSUB_BA d, %3, %4, %5
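    ; The elided remainder of this macro presumably adds the dword rounding
    ; constant %6 (normally [pd_8192]), shifts right by 14 and packs the sums
    ; and differences back down to words, roughly:
    ;     dst[i] = (int16_t)((a[i] + b[i] + 8192) >> 14);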
%macro VP9_STORE_2X 5-6 dstq ; reg1, reg2, tmp1, tmp2, zero, dst
    movh m%4, [%6+strideq]
    movh [%6+strideq], m%4
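    ; Between the load and the store above, the elided body presumably unpacks
    ; the two destination rows from bytes to words against the zero register,
    ; adds the residual rows in reg1/reg2, and repacks with unsigned
    ; saturation:
    ;     dst[i] = av_clip_uint8(dst[i] + residual[i]);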
%macro ZERO_BLOCK 4 ; mem, stride, nnzcpl, zero_reg
    mova [%1+%%y+%%x], %4
%assign %%x (%%x+mmsize)
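    ; ZERO_BLOCK clears the decoded coefficients so the block buffer is ready
    ; for the next transform; roughly the C equivalent is:
    ;     for (y = 0; y < nnzcpl; y++)
    ;         memset((char *) mem + y * stride, 0, stride);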
;-------------------------------------------------------------------------------------------
; void vp9_iwht_iwht_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

cglobal vp9_iwht_iwht_4x4_add, 3, 3, 0, dst, stride, block, eob
    mova m0, [blockq+0*8]
    mova m1, [blockq+1*8]
    mova m2, [blockq+2*8]
    mova m3, [blockq+3*8]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    VP9_STORE_2X 0, 1, 5, 6, 4
    lea dstq, [dstq+strideq*2]
    VP9_STORE_2X 2, 3, 5, 6, 4
    ZERO_BLOCK blockq, 8, 4, m4

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

; 2x2 top left corner
%macro VP9_IDCT4_2x2_1D 0
    pmulhrsw m0, m5 ; m0=t1
    pmulhrsw m1, m6 ; m1=t2
    pmulhrsw m3, m7 ; m3=t3
    VP9_IDCT4_1D_FINALIZE

%macro VP9_IDCT4_WRITEOUT 0
    pmulhrsw m0, m5 ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
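    ; (pmulhrsw computes (a*b + (1<<14)) >> 15 per word, so multiplying by
    ; 2048 == 1<<11 yields (x*2^11 + 2^14) >> 15 == (x+8) >> 4, the rounded
    ; final shift of the 4x4 inverse transform.)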
    VP9_STORE_2X 0, 1, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 2, 3, 6, 7, 4

cglobal vp9_idct_idct_4x4_add, 4, 4, 0, dst, stride, block, eob
    cmp eobd, 4 ; 2x2 or smaller
    cmp eobd, 1 ; faster path for when only DC is set
    mova m5, [pw_11585x2]
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (8 << 14) + 8192
    pmulhrsw m0, [pw_2048] ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
    VP9_STORE_2X 0, 0, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 0, 0, 6, 7, 4
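    ; The DC-only path evaluates, in scalar code, roughly:
    ;     dc = (dc * 11585 + 8192) >> 14;          // cos(pi/4) in Q14, rounded
    ;     dc = (dc * 11585 + (8 << 14) + 8192) >> (14 + 4);
    ; where the (8 << 14) + 8192 constant merges the second Q14 rounding with
    ; the final (x+8)>>4 output rounding into one add and one shift.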
; faster path for when only top left 2x2 block is set
    mova m5, [pw_11585x2]
    mova m7, [pw_15137x2]
    ; partial 2x4 transpose
    SBUTTERFLY dq, 0, 2, 1
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

.idctfull: ; generic full 4x4 idct/idct
    mova m6, [pw_11585x2]
    mova m7, [pd_8192] ; rounding
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

;-------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

cglobal vp9_%1_%3_4x4_add, 3, 3, 0, dst, stride, block, eob
%if WIN64 && notcpuflag(ssse3)
    movdqa xmm5, [pd_8192]
    mova m6, [pw_11585x2]
%ifnidn %1%3, iadstiadst
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    pxor m4, m4 ; used for the block reset, and VP9_STORE_2X

IADST4_FN idct, IDCT4, iadst, IADST4, sse2
IADST4_FN iadst, IADST4, idct, IDCT4, sse2
IADST4_FN iadst, IADST4, iadst, IADST4, sse2
IADST4_FN idct, IDCT4, iadst, IADST4, ssse3
IADST4_FN iadst, IADST4, idct, IDCT4, ssse3
IADST4_FN iadst, IADST4, iadst, IADST4, ssse3

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IDCT8_1D_FINALIZE 0
    SUMSUB_BA w, 3, 6, 5 ; m3=t0+t7, m6=t0-t7
    SUMSUB_BA w, 1, 2, 5 ; m1=t1+t6, m2=t1-t6
    SUMSUB_BA w, 7, 0, 5 ; m7=t2+t5, m0=t2-t5
    UNSCRATCH 5, 8, blockq+ 0
    SCRATCH 2, 8, blockq+ 0
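    ; (SCRATCH/UNSCRATCH, from x86util, abstract register spilling: on x86-64
    ; they swap the value into one of the high registers m8-m15, while on
    ; x86-32, which only has m0-m7, they store to / reload from the given
    ; memory slot. This keeps a single code path for both ABIs.)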
    SUMSUB_BA w, 5, 4, 2 ; m5=t3+t4, m4=t3-t4
; x86-32:
; - in: m0/m4 is in mem
; - out: m6 is in mem
; x86-64:
; - everything is in registers (m0-7)
%macro VP9_IDCT8_1D 0
    VP9_UNPACK_MULSUB_2W_4X 5, 3, 9102, 13623, D_8192_REG, 0, 4 ; m5=t5a, m3=t6a
    VP9_UNPACK_MULSUB_2W_4X 1, 7, 16069, 3196, D_8192_REG, 0, 4 ; m1=t4a, m7=t7a
    SUMSUB_BA w, 5, 1, 0 ; m5=t4a+t5a (t4), m1=t4a-t5a (t5a)
    SUMSUB_BA w, 3, 7, 0 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
    SUMSUB_BA w, 1, 7, 0 ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
    pmulhrsw m1, W_11585x2_REG ; m1=t6
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    VP9_UNPACK_MULSUB_2W_4X 7, 1, 11585, 11585, D_8192_REG, 0, 4
    VP9_UNPACK_MULSUB_2W_4X 2, 6, 15137, 6270, D_8192_REG, 0, 4 ; m2=t2a, m6=t3a
    UNSCRATCH 0, 8, blockq+ 0 ; IN(0)
    UNSCRATCH 4, 9, blockq+64 ; IN(4)
    SCRATCH 5, 8, blockq+ 0
    SUMSUB_BA w, 4, 0, 5 ; m4=IN(0)+IN(4) m0=IN(0)-IN(4)
    pmulhrsw m4, W_11585x2_REG ; m4=t0a
    pmulhrsw m0, W_11585x2_REG ; m0=t1a
    SCRATCH 7, 9, blockq+64
    VP9_UNPACK_MULSUB_2W_4X 0, 4, 11585, 11585, D_8192_REG, 5, 7
    UNSCRATCH 7, 9, blockq+64
    SUMSUB_BA w, 6, 4, 5 ; m6=t0a+t3a (t0), m4=t0a-t3a (t3)
    SUMSUB_BA w, 2, 0, 5 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    VP9_IDCT8_1D_FINALIZE

%macro VP9_IDCT8_4x4_1D 0
    pmulhrsw m0, W_11585x2_REG ; m0=t1a/t0a
    pmulhrsw m6, m2, [pw_15137x2] ; m6=t3a
    pmulhrsw m2, [pw_6270x2] ; m2=t2a
    pmulhrsw m7, m1, [pw_16069x2] ; m7=t7a
    pmulhrsw m1, [pw_3196x2] ; m1=t4a
    pmulhrsw m5, m3, [pw_m9102x2] ; m5=t5a
    pmulhrsw m3, [pw_13623x2] ; m3=t6a
    SUMSUB_BA w, 5, 1, 4 ; m1=t4a+t5a (t4), m5=t4a-t5a (t5a)
    SUMSUB_BA w, 3, 7, 4 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
    SUMSUB_BA w, 1, 7, 4 ; m1=t6a+t5a (t6), m7=t6a-t5a (t5)
    pmulhrsw m1, W_11585x2_REG ; m1=t6
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    psubw m4, m0, m6 ; m4=t0a-t3a (t3)
    paddw m6, m0 ; m6=t0a+t3a (t0)
    SCRATCH 5, 8, blockq+ 0
    SUMSUB_BA w, 2, 0, 5 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    VP9_IDCT8_1D_FINALIZE

%macro VP9_IDCT8_2x2_1D 1
    pmulhrsw m0, W_11585x2_REG ; m0=t0
    pmulhrsw m3, m1, W_16069x2_REG ; m3=t7
    pmulhrsw m1, W_3196x2_REG ; m1=t4
    psubw m7, m3, m1 ; t5 = t7a - t4a
    paddw m5, m3, m1 ; t6 = t7a + t4a
    pmulhrsw m7, W_11585x2_REG ; m7=t5
    pmulhrsw m5, W_11585x2_REG ; m5=t6
    ; merged VP9_IDCT8_1D_FINALIZE to make register-sharing w/ avx easier
    psubw m6, m0, m3 ; m6=t0-t7
    paddw m3, m0 ; m3=t0+t7
    psubw m2, m0, m1 ; m2=t1-t6
    paddw m1, m0 ; m1=t1+t6
%define SCRATCH_REG 1
%define SCRATCH_REG 2
%define SCRATCH_REG 8
    psubw m4, m0, m5 ; m4=t3-t4
    paddw m5, m0 ; m5=t3+t4
    SUMSUB_BA w, 7, 0, SCRATCH_REG ; m7=t2+t5, m0=t2-t5

%macro VP9_IDCT8_WRITEx2 6-8 5 ; line1, line2, tmp1, tmp2, zero, pw_1024/pw_16, shift
    pmulhrsw m%1, %6 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    VP9_STORE_2X %1, %2, %3, %4, %5
    VP9_STORE_2X %1, %2, %3, %4, %5, %8
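    ; (In general, for an output shift of s, pmulhrsw against the constant
    ; 2^(15-s) gives (x * 2^(15-s) + (1<<14)) >> 15 == (x + (1<<(s-1))) >> s,
    ; a rounded right shift; pw_2048/pw_1024/pw_512 correspond to s = 4/5/6.
    ; On SSE2, which lacks pmulhrsw, the same rounding is presumably done by
    ; adding pw_16 == 1<<(s-1) and shifting right by the shift argument.)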
; - m8 holds m6 (SWAP)
%macro VP9_IDCT8_WRITEOUT 0
%define ROUND_REG [pw_1024]
%define ROUND_REG [pw_16]
    SCRATCH 5, 10, blockq+16
    SCRATCH 7, 11, blockq+32
    VP9_IDCT8_WRITEx2 0, 1, 5, 7, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    VP9_IDCT8_WRITEx2 2, 3, 5, 7, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    UNSCRATCH 5, 10, blockq+16
    UNSCRATCH 7, 11, blockq+32
    VP9_IDCT8_WRITEx2 4, 5, 0, 1, 6, ROUND_REG
    lea dstq, [dstq+2*strideq]
    UNSCRATCH 5, 8, blockq+ 0
    VP9_IDCT8_WRITEx2 5, 7, 0, 1, 6, ROUND_REG

%macro VP9_IDCT_IDCT_8x8_ADD_XMM 2
cglobal vp9_idct_idct_8x8_add, 4, 4, %2, dst, stride, block, eob
    mova m12, [pw_11585x2] ; often used
%define W_11585x2_REG m12
%define W_11585x2_REG [pw_11585x2]
    cmp eobd, 12 ; top left half or less
    cmp eobd, 3 ; top left corner or less
    cmp eobd, 1 ; faster path for when only DC is set
    jne .idcttopleftcorner
    pmulhrsw m0, W_11585x2_REG
    pmulhrsw m0, W_11585x2_REG
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (16 << 14) + 8192
    pmulhrsw m0, [pw_1024] ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    VP9_STORE_2X 0, 0, 6, 7, 4
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 0, 0, 6, 7, 4
; faster path for when only the top left corner is set (3 inputs: DC, the one
; right of DC, and the one below DC). Note: also works with a 2x2 block
    mova m10, [pw_3196x2]
    mova m11, [pw_16069x2]
%define W_3196x2_REG m10
%define W_16069x2_REG m11
%define W_3196x2_REG [pw_3196x2]
%define W_16069x2_REG [pw_16069x2]
    ; partial 2x8 transpose
    ; punpcklwd m0, m1 already done inside idct
    SBUTTERFLY qdq, 0, 4, 1
    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
    movh m0, [blockq + 0]
    movh m1, [blockq +16]
    movh m2, [blockq +32]
    movh m3, [blockq +48]
    ; partial 4x8 transpose
    SBUTTERFLY dq, 0, 2, 1
    SBUTTERFLY dq, 4, 6, 5
    SBUTTERFLY qdq, 0, 4, 1
    SBUTTERFLY qdq, 2, 6, 5

.idctfull: ; generic full 8x8 idct/idct
    mova m0, [blockq+ 0] ; IN(0)
    mova m1, [blockq+ 16] ; IN(1)
    mova m2, [blockq+ 32] ; IN(2)
    mova m3, [blockq+ 48] ; IN(3)
    mova m4, [blockq+ 64] ; IN(4)
    mova m5, [blockq+ 80] ; IN(5)
    mova m6, [blockq+ 96] ; IN(6)
    mova m7, [blockq+112] ; IN(7)
    mova m11, [pd_8192] ; rounding
%define D_8192_REG m11
%define D_8192_REG [pd_8192]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
    ZERO_BLOCK blockq, 16, 8, m6

VP9_IDCT_IDCT_8x8_ADD_XMM sse2, 12
VP9_IDCT_IDCT_8x8_ADD_XMM ssse3, 13
VP9_IDCT_IDCT_8x8_ADD_XMM avx, 13

;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------
; x86-32:
; - in: m0/3/4/7 are in mem [blockq+N*16]
; - out: m6 is in mem [blockq+0]
; x86-64:
; - everything is in registers
%macro VP9_IADST8_1D 0 ; input/output=m0/1/2/3/4/5/6/7
    VP9_UNPACK_MULSUB_2D_4X 5, 2, 0, 3, 14449, 7723 ; m5/2=t3[d], m2/4=t2[d]
    VP9_UNPACK_MULSUB_2D_4X 1, 6, 4, 7, 4756, 15679 ; m1/4=t7[d], m6/7=t6[d]
    SCRATCH 4, 12, blockq+1*16
    VP9_RND_SH_SUMSUB_BA 6, 2, 7, 3, 4, D_8192_REG ; m6=t2[w], m2=t6[w]
    UNSCRATCH 4, 12, blockq+1*16
    VP9_RND_SH_SUMSUB_BA 1, 5, 4, 0, 3, D_8192_REG ; m1=t3[w], m5=t7[w]
    UNSCRATCH 0, 8, blockq+16*0
    UNSCRATCH 3, 9, blockq+16*3
    UNSCRATCH 4, 10, blockq+16*4
    UNSCRATCH 7, 11, blockq+16*7
    SCRATCH 1, 8, blockq+16*1
    SCRATCH 2, 9, blockq+16*2
    SCRATCH 5, 10, blockq+16*5
    SCRATCH 6, 11, blockq+16*6
    VP9_UNPACK_MULSUB_2D_4X 7, 0, 1, 2, 16305, 1606 ; m7/1=t1[d], m0/2=t0[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 4, 5, 6, 10394, 12665 ; m3/5=t5[d], m4/6=t4[d]
    SCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 4, 0, 6, 2, 1, D_8192_REG ; m4=t0[w], m0=t4[w]
    UNSCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 3, 7, 5, 1, 2, D_8192_REG ; m3=t1[w], m7=t5[w]
    UNSCRATCH 2, 9, blockq+16*2
    UNSCRATCH 5, 10, blockq+16*5
    SCRATCH 3, 9, blockq+16*3
    SCRATCH 4, 10, blockq+16*4
    ; m4=t0, m3=t1, m6=t2, m1=t3, m0=t4, m7=t5, m2=t6, m5=t7
    VP9_UNPACK_MULSUB_2D_4X 0, 7, 1, 3, 15137, 6270 ; m0/1=t5[d], m7/3=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 5, 2, 4, 6, 6270, 15137 ; m5/4=t6[d], m2/6=t7[d]
    SCRATCH 1, 12, blockq+ 0*16
    VP9_RND_SH_SUMSUB_BA 5, 7, 4, 3, 1, D_8192_REG
    UNSCRATCH 1, 12, blockq+ 0*16
    PSIGNW m5, W_M1_REG ; m5=out1[w], m7=t6[w]
    VP9_RND_SH_SUMSUB_BA 2, 0, 6, 1, 3, D_8192_REG ; m2=out6[w], m0=t7[w]
    UNSCRATCH 1, 8, blockq+16*1
    UNSCRATCH 3, 9, blockq+16*3
    UNSCRATCH 4, 10, blockq+16*4
    UNSCRATCH 6, 11, blockq+16*6
    SCRATCH 2, 8, blockq+16*0
    SUMSUB_BA w, 6, 4, 2 ; m6=out0[w], m4=t2[w]
    PSIGNW m1, W_M1_REG ; m1=out7[w], m3=t3[w]
    ; m6=out0, m5=out1, m4=t2, m3=t3, m7=t6, m0=t7, m2=out6, m1=out7
    ; unfortunately, the code below overflows in some cases
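    ; (pmulhrsw itself cannot overflow, but the 16-bit sums/differences it
    ; would be applied to can wrap around for worst-case coefficients, so the
    ; dword-precision VP9_UNPACK_MULSUB_2W_4X path is used instead.)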
%if 0 ; cpuflag(ssse3)
    pmulhrsw m3, W_11585x2_REG
    pmulhrsw m7, W_11585x2_REG
    pmulhrsw m4, W_11585x2_REG ; out4
    pmulhrsw m0, W_11585x2_REG ; out2
    SCRATCH 5, 9, blockq+16*1
    VP9_UNPACK_MULSUB_2W_4X 4, 3, 11585, 11585, D_8192_REG, 2, 5
    VP9_UNPACK_MULSUB_2W_4X 7, 0, 11585, 11585, D_8192_REG, 2, 5
    UNSCRATCH 5, 9, blockq+16*1
    PSIGNW m3, W_M1_REG ; out3
    PSIGNW m7, W_M1_REG ; out5
    ; m6=out0, m5=out1, m0=out2, m3=out3, m4=out4, m7=out5, m2=out6, m1=out7
cglobal vp9_%1_%3_8x8_add, 3, 3, %6, dst, stride, block, eob
%define first_is_idct 1
%define first_is_idct 0
%define second_is_idct 1
%define second_is_idct 0
    mova m0, [blockq+ 0] ; IN(0)
    mova m1, [blockq+ 16] ; IN(1)
    mova m2, [blockq+ 32] ; IN(2)
%if ARCH_X86_64 || first_is_idct
    mova m3, [blockq+ 48] ; IN(3)
    mova m4, [blockq+ 64] ; IN(4)
    mova m5, [blockq+ 80] ; IN(5)
    mova m6, [blockq+ 96] ; IN(6)
%if ARCH_X86_64 || first_is_idct
    mova m7, [blockq+112] ; IN(7)
    mova m15, [pw_11585x2] ; often used
    mova m13, [pd_8192] ; rounding
%define W_11585x2_REG m15
%define D_8192_REG m13
%define W_11585x2_REG [pw_11585x2]
%define D_8192_REG [pd_8192]
%define W_M1_REG [pw_m1]
    ; note different calling conventions for idct8 vs. iadst8 on x86-32
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [blockq+0], [blockq+64], 1
%if second_is_idct == 0
    mova [blockq+ 48], m3
    mova [blockq+112], m7
    pxor m6, m6 ; used for the block reset, and VP9_STORE_2X
    ZERO_BLOCK blockq, 16, 8, m6
%undef second_is_idct

IADST8_FN idct, IDCT8, iadst, IADST8, sse2, 15
IADST8_FN iadst, IADST8, idct, IDCT8, sse2, 15
IADST8_FN iadst, IADST8, iadst, IADST8, sse2, 15
IADST8_FN idct, IDCT8, iadst, IADST8, ssse3, 16
IADST8_FN idct, IDCT8, iadst, IADST8, avx, 16
IADST8_FN iadst, IADST8, idct, IDCT8, ssse3, 16
IADST8_FN iadst, IADST8, idct, IDCT8, avx, 16
IADST8_FN iadst, IADST8, iadst, IADST8, ssse3, 16
IADST8_FN iadst, IADST8, iadst, IADST8, avx, 16

;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

; at the end of this macro, m7 is stored in [%4+15*%5]
; everything else (t0-6 and t8-15) is stored in m0-6 and m8-15
; the following sumsubs have not been done yet:
;    SUMSUB_BA w, 6, 9, 15 ; t6, t9
;    SUMSUB_BA w, 7, 8, 15 ; t7, t8
; or (x86-32) t0-t5 are in m0-m5, t10-t15 are in x11/9/7/5/3/1,
; and the following sumsubs have not been done yet:
;    SUMSUB_BA w, x13, x14, 7 ; t6, t9
;    SUMSUB_BA w, x15, x12, 7 ; t7, t8
%macro VP9_IDCT16_1D_START 6 ; src, nnzc, stride, scratch, scratch_stride, is_iadst
    mova m3, [%1+ 1*%3] ; IN(1)
    mova m0, [%1+ 3*%3] ; IN(3)
    pmulhrsw m4, m3, [pw_16305x2] ; t14-15
    pmulhrsw m3, [pw_1606x2] ; t8-9
    pmulhrsw m7, m0, [pw_m4756x2] ; t10-11
    pmulhrsw m0, [pw_15679x2] ; t12-13
    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
    ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
    VP9_UNPACK_MULSUB_2W_4X 2, 5, 4, 3, 15137, 6270, [pd_8192], 1, 6 ; t9, t14
    SCRATCH 4, 10, %4+ 1*%5
    SCRATCH 5, 11, %4+ 7*%5
    VP9_UNPACK_MULSUB_2W_4X 6, 1, 0, 7, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
    UNSCRATCH 5, 11, %4+ 7*%5
    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
    mova m5, [%1+ 1*%3] ; IN(1)
    mova m4, [%1+ 7*%3] ; IN(7)
    pmulhrsw m2, m5, [pw_16305x2] ; t15
    pmulhrsw m5, [pw_1606x2] ; t8
    pmulhrsw m3, m4, [pw_m10394x2] ; t9
    pmulhrsw m4, [pw_12665x2] ; t14
    mova m3, [%1+ 9*%3] ; IN(9)
    mova m2, [%1+15*%3] ; IN(15)
    ; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
    ; m11=in8, m3=in9, m12=in10 m0=in11, m8=in12, m1=in13, m13=in14, m2=in15
    VP9_UNPACK_MULSUB_2W_4X 5, 2, 16305, 1606, [pd_8192], 0, 1 ; t8, t15
    VP9_UNPACK_MULSUB_2W_4X 3, 4, 10394, 12665, [pd_8192], 0, 1 ; t9, t14
    SUMSUB_BA w, 3, 5, 0 ; t8, t9
    SUMSUB_BA w, 4, 2, 0 ; t15, t14
    VP9_UNPACK_MULSUB_2W_4X 2, 5, 15137, 6270, [pd_8192], 0, 1 ; t9, t14
    SCRATCH 4, 10, %4+ 1*%5
    SCRATCH 5, 11, %4+ 7*%5
    mova m6, [%1+ 3*%3] ; IN(3)
    mova m7, [%1+ 5*%3] ; IN(5)
    pmulhrsw m0, m7, [pw_14449x2] ; t13
    pmulhrsw m7, [pw_7723x2] ; t10
    pmulhrsw m1, m6, [pw_m4756x2] ; t11
    pmulhrsw m6, [pw_15679x2] ; t12
    mova m0, [%1+11*%3] ; IN(11)
    mova m1, [%1+13*%3] ; IN(13)
    VP9_UNPACK_MULSUB_2W_4X 7, 0, 14449, 7723, [pd_8192], 4, 5 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X 1, 6, 4756, 15679, [pd_8192], 4, 5 ; t11, t12
    ; m11=t0, m10=t1, m9=t2, m8=t3, m14=t4, m12=t5, m15=t6, m13=t7
    ; m5=t8, m3=t9, m7=t10, m1=t11, m6=t12, m0=t13, m4=t14, m2=t15
    SUMSUB_BA w, 7, 1, 4 ; t11, t10
    SUMSUB_BA w, 0, 6, 4 ; t12, t13
    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
    ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15
    VP9_UNPACK_MULSUB_2W_4X 6, 1, 6270, m15137, [pd_8192], 4, 5 ; t10, t13
    UNSCRATCH 5, 11, %4+ 7*%5
    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m13=t5, m14=t6, m15=t7
    ; m3=t8, m2=t9, m6=t10, m7=t11, m0=t12, m1=t13, m5=t14, m4=t15
    SUMSUB_BA w, 7, 3, 4 ; t8, t11
    ; backup first register
    SUMSUB_BA w, 6, 2, 7 ; t9, t10
    UNSCRATCH 4, 10, %4+ 1*%5
    SUMSUB_BA w, 0, 4, 7 ; t15, t12
    SUMSUB_BA w, 1, 5, 7 ; t14, t13
    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15
%if cpuflag(ssse3) && %6 == 0
    pmulhrsw m5, [pw_11585x2] ; t10
    pmulhrsw m4, [pw_11585x2] ; t11
    pmulhrsw m3, [pw_11585x2] ; t12
    pmulhrsw m2, [pw_11585x2] ; t13
    SCRATCH 6, 10, %4+ 1*%5
    VP9_UNPACK_MULSUB_2W_4X 5, 2, 11585, 11585, [pd_8192], 6, 7 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X 4, 3, 11585, 11585, [pd_8192], 6, 7 ; t11, t12
    UNSCRATCH 6, 10, %4+ 1*%5
    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m5=t10, m4=t11, m3=t12, m2=t13, m1=t14, m0=t15
    SCRATCH 0, 8, %4+ 1*%5
    SCRATCH 1, 9, %4+ 3*%5
    SCRATCH 2, 10, %4+ 5*%5
    SCRATCH 3, 11, %4+ 7*%5
    SCRATCH 4, 12, %4+ 9*%5
    SCRATCH 5, 13, %4+11*%5
    SCRATCH 6, 14, %4+13*%5
    mova m3, [%1+ 0*%3] ; IN(0)
    mova m4, [%1+ 2*%3] ; IN(2)
    pmulhrsw m3, [pw_11585x2] ; t0-t3
    pmulhrsw m7, m4, [pw_16069x2] ; t6-7
    pmulhrsw m4, [pw_3196x2] ; t4-5
%if 0 ; overflows :(
    pmulhrsw m5, [pw_11585x2] ; t5
    pmulhrsw m6, [pw_11585x2] ; t6
    VP9_UNPACK_MULSUB_2W_4X 5, 6, 7, 4, 11585, 11585, [pd_8192], 0, 1 ; t5, t6
    SCRATCH 7, 15, %4+12*%5
    mova m6, [%1+ 2*%3] ; IN(2)
    mova m1, [%1+ 4*%3] ; IN(4)
    mova m7, [%1+ 6*%3] ; IN(6)
    pmulhrsw m0, m1, [pw_15137x2] ; t3
    pmulhrsw m1, [pw_6270x2] ; t2
    pmulhrsw m5, m6, [pw_16069x2] ; t7
    pmulhrsw m6, [pw_3196x2] ; t4
    pmulhrsw m4, m7, [pw_m9102x2] ; t5
    pmulhrsw m7, [pw_13623x2] ; t6
    mova m4, [%1+10*%3] ; IN(10)
    mova m0, [%1+12*%3] ; IN(12)
    mova m5, [%1+14*%3] ; IN(14)
    VP9_UNPACK_MULSUB_2W_4X 1, 0, 15137, 6270, [pd_8192], 2, 3 ; t2, t3
    VP9_UNPACK_MULSUB_2W_4X 6, 5, 16069, 3196, [pd_8192], 2, 3 ; t4, t7
    VP9_UNPACK_MULSUB_2W_4X 4, 7, 9102, 13623, [pd_8192], 2, 3 ; t5, t6
    SUMSUB_BA w, 4, 6, 2 ; t4, t5
    SUMSUB_BA w, 7, 5, 2 ; t7, t6
%if cpuflag(ssse3) && %6 == 0
    SUMSUB_BA w, 6, 5, 2
    pmulhrsw m5, [pw_11585x2] ; t5
    pmulhrsw m6, [pw_11585x2] ; t6
    VP9_UNPACK_MULSUB_2W_4X 5, 6, 11585, 11585, [pd_8192], 2, 3 ; t5, t6
    SCRATCH 5, 15, %4+10*%5
    mova m2, [%1+ 0*%3] ; IN(0)
    pmulhrsw m2, [pw_11585x2] ; t0 and t1
    SUMSUB_BA w, 7, 0, 5 ; t0, t7
    mova m3, [%1+ 8*%3] ; IN(8)
    ; from 3 stages back
%if cpuflag(ssse3) && %6 == 0
    SUMSUB_BA w, 3, 2, 5
    pmulhrsw m3, [pw_11585x2] ; t0
    pmulhrsw m2, [pw_11585x2] ; t1
    VP9_UNPACK_MULSUB_2W_4X 2, 3, 11585, 11585, [pd_8192], 5, 0 ; t0, t1
    ; from 2 stages back
    SUMSUB_BA w, 0, 3, 5 ; t0, t3
    SUMSUB_BA w, 7, 0, 5 ; t0, t7
    UNSCRATCH 5, 15, %4+10*%5
    SCRATCH 7, 15, %4+12*%5
    SUMSUB_BA w, 1, 2, 7 ; t1, t2
    SUMSUB_BA w, 6, 1, 7 ; t1, t6
    SUMSUB_BA w, 5, 2, 7 ; t2, t5
    SUMSUB_BA w, 4, 3, 7 ; t3, t4
    SUMSUB_BA w, 0, 15, 7 ; t0, t15
    SUMSUB_BA w, 1, 14, 7 ; t1, t14
    SUMSUB_BA w, 2, 13, 7 ; t2, t13
    SUMSUB_BA w, 3, 12, 7 ; t3, t12
    SUMSUB_BA w, 4, 11, 7 ; t4, t11
    SUMSUB_BA w, 5, 10, 7 ; t5, t10

%macro %%SUMSUB_BA_STORE 5 ; reg, from_mem, to_mem, scratch, scratch_stride
    SUMSUB_BA w, 6, %1, 7

    %%SUMSUB_BA_STORE 0, 1, 1, %4, %5 ; t0, t15
    %%SUMSUB_BA_STORE 1, 3, 3, %4, %5 ; t1, t14
    %%SUMSUB_BA_STORE 2, 5, 5, %4, %5 ; t2, t13
    %%SUMSUB_BA_STORE 3, 7, 7, %4, %5 ; t3, t12
    %%SUMSUB_BA_STORE 4, 9, 9, %4, %5 ; t4, t11
    %%SUMSUB_BA_STORE 5, 11, 11, %4, %5 ; t5, t10

%macro VP9_IDCT16_1D 2-4 16, 1 ; src, pass, nnzc, is_iadst
    VP9_IDCT16_1D_START %1, %3, 32, tmpq, 16, %4
    ; backup a different register
    mova m7, [tmpq+15*16]
    mova [tmpq+ 1*16], m15
    SUMSUB_BA w, 6, 9, 15 ; t6, t9
    SUMSUB_BA w, 7, 8, 15 ; t7, t8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 15
    mova m15, [tmpq+ 1*16]
    TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
    mova [tmpq+ 80], m10
    mova [tmpq+112], m11
    mova [tmpq+144], m12
    mova [tmpq+176], m13
    mova [tmpq+208], m14
    mova [tmpq+240], m15
    mova m6, [tmpq+13*16]
    mova m7, [tmpq+14*16]
    SUMSUB_BA w, 6, 7 ; t6, t9
    mova [tmpq+14*16], m6
    mova [tmpq+13*16], m7
    mova m7, [tmpq+15*16]
    mova m6, [tmpq+12*16]
    SUMSUB_BA w, 7, 6 ; t7, t8
    mova [tmpq+15*16], m6
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+14*16], [tmpq+ 8*16], 1
    mova [tmpq+ 0*16], m0
    mova [tmpq+ 2*16], m1
    mova [tmpq+ 4*16], m2
    mova [tmpq+ 6*16], m3
    mova [tmpq+10*16], m5
    mova [tmpq+12*16], m6
    mova [tmpq+14*16], m7
    mova m0, [tmpq+15*16]
    mova m1, [tmpq+13*16]
    mova m2, [tmpq+11*16]
    mova m3, [tmpq+ 9*16]
    mova m4, [tmpq+ 7*16]
    mova m5, [tmpq+ 5*16]
    mova m7, [tmpq+ 1*16]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+ 3*16], [tmpq+ 9*16], 1
    mova [tmpq+ 1*16], m0
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m2
    mova [tmpq+ 7*16], m3
    mova [tmpq+11*16], m5
    mova [tmpq+13*16], m6
    mova [tmpq+15*16], m7
    VP9_IDCT16_1D_START %1, %3, 32, %1, 32, %4
%define ROUND_REG [pw_512]
%define ROUND_REG [pw_32]
    ; backup more registers
    VP9_IDCT8_WRITEx2 0, 1, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 2, 3, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 4, 5, 8, 9, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    ; restore from cache
    SWAP 0, 7 ; move zero from m7 to m0
    SUMSUB_BA w, 6, 9, 3 ; t6, t9
    SUMSUB_BA w, 7, 8, 3 ; t7, t8
    VP9_IDCT8_WRITEx2 6, 7, 3, 4, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 8, 9, 3, 4, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 10, 11, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 12, 13, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 14, 15, 1, 2, 0, ROUND_REG, 6
    mova [tmpq+ 0*32], m5
    VP9_IDCT8_WRITEx2 0, 1, 5, 6, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 2, 3, 5, 6, 7, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    SWAP 0, 7 ; move zero from m7 to m0
    mova m5, [tmpq+ 0*32]
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m4, [tmpq+13*32]
    mova m7, [tmpq+14*32]
    mova m5, [tmpq+15*32]
    mova m6, [tmpq+12*32]
    SUMSUB_BADC w, 4, 7, 5, 6, 1
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m4, [tmpq+11*32]
    mova m5, [tmpq+ 9*32]
    mova m6, [tmpq+ 7*32]
    mova m7, [tmpq+ 5*32]
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m4, [tmpq+ 3*32]
    mova m5, [tmpq+ 1*32]
    VP9_IDCT8_WRITEx2 4, 5, 1, 2, 0, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]

%macro VP9_STORE_2XFULL 6-7 strideq ; dc, tmp1, tmp2, tmp3, tmp4, zero, stride
    punpcklbw m%2, m%3, m%6
    punpcklbw m%4, m%5, m%6
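    ; (VP9_STORE_2XFULL presumably adds a broadcast DC word to two full rows
    ; of pixels: unpack each loaded row from bytes to words against the zero
    ; register, paddw the DC, and repack with unsigned saturation. The
    ; optional stride argument lets the 32x32 DC path pass mmsize so the two
    ; stores cover one 32-byte row instead of two 16-byte rows.)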
%macro VP9_IDCT_IDCT_16x16_ADD_XMM 1
cglobal vp9_idct_idct_16x16_add, 4, 6, 16, 512, dst, stride, block, eob
    ; 2x2=eob=3, 4x4=eob=10
    cmp eobd, 1 ; faster path for when only DC is set
    cmp eobd, 1 ; faster path for when only DC is set
    mova m1, [pw_11585x2]
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (32 << 14) + 8192
    SPLATW m0, m0, q0000
    pmulhrsw m0, [pw_512]
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5
    DEFINE_ARGS dst, stride, block, cnt, dst_bak, tmp
    VP9_IDCT16_1D blockq, 1, 8, 0
    VP9_IDCT16_1D tmpq, 2, 8, 0
    lea dstq, [dst_bakq+8]
    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 8, m0
    VP9_IDCT16_1D blockq, 1, 16, 0
    VP9_IDCT16_1D tmpq, 2, 16, 0
    lea dstq, [dst_bakq+8]
    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 16, m0

VP9_IDCT_IDCT_16x16_ADD_XMM sse2
VP9_IDCT_IDCT_16x16_ADD_XMM ssse3
VP9_IDCT_IDCT_16x16_ADD_XMM avx

;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IADST16_1D 2 ; src, pass
    mova m0, [%1+ 0*32] ; in0
    mova m1, [%1+15*32] ; in15
    mova m2, [%1+ 7*32] ; in7
    mova m3, [%1+ 8*32] ; in8
    VP9_UNPACK_MULSUB_2D_4X 1, 0, 4, 5, 16364, 804 ; m1/4=t1[d], m0/5=t0[d]
    VP9_UNPACK_MULSUB_2D_4X 2, 3, 7, 6, 11003, 12140 ; m2/7=t9[d], m3/6=t8[d]
    SCRATCH 4, 8, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 3, 0, 6, 5, 4, [pd_8192] ; m3=t0[w], m0=t8[w]
    UNSCRATCH 4, 8, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 2, 1, 7, 4, 5, [pd_8192] ; m2=t1[w], m1=t9[w]
    SCRATCH 0, 10, tmpq+ 0*%%str
    SCRATCH 1, 11, tmpq+15*%%str
    mova [tmpq+ 7*%%str], m2
    mova [tmpq+ 8*%%str], m3
    mova m1, [%1+ 2*32] ; in2
    mova m0, [%1+13*32] ; in13
    mova m3, [%1+ 5*32] ; in5
    mova m2, [%1+10*32] ; in10
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 15893, 3981 ; m0/6=t3[d], m1/7=t2[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 4, 5, 8423, 14053 ; m3/4=t11[d], m2/5=t10[d]
    SCRATCH 4, 12, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 2, 1, 5, 7, 4, [pd_8192] ; m2=t2[w], m1=t10[w]
    UNSCRATCH 4, 12, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 3, 0, 4, 6, 5, [pd_8192] ; m3=t3[w], m0=t11[w]
    SCRATCH 0, 12, tmpq+ 2*%%str
    SCRATCH 1, 13, tmpq+13*%%str
    mova [tmpq+ 5*%%str], m2
    mova [tmpq+10*%%str], m3
    mova m2, [%1+ 4*32] ; in4
    mova m3, [%1+11*32] ; in11
    mova m0, [%1+ 3*32] ; in3
    mova m1, [%1+12*32] ; in12
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 14811, 7005 ; m3/7=t5[d], m2/6=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 4, 5, 5520, 15426 ; m0/4=t13[d], m1/5=t12[d]
    SCRATCH 4, 9, tmpq+ 4*%%str
    VP9_RND_SH_SUMSUB_BA 1, 2, 5, 6, 4, [pd_8192] ; m1=t4[w], m2=t12[w]
    UNSCRATCH 4, 9, tmpq+ 4*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 4, 7, 6, [pd_8192] ; m0=t5[w], m3=t13[w]
    SCRATCH 0, 8, tmpq+ 4*%%str
    mova [tmpq+11*%%str], m1 ; t4:m1->r11
    UNSCRATCH 0, 10, tmpq+ 0*%%str
    UNSCRATCH 1, 11, tmpq+15*%%str
    ; round 2 interleaved part 1
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 6, 7, 16069, 3196 ; m1/7=t8[d], m0/6=t9[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 5, 4, 3196, 16069 ; m3/5=t12[d], m2/4=t13[d]
    SCRATCH 4, 9, tmpq+ 3*%%str
    VP9_RND_SH_SUMSUB_BA 3, 1, 5, 7, 4, [pd_8192] ; m3=t8[w], m1=t12[w]
    UNSCRATCH 4, 9, tmpq+ 3*%%str
    VP9_RND_SH_SUMSUB_BA 2, 0, 4, 6, 5, [pd_8192] ; m2=t9[w], m0=t13[w]
    SCRATCH 0, 10, tmpq+ 0*%%str
    SCRATCH 1, 11, tmpq+15*%%str
    SCRATCH 2, 14, tmpq+ 3*%%str
    SCRATCH 3, 15, tmpq+12*%%str
    mova m2, [%1+ 6*32] ; in6
    mova m3, [%1+ 9*32] ; in9
    mova m0, [%1+ 1*32] ; in1
    mova m1, [%1+14*32] ; in14
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 7, 6, 13160, 9760 ; m3/7=t7[d], m2/6=t6[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 1, 4, 5, 2404, 16207 ; m0/4=t15[d], m1/5=t14[d]
    SCRATCH 4, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 1, 2, 5, 6, 4, [pd_8192] ; m1=t6[w], m2=t14[w]
    UNSCRATCH 4, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 4, 7, 6, [pd_8192] ; m0=t7[w], m3=t15[w]
    ; r8=t0, r7=t1, r5=t2, r10=t3, r11=t4, m8|r4=t5, m1=t6, m0=t7
    ; m10|r0=t8, m11|r15=t9, m13|r13=t10, m12|r2=t11, m14|r3=t12, m15|r12=t13, m2=t14, m3=t15
    UNSCRATCH 4, 12, tmpq+ 2*%%str
    UNSCRATCH 5, 13, tmpq+13*%%str
    SCRATCH 0, 12, tmpq+ 1*%%str
    SCRATCH 1, 13, tmpq+14*%%str
    ; remainder of round 2 (rest of t8-15)
    VP9_UNPACK_MULSUB_2D_4X 5, 4, 6, 7, 9102, 13623 ; m5/6=t11[d], m4/7=t10[d]
    VP9_UNPACK_MULSUB_2D_4X 3, 2, 1, 0, 13623, 9102 ; m3/1=t14[d], m2/0=t15[d]
    SCRATCH 0, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 3, 4, 1, 7, 0, [pd_8192] ; m3=t10[w], m4=t14[w]
    UNSCRATCH 0, 9, tmpq+ 6*%%str
    VP9_RND_SH_SUMSUB_BA 2, 5, 0, 6, 1, [pd_8192] ; m2=t11[w], m5=t15[w]
    ; m15|r12=t8, m14|r3=t9, m3=t10, m2=t11, m11|r15=t12, m10|r0=t13, m4=t14, m5=t15
    UNSCRATCH 6, 14, tmpq+ 3*%%str
    UNSCRATCH 7, 15, tmpq+12*%%str
    SUMSUB_BA w, 3, 7, 1
    PSIGNW m3, [pw_m1] ; m3=out1[w], m7=t10[w]
    SUMSUB_BA w, 2, 6, 1 ; m2=out14[w], m6=t11[w]
    ; unfortunately, the code below overflows in some cases, e.g.
    ; http://downloads.webmproject.org/test_data/libvpx/vp90-2-14-resize-fp-tiles-16-8.webm
%if 0 ; cpuflag(ssse3)
    SUMSUB_BA w, 7, 6, 1
    pmulhrsw m7, [pw_11585x2] ; m7=out6[w]
    pmulhrsw m6, [pw_11585x2] ; m6=out9[w]
    VP9_UNPACK_MULSUB_2W_4X 6, 7, 11585, 11585, [pd_8192], 1, 0
    mova [tmpq+ 3*%%str], m6
    mova [tmpq+ 6*%%str], m7
    UNSCRATCH 6, 10, tmpq+ 0*%%str
    UNSCRATCH 7, 11, tmpq+15*%%str
    mova [tmpq+13*%%str], m2
    SCRATCH 3, 11, tmpq+ 9*%%str
    VP9_UNPACK_MULSUB_2D_4X 7, 6, 2, 3, 15137, 6270 ; m6/3=t13[d], m7/2=t12[d]
    VP9_UNPACK_MULSUB_2D_4X 5, 4, 1, 0, 6270, 15137 ; m5/1=t14[d], m4/0=t15[d]
    SCRATCH 0, 9, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 5, 6, 1, 3, 0, [pd_8192] ; m5=out2[w], m6=t14[w]
    UNSCRATCH 0, 9, tmpq+ 2*%%str
    VP9_RND_SH_SUMSUB_BA 4, 7, 0, 2, 1, [pd_8192]
    PSIGNW m4, [pw_m1] ; m4=out13[w], m7=t15[w]
    ; unfortunately, the code below overflows in some cases
%if 0 ; cpuflag(ssse3)
    SUMSUB_BA w, 7, 6, 1
    pmulhrsw m7, [pw_m11585x2] ; m7=out5[w]
    pmulhrsw m6, [pw_11585x2] ; m6=out10[w]
    VP9_UNPACK_MULSUB_2W_4X 7, 6, 11585, 11585, [pd_8192], 1, 0
    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, m6=out10, m4=out13, r2=out14
    mova m2, [tmpq+ 8*%%str]
    mova m3, [tmpq+ 7*%%str]
    mova m1, [tmpq+11*%%str]
    mova [tmpq+ 7*%%str], m6
    mova [tmpq+11*%%str], m4
    mova m4, [tmpq+ 5*%%str]
    SCRATCH 5, 14, tmpq+ 5*%%str
    SCRATCH 7, 15, tmpq+ 8*%%str
    UNSCRATCH 6, 8, tmpq+ 4*%%str
    UNSCRATCH 5, 12, tmpq+ 1*%%str
    UNSCRATCH 7, 13, tmpq+14*%%str
    ; m2=t0, m3=t1, m9=t2, m0=t3, m1=t4, m8=t5, m13=t6, m12=t7
    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14
    SUMSUB_BA w, 1, 2, 0 ; m1=t0[w], m2=t4[w]
    mova m0, [tmpq+10*%%str]
    SCRATCH 1, 12, tmpq+ 1*%%str
    SUMSUB_BA w, 6, 3, 1 ; m8=t1[w], m3=t5[w]
    SCRATCH 6, 13, tmpq+ 4*%%str
    SUMSUB_BA w, 7, 4, 1 ; m13=t2[w], m9=t6[w]
    SCRATCH 7, 8, tmpq+10*%%str
    SUMSUB_BA w, 5, 0, 1 ; m12=t3[w], m0=t7[w]
    SCRATCH 5, 9, tmpq+14*%%str
    VP9_UNPACK_MULSUB_2D_4X 2, 3, 7, 5, 15137, 6270 ; m2/6=t5[d], m3/10=t4[d]
    VP9_UNPACK_MULSUB_2D_4X 0, 4, 1, 6, 6270, 15137 ; m0/14=t6[d], m9/15=t7[d]
    SCRATCH 6, 10, tmpq+ 0*%%str
    VP9_RND_SH_SUMSUB_BA 0, 3, 1, 5, 6, [pd_8192]
    UNSCRATCH 6, 10, tmpq+ 0*%%str
    PSIGNW m0, [pw_m1] ; m0=out3[w], m3=t6[w]
    VP9_RND_SH_SUMSUB_BA 4, 2, 6, 7, 5, [pd_8192] ; m9=out12[w], m2=t7[w]
    UNSCRATCH 1, 8, tmpq+10*%%str
    UNSCRATCH 5, 9, tmpq+14*%%str
    UNSCRATCH 6, 12, tmpq+ 1*%%str
    UNSCRATCH 7, 13, tmpq+ 4*%%str
    SCRATCH 4, 9, tmpq+14*%%str
    SUMSUB_BA w, 1, 6, 4 ; m13=out0[w], m1=t2[w]
    SUMSUB_BA w, 5, 7, 4
    PSIGNW m5, [pw_m1] ; m12=out15[w], m8=t3[w]
    ; unfortunately, the code below overflows in some cases, e.g.
    ; http://downloads.webmproject.org/test_data/libvpx/vp90-2-14-resize-fp-tiles-16-8-4-2-1.webm
%if 0 ; cpuflag(ssse3)
    SUMSUB_BA w, 7, 6, 4
    pmulhrsw m7, [pw_m11585x2] ; m8=out7[w]
    pmulhrsw m6, [pw_11585x2] ; m1=out8[w]
    SUMSUB_BA w, 3, 2, 4
    pmulhrsw m3, [pw_11585x2] ; m3=out4[w]
    pmulhrsw m2, [pw_11585x2] ; m2=out11[w]
    SCRATCH 5, 8, tmpq+10*%%str
    VP9_UNPACK_MULSUB_2W_4X 6, 7, 11585, m11585, [pd_8192], 5, 4
    VP9_UNPACK_MULSUB_2W_4X 2, 3, 11585, 11585, [pd_8192], 5, 4
    UNSCRATCH 5, 8, tmpq+10*%%str
    ; m13=out0, m0=out3, m3=out4, m8=out7, m1=out8, m2=out11, m9=out12, m12=out15
    ; m11|r13=out1, m5=out2, m7=out5, r15=out6, r3=out9, r10=out10, r11=out13, r2=out14
    mova m13, [tmpq+ 6*%%str]
    TRANSPOSE8x8W 1, 11, 14, 0, 3, 15, 13, 6, 10
    mova [tmpq+ 0*16], m1
    mova [tmpq+ 2*16], m11
    mova [tmpq+ 4*16], m14
    mova [tmpq+ 6*16], m0
    mova m1, [tmpq+ 3*%%str]
    mova m11, [tmpq+ 7*%%str]
    mova m14, [tmpq+11*%%str]
    mova m0, [tmpq+13*%%str]
    mova [tmpq+ 8*16], m3
    mova [tmpq+10*16], m15
    mova [tmpq+12*16], m13
    mova [tmpq+14*16], m6
    TRANSPOSE8x8W 7, 1, 11, 2, 9, 14, 0, 5, 10
    mova [tmpq+ 1*16], m7
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m11
    mova [tmpq+ 7*16], m2
    mova [tmpq+ 9*16], m9
    mova [tmpq+11*16], m14
    mova [tmpq+13*16], m0
    mova [tmpq+15*16], m5
    mova [tmpq+12*%%str], m2
    mova [tmpq+ 1*%%str], m5
    mova [tmpq+15*%%str], m7
    mova m2, [tmpq+ 9*%%str]
    mova m5, [tmpq+ 5*%%str]
    mova m7, [tmpq+ 8*%%str]
    TRANSPOSE8x8W 1, 2, 5, 0, 3, 7, 4, 6, [tmpq+ 6*%%str], [tmpq+ 8*%%str], 1
    mova [tmpq+ 0*16], m1
    mova [tmpq+ 2*16], m2
    mova [tmpq+ 4*16], m5
    mova [tmpq+ 6*16], m0
    mova [tmpq+10*16], m7
    mova m3, [tmpq+12*%%str]
    mova [tmpq+12*16], m4
    mova m4, [tmpq+14*%%str]
    mova [tmpq+14*16], m6
    mova m0, [tmpq+15*%%str]
    mova m1, [tmpq+ 3*%%str]
    mova m2, [tmpq+ 7*%%str]
    mova m5, [tmpq+11*%%str]
    mova m7, [tmpq+ 1*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+13*%%str], [tmpq+ 9*%%str], 1
    mova [tmpq+ 1*16], m0
    mova [tmpq+ 3*16], m1
    mova [tmpq+ 5*16], m2
    mova [tmpq+ 7*16], m3
    mova [tmpq+11*16], m5
    mova [tmpq+13*16], m6
    mova [tmpq+15*16], m7
%define ROUND_REG [pw_512]
%define ROUND_REG [pw_32]
    mova m12, [tmpq+ 6*%%str]
    VP9_IDCT8_WRITEx2 1, 11, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 14, 0, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 3, 15, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 12, 6, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m1, [tmpq+ 3*%%str]
    mova m11, [tmpq+ 7*%%str]
    mova m14, [tmpq+11*%%str]
    mova m0, [tmpq+13*%%str]
    VP9_IDCT8_WRITEx2 7, 1, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 11, 2, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 9, 14, 10, 8, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    VP9_IDCT8_WRITEx2 0, 5, 10, 8, 4, ROUND_REG, 6
    mova [tmpq+ 0*%%str], m2
    mova [tmpq+ 1*%%str], m5
    mova [tmpq+ 2*%%str], m7
    mova m2, [tmpq+ 9*%%str]
    VP9_IDCT8_WRITEx2 1, 2, 5, 7, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 5*%%str]
    VP9_IDCT8_WRITEx2 5, 0, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 8*%%str]
    VP9_IDCT8_WRITEx2 3, 5, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m5, [tmpq+ 6*%%str]
    VP9_IDCT8_WRITEx2 5, 6, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+ 2*%%str]
    mova m3, [tmpq+ 3*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+ 7*%%str]
    mova m3, [tmpq+ 0*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+14*%%str]
    mova m3, [tmpq+11*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6
    lea dstq, [dstq+strideq*2]
    mova m0, [tmpq+13*%%str]
    mova m3, [tmpq+ 1*%%str]
    VP9_IDCT8_WRITEx2 0, 3, 1, 2, 4, ROUND_REG, 6

cglobal vp9_%1_%3_16x16_add, 3, 6, 16, 512, dst, stride, block, cnt, dst_bak, tmp
    lea dstq, [dst_bakq+8]
    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 32, 16, m0

IADST16_FN idct, IDCT16, iadst, IADST16, sse2
IADST16_FN iadst, IADST16, idct, IDCT16, sse2
IADST16_FN iadst, IADST16, iadst, IADST16, sse2
IADST16_FN idct, IDCT16, iadst, IADST16, ssse3
IADST16_FN iadst, IADST16, idct, IDCT16, ssse3
IADST16_FN iadst, IADST16, iadst, IADST16, ssse3
IADST16_FN idct, IDCT16, iadst, IADST16, avx
IADST16_FN iadst, IADST16, idct, IDCT16, avx
IADST16_FN iadst, IADST16, iadst, IADST16, avx

;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_32x32_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IDCT32_1D 2-3 32 ; src, pass, nnzc
%assign %%str 16*%2*%2
    ; first do t0-15, this can be done identical to idct16x16
    VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq, 2*%%str, 1
    ; store everything on stack to make space available for t16-31
    ; we store interleaved with the output of the second half (t16-31)
    ; so we don't need to allocate extra stack space
    mova [tmpq+ 0*%%str], m0 ; t0
    mova [tmpq+ 4*%%str], m1 ; t1
    mova [tmpq+ 8*%%str], m2 ; t2
    mova [tmpq+12*%%str], m3 ; t3
    mova [tmpq+16*%%str], m4 ; t4
    mova [tmpq+20*%%str], m5 ; t5
    mova [tmpq+22*%%str], m10 ; t10
    mova [tmpq+18*%%str], m11 ; t11
    mova [tmpq+14*%%str], m12 ; t12
    mova [tmpq+10*%%str], m13 ; t13
    mova [tmpq+ 6*%%str], m14 ; t14
    mova [tmpq+ 2*%%str], m15 ; t15
    mova m0, [tmpq+30*%%str]
    UNSCRATCH 1, 6, tmpq+26*%%str
    UNSCRATCH 2, 8, tmpq+24*%%str
    UNSCRATCH 3, 9, tmpq+28*%%str
    SUMSUB_BA w, 1, 3, 4 ; t6, t9
    SUMSUB_BA w, 0, 2, 4 ; t7, t8
    mova [tmpq+24*%%str], m1 ; t6
    mova [tmpq+28*%%str], m0 ; t7
    mova [tmpq+30*%%str], m2 ; t8
    mova [tmpq+26*%%str], m3 ; t9
    ; then, secondly, do t16-31
    pmulhrsw m1, m4, [pw_16364x2] ; t31
    pmulhrsw m4, [pw_804x2] ; t16
    VP9_UNPACK_MULSUB_2W_4X 5, 0, 1, 4, 16069, 3196, [pd_8192], 6, 2 ; t17, t30
    pmulhrsw m3, m7, [pw_m5520x2] ; t19
    pmulhrsw m7, [pw_15426x2] ; t28
    SCRATCH 4, 13, tmpq+ 1*%%str
    SCRATCH 5, 12, tmpq+15*%%str
    VP9_UNPACK_MULSUB_2W_4X 2, 6, 7, 3, 3196, m16069, [pd_8192], 4, 5 ; t18, t29
    pmulhrsw m5, m0, [pw_16364x2]
    pmulhrsw m0, [pw_804x2]
    pmulhrsw m4, m1, [pw_m11003x2]
    pmulhrsw m1, [pw_12140x2]
    VP9_UNPACK_MULSUB_2W_4X 0, 5, 16364, 804, [pd_8192], 2, 3 ; t16, t31
    VP9_UNPACK_MULSUB_2W_4X 4, 1, 11003, 12140, [pd_8192], 2, 3 ; t17, t30
    SUMSUB_BA w, 4, 0, 2
    SUMSUB_BA w, 1, 5, 2
    VP9_UNPACK_MULSUB_2W_4X 5, 0, 16069, 3196, [pd_8192], 2, 3 ; t17, t30
    SCRATCH 4, 13, tmpq+ 1*%%str
    SCRATCH 5, 12, tmpq+15*%%str
    pmulhrsw m7, m3, [pw_14811x2]
    pmulhrsw m3, [pw_7005x2]
    pmulhrsw m6, m2, [pw_m5520x2]
    pmulhrsw m2, [pw_15426x2]
    VP9_UNPACK_MULSUB_2W_4X 3, 7, 14811, 7005, [pd_8192], 4, 5 ; t18, t29
    VP9_UNPACK_MULSUB_2W_4X 6, 2, 5520, 15426, [pd_8192], 4, 5 ; t19, t28
    SUMSUB_BA w, 3, 6, 4
    SUMSUB_BA w, 7, 2, 4
    VP9_UNPACK_MULSUB_2W_4X 2, 6, 3196, m16069, [pd_8192], 4, 5 ; t18, t29
    UNSCRATCH 5, 12, tmpq+15*%%str
    SUMSUB_BA w, 6, 0, 4
    mova [tmpq+25*%%str], m6 ; t19
    UNSCRATCH 4, 13, tmpq+ 1*%%str
    SUMSUB_BA w, 7, 1, 6
    SUMSUB_BA w, 3, 4, 6
    mova [tmpq+23*%%str], m3 ; t16
    SUMSUB_BA w, 2, 5, 6
    VP9_UNPACK_MULSUB_2W_4X 0, 5, 15137, 6270, [pd_8192], 6, 3 ; t18, t29
    VP9_UNPACK_MULSUB_2W_4X 1, 4, 15137, 6270, [pd_8192], 6, 3 ; t19, t28
    SCRATCH 0, 10, tmpq+ 1*%%str
    SCRATCH 1, 11, tmpq+ 7*%%str
    SCRATCH 2, 9, tmpq+ 9*%%str
    SCRATCH 4, 14, tmpq+15*%%str
    SCRATCH 5, 15, tmpq+17*%%str
    SCRATCH 7, 13, tmpq+31*%%str
    pmulhrsw m5, m0, [pw_15893x2] ; t27
    pmulhrsw m0, [pw_3981x2] ; t20
    VP9_UNPACK_MULSUB_2W_4X 1, 4, 5, 0, 9102, 13623, [pd_8192], 7, 2 ; t21, t26
    pmulhrsw m6, m3, [pw_m2404x2] ; t23
    pmulhrsw m3, [pw_16207x2] ; t24
    SCRATCH 5, 8, tmpq+ 5*%%str
    SCRATCH 4, 12, tmpq+11*%%str
    VP9_UNPACK_MULSUB_2W_4X 7, 2, 3, 6, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
    pmulhrsw m1, m4, [pw_15893x2]
    pmulhrsw m4, [pw_3981x2]
    pmulhrsw m0, m5, [pw_m8423x2]
    pmulhrsw m5, [pw_14053x2]
    VP9_UNPACK_MULSUB_2W_4X 4, 1, 15893, 3981, [pd_8192], 2, 3 ; t20, t27
    VP9_UNPACK_MULSUB_2W_4X 0, 5, 8423, 14053, [pd_8192], 2, 3 ; t21, t26
    SUMSUB_BA w, 0, 4, 2
    SUMSUB_BA w, 5, 1, 2
    VP9_UNPACK_MULSUB_2W_4X 1, 4, 9102, 13623, [pd_8192], 2, 3 ; t21, t26
    SCRATCH 5, 8, tmpq+ 5*%%str
    SCRATCH 4, 12, tmpq+11*%%str
    pmulhrsw m3, m6, [pw_13160x2]
    pmulhrsw m6, [pw_9760x2]
    pmulhrsw m2, m7, [pw_m2404x2]
    pmulhrsw m7, [pw_16207x2]
    VP9_UNPACK_MULSUB_2W_4X 6, 3, 13160, 9760, [pd_8192], 4, 5 ; t22, t25
    VP9_UNPACK_MULSUB_2W_4X 2, 7, 2404, 16207, [pd_8192], 4, 5 ; t23, t24
    SUMSUB_BA w, 6, 2, 4
    SUMSUB_BA w, 3, 7, 4
    VP9_UNPACK_MULSUB_2W_4X 7, 2, 13623, m9102, [pd_8192], 4, 5 ; t22, t25
    ; m4=t16, m5=t17, m9=t18, m8=t19, m0=t20, m1=t21, m13=t22, m12=t23,
    ; m3=t24, m2=t25, m14=t26, m15=t27, m7=t28, m6=t29, m10=t30, m11=t31
    UNSCRATCH 4, 12, tmpq+11*%%str
    SUMSUB_BA w, 0, 6, 5
    SUMSUB_BA w, 4, 2, 5
    UNSCRATCH 5, 8, tmpq+ 5*%%str
    SCRATCH 4, 8, tmpq+11*%%str
    SUMSUB_BA w, 1, 7, 4
    SUMSUB_BA w, 5, 3, 4
    SCRATCH 5, 12, tmpq+ 5*%%str
    VP9_UNPACK_MULSUB_2W_4X 3, 6, 6270, m15137, [pd_8192], 4, 5 ; t20, t27
    VP9_UNPACK_MULSUB_2W_4X 2, 7, 6270, m15137, [pd_8192], 4, 5 ; t21, t26
    ; m8[s]=t16, m9=t17, m5=t18, m4[s]=t19, m12=t20, m13=t21, m1=t22, m0=t23,
    ; m15=t24, m14=t25, m2=t26, m3=t27, m11=t28, m10=t29, m6=t30, m7=t31
    UNSCRATCH 5, 9, tmpq+ 9*%%str
    mova m4, [tmpq+23*%%str] ; t16
    SUMSUB_BA w, 1, 5, 9
    SUMSUB_BA w, 0, 4, 9
    SUMSUB_BADC w, 1, 5, 0, 4
    mova [tmpq+29*%%str], m1 ; t17
    mova [tmpq+21*%%str], m0 ; t16
    UNSCRATCH 0, 10, tmpq+ 1*%%str
    UNSCRATCH 1, 11, tmpq+ 7*%%str
    SUMSUB_BA w, 2, 0, 9
    SUMSUB_BA w, 3, 1, 9
    SUMSUB_BADC w, 2, 0, 3, 1
    mova [tmpq+ 9*%%str], m2 ; t18
    mova [tmpq+13*%%str], m3 ; t19
    SCRATCH 0, 10, tmpq+23*%%str
    SCRATCH 1, 11, tmpq+27*%%str
    UNSCRATCH 2, 14, tmpq+15*%%str
    UNSCRATCH 3, 15, tmpq+17*%%str
    SUMSUB_BA w, 6, 2, 0
    SUMSUB_BA w, 7, 3, 0
    SCRATCH 6, 14, tmpq+ 3*%%str
    SCRATCH 7, 15, tmpq+ 7*%%str
    UNSCRATCH 0, 8, tmpq+11*%%str
    mova m1, [tmpq+25*%%str] ; t19
    UNSCRATCH 6, 12, tmpq+ 5*%%str
    UNSCRATCH 7, 13, tmpq+31*%%str
    SUMSUB_BA w, 0, 1, 9
    SUMSUB_BA w, 6, 7, 9
    SUMSUB_BADC w, 0, 1, 6, 7
    ; m0=t16, m1=t17, m2=t18, m3=t19, m11=t20, m10=t21, m9=t22, m8=t23,
    ; m7=t24, m6=t25, m5=t26, m4=t27, m12=t28, m13=t29, m14=t30, m15=t31
%if 0 ; cpuflag(ssse3)
    SUMSUB_BA w, 4, 7, 8
    SUMSUB_BA w, 5, 1, 8
    SUMSUB_BADC w, 4, 7, 5, 1
    pmulhrsw m7, [pw_11585x2]
    pmulhrsw m4, [pw_11585x2]
    pmulhrsw m1, [pw_11585x2]
    pmulhrsw m5, [pw_11585x2]
    mova [tmpq+ 5*%%str], m7 ; t23
    SCRATCH 1, 13, tmpq+25*%%str
    UNSCRATCH 7, 10, tmpq+23*%%str
    UNSCRATCH 1, 11, tmpq+27*%%str
    SUMSUB_BA w, 7, 3, 10
    SUMSUB_BA w, 1, 2, 10
    SUMSUB_BADC w, 7, 3, 1, 2
    pmulhrsw m3, [pw_11585x2]
    pmulhrsw m7, [pw_11585x2]
    pmulhrsw m2, [pw_11585x2]
    pmulhrsw m1, [pw_11585x2]
    SCRATCH 0, 8, tmpq+15*%%str
    SCRATCH 6, 9, tmpq+17*%%str
    VP9_UNPACK_MULSUB_2W_4X 7, 4, 11585, 11585, [pd_8192], 0, 6
    mova [tmpq+ 5*%%str], m7 ; t23
    UNSCRATCH 7, 10, tmpq+23*%%str
    VP9_UNPACK_MULSUB_2W_4X 1, 5, 11585, 11585, [pd_8192], 0, 6
    SCRATCH 1, 13, tmpq+25*%%str
    UNSCRATCH 1, 11, tmpq+27*%%str
    VP9_UNPACK_MULSUB_2W_4X 3, 7, 11585, 11585, [pd_8192], 0, 6
    VP9_UNPACK_MULSUB_2W_4X 2, 1, 11585, 11585, [pd_8192], 0, 6
    UNSCRATCH 0, 8, tmpq+15*%%str
    UNSCRATCH 6, 9, tmpq+17*%%str
    ; m0=t16, m1=t17, m2=t18, m3=t19, m4=t20, m5=t21, m6=t22, m7=t23,
    ; m8=t24, m9=t25, m10=t26, m11=t27, m12=t28, m13=t29, m14=t30, m15=t31
    ; then do final pass to sumsub+store the two halves
    mova [tmpq+17*%%str], m2 ; t20
    mova [tmpq+ 1*%%str], m3 ; t21
    mova [tmpq+25*%%str], m13 ; t22
    mova m8, [tmpq+ 0*%%str] ; t0
    mova m9, [tmpq+ 4*%%str] ; t1
    mova m12, [tmpq+ 8*%%str] ; t2
    mova m11, [tmpq+12*%%str] ; t3
    mova m2, [tmpq+16*%%str] ; t4
    mova m3, [tmpq+20*%%str] ; t5
    mova m13, [tmpq+24*%%str] ; t6
    SUMSUB_BA w, 6, 8, 10
    mova [tmpq+ 3*%%str], m8 ; t15
    mova m10, [tmpq+28*%%str] ; t7
    SUMSUB_BA w, 0, 9, 8
    SUMSUB_BA w, 15, 12, 8
    SUMSUB_BA w, 14, 11, 8
    SUMSUB_BA w, 1, 2, 8
    SUMSUB_BA w, 7, 3, 8
    SUMSUB_BA w, 5, 13, 8
    SUMSUB_BA w, 4, 10, 8
    TRANSPOSE8x8W 6, 0, 15, 14, 1, 7, 5, 4, 8
    mova [tmpq+ 0*%%str], m6
    mova [tmpq+ 4*%%str], m0
    mova [tmpq+ 8*%%str], m15
    mova [tmpq+12*%%str], m14
    mova [tmpq+16*%%str], m1
    mova [tmpq+20*%%str], m7
    mova [tmpq+24*%%str], m5
    mova [tmpq+28*%%str], m4
    mova m8, [tmpq+ 3*%%str] ; t15
    TRANSPOSE8x8W 10, 13, 3, 2, 11, 12, 9, 8, 0
    mova [tmpq+ 3*%%str], m10
    mova [tmpq+ 7*%%str], m13
    mova [tmpq+11*%%str], m3
    mova [tmpq+15*%%str], m2
    mova [tmpq+19*%%str], m11
    mova [tmpq+23*%%str], m12
    mova [tmpq+27*%%str], m9
    mova [tmpq+31*%%str], m8
    mova m15, [tmpq+30*%%str] ; t8
    mova m14, [tmpq+26*%%str] ; t9
    mova m13, [tmpq+22*%%str] ; t10
    mova m12, [tmpq+18*%%str] ; t11
    mova m11, [tmpq+14*%%str] ; t12
    mova m10, [tmpq+10*%%str] ; t13
    mova m9, [tmpq+ 6*%%str] ; t14
    mova m8, [tmpq+ 2*%%str] ; t15
    mova m7, [tmpq+21*%%str] ; t16
    mova m6, [tmpq+29*%%str] ; t17
    mova m5, [tmpq+ 9*%%str] ; t18
    mova m4, [tmpq+13*%%str] ; t19
    mova m3, [tmpq+17*%%str] ; t20
    mova m2, [tmpq+ 1*%%str] ; t21
    mova m1, [tmpq+25*%%str] ; t22
    SUMSUB_BA w, 7, 8, 0
    mova [tmpq+ 2*%%str], m8
    mova m0, [tmpq+ 5*%%str] ; t23
    SUMSUB_BA w, 6, 9, 8
    SUMSUB_BA w, 5, 10, 8
    SUMSUB_BA w, 4, 11, 8
    SUMSUB_BA w, 3, 12, 8
    SUMSUB_BA w, 2, 13, 8
    SUMSUB_BA w, 1, 14, 8
    SUMSUB_BA w, 0, 15, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    mova [tmpq+ 1*%%str], m0
    mova [tmpq+ 5*%%str], m1
    mova [tmpq+ 9*%%str], m2
    mova [tmpq+13*%%str], m3
    mova [tmpq+17*%%str], m4
    mova [tmpq+21*%%str], m5
    mova [tmpq+25*%%str], m6
    mova [tmpq+29*%%str], m7
    mova m8, [tmpq+ 2*%%str]
    TRANSPOSE8x8W 8, 9, 10, 11, 12, 13, 14, 15, 0
    mova [tmpq+ 2*%%str], m8
    mova [tmpq+ 6*%%str], m9
    mova [tmpq+10*%%str], m10
    mova [tmpq+14*%%str], m11
    mova [tmpq+18*%%str], m12
    mova [tmpq+22*%%str], m13
    mova [tmpq+26*%%str], m14
    mova [tmpq+30*%%str], m15
    mova m2, [tmpq+24*%%str] ; t6
    mova m3, [tmpq+28*%%str] ; t7
    SUMSUB_BADC w, 5, 2, 4, 3
    mova [tmpq+24*%%str], m5
    mova [tmpq+23*%%str], m2
    mova [tmpq+28*%%str], m4
    mova [tmpq+19*%%str], m3
    mova m2, [tmpq+16*%%str] ; t4
    mova m3, [tmpq+20*%%str] ; t5
    SUMSUB_BA w, 1, 2, 5
    SUMSUB_BA w, 7, 3, 5
    mova [tmpq+15*%%str], m2
    mova [tmpq+11*%%str], m3
    mova m2, [tmpq+ 0*%%str] ; t0
    mova m3, [tmpq+ 4*%%str] ; t1
    SUMSUB_BA w, 6, 2, 5
    SUMSUB_BA w, 0, 3, 5
    mova [tmpq+31*%%str], m2
    mova [tmpq+27*%%str], m3
    mova m2, [tmpq+ 8*%%str] ; t2
    mova m3, [tmpq+12*%%str] ; t3
    mova m5, [tmpq+ 7*%%str]
    mova m4, [tmpq+ 3*%%str]
    SUMSUB_BADC w, 5, 2, 4, 3
    mova [tmpq+ 7*%%str], m2
    mova [tmpq+ 3*%%str], m3
    mova m3, [tmpq+28*%%str]
    TRANSPOSE8x8W 6, 0, 5, 4, 1, 7, 2, 3, [tmpq+24*%%str], [tmpq+16*%%str], 1
    mova [tmpq+ 0*%%str], m6
    mova [tmpq+ 4*%%str], m0
    mova [tmpq+ 8*%%str], m5
    mova [tmpq+12*%%str], m4
    mova [tmpq+20*%%str], m7
    mova [tmpq+24*%%str], m2
    mova [tmpq+28*%%str], m3
    mova m6, [tmpq+19*%%str]
    mova m0, [tmpq+23*%%str]
    mova m5, [tmpq+11*%%str]
    mova m4, [tmpq+15*%%str]
    mova m1, [tmpq+ 3*%%str]
    mova m7, [tmpq+ 7*%%str]
    mova m3, [tmpq+31*%%str]
    TRANSPOSE8x8W 6, 0, 5, 4, 1, 7, 2, 3, [tmpq+27*%%str], [tmpq+19*%%str], 1
    mova [tmpq+ 3*%%str], m6
    mova [tmpq+ 7*%%str], m0
    mova [tmpq+11*%%str], m5
    mova [tmpq+15*%%str], m4
    mova [tmpq+23*%%str], m7
    mova [tmpq+27*%%str], m2
    mova [tmpq+31*%%str], m3
    mova m1, [tmpq+ 6*%%str] ; t14
    mova m0, [tmpq+ 2*%%str] ; t15
    mova m7, [tmpq+21*%%str] ; t16
    mova m6, [tmpq+29*%%str] ; t17
    SUMSUB_BA w, 7, 0, 2
    SUMSUB_BA w, 6, 1, 2
    mova [tmpq+29*%%str], m7
    mova [tmpq+ 2*%%str], m0
    mova [tmpq+21*%%str], m6
    mova [tmpq+ 6*%%str], m1
    mova m1, [tmpq+14*%%str] ; t12
    mova m0, [tmpq+10*%%str] ; t13
    mova m5, [tmpq+ 9*%%str] ; t18
    mova m4, [tmpq+13*%%str] ; t19
    SUMSUB_BA w, 5, 0, 2
    SUMSUB_BA w, 4, 1, 2
    mova [tmpq+10*%%str], m0
    mova [tmpq+14*%%str], m1
    mova m1, [tmpq+22*%%str] ; t10
    mova m0, [tmpq+18*%%str] ; t11
    mova m3, [tmpq+17*%%str] ; t20
    mova m2, [tmpq+ 1*%%str] ; t21
    SUMSUB_BA w, 3, 0, 6
    SUMSUB_BA w, 2, 1, 6
    mova [tmpq+18*%%str], m0
    mova [tmpq+22*%%str], m1
    mova m7, [tmpq+30*%%str] ; t8
    mova m6, [tmpq+26*%%str] ; t9
    mova m1, [tmpq+25*%%str] ; t22
    mova m0, [tmpq+ 5*%%str] ; t23
    SUMSUB_BADC w, 1, 6, 0, 7
    mova [tmpq+26*%%str], m6
    mova [tmpq+30*%%str], m7
    mova m7, [tmpq+29*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+21*%%str], [tmpq+17*%%str], 1
    mova [tmpq+ 1*%%str], m0
    mova [tmpq+ 5*%%str], m1
    mova [tmpq+ 9*%%str], m2
    mova [tmpq+13*%%str], m3
    mova [tmpq+21*%%str], m5
    mova [tmpq+25*%%str], m6
    mova [tmpq+29*%%str], m7
    mova m0, [tmpq+ 2*%%str]
    mova m1, [tmpq+ 6*%%str]
    mova m2, [tmpq+10*%%str]
    mova m3, [tmpq+14*%%str]
    mova m4, [tmpq+18*%%str]
    mova m5, [tmpq+22*%%str]
    mova m7, [tmpq+30*%%str]
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [tmpq+26*%%str], [tmpq+18*%%str], 1
    mova [tmpq+ 2*%%str], m0
    mova [tmpq+ 6*%%str], m1
    mova [tmpq+10*%%str], m2
    mova [tmpq+14*%%str], m3
    mova [tmpq+22*%%str], m5
    mova [tmpq+26*%%str], m6
    mova [tmpq+30*%%str], m7
    ; t0-7 is in [tmpq+{0,4,8,12,16,20,24,28}*%%str]
    ; t8-15 is in [tmpq+{2,6,10,14,18,22,26,30}*%%str]
    ; t16-19 and t23 are in [tmpq+{1,5,9,13,29}*%%str]
    ; t24-31 is in m8-15

%define ROUND_REG [pw_512]
%define ROUND_REG [pw_32]

%macro %%STORE_2X2 7-8 1 ; src[1-4], tmp[1-2], zero, inc_dst_ptrs
    SUMSUB_BA w, %4, %1, %5
    SUMSUB_BA w, %3, %2, %5
    VP9_IDCT8_WRITEx2 %4, %3, %5, %6, %7, ROUND_REG, 6
    VP9_IDCT8_WRITEx2 %2, %1, %5, %6, %7, ROUND_REG, 6, dst_endq
    sub dst_endq, stride2q
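    ; (The final 32x32 write-out walks the destination from both ends: the
    ; sums are written forward through dstq while the mirrored differences
    ; go backward through dst_endq, which steps back by stride*2 per pair.)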
    ; store t0-1 and t30-31
    mova m8, [tmpq+ 0*%%str]
    mova m9, [tmpq+ 4*%%str]
    %%STORE_2X2 8, 9, 0, 6, 12, 11, 10

    ; store t2-3 and t28-29
    mova m8, [tmpq+ 8*%%str]
    mova m9, [tmpq+12*%%str]
    %%STORE_2X2 8, 9, 14, 15, 12, 11, 10

    ; store t4-5 and t26-27
    mova m8, [tmpq+16*%%str]
    mova m9, [tmpq+20*%%str]
    %%STORE_2X2 8, 9, 7, 1, 12, 11, 10

    ; store t6-7 and t24-25
    mova m8, [tmpq+24*%%str]
    mova m9, [tmpq+28*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10

    ; store t8-9 and t22-23
    mova m8, [tmpq+30*%%str]
    mova m9, [tmpq+26*%%str]
    mova m0, [tmpq+ 5*%%str]
    %%STORE_2X2 8, 9, 13, 0, 12, 11, 10

    ; store t10-11 and t20-21
    mova m8, [tmpq+22*%%str]
    mova m9, [tmpq+18*%%str]
    %%STORE_2X2 8, 9, 2, 3, 12, 11, 10

    ; store t12-13 and t18-19
    mova m8, [tmpq+14*%%str]
    mova m9, [tmpq+10*%%str]
    mova m5, [tmpq+13*%%str]
    mova m4, [tmpq+ 9*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10

    mova m8, [tmpq+ 6*%%str]
    mova m9, [tmpq+ 2*%%str]
    mova m5, [tmpq+29*%%str]
    mova m4, [tmpq+21*%%str]
    %%STORE_2X2 8, 9, 4, 5, 12, 11, 10, 0

    mova [tmpq+ 1*%%str], m1
    mova [tmpq+11*%%str], m2
    mova [tmpq+15*%%str], m3
    mova [tmpq+17*%%str], m4
    mova [tmpq+19*%%str], m5

    ; store t0-1 and t30-31
    mova m2, [tmpq+ 0*%%str]
    mova m3, [tmpq+ 4*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

    ; store t2-3 and t28-29
    mova m2, [tmpq+ 8*%%str]
    mova m3, [tmpq+12*%%str]
    mova m0, [tmpq+ 3*%%str]
    mova m6, [tmpq+ 7*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

    ; store t4-5 and t26-27
    mova m2, [tmpq+16*%%str]
    mova m3, [tmpq+20*%%str]
    mova m0, [tmpq+ 1*%%str]
    %%STORE_2X2 2, 3, 7, 0, 4, 5, 1

    ; store t6-7 and t24-25
    mova m2, [tmpq+24*%%str]
    mova m3, [tmpq+28*%%str]
    mova m0, [tmpq+17*%%str]
    mova m6, [tmpq+19*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

    ; store t8-9 and t22-23
    mova m2, [tmpq+30*%%str]
    mova m3, [tmpq+26*%%str]
    mova m0, [tmpq+25*%%str]
    mova m6, [tmpq+ 5*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

    ; store t10-11 and t20-21
    mova m2, [tmpq+22*%%str]
    mova m3, [tmpq+18*%%str]
    mova m0, [tmpq+11*%%str]
    mova m6, [tmpq+15*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

    ; store t12-13 and t18-19
    mova m2, [tmpq+14*%%str]
    mova m3, [tmpq+10*%%str]
    mova m6, [tmpq+13*%%str]
    mova m0, [tmpq+ 9*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1

    mova m2, [tmpq+ 6*%%str]
    mova m3, [tmpq+ 2*%%str]
    mova m6, [tmpq+29*%%str]
    mova m0, [tmpq+21*%%str]
    %%STORE_2X2 2, 3, 0, 6, 4, 5, 1, 0

%macro VP9_IDCT_IDCT_32x32_ADD_XMM 1
cglobal vp9_idct_idct_32x32_add, 0, 6 + ARCH_X86_64 * 3, 16, 2048, dst, stride, block, eob
    movifnidn eobd, dword eobm
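    ; Like the 16x16 version, this dispatches on eob: tiny values take the
    ; DC-only path, small ones run VP9_IDCT32_1D on a reduced set of input
    ; columns (nnzc = 8 or 16 below), and only large eob values pay for the
    ; full 32-column pass.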
    movifnidn blockq, blockmp
    movifnidn dstq, dstmp
    movifnidn strideq, stridemp
    mova m1, [pw_11585x2]
    DEFINE_ARGS dst, stride, block, coef
    movsx coefd, word [blockq]
    add coefd, (32 << 14) + 8192
    SPLATW m0, m0, q0000
    pmulhrsw m0, [pw_512]
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize
    VP9_STORE_2XFULL 0, 1, 2, 3, 4, 5, mmsize

    DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
%define dst_bakq r0mp
    DEFINE_ARGS block, u1, u2, u3, u4, tmp
    VP9_IDCT32_1D blockq, 1, 8
    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
%define cntd dword r3m
    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32
    sub stride30q, stride2q ; stride*30
    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2, 8
    ; at the end of the loop, m7 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 64, 8, m1

    DEFINE_ARGS block, tmp, cnt
    VP9_IDCT32_1D blockq, 1, 16
    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
%define cntd dword r3m
    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32
    sub stride30q, stride2q ; stride*30
    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2, 16
    ; at the end of the loop, m7 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 64, 16, m1

    DEFINE_ARGS block, tmp, cnt
    VP9_IDCT32_1D blockq, 1
    DEFINE_ARGS dst, stride, stride30, dst_end, stride2, tmp
%define cntd dword r3m
    mov stride30q, strideq ; stride
    lea stride2q, [strideq*2] ; stride*2
    shl stride30q, 5 ; stride*32
    sub stride30q, stride2q ; stride*30
    lea dst_endq, [dstq+stride30q]
    VP9_IDCT32_1D tmpq, 2
    ; at the end of the loop, m7 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK blockq, 64, 32, m1

VP9_IDCT_IDCT_32x32_ADD_XMM sse2
VP9_IDCT_IDCT_32x32_ADD_XMM ssse3
VP9_IDCT_IDCT_32x32_ADD_XMM avx