;******************************************************************************
;* VP9 inverse transform x86 SIMD optimizations
;*
;* Copyright (C) 2015 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
%include "vp9itxfm_template.asm"

pd_3fff: times 4 dd 0x3fff

cextern pw_15212_m13377
cextern pw_m5283_m15212
cextern pw_m13377_13377

pw_9929_m5283: times 4 dw 9929, -5283

COEF_PAIR 3196, 16069, 1
COEF_PAIR 6270, 15137, 1
COEF_PAIR 10394, 12665
COEF_PAIR 11003, 12140
COEF_PAIR 11585, 11585, 1
COEF_PAIR 13623, 9102, 1
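; (COEF_PAIR declares the external pw_* word-constant pairs that SUMSUB_MUL
; and friends reference by coefficient value below; the optional trailing 1
; presumably also declares the negated variants some stages need.)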
%macro VP9_STORE_2X 6-7 dstq ; reg1, reg2, tmp1, tmp2, min, max, dst
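    ; adds reg1/reg2 to two consecutive rows at dst (via tmp1/tmp2), clamping
    ; each pixel to [min, max], the valid range for the current bit depth,
    ; before writing the rows back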
    mova m%4, [%7+strideq]
    mova [%7+strideq], m%4

%macro ZERO_BLOCK 4 ; mem, stride, nnzcpl, zero_reg
    mova [%1+%%y+%%x], %4
%assign %%x (%%x+mmsize)
; the input coefficients are scaled up by 2 bits (which we downscale
; immediately in the iwht), and are otherwise orthonormally increased by 1 bit
; per iwht_1d. Therefore, a diff of 10-12+sign bits will fit in 12-14+sign
; bits after scaling, i.e. everything can be done in 15+1bpp words. Since the
; quant fractional bits add 2 bits, in 12bpp we need to scale before
; converting to words, since the input will be 16+sign bits, which doesn't fit
; in 15+sign words; but in 10bpp we can scale after converting to words (which
; takes half as many instructions), since the input is only 14+sign bits,
; which fits in 15+sign words directly.
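; To make the above concrete (a worked sketch of the same numbers): at 12bpp a
; residual is 12+sign bits, the 2 quant fractional bits make it 14+sign, and
; the 2-bit input scaling makes it 16+sign, which overflows 15+sign words, so
; the shift happens on dwords before packssdw; at 10bpp the same total is
; 10+2+2 = 14+sign bits, so packing to words first and shifting afterwards is
; safe and cheaper.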
%macro IWHT4_FN 2 ; bpp, max
cglobal vp9_iwht_iwht_4x4_add_%1, 3, 3, 8, dst, stride, block, eob
    mova m0, [blockq+0*16+0]
    mova m1, [blockq+1*16+0]
    mova m4, [blockq+0*16+8]
    mova m5, [blockq+1*16+8]
    packssdw m0, [blockq+0*16+8]
    packssdw m1, [blockq+1*16+8]
    mova m2, [blockq+2*16+0]
    mova m3, [blockq+3*16+0]
    mova m4, [blockq+2*16+8]
    mova m5, [blockq+3*16+8]
    packssdw m2, [blockq+2*16+8]
    packssdw m3, [blockq+3*16+8]
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    VP9_STORE_2X 0, 1, 4, 5, 6, 7
    lea dstq, [dstq+strideq*2]
    VP9_STORE_2X 2, 3, 4, 5, 6, 7
    ZERO_BLOCK blockq, 16, 4, m6

%macro VP9_IDCT4_WRITEOUT 0
    VP9_STORE_2X 0, 1, 6, 7, 4, 5
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 2, 3, 6, 7, 4, 5

%macro DC_ONLY 2 ; shift, zero
    mov coefd, dword [blockq]
    add coefd, ((1 << (%1 - 1)) << 14) + 8192
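    ; (this constant folds two rounding steps into one add: 8192 for the
    ; second >> 14 of the 11585 multiply, plus (1 << (%1 - 1)) pre-shifted
    ; left by 14 for the final >> %1, so a single arithmetic shift right by
    ; 14 + %1 can follow)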
; 4x4 coefficients are 5+depth+sign bits, so for 10bpp, everything still fits
; in 15+1 words without additional effort, since the coefficients are 15bpp.

cglobal vp9_idct_idct_4x4_add_10, 4, 4, 8, dst, stride, block, eob
    mova m5, [pw_11585x2]
    DEFINE_ARGS dst, stride, block, coef
    pmulhrsw m0, [pw_2048] ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
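    ; (derivation of that equivalence: x*2048 = x << 11, so pmulhrsw yields
    ; ((x << 11) + (1 << 14)) >> 15 = (x + (1 << 3)) >> 4 = (x + 8) >> 4)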
    VP9_STORE_2X 0, 0, 6, 7, 4, 5
    lea dstq, [dstq+2*strideq]
    VP9_STORE_2X 0, 0, 6, 7, 4, 5
    mova m0, [blockq+0*16+0]
    mova m1, [blockq+1*16+0]
    packssdw m0, [blockq+0*16+8]
    packssdw m1, [blockq+1*16+8]
    mova m2, [blockq+2*16+0]
    mova m3, [blockq+3*16+0]
    packssdw m2, [blockq+2*16+8]
    packssdw m3, [blockq+3*16+8]
    mova m6, [pw_11585x2]
    mova m7, [pd_8192] ; rounding
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    ZERO_BLOCK blockq, 16, 4, m4

cglobal vp9_%1_%3_4x4_add_10, 3, 3, 0, dst, stride, block, eob
%if WIN64 && notcpuflag(ssse3)
    movdqa xmm5, [pd_8192]
    mova m0, [blockq+0*16+0]
    mova m1, [blockq+1*16+0]
    packssdw m0, [blockq+0*16+8]
    packssdw m1, [blockq+1*16+8]
    mova m2, [blockq+2*16+0]
    mova m3, [blockq+3*16+0]
    packssdw m2, [blockq+2*16+8]
    packssdw m3, [blockq+3*16+8]
    mova m6, [pw_11585x2]
%ifnidn %1%3, iadstiadst
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    ZERO_BLOCK blockq, 16, 4, m4

IADST4_FN idct, IDCT4, iadst, IADST4
IADST4_FN iadst, IADST4, idct, IDCT4
IADST4_FN iadst, IADST4, iadst, IADST4

IADST4_FN idct, IDCT4, iadst, IADST4
IADST4_FN iadst, IADST4, idct, IDCT4
IADST4_FN iadst, IADST4, iadst, IADST4

; inputs and outputs are dwords, coefficients are words
; dst1 = src1 * coef1 + src2 * coef2 + rnd >> 14
; dst2 = src1 * coef2 - src2 * coef1 + rnd >> 14
%macro SUMSUB_MUL 6-8 [pd_8192], [pd_3fff] ; src/dst 1-2, tmp1-2, coef1-2, rnd, mask
    punpckhwd m%2, m%4, m%3
    pmaddwd m%3, m%4, [pw_%6_%5]
    pmaddwd m%1, m%2, [pw_%6_%5]
    pmaddwd m%4, [pw_m%5_%6]
    pmaddwd m%2, [pw_m%5_%6]
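    ; (reference sketch of the macro's per-dword-lane math in C-like
    ; pseudocode, with the default rnd of 8192 = 1 << 13:
    ;   dst1 = (src1 * coef1 + src2 * coef2 + 8192) >> 14;
    ;   dst2 = (src1 * coef2 - src2 * coef1 + 8192) >> 14;
    ; the interleaved word pairs let each pmaddwd form both products of a
    ; rotation in one instruction)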
%macro IDCT4_12BPP_1D 0-8 [pd_8192], [pd_3fff], 0, 1, 2, 3, 4, 5 ; rnd, mask, in/out0-3, tmp0-1
    SUMSUB_MUL %3, %5, %7, %8, 11585, 11585, %1, %2
    SUMSUB_MUL %4, %6, %7, %8, 15137, 6270, %1, %2
    SUMSUB_BA d, %4, %3, %7
    SUMSUB_BA d, %6, %5, %7
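    ; (the multipliers are the usual vp9 cosine constants
    ; round(16384 * cos(k*pi/64)): 11585 is k=16, 15137 is k=8, 6270 is k=24)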
%macro STORE_4x4 6 ; tmp1-2, reg1-2, min, max
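    ; loads four 4-pixel rows packed two-per-register (movh/movhps), adds the
    ; two result registers, clamps each pixel to [min, max] and scatters the
    ; rows back out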
    movh m%1, [dstq+strideq*0]
    movh m%2, [dstq+strideq*2]
    movhps m%1, [dstq+strideq*1]
    movhps m%2, [dstq+stride3q]
    movh [dstq+strideq*0], m%1
    movhps [dstq+strideq*1], m%1
    movh [dstq+strideq*2], m%2
    movhps [dstq+stride3q], m%2

%macro ROUND_AND_STORE_4x4 8 ; reg1-4, min, max, rnd, shift
    STORE_4x4 %2, %4, %1, %3, %5, %6

cglobal vp9_idct_idct_4x4_add_12, 4, 4, 8, dst, stride, block, eob
    ; dc-only - this is special, since for 4x4 12bpp, the max coef size is
    ; 17+sign bpp. Since the multiply is with 11585, which is 14bpp, the
    ; result of each multiply is 31+sign bit, i.e. it _exactly_ fits in a
    ; dword. After the final shift (4), the result is 13+sign bits, so we
    ; don't need any additional processing to fit it in a word
    DEFINE_ARGS dst, stride, block, coef
    pshuflw m0, m0, q0000
    DEFINE_ARGS dst, stride, stride3
    lea stride3q, [strideq*3]
    STORE_4x4 1, 3, 0, 0, m4, m5
    DEFINE_ARGS dst, stride, block, eob
    mova m0, [blockq+0*16]
    mova m1, [blockq+1*16]
    mova m2, [blockq+2*16]
    mova m3, [blockq+3*16]
    IDCT4_12BPP_1D m6, m7
    TRANSPOSE4x4D 0, 1, 2, 3, 4
    IDCT4_12BPP_1D m6, m7
    ZERO_BLOCK blockq, 16, 4, m4
    DEFINE_ARGS dst, stride, stride3
    lea stride3q, [strideq*3]
    ROUND_AND_STORE_4x4 0, 1, 2, 3, m4, m5, m6, 4
; out0 =  5283 * in0 + 13377 * in1 + 15212 * in2 +  9929 * in3 + rnd >> 14
; out1 =  9929 * in0 + 13377 * in1 -  5283 * in2 - 15212 * in3 + rnd >> 14
; out2 = 13377 * in0               - 13377 * in2 + 13377 * in3 + rnd >> 14
; out3 = 15212 * in0 - 13377 * in1 +  9929 * in2 -  5283 * in3 + rnd >> 14
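; (the coefficients are the vp9 iadst4 sine constants, approximately
; round(16384 * 2*sqrt(2)/3 * sin(k*pi/9)): 5283 is k=1, 9929 is k=2,
; 13377 is k=3 and 15212 is k=4)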
%macro IADST4_12BPP_1D 0-2 [pd_8192], [pd_3fff] ; rnd, mask
    SCRATCH 1, 8, rsp+0*mmsize, a
    SCRATCH 5, 9, rsp+1*mmsize, b
    ; m1/3 have the high bits of 0,1,2,3
    ; m4/5 have the low bits of 0,1,2,3
    mova m2, [pw_15212_9929]
    mova m0, [pw_5283_13377]
    pmaddwd m7, m2, reg_b
    mova m1, [pw_m13377_13377]
    mova m5, [pw_13377_0]
    pmaddwd m7, m1, reg_b
    mova m7, [pw_m5283_m15212]
    mova m5, [pw_9929_13377]
    pmaddwd m1, m7, reg_b
    UNSCRATCH 5, 9, rsp+1*mmsize, b
    pmaddwd m5, [pw_9929_m5283]
    pmaddwd m4, [pw_15212_m13377]
    pmaddwd m3, [pw_9929_m5283]
    UNSCRATCH 1, 8, rsp+0*mmsize, a
    pmaddwd m1, [pw_15212_m13377]

%macro IADST4_12BPP_FN 4
cglobal vp9_%1_%3_4x4_add_12, 3, 3, 12, 2 * ARCH_X86_32 * mmsize, dst, stride, block, eob
    mova m0, [blockq+0*16]
    mova m1, [blockq+1*16]
    mova m2, [blockq+2*16]
    mova m3, [blockq+3*16]
    PRELOAD 10, pd_8192, rnd
    PRELOAD 11, pd_3fff, mask
    %2_12BPP_1D reg_rnd, reg_mask
    TRANSPOSE4x4D 0, 1, 2, 3, 4
    %4_12BPP_1D reg_rnd, reg_mask
    ZERO_BLOCK blockq, 16, 4, m4
    DEFINE_ARGS dst, stride, stride3
    lea stride3q, [strideq*3]
    ROUND_AND_STORE_4x4 0, 1, 2, 3, m4, m5, m6, 4

IADST4_12BPP_FN idct, IDCT4, iadst, IADST4
IADST4_12BPP_FN iadst, IADST4, idct, IDCT4
IADST4_12BPP_FN iadst, IADST4, iadst, IADST4

; the following line has not been executed at the end of this macro:
; UNSCRATCH 6, 8, rsp+%3*mmsize
%macro IDCT8_1D 1-5 [pd_8192], [pd_3fff], 2 * mmsize, 17 ; src, rnd, mask, src_stride, stack_offset
    IDCT4_12BPP_1D %2, %3, 0, 2, 4, 6, 1, 3 ; m0/2/4/6 have t0/1/2/3
    SCRATCH 4, 8, rsp+(%5+0)*mmsize
    SCRATCH 6, 9, rsp+(%5+1)*mmsize
    SUMSUB_MUL 1, 7, 4, 6, 16069, 3196, %2, %3  ; m1=t7a, m7=t4a
    SUMSUB_MUL 5, 3, 4, 6, 9102, 13623, %2, %3  ; m5=t6a, m3=t5a
    SUMSUB_BA d, 3, 7, 4                        ; m3=t4, m7=t5a
    SUMSUB_BA d, 5, 1, 4                        ; m5=t7, m1=t6a
    SUMSUB_MUL 1, 7, 4, 6, 11585, 11585, %2, %3 ; m1=t6, m7=t5
    SUMSUB_BA d, 5, 0, 4                        ; m5=out0, m0=out7
    SUMSUB_BA d, 1, 2, 4                        ; m1=out1, m2=out6
    UNSCRATCH 4, 8, rsp+(%5+0)*mmsize
    UNSCRATCH 6, 9, rsp+(%5+1)*mmsize
    SCRATCH 2, 8, rsp+(%5+0)*mmsize
    SUMSUB_BA d, 7, 4, 2                        ; m7=out2, m4=out5
    SUMSUB_BA d, 3, 6, 2                        ; m3=out3, m6=out4
    SWAP 0, 5, 4, 6, 2, 7

%macro STORE_2x8 5-7 dstq, strideq ; tmp1-2, reg, min, max
; FIXME on x86-32 we could also use the intermediate storage (rsp[0-15]) for
; temp storage, instead of allocating two more stack spaces. This doesn't
; matter much but it's something...
cglobal vp9_idct_idct_8x8_add_10, 4, 6 + ARCH_X86_64, 14, \
                                  16 * mmsize + 3 * ARCH_X86_32 * mmsize, \
                                  dst, stride, block, eob
    ; dc-only - the 10bit version can be done entirely in 32bit, since the max
    ; coef values are 16+sign bit, and the coef is 14bit, so 30+sign easily
    ; fits in 32bit
    DEFINE_ARGS dst, stride, block, coef
    pshuflw m1, m1, q0000
    DEFINE_ARGS dst, stride, cnt
    STORE_2x8 3, 4, 1, m2, m0
    lea dstq, [dstq+strideq*2]
    SCRATCH 0, 12, rsp+16*mmsize, max
    DEFINE_ARGS dst, stride, block, cnt, ptr, skip, dstbak
    lea ptrq, [default_8x8]
    movzx cntd, byte [ptrq+cntq-1]
    movzx cntd, byte [default_8x8+cntq-1]
    PRELOAD 10, pd_8192, rnd
    PRELOAD 11, pd_3fff, mask
    PRELOAD 13, pd_16, srnd
    IDCT8_1D blockq, reg_rnd, reg_mask
    TRANSPOSE4x4D 0, 1, 2, 3, 6
    mova [ptrq+ 0*mmsize], m0
    mova [ptrq+ 2*mmsize], m1
    mova [ptrq+ 4*mmsize], m2
    mova [ptrq+ 6*mmsize], m3
    UNSCRATCH 6, 8, rsp+17*mmsize
    TRANSPOSE4x4D 4, 5, 6, 7, 0
    mova [ptrq+ 1*mmsize], m4
    mova [ptrq+ 3*mmsize], m5
    mova [ptrq+ 5*mmsize], m6
    mova [ptrq+ 7*mmsize], m7
    ; zero-pad the remainder (skipped cols)
    lea blockq, [blockq+skipq*(mmsize/2)]
    mova [ptrq+mmsize*0], m0
    mova [ptrq+mmsize*1], m0
    mova [ptrq+mmsize*2], m0
    mova [ptrq+mmsize*3], m0
    DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
    lea stride3q, [strideq*3]
    IDCT8_1D ptrq, reg_rnd, reg_mask
    ROUND_AND_STORE_4x4 0, 1, 2, 3, m6, reg_max, reg_srnd, 5
    lea dstq, [dstq+strideq*4]
    UNSCRATCH 0, 8, rsp+17*mmsize
    UNSCRATCH 1, 12, rsp+16*mmsize, max
    UNSCRATCH 2, 13, pd_16, srnd
    ROUND_AND_STORE_4x4 4, 5, 0, 7, m6, m1, m2, 5
    lea dstq, [dstbakq+8]
    ZERO_BLOCK blockq-2*mmsize, 32, 8, m6

%macro DC_ONLY_64BIT 2 ; shift, zero
    movsxd coefq, dword [blockq]
    add coefq, ((1 << (%1 - 1)) << 14) + 8192
    mov coefd, dword [blockq]
    DEFINE_ARGS dst, stride, cnt, coef, coefl
    add coefd, 1 << (%1 - 1)

cglobal vp9_idct_idct_8x8_add_12, 4, 6 + ARCH_X86_64, 14, \
                                  16 * mmsize + 3 * ARCH_X86_32 * mmsize, \
                                  dst, stride, block, eob
    jg mangle(private_prefix %+ _ %+ vp9_idct_idct_8x8_add_10 %+ SUFFIX).idctfull
    ; dc-only - unfortunately, this one can overflow, since coefs are 18+sign
    ; bpp, and 18+14+sign does not fit in 32bit, so we do 2-stage multiplies
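    ; (sketch of the idea: on x86-64, DC_ONLY_64BIT above just widens the
    ; multiply to a 64-bit register via movsxd; on x86-32 the coefficient is
    ; split into a low 14-bit chunk and the remaining high bits, each part is
    ; multiplied by 11585 separately, and the partial products are recombined
    ; with shifts so that no intermediate exceeds 32 bits)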
    DEFINE_ARGS dst, stride, block, coef, coefl
    pshuflw m1, m1, q0000
    DEFINE_ARGS dst, stride, cnt
    STORE_2x8 3, 4, 1, m2, m0
    lea dstq, [dstq+strideq*2]

; inputs and outputs are dwords, coefficients are words
; dst1[hi]:dst3[lo] = src1 * coef1 + src2 * coef2
; dst2[hi]:dst4[lo] = src1 * coef2 - src2 * coef1
%macro SUMSUB_MUL_D 6-7 [pd_3fff] ; src/dst 1-2, dst3-4, coef1-2, mask
    punpckhwd m%2, m%4, m%3
    pmaddwd m%3, m%4, [pw_%6_%5]
    pmaddwd m%1, m%2, [pw_%6_%5]
    pmaddwd m%4, [pw_m%5_%6]
    pmaddwd m%2, [pw_m%5_%6]

; dst1 = src2[hi]:src4[lo] + src1[hi]:src3[lo] + rnd >> 14
; dst2 = src2[hi]:src4[lo] - src1[hi]:src3[lo] + rnd >> 14
%macro SUMSUB_PACK_D 5-6 [pd_8192] ; src/dst 1-2, src3-4, tmp, rnd
    SUMSUB_BA d, %1, %2, %5
    SUMSUB_BA d, %3, %4, %5
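    ; note: the MUL_D/PACK_D pair defers rounding: SUMSUB_MUL_D leaves the
    ; 32-bit products unrounded in hi/lo register pairs, and SUMSUB_PACK_D
    ; adds rnd and shifts by 14 only after the two products have been summed,
    ; keeping full precision across the butterfly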
; the following line has not been executed at the end of this macro:
; UNSCRATCH 6, 8, rsp+17*mmsize
%macro IADST8_1D 1-3 [pd_8192], [pd_3fff] ; src, rnd, mask
    mova m0, [%1+ 0*mmsize]
    mova m3, [%1+ 6*mmsize]
    mova m4, [%1+ 8*mmsize]
    mova m7, [%1+14*mmsize]
    SUMSUB_MUL_D 7, 0, 1, 2, 16305, 1606, %3    ; m7/1=t0a, m0/2=t1a
    SUMSUB_MUL_D 3, 4, 5, 6, 10394, 12665, %3   ; m3/5=t4a, m4/6=t5a
    SCRATCH 0, 8, rsp+17*mmsize
    SUMSUB_PACK_D 3, 7, 5, 1, 0, %2             ; m3=t0, m7=t4
    UNSCRATCH 0, 8, rsp+17*mmsize
    SUMSUB_PACK_D 4, 0, 6, 2, 1, %2             ; m4=t1, m0=t5
    SCRATCH 3, 8, rsp+17*mmsize
    SCRATCH 4, 9, rsp+18*mmsize
    SCRATCH 7, 10, rsp+19*mmsize
    SCRATCH 0, 11, rsp+20*mmsize
    mova m1, [%1+ 2*mmsize]
    mova m2, [%1+ 4*mmsize]
    mova m5, [%1+10*mmsize]
    mova m6, [%1+12*mmsize]
    SUMSUB_MUL_D 5, 2, 3, 4, 14449, 7723, %3    ; m5/8=t2a, m2/9=t3a
    SUMSUB_MUL_D 1, 6, 7, 0, 4756, 15679, %3    ; m1/10=t6a, m6/11=t7a
    SCRATCH 2, 12, rsp+21*mmsize
    SUMSUB_PACK_D 1, 5, 7, 3, 2, %2             ; m1=t2, m5=t6
    UNSCRATCH 2, 12, rsp+21*mmsize
    SUMSUB_PACK_D 6, 2, 0, 4, 3, %2             ; m6=t3, m2=t7
    UNSCRATCH 7, 10, rsp+19*mmsize
    UNSCRATCH 0, 11, rsp+20*mmsize
    SCRATCH 1, 10, rsp+19*mmsize
    SCRATCH 6, 11, rsp+20*mmsize
    SUMSUB_MUL_D 7, 0, 3, 4, 15137, 6270, %3    ; m7/8=t4a, m0/9=t5a
    SUMSUB_MUL_D 2, 5, 1, 6, 6270, 15137, %3    ; m2/10=t7a, m5/11=t6a
    SCRATCH 2, 12, rsp+21*mmsize
    SUMSUB_PACK_D 5, 7, 6, 3, 2, %2             ; m5=-out1, m7=t6
    UNSCRATCH 2, 12, rsp+21*mmsize
    SUMSUB_PACK_D 2, 0, 1, 4, 3, %2             ; m2=out6, m0=t7
    SUMSUB_MUL 7, 0, 3, 4, 11585, 11585, %2, %3 ; m7=out2, m0=-out5
    UNSCRATCH 3, 8, rsp+17*mmsize
    UNSCRATCH 4, 9, rsp+18*mmsize
    UNSCRATCH 1, 10, rsp+19*mmsize
    UNSCRATCH 6, 11, rsp+20*mmsize
    SCRATCH 2, 8, rsp+17*mmsize
    SCRATCH 0, 9, rsp+18*mmsize
    SUMSUB_BA d, 1, 3, 2                        ; m1=out0, m3=t2
    SUMSUB_BA d, 6, 4, 2                        ; m6=-out7, m4=t3
    SUMSUB_MUL 3, 4, 2, 0, 11585, 11585, %2, %3 ; m3=-out3, m4=out4
    UNSCRATCH 0, 9, rsp+18*mmsize

cglobal vp9_%1_%3_8x8_add_10, 4, 6 + ARCH_X86_64, 16, \
                              16 * mmsize + ARCH_X86_32 * 6 * mmsize, \
                              dst, stride, block, eob
    SCRATCH 0, 13, rsp+16*mmsize, max
    DEFINE_ARGS dst, stride, block, cnt, ptr, skip, dstbak
    movzx cntd, byte [ptrq+cntq-1]
    movzx cntd, byte [%5_8x8+cntq-1]
    PRELOAD 14, pd_8192, rnd
    PRELOAD 15, pd_3fff, mask
    %2_1D blockq, reg_rnd, reg_mask
    TRANSPOSE4x4D 0, 1, 2, 3, 6
    mova [ptrq+ 0*mmsize], m0
    mova [ptrq+ 2*mmsize], m1
    mova [ptrq+ 4*mmsize], m2
    mova [ptrq+ 6*mmsize], m3
    UNSCRATCH 6, 8, rsp+17*mmsize
    TRANSPOSE4x4D 4, 5, 6, 7, 0
    mova [ptrq+ 1*mmsize], m4
    mova [ptrq+ 3*mmsize], m5
    mova [ptrq+ 5*mmsize], m6
    mova [ptrq+ 7*mmsize], m7
    ; zero-pad the remainder (skipped cols)
    lea blockq, [blockq+skipq*(mmsize/2)]
    mova [ptrq+mmsize*0], m0
    mova [ptrq+mmsize*1], m0
    mova [ptrq+mmsize*2], m0
    mova [ptrq+mmsize*3], m0
    DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
    lea stride3q, [strideq*3]
    %4_1D ptrq, reg_rnd, reg_mask
    PRELOAD 9, pd_16, srnd
    ROUND_AND_STORE_4x4 0, 1, 2, 3, m6, reg_max, reg_srnd, 5
    lea dstq, [dstq+strideq*4]
    UNSCRATCH 0, 8, rsp+17*mmsize
    UNSCRATCH 1, 13, rsp+16*mmsize, max
    UNSCRATCH 2, 9, pd_16, srnd
    ROUND_AND_STORE_4x4 4, 5, 0, 7, m6, m1, m2, 5
    lea dstq, [dstbakq+8]
    ZERO_BLOCK blockq-2*mmsize, 32, 8, m6

cglobal vp9_%1_%3_8x8_add_12, 4, 6 + ARCH_X86_64, 16, \
                              16 * mmsize + ARCH_X86_32 * 6 * mmsize, \
                              dst, stride, block, eob
    jmp mangle(private_prefix %+ _ %+ vp9_%1_%3_8x8_add_10 %+ SUFFIX).body

IADST8_FN idct, IDCT8, iadst, IADST8, row
IADST8_FN iadst, IADST8, idct, IDCT8, col
IADST8_FN iadst, IADST8, iadst, IADST8, default

%macro IDCT16_1D 1-4 4 * mmsize, 65, 67 ; src, src_stride, stack_offset, mm32bit_stack_offset
    IDCT8_1D %1, [pd_8192], [pd_3fff], %2 * 2, %4 ; m0-3=t0-3a, m4-5/m8|r67/m7=t4-7
    ; SCRATCH 6, 8, rsp+(%4+0)*mmsize ; t6
    SCRATCH 0, 15, rsp+(%4+7)*mmsize ; t0a
    SCRATCH 1, 14, rsp+(%4+6)*mmsize ; t1a
    SCRATCH 2, 13, rsp+(%4+5)*mmsize ; t2a
    SCRATCH 3, 12, rsp+(%4+4)*mmsize ; t3a
    SCRATCH 4, 11, rsp+(%4+3)*mmsize ; t4
    mova [rsp+(%3+0)*mmsize], m5 ; t5
    mova [rsp+(%3+1)*mmsize], m7 ; t7
    mova m0, [%1+ 1*%2] ; in1
    mova m3, [%1+ 7*%2] ; in7
    mova m4, [%1+ 9*%2] ; in9
    mova m7, [%1+15*%2] ; in15
    SUMSUB_MUL 0, 7, 1, 2, 16305, 1606  ; m0=t15a, m7=t8a
    SUMSUB_MUL 4, 3, 1, 2, 10394, 12665 ; m4=t14a, m3=t9a
    SUMSUB_BA d, 3, 7, 1                ; m3=t8, m7=t9
    SUMSUB_BA d, 4, 0, 1                ; m4=t15, m0=t14
    SUMSUB_MUL 0, 7, 1, 2, 15137, 6270  ; m0=t14a, m7=t9a
    mova m1, [%1+ 3*%2] ; in3
    mova m2, [%1+ 5*%2] ; in5
    mova m5, [%1+11*%2] ; in11
    mova m6, [%1+13*%2] ; in13
    SCRATCH 0, 9, rsp+(%4+1)*mmsize
    SCRATCH 7, 10, rsp+(%4+2)*mmsize
    SUMSUB_MUL 2, 5, 0, 7, 14449, 7723  ; m2=t13a, m5=t10a
    SUMSUB_MUL 6, 1, 0, 7, 4756, 15679  ; m6=t12a, m1=t11a
    SUMSUB_BA d, 5, 1, 0                ; m5=t11, m1=t10
    SUMSUB_BA d, 2, 6, 0                ; m2=t12, m6=t13
    SUMSUB_MUL 1, 6, 0, 7, 15137, 6270  ; m1=t13a, m6=t10a
    UNSCRATCH 7, 10, rsp+(%4+2)*mmsize
    SUMSUB_BA d, 5, 3, 0                ; m5=t8a, m3=t11a
    SUMSUB_BA d, 6, 7, 0                ; m6=t9, m7=t10
    SUMSUB_BA d, 2, 4, 0                ; m2=t15a, m4=t12a
    SCRATCH 5, 10, rsp+(%4+2)*mmsize
    SUMSUB_MUL 4, 3, 0, 5, 11585, 11585 ; m4=t12, m3=t11
    UNSCRATCH 0, 9, rsp+(%4+1)*mmsize
    SUMSUB_BA d, 1, 0, 5                ; m1=t14, m0=t13
    SCRATCH 6, 9, rsp+(%4+1)*mmsize
    SUMSUB_MUL 0, 7, 6, 5, 11585, 11585 ; m0=t13a, m7=t10a

    ; order: 15|r74,14|r73,13|r72,12|r71,11|r70,r65,8|r67,r66,10|r69,9|r68,7,3,4,0,1,2
    UNSCRATCH 5, 15, rsp+(%4+7)*mmsize
    SUMSUB_BA d, 2, 5, 6                ; m2=out0, m5=out15
    SCRATCH 5, 15, rsp+(%4+7)*mmsize
    UNSCRATCH 5, 14, rsp+(%4+6)*mmsize
    SUMSUB_BA d, 1, 5, 6                ; m1=out1, m5=out14
    SCRATCH 5, 14, rsp+(%4+6)*mmsize
    UNSCRATCH 5, 13, rsp+(%4+5)*mmsize
    SUMSUB_BA d, 0, 5, 6                ; m0=out2, m5=out13
    SCRATCH 5, 13, rsp+(%4+5)*mmsize
    UNSCRATCH 5, 12, rsp+(%4+4)*mmsize
    SUMSUB_BA d, 4, 5, 6                ; m4=out3, m5=out12
    SCRATCH 5, 12, rsp+(%4+4)*mmsize
    UNSCRATCH 5, 11, rsp+(%4+3)*mmsize
    SUMSUB_BA d, 3, 5, 6                ; m3=out4, m5=out11
    SCRATCH 4, 11, rsp+(%4+3)*mmsize
    mova m4, [rsp+(%3+0)*mmsize]
    SUMSUB_BA d, 7, 4, 6                ; m7=out5, m4=out10
    mova [rsp+(%3+0)*mmsize], m5
    UNSCRATCH 5, 8, rsp+(%4+0)*mmsize
    UNSCRATCH 6, 9, rsp+(%4+1)*mmsize
    SCRATCH 2, 8, rsp+(%4+0)*mmsize
    SCRATCH 1, 9, rsp+(%4+1)*mmsize
    UNSCRATCH 1, 10, rsp+(%4+2)*mmsize
    SCRATCH 0, 10, rsp+(%4+2)*mmsize
    mova m0, [rsp+(%3+1)*mmsize]
    SUMSUB_BA d, 6, 5, 2                ; m6=out6, m5=out9
    SUMSUB_BA d, 1, 0, 2                ; m1=out7, m0=out8
    SWAP 0, 3, 1, 7, 2, 6, 4

    ; output order: 8-11|r67-70=out0-3
    ;               12-15|r71-74=out12-15

cglobal vp9_idct_idct_16x16_add_10, 4, 6 + ARCH_X86_64, 16, \
                                    67 * mmsize + ARCH_X86_32 * 8 * mmsize, \
                                    dst, stride, block, eob
    ; dc-only - the 10bit version can be done entirely in 32bit, since the max
    ; coef values are 17+sign bit, and the coef is 14bit, so 31+sign easily
    ; fits in 32bit
    DEFINE_ARGS dst, stride, block, coef
    pshuflw m1, m1, q0000
    DEFINE_ARGS dst, stride, cnt
    STORE_2x8 3, 4, 1, m2, m0, dstq, mmsize
    STORE_2x8 3, 4, 1, m2, m0, dstq+strideq, mmsize
    lea dstq, [dstq+strideq*2]
    mova [rsp+64*mmsize], m0
    DEFINE_ARGS dst, stride, block, cnt, ptr, skip, dstbak
    lea ptrq, [default_16x16]
    movzx cntd, byte [ptrq+cntq-1]
    movzx cntd, byte [default_16x16+cntq-1]
    TRANSPOSE4x4D 0, 1, 2, 3, 7
    mova [ptrq+ 1*mmsize], m0
    mova [ptrq+ 5*mmsize], m1
    mova [ptrq+ 9*mmsize], m2
    mova [ptrq+13*mmsize], m3
    mova m7, [rsp+65*mmsize]
    TRANSPOSE4x4D 4, 5, 6, 7, 0
    mova [ptrq+ 2*mmsize], m4
    mova [ptrq+ 6*mmsize], m5
    mova [ptrq+10*mmsize], m6
    mova [ptrq+14*mmsize], m7
    UNSCRATCH 0, 8, rsp+67*mmsize
    UNSCRATCH 1, 9, rsp+68*mmsize
    UNSCRATCH 2, 10, rsp+69*mmsize
    UNSCRATCH 3, 11, rsp+70*mmsize
    TRANSPOSE4x4D 0, 1, 2, 3, 7
    mova [ptrq+ 0*mmsize], m0
    mova [ptrq+ 4*mmsize], m1
    mova [ptrq+ 8*mmsize], m2
    mova [ptrq+12*mmsize], m3
    UNSCRATCH 4, 12, rsp+71*mmsize
    UNSCRATCH 5, 13, rsp+72*mmsize
    UNSCRATCH 6, 14, rsp+73*mmsize
    UNSCRATCH 7, 15, rsp+74*mmsize
    TRANSPOSE4x4D 4, 5, 6, 7, 0
    mova [ptrq+ 3*mmsize], m4
    mova [ptrq+ 7*mmsize], m5
    mova [ptrq+11*mmsize], m6
    mova [ptrq+15*mmsize], m7
    add ptrq, 16 * mmsize
    ; zero-pad the remainder (skipped cols)
    lea blockq, [blockq+skipq*(mmsize/2)]
    mova [ptrq+mmsize*0], m0
    mova [ptrq+mmsize*1], m0
    mova [ptrq+mmsize*2], m0
    mova [ptrq+mmsize*3], m0
    mova [ptrq+mmsize*4], m0
    mova [ptrq+mmsize*5], m0
    mova [ptrq+mmsize*6], m0
    mova [ptrq+mmsize*7], m0
    add ptrq, 8 * mmsize
    DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
    lea stride3q, [strideq*3]
    lea dstq, [dstq+strideq*4]
    ROUND_AND_STORE_4x4 0, 1, 2, 3, m7, [rsp+64*mmsize], [pd_32], 6
    lea dstq, [dstq+strideq*4]
    mova m0, [rsp+65*mmsize]
    mova m1, [rsp+64*mmsize]
    ROUND_AND_STORE_4x4 4, 5, 6, 0, m7, m1, m2, 6
    DEFINE_ARGS dstbak, stride, block, cnt, ptr, stride3, dst
    UNSCRATCH 0, 8, rsp+67*mmsize
    UNSCRATCH 4, 9, rsp+68*mmsize
    UNSCRATCH 5, 10, rsp+69*mmsize
    UNSCRATCH 3, 11, rsp+70*mmsize
    ROUND_AND_STORE_4x4 0, 4, 5, 3, m7, m1, m2, 6
    DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
    lea dstq, [dstbakq+stride3q*4]
    lea dstq, [dstq+stride3q*4]
    UNSCRATCH 4, 12, rsp+71*mmsize
    UNSCRATCH 5, 13, rsp+72*mmsize
    UNSCRATCH 6, 14, rsp+73*mmsize
    UNSCRATCH 0, 15, rsp+74*mmsize
    ROUND_AND_STORE_4x4 4, 5, 6, 0, m7, m1, m2, 6
    ZERO_BLOCK blockq-4*mmsize, 64, 16, m7

cglobal vp9_idct_idct_16x16_add_12, 4, 6 + ARCH_X86_64, 16, \
                                    67 * mmsize + ARCH_X86_32 * 8 * mmsize, \
                                    dst, stride, block, eob
    jg mangle(private_prefix %+ _ %+ vp9_idct_idct_16x16_add_10 %+ SUFFIX).idctfull
    ; dc-only - unfortunately, this one can overflow, since coefs are 19+sign
    ; bpp, and 19+14+sign does not fit in 32bit, so we do 2-stage multiplies
    DEFINE_ARGS dst, stride, block, coef, coefl
    pshuflw m1, m1, q0000
    DEFINE_ARGS dst, stride, cnt
    STORE_2x8 3, 4, 1, m2, m0, dstq, mmsize
    STORE_2x8 3, 4, 1, m2, m0, dstq+strideq, mmsize
    lea dstq, [dstq+strideq*2]

; r65-69 are available for spills
; r70-77 are available on x86-32 only (x86-64 should use m8-15)
; output should be in m8-11|r70-73, m0-6,r65 and m12-15|r74-77
%macro IADST16_1D 1 ; src
    mova m0, [%1+ 0*4*mmsize] ; in0
    mova m1, [%1+ 7*4*mmsize] ; in7
    mova m2, [%1+ 8*4*mmsize] ; in8
    mova m3, [%1+15*4*mmsize] ; in15
    SUMSUB_MUL_D 3, 0, 4, 5, 16364, 804    ; m3/4=t0, m0/5=t1
    SUMSUB_MUL_D 1, 2, 6, 7, 11003, 12140  ; m1/6=t8, m2/7=t9
    SCRATCH 0, 8, rsp+70*mmsize
    SUMSUB_PACK_D 1, 3, 6, 4, 0            ; m1=t0a, m3=t8a
    UNSCRATCH 0, 8, rsp+70*mmsize
    SUMSUB_PACK_D 2, 0, 7, 5, 4            ; m2=t1a, m0=t9a
    mova [rsp+67*mmsize], m1
    SCRATCH 2, 9, rsp+71*mmsize
    SCRATCH 3, 12, rsp+74*mmsize
    SCRATCH 0, 13, rsp+75*mmsize

    mova m0, [%1+ 3*4*mmsize] ; in3
    mova m1, [%1+ 4*4*mmsize] ; in4
    mova m2, [%1+11*4*mmsize] ; in11
    mova m3, [%1+12*4*mmsize] ; in12
    SUMSUB_MUL_D 2, 1, 4, 5, 14811, 7005   ; m2/4=t4, m1/5=t5
    SUMSUB_MUL_D 0, 3, 6, 7, 5520, 15426   ; m0/6=t12, m3/7=t13
    SCRATCH 1, 10, rsp+72*mmsize
    SUMSUB_PACK_D 0, 2, 6, 4, 1            ; m0=t4a, m2=t12a
    UNSCRATCH 1, 10, rsp+72*mmsize
    SUMSUB_PACK_D 3, 1, 7, 5, 4            ; m3=t5a, m1=t13a
    SCRATCH 0, 15, rsp+77*mmsize
    SCRATCH 3, 11, rsp+73*mmsize

    UNSCRATCH 0, 12, rsp+74*mmsize ; t8a
    UNSCRATCH 3, 13, rsp+75*mmsize ; t9a
    SUMSUB_MUL_D 0, 3, 4, 5, 16069, 3196   ; m0/4=t8, m3/5=t9
    SUMSUB_MUL_D 1, 2, 6, 7, 3196, 16069   ; m1/6=t13, m2/7=t12
    SCRATCH 1, 12, rsp+74*mmsize
    SUMSUB_PACK_D 2, 0, 7, 4, 1            ; m2=t8a, m0=t12a
    UNSCRATCH 1, 12, rsp+74*mmsize
    SUMSUB_PACK_D 1, 3, 6, 5, 4            ; m1=t9a, m3=t13a
    mova [rsp+65*mmsize], m2
    mova [rsp+66*mmsize], m1
    SCRATCH 0, 8, rsp+70*mmsize
    SCRATCH 3, 12, rsp+74*mmsize

    mova m0, [%1+ 2*4*mmsize] ; in2
    mova m1, [%1+ 5*4*mmsize] ; in5
    mova m2, [%1+10*4*mmsize] ; in10
    mova m3, [%1+13*4*mmsize] ; in13
    SUMSUB_MUL_D 3, 0, 4, 5, 15893, 3981   ; m3/4=t2, m0/5=t3
    SUMSUB_MUL_D 1, 2, 6, 7, 8423, 14053   ; m1/6=t10, m2/7=t11
    SCRATCH 0, 10, rsp+72*mmsize
    SUMSUB_PACK_D 1, 3, 6, 4, 0            ; m1=t2a, m3=t10a
    UNSCRATCH 0, 10, rsp+72*mmsize
    SUMSUB_PACK_D 2, 0, 7, 5, 4            ; m2=t3a, m0=t11a
    mova [rsp+68*mmsize], m1
    mova [rsp+69*mmsize], m2
    SCRATCH 3, 13, rsp+75*mmsize
    SCRATCH 0, 14, rsp+76*mmsize

    mova m0, [%1+ 1*4*mmsize] ; in1
    mova m1, [%1+ 6*4*mmsize] ; in6
    mova m2, [%1+ 9*4*mmsize] ; in9
    mova m3, [%1+14*4*mmsize] ; in14
    SUMSUB_MUL_D 2, 1, 4, 5, 13160, 9760   ; m2/4=t6, m1/5=t7
    SUMSUB_MUL_D 0, 3, 6, 7, 2404, 16207   ; m0/6=t14, m3/7=t15
    SCRATCH 1, 10, rsp+72*mmsize
    SUMSUB_PACK_D 0, 2, 6, 4, 1            ; m0=t6a, m2=t14a
    UNSCRATCH 1, 10, rsp+72*mmsize
    SUMSUB_PACK_D 3, 1, 7, 5, 4            ; m3=t7a, m1=t15a

    UNSCRATCH 4, 13, rsp+75*mmsize ; t10a
    UNSCRATCH 5, 14, rsp+76*mmsize ; t11a
    SCRATCH 0, 13, rsp+75*mmsize
    SCRATCH 3, 14, rsp+76*mmsize
    SUMSUB_MUL_D 4, 5, 6, 7, 9102, 13623   ; m4/6=t10, m5/7=t11
    SUMSUB_MUL_D 1, 2, 0, 3, 13623, 9102   ; m1/0=t15, m2/3=t14
    SCRATCH 0, 10, rsp+72*mmsize
    SUMSUB_PACK_D 2, 4, 3, 6, 0            ; m2=t10a, m4=t14a
    UNSCRATCH 0, 10, rsp+72*mmsize
    SUMSUB_PACK_D 1, 5, 0, 7, 6            ; m1=t11a, m5=t15a

    UNSCRATCH 0, 8, rsp+70*mmsize ; t12a
    UNSCRATCH 3, 12, rsp+74*mmsize ; t13a
    SCRATCH 2, 8, rsp+70*mmsize
    SCRATCH 1, 12, rsp+74*mmsize
    SUMSUB_MUL_D 0, 3, 1, 2, 15137, 6270   ; m0/1=t12, m3/2=t13
    SUMSUB_MUL_D 5, 4, 7, 6, 6270, 15137   ; m5/7=t15, m4/6=t14
    SCRATCH 2, 10, rsp+72*mmsize
    SUMSUB_PACK_D 4, 0, 6, 1, 2            ; m4=out2, m0=t14a
    UNSCRATCH 2, 10, rsp+72*mmsize
    SUMSUB_PACK_D 5, 3, 7, 2, 1            ; m5=-out13, m3=t15a

    UNSCRATCH 1, 9, rsp+71*mmsize ; t1a
    mova m2, [rsp+68*mmsize] ; t2a
    UNSCRATCH 6, 13, rsp+75*mmsize ; t6a
    UNSCRATCH 7, 14, rsp+76*mmsize ; t7a
    SCRATCH 4, 10, rsp+72*mmsize
    SCRATCH 5, 13, rsp+75*mmsize
    UNSCRATCH 4, 15, rsp+77*mmsize ; t4a
    UNSCRATCH 5, 11, rsp+73*mmsize ; t5a
    SCRATCH 0, 14, rsp+76*mmsize
    SCRATCH 3, 15, rsp+77*mmsize
    mova m0, [rsp+67*mmsize] ; t0a
    SUMSUB_BA d, 4, 0, 3                   ; m4=t0, m0=t4
    SUMSUB_BA d, 5, 1, 3                   ; m5=t1, m1=t5
    SUMSUB_BA d, 6, 2, 3                   ; m6=t2, m2=t6
    SCRATCH 4, 9, rsp+71*mmsize
    mova m3, [rsp+69*mmsize] ; t3a
    SUMSUB_BA d, 7, 3, 4                   ; m7=t3, m3=t7

    mova [rsp+67*mmsize], m5
    mova [rsp+68*mmsize], m6
    mova [rsp+69*mmsize], m7
    SUMSUB_MUL_D 0, 1, 4, 5, 15137, 6270   ; m0/4=t4a, m1/5=t5a
    SUMSUB_MUL_D 3, 2, 7, 6, 6270, 15137   ; m3/7=t7a, m2/6=t6a
    SCRATCH 1, 11, rsp+73*mmsize
    SUMSUB_PACK_D 2, 0, 6, 4, 1            ; m2=-out3, m0=t6
    UNSCRATCH 1, 11, rsp+73*mmsize
    SUMSUB_PACK_D 3, 1, 7, 5, 4            ; m3=out12, m1=t7
    SCRATCH 2, 11, rsp+73*mmsize
    UNSCRATCH 2, 12, rsp+74*mmsize ; t11a
    SCRATCH 3, 12, rsp+74*mmsize

    UNSCRATCH 3, 8, rsp+70*mmsize ; t10a
    mova m4, [rsp+65*mmsize] ; t8a
    mova m5, [rsp+66*mmsize] ; t9a
    SUMSUB_BA d, 3, 4, 6                   ; m3=-out1, m4=t10
    SUMSUB_BA d, 2, 5, 6                   ; m2=out14, m5=t11
    UNSCRATCH 6, 9, rsp+71*mmsize ; t0
    UNSCRATCH 7, 14, rsp+76*mmsize ; t14a
    SCRATCH 3, 9, rsp+71*mmsize
    SCRATCH 2, 14, rsp+76*mmsize

    SUMSUB_MUL 1, 0, 2, 3, 11585, 11585    ; m1=out4, m0=out11
    mova [rsp+65*mmsize], m0
    SUMSUB_MUL 5, 4, 2, 3, 11585, 11585    ; m5=out6, m4=out9
    UNSCRATCH 0, 15, rsp+77*mmsize ; t15a
    SUMSUB_MUL 7, 0, 2, 3, 11585, m11585   ; m7=out10, m0=out5

    mova m2, [rsp+68*mmsize] ; t2
    SUMSUB_BA d, 2, 6, 3                   ; m2=out0, m6=t2a
    SCRATCH 2, 8, rsp+70*mmsize
    mova m2, [rsp+67*mmsize] ; t1
    mova m3, [rsp+69*mmsize] ; t3
    mova [rsp+67*mmsize], m7
    SUMSUB_BA d, 3, 2, 7                   ; m3=-out15, m2=t3a
    SCRATCH 3, 15, rsp+77*mmsize
    SUMSUB_MUL 6, 2, 7, 3, 11585, m11585   ; m6=out8, m2=out7
    mova m7, [rsp+67*mmsize]

    SWAP 2, 5, 4, 6, 7, 3

cglobal vp9_%1_%4_16x16_add_10, 4, 6 + ARCH_X86_64, 16, \
                                70 * mmsize + ARCH_X86_32 * 8 * mmsize, \
                                dst, stride, block, eob
    mova [rsp+64*mmsize], m0
    DEFINE_ARGS dst, stride, block, cnt, ptr, skip, dstbak
    lea ptrq, [%7_16x16]
    movzx cntd, byte [ptrq+cntq-1]
    movzx cntd, byte [%7_16x16+cntq-1]
    TRANSPOSE4x4D 0, 1, 2, 3, 7
    mova [ptrq+ 1*mmsize], m0
    mova [ptrq+ 5*mmsize], m1
    mova [ptrq+ 9*mmsize], m2
    mova [ptrq+13*mmsize], m3
    mova m7, [rsp+65*mmsize]
    TRANSPOSE4x4D 4, 5, 6, 7, 0
    mova [ptrq+ 2*mmsize], m4
    mova [ptrq+ 6*mmsize], m5
    mova [ptrq+10*mmsize], m6
    mova [ptrq+14*mmsize], m7
    UNSCRATCH 0, 8, rsp+(%3+0)*mmsize
    UNSCRATCH 1, 9, rsp+(%3+1)*mmsize
    UNSCRATCH 2, 10, rsp+(%3+2)*mmsize
    UNSCRATCH 3, 11, rsp+(%3+3)*mmsize
    TRANSPOSE4x4D 0, 1, 2, 3, 7
    mova [ptrq+ 0*mmsize], m0
    mova [ptrq+ 4*mmsize], m1
    mova [ptrq+ 8*mmsize], m2
    mova [ptrq+12*mmsize], m3
    UNSCRATCH 4, 12, rsp+(%3+4)*mmsize
    UNSCRATCH 5, 13, rsp+(%3+5)*mmsize
    UNSCRATCH 6, 14, rsp+(%3+6)*mmsize
    UNSCRATCH 7, 15, rsp+(%3+7)*mmsize
    TRANSPOSE4x4D 4, 5, 6, 7, 0
    mova [ptrq+ 3*mmsize], m4
    mova [ptrq+ 7*mmsize], m5
    mova [ptrq+11*mmsize], m6
    mova [ptrq+15*mmsize], m7
    add ptrq, 16 * mmsize
    ; zero-pad the remainder (skipped cols)
    lea blockq, [blockq+skipq*(mmsize/2)]
    mova [ptrq+mmsize*0], m0
    mova [ptrq+mmsize*1], m0
    mova [ptrq+mmsize*2], m0
    mova [ptrq+mmsize*3], m0
    mova [ptrq+mmsize*4], m0
    mova [ptrq+mmsize*5], m0
    mova [ptrq+mmsize*6], m0
    mova [ptrq+mmsize*7], m0
    add ptrq, 8 * mmsize
    DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
    lea stride3q, [strideq*3]
    lea dstq, [dstq+strideq*4]
    ROUND_AND_STORE_4x4 0, 1, 2, 3, m7, [rsp+64*mmsize], [pd_32], 6
    lea dstq, [dstq+strideq*4]
    mova m0, [rsp+65*mmsize]
    mova m1, [rsp+64*mmsize]
    ROUND_AND_STORE_4x4 4, 5, 6, 0, m7, m1, m2, 6
    DEFINE_ARGS dstbak, stride, block, cnt, ptr, stride3, dst
    UNSCRATCH 0, 8, rsp+(%6+0)*mmsize
    UNSCRATCH 4, 9, rsp+(%6+1)*mmsize
    UNSCRATCH 5, 10, rsp+(%6+2)*mmsize
    UNSCRATCH 3, 11, rsp+(%6+3)*mmsize
    ROUND_AND_STORE_4x4 0, 4, 5, 3, m7, m1, m2, 6
    DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
    lea dstq, [dstbakq+stride3q*4]
    lea dstq, [dstq+stride3q*4]
    UNSCRATCH 4, 12, rsp+(%6+4)*mmsize
    UNSCRATCH 5, 13, rsp+(%6+5)*mmsize
    UNSCRATCH 6, 14, rsp+(%6+6)*mmsize
    UNSCRATCH 0, 15, rsp+(%6+7)*mmsize
    ROUND_AND_STORE_4x4 4, 5, 6, 0, m7, m1, m2, 6
    ZERO_BLOCK blockq-4*mmsize, 64, 16, m7

cglobal vp9_%1_%4_16x16_add_12, 4, 6 + ARCH_X86_64, 16, \
                                70 * mmsize + ARCH_X86_32 * 8 * mmsize, \
                                dst, stride, block, eob
    jmp mangle(private_prefix %+ _ %+ vp9_%1_%4_16x16_add_10 %+ SUFFIX).body

IADST16_FN idct, IDCT16, 67, iadst, IADST16, 70, row
IADST16_FN iadst, IADST16, 70, idct, IDCT16, 67, col
IADST16_FN iadst, IADST16, 70, iadst, IADST16, 70, default

%macro IDCT32_1D 2-3 8 * mmsize ; pass[1/2], src, src_stride
    IDCT16_1D %2, 2 * %3, 272, 257
    mova [rsp+257*mmsize], m8
    mova [rsp+258*mmsize], m9
    mova [rsp+259*mmsize], m10
    mova [rsp+260*mmsize], m11
    mova [rsp+261*mmsize], m12
    mova [rsp+262*mmsize], m13
    mova [rsp+263*mmsize], m14
    mova [rsp+264*mmsize], m15
    mova [rsp+265*mmsize], m0
    mova [rsp+266*mmsize], m1
    mova [rsp+267*mmsize], m2
    mova [rsp+268*mmsize], m3
    mova [rsp+269*mmsize], m4
    mova [rsp+270*mmsize], m5
    mova [rsp+271*mmsize], m6
    ; r265-272: t4/5a/6a/7/8/9a/10/11a
    ; r261-264: t12a/13/14a/15
    ; r273-274 are free as scratch space, and r275-282 mirror m8-15 on 32bit
    mova m0, [%2+ 1*%3] ; in1
    mova m1, [%2+15*%3] ; in15
    mova m2, [%2+17*%3] ; in17
    mova m3, [%2+31*%3] ; in31
    SUMSUB_MUL 0, 3, 4, 5, 16364, 804    ; m0=t31a, m3=t16a
    SUMSUB_MUL 2, 1, 4, 5, 11003, 12140  ; m2=t30a, m1=t17a
    SUMSUB_BA d, 1, 3, 4                 ; m1=t16, m3=t17
    SUMSUB_BA d, 2, 0, 4                 ; m2=t31, m0=t30
    SUMSUB_MUL 0, 3, 4, 5, 16069, 3196   ; m0=t30a, m3=t17a
    SCRATCH 0, 8, rsp+275*mmsize
    SCRATCH 2, 9, rsp+276*mmsize

    ; end of stage 1-3 first quart

    mova m0, [%2+ 7*%3] ; in7
    mova m2, [%2+ 9*%3] ; in9
    mova m4, [%2+23*%3] ; in23
    mova m5, [%2+25*%3] ; in25
    SUMSUB_MUL 2, 4, 6, 7, 14811, 7005   ; m2=t29a, m4=t18a
    SUMSUB_MUL 5, 0, 6, 7, 5520, 15426   ; m5=t28a, m0=t19a
    SUMSUB_BA d, 4, 0, 6                 ; m4=t19, m0=t18
    SUMSUB_BA d, 2, 5, 6                 ; m2=t28, m5=t29
    SUMSUB_MUL 5, 0, 6, 7, 3196, m16069  ; m5=t29a, m0=t18a

    ; end of stage 1-3 second quart

    SUMSUB_BA d, 4, 1, 6                 ; m4=t16a, m1=t19a
    SUMSUB_BA d, 0, 3, 6                 ; m0=t17, m3=t18
    UNSCRATCH 6, 8, rsp+275*mmsize ; t30a
    UNSCRATCH 7, 9, rsp+276*mmsize ; t31
    mova [rsp+273*mmsize], m4
    mova [rsp+274*mmsize], m0
    SUMSUB_BA d, 2, 7, 0                 ; m2=t31a, m7=t28a
    SUMSUB_BA d, 5, 6, 0                 ; m5=t30, m6=t29
    SUMSUB_MUL 6, 3, 0, 4, 15137, 6270   ; m6=t29a, m3=t18a
    SUMSUB_MUL 7, 1, 0, 4, 15137, 6270   ; m7=t28, m1=t19
    SCRATCH 3, 10, rsp+277*mmsize
    SCRATCH 1, 11, rsp+278*mmsize
    SCRATCH 7, 12, rsp+279*mmsize
    SCRATCH 6, 13, rsp+280*mmsize
    SCRATCH 5, 14, rsp+281*mmsize
    SCRATCH 2, 15, rsp+282*mmsize

    ; end of stage 4-5 first half

    mova m0, [%2+ 5*%3] ; in5
    mova m1, [%2+11*%3] ; in11
    mova m2, [%2+21*%3] ; in21
    mova m3, [%2+27*%3] ; in27
    SUMSUB_MUL 0, 3, 4, 5, 15893, 3981   ; m0=t27a, m3=t20a
    SUMSUB_MUL 2, 1, 4, 5, 8423, 14053   ; m2=t26a, m1=t21a
    SUMSUB_BA d, 1, 3, 4                 ; m1=t20, m3=t21
    SUMSUB_BA d, 2, 0, 4                 ; m2=t27, m0=t26
    SUMSUB_MUL 0, 3, 4, 5, 9102, 13623   ; m0=t26a, m3=t21a
    SCRATCH 0, 8, rsp+275*mmsize
    SCRATCH 2, 9, rsp+276*mmsize

    ; end of stage 1-3 third quart

    mova m0, [%2+ 3*%3] ; in3
    mova m2, [%2+13*%3] ; in13
    mova m4, [%2+19*%3] ; in19
    mova m5, [%2+29*%3] ; in29
    SUMSUB_MUL 2, 4, 6, 7, 13160, 9760   ; m2=t25a, m4=t22a
    SUMSUB_MUL 5, 0, 6, 7, 2404, 16207   ; m5=t24a, m0=t23a
    SUMSUB_BA d, 4, 0, 6                 ; m4=t23, m0=t22
    SUMSUB_BA d, 2, 5, 6                 ; m2=t24, m5=t25
    SUMSUB_MUL 5, 0, 6, 7, 13623, m9102  ; m5=t25a, m0=t22a

    ; end of stage 1-3 fourth quart

    SUMSUB_BA d, 1, 4, 6                 ; m1=t23a, m4=t20a
    SUMSUB_BA d, 3, 0, 6                 ; m3=t22, m0=t21
    UNSCRATCH 6, 8, rsp+275*mmsize ; t26a
    UNSCRATCH 7, 9, rsp+276*mmsize ; t27
    SCRATCH 3, 8, rsp+275*mmsize
    SCRATCH 1, 9, rsp+276*mmsize
    SUMSUB_BA d, 7, 2, 1                 ; m7=t24a, m2=t27a
    SUMSUB_BA d, 6, 5, 1                 ; m6=t25, m5=t26
    SUMSUB_MUL 2, 4, 1, 3, 6270, m15137  ; m2=t27, m4=t20
    SUMSUB_MUL 5, 0, 1, 3, 6270, m15137  ; m5=t26a, m0=t21a

    ; end of stage 4-5 second half

    UNSCRATCH 1, 12, rsp+279*mmsize ; t28
    UNSCRATCH 3, 13, rsp+280*mmsize ; t29a
    SCRATCH 4, 12, rsp+279*mmsize
    SCRATCH 0, 13, rsp+280*mmsize
    SUMSUB_BA d, 5, 3, 0                 ; m5=t29, m3=t26
    SUMSUB_BA d, 2, 1, 0                 ; m2=t28a, m1=t27a
    UNSCRATCH 0, 14, rsp+281*mmsize ; t30
    UNSCRATCH 4, 15, rsp+282*mmsize ; t31a
    SCRATCH 2, 14, rsp+281*mmsize
    SCRATCH 5, 15, rsp+282*mmsize
    SUMSUB_BA d, 6, 0, 2                 ; m6=t30a, m0=t25a
    SUMSUB_BA d, 7, 4, 2                 ; m7=t31, m4=t24

    mova m2, [rsp+273*mmsize] ; t16a
    mova m5, [rsp+274*mmsize] ; t17
    mova [rsp+273*mmsize], m6
    mova [rsp+274*mmsize], m7
    UNSCRATCH 6, 10, rsp+277*mmsize ; t18a
    UNSCRATCH 7, 11, rsp+278*mmsize ; t19
    SCRATCH 4, 10, rsp+277*mmsize
    SCRATCH 0, 11, rsp+278*mmsize
    UNSCRATCH 4, 12, rsp+279*mmsize ; t20
    UNSCRATCH 0, 13, rsp+280*mmsize ; t21a
    SCRATCH 3, 12, rsp+279*mmsize
    SCRATCH 1, 13, rsp+280*mmsize
    SUMSUB_BA d, 0, 6, 1                 ; m0=t18, m6=t21
    SUMSUB_BA d, 4, 7, 1                 ; m4=t19a, m7=t20a
    UNSCRATCH 3, 8, rsp+275*mmsize ; t22
    UNSCRATCH 1, 9, rsp+276*mmsize ; t23a
    SCRATCH 0, 8, rsp+275*mmsize
    SCRATCH 4, 9, rsp+276*mmsize
    SUMSUB_BA d, 3, 5, 0                 ; m3=t17a, m5=t22a
    SUMSUB_BA d, 1, 2, 0                 ; m1=t16, m2=t23

    UNSCRATCH 0, 10, rsp+277*mmsize ; t24
    UNSCRATCH 4, 11, rsp+278*mmsize ; t25a
    SCRATCH 1, 10, rsp+277*mmsize
    SCRATCH 3, 11, rsp+278*mmsize
    SUMSUB_MUL 0, 2, 1, 3, 11585, 11585  ; m0=t24a, m2=t23a
    SUMSUB_MUL 4, 5, 1, 3, 11585, 11585  ; m4=t25, m5=t22
    UNSCRATCH 1, 12, rsp+279*mmsize ; t26
    UNSCRATCH 3, 13, rsp+280*mmsize ; t27a
    SCRATCH 0, 12, rsp+279*mmsize
    SCRATCH 4, 13, rsp+280*mmsize
    SUMSUB_MUL 3, 7, 0, 4, 11585, 11585  ; m3=t27, m7=t20
    SUMSUB_MUL 1, 6, 0, 4, 11585, 11585  ; m1=t26a, m6=t21a

    mova m0, [rsp+269*mmsize] ; t8
    mova m4, [rsp+270*mmsize] ; t9a
    mova [rsp+269*mmsize], m1 ; t26a
    mova [rsp+270*mmsize], m3 ; t27
    mova m3, [rsp+271*mmsize] ; t10
    SUMSUB_BA d, 2, 0, 1                 ; m2=out8, m0=out23
    SUMSUB_BA d, 5, 4, 1                 ; m5=out9, m4=out22
    SUMSUB_BA d, 6, 3, 1                 ; m6=out10, m3=out21
    mova m1, [rsp+272*mmsize] ; t11a
    mova [rsp+271*mmsize], m0
    SUMSUB_BA d, 7, 1, 0                 ; m7=out11, m1=out20

    TRANSPOSE4x4D 2, 5, 6, 7, 0
    mova [ptrq+ 2*mmsize], m2
    mova [ptrq+10*mmsize], m5
    mova [ptrq+18*mmsize], m6
    mova [ptrq+26*mmsize], m7
    lea dstq, [dstq+strideq*8]
    ROUND_AND_STORE_4x4 2, 5, 6, 7, m0, [rsp+256*mmsize], [pd_32], 6
    mova m2, [rsp+271*mmsize]
    TRANSPOSE4x4D 1, 3, 4, 2, 0
    mova [ptrq+ 5*mmsize], m1
    mova [ptrq+13*mmsize], m3
    mova [ptrq+21*mmsize], m4
    mova [ptrq+29*mmsize], m2
    lea dstq, [dstq+stride3q*4]
    ROUND_AND_STORE_4x4 1, 3, 4, 2, m0, [rsp+256*mmsize], [pd_32], 6

    ; end of last stage + store for out8-11 and out20-23

    UNSCRATCH 0, 9, rsp+276*mmsize ; t19a
    UNSCRATCH 1, 8, rsp+275*mmsize ; t18
    UNSCRATCH 2, 11, rsp+278*mmsize ; t17a
    UNSCRATCH 3, 10, rsp+277*mmsize ; t16
    mova m7, [rsp+261*mmsize] ; t12a
    mova m6, [rsp+262*mmsize] ; t13
    mova m5, [rsp+263*mmsize] ; t14a
    SUMSUB_BA d, 0, 7, 4                 ; m0=out12, m7=out19
    SUMSUB_BA d, 1, 6, 4                 ; m1=out13, m6=out18
    SUMSUB_BA d, 2, 5, 4                 ; m2=out14, m5=out17
    mova m4, [rsp+264*mmsize] ; t15
    SCRATCH 7, 8, rsp+275*mmsize
    SUMSUB_BA d, 3, 4, 7                 ; m3=out15, m4=out16

    TRANSPOSE4x4D 0, 1, 2, 3, 7
    mova [ptrq+ 3*mmsize], m0
    mova [ptrq+11*mmsize], m1
    mova [ptrq+19*mmsize], m2
    mova [ptrq+27*mmsize], m3
    lea dstq, [dstbakq+stride3q*4]
    lea dstq, [dstq+stride3q*4]
    ROUND_AND_STORE_4x4 0, 1, 2, 3, m7, [rsp+256*mmsize], [pd_32], 6
    UNSCRATCH 0, 8, rsp+275*mmsize ; out19
    TRANSPOSE4x4D 4, 5, 6, 0, 7
    mova [ptrq+ 4*mmsize], m4
    mova [ptrq+12*mmsize], m5
    mova [ptrq+20*mmsize], m6
    mova [ptrq+28*mmsize], m0
    lea dstq, [dstq+strideq*4]
    ROUND_AND_STORE_4x4 4, 5, 6, 0, m7, [rsp+256*mmsize], [pd_32], 6

    ; end of last stage + store for out12-19

    mova m7, [rsp+257*mmsize] ; t0
    mova m6, [rsp+258*mmsize] ; t1
    mova m5, [rsp+259*mmsize] ; t2
    mova m4, [rsp+260*mmsize] ; t3
    mova m0, [rsp+274*mmsize] ; t31
    mova m1, [rsp+273*mmsize] ; t30a
    UNSCRATCH 2, 15, rsp+282*mmsize ; t29
    SUMSUB_BA d, 0, 7, 3                 ; m0=out0, m7=out31
    SUMSUB_BA d, 1, 6, 3                 ; m1=out1, m6=out30
    SUMSUB_BA d, 2, 5, 3                 ; m2=out2, m5=out29
    SCRATCH 0, 9, rsp+276*mmsize
    UNSCRATCH 3, 14, rsp+281*mmsize ; t28a
    SUMSUB_BA d, 3, 4, 0                 ; m3=out3, m4=out28

    TRANSPOSE4x4D 4, 5, 6, 7, 0
    mova [ptrq+ 7*mmsize], m4
    mova [ptrq+15*mmsize], m5
    mova [ptrq+23*mmsize], m6
    mova [ptrq+31*mmsize], m7
    lea dstq, [dstq+stride3q*4]
    ROUND_AND_STORE_4x4 4, 5, 6, 7, m0, [rsp+256*mmsize], [pd_32], 6
    UNSCRATCH 7, 9, rsp+276*mmsize ; out0
    TRANSPOSE4x4D 7, 1, 2, 3, 0
    mova [ptrq+ 0*mmsize], m7
    mova [ptrq+ 8*mmsize], m1
    mova [ptrq+16*mmsize], m2
    mova [ptrq+24*mmsize], m3
    DEFINE_ARGS dstbak, stride, block, cnt, ptr, stride3, dst
    ROUND_AND_STORE_4x4 7, 1, 2, 3, m0, [rsp+256*mmsize], [pd_32], 6
    DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak

    ; end of last stage + store for out0-3 and out28-31

    mova m7, [rsp+265*mmsize] ; t4
    mova m6, [rsp+266*mmsize] ; t5a
    mova m5, [rsp+267*mmsize] ; t6a
    mova m4, [rsp+268*mmsize] ; t7
    mova m0, [rsp+270*mmsize] ; t27
    mova m1, [rsp+269*mmsize] ; t26a
    UNSCRATCH 2, 13, rsp+280*mmsize ; t25
    SUMSUB_BA d, 0, 7, 3                 ; m0=out4, m7=out27
    SUMSUB_BA d, 1, 6, 3                 ; m1=out5, m6=out26
    SUMSUB_BA d, 2, 5, 3                 ; m2=out6, m5=out25
    UNSCRATCH 3, 12, rsp+279*mmsize ; t24a
    SCRATCH 7, 9, rsp+276*mmsize
    SUMSUB_BA d, 3, 4, 7                 ; m3=out7, m4=out24

    TRANSPOSE4x4D 0, 1, 2, 3, 7
    mova [ptrq+ 1*mmsize], m0
    mova [ptrq+ 9*mmsize], m1
    mova [ptrq+17*mmsize], m2
    mova [ptrq+25*mmsize], m3
    lea dstq, [dstbakq+strideq*4]
    lea dstq, [dstq+strideq*4]
    ROUND_AND_STORE_4x4 0, 1, 2, 3, m7, [rsp+256*mmsize], [pd_32], 6
    UNSCRATCH 0, 9, rsp+276*mmsize ; out27
    TRANSPOSE4x4D 4, 5, 6, 0, 7
    mova [ptrq+ 6*mmsize], m4
    mova [ptrq+14*mmsize], m5
    mova [ptrq+22*mmsize], m6
    mova [ptrq+30*mmsize], m0
    lea dstq, [dstbakq+stride3q*8]
    lea dstq, [dstq+stride3q*8]
    ROUND_AND_STORE_4x4 4, 5, 6, 0, m7, [rsp+256*mmsize], [pd_32], 6

    ; end of last stage + store for out4-7 and out24-27

cglobal vp9_idct_idct_32x32_add_10, 4, 6 + ARCH_X86_64, 16, \
                                    275 * mmsize + ARCH_X86_32 * 8 * mmsize, \
                                    dst, stride, block, eob
    ; dc-only - the 10bit version can be done entirely in 32bit, since the max
    ; coef values are 17+sign bit, and the coef is 14bit, so 31+sign easily
    ; fits in 32bit
    DEFINE_ARGS dst, stride, block, coef
    pshuflw m1, m1, q0000
    DEFINE_ARGS dst, stride, cnt
    STORE_2x8 3, 4, 1, m2, m0, dstq, mmsize
    STORE_2x8 3, 4, 1, m2, m0, dstq+mmsize*2, mmsize
    mova [rsp+256*mmsize], m0
    DEFINE_ARGS dst, stride, block, cnt, ptr, skip, dstbak
    lea ptrq, [default_32x32]
    movzx cntd, byte [ptrq+cntq-1]
    movzx cntd, byte [default_32x32+cntq-1]
    add ptrq, 32 * mmsize
    ; zero-pad the remainder (skipped cols)
    lea blockq, [blockq+skipq*(mmsize/4)]
    mova [ptrq+mmsize*0], m0
    mova [ptrq+mmsize*1], m0
    mova [ptrq+mmsize*2], m0
    mova [ptrq+mmsize*3], m0
    mova [ptrq+mmsize*4], m0
    mova [ptrq+mmsize*5], m0
    mova [ptrq+mmsize*6], m0
    mova [ptrq+mmsize*7], m0
    add ptrq, 8 * mmsize
    DEFINE_ARGS dst, stride, block, cnt, ptr, stride3, dstbak
    lea stride3q, [strideq*3]
    ZERO_BLOCK blockq-8*mmsize, 128, 32, m7

cglobal vp9_idct_idct_32x32_add_12, 4, 6 + ARCH_X86_64, 16, \
                                    275 * mmsize + ARCH_X86_32 * 8 * mmsize, \
                                    dst, stride, block, eob
    jg mangle(private_prefix %+ _ %+ vp9_idct_idct_32x32_add_10 %+ SUFFIX).idctfull
    ; dc-only - unfortunately, this one can overflow, since coefs are 19+sign
    ; bpp, and 19+14+sign does not fit in 32bit, so we do 2-stage multiplies
    DEFINE_ARGS dst, stride, block, coef, coefl
    pshuflw m1, m1, q0000
    DEFINE_ARGS dst, stride, cnt
    STORE_2x8 3, 4, 1, m2, m0, dstq, mmsize
    STORE_2x8 3, 4, 1, m2, m0, dstq+mmsize*2, mmsize