1 ;******************************************************************************
2 ;* VP8 MMXEXT optimizations
3 ;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
4 ;* Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com>
6 ;* This file is part of Libav.
8 ;* Libav is free software; you can redistribute it and/or
9 ;* modify it under the terms of the GNU Lesser General Public
10 ;* License as published by the Free Software Foundation; either
11 ;* version 2.1 of the License, or (at your option) any later version.
13 ;* Libav is distributed in the hope that it will be useful,
14 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 ;* Lesser General Public License for more details.
18 ;* You should have received a copy of the GNU Lesser General Public
19 ;* License along with Libav; if not, write to the Free Software
20 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 ;******************************************************************************
24 %include "x86util.asm"
; Subpel filter coefficient tables (SPARSE EXTRACT: only the first row of
; each `times`-replicated table and none of the surrounding section/%if PIC
; directives are visible here — the remaining coefficient rows exist in the
; full file).
;   *_hw = horizontal filter taps stored as words (for pmaddwd/pmullw paths)
;   *_hb = taps packed as signed bytes (for the SSSE3 pmaddubsw path)
;   *_v  = vertical filter taps as words
;   bilinear_* = bilinear interpolation weights (words / bytes)
28 fourtap_filter_hw_m: times 4 dw -6, 123
37 sixtap_filter_hw_m: times 4 dw 2, -11
47 fourtap_filter_hb_m: times 8 db -6, 123
56 sixtap_filter_hb_m: times 8 db 2, 1
66 fourtap_filter_v_m: times 8 dw -6
83 sixtap_filter_v_m: times 8 dw 2
102 bilinear_filter_vw_m: times 8 dw 1
110 bilinear_filter_vb_m: times 8 db 7, 1
; PIC builds: tables are addressed through picregq (each function does a
; `lea picregq, [..._m]` first); the non-PIC branch below aliases the short
; names straight to the *_m labels. NOTE(review): the %if PIC / %else /
; %endif lines around these two groups are not visible in this extract.
119 %define fourtap_filter_hw picregq
120 %define sixtap_filter_hw picregq
121 %define fourtap_filter_hb picregq
122 %define sixtap_filter_hb picregq
123 %define fourtap_filter_v picregq
124 %define sixtap_filter_v picregq
125 %define bilinear_filter_vw picregq
126 %define bilinear_filter_vb picregq
129 %define fourtap_filter_hw fourtap_filter_hw_m
130 %define sixtap_filter_hw sixtap_filter_hw_m
131 %define fourtap_filter_hb fourtap_filter_hb_m
132 %define sixtap_filter_hb sixtap_filter_hb_m
133 %define fourtap_filter_v fourtap_filter_v_m
134 %define sixtap_filter_v sixtap_filter_v_m
135 %define bilinear_filter_vw bilinear_filter_vw_m
136 %define bilinear_filter_vb bilinear_filter_vb_m
; pshufb control masks: each pairs neighbouring source bytes so that one
; pmaddubsw applies two filter taps per output pixel.
140 filter_h2_shuf: db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
141 filter_h4_shuf: db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
143 filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
144 filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
145 filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11
; pw_256: pmulhrsw by 256 == (x + 32) >> 6, the filter rounding step.
147 pw_256: times 8 dw 256
; IDCT rotation constants (see VP8_MULTIPLY_SUMSUB below): 20091 and
; 17734 (= 35468 / 2) in pmulhw fixed-point form.
149 pw_20091: times 4 dw 20091
150 pw_17734: times 4 dw 17734
; mbedge loop-filter weight pairs (a*27, a*18, a*9 over 63).
152 pb_27_63: times 8 db 27, 63
153 pb_18_63: times 8 db 18, 63
154 pb_9_63: times 8 db 9, 63
172 ;-----------------------------------------------------------------------------
173 ; subpel MC functions:
175 ; void put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, int deststride,
176 ; uint8_t *src, int srcstride,
177 ; int height, int mx, int my);
178 ;-----------------------------------------------------------------------------
180 %macro FILTER_SSSE3 1
181 cglobal put_vp8_epel%1_h6, 6, 6 + npicregs, 8, dst, dststride, src, srcstride, height, mx, picreg
183 mova m3, [filter_h6_shuf2]
184 mova m4, [filter_h6_shuf3]
186 lea picregq, [sixtap_filter_hb_m]
188 mova m5, [sixtap_filter_hb+mxq*8-48] ; set up 6tap filter in bytes
189 mova m6, [sixtap_filter_hb+mxq*8-32]
190 mova m7, [sixtap_filter_hb+mxq*8-16]
197 ; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
198 ; shuffle with a memory operand
199 punpcklbw m0, [srcq+3]
201 pshufb m0, [filter_h6_shuf1]
210 pmulhrsw m0, [pw_256]
212 movh [dstq], m0 ; store
217 dec heightd ; next row
221 cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
224 mova m3, [filter_h2_shuf]
225 mova m4, [filter_h4_shuf]
227 lea picregq, [fourtap_filter_hb_m]
229 mova m5, [fourtap_filter_hb+mxq-16] ; set up 4tap filter in bytes
230 mova m6, [fourtap_filter_hb+mxq]
242 movh [dstq], m0 ; store
247 dec heightd ; next row
251 cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
254 lea picregq, [fourtap_filter_hb_m]
256 mova m5, [fourtap_filter_hb+myq-16]
257 mova m6, [fourtap_filter_hb+myq]
263 movh m1, [srcq+ srcstrideq]
264 movh m2, [srcq+2*srcstrideq]
268 movh m3, [srcq+2*srcstrideq] ; read new row
285 dec heightd ; next row
289 cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
292 lea picregq, [sixtap_filter_hb_m]
294 lea myq, [sixtap_filter_hb+myq*8]
300 movh m1, [srcq+srcstrideq]
301 movh m2, [srcq+srcstrideq*2]
302 lea srcq, [srcq+srcstrideq*2]
305 movh m4, [srcq+srcstrideq]
308 movh m5, [srcq+2*srcstrideq] ; read new row
315 pmaddubsw m6, [myq-48]
316 pmaddubsw m1, [myq-32]
317 pmaddubsw m7, [myq-16]
322 pmulhrsw m6, [pw_256]
331 dec heightd ; next row
341 ; 4x4 block, H-only 4-tap filter
; MMX(EXT) version: word-format taps + pmaddwd, two output pixels per
; multiply. SPARSE EXTRACT: loop label, m6 zeroing / m7 rounding-constant
; setup and pointer advances are not visible here.
343 cglobal put_vp8_epel4_h4, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
346 lea picregq, [fourtap_filter_hw_m]
348 movq mm4, [fourtap_filter_hw+mxq-16] ; set up 4tap filter in words
349 movq mm5, [fourtap_filter_hw+mxq]
354 movq mm1, [srcq-1] ; (ABCDEFGH) load 8 horizontal pixels
356 ; first set of 2 pixels
357 movq mm2, mm1 ; byte ABCD..
358 punpcklbw mm1, mm6 ; byte->word ABCD (mm6 assumed zero — set up above, not visible)
359 pshufw mm0, mm2, 9 ; byte CDEF..
360 punpcklbw mm0, mm6 ; byte->word CDEF
361 pshufw mm3, mm1, 0x94 ; word ABBC
362 pshufw mm1, mm0, 0x94 ; word CDDE
363 pmaddwd mm3, mm4 ; multiply 2px with F0/F1
364 movq mm0, mm1 ; backup for second set of pixels
365 pmaddwd mm1, mm5 ; multiply 2px with F2/F3
366 paddd mm3, mm1 ; finish 1st 2px
368 ; second set of 2 pixels, use backup of above
369 punpckhbw mm2, mm6 ; byte->word EFGH
370 pmaddwd mm0, mm4 ; multiply backed up 2px with F0/F1
371 pshufw mm1, mm2, 0x94 ; word EFFG
372 pmaddwd mm1, mm5 ; multiply 2px with F2/F3
373 paddd mm0, mm1 ; finish 2nd 2px
375 ; merge two sets of 2 pixels into one set of 4, round/clip/store
376 packssdw mm3, mm0 ; merge dword->word (4px)
377 paddsw mm3, mm7 ; rounding (mm7 holds the round constant — setup not visible)
379 packuswb mm3, mm6 ; clip and word->bytes
380 movd [dstq], mm3 ; store
385 dec heightd ; next row
389 ; 4x4 block, H-only 6-tap filter
; MMX(EXT) version, word taps via pmaddwd. SPARSE EXTRACT: loop label,
; m3/m7 constant setup and pointer advances are not visible here.
391 cglobal put_vp8_epel4_h6, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
394 lea picregq, [sixtap_filter_hw_m]
396 movq mm4, [sixtap_filter_hw+mxq*8-48] ; set up 6tap filter in words
397 movq mm5, [sixtap_filter_hw+mxq*8-32]
398 movq mm6, [sixtap_filter_hw+mxq*8-16]
403 movq mm1, [srcq-2] ; (ABCDEFGH) load 8 horizontal pixels
405 ; first set of 2 pixels
406 movq mm2, mm1 ; byte ABCD..
407 punpcklbw mm1, mm3 ; byte->word ABCD (mm3 assumed zero here — setup not visible)
408 pshufw mm0, mm2, 0x9 ; byte CDEF..
409 punpckhbw mm2, mm3 ; byte->word EFGH
410 punpcklbw mm0, mm3 ; byte->word CDEF
411 pshufw mm1, mm1, 0x94 ; word ABBC
412 pshufw mm2, mm2, 0x94 ; word EFFG
413 pmaddwd mm1, mm4 ; multiply 2px with F0/F1
414 pshufw mm3, mm0, 0x94 ; word CDDE
415 movq mm0, mm3 ; backup for second set of pixels
416 pmaddwd mm3, mm5 ; multiply 2px with F2/F3
417 paddd mm1, mm3 ; add to 1st 2px cache
418 movq mm3, mm2 ; backup for second set of pixels
419 pmaddwd mm2, mm6 ; multiply 2px with F4/F5
420 paddd mm1, mm2 ; finish 1st 2px
422 ; second set of 2 pixels, use backup of above
423 movd mm2, [srcq+3] ; byte FGHI (prevent overreads)
424 pmaddwd mm0, mm4 ; multiply 1st backed up 2px with F0/F1
425 pmaddwd mm3, mm5 ; multiply 2nd backed up 2px with F2/F3
426 paddd mm0, mm3 ; add to 2nd 2px cache
428 punpcklbw mm2, mm3 ; byte->word FGHI (mm3 assumed re-zeroed — not visible)
429 pshufw mm2, mm2, 0xE9 ; word GHHI
430 pmaddwd mm2, mm6 ; multiply 2px with F4/F5
431 paddd mm0, mm2 ; finish 2nd 2px
433 ; merge two sets of 2 pixels into one set of 4, round/clip/store
434 packssdw mm1, mm0 ; merge dword->word (4px)
435 paddsw mm1, mm7 ; rounding (mm7 setup not visible)
437 packuswb mm1, mm3 ; clip and word->bytes
438 movd [dstq], mm1 ; store
443 dec heightd ; next row
; SSE2 8-wide horizontal and %1-wide vertical subpel filters. SPARSE
; EXTRACT: the entire arithmetic cores, loop labels and %endmacro lines are
; missing from this view; only entry points, table setup and a few
; loads/stores remain.
; NOTE(review): the h4/h6 variants index the *_v word tables — presumably
; because the word-tap layout is shared between the SSE2 h and v paths;
; confirm against the full file.
448 cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, height, mx, picreg
451 lea picregq, [fourtap_filter_v_m]
453 lea mxq, [fourtap_filter_v+mxq-32] ; mxq becomes a direct tap-table pointer
486 movh [dstq], m0 ; store
491 dec heightd ; next row
496 cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, height, mx, picreg
500 lea picregq, [sixtap_filter_v_m]
502 lea mxq, [sixtap_filter_v+mxq-96]
549 movh [dstq], m0 ; store
554 dec heightd ; next row
559 ; 4x4 block, V-only 4-tap filter
560 cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
563 lea picregq, [fourtap_filter_v_m]
565 lea myq, [fourtap_filter_v+myq-32]
573 movh m1, [srcq+ srcstrideq]
574 movh m2, [srcq+2*srcstrideq]
581 ; first calculate negative taps (to prevent losing positive overflows)
582 movh m4, [srcq+2*srcstrideq] ; read new row
589 ; then calculate positive taps
607 dec heightd ; next row
612 ; 4x4 block, V-only 6-tap filter
613 cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
617 lea picregq, [sixtap_filter_v_m]
619 lea myq, [sixtap_filter_v+myq-96]
626 movh m1, [srcq+srcstrideq]
627 movh m2, [srcq+srcstrideq*2]
628 lea srcq, [srcq+srcstrideq*2]
631 movh m4, [srcq+srcstrideq]
639 ; first calculate negative taps (to prevent losing positive overflows)
646 ; then calculate positive taps
647 movh m5, [srcq+2*srcstrideq] ; read new row
672 dec heightd ; next row
; Bilinear MC, word-weight (pmullw-style) version; %1 = block width.
; Processes two rows per iteration. SPARSE EXTRACT: the multiply/round
; core, the %if %1==4 branches around the stores, loop labels, RET and
; %endmacro are not visible here.
682 %macro FILTER_BILINEAR 1
683 cglobal put_vp8_bilinear%1_v, 7, 7, 7, dst, dststride, src, srcstride, height, picreg, my
686 lea picregq, [bilinear_filter_vw_m]
; weight pair selected by my: (8 - my) and my rows of the table.
689 mova m5, [bilinear_filter_vw+myq-1*16]
691 mova m4, [bilinear_filter_vw+myq+7*16]
693 movh m0, [srcq+srcstrideq*0]
694 movh m1, [srcq+srcstrideq*1]
695 movh m3, [srcq+srcstrideq*2]
; narrow (width 4) store path:
713 movh [dstq+dststrideq*0], m0
714 movh [dstq+dststrideq*1], m2
; wide store path (both rows packed in m0):
717 movh [dstq+dststrideq*0], m0
718 movhps [dstq+dststrideq*1], m0
721 lea dstq, [dstq+dststrideq*2]
722 lea srcq, [srcq+srcstrideq*2]
; horizontal variant: same weighting applied to px[x] / px[x+1] pairs.
727 cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
730 lea picregq, [bilinear_filter_vw_m]
733 mova m5, [bilinear_filter_vw+mxq-1*16]
735 mova m4, [bilinear_filter_vw+mxq+7*16]
737 movh m0, [srcq+srcstrideq*0+0]
738 movh m1, [srcq+srcstrideq*0+1]
739 movh m2, [srcq+srcstrideq*1+0]
740 movh m3, [srcq+srcstrideq*1+1]
758 movh [dstq+dststrideq*0], m0
759 movh [dstq+dststrideq*1], m2
762 movh [dstq+dststrideq*0], m0
763 movhps [dstq+dststrideq*1], m0
766 lea dstq, [dstq+dststrideq*2]
767 lea srcq, [srcq+srcstrideq*2]
; SSSE3 bilinear MC: byte-packed weight pair + pmaddubsw, one table load
; instead of two. SPARSE EXTRACT: multiply/round core, store-path
; branches, loop labels and %endmacro are not visible here.
778 %macro FILTER_BILINEAR_SSSE3 1
779 cglobal put_vp8_bilinear%1_v, 7, 7, 5, dst, dststride, src, srcstride, height, picreg, my
782 lea picregq, [bilinear_filter_vb_m]
785 mova m3, [bilinear_filter_vb+myq-16] ; packed (8-my, my) byte weights
787 movh m0, [srcq+srcstrideq*0]
788 movh m1, [srcq+srcstrideq*1]
789 movh m2, [srcq+srcstrideq*2]
801 movh [dstq+dststrideq*0], m0
802 movh [dstq+dststrideq*1], m1
805 movh [dstq+dststrideq*0], m0
806 movhps [dstq+dststrideq*1], m0
809 lea dstq, [dstq+dststrideq*2]
810 lea srcq, [srcq+srcstrideq*2]
; horizontal variant: filter_h2_shuf pairs px[x]/px[x+1] for pmaddubsw.
815 cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 5, dst, dststride, src, srcstride, height, mx, picreg
818 lea picregq, [bilinear_filter_vb_m]
821 mova m2, [filter_h2_shuf]
822 mova m3, [bilinear_filter_vb+mxq-16]
824 movu m0, [srcq+srcstrideq*0]
825 movu m1, [srcq+srcstrideq*1]
837 movh [dstq+dststrideq*0], m0
838 movh [dstq+dststrideq*1], m1
841 movh [dstq+dststrideq*0], m0
842 movhps [dstq+dststrideq*1], m0
845 lea dstq, [dstq+dststrideq*2]
846 lea srcq, [srcq+srcstrideq*2]
; instantiate 4- and 8-wide versions.
853 FILTER_BILINEAR_SSSE3 4
855 FILTER_BILINEAR_SSSE3 8
; Plain 8-wide copy (no filtering), two rows per iteration, MMX.
; SPARSE EXTRACT: loop label, height decrement/branch and RET not visible.
858 cglobal put_vp8_pixels8, 5, 5, 0, dst, dststride, src, srcstride, height
860 movq mm0, [srcq+srcstrideq*0]
861 movq mm1, [srcq+srcstrideq*1]
862 lea srcq, [srcq+srcstrideq*2]
863 movq [dstq+dststrideq*0], mm0
864 movq [dstq+dststrideq*1], mm1
865 lea dstq, [dstq+dststrideq*2]
; 16-wide copy, MMX flavour: two 8-byte halves per row, two rows per
; iteration. SPARSE EXTRACT: loop label and height counter not visible.
872 cglobal put_vp8_pixels16, 5, 5, 0, dst, dststride, src, srcstride, height
874 movq mm0, [srcq+srcstrideq*0+0]
875 movq mm1, [srcq+srcstrideq*0+8]
876 movq mm2, [srcq+srcstrideq*1+0]
877 movq mm3, [srcq+srcstrideq*1+8]
878 lea srcq, [srcq+srcstrideq*2]
879 movq [dstq+dststrideq*0+0], mm0
880 movq [dstq+dststrideq*0+8], mm1
881 movq [dstq+dststrideq*1+0], mm2
882 movq [dstq+dststrideq*1+8], mm3
883 lea dstq, [dstq+dststrideq*2]
; 16-wide copy, SSE flavour. Loads are unaligned (movups) since src has no
; alignment guarantee; stores use movaps — dst is presumably 16-aligned in
; this codec path (TODO confirm against callers). SPARSE EXTRACT: loop
; label and height counter not visible.
890 cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
892 movups xmm0, [srcq+srcstrideq*0]
893 movups xmm1, [srcq+srcstrideq*1]
894 lea srcq, [srcq+srcstrideq*2]
895 movaps [dstq+dststrideq*0], xmm0
896 movaps [dstq+dststrideq*1], xmm1
897 lea dstq, [dstq+dststrideq*2]
902 ;-----------------------------------------------------------------------------
903 ; void vp8_idct_dc_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
904 ;-----------------------------------------------------------------------------
908 %4 m3, [dst1q+strideq+%3]
910 %4 m5, [dst2q+strideq+%3]
920 %4 [dst1q+strideq+%3], m3
922 %4 [dst2q+strideq+%3], m5
926 cglobal vp8_idct_dc_add, 3, 3, 0, dst, block, stride
944 DEFINE_ARGS dst1, dst2, stride
945 lea dst2q, [dst1q+strideq*2]
946 ADD_DC m0, m1, 0, movh
950 cglobal vp8_idct_dc_add, 3, 3, 6, dst, block, stride
958 DEFINE_ARGS dst1, dst2, stride
959 lea dst2q, [dst1q+strideq*2]
961 movd m3, [dst1q+strideq]
963 movd m5, [dst2q+strideq]
975 pextrd [dst1q+strideq], m2, 1
976 pextrd [dst2q], m2, 2
977 pextrd [dst2q+strideq], m2, 3
980 ;-----------------------------------------------------------------------------
981 ; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
982 ;-----------------------------------------------------------------------------
986 cglobal vp8_idct_dc_add4y, 3, 3, 0, dst, block, stride
988 movd m0, [blockq+32*0] ; A
989 movd m1, [blockq+32*2] ; C
990 punpcklwd m0, [blockq+32*1] ; A B
991 punpcklwd m1, [blockq+32*3] ; C D
992 punpckldq m0, m1 ; A B C D
997 movd [blockq+32*0], m6
998 movd [blockq+32*1], m6
999 movd [blockq+32*2], m6
1000 movd [blockq+32*3], m6
1005 punpcklbw m0, m0 ; AABBCCDD
1006 punpcklbw m6, m6 ; AABBCCDD
1009 punpcklbw m0, m0 ; AAAABBBB
1010 punpckhbw m1, m1 ; CCCCDDDD
1011 punpcklbw m6, m6 ; AAAABBBB
1012 punpckhbw m7, m7 ; CCCCDDDD
1015 DEFINE_ARGS dst1, dst2, stride
1016 lea dst2q, [dst1q+strideq*2]
1017 ADD_DC m0, m6, 0, mova
1018 ADD_DC m1, m7, 8, mova
1023 cglobal vp8_idct_dc_add4y, 3, 3, 6, dst, block, stride
1025 movd m0, [blockq+32*0] ; A
1026 movd m1, [blockq+32*2] ; C
1027 punpcklwd m0, [blockq+32*1] ; A B
1028 punpcklwd m1, [blockq+32*3] ; C D
1029 punpckldq m0, m1 ; A B C D
1034 movd [blockq+32*0], m1
1035 movd [blockq+32*1], m1
1036 movd [blockq+32*2], m1
1037 movd [blockq+32*3], m1
1048 DEFINE_ARGS dst1, dst2, stride
1049 lea dst2q, [dst1q+strideq*2]
1050 ADD_DC m0, m1, 0, mova
1053 ;-----------------------------------------------------------------------------
1054 ; void vp8_idct_dc_add4uv_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
1055 ;-----------------------------------------------------------------------------
1058 cglobal vp8_idct_dc_add4uv, 3, 3, 0, dst, block, stride
1060 movd m0, [blockq+32*0] ; A
1061 movd m1, [blockq+32*2] ; C
1062 punpcklwd m0, [blockq+32*1] ; A B
1063 punpcklwd m1, [blockq+32*3] ; C D
1064 punpckldq m0, m1 ; A B C D
1069 movd [blockq+32*0], m6
1070 movd [blockq+32*1], m6
1071 movd [blockq+32*2], m6
1072 movd [blockq+32*3], m6
1077 punpcklbw m0, m0 ; AABBCCDD
1078 punpcklbw m6, m6 ; AABBCCDD
1081 punpcklbw m0, m0 ; AAAABBBB
1082 punpckhbw m1, m1 ; CCCCDDDD
1083 punpcklbw m6, m6 ; AAAABBBB
1084 punpckhbw m7, m7 ; CCCCDDDD
1087 DEFINE_ARGS dst1, dst2, stride
1088 lea dst2q, [dst1q+strideq*2]
1089 ADD_DC m0, m6, 0, mova
1090 lea dst1q, [dst1q+strideq*4]
1091 lea dst2q, [dst2q+strideq*4]
1092 ADD_DC m1, m7, 0, mova
1095 ;-----------------------------------------------------------------------------
1096 ; void vp8_idct_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
1097 ;-----------------------------------------------------------------------------
1099 ; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
1100 ; this macro assumes that m6/m7 have words for 20091/17734 loaded
1101 %macro VP8_MULTIPLY_SUMSUB 4
1104 pmulhw %3, m6 ;20091(1)
1105 pmulhw %4, m6 ;20091(2)
1110 pmulhw %1, m7 ;35468(1)
1111 pmulhw %2, m7 ;35468(2)
1116 ; calculate x0=%1+%3; x1=%1-%3
1117 ; x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
1118 ; %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
1119 ; %5/%6 are temporary registers
1120 ; we assume m6/m7 have constant words 20091/17734 loaded in them
1121 %macro VP8_IDCT_TRANSFORM4x4_1D 6
1122 SUMSUB_BA w, %3, %1, %5 ;t0, t1
1123 VP8_MULTIPLY_SUMSUB m%2, m%4, m%5,m%6 ;t2, t3
1124 SUMSUB_BA w, %4, %3, %5 ;tmp0, tmp3
1125 SUMSUB_BA w, %2, %1, %5 ;tmp1, tmp2
1130 %macro VP8_IDCT_ADD 0
1131 cglobal vp8_idct_add, 3, 3, 0, dst, block, stride
1133 movq m0, [blockq+ 0]
1134 movq m1, [blockq+ 8]
1135 movq m2, [blockq+16]
1136 movq m3, [blockq+24]
1141 movaps [blockq+ 0], xmm0
1142 movaps [blockq+16], xmm0
1145 movq [blockq+ 0], m4
1146 movq [blockq+ 8], m4
1147 movq [blockq+16], m4
1148 movq [blockq+24], m4
1152 VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
1153 TRANSPOSE4x4W 0, 1, 2, 3, 4
1155 VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
1156 TRANSPOSE4x4W 0, 1, 2, 3, 4
1160 DEFINE_ARGS dst1, dst2, stride
1161 lea dst2q, [dst1q+2*strideq]
1162 STORE_DIFFx2 m0, m1, m6, m7, m4, 3, dst1q, strideq
1163 STORE_DIFFx2 m2, m3, m6, m7, m4, 3, dst2q, strideq
1175 ;-----------------------------------------------------------------------------
1176 ; void vp8_luma_dc_wht_mmxext(DCTELEM block[4][4][16], DCTELEM dc[16])
1177 ;-----------------------------------------------------------------------------
1179 %macro SCATTER_WHT 3
1182 mov [blockq+2*16*(0+%3)], dc1w
1183 mov [blockq+2*16*(1+%3)], dc2w
1188 mov [blockq+2*16*(4+%3)], dc1w
1189 mov [blockq+2*16*(5+%3)], dc2w
1192 mov [blockq+2*16*(8+%3)], dc1w
1193 mov [blockq+2*16*(9+%3)], dc2w
1196 mov [blockq+2*16*(12+%3)], dc1w
1197 mov [blockq+2*16*(13+%3)], dc2w
1200 %macro HADAMARD4_1D 4
1201 SUMSUB_BADC w, %2, %1, %4, %3
1202 SUMSUB_BADC w, %4, %2, %3, %1
1207 cglobal vp8_luma_dc_wht, 2, 3, 0, block, dc1, dc2
1214 movaps [dc1q+ 0], xmm0
1215 movaps [dc1q+16], xmm0
1223 HADAMARD4_1D 0, 1, 2, 3
1224 TRANSPOSE4x4W 0, 1, 2, 3, 4
1226 HADAMARD4_1D 0, 1, 2, 3
1243 ;-----------------------------------------------------------------------------
1244 ; void vp8_h/v_loop_filter_simple_<opt>(uint8_t *dst, int stride, int flim);
1245 ;-----------------------------------------------------------------------------
1247 ; macro called with 7 mm register indexes as argument, and 4 regular registers
1249 ; first 4 mm registers will carry the transposed pixel data
1250 ; the other three are scratchspace (one would be sufficient, but this allows
1251 ; for more spreading/pipelining and thus faster execution on OOE CPUs)
1253 ; first two regular registers are buf+4*stride and buf+5*stride
1254 ; third is -stride, fourth is +stride
1255 %macro READ_8x4_INTERLEAVED 11
1256 ; interleave 8 (A-H) rows of 4 pixels each
1257 movd m%1, [%8+%10*4] ; A0-3
1258 movd m%5, [%9+%10*4] ; B0-3
1259 movd m%2, [%8+%10*2] ; C0-3
1260 movd m%6, [%8+%10] ; D0-3
1261 movd m%3, [%8] ; E0-3
1262 movd m%7, [%9] ; F0-3
1263 movd m%4, [%9+%11] ; G0-3
1264 punpcklbw m%1, m%5 ; A/B interleaved
1265 movd m%5, [%9+%11*2] ; H0-3
1266 punpcklbw m%2, m%6 ; C/D interleaved
1267 punpcklbw m%3, m%7 ; E/F interleaved
1268 punpcklbw m%4, m%5 ; G/H interleaved
1271 ; macro called with 7 mm register indexes as argument, and 5 regular registers
1272 ; first 11 mean the same as READ_8x4_TRANSPOSED above
1273 ; fifth regular register is scratchspace to reach the bottom 8 rows, it
1274 ; will be set to second regular register + 8*stride at the end
1275 %macro READ_16x4_INTERLEAVED 12
1276 ; transpose 16 (A-P) rows of 4 pixels each
; SPARSE EXTRACT: the instructions that set %12 to the bottom-half pointer
; (and %endmacro) are not visible in this view.
1279 ; read (and interleave) those addressable by %8 (=r0), A/C/D/E/I/K/L/M
1280 movd m%1, [%8+%10*4] ; A0-3
1281 movd m%3, [%12+%10*4] ; I0-3
1282 movd m%2, [%8+%10*2] ; C0-3
1283 movd m%4, [%12+%10*2] ; K0-3
1284 movd m%6, [%8+%10] ; D0-3
1285 movd m%5, [%12+%10] ; L0-3
1286 movd m%7, [%12] ; M0-3
1288 punpcklbw m%1, m%3 ; A/I
1289 movd m%3, [%8] ; E0-3
1290 punpcklbw m%2, m%4 ; C/K
1291 punpcklbw m%6, m%5 ; D/L
1292 punpcklbw m%3, m%7 ; E/M
1293 punpcklbw m%2, m%6 ; C/D/K/L interleaved
1295 ; read (and interleave) those addressable by %9 (=r4), B/F/G/H/J/N/O/P
1296 movd m%5, [%9+%10*4] ; B0-3
1297 movd m%4, [%12+%10*4] ; J0-3
1298 movd m%7, [%9] ; F0-3
1299 movd m%6, [%12] ; N0-3
1300 punpcklbw m%5, m%4 ; B/J
1301 punpcklbw m%7, m%6 ; F/N
1302 punpcklbw m%1, m%5 ; A/B/I/J interleaved
1303 punpcklbw m%3, m%7 ; E/F/M/N interleaved
1304 movd m%4, [%9+%11] ; G0-3
1305 movd m%6, [%12+%11] ; O0-3
1306 movd m%5, [%9+%11*2] ; H0-3
1307 movd m%7, [%12+%11*2] ; P0-3
1308 punpcklbw m%4, m%6 ; G/O
1309 punpcklbw m%5, m%7 ; H/P
1310 punpcklbw m%4, m%5 ; G/H/O/P interleaved
1313 ; write 4 mm registers of 2 dwords each
1314 ; first four arguments are mm register indexes containing source data
1315 ; last four are registers containing buf+4*stride, buf+5*stride,
1316 ; -stride and +stride
; SPARSE EXTRACT: the %macro header and store sequence of WRITE_2x4D are
; entirely missing between these comment lines.
1318 ; write out (2 dwords per register)
1333 ; write 4 xmm registers of 4 dwords each
1334 ; arguments same as WRITE_2x4D, but with an extra register, so that the 5 regular
1335 ; registers contain buf+4*stride, buf+5*stride, buf+12*stride, -stride and +stride
1336 ; we add 1*stride to the third regular registry in the process
1337 ; the 10th argument is 16 if it's a Y filter (i.e. all regular registers cover the
1338 ; same memory region), or 8 if they cover two separate buffers (third one points to
1339 ; a different memory region than the first two), allowing for more optimal code for
1341 %macro WRITE_4x4D 10
1342 ; write out (4 dwords per register), start with dwords zero
; SPARSE EXTRACT: the whole WRITE_4x4D store body is missing here.
1393 ; write 4 or 8 words in the mmx/xmm registers as 8 lines
1394 ; 1 and 2 are the registers to write, this can be the same (for SSE2)
1396 ; 3 is a general-purpose register that we will clobber
1398 ; 3 is a pointer to the destination's 5th line
1399 ; 4 is a pointer to the destination's 4th line
1400 ; 5/6 is -stride and +stride
; WRITE_8W / WRITE_2x4W word-scatter fragment (macro header not visible):
1431 pextrw [%3+%4*4], %1, 0
1432 pextrw [%2+%4*4], %1, 1
1433 pextrw [%3+%4*2], %1, 2
1434 pextrw [%3+%4 ], %1, 3
1437 pextrw [%2+%5 ], %1, 6
1438 pextrw [%2+%5*2], %1, 7
; SPLATB_REG: broadcast a byte from a GPR into all lanes of an mm/xmm reg
; (body entirely missing from this extract).
1468 %macro SPLATB_REG 2-3
; Simple loop filter (edge-only), %1 = direction (v/h), %2 = gpr count.
; SPARSE EXTRACT: many %else/%endif lines, loop labels, the pb_fe masking
; steps and RET/%endmacro are missing — bodies below are fragments.
1489 %macro SIMPLE_LOOPFILTER 2
1490 cglobal vp8_%1_loop_filter_simple, 3, %2, 8, dst, stride, flim, cntr
1491 %if mmsize == 8 ; mmx/mmxext
1497 SPLATB_REG m7, flim, m0 ; splat "flim" into register
1499 ; set up indexes to address 4 rows
1501 DEFINE_ARGS dst1, mstride, stride, cntr, dst2
1503 DEFINE_ARGS dst1, mstride, stride, dst3, dst2
1505 mov strideq, mstrideq
1508 lea dst1q, [dst1q+4*strideq-2]
1511 %if mmsize == 8 ; mmx / mmxext
1515 ; read 4 half/full rows of pixels
1516 mova m0, [dst1q+mstrideq*2] ; p1
1517 mova m1, [dst1q+mstrideq] ; p0
1518 mova m2, [dst1q] ; q0
1519 mova m3, [dst1q+ strideq] ; q1
; horizontal filter path: gather columns and transpose to rows.
1521 lea dst2q, [dst1q+ strideq]
1523 %if mmsize == 8 ; mmx/mmxext
1524 READ_8x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq
1526 READ_16x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq, dst3q
1528 TRANSPOSE4x4W 0, 1, 2, 3, 4
; simple_limit(): |p0-q0|*2 + |p1-q1|/2 <= flim
1532 mova m5, m2 ; m5=backup of q0
1533 mova m6, m1 ; m6=backup of p0
1534 psubusb m1, m2 ; p0-q0
1535 psubusb m2, m6 ; q0-p0
1536 por m1, m2 ; FFABS(p0-q0)
1537 paddusb m1, m1 ; m1=FFABS(p0-q0)*2
1541 psubusb m3, m0 ; q1-p1
1542 psubusb m0, m4 ; p1-q1
1543 por m3, m0 ; FFABS(p1-q1)
1547 psubsb m2, m4 ; m2=p1-q1 (signed) backup for below
1549 psrlq m3, 1 ; m3=FFABS(p1-q1)/2, this can be used signed
1553 pcmpeqb m3, m1 ; abs(p0-q0)*2+abs(p1-q1)/2<=flim mask(0xff/0x0)
1555 ; filter_common (use m2/p1-q1, m4=q0, m6=p0, m5/q0-p0 and m3/mask)
1559 psubsb m5, m0 ; q0-p0 (signed)
1562 paddsb m2, m5 ; a=(p1-q1) + 3*(q0-p0)
1563 pand m2, m3 ; apply filter mask (m3)
1567 paddsb m2, [pb_4] ; f1<<3=a+4
1568 paddsb m1, [pb_3] ; f2<<3=a+3
1570 pand m1, m3 ; cache f2<<3
; signed >>3 of f1 emulated with unsigned ops (sign handled via mask m0):
1574 pcmpgtb m0, m2 ; which values are <0?
1575 psubb m3, m2 ; -f1<<3
1581 paddusb m4, m3 ; q0-f1
1585 pcmpgtb m0, m1 ; which values are <0?
1586 psubb m3, m1 ; -f2<<3
1592 psubusb m6, m3 ; p0+f2
; store back: vertical path writes rows directly, horizontal re-transposes.
1597 mova [dst1q+mstrideq], m6
1600 SBUTTERFLY bw, 6, 4, 0
1602 %if mmsize == 16 ; sse2
1606 WRITE_8W m6, dst2q, dst1q, mstrideq, strideq
1607 lea dst2q, [dst3q+mstrideq+1]
1611 WRITE_8W m4, dst3q, dst2q, mstrideq, strideq
1613 WRITE_2x4W m6, m4, dst2q, dst1q, mstrideq, strideq
1617 %if mmsize == 8 ; mmx/mmxext
1620 add dst1q, 8 ; advance 8 cols = pixels
1622 lea dst1q, [dst1q+strideq*8-1] ; advance 8 rows = lines
; per-cpuflag instantiations (INIT_* directives not visible in extract):
1634 SIMPLE_LOOPFILTER v, 4
1635 SIMPLE_LOOPFILTER h, 5
1637 SIMPLE_LOOPFILTER v, 4
1638 SIMPLE_LOOPFILTER h, 5
1642 SIMPLE_LOOPFILTER v, 3
1643 SIMPLE_LOOPFILTER h, 5
1645 SIMPLE_LOOPFILTER v, 3
1646 SIMPLE_LOOPFILTER h, 5
1648 SIMPLE_LOOPFILTER h, 5
1650 ;-----------------------------------------------------------------------------
1651 ; void vp8_h/v_loop_filter<size>_inner_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
1652 ; int flimE, int flimI, int hev_thr);
1653 ;-----------------------------------------------------------------------------
1655 %macro INNER_LOOPFILTER 2
1656 %if %2 == 8 ; chroma
1657 cglobal vp8_%1_loop_filter8uv_inner, 6, 6, 13, dst, dst8, stride, flimE, flimI, hevthr
1659 cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, dst, stride, flimE, flimI, hevthr
1665 %ifndef m8 ; stack layout: [0]=E, [1]=I, [2]=hev_thr
1666 %ifidn %1, v ; [3]=hev() result
1667 %assign pad 16 + mmsize * 4 - gprsize - (stack_offset & 15)
1668 %else ; h ; extra storage space for transposes
1669 %assign pad 16 + mmsize * 5 - gprsize - (stack_offset & 15)
1671 ; splat function arguments
1672 SPLATB_REG m0, flimEq, m7 ; E
1673 SPLATB_REG m1, flimIq, m7 ; I
1674 SPLATB_REG m2, hevthrq, m7 ; hev_thresh
1678 %define m_flimE [rsp]
1679 %define m_flimI [rsp+mmsize]
1680 %define m_hevthr [rsp+mmsize*2]
1681 %define m_maskres [rsp+mmsize*3]
1682 %define m_p0backup [rsp+mmsize*3]
1683 %define m_q0backup [rsp+mmsize*4]
1691 %define m_hevthr m11
1692 %define m_maskres m12
1693 %define m_p0backup m12
1694 %define m_q0backup m8
1696 ; splat function arguments
1697 SPLATB_REG m_flimE, flimEq, m7 ; E
1698 SPLATB_REG m_flimI, flimIq, m7 ; I
1699 SPLATB_REG m_hevthr, hevthrq, m7 ; hev_thresh
1702 %if %2 == 8 ; chroma
1703 DEFINE_ARGS dst1, dst8, mstride, stride, dst2
1705 DEFINE_ARGS dst1, mstride, stride, dst2, cntr
1708 DEFINE_ARGS dst1, mstride, stride, dst2, dst8
1710 mov strideq, mstrideq
1713 lea dst1q, [dst1q+strideq*4-4]
1714 %if %2 == 8 ; chroma
1715 lea dst8q, [dst8q+strideq*4-4]
; Pixel gathering phase: v path loads rows directly (movrow); h paths read
; 8 or 16 columns and transpose into p3..q3. SPARSE EXTRACT: several loads,
; the %if/%else structure and the x86-64 register-backup branches are
; missing from this view.
1723 lea dst2q, [dst1q+strideq]
1725 %if %2 == 8 && mmsize == 16
1730 movrow m0, [dst1q+mstrideq*4] ; p3
1731 movrow m1, [dst2q+mstrideq*4] ; p2
1732 movrow m2, [dst1q+mstrideq*2] ; p1
1733 movrow m5, [dst2q] ; q1
1734 movrow m6, [dst2q+ strideq*1] ; q2
1735 movrow m7, [dst2q+ strideq*2] ; q3
; chroma SSE2: second plane occupies the high half of each xmm row.
1736 %if mmsize == 16 && %2 == 8
1737 movhps m0, [dst8q+mstrideq*4]
1738 movhps m2, [dst8q+mstrideq*2]
1740 movhps m1, [dst8q+mstrideq*4]
1742 movhps m6, [dst8q+ strideq ]
1743 movhps m7, [dst8q+ strideq*2]
1746 %elif mmsize == 8 ; mmx/mmxext (h)
1747 ; read 8 rows of 8px each
1748 movu m0, [dst1q+mstrideq*4]
1749 movu m1, [dst2q+mstrideq*4]
1750 movu m2, [dst1q+mstrideq*2]
1751 movu m3, [dst1q+mstrideq ]
1754 movu m6, [dst2q+ strideq ]
1757 TRANSPOSE4x4B 0, 1, 2, 3, 7
1759 movu m7, [dst2q+ strideq*2]
1760 TRANSPOSE4x4B 4, 5, 6, 7, 1
1761 SBUTTERFLY dq, 0, 4, 1 ; p3/p2
1762 SBUTTERFLY dq, 2, 6, 1 ; q0/q1
1763 SBUTTERFLY dq, 3, 7, 1 ; q2/q3
1765 mova m_q0backup, m2 ; store q0
1766 SBUTTERFLY dq, 1, 5, 2 ; p1/p0
1767 mova m_p0backup, m5 ; store p0
; luma SSE2 h path: 16 rows interleaved pairwise (A..P naming as in the
; READ_16x4_INTERLEAVED comments above).
1774 lea dst8q, [dst1q+ strideq*8]
1777 ; read 16 rows of 8px each, interleave
1778 movh m0, [dst1q+mstrideq*4]
1779 movh m1, [dst8q+mstrideq*4]
1780 movh m2, [dst1q+mstrideq*2]
1781 movh m5, [dst8q+mstrideq*2]
1782 movh m3, [dst1q+mstrideq ]
1783 movh m6, [dst8q+mstrideq ]
1786 punpcklbw m0, m1 ; A/I
1787 punpcklbw m2, m5 ; C/K
1788 punpcklbw m3, m6 ; D/L
1789 punpcklbw m4, m7 ; E/M
1792 movh m1, [dst2q+mstrideq*4]
1793 movh m6, [dst8q+mstrideq*4]
1796 punpcklbw m1, m6 ; B/J
1797 punpcklbw m5, m7 ; F/N
1798 movh m6, [dst2q+ strideq ]
1799 movh m7, [dst8q+ strideq ]
1800 punpcklbw m6, m7 ; G/O
1803 TRANSPOSE4x4B 0, 1, 2, 3, 7
1809 movh m7, [dst2q+ strideq*2]
1810 movh m1, [dst8q+ strideq*2]
1811 punpcklbw m7, m1 ; H/P
1812 TRANSPOSE4x4B 4, 5, 6, 7, 1
1813 SBUTTERFLY dq, 0, 4, 1 ; p3/p2
1814 SBUTTERFLY dq, 2, 6, 1 ; q0/q1
1815 SBUTTERFLY dq, 3, 7, 1 ; q2/q3
1821 mova m_q0backup, m2 ; store q0
1823 SBUTTERFLY dq, 1, 5, 2 ; p1/p0
1827 mova m_p0backup, m5 ; store p0
; Mask computation: normal_limit (all neighbour deltas <= I, and
; |p0-q0|*2+|p1-q1|/2 <= E) and hev (high edge variance) flags.
; SPARSE EXTRACT: register moves between the difference groups, the mmx2
; pmaxub alternatives' counterparts, and several %else/%endif are missing.
1835 ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
1838 psubusb m4, m0 ; p2-p3
1839 psubusb m0, m1 ; p3-p2
1840 por m0, m4 ; abs(p3-p2)
1844 psubusb m4, m1 ; p1-p2
1845 psubusb m1, m2 ; p2-p1
1846 por m1, m4 ; abs(p2-p1)
1850 psubusb m4, m7 ; q2-q3
1851 psubusb m7, m6 ; q3-q2
1852 por m7, m4 ; abs(q3-q2)
1856 psubusb m4, m6 ; q1-q2
1857 psubusb m6, m5 ; q2-q1
1858 por m6, m4 ; abs(q2-q1)
; pre-mmxext path compares each delta against I individually:
1860 %if notcpuflag(mmx2)
1867 pcmpeqb m0, m3 ; abs(p3-p2) <= I
1868 pcmpeqb m1, m3 ; abs(p2-p1) <= I
1869 pcmpeqb m7, m3 ; abs(q3-q2) <= I
1870 pcmpeqb m6, m3 ; abs(q2-q1) <= I
1880 ; normal_limit and high_edge_variance for p1-p0, q1-q0
1881 SWAP 7, 3 ; now m7 is zero
1883 movrow m3, [dst1q+mstrideq ] ; p0
1884 %if mmsize == 16 && %2 == 8
1885 movhps m3, [dst8q+mstrideq ]
1897 psubusb m1, m3 ; p1-p0
1898 psubusb m6, m2 ; p0-p1
1899 por m1, m6 ; abs(p1-p0)
1900 %if notcpuflag(mmx2)
1903 psubusb m6, m_hevthr
1904 pcmpeqb m1, m7 ; abs(p1-p0) <= I
1905 pcmpeqb m6, m7 ; abs(p1-p0) <= hev_thresh
; mmxext path accumulates a single running max instead:
1909 pmaxub m0, m1 ; max_I
1910 SWAP 1, 4 ; max_hev_thresh
1913 SWAP 6, 4 ; now m6 is I
1915 movrow m4, [dst1q] ; q0
1916 %if mmsize == 16 && %2 == 8
1928 psubusb m1, m5 ; q0-q1
1929 psubusb m7, m4 ; q1-q0
1930 por m1, m7 ; abs(q1-q0)
1931 %if notcpuflag(mmx2)
1934 psubusb m7, m_hevthr
1936 pcmpeqb m1, m6 ; abs(q1-q0) <= I
1937 pcmpeqb m7, m6 ; abs(q1-q0) <= hev_thresh
1939 pand m0, m1 ; abs([pq][321]-[pq][210]) <= I
1946 psubusb m6, m_hevthr
1947 pcmpeqb m0, m7 ; max(abs(..)) <= I
1948 pcmpeqb m6, m7 ; !(max(abs..) > thresh)
1953 mova m_maskres, m6 ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
; simple_limit part of normal_limit:
1959 mova m6, m4 ; keep copies of p0/q0 around for later use
1961 psubusb m1, m4 ; p0-q0
1962 psubusb m6, m3 ; q0-p0
1963 por m1, m6 ; abs(q0-p0)
1964 paddusb m1, m1 ; m1=2*abs(q0-p0)
1970 psubusb m7, m5 ; p1-q1
1971 psubusb m6, m2 ; q1-p1
1972 por m7, m6 ; abs(q1-p1)
1975 psrlq m7, 1 ; abs(q1-p1)/2
1976 paddusb m7, m1 ; abs(q0-p0)*2+abs(q1-p1)/2
1978 pcmpeqb m7, m6 ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
1979 pand m0, m7 ; normal_limit result
; Filter application and write-back. SPARSE EXTRACT: sign-bias xors, the
; >>3 shift emulation, hev masking, the f==(f1+1)>>1 adjustment for p1/q1,
; store branches, loop/RET and %endmacro are missing from this view.
1981 ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
1982 %ifdef m8 ; x86-64 && sse2
1985 %else ; x86-32 or mmx/mmxext
1986 %define m_pb_80 [pb_80]
1992 psubsb m1, m7 ; (signed) q0-p0
1997 psubsb m6, m7 ; (signed) p1-q1
2002 paddsb m7, m1 ; 3*(q0-p0)+is4tap?(p1-q1)
2021 paddusb m3, m1 ; p0+f2
2032 paddusb m4, m1 ; q0-f1
2039 %if notcpuflag(mmx2)
2046 %if notcpuflag(mmx2)
2060 paddusb m5, m1 ; q1-a
2061 paddusb m2, m0 ; p1+a
; v path: write the four modified rows straight back.
2065 movrow [dst1q+mstrideq*2], m2
2066 movrow [dst1q+mstrideq ], m3
2068 movrow [dst1q+ strideq ], m5
2069 %if mmsize == 16 && %2 == 8
2070 movhps [dst8q+mstrideq*2], m2
2071 movhps [dst8q+mstrideq ], m3
2073 movhps [dst8q+ strideq ], m5
; h path: transpose back and scatter columns with the WRITE_* helpers.
2080 TRANSPOSE4x4B 2, 3, 4, 5, 6
2082 %if mmsize == 8 ; mmx/mmxext (h)
2083 WRITE_4x2D 2, 3, 4, 5, dst1q, dst2q, mstrideq, strideq
2085 lea dst8q, [dst8q+mstrideq +2]
2086 WRITE_4x4D 2, 3, 4, 5, dst1q, dst2q, dst8q, mstrideq, strideq, %2
2091 %if %2 == 8 ; chroma
2100 lea dst1q, [dst1q+ strideq*8-2]
2109 %ifndef m8 ; sse2 on x86-32 or mmx/mmxext
; per-cpuflag instantiations (INIT_* directives not visible in extract):
2117 INNER_LOOPFILTER v, 16
2118 INNER_LOOPFILTER h, 16
2119 INNER_LOOPFILTER v, 8
2120 INNER_LOOPFILTER h, 8
2123 INNER_LOOPFILTER v, 16
2124 INNER_LOOPFILTER h, 16
2125 INNER_LOOPFILTER v, 8
2126 INNER_LOOPFILTER h, 8
2130 INNER_LOOPFILTER v, 16
2131 INNER_LOOPFILTER h, 16
2132 INNER_LOOPFILTER v, 8
2133 INNER_LOOPFILTER h, 8
2136 INNER_LOOPFILTER v, 16
2137 INNER_LOOPFILTER h, 16
2138 INNER_LOOPFILTER v, 8
2139 INNER_LOOPFILTER h, 8
2141 ;-----------------------------------------------------------------------------
2142 ; void vp8_h/v_loop_filter<size>_mbedge_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
2143 ; int flimE, int flimI, int hev_thr);
2144 ;-----------------------------------------------------------------------------
; MBEDGE_LOOPFILTER %1=direction (v/h), %2=size (16 luma / 8 chroma).
; Macroblock-edge loop filter: like the inner filter but also touches
; p2/q2 and uses the stronger filter_mbedge weights.
2146 %macro MBEDGE_LOOPFILTER 2
2147 %if %2 == 8 ; chroma
2148 cglobal vp8_%1_loop_filter8uv_mbedge, 6, 6, 15, dst1, dst8, stride, flimE, flimI, hevthr
2150 cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, dst1, stride, flimE, flimI, hevthr
; x86-32 (or mmx): not enough registers, so spill to an aligned stack area.
2156 %ifndef m8 ; stack layout: [0]=E, [1]=I, [2]=hev_thr
2157 %if mmsize == 16 ; [3]=hev() result
2158 ; [4]=filter tmp result
2159 ; [5]/[6] = p2/q2 backup
2160 ; [7]=lim_res sign result
; pad rounds the reservation so rsp-relative slots stay mmsize-aligned.
2161 %assign pad 16 + mmsize * 7 - gprsize - (stack_offset & 15)
2162 %else ; 8 ; extra storage space for transposes
2163 %assign pad 16 + mmsize * 8 - gprsize - (stack_offset & 15)
2165 ; splat function arguments
2166 SPLATB_REG m0, flimEq, m7 ; E
2167 SPLATB_REG m1, flimIq, m7 ; I
2168 SPLATB_REG m2, hevthrq, m7 ; hev_thresh
; Stack-slot aliases; note some slots are deliberately shared (maskres/
; p0backup, limres/q0backup) because their live ranges do not overlap.
2172 %define m_flimE [rsp]
2173 %define m_flimI [rsp+mmsize]
2174 %define m_hevthr [rsp+mmsize*2]
2175 %define m_maskres [rsp+mmsize*3]
2176 %define m_limres [rsp+mmsize*4]
2177 %define m_p0backup [rsp+mmsize*3]
2178 %define m_q0backup [rsp+mmsize*4]
2179 %define m_p2backup [rsp+mmsize*5]
2180 %define m_q2backup [rsp+mmsize*6]
2182 %define m_limsign [rsp]
2184 %define m_limsign [rsp+mmsize*7]
; x86-64 sse2: enough xmm registers (m8-m14) to keep all spill values
; in registers instead of on the stack.
2190 %else ; sse2 on x86-64
2193 %define m_hevthr m11
2194 %define m_maskres m12
2196 %define m_p0backup m12
2197 %define m_q0backup m8
2198 %define m_p2backup m13
2199 %define m_q2backup m14
2200 %define m_limsign m9
2202 ; splat function arguments
2203 SPLATB_REG m_flimE, flimEq, m7 ; E
2204 SPLATB_REG m_flimI, flimIq, m7 ; I
2205 SPLATB_REG m_hevthr, hevthrq, m7 ; hev_thresh
; Rebind argument registers: keep both +stride and -stride (mstride)
; so rows on either side of the edge can be addressed directly.
2208 %if %2 == 8 ; chroma
2209 DEFINE_ARGS dst1, dst8, mstride, stride, dst2
2211 DEFINE_ARGS dst1, mstride, stride, dst2, cntr
2214 DEFINE_ARGS dst1, mstride, stride, dst2, dst8
2216 mov strideq, mstrideq
; Horizontal variant: step 4 rows down and 4 columns left so reads
; straddle the vertical macroblock edge.
2219 lea dst1q, [dst1q+strideq*4-4]
2220 %if %2 == 8 ; chroma
2221 lea dst8q, [dst8q+strideq*4-4]
; --- load p3..q3 around the edge ---
; dst2q points one row past the edge so both sides are reachable with
; +/- stride addressing.
2229 lea dst2q, [dst1q+ strideq ]
2231 %if %2 == 8 && mmsize == 16
; Vertical variant: rows load directly; movrow is 8 or 16 bytes wide.
2236 movrow m0, [dst1q+mstrideq*4] ; p3
2237 movrow m1, [dst2q+mstrideq*4] ; p2
2238 movrow m2, [dst1q+mstrideq*2] ; p1
2239 movrow m5, [dst2q] ; q1
2240 movrow m6, [dst2q+ strideq ] ; q2
2241 movrow m7, [dst2q+ strideq*2] ; q3
; Chroma: second plane (V) fills the high halves of the xmm registers.
2242 %if mmsize == 16 && %2 == 8
2243 movhps m0, [dst8q+mstrideq*4]
2244 movhps m2, [dst8q+mstrideq*2]
2246 movhps m1, [dst8q+mstrideq*4]
2248 movhps m6, [dst8q+ strideq ]
2249 movhps m7, [dst8q+ strideq*2]
2252 %elif mmsize == 8 ; mmx/mmxext (h)
2253 ; read 8 rows of 8px each
2254 movu m0, [dst1q+mstrideq*4]
2255 movu m1, [dst2q+mstrideq*4]
2256 movu m2, [dst1q+mstrideq*2]
2257 movu m3, [dst1q+mstrideq ]
2260 movu m6, [dst2q+ strideq ]
; 8x8 byte transpose in two 4x4 halves, then recombine with SBUTTERFLY
; so each register ends up holding one pixel column (p3..q3).
2263 TRANSPOSE4x4B 0, 1, 2, 3, 7
2265 movu m7, [dst2q+ strideq*2]
2266 TRANSPOSE4x4B 4, 5, 6, 7, 1
2267 SBUTTERFLY dq, 0, 4, 1 ; p3/p2
2268 SBUTTERFLY dq, 2, 6, 1 ; q0/q1
2269 SBUTTERFLY dq, 3, 7, 1 ; q2/q3
; q0/p0 are spilled; they are reloaded later when the filter needs them.
2271 mova m_q0backup, m2 ; store q0
2272 SBUTTERFLY dq, 1, 5, 2 ; p1/p0
2273 mova m_p0backup, m5 ; store p0
; sse2 horizontal variant: 16 rows of 8 bytes each.  Rows 0-7 (A..H) are
; interleaved with rows 8-15 (I..P) via punpcklbw so one 16-byte
; transpose handles the whole macroblock height.
2280 lea dst8q, [dst1q+ strideq*8 ]
2283 ; read 16 rows of 8px each, interleave
2284 movh m0, [dst1q+mstrideq*4]
2285 movh m1, [dst8q+mstrideq*4]
2286 movh m2, [dst1q+mstrideq*2]
2287 movh m5, [dst8q+mstrideq*2]
2288 movh m3, [dst1q+mstrideq ]
2289 movh m6, [dst8q+mstrideq ]
2292 punpcklbw m0, m1 ; A/I
2293 punpcklbw m2, m5 ; C/K
2294 punpcklbw m3, m6 ; D/L
2295 punpcklbw m4, m7 ; E/M
2298 movh m1, [dst2q+mstrideq*4]
2299 movh m6, [dst8q+mstrideq*4]
2302 punpcklbw m1, m6 ; B/J
2303 punpcklbw m5, m7 ; F/N
2304 movh m6, [dst2q+ strideq ]
2305 movh m7, [dst8q+ strideq ]
2306 punpcklbw m6, m7 ; G/O
; Same transpose/recombine scheme as the mmx path, at 16-byte width.
2309 TRANSPOSE4x4B 0, 1, 2, 3, 7
2315 movh m7, [dst2q+ strideq*2]
2316 movh m1, [dst8q+ strideq*2]
2317 punpcklbw m7, m1 ; H/P
2318 TRANSPOSE4x4B 4, 5, 6, 7, 1
2319 SBUTTERFLY dq, 0, 4, 1 ; p3/p2
2320 SBUTTERFLY dq, 2, 6, 1 ; q0/q1
2321 SBUTTERFLY dq, 3, 7, 1 ; q2/q3
2327 mova m_q0backup, m2 ; store q0
2329 SBUTTERFLY dq, 1, 5, 2 ; p1/p0
2333 mova m_p0backup, m5 ; store p0
; --- normal_limit, outer pairs ---
; For each neighbouring pixel pair, compute the byte-wise absolute
; difference (two saturating subtracts + por) and test it against the
; inner limit I (m3 here); results are all-ones byte masks.
2341 ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
2344 psubusb m4, m0 ; p2-p3
2345 psubusb m0, m1 ; p3-p2
2346 por m0, m4 ; abs(p3-p2)
2350 psubusb m4, m1 ; p1-p2
2352 psubusb m1, m2 ; p2-p1
2353 por m1, m4 ; abs(p2-p1)
2357 psubusb m4, m7 ; q2-q3
2358 psubusb m7, m6 ; q3-q2
2359 por m7, m4 ; abs(q3-q2)
2363 psubusb m4, m6 ; q1-q2
2365 psubusb m6, m5 ; q2-q1
2366 por m6, m4 ; abs(q2-q1)
; mmx fallback (no pmaxub); the elided lines hold the substitute sequence.
2368 %if notcpuflag(mmx2)
2375 pcmpeqb m0, m3 ; abs(p3-p2) <= I
2376 pcmpeqb m1, m3 ; abs(p2-p1) <= I
2377 pcmpeqb m7, m3 ; abs(q3-q2) <= I
2378 pcmpeqb m6, m3 ; abs(q2-q1) <= I
; --- normal_limit + high_edge_variance, inner pairs ---
; |p1-p0| and |q1-q0| are tested against both I (filter mask) and
; hev_thresh (selects strong vs. weak filtering later).
2388 ; normal_limit and high_edge_variance for p1-p0, q1-q0
2389 SWAP 7, 3 ; now m7 is zero
2391 movrow m3, [dst1q+mstrideq ] ; p0
2392 %if mmsize == 16 && %2 == 8
2393 movhps m3, [dst8q+mstrideq ]
2405 psubusb m1, m3 ; p1-p0
2406 psubusb m6, m2 ; p0-p1
2407 por m1, m6 ; abs(p1-p0)
2408 %if notcpuflag(mmx2)
2411 psubusb m6, m_hevthr
2412 pcmpeqb m1, m7 ; abs(p1-p0) <= I
2413 pcmpeqb m6, m7 ; abs(p1-p0) <= hev_thresh
; mmxext/sse2: accumulate running maxima instead of per-pair compares.
2417 pmaxub m0, m1 ; max_I
2418 SWAP 1, 4 ; max_hev_thresh
2421 SWAP 6, 4 ; now m6 is I
2423 movrow m4, [dst1q] ; q0
2424 %if mmsize == 16 && %2 == 8
2436 psubusb m1, m5 ; q0-q1
2437 psubusb m7, m4 ; q1-q0
2438 por m1, m7 ; abs(q1-q0)
2439 %if notcpuflag(mmx2)
2442 psubusb m7, m_hevthr
2444 pcmpeqb m1, m6 ; abs(q1-q0) <= I
2445 pcmpeqb m7, m6 ; abs(q1-q0) <= hev_thresh
2447 pand m0, m1 ; abs([pq][321]-[pq][210]) <= I
; Final fold of the running maxima against I / hev_thresh (m7 is zero).
2454 psubusb m6, m_hevthr
2455 pcmpeqb m0, m7 ; max(abs(..)) <= I
2456 pcmpeqb m6, m7 ; !(max(abs..) > thresh)
; --- edge-limit test + filter_common (weak filter for hev pixels) ---
2461 mova m_maskres, m6 ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
2467 mova m6, m4 ; keep copies of p0/q0 around for later use
2469 psubusb m1, m4 ; p0-q0
2470 psubusb m6, m3 ; q0-p0
2471 por m1, m6 ; abs(q0-p0)
2472 paddusb m1, m1 ; m1=2*abs(q0-p0)
2478 psubusb m7, m5 ; p1-q1
2479 psubusb m6, m2 ; q1-p1
2480 por m7, m6 ; abs(q1-p1)
; NOTE(review): psrlq shifts across byte lanes; presumably the low bit of
; each byte is masked off in the elided lines just above -- confirm there.
2483 psrlq m7, 1 ; abs(q1-p1)/2
2484 paddusb m7, m1 ; abs(q0-p0)*2+abs(q1-p1)/2
2486 pcmpeqb m7, m6 ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
2487 pand m0, m7 ; normal_limit result
2489 ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
2490 %ifdef m8 ; x86-64 && sse2
2493 %else ; x86-32 or mmx/mmxext
2494 %define m_pb_80 [pb_80]
2500 psubsb m1, m7 ; (signed) q0-p0
2505 psubsb m6, m7 ; (signed) p1-q1
; The w = 3*(q0-p0)+(p1-q1) term is split two ways: masked by hev for
; filter_common here, and saved in m_limres for filter_mbedge below.
2512 mova m_limres, m6 ; 3*(qp-p0)+(p1-q1) masked for filter_mbedge
2519 pandn m7, m6 ; 3*(q0-p0)+(p1-q1) masked for filter_common
2537 paddusb m3, m1 ; p0+f2
2548 paddusb m4, m1 ; q0-f1
; --- filter_mbedge: strong 3-tap filter for non-hev pixels ---
; The w term (lim_res) is sign-extended byte->word, scaled by the
; 27/63, 18/63, 9/63 weight tables, rounded, and packed back to bytes;
; each a0/a1/a2 adjustment is applied symmetrically to a p/q pair.
2550 ; filter_mbedge (m2-m5 = p1-q1; lim_res carries w)
2563 pcmpgtb m0, m1 ; which are negative
2565 punpcklbw m6, m7 ; interleave with "1" for rounding
2568 punpcklbw m6, m0 ; signed byte->word
2578 SWAP 0, 10 ; don't lose lim_sign copy
2591 mova m_maskres, m6 ; backup for later in filter
2600 packsswb m6, m1 ; a0
; Load the next weight pair early to overlap with the a0 stores.
2606 mova m6, [pb_18_63] ; pipelining
2610 paddusb m3, m0 ; p0+a0
2611 psubusb m4, m0 ; q0-a0
2640 packsswb m6, m1 ; a1
2650 paddusb m2, m0 ; p1+a1
2651 psubusb m5, m0 ; q1-a1
2685 packsswb m6, m1 ; a1
2699 paddusb m1, m7 ; p1+a1
2700 psubusb m6, m7 ; q1-a1
; Vertical-edge writeback: six rows (p2..q2) are modified by mbedge.
2704 movrow [dst2q+mstrideq*4], m1
2705 movrow [dst1q+mstrideq*2], m2
2706 movrow [dst1q+mstrideq ], m3
2709 movrow [dst2q+ strideq ], m6
; Chroma/sse2: high halves go to the second (V) plane.
2710 %if mmsize == 16 && %2 == 8
2712 movhps [dst8q+mstrideq*2], m1
2713 movhps [dst8q+mstrideq ], m2
2717 movhps [dst8q+ strideq ], m5
2718 movhps [dst8q+ strideq*2], m6
; --- horizontal-edge writeback: transpose the six modified columns back
; to rows and scatter them with the WRITE_* helpers ---
2725 TRANSPOSE4x4B 1, 2, 3, 4, 0
2726 SBUTTERFLY bw, 5, 6, 0
2728 %if mmsize == 8 ; mmx/mmxext (h)
2729 WRITE_4x2D 1, 2, 3, 4, dst1q, dst2q, mstrideq, strideq
2731 WRITE_2x4W m5, m6, dst2q, dst1q, mstrideq, strideq
2733 lea dst8q, [dst8q+mstrideq+1]
2734 WRITE_4x4D 1, 2, 3, 4, dst1q, dst2q, dst8q, mstrideq, strideq, %2
2735 lea dst1q, [dst2q+mstrideq+4]
2736 lea dst8q, [dst8q+mstrideq+4]
2740 WRITE_8W m5, dst2q, dst1q, mstrideq, strideq
2742 lea dst2q, [dst8q+ strideq ]
2744 WRITE_8W m6, dst2q, dst8q, mstrideq, strideq
2749 %if %2 == 8 ; chroma
; Advance 8 rows; -5 undoes the column offset applied at function entry.
2758 lea dst1q, [dst1q+ strideq*8-5]
2767 %ifndef m8 ; sse2 on x86-32 or mmx/mmxext
; Instantiate the macroblock-edge loop filter for each direction (v/h)
; and size (16 = luma, 8 = chroma U/V pair).  The per-ISA INIT_*
; directives between groups are in the elided lines; the final pair
; (h-only) belongs to an ISA group that only overrides the horizontal
; variants.
2775 MBEDGE_LOOPFILTER v, 16
2776 MBEDGE_LOOPFILTER h, 16
2777 MBEDGE_LOOPFILTER v, 8
2778 MBEDGE_LOOPFILTER h, 8
2781 MBEDGE_LOOPFILTER v, 16
2782 MBEDGE_LOOPFILTER h, 16
2783 MBEDGE_LOOPFILTER v, 8
2784 MBEDGE_LOOPFILTER h, 8
2788 MBEDGE_LOOPFILTER v, 16
2789 MBEDGE_LOOPFILTER h, 16
2790 MBEDGE_LOOPFILTER v, 8
2791 MBEDGE_LOOPFILTER h, 8
2794 MBEDGE_LOOPFILTER v, 16
2795 MBEDGE_LOOPFILTER h, 16
2796 MBEDGE_LOOPFILTER v, 8
2797 MBEDGE_LOOPFILTER h, 8
2800 MBEDGE_LOOPFILTER h, 16
2801 MBEDGE_LOOPFILTER h, 8