;******************************************************************************
;* VP8 MMXEXT optimizations
;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
;* Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com>
;* This file is part of Libav.
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86util.asm"
fourtap_filter_hw_m: times 4 dw -6, 123
sixtap_filter_hw_m: times 4 dw 2, -11
fourtap_filter_hb_m: times 8 db -6, 123
sixtap_filter_hb_m: times 8 db 2, 1
fourtap_filter_v_m: times 8 dw -6
sixtap_filter_v_m: times 8 dw 2
bilinear_filter_vw_m: times 8 dw 1
bilinear_filter_vb_m: times 8 db 7, 1
%define fourtap_filter_hw picregq
%define sixtap_filter_hw picregq
%define fourtap_filter_hb picregq
%define sixtap_filter_hb picregq
%define fourtap_filter_v picregq
%define sixtap_filter_v picregq
%define bilinear_filter_vw picregq
%define bilinear_filter_vb picregq
%define fourtap_filter_hw fourtap_filter_hw_m
%define sixtap_filter_hw sixtap_filter_hw_m
%define fourtap_filter_hb fourtap_filter_hb_m
%define sixtap_filter_hb sixtap_filter_hb_m
%define fourtap_filter_v fourtap_filter_v_m
%define sixtap_filter_v sixtap_filter_v_m
%define bilinear_filter_vw bilinear_filter_vw_m
%define bilinear_filter_vb bilinear_filter_vb_m
filter_h2_shuf: db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
filter_h4_shuf: db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11
pw_256: times 8 dw 256
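; (pw_256 pairs with pmulhrsw: pmulhrsw(x, 256) = ((x*256 >> 14) + 1) >> 1
;  = (x + 64) >> 7, i.e. the VP8 filter rounding (add 64, shift right by 7)
;  in a single instruction.)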
pw_20091: times 4 dw 20091
pw_17734: times 4 dw 17734
pb_27_63: times 8 db 27, 63
pb_18_63: times 8 db 18, 63
pb_9_63: times 8 db 9, 63
;-----------------------------------------------------------------------------
; subpel MC functions:
; void put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, int deststride,
; uint8_t *src, int srcstride,
; int height, int mx, int my);
;-----------------------------------------------------------------------------
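
; As a rough scalar reference of what these routines compute (a sketch, not
; part of the build; assumes the 6-tap coefficient table from the VP8 spec
; and an av_clip_uint8-style helper), the horizontal 6-tap case is:
;
;   static void epel_h6_ref(uint8_t *dst, int dststride,
;                           const uint8_t *src, int srcstride,
;                           int width, int height, const int8_t filter[6])
;   {
;       for (int y = 0; y < height; y++) {
;           for (int x = 0; x < width; x++) {
;               int sum = 0;
;               for (int t = 0; t < 6; t++)
;                   sum += filter[t] * src[x + t - 2];
;               dst[x] = av_clip_uint8((sum + 64) >> 7);   // round, clip
;           }
;           dst += dststride;
;           src += srcstride;
;       }
;   }
;
; The 4-tap variants drop the two outermost taps, and the v variants step
; through the source vertically (by srcstride) instead of horizontally.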
%macro FILTER_SSSE3 1
cglobal put_vp8_epel%1_h6, 6, 6 + npicregs, 8, dst, dststride, src, srcstride, height, mx, picreg
mova m3, [filter_h6_shuf2]
mova m4, [filter_h6_shuf3]
lea picregq, [sixtap_filter_hb_m]
mova m5, [sixtap_filter_hb+mxq*8-48] ; set up 6tap filter in bytes
mova m6, [sixtap_filter_hb+mxq*8-32]
mova m7, [sixtap_filter_hb+mxq*8-16]
; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
; shuffle with a memory operand
punpcklbw m0, [srcq+3]
pshufb m0, [filter_h6_shuf1]
pmulhrsw m0, [pw_256]
movh [dstq], m0 ; store
dec heightd ; next row
cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
mova m3, [filter_h2_shuf]
mova m4, [filter_h4_shuf]
lea picregq, [fourtap_filter_hb_m]
mova m5, [fourtap_filter_hb+mxq-16] ; set up 4tap filter in bytes
mova m6, [fourtap_filter_hb+mxq]
movh [dstq], m0 ; store
dec heightd ; next row
cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
lea picregq, [fourtap_filter_hb_m]
mova m5, [fourtap_filter_hb+myq-16]
mova m6, [fourtap_filter_hb+myq]
movh m1, [srcq+ srcstrideq]
movh m2, [srcq+2*srcstrideq]
movh m3, [srcq+2*srcstrideq] ; read new row
dec heightd ; next row
cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
lea picregq, [sixtap_filter_hb_m]
lea myq, [sixtap_filter_hb+myq*8]
movh m1, [srcq+srcstrideq]
movh m2, [srcq+srcstrideq*2]
lea srcq, [srcq+srcstrideq*2]
movh m4, [srcq+srcstrideq]
movh m5, [srcq+2*srcstrideq] ; read new row
pmaddubsw m6, [myq-48]
pmaddubsw m1, [myq-32]
pmaddubsw m7, [myq-16]
pmulhrsw m6, [pw_256]
dec heightd ; next row
; 4x4 block, H-only 4-tap filter
cglobal put_vp8_epel4_h4, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
lea picregq, [fourtap_filter_hw_m]
movq mm4, [fourtap_filter_hw+mxq-16] ; set up 4tap filter in words
movq mm5, [fourtap_filter_hw+mxq]
movq mm1, [srcq-1] ; (ABCDEFGH) load 8 horizontal pixels
; first set of 2 pixels
movq mm2, mm1 ; byte ABCD..
punpcklbw mm1, mm6 ; byte->word ABCD
pshufw mm0, mm2, 9 ; byte CDEF..
punpcklbw mm0, mm6 ; byte->word CDEF
pshufw mm3, mm1, 0x94 ; word ABBC
pshufw mm1, mm0, 0x94 ; word CDDE
pmaddwd mm3, mm4 ; multiply 2px with F0/F1
movq mm0, mm1 ; backup for second set of pixels
pmaddwd mm1, mm5 ; multiply 2px with F2/F3
paddd mm3, mm1 ; finish 1st 2px
; second set of 2 pixels, use backup of above
punpckhbw mm2, mm6 ; byte->word EFGH
pmaddwd mm0, mm4 ; multiply backed up 2px with F0/F1
pshufw mm1, mm2, 0x94 ; word EFFG
pmaddwd mm1, mm5 ; multiply 2px with F2/F3
paddd mm0, mm1 ; finish 2nd 2px
; merge two sets of 2 pixels into one set of 4, round/clip/store
packssdw mm3, mm0 ; merge dword->word (4px)
paddsw mm3, mm7 ; rounding
packuswb mm3, mm6 ; clip and word->bytes
movd [dstq], mm3 ; store
dec heightd ; next row
; 4x4 block, H-only 6-tap filter
cglobal put_vp8_epel4_h6, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
lea picregq, [sixtap_filter_hw_m]
movq mm4, [sixtap_filter_hw+mxq*8-48] ; set up 6tap filter in words
movq mm5, [sixtap_filter_hw+mxq*8-32]
movq mm6, [sixtap_filter_hw+mxq*8-16]
movq mm1, [srcq-2] ; (ABCDEFGH) load 8 horizontal pixels
; first set of 2 pixels
movq mm2, mm1 ; byte ABCD..
punpcklbw mm1, mm3 ; byte->word ABCD
pshufw mm0, mm2, 0x9 ; byte CDEF..
punpckhbw mm2, mm3 ; byte->word EFGH
punpcklbw mm0, mm3 ; byte->word CDEF
pshufw mm1, mm1, 0x94 ; word ABBC
pshufw mm2, mm2, 0x94 ; word EFFG
pmaddwd mm1, mm4 ; multiply 2px with F0/F1
pshufw mm3, mm0, 0x94 ; word CDDE
movq mm0, mm3 ; backup for second set of pixels
pmaddwd mm3, mm5 ; multiply 2px with F2/F3
paddd mm1, mm3 ; add to 1st 2px cache
movq mm3, mm2 ; backup for second set of pixels
pmaddwd mm2, mm6 ; multiply 2px with F4/F5
paddd mm1, mm2 ; finish 1st 2px
; second set of 2 pixels, use backup of above
movd mm2, [srcq+3] ; byte FGHI (prevent overreads)
pmaddwd mm0, mm4 ; multiply 1st backed up 2px with F0/F1
pmaddwd mm3, mm5 ; multiply 2nd backed up 2px with F2/F3
paddd mm0, mm3 ; add to 2nd 2px cache
punpcklbw mm2, mm3 ; byte->word FGHI
pshufw mm2, mm2, 0xE9 ; word GHHI
pmaddwd mm2, mm6 ; multiply 2px with F4/F5
paddd mm0, mm2 ; finish 2nd 2px
; merge two sets of 2 pixels into one set of 4, round/clip/store
packssdw mm1, mm0 ; merge dword->word (4px)
paddsw mm1, mm7 ; rounding
packuswb mm1, mm3 ; clip and word->bytes
movd [dstq], mm1 ; store
dec heightd ; next row
cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, height, mx, picreg
lea picregq, [fourtap_filter_v_m]
lea mxq, [fourtap_filter_v+mxq-32]
movh [dstq], m0 ; store
dec heightd ; next row
cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, height, mx, picreg
lea picregq, [sixtap_filter_v_m]
lea mxq, [sixtap_filter_v+mxq-96]
movh [dstq], m0 ; store
dec heightd ; next row
; 4x4 block, V-only 4-tap filter
cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
lea picregq, [fourtap_filter_v_m]
lea myq, [fourtap_filter_v+myq-32]
movh m1, [srcq+ srcstrideq]
movh m2, [srcq+2*srcstrideq]
; first calculate negative taps (to prevent losing positive overflows)
movh m4, [srcq+2*srcstrideq] ; read new row
; then calculate positive taps
dec heightd ; next row
; 4x4 block, V-only 6-tap filter
cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
lea picregq, [sixtap_filter_v_m]
lea myq, [sixtap_filter_v+myq-96]
movh m1, [srcq+srcstrideq]
movh m2, [srcq+srcstrideq*2]
lea srcq, [srcq+srcstrideq*2]
movh m4, [srcq+srcstrideq]
; first calculate negative taps (to prevent losing positive overflows)
; then calculate positive taps
movh m5, [srcq+2*srcstrideq] ; read new row
dec heightd ; next row
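
; The bilinear MC functions below blend two neighbouring rows (v) or columns
; (h) according to the fractional position. As a scalar sketch of the intent
; (not part of the build), with frac = my or mx in 1..7 and a/b the two
; source pixels being interpolated between:
;
;   out = (a * (8 - frac) + b * frac + 4) >> 3;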
%macro FILTER_BILINEAR 1
cglobal put_vp8_bilinear%1_v, 7, 7, 7, dst, dststride, src, srcstride, height, picreg, my
lea picregq, [bilinear_filter_vw_m]
mova m5, [bilinear_filter_vw+myq-1*16]
mova m4, [bilinear_filter_vw+myq+7*16]
movh m0, [srcq+srcstrideq*0]
movh m1, [srcq+srcstrideq*1]
movh m3, [srcq+srcstrideq*2]
movh [dstq+dststrideq*0], m0
movh [dstq+dststrideq*1], m2
movh [dstq+dststrideq*0], m0
movhps [dstq+dststrideq*1], m0
lea dstq, [dstq+dststrideq*2]
lea srcq, [srcq+srcstrideq*2]
cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
lea picregq, [bilinear_filter_vw_m]
mova m5, [bilinear_filter_vw+mxq-1*16]
mova m4, [bilinear_filter_vw+mxq+7*16]
movh m0, [srcq+srcstrideq*0+0]
movh m1, [srcq+srcstrideq*0+1]
movh m2, [srcq+srcstrideq*1+0]
movh m3, [srcq+srcstrideq*1+1]
movh [dstq+dststrideq*0], m0
movh [dstq+dststrideq*1], m2
movh [dstq+dststrideq*0], m0
movhps [dstq+dststrideq*1], m0
lea dstq, [dstq+dststrideq*2]
lea srcq, [srcq+srcstrideq*2]
%macro FILTER_BILINEAR_SSSE3 1
cglobal put_vp8_bilinear%1_v, 7, 7, 5, dst, dststride, src, srcstride, height, picreg, my
lea picregq, [bilinear_filter_vb_m]
mova m3, [bilinear_filter_vb+myq-16]
movh m0, [srcq+srcstrideq*0]
movh m1, [srcq+srcstrideq*1]
movh m2, [srcq+srcstrideq*2]
movh [dstq+dststrideq*0], m0
movh [dstq+dststrideq*1], m1
movh [dstq+dststrideq*0], m0
movhps [dstq+dststrideq*1], m0
lea dstq, [dstq+dststrideq*2]
lea srcq, [srcq+srcstrideq*2]
cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 5, dst, dststride, src, srcstride, height, mx, picreg
lea picregq, [bilinear_filter_vb_m]
mova m2, [filter_h2_shuf]
mova m3, [bilinear_filter_vb+mxq-16]
movu m0, [srcq+srcstrideq*0]
movu m1, [srcq+srcstrideq*1]
movh [dstq+dststrideq*0], m0
movh [dstq+dststrideq*1], m1
movh [dstq+dststrideq*0], m0
movhps [dstq+dststrideq*1], m0
lea dstq, [dstq+dststrideq*2]
lea srcq, [srcq+srcstrideq*2]
FILTER_BILINEAR_SSSE3 4
FILTER_BILINEAR_SSSE3 8
cglobal put_vp8_pixels8, 5, 5, 0, dst, dststride, src, srcstride, height
movq mm0, [srcq+srcstrideq*0]
movq mm1, [srcq+srcstrideq*1]
lea srcq, [srcq+srcstrideq*2]
movq [dstq+dststrideq*0], mm0
movq [dstq+dststrideq*1], mm1
lea dstq, [dstq+dststrideq*2]
cglobal put_vp8_pixels16, 5, 5, 0, dst, dststride, src, srcstride, height
movq mm0, [srcq+srcstrideq*0+0]
movq mm1, [srcq+srcstrideq*0+8]
movq mm2, [srcq+srcstrideq*1+0]
movq mm3, [srcq+srcstrideq*1+8]
lea srcq, [srcq+srcstrideq*2]
movq [dstq+dststrideq*0+0], mm0
movq [dstq+dststrideq*0+8], mm1
movq [dstq+dststrideq*1+0], mm2
movq [dstq+dststrideq*1+8], mm3
lea dstq, [dstq+dststrideq*2]
cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
movups xmm0, [srcq+srcstrideq*0]
movups xmm1, [srcq+srcstrideq*1]
lea srcq, [srcq+srcstrideq*2]
movaps [dstq+dststrideq*0], xmm0
movaps [dstq+dststrideq*1], xmm1
lea dstq, [dstq+dststrideq*2]
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------
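
; Scalar sketch of the DC-only add (illustrative only; assumes an
; av_clip_uint8-style helper; the DC coefficient is consumed and cleared):
;
;   static void idct_dc_add_ref(uint8_t *dst, int16_t block[16], int stride)
;   {
;       int dc = (block[0] + 4) >> 3;
;       block[0] = 0;
;       for (int y = 0; y < 4; y++, dst += stride)
;           for (int x = 0; x < 4; x++)
;               dst[x] = av_clip_uint8(dst[x] + dc);
;   }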
%4 m3, [dst1q+strideq+%3]
%4 m5, [dst2q+strideq+%3]
%4 [dst1q+strideq+%3], m3
%4 [dst2q+strideq+%3], m5
cglobal vp8_idct_dc_add, 3, 3, 0, dst, block, stride
DEFINE_ARGS dst1, dst2, stride
lea dst2q, [dst1q+strideq*2]
ADD_DC m0, m1, 0, movh
cglobal vp8_idct_dc_add, 3, 3, 6, dst, block, stride
DEFINE_ARGS dst1, dst2, stride
lea dst2q, [dst1q+strideq*2]
movd m3, [dst1q+strideq]
movd m5, [dst2q+strideq]
pextrd [dst1q+strideq], m2, 1
pextrd [dst2q], m2, 2
pextrd [dst2q+strideq], m2, 3
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------
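
; Same operation as vp8_idct_dc_add, applied to four horizontally adjacent
; 4x4 blocks along a luma edge: block n goes to dst+4*n using block[n]
; (hence the blockq+32*n loads below, 16 words = 32 bytes per block).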
cglobal vp8_idct_dc_add4y, 3, 3, 0, dst, block, stride
movd m0, [blockq+32*0] ; A
movd m1, [blockq+32*2] ; C
punpcklwd m0, [blockq+32*1] ; A B
punpcklwd m1, [blockq+32*3] ; C D
punpckldq m0, m1 ; A B C D
movd [blockq+32*0], m6
movd [blockq+32*1], m6
movd [blockq+32*2], m6
movd [blockq+32*3], m6
punpcklbw m0, m0 ; AABBCCDD
punpcklbw m6, m6 ; AABBCCDD
punpcklbw m0, m0 ; AAAABBBB
punpckhbw m1, m1 ; CCCCDDDD
punpcklbw m6, m6 ; AAAABBBB
punpckhbw m7, m7 ; CCCCDDDD
DEFINE_ARGS dst1, dst2, stride
lea dst2q, [dst1q+strideq*2]
ADD_DC m0, m6, 0, mova
ADD_DC m1, m7, 8, mova
cglobal vp8_idct_dc_add4y, 3, 3, 6, dst, block, stride
movd m0, [blockq+32*0] ; A
movd m1, [blockq+32*2] ; C
punpcklwd m0, [blockq+32*1] ; A B
punpcklwd m1, [blockq+32*3] ; C D
punpckldq m0, m1 ; A B C D
movd [blockq+32*0], m1
movd [blockq+32*1], m1
movd [blockq+32*2], m1
movd [blockq+32*3], m1
DEFINE_ARGS dst1, dst2, stride
lea dst2q, [dst1q+strideq*2]
ADD_DC m0, m1, 0, mova
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4uv_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------
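
; Chroma counterpart of vp8_idct_dc_add4y: the four DC-only blocks cover a
; 2x2 arrangement of 4x4 blocks, so after the first pair the destination
; pointers advance by four lines (see the strideq*4 adjustment below).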
cglobal vp8_idct_dc_add4uv, 3, 3, 0, dst, block, stride
movd m0, [blockq+32*0] ; A
movd m1, [blockq+32*2] ; C
punpcklwd m0, [blockq+32*1] ; A B
punpcklwd m1, [blockq+32*3] ; C D
punpckldq m0, m1 ; A B C D
movd [blockq+32*0], m6
movd [blockq+32*1], m6
movd [blockq+32*2], m6
movd [blockq+32*3], m6
punpcklbw m0, m0 ; AABBCCDD
punpcklbw m6, m6 ; AABBCCDD
punpcklbw m0, m0 ; AAAABBBB
punpckhbw m1, m1 ; CCCCDDDD
punpcklbw m6, m6 ; AAAABBBB
punpckhbw m7, m7 ; CCCCDDDD
DEFINE_ARGS dst1, dst2, stride
lea dst2q, [dst1q+strideq*2]
ADD_DC m0, m6, 0, mova
lea dst1q, [dst1q+strideq*4]
lea dst2q, [dst2q+strideq*4]
ADD_DC m1, m7, 0, mova
;-----------------------------------------------------------------------------
; void vp8_idct_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------
; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
; this macro assumes that m6/m7 have words for 20091/17734 loaded
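; (35468 does not fit in a signed 16-bit word, so the code works with
;  17734 = 35468/2 plus an extra doubling, so that the pmulhw against
;  pw_17734 effectively yields (x*35468) >> 16.)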
%macro VP8_MULTIPLY_SUMSUB 4
pmulhw %3, m6 ;20091(1)
pmulhw %4, m6 ;20091(2)
pmulhw %1, m7 ;35468(1)
pmulhw %2, m7 ;35468(2)
; calculate x0=%1+%3; x1=%1-%3
; x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
; %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
; %5/%6 are temporary registers
; we assume m6/m7 have constant words 20091/17734 loaded in them
%macro VP8_IDCT_TRANSFORM4x4_1D 6
SUMSUB_BA w, %3, %1, %5 ;t0, t1
VP8_MULTIPLY_SUMSUB m%2, m%4, m%5,m%6 ;t2, t3
SUMSUB_BA w, %4, %3, %5 ;tmp0, tmp3
SUMSUB_BA w, %2, %1, %5 ;tmp1, tmp2
%macro VP8_IDCT_ADD 0
cglobal vp8_idct_add, 3, 3, 0, dst, block, stride
movq m0, [blockq+ 0]
movq m1, [blockq+ 8]
movq m2, [blockq+16]
movq m3, [blockq+24]
movaps [blockq+ 0], xmm0
movaps [blockq+16], xmm0
movq [blockq+ 0], m4
movq [blockq+ 8], m4
movq [blockq+16], m4
movq [blockq+24], m4
VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
TRANSPOSE4x4W 0, 1, 2, 3, 4
VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
TRANSPOSE4x4W 0, 1, 2, 3, 4
DEFINE_ARGS dst1, dst2, stride
lea dst2q, [dst1q+2*strideq]
STORE_DIFFx2 m0, m1, m6, m7, m4, 3, dst1q, strideq
STORE_DIFFx2 m2, m3, m6, m7, m4, 3, dst2q, strideq
;-----------------------------------------------------------------------------
; void vp8_luma_dc_wht_mmxext(DCTELEM block[4][4][16], DCTELEM dc[16])
;-----------------------------------------------------------------------------
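
; Scalar sketch of the inverse WHT (illustrative only, following the usual
; reference ordering; only the DC of each 4x4 sub-block is written and the
; dc[] input is cleared):
;
;   static void luma_dc_wht_ref(int16_t block[4][4][16], int16_t dc[16])
;   {
;       int t0, t1, t2, t3;
;       for (int i = 0; i < 4; i++) {              // vertical butterflies
;           t0 = dc[0*4 + i] + dc[3*4 + i];
;           t1 = dc[1*4 + i] + dc[2*4 + i];
;           t2 = dc[1*4 + i] - dc[2*4 + i];
;           t3 = dc[0*4 + i] - dc[3*4 + i];
;           dc[0*4 + i] = t0 + t1;  dc[1*4 + i] = t3 + t2;
;           dc[2*4 + i] = t0 - t1;  dc[3*4 + i] = t3 - t2;
;       }
;       for (int i = 0; i < 4; i++) {              // horizontal + scatter
;           t0 = dc[i*4 + 0] + dc[i*4 + 3] + 3;    // +3 for rounding
;           t1 = dc[i*4 + 1] + dc[i*4 + 2];
;           t2 = dc[i*4 + 1] - dc[i*4 + 2];
;           t3 = dc[i*4 + 0] - dc[i*4 + 3] + 3;
;           dc[i*4 + 0] = dc[i*4 + 1] = dc[i*4 + 2] = dc[i*4 + 3] = 0;
;           block[i][0][0] = (t0 + t1) >> 3;  block[i][1][0] = (t3 + t2) >> 3;
;           block[i][2][0] = (t0 - t1) >> 3;  block[i][3][0] = (t3 - t2) >> 3;
;       }
;   }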
%macro SCATTER_WHT 3
mov [blockq+2*16*(0+%3)], dc1w
mov [blockq+2*16*(1+%3)], dc2w
mov [blockq+2*16*(4+%3)], dc1w
mov [blockq+2*16*(5+%3)], dc2w
mov [blockq+2*16*(8+%3)], dc1w
mov [blockq+2*16*(9+%3)], dc2w
mov [blockq+2*16*(12+%3)], dc1w
mov [blockq+2*16*(13+%3)], dc2w
%macro HADAMARD4_1D 4
SUMSUB_BADC w, %2, %1, %4, %3
SUMSUB_BADC w, %4, %2, %3, %1
cglobal vp8_luma_dc_wht, 2, 3, 0, block, dc1, dc2
movaps [dc1q+ 0], xmm0
movaps [dc1q+16], xmm0
HADAMARD4_1D 0, 1, 2, 3
TRANSPOSE4x4W 0, 1, 2, 3, 4
HADAMARD4_1D 0, 1, 2, 3
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter_simple_<opt>(uint8_t *dst, int stride, int flim);
;-----------------------------------------------------------------------------
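
; Per-pixel math of the simple filter (a sketch of intent; the SIMD below
; works on bytes with saturating ops and handles the signed/unsigned biasing
; itself):
;
;   mask = 2*|p0-q0| + |p1-q1|/2 <= flim
;   a    = clamp(p1 - q1) + 3*(q0 - p0)            (signed, saturating)
;   f1   = clamp(a + 4) >> 3,  f2 = clamp(a + 3) >> 3
;   q0  -= f1,  p0 += f2                            (only where mask is set)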
; macro called with 7 mm register indexes as arguments, and 4 regular registers
; the first 4 mm registers will carry the transposed pixel data
; the other three are scratch space (one would be sufficient, but this allows
; for more spreading/pipelining and thus faster execution on out-of-order CPUs)
; the first two regular registers are buf+4*stride and buf+5*stride
; the third is -stride, the fourth is +stride
%macro READ_8x4_INTERLEAVED 11
; interleave 8 (A-H) rows of 4 pixels each
movd m%1, [%8+%10*4] ; A0-3
movd m%5, [%9+%10*4] ; B0-3
movd m%2, [%8+%10*2] ; C0-3
movd m%6, [%8+%10] ; D0-3
movd m%3, [%8] ; E0-3
movd m%7, [%9] ; F0-3
movd m%4, [%9+%11] ; G0-3
punpcklbw m%1, m%5 ; A/B interleaved
movd m%5, [%9+%11*2] ; H0-3
punpcklbw m%2, m%6 ; C/D interleaved
punpcklbw m%3, m%7 ; E/F interleaved
punpcklbw m%4, m%5 ; G/H interleaved
; macro called with 7 mm register indexes as arguments, and 5 regular registers
; the first 11 arguments mean the same as for READ_8x4_INTERLEAVED above
; the fifth regular register is scratch space used to reach the bottom 8 rows; it
; will be set to the second regular register + 8*stride at the end
%macro READ_16x4_INTERLEAVED 12
; transpose 16 (A-P) rows of 4 pixels each
; read (and interleave) those addressable by %8 (=r0), A/C/D/E/I/K/L/M
movd m%1, [%8+%10*4] ; A0-3
movd m%3, [%12+%10*4] ; I0-3
movd m%2, [%8+%10*2] ; C0-3
movd m%4, [%12+%10*2] ; K0-3
movd m%6, [%8+%10] ; D0-3
movd m%5, [%12+%10] ; L0-3
movd m%7, [%12] ; M0-3
punpcklbw m%1, m%3 ; A/I
movd m%3, [%8] ; E0-3
punpcklbw m%2, m%4 ; C/K
punpcklbw m%6, m%5 ; D/L
punpcklbw m%3, m%7 ; E/M
punpcklbw m%2, m%6 ; C/D/K/L interleaved
; read (and interleave) those addressable by %9 (=r4), B/F/G/H/J/N/O/P
movd m%5, [%9+%10*4] ; B0-3
movd m%4, [%12+%10*4] ; J0-3
movd m%7, [%9] ; F0-3
movd m%6, [%12] ; N0-3
punpcklbw m%5, m%4 ; B/J
punpcklbw m%7, m%6 ; F/N
punpcklbw m%1, m%5 ; A/B/I/J interleaved
punpcklbw m%3, m%7 ; E/F/M/N interleaved
movd m%4, [%9+%11] ; G0-3
movd m%6, [%12+%11] ; O0-3
movd m%5, [%9+%11*2] ; H0-3
movd m%7, [%12+%11*2] ; P0-3
punpcklbw m%4, m%6 ; G/O
punpcklbw m%5, m%7 ; H/P
punpcklbw m%4, m%5 ; G/H/O/P interleaved
; write 4 mm registers of 2 dwords each
; first four arguments are mm register indexes containing source data
; last four are registers containing buf+4*stride, buf+5*stride,
; -stride and +stride
; write out (2 dwords per register)
; write 4 xmm registers of 4 dwords each
; arguments same as WRITE_4x2D, but with an extra register, so that the 5 regular
; registers contain buf+4*stride, buf+5*stride, buf+12*stride, -stride and +stride
; we add 1*stride to the third regular register in the process
; the 10th argument is 16 if it's a Y filter (i.e. all regular registers cover the
; same memory region), or 8 if they cover two separate buffers (the third one points
; to a different memory region than the first two), allowing for more optimal code
%macro WRITE_4x4D 10
; write out (4 dwords per register), start with dwords zero
; write 4 or 8 words in the mmx/xmm registers as 8 lines
; 1 and 2 are the registers to write, this can be the same (for SSE2)
; without SSE4, 3 is a general-purpose register that we will clobber
; with SSE4, 3 is a pointer to the destination's 5th line and
; 4 is a pointer to the destination's 4th line
; 5/6 is -stride and +stride
pextrw [%3+%4*4], %1, 0
pextrw [%2+%4*4], %1, 1
pextrw [%3+%4*2], %1, 2
pextrw [%3+%4 ], %1, 3
pextrw [%2+%5 ], %1, 6
pextrw [%2+%5*2], %1, 7
%macro SIMPLE_LOOPFILTER 2
cglobal vp8_%1_loop_filter_simple, 3, %2, 8, dst, stride, flim, cntr
%if mmsize == 8 ; mmx/mmxext
SPLATB_REG m7, flim, m0 ; splat "flim" into register
; set up indexes to address 4 rows
DEFINE_ARGS dst1, mstride, stride, cntr, dst2
DEFINE_ARGS dst1, mstride, stride, dst3, dst2
mov strideq, mstrideq
lea dst1q, [dst1q+4*strideq-2]
%if mmsize == 8 ; mmx / mmxext
; read 4 half/full rows of pixels
mova m0, [dst1q+mstrideq*2] ; p1
mova m1, [dst1q+mstrideq] ; p0
mova m2, [dst1q] ; q0
mova m3, [dst1q+ strideq] ; q1
lea dst2q, [dst1q+ strideq]
%if mmsize == 8 ; mmx/mmxext
READ_8x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq
READ_16x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq, dst3q
TRANSPOSE4x4W 0, 1, 2, 3, 4
mova m5, m2 ; m5=backup of q0
mova m6, m1 ; m6=backup of p0
psubusb m1, m2 ; p0-q0
psubusb m2, m6 ; q0-p0
por m1, m2 ; FFABS(p0-q0)
paddusb m1, m1 ; m1=FFABS(p0-q0)*2
psubusb m3, m0 ; q1-p1
psubusb m0, m4 ; p1-q1
por m3, m0 ; FFABS(p1-q1)
psubsb m2, m4 ; m2=p1-q1 (signed) backup for below
psrlq m3, 1 ; m3=FFABS(p1-q1)/2, this can be used signed
pcmpeqb m3, m1 ; abs(p0-q0)*2+abs(p1-q1)/2<=flim mask(0xff/0x0)
; filter_common (use m2/p1-q1, m4=q0, m6=p0, m5/q0-p0 and m3/mask)
psubsb m5, m0 ; q0-p0 (signed)
paddsb m2, m5 ; a=(p1-q1) + 3*(q0-p0)
pand m2, m3 ; apply filter mask (m3)
paddsb m2, [pb_4] ; f1<<3=a+4
paddsb m1, [pb_3] ; f2<<3=a+3
pand m1, m3 ; cache f2<<3
pcmpgtb m0, m2 ; which values are <0?
psubb m3, m2 ; -f1<<3
paddusb m4, m3 ; q0-f1
pcmpgtb m0, m1 ; which values are <0?
psubb m3, m1 ; -f2<<3
psubusb m6, m3 ; p0+f2
mova [dst1q+mstrideq], m6
SBUTTERFLY bw, 6, 4, 0
%if mmsize == 16 ; sse2
WRITE_8W m6, dst2q, dst1q, mstrideq, strideq
lea dst2q, [dst3q+mstrideq+1]
WRITE_8W m4, dst3q, dst2q, mstrideq, strideq
WRITE_2x4W m6, m4, dst2q, dst1q, mstrideq, strideq
%if mmsize == 8 ; mmx/mmxext
add dst1q, 8 ; advance 8 cols = pixels
lea dst1q, [dst1q+strideq*8-1] ; advance 8 rows = lines
SIMPLE_LOOPFILTER v, 4
SIMPLE_LOOPFILTER h, 5
SIMPLE_LOOPFILTER v, 4
SIMPLE_LOOPFILTER h, 5
SIMPLE_LOOPFILTER v, 3
SIMPLE_LOOPFILTER h, 5
SIMPLE_LOOPFILTER v, 3
SIMPLE_LOOPFILTER h, 5
SIMPLE_LOOPFILTER h, 5
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter<size>_inner_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
; int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------
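
; Outline of the per-edge decision logic below (a sketch of the standard VP8
; normal filter, matching the comments in the code):
;
;   filter_mask = max(|p3-p2|, |p2-p1|, |p1-p0|, |q1-q0|, |q2-q1|, |q3-q2|) <= I
;                 && 2*|p0-q0| + |p1-q1|/2 <= E
;   hev         = |p1-p0| > hev_thr || |q1-q0| > hev_thr
;
; Where hev is set, only p0/q0 are adjusted with the common 4-tap filter (as
; in the simple filter); elsewhere p1/q1 are also adjusted with a smaller
; correction.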
%macro INNER_LOOPFILTER 2
%if %2 == 8 ; chroma
cglobal vp8_%1_loop_filter8uv_inner, 6, 6, 13, dst, dst8, stride, flimE, flimI, hevthr
cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, dst, stride, flimE, flimI, hevthr
%ifndef m8 ; stack layout: [0]=E, [1]=I, [2]=hev_thr
%ifidn %1, v ; [3]=hev() result
%assign pad 16 + mmsize * 4 - gprsize - (stack_offset & 15)
%else ; h ; extra storage space for transposes
%assign pad 16 + mmsize * 5 - gprsize - (stack_offset & 15)
; splat function arguments
SPLATB_REG m0, flimEq, m7 ; E
SPLATB_REG m1, flimIq, m7 ; I
SPLATB_REG m2, hevthrq, m7 ; hev_thresh
%define m_flimE [rsp]
%define m_flimI [rsp+mmsize]
%define m_hevthr [rsp+mmsize*2]
%define m_maskres [rsp+mmsize*3]
%define m_p0backup [rsp+mmsize*3]
%define m_q0backup [rsp+mmsize*4]
%define m_hevthr m11
%define m_maskres m12
%define m_p0backup m12
%define m_q0backup m8
; splat function arguments
SPLATB_REG m_flimE, flimEq, m7 ; E
SPLATB_REG m_flimI, flimIq, m7 ; I
SPLATB_REG m_hevthr, hevthrq, m7 ; hev_thresh
%if %2 == 8 ; chroma
DEFINE_ARGS dst1, dst8, mstride, stride, dst2
DEFINE_ARGS dst1, mstride, stride, dst2, cntr
DEFINE_ARGS dst1, mstride, stride, dst2, dst8
mov strideq, mstrideq
lea dst1q, [dst1q+strideq*4-4]
%if %2 == 8 ; chroma
lea dst8q, [dst8q+strideq*4-4]
lea dst2q, [dst1q+strideq]
%if %2 == 8 && mmsize == 16
movrow m0, [dst1q+mstrideq*4] ; p3
movrow m1, [dst2q+mstrideq*4] ; p2
movrow m2, [dst1q+mstrideq*2] ; p1
movrow m5, [dst2q] ; q1
movrow m6, [dst2q+ strideq*1] ; q2
movrow m7, [dst2q+ strideq*2] ; q3
%if mmsize == 16 && %2 == 8
movhps m0, [dst8q+mstrideq*4]
movhps m2, [dst8q+mstrideq*2]
movhps m1, [dst8q+mstrideq*4]
movhps m6, [dst8q+ strideq ]
movhps m7, [dst8q+ strideq*2]
%elif mmsize == 8 ; mmx/mmxext (h)
; read 8 rows of 8px each
movu m0, [dst1q+mstrideq*4]
movu m1, [dst2q+mstrideq*4]
movu m2, [dst1q+mstrideq*2]
movu m3, [dst1q+mstrideq ]
movu m6, [dst2q+ strideq ]
TRANSPOSE4x4B 0, 1, 2, 3, 7
movu m7, [dst2q+ strideq*2]
TRANSPOSE4x4B 4, 5, 6, 7, 1
SBUTTERFLY dq, 0, 4, 1 ; p3/p2
SBUTTERFLY dq, 2, 6, 1 ; q0/q1
SBUTTERFLY dq, 3, 7, 1 ; q2/q3
mova m_q0backup, m2 ; store q0
SBUTTERFLY dq, 1, 5, 2 ; p1/p0
mova m_p0backup, m5 ; store p0
lea dst8q, [dst1q+ strideq*8]
; read 16 rows of 8px each, interleave
movh m0, [dst1q+mstrideq*4]
movh m1, [dst8q+mstrideq*4]
movh m2, [dst1q+mstrideq*2]
movh m5, [dst8q+mstrideq*2]
movh m3, [dst1q+mstrideq ]
movh m6, [dst8q+mstrideq ]
punpcklbw m0, m1 ; A/I
punpcklbw m2, m5 ; C/K
punpcklbw m3, m6 ; D/L
punpcklbw m4, m7 ; E/M
movh m1, [dst2q+mstrideq*4]
movh m6, [dst8q+mstrideq*4]
punpcklbw m1, m6 ; B/J
punpcklbw m5, m7 ; F/N
movh m6, [dst2q+ strideq ]
movh m7, [dst8q+ strideq ]
punpcklbw m6, m7 ; G/O
TRANSPOSE4x4B 0, 1, 2, 3, 7
movh m7, [dst2q+ strideq*2]
movh m1, [dst8q+ strideq*2]
punpcklbw m7, m1 ; H/P
TRANSPOSE4x4B 4, 5, 6, 7, 1
SBUTTERFLY dq, 0, 4, 1 ; p3/p2
SBUTTERFLY dq, 2, 6, 1 ; q0/q1
SBUTTERFLY dq, 3, 7, 1 ; q2/q3
mova m_q0backup, m2 ; store q0
SBUTTERFLY dq, 1, 5, 2 ; p1/p0
mova m_p0backup, m5 ; store p0
; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
psubusb m4, m0 ; p2-p3
psubusb m0, m1 ; p3-p2
por m0, m4 ; abs(p3-p2)
psubusb m4, m1 ; p1-p2
psubusb m1, m2 ; p2-p1
por m1, m4 ; abs(p2-p1)
psubusb m4, m7 ; q2-q3
psubusb m7, m6 ; q3-q2
por m7, m4 ; abs(q3-q2)
psubusb m4, m6 ; q1-q2
psubusb m6, m5 ; q2-q1
por m6, m4 ; abs(q2-q1)
%if notcpuflag(mmx2)
pcmpeqb m0, m3 ; abs(p3-p2) <= I
pcmpeqb m1, m3 ; abs(p2-p1) <= I
pcmpeqb m7, m3 ; abs(q3-q2) <= I
pcmpeqb m6, m3 ; abs(q2-q1) <= I
; normal_limit and high_edge_variance for p1-p0, q1-q0
SWAP 7, 3 ; now m7 is zero
movrow m3, [dst1q+mstrideq ] ; p0
%if mmsize == 16 && %2 == 8
movhps m3, [dst8q+mstrideq ]
psubusb m1, m3 ; p1-p0
psubusb m6, m2 ; p0-p1
por m1, m6 ; abs(p1-p0)
%if notcpuflag(mmx2)
psubusb m6, m_hevthr
pcmpeqb m1, m7 ; abs(p1-p0) <= I
pcmpeqb m6, m7 ; abs(p1-p0) <= hev_thresh
pmaxub m0, m1 ; max_I
SWAP 1, 4 ; max_hev_thresh
SWAP 6, 4 ; now m6 is I
movrow m4, [dst1q] ; q0
%if mmsize == 16 && %2 == 8
psubusb m1, m5 ; q0-q1
psubusb m7, m4 ; q1-q0
por m1, m7 ; abs(q1-q0)
%if notcpuflag(mmx2)
psubusb m7, m_hevthr
pcmpeqb m1, m6 ; abs(q1-q0) <= I
pcmpeqb m7, m6 ; abs(q1-q0) <= hev_thresh
pand m0, m1 ; abs([pq][321]-[pq][210]) <= I
psubusb m6, m_hevthr
pcmpeqb m0, m7 ; max(abs(..)) <= I
pcmpeqb m6, m7 ; !(max(abs..) > thresh)
mova m_maskres, m6 ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
mova m6, m4 ; keep copies of p0/q0 around for later use
psubusb m1, m4 ; p0-q0
psubusb m6, m3 ; q0-p0
por m1, m6 ; abs(q0-p0)
paddusb m1, m1 ; m1=2*abs(q0-p0)
psubusb m7, m5 ; p1-q1
psubusb m6, m2 ; q1-p1
por m7, m6 ; abs(q1-p1)
psrlq m7, 1 ; abs(q1-p1)/2
paddusb m7, m1 ; abs(q0-p0)*2+abs(q1-p1)/2
pcmpeqb m7, m6 ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
pand m0, m7 ; normal_limit result
; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
%ifdef m8 ; x86-64 && sse2
%else ; x86-32 or mmx/mmxext
%define m_pb_80 [pb_80]
psubsb m1, m7 ; (signed) q0-p0
psubsb m6, m7 ; (signed) p1-q1
paddsb m7, m1 ; 3*(q0-p0)+is4tap?(p1-q1)
paddusb m3, m1 ; p0+f2
paddusb m4, m1 ; q0-f1
%if notcpuflag(mmx2)
%if notcpuflag(mmx2)
paddusb m5, m1 ; q1-a
paddusb m2, m0 ; p1+a
movrow [dst1q+mstrideq*2], m2
movrow [dst1q+mstrideq ], m3
movrow [dst1q+ strideq ], m5
%if mmsize == 16 && %2 == 8
movhps [dst8q+mstrideq*2], m2
movhps [dst8q+mstrideq ], m3
movhps [dst8q+ strideq ], m5
TRANSPOSE4x4B 2, 3, 4, 5, 6
%if mmsize == 8 ; mmx/mmxext (h)
WRITE_4x2D 2, 3, 4, 5, dst1q, dst2q, mstrideq, strideq
lea dst8q, [dst8q+mstrideq +2]
WRITE_4x4D 2, 3, 4, 5, dst1q, dst2q, dst8q, mstrideq, strideq, %2
%if %2 == 8 ; chroma
lea dst1q, [dst1q+ strideq*8-2]
%ifndef m8 ; sse2 on x86-32 or mmx/mmxext
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v, 8
INNER_LOOPFILTER h, 8
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v, 8
INNER_LOOPFILTER h, 8
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v, 8
INNER_LOOPFILTER h, 8
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v, 8
INNER_LOOPFILTER h, 8
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter<size>_mbedge_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
; int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------
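
; On macroblock edges, pixels that pass filter_mask but not hev get the
; stronger 27/18/9 filter (a sketch of the standard VP8 mbedge filter; the
; taps correspond to the pb_27_63/pb_18_63/pb_9_63 constants above):
;
;   w  = clamp(clamp(p1 - q1) + 3*(q0 - p0))
;   a0 = (27*w + 63) >> 7;   p0 += a0;  q0 -= a0
;   a1 = (18*w + 63) >> 7;   p1 += a1;  q1 -= a1
;   a2 = ( 9*w + 63) >> 7;   p2 += a2;  q2 -= a2
;
; Pixels where hev is set fall back to the common 4-tap adjustment of p0/q0.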
%macro MBEDGE_LOOPFILTER 2
%if %2 == 8 ; chroma
cglobal vp8_%1_loop_filter8uv_mbedge, 6, 6, 15, dst1, dst8, stride, flimE, flimI, hevthr
cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, dst1, stride, flimE, flimI, hevthr
%ifndef m8 ; stack layout: [0]=E, [1]=I, [2]=hev_thr
%if mmsize == 16 ; [3]=hev() result
; [4]=filter tmp result
; [5]/[6] = p2/q2 backup
; [7]=lim_res sign result
%assign pad 16 + mmsize * 7 - gprsize - (stack_offset & 15)
%else ; 8 ; extra storage space for transposes
%assign pad 16 + mmsize * 8 - gprsize - (stack_offset & 15)
; splat function arguments
SPLATB_REG m0, flimEq, m7 ; E
SPLATB_REG m1, flimIq, m7 ; I
SPLATB_REG m2, hevthrq, m7 ; hev_thresh
%define m_flimE [rsp]
%define m_flimI [rsp+mmsize]
%define m_hevthr [rsp+mmsize*2]
%define m_maskres [rsp+mmsize*3]
%define m_limres [rsp+mmsize*4]
%define m_p0backup [rsp+mmsize*3]
%define m_q0backup [rsp+mmsize*4]
%define m_p2backup [rsp+mmsize*5]
%define m_q2backup [rsp+mmsize*6]
%define m_limsign [rsp]
%define m_limsign [rsp+mmsize*7]
%else ; sse2 on x86-64
%define m_hevthr m11
%define m_maskres m12
%define m_p0backup m12
%define m_q0backup m8
%define m_p2backup m13
%define m_q2backup m14
%define m_limsign m9
; splat function arguments
SPLATB_REG m_flimE, flimEq, m7 ; E
SPLATB_REG m_flimI, flimIq, m7 ; I
SPLATB_REG m_hevthr, hevthrq, m7 ; hev_thresh
%if %2 == 8 ; chroma
DEFINE_ARGS dst1, dst8, mstride, stride, dst2
DEFINE_ARGS dst1, mstride, stride, dst2, cntr
DEFINE_ARGS dst1, mstride, stride, dst2, dst8
mov strideq, mstrideq
lea dst1q, [dst1q+strideq*4-4]
%if %2 == 8 ; chroma
lea dst8q, [dst8q+strideq*4-4]
lea dst2q, [dst1q+ strideq ]
%if %2 == 8 && mmsize == 16
movrow m0, [dst1q+mstrideq*4] ; p3
movrow m1, [dst2q+mstrideq*4] ; p2
movrow m2, [dst1q+mstrideq*2] ; p1
movrow m5, [dst2q] ; q1
movrow m6, [dst2q+ strideq ] ; q2
movrow m7, [dst2q+ strideq*2] ; q3
%if mmsize == 16 && %2 == 8
movhps m0, [dst8q+mstrideq*4]
movhps m2, [dst8q+mstrideq*2]
movhps m1, [dst8q+mstrideq*4]
movhps m6, [dst8q+ strideq ]
movhps m7, [dst8q+ strideq*2]
%elif mmsize == 8 ; mmx/mmxext (h)
; read 8 rows of 8px each
movu m0, [dst1q+mstrideq*4]
movu m1, [dst2q+mstrideq*4]
movu m2, [dst1q+mstrideq*2]
movu m3, [dst1q+mstrideq ]
movu m6, [dst2q+ strideq ]
TRANSPOSE4x4B 0, 1, 2, 3, 7
movu m7, [dst2q+ strideq*2]
TRANSPOSE4x4B 4, 5, 6, 7, 1
SBUTTERFLY dq, 0, 4, 1 ; p3/p2
SBUTTERFLY dq, 2, 6, 1 ; q0/q1
SBUTTERFLY dq, 3, 7, 1 ; q2/q3
mova m_q0backup, m2 ; store q0
SBUTTERFLY dq, 1, 5, 2 ; p1/p0
mova m_p0backup, m5 ; store p0
lea dst8q, [dst1q+ strideq*8 ]
; read 16 rows of 8px each, interleave
movh m0, [dst1q+mstrideq*4]
movh m1, [dst8q+mstrideq*4]
movh m2, [dst1q+mstrideq*2]
movh m5, [dst8q+mstrideq*2]
movh m3, [dst1q+mstrideq ]
movh m6, [dst8q+mstrideq ]
punpcklbw m0, m1 ; A/I
punpcklbw m2, m5 ; C/K
punpcklbw m3, m6 ; D/L
punpcklbw m4, m7 ; E/M
movh m1, [dst2q+mstrideq*4]
movh m6, [dst8q+mstrideq*4]
punpcklbw m1, m6 ; B/J
punpcklbw m5, m7 ; F/N
movh m6, [dst2q+ strideq ]
movh m7, [dst8q+ strideq ]
punpcklbw m6, m7 ; G/O
TRANSPOSE4x4B 0, 1, 2, 3, 7
movh m7, [dst2q+ strideq*2]
movh m1, [dst8q+ strideq*2]
punpcklbw m7, m1 ; H/P
TRANSPOSE4x4B 4, 5, 6, 7, 1
SBUTTERFLY dq, 0, 4, 1 ; p3/p2
SBUTTERFLY dq, 2, 6, 1 ; q0/q1
SBUTTERFLY dq, 3, 7, 1 ; q2/q3
mova m_q0backup, m2 ; store q0
SBUTTERFLY dq, 1, 5, 2 ; p1/p0
mova m_p0backup, m5 ; store p0
; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
psubusb m4, m0 ; p2-p3
psubusb m0, m1 ; p3-p2
por m0, m4 ; abs(p3-p2)
psubusb m4, m1 ; p1-p2
psubusb m1, m2 ; p2-p1
por m1, m4 ; abs(p2-p1)
psubusb m4, m7 ; q2-q3
psubusb m7, m6 ; q3-q2
por m7, m4 ; abs(q3-q2)
psubusb m4, m6 ; q1-q2
psubusb m6, m5 ; q2-q1
por m6, m4 ; abs(q2-q1)
%if notcpuflag(mmx2)
pcmpeqb m0, m3 ; abs(p3-p2) <= I
pcmpeqb m1, m3 ; abs(p2-p1) <= I
pcmpeqb m7, m3 ; abs(q3-q2) <= I
pcmpeqb m6, m3 ; abs(q2-q1) <= I
; normal_limit and high_edge_variance for p1-p0, q1-q0
SWAP 7, 3 ; now m7 is zero
movrow m3, [dst1q+mstrideq ] ; p0
%if mmsize == 16 && %2 == 8
movhps m3, [dst8q+mstrideq ]
psubusb m1, m3 ; p1-p0
psubusb m6, m2 ; p0-p1
por m1, m6 ; abs(p1-p0)
%if notcpuflag(mmx2)
psubusb m6, m_hevthr
pcmpeqb m1, m7 ; abs(p1-p0) <= I
pcmpeqb m6, m7 ; abs(p1-p0) <= hev_thresh
pmaxub m0, m1 ; max_I
SWAP 1, 4 ; max_hev_thresh
SWAP 6, 4 ; now m6 is I
movrow m4, [dst1q] ; q0
%if mmsize == 16 && %2 == 8
psubusb m1, m5 ; q0-q1
psubusb m7, m4 ; q1-q0
por m1, m7 ; abs(q1-q0)
%if notcpuflag(mmx2)
psubusb m7, m_hevthr
pcmpeqb m1, m6 ; abs(q1-q0) <= I
pcmpeqb m7, m6 ; abs(q1-q0) <= hev_thresh
pand m0, m1 ; abs([pq][321]-[pq][210]) <= I
psubusb m6, m_hevthr
pcmpeqb m0, m7 ; max(abs(..)) <= I
pcmpeqb m6, m7 ; !(max(abs..) > thresh)
mova m_maskres, m6 ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
mova m6, m4 ; keep copies of p0/q0 around for later use
psubusb m1, m4 ; p0-q0
psubusb m6, m3 ; q0-p0
por m1, m6 ; abs(q0-p0)
paddusb m1, m1 ; m1=2*abs(q0-p0)
psubusb m7, m5 ; p1-q1
psubusb m6, m2 ; q1-p1
por m7, m6 ; abs(q1-p1)
psrlq m7, 1 ; abs(q1-p1)/2
paddusb m7, m1 ; abs(q0-p0)*2+abs(q1-p1)/2
pcmpeqb m7, m6 ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
pand m0, m7 ; normal_limit result
; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
%ifdef m8 ; x86-64 && sse2
%else ; x86-32 or mmx/mmxext
%define m_pb_80 [pb_80]
psubsb m1, m7 ; (signed) q0-p0
psubsb m6, m7 ; (signed) p1-q1
mova m_limres, m6 ; 3*(q0-p0)+(p1-q1) masked for filter_mbedge
pandn m7, m6 ; 3*(q0-p0)+(p1-q1) masked for filter_common
paddusb m3, m1 ; p0+f2
paddusb m4, m1 ; q0-f1
; filter_mbedge (m2-m5 = p1-q1; lim_res carries w)
pcmpgtb m0, m1 ; which are negative
punpcklbw m6, m7 ; interleave with "1" for rounding
punpcklbw m6, m0 ; signed byte->word
SWAP 0, 10 ; don't lose lim_sign copy
mova m_maskres, m6 ; backup for later in filter
packsswb m6, m1 ; a0
mova m6, [pb_18_63] ; pipelining
paddusb m3, m0 ; p0+a0
psubusb m4, m0 ; q0-a0
packsswb m6, m1 ; a1
paddusb m2, m0 ; p1+a1
psubusb m5, m0 ; q1-a1
packsswb m6, m1 ; a1
paddusb m1, m7 ; p1+a1
psubusb m6, m7 ; q1-a1
movrow [dst2q+mstrideq*4], m1
movrow [dst1q+mstrideq*2], m2
movrow [dst1q+mstrideq ], m3
movrow [dst2q+ strideq ], m6
%if mmsize == 16 && %2 == 8
movhps [dst8q+mstrideq*2], m1
movhps [dst8q+mstrideq ], m2
movhps [dst8q+ strideq ], m5
movhps [dst8q+ strideq*2], m6
TRANSPOSE4x4B 1, 2, 3, 4, 0
SBUTTERFLY bw, 5, 6, 0
%if mmsize == 8 ; mmx/mmxext (h)
WRITE_4x2D 1, 2, 3, 4, dst1q, dst2q, mstrideq, strideq
WRITE_2x4W m5, m6, dst2q, dst1q, mstrideq, strideq
lea dst8q, [dst8q+mstrideq+1]
WRITE_4x4D 1, 2, 3, 4, dst1q, dst2q, dst8q, mstrideq, strideq, %2
lea dst1q, [dst2q+mstrideq+4]
lea dst8q, [dst8q+mstrideq+4]
WRITE_8W m5, dst2q, dst1q, mstrideq, strideq
lea dst2q, [dst8q+ strideq ]
WRITE_8W m6, dst2q, dst8q, mstrideq, strideq
%if %2 == 8 ; chroma
lea dst1q, [dst1q+ strideq*8-5]
%ifndef m8 ; sse2 on x86-32 or mmx/mmxext
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v, 8
MBEDGE_LOOPFILTER h, 8
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v, 8
MBEDGE_LOOPFILTER h, 8
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v, 8
MBEDGE_LOOPFILTER h, 8
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v, 8
MBEDGE_LOOPFILTER h, 8
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER h, 8