;******************************************************************************
;* VP8 MMXEXT optimizations
;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
;* Copyright (c) 2010 Jason Garrett-Glaser <darkshikari@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

fourtap_filter_hw_m:  times 4 dw -6, 123

sixtap_filter_hw_m:   times 4 dw  2, -11

fourtap_filter_hb_m:  times 8 db -6, 123

sixtap_filter_hb_m:   times 8 db  2,   1

fourtap_filter_v_m:   times 8 dw -6

sixtap_filter_v_m:    times 8 dw  2

bilinear_filter_vw_m: times 8 dw  1

bilinear_filter_vb_m: times 8 db  7,   1
%define fourtap_filter_hw  picregq
%define sixtap_filter_hw   picregq
%define fourtap_filter_hb  picregq
%define sixtap_filter_hb   picregq
%define fourtap_filter_v   picregq
%define sixtap_filter_v    picregq
%define bilinear_filter_vw picregq
%define bilinear_filter_vb picregq

%define fourtap_filter_hw  fourtap_filter_hw_m
%define sixtap_filter_hw   sixtap_filter_hw_m
%define fourtap_filter_hb  fourtap_filter_hb_m
%define sixtap_filter_hb   sixtap_filter_hb_m
%define fourtap_filter_v   fourtap_filter_v_m
%define sixtap_filter_v    sixtap_filter_v_m
%define bilinear_filter_vw bilinear_filter_vw_m
%define bilinear_filter_vb bilinear_filter_vb_m
filter_h2_shuf:  db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
filter_h4_shuf:  db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10

filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11

pw_256:   times 8 dw 256

pw_20091: times 4 dw 20091
pw_17734: times 4 dw 17734

pb_27_63: times 8 db 27, 63
pb_18_63: times 8 db 18, 63
pb_9_63:  times 8 db  9, 63
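
; The pb_27_63/pb_18_63/pb_9_63 pairs feed pmaddubsw in filter_mbedge below:
; the filter value w is interleaved with the constant 1, so one multiply-add
; per pair yields 27*w+63, 18*w+63 or 9*w+63, i.e. the product together with
; its rounding bias, ready for the final >> 7.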
;-----------------------------------------------------------------------------
; subpel MC functions:
;
; void put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, int deststride,
;                                              uint8_t *src, int srcstride,
;                                              int height, int mx, int my);
;-----------------------------------------------------------------------------
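
; mx/my are eighth-pel phases in 1..7. VP8's 6-tap filters for the even
; phases have zero outer taps, so those positions get dedicated 4-tap code
; paths, while odd phases need all 6 taps. Every filter sums to 128, so
; results are rounded as (sum + 64) >> 7.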
%macro FILTER_SSSE3 1
cglobal put_vp8_epel%1_h6, 6, 6 + npicregs, 8, dst, dststride, src, srcstride, height, mx, picreg
    mova      m3, [filter_h6_shuf2]
    mova      m4, [filter_h6_shuf3]

    lea  picregq, [sixtap_filter_hb_m]

    mova      m5, [sixtap_filter_hb+mxq*8-48] ; set up 6tap filter in bytes
    mova      m6, [sixtap_filter_hb+mxq*8-32]
    mova      m7, [sixtap_filter_hb+mxq*8-16]
    ; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
    ; shuffle with a memory operand
    punpcklbw m0, [srcq+3]

    pshufb    m0, [filter_h6_shuf1]
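
    ; pmulhrsw against pw_256 computes (x*256 + 0x4000) >> 15 = (x + 64) >> 7,
    ; i.e. VP8's rounding shift for the 7-bit filters in a single operation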
    pmulhrsw  m0, [pw_256]

    movh  [dstq], m0            ; store

    dec  heightd                ; next row
cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
    mova      m3, [filter_h2_shuf]
    mova      m4, [filter_h4_shuf]

    lea  picregq, [fourtap_filter_hb_m]

    mova      m5, [fourtap_filter_hb+mxq-16] ; set up 4tap filter in bytes
    mova      m6, [fourtap_filter_hb+mxq]

    movh  [dstq], m0            ; store

    dec  heightd                ; next row
cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea  picregq, [fourtap_filter_hb_m]

    mova      m5, [fourtap_filter_hb+myq-16]
    mova      m6, [fourtap_filter_hb+myq]

    movh      m1, [srcq+  srcstrideq]
    movh      m2, [srcq+2*srcstrideq]

    movh      m3, [srcq+2*srcstrideq] ; read new row

    dec  heightd                ; next row
cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea  picregq, [sixtap_filter_hb_m]

    lea      myq, [sixtap_filter_hb+myq*8]

    movh      m1, [srcq+srcstrideq]
    movh      m2, [srcq+srcstrideq*2]
    lea     srcq, [srcq+srcstrideq*2]

    movh      m4, [srcq+srcstrideq]

    movh      m5, [srcq+2*srcstrideq] ; read new row

    pmaddubsw m6, [myq-48]
    pmaddubsw m1, [myq-32]
    pmaddubsw m7, [myq-16]

    pmulhrsw  m6, [pw_256]

    dec  heightd                ; next row
; 4x4 block, H-only 4-tap filter
cglobal put_vp8_epel4_h4, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
    lea  picregq, [fourtap_filter_hw_m]

    movq     mm4, [fourtap_filter_hw+mxq-16] ; set up 4tap filter in words
    movq     mm5, [fourtap_filter_hw+mxq]
    movq     mm1, [srcq-1]      ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq     mm2, mm1           ; byte ABCD..
    punpcklbw mm1, mm6          ; byte->word ABCD
    pshufw   mm0, mm2, 9        ; byte CDEF..
    punpcklbw mm0, mm6          ; byte->word CDEF
    pshufw   mm3, mm1, 0x94     ; word ABBC
    pshufw   mm1, mm0, 0x94     ; word CDDE
    pmaddwd  mm3, mm4           ; multiply 2px with F0/F1
    movq     mm0, mm1           ; backup for second set of pixels
    pmaddwd  mm1, mm5           ; multiply 2px with F2/F3
    paddd    mm3, mm1           ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    punpckhbw mm2, mm6          ; byte->word EFGH
    pmaddwd  mm0, mm4           ; multiply backed up 2px with F0/F1
    pshufw   mm1, mm2, 0x94     ; word EFFG
    pmaddwd  mm1, mm5           ; multiply 2px with F2/F3
    paddd    mm0, mm1           ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw mm3, mm0           ; merge dword->word (4px)
    paddsw   mm3, mm7           ; rounding
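    ; mm7 holds the +64 bias for the filters' final (x + 64) >> 7 rounding
    ; (the taps sum to 128); the arithmetic shift happens before packing
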
    packuswb mm3, mm6           ; clip and word->bytes
    movd  [dstq], mm3           ; store

    dec  heightd                ; next row
; 4x4 block, H-only 6-tap filter
cglobal put_vp8_epel4_h6, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
    lea  picregq, [sixtap_filter_hw_m]

    movq     mm4, [sixtap_filter_hw+mxq*8-48] ; set up 6tap filter in words
    movq     mm5, [sixtap_filter_hw+mxq*8-32]
    movq     mm6, [sixtap_filter_hw+mxq*8-16]
    movq     mm1, [srcq-2]      ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq     mm2, mm1           ; byte ABCD..
    punpcklbw mm1, mm3          ; byte->word ABCD
    pshufw   mm0, mm2, 0x9      ; byte CDEF..
    punpckhbw mm2, mm3          ; byte->word EFGH
    punpcklbw mm0, mm3          ; byte->word CDEF
    pshufw   mm1, mm1, 0x94     ; word ABBC
    pshufw   mm2, mm2, 0x94     ; word EFFG
    pmaddwd  mm1, mm4           ; multiply 2px with F0/F1
    pshufw   mm3, mm0, 0x94     ; word CDDE
    movq     mm0, mm3           ; backup for second set of pixels
    pmaddwd  mm3, mm5           ; multiply 2px with F2/F3
    paddd    mm1, mm3           ; add to 1st 2px cache
    movq     mm3, mm2           ; backup for second set of pixels
    pmaddwd  mm2, mm6           ; multiply 2px with F4/F5
    paddd    mm1, mm2           ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    movd     mm2, [srcq+3]      ; byte FGHI (prevent overreads)
    pmaddwd  mm0, mm4           ; multiply 1st backed up 2px with F0/F1
    pmaddwd  mm3, mm5           ; multiply 2nd backed up 2px with F2/F3
    paddd    mm0, mm3           ; add to 2nd 2px cache

    punpcklbw mm2, mm3          ; byte->word FGHI
    pshufw   mm2, mm2, 0xE9     ; word GHHI
    pmaddwd  mm2, mm6           ; multiply 2px with F4/F5
    paddd    mm0, mm2           ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw mm1, mm0           ; merge dword->word (4px)
    paddsw   mm1, mm7           ; rounding

    packuswb mm1, mm3           ; clip and word->bytes
    movd  [dstq], mm1           ; store

    dec  heightd                ; next row
cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, height, mx, picreg
    lea  picregq, [fourtap_filter_v_m]

    lea      mxq, [fourtap_filter_v+mxq-32]

    movh  [dstq], m0            ; store

    dec  heightd                ; next row

cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, height, mx, picreg
    lea  picregq, [sixtap_filter_v_m]

    lea      mxq, [sixtap_filter_v+mxq-96]

    movh  [dstq], m0            ; store

    dec  heightd                ; next row
; 4x4 block, V-only 4-tap filter
cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea  picregq, [fourtap_filter_v_m]

    lea      myq, [fourtap_filter_v+myq-32]

    movh      m1, [srcq+  srcstrideq]
    movh      m2, [srcq+2*srcstrideq]

    ; first calculate negative taps (to prevent losing positive overflows)
    movh      m4, [srcq+2*srcstrideq] ; read new row

    ; then calculate positive taps

    dec  heightd                ; next row
; 4x4 block, V-only 6-tap filter
cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea  picregq, [sixtap_filter_v_m]

    lea      myq, [sixtap_filter_v+myq-96]

    movh      m1, [srcq+srcstrideq]
    movh      m2, [srcq+srcstrideq*2]
    lea     srcq, [srcq+srcstrideq*2]

    movh      m4, [srcq+srcstrideq]

    ; first calculate negative taps (to prevent losing positive overflows)

    ; then calculate positive taps
    movh      m5, [srcq+2*srcstrideq] ; read new row

    dec  heightd                ; next row
%macro FILTER_BILINEAR 1
cglobal put_vp8_bilinear%1_v, 7, 7, 7, dst, dststride, src, srcstride, height, picreg, my
    lea  picregq, [bilinear_filter_vw_m]

    mova      m5, [bilinear_filter_vw+myq-1*16]

    mova      m4, [bilinear_filter_vw+myq+7*16]
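
    ; vertical bilinear MC: dst = (a*(8-frac) + b*frac + 4) >> 3; m4/m5 hold
    ; the two word-sized weights picked from the table by my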
    movh      m0, [srcq+srcstrideq*0]
    movh      m1, [srcq+srcstrideq*1]
    movh      m3, [srcq+srcstrideq*2]

    movh   [dstq+dststrideq*0], m0
    movh   [dstq+dststrideq*1], m2

    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0

    lea     dstq, [dstq+dststrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
    lea  picregq, [bilinear_filter_vw_m]

    mova      m5, [bilinear_filter_vw+mxq-1*16]

    mova      m4, [bilinear_filter_vw+mxq+7*16]

    movh      m0, [srcq+srcstrideq*0+0]
    movh      m1, [srcq+srcstrideq*0+1]
    movh      m2, [srcq+srcstrideq*1+0]
    movh      m3, [srcq+srcstrideq*1+1]

    movh   [dstq+dststrideq*0], m0
    movh   [dstq+dststrideq*1], m2

    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0

    lea     dstq, [dstq+dststrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
%macro FILTER_BILINEAR_SSSE3 1
cglobal put_vp8_bilinear%1_v, 7, 7, 5, dst, dststride, src, srcstride, height, picreg, my
    lea  picregq, [bilinear_filter_vb_m]

    mova      m3, [bilinear_filter_vb+myq-16]
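
    ; SSSE3 path: both weights are packed as a byte pair (e.g. 7,1 for
    ; frac = 1), so one pmaddubsw per row does the whole weighted sum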
    movh      m0, [srcq+srcstrideq*0]
    movh      m1, [srcq+srcstrideq*1]
    movh      m2, [srcq+srcstrideq*2]

    movh   [dstq+dststrideq*0], m0
    movh   [dstq+dststrideq*1], m1

    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0

    lea     dstq, [dstq+dststrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 5, dst, dststride, src, srcstride, height, mx, picreg
    lea  picregq, [bilinear_filter_vb_m]

    mova      m2, [filter_h2_shuf]
    mova      m3, [bilinear_filter_vb+mxq-16]

    movu      m0, [srcq+srcstrideq*0]
    movu      m1, [srcq+srcstrideq*1]

    movh   [dstq+dststrideq*0], m0
    movh   [dstq+dststrideq*1], m1

    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0

    lea     dstq, [dstq+dststrideq*2]
    lea     srcq, [srcq+srcstrideq*2]

FILTER_BILINEAR_SSSE3 4

FILTER_BILINEAR_SSSE3 8
cglobal put_vp8_pixels8, 5, 5, 0, dst, dststride, src, srcstride, height
    movq     mm0, [srcq+srcstrideq*0]
    movq     mm1, [srcq+srcstrideq*1]
    lea     srcq, [srcq+srcstrideq*2]
    movq   [dstq+dststrideq*0], mm0
    movq   [dstq+dststrideq*1], mm1
    lea     dstq, [dstq+dststrideq*2]
cglobal put_vp8_pixels16, 5, 5, 0, dst, dststride, src, srcstride, height
    movq     mm0, [srcq+srcstrideq*0+0]
    movq     mm1, [srcq+srcstrideq*0+8]
    movq     mm2, [srcq+srcstrideq*1+0]
    movq     mm3, [srcq+srcstrideq*1+8]
    lea     srcq, [srcq+srcstrideq*2]
    movq   [dstq+dststrideq*0+0], mm0
    movq   [dstq+dststrideq*0+8], mm1
    movq   [dstq+dststrideq*1+0], mm2
    movq   [dstq+dststrideq*1+8], mm3
    lea     dstq, [dstq+dststrideq*2]

cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
    movups  xmm0, [srcq+srcstrideq*0]
    movups  xmm1, [srcq+srcstrideq*1]
    lea     srcq, [srcq+srcstrideq*2]
    movaps [dstq+dststrideq*0], xmm0
    movaps [dstq+dststrideq*1], xmm1
    lea     dstq, [dstq+dststrideq*2]
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------
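
; When only the DC coefficient survives quantization, both IDCT passes
; reduce to a constant, so every one of the 16 pixels simply gets
; (dc + 4) >> 3 added with unsigned saturation.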
    %4        m3, [dst1q+strideq+%3]
    %4        m5, [dst2q+strideq+%3]

    %4 [dst1q+strideq+%3], m3
    %4 [dst2q+strideq+%3], m5

cglobal vp8_idct_dc_add, 3, 3, 0, dst, block, stride

    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]
    ADD_DC    m0, m1, 0, movh

cglobal vp8_idct_dc_add, 3, 3, 6, dst, block, stride

    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]

    movd      m3, [dst1q+strideq]
    movd      m5, [dst2q+strideq]

    pextrd [dst1q+strideq], m2, 1
    pextrd [dst2q],         m2, 2
    pextrd [dst2q+strideq], m2, 3
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------
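
; Same DC shortcut for four horizontally adjacent luma blocks at once: the
; four DCs are gathered into one register, biased and shifted together, then
; broadcast so each 4-pixel lane receives its own block's delta.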
cglobal vp8_idct_dc_add4y, 3, 3, 0, dst, block, stride
    movd      m0, [blockq+32*0] ; A
    movd      m1, [blockq+32*2] ; C
    punpcklwd m0, [blockq+32*1] ; A B
    punpcklwd m1, [blockq+32*3] ; C D
    punpckldq m0, m1            ; A B C D

    movd [blockq+32*0], m6
    movd [blockq+32*1], m6
    movd [blockq+32*2], m6
    movd [blockq+32*3], m6

    punpcklbw m0, m0            ; AABBCCDD
    punpcklbw m6, m6            ; AABBCCDD

    punpcklbw m0, m0            ; AAAABBBB
    punpckhbw m1, m1            ; CCCCDDDD
    punpcklbw m6, m6            ; AAAABBBB
    punpckhbw m7, m7            ; CCCCDDDD

    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]
    ADD_DC    m0, m6, 0, mova
    ADD_DC    m1, m7, 8, mova

cglobal vp8_idct_dc_add4y, 3, 3, 6, dst, block, stride
    movd      m0, [blockq+32*0] ; A
    movd      m1, [blockq+32*2] ; C
    punpcklwd m0, [blockq+32*1] ; A B
    punpcklwd m1, [blockq+32*3] ; C D
    punpckldq m0, m1            ; A B C D

    movd [blockq+32*0], m1
    movd [blockq+32*1], m1
    movd [blockq+32*2], m1
    movd [blockq+32*3], m1

    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]
    ADD_DC    m0, m1, 0, mova
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4uv_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------
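
; Chroma variant of the above: the four blocks sit in a 2x2 arrangement, so
; after the first register pair is added, dst is advanced four rows before
; the second pair is written.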
cglobal vp8_idct_dc_add4uv, 3, 3, 0, dst, block, stride
    movd      m0, [blockq+32*0] ; A
    movd      m1, [blockq+32*2] ; C
    punpcklwd m0, [blockq+32*1] ; A B
    punpcklwd m1, [blockq+32*3] ; C D
    punpckldq m0, m1            ; A B C D

    movd [blockq+32*0], m6
    movd [blockq+32*1], m6
    movd [blockq+32*2], m6
    movd [blockq+32*3], m6

    punpcklbw m0, m0            ; AABBCCDD
    punpcklbw m6, m6            ; AABBCCDD

    punpcklbw m0, m0            ; AAAABBBB
    punpckhbw m1, m1            ; CCCCDDDD
    punpcklbw m6, m6            ; AAAABBBB
    punpckhbw m7, m7            ; CCCCDDDD

    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]
    ADD_DC    m0, m6, 0, mova
    lea    dst1q, [dst1q+strideq*4]
    lea    dst2q, [dst2q+strideq*4]
    ADD_DC    m1, m7, 0, mova
;-----------------------------------------------------------------------------
; void vp8_idct_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------
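
; Full 4x4 inverse transform: one 1-D pass, transpose, second 1-D pass,
; transpose back, then STORE_DIFFx2 adds the residual to the destination
; with the final >> 3.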
; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
;           this macro assumes that m6/m7 have words for 20091/17734 loaded
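; The fixed-point constants come from the VP8 transform: 20091/65536 =
; sqrt(2)*cos(pi/8) - 1, so pmulhw by 20091 plus the input itself gives the
; full product; 35468/65536 = sqrt(2)*sin(pi/8), and since 35468 does not
; fit a signed word, m7 holds 17734 = 35468/2 and the input is doubled first.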
%macro VP8_MULTIPLY_SUMSUB 4
    pmulhw    %3, m6            ; 20091(1)
    pmulhw    %4, m6            ; 20091(2)

    pmulhw    %1, m7            ; 35468(1)
    pmulhw    %2, m7            ; 35468(2)

; calculate x0=%1+%3; x1=%1-%3
;           x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
;           %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
; %5/%6 are temporary registers
; we assume m6/m7 have constant words 20091/17734 loaded in them
%macro VP8_IDCT_TRANSFORM4x4_1D 6
    SUMSUB_BA            w, %3, %1, %5     ; t0, t1
    VP8_MULTIPLY_SUMSUB  m%2, m%4, m%5, m%6 ; t2, t3
    SUMSUB_BA            w, %4, %3, %5     ; tmp0, tmp3
    SUMSUB_BA            w, %2, %1, %5     ; tmp1, tmp2
%macro VP8_IDCT_ADD 0
cglobal vp8_idct_add, 3, 3, 0, dst, block, stride
    movq      m0, [blockq+ 0]
    movq      m1, [blockq+ 8]
    movq      m2, [blockq+16]
    movq      m3, [blockq+24]

    movaps [blockq+ 0], xmm0
    movaps [blockq+16], xmm0

    movq [blockq+ 0], m4
    movq [blockq+ 8], m4
    movq [blockq+16], m4
    movq [blockq+24], m4

    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W            0, 1, 2, 3, 4

    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W            0, 1, 2, 3, 4

    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+2*strideq]
    STORE_DIFFx2 m0, m1, m6, m7, m4, 3, dst1q, strideq
    STORE_DIFFx2 m2, m3, m6, m7, m4, 3, dst2q, strideq
;-----------------------------------------------------------------------------
; void vp8_luma_dc_wht_<opt>(DCTELEM block[4][4][16], DCTELEM dc[16]);
;-----------------------------------------------------------------------------
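
; Inverse Walsh-Hadamard transform of the 16 luma DC values: a 4x4 Hadamard
; pass over rows and columns, after which each result is rounded as
; (t + 3) >> 3 and scattered to coefficient 0 of its 4x4 block (blocks are
; 16 words apart, hence the 2*16 byte scaling in SCATTER_WHT).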
%macro SCATTER_WHT 3
    mov [blockq+2*16*(0+%3)], dc1w
    mov [blockq+2*16*(1+%3)], dc2w

    mov [blockq+2*16*(4+%3)], dc1w
    mov [blockq+2*16*(5+%3)], dc2w

    mov [blockq+2*16*(8+%3)], dc1w
    mov [blockq+2*16*(9+%3)], dc2w

    mov [blockq+2*16*(12+%3)], dc1w
    mov [blockq+2*16*(13+%3)], dc2w

%macro HADAMARD4_1D 4
    SUMSUB_BADC w, %2, %1, %4, %3
    SUMSUB_BADC w, %4, %2, %3, %1

cglobal vp8_luma_dc_wht, 2, 3, 0, block, dc1, dc2

    movaps [dc1q+ 0], xmm0
    movaps [dc1q+16], xmm0

    HADAMARD4_1D    0, 1, 2, 3
    TRANSPOSE4x4W   0, 1, 2, 3, 4

    HADAMARD4_1D    0, 1, 2, 3
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter_simple_<opt>(uint8_t *dst, int stride, int flim);
;-----------------------------------------------------------------------------
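
; The simple filter only adjusts the p0/q0 pair, and only where
; |p0-q0|*2 + |p1-q1|/2 <= flim holds across the edge.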
; macro called with 7 mm register indexes as arguments, and 4 regular registers
;
; first 4 mm registers will carry the transposed pixel data
; the other three are scratchspace (one would be sufficient, but this allows
; for more spreading/pipelining and thus faster execution on out-of-order CPUs)
;
; first two regular registers are buf+4*stride and buf+5*stride
; third is -stride, fourth is +stride
%macro READ_8x4_INTERLEAVED 11
    ; interleave 8 (A-H) rows of 4 pixels each
    movd      m%1, [%8+%10*4]   ; A0-3
    movd      m%5, [%9+%10*4]   ; B0-3
    movd      m%2, [%8+%10*2]   ; C0-3
    movd      m%6, [%8+%10]     ; D0-3
    movd      m%3, [%8]         ; E0-3
    movd      m%7, [%9]         ; F0-3
    movd      m%4, [%9+%11]     ; G0-3
    punpcklbw m%1, m%5          ; A/B interleaved
    movd      m%5, [%9+%11*2]   ; H0-3
    punpcklbw m%2, m%6          ; C/D interleaved
    punpcklbw m%3, m%7          ; E/F interleaved
    punpcklbw m%4, m%5          ; G/H interleaved
; macro called with 7 mm register indexes as arguments, and 5 regular registers
; the first 11 mean the same as in READ_8x4_INTERLEAVED above
; the fifth regular register is scratchspace to reach the bottom 8 rows; it
; will be set to second regular register + 8*stride at the end
%macro READ_16x4_INTERLEAVED 12
    ; transpose 16 (A-P) rows of 4 pixels each

    ; read (and interleave) those addressable by %8 (=r0), A/C/D/E/I/K/L/M
    movd      m%1, [%8+%10*4]   ; A0-3
    movd      m%3, [%12+%10*4]  ; I0-3
    movd      m%2, [%8+%10*2]   ; C0-3
    movd      m%4, [%12+%10*2]  ; K0-3
    movd      m%6, [%8+%10]     ; D0-3
    movd      m%5, [%12+%10]    ; L0-3
    movd      m%7, [%12]        ; M0-3

    punpcklbw m%1, m%3          ; A/I
    movd      m%3, [%8]         ; E0-3
    punpcklbw m%2, m%4          ; C/K
    punpcklbw m%6, m%5          ; D/L
    punpcklbw m%3, m%7          ; E/M
    punpcklbw m%2, m%6          ; C/D/K/L interleaved

    ; read (and interleave) those addressable by %9 (=r4), B/F/G/H/J/N/O/P
    movd      m%5, [%9+%10*4]   ; B0-3
    movd      m%4, [%12+%10*4]  ; J0-3
    movd      m%7, [%9]         ; F0-3
    movd      m%6, [%12]        ; N0-3
    punpcklbw m%5, m%4          ; B/J
    punpcklbw m%7, m%6          ; F/N
    punpcklbw m%1, m%5          ; A/B/I/J interleaved
    punpcklbw m%3, m%7          ; E/F/M/N interleaved
    movd      m%4, [%9+%11]     ; G0-3
    movd      m%6, [%12+%11]    ; O0-3
    movd      m%5, [%9+%11*2]   ; H0-3
    movd      m%7, [%12+%11*2]  ; P0-3
    punpcklbw m%4, m%6          ; G/O
    punpcklbw m%5, m%7          ; H/P
    punpcklbw m%4, m%5          ; G/H/O/P interleaved
; write 4 mm registers of 2 dwords each
; first four arguments are mm register indexes containing source data
; last four are registers containing buf+4*stride, buf+5*stride,
; -stride and +stride

    ; write out (2 dwords per register)

; write 4 xmm registers of 4 dwords each
; arguments same as WRITE_4x2D, but with an extra register, so that the 5 regular
; registers contain buf+4*stride, buf+5*stride, buf+12*stride, -stride and +stride
; we add 1*stride to the third regular register in the process
; the 10th argument is 16 if it's a Y filter (i.e. all regular registers cover the
; same memory region), or 8 if they cover two separate buffers (third one points to
; a different memory region than the first two), allowing for more optimal code for
; the 16-width case
%macro WRITE_4x4D 10
    ; write out (4 dwords per register), start with dwords zero
; write 4 or 8 words in the mmx/xmm registers as 8 lines
; 1 and 2 are the registers to write, this can be the same (for SSE2)
; for pre-SSE4:
; 3 is a general-purpose register that we will clobber
; for SSE4:
; 3 is a pointer to the destination's 5th line
; 4 is a pointer to the destination's 4th line
; 5/6 is -stride and +stride

    pextrw [%3+%4*4], %1, 0
    pextrw [%2+%4*4], %1, 1
    pextrw [%3+%4*2], %1, 2
    pextrw [%3+%4  ], %1, 3

    pextrw [%2+%5  ], %1, 6
    pextrw [%2+%5*2], %1, 7
%macro SIMPLE_LOOPFILTER 2
cglobal vp8_%1_loop_filter_simple, 3, %2, 8, dst, stride, flim, cntr
%if mmsize == 8 ; mmx/mmxext

    SPLATB_REG m7, flim, m0     ; splat "flim" into register

    ; set up indexes to address 4 rows

    DEFINE_ARGS dst1, mstride, stride, cntr, dst2

    DEFINE_ARGS dst1, mstride, stride, dst3, dst2

    mov  strideq, mstrideq

    lea    dst1q, [dst1q+4*strideq-2]

%if mmsize == 8 ; mmx / mmxext

    ; read 4 half/full rows of pixels
    mova      m0, [dst1q+mstrideq*2]    ; p1
    mova      m1, [dst1q+mstrideq]      ; p0
    mova      m2, [dst1q]               ; q0
    mova      m3, [dst1q+ strideq]      ; q1

    lea    dst2q, [dst1q+ strideq]

%if mmsize == 8 ; mmx/mmxext
    READ_8x4_INTERLEAVED  0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq

    READ_16x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq, dst3q

    TRANSPOSE4x4W 0, 1, 2, 3, 4

    mova      m5, m2            ; m5=backup of q0
    mova      m6, m1            ; m6=backup of p0
    psubusb   m1, m2            ; p0-q0
    psubusb   m2, m6            ; q0-p0
    por       m1, m2            ; FFABS(p0-q0)
    paddusb   m1, m1            ; m1=FFABS(p0-q0)*2

    psubusb   m3, m0            ; q1-p1
    psubusb   m0, m4            ; p1-q1
    por       m3, m0            ; FFABS(p1-q1)

    psubsb    m2, m4            ; m2=p1-q1 (signed) backup for below

    psrlq     m3, 1             ; m3=FFABS(p1-q1)/2, this can be used signed

    pcmpeqb   m3, m1            ; abs(p0-q0)*2+abs(p1-q1)/2<=flim mask(0xff/0x0)

    ; filter_common (use m2/p1-q1, m4=q0, m6=p0, m5/q0-p0 and m3/mask)

    psubsb    m5, m0            ; q0-p0 (signed)

    paddsb    m2, m5            ; a=(p1-q1) + 3*(q0-p0)
    pand      m2, m3            ; apply filter mask (m3)

    paddsb    m2, [pb_4]        ; f1<<3=a+4
    paddsb    m1, [pb_3]        ; f2<<3=a+3
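
    ; f1 = (a+4)>>3 and f2 = (a+3)>>3; x86 has no per-byte arithmetic shift,
    ; so the signed >>3 is emulated below via a sign mask and negation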
    pand      m1, m3            ; cache f2<<3

    pcmpgtb   m0, m2            ; which values are <0?
    psubb     m3, m2            ; -f1<<3

    paddusb   m4, m3            ; q0-f1

    pcmpgtb   m0, m1            ; which values are <0?
    psubb     m3, m1            ; -f2<<3

    psubusb   m6, m3            ; p0+f2

    mova [dst1q+mstrideq], m6

    SBUTTERFLY bw, 6, 4, 0

%if mmsize == 16 ; sse2

    WRITE_8W  m6, dst2q, dst1q, mstrideq, strideq
    lea    dst2q, [dst3q+mstrideq+1]

    WRITE_8W  m4, dst3q, dst2q, mstrideq, strideq

    WRITE_2x4W m6, m4, dst2q, dst1q, mstrideq, strideq

%if mmsize == 8 ; mmx/mmxext

    add    dst1q, 8             ; advance 8 cols = pixels

    lea    dst1q, [dst1q+strideq*8-1] ; advance 8 rows = lines
SIMPLE_LOOPFILTER v, 4
SIMPLE_LOOPFILTER h, 5

SIMPLE_LOOPFILTER v, 4
SIMPLE_LOOPFILTER h, 5

SIMPLE_LOOPFILTER v, 3
SIMPLE_LOOPFILTER h, 5

SIMPLE_LOOPFILTER v, 3
SIMPLE_LOOPFILTER h, 5

SIMPLE_LOOPFILTER h, 5
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter<size>_inner_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
;                                            int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------
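
; Normal (inner-edge) filter: the filter mask requires |p3-p2|, |p2-p1|,
; |p1-p0|, |q1-q0|, |q2-q1| and |q3-q2| <= I plus the simple-filter E test
; on p0/q0; a separate high-edge-variance test against hev_thr decides
; whether p1/q1 are also adjusted, by (f1 + 1) >> 1.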
%macro INNER_LOOPFILTER 2
%if %2 == 8 ; chroma
cglobal vp8_%1_loop_filter8uv_inner, 6, 6, 13, dst, dst8, stride, flimE, flimI, hevthr

cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, dst, stride, flimE, flimI, hevthr

%ifndef m8   ; stack layout: [0]=E, [1]=I, [2]=hev_thr
%ifidn %1, v ;               [3]=hev() result
%assign pad 16 + mmsize * 4 - gprsize - (stack_offset & 15)
%else ; h    ;               extra storage space for transposes
%assign pad 16 + mmsize * 5 - gprsize - (stack_offset & 15)

    ; splat function arguments
    SPLATB_REG m0, flimEq, m7   ; E
    SPLATB_REG m1, flimIq, m7   ; I
    SPLATB_REG m2, hevthrq, m7  ; hev_thresh

%define m_flimE    [rsp]
%define m_flimI    [rsp+mmsize]
%define m_hevthr   [rsp+mmsize*2]
%define m_maskres  [rsp+mmsize*3]
%define m_p0backup [rsp+mmsize*3]
%define m_q0backup [rsp+mmsize*4]

%define m_hevthr   m11
%define m_maskres  m12
%define m_p0backup m12
%define m_q0backup m8

    ; splat function arguments
    SPLATB_REG m_flimE,  flimEq,  m7 ; E
    SPLATB_REG m_flimI,  flimIq,  m7 ; I
    SPLATB_REG m_hevthr, hevthrq, m7 ; hev_thresh

%if %2 == 8 ; chroma
    DEFINE_ARGS dst1, dst8, mstride, stride, dst2

    DEFINE_ARGS dst1, mstride, stride, dst2, cntr

    DEFINE_ARGS dst1, mstride, stride, dst2, dst8

    mov  strideq, mstrideq

    lea    dst1q, [dst1q+strideq*4-4]
%if %2 == 8 ; chroma
    lea    dst8q, [dst8q+strideq*4-4]
    lea    dst2q, [dst1q+strideq]

%if %2 == 8 && mmsize == 16

    movrow    m0, [dst1q+mstrideq*4]    ; p3
    movrow    m1, [dst2q+mstrideq*4]    ; p2
    movrow    m2, [dst1q+mstrideq*2]    ; p1
    movrow    m5, [dst2q]               ; q1
    movrow    m6, [dst2q+ strideq*1]    ; q2
    movrow    m7, [dst2q+ strideq*2]    ; q3
%if mmsize == 16 && %2 == 8
    movhps    m0, [dst8q+mstrideq*4]
    movhps    m2, [dst8q+mstrideq*2]

    movhps    m1, [dst8q+mstrideq*4]

    movhps    m6, [dst8q+ strideq  ]
    movhps    m7, [dst8q+ strideq*2]

%elif mmsize == 8 ; mmx/mmxext (h)
    ; read 8 rows of 8px each
    movu      m0, [dst1q+mstrideq*4]
    movu      m1, [dst2q+mstrideq*4]
    movu      m2, [dst1q+mstrideq*2]
    movu      m3, [dst1q+mstrideq ]

    movu      m6, [dst2q+ strideq ]

    TRANSPOSE4x4B 0, 1, 2, 3, 7

    movu      m7, [dst2q+ strideq*2]
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY dq, 0, 4, 1      ; p3/p2
    SBUTTERFLY dq, 2, 6, 1      ; q0/q1
    SBUTTERFLY dq, 3, 7, 1      ; q2/q3

    mova m_q0backup, m2         ; store q0
    SBUTTERFLY dq, 1, 5, 2      ; p1/p0
    mova m_p0backup, m5         ; store p0
    lea    dst8q, [dst1q+ strideq*8]

    ; read 16 rows of 8px each, interleave
    movh      m0, [dst1q+mstrideq*4]
    movh      m1, [dst8q+mstrideq*4]
    movh      m2, [dst1q+mstrideq*2]
    movh      m5, [dst8q+mstrideq*2]
    movh      m3, [dst1q+mstrideq ]
    movh      m6, [dst8q+mstrideq ]

    punpcklbw m0, m1            ; A/I
    punpcklbw m2, m5            ; C/K
    punpcklbw m3, m6            ; D/L
    punpcklbw m4, m7            ; E/M

    movh      m1, [dst2q+mstrideq*4]
    movh      m6, [dst8q+mstrideq*4]

    punpcklbw m1, m6            ; B/J
    punpcklbw m5, m7            ; F/N
    movh      m6, [dst2q+ strideq ]
    movh      m7, [dst8q+ strideq ]
    punpcklbw m6, m7            ; G/O

    TRANSPOSE4x4B 0, 1, 2, 3, 7

    movh      m7, [dst2q+ strideq*2]
    movh      m1, [dst8q+ strideq*2]
    punpcklbw m7, m1            ; H/P
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY dq, 0, 4, 1      ; p3/p2
    SBUTTERFLY dq, 2, 6, 1      ; q0/q1
    SBUTTERFLY dq, 3, 7, 1      ; q2/q3

    mova m_q0backup, m2         ; store q0

    SBUTTERFLY dq, 1, 5, 2      ; p1/p0

    mova m_p0backup, m5         ; store p0
    ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1

    psubusb   m4, m0            ; p2-p3
    psubusb   m0, m1            ; p3-p2
    por       m0, m4            ; abs(p3-p2)

    psubusb   m4, m1            ; p1-p2
    psubusb   m1, m2            ; p2-p1
    por       m1, m4            ; abs(p2-p1)

    psubusb   m4, m7            ; q2-q3
    psubusb   m7, m6            ; q3-q2
    por       m7, m4            ; abs(q3-q2)

    psubusb   m4, m6            ; q1-q2
    psubusb   m6, m5            ; q2-q1
    por       m6, m4            ; abs(q2-q1)

%if notcpuflag(mmxext)

    pcmpeqb   m0, m3            ; abs(p3-p2) <= I
    pcmpeqb   m1, m3            ; abs(p2-p1) <= I
    pcmpeqb   m7, m3            ; abs(q3-q2) <= I
    pcmpeqb   m6, m3            ; abs(q2-q1) <= I

    ; normal_limit and high_edge_variance for p1-p0, q1-q0
    SWAP       7, 3             ; now m7 is zero

    movrow    m3, [dst1q+mstrideq ]     ; p0
%if mmsize == 16 && %2 == 8
    movhps    m3, [dst8q+mstrideq ]

    psubusb   m1, m3            ; p1-p0
    psubusb   m6, m2            ; p0-p1
    por       m1, m6            ; abs(p1-p0)
%if notcpuflag(mmxext)

    psubusb   m6, m_hevthr
    pcmpeqb   m1, m7            ; abs(p1-p0) <= I
    pcmpeqb   m6, m7            ; abs(p1-p0) <= hev_thresh

    pmaxub    m0, m1            ; max_I
    SWAP       1, 4             ; max_hev_thresh

    SWAP       6, 4             ; now m6 is I
    movrow    m4, [dst1q]       ; q0
%if mmsize == 16 && %2 == 8

    psubusb   m1, m5            ; q0-q1
    psubusb   m7, m4            ; q1-q0
    por       m1, m7            ; abs(q1-q0)
%if notcpuflag(mmxext)

    psubusb   m7, m_hevthr

    pcmpeqb   m1, m6            ; abs(q1-q0) <= I
    pcmpeqb   m7, m6            ; abs(q1-q0) <= hev_thresh

    pand      m0, m1            ; abs([pq][321]-[pq][210]) <= I

    psubusb   m6, m_hevthr
    pcmpeqb   m0, m7            ; max(abs(..)) <= I
    pcmpeqb   m6, m7            ; !(max(abs..) > thresh)

    mova m_maskres, m6          ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)

    mova      m6, m4            ; keep copies of p0/q0 around for later use

    psubusb   m1, m4            ; p0-q0
    psubusb   m6, m3            ; q0-p0
    por       m1, m6            ; abs(q0-p0)
    paddusb   m1, m1            ; m1=2*abs(q0-p0)

    psubusb   m7, m5            ; p1-q1
    psubusb   m6, m2            ; q1-p1
    por       m7, m6            ; abs(q1-p1)

    psrlq     m7, 1             ; abs(q1-p1)/2
    paddusb   m7, m1            ; abs(q0-p0)*2+abs(q1-p1)/2

    pcmpeqb   m7, m6            ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
    pand      m0, m7            ; normal_limit result
    ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
%ifdef m8 ; x86-64 && sse2

%else ; x86-32 or mmx/mmxext
%define m_pb_80 [pb_80]

    psubsb    m1, m7            ; (signed) q0-p0

    psubsb    m6, m7            ; (signed) p1-q1

    paddsb    m7, m1            ; 3*(q0-p0)+is4tap?(p1-q1)

    paddusb   m3, m1            ; p0+f2

    paddusb   m4, m1            ; q0-f1

%if notcpuflag(mmxext)

%if notcpuflag(mmxext)

    paddusb   m5, m1            ; q1-a
    paddusb   m2, m0            ; p1+a
    movrow [dst1q+mstrideq*2], m2
    movrow [dst1q+mstrideq ], m3

    movrow [dst1q+ strideq ], m5
%if mmsize == 16 && %2 == 8
    movhps [dst8q+mstrideq*2], m2
    movhps [dst8q+mstrideq ], m3

    movhps [dst8q+ strideq ], m5

    TRANSPOSE4x4B 2, 3, 4, 5, 6

%if mmsize == 8 ; mmx/mmxext (h)
    WRITE_4x2D 2, 3, 4, 5, dst1q, dst2q, mstrideq, strideq

    lea    dst8q, [dst8q+mstrideq +2]
    WRITE_4x4D 2, 3, 4, 5, dst1q, dst2q, dst8q, mstrideq, strideq, %2

%if %2 == 8 ; chroma

    lea    dst1q, [dst1q+ strideq*8-2]

%ifndef m8 ; sse2 on x86-32 or mmx/mmxext
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v, 8
INNER_LOOPFILTER h, 8

INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v, 8
INNER_LOOPFILTER h, 8

INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v, 8
INNER_LOOPFILTER h, 8

INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v, 8
INNER_LOOPFILTER h, 8
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter<size>_mbedge_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
;                                             int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------
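
; Macroblock-edge filter: same masks as the inner filter, but where hev is
; false the wider smoothing applies, with w = clamp((p1-q1) + 3*(q0-p0)):
; p0/q0 move by (27*w+63)>>7, p1/q1 by (18*w+63)>>7, p2/q2 by (9*w+63)>>7.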
%macro MBEDGE_LOOPFILTER 2
%if %2 == 8 ; chroma
cglobal vp8_%1_loop_filter8uv_mbedge, 6, 6, 15, dst1, dst8, stride, flimE, flimI, hevthr

cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, dst1, stride, flimE, flimI, hevthr

%ifndef m8       ; stack layout: [0]=E, [1]=I, [2]=hev_thr
%if mmsize == 16 ;               [3]=hev() result
                 ;               [4]=filter tmp result
                 ;               [5]/[6] = p2/q2 backup
                 ;               [7]=lim_res sign result
%assign pad 16 + mmsize * 7 - gprsize - (stack_offset & 15)
%else ; 8        ;               extra storage space for transposes
%assign pad 16 + mmsize * 8 - gprsize - (stack_offset & 15)

    ; splat function arguments
    SPLATB_REG m0, flimEq, m7   ; E
    SPLATB_REG m1, flimIq, m7   ; I
    SPLATB_REG m2, hevthrq, m7  ; hev_thresh

%define m_flimE    [rsp]
%define m_flimI    [rsp+mmsize]
%define m_hevthr   [rsp+mmsize*2]
%define m_maskres  [rsp+mmsize*3]
%define m_limres   [rsp+mmsize*4]
%define m_p0backup [rsp+mmsize*3]
%define m_q0backup [rsp+mmsize*4]
%define m_p2backup [rsp+mmsize*5]
%define m_q2backup [rsp+mmsize*6]

%define m_limsign  [rsp]

%define m_limsign  [rsp+mmsize*7]

%else ; sse2 on x86-64

%define m_hevthr   m11
%define m_maskres  m12

%define m_p0backup m12
%define m_q0backup m8
%define m_p2backup m13
%define m_q2backup m14
%define m_limsign  m9

    ; splat function arguments
    SPLATB_REG m_flimE,  flimEq,  m7 ; E
    SPLATB_REG m_flimI,  flimIq,  m7 ; I
    SPLATB_REG m_hevthr, hevthrq, m7 ; hev_thresh

%if %2 == 8 ; chroma
    DEFINE_ARGS dst1, dst8, mstride, stride, dst2

    DEFINE_ARGS dst1, mstride, stride, dst2, cntr

    DEFINE_ARGS dst1, mstride, stride, dst2, dst8

    mov  strideq, mstrideq

    lea    dst1q, [dst1q+strideq*4-4]
%if %2 == 8 ; chroma
    lea    dst8q, [dst8q+strideq*4-4]
    lea    dst2q, [dst1q+ strideq ]

%if %2 == 8 && mmsize == 16

    movrow    m0, [dst1q+mstrideq*4]    ; p3
    movrow    m1, [dst2q+mstrideq*4]    ; p2
    movrow    m2, [dst1q+mstrideq*2]    ; p1
    movrow    m5, [dst2q]               ; q1
    movrow    m6, [dst2q+ strideq ]     ; q2
    movrow    m7, [dst2q+ strideq*2]    ; q3
%if mmsize == 16 && %2 == 8
    movhps    m0, [dst8q+mstrideq*4]
    movhps    m2, [dst8q+mstrideq*2]

    movhps    m1, [dst8q+mstrideq*4]

    movhps    m6, [dst8q+ strideq ]
    movhps    m7, [dst8q+ strideq*2]

%elif mmsize == 8 ; mmx/mmxext (h)
    ; read 8 rows of 8px each
    movu      m0, [dst1q+mstrideq*4]
    movu      m1, [dst2q+mstrideq*4]
    movu      m2, [dst1q+mstrideq*2]
    movu      m3, [dst1q+mstrideq ]

    movu      m6, [dst2q+ strideq ]

    TRANSPOSE4x4B 0, 1, 2, 3, 7

    movu      m7, [dst2q+ strideq*2]
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY dq, 0, 4, 1      ; p3/p2
    SBUTTERFLY dq, 2, 6, 1      ; q0/q1
    SBUTTERFLY dq, 3, 7, 1      ; q2/q3

    mova m_q0backup, m2         ; store q0
    SBUTTERFLY dq, 1, 5, 2      ; p1/p0
    mova m_p0backup, m5         ; store p0
    lea    dst8q, [dst1q+ strideq*8 ]

    ; read 16 rows of 8px each, interleave
    movh      m0, [dst1q+mstrideq*4]
    movh      m1, [dst8q+mstrideq*4]
    movh      m2, [dst1q+mstrideq*2]
    movh      m5, [dst8q+mstrideq*2]
    movh      m3, [dst1q+mstrideq ]
    movh      m6, [dst8q+mstrideq ]

    punpcklbw m0, m1            ; A/I
    punpcklbw m2, m5            ; C/K
    punpcklbw m3, m6            ; D/L
    punpcklbw m4, m7            ; E/M

    movh      m1, [dst2q+mstrideq*4]
    movh      m6, [dst8q+mstrideq*4]

    punpcklbw m1, m6            ; B/J
    punpcklbw m5, m7            ; F/N
    movh      m6, [dst2q+ strideq ]
    movh      m7, [dst8q+ strideq ]
    punpcklbw m6, m7            ; G/O

    TRANSPOSE4x4B 0, 1, 2, 3, 7

    movh      m7, [dst2q+ strideq*2]
    movh      m1, [dst8q+ strideq*2]
    punpcklbw m7, m1            ; H/P
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY dq, 0, 4, 1      ; p3/p2
    SBUTTERFLY dq, 2, 6, 1      ; q0/q1
    SBUTTERFLY dq, 3, 7, 1      ; q2/q3

    mova m_q0backup, m2         ; store q0

    SBUTTERFLY dq, 1, 5, 2      ; p1/p0

    mova m_p0backup, m5         ; store p0
    ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1

    psubusb   m4, m0            ; p2-p3
    psubusb   m0, m1            ; p3-p2
    por       m0, m4            ; abs(p3-p2)

    psubusb   m4, m1            ; p1-p2

    psubusb   m1, m2            ; p2-p1
    por       m1, m4            ; abs(p2-p1)

    psubusb   m4, m7            ; q2-q3
    psubusb   m7, m6            ; q3-q2
    por       m7, m4            ; abs(q3-q2)

    psubusb   m4, m6            ; q1-q2

    psubusb   m6, m5            ; q2-q1
    por       m6, m4            ; abs(q2-q1)

%if notcpuflag(mmxext)

    pcmpeqb   m0, m3            ; abs(p3-p2) <= I
    pcmpeqb   m1, m3            ; abs(p2-p1) <= I
    pcmpeqb   m7, m3            ; abs(q3-q2) <= I
    pcmpeqb   m6, m3            ; abs(q2-q1) <= I

    ; normal_limit and high_edge_variance for p1-p0, q1-q0
    SWAP       7, 3             ; now m7 is zero

    movrow    m3, [dst1q+mstrideq ]     ; p0
%if mmsize == 16 && %2 == 8
    movhps    m3, [dst8q+mstrideq ]

    psubusb   m1, m3            ; p1-p0
    psubusb   m6, m2            ; p0-p1
    por       m1, m6            ; abs(p1-p0)
%if notcpuflag(mmxext)

    psubusb   m6, m_hevthr
    pcmpeqb   m1, m7            ; abs(p1-p0) <= I
    pcmpeqb   m6, m7            ; abs(p1-p0) <= hev_thresh

    pmaxub    m0, m1            ; max_I
    SWAP       1, 4             ; max_hev_thresh

    SWAP       6, 4             ; now m6 is I
    movrow    m4, [dst1q]       ; q0
%if mmsize == 16 && %2 == 8

    psubusb   m1, m5            ; q0-q1
    psubusb   m7, m4            ; q1-q0
    por       m1, m7            ; abs(q1-q0)
%if notcpuflag(mmxext)

    psubusb   m7, m_hevthr

    pcmpeqb   m1, m6            ; abs(q1-q0) <= I
    pcmpeqb   m7, m6            ; abs(q1-q0) <= hev_thresh

    pand      m0, m1            ; abs([pq][321]-[pq][210]) <= I

    psubusb   m6, m_hevthr
    pcmpeqb   m0, m7            ; max(abs(..)) <= I
    pcmpeqb   m6, m7            ; !(max(abs..) > thresh)

    mova m_maskres, m6          ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)

    mova      m6, m4            ; keep copies of p0/q0 around for later use

    psubusb   m1, m4            ; p0-q0
    psubusb   m6, m3            ; q0-p0
    por       m1, m6            ; abs(q0-p0)
    paddusb   m1, m1            ; m1=2*abs(q0-p0)

    psubusb   m7, m5            ; p1-q1
    psubusb   m6, m2            ; q1-p1
    por       m7, m6            ; abs(q1-p1)

    psrlq     m7, 1             ; abs(q1-p1)/2
    paddusb   m7, m1            ; abs(q0-p0)*2+abs(q1-p1)/2

    pcmpeqb   m7, m6            ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
    pand      m0, m7            ; normal_limit result
    ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
%ifdef m8 ; x86-64 && sse2

%else ; x86-32 or mmx/mmxext
%define m_pb_80 [pb_80]

    psubsb    m1, m7            ; (signed) q0-p0

    psubsb    m6, m7            ; (signed) p1-q1

    mova m_limres, m6           ; 3*(q0-p0)+(p1-q1) masked for filter_mbedge

    pandn     m7, m6            ; 3*(q0-p0)+(p1-q1) masked for filter_common

    paddusb   m3, m1            ; p0+f2

    paddusb   m4, m1            ; q0-f1
    ; filter_mbedge (m2-m5 = p1-q1; lim_res carries w)

    pcmpgtb   m0, m1            ; which are negative

    punpcklbw m6, m7            ; interleave with "1" for rounding

    punpcklbw m6, m0            ; signed byte->word

    SWAP       0, 10            ; don't lose lim_sign copy

    mova m_maskres, m6          ; backup for later in filter

    packsswb  m6, m1            ; a0

    mova      m6, [pb_18_63]    ; pipelining

    paddusb   m3, m0            ; p0+a0
    psubusb   m4, m0            ; q0-a0

    packsswb  m6, m1            ; a1

    paddusb   m2, m0            ; p1+a1
    psubusb   m5, m0            ; q1-a1
    packsswb  m6, m1            ; a2

    paddusb   m1, m7            ; p2+a2
    psubusb   m6, m7            ; q2-a2
    movrow [dst2q+mstrideq*4], m1
    movrow [dst1q+mstrideq*2], m2
    movrow [dst1q+mstrideq ], m3

    movrow [dst2q+ strideq ], m6
%if mmsize == 16 && %2 == 8

    movhps [dst8q+mstrideq*2], m1
    movhps [dst8q+mstrideq ], m2

    movhps [dst8q+ strideq ], m5
    movhps [dst8q+ strideq*2], m6
    TRANSPOSE4x4B 1, 2, 3, 4, 0
    SBUTTERFLY bw, 5, 6, 0

%if mmsize == 8 ; mmx/mmxext (h)
    WRITE_4x2D 1, 2, 3, 4, dst1q, dst2q, mstrideq, strideq

    WRITE_2x4W m5, m6, dst2q, dst1q, mstrideq, strideq

    lea    dst8q, [dst8q+mstrideq+1]
    WRITE_4x4D 1, 2, 3, 4, dst1q, dst2q, dst8q, mstrideq, strideq, %2
    lea    dst1q, [dst2q+mstrideq+4]
    lea    dst8q, [dst8q+mstrideq+4]

    WRITE_8W  m5, dst2q, dst1q, mstrideq, strideq

    lea    dst2q, [dst8q+ strideq ]

    WRITE_8W  m6, dst2q, dst8q, mstrideq, strideq

%if %2 == 8 ; chroma

    lea    dst1q, [dst1q+ strideq*8-5]

%ifndef m8 ; sse2 on x86-32 or mmx/mmxext
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v, 8
MBEDGE_LOOPFILTER h, 8

MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v, 8
MBEDGE_LOOPFILTER h, 8

MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v, 8
MBEDGE_LOOPFILTER h, 8

MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v, 8
MBEDGE_LOOPFILTER h, 8

MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER h, 8