1 ;******************************************************************************
2 ;* VP8 MMXEXT optimizations
3 ;* Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
4 ;* Copyright (c) 2010 Fiona Glaser <fiona@x264.com>
6 ;* This file is part of FFmpeg.
8 ;* FFmpeg is free software; you can redistribute it and/or
9 ;* modify it under the terms of the GNU Lesser General Public
10 ;* License as published by the Free Software Foundation; either
11 ;* version 2.1 of the License, or (at your option) any later version.
13 ;* FFmpeg is distributed in the hope that it will be useful,
14 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 ;* Lesser General Public License for more details.
18 ;* You should have received a copy of the GNU Lesser General Public
19 ;* License along with FFmpeg; if not, write to the Free Software
20 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 ;******************************************************************************
23 %include "libavutil/x86/x86util.asm"
; Subpel MC filter coefficient tables. NOTE(review): this listing is elided —
; only the first row of each multi-row table is visible; the full tables are
; indexed by the subpel position (mx/my).
fourtap_filter_hw_m: times 4 dw -6, 123
sixtap_filter_hw_m: times 4 dw 2, -11
fourtap_filter_hb_m: times 8 db -6, 123
sixtap_filter_hb_m: times 8 db 2, 1
fourtap_filter_v_m: times 8 dw -6
sixtap_filter_v_m: times 8 dw 2
bilinear_filter_vw_m: times 8 dw 1
bilinear_filter_vb_m: times 8 db 7, 1
; PIC builds: address the tables through picregq (loaded with lea at function
; entry). NOTE(review): the %if PIC / %else / %endif guards around these two
; %define groups are not visible in this view — confirm against the full file.
%define fourtap_filter_hw picregq
%define sixtap_filter_hw picregq
%define fourtap_filter_hb picregq
%define sixtap_filter_hb picregq
%define fourtap_filter_v picregq
%define sixtap_filter_v picregq
%define bilinear_filter_vw picregq
%define bilinear_filter_vb picregq
; non-PIC builds: reference the tables directly by symbol
%define fourtap_filter_hw fourtap_filter_hw_m
%define sixtap_filter_hw sixtap_filter_hw_m
%define fourtap_filter_hb fourtap_filter_hb_m
%define sixtap_filter_hb sixtap_filter_hb_m
%define fourtap_filter_v fourtap_filter_v_m
%define sixtap_filter_v sixtap_filter_v_m
%define bilinear_filter_vw bilinear_filter_vw_m
%define bilinear_filter_vb bilinear_filter_vb_m
; pshufb masks pairing neighbouring source bytes so pmaddubsw can apply two
; filter taps per multiply
filter_h2_shuf: db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
filter_h4_shuf: db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11
; IDCT multiplier word constants (see VP8_MULTIPLY_SUMSUB below, which
; documents m6/m7 as holding 20091/17734)
pw_20091: times 4 dw 20091
pw_17734: times 4 dw 17734
156 ;-------------------------------------------------------------------------------
157 ; subpel MC functions:
159 ; void ff_put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, ptrdiff_t deststride,
160 ; uint8_t *src, ptrdiff_t srcstride,
161 ; int height, int mx, int my);
162 ;-------------------------------------------------------------------------------
; SSSE3 subpel MC: byte-coefficient filters applied via pshufb + pmaddubsw.
; %1 = block width (epel size). NOTE(review): this is an elided view — loop
; labels, %endmacro and several instructions between the visible lines are
; missing; do not assume the visible lines are contiguous.
%macro FILTER_SSSE3 1
cglobal put_vp8_epel%1_h6, 6, 6 + npicregs, 8, dst, dststride, src, srcstride, height, mx, picreg
mova m3, [filter_h6_shuf2]
mova m4, [filter_h6_shuf3]
; PIC path: load table base into picregq (presumably under %if PIC — confirm)
lea picregq, [sixtap_filter_hb_m]
mova m5, [sixtap_filter_hb+mxq*8-48] ; set up 6tap filter in bytes
mova m6, [sixtap_filter_hb+mxq*8-32]
mova m7, [sixtap_filter_hb+mxq*8-16]
; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
; shuffle with a memory operand
punpcklbw m0, [srcq+3]
pshufb m0, [filter_h6_shuf1]
pmulhrsw m0, [pw_256] ; scale back and round (multiply-high with rounding)
movh [dstq], m0 ; store
dec heightd ; next row
cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
mova m3, [filter_h2_shuf]
mova m4, [filter_h4_shuf]
lea picregq, [fourtap_filter_hb_m]
mova m5, [fourtap_filter_hb+mxq-16] ; set up 4tap filter in bytes
mova m6, [fourtap_filter_hb+mxq]
movh [dstq], m0 ; store
dec heightd ; next row
; V-only 4-tap: keeps a sliding window of rows, loads one new row per iteration
cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
lea picregq, [fourtap_filter_hb_m]
mova m5, [fourtap_filter_hb+myq-16]
mova m6, [fourtap_filter_hb+myq]
movh m1, [srcq+ srcstrideq]
movh m2, [srcq+2*srcstrideq]
movh m3, [srcq+2*srcstrideq] ; read new row
dec heightd ; next row
; V-only 6-tap: myq is repointed at the filter row so the taps can be used as
; memory operands to pmaddubsw below
cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
lea picregq, [sixtap_filter_hb_m]
lea myq, [sixtap_filter_hb+myq*8]
movh m1, [srcq+srcstrideq]
movh m2, [srcq+srcstrideq*2]
lea srcq, [srcq+srcstrideq*2]
movh m4, [srcq+srcstrideq]
movh m5, [srcq+2*srcstrideq] ; read new row
pmaddubsw m6, [myq-48] ; taps 0/1
pmaddubsw m1, [myq-32] ; taps 2/3
pmaddubsw m7, [myq-16] ; taps 4/5
pmulhrsw m6, [pw_256] ; scale back and round
dec heightd ; next row
; 4x4 block, H-only 4-tap filter
; Word-coefficient MMX version: pixels are widened to words and filtered two
; at a time with pmaddwd. Assumes mm6 = zero and mm7 = rounding constant
; (set up on lines not visible in this elided view — confirm in full file).
cglobal put_vp8_epel4_h4, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
lea picregq, [fourtap_filter_hw_m]
movq mm4, [fourtap_filter_hw+mxq-16] ; set up 4tap filter in words
movq mm5, [fourtap_filter_hw+mxq]
movq mm1, [srcq-1] ; (ABCDEFGH) load 8 horizontal pixels
; first set of 2 pixels
movq mm2, mm1 ; byte ABCD..
punpcklbw mm1, mm6 ; byte->word ABCD
pshufw mm0, mm2, 9 ; byte CDEF..
punpcklbw mm0, mm6 ; byte->word CDEF
pshufw mm3, mm1, 0x94 ; word ABBC
pshufw mm1, mm0, 0x94 ; word CDDE
pmaddwd mm3, mm4 ; multiply 2px with F0/F1
movq mm0, mm1 ; backup for second set of pixels
pmaddwd mm1, mm5 ; multiply 2px with F2/F3
paddd mm3, mm1 ; finish 1st 2px
; second set of 2 pixels, use backup of above
punpckhbw mm2, mm6 ; byte->word EFGH
pmaddwd mm0, mm4 ; multiply backed up 2px with F0/F1
pshufw mm1, mm2, 0x94 ; word EFFG
pmaddwd mm1, mm5 ; multiply 2px with F2/F3
paddd mm0, mm1 ; finish 2nd 2px
; merge two sets of 2 pixels into one set of 4, round/clip/store
packssdw mm3, mm0 ; merge dword->word (4px)
paddsw mm3, mm7 ; rounding
packuswb mm3, mm6 ; clip and word->bytes
movd [dstq], mm3 ; store
dec heightd ; next row
; 4x4 block, H-only 6-tap filter
; Same scheme as the 4-tap version above, with a third coefficient pair (mm6)
; and an extra 4-byte load at srcq+3 to get the 9th..12th pixels without
; overreading. NOTE(review): elided view — the zeroing of mm3 and the rounding
; constant in mm7 are presumably set up on lines not shown; confirm.
cglobal put_vp8_epel4_h6, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
lea picregq, [sixtap_filter_hw_m]
movq mm4, [sixtap_filter_hw+mxq*8-48] ; set up 6tap filter in words
movq mm5, [sixtap_filter_hw+mxq*8-32]
movq mm6, [sixtap_filter_hw+mxq*8-16]
movq mm1, [srcq-2] ; (ABCDEFGH) load 8 horizontal pixels
; first set of 2 pixels
movq mm2, mm1 ; byte ABCD..
punpcklbw mm1, mm3 ; byte->word ABCD
pshufw mm0, mm2, 0x9 ; byte CDEF..
punpckhbw mm2, mm3 ; byte->word EFGH
punpcklbw mm0, mm3 ; byte->word CDEF
pshufw mm1, mm1, 0x94 ; word ABBC
pshufw mm2, mm2, 0x94 ; word EFFG
pmaddwd mm1, mm4 ; multiply 2px with F0/F1
pshufw mm3, mm0, 0x94 ; word CDDE
movq mm0, mm3 ; backup for second set of pixels
pmaddwd mm3, mm5 ; multiply 2px with F2/F3
paddd mm1, mm3 ; add to 1st 2px cache
movq mm3, mm2 ; backup for second set of pixels
pmaddwd mm2, mm6 ; multiply 2px with F4/F5
paddd mm1, mm2 ; finish 1st 2px
; second set of 2 pixels, use backup of above
movd mm2, [srcq+3] ; byte FGHI (prevent overreads)
pmaddwd mm0, mm4 ; multiply 1st backed up 2px with F0/F1
pmaddwd mm3, mm5 ; multiply 2nd backed up 2px with F2/F3
paddd mm0, mm3 ; add to 2nd 2px cache
punpcklbw mm2, mm3 ; byte->word FGHI
pshufw mm2, mm2, 0xE9 ; word GHHI
pmaddwd mm2, mm6 ; multiply 2px with F4/F5
paddd mm0, mm2 ; finish 2nd 2px
; merge two sets of 2 pixels into one set of 4, round/clip/store
packssdw mm1, mm0 ; merge dword->word (4px)
paddsw mm1, mm7 ; rounding
packuswb mm1, mm3 ; clip and word->bytes
movd [dstq], mm1 ; store
dec heightd ; next row
; 8-wide H-only 4-tap filter using the word-coefficient "_v" tables (which
; hold each tap replicated 8x, suitable for full-width pmullw/pmaddwd use).
; mxq is repointed at the filter row so taps can be memory operands.
cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, height, mx, picreg
lea picregq, [fourtap_filter_v_m]
lea mxq, [fourtap_filter_v+mxq-32]
movh [dstq], m0 ; store
dec heightd ; next row
; 8-wide H-only 6-tap filter; same table-indexing scheme as the 4-tap version
; above (mxq repointed at the 6-row filter entry, offset -96 = -6*16).
cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, height, mx, picreg
lea picregq, [sixtap_filter_v_m]
lea mxq, [sixtap_filter_v+mxq-96]
movh [dstq], m0 ; store
dec heightd ; next row
; 4x4 block, V-only 4-tap filter
; Word-coefficient vertical filter; keeps a 3-row sliding window (m1..m3) and
; reads one new row per loop iteration.
cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
lea picregq, [fourtap_filter_v_m]
lea myq, [fourtap_filter_v+myq-32]
movh m1, [srcq+ srcstrideq]
movh m2, [srcq+2*srcstrideq]
; first calculate negative taps (to prevent losing positive overflows)
movh m4, [srcq+2*srcstrideq] ; read new row
; then calculate positive taps
dec heightd ; next row
; 4x4 block, V-only 6-tap filter
; Word-coefficient vertical filter; keeps a 5-row sliding window and reads one
; new row per loop iteration (myq-96 = start of the 6-row filter entry).
cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
lea picregq, [sixtap_filter_v_m]
lea myq, [sixtap_filter_v+myq-96]
movh m1, [srcq+srcstrideq]
movh m2, [srcq+srcstrideq*2]
lea srcq, [srcq+srcstrideq*2]
movh m4, [srcq+srcstrideq]
; first calculate negative taps (to prevent losing positive overflows)
; then calculate positive taps
movh m5, [srcq+2*srcstrideq] ; read new row
dec heightd ; next row
; Bilinear MC, word-coefficient version. %1 = block width. Two rows are
; produced per iteration; the two store sequences visible below are the
; per-width variants (narrow: movh/movh from two regs; wide: movh+movhps from
; one reg) — the %if selecting between them is elided from this view.
%macro FILTER_BILINEAR 1
cglobal put_vp8_bilinear%1_v, 7, 7, 7, dst, dststride, src, srcstride, height, picreg, my
lea picregq, [bilinear_filter_vw_m]
mova m5, [bilinear_filter_vw+myq-1*16] ; weight for row N
mova m4, [bilinear_filter_vw+myq+7*16] ; weight for row N+1
movh m0, [srcq+srcstrideq*0]
movh m1, [srcq+srcstrideq*1]
movh m3, [srcq+srcstrideq*2]
movh [dstq+dststrideq*0], m0
movh [dstq+dststrideq*1], m2
movh [dstq+dststrideq*0], m0
movhps [dstq+dststrideq*1], m0
lea dstq, [dstq+dststrideq*2]
lea srcq, [srcq+srcstrideq*2]
; Bilinear MC, horizontal: blends each pixel with its right neighbour
; (loads at +0 and +1) using the word weight tables.
cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
lea picregq, [bilinear_filter_vw_m]
mova m5, [bilinear_filter_vw+mxq-1*16] ; weight for pixel N
mova m4, [bilinear_filter_vw+mxq+7*16] ; weight for pixel N+1
movh m0, [srcq+srcstrideq*0+0]
movh m1, [srcq+srcstrideq*0+1]
movh m2, [srcq+srcstrideq*1+0]
movh m3, [srcq+srcstrideq*1+1]
movh [dstq+dststrideq*0], m0
movh [dstq+dststrideq*1], m2
movh [dstq+dststrideq*0], m0
movhps [dstq+dststrideq*1], m0
lea dstq, [dstq+dststrideq*2]
lea srcq, [srcq+srcstrideq*2]
; Bilinear MC, SSSE3: byte weights (bilinear_filter_vb) applied with pmaddubsw
; in one step instead of two word multiplies. %1 = block width.
%macro FILTER_BILINEAR_SSSE3 1
cglobal put_vp8_bilinear%1_v, 7, 7, 5, dst, dststride, src, srcstride, height, picreg, my
lea picregq, [bilinear_filter_vb_m]
mova m3, [bilinear_filter_vb+myq-16] ; interleaved byte weight pair
movh m0, [srcq+srcstrideq*0]
movh m1, [srcq+srcstrideq*1]
movh m2, [srcq+srcstrideq*2]
movh [dstq+dststrideq*0], m0
movh [dstq+dststrideq*1], m1
movh [dstq+dststrideq*0], m0
movhps [dstq+dststrideq*1], m0
lea dstq, [dstq+dststrideq*2]
lea srcq, [srcq+srcstrideq*2]
; SSSE3 horizontal bilinear: filter_h2_shuf pairs each byte with its right
; neighbour so one pmaddubsw applies both weights.
cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 5, dst, dststride, src, srcstride, height, mx, picreg
lea picregq, [bilinear_filter_vb_m]
mova m2, [filter_h2_shuf]
mova m3, [bilinear_filter_vb+mxq-16] ; interleaved byte weight pair
movu m0, [srcq+srcstrideq*0] ; unaligned row loads
movu m1, [srcq+srcstrideq*1]
movh [dstq+dststrideq*0], m0
movh [dstq+dststrideq*1], m1
movh [dstq+dststrideq*0], m0
movhps [dstq+dststrideq*1], m0
lea dstq, [dstq+dststrideq*2]
lea srcq, [srcq+srcstrideq*2]
; instantiate 4- and 8-wide SSSE3 bilinear functions
FILTER_BILINEAR_SSSE3 4
FILTER_BILINEAR_SSSE3 8
; Plain 8-wide copy (no filtering): two rows per iteration via MMX movq.
cglobal put_vp8_pixels8, 5, 5, 0, dst, dststride, src, srcstride, height
movq mm0, [srcq+srcstrideq*0]
movq mm1, [srcq+srcstrideq*1]
lea srcq, [srcq+srcstrideq*2]
movq [dstq+dststrideq*0], mm0
movq [dstq+dststrideq*1], mm1
lea dstq, [dstq+dststrideq*2]
; Plain 16-wide copy, MMX version: two rows per iteration, each row as two
; 8-byte movq halves.
cglobal put_vp8_pixels16, 5, 5, 0, dst, dststride, src, srcstride, height
movq mm0, [srcq+srcstrideq*0+0]
movq mm1, [srcq+srcstrideq*0+8]
movq mm2, [srcq+srcstrideq*1+0]
movq mm3, [srcq+srcstrideq*1+8]
lea srcq, [srcq+srcstrideq*2]
movq [dstq+dststrideq*0+0], mm0
movq [dstq+dststrideq*0+8], mm1
movq [dstq+dststrideq*1+0], mm2
movq [dstq+dststrideq*1+8], mm3
lea dstq, [dstq+dststrideq*2]
; Plain 16-wide copy, SSE version: unaligned loads (src may be unaligned),
; aligned stores (dst is assumed 16-byte aligned — movaps faults otherwise).
cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
movups xmm0, [srcq+srcstrideq*0]
movups xmm1, [srcq+srcstrideq*1]
lea srcq, [srcq+srcstrideq*2]
movaps [dstq+dststrideq*0], xmm0
movaps [dstq+dststrideq*1], xmm1
lea dstq, [dstq+dststrideq*2]
886 ;-----------------------------------------------------------------------------
887 ; void ff_vp8_idct_dc_add_<opt>(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
888 ;-----------------------------------------------------------------------------
; ADD_DC helper fragment: %4 = load/store mnemonic (movh/mova), %3 = byte
; offset; loads two rows, adds the DC, stores back. NOTE(review): the %macro
; header and interior add/pack lines are elided from this view.
%4 m3, [dst1q+strideq+%3]
%4 m5, [dst2q+strideq+%3]
%4 [dst1q+strideq+%3], m3
%4 [dst2q+strideq+%3], m5
; DC-only IDCT add, MMX version: broadcast the single DC over a 4x4 block.
cglobal vp8_idct_dc_add, 3, 3, 0, dst, block, stride
DEFINE_ARGS dst1, dst2, stride
lea dst2q, [dst1q+strideq*2] ; dst2 = rows 2/3
ADD_DC m0, m1, 0, movh
; DC-only IDCT add, templated SSE version (instantiations elided).
%macro VP8_IDCT_DC_ADD 0
cglobal vp8_idct_dc_add, 3, 3, 6, dst, block, stride
DEFINE_ARGS dst1, dst2, stride
lea dst2q, [dst1q+strideq*2]
movd m3, [dst1q+strideq]
movd m5, [dst2q+strideq]
; SSE4 path: scatter the four result rows straight from one register
pextrd [dst1q+strideq], m2, 1
pextrd [dst2q], m2, 2
pextrd [dst2q+strideq], m2, 3
; non-SSE4 path: movd per row (shifts between stores are elided)
movd [dst1q+strideq], m2
movd [dst2q+strideq], m2
981 ;-----------------------------------------------------------------------------
982 ; void ff_vp8_idct_dc_add4y_<opt>(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);
983 ;-----------------------------------------------------------------------------
; DC-only IDCT add for 4 horizontally-adjacent luma blocks, MMX version.
; Gathers the 4 DCs into one register, zeroes them in the coefficient blocks,
; then broadcasts each DC across its 4-pixel column group.
cglobal vp8_idct_dc_add4y, 3, 3, 0, dst, block, stride
movd m0, [blockq+32*0] ; A
movd m1, [blockq+32*2] ; C
punpcklwd m0, [blockq+32*1] ; A B
punpcklwd m1, [blockq+32*3] ; C D
punpckldq m0, m1 ; A B C D
; clear the DC coefficients (m6 presumably zero here — set on an elided line)
movd [blockq+32*0], m6
movd [blockq+32*1], m6
movd [blockq+32*2], m6
movd [blockq+32*3], m6
punpcklbw m0, m0 ; AABBCCDD
punpcklbw m6, m6 ; AABBCCDD
punpcklbw m0, m0 ; AAAABBBB
punpckhbw m1, m1 ; CCCCDDDD
punpcklbw m6, m6 ; AAAABBBB
punpckhbw m7, m7 ; CCCCDDDD
DEFINE_ARGS dst1, dst2, stride
lea dst2q, [dst1q+strideq*2] ; dst2 = rows 2/3
ADD_DC m0, m6, 0, mova ; left 8 pixels
ADD_DC m1, m7, 8, mova ; right 8 pixels
; DC-only IDCT add for 4 luma blocks, SSE2 version: the whole 16-pixel row
; fits one xmm register, so a single ADD_DC covers all four blocks.
cglobal vp8_idct_dc_add4y, 3, 3, 6, dst, block, stride
movd m0, [blockq+32*0] ; A
movd m1, [blockq+32*2] ; C
punpcklwd m0, [blockq+32*1] ; A B
punpcklwd m1, [blockq+32*3] ; C D
punpckldq m0, m1 ; A B C D
; clear the DC coefficients (m1 presumably zeroed on an elided line — confirm)
movd [blockq+32*0], m1
movd [blockq+32*1], m1
movd [blockq+32*2], m1
movd [blockq+32*3], m1
DEFINE_ARGS dst1, dst2, stride
lea dst2q, [dst1q+strideq*2] ; dst2 = rows 2/3
ADD_DC m0, m1, 0, mova
1054 ;-----------------------------------------------------------------------------
1055 ; void ff_vp8_idct_dc_add4uv_<opt>(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);
1056 ;-----------------------------------------------------------------------------
; DC-only IDCT add for the 4 chroma blocks, arranged 2x2: same DC gather/
; broadcast as add4y, but the second register pair is applied 4 rows further
; down instead of 8 pixels to the right.
cglobal vp8_idct_dc_add4uv, 3, 3, 0, dst, block, stride
movd m0, [blockq+32*0] ; A
movd m1, [blockq+32*2] ; C
punpcklwd m0, [blockq+32*1] ; A B
punpcklwd m1, [blockq+32*3] ; C D
punpckldq m0, m1 ; A B C D
; clear the DC coefficients (m6 presumably zero here — set on an elided line)
movd [blockq+32*0], m6
movd [blockq+32*1], m6
movd [blockq+32*2], m6
movd [blockq+32*3], m6
punpcklbw m0, m0 ; AABBCCDD
punpcklbw m6, m6 ; AABBCCDD
punpcklbw m0, m0 ; AAAABBBB
punpckhbw m1, m1 ; CCCCDDDD
punpcklbw m6, m6 ; AAAABBBB
punpckhbw m7, m7 ; CCCCDDDD
DEFINE_ARGS dst1, dst2, stride
lea dst2q, [dst1q+strideq*2]
ADD_DC m0, m6, 0, mova ; top 2x2 pair (blocks A/B)
lea dst1q, [dst1q+strideq*4] ; advance 4 rows to the bottom pair
lea dst2q, [dst2q+strideq*4]
ADD_DC m1, m7, 0, mova ; bottom 2x2 pair (blocks C/D)
1096 ;-----------------------------------------------------------------------------
1097 ; void ff_vp8_idct_add_<opt>(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
1098 ;-----------------------------------------------------------------------------
; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
; this macro assumes that m6/m7 have words for 20091/17734 loaded
; (17734 = 35468/2; the doubling is handled on lines elided from this view)
%macro VP8_MULTIPLY_SUMSUB 4
pmulhw %3, m6 ;20091(1)
pmulhw %4, m6 ;20091(2)
pmulhw %1, m7 ;35468(1)
pmulhw %2, m7 ;35468(2)
; calculate x0=%1+%3; x1=%1-%3
; x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
; %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
; %5/%6 are temporary registers
; we assume m6/m7 have constant words 20091/17734 loaded in them
%macro VP8_IDCT_TRANSFORM4x4_1D 6
SUMSUB_BA w, %3, %1, %5 ;t0, t1
VP8_MULTIPLY_SUMSUB m%2, m%4, m%5,m%6 ;t2, t3
SUMSUB_BA w, %4, %3, %5 ;tmp0, tmp3
SUMSUB_BA w, %2, %1, %5 ;tmp1, tmp2
; Full 4x4 IDCT + add to dst: 1D transform, transpose, 1D transform again,
; then STORE_DIFFx2 adds the rounded result (>>3) to the destination pixels.
%macro VP8_IDCT_ADD 0
cglobal vp8_idct_add, 3, 3, 0, dst, block, stride
; load the 4x4 coefficient block (one row per register)
movq m0, [blockq+ 0]
movq m1, [blockq+ 8]
movq m2, [blockq+16]
movq m3, [blockq+24]
; clear the coefficient block afterwards — SSE path (two 16-byte stores)
movaps [blockq+ 0], xmm0
movaps [blockq+16], xmm0
; MMX path (four 8-byte stores); the %if selecting paths is elided
movq [blockq+ 0], m4
movq [blockq+ 8], m4
movq [blockq+16], m4
movq [blockq+24], m4
; column pass, transpose, row pass, transpose back
VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
TRANSPOSE4x4W 0, 1, 2, 3, 4
VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
TRANSPOSE4x4W 0, 1, 2, 3, 4
DEFINE_ARGS dst1, dst2, stride
lea dst2q, [dst1q+2*strideq] ; dst2 = rows 2/3
STORE_DIFFx2 m0, m1, m6, m7, m4, 3, dst1q, strideq ; rows 0/1, shift 3
STORE_DIFFx2 m2, m3, m6, m7, m4, 3, dst2q, strideq ; rows 2/3, shift 3
1176 ;-----------------------------------------------------------------------------
1177 ; void ff_vp8_luma_dc_wht(int16_t block[4][4][16], int16_t dc[16])
1178 ;-----------------------------------------------------------------------------
; SCATTER_WHT: store one pair of transformed DC words into the DC slot
; (coefficient 0) of consecutive 16-coefficient luma blocks; %3 selects the
; column. The extractions filling dc1w/dc2w between stores are elided here.
%macro SCATTER_WHT 3
mov [blockq+2*16*(0+%3)], dc1w
mov [blockq+2*16*(1+%3)], dc2w
mov [blockq+2*16*(4+%3)], dc1w
mov [blockq+2*16*(5+%3)], dc2w
mov [blockq+2*16*(8+%3)], dc1w
mov [blockq+2*16*(9+%3)], dc2w
mov [blockq+2*16*(12+%3)], dc1w
mov [blockq+2*16*(13+%3)], dc2w
; 1D 4-point Hadamard butterfly over word lanes (uses x86util SUMSUB_BADC)
%macro HADAMARD4_1D 4
SUMSUB_BADC w, %2, %1, %4, %3
SUMSUB_BADC w, %4, %2, %3, %1
; Walsh-Hadamard transform of the luma DC plane: rows, transpose, columns,
; then the results are scattered into the 16 per-block DC positions.
cglobal vp8_luma_dc_wht, 2, 3, 0, block, dc1, dc2
; clear the dc[] input buffer after loading it
movaps [dc1q+ 0], xmm0
movaps [dc1q+16], xmm0
HADAMARD4_1D 0, 1, 2, 3
TRANSPOSE4x4W 0, 1, 2, 3, 4
HADAMARD4_1D 0, 1, 2, 3