1 ;*****************************************************************************
2 ;* pixel.asm: x86 pixel metrics
3 ;*****************************************************************************
4 ;* Copyright (C) 2003-2012 x264 project
6 ;* Authors: Loren Merritt <lorenm@u.washington.edu>
7 ;* Holger Lubitz <holger@lubitz.org>
8 ;* Laurent Aimar <fenrir@via.ecp.fr>
9 ;* Alex Izvorski <aizvorksi@gmail.com>
10 ;* Fiona Glaser <fiona@x264.com>
11 ;* Oskar Arvidsson <oskar@irock.se>
13 ;* This program is free software; you can redistribute it and/or modify
14 ;* it under the terms of the GNU General Public License as published by
15 ;* the Free Software Foundation; either version 2 of the License, or
16 ;* (at your option) any later version.
18 ;* This program is distributed in the hope that it will be useful,
19 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
20 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 ;* GNU General Public License for more details.
23 ;* You should have received a copy of the GNU General Public License
24 ;* along with this program; if not, write to the Free Software
25 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
27 ;* This program is also available under a commercial proprietary license.
28 ;* For more information, contact us at licensing@x264.com.
29 ;*****************************************************************************
32 %include "x86util.asm"
35 mask_ff: times 16 db 0xff
38 ssim_c1: times 4 dd 6697.7856 ; .01*.01*1023*1023*64
39 ssim_c2: times 4 dd 3797644.4352 ; .03*.03*1023*1023*64*63
40 pf_64: times 4 dd 64.0
41 pf_128: times 4 dd 128.0
43 ssim_c1: times 4 dd 1671 ; .01*.01*511*511*64
44 ssim_c2: times 4 dd 947556 ; .03*.03*511*511*64*63
46 ssim_c1: times 4 dd 416 ; .01*.01*255*255*64
47 ssim_c2: times 4 dd 235963 ; .03*.03*255*255*64*63
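; (As a worked check of the constants above, for the 8-bit case:
;    .01*.01*255*255*64    = 416.16    -> rounded to 416
;    .03*.03*255*255*64*63 = 235962.72 -> rounded to 235963
; The pixel-range and window-size factors are folded into the standard SSIM
; C1/C2 constants so pixel_ssim_end4 can work on unnormalized block sums.)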
49 mask_ac4: dw 0, -1, -1, -1, 0, -1, -1, -1
50 mask_ac4b: dw 0, -1, 0, -1, -1, -1, -1, -1
51 mask_ac8: dw 0, -1, -1, -1, -1, -1, -1, -1
52 hmul_4p: times 2 db 1, 1, 1, 1, 1, -1, 1, -1
55 mask_10: times 4 dw 0, -1
56 mask_1100: times 2 dd 0, -1
57 pb_pppm: times 4 db 1,1,1,-1
58 deinterleave_shuf: db 0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15
59 intrax3_shuf: db 7,6,7,6,5,4,5,4,3,2,3,2,1,0,1,0
61 intrax9a_ddlr1: db 6, 7, 8, 9, 7, 8, 9,10, 4, 5, 6, 7, 3, 4, 5, 6
62 intrax9a_ddlr2: db 8, 9,10,11, 9,10,11,12, 2, 3, 4, 5, 1, 2, 3, 4
63 intrax9a_hdu1: db 15, 4, 5, 6,14, 3,15, 4,14, 2,13, 1,13, 1,12, 0
64 intrax9a_hdu2: db 13, 2,14, 3,12, 1,13, 2,12, 0,11,11,11,11,11,11
65 intrax9a_vrl1: db 10,11,12,13, 3, 4, 5, 6,11,12,13,14, 5, 6, 7, 8
66 intrax9a_vrl2: db 2,10,11,12, 1, 3, 4, 5,12,13,14,15, 6, 7, 8, 9
67 intrax9a_vh1: db 6, 7, 8, 9, 6, 7, 8, 9, 4, 4, 4, 4, 3, 3, 3, 3
68 intrax9a_vh2: db 6, 7, 8, 9, 6, 7, 8, 9, 2, 2, 2, 2, 1, 1, 1, 1
69 intrax9a_dc: db 1, 2, 3, 4, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,-1,-1
70 intrax9a_lut: db 0x60,0x68,0x80,0x00,0x08,0x20,0x40,0x28,0x48,0,0,0,0,0,0,0
71 pw_s01234567: dw 0x8000,0x8001,0x8002,0x8003,0x8004,0x8005,0x8006,0x8007
72 pw_s01234657: dw 0x8000,0x8001,0x8002,0x8003,0x8004,0x8006,0x8005,0x8007
73 intrax9_edge: db 0, 0, 1, 2, 3, 7, 8, 9,10,11,12,13,14,15,15,15
75 intrax9b_ddlr1: db 6, 7, 8, 9, 4, 5, 6, 7, 7, 8, 9,10, 3, 4, 5, 6
76 intrax9b_ddlr2: db 8, 9,10,11, 2, 3, 4, 5, 9,10,11,12, 1, 2, 3, 4
77 intrax9b_hdu1: db 15, 4, 5, 6,14, 2,13, 1,14, 3,15, 4,13, 1,12, 0
78 intrax9b_hdu2: db 13, 2,14, 3,12, 0,11,11,12, 1,13, 2,11,11,11,11
79 intrax9b_vrl1: db 10,11,12,13,11,12,13,14, 3, 4, 5, 6, 5, 6, 7, 8
80 intrax9b_vrl2: db 2,10,11,12,12,13,14,15, 1, 3, 4, 5, 6, 7, 8, 9
81 intrax9b_vh1: db 6, 7, 8, 9, 4, 4, 4, 4, 6, 7, 8, 9, 3, 3, 3, 3
82 intrax9b_vh2: db 6, 7, 8, 9, 2, 2, 2, 2, 6, 7, 8, 9, 1, 1, 1, 1
83 intrax9b_edge2: db 6, 7, 8, 9, 6, 7, 8, 9, 4, 3, 2, 1, 4, 3, 2, 1
84 intrax9b_v1: db 0, 1,-1,-1,-1,-1,-1,-1, 4, 5,-1,-1,-1,-1,-1,-1
85 intrax9b_v2: db 2, 3,-1,-1,-1,-1,-1,-1, 6, 7,-1,-1,-1,-1,-1,-1
86 intrax9b_lut: db 0x60,0x64,0x80,0x00,0x04,0x20,0x40,0x24,0x44,0,0,0,0,0,0,0
88 intra8x9_h1: db 7, 7, 7, 7, 7, 7, 7, 7, 5, 5, 5, 5, 5, 5, 5, 5
89 intra8x9_h2: db 6, 6, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 4, 4, 4, 4
90 intra8x9_h3: db 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1
91 intra8x9_h4: db 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0
92 intra8x9_ddl1: db 1, 2, 3, 4, 5, 6, 7, 8, 3, 4, 5, 6, 7, 8, 9,10
93 intra8x9_ddl2: db 2, 3, 4, 5, 6, 7, 8, 9, 4, 5, 6, 7, 8, 9,10,11
94 intra8x9_ddl3: db 5, 6, 7, 8, 9,10,11,12, 7, 8, 9,10,11,12,13,14
95 intra8x9_ddl4: db 6, 7, 8, 9,10,11,12,13, 8, 9,10,11,12,13,14,15
96 intra8x9_vl1: db 0, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 8
97 intra8x9_vl2: db 1, 2, 3, 4, 5, 6, 7, 8, 2, 3, 4, 5, 6, 7, 8, 9
98 intra8x9_vl3: db 2, 3, 4, 5, 6, 7, 8, 9, 3, 4, 5, 6, 7, 8, 9,10
99 intra8x9_vl4: db 3, 4, 5, 6, 7, 8, 9,10, 4, 5, 6, 7, 8, 9,10,11
100 intra8x9_ddr1: db 8, 9,10,11,12,13,14,15, 6, 7, 8, 9,10,11,12,13
101 intra8x9_ddr2: db 7, 8, 9,10,11,12,13,14, 5, 6, 7, 8, 9,10,11,12
102 intra8x9_ddr3: db 4, 5, 6, 7, 8, 9,10,11, 2, 3, 4, 5, 6, 7, 8, 9
103 intra8x9_ddr4: db 3, 4, 5, 6, 7, 8, 9,10, 1, 2, 3, 4, 5, 6, 7, 8
104 intra8x9_vr1: db 8, 9,10,11,12,13,14,15, 7, 8, 9,10,11,12,13,14
105 intra8x9_vr2: db 8, 9,10,11,12,13,14,15, 6, 8, 9,10,11,12,13,14
106 intra8x9_vr3: db 5, 7, 8, 9,10,11,12,13, 3, 5, 7, 8, 9,10,11,12
107 intra8x9_vr4: db 4, 6, 8, 9,10,11,12,13, 2, 4, 6, 8, 9,10,11,12
108 intra8x9_hd1: db 3, 8, 9,10,11,12,13,14, 1, 6, 2, 7, 3, 8, 9,10
109 intra8x9_hd2: db 2, 7, 3, 8, 9,10,11,12, 0, 5, 1, 6, 2, 7, 3, 8
110 intra8x9_hd3: db 7, 8, 9,10,11,12,13,14, 3, 4, 5, 6, 7, 8, 9,10
111 intra8x9_hd4: db 5, 6, 7, 8, 9,10,11,12, 1, 2, 3, 4, 5, 6, 7, 8
112 intra8x9_hu1: db 13,12,11,10, 9, 8, 7, 6, 9, 8, 7, 6, 5, 4, 3, 2
113 intra8x9_hu2: db 11,10, 9, 8, 7, 6, 5, 4, 7, 6, 5, 4, 3, 2, 1, 0
114 intra8x9_hu3: db 5, 4, 3, 2, 1, 0,15,15, 1, 0,15,15,15,15,15,15
115 intra8x9_hu4: db 3, 2, 1, 0,15,15,15,15,15,15,15,15,15,15,15,15
116 pw_s00112233: dw 0x8000,0x8000,0x8001,0x8001,0x8002,0x8002,0x8003,0x8003
117 pw_s00001111: dw 0x8000,0x8000,0x8000,0x8000,0x8001,0x8001,0x8001,0x8001
119 transd_shuf1: SHUFFLE_MASK_W 0, 8, 2, 10, 4, 12, 6, 14
120 transd_shuf2: SHUFFLE_MASK_W 1, 9, 3, 11, 5, 13, 7, 15
123 sq_0f: dq 0xffffffff, 0
124 pd_f0: times 4 dd 0xffff0000
141 ;=============================================================================
143 ;=============================================================================
145 %ifdef HIGH_BIT_DEPTH
146 ;-----------------------------------------------------------------------------
147 ; int pixel_ssd_MxN( uint16_t *, int, uint16_t *, int )
148 ;-----------------------------------------------------------------------------
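; A scalar C sketch of what these functions compute (editorial illustration,
; not x264's exact reference code; strides are counted in pixels):
;
; static int pixel_ssd( uint16_t *pix1, int stride1,
;                       uint16_t *pix2, int stride2, int w, int h )
; {
;     int ssd = 0;
;     for( int y = 0; y < h; y++ )
;         for( int x = 0; x < w; x++ )
;         {
;             int d = pix1[y*stride1+x] - pix2[y*stride2+x];
;             ssd += d*d;
;         }
;     return ssd;
; }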
150 cglobal pixel_ssd_%1x%2, 4,5,6
161 %define offset mmsize
164 lea r0, [r0+r1*2*num_rows]
166 psubw m3, [r2+offset]
167 lea r2, [r2+r3*2*num_rows]
180 cglobal pixel_ssd_%1x%2, 4,5
181 mov r4, %1*%2/mmsize/2
188 mova m5, [r0+mmsize*2]
189 mova m6, [r2+mmsize*2]
190 mova m7, [r0+mmsize*3]
193 mova m2, [r2+mmsize*3]
228 %endif ; HIGH_BIT_DEPTH
230 %ifndef HIGH_BIT_DEPTH
231 %macro SSD_LOAD_FULL 5
275 DEINTB %2, %1, %4, %3, 7
290 %macro SSD_LOAD_HALF 5
291 LOAD 1, 2, [t0+%1], [t0+%3], 1
292 JOIN 1, 2, 3, 4, [t2+%2], [t2+%4], 1
293 LOAD 3, 4, [t0+%1], [t0+%3], %5
294 JOIN 3, 4, 5, 6, [t2+%2], [t2+%4], %5
307 punpcklbw m%2, m%1, m%5
309 punpcklbw m%4, m%3, m%5
318 %macro SSD_CORE_SSE2 7-8
320 DEINTB %6, %1, %7, %2, %5
324 DEINTB %6, %3, %7, %4, %5
335 %macro SSD_CORE_SSSE3 7-8
337 punpckhbw m%6, m%1, m%2
338 punpckhbw m%7, m%3, m%4
355 SSD_LOAD_%1 %2,%3,%4,%5,%6
356 SSD_CORE 1, 2, 3, 4, 7, 5, 6, %1
363 ;-----------------------------------------------------------------------------
364 ; int pixel_ssd_16x16( uint8_t *, int, uint8_t *, int )
365 ;-----------------------------------------------------------------------------
368 %assign function_align 8
370 %assign function_align 16
372 cglobal pixel_ssd_%1x%2, 0,0,0
373 mov al, %1*%2/mmsize/2
376 jmp mangle(x264_pixel_ssd_%1x%1 %+ SUFFIX %+ .startloop)
381 DECLARE_REG_TMP 0,1,2,3
385 DECLARE_REG_TMP 1,2,3,4
394 %elifidn cpuname, sse2
404 SSD_ITER FULL, 0, 0, mmsize, mmsize, 1
406 SSD_ITER FULL, 0, 0, t1, t3, 2
408 SSD_ITER HALF, 0, 0, t1, t3, 2
434 %define SSD_CORE SSD_CORE_SSE2
435 %define JOIN JOIN_SSE2
442 %define SSD_CORE SSD_CORE_SSSE3
443 %define JOIN JOIN_SSSE3
465 %assign function_align 16
466 %endif ; !HIGH_BIT_DEPTH
468 ;-----------------------------------------------------------------------------
469 ; void pixel_ssd_nv12_core( uint16_t *pixuv1, int stride1, uint16_t *pixuv2, int stride2,
470 ; int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
; The maximum width this function can handle without risk of overflow is given
; by the following equation (with mmsize counted in bits):
;   2 * mmsize/32 * (2^32 - 1) / (2^BIT_DEPTH - 1)^2
; For 10-bit, overflowing thus requires a width of at least 16416 with MMX and
; 32832 with XMM. At sane distortion levels it will take much more than that
; though.
479 ;-----------------------------------------------------------------------------
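; As a worked check of that bound (editorial note): MMX registers are 64 bits
; wide, so for BIT_DEPTH=10 the limit is
;   2 * 64/32 * (2^32 - 1) / 1023^2 = 4 * 4294967295 / 1046529 ~= 16416
; and doubling mmsize to 128 bits (XMM) doubles it to ~32832.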
480 %ifdef HIGH_BIT_DEPTH
482 cglobal pixel_ssd_nv12_core, 6,7,7
498 mova m1, [r0+r6+mmsize]
500 psubw m1, [r2+r6+mmsize]
501 PSHUFLW m0, m0, q3120
502 PSHUFLW m1, m1, q3120
504 pshufhw m0, m0, q3120
505 pshufhw m1, m1, q3120
513 %if mmsize==16 ; using HADDD would remove the mmsize/32 part from the
514 ; equation above, putting the width limit at 8208
523 %else ; unfortunately paddq is sse2
524 ; emulate 48 bit precision for mmx2 instead
545 %else ; fixup for mmx2
546 SBUTTERFLY dq, 4, 5, 0
551 SBUTTERFLY dq, 0, 5, 4
559 %endif ; HIGH_BIT_DEPTH
561 %ifndef HIGH_BIT_DEPTH
562 ;-----------------------------------------------------------------------------
563 ; void pixel_ssd_nv12_core( uint8_t *pixuv1, int stride1, uint8_t *pixuv2, int stride2,
564 ; int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
566 ; This implementation can potentially overflow on image widths >= 11008 (or
; 6604 if interlaced), since it is called on blocks of height up to 12 (resp.
568 ; 20). At sane distortion levels it will take much more than that though.
569 ;-----------------------------------------------------------------------------
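; A scalar C sketch of the operation (editorial illustration; "width" counts
; interleaved U/V pairs, and the two SSDs are accumulated separately):
;
; static void ssd_nv12_core( uint8_t *pixuv1, int stride1,
;                            uint8_t *pixuv2, int stride2, int width,
;                            int height, uint64_t *ssd_u, uint64_t *ssd_v )
; {
;     uint64_t su = 0, sv = 0;
;     for( int y = 0; y < height; y++ )
;         for( int x = 0; x < width; x++ )
;         {
;             int du = pixuv1[y*stride1+2*x]   - pixuv2[y*stride2+2*x];
;             int dv = pixuv1[y*stride1+2*x+1] - pixuv2[y*stride2+2*x+1];
;             su += du*du;
;             sv += dv*dv;
;         }
;     *ssd_u = su;
;     *ssd_v = sv;
; }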
571 cglobal pixel_ssd_nv12_core, 6,7
610 %endif ; !HIGH_BIT_DEPTH
619 ;=============================================================================
621 ;=============================================================================
625 pxor m6, m6 ; sum squared
626 %ifndef HIGH_BIT_DEPTH
632 %endif ; !HIGH_BIT_DEPTH
636 %ifdef HIGH_BIT_DEPTH
637 %if mmsize == 8 && %1*%2 == 256
642 %else ; !HIGH_BIT_DEPTH
644 %endif ; HIGH_BIT_DEPTH
673 %ifdef HIGH_BIT_DEPTH
677 mova m4, [r0+%1+mmsize]
678 %else ; !HIGH_BIT_DEPTH
684 %endif ; HIGH_BIT_DEPTH
690 %ifndef HIGH_BIT_DEPTH
693 %endif ; !HIGH_BIT_DEPTH
699 ;-----------------------------------------------------------------------------
700 ; int pixel_var_wxh( uint8_t *, int )
701 ;-----------------------------------------------------------------------------
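; A scalar C sketch of the actual contract (editorial; despite the "int" in
; the prototype comment, the return value is a packed 64-bit quantity):
;
; static uint64_t pixel_var_wxh( uint8_t *pix, int stride, int w, int h )
; {
;     uint32_t sum = 0, sqr = 0;
;     for( int y = 0; y < h; y++, pix += stride )
;         for( int x = 0; x < w; x++ )
;         {
;             sum += pix[x];
;             sqr += pix[x] * pix[x];
;         }
;     return sum + ((uint64_t)sqr << 32); /* sum low, sum of squares high */
; }
;
; The caller then derives the variance as sqr - (sum*sum >> shift), with the
; shift normalizing by the pixel count (e.g. 8 for 16x16).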
703 cglobal pixel_var_16x16, 2,3
706 VAR_2ROW 8*SIZEOF_PIXEL, 16
709 cglobal pixel_var_8x16, 2,3
715 cglobal pixel_var_8x8, 2,3
721 %ifdef HIGH_BIT_DEPTH
723 cglobal pixel_var_16x16, 2,3,8
729 cglobal pixel_var_8x8, 2,3,8
752 %endif ; HIGH_BIT_DEPTH
754 %ifndef HIGH_BIT_DEPTH
756 cglobal pixel_var_16x16, 2,3,8
769 cglobal pixel_var_8x8, 2,4,8
785 cglobal pixel_var_8x16, 2,4,8
808 %endif ; !HIGH_BIT_DEPTH
818 sub eax, r1d ; sqr - (sum * sum >> shift)
822 ;-----------------------------------------------------------------------------
823 ; int pixel_var2_8x8( pixel *, int, pixel *, int, int * )
824 ;-----------------------------------------------------------------------------
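; A scalar C sketch (editorial) of the 8x8 case; the 8x16 variants below use
; a final shift of 7 instead of 6:
;
; static int pixel_var2_8x8( uint8_t *pix1, int stride1,
;                            uint8_t *pix2, int stride2, int *ssd )
; {
;     int sum = 0, sqr = 0;
;     for( int y = 0; y < 8; y++, pix1 += stride1, pix2 += stride2 )
;         for( int x = 0; x < 8; x++ )
;         {
;             int d = pix1[x] - pix2[x];
;             sum += d;
;             sqr += d*d;
;         }
;     *ssd = sqr;
;     return sqr - (sum*sum >> 6); /* variance of the difference block */
; }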
825 %macro VAR2_8x8_MMX 2
826 cglobal pixel_var2_8x%1, 5,6
831 %ifdef HIGH_BIT_DEPTH
835 psubw m1, [r2+mmsize]
836 %else ; !HIGH_BIT_DEPTH
847 %endif ; HIGH_BIT_DEPTH
867 %macro VAR2_8x8_SSE2 2
868 cglobal pixel_var2_8x%1, 5,6,8
872 %ifdef HIGH_BIT_DEPTH
877 %else ; !HIGH_BIT_DEPTH
883 %endif ; HIGH_BIT_DEPTH
892 lea r0, [r0+r1*2*SIZEOF_PIXEL]
893 lea r2, [r2+r3*2*SIZEOF_PIXEL]
903 %ifndef HIGH_BIT_DEPTH
904 %macro VAR2_8x8_SSSE3 2
905 cglobal pixel_var2_8x%1, 5,6,8
907 pxor m6, m6 ; sum squared
955 %endif ; !HIGH_BIT_DEPTH
957 ;=============================================================================
959 ;=============================================================================
963 ; just use shufps on anything post conroe
966 ; join 2x 32 bit and duplicate them
967 ; emulating shufps is faster on conroe
971 ; doesn't need to dup. sse2 does things by zero extending to words and full h_2d
983 %macro DIFF_UNPACK_SSE2 5
992 %macro DIFF_SUMSUB_SSSE3 5
993 HSUMSUB %1, %2, %3, %4, %5
998 %macro LOAD_DUP_2x4P 4 ; dst, tmp, 2* pointer
1004 %macro LOAD_DUP_4x8P_CONROE 8 ; 4*dst, 4*pointer
1011 %macro LOAD_DUP_4x8P_PENRYN 8
1012 ; penryn and nehalem run punpcklqdq and movddup in different units
1021 %macro LOAD_SUMSUB_8x2P 9
1022 LOAD_DUP_4x8P %1, %2, %3, %4, %6, %7, %8, %9
1023 DIFF_SUMSUB_SSSE3 %1, %3, %2, %4, %5
1026 %macro LOAD_SUMSUB_8x4P_SSSE3 7-10 r0, r2, 0
1027 ; 4x dest, 2x tmp, 1x mul, [2* ptr], [increment?]
1028 LOAD_SUMSUB_8x2P %1, %2, %5, %6, %7, [%8], [%9], [%8+r1], [%9+r3]
1029 LOAD_SUMSUB_8x2P %3, %4, %5, %6, %7, [%8+2*r1], [%9+2*r3], [%8+r4], [%9+r5]
1036 %macro LOAD_SUMSUB_16P_SSSE3 7 ; 2*dst, 2*tmp, mul, 2*ptr
1042 DIFF_SUMSUB_SSSE3 %1, %3, %2, %4, %5
1045 %macro LOAD_SUMSUB_16P_SSE2 7 ; 2*dst, 2*tmp, mask, 2*ptr
1048 DEINTB %1, %2, %3, %4, %5
1051 SUMSUB_BA w, %1, %2, %3
1054 %macro LOAD_SUMSUB_16x4P 10-13 r0, r2, none
1055 ; 8x dest, 1x tmp, 1x mul, [2* ptr] [2nd tmp]
1056 LOAD_SUMSUB_16P %1, %5, %2, %3, %10, %11, %12
1057 LOAD_SUMSUB_16P %2, %6, %3, %4, %10, %11+r1, %12+r3
1058 LOAD_SUMSUB_16P %3, %7, %4, %9, %10, %11+2*r1, %12+2*r3
1059 LOAD_SUMSUB_16P %4, %8, %13, %9, %10, %11+r4, %12+r5
1062 ; in: r4=3*stride1, r5=3*stride2
1063 ; in: %2 = horizontal offset
1064 ; in: %3 = whether we need to increment pix1 and pix2
1067 %macro SATD_4x4_MMX 3
1069 %assign offset %2*SIZEOF_PIXEL
1070 LOAD_DIFF m4, m3, none, [r0+ offset], [r2+ offset]
1071 LOAD_DIFF m5, m3, none, [r0+ r1+offset], [r2+ r3+offset]
1072 LOAD_DIFF m6, m3, none, [r0+2*r1+offset], [r2+2*r3+offset]
1073 LOAD_DIFF m7, m3, none, [r0+ r4+offset], [r2+ r5+offset]
1078 HADAMARD4_2D 4, 5, 6, 7, 3, %%n
1083 %macro SATD_8x4_SSE 8-9
1085 HADAMARD4_2D_SSE %2, %3, %4, %5, %6, amax
1087 HADAMARD4_V %2, %3, %4, %5, %6
1088 ; doing the abs first is a slight advantage
1089 ABSW2 m%2, m%4, m%2, m%4, m%6, m%7
1090 ABSW2 m%3, m%5, m%3, m%5, m%6, m%7
1091 HADAMARD 1, max, %2, %4, %6, %7
1101 HADAMARD 1, max, %3, %5, %6, %7
1106 %macro SATD_START_MMX 0
1108 lea r4, [3*r1] ; 3*stride1
1109 lea r5, [3*r3] ; 3*stride2
1112 %macro SATD_END_MMX 0
1113 %ifdef HIGH_BIT_DEPTH
1116 %else ; !HIGH_BIT_DEPTH
1117 pshufw m1, m0, q1032
1119 pshufw m1, m0, q2301
1123 %endif ; HIGH_BIT_DEPTH
1127 ; FIXME avoid the spilling of regs to hold 3*stride.
1128 ; for small blocks on x86_32, modify pixel pointer instead.
1130 ;-----------------------------------------------------------------------------
1131 ; int pixel_satd_16x16( uint8_t *, int, uint8_t *, int )
1132 ;-----------------------------------------------------------------------------
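; A scalar C sketch of the 4x4 SATD kernel that all of these functions tile
; (editorial illustration, not x264's exact reference code; abs() as in
; <stdlib.h>): a 2D Hadamard transform of the difference block, then the sum
; of absolute coefficients, halved:
;
; static int satd_4x4( uint8_t *pix1, int stride1, uint8_t *pix2, int stride2 )
; {
;     int d[4][4], tmp[4][4], satd = 0;
;     for( int y = 0; y < 4; y++ )
;         for( int x = 0; x < 4; x++ )
;             d[y][x] = pix1[y*stride1+x] - pix2[y*stride2+x];
;     for( int y = 0; y < 4; y++ ) /* horizontal pass */
;     {
;         int s01 = d[y][0] + d[y][1], s23 = d[y][2] + d[y][3];
;         int d01 = d[y][0] - d[y][1], d23 = d[y][2] - d[y][3];
;         tmp[y][0] = s01 + s23; tmp[y][1] = s01 - s23;
;         tmp[y][2] = d01 - d23; tmp[y][3] = d01 + d23;
;     }
;     for( int x = 0; x < 4; x++ ) /* vertical pass + sum of abs */
;     {
;         int s01 = tmp[0][x] + tmp[1][x], s23 = tmp[2][x] + tmp[3][x];
;         int d01 = tmp[0][x] - tmp[1][x], d23 = tmp[2][x] - tmp[3][x];
;         satd += abs(s01 + s23) + abs(s01 - s23)
;               + abs(d01 - d23) + abs(d01 + d23);
;     }
;     return satd >> 1;
; }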
1134 cglobal pixel_satd_16x4_internal
1135 SATD_4x4_MMX m2, 0, 0
1136 SATD_4x4_MMX m1, 4, 0
1138 SATD_4x4_MMX m2, 8, 0
1140 SATD_4x4_MMX m1, 12, 0
1145 cglobal pixel_satd_8x8_internal
1146 SATD_4x4_MMX m2, 0, 0
1147 SATD_4x4_MMX m1, 4, 1
1150 pixel_satd_8x4_internal_mmx2:
1151 SATD_4x4_MMX m2, 0, 0
1152 SATD_4x4_MMX m1, 4, 0
1157 %ifdef HIGH_BIT_DEPTH
1158 %macro SATD_MxN_MMX 3
1159 cglobal pixel_satd_%1x%2, 4,7
1162 call pixel_satd_%1x%3_internal_mmx2
1169 call pixel_satd_%1x%3_internal_mmx2
1180 SATD_MxN_MMX 16, 16, 4
1181 SATD_MxN_MMX 16, 8, 4
1182 SATD_MxN_MMX 8, 16, 8
1183 %endif ; HIGH_BIT_DEPTH
1185 %ifndef HIGH_BIT_DEPTH
1186 cglobal pixel_satd_16x16, 4,6
1190 call pixel_satd_16x4_internal_mmx2
1194 call pixel_satd_16x4_internal_mmx2
1199 cglobal pixel_satd_16x8, 4,6
1202 call pixel_satd_16x4_internal_mmx2
1205 call pixel_satd_16x4_internal_mmx2
1208 cglobal pixel_satd_8x16, 4,6
1211 call pixel_satd_8x8_internal_mmx2
1214 call pixel_satd_8x8_internal_mmx2
1216 %endif ; !HIGH_BIT_DEPTH
1218 cglobal pixel_satd_8x8, 4,6
1221 call pixel_satd_8x8_internal_mmx2
1224 cglobal pixel_satd_8x4, 4,6
1227 call pixel_satd_8x4_internal_mmx2
1230 cglobal pixel_satd_4x16, 4,6
1232 SATD_4x4_MMX m0, 0, 1
1233 SATD_4x4_MMX m1, 0, 1
1235 SATD_4x4_MMX m1, 0, 1
1237 SATD_4x4_MMX m1, 0, 0
1241 cglobal pixel_satd_4x8, 4,6
1243 SATD_4x4_MMX m0, 0, 1
1244 SATD_4x4_MMX m1, 0, 0
1248 cglobal pixel_satd_4x4, 4,6
1250 SATD_4x4_MMX m0, 0, 0
1253 %macro SATD_START_SSE2 2
1262 %macro SATD_END_SSE2 1
1268 %macro BACKUP_POINTERS 0
1278 %macro RESTORE_AND_INC_POINTERS 0
1293 %macro SATD_4x8_SSE 2
1310 %if cpuflag(ssse3) && %1==1
1312 DIFFOP 0, 4, 1, 5, 3
1314 DIFFOP 0, 4, 1, 5, 7
1328 %if cpuflag(ssse3) && %1==1
1330 DIFFOP 2, 6, 3, 5, 4
1332 DIFFOP 2, 6, 3, 5, 7
1334 SATD_8x4_SSE cpuname, 0, 1, 2, 3, 4, 5, 7, %2
1337 ;-----------------------------------------------------------------------------
1338 ; int pixel_satd_8x4( uint8_t *, int, uint8_t *, int )
1339 ;-----------------------------------------------------------------------------
1342 cglobal pixel_satd_4x4, 4, 6, 6
1345 LOAD_DUP_2x4P m2, m5, [r2], [r2+r3]
1346 LOAD_DUP_2x4P m3, m5, [r2+2*r3], [r2+r5]
1347 LOAD_DUP_2x4P m0, m5, [r0], [r0+r1]
1348 LOAD_DUP_2x4P m1, m5, [r0+2*r1], [r0+r4]
1349 DIFF_SUMSUB_SSSE3 0, 2, 1, 3, 4
1350 HADAMARD 0, sumsub, 0, 1, 2, 3
1351 HADAMARD 4, sumsub, 0, 1, 2, 3
1352 HADAMARD 1, amax, 0, 1, 2, 3
1358 cglobal pixel_satd_4x8, 4, 6, 8
1363 SATD_4x8_SSE 0, swap
1368 cglobal pixel_satd_4x16, 4, 6, 8
1373 SATD_4x8_SSE 0, swap
1381 cglobal pixel_satd_8x8_internal
1382 LOAD_SUMSUB_8x4P 0, 1, 2, 3, 4, 5, 7, r0, r2, 1
1383 SATD_8x4_SSE cpuname, 0, 1, 2, 3, 4, 5, 6
1384 %%pixel_satd_8x4_internal:
1385 LOAD_SUMSUB_8x4P 0, 1, 2, 3, 4, 5, 7, r0, r2, 1
1386 SATD_8x4_SSE cpuname, 0, 1, 2, 3, 4, 5, 6
1389 %ifdef UNIX64 ; 16x8 regresses on phenom win64, 16x16 is almost the same
1390 cglobal pixel_satd_16x4_internal
1391 LOAD_SUMSUB_16x4P 0, 1, 2, 3, 4, 8, 5, 9, 6, 7, r0, r2, 11
1394 ; FIXME: this doesn't really mean ssse3, but rather selects between two different behaviors implemented with sse2?
1395 SATD_8x4_SSE ssse3, 0, 1, 2, 3, 6, 11, 10
1396 SATD_8x4_SSE ssse3, 4, 8, 5, 9, 6, 3, 10
1399 cglobal pixel_satd_16x8, 4,6,12
1400 SATD_START_SSE2 m10, m7
1401 %if notcpuflag(ssse3)
1404 jmp %%pixel_satd_16x8_internal
1406 cglobal pixel_satd_16x16, 4,6,12
1407 SATD_START_SSE2 m10, m7
1408 %if notcpuflag(ssse3)
1411 call pixel_satd_16x4_internal
1412 call pixel_satd_16x4_internal
1413 %%pixel_satd_16x8_internal:
1414 call pixel_satd_16x4_internal
1415 call pixel_satd_16x4_internal
1418 cglobal pixel_satd_16x8, 4,6,8
1419 SATD_START_SSE2 m6, m7
1421 call pixel_satd_8x8_internal
1422 RESTORE_AND_INC_POINTERS
1423 call pixel_satd_8x8_internal
1426 cglobal pixel_satd_16x16, 4,6,8
1427 SATD_START_SSE2 m6, m7
1429 call pixel_satd_8x8_internal
1430 call pixel_satd_8x8_internal
1431 RESTORE_AND_INC_POINTERS
1432 call pixel_satd_8x8_internal
1433 call pixel_satd_8x8_internal
1437 cglobal pixel_satd_8x16, 4,6,8
1438 SATD_START_SSE2 m6, m7
1439 call pixel_satd_8x8_internal
1440 call pixel_satd_8x8_internal
1443 cglobal pixel_satd_8x8, 4,6,8
1444 SATD_START_SSE2 m6, m7
1445 call pixel_satd_8x8_internal
1448 cglobal pixel_satd_8x4, 4,6,8
1449 SATD_START_SSE2 m6, m7
1450 call %%pixel_satd_8x4_internal
1452 %endmacro ; SATDS_SSE2
1462 %ifdef HIGH_BIT_DEPTH
1467 %endif ; HIGH_BIT_DEPTH
1471 %ifdef HIGH_BIT_DEPTH
1473 %else ; sse2 doesn't seem to like the horizontal way of doing things
1474 %define vertical (cpuflags == cpuflags_sse2)
1478 ;-----------------------------------------------------------------------------
1479 ; int pixel_sa8d_8x8( uint8_t *, int, uint8_t *, int )
1480 ;-----------------------------------------------------------------------------
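; By analogy with the satd_4x4 sketch above (editorial note): sa8d applies an
; 8x8 Hadamard to the difference block and sums the absolute coefficients,
; which the non-internal entry points then normalize as
;     sa8d = ( sum + 2 ) >> 2
; to keep the score on a scale comparable to satd.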
1481 cglobal pixel_sa8d_8x8_internal
1484 LOAD_SUMSUB_8x4P 0, 1, 2, 8, 5, 6, 7, r0, r2
1485 LOAD_SUMSUB_8x4P 4, 5, 3, 9, 11, 6, 7, r6, r7
1487 HADAMARD8_2D 0, 1, 2, 8, 4, 5, 3, 9, 6, amax
1489 HADAMARD8_2D_HMUL 0, 1, 2, 8, 4, 5, 3, 9, 6, 11
1497 cglobal pixel_sa8d_8x8, 4,8,12
1504 call pixel_sa8d_8x8_internal
1505 %ifdef HIGH_BIT_DEPTH
1509 %endif ; HIGH_BIT_DEPTH
1515 cglobal pixel_sa8d_16x16, 4,8,12
1522 call pixel_sa8d_8x8_internal ; pix[0]
1523 add r2, 8*SIZEOF_PIXEL
1524 add r0, 8*SIZEOF_PIXEL
1525 %ifdef HIGH_BIT_DEPTH
1529 call pixel_sa8d_8x8_internal ; pix[8]
1533 call pixel_sa8d_8x8_internal ; pix[8*stride+8]
1534 sub r2, 8*SIZEOF_PIXEL
1535 sub r0, 8*SIZEOF_PIXEL
1537 call pixel_sa8d_8x8_internal ; pix[8*stride]
1540 %ifndef HIGH_BIT_DEPTH
1550 cglobal pixel_sa8d_8x8_internal
1551 %define spill0 [esp+4]
1552 %define spill1 [esp+20]
1553 %define spill2 [esp+36]
1555 LOAD_DIFF_8x4P 0, 1, 2, 3, 4, 5, 6, r0, r2, 1
1556 HADAMARD4_2D 0, 1, 2, 3, 4
1558 LOAD_DIFF_8x4P 4, 5, 6, 7, 3, 3, 2, r0, r2, 1
1559 HADAMARD4_2D 4, 5, 6, 7, 3
1560 HADAMARD2_2D 0, 4, 1, 5, 3, qdq, amax
1563 HADAMARD2_2D 2, 6, 3, 7, 5, qdq, amax
1566 LOAD_SUMSUB_8x4P 0, 1, 2, 3, 5, 6, 7, r0, r2, 1
1567 ; could do first HADAMARD4_V here to save spilling later
1568 ; surprisingly, not a win on conroe or even p4
1573 LOAD_SUMSUB_8x4P 4, 5, 6, 7, 2, 3, 1, r0, r2, 1
1574 HADAMARD4_V 4, 5, 6, 7, 3
1580 HADAMARD4_V 0, 1, 2, 3, 7
1581 SUMSUB_BADC w, 0, 4, 1, 5, 7
1582 HADAMARD 2, sumsub, 0, 4, 7, 6
1583 HADAMARD 2, sumsub, 1, 5, 7, 6
1584 HADAMARD 1, amax, 0, 4, 7, 6
1585 HADAMARD 1, amax, 1, 5, 7, 6
1589 SUMSUB_BADC w, 2, 6, 3, 7, 4
1590 HADAMARD 2, sumsub, 2, 6, 4, 5
1591 HADAMARD 2, sumsub, 3, 7, 4, 5
1592 HADAMARD 1, amax, 2, 6, 4, 5
1593 HADAMARD 1, amax, 3, 7, 4, 5
1594 %endif ; sse2/non-sse2
1599 %endif ; ifndef mmx2
1601 cglobal pixel_sa8d_8x8, 4,7
1608 call pixel_sa8d_8x8_internal
1609 %ifdef HIGH_BIT_DEPTH
1613 %endif ; HIGH_BIT_DEPTH
1620 cglobal pixel_sa8d_16x16, 4,7
1627 call pixel_sa8d_8x8_internal
1632 %ifdef HIGH_BIT_DEPTH
1636 call pixel_sa8d_8x8_internal
1639 add r0, 8*SIZEOF_PIXEL
1640 add r2, 8*SIZEOF_PIXEL
1643 call pixel_sa8d_8x8_internal
1650 mova [esp+64-mmsize], m0
1651 call pixel_sa8d_8x8_internal
1652 %ifdef HIGH_BIT_DEPTH
1654 %else ; !HIGH_BIT_DEPTH
1655 paddusw m0, [esp+64-mmsize]
1672 %endif ; HIGH_BIT_DEPTH
1678 %endif ; !ARCH_X86_64
1681 ;=============================================================================
1683 ;=============================================================================
1694 ; intra_sa8d_x3_8x8 and intra_satd_x3_4x4 are obsoleted by x9 on ssse3+,
1695 ; and are only retained for old cpus.
1696 %macro INTRA_SA8D_SSE2 0
1698 ;-----------------------------------------------------------------------------
1699 ; void intra_sa8d_x3_8x8( uint8_t *fenc, uint8_t edge[36], int *res )
1700 ;-----------------------------------------------------------------------------
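; (A note on the contract: each x3 function scores the source block against
; the vertical, horizontal and DC predictions, storing the three costs to
; res[0..2] -- see the stores to [r2+0/4/8] at the end of each function.)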
1701 cglobal intra_sa8d_x3_8x8, 3,3,14
1704 movq m0, [r0+0*FENC_STRIDE]
1705 movq m1, [r0+1*FENC_STRIDE]
1706 movq m2, [r0+2*FENC_STRIDE]
1707 movq m3, [r0+3*FENC_STRIDE]
1708 movq m4, [r0+4*FENC_STRIDE]
1709 movq m5, [r0+5*FENC_STRIDE]
1710 movq m6, [r0+6*FENC_STRIDE]
1711 movq m7, [r0+7*FENC_STRIDE]
1721 HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 8
1723 ABSW2 m8, m9, m2, m3, m2, m3
1724 ABSW2 m10, m11, m4, m5, m4, m5
1727 ABSW2 m10, m11, m6, m7, m6, m7
1734 ; 1D hadamard of edges
1740 HSUMSUB2 pmullw, m8, m9, m10, m11, m11, q1032, [pw_ppppmmmm]
1741 HSUMSUB2 pmullw, m8, m9, m10, m11, m11, q2301, [pw_ppmmppmm]
1742 pshuflw m10, m8, q2301
1743 pshuflw m11, m9, q2301
1744 pshufhw m10, m10, q2301
1745 pshufhw m11, m11, q2301
1746 pmullw m8, [pw_pmpmpmpm]
1747 pmullw m11, [pw_pmpmpmpm]
1757 psllw m8, 3 ; left edge
1760 ABSW2 m8, m10, m8, m10, m11, m12 ; 1x8 sum
1769 punpcklqdq m0, m4 ; transpose
1770 psllw m9, 3 ; top edge
1771 psrldq m2, m13, 2 ; 8x7 sum
1772 psubw m0, m9 ; 8x1 sum
1781 punpckhdq m3, m2, m8
1783 pshufd m5, m13, q3311
1786 punpckhqdq m0, m2, m5
1791 movq [r2], m0 ; i8x8_v, i8x8_h
1793 movd [r2+8], m0 ; i8x8_dc
1795 %endif ; ARCH_X86_64
1796 %endmacro ; INTRA_SA8D_SSE2
1799 ; out: m0..m3 = hadamard coefs
1801 cglobal hadamard_load
1802 ; not really a global, but otherwise cycles get attributed to the wrong function in profiling
1803 %ifdef HIGH_BIT_DEPTH
1804 mova m0, [r0+0*FENC_STRIDEB]
1805 mova m1, [r0+1*FENC_STRIDEB]
1806 mova m2, [r0+2*FENC_STRIDEB]
1807 mova m3, [r0+3*FENC_STRIDEB]
1810 movd m0, [r0+0*FENC_STRIDE]
1811 movd m1, [r0+1*FENC_STRIDE]
1812 movd m2, [r0+2*FENC_STRIDE]
1813 movd m3, [r0+3*FENC_STRIDE]
1819 HADAMARD4_2D 0, 1, 2, 3, 4
1823 %macro SCALAR_HADAMARD 4-5 ; direction, offset, 3x tmp
1825 %ifdef HIGH_BIT_DEPTH
1826 mova %3, [r1+%2*SIZEOF_PIXEL-FDEC_STRIDEB]
1828 movd %3, [r1+%2*SIZEOF_PIXEL-FDEC_STRIDEB]
1834 shl %2d, 5 ; log(FDEC_STRIDEB)
1836 movd %3, [r1+%2*SIZEOF_PIXEL-4+1*FDEC_STRIDEB]
1837 pinsrw %3, [r1+%2*SIZEOF_PIXEL-2+0*FDEC_STRIDEB], 0
1838 pinsrw %3, [r1+%2*SIZEOF_PIXEL-2+2*FDEC_STRIDEB], 2
1839 pinsrw %3, [r1+%2*SIZEOF_PIXEL-2+3*FDEC_STRIDEB], 3
1840 %ifndef HIGH_BIT_DEPTH
1848 %define %%sign psignw
1850 %define %%sign pmullw
1852 pshufw %4, %3, q1032
1853 %%sign %4, [pw_ppmmppmm]
1855 pshufw %4, %3, q2301
1856 %%sign %4, [pw_pmpmpmpm]
1859 mova [%1_1d+2*%2], %3
1862 %macro SUM_MM_X3 8 ; 3x sum, 4x tmp, op
1864 pshufw %4, %1, q1032
1865 pshufw %5, %2, q1032
1866 pshufw %6, %3, q1032
1873 pshufw %4, %1, q1032
1874 pshufw %5, %2, q1032
1875 pshufw %6, %3, q1032
1885 ABSW2 m4, m5, m1, m2, m1, m2
1892 ; out: m0 v, m4 h, m5 dc
1894 %macro SUM4x3 3 ; dc, left, top
1905 punpckldq m0, m2 ; transpose
1907 ABSW2 m4, m5, m4, m5, m2, m3 ; 1x4 sum
1908 ABSW m0, m0, m1 ; 4x1 sum
1911 %macro INTRA_X3_MMX 0
1912 ;-----------------------------------------------------------------------------
1913 ; void intra_satd_x3_4x4( uint8_t *fenc, uint8_t *fdec, int *res )
1914 ;-----------------------------------------------------------------------------
1915 cglobal intra_satd_x3_4x4, 3,3
1917 ; stack is 16 byte aligned because abi says so
1918 %define top_1d rsp-8 ; size 8
1919 %define left_1d rsp-16 ; size 8
1921 ; stack is 16 byte aligned at least in gcc, and we've pushed 3 regs + return address, so it's still aligned
1923 %define top_1d esp+8
1928 SCALAR_HADAMARD left, 0, m4, m5
1929 SCALAR_HADAMARD top, 0, m6, m5, m7
1932 pand m6, [sw_f0] ; dc
1935 SUM4x3 m6, [left_1d], [top_1d]
1939 psrlq m1, 16 ; 4x3 sum
1942 SUM_MM_X3 m0, m4, m5, m1, m2, m3, m6, pavgw
1943 movd [r2+0], m0 ; i4x4_v satd
1944 movd [r2+4], m4 ; i4x4_h satd
1945 movd [r2+8], m5 ; i4x4_dc satd
1951 ;-----------------------------------------------------------------------------
1952 ; void intra_satd_x3_16x16( uint8_t *fenc, uint8_t *fdec, int *res )
1953 ;-----------------------------------------------------------------------------
1954 cglobal intra_satd_x3_16x16, 0,5
1955 %assign stack_pad 120 + ((stack_offset+120+gprsize)&15)
1956 ; not really needed on x86_64, just shuts up valgrind about storing data below the stack across a function call
1958 %define sums rsp+64 ; size 56
1959 %define top_1d rsp+32 ; size 32
1960 %define left_1d rsp ; size 32
1967 %ifdef HIGH_BIT_DEPTH
1978 SCALAR_HADAMARD left, r3, m0, m1
1979 SCALAR_HADAMARD top, r3, m1, m2, m3
1985 pand m6, [sw_f0] ; dc
1996 SUM4x3 m6, [left_1d+8*(r3+4)], [top_1d+8*(r4+4)]
1999 paddw m0, [sums+ 0] ; i16x16_v satd
2000 paddw m4, [sums+ 8] ; i16x16_h satd
2001 paddw m5, [sums+16] ; i16x16_dc satd
2006 add r0, 4*SIZEOF_PIXEL
2009 %ifdef HIGH_BIT_DEPTH
2018 punpckhwd m3, m5, m7
2028 add r0, 4*FENC_STRIDEB-16*SIZEOF_PIXEL
2034 %ifdef HIGH_BIT_DEPTH
2037 HADDD m5, m7 ; DC satd
2038 HADDD m4, m7 ; H satd
2039 HADDD m0, m7 ; the part of V satd that doesn't overlap with DC
2041 psrlq m1, 32 ; DC[1]
2042 paddd m0, m3 ; DC[2]
2043 psrlq m3, 32 ; DC[3]
2048 SUM_MM_X3 m0, m4, m5, m3, m1, m2, m6, paddd
2055 movd [r2+8], m5 ; i16x16_dc satd
2056 movd [r2+4], m4 ; i16x16_h satd
2057 movd [r2+0], m0 ; i16x16_v satd
2067 ;-----------------------------------------------------------------------------
2068 ; void intra_satd_x3_8x8c( uint8_t *fenc, uint8_t *fdec, int *res )
2069 ;-----------------------------------------------------------------------------
2070 cglobal intra_satd_x3_8x8c, 0,6
2071 ; not really needed on x86_64, just shuts up valgrind about storing data below the stack across a function call
2073 %define sums rsp+48 ; size 24
2074 %define dc_1d rsp+32 ; size 16
2075 %define top_1d rsp+16 ; size 16
2076 %define left_1d rsp ; size 16
2086 SCALAR_HADAMARD left, r3, m0, m1
2087 SCALAR_HADAMARD top, r3, m0, m1, m2
2092 movzx t0d, word [left_1d+0]
2093 movzx r3d, word [top_1d+0]
2094 movzx r4d, word [left_1d+8]
2095 movzx r5d, word [top_1d+8]
2096 lea t0d, [t0 + r3 + 16]
2097 lea r3d, [r4 + r5 + 16]
2106 mov [dc_1d+ 0], t0d ; tl
2107 mov [dc_1d+ 4], r5d ; tr
2108 mov [dc_1d+ 8], r4d ; bl
2109 mov [dc_1d+12], r3d ; br
2122 SUM4x3 [r5+4*(r4+2)], [left_1d+8*(r3+2)], [top_1d+8*(r4+2)]
2125 paddw m0, [sums+16] ; i4x4_v satd
2126 paddw m4, [sums+8] ; i4x4_h satd
2127 paddw m5, [sums+0] ; i4x4_dc satd
2132 add r0, 4*SIZEOF_PIXEL
2135 add r0, 4*FENC_STRIDEB-8*SIZEOF_PIXEL
2145 %ifdef HIGH_BIT_DEPTH
2148 SUM_MM_X3 m0, m1, m2, m3, m4, m5, m6, paddd
2154 SUM_MM_X3 m0, m1, m2, m3, m4, m5, m6, paddd
2157 movd [r2+0], m0 ; i8x8c_dc satd
2158 movd [r2+4], m1 ; i8x8c_h satd
2159 movd [r2+8], m2 ; i8x8c_v satd
2162 %endmacro ; INTRA_X3_MMX
2166 %macro PRED4x4_LOWPASS 5
2183 %macro INTRA_X9_PRED 2
2185 movu m1, [r1-1*FDEC_STRIDE-8]
2186 pinsrb m1, [r1+3*FDEC_STRIDE-1], 0
2187 pinsrb m1, [r1+2*FDEC_STRIDE-1], 1
2188 pinsrb m1, [r1+1*FDEC_STRIDE-1], 2
2189 pinsrb m1, [r1+0*FDEC_STRIDE-1], 3
2191 movd mm0, [r1+3*FDEC_STRIDE-4]
2192 punpcklbw mm0, [r1+2*FDEC_STRIDE-4]
2193 movd mm1, [r1+1*FDEC_STRIDE-4]
2194 punpcklbw mm1, [r1+0*FDEC_STRIDE-4]
2198 movu m1, [r1-1*FDEC_STRIDE-8]
2199 movss m1, m0 ; l3 l2 l1 l0 __ __ __ lt t0 t1 t2 t3 t4 t5 t6 t7
2201 pshufb m1, [intrax9_edge] ; l3 l3 l2 l1 l0 lt t0 t1 t2 t3 t4 t5 t6 t7 t7 __
2202 psrldq m0, m1, 1 ; l3 l2 l1 l0 lt t0 t1 t2 t3 t4 t5 t6 t7 t7 __ __
2203 psrldq m2, m1, 2 ; l2 l1 l0 lt t0 t1 t2 t3 t4 t5 t6 t7 t7 __ __ __
2204 pavgb m5, m0, m1 ; Gl3 Gl2 Gl1 Gl0 Glt Gt0 Gt1 Gt2 Gt3 Gt4 Gt5 __ __ __ __ __
2206 PRED4x4_LOWPASS m0, m1, m2, m0, m4 ; Fl3 Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 Ft6 Ft7 __ __ __
2208 ; Ft1 Ft2 Ft3 Ft4 Flt Ft0 Ft1 Ft2
2209 ; Ft2 Ft3 Ft4 Ft5 Fl0 Flt Ft0 Ft1
2210 ; Ft3 Ft4 Ft5 Ft6 Fl1 Fl0 Flt Ft0
2211 ; Ft4 Ft5 Ft6 Ft7 Fl2 Fl1 Fl0 Flt
2212 pshufb m2, m0, [%1_ddlr1] ; a: ddl row0, ddl row1, ddr row0, ddr row1 / b: ddl row0, ddr row0, ddl row1, ddr row1
2213 pshufb m3, m0, [%1_ddlr2] ; rows 2,3
2215 ; Glt Flt Ft0 Ft1 Gl0 Fl1 Gl1 Fl2
2216 ; Gl0 Fl0 Glt Flt Gl1 Fl2 Gl2 Fl3
2217 ; Gl1 Fl1 Gl0 Fl0 Gl2 Fl3 Gl3 Gl3
2218 ; Gl2 Fl2 Gl1 Fl1 Gl3 Gl3 Gl3 Gl3
2219 pslldq m0, 5 ; ___ ___ ___ ___ ___ Fl3 Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5
2220 palignr m7, m5, m0, 5 ; Fl3 Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 Gl3 Gl2 Gl1 Gl0 Glt
2221 pshufb m6, m7, [%1_hdu1]
2222 pshufb m7, m7, [%1_hdu2]
2224 ; Gt0 Gt1 Gt2 Gt3 Gt1 Gt2 Gt3 Gt4
2225 ; Flt Ft0 Ft1 Ft2 Ft1 Ft2 Ft3 Ft4
2226 ; Fl0 Gt0 Gt1 Gt2 Gt2 Gt3 Gt4 Gt5
2227 ; Fl1 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5
2228 psrldq m5, 5 ; Gt0 Gt1 Gt2 Gt3 Gt4 Gt5 ...
2229 palignr m5, m0, 6 ; ___ Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 Gt0 Gt1 Gt2 Gt3 Gt4 Gt5
2230 pshufb m4, m5, [%1_vrl1]
2231 pshufb m5, m5, [%1_vrl2]
2232 %endmacro ; INTRA_X9_PRED
2234 %macro INTRA_X9_VHDC 5 ; edge, fenc01, fenc23, tmp, tmp
2235 pshufb m2, m%1, [intrax9b_vh1]
2236 pshufb m3, m%1, [intrax9b_vh2]
2237 mova [pred_buf+0x60], m2
2238 mova [pred_buf+0x70], m3
2239 pshufb m%1, [intrax9b_edge2] ; t0 t1 t2 t3 t0 t1 t2 t3 l0 l1 l2 l3 l0 l1 l2 l3
2240 pmaddubsw m%1, [hmul_4p]
2241 pshufhw m0, m%1, q2301
2242 pshuflw m0, m0, q2301
2243 psignw m%1, [pw_pmpmpmpm]
2245 psllw m0, 2 ; hadamard(top), hadamard(left)
2247 pshufb m1, m0, [intrax9b_v1]
2248 pshufb m2, m0, [intrax9b_v2]
2250 psignw m3, [pw_pmmpzzzz] ; FIXME could this be eliminated?
2252 pand m0, [sw_f0] ; dc
2253 ; This (as well as one of the steps in intra_satd_x9_4x4.satd_8x4) could be
2254 ; changed from a wd transpose to a qdq, with appropriate rearrangement of inputs.
2255 ; Which would be faster on conroe, but slower on penryn and sandybridge, and too invasive to ifdef.
2256 HADAMARD 0, sumsub, %2, %3, %4, %5
2257 HADAMARD 1, sumsub, %2, %3, %4, %5
2260 imul r3d, 0x01010101
2261 mov [pred_buf+0x80], r3d
2262 mov [pred_buf+0x88], r3d
2263 mov [pred_buf+0x90], r3d
2264 mov [pred_buf+0x98], r3d
2280 SBUTTERFLY qdq, 3, 0, 2
2291 pmaddwd m1, [pw_1] ; v, _, h, dc
2293 %endmacro ; INTRA_X9_VHDC
2295 %macro INTRA_X9_END 2
2297 phminposuw m0, m0 ; h,dc,ddl,ddr,vr,hd,vl,hu
2304 ; 4x4 sad is up to 12 bits; +bitcosts -> 13 bits; pack with 3 bit index
2306 paddw m0, [pw_s01234567] ; h,dc,ddl,ddr,vr,hd,vl,hu
2308 ; 4x4 satd is up to 13 bits; +bitcosts and saturate -> 13 bits; pack with 3 bit index
2311 paddw m0, [pw_s01234657] ; h,dc,ddl,ddr,vr,vl,hd,hu
2315 pshuflw m1, m0, q0032
2317 pshuflw m1, m0, q0001
2324 ; 1<<16: increment index to match intra4x4_pred_e. couldn't do this before because it had to fit in 3 bits
2325 ; 1<<12: undo sign manipulation
2326 lea eax, [rax+r2+(1<<16)+(1<<12)]
2331 ; output the predicted samples
2336 movzx r2d, byte [r2+r3]
2338 movzx r2d, byte [%2_lut+r3]
2341 movq mm0, [pred_buf+r2]
2342 movq mm1, [pred_buf+r2+16]
2343 movd [r1+0*FDEC_STRIDE], mm0
2344 movd [r1+2*FDEC_STRIDE], mm1
2347 movd [r1+1*FDEC_STRIDE], mm0
2348 movd [r1+3*FDEC_STRIDE], mm1
2352 mov r3d, [pred_buf+r2+8*i]
2353 mov [r1+i*FDEC_STRIDE], r3d
2357 %endmacro ; INTRA_X9_END
2360 ;-----------------------------------------------------------------------------
2361 ; int intra_sad_x9_4x4( uint8_t *fenc, uint8_t *fdec, uint16_t *bitcosts )
2362 ;-----------------------------------------------------------------------------
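; (A note on the contract: one call scores all nine 4x4 intra prediction
; modes -- the predictions are built into pred_buf, scored against fenc with
; the caller-supplied mode bitcosts added, and INTRA_X9_END writes the
; winning prediction back to fdec and returns its cost packed with the mode
; index.)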
2364 cglobal intra_sad_x9_4x4, 3,4,9
2365 %assign pad 0xc0-gprsize-(stack_offset&15)
2366 %define pred_buf rsp
2369 INTRA_X9_PRED intrax9a, m8
2371 INTRA_X9_PRED intrax9a, [rsp+0xa0]
2380 movd m0, [r0+0*FENC_STRIDE]
2381 pinsrd m0, [r0+1*FENC_STRIDE], 1
2382 movd m1, [r0+2*FENC_STRIDE]
2383 pinsrd m1, [r0+3*FENC_STRIDE], 1
2385 movd mm0, [r0+0*FENC_STRIDE]
2386 punpckldq mm0, [r0+1*FENC_STRIDE]
2387 movd mm1, [r0+2*FENC_STRIDE]
2388 punpckldq mm1, [r0+3*FENC_STRIDE]
2409 %define %%zero [pb_0]
2411 pshufb m3, m7, [intrax9a_vh1]
2412 pshufb m5, m7, [intrax9a_vh2]
2413 pshufb m7, [intrax9a_dc]
2428 movzx r3d, word [r2]
2431 punpckhqdq m3, m0 ; h, dc
2432 shufps m3, m2, q2020
2438 INTRA_X9_END 1, intrax9a
2444 ;-----------------------------------------------------------------------------
2445 ; int intra_satd_x9_4x4( uint8_t *fenc, uint8_t *fdec, uint16_t *bitcosts )
2446 ;-----------------------------------------------------------------------------
2447 cglobal intra_satd_x9_4x4, 3,4,16
2448 %assign pad 0xb0-gprsize-(stack_offset&15)
2449 %define pred_buf rsp
2451 INTRA_X9_PRED intrax9b, m15
2458 movd m8, [r0+0*FENC_STRIDE]
2459 movd m9, [r0+1*FENC_STRIDE]
2460 movd m10, [r0+2*FENC_STRIDE]
2461 movd m11, [r0+3*FENC_STRIDE]
2472 pshufd m1, m2, q3232
2475 call .satd_8x4 ; ddr, ddl
2477 pshufd m3, m5, q3232
2480 pshufd m1, m4, q3232
2481 call .satd_8x4 ; vr, vl
2483 pshufd m3, m7, q3232
2486 pshufd m1, m6, q3232
2487 call .satd_8x4 ; hd, hu
2491 punpcklqdq m4, m0 ; conroe dislikes punpckldq, and ssse3 INTRA_X9_END can handle arbitrary orders whereas phminposuw can't
2493 mova m1, [pw_ppmmppmm]
2498 INTRA_X9_VHDC 15, 8, 10, 6, 7
2503 %if notcpuflag(sse4)
2504 pshufhw m0, m0, q3120 ; compensate for different order in unpack
2508 movzx r0d, word [r2]
2510 INTRA_X9_END 0, intrax9b
2513 RESET_MM_PERMUTATION
2524 SATD_8x4_SSE cpuname, 0, 1, 2, 3, 13, 14, 0, swap
2527 pshufd m1, m0, q0032
2531 paddd xmm0, m0, m1 ; consistent location of return value. only the avx version of hadamard permutes m0, so 3arg is free
2534 %else ; !ARCH_X86_64
2535 cglobal intra_satd_x9_4x4, 3,4,8
2536 %assign pad 0x120-gprsize-(stack_offset&15)
2537 %define fenc_buf rsp
2538 %define pred_buf rsp+0x40
2539 %define spill rsp+0xe0
2541 INTRA_X9_PRED intrax9b, [spill+0x20]
2542 mova [pred_buf+0x00], m2
2543 mova [pred_buf+0x10], m3
2544 mova [pred_buf+0x20], m4
2545 mova [pred_buf+0x30], m5
2546 mova [pred_buf+0x40], m6
2547 mova [pred_buf+0x50], m7
2548 movd m4, [r0+0*FENC_STRIDE]
2549 movd m5, [r0+1*FENC_STRIDE]
2550 movd m6, [r0+2*FENC_STRIDE]
2551 movd m0, [r0+3*FENC_STRIDE]
2561 mova [fenc_buf+0x00], m4
2562 mova [fenc_buf+0x10], m5
2563 mova [fenc_buf+0x20], m6
2564 mova [fenc_buf+0x30], m0
2566 pshufd m1, m2, q3232
2576 call .satd_8x4b ; ddr, ddl
2577 mova m3, [pred_buf+0x30]
2578 mova m1, [pred_buf+0x20]
2581 movq [spill+0x08], m0
2584 call .satd_8x4 ; vr, vl
2585 mova m3, [pred_buf+0x50]
2586 mova m1, [pred_buf+0x40]
2589 movq [spill+0x10], m0
2592 call .satd_8x4 ; hd, hu
2593 movq [spill+0x18], m0
2594 mova m1, [spill+0x20]
2595 mova m4, [fenc_buf+0x00]
2596 mova m5, [fenc_buf+0x20]
2597 mova m2, [pw_ppmmppmm]
2600 paddw m4, [fenc_buf+0x10]
2601 paddw m5, [fenc_buf+0x30]
2602 INTRA_X9_VHDC 1, 4, 5, 6, 7
2606 punpckhqdq m1, [spill+0x00]
2607 packssdw m1, [spill+0x10]
2609 pshufhw m1, m1, q3120
2611 pshufhw m0, m0, q3120
2614 movzx r0d, word [r2]
2616 INTRA_X9_END 0, intrax9b
2619 RESET_MM_PERMUTATION
2626 %xdefine fenc_buf fenc_buf+gprsize
2627 psubw m0, [fenc_buf+0x00]
2628 psubw m1, [fenc_buf+0x10]
2629 psubw m2, [fenc_buf+0x20]
2631 psubw m3, [fenc_buf+0x30]
2632 SATD_8x4_SSE cpuname, 0, 1, 2, 3, 4, 5, 0, swap
2635 pshufd m1, m0, q0032
2642 %endmacro ; INTRA_X9
2647 ;-----------------------------------------------------------------------------
2648 ; int intra_sad_x9_8x8( uint8_t *fenc, uint8_t *fdec, uint8_t edge[36], uint16_t *bitcosts, uint16_t *satds )
2649 ;-----------------------------------------------------------------------------
2650 cglobal intra_sad_x9_8x8, 5,6,9
2660 %assign padbase 0x10
2662 %assign pad 0x240+0x10+padbase-gprsize-(stack_offset&15)
2663 %define pred(i,j) [rsp+i*0x40+j*0x10+padbase]
2666 movq fenc02, [r0+FENC_STRIDE* 0]
2667 movq fenc13, [r0+FENC_STRIDE* 1]
2668 movq fenc46, [r0+FENC_STRIDE* 4]
2669 movq fenc57, [r0+FENC_STRIDE* 5]
2670 movhps fenc02, [r0+FENC_STRIDE* 2]
2671 movhps fenc13, [r0+FENC_STRIDE* 3]
2672 movhps fenc46, [r0+FENC_STRIDE* 6]
2673 movhps fenc57, [r0+FENC_STRIDE* 7]
2675 ; save instruction size: avoid 4-byte memory offsets
2676 lea r0, [intra8x9_h1+128]
2677 %define off(m) (r0+m-(intra8x9_h1+128))
2682 psadbw m1, m0, fenc02
2684 psadbw m2, m0, fenc13
2686 psadbw m3, m0, fenc46
2688 psadbw m0, m0, fenc57
2698 pshufb m1, m0, [off(intra8x9_h1)]
2699 pshufb m2, m0, [off(intra8x9_h2)]
2705 pshufb m3, m0, [off(intra8x9_h3)]
2706 pshufb m2, m0, [off(intra8x9_h4)]
2717 lea r5, [rsp+padbase+0x100]
2718 %define pred(i,j) [r5+i*0x40+j*0x10-0x100]
2730 psadbw m1, m0, fenc02
2732 psadbw m2, m0, fenc13
2734 psadbw m3, m0, fenc46
2736 psadbw m0, m0, fenc57
2745 ; Ft1 Ft2 Ft3 Ft4 Ft5 Ft6 Ft7 Ft8
2746 ; Ft2 Ft3 Ft4 Ft5 Ft6 Ft7 Ft8 Ft9
2747 ; Ft3 Ft4 Ft5 Ft6 Ft7 Ft8 Ft9 FtA
2748 ; Ft4 Ft5 Ft6 Ft7 Ft8 Ft9 FtA FtB
2749 ; Ft5 Ft6 Ft7 Ft8 Ft9 FtA FtB FtC
2750 ; Ft6 Ft7 Ft8 Ft9 FtA FtB FtC FtD
2751 ; Ft7 Ft8 Ft9 FtA FtB FtC FtD FtE
2752 ; Ft8 Ft9 FtA FtB FtC FtD FtE FtF
2756 pavgb m3, m0, m2 ; Gt1 Gt2 Gt3 Gt4 Gt5 Gt6 Gt7 Gt8 Gt9 GtA GtB ___ ___ ___ ___ ___
2757 PRED4x4_LOWPASS m0, m1, m2, m0, tmp ; ___ Ft1 Ft2 Ft3 Ft4 Ft5 Ft6 Ft7 Ft8 Ft9 FtA FtB FtC FtD FtE FtF
2758 pshufb m1, m0, [off(intra8x9_ddl1)]
2759 pshufb m2, m0, [off(intra8x9_ddl2)]
2765 pshufb m2, m0, [off(intra8x9_ddl3)]
2769 pshufb m2, m0, [off(intra8x9_ddl4)]
2778 ; Gt1 Gt2 Gt3 Gt4 Gt5 Gt6 Gt7 Gt8
2779 ; Ft1 Ft2 Ft3 Ft4 Ft5 Ft6 Ft7 Ft8
2780 ; Gt2 Gt3 Gt4 Gt5 Gt6 Gt7 Gt8 Gt9
2781 ; Ft2 Ft3 Ft4 Ft5 Ft6 Ft7 Ft8 Ft9
2782 ; Gt3 Gt4 Gt5 Gt6 Gt7 Gt8 Gt9 GtA
2783 ; Ft3 Ft4 Ft5 Ft6 Ft7 Ft8 Ft9 FtA
2784 ; Gt4 Gt5 Gt6 Gt7 Gt8 Gt9 GtA GtB
2785 ; Ft4 Ft5 Ft6 Ft7 Ft8 Ft9 FtA FtB
2786 pshufb m1, m3, [off(intra8x9_vl1)]
2787 pshufb m2, m0, [off(intra8x9_vl2)]
2788 pshufb m3, m3, [off(intra8x9_vl3)]
2789 pshufb m0, m0, [off(intra8x9_vl4)]
2804 pextrw [r4+14], m0, 0
2808 lea r5, [rsp+padbase+0x100]
2812 ; Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 Ft6
2813 ; Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5
2814 ; Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4
2815 ; Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3
2816 ; Fl3 Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2
2817 ; Fl4 Fl3 Fl2 Fl1 Fl0 Flt Ft0 Ft1
2818 ; Fl5 Fl4 Fl3 Fl2 Fl1 Fl0 Flt Ft0
2819 ; Fl6 Fl5 Fl4 Fl3 Fl2 Fl1 Fl0 Flt
2823 pavgb m3, m2, m0 ; Gl6 Gl5 Gl4 Gl3 Gl2 Gl1 Gl0 Glt Gt0 Gt1 Gt2 Gt3 Gt4 Gt5 Gt6 Gt7
2824 PRED4x4_LOWPASS m0, m1, m2, m0, tmp ; Fl7 Fl6 Fl5 Fl4 Fl3 Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 Ft6
2825 pshufb m1, m0, [off(intra8x9_ddr1)]
2826 pshufb m2, m0, [off(intra8x9_ddr2)]
2832 pshufb m2, m0, [off(intra8x9_ddr3)]
2836 pshufb m2, m0, [off(intra8x9_ddr4)]
2846 %define off(m) (r0+m-(intra8x9_h1+256+128))
2847 %define pred(i,j) [r5+i*0x40+j*0x10-0x1C0]
2850 ; Gt0 Gt1 Gt2 Gt3 Gt4 Gt5 Gt6 Gt7
2851 ; Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 Ft6
2852 ; Fl0 Gt0 Gt1 Gt2 Gt3 Gt4 Gt5 Gt6
2853 ; Fl1 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5
2854 ; Fl2 Fl0 Gt0 Gt1 Gt2 Gt3 Gt4 Gt5
2855 ; Fl3 Fl1 Flt Ft0 Ft1 Ft2 Ft3 Ft4
2856 ; Fl4 Fl2 Fl0 Gt0 Gt1 Gt2 Gt3 Gt4
2857 ; Fl5 Fl3 Fl1 Flt Ft0 Ft1 Ft2 Ft3
2858 movsd m2, m3, m0 ; Fl7 Fl6 Fl5 Fl4 Fl3 Fl2 Fl1 Fl0 Gt0 Gt1 Gt2 Gt3 Gt4 Gt5 Gt6 Gt7
2859 pshufb m1, m2, [off(intra8x9_vr1)]
2860 pshufb m2, m2, [off(intra8x9_vr3)]
2866 pshufb m2, m0, [off(intra8x9_vr2)]
2870 pshufb m2, m0, [off(intra8x9_vr4)]
2879 ; Glt Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5
2880 ; Gl0 Fl0 Glt Flt Ft0 Ft1 Ft2 Ft3
2881 ; Gl1 Fl1 Gl0 Fl0 Glt Flt Ft0 Ft1
2882 ; Gl2 Fl2 Gl1 Fl1 Gl0 Fl0 Glt Flt
2883 ; Gl3 Fl3 Gl2 Fl2 Gl1 Fl1 Gl0 Fl0
2884 ; Gl4 Fl4 Gl3 Fl3 Gl2 Fl2 Gl1 Fl1
2885 ; Gl5 Fl5 Gl4 Fl4 Gl3 Fl3 Gl2 Fl2
2886 ; Gl6 Fl6 Gl5 Fl5 Gl4 Fl4 Gl3 Fl3
2887 pshufd m2, m3, q0001
2889 pblendw m2, m0, q3330 ; Gl2 Gl1 Gl0 Glt ___ Fl2 Fl1 Fl0 Flt Ft0 Ft1 Ft2 Ft3 Ft4 Ft5 ___
2894 punpcklbw m0, m3 ; Fl7 Gl6 Fl6 Gl5 Fl5 Gl4 Fl4 Gl3 Fl3 Gl2 Fl2 Gl1 Fl1 Gl0 Fl0 ___
2895 pshufb m1, m2, [off(intra8x9_hd1)]
2896 pshufb m2, m2, [off(intra8x9_hd2)]
2902 pshufb m2, m0, [off(intra8x9_hd3)]
2903 pshufb m3, m0, [off(intra8x9_hd4)]
2912 ; don't just store to [r4+12]. this is too close to the load of dqword [r4] and would cause a forwarding stall
2917 ; Gl0 Fl1 Gl1 Fl2 Gl2 Fl3 Gl3 Fl4
2918 ; Gl1 Fl2 Gl2 Fl3 Gl3 Fl4 Gl4 Fl5
2919 ; Gl2 Fl3 Gl3 Gl3 Gl4 Fl5 Gl5 Fl6
2920 ; Gl3 Gl3 Gl4 Fl5 Gl5 Fl6 Gl6 Fl7
2921 ; Gl4 Fl5 Gl5 Fl6 Gl6 Fl7 Gl7 Gl7
2922 ; Gl5 Fl6 Gl6 Fl7 Gl7 Gl7 Gl7 Gl7
2923 ; Gl6 Fl7 Gl7 Gl7 Gl7 Gl7 Gl7 Gl7
2924 ; Gl7 Gl7 Gl7 Gl7 Gl7 Gl7 Gl7 Gl7
2926 pinsrb m0, [r2+7], 15 ; Gl7
2933 pshufb m1, m0, [off(intra8x9_hu1)]
2934 pshufb m2, m0, [off(intra8x9_hu2)]
2940 pshufb m2, m0, [off(intra8x9_hu3)]
2941 pshufb m0, m0, [off(intra8x9_hu4)]
2956 movzx r5d, word [r3+16]
2961 phminposuw m0, m0 ; v,h,dc,ddl,ddr,vr,hd,vl
2964 ; 8x8 sad is up to 14 bits; +bitcosts and saturate -> 14 bits; pack with 2 bit index
2967 paddw m0, [off(pw_s00112233)]
2970 pshuflw m1, m0, q0032
2973 ; repack with 3 bit index
2981 ; reverse to phminposuw order
2995 add r1, 4*FDEC_STRIDE
2996 mova m0, [rsp+padbase+r2+0x00]
2997 mova m1, [rsp+padbase+r2+0x10]
2998 mova m2, [rsp+padbase+r2+0x20]
2999 mova m3, [rsp+padbase+r2+0x30]
3000 movq [r1+FDEC_STRIDE*-4], m0
3001 movhps [r1+FDEC_STRIDE*-2], m0
3002 movq [r1+FDEC_STRIDE*-3], m1
3003 movhps [r1+FDEC_STRIDE*-1], m1
3004 movq [r1+FDEC_STRIDE* 0], m2
3005 movhps [r1+FDEC_STRIDE* 2], m2
3006 movq [r1+FDEC_STRIDE* 1], m3
3007 movhps [r1+FDEC_STRIDE* 3], m3
3012 ;-----------------------------------------------------------------------------
3013 ; int intra_sa8d_x9_8x8( uint8_t *fenc, uint8_t *fdec, uint8_t edge[36], uint16_t *bitcosts, uint16_t *satds )
3014 ;-----------------------------------------------------------------------------
3015 cglobal intra_sa8d_x9_8x8, 5,6,16
3016 %assign pad 0x2c0+0x10-gprsize-(stack_offset&15)
3017 %define fenc_buf rsp
3018 %define pred_buf rsp+0x80
3024 movddup m %+ %%i, [r0+%%i*FENC_STRIDE]
3025 pmaddubsw m9, m %+ %%i, m15
3026 punpcklbw m %+ %%i, m8
3027 mova [fenc_buf+%%i*0x10], m9
3031 ; save instruction size: avoid 4-byte memory offsets
3032 lea r0, [intra8x9_h1+0x80]
3033 %define off(m) (r0+m-(intra8x9_h1+0x80))
3034 lea r5, [pred_buf+0x80]
3037 HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 8
3046 ; 1D hadamard of edges
3054 pshufb m9, [intrax3_shuf]
3055 pmaddubsw m8, [pb_pppm]
3056 pmaddubsw m9, [pb_pppm]
3057 HSUMSUB2 psignw, m8, m9, m12, m13, m9, q1032, [pw_ppppmmmm]
3058 HSUMSUB2 psignw, m8, m9, m12, m13, m9, q2301, [pw_ppmmppmm]
3074 psllw m8, 3 ; left edge
3077 pabsw m8, m8 ; 1x8 sum
3087 punpcklqdq m0, m4 ; transpose
3088 psllw m9, 3 ; top edge
3089 psrldq m10, m11, 2 ; 8x7 sum
3090 psubw m0, m9 ; 8x1 sum
3094 phaddd m10, m8 ; logically phaddw, but this is faster and it won't overflow
3100 pshufb m0, m3, [off(intra8x9_h1)]
3101 pshufb m1, m3, [off(intra8x9_h2)]
3102 pshufb m2, m3, [off(intra8x9_h3)]
3103 pshufb m3, m3, [off(intra8x9_h4)]
3114 PRED4x4_LOWPASS m8, m1, m2, m8, m3
3115 pshufb m0, m8, [off(intra8x9_ddl1)]
3116 pshufb m1, m8, [off(intra8x9_ddl2)]
3117 pshufb m2, m8, [off(intra8x9_ddl3)]
3118 pshufb m3, m8, [off(intra8x9_ddl4)]
3124 pshufb m0, m9, [off(intra8x9_vl1)]
3125 pshufb m1, m8, [off(intra8x9_vl2)]
3126 pshufb m2, m9, [off(intra8x9_vl3)]
3127 pshufb m3, m8, [off(intra8x9_vl4)]
3138 PRED4x4_LOWPASS m8, m1, m2, m8, m3
3139 pshufb m0, m8, [off(intra8x9_ddr1)]
3140 pshufb m1, m8, [off(intra8x9_ddr2)]
3141 pshufb m2, m8, [off(intra8x9_ddr3)]
3142 pshufb m3, m8, [off(intra8x9_ddr4)]
3148 %define off(m) (r0+m-(intra8x9_h1+0x180))
3152 pshufb m0, m2, [off(intra8x9_vr1)]
3153 pshufb m1, m8, [off(intra8x9_vr2)]
3154 pshufb m2, m2, [off(intra8x9_vr3)]
3155 pshufb m3, m8, [off(intra8x9_vr4)]
3162 pshufd m1, m9, q0001
3163 pblendw m1, m8, q3330
3165 pshufd m2, m9, q0001
3169 pshufb m0, m1, [off(intra8x9_hd1)]
3170 pshufb m1, m1, [off(intra8x9_hd2)]
3171 pshufb m2, m8, [off(intra8x9_hd3)]
3172 pshufb m3, m8, [off(intra8x9_hd4)]
3180 pinsrb m8, [r2+7], 15
3187 pshufb m0, m8, [off(intra8x9_hu1)]
3188 pshufb m1, m8, [off(intra8x9_hu2)]
3189 pshufb m2, m8, [off(intra8x9_hu3)]
3190 pshufb m3, m8, [off(intra8x9_hu4)]
3198 pshuflw m1, m0, q0032
3207 movzx r5d, word [r3+16]
3215 ; 8x8 sa8d is up to 15 bits; +bitcosts and saturate -> 15 bits; pack with 1 bit index
3217 paddw m0, [off(pw_s00001111)]
3220 pshuflw m1, m0, q0032
3223 pcmpgtw m2, m1 ; 2nd index bit
3226 ; repack with 3 bit index
3234 lea r3d, [ r3*4+r4+1]
3237 ; reverse to phminposuw order
3251 add r1, 4*FDEC_STRIDE
3252 mova m0, [pred_buf+r2+0x00]
3253 mova m1, [pred_buf+r2+0x10]
3254 mova m2, [pred_buf+r2+0x20]
3255 mova m3, [pred_buf+r2+0x30]
3256 movq [r1+FDEC_STRIDE*-4], m0
3257 movhps [r1+FDEC_STRIDE*-2], m0
3258 movq [r1+FDEC_STRIDE*-3], m1
3259 movhps [r1+FDEC_STRIDE*-1], m1
3260 movq [r1+FDEC_STRIDE* 0], m2
3261 movhps [r1+FDEC_STRIDE* 2], m2
3262 movq [r1+FDEC_STRIDE* 1], m3
3263 movhps [r1+FDEC_STRIDE* 3], m3
3270 %xdefine fenc_buf fenc_buf+gprsize
3283 PERMUTE 0,4, 1,5, 2,0, 3,1, 4,6, 5,7, 6,2, 7,3
3286 psubw m0, [fenc_buf+0x00]
3287 psubw m1, [fenc_buf+0x10]
3290 psubw m2, [fenc_buf+0x20]
3291 psubw m3, [fenc_buf+0x30]
3294 psubw m4, [fenc_buf+0x40]
3295 psubw m5, [fenc_buf+0x50]
3298 psubw m6, [fenc_buf+0x60]
3299 psubw m7, [fenc_buf+0x70]
3300 HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 13, 14
3305 %endif ; ARCH_X86_64
3306 %endmacro ; INTRA8_X9
3308 ; in: r0=pix, r1=stride, r2=stride*3, r3=tmp, m6=mask_ac4, m7=0
3309 ; out: [tmp]=hadamard4, m0=satd
3311 cglobal hadamard_ac_4x4
3312 %ifdef HIGH_BIT_DEPTH
3317 %else ; !HIGH_BIT_DEPTH
3326 %endif ; HIGH_BIT_DEPTH
3327 HADAMARD4_2D 0, 1, 2, 3, 4
3343 cglobal hadamard_ac_2x2max
3349 SUMSUB_BADC w, 0, 1, 2, 3, 4
3350 ABSW2 m0, m2, m0, m2, m4, m5
3351 ABSW2 m1, m3, m1, m3, m4, m5
3352 HADAMARD 0, max, 0, 2, 4, 5
3353 HADAMARD 0, max, 1, 3, 4, 5
3354 %ifdef HIGH_BIT_DEPTH
3359 %else ; !HIGH_BIT_DEPTH
3362 %endif ; HIGH_BIT_DEPTH
3367 %ifdef HIGH_BIT_DEPTH
3373 %ifdef HIGH_BIT_DEPTH
3378 %endif ; HIGH_BIT_DEPTH
3381 cglobal hadamard_ac_8x8
3383 %ifdef HIGH_BIT_DEPTH
3387 %endif ; HIGH_BIT_DEPTH
3388 call hadamard_ac_4x4_mmx2
3389 add r0, 4*SIZEOF_PIXEL
3393 call hadamard_ac_4x4_mmx2
3397 call hadamard_ac_4x4_mmx2
3398 sub r0, 4*SIZEOF_PIXEL
3401 call hadamard_ac_4x4_mmx2
3404 mova [rsp+gprsize+8], m5 ; save satd
3405 %ifdef HIGH_BIT_DEPTH
3409 call hadamard_ac_2x2max_mmx2
3415 SUMSUB_BADC w, 0, 1, 2, 3, 4
3416 HADAMARD 0, sumsub, 0, 2, 4, 5
3417 ABSW2 m1, m3, m1, m3, m4, m5
3418 ABSW2 m0, m2, m0, m2, m4, m5
3419 HADAMARD 0, max, 1, 3, 4, 5
3420 %ifdef HIGH_BIT_DEPTH
3430 %else ; !HIGH_BIT_DEPTH
3436 %endif ; HIGH_BIT_DEPTH
3437 mova [rsp+gprsize], m6 ; save sa8d
3442 %macro HADAMARD_AC_WXH_SUM_MMX 2
3443 mova m1, [rsp+1*mmsize]
3444 %ifdef HIGH_BIT_DEPTH
3446 paddd m0, [rsp+2*mmsize]
3447 paddd m1, [rsp+3*mmsize]
3450 mova m2, [rsp+4*mmsize]
3451 paddd m1, [rsp+5*mmsize]
3452 paddd m2, [rsp+6*mmsize]
3454 paddd m1, [rsp+7*mmsize]
3461 %else ; !HIGH_BIT_DEPTH
3463 paddusw m0, [rsp+2*mmsize]
3464 paddusw m1, [rsp+3*mmsize]
3467 mova m2, [rsp+4*mmsize]
3468 paddusw m1, [rsp+5*mmsize]
3469 paddusw m2, [rsp+6*mmsize]
3471 paddusw m1, [rsp+7*mmsize]
3483 %endif ; HIGH_BIT_DEPTH
3486 %macro HADAMARD_AC_WXH_MMX 2
3487 cglobal pixel_hadamard_ac_%1x%2, 2,4
3488 %assign pad 16-gprsize-(stack_offset&15)
3494 call hadamard_ac_8x8_mmx2
3499 call hadamard_ac_8x8_mmx2
3504 lea r0, [r0+ysub*4+8*SIZEOF_PIXEL]
3506 call hadamard_ac_8x8_mmx2
3510 call hadamard_ac_8x8_mmx2
3513 HADAMARD_AC_WXH_SUM_MMX %1, %2
3521 add rsp, 128+%1*%2/4+pad
3523 %endmacro ; HADAMARD_AC_WXH_MMX
3525 HADAMARD_AC_WXH_MMX 16, 16
3526 HADAMARD_AC_WXH_MMX 8, 16
3527 HADAMARD_AC_WXH_MMX 16, 8
3528 HADAMARD_AC_WXH_MMX 8, 8
3530 %macro LOAD_INC_8x4W_SSE2 5
3531 %ifdef HIGH_BIT_DEPTH
3539 %else ; !HIGH_BIT_DEPTH
3551 %endif ; HIGH_BIT_DEPTH
3554 %macro LOAD_INC_8x4W_SSSE3 5
3555 LOAD_DUP_4x8P %3, %4, %1, %2, [r0+r1*2], [r0+r2], [r0], [r0+r1]
3559 HSUMSUB %1, %2, %3, %4, %5
3562 %macro HADAMARD_AC_SSE2 0
3563 ; in: r0=pix, r1=stride, r2=stride*3
3564 ; out: [esp+16]=sa8d, [esp+32]=satd, r0+=stride*4
3565 cglobal hadamard_ac_8x8
3571 %define spill0 [rsp+gprsize]
3572 %define spill1 [rsp+gprsize+16]
3573 %define spill2 [rsp+gprsize+32]
3575 %ifdef HIGH_BIT_DEPTH
3577 %elif cpuflag(ssse3)
; LOAD_INC loads sumsubs
; LOAD_INC only unpacks to words
3586 LOAD_INC_8x4W 0, 1, 2, 3, 7
3588 HADAMARD4_2D_SSE 0, 1, 2, 3, 4
3590 HADAMARD4_V 0, 1, 2, 3, 4
3594 LOAD_INC_8x4W 4, 5, 6, 7, 1
3596 HADAMARD4_2D_SSE 4, 5, 6, 7, 1
3598 HADAMARD4_V 4, 5, 6, 7, 1
3603 HADAMARD 1, sumsub, 0, 1, 6, 7
3604 HADAMARD 1, sumsub, 2, 3, 6, 7
3609 HADAMARD 1, sumsub, 4, 5, 1, 0
3610 HADAMARD 1, sumsub, 6, 7, 1, 0
3623 pand m1, [mask_ac4b]
3627 AC_PADD m1, m3, [pw_1]
3629 AC_PADD m1, m2, [pw_1]
3631 AC_PADD m1, m3, [pw_1]
3633 AC_PADD m1, m2, [pw_1]
3635 AC_PADD m1, m3, [pw_1]
3637 AC_PADD m1, m2, [pw_1]
3641 mova [rsp+gprsize+32], m1 ; save satd
3652 HADAMARD %%x, amax, 3, 7, 4
3653 HADAMARD %%x, amax, 2, 6, 7, 4
3655 HADAMARD %%x, amax, 1, 5, 6, 7
3656 HADAMARD %%x, sumsub, 0, 4, 5, 6
3658 AC_PADD m2, m3, [pw_1]
3659 AC_PADD m2, m1, [pw_1]
3660 %ifdef HIGH_BIT_DEPTH
3664 %endif ; HIGH_BIT_DEPTH
3668 AC_PADD m2, m4, [pw_1]
3669 AC_PADD m2, m0, [pw_1]
3670 mova [rsp+gprsize+16], m2 ; save sa8d
3675 HADAMARD_AC_WXH_SSE2 16, 16
3676 HADAMARD_AC_WXH_SSE2 8, 16
3677 HADAMARD_AC_WXH_SSE2 16, 8
3678 HADAMARD_AC_WXH_SSE2 8, 8
3679 %endmacro ; HADAMARD_AC_SSE2
3681 %macro HADAMARD_AC_WXH_SUM_SSE2 2
3682 mova m1, [rsp+2*mmsize]
3683 %ifdef HIGH_BIT_DEPTH
3685 paddd m0, [rsp+3*mmsize]
3686 paddd m1, [rsp+4*mmsize]
3689 paddd m0, [rsp+5*mmsize]
3690 paddd m1, [rsp+6*mmsize]
3691 paddd m0, [rsp+7*mmsize]
3692 paddd m1, [rsp+8*mmsize]
3697 %else ; !HIGH_BIT_DEPTH
3699 paddusw m0, [rsp+3*mmsize]
3700 paddusw m1, [rsp+4*mmsize]
3703 paddusw m0, [rsp+5*mmsize]
3704 paddusw m1, [rsp+6*mmsize]
3705 paddusw m0, [rsp+7*mmsize]
3706 paddusw m1, [rsp+8*mmsize]
3711 %endif ; HIGH_BIT_DEPTH
3714 ; struct { int satd, int sa8d; } pixel_hadamard_ac_16x16( uint8_t *pix, int stride )
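; (A sketch of the semantics: satd sums the absolute 4x4 Hadamard
; coefficients and sa8d the absolute 8x8 Hadamard coefficients of the source
; block, in both cases with the DC terms masked out -- hence the
; mask_ac4/mask_ac8 constants -- and the pair is returned packed in edx:eax
; per the struct comment above.)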
3715 %macro HADAMARD_AC_WXH_SSE2 2
3716 cglobal pixel_hadamard_ac_%1x%2, 2,3,11
3717 %assign pad 16-gprsize-(stack_offset&15)
3722 call hadamard_ac_8x8
3727 call hadamard_ac_8x8
3732 lea r0, [r0+ysub*4+8*SIZEOF_PIXEL]
3734 call hadamard_ac_8x8
3738 call hadamard_ac_8x8
3741 HADAMARD_AC_WXH_SUM_SSE2 %1, %2
3744 shr edx, 2 - (%1*%2 >> 8)
3750 add rsp, 16+%1*%2/2+pad
3752 %endmacro ; HADAMARD_AC_WXH_SSE2
3757 cextern pixel_sa8d_8x8_internal_mmx2
3762 %define TRANS TRANS_SSE2
3763 %define DIFFOP DIFF_UNPACK_SSE2
3764 %define LOAD_INC_8x4W LOAD_INC_8x4W_SSE2
3765 %define LOAD_SUMSUB_8x4P LOAD_DIFF_8x4P
3766 %define LOAD_SUMSUB_16P LOAD_SUMSUB_16P_SSE2
3767 %define movdqa movaps ; doesn't hurt pre-nehalem, might as well save size
3768 %define movdqu movups
3769 %define punpcklqdq movlhps
3773 %ifndef HIGH_BIT_DEPTH
3781 %define DIFFOP DIFF_SUMSUB_SSSE3
3782 %define LOAD_DUP_4x8P LOAD_DUP_4x8P_CONROE
3783 %ifndef HIGH_BIT_DEPTH
3784 %define LOAD_INC_8x4W LOAD_INC_8x4W_SSSE3
3785 %define LOAD_SUMSUB_8x4P LOAD_SUMSUB_8x4P_SSSE3
3786 %define LOAD_SUMSUB_16P LOAD_SUMSUB_16P_SSSE3
3792 %ifndef HIGH_BIT_DEPTH
3796 %undef movdqa ; nehalem doesn't like movaps
3797 %undef movdqu ; movups
3798 %undef punpcklqdq ; or movlhps
3799 %ifndef HIGH_BIT_DEPTH
3804 %define TRANS TRANS_SSE4
3805 %define LOAD_DUP_4x8P LOAD_DUP_4x8P_PENRYN
3810 %ifndef HIGH_BIT_DEPTH
3818 %ifndef HIGH_BIT_DEPTH
3824 %define TRANS TRANS_XOP
3828 %ifndef HIGH_BIT_DEPTH
3830 ; no xop INTRA8_X9. it's slower than avx on bulldozer. dunno why.
3834 ;=============================================================================
3836 ;=============================================================================
3838 ;-----------------------------------------------------------------------------
3839 ; void pixel_ssim_4x4x2_core( const uint8_t *pix1, int stride1,
3840 ; const uint8_t *pix2, int stride2, int sums[2][4] )
3841 ;-----------------------------------------------------------------------------
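; A scalar C sketch of the operation (editorial illustration): sums of
; pixels, squares and products are gathered for two horizontally adjacent
; 4x4 blocks at once:
;
; static void ssim_4x4x2_core( const uint8_t *pix1, int stride1,
;                              const uint8_t *pix2, int stride2, int sums[2][4] )
; {
;     for( int z = 0; z < 2; z++, pix1 += 4, pix2 += 4 )
;     {
;         int s1 = 0, s2 = 0, ss = 0, s12 = 0;
;         for( int y = 0; y < 4; y++ )
;             for( int x = 0; x < 4; x++ )
;             {
;                 int a = pix1[x+y*stride1];
;                 int b = pix2[x+y*stride2];
;                 s1 += a; s2 += b; ss += a*a + b*b; s12 += a*b;
;             }
;         sums[z][0] = s1; sums[z][1] = s2; sums[z][2] = ss; sums[z][3] = s12;
;     }
; }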
3843 %ifdef HIGH_BIT_DEPTH
3844 movdqu m5, [r0+(%1&1)*r1]
3845 movdqu m6, [r2+(%1&1)*r3]
3847 movq m5, [r0+(%1&1)*r1]
3848 movq m6, [r2+(%1&1)*r3]
3866 ACCUM paddd, 3, 5, %1
3867 ACCUM paddd, 4, 7, %1
3872 cglobal pixel_ssim_4x4x2_core, 4,4,8
3882 pshufd m5, m3, q2301
3885 pshufd m6, m4, q2301
3888 pshufd m1, m1, q3120
3891 punpckhdq m5, m3, m4
3907 ;-----------------------------------------------------------------------------
3908 ; float pixel_ssim_end( int sum0[5][4], int sum1[5][4], int width )
3909 ;-----------------------------------------------------------------------------
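; Per-window arithmetic performed below (a sketch matching the per-line
; comments that follow; s1/s2/ss/s12 are the sums over one 8x8 window, i.e.
; four of the 4x4 sums from pixel_ssim_4x4x2_core added together):
;     vars  = ss*64 - s1*s1 - s2*s2;
;     covar = s12*64 - s1*s2;
;     ssim  = (2*s1*s2 + ssim_c1) * (2*covar + ssim_c2)
;           / ((s1*s1 + s2*s2 + ssim_c1) * (vars + ssim_c2));
; summed over "width" windows.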
3910 cglobal pixel_ssim_end4, 3,3,7
3925 movdqa m5, [ssim_c1]
3926 movdqa m6, [ssim_c2]
3927 TRANSPOSE4x4D 0, 1, 2, 3, 4
3929 ; s1=m0, s2=m1, ss=m2, s12=m3
3935 mulps m2, [pf_64] ; ss*64
3936 mulps m3, [pf_128] ; s12*128
3938 mulps m4, m0 ; s1*s2
3939 mulps m1, m1 ; s2*s2
3940 mulps m0, m0 ; s1*s1
3941 addps m4, m4 ; s1*s2*2
3942 addps m0, m1 ; s1*s1 + s2*s2
3944 subps m3, m4 ; covar*2
3945 addps m4, m5 ; s1*s2*2 + ssim_c1
3946 addps m0, m5 ; s1*s1 + s2*s2 + ssim_c1
3947 addps m2, m6 ; vars + ssim_c2
3948 addps m3, m6 ; covar*2 + ssim_c2
3950 pmaddwd m4, m1, m0 ; s1*s2
3953 pmaddwd m0, m0 ; s1*s1 + s2*s2
3957 psubd m3, m4 ; covar*2
3963 cvtdq2ps m0, m0 ; (float)(s1*s1 + s2*s2 + ssim_c1)
3964 cvtdq2ps m4, m4 ; (float)(s1*s2*2 + ssim_c1)
3965 cvtdq2ps m3, m3 ; (float)(covar*2 + ssim_c2)
3966 cvtdq2ps m2, m2 ; (float)(vars + ssim_c2)
3973 je .skip ; faster only if this is the common case; remove branch if we use ssim on a macroblock level
3976 lea r3, [mask_ff + 16]
3977 movdqu m1, [r3 + r2*4]
3979 movdqu m1, [mask_ff + r2*4 + 16]
3985 pshuflw m4, m0, q0032
3999 ;=============================================================================
4000 ; Successive Elimination ADS
4001 ;=============================================================================
4013 %macro ADS_END 1 ; unroll_size
4019 WIN64_RESTORE_XMM rsp
4023 ;-----------------------------------------------------------------------------
4024 ; int pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
4025 ; uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
4026 ;-----------------------------------------------------------------------------
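; A scalar C sketch of the ads4 variant (editorial; abs() as in <stdlib.h>);
; ads1/ads2 use just the first one/two DC terms. The SIMD versions below
; compute the same metric but emit a byte mask per candidate -- consumed by
; pixel_ads_mvs at the end of the file -- instead of building the mv list
; directly:
;
; static int pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
;                        uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
; {
;     int nmv = 0;
;     for( int i = 0; i < width; i++, sums++ )
;     {
;         int ads = abs( enc_dc[0] - sums[0] )
;                 + abs( enc_dc[1] - sums[8] )
;                 + abs( enc_dc[2] - sums[delta] )
;                 + abs( enc_dc[3] - sums[delta+8] )
;                 + cost_mvx[i];
;         if( ads < thresh )
;             mvs[nmv++] = i;
;     }
;     return nmv;
; }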
4028 cglobal pixel_ads4, 6,7
4032 pshufw mm6, mm6, q2222
4034 pshufw mm4, mm4, q2222
4044 movq mm3, [r1+r2+16]
4059 cglobal pixel_ads2, 6,7
4063 pshufw mm6, mm6, q2222
4080 cglobal pixel_ads1, 6,7
4102 cglobal pixel_ads4, 6,7,12
4104 pshuflw xmm7, xmm4, 0
4105 pshuflw xmm6, xmm4, q2222
4106 pshufhw xmm5, xmm4, 0
4107 pshufhw xmm4, xmm4, q2222
4108 punpcklqdq xmm7, xmm7
4109 punpcklqdq xmm6, xmm6
4110 punpckhqdq xmm5, xmm5
4111 punpckhqdq xmm4, xmm4
4113 pshuflw xmm8, r6m, 0
4114 punpcklqdq xmm8, xmm8
4117 movdqu xmm11, [r1+r2]
4119 psubw xmm0, xmm10, xmm7
4120 movdqu xmm10, [r1+16]
4121 psubw xmm1, xmm10, xmm6
4122 ABSW xmm0, xmm0, xmm2
4123 ABSW xmm1, xmm1, xmm3
4124 psubw xmm2, xmm11, xmm5
4125 movdqu xmm11, [r1+r2+16]
4127 psubw xmm3, xmm11, xmm4
4129 ABSW xmm2, xmm2, xmm1
4130 ABSW xmm3, xmm3, xmm1
4134 psubusw xmm1, xmm8, xmm0
4141 movdqu xmm1, [r1+16]
4144 ABSW xmm0, xmm0, xmm2
4145 ABSW xmm1, xmm1, xmm3
4146 movdqu xmm2, [r1+r2]
4147 movdqu xmm3, [r1+r2+16]
4151 ABSW xmm2, xmm2, xmm1
4152 ABSW xmm3, xmm3, xmm1
4157 pshuflw xmm1, xmm1, 0
4158 punpcklqdq xmm1, xmm1
4166 cglobal pixel_ads2, 6,7,8
4169 pshuflw xmm7, xmm6, 0
4170 pshuflw xmm6, xmm6, q2222
4171 pshuflw xmm5, xmm5, 0
4172 punpcklqdq xmm7, xmm7
4173 punpcklqdq xmm6, xmm6
4174 punpcklqdq xmm5, xmm5
4178 movdqu xmm1, [r1+r2]
4182 ABSW xmm0, xmm0, xmm2
4183 ABSW xmm1, xmm1, xmm3
4186 psubusw xmm1, xmm5, xmm0
4191 cglobal pixel_ads1, 6,7,8
4194 pshuflw xmm7, xmm7, 0
4195 pshuflw xmm6, xmm6, 0
4196 punpcklqdq xmm7, xmm7
4197 punpcklqdq xmm6, xmm6
4201 movdqu xmm1, [r1+16]
4205 movdqu xmm3, [r3+16]
4206 ABSW xmm0, xmm0, xmm4
4207 ABSW xmm1, xmm1, xmm5
4210 psubusw xmm4, xmm6, xmm0
4211 psubusw xmm5, xmm6, xmm1
; int pixel_ads_mvs( int16_t *mvs, uint8_t *masks, int width )
; {
;     int nmv = 0, i, j;
;     *(uint32_t*)(masks+width) = 0;
;     for( i = 0; i < width; i += 8 )
;     {
;         uint64_t mask = *(uint64_t*)(masks+i);
;         if( !mask ) continue;
;         for( j = 0; j < 8; j++ )
;             if( mask & ((uint64_t)255<<j*8) )
;                 mvs[nmv++] = i+j;
;     }
;     return nmv;
; }
4241 test r2d, 0xff<<(%1*8)
4248 cglobal pixel_ads_mvs, 0,7,0
4255 ; clear last block in case width isn't divisible by 8. (assume divisible by 4, so clearing 4 bytes is enough.)