1 ;*****************************************************************************
2 ;* x86-optimized functions for ssim filter
4 ;* Copyright (C) 2015 Ronald S. Bultje <rsbultje@gmail.com>
6 ;* This file is part of FFmpeg.
8 ;* FFmpeg is free software; you can redistribute it and/or
9 ;* modify it under the terms of the GNU Lesser General Public
10 ;* License as published by the Free Software Foundation; either
11 ;* version 2.1 of the License, or (at your option) any later version.
13 ;* FFmpeg is distributed in the hope that it will be useful,
14 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 ;* Lesser General Public License for more details.
18 ;* You should have received a copy of the GNU Lesser General Public
19 ;* License along with FFmpeg; if not, write to the Free Software
20 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 ;******************************************************************************
23 %include "libavutil/x86/x86util.asm"
; SSIM stabilization constants C1 and C2 (Wang et al.), pre-scaled for this
; filter's integer pipeline: C = (k*255)^2 * scale, rounded to nearest.
; Broadcast across 4 dwords so they can be added with a single paddd.
28 ssim_c1: times 4 dd 416 ;(.01*.01*255*255*64 + .5)
29 ssim_c2: times 4 dd 235963 ;(.03*.03*255*255*64*63 + .5)
33 %macro SSIM_4X4_LINE 1
; ssim_4x4_line: for one line of 4x4 pixel blocks, accumulate the per-block
; SSIM statistics (sum of pixels s1/s2, sum of squares ss, cross sum s12)
; from 8-bit source (buf) and reference (ref) rows into the sums array.
; Macro argument %1 = number of xmm registers to declare via cglobal.
; NOTE(review): this chunk is a sampled excerpt — the %if/%else that selects
; between the two prototypes below (presumably ARCH_X86_64 vs x86-32, since
; the second drops the 'w' register argument), the loop label, the
; accumulator zeroing, and the closing %endmacro are not visible. Confirm
; against the full file before editing.
35 cglobal ssim_4x4_line, 6, 8, %1, buf, buf_stride, ref, ref_stride, sums, w, buf_stride3, ref_stride3
37 cglobal ssim_4x4_line, 5, 7, %1, buf, buf_stride, ref, ref_stride, sums, buf_stride3, ref_stride3
; precompute 3*stride so all four rows are addressable as
; [base + stride*{0,1,2}] and [base + stride3] without extra adds
40 lea ref_stride3q, [ref_strideq*3]
41 lea buf_stride3q, [buf_strideq*3]
; ---- XOP variant: vpmadcswd (multiply words, add pairs to dword, then
; accumulate into dst) fuses the pmaddwd+paddd pattern into one op.
; m4 accumulates ss (squares of both buf and ref rows), m6 accumulates s12.
; NOTE(review): presumably guarded by %if cpuflag(xop) in the full file;
; the guard and the s1/s2 accumulation lines are not visible here.
49 pmovzxbw m0, [bufq+buf_strideq*0]
50 pmovzxbw m1, [refq+ref_strideq*0]
53 pmovzxbw m2, [bufq+buf_strideq*1]
54 vpmadcswd m4, m1, m1, m4
55 pmovzxbw m3, [refq+ref_strideq*1]
57 vpmadcswd m4, m2, m2, m4
58 vpmadcswd m6, m2, m3, m6
60 vpmadcswd m4, m3, m3, m4
62 pmovzxbw m2, [bufq+buf_strideq*2]
63 pmovzxbw m3, [refq+ref_strideq*2]
64 vpmadcswd m4, m2, m2, m4
65 vpmadcswd m6, m2, m3, m6
66 pmovzxbw m5, [bufq+buf_stride3q]
67 pmovzxbw m7, [refq+ref_stride3q]
68 vpmadcswd m4, m3, m3, m4
69 vpmadcswd m6, m5, m7, m6
72 vpmadcswd m4, m5, m5, m4
75 vpmadcswd m4, m7, m7, m4
; ---- non-XOP variant: load rows, widen bytes to words, then pmaddwd for
; squares and cross products. Assumes m7 == 0 here so punpcklbw
; zero-extends; the pxor that clears it is not visible in this chunk.
77 movh m0, [bufq+buf_strideq*0] ; a1
78 movh m1, [refq+ref_strideq*0] ; b1
79 movh m2, [bufq+buf_strideq*1] ; a2
80 movh m3, [refq+ref_strideq*1] ; b2
81 punpcklbw m0, m7 ; s1 [word]
82 punpcklbw m1, m7 ; s2 [word]
83 punpcklbw m2, m7 ; s1 [word]
84 punpcklbw m3, m7 ; s2 [word]
85 pmaddwd m4, m0, m0 ; a1 * a1
86 pmaddwd m5, m1, m1 ; b1 * b1
87 pmaddwd m8, m2, m2 ; a2 * a2
88 pmaddwd m9, m3, m3 ; b2 * b2
; NOTE(review): intermediate paddd lines are missing from this excerpt, so
; the apparent overwrite of m5/m9 below may be fine in the full file.
91 pmaddwd m6, m0, m1 ; a1 * b1 = ss12
92 pmaddwd m5, m2, m3 ; a2 * b2 = ss12
; rows 3 and 4 of the same 4x4 blocks, same widen + multiply-add pattern
98 movh m2, [bufq+buf_strideq*2] ; a3
99 movh m3, [refq+ref_strideq*2] ; b3
100 movh m5, [bufq+buf_stride3q] ; a4
101 movh m8, [refq+ref_stride3q] ; b4
102 punpcklbw m2, m7 ; s1 [word]
103 punpcklbw m3, m7 ; s2 [word]
104 punpcklbw m5, m7 ; s1 [word]
105 punpcklbw m8, m7 ; s2 [word]
106 pmaddwd m9, m2, m2 ; a3 * a3
107 pmaddwd m10, m3, m3 ; b3 * b3
108 pmaddwd m12, m5, m5 ; a4 * a4
109 pmaddwd m13, m8, m8 ; b4 * b4
110 pmaddwd m11, m2, m3 ; a3 * b3 = ss12
111 pmaddwd m14, m5, m8 ; a4 * b4 = ss12
; ---- merge stage: collapse the per-lane partial sums for the two 4x4
; blocks ("a" in the low half, "b" in the high half) into the final
; [s1, s2, ss, s12] dword layout expected by the sums buffer.
124 ; m0 = [word] s1 a,a,a,a,b,b,b,b
125 ; m1 = [word] s2 a,a,a,a,b,b,b,b
126 ; m4 = [dword] ss a,a,b,b
127 ; m6 = [dword] s12 a,a,b,b
; XOP merge: vphaddwq/vphadddq do the horizontal reductions directly.
; NOTE(review): the %if/%else separating this from the phaddd merge below
; is not visible in this chunk.
130 vphaddwq m0, m0 ; [dword] s1 a, 0, b, 0
131 vphaddwq m1, m1 ; [dword] s2 a, 0, b, 0
132 vphadddq m4, m4 ; [dword] ss a, 0, b, 0
133 vphadddq m6, m6 ; [dword] s12 a, 0, b, 0
134 punpckhdq m2, m0, m1 ; [dword] s1 b, s2 b, 0, 0
135 punpckldq m0, m1 ; [dword] s1 a, s2 a, 0, 0
136 punpckhdq m3, m4, m6 ; [dword] ss b, s12 b, 0, 0
137 punpckldq m4, m6 ; [dword] ss a, s12 a, 0, 0
138 punpcklqdq m1, m2, m3 ; [dword] b s1, s2, ss, s12
139 punpcklqdq m0, m4 ; [dword] a s1, s2, ss, s12
; non-XOP merge: pmaddwd against m15 (presumably a vector of word 1s set up
; outside this excerpt — confirm) reduces word sums to dwords, then phaddd
; finishes the horizontal adds before the same interleave shuffle.
141 pmaddwd m0, m15 ; [dword] s1 a,a,b,b
142 pmaddwd m1, m15 ; [dword] s2 a,a,b,b
143 phaddd m0, m4 ; [dword] s1 a, b, ss a, b
144 phaddd m1, m6 ; [dword] s2 a, b, s12 a, b
145 punpckhdq m2, m0, m1 ; [dword] ss a, s12 a, ss b, s12 b
146 punpckldq m0, m1 ; [dword] s1 a, s2 a, s1 b, s2 b
147 punpckhqdq m1, m0, m2 ; [dword] b s1, s2, ss, s12
148 punpcklqdq m0, m2 ; [dword] a s1, s2, ss, s12
; store block b's four sums; the store of m0 (block a) and the loop
; advance/termination are not visible in this excerpt.
152 mova [sumsq+mmsize], m1
; NOTE(review): orphan conditional — in the full file this presumably guards
; an XOP instantiation of SSIM_4X4_LINE; its body and %endif are not visible
; in this excerpt.
166 %if HAVE_XOP_EXTERNAL
; ssim_end_line: turn the accumulated per-4x4-block statistics from two
; adjacent block lines (sum0, sum1) into per-position SSIM values.
; Args: sum0, sum1 = int32 sums arrays [s1, s2, ss, s12] per block; w = width.
; NOTE(review): this function runs past the end of this excerpt (the final
; reduction and return are cut off), and several lines in between are
; missing — loop label, int->float conversion, the multiplies feeding divps.
172 cglobal ssim_end_line, 3, 3, 6, sum0, sum1, w
; load four consecutive sum groups and add the corresponding groups from the
; second line, combining vertically adjacent 4x4 statistics
175 mova m1, [sum0q+mmsize*0]
176 mova m2, [sum0q+mmsize*1]
177 mova m3, [sum0q+mmsize*2]
178 mova m4, [sum0q+mmsize*3]
179 paddd m1, [sum1q+mmsize*0]
180 paddd m2, [sum1q+mmsize*1]
181 paddd m3, [sum1q+mmsize*2]
182 paddd m4, [sum1q+mmsize*3]
; fold in the neighboring group as well (partners for m1..m3 are in the
; lines missing from this excerpt)
186 paddd m4, [sum0q+mmsize*4]
187 paddd m4, [sum1q+mmsize*4]
; transpose so each register holds one statistic across 4 blocks
188 TRANSPOSE4x4D 1, 2, 3, 4, 5
190 ; m1 = fs1, m2 = fs2, m3 = fss, m4 = fs12
; SSIM numerator/denominator terms:
;   ssim = ((2*fs1*fs2 + C1) * (2*cov + C2)) /
;          ((fs1^2 + fs2^2 + C1) * (var + C2))
; NOTE(review): the scaling (pslld) of fss/fs12 before the subtractions, and
; the second psubd for the variance, are in lines not visible here.
193 pmulld m5, m1, m2 ; fs1 * fs2
194 pmulld m1, m1 ; fs1 * fs1
195 pmulld m2, m2 ; fs2 * fs2
197 psubd m4, m5 ; covariance
198 psubd m3, m2 ; variance
200 ; m1 = fs1 * fs1, m2 = fs2 * fs2, m3 = variance, m4 = covariance, m5 = fs1 * fs2
201 paddd m4, m4 ; 2 * covariance
202 paddd m5, m5 ; 2 * fs1 * fs2
203 paddd m1, m2 ; fs1 * fs1 + fs2 * fs2
; add the stabilization constants so the division below never hits zero
204 paddd m3, [ssim_c2] ; variance + ssim_c2
205 paddd m4, [ssim_c2] ; 2 * covariance + ssim_c2
206 paddd m5, [ssim_c1] ; 2 * fs1 * fs2 + ssim_c1
207 paddd m1, [ssim_c1] ; fs1 * fs1 + fs2 * fs2 + ssim_c1
; float division producing the per-block ssim values (cvtdq2ps conversions
; and the numerator/denominator multiplies are in lines not visible here)
216 divps m4, m3 ; ssim_endl
; tail correction for the over-read past the row end; rest of the function
; (horizontal sum, return) is beyond this excerpt
223 ; subps the ones we added too much