;******************************************************************************
;* linear least squares model
;*
;* Copyright (c) 2013 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86util.asm"

SECTION .text

%define MAX_VARS 32 ; must match the MAX_VARS used on the C side
%define MAX_VARS_ALIGN (MAX_VARS+4)
%define COVAR_STRIDE MAX_VARS_ALIGN*8
%define COVAR(x,y) [covarq + (x)*8 + (y)*COVAR_STRIDE]
struc LLSModel2
    .covariance:  resq MAX_VARS_ALIGN*MAX_VARS_ALIGN
    .coeff:       resq MAX_VARS*MAX_VARS
    .variance:    resq MAX_VARS
    .indep_count: resd 1
endstruc
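
; COVAR(x,y) addresses covariance element (row y, column x): columns step by
; 8 bytes, rows by COVAR_STRIDE bytes.
;
; ADDPD_MEM mem, reg -- "mem += reg" for packed doubles -- is used throughout,
; but its definition is not shown in this excerpt. The body below is a
; reconstruction based on how it is called (AVX builds want the
; non-destructive three-operand form); treat it as a sketch, not the verbatim
; original.
%macro ADDPD_MEM 2
%if cpuflag(avx)
    vaddpd %2, %2, %1
%else
    addpd  %2, %1
%endif
    mova   %1, %2
%endmacro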

INIT_XMM sse2
; void update_lls(LLSModel2 *ctx, const double *var)
; (prototype assumed from the C side; accumulates one sample's pairwise
; products var[x]*var[y] into ctx->covariance)
cglobal update_lls, 2,5,8, ctx, var, i, j, covar2
    %define covarq ctxq
    mov     id, [ctxq + LLSModel2.indep_count]
    lea   varq, [varq + iq*8]
    ; Compute all 3 pairwise products of a 2x2 block that lies on the diagonal
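    ; Scalar equivalent (a sketch): accumulate var[y]*var[x] for every
    ; unordered pair {x, y} drawn from {i, i+1} -- 3 distinct products, since
    ; the covariance matrix is symmetric.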
    mova    m1, [varq + iq*8]
    mova    m3, [varq + iq*8 + 16]
    lea covarq, [covar2q + 16]
    ADDPD_MEM COVAR(-2,0), m0
    ADDPD_MEM COVAR(-2,1), m1
    ; Compute all 16 pairwise products of a 4x4 block
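    ; Scalar equivalent (a sketch): for y in i..i+3 and x in j..j+3,
    ;   covar[y][x] += var[y] * var[x]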
    ADDPD_MEM COVAR(0,0), m0
    ADDPD_MEM COVAR(0,1), m1
    ADDPD_MEM COVAR(0,2), m2
    ADDPD_MEM COVAR(0,3), m3
    mova    m3, [varq + jq*8 + 16]
    ADDPD_MEM COVAR(2,0), m0
    ADDPD_MEM COVAR(2,1), m1
    ADDPD_MEM COVAR(2,2), m2
    ADDPD_MEM COVAR(2,3), m3
    mova    m3, [varq + jq*8 + 32]
    ADDPD_MEM COVAR(0,0), m4
    ADDPD_MEM COVAR(0,1), m5
    ADDPD_MEM COVAR(0,2), m6
    ADDPD_MEM COVAR(0,3), m7
    add covar2q, 4*COVAR_STRIDE+32
    %define covarq covar2q
    movsd   m0, [varq + iq*8]
    movlhps m0, m0          ; duplicate the scalar into both lanes
    mulpd   m0, [varq + jq*8]
    ADDPD_MEM COVAR(0,0), m0
    add covarq, COVAR_STRIDE

%if HAVE_AVX_EXTERNAL
INIT_YMM avx
cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
    %define covarq ctxq
    mov  countd, [ctxq + LLSModel2.indep_count]
    lea count2d, [countq-2]
    ; Compute all 10 pairwise products of a 4x4 block that lies on the diagonal
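    ; Scalar equivalent (a sketch): accumulate var[y]*var[x] for every
    ; unordered pair {x, y} drawn from i..i+3 (4+3+2+1 = 10 distinct products).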
    mova    ymm1, [varq + iq*8]
    vbroadcastsd ymm4, [varq + iq*8]
    vbroadcastsd ymm5, [varq + iq*8 + 8]
    vbroadcastsd ymm6, [varq + iq*8 + 16]
    vbroadcastsd ymm7, [varq + iq*8 + 24]
    vextractf128 xmm3, ymm1, 1
    vmulpd  ymm0, ymm1, ymm4
    vmulpd  ymm1, ymm1, ymm5
    vmulpd  xmm2, xmm3, xmm6
    vmulpd  xmm3, xmm3, xmm7
    ADDPD_MEM COVAR(iq  ,0), ymm0
    ADDPD_MEM COVAR(iq  ,1), ymm1
    ADDPD_MEM COVAR(iq+2,2), xmm2
    ADDPD_MEM COVAR(iq+2,3), xmm3
    ; Compute all 16 pairwise products of a 4x4 block
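    ; Same 4x4 update as the sse2 path (a sketch): for y in i..i+3 and
    ; x in j..j+3, covar[y][x] += var[y]*var[x], with var[i..i+3] kept
    ; broadcast in ymm4-ymm7 across the whole row of blocks.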
    mova    ymm3, [varq + jq*8]
    vmulpd  ymm0, ymm3, ymm4
    vmulpd  ymm1, ymm3, ymm5
    vmulpd  ymm2, ymm3, ymm6
    vmulpd  ymm3, ymm3, ymm7
    ADDPD_MEM COVAR(jq,0), ymm0
    ADDPD_MEM COVAR(jq,1), ymm1
    ADDPD_MEM COVAR(jq,2), ymm2
    ADDPD_MEM COVAR(jq,3), ymm3
    mova    xmm3, [varq + jq*8]
    vmulpd  xmm0, xmm3, xmm4
    vmulpd  xmm1, xmm3, xmm5
    vmulpd  xmm2, xmm3, xmm6
    vmulpd  xmm3, xmm3, xmm7
    ADDPD_MEM COVAR(jq,0), xmm0
    ADDPD_MEM COVAR(jq,1), xmm1
    ADDPD_MEM COVAR(jq,2), xmm2
    ADDPD_MEM COVAR(jq,3), xmm3
    add covarq, 4*COVAR_STRIDE
    vmovddup xmm0, [varq + iq*8]
    vmulpd   xmm0, [varq + jq*8]
    ADDPD_MEM COVAR(jq,0), xmm0
    add covarq, COVAR_STRIDE
%endif

INIT_XMM sse2
; double evaluate_lls(LLSModel2 *ctx, const double *var, int order)
; (prototype assumed from the C side; returns the dot product of var[] with
; row `order` of ctx->coeff)
cglobal evaluate_lls, 3,4,2, ctx, var, order, i
    ; This function is often called on the same buffer as update_lls, but with
    ; an offset. They can't both be aligned.
    ; Load halves rather than movu to avoid store-forwarding stalls, since the
    ; input was initialized immediately prior to this function using scalar math.
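    ; What this computes, as a scalar sketch (assuming the usual C reference
    ; implementation of evaluate_lls):
    ;   double sum = 0;
    ;   for (k = 0; k <= order; k++)
    ;       sum += var[k] * ctx->coeff[order][k];
    ;   return sum;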
    %define coefsq ctxq
    mov     id, orderd
    imul    orderd, MAX_VARS
    lea coefsq, [ctxq + LLSModel2.coeff + orderq*8]
    movsd   m0, [varq]
    movhpd  m0, [varq + 8]
    mulpd   m0, [coefsq]
    lea coefsq, [coefsq + iq*8]
    lea   varq, [varq + iq*8]
    movsd   m1, [varq + iq*8]
    movhpd  m1, [varq + iq*8 + 8]
    mulpd   m1, [coefsq + iq*8]
    movsd   m1, [varq + iq*8]
    mulsd   m1, [coefsq + iq*8]