;******************************************************************************
;* SSE-optimized functions for the DCA decoder
;* Copyright (C) 2012-2014 Christophe Gisquet <christophe.gisquet@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

pf_inv16:  times 4 dd 0x3D800000 ; 1/16
; void decode_hf(float dst[DCA_SUBBANDS][8], const int32_t vq_num[DCA_SUBBANDS],
;                const int8_t hf_vq[1024][32], intptr_t vq_offset,
;                int32_t scale[DCA_SUBBANDS][2], intptr_t start, intptr_t end)
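;
; Approximate scalar reference for what is vectorized below (an illustrative
; sketch, not verbatim dcadsp.c): each subband l gets 8 floats taken from the
; 32-byte dictionary row selected by vq_num[l], scaled by scale[l][0] / 16.
;
;~ for (l = start; l < end; l++) {
;~     const int8_t *ptr = &hf_vq[vq_num[l]][vq_offset];
;~     float fscale = scale[l][0] * (1 / 16.0f);
;~     for (i = 0; i < 8; i++)
;~         dst[l][i] = ptr[i] * fscale;
;~ }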
cglobal decode_hf, 6,6,5, dst, num, src, offset, scale, start, end
    lea       srcq, [srcq + offsetq]
    mov    offsetd, [scaleq + 2 * startq]
    cvtsi2ss    m0, [scaleq + 2 * startq]
    mov    offsetd, [numq + startq]
    pmovsxbd    m1, [srcq + DICT + 0]
    pmovsxbd    m2, [srcq + DICT + 4]
    movq        m1, [srcq + DICT]
    movd       mm0, [srcq + DICT + 0]
    movd       mm1, [srcq + DICT + 4]
    mova [dstq + 8 * startq +  0], m1
    mova [dstq + 8 * startq + 16], m2
; %1=v0/v1 %2=in1 %3=in2
%define OFFSET NUM_COEF*count
    ; for v0, incrementing and for v1, decrementing
    mova        va, [cf0q + OFFSET]
    mova        vb, [cf0q + OFFSET + 4*NUM_COEF]
    mova        m4, [cf0q + OFFSET + mmsize]
    mova        m0, [cf0q + OFFSET + 4*NUM_COEF + mmsize]
    ; va = va1 va2 va3 va4
    ; vb = vb1 vb2 vb3 vb4
    unpcklps    va, vb ; va1 vb1 va2 vb2
    unpckhps    m4, vb ; va3 vb3 va4 vb4
    addps       m4, va ; va1+3 vb1+3 va2+4 vb2+4
    movhlps     vb, m4 ; va2+4 vb2+4
    addps       vb, m4 ; va1..4 vb1..4
    movlps  [outq + count], vb
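    ; The unpack/add sequence above is a dual horizontal sum; in scalar terms
    ; the two consecutive floats written by the movlps are
    ;~ out[n]     = va1 + va2 + va3 + va4;
    ;~ out[n + 1] = vb1 + vb2 + vb3 + vb4;
    ; with n the output position addressed by count.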
; void dca_lfe_fir(float *out, float *in, float *coefs)
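;
; One plausible scalar model of a FIR_LOOP iteration (an illustrative sketch,
; not verbatim dcadsp.c; v0, v1 and out2 are hypothetical names): two symmetric
; dot products are computed per step, one written from the start of the output
; block and one from its end.
;
;~ float v0 = 0, v1 = 0;
;~ for (j = 0; j < NUM_COEF; j++, coefs++) {
;~     v0 += in[-j]               * *coefs;
;~     v1 += in[j + 1 - NUM_COEF] * *coefs;
;~ }
;~ *out++  = v0;
;~ *out2-- = v1;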
cglobal dca_lfe_fir%1, 3,3,6-%1, out, in, cf0
%define NUM_COEF 4*(2-%1)
%define NUM_OUT  32*(%1+1)
    movu       IN1, [inq + 4 - 1*mmsize]
    shufps     IN1, IN1, q0123
    movu       IN2, [inq + 4 - 2*mmsize]
    shufps     IN2, IN2, q0123
    mov      count, -4*NUM_OUT
    add       cf0q, 4*NUM_COEF*NUM_OUT
    shufps     IN1, IN1, q0123
    mov      count, -4*NUM_OUT
    ; cf0q already points at the "cf1" (second-pass) coefficients
    add       outq, 4*NUM_OUT ; outq now at out2
    shufps     IN2, IN2, q0123
%if cpuflag(sse2) && notcpuflag(avx)
    vperm2f128    %1, %3, %3, 1
    vshufps       %1, %1, %1, q0123
    pshufd        %1, [%2], q0123
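; All SHUF variants above implement a reversed vector load: the packed floats
; fetched into %1 end up in reverse order (pshufd/vshufps with q0123, plus the
; 128-bit lane swap on AVX), which is how the loops below read synth_buf
; backwards with vector loads.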
    ; reading backwards: ptr1 = synth_buf + j + i; ptr2 = synth_buf + j - i
    ;~ a += window[i + j]      * (-synth_buf[15 - i + j])
    ;~ b += window[i + j + 16] * (synth_buf[i + j])
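    ; The SHUF below reverses the synth_buf vector it fetches through ptr2, so
    ; vector lane k ends up holding synth_buf[15 - (i + k) + j] and lines up
    ; with the window[] operands loaded by plain mova.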
    SHUF          m5,  ptr2 + j + (15 - 3) * 4, m6
    SHUF         m11,  ptr2 + j + (15 - 3) * 4 - mmsize, m12
    mova         m12, [ptr1 + j + mmsize]
    fmaddps       m2, m6,  [win + %1 + j + 16 * 4], m2
    fnmaddps      m1, m5,  [win + %1 + j], m1
    fmaddps       m8, m12, [win + %1 + j + mmsize + 16 * 4], m8
    fnmaddps      m7, m11, [win + %1 + j + mmsize], m7
    mulps         m6, m6,  [win + %1 + j + 16 * 4]
    mulps         m5, m5,  [win + %1 + j]
    mulps        m12, m12, [win + %1 + j + mmsize + 16 * 4]
    mulps        m11, m11, [win + %1 + j + mmsize]
%endif ; cpuflag(fma3)
    ;~ c += window[i + j + 32] * (synth_buf[16 + i + j])
    ;~ d += window[i + j + 48] * (synth_buf[31 - i + j])
    SHUF          m6,  ptr2 + j + (31 - 3) * 4, m5
    mova          m5, [ptr1 + j + 16 * 4]
    SHUF         m12,  ptr2 + j + (31 - 3) * 4 - mmsize, m11
    mova         m11, [ptr1 + j + mmsize + 16 * 4]
    fmaddps       m3, m5,  [win + %1 + j + 32 * 4], m3
    fmaddps       m4, m6,  [win + %1 + j + 48 * 4], m4
    fmaddps       m9, m11, [win + %1 + j + mmsize + 32 * 4], m9
    fmaddps      m10, m12, [win + %1 + j + mmsize + 48 * 4], m10
    mulps         m5, m5,  [win + %1 + j + 32 * 4]
    mulps         m6, m6,  [win + %1 + j + 48 * 4]
    mulps        m11, m11, [win + %1 + j + mmsize + 32 * 4]
    mulps        m12, m12, [win + %1 + j + mmsize + 48 * 4]
%endif ; cpuflag(fma3)
; void ff_synth_filter_inner_<opt>(float *synth_buf, float synth_buf2[32],
;                                  const float window[512], float out[32],
;                                  intptr_t offset, float scale)
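;
; Scalar model of the work per output index i, assembled from the ;~ comments
; in the inner loop above (an illustrative sketch, not verbatim C code); only
; the non-wrapping part of the circular synth_buf read is shown, the second
; inner loop handles the wrapped tail the same way:
;
;~ for (i = 0; i < 16; i++) {
;~     float a = synth_buf2[i], b = synth_buf2[i + 16], c = 0, d = 0;
;~     for (j = 0; j < 512 - offset; j += 64) {
;~         a += window[i + j]      * -synth_buf[15 - i + j];
;~         b += window[i + j + 16] *  synth_buf[     i + j];
;~         c += window[i + j + 32] *  synth_buf[16 + i + j];
;~         d += window[i + j + 48] *  synth_buf[31 - i + j];
;~     }
;~     out[i]             = a * scale;
;~     out[i + 16]        = b * scale;
;~     synth_buf2[i]      = c;
;~     synth_buf2[i + 16] = d;
;~ }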
%macro SYNTH_FILTER 0
cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \
                              synth_buf, synth_buf2, window, out, off, scale
%if ARCH_X86_32 || WIN64
%if cpuflag(sse2) && notcpuflag(avx)
    VBROADCASTSS  m0, scalem
    ; Make sure offset is in a register and not on the stack
    vinsertf128   m0, m0, xmm0, 1
    ; prepare inner counter limit 1
%if ARCH_X86_32 || notcpuflag(avx)
    mov            i, 16 * 4 - (ARCH_X86_64 + 1) * mmsize ; main loop counter
%define buf2 synth_buf2q
    mov         buf2, synth_buf2mp
    ; m1 = a  m2 = b  m3 = c  m4 = d
    mova          m2, [buf2 + i + 16 * 4]
%if ARCH_X86_32 || notcpuflag(avx)
%define ptr2 r7q ; must be loaded
    mova          m7, [buf2 + i + mmsize]
    mova          m8, [buf2 + i + mmsize + 16 * 4]
    lea          win, [windowq + i]
    lea         ptr1, [synth_bufq + i]
    mov         ptr2, synth_bufmp
    ; prepare the inner loop counter
%if ARCH_X86_32 || notcpuflag(avx)
    add          win, OFFQ ; now at j-64, so define OFFSET
    mov         buf2, synth_buf2m ; needed for next iteration anyway
    mov         outq, outmp       ; outq reuses j's register; j is set again next iteration
    ;~ out[i]      = a * scale;
    ;~ out[i + 16] = b * scale;
    ;~ synth_buf2[i]      = c;
    ;~ synth_buf2[i + 16] = d;
    mova  [buf2 + i +  0 * 4], m3
    mova  [buf2 + i + 16 * 4], m4
    mova  [buf2 + i +  0 * 4 + mmsize], m9
    mova  [buf2 + i + 16 * 4 + mmsize], m10
    mova  [outq + i +  0 * 4], m1
    mova  [outq + i + 16 * 4], m2
    mova  [outq + i +  0 * 4 + mmsize], m7
    mova  [outq + i + 16 * 4 + mmsize], m8
%if ARCH_X86_32 || notcpuflag(avx)
    sub            i, (ARCH_X86_64 + 1) * mmsize