1 ;******************************************************************************
2 ;* x86 optimized channel mixing
3 ;* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
5 ;* This file is part of Libav.
7 ;* Libav is free software; you can redistribute it and/or
8 ;* modify it under the terms of the GNU Lesser General Public
9 ;* License as published by the Free Software Foundation; either
10 ;* version 2.1 of the License, or (at your option) any later version.
12 ;* Libav is distributed in the hope that it will be useful,
13 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
14 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 ;* Lesser General Public License for more details.
17 ;* You should have received a copy of the GNU Lesser General Public
18 ;* License along with Libav; if not, write to the Free Software
19 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 ;******************************************************************************
22 %include "libavutil/x86/x86util.asm"
27 ;-----------------------------------------------------------------------------
28 ; void ff_mix_2_to_1_fltp_flt(float **src, float **matrix, int len,
29 ; int out_ch, int in_ch);
30 ;-----------------------------------------------------------------------------
32 %macro MIX_2_TO_1_FLTP_FLT 0
; Stereo -> mono in-place downmix for planar float audio:
;   src[0][i] = src[0][i]*matrix[0][0] + src[1][i]*matrix[0][1]
; NOTE(review): this listing is a fragment (embedded source line numbers jump
; 34->37, 39->43, 45->49): the conversion of src1q into a byte offset, the
; loop label/counter update, the loads/adds for the first chunk and the
; closing %endmacro are NOT visible here; comments describe only what is shown.
33 cglobal mix_2_to_1_fltp_flt, 3,4,6, src, matrix, len, src1
34 mov src1q, [srcq+gprsize]   ; src1q = src[1] (second channel plane)
37 mov matrixq, [matrixq ]   ; matrixq = matrix[0] (output row of coeffs)
38 VBROADCASTSS m4, [matrixq ]   ; m4 = splat(matrix[0][0])
39 VBROADCASTSS m5, [matrixq+4]   ; m5 = splat(matrix[0][1])
; Loop body, unrolled x2 (two mmsize chunks per iteration).  src1q is used as
; a byte offset from srcq below, so the missing lines presumably did
; "sub src1q, srcq" (and biased srcq by lenq) -- TODO confirm against full file.
43 mulps m1, m5, [srcq+src1q ]   ; m1 = c1 * src1, chunk 0
44 mulps m2, m4, [srcq+ mmsize]   ; m2 = c0 * src0, chunk 1
45 mulps m3, m5, [srcq+src1q+mmsize]   ; m3 = c1 * src1, chunk 1
; (the addps combining the partial products is not visible in this fragment)
49 mova [srcq+mmsize], m2   ; write chunk-1 result back into src[0]
61 ;-----------------------------------------------------------------------------
62 ; void ff_mix_2_to_1_s16p_flt(int16_t **src, float **matrix, int len,
63 ; int out_ch, int in_ch);
64 ;-----------------------------------------------------------------------------
66 %macro MIX_2_TO_1_S16P_FLT 0
; Stereo -> mono in-place downmix for planar s16 audio using float matrix
; coefficients (s16 -> float -> multiply/add -> s16 round-trip expected).
; NOTE(review): fragment -- only the setup is visible (source lines 69-70 and
; everything after 73, including the conversion/mix loop and %endmacro, are
; missing from this listing).
67 cglobal mix_2_to_1_s16p_flt, 3,4,6, src, matrix, len, src1
68 mov src1q, [srcq+gprsize]   ; src1q = src[1] (second channel plane)
71 mov matrixq, [matrixq ]   ; matrixq = matrix[0]
72 VBROADCASTSS m4, [matrixq ]   ; m4 = splat(matrix[0][0])
73 VBROADCASTSS m5, [matrixq+4]   ; m5 = splat(matrix[0][1])
105 ;-----------------------------------------------------------------------------
106 ; void ff_mix_2_to_1_s16p_q8(int16_t **src, int16_t **matrix, int len,
107 ; int out_ch, int in_ch);
108 ;-----------------------------------------------------------------------------
; Stereo -> mono in-place downmix for planar s16 audio with Q8 fixed-point
; (int16) matrix coefficients -- no float conversion needed.
; NOTE(review): fragment -- coefficient splat (lines 116-125), the mix loop
; arithmetic, stores, and function end are not visible in this listing.
111 cglobal mix_2_to_1_s16p_q8, 3,4,6, src, matrix, len, src1
112 mov src1q, [srcq+gprsize]   ; src1q = src[1] (second channel plane)
115 mov matrixq, [matrixq]   ; matrixq = matrix[0]
126 mova m2, [srcq+src1q]   ; load src[1] chunk (src1q used as byte offset; the "sub src1q, srcq" is presumably in the missing lines -- TODO confirm)
146 ;-----------------------------------------------------------------------------
147 ; void ff_mix_1_to_2_fltp_flt(float **src, float **matrix, int len,
148 ; int out_ch, int in_ch);
149 ;-----------------------------------------------------------------------------
151 %macro MIX_1_TO_2_FLTP_FLT 0
; Mono -> stereo upmix for planar float audio:
;   src[1][i] = src[0][i]*matrix[1][0]; src[0][i] = src[0][i]*matrix[0][0]
; (src[0] must be written last since it is the input).
; NOTE(review): fragment -- lines 154-155 and 160-165 (offset setup, loop
; label, the mulps producing m0/m1, the src[0] store, loop branch, %endmacro)
; are missing from this listing.
152 cglobal mix_1_to_2_fltp_flt, 3,5,4, src0, matrix0, len, src1, matrix1
153 mov src1q, [src0q+gprsize]   ; src1q = src[1] (output channel plane)
156 mov matrix1q, [matrix0q+gprsize]   ; matrix1q = matrix[1]
157 mov matrix0q, [matrix0q]   ; matrix0q = matrix[0]
158 VBROADCASTSS m2, [matrix0q]   ; m2 = splat(matrix[0][0])
159 VBROADCASTSS m3, [matrix1q]   ; m3 = splat(matrix[1][0])
166 mova [src0q+src1q], m1   ; store channel-1 result (src1q as byte offset from src0q; offset conversion presumed in missing lines -- TODO confirm)
178 ;-----------------------------------------------------------------------------
179 ; void ff_mix_1_to_2_s16p_flt(int16_t **src, float **matrix, int len,
180 ; int out_ch, int in_ch);
181 ;-----------------------------------------------------------------------------
183 %macro MIX_1_TO_2_S16P_FLT 0
; Mono -> stereo upmix for planar s16 audio using float matrix coefficients.
; NOTE(review): fragment -- lines 186-187 and 192-208 (offset setup, the
; s16<->float conversion and multiply loop, the src[0] store, %endmacro) are
; missing from this listing; only setup and one store are shown.
184 cglobal mix_1_to_2_s16p_flt, 3,5,6, src0, matrix0, len, src1, matrix1
185 mov src1q, [src0q+gprsize]   ; src1q = src[1] (output channel plane)
188 mov matrix1q, [matrix0q+gprsize]   ; matrix1q = matrix[1]
189 mov matrix0q, [matrix0q]   ; matrix0q = matrix[0]
190 VBROADCASTSS m4, [matrix0q]   ; m4 = splat(matrix[0][0])
191 VBROADCASTSS m5, [matrix1q]   ; m5 = splat(matrix[1][0])
209 mova [src0q+src1q], m1   ; store channel-1 result (src1q as byte offset from src0q; conversion presumed in missing lines -- TODO confirm)
223 ;-----------------------------------------------------------------------------
224 ; void ff_mix_3_8_to_1_2_fltp/s16p_flt(float/int16_t **src, float **matrix,
225 ; int len, int out_ch, int in_ch);
226 ;-----------------------------------------------------------------------------
228 %macro MIX_3_8_TO_1_2_FLT 3 ; %1 = in channels, %2 = out channels, %3 = s16p or fltp
; Generic N-channel (3..8) to mono/stereo downmix, instantiated per
; (in_channels, out_channels, sample format) combination at assembly time.
; Matrix coefficients that do not fit in the available mm registers are
; spilled to the stack; on x86-32 with >= 7 inputs, some src pointers are
; also kept on the stack.
; NOTE(review): this listing is heavily fragmented (many %else/%endif/%endrep
; closers, loop labels, the s16 conversion code and %endmacro are missing).
; The preprocessor conditionals below therefore appear unbalanced HERE but
; presumably are balanced in the full file -- do not edit from this excerpt.
229 ; define some names to make the code clearer
230 %assign in_channels %1
231 %assign out_channels %2
232 %assign stereo out_channels - 1
; -- budget of scratch mm registers needed by the mix loop itself;
;    everything above this count can hold splatted matrix coefficients
;    (the %if/%elif lines selecting between these cases are missing here)
239 ; determine how many matrix elements must go on the stack vs. mmregs
240 %assign matrix_elements in_channels * out_channels
243 %assign needed_mmregs 7
245 %assign needed_mmregs 5
249 %assign needed_mmregs 4
251 %assign needed_mmregs 3
254 %assign matrix_elements_mm num_mmregs - needed_mmregs
255 %if matrix_elements < matrix_elements_mm
256 %assign matrix_elements_mm matrix_elements   ; never claim more regs than coeffs
258 %if matrix_elements_mm < matrix_elements
259 %assign matrix_elements_stack matrix_elements - matrix_elements_mm   ; overflow coeffs spill to stack
261 %assign matrix_elements_stack 0
263 %assign matrix_stack_size matrix_elements_stack * mmsize
; negative value tells cglobal to allocate (aligned) stack space
265 %assign needed_stack_size -1 * matrix_stack_size
266 %if ARCH_X86_32 && in_channels >= 7
267 %assign needed_stack_size needed_stack_size - 16   ; extra room for spilled src pointers (3 x 4B, padded)
270 cglobal mix_%1_to_%2_%3_flt, 3,in_channels+2,needed_mmregs+matrix_elements_mm, needed_stack_size, src0, src1, len, src2, src3, src4, src5, src6, src7
272 ; define src pointers on stack if needed
273 %if matrix_elements_stack > 0 && ARCH_X86_32 && in_channels >= 7
274 %define src5m [rsp+matrix_stack_size+0]
275 %define src6m [rsp+matrix_stack_size+4]
276 %define src7m [rsp+matrix_stack_size+8]
279 ; load matrix pointers
280 ; (the %if stereo guard for matrix1q appears to be in the missing lines)
283 mov matrix1q, [matrix0q+gprsize]   ; matrix1q = matrix[1] (second output row)
285 mov matrix0q, [matrix0q]   ; matrix0q = matrix[0]
287 ; define matrix coeff names
; mx_0_<i> / mx_1_<i> name either an mm register or a stack slot for
; matrix[0][i] / matrix[1][i]; mx_stack_*_<i> records which one it is.
; (%assign %%i 0 and the %rep driving this loop are in the missing lines)
289 %assign %%j needed_mmregs
291 %if %%i >= matrix_elements_mm
292 CAT_XDEFINE mx_stack_0_, %%i, 1
293 CAT_XDEFINE mx_0_, %%i, [rsp+(%%i-matrix_elements_mm)*mmsize]
295 CAT_XDEFINE mx_stack_0_, %%i, 0
296 CAT_XDEFINE mx_0_, %%i, m %+ %%j
304 %if in_channels + %%i >= matrix_elements_mm
305 CAT_XDEFINE mx_stack_1_, %%i, 1
306 CAT_XDEFINE mx_1_, %%i, [rsp+(in_channels+%%i-matrix_elements_mm)*mmsize]
308 CAT_XDEFINE mx_stack_1_, %%i, 0
309 CAT_XDEFINE mx_1_, %%i, m %+ %%j
316 ; load/splat matrix coeffs
; stack-resident coeffs are splatted through m0 then stored; register-resident
; coeffs are splatted directly into their assigned mm register
319 %if mx_stack_0_ %+ %%i
320 VBROADCASTSS m0, [matrix0q+4*%%i]
321 mova mx_0_ %+ %%i, m0
323 VBROADCASTSS mx_0_ %+ %%i, [matrix0q+4*%%i]
326 %if mx_stack_1_ %+ %%i
327 VBROADCASTSS m0, [matrix1q+4*%%i]
328 mova mx_1_ %+ %%i, m0
330 VBROADCASTSS mx_1_ %+ %%i, [matrix1q+4*%%i]
336 ; load channel pointers to registers as offsets from the first channel pointer
; (lenq is presumably converted to a byte count and src0q biased by it in the
; missing lines, so [srcNq+lenq] walks backwards to 0 -- TODO confirm)
342 %rep (in_channels - 1)
343 %if ARCH_X86_32 && in_channels >= 7 && %%i >= 5
344 mov src5q, [src0q+%%i*gprsize]   ; no free gp reg: bounce pointer through src5q...
346 mov src %+ %%i %+ m, src5q   ; ...into its stack slot (src5m/src6m/src7m)
348 mov src %+ %%i %+ q, [src0q+%%i*gprsize]   ; srcNq = src[N]
349 add src %+ %%i %+ q, lenq   ; pre-bias by len so a single counter indexes all planes
357 ; for x86-32 with 7-8 channels we do not have enough gp registers for all src
358 ; pointers, so we have to load some of them from the stack each time
359 %define copy_src_from_stack ARCH_X86_32 && in_channels >= 7 && %%i >= 5
361 ; mix with s16p input
; s16p path: each iteration converts one mmsize chunk of s16 samples to two
; float vectors (m0/m1 accumulate output ch0, m2/m3 output ch1 when stereo);
; the unpack/cvtdq2ps conversion lines are missing from this listing.
362 mova m0, [src0q+lenq]
373 %rep (in_channels - 1)
374 %if copy_src_from_stack
375 %define src_ptr src5q
377 %define src_ptr src %+ %%i %+ q
380 %if copy_src_from_stack
381 mov src_ptr, src %+ %%i %+ m   ; reload spilled pointer each channel iteration
383 mova m4, [src_ptr+lenq]
; FMULADD_PS dst, a, b, add, tmp: dst = a*b + add (fused when FMA available)
387 FMULADD_PS m2, m4, mx_1_ %+ %%i, m2, m6   ; ch1 += in_i(lo) * matrix[1][i]
388 FMULADD_PS m3, m5, mx_1_ %+ %%i, m3, m6   ; ch1 += in_i(hi) * matrix[1][i]
389 FMULADD_PS m0, m4, mx_0_ %+ %%i, m0, m4   ; ch0 += in_i(lo) * matrix[0][i]
390 FMULADD_PS m1, m5, mx_0_ %+ %%i, m1, m5   ; ch0 += in_i(hi) * matrix[0][i]
392 %if copy_src_from_stack
393 mov src_ptr, src %+ %%i %+ m
395 mova m2, [src_ptr+lenq]
399 FMULADD_PS m0, m2, mx_0_ %+ %%i, m0, m4   ; mono: ch0 += in_i(lo) * matrix[0][i]
400 FMULADD_PS m1, m3, mx_0_ %+ %%i, m1, m4   ; mono: ch0 += in_i(hi) * matrix[0][i]
; (float -> s16 repack of m0..m3 is in the missing lines before these stores)
408 mova [src1q+lenq], m2   ; store converted ch1 samples
413 mova [src0q+lenq], m0   ; store converted ch0 samples
415 ; mix with fltp input
416 %if stereo || mx_stack_0_0
417 mova m0, [src0q+lenq]   ; need raw input when it feeds both outputs / stack coeff
422 %if stereo || mx_stack_0_0
425 mulps m0, mx_0_0, [src0q+lenq]   ; mono + reg coeff: fold load into the multiply
428 %rep (in_channels - 1)
429 %if copy_src_from_stack
430 %define src_ptr src5q
431 mov src_ptr, src %+ %%i %+ m
433 %define src_ptr src %+ %%i %+ q
435 ; avoid extra load for mono if matrix is in a mm register
436 %if stereo || mx_stack_0_ %+ %%i
437 mova m2, [src_ptr+lenq]
440 FMULADD_PS m1, m2, mx_1_ %+ %%i, m1, m3   ; ch1 += in_i * matrix[1][i]
442 %if stereo || mx_stack_0_ %+ %%i
443 FMULADD_PS m0, m2, mx_0_ %+ %%i, m0, m2   ; ch0 += in_i * matrix[0][i]
445 FMULADD_PS m0, mx_0_ %+ %%i, [src_ptr+lenq], m0, m1   ; ch0 += coeff * in_i (memory operand form)
449 mova [src0q+lenq], m0   ; store ch0
451 mova [src1q+lenq], m1   ; store ch1 (stereo only; guard is in missing lines)
457 ; zero ymm high halves
464 %macro MIX_3_8_TO_1_2_FLT_FUNCS 0
; Instantiate mix_<N>_to_<1|2>_<fmt>_flt for every N in 3..8, once per SIMD
; flavor (the INIT_XMM/INIT_YMM lines and the %rep/%assign loop driving %%i
; are missing from this listing -- the repeated groups below correspond to
; SSE, SSE2(s16p), AVX and FMA4 instantiations in the full file; groups are
; annotated from their relative position only -- TODO confirm).
468 MIX_3_8_TO_1_2_FLT %%i, 1, fltp
469 MIX_3_8_TO_1_2_FLT %%i, 2, fltp
471 MIX_3_8_TO_1_2_FLT %%i, 1, s16p
472 MIX_3_8_TO_1_2_FLT %%i, 2, s16p
474 MIX_3_8_TO_1_2_FLT %%i, 1, s16p
475 MIX_3_8_TO_1_2_FLT %%i, 2, s16p
476 ; do not use ymm AVX or FMA4 in x86-32 for 6 or more channels due to stack alignment issues
477 %if ARCH_X86_64 || %%i < 6
482 MIX_3_8_TO_1_2_FLT %%i, 1, fltp
483 MIX_3_8_TO_1_2_FLT %%i, 2, fltp
485 MIX_3_8_TO_1_2_FLT %%i, 1, s16p
486 MIX_3_8_TO_1_2_FLT %%i, 2, s16p
487 %if HAVE_FMA4_EXTERNAL
488 %if ARCH_X86_64 || %%i < 6
493 MIX_3_8_TO_1_2_FLT %%i, 1, fltp
494 MIX_3_8_TO_1_2_FLT %%i, 2, fltp
496 MIX_3_8_TO_1_2_FLT %%i, 1, s16p
497 MIX_3_8_TO_1_2_FLT %%i, 2, s16p
; top-level invocation: emit all function bodies
503 MIX_3_8_TO_1_2_FLT_FUNCS