/*
 * Copyright (c) 2013 RISC OS Open Ltd
 * Author: Ben Avison <bavison@riscosopen.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
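
@ VFP short-vector implementation of the DCA (DTS) synthesis filterbank
@ window: the counterpart of ff_synth_filter_float() in
@ libavcodec/synth_filter.c. With the FPSCR LEN field set to 4 below, each
@ arithmetic instruction in this file processes four floats at once.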
SBUF_DAT_ALT0   .req    s20
SBUF_DAT_ALT1   .req    s21
SBUF_DAT_ALT2   .req    s22
SBUF_DAT_ALT3   .req    s23
WIN_DN_DAT0     .req    s24
WIN_UP_DAT0     .req    s28
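
@ The inner_loop macro below expands one 64-float block of the windowed
@ accumulation. Loads alternate between the SBUF_DAT and SBUF_DAT_ALT
@ register banks on successive calls, so the multiply-accumulates on one
@ block can overlap the loads of the next block without being overwritten;
@ the vmovs build a word-reversed copy (SBUF_DAT_REV) of each block to feed
@ the descending-window MAC issued by the following call.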
.macro inner_loop  half, tail, head
 .if (OFFSET & (64*4)) == 0                     @ even numbered call
  SBUF_DAT_THIS0 .req SBUF_DAT0
  SBUF_DAT_THIS1 .req SBUF_DAT1
  SBUF_DAT_THIS2 .req SBUF_DAT2
  SBUF_DAT_THIS3 .req SBUF_DAT3
  .ifnc "\head",""
        vldr    d8, [P_SB, #OFFSET]             @ d8 = SBUF_DAT
        vldr    d9, [P_SB, #OFFSET+8]
  .endif
 .else                                          @ odd numbered call
  SBUF_DAT_THIS0 .req SBUF_DAT_ALT0
  SBUF_DAT_THIS1 .req SBUF_DAT_ALT1
  SBUF_DAT_THIS2 .req SBUF_DAT_ALT2
  SBUF_DAT_THIS3 .req SBUF_DAT_ALT3
  .ifnc "\head",""
        vldr    d10, [P_SB, #OFFSET]            @ d10 = SBUF_DAT_ALT
        vldr    d11, [P_SB, #OFFSET+8]
  .endif
 .endif
 .ifnc "\tail",""
  .ifc "\half","ab"
        vmls.f  VA0, SBUF_DAT_REV0, WIN_DN_DAT0 @ all operands treated as vectors
  .else
        vmla.f  VD0, SBUF_DAT_REV0, WIN_DN_DAT0 @ all operands treated as vectors
  .endif
 .endif
 .ifnc "\head",""
        vldr    d14, [P_WIN_UP, #OFFSET]        @ d14 = WIN_UP_DAT
        vldr    d15, [P_WIN_UP, #OFFSET+8]
        vldr    d12, [P_WIN_DN, #OFFSET]        @ d12 = WIN_DN_DAT
        vldr    d13, [P_WIN_DN, #OFFSET+8]
        vmov    SBUF_DAT_REV3, SBUF_DAT_THIS0   @ reverse the block for the next call's descending MAC
        vmov    SBUF_DAT_REV2, SBUF_DAT_THIS1
        vmov    SBUF_DAT_REV1, SBUF_DAT_THIS2
        vmov    SBUF_DAT_REV0, SBUF_DAT_THIS3
  .ifc "\half","ab"
        vmla.f  VB0, SBUF_DAT_THIS0, WIN_UP_DAT0
  .else
        vmla.f  VC0, SBUF_DAT_THIS0, WIN_UP_DAT0
  .endif
        teq     J_WRAP, #J                      @ have we just read the last block before the buffer end?
        bne     2f                              @ strongly predictable, so better than cond exec in this case
        sub     P_SB, P_SB, #512*4              @ wrap P_SB back to the start of synth_buf
2:      .set    J, J - 64
        .set    OFFSET, OFFSET + 64*4
 .endif
 .unreq SBUF_DAT_THIS0
 .unreq SBUF_DAT_THIS1
 .unreq SBUF_DAT_THIS2
 .unreq SBUF_DAT_THIS3
.endm
/* void ff_synth_filter_float_vfp(FFTContext *imdct,
 *                                float *synth_buf_ptr, int *synth_buf_offset,
 *                                float synth_buf2[32], const float window[512],
 *                                float out[32], const float in[32], float scale)
 */
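/* For orientation, a rough scalar model of what the two vector passes below
 * compute (sketch only: ring wrapping and the exact window indexing are
 * simplified, and wu/wd are illustrative tap offsets -- see
 * libavcodec/synth_filter.c for the reference implementation):
 *
 *     synth_buf = synth_buf_ptr + *synth_buf_offset;
 *     imdct_half(imdct, synth_buf, in);            // 32 fresh subband samples
 *     for (i = 0; i < 16; i++) {
 *         a = synth_buf2[i];                       // VA: resume last call's sum
 *         b = synth_buf2[i + 16];                  // VB: resume last call's sum
 *         c = d = 0;                               // VC/VD: start new sums
 *         for (j = 0; j < 512; j += 64) {          // 8 blocks around the ring
 *             a -= window[wd + i + j] * synth_buf[(j + 15 - i) & 511];
 *             b += window[wu + i + j] * synth_buf[(j + i)      & 511];
 *             c += window[wu + 16 + i + j] * synth_buf[(j + i)      & 511];
 *             d += window[wd + 16 + i + j] * synth_buf[(j + 15 - i) & 511];
 *         }
 *         out[i]      = a * scale;                 // pass 1 scales into out[]
 *         out[i + 16] = b * scale;
 *         synth_buf2[i + 16] = c;                  // pass 2 updates the state
 *         synth_buf2[i]      = d;                  @   for the next call
 *     }
 *     *synth_buf_offset = (*synth_buf_offset - 32) & 511;
 */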
function ff_synth_filter_float_vfp, export=1
        push    {a4,v1-v4,lr}
        vpush   {s16-s31}
        ldr     lr, [P_SB_OFF]                  @ lr = *synth_buf_offset, in floats
        add     a2, ORIG_P_SB, lr, lsl #2       @ calculate synth_buf to pass to imdct_half
        mov     P_SB, a2                        @ and keep a copy for ourselves
        bic     J_WRAP, lr, #63                 @ mangled to make testing for wrap easier in inner loop
        sub     lr, lr, #32
        and     lr, lr, #512-32
        str     lr, [P_SB_OFF]                  @ rotate offset, modulo buffer size, ready for next call
        ldr     a3, [sp, #(16+6+2)*4]           @ fetch in from stack, to pass to imdct_half
VFP     vmov    s16, SCALE                      @ imdct_half is free to corrupt s0, but it contains one of our arguments in hardfp case
        bl      X(ff_imdct_half_vfp)
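@ ff_imdct_half_vfp writes the 32 new subband samples for this call into the
@ current ring slot of synth_buf (passed in a2 above), transformed from in[]
@ (a3).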
VFP     vmov    SCALE, s16                      @ recover SCALE now that imdct_half has returned
        fmrx    OLDFPSCR, FPSCR                 @ save FPSCR so it can be restored on exit
        ldr     lr, =0x03030000                 @ RunFast mode, short vectors of length 4, stride 1
        fmxr    FPSCR, lr
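@ 0x03030000: bits 24-25 set FZ and DN (RunFast: flush-to-zero, default NaN);
@ LEN (bits 16-18) = 3 selects short vectors of 4 registers with stride 1.
@ An arithmetic destination in s8-s31 then makes the operation 4-wide, while
@ an Fm operand in s0-s7 (such as SCALE) is treated as a scalar.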
        ldr     P_SB2_DN, [sp, #16*4]
        ldr     P_WIN_DN, [sp, #(16+6+0)*4]
        ldr     P_OUT_DN, [sp, #(16+6+1)*4]
NOVFP   vldr    SCALE, [sp, #(16+6+3)*4]
#define IMM_OFF_SKEW 956                        /* also a valid immediate constant when you add 16*4 */
        add     P_SB, P_SB, #IMM_OFF_SKEW       @ so we can use -ve offsets to use full immediate offset range
        add     P_SB2_UP, P_SB2_DN, #16*4
        add     P_WIN_UP, P_WIN_DN, #16*4+IMM_OFF_SKEW
        add     P_OUT_UP, P_OUT_DN, #16*4
        add     P_SB2_DN, P_SB2_DN, #16*4
        add     P_WIN_DN, P_WIN_DN, #12*4+IMM_OFF_SKEW
        add     P_OUT_DN, P_OUT_DN, #16*4
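
@ Pointers work in up/down pairs: the _UP pointers walk forwards while the
@ _DN pointers walk backwards from the midpoint, matching the symmetric
@ halves of the 512-tap window. P_SB and the window pointers are biased by
@ IMM_OFF_SKEW so that OFFSET stays within vldr's +/-1020 byte immediate
@ offset range across all eight blocks.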
        mov     I, #4
1:      vldmia  P_SB2_UP!, {VB0-VB3}            @ resume running sums: synth_buf2[16..31], ascending
        vldmdb  P_SB2_DN!, {VA0-VA3}            @ and synth_buf2[15..0], descending
        .set    J, 512 - 64
        .set    OFFSET, -IMM_OFF_SKEW
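@ Software-pipelined over eight 64-float blocks: the first call only loads
@ ("head"), the last only accumulates ("tail"), and the seven in between do
@ both, overlapping each block's arithmetic with the next block's loads.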
        inner_loop  ab,, head
 .rept 7
        inner_loop  ab, tail, head
 .endr
        inner_loop  ab, tail
        add     P_WIN_UP, P_WIN_UP, #4*4        @ step to the next group's window taps
        sub     P_WIN_DN, P_WIN_DN, #4*4
        vmul.f  VB0, VB0, SCALE                 @ SCALE treated as scalar
        add     P_SB, P_SB, #(512+4)*4          @ exactly one wrap occurred above, so this nets P_SB += 4 floats
        subs    I, I, #1
        vmul.f  VA0, VA0, SCALE
        vstmia  P_OUT_UP!, {VB0-VB3}
        vstmdb  P_OUT_DN!, {VA0-VA3}
        bne     1b
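
@ Second pass: identical loop structure, but VC/VD accumulate from zero and
@ the results become the new synth_buf2 contents (the running sums picked up
@ by VA/VB on the next call) rather than being scaled into out[].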
        add     P_SB2_DN, P_SB2_DN, #(16+28-12)*4
        sub     P_SB2_UP, P_SB2_UP, #(16+16)*4
        add     P_WIN_DN, P_WIN_DN, #(32+16+28-12)*4
        mov     I, #4
1:
        vldr.d  d4, zero                        @ d4 = VC0
        vldr.d  d5, zero
        vldr.d  d6, zero                        @ d6 = VD0
        vldr.d  d7, zero
        .set    J, 512 - 64
        .set    OFFSET, -IMM_OFF_SKEW
        inner_loop  cd,, head
 .rept 7
        inner_loop  cd, tail, head
 .endr
        inner_loop  cd, tail
        add     P_WIN_UP, P_WIN_UP, #4*4
        sub     P_WIN_DN, P_WIN_DN, #4*4
        add     P_SB, P_SB, #(512+4)*4          @ as in pass 1, nets P_SB += 4 floats
        subs    I, I, #1
        vstmia  P_SB2_UP!, {VC0-VC3}            @ new synth_buf2[16..31]
        vstmdb  P_SB2_DN!, {VD0-VD3}            @ new synth_buf2[15..0]
        bne     1b