2 * Copyright (c) 2008 Siarhei Siamashka <ssvb@users.sourceforge.net>
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 .fpu neon @ required for gas to accept UAL syntax
26 * VFP is a floating point coprocessor used in some ARM cores. VFP11 has 1 cycle
27 * throughput for almost all the instructions (except for double precision
28 * arithmetic), but rather high latency. Latency is 4 cycles for loads and 8 cycles
29 * for arithmetic operations. Scheduling code to avoid pipeline stalls is very
30 * important for performance. One more interesting feature is that VFP has
31 * independent load/store and arithmetic pipelines, so it is possible to make
32 * them work simultaneously and get more than 1 operation per cycle. Load/store
33 * pipeline can process 2 single precision floating point values per cycle and
34 * supports bulk loads and stores for large sets of registers. Arithmetic operations
35 * can be done on vectors, which makes it possible to keep the arithmetic pipeline busy,
36 * while the processor may issue and execute other instructions. Detailed
37 * optimization manuals can be found at http://www.arm.com
41 * ARM VFP optimized implementation of 'vector_fmul_c' function.
42 * Assume that len is a positive number and is a multiple of 8
44 @ void ff_vector_fmul_vfp(float *dst, const float *src, int len)
@ Elementwise float vector multiply (len > 0, a multiple of 8, per the
@ comment above) using VFP short-vector mode: with FPSCR.LEN set to 4,
@ each vmul below operates on a bank of 4 consecutive s-registers.
@ NOTE(review): this excerpt is elided (embedded numbering jumps
@ 45 -> 49 -> 60): the fmrx that reads FPSCR into r12, the setup of the
@ r3 read pointer, and the compare that produces the ge/gt condition
@ flags are not visible here -- confirm against the complete file.
45 function ff_vector_fmul_vfp, export=1
49 orr r12, r12, #(3 << 16) /* set vector size to 4 */
@ Loop body: loads, multiplies and stores are interleaved so the
@ independent VFP load/store and arithmetic pipelines overlap (see the
@ scheduling notes in the file header comment).
60 vldmiage r3!, {s16-s19}
61 vldmiage r1!, {s24-s27}
62 vldmiage r3!, {s20-s23}
63 vldmiage r1!, {s28-s31}
64 vmulge.f32 s24, s16, s24 @ short-vector: {s24-s27} *= {s16-s19}
67 vmulge.f32 s28, s20, s28 @ short-vector: {s28-s31} *= {s20-s23}
69 vldmiagt r1!, {s8-s11} @ 'gt': prefetch operands for the next
71 vldmiagt r1!, {s12-s15} @ iteration while this one finishes
73 vstmiage r0!, {s24-s27} @ store the 8 products to dst
74 vstmiage r0!, {s28-s31}
77 bic r12, r12, #(7 << 16) /* set vector size back to 1 */
84 * ARM VFP optimized implementation of 'vector_fmul_reverse_c' function.
85 * Assume that len is a positive number and is a multiple of 8
87 @ void ff_vector_fmul_reverse_vfp(float *dst, const float *src0,
88 @ const float *src1, int len)
@ dst[i] = src0[i] * src1[len-1-i]: r2 (src1) is first advanced to its
@ end, then read backwards with decrement-before loads (vldmdb) while
@ src0 (r1) is read forwards (vldmia).
@ NOTE(review): this excerpt is elided (embedded numbering jumps
@ 91 -> 102 and skips 113); the software-pipeline prologue, the
@ compare producing the ge/gt flags, and at least one store (orig line
@ 113, presumably s12-s13) are not visible -- confirm against the
@ complete file.
89 function ff_vector_fmul_reverse_vfp, export=1
91 add r2, r2, r3, lsl #2 @ r2 = &src1[len] (floats are 4 bytes)
@ Loop body: multiplies are interleaved with loads for the next stage.
@ vldmdb still loads ascending register numbers, so the element
@ reversal is realized by using the src1 registers in reversed order
@ within each 4-float group (s7..s4 against s12..s15, s19..s16 against
@ s24..s27, etc.).
102 vldmdbge r2!, {s16-s19}
103 vmul.f32 s12, s7, s12
104 vldmiage r1!, {s24-s27}
105 vmul.f32 s13, s6, s13
106 vldmdbge r2!, {s20-s23}
107 vmul.f32 s14, s5, s14
108 vldmiage r1!, {s28-s31}
109 vmul.f32 s15, s4, s15
110 vmulge.f32 s24, s19, s24
111 vldmdbgt r2!, {s0-s3}
112 vmulge.f32 s25, s18, s25
114 vmulge.f32 s26, s17, s26
115 vldmiagt r1!, {s8-s11}
116 vmulge.f32 s27, s16, s27
117 vmulge.f32 s28, s23, s28
118 vldmdbgt r2!, {s4-s7}
119 vmulge.f32 s29, s22, s29
120 vstmia r0!, {s14-s15} @ presumably paired with a vstmia of
@ s12-s13 on elided orig line 113 -- verify
121 vmulge.f32 s30, s21, s30
122 vmulge.f32 s31, s20, s31
123 vmulge.f32 s8, s3, s8 @ next pipeline stage: s8-s11 group
124 vldmiagt r1!, {s12-s15}
125 vmulge.f32 s9, s2, s9
126 vmulge.f32 s10, s1, s10
127 vstmiage r0!, {s24-s27}
128 vmulge.f32 s11, s0, s11
129 vstmiage r0!, {s28-s31}
138 * ARM VFP optimized float to int16 conversion.
139 * Assume that len is a positive number and is a multiple of 8, destination
140 * buffer is at least 4 bytes aligned (8 bytes alignment is better for
141 * performance), little-endian byte order
143 @ void ff_float_to_int16_vfp(int16_t *dst, const float *src, int len)
@ Converts floats to signed 16-bit integers, 8 elements per iteration,
@ in a software-pipelined loop (conversion of the current group
@ overlaps packing/storing of the previous one).
@ NOTE(review): this excerpt is heavily elided (embedded numbering
@ jumps 147 -> 162 -> 167 and 176 -> 181) and the function continues
@ past the end of the visible chunk; the transfers that move converted
@ s-registers into r3-r8/ip/lr, any saturation to 16-bit range, and
@ the final stores to dst are not visible -- confirm against the
@ complete file.
144 function ff_float_to_int16_vfp, export=1
147 vldmia r1!, {s16-s23} @ load the first 8 input floats
162 vldmiagt r1!, {s16-s23} @ 'gt': load the next 8 floats while the
@ previous group is still being packed
167 pkhbt r3, r3, r4, lsl #16 @ pack two 16-bit halves per word:
168 pkhbt r4, r5, r6, lsl #16 @ rD = lo16(rN) | lo16(rM) << 16
169 vcvtgt.s32.f32 s0, s16 @ float -> s32 conversion of the 8
170 vcvtgt.s32.f32 s1, s17 @ values loaded above
171 vcvtgt.s32.f32 s2, s18
172 vcvtgt.s32.f32 s3, s19
173 vcvtgt.s32.f32 s4, s20
174 vcvtgt.s32.f32 s5, s21
175 vcvtgt.s32.f32 s6, s22
176 vcvtgt.s32.f32 s7, s23
181 pkhbt r5, r7, r8, lsl #16
182 pkhbt r6, ip, lr, lsl #16