X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Farm%2Ffmtconvert_vfp.S;h=d841802158f3f91cdf2e8bff675306511ca19105;hb=69e6702c013283d0e043e19e079c8eecb08bfd39;hp=1d19e7758b7b09b28fd3f5ce265945c4158bc846;hpb=c73d99e672329c8f2df290736ffc474c360ac4ae;p=ffmpeg

diff --git a/libavcodec/arm/fmtconvert_vfp.S b/libavcodec/arm/fmtconvert_vfp.S
index 1d19e7758b7..d841802158f 100644
--- a/libavcodec/arm/fmtconvert_vfp.S
+++ b/libavcodec/arm/fmtconvert_vfp.S
@@ -1,33 +1,32 @@
 /*
  * Copyright (c) 2008 Siarhei Siamashka
+ * Copyright (c) 2013 RISC OS Open Ltd
  *
- * This file is part of FFmpeg.
+ * This file is part of Libav.
  *
- * FFmpeg is free software; you can redistribute it and/or
+ * Libav is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
- * FFmpeg is distributed in the hope that it will be useful,
+ * Libav is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
+ * License along with Libav; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
 #include "config.h"
-#include "asm.S"
-
-        .syntax unified
+#include "libavutil/arm/asm.S"
 
 /**
  * ARM VFP optimized float to int16 conversion.
  * Assume that len is a positive number and is multiple of 8, destination
  * buffer is at least 4 bytes aligned (8 bytes alignment is better for
- * performance), little endian byte sex
+ * performance), little-endian byte sex.
  */
 @ void ff_float_to_int16_vfp(int16_t *dst, const float *src, int len)
 function ff_float_to_int16_vfp, export=1
@@ -48,6 +47,7 @@ function ff_float_to_int16_vfp, export=1
         vmov            r5, r6, s2, s3
         vmov            r7, r8, s4, s5
         vmov            ip, lr, s6, s7
+        it              gt
         vldmiagt        r1!, {s16-s23}
         ssat            r4, #16, r4
         ssat            r3, #16, r3
@@ -55,10 +55,12 @@ function ff_float_to_int16_vfp, export=1
         ssat            r5, #16, r5
         pkhbt           r3, r3, r4, lsl #16
         pkhbt           r4, r5, r6, lsl #16
+        itttt           gt
         vcvtgt.s32.f32  s0, s16
         vcvtgt.s32.f32  s1, s17
         vcvtgt.s32.f32  s2, s18
         vcvtgt.s32.f32  s3, s19
+        itttt           gt
         vcvtgt.s32.f32  s4, s20
         vcvtgt.s32.f32  s5, s21
         vcvtgt.s32.f32  s6, s22
@@ -75,3 +77,202 @@ function ff_float_to_int16_vfp, export=1
         vpop            {d8-d11}
         pop             {r4-r8,pc}
 endfunc
+
+/**
+ * ARM VFP optimised int32 to float conversion.
+ * Assume len is a multiple of 8, destination buffer is at least 4 bytes aligned
+ * (16 bytes alignment is best for BCM2835), little-endian.
+ */
+@ void ff_int32_to_float_fmul_array8_vfp(FmtConvertContext *c, float *dst, const int32_t *src, const float *mul, int len)
+function ff_int32_to_float_fmul_array8_vfp, export=1
+        push            {lr}
+        ldr             a1, [sp, #4]
+        subs            lr, a1, #3*8
+        bcc             50f                     @ too short to pipeline
+        @ Now need to find (len / 8) % 3. The approximation
+        @ x / 24 = (x * 0xAB) >> 12
+        @ is good for x < 4096, which is true for both AC3 and DCA.
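+        @ (Worked check: x = 4088 gives (4088 * 0xAB) >> 12 = 170 = 4088 / 24;
+        @ the first multiple of 8 where the approximation breaks is x = 4096.)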
+        mov             a1, #0xAB
+        ldr             ip, =0x03070000         @ RunFast mode, short vectors of length 8, stride 1
+        mul             a1, lr, a1
+        vpush           {s16-s31}
+        mov             a1, a1, lsr #12
+        add             a1, a1, a1, lsl #1
+        rsb             a1, a1, lr, lsr #3
+        cmp             a1, #1
+        fmrx            a1, FPSCR
+        fmxr            FPSCR, ip
+        beq             11f
+        blo             10f
+        @ Array is (2 + multiple of 3) x 8 floats long
+        @ drop through...
+        vldmia          a3!, {s16-s23}
+        vldmia          a4!, {s2,s3}
+        vldmia          a3!, {s24-s31}
+        vcvt.f32.s32    s16, s16
+        vcvt.f32.s32    s17, s17
+        vcvt.f32.s32    s18, s18
+        vcvt.f32.s32    s19, s19
+        vcvt.f32.s32    s20, s20
+        vcvt.f32.s32    s21, s21
+        vcvt.f32.s32    s22, s22
+        vcvt.f32.s32    s23, s23
+        vmul.f32        s16, s16, s2
+        @ drop through...
+3:
+        vldmia          a3!, {s8-s15}
+        vldmia          a4!, {s1}
+        vcvt.f32.s32    s24, s24
+        vcvt.f32.s32    s25, s25
+        vcvt.f32.s32    s26, s26
+        vcvt.f32.s32    s27, s27
+        vcvt.f32.s32    s28, s28
+        vcvt.f32.s32    s29, s29
+        vcvt.f32.s32    s30, s30
+        vcvt.f32.s32    s31, s31
+        vmul.f32        s24, s24, s3
+        vstmia          a2!, {s16-s19}
+        vstmia          a2!, {s20-s23}
+2:
+        vldmia          a3!, {s16-s23}
+        vldmia          a4!, {s2}
+        vcvt.f32.s32    s8, s8
+        vcvt.f32.s32    s9, s9
+        vcvt.f32.s32    s10, s10
+        vcvt.f32.s32    s11, s11
+        vcvt.f32.s32    s12, s12
+        vcvt.f32.s32    s13, s13
+        vcvt.f32.s32    s14, s14
+        vcvt.f32.s32    s15, s15
+        vmul.f32        s8, s8, s1
+        vstmia          a2!, {s24-s27}
+        vstmia          a2!, {s28-s31}
+1:
+        vldmia          a3!, {s24-s31}
+        vldmia          a4!, {s3}
+        vcvt.f32.s32    s16, s16
+        vcvt.f32.s32    s17, s17
+        vcvt.f32.s32    s18, s18
+        vcvt.f32.s32    s19, s19
+        vcvt.f32.s32    s20, s20
+        vcvt.f32.s32    s21, s21
+        vcvt.f32.s32    s22, s22
+        vcvt.f32.s32    s23, s23
+        vmul.f32        s16, s16, s2
+        vstmia          a2!, {s8-s11}
+        vstmia          a2!, {s12-s15}
+
+        subs            lr, lr, #8*3
+        bpl             3b
+
+        vcvt.f32.s32    s24, s24
+        vcvt.f32.s32    s25, s25
+        vcvt.f32.s32    s26, s26
+        vcvt.f32.s32    s27, s27
+        vcvt.f32.s32    s28, s28
+        vcvt.f32.s32    s29, s29
+        vcvt.f32.s32    s30, s30
+        vcvt.f32.s32    s31, s31
+        vmul.f32        s24, s24, s3
+        vstmia          a2!, {s16-s19}
+        vstmia          a2!, {s20-s23}
+        vstmia          a2!, {s24-s27}
+        vstmia          a2!, {s28-s31}
+
+        fmxr            FPSCR, a1
+        vpop            {s16-s31}
+        pop             {pc}
+
+10:     @ Array is (multiple of 3) x 8 floats long
+        vldmia          a3!, {s8-s15}
+        vldmia          a4!, {s1,s2}
+        vldmia          a3!, {s16-s23}
+        vcvt.f32.s32    s8, s8
+        vcvt.f32.s32    s9, s9
+        vcvt.f32.s32    s10, s10
+        vcvt.f32.s32    s11, s11
+        vcvt.f32.s32    s12, s12
+        vcvt.f32.s32    s13, s13
+        vcvt.f32.s32    s14, s14
+        vcvt.f32.s32    s15, s15
+        vmul.f32        s8, s8, s1
+        b               1b
+
+11:     @ Array is (1 + multiple of 3) x 8 floats long
+        vldmia          a3!, {s24-s31}
+        vldmia          a4!, {s3}
+        vldmia          a3!, {s8-s15}
+        vldmia          a4!, {s1}
+        vcvt.f32.s32    s24, s24
+        vcvt.f32.s32    s25, s25
+        vcvt.f32.s32    s26, s26
+        vcvt.f32.s32    s27, s27
+        vcvt.f32.s32    s28, s28
+        vcvt.f32.s32    s29, s29
+        vcvt.f32.s32    s30, s30
+        vcvt.f32.s32    s31, s31
+        vmul.f32        s24, s24, s3
+        b               2b
+
+50:
+        ldr             lr, =0x03070000         @ RunFast mode, short vectors of length 8, stride 1
+        fmrx            ip, FPSCR
+        fmxr            FPSCR, lr
+51:
+        vldmia          a3!, {s8-s15}
+        vldmia          a4!, {s0}
+        vcvt.f32.s32    s8, s8
+        vcvt.f32.s32    s9, s9
+        vcvt.f32.s32    s10, s10
+        vcvt.f32.s32    s11, s11
+        vcvt.f32.s32    s12, s12
+        vcvt.f32.s32    s13, s13
+        vcvt.f32.s32    s14, s14
+        vcvt.f32.s32    s15, s15
+        vmul.f32        s8, s8, s0
+        subs            a1, a1, #8
+        vstmia          a2!, {s8-s11}
+        vstmia          a2!, {s12-s15}
+        bne             51b
+
+        fmxr            FPSCR, ip
+        pop             {pc}
+endfunc
+
+/**
+ * ARM VFP optimised int32 to float conversion.
+ * Assume len is a multiple of 8, destination buffer is at least 4 bytes aligned
+ * (16 bytes alignment is best for BCM2835), little-endian.
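+ * With the FPSCR switched to short vectors of length 8 (as below), the
+ * single vmul.f32 in the loop scales the whole s8-s15 bank by the scalar
+ * s0 in one instruction.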
+ * TODO: could be further optimised by unrolling and interleaving, as above
+ */
+@ void ff_int32_to_float_fmul_scalar_vfp(float *dst, const int32_t *src, float mul, int len)
+function ff_int32_to_float_fmul_scalar_vfp, export=1
+VFP     tmp     .req    a4
+VFP     len     .req    a3
+NOVFP   tmp     .req    a3
+NOVFP   len     .req    a4
+NOVFP   vmov    s0, a3
+        ldr             tmp, =0x03070000        @ RunFast mode, short vectors of length 8, stride 1
+        fmrx            ip, FPSCR
+        fmxr            FPSCR, tmp
+1:
+        vldmia          a2!, {s8-s15}
+        vcvt.f32.s32    s8, s8
+        vcvt.f32.s32    s9, s9
+        vcvt.f32.s32    s10, s10
+        vcvt.f32.s32    s11, s11
+        vcvt.f32.s32    s12, s12
+        vcvt.f32.s32    s13, s13
+        vcvt.f32.s32    s14, s14
+        vcvt.f32.s32    s15, s15
+        vmul.f32        s8, s8, s0
+        subs            len, len, #8
+        vstmia          a1!, {s8-s11}
+        vstmia          a1!, {s12-s15}
+        bne             1b
+
+        fmxr            FPSCR, ip
+        bx              lr
+endfunc
+        .unreq  tmp
+        .unreq  len
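
For reference, the two routines added by this patch compute the following,
shown here as a scalar C sketch. The signatures follow the @-comments above;
the FmtConvertContext argument of the array8 variant is dropped for brevity,
and the function names are illustrative rather than the project's exact C
fallbacks.

    #include <stdint.h>

    /* Model of ff_int32_to_float_fmul_scalar_vfp: convert each int32
     * sample to float and scale it; len is assumed a multiple of 8. */
    static void int32_to_float_fmul_scalar(float *dst, const int32_t *src,
                                           float mul, int len)
    {
        int i;
        for (i = 0; i < len; i++)
            dst[i] = src[i] * mul;
    }

    /* Model of ff_int32_to_float_fmul_array8_vfp: as above, but a fresh
     * scale factor is consumed from mul[] for every block of 8 samples,
     * matching the per-block "vldmia a4!, {sN}" loads in the assembly. */
    static void int32_to_float_fmul_array8(float *dst, const int32_t *src,
                                           const float *mul, int len)
    {
        int i;
        for (i = 0; i < len; i += 8)
            int32_to_float_fmul_scalar(dst + i, src + i, *mul++, 8);
    }

The main path of the array8 routine splits this loop across the labels 3/2/1
so that the loads, conversions and stores of consecutive 8-sample blocks
overlap; the (len / 8) % 3 value computed at entry selects which of the three
software-pipeline stages (drop-through, 10:, or 11:) to enter first.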