@ FFmpeg — libswscale/arm/output.S
1 /*
2  * Copyright (c) 2016 Clément Bœsch <clement@stupeflix.com>
3  * Copyright (c) 2016 Matthieu Bouron <matthieu.bouron@stupeflix.com>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "libavutil/arm/asm.S"
23
@-----------------------------------------------------------------------------
@ void ff_yuv2planeX_8_neon(const int16_t *filter, int filterSize,
@                           const int16_t **src, uint8_t *dest, int dstW,
@                           const uint8_t *dither, int offset)
@
@ Vertical scale/output pass: for 8 pixels at a time computes
@   dest[i] = sat_u8(( (dither[(i + offset) & 7] << 12)
@                      + sum_j filter[j] * src[j][i] ) >> 19)
@ ABI:   AAPCS (32-bit ARM). r0 = filter, r1 = filterSize, r2 = src
@        (array of row pointers), r3 = dest; dstW, dither, offset are
@        on the stack. r4-r12/lr and q4-q7 (callee-saved d8-d15) are
@        preserved via push/vpush below.
@ NOTE(review): the inner loop retires two filter taps per pass
@        (subs r8, #2), so filterSize is assumed even, and the outer
@        loop stores 8 bytes per pass, so dstW is assumed a multiple
@        of 8 — confirm both with the C-side caller.
@-----------------------------------------------------------------------------
24 function ff_yuv2planeX_8_neon, export=1
25     push {r4-r12, lr}
26     vpush {q4-q7}
27     ldr                 r4, [sp, #104]                                 @ dstW   (sp+104 = 10 GPRs*4 + 4 quads*16 pushed above)
28     ldr                 r5, [sp, #108]                                 @ dither (pointer to 8 bytes)
29     ldr                 r6, [sp, #112]                                 @ offset (rotation of the dither pattern)
30     vld1.8              {d0}, [r5]                                     @ load 8x8-bit dither values
31     cmp                 r6, #0                                         @ check offsetting which can be 0 or 3 only
32     beq                 1f
33     vext.u8             d0, d0, d0, #3                                 @ honor offsetting which can be 3 only (rotate dither bytes left by 3)
34 1:  vmovl.u8            q0, d0                                         @ extend dither to 16-bit
35     vshll.u16           q1, d0, #12                                    @ extend dither to 32-bit with left shift by 12 (part 1)
36     vshll.u16           q2, d1, #12                                    @ extend dither to 32-bit with left shift by 12 (part 2)
37     mov                 r7, #0                                         @ i = 0
@ Outer loop: one pass per 8 output pixels.
38 2:  vmov.u8             q3, q1                                         @ initialize accumulator with dithering values (part 1)
39     vmov.u8             q4, q2                                         @ initialize accumulator with dithering values (part 2)
40     mov                 r8, r1                                         @ tmpFilterSize = filterSize
41     mov                 r9, r2                                         @ srcp    = src (rewind row-pointer cursor)
42     mov                 r10, r0                                        @ filterp = filter (rewind coefficient cursor)
@ Inner loop: accumulate two filter taps (rows j and j+1) per pass.
43 3:  ldr                 r11, [r9], #4                                  @ get pointer @ src[j]
44     ldr                 r12, [r9], #4                                  @ get pointer @ src[j+1]
45     add                 r11, r11, r7, lsl #1                           @ &src[j][i]   (i scaled by 2: 16-bit samples)
46     add                 r12, r12, r7, lsl #1                           @ &src[j+1][i]
47     vld1.16             {q5}, [r11]                                    @ read 8x16-bit @ src[j  ][i + {0..7}]: A,B,C,D,E,F,G,H
48     vld1.16             {q6}, [r12]                                    @ read 8x16-bit @ src[j+1][i + {0..7}]: I,J,K,L,M,N,O,P
49     ldr                 r11, [r10], #4                                 @ read 2x16-bit coeffs (X, Y) at (filter[j], filter[j+1])
50     vmov.16             q7, q5                                         @ copy 8x16-bit @ src[j  ][i + {0..7}] for following inplace zip instruction
51     vmov.16             q8, q6                                         @ copy 8x16-bit @ src[j+1][i + {0..7}] for following inplace zip instruction
52     vzip.16             q7, q8                                         @ A,I,B,J,C,K,D,L,E,M,F,N,G,O,H,P
53     vdup.32             q15, r11                                       @ X,Y,X,Y,X,Y,X,Y (broadcast the coeff pair)
54     vmull.s16           q9, d14, d30                                   @ A*X,I*Y,B*X,J*Y
55     vmull.s16           q10, d15, d31                                  @ C*X,K*Y,D*X,L*Y
56     vmull.s16           q11, d16, d30                                  @ E*X,M*Y,F*X,N*Y
57     vmull.s16           q12, d17, d31                                  @ G*X,O*Y,H*X,P*Y
58     vpadd.s32           d10, d18, d19                                  @ A*X+I*Y,B*X+J*Y (pairwise add folds the two taps)
59     vpadd.s32           d11, d20, d21                                  @ C*X+K*Y,D*X+L*Y
60     vpadd.s32           d12, d22, d23                                  @ E*X+M*Y,F*X+N*Y
61     vpadd.s32           d13, d24, d25                                  @ G*X+O*Y,H*X+P*Y
62     vadd.s32            q3, q5                                         @ update val accumulator (part 1)
63     vadd.s32            q4, q6                                         @ update val accumulator (part 2)
64     subs                r8, #2                                         @ tmpFilterSize -= 2 (two taps consumed per pass)
65     bgt                 3b                                             @ loop until filterSize is consumed
66     vshr.s32            q3, q3, #19                                    @ val>>19 (part 1)
67     vshr.s32            q4, q4, #19                                    @ val>>19 (part 2)
68     vqmovun.s32         d6, q3                                         @ clip16(val>>19) (part 1) — d6 overlays q3's low half, read-then-write is safe
69     vqmovun.s32         d7, q4                                         @ clip16(val>>19) (part 2)
70     vqmovn.u16          d6, q3                                         @ merge part 1 and part 2 (saturating narrow u16 -> u8)
71     vst1.8              {d6}, [r3]!                                    @ write 8 output pixels, dest += 8
72     add                 r7, #8                                         @ i += 8
73     subs                r4, r4, #8                                     @ dstW -= 8
74     bgt                 2b                                             @ loop until width is consumed
75     vpop                {q4-q7}
76     pop                 {r4-r12, lr}
@ NOTE(review): `mov pc, lr` is a deprecated return idiom on ARMv7
@ (no interworking state switch); `bx lr` — or popping pc directly —
@ would be the modern form. Behavior here is unchanged for ARM state.
77     mov                 pc, lr
78 endfunc