/*
 * Copyright (c) 2016 Matthieu Bouron <matthieu.bouron stupeflix.com>
 * Copyright (c) 2016 Clément Bœsch <clement stupeflix.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/aarch64/asm.S"
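
/* AArch64 NEON fast paths converting nv12/nv21/yuv420p/yuv422p input to
 * packed 8-bit argb/rgba/abgr/bgra output, 16 pixels per inner iteration.
 *
 * A minimal sketch of the C prototype these functions are called with,
 * reconstructed from the register usage below; the authoritative
 * declaration lives in libswscale and may differ in detail:
 *
 *   // nv12/nv21 variant; the yuv420p/yuv422p variants additionally pass
 *   // srcV and linesizeV before the table argument, which shifts the
 *   // stack offsets read by load_args_yuv420p/load_args_yuv422p.
 *   int ff_nv12_to_argb_neon(int width, int height,
 *                            uint8_t *dst,        int linesize,
 *                            const uint8_t *srcY, int linesizeY,
 *                            const uint8_t *srcC, int linesizeC,
 *                            const int16_t *table, // {v2r, u2g, v2g, u2b}
 *                            int y_offset, int y_coeff);
 *
 * Per AAPCS64 the first eight arguments arrive in w0/w1/x2/w3/x4/w5/x6/w7;
 * the rest are read off the stack by the load_args_* macros below.
 */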

.macro load_args_nv12
    ldr             x8,  [sp]                       // table
    ldr             w9,  [sp, #8]                   // y_offset
    ldr             w10, [sp, #16]                  // y_coeff
    ld1             {v1.1D}, [x8]                   // load the 4 color-matrix coefficients
    dup             v0.8H, w10                      // broadcast y_coeff
    dup             v3.8H, w9                       // broadcast y_offset
    sub             w3, w3, w0, lsl #2              // w3 = linesize - width * 4 (padding)
    sub             w5, w5, w0                      // w5 = linesizeY - width (paddingY)
    sub             w7, w7, w0                      // w7 = linesizeC - width (paddingC)
    neg             w11, w0                         // w11 = -width, rewinds srcC on even rows
.endm

.macro load_args_nv21
    load_args_nv12
.endm

.macro load_args_yuv420p
    ldr             x13, [sp]                       // srcV
    ldr             w14, [sp, #8]                   // linesizeV
    ldr             x8,  [sp, #16]                  // table
    ldr             w9,  [sp, #24]                  // y_offset
    ldr             w10, [sp, #32]                  // y_coeff
    ld1             {v1.1D}, [x8]                   // load the 4 color-matrix coefficients
    dup             v0.8H, w10                      // broadcast y_coeff
    dup             v3.8H, w9                       // broadcast y_offset
    sub             w3, w3, w0, lsl #2              // w3 = linesize - width * 4 (padding)
    sub             w5, w5, w0                      // w5 = linesizeY - width (paddingY)
    sub             w7, w7, w0, lsr #1              // w7 = linesizeU - width / 2 (paddingU)
    sub             w14, w14, w0, lsr #1            // w14 = linesizeV - width / 2 (paddingV)
    lsr             w11, w0, #1
    neg             w11, w11                        // w11 = -width / 2, rewinds srcU/srcV on even rows
.endm

.macro load_args_yuv422p
    ldr             x13, [sp]                       // srcV
    ldr             w14, [sp, #8]                   // linesizeV
    ldr             x8,  [sp, #16]                  // table
    ldr             w9,  [sp, #24]                  // y_offset
    ldr             w10, [sp, #32]                  // y_coeff
    ld1             {v1.1D}, [x8]                   // load the 4 color-matrix coefficients
    dup             v0.8H, w10                      // broadcast y_coeff
    dup             v3.8H, w9                       // broadcast y_offset
    sub             w3, w3, w0, lsl #2              // w3 = linesize - width * 4 (padding)
    sub             w5, w5, w0                      // w5 = linesizeY - width (paddingY)
    sub             w7, w7, w0, lsr #1              // w7 = linesizeU - width / 2 (paddingU)
    sub             w14, w14, w0, lsr #1            // w14 = linesizeV - width / 2 (paddingV)
.endm

.macro load_chroma_nv12
    ld2             {v16.8B, v17.8B}, [x6], #16     // deinterleave 8 U and 8 V bytes
    ushll           v18.8H, v16.8B, #3              // U * (1<<3)
    ushll           v19.8H, v17.8B, #3              // V * (1<<3)
.endm

.macro load_chroma_nv21
    ld2             {v16.8B, v17.8B}, [x6], #16     // deinterleave 8 V and 8 U bytes
    ushll           v19.8H, v16.8B, #3              // V * (1<<3)
    ushll           v18.8H, v17.8B, #3              // U * (1<<3)
.endm
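
/* nv21 stores chroma as VU instead of UV; only the destination registers of
 * the widening shifts are swapped, so everything downstream can treat v18 as
 * U and v19 as V regardless of the input format. */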

.macro load_chroma_yuv420p
    ld1             {v16.8B}, [x6],  #8             // 8 U bytes
    ld1             {v17.8B}, [x13], #8             // 8 V bytes
    ushll           v18.8H, v16.8B, #3              // U * (1<<3)
    ushll           v19.8H, v17.8B, #3              // V * (1<<3)
.endm

.macro load_chroma_yuv422p
    load_chroma_yuv420p
.endm

.macro increment_nv12
    ands            w15, w1, #1                     // test the low bit of the remaining height
    csel            w16, w7, w11, ne                // incC = (h & 1) ? paddingC : -width
    add             x6, x6, w16, SXTW               // srcC += incC
.endm
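
/* 4:2:0 stores one chroma row per two luma rows, so the chroma pointers must
 * advance only every other row. Instead of branching, csel picks either the
 * row padding (advance to the next chroma row) or the negative width (undo
 * the bytes just consumed and replay the same chroma row), based on the
 * parity of the remaining height in w1. */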

.macro increment_nv21
    increment_nv12
.endm

.macro increment_yuv420p
    ands            w15, w1, #1                     // test the low bit of the remaining height
    csel            w16, w7,  w11, ne               // incU = (h & 1) ? paddingU : -width/2
    csel            w17, w14, w11, ne               // incV = (h & 1) ? paddingV : -width/2
    add             x6,  x6,  w16, SXTW             // srcU += incU
    add             x13, x13, w17, SXTW             // srcV += incV
.endm

.macro increment_yuv422p
    add             x6,  x6,  w7,  UXTW             // srcU += paddingU
    add             x13, x13, w14, UXTW             // srcV += paddingV
.endm
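
/* 4:2:2 carries a chroma pair for every luma row, so no rewind value is
 * needed here: srcU/srcV always step forward by their row padding (the
 * sample bytes themselves were already consumed by load_chroma_yuv422p). */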

.macro compute_rgba r1 g1 b1 a1 r2 g2 b2 a2
    add             v20.8H, v26.8H, v20.8H          // Y1 + R1
    add             v21.8H, v27.8H, v21.8H          // Y2 + R2
    add             v22.8H, v26.8H, v22.8H          // Y1 + G1
    add             v23.8H, v27.8H, v23.8H          // Y2 + G2
    add             v24.8H, v26.8H, v24.8H          // Y1 + B1
    add             v25.8H, v27.8H, v25.8H          // Y2 + B2
    sqrshrun        \r1, v20.8H, #1                 // clip_u8((Y1 + R1) >> 1)
    sqrshrun        \r2, v21.8H, #1                 // clip_u8((Y2 + R2) >> 1)
    sqrshrun        \g1, v22.8H, #1                 // clip_u8((Y1 + G1) >> 1)
    sqrshrun        \g2, v23.8H, #1                 // clip_u8((Y2 + G2) >> 1)
    sqrshrun        \b1, v24.8H, #1                 // clip_u8((Y1 + B1) >> 1)
    sqrshrun        \b2, v25.8H, #1                 // clip_u8((Y2 + B2) >> 1)
    movi            \a1, #255                       // opaque alpha
    movi            \a2, #255
.endm
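
/* compute_rgba writes two groups of 8 pixels: \r1..\a1 receive pixels 0-7
 * and \r2..\a2 pixels 8-15. The caller chooses which physical register maps
 * to which channel, which is all that distinguishes argb/rgba/abgr/bgra. */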

.macro declare_func ifmt ofmt
function ff_\ifmt\()_to_\ofmt\()_neon, export=1
    load_args_\ifmt
1:
    mov             w8, w0                          // w8 = width
2:
    movi            v5.8H, #4, lsl #8               // 128 * (1<<3)
    load_chroma_\ifmt
    sub             v18.8H, v18.8H, v5.8H           // U*(1<<3) - 128*(1<<3)
    sub             v19.8H, v19.8H, v5.8H           // V*(1<<3) - 128*(1<<3)
    zip1            v6.8H, v19.8H, v19.8H           // V1: each V sample doubled for 2 luma columns
    zip2            v7.8H, v19.8H, v19.8H           // V2
    zip1            v4.8H, v18.8H, v18.8H           // U1: each U sample doubled for 2 luma columns
    zip2            v5.8H, v18.8H, v18.8H           // U2
    sqdmulh         v20.8H, v6.8H, v1.H[0]          // V1 * v2r            (R1)
    sqdmulh         v21.8H, v7.8H, v1.H[0]          // V2 * v2r            (R2)
    sqdmulh         v22.8H, v4.8H, v1.H[1]          // U1 * u2g
    sqdmulh         v23.8H, v5.8H, v1.H[1]          // U2 * u2g
    sqdmulh         v6.8H,  v6.8H, v1.H[2]          // V1 * v2g
    sqdmulh         v7.8H,  v7.8H, v1.H[2]          // V2 * v2g
    add             v22.8H, v22.8H, v6.8H           // U1 * u2g + V1 * v2g (G1)
    add             v23.8H, v23.8H, v7.8H           // U2 * u2g + V2 * v2g (G2)
    sqdmulh         v24.8H, v4.8H, v1.H[3]          // U1 * u2b            (B1)
    sqdmulh         v25.8H, v5.8H, v1.H[3]          // U2 * u2b            (B2)
    ld1             {v2.16B}, [x4], #16             // load 16 luma samples
    ushll           v26.8H, v2.8B,  #3              // Y1 * (1<<3)
    ushll2          v27.8H, v2.16B, #3              // Y2 * (1<<3)
    sub             v26.8H, v26.8H, v3.8H           // Y1*(1<<3) - y_offset
    sub             v27.8H, v27.8H, v3.8H           // Y2*(1<<3) - y_offset
    sqdmulh         v26.8H, v26.8H, v0.8H           // ((Y1*(1<<3) - y_offset) * y_coeff) >> 15
    sqdmulh         v27.8H, v27.8H, v0.8H           // ((Y2*(1<<3) - y_offset) * y_coeff) >> 15
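
// Fixed-point note: sqdmulh computes sat((2*a*b) >> 16), i.e. (a*b) >> 15
// with saturation. With Y/U/V pre-scaled by 1<<3 and 16-bit coefficients,
// the Y/R/G/B terms land at twice their final 8-bit scale; the trailing
// sqrshrun #1 in compute_rgba folds that factor back in with rounding.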

.ifc \ofmt,argb // 1 2 3 0
    compute_rgba    v5.8B,v6.8B,v7.8B,v4.8B, v17.8B,v18.8B,v19.8B,v16.8B
.endif

.ifc \ofmt,rgba // 0 1 2 3
    compute_rgba    v4.8B,v5.8B,v6.8B,v7.8B, v16.8B,v17.8B,v18.8B,v19.8B
.endif

.ifc \ofmt,abgr // 3 2 1 0
    compute_rgba    v7.8B,v6.8B,v5.8B,v4.8B, v19.8B,v18.8B,v17.8B,v16.8B
.endif

.ifc \ofmt,bgra // 2 1 0 3
    compute_rgba    v6.8B,v5.8B,v4.8B,v7.8B, v18.8B,v17.8B,v16.8B,v19.8B
.endif
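
/* Each st4 below interleaves four 8-byte registers channel by channel,
 * emitting 8 packed 4-byte pixels (32 bytes); the two stores cover the
 * 16 pixels processed per inner-loop iteration. */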
    st4             { v4.8B, v5.8B, v6.8B, v7.8B}, [x2], #32
    st4             {v16.8B,v17.8B,v18.8B,v19.8B}, [x2], #32
    subs            w8, w8, #16                     // width -= 16
    b.gt            2b                              // next 16 pixels of the current row
    add             x2, x2, w3, UXTW                // dst  += padding
    add             x4, x4, w5, UXTW                // srcY += paddingY
    increment_\ifmt
    subs            w1, w1, #1                      // height -= 1
    b.gt            1b                              // next row
    ret
endfunc
.endm

.macro declare_rgb_funcs ifmt
    declare_func    \ifmt, argb
    declare_func    \ifmt, rgba
    declare_func    \ifmt, abgr
    declare_func    \ifmt, bgra
.endm

declare_rgb_funcs nv12
declare_rgb_funcs nv21
declare_rgb_funcs yuv420p
declare_rgb_funcs yuv422p
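
/* The macros above expand to 16 exported functions, one per input/output
 * pair: ff_nv12_to_argb_neon, ff_nv21_to_rgba_neon, ff_yuv420p_to_abgr_neon,
 * ff_yuv422p_to_bgra_neon, and so on. */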