/*
 * Copyright (c) 2015 Matthieu Bouron <matthieu.bouron stupeflix.com>
 * Copyright (c) 2015 Clément Bœsch <clement stupeflix.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
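
@ Calling convention, reconstructed from the register and stack reads in the
@ load_args macros below (a sketch; the authoritative prototypes live on the
@ C side):
@
@   void ff_<ifmt>_to_<ofmt>_neon_<precision>(int width, int height,
@                                             uint8_t *dst, int linesize,
@                                             const uint8_t *srcY, int linesizeY,
@                                             <chroma pointer(s)/linesize(s)>,
@                                             const int16_t *table,
@                                             int y_offset, int y_coeff);
@
@ nv12/nv21 pass one interleaved chroma plane (srcC, linesizeC); yuv420p
@ passes separate U and V planes. table supplies the four chroma coefficients
@ {v2r, u2g, v2g, u2b}, consumed through d1[0..3] below. Per pixel, with
@ U and V pre-biased by -128 and Y' = (Y - y_offset) * y_coeff:
@
@   R = clip8((Y' + V * v2r           + bias) >> shift)
@   G = clip8((Y' + U * u2g + V * v2g + bias) >> shift)
@   B = clip8((Y' + U * u2b           + bias) >> shift)
@
@ where clip8() saturates to [0,255], bias = 1 << (shift - 1), and shift = 6
@ in the 16-bit-precision path or 13 in the 32-bit path (see vqrshrun below).
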
.macro compute_premult_16 half_u1, half_u2, half_v1, half_v2
    vmov            d2, \half_u1                @ copy left q14 to left q1
    vmov            d3, \half_u1                @ copy left q14 to right q1
    vmov            d4, \half_u2                @ copy right q14 to left q2
    vmov            d5, \half_u2                @ copy right q14 to right q2

    vmov            d6, \half_v1                @ copy left q15 to left q3
    vmov            d7, \half_v1                @ copy left q15 to right q3
    vmov            d8, \half_v2                @ copy right q15 to left q4
    vmov            d9, \half_v2                @ copy right q15 to right q4

    vzip.16         d2, d3                      @ U1U1U2U2U3U3U4U4
    vzip.16         d4, d5                      @ U5U5U6U6U7U7U8U8

    vzip.16         d6, d7                      @ V1V1V2V2V3V3V4V4
    vzip.16         d8, d9                      @ V5V5V6V6V7V7V8V8

    vmul.s16        q8,  q3, d1[0]              @ V * v2r             (left,  red)
    vmul.s16        q9,  q4, d1[0]              @ V * v2r             (right, red)
    vmul.s16        q10, q1, d1[1]              @ U * u2g
    vmul.s16        q11, q2, d1[1]              @ U * u2g
    vmla.s16        q10, q3, d1[2]              @ U * u2g + V * v2g   (left,  green)
    vmla.s16        q11, q4, d1[2]              @ U * u2g + V * v2g   (right, green)
    vmul.s16        q12, q1, d1[3]              @ U * u2b             (left,  blue)
    vmul.s16        q13, q2, d1[3]              @ U * u2b             (right, blue)
.endm
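
@ The _16 variants keep the chroma products in 16 bits via vmul.s16/vmla.s16,
@ trading precision for speed; the _32 variants below use the widening
@ vmull.s16/vmlal.s16 instead, carrying the intermediate terms at full
@ 32-bit precision.
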
.macro compute_premult_32 half_u half_v
    vmov            d2, \half_u                 @ copy left q14 to left q1
    vmov            d3, \half_u                 @ copy left q14 to right q1
    vmov            d4, \half_v                 @ copy left q15 to left q2
    vmov            d5, \half_v                 @ copy left q15 to right q2

    vzip.16         d2, d3                      @ U1U1U2U2U3U3U4U4
    vzip.16         d4, d5                      @ V1V1V2V2V3V3V4V4

    vmull.s16       q8,  d4, d1[0]              @ V * v2r             (left,  red)
    vmull.s16       q9,  d5, d1[0]              @ V * v2r             (right, red)
    vmull.s16       q10, d2, d1[1]              @ U * u2g
    vmull.s16       q11, d3, d1[1]              @ U * u2g
    vmlal.s16       q10, d4, d1[2]              @ U * u2g + V * v2g   (left,  green)
    vmlal.s16       q11, d5, d1[2]              @ U * u2g + V * v2g   (right, green)
    vmull.s16       q12, d2, d1[3]              @ U * u2b             (left,  blue)
    vmull.s16       q13, d3, d1[3]              @ U * u2b             (right, blue)
.endm
.macro compute_color_16 dst_comp1 dst_comp2 pre1 pre2
    vadd.s16        q1, q14, \pre1
    vadd.s16        q2, q15, \pre2
    vqrshrun.s16    \dst_comp1, q1, #6
    vqrshrun.s16    \dst_comp2, q2, #6
.endm
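
@ vqrshrun.s16 #6 adds the rounding bias (1 << 5), shifts right by 6 and
@ saturates the signed 16-bit sums to unsigned 8-bit components, so no
@ separate clipping step is needed.
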
.macro compute_color_32 dst_comp pre1 pre2
    vadd.s32        q3, q1, \pre1
    vadd.s32        q4, q2, \pre2
    vqrshrun.s32    d10, q3, #13
    vqrshrun.s32    d11, q4, #13                @ q5 = ({q3,q4} + (1<<12)) >> 13
    vqmovn.u16      \dst_comp, q5               @ saturate 16bit -> 8bit
.endm
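
@ The 32-bit path narrows in two stages: vqrshrun.s32 #13 rounds and
@ saturates the 32-bit sums to unsigned 16 bits, then vqmovn.u16 narrows
@ them to the final 8-bit components.
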
.macro compute_rgba_16 r1 r2 g1 g2 b1 b2 a1 a2
    compute_color_16 \r1, \r2, q8,  q9
    compute_color_16 \g1, \g2, q10, q11
    compute_color_16 \b1, \b2, q12, q13
    vmov.u8         \a1, #255                   @ alpha is constant: fully opaque
    vmov.u8         \a2, #255
.endm
.macro compute_rgba_32 r g b a
    compute_color_32 \r, q8,  q9
    compute_color_32 \g, q10, q11
    compute_color_32 \b, q12, q13
    vmov.u8         \a, #255                    @ alpha is constant: fully opaque
.endm
.macro compute_16px_16 dst y0 y1 ofmt
    vmovl.u8        q14, \y0                    @ 8px of y
    vmovl.u8        q15, \y1                    @ 8px of y

    vdup.16         q5, r9                      @ q5 = y_offset
    vmov            d14, d0                     @ q7 = y_coeff
    vmov            d15, d0                     @ q7 = y_coeff

    vsub.s16        q14, q5                     @ q14 = srcY - y_offset (left)
    vsub.s16        q15, q5                     @ q15 = srcY - y_offset (right)

    vmul.s16        q14, q7                     @ q14 = (srcY - y_offset) * y_coeff (left)
    vmul.s16        q15, q7                     @ q15 = (srcY - y_offset) * y_coeff (right)

.ifc \ofmt,argb
    compute_rgba_16 d7, d11, d8, d12, d9, d13, d6, d10
.endif

.ifc \ofmt,rgba
    compute_rgba_16 d6, d10, d7, d11, d8, d12, d9, d13
.endif

.ifc \ofmt,abgr
    compute_rgba_16 d9, d13, d8, d12, d7, d11, d6, d10
.endif

.ifc \ofmt,bgra
    compute_rgba_16 d8, d12, d7, d11, d6, d10, d9, d13
.endif

    vst4.8          {q3, q4}, [\dst,:128]!
    vst4.8          {q5, q6}, [\dst,:128]!
.endm
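
@ vst4.8 stores its four d registers byte-interleaved, so the memory order
@ of the components is chosen purely by which d registers compute_rgba_16
@ writes into: for argb, alpha is computed into d6/d10, the first lane of
@ each store, followed by r (d7/d11), g (d8/d12) and b (d9/d13).
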
.macro compute_8px_32 dst half_y ofmt
    vmovl.u8        q7, \half_y                 @ 8px of Y
    vdup.16         q5, r9                      @ q5 = y_offset
    vsub.s16        q7, q5                      @ q7 = srcY - y_offset
    vmull.s16       q1, d14, d0                 @ q1 = (srcY - y_offset) * y_coeff (left)
    vmull.s16       q2, d15, d0                 @ q2 = (srcY - y_offset) * y_coeff (right)

.ifc \ofmt,argb
    compute_rgba_32 d13, d14, d15, d12
.endif

.ifc \ofmt,rgba
    compute_rgba_32 d12, d13, d14, d15
.endif

.ifc \ofmt,abgr
    compute_rgba_32 d15, d14, d13, d12
.endif

.ifc \ofmt,bgra
    compute_rgba_32 d14, d13, d12, d15
.endif

    vst4.8          {q6, q7}, [\dst,:128]!
.endm
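
@ The widened 32-bit intermediates double the register pressure, so this
@ path converts 8 pixels per call; process_16px_32 below invokes it twice
@ per line to keep the same 16-pixel loop step as the 16-bit path.
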
.macro process_16px_16 ofmt
    compute_premult_16 d28, d29, d30, d31

    vld1.8          {q7}, [r4]!                 @ first line of luma
    compute_16px_16 r2, d14, d15, \ofmt

    vld1.8          {q7}, [r12]!                @ second line of luma
    compute_16px_16 r11, d14, d15, \ofmt
.endm
.macro process_16px_32 ofmt
    compute_premult_32 d28, d30

    vld1.8          {q7}, [r4]!                 @ first line of luma
    vmov            d28, d15                    @ save right of the first line of luma for later use
    compute_8px_32  r2, d14, \ofmt

    vld1.8          {q7}, [r12]!                @ second line of luma
    vmov            d30, d15                    @ save right of the second line of luma for later use
    compute_8px_32  r11, d14, \ofmt

    compute_premult_32 d29, d31
    compute_8px_32  r2, d28, \ofmt
    compute_8px_32  r11, d30, \ofmt
.endm
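
@ Register reuse: once the left chroma halves (d28/d30) have been
@ premultiplied, those registers are free to stash the right luma halves,
@ which would otherwise be clobbered when q7 is reloaded with the second
@ line of luma.
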
.macro load_args_nvx                            @ shared by the nv12 and nv21 entry points
    push            {r4-r12, lr}
    vpush           {q4-q7}
    ldr             r4, [sp, #104]              @ r4  = srcY
    ldr             r5, [sp, #108]              @ r5  = linesizeY
    ldr             r6, [sp, #112]              @ r6  = srcC
    ldr             r7, [sp, #116]              @ r7  = linesizeC
    ldr             r8, [sp, #120]              @ r8  = table
    ldr             r9, [sp, #124]              @ r9  = y_offset
    ldr             r10, [sp, #128]             @ r10 = y_coeff
    vdup.16         d0, r10                     @ d0  = y_coeff
    vld1.16         {d1}, [r8]                  @ d1  = *table
    add             r11, r2, r3                 @ r11 = dst + linesize (dst2)
    add             r12, r4, r5                 @ r12 = srcY + linesizeY (srcY2)
    lsl             r3, r3, #1                  @ r3  = linesize * 2
    lsl             r5, r5, #1                  @ r5  = linesizeY * 2
    lsl             r8, r0, #2                  @ r8  = width * 4
    sub             r3, r3, r8                  @ r3  = linesize * 2 - width * 4 (padding)
    sub             r5, r5, r0                  @ r5  = linesizeY * 2 - width (paddingY)
    sub             r7, r7, r0                  @ r7  = linesizeC - width (paddingC)
.endm
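
@ The #104 base offset of the stacked arguments accounts for the 40 bytes
@ pushed by push {r4-r12, lr} (10 registers) plus the 64 bytes pushed by
@ vpush {q4-q7} (4 quad registers).
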
.macro load_args_yuv420p
    push            {r4-r12, lr}
    vpush           {q4-q7}
    ldr             r4, [sp, #104]              @ r4  = srcY
    ldr             r5, [sp, #108]              @ r5  = linesizeY
    ldr             r6, [sp, #112]              @ r6  = srcU
    ldr             r8, [sp, #128]              @ r8  = table
    ldr             r9, [sp, #132]              @ r9  = y_offset
    ldr             r10, [sp, #136]             @ r10 = y_coeff
    vdup.16         d0, r10                     @ d0  = y_coeff
    vld1.16         {d1}, [r8]                  @ d1  = *table
    add             r11, r2, r3                 @ r11 = dst + linesize (dst2)
    add             r12, r4, r5                 @ r12 = srcY + linesizeY (srcY2)
    lsl             r3, r3, #1                  @ r3  = linesize * 2
    lsl             r5, r5, #1                  @ r5  = linesizeY * 2
    lsl             r8, r0, #2                  @ r8  = width * 4
    sub             r3, r3, r8                  @ r3  = linesize * 2 - width * 4 (padding)
    sub             r5, r5, r0                  @ r5  = linesizeY * 2 - width (paddingY)
    ldr             r10, [sp, #120]             @ r10 = srcV
.endm
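
@ r10 only carries y_coeff until the vdup above, so it is reloaded with
@ srcV at the end; linesizeU and linesizeV stay on the stack and are
@ fetched once per row pair in the conversion loop below.
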
.macro declare_func ifmt ofmt precision
function ff_\ifmt\()_to_\ofmt\()_neon_\precision\(), export=1
.ifc \ifmt,yuv420p
    load_args_yuv420p
.else
    load_args_nvx
.endif
1:
    mov             r8, r0                      @ r8 = width
2:
    vmov.i8         d10, #128                   @ chroma bias

.ifc \ifmt,nv12
    vld2.8          {d2, d3}, [r6]!             @ q1: interleaved chroma line
    vsubl.u8        q14, d2, d10                @ q14 = U - 128
    vsubl.u8        q15, d3, d10                @ q15 = V - 128
.endif
.ifc \ifmt,nv21
    vld2.8          {d2, d3}, [r6]!             @ q1: interleaved chroma line
    vsubl.u8        q14, d3, d10                @ q14 = U - 128
    vsubl.u8        q15, d2, d10                @ q15 = V - 128
.endif
.ifc \ifmt,yuv420p
    vld1.8          d2, [r6]!                   @ d2: U (Cb, blue-difference) chroma line
    vld1.8          d3, [r10]!                  @ d3: V (Cr, red-difference) chroma line
    vsubl.u8        q14, d2, d10                @ q14 = U - 128
    vsubl.u8        q15, d3, d10                @ q15 = V - 128
.endif

    process_16px_\precision \ofmt

    subs            r8, r8, #16                 @ width -= 16
    bgt             2b

    add             r2, r2, r3                  @ dst   += padding
    add             r4, r4, r5                  @ srcY  += paddingY
    add             r11, r11, r3                @ dst2  += padding
    add             r12, r12, r5                @ srcY2 += paddingY

.ifc \ifmt,yuv420p
    ldr             r7, [sp, #116]              @ r7 = linesizeU
    sub             r7, r7, r0, lsr #1          @ r7 = linesizeU - width / 2 (paddingU)
    add             r6, r6, r7                  @ srcU += paddingU

    ldr             r7, [sp, #124]              @ r7 = linesizeV
    sub             r7, r7, r0, lsr #1          @ r7 = linesizeV - width / 2 (paddingV)
    add             r10, r10, r7                @ srcV += paddingV
.else
    add             r6, r6, r7                  @ srcC += paddingC
.endif

    subs            r1, r1, #2                  @ height -= 2
    bgt             1b

    vpop            {q4-q7}
    pop             {r4-r12, lr}
    bx              lr
endfunc
.endm
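
@ Each outer-loop iteration converts two rows of luma against one shared
@ chroma row, matching the 4:2:0 subsampling; dst2/srcY2 run one line
@ ahead of dst/srcY, hence height is decremented by 2.
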
.macro declare_rgb_funcs ifmt precision
    declare_func \ifmt, argb, \precision
    declare_func \ifmt, rgba, \precision
    declare_func \ifmt, abgr, \precision
    declare_func \ifmt, bgra, \precision
.endm

declare_rgb_funcs nv12, 16
declare_rgb_funcs nv21, 16
declare_rgb_funcs nv12, 32
declare_rgb_funcs nv21, 32
declare_rgb_funcs yuv420p, 16
declare_rgb_funcs yuv420p, 32
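
@ The instantiations above expand to 24 exported entry points:
@ ff_{nv12,nv21,yuv420p}_to_{argb,rgba,abgr,bgra}_neon_{16,32}.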