/*
 * Copyright (c) 2015 Matthieu Bouron <matthieu.bouron stupeflix.com>
 * Copyright (c) 2015 Clément Bœsch <clement stupeflix.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/arm/asm.S"
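
/* YUV (nv12, nv21, yuv420p, yuv422p) to packed 32-bit RGB (argb, rgba,
 * abgr, bgra) conversion with NEON, in 16-bit or 32-bit intermediate
 * precision.
 *
 * Register conventions used throughout: d0 holds y_coeff in every lane,
 * d1 holds the coefficient table {v2r, u2g, v2g, u2b}, q14/q15 hold the
 * unbiased chroma (U - 128 / V - 128), and the premultiplied chroma
 * contributions live in q8-q13 until the luma term is added. */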
.macro compute_premult_16 half_u1, half_u2, half_v1, half_v2
    vmov            d2, \half_u1                   @ copy left q14 to left q1
    vmov            d3, \half_u1                   @ copy left q14 to right q1
    vmov            d4, \half_u2                   @ copy right q14 to left q2
    vmov            d5, \half_u2                   @ copy right q14 to right q2

    vmov            d6, \half_v1                   @ copy left q15 to left q3
    vmov            d7, \half_v1                   @ copy left q15 to right q3
    vmov            d8, \half_v2                   @ copy right q15 to left q4
    vmov            d9, \half_v2                   @ copy right q15 to right q4

    vzip.16         d2, d3                         @ U1U1U2U2U3U3U4U4
    vzip.16         d4, d5                         @ U5U5U6U6U7U7U8U8

    vzip.16         d6, d7                         @ V1V1V2V2V3V3V4V4
    vzip.16         d8, d9                         @ V5V5V6V6V7V7V8V8

    vmul.s16        q8,  q3, d1[0]                 @ V * v2r             (left,  red)
    vmul.s16        q9,  q4, d1[0]                 @ V * v2r             (right, red)
    vmul.s16        q10, q1, d1[1]                 @ U * u2g
    vmul.s16        q11, q2, d1[1]                 @ U * u2g
    vmla.s16        q10, q3, d1[2]                 @ U * u2g + V * v2g   (left,  green)
    vmla.s16        q11, q4, d1[2]                 @ U * u2g + V * v2g   (right, green)
    vmul.s16        q12, q1, d1[3]                 @ U * u2b             (left,  blue)
    vmul.s16        q13, q2, d1[3]                 @ U * u2b             (right, blue)
.endm
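
/* 32-bit precision variant of the chroma premultiply: vmull/vmlal widen
 * the s16 products to s32 so no intermediate bits are lost; the results
 * are narrowed later by compute_color_32 with a #13 rounding shift
 * instead of the #6 shift used by the 16-bit path. */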
.macro compute_premult_32 half_u half_v
    vmov            d2, \half_u                    @ copy left q14 to left q1
    vmov            d3, \half_u                    @ copy left q14 to right q1
    vmov            d4, \half_v                    @ copy left q15 to left q2
    vmov            d5, \half_v                    @ copy left q15 to right q2

    vzip.16         d2, d3                         @ U1U1U2U2U3U3U4U4
    vzip.16         d4, d5                         @ V1V1V2V2V3V3V4V4

    vmull.s16       q8,  d4, d1[0]                 @ V * v2r             (left,  red)
    vmull.s16       q9,  d5, d1[0]                 @ V * v2r             (right, red)
    vmull.s16       q10, d2, d1[1]                 @ U * u2g
    vmull.s16       q11, d3, d1[1]                 @ U * u2g
    vmlal.s16       q10, d4, d1[2]                 @ U * u2g + V * v2g   (left,  green)
    vmlal.s16       q11, d5, d1[2]                 @ U * u2g + V * v2g   (right, green)
    vmull.s16       q12, d2, d1[3]                 @ U * u2b             (left,  blue)
    vmull.s16       q13, d3, d1[3]                 @ U * u2b             (right, blue)
.endm
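
/* Add the luma term (q14/q15) to one premultiplied chroma component and
 * narrow to u8 with rounding and saturation:
 * dst = clip_u8((luma + chroma + (1 << 5)) >> 6). */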
.macro compute_color_16 dst_comp1 dst_comp2 pre1 pre2
    vadd.s16        q1, q14, \pre1
    vadd.s16        q2, q15, \pre2
    vqrshrun.s16    \dst_comp1, q1, #6
    vqrshrun.s16    \dst_comp2, q2, #6
.endm
.macro compute_color_32 dst_comp pre1 pre2
    vadd.s32        q3, q1, \pre1
    vadd.s32        q4, q2, \pre2
    vqrshrun.s32    d10, q3, #13
    vqrshrun.s32    d11, q4, #13                   @ q5 = ({q3,q4} + (1<<12)) >> 13
    vqmovn.u16      \dst_comp, q5                  @ saturate 16bit -> 8bit
.endm
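
/* compute_rgba_16/compute_rgba_32 only decide which d registers receive
 * each of the r/g/b/a components; the vst4.8 stores in compute_16px_16
 * and compute_8px_32 then interleave those registers byte-wise, which is
 * what produces the argb/rgba/abgr/bgra memory orderings. */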
.macro compute_rgba_16 r1 r2 g1 g2 b1 b2 a1 a2
    compute_color_16 \r1, \r2, q8,  q9
    compute_color_16 \g1, \g2, q10, q11
    compute_color_16 \b1, \b2, q12, q13
    vmov.u8         \a1, #255                      @ opaque alpha
    vmov.u8         \a2, #255
.endm
.macro compute_rgba_32 r g b a
    compute_color_32 \r, q8,  q9
    compute_color_32 \g, q10, q11
    compute_color_32 \b, q12, q13
    vmov.u8         \a, #255                       @ opaque alpha
.endm
.macro compute_16px_16 dst y0 y1 ofmt
    vmovl.u8        q14, \y0                       @ 8px of y
    vmovl.u8        q15, \y1                       @ 8px of y

    vdup.16         q5, r9                         @ q5 = y_offset
    vmov            d14, d0                        @ q7 = y_coeff
    vmov            d15, d0                        @ q7 = y_coeff

    vsub.s16        q14, q5                        @ q14 = srcY - y_offset
    vsub.s16        q15, q5                        @ q15 = srcY - y_offset

    vmul.s16        q14, q7                        @ q14 = (srcY - y_offset) * y_coeff (left)
    vmul.s16        q15, q7                        @ q15 = (srcY - y_offset) * y_coeff (right)

.ifc \ofmt,argb
    compute_rgba_16 d7, d11, d8, d12, d9, d13, d6, d10
.endif

.ifc \ofmt,rgba
    compute_rgba_16 d6, d10, d7, d11, d8, d12, d9, d13
.endif

.ifc \ofmt,abgr
    compute_rgba_16 d9, d13, d8, d12, d7, d11, d6, d10
.endif

.ifc \ofmt,bgra
    compute_rgba_16 d8, d12, d7, d11, d6, d10, d9, d13
.endif

    vst4.8          {q3, q4}, [\dst,:128]!
    vst4.8          {q5, q6}, [\dst,:128]!
.endm
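
/* The 32-bit path works on 8 pixels at a time (one q register of widened
 * products per component), so a 16-pixel block takes two passes. */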
.macro compute_8px_32 dst half_y ofmt
    vmovl.u8        q7, \half_y                    @ 8px of Y
    vdup.16         q5, r9                         @ q5 = y_offset
    vsub.s16        q7, q5                         @ q7 = srcY - y_offset
    vmull.s16       q1, d14, d0                    @ q1 = (srcY - y_offset) * y_coeff (left)
    vmull.s16       q2, d15, d0                    @ q2 = (srcY - y_offset) * y_coeff (right)

.ifc \ofmt,argb
    compute_rgba_32 d13, d14, d15, d12
.endif

.ifc \ofmt,rgba
    compute_rgba_32 d12, d13, d14, d15
.endif

.ifc \ofmt,abgr
    compute_rgba_32 d15, d14, d13, d12
.endif

.ifc \ofmt,bgra
    compute_rgba_32 d14, d13, d12, d15
.endif

    vst4.8          {q6, q7}, [\dst,:128]!
.endm
.macro process_1l_16px_16 ofmt
    compute_premult_16 d28, d29, d30, d31

    vld1.8          {q7}, [r4]!                    @ load 16 px of luma
    compute_16px_16 r2, d14, d15, \ofmt
.endm
.macro process_1l_16px_32 ofmt
    compute_premult_32 d28, d30

    vld1.8          {q7}, [r4]!                    @ load 16 px of luma
    vmov            d28, d15                       @ save right of the line of luma for later use
    compute_8px_32  r2, d14, \ofmt

    compute_premult_32 d29, d31
    compute_8px_32  r2, d28, \ofmt
.endm
.macro process_2l_16px_16 ofmt
    compute_premult_16 d28, d29, d30, d31

    vld1.8          {q7}, [r4]!                    @ first line of luma
    compute_16px_16 r2, d14, d15, \ofmt

    vld1.8          {q7}, [r12]!                   @ second line of luma
    compute_16px_16 r11, d14, d15, \ofmt
.endm
.macro process_2l_16px_32 ofmt
    compute_premult_32 d28, d30

    vld1.8          {q7}, [r4]!                    @ first line of luma
    vmov            d28, d15                       @ save right of the first line of luma for later use
    compute_8px_32  r2, d14, \ofmt

    vld1.8          {q7}, [r12]!                   @ second line of luma
    vmov            d30, d15                       @ save right of the second line of luma for later use
    compute_8px_32  r11, d14, \ofmt

    compute_premult_32 d29, d31
    compute_8px_32  r2, d28, \ofmt
    compute_8px_32  r11, d30, \ofmt
.endm
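
/* Argument loading: r0-r3 arrive as width, height, dst and linesize; the
 * remaining arguments are read from the stack. After push {r4-r12, lr}
 * (10 registers, 40 bytes) and vpush {q4-q7} (64 bytes), the first
 * stack-passed argument sits at [sp, #104] under the AAPCS.
 *
 * The implied C prototype, reconstructed from the register and stack
 * usage below (parameter names are illustrative, not taken from a header):
 *
 *   int ff_nv12_to_rgba_neon_16(int width, int height,
 *                               uint8_t *dst, int linesize,
 *                               const uint8_t *srcY, int linesizeY,
 *                               const uint8_t *srcC, int linesizeC,
 *                               const int16_t *table,
 *                               int y_offset, int y_coeff);
 */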
.macro load_args_nv12
    push            {r4-r12, lr}
    vpush           {q4-q7}
    ldr             r4, [sp, #104]                 @ r4  = srcY
    ldr             r5, [sp, #108]                 @ r5  = linesizeY
    ldr             r6, [sp, #112]                 @ r6  = srcC
    ldr             r7, [sp, #116]                 @ r7  = linesizeC
    ldr             r8, [sp, #120]                 @ r8  = table
    ldr             r9, [sp, #124]                 @ r9  = y_offset
    ldr             r10,[sp, #128]                 @ r10 = y_coeff
    vdup.16         d0, r10                        @ d0  = y_coeff
    vld1.16         {d1}, [r8]                     @ d1  = *table
    add             r11, r2, r3                    @ r11 = dst + linesize (dst2)
    add             r12, r4, r5                    @ r12 = srcY + linesizeY (srcY2)
    lsl             r3, r3, #1                     @ r3  = linesize * 2
    lsl             r5, r5, #1                     @ r5  = linesizeY * 2
    lsl             r8, r0, #2                     @ r8  = width * 4 (stride)
    sub             r3, r3, r8                     @ r3  = linesize * 2 - width * 4 (padding)
    sub             r5, r5, r0                     @ r5  = linesizeY * 2 - width (paddingY)
    sub             r7, r7, r0                     @ r7  = linesizeC - width (paddingC)
.endm
.macro load_args_yuv420p
    push            {r4-r12, lr}
    vpush           {q4-q7}
    ldr             r4, [sp, #104]                 @ r4  = srcY
    ldr             r5, [sp, #108]                 @ r5  = linesizeY
    ldr             r6, [sp, #112]                 @ r6  = srcU
    ldr             r8, [sp, #128]                 @ r8  = table
    ldr             r9, [sp, #132]                 @ r9  = y_offset
    ldr             r10,[sp, #136]                 @ r10 = y_coeff
    vdup.16         d0, r10                        @ d0  = y_coeff
    vld1.16         {d1}, [r8]                     @ d1  = *table
    add             r11, r2, r3                    @ r11 = dst + linesize (dst2)
    add             r12, r4, r5                    @ r12 = srcY + linesizeY (srcY2)
    lsl             r3, r3, #1                     @ r3  = linesize * 2
    lsl             r5, r5, #1                     @ r5  = linesizeY * 2
    lsl             r8, r0, #2                     @ r8  = width * 4 (stride)
    sub             r3, r3, r8                     @ r3  = linesize * 2 - width * 4 (padding)
    sub             r5, r5, r0                     @ r5  = linesizeY * 2 - width (paddingY)
    ldr             r10,[sp, #120]                 @ r10 = srcV
.endm
.macro load_args_yuv422p
    push            {r4-r12, lr}
    vpush           {q4-q7}
    ldr             r4, [sp, #104]                 @ r4  = srcY
    ldr             r5, [sp, #108]                 @ r5  = linesizeY
    ldr             r6, [sp, #112]                 @ r6  = srcU
    ldr             r7, [sp, #116]                 @ r7  = linesizeU
    ldr             r12,[sp, #124]                 @ r12 = linesizeV
    ldr             r8, [sp, #128]                 @ r8  = table
    ldr             r9, [sp, #132]                 @ r9  = y_offset
    ldr             r10,[sp, #136]                 @ r10 = y_coeff
    vdup.16         d0, r10                        @ d0  = y_coeff
    vld1.16         {d1}, [r8]                     @ d1  = *table
    add             r11, r2, r3                    @ r11 = dst + linesize (dst2)
    lsl             r8, r0, #2                     @ r8  = width * 4 (stride)
    sub             r3, r3, r8                     @ r3  = linesize - width * 4 (padding)
    sub             r5, r5, r0                     @ r5  = linesizeY - width (paddingY)
    sub             r7, r7, r0, lsr #1             @ r7  = linesizeU - width / 2 (paddingU)
    sub             r12,r12,r0, lsr #1             @ r12 = linesizeV - width / 2 (paddingV)
    ldr             r10,[sp, #120]                 @ r10 = srcV
.endm
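
/* Function template: one conversion function is emitted per (input
 * format, output format, precision) triple. \ifmt selects the chroma
 * load and the per-line pointer advances via .ifc blocks; \ofmt selects
 * the RGBA component ordering inside the compute macros. */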
.macro declare_func ifmt ofmt precision
function ff_\ifmt\()_to_\ofmt\()_neon_\precision\(), export=1

.ifc \ifmt,nv12
    load_args_nv12
.endif

.ifc \ifmt,nv21
    load_args_nv12
.endif

.ifc \ifmt,yuv420p
    load_args_yuv420p
.endif

.ifc \ifmt,yuv422p
    load_args_yuv422p
.endif

1:
    mov             r8, r0                         @ r8 = width
2:
    pld [r6, #64*3]
    pld [r4, #64*3]

    vmov.u8         d10, #128                      @ d10 = chroma bias (reloaded: q5 is clobbered each block)

.ifc \ifmt,nv12
    pld [r12, #64*3]

    vld2.8          {d2, d3}, [r6]!                @ q1: interleaved chroma line
    vsubl.u8        q14, d2, d10                   @ q14 = U - 128
    vsubl.u8        q15, d3, d10                   @ q15 = V - 128

    process_2l_16px_\precision \ofmt
.endif
.ifc \ifmt,nv21
    pld [r12, #64*3]

    vld2.8          {d2, d3}, [r6]!                @ q1: interleaved chroma line
    vsubl.u8        q14, d3, d10                   @ q14 = U - 128
    vsubl.u8        q15, d2, d10                   @ q15 = V - 128

    process_2l_16px_\precision \ofmt
.endif
.ifc \ifmt,yuv420p
    pld [r10, #64*3]
    pld [r12, #64*3]

    vld1.8          d2, [r6]!                      @ d2: chroma blue line (U)
    vld1.8          d3, [r10]!                     @ d3: chroma red line (V)
    vsubl.u8        q14, d2, d10                   @ q14 = U - 128
    vsubl.u8        q15, d3, d10                   @ q15 = V - 128

    process_2l_16px_\precision \ofmt
.endif
.ifc \ifmt,yuv422p
    pld [r10, #64*3]

    vld1.8          d2, [r6]!                      @ d2: chroma blue line (U)
    vld1.8          d3, [r10]!                     @ d3: chroma red line (V)
    vsubl.u8        q14, d2, d10                   @ q14 = U - 128
    vsubl.u8        q15, d3, d10                   @ q15 = V - 128

    process_1l_16px_\precision \ofmt
.endif

    subs            r8, r8, #16                    @ width -= 16
    bgt             2b
    add             r2, r2, r3                     @ dst  += padding
    add             r4, r4, r5                     @ srcY += paddingY

.ifc \ifmt,nv12
    add             r11, r11, r3                   @ dst2  += padding
    add             r12, r12, r5                   @ srcY2 += paddingY

    add             r6, r6, r7                     @ srcC += paddingC

    subs            r1, r1, #2                     @ height -= 2
.endif
.ifc \ifmt,nv21
    add             r11, r11, r3                   @ dst2  += padding
    add             r12, r12, r5                   @ srcY2 += paddingY

    add             r6, r6, r7                     @ srcC += paddingC

    subs            r1, r1, #2                     @ height -= 2
.endif
.ifc \ifmt,yuv420p
    add             r11, r11, r3                   @ dst2  += padding
    add             r12, r12, r5                   @ srcY2 += paddingY

    ldr             r7, [sp, #116]                 @ r7 = linesizeU
    sub             r7, r7, r0, lsr #1             @ r7 = linesizeU - width / 2 (paddingU)
    add             r6, r6, r7                     @ srcU += paddingU

    ldr             r7, [sp, #124]                 @ r7 = linesizeV
    sub             r7, r7, r0, lsr #1             @ r7 = linesizeV - width / 2 (paddingV)
    add             r10, r10, r7                   @ srcV += paddingV

    subs            r1, r1, #2                     @ height -= 2
.endif
.ifc \ifmt,yuv422p
    add             r6, r6, r7                     @ srcU += paddingU
    add             r10,r10,r12                    @ srcV += paddingV

    subs            r1, r1, #1                     @ height -= 1
.endif

    bgt             1b

    vpop            {q4-q7}
    pop             {r4-r12, lr}
    mov             pc, lr
endfunc
.endm
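
/* Each declare_rgb_funcs expansion below emits the four output orderings
 * for one input format and precision, e.g. declare_rgb_funcs nv12, 16
 * produces ff_nv12_to_argb_neon_16 through ff_nv12_to_bgra_neon_16. */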
.macro declare_rgb_funcs ifmt precision
    declare_func \ifmt, argb, \precision
    declare_func \ifmt, rgba, \precision
    declare_func \ifmt, abgr, \precision
    declare_func \ifmt, bgra, \precision
.endm
declare_rgb_funcs nv12, 16
declare_rgb_funcs nv21, 16
declare_rgb_funcs nv12, 32
declare_rgb_funcs nv21, 32
declare_rgb_funcs yuv420p, 16
declare_rgb_funcs yuv420p, 32
declare_rgb_funcs yuv422p, 16
declare_rgb_funcs yuv422p, 32