/*****************************************************************************
 * deblock.S: arm deblocking
 *****************************************************************************
 * Copyright (C) 2009-2015 x264 project
 *
 * Authors: Mans Rullgard <mans@mansr.com>
 *          Martin Storsjo <martin@martin.st>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/
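
@ NEON implementations of the H.264 in-loop deblocking filter.  Each
@ function filters one 16-pixel luma or 8-pixel chroma edge: r0 points
@ into the pixel plane, r1 is the line stride, r2/r3 carry the alpha
@ and beta thresholds, and a tc0 clipping table is passed on the stack.
@ (Parameter summary inferred from the register usage below and from
@ x264's deblock function signatures; a reader's note, not an
@ authoritative interface description.)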
.macro h264_loop_filter_start
    and             ip,  ip,  ip, lsl #16
    ands            ip,  ip,  ip, lsl #8
    bxlt            lr
.endm
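
@ In h264_loop_filter_start, ip holds the four packed tc0 bytes;
@ and-ing ip with shifted copies of itself folds the four sign bits
@ into bit 31, so the flags set by "ands" make bxlt return early when
@ every tc0 is negative, i.e. when the whole edge is unfiltered.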
.macro align_push_regs
    and             ip,  sp,  #15
    add             ip,  ip,  #32
    sub             sp,  sp,  ip
    vst1.64         {d12-d15}, [sp,:128]
    sub             sp,  sp,  #32
    vst1.64         {d8-d11},  [sp,:128]
.endm

.macro align_pop_regs
    vld1.64         {d8-d11},  [sp,:128]!
    vld1.64         {d12-d15}, [sp,:128], ip
.endm
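
@ align_push_regs/align_pop_regs spill d8-d15 (q4-q7), the only NEON
@ registers the AAPCS requires callees to preserve, into a 16-byte-
@ aligned stack slot around the luma filter, which uses them as scratch.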
.macro h264_loop_filter_luma
    vdup.8          q11, r2         @ alpha
    vabd.u8         q6,  q8,  q0    @ abs(p0 - q0)
    vabd.u8         q14, q9,  q8    @ abs(p1 - p0)
    vabd.u8         q15, q1,  q0    @ abs(q1 - q0)
    vclt.u8         q6,  q6,  q11   @ < alpha
    vdup.8          q11, r3         @ beta
    vclt.u8         q14, q14, q11   @ < beta
    vclt.u8         q15, q15, q11   @ < beta
    vabd.u8         q4,  q10, q8    @ abs(p2 - p0)
    vabd.u8         q5,  q2,  q0    @ abs(q2 - q0)
    vclt.u8         q4,  q4,  q11   @ < beta
    vclt.u8         q5,  q5,  q11   @ < beta
    vhadd.u8        q10, q10, q14
    vsubw.u8        q10, q10, d17
    vaddw.u8        q10, q10, d19
    vrshrn.i16      d5,  q10, #3
    vaddw.s8        q14, q14, d4
    vsubw.s8        q11, q11, d4
    vsubw.s8        q12, q12, d5
.endm
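
@ The core of h264_loop_filter_luma is the standard H.264 edge filter:
@     delta = clip( ((q0 - p0) * 4 + (p1 - q1) + 4) >> 3, -tc, tc )
@     p0'   = clip1( p0 + delta ),   q0' = clip1( q0 - delta )
@ with vrshrn.i16 #3 providing the rounded shift and the vclt masks
@ gating the result on the alpha/beta thresholds.
@
@ void x264_deblock_v_luma_neon( uint8_t *pix, intptr_t stride,
@                                int alpha, int beta, int8_t *tc0 )
@ (prototype as assumed from x264's deblock function table)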
function x264_deblock_v_luma_neon
    h264_loop_filter_start

    vld1.64         {d0, d1},  [r0,:128], r1
    vld1.64         {d2, d3},  [r0,:128], r1
    vld1.64         {d4, d5},  [r0,:128], r1
    sub             r0,  r0,  r1, lsl #2
    sub             r0,  r0,  r1, lsl #1
    vld1.64         {d20,d21}, [r0,:128], r1
    vld1.64         {d18,d19}, [r0,:128], r1
    vld1.64         {d16,d17}, [r0,:128], r1

    align_push_regs

    h264_loop_filter_luma

    sub             r0,  r0,  r1, lsl #1
    vst1.64         {d8, d9},  [r0,:128], r1
    vst1.64         {d16,d17}, [r0,:128], r1
    vst1.64         {d0, d1},  [r0,:128], r1
    vst1.64         {d10,d11}, [r0,:128]

    align_pop_regs
    bx              lr
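
@ The horizontal variant filters a vertical edge: it loads 16 rows of
@ 8 pixels straddling the edge, transposes so the edge becomes
@ horizontal, reuses the same filter macro, then transposes the four
@ modified columns back and scatters them with per-lane 32-bit stores.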
function x264_deblock_h_luma_neon
    h264_loop_filter_start

    sub             r0,  r0,  #4    @ start 4 pixels left of the edge
    vld1.64         {d6},  [r0], r1
    vld1.64         {d20}, [r0], r1
    vld1.64         {d18}, [r0], r1
    vld1.64         {d16}, [r0], r1
    vld1.64         {d0},  [r0], r1
    vld1.64         {d2},  [r0], r1
    vld1.64         {d4},  [r0], r1
    vld1.64         {d26}, [r0], r1
    vld1.64         {d7},  [r0], r1
    vld1.64         {d21}, [r0], r1
    vld1.64         {d19}, [r0], r1
    vld1.64         {d17}, [r0], r1
    vld1.64         {d1},  [r0], r1
    vld1.64         {d3},  [r0], r1
    vld1.64         {d5},  [r0], r1
    vld1.64         {d27}, [r0], r1

    TRANSPOSE8x8    q3,  q10, q9,  q8,  q0,  q1,  q2,  q13

    align_push_regs

    h264_loop_filter_luma

    TRANSPOSE4x4    q4,  q8,  q0,  q5

    sub             r0,  r0,  r1, lsl #4
    add             r0,  r0,  #2    @ store p1,p0,q0,q1 from -2
    vst1.32         {d8[0]},  [r0], r1
    vst1.32         {d16[0]}, [r0], r1
    vst1.32         {d0[0]},  [r0], r1
    vst1.32         {d10[0]}, [r0], r1
    vst1.32         {d8[1]},  [r0], r1
    vst1.32         {d16[1]}, [r0], r1
    vst1.32         {d0[1]},  [r0], r1
    vst1.32         {d10[1]}, [r0], r1
    vst1.32         {d9[0]},  [r0], r1
    vst1.32         {d17[0]}, [r0], r1
    vst1.32         {d1[0]},  [r0], r1
    vst1.32         {d11[0]}, [r0], r1
    vst1.32         {d9[1]},  [r0], r1
    vst1.32         {d17[1]}, [r0], r1
    vst1.32         {d1[1]},  [r0], r1
    vst1.32         {d11[1]}, [r0], r1

    align_pop_regs
    bx              lr
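
@ Chroma uses the short two-tap variant of the filter: only p0 and q0
@ are modified, with the same clipped delta as luma but no p1/q1
@ update and no p2/q2 terms.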
.macro h264_loop_filter_chroma
    vdup.8          q11, r2         @ alpha
    vabd.u8         q13, q8,  q0    @ abs(p0 - q0)
    vabd.u8         q14, q9,  q8    @ abs(p1 - p0)
    vabd.u8         q15, q1,  q0    @ abs(q1 - q0)
    vclt.u8         q13, q13, q11   @ < alpha
    vdup.8          q11, r3         @ beta
    vrshrn.i16      d4,  q2,  #3
    vrshrn.i16      d5,  q3,  #3
    vclt.u8         q14, q14, q11   @ < beta
    vclt.u8         q15, q15, q11   @ < beta
    vaddw.s8        q14, q14, d4
    vaddw.s8        q15, q15, d5
    vsubw.s8        q11, q11, d4
    vsubw.s8        q12, q12, d5
.endm
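
@ void x264_deblock_v_chroma_neon( uint8_t *pix, intptr_t stride,
@                                  int alpha, int beta, int8_t *tc0 )
@ (assumed prototype; same convention as the luma functions)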
function x264_deblock_v_chroma_neon
    h264_loop_filter_start

    sub             r0,  r0,  r1, lsl #1
    vld1.8          {d18,d19}, [r0,:128], r1
    vld1.8          {d16,d17}, [r0,:128], r1
    vld1.8          {d0, d1},  [r0,:128], r1
    vld1.8          {d2, d3},  [r0,:128]

    h264_loop_filter_chroma

    sub             r0,  r0,  r1, lsl #1
    vst1.8          {d16,d17}, [r0,:128], r1
    vst1.8          {d0, d1},  [r0,:128], r1

    bx              lr
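
@ The 16-bit transpose below treats each element as an interleaved
@ Cb/Cr pair, matching x264's NV12-style chroma layout, so one pass
@ filters both chroma components of the vertical edge.  (Inferred from
@ TRANSPOSE4x4_16 and the 8-byte row loads.)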
function x264_deblock_h_chroma_neon
    h264_loop_filter_start

    sub             r0,  r0,  #4    @ start 4 bytes (2 pairs) left of the edge
    vld1.8          {d18}, [r0], r1
    vld1.8          {d16}, [r0], r1
    vld1.8          {d0},  [r0], r1
    vld1.8          {d2},  [r0], r1
    vld1.8          {d19}, [r0], r1
    vld1.8          {d17}, [r0], r1
    vld1.8          {d1},  [r0], r1
    vld1.8          {d3},  [r0], r1

    TRANSPOSE4x4_16 q9,  q8,  q0,  q1

    h264_loop_filter_chroma

    sub             r0,  r0,  r1, lsl #3
    add             r0,  r0,  #2    @ store the p0/q0 pairs from -2

    vst1.32         {d16[0]}, [r0], r1
    vst1.32         {d0[0]},  [r0], r1
    vst1.32         {d16[1]}, [r0], r1
    vst1.32         {d0[1]},  [r0], r1
    vst1.32         {d17[0]}, [r0], r1
    vst1.32         {d1[0]},  [r0], r1
    vst1.32         {d17[1]}, [r0], r1
    vst1.32         {d1[1]},  [r0], r1

    bx              lr
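
@ The 4:2:2 variant covers the taller chroma block by reusing the
@ 8-line horizontal filter; the pointer arithmetic below repositions
@ r0 between the two passes.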
function x264_deblock_h_chroma_422_neon
    h264_loop_filter_start

    sub             r0,  r0,  r1, lsl #3
    add             r0,  r0,  r1, lsr #1
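
@ h264_loop_filter_chroma8 is the 8-pixel-wide (d-register) version of
@ the chroma filter above, used for the narrower MBAFF edges.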
.macro h264_loop_filter_chroma8
    vdup.8          d22, r2         @ alpha
    vabd.u8         d26, d16, d0    @ abs(p0 - q0)
    vabd.u8         d28, d18, d16   @ abs(p1 - p0)
    vabd.u8         d30, d2,  d0    @ abs(q1 - q0)
    vclt.u8         d26, d26, d22   @ < alpha
    vdup.8          d22, r3         @ beta
    vrshrn.i16      d4,  q2,  #3
    vclt.u8         d28, d28, d22   @ < beta
    vclt.u8         d30, d30, d22   @ < beta
    vaddw.s8        q14, q14, d4
    vsubw.s8        q11, q11, d4
.endm
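
@ void x264_deblock_h_chroma_mbaff_neon( uint8_t *pix, intptr_t stride,
@                                        int alpha, int beta, int8_t *tc0 )
@ (assumed prototype) filters the 4-line vertical chroma edge that
@ occurs with macroblock-adaptive frame/field coding.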
function x264_deblock_h_chroma_mbaff_neon
    h264_loop_filter_start

    sub             r0,  r0,  #4    @ start 4 bytes (2 pairs) left of the edge
    vld1.8          {d18}, [r0], r1
    vld1.8          {d16}, [r0], r1
    vld1.8          {d0},  [r0], r1
    vld1.8          {d2},  [r0], r1

    TRANSPOSE4x4_16 d18, d16, d0,  d2

    h264_loop_filter_chroma8

    sub             r0,  r0,  r1, lsl #2
    add             r0,  r0,  #2    @ store the p0/q0 pairs from -2

    vst1.32         {d16[0]}, [r0], r1
    vst1.32         {d0[0]},  [r0], r1
    vst1.32         {d16[1]}, [r0], r1
    vst1.32         {d0[1]},  [r0]

    bx              lr
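
@ deblock_strength builds the boundary-strength table for a macroblock
@ from the nnz, ref and mv arrays.  Per edge position, roughly:
@     bs = nnz ? 2 : (ref differs || |mv delta| >= 4 ? 1 : 0)
@ where 4 quarter-pel units is one full pel (mvy_limit applies to the
@ vertical component).  Register use matching x264's C prototype is
@ assumed: r0 = nnz, r1 = ref, r2 = mv, r3 = bs, with mvy_limit and
@ bframe on the stack.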
function x264_deblock_strength_neon
    sub             ip,  ip,  #(1<<8)-3

    vext.8          q3,  q0,  q1,  #15
    vext.8          q0,  q0,  q2,  #15
    vext.8          q1,  q15, q2,  #12
    vld1.16         {q11}, [r2,:128]!   @ mv + 0x10
    vld1.16         {q3},  [r2,:128]!   @ mv + 0x20
    vld1.16         {q12}, [r2,:128]!   @ mv + 0x30
    vld1.16         {q2},  [r2,:128]!   @ mv + 0x40
    vld1.16         {q13}, [r2,:128]!   @ mv + 0x50
    vext.8          q3,  q3,  q12, #12
    vext.8          q2,  q2,  q13, #12
    vld1.16         {q3},  [r2,:128]!   @ mv + 0x60
    vld1.16         {q14}, [r2,:128]!   @ mv + 0x70
    vld1.16         {q2},  [r2,:128]!   @ mv + 0x80
    vld1.16         {q15}, [r2,:128]!   @ mv + 0x90
    vext.8          q3,  q3,  q14, #12
    vext.8          q2,  q2,  q15, #12
    vabd.s16        q1,  q12, q13
    vabd.s16        q0,  q11, q12
    vabd.s16        q2,  q13, q14
    vabd.s16        q3,  q14, q15
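
@ Each vabd.s16 forms the absolute mv difference across one edge;
@ these differences are then compared against the one-full-pel
@ threshold (4 quarter-pel units) to produce the "mv ? 1 : 0" mask
@ combined below.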
    vext.8          q3,  q0,  q1,  #15
    vext.8          q0,  q0,  q2,  #15
    vext.8          q1,  q15, q2,  #12

    vmin.u8         q8,  q8,  q10       @ mv ? 1 : 0
    vadd.u8         q0,  q0,  q0        @ nnz ? 2 : 0

    vst1.8          {q9}, [r3,:128], ip @ bs[1]
    vst1.8          {q8}, [r3,:128]     @ bs[0]

    bx              lr