1 /*****************************************************************************
2 * deblock.S: arm deblocking
3 *****************************************************************************
4 * Copyright (C) 2009-2012 x264 project
6 * Authors: Mans Rullgard <mans@mansr.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
22 * This program is also available under a commercial proprietary license.
23 * For more information, contact us at licensing@x264.com.
24 *****************************************************************************/
/* h264_loop_filter_start: shared entry sequence for every deblock
 * function below.  NOTE(review): per x264's deblock API the register
 * contract is r0 = pix, r1 = stride, r2 = alpha, r3 = beta, with the
 * tc0 array pointer passed on the stack -- confirm against the full
 * file; the loads of tc0 into ip and the early "bx lr" exits are not
 * visible in this excerpt.  The two AND steps fold the four packed
 * per-edge tc0 bytes in ip together so the function can bail out
 * early when filtering is disabled for the whole edge. */
30 .macro h264_loop_filter_start
34 and ip, ip, ip, lsl #16 @ fold packed tc0 flag bytes pairwise
35 ands ip, ip, ip, lsl #8 @ fold again and set flags; presumably followed by a conditional early return (elided here)
/* align_push_regs: spill the callee-saved NEON registers d8-d15
 * (q4-q7) to a 16-byte-aligned area below sp, per AAPCS, so the
 * filter macros may clobber them.  The sp-alignment arithmetic that
 * computes the adjustment in ip is not visible in this excerpt.
 * NOTE(review): the two vld1 lines at the end restore the same
 * registers and appear to belong to the matching align_pop_regs
 * macro whose .macro/.endm header lines fall outside this view --
 * confirm against the full file. */
39 .macro align_push_regs
43 vst1.64 {d12-d15}, [sp,:128] @ save q6/q7 (128-bit-aligned store)
45 vst1.64 {d8-d11}, [sp,:128] @ save q4/q5
49 vld1.64 {d8-d11}, [sp,:128]! @ restore q4/q5, post-increment sp
50 vld1.64 {d12-d15}, [sp,:128], ip @ restore q6/q7; ip undoes the alignment adjustment
/* h264_loop_filter_luma: H.264 normal (bS < 4) luma edge filter on a
 * batch of 16 pixels, vectorized with NEON.
 * Register layout (set up by the callers below):
 *   q10 = p2, q9 = p1, q8 = p0  |  q0 = q0, q1 = q1, q2 = q2
 *   r2 = alpha, r3 = beta; q4/q5 receive the filtered p1/q1 rows.
 * The filter-enable mask is built from the spec's three conditions:
 * |p0-q0| < alpha, |p1-p0| < beta, |q1-q0| < beta, plus the
 * |p2-p0| / |q2-q0| < beta tests that gate the p1/q1 updates.
 * NOTE(review): many arithmetic lines are elided in this excerpt --
 * in particular a "vdup.8 q11, r3" reloading q11 with beta must
 * precede the "< beta" compares below; confirm against the full file. */
53 .macro h264_loop_filter_luma
54 vdup.8 q11, r2 @ alpha
56 vabd.u8 q6, q8, q0 @ abs(p0 - q0)
58 vabd.u8 q14, q9, q8 @ abs(p1 - p0)
60 vabd.u8 q15, q1, q0 @ abs(q1 - q0)
62 vclt.u8 q6, q6, q11 @ < alpha
65 vclt.u8 q14, q14, q11 @ < beta (q11 presumably reloaded with beta in an elided line)
66 vclt.u8 q15, q15, q11 @ < beta
68 vabd.u8 q4, q10, q8 @ abs(p2 - p0)
70 vabd.u8 q5, q2, q0 @ abs(q2 - q0)
71 vclt.u8 q4, q4, q11 @ < beta (gates the p1 update)
73 vclt.u8 q5, q5, q11 @ < beta (gates the q1 update)
80 vhadd.u8 q10, q10, q14 @ halving add: part of the p1'/q1' averaging (surrounding steps elided)
93 vsubw.u8 q10, q10, d17 @ widen to 16-bit and subtract a p0 half-row
97 vaddw.u8 q10, q10, d19 @ add a p1 half-row (delta accumulation, partially elided)
100 vrshrn.i16 d4, q2, #3 @ rounding narrow: (x + 4) >> 3 for low half
101 vrshrn.i16 d5, q10, #3 @ rounding narrow: (x + 4) >> 3 for high half
111 vaddw.s8 q14, q14, d4 @ apply signed delta (clipped to tc in elided lines) to one side
113 vsubw.s8 q11, q11, d4 @ subtract delta from the opposite side, low half
114 vsubw.s8 q12, q12, d5 @ subtract delta from the opposite side, high half
/* void x264_deblock_v_luma_neon(uint8_t *pix, intptr_t stride,
 *                               int alpha, int beta, int8_t *tc0)
 * Filters a horizontal luma edge (pixels stacked vertically across it),
 * so each 16-byte row load naturally gives 16 lanes -- no transpose
 * needed.  r0 enters pointing at the q0 row.
 * NOTE(review): the align_push_regs/align_pop_regs invocations and the
 * final "bx lr" are elided from this excerpt; confirm in the full file. */
121 function x264_deblock_v_luma_neon
122 h264_loop_filter_start
124 vld1.64 {d0, d1}, [r0,:128], r1 @ q0 row
125 vld1.64 {d2, d3}, [r0,:128], r1 @ q1 row
126 vld1.64 {d4, d5}, [r0,:128], r1 @ q2 row
127 sub r0, r0, r1, lsl #2 @ r0 -= 6*stride total: rewind from
128 sub r0, r0, r1, lsl #1 @ q2+1 back to the p2 row
129 vld1.64 {d20,d21}, [r0,:128], r1 @ p2 row -> q10
130 vld1.64 {d18,d19}, [r0,:128], r1 @ p1 row -> q9
131 vld1.64 {d16,d17}, [r0,:128], r1 @ p0 row -> q8; r0 back at q0 row
135 h264_loop_filter_luma
137 sub r0, r0, r1, lsl #1 @ rewind to the p1 row
138 vst1.64 {d8, d9}, [r0,:128], r1 @ store filtered p1 (q4)
139 vst1.64 {d16,d17}, [r0,:128], r1 @ store filtered p0 (q8)
140 vst1.64 {d0, d1}, [r0,:128], r1 @ store filtered q0 (q0)
141 vst1.64 {d10,d11}, [r0,:128] @ store filtered q1 (q5)
/* void x264_deblock_h_luma_neon(uint8_t *pix, intptr_t stride,
 *                               int alpha, int beta, int8_t *tc0)
 * Filters a vertical luma edge: loads 16 rows of 8 bytes spanning the
 * edge, transposes so the edge-perpendicular pixels land in vector
 * lanes, runs the same filter core as the vertical variant, then
 * transposes the four modified columns (p1,p0,q0,q1 = q4,q8,q0,q5)
 * back and scatters them as 4-byte stores, one per row.
 * NOTE(review): the initial pointer adjustment that backs r0 up to the
 * left of the edge (and the matching re-adjustment before the stores)
 * is elided from this excerpt -- confirm offsets in the full file. */
147 function x264_deblock_h_luma_neon
148 h264_loop_filter_start
151 vld1.64 {d6}, [r0], r1 @ rows 0-7 fill the low d register of
152 vld1.64 {d20}, [r0], r1 @ each q pair ...
153 vld1.64 {d18}, [r0], r1
154 vld1.64 {d16}, [r0], r1
155 vld1.64 {d0}, [r0], r1
156 vld1.64 {d2}, [r0], r1
157 vld1.64 {d4}, [r0], r1
158 vld1.64 {d26}, [r0], r1
159 vld1.64 {d7}, [r0], r1 @ ... rows 8-15 fill the high halves
160 vld1.64 {d21}, [r0], r1
161 vld1.64 {d19}, [r0], r1
162 vld1.64 {d17}, [r0], r1
163 vld1.64 {d1}, [r0], r1
164 vld1.64 {d3}, [r0], r1
165 vld1.64 {d5}, [r0], r1
166 vld1.64 {d27}, [r0], r1
168 TRANSPOSE8x8 q3, q10, q9, q8, q0, q1, q2, q13 @ columns -> lanes: q10=p2 q9=p1 q8=p0 q0=q0 q1=q1 q2=q2
172 h264_loop_filter_luma
174 TRANSPOSE4x4 q4, q8, q0, q5 @ lanes -> columns for the 4 modified pixels p1,p0,q0,q1
176 sub r0, r0, r1, lsl #4 @ rewind 16 rows to the first loaded row
178 vst1.32 {d8[0]}, [r0], r1 @ one 32-bit (p1 p0 q0 q1) group per row
179 vst1.32 {d16[0]}, [r0], r1
180 vst1.32 {d0[0]}, [r0], r1
181 vst1.32 {d10[0]}, [r0], r1
182 vst1.32 {d8[1]}, [r0], r1
183 vst1.32 {d16[1]}, [r0], r1
184 vst1.32 {d0[1]}, [r0], r1
185 vst1.32 {d10[1]}, [r0], r1
186 vst1.32 {d9[0]}, [r0], r1
187 vst1.32 {d17[0]}, [r0], r1
188 vst1.32 {d1[0]}, [r0], r1
189 vst1.32 {d11[0]}, [r0], r1
190 vst1.32 {d9[1]}, [r0], r1
191 vst1.32 {d17[1]}, [r0], r1
192 vst1.32 {d1[1]}, [r0], r1
193 vst1.32 {d11[1]}, [r0], r1
// h264_loop_filter_chroma: H.264 normal (bS < 4) chroma edge filter.
// Register layout (set up by the callers below):
//   q9 = p1, q8 = p0  |  q0 = q0, q1 = q1;  r2 = alpha, r3 = beta.
// Chroma only modifies p0/q0, so no p2/q2 tests are needed.  The two
// vrshrn lines are scheduled between the threshold compares (delta
// numerators computed in elided lines are rounded/narrowed here).
// NOTE(review): the tc clamp and mask-select steps are elided from
// this excerpt -- confirm against the full file.
199 .macro h264_loop_filter_chroma
200 vdup.8 q11, r2 // alpha
202 vabd.u8 q13, q8, q0 // abs(p0 - q0)
203 vabd.u8 q14, q9, q8 // abs(p1 - p0)
209 vabd.u8 q15, q1, q0 // abs(q1 - q0)
212 vclt.u8 q13, q13, q11 // < alpha
215 vdup.8 q11, r3 // beta
217 vrshrn.i16 d4, q2, #3 // rounding narrow: (delta + 4) >> 3, low half
218 vrshrn.i16 d5, q3, #3 // rounding narrow: (delta + 4) >> 3, high half
219 vclt.u8 q14, q14, q11 // < beta
221 vclt.u8 q15, q15, q11 // < beta
232 vaddw.s8 q14, q14, d4 // p0 + delta, low half (widened)
233 vaddw.s8 q15, q15, d5 // p0 + delta, high half
234 vsubw.s8 q11, q11, d4 // q0 - delta, low half
235 vsubw.s8 q12, q12, d5 // q0 - delta, high half
/* void x264_deblock_v_chroma_neon(uint8_t *pix, intptr_t stride,
 *                                 int alpha, int beta, int8_t *tc0)
 * Filters a horizontal chroma edge.  vld2.8 deinterleaves even/odd
 * bytes into separate d registers -- presumably the two interleaved
 * chroma channels (NV12-style U/V); TODO confirm against the caller.
 * Only p0 and q0 are modified, so just those two rows are stored back.
 * NOTE(review): the final "bx lr" is elided from this excerpt. */
242 function x264_deblock_v_chroma_neon
243 h264_loop_filter_start
245 sub r0, r0, r1, lsl #1 @ rewind 2 rows: r0 -> p1 row
246 vld2.8 {d18,d19}, [r0,:128], r1 @ p1 -> q9 (channels deinterleaved)
247 vld2.8 {d16,d17}, [r0,:128], r1 @ p0 -> q8
248 vld2.8 {d0, d1}, [r0,:128], r1 @ q0 -> q0
249 vld2.8 {d2, d3}, [r0,:128] @ q1 -> q1
251 h264_loop_filter_chroma
253 sub r0, r0, r1, lsl #1 @ rewind to the p0 row
254 vst2.8 {d16,d17}, [r0,:128], r1 @ store filtered p0 (re-interleaved)
255 vst2.8 {d0, d1}, [r0,:128], r1 @ store filtered q0
/* void x264_deblock_h_chroma_neon(uint8_t *pix, intptr_t stride,
 *                                 int alpha, int beta, int8_t *tc0)
 * Filters a vertical chroma edge: loads 8 rows of 8 bytes straddling
 * the edge, then (in lines elided from this excerpt) transposes so the
 * edge-perpendicular pixels sit in vector lanes, filters, transposes
 * back, and rewrites all 8 rows.
 * NOTE(review): the initial leftward pointer adjustment before the
 * loads, the transpose steps around the filter call, and the final
 * "bx lr" are all elided -- confirm against the full file. */
260 function x264_deblock_h_chroma_neon
261 h264_loop_filter_start
264 vld1.8 {d18}, [r0], r1 @ row 0
265 vld1.8 {d16}, [r0], r1 @ row 1
266 vld1.8 {d0}, [r0], r1 @ row 2
267 vld1.8 {d2}, [r0], r1 @ row 3
268 vld1.8 {d19}, [r0], r1 @ row 4
269 vld1.8 {d17}, [r0], r1 @ row 5
270 vld1.8 {d1}, [r0], r1 @ row 6
271 vld1.8 {d3}, [r0], r1 @ row 7
283 h264_loop_filter_chroma
295 sub r0, r0, r1, lsl #3 @ rewind 8 rows to the first loaded row
296 vst1.8 {d18}, [r0], r1 @ store row 0 (transposed back in elided lines)
297 vst1.8 {d16}, [r0], r1 @ store row 1
298 vst1.8 {d0}, [r0], r1 @ store row 2
299 vst1.8 {d2}, [r0], r1 @ store row 3
300 vst1.8 {d19}, [r0], r1 @ store row 4
301 vst1.8 {d17}, [r0], r1 @ store row 5
302 vst1.8 {d1}, [r0], r1 @ store row 6
303 vst1.8 {d3}, [r0], r1 @ store row 7