2 * Copyright (c) 2016 Google Inc.
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "libavutil/aarch64/asm.S"
25 // The main loop filter macro is templated and can produce filters for
26 // vectors of 8 or 16 bytes. The register mapping throughout the filter
27 // is close to identical to the arm version (please try to maintain this,
28 // if either is changed!). When the arm version uses e.g. d20 for the
29 // input variable p3, the aarch64 version uses v20.8b or v20.16b, depending
32 // The number of elements in the vector is passed in via the macro parameter
33 // \sz, which is either .8b or .16b. For simple instructions that don't
34 // lengthen or narrow things, this can easily be templated like this:
35 // uabd v4\sz, v20\sz, v21\sz
37 // For instructions that lengthen or narrow content, the arm version would
38 // have used q registers. For these instructions, we have macros that expand
39 // into either a single e.g. uaddl instruction, or into a uaddl + uaddl2
40 // pair, depending on the \sz parameter. Wherever the arm version would have
41 // used a q register, these macros instead take two v registers, i.e. q3
42 // is mapped to v6+v7. For the case with 8 byte input vectors, such a
43 // lengthening operation is only stored in v6.8h (what was in q3 in the arm
44 // case), while the 16 byte input vectors will use v6.8h + v7.8h.
45 // Such a macro invocation would look like this:
46 // uaddl_sz v8.8h, v9.8h, v17, v18, \sz
48 // That is, in the 8 byte input vector case, the second register in these
49 // register pairs will be unused.
50 // Unfortunately, this makes the code quite hard to read. For readability,
51 // see the arm version instead.
54 .macro uabdl_sz dst1, dst2, in1, in2, sz
55 uabdl \dst1, \in1\().8b, \in2\().8b
57 uabdl2 \dst2, \in1\().16b, \in2\().16b
61 .macro add_sz dst1, dst2, in1, in2, in3, in4, sz
68 .macro sub_sz dst1, dst2, in1, in2, in3, in4, sz
75 .macro uaddw_sz dst1, dst2, in1, in2, in3, sz
76 uaddw \dst1, \in1, \in3\().8b
78 uaddw2 \dst2, \in2, \in3\().16b
82 .macro usubw_sz dst1, dst2, in1, in2, in3, sz
83 usubw \dst1, \in1, \in3\().8b
85 usubw2 \dst2, \in2, \in3\().16b
89 .macro cmhs_sz dst1, dst2, in1, in2, in3, in4, sz
90 cmhs \dst1, \in1, \in3
92 cmhs \dst2, \in2, \in4
96 .macro xtn_sz dst, in1, in2, sz
99 xtn2 \dst\().16b, \in2
103 .macro usubl_sz dst1, dst2, in1, in2, sz
104 usubl \dst1, \in1\().8b, \in2\().8b
106 usubl2 \dst2, \in1\().16b, \in2\().16b
110 .macro sqxtn_sz dst, in1, in2, sz
111 sqxtn \dst\().8b, \in1
113 sqxtn2 \dst\().16b, \in2
117 .macro sqxtun_sz dst, in1, in2, sz
118 sqxtun \dst\().8b, \in1
120 sqxtun2 \dst\().16b, \in2
124 .macro mul_sz dst1, dst2, in1, in2, in3, in4, sz
125 mul \dst1, \in1, \in3
127 mul \dst2, \in2, \in4
131 .macro saddw_sz dst1, dst2, in1, in2, in3, sz
132 saddw \dst1, \in1, \in3\().8b
134 saddw2 \dst2, \in2, \in3\().16b
138 .macro ssubw_sz dst1, dst2, in1, in2, in3, sz
139 ssubw \dst1, \in1, \in3\().8b
141 ssubw2 \dst2, \in2, \in3\().16b
145 .macro uxtl_sz dst1, dst2, in, sz
146 uxtl \dst1, \in\().8b
148 uxtl2 \dst2, \in\().16b
152 .macro uaddl_sz dst1, dst2, in1, in2, sz
153 uaddl \dst1, \in1\().8b, \in2\().8b
155 uaddl2 \dst2, \in1\().16b, \in2\().16b
159 .macro rshrn_sz dst, in1, in2, shift, sz
160 rshrn \dst\().8b, \in1, \shift
162 rshrn2 \dst\().16b, \in2, \shift
166 .macro ushll_sz dst1, dst2, in, shift, sz
167 ushll \dst1, \in\().8b, \shift
169 ushll2 \dst2, \in\().16b, \shift
173 // The input to and output from this macro is in the registers v16-v31,
174 // and v0-v7 are used as scratch registers.
175 // p7 = v16 .. p3 = v20, p0 = v23, q0 = v24, q3 = v27, q7 = v31
176 // Depending on the width of the loop filter, we either use v16-v19
177 // and v28-v31 as temp registers, or v8-v15.
178 // When comparing to the arm version, tmpq1 == tmp1 + tmp2,
179 // tmpq2 == tmp3 + tmp4, etc.
180 .macro loop_filter wd, sz, mix, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8
192 ushr v1.8h, v0.8h, #8 // E
194 bic v0.8h, #255, lsl 8 // E
196 trn1 v2.2d, v2.2d, v4.2d
197 trn1 v3.2d, v3.2d, v5.2d
200 uabd v4\sz, v20\sz, v21\sz // abs(p3 - p2)
201 uabd v5\sz, v21\sz, v22\sz // abs(p2 - p1)
202 uabd v6\sz, v22\sz, v23\sz // abs(p1 - p0)
203 uabd v7\sz, v24\sz, v25\sz // abs(q0 - q1)
204 uabd \tmp1\sz, v25\sz, v26\sz // abs(q1 - q2)
205 uabd \tmp2\sz, v26\sz, v27\sz // abs(q2 - q3)
206 umax v4\sz, v4\sz, v5\sz
207 umax v5\sz, v6\sz, v7\sz
208 umax \tmp1\sz, \tmp1\sz, \tmp2\sz
209 uabdl_sz v6.8h, v7.8h, v23, v24, \sz // abs(p0 - q0)
210 umax v4\sz, v4\sz, v5\sz
211 add_sz v6.8h, v7.8h, v6.8h, v7.8h, v6.8h, v7.8h, \sz // abs(p0 - q0) * 2
212 uabd v5\sz, v22\sz, v25\sz // abs(p1 - q1)
213 umax v4\sz, v4\sz, \tmp1\sz // max(abs(p3 - p2), ..., abs(q2 - q3))
214 ushr v5\sz, v5\sz, #1
215 cmhs v4\sz, v2\sz, v4\sz // max(abs()) <= I
216 uaddw_sz v6.8h, v7.8h, v6.8h, v7.8h, v5, \sz // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1
217 cmhs_sz v6.8h, v7.8h, v0.8h, v1.8h, v6.8h, v7.8h, \sz
218 xtn_sz v5, v6.8h, v7.8h, \sz
219 and v4\sz, v4\sz, v5\sz // fm
226 // If no pixels need filtering, just exit as soon as possible
232 uabd v6\sz, v20\sz, v23\sz // abs(p3 - p0)
233 uabd v2\sz, v21\sz, v23\sz // abs(p2 - p0)
234 uabd v1\sz, v22\sz, v23\sz // abs(p1 - p0)
235 uabd \tmp1\sz, v25\sz, v24\sz // abs(q1 - q0)
236 uabd \tmp2\sz, v26\sz, v24\sz // abs(q2 - q0)
237 uabd \tmp3\sz, v27\sz, v24\sz // abs(q3 - q0)
238 umax v6\sz, v6\sz, v2\sz
239 umax v1\sz, v1\sz, \tmp1\sz
240 umax \tmp2\sz, \tmp2\sz, \tmp3\sz
242 uabd v7\sz, v16\sz, v23\sz // abs(p7 - p0)
243 umax v6\sz, v6\sz, v1\sz
244 uabd v2\sz, v17\sz, v23\sz // abs(p6 - p0)
245 umax v6\sz, v6\sz, \tmp2\sz
246 uabd v1\sz, v18\sz, v23\sz // abs(p5 - p0)
247 cmhs v6\sz, v0\sz, v6\sz // flat8in
248 uabd v8\sz, v19\sz, v23\sz // abs(p4 - p0)
249 and v6\sz, v6\sz, v4\sz // flat8in && fm
250 uabd v9\sz, v28\sz, v24\sz // abs(q4 - q0)
251 bic v4\sz, v4\sz, v6\sz // fm && !flat8in
252 uabd v10\sz, v29\sz, v24\sz // abs(q5 - q0)
253 uabd v11\sz, v30\sz, v24\sz // abs(q6 - q0)
254 uabd v12\sz, v31\sz, v24\sz // abs(q7 - q0)
256 umax v7\sz, v7\sz, v2\sz
257 umax v1\sz, v1\sz, v8\sz
258 umax v9\sz, v9\sz, v10\sz
259 umax v11\sz, v11\sz, v12\sz
260 // The rest of the calculation of flat8out is interleaved below
262 // The rest of the calculation of flat8in is interleaved below
266 // Calculate the normal inner loop filter for 2 or 4 pixels
267 uabd v5\sz, v22\sz, v23\sz // abs(p1 - p0)
269 umax v7\sz, v7\sz, v1\sz
270 umax v9\sz, v9\sz, v11\sz
272 umax v6\sz, v6\sz, v1\sz
274 uabd v1\sz, v25\sz, v24\sz // abs(q1 - q0)
276 umax v7\sz, v7\sz, v9\sz
278 umax v6\sz, v6\sz, \tmp2\sz
280 usubl_sz \tmp1\().8h, \tmp2\().8h, v22, v25, \sz // p1 - q1
281 umax v5\sz, v5\sz, v1\sz // max(abs(p1 - p0), abs(q1 - q0))
285 usubl_sz \tmp3\().8h, \tmp4\().8h, v24, v23, \sz // q0 - p0
288 cmhs v6\sz, v0\sz, v6\sz // flat8in
293 cmhi v5\sz, v5\sz, v3\sz // hev
295 // If a 4/8 or 8/4 mix is used, clear the relevant half of v6
297 and v6\sz, v6\sz, v1.16b
299 and v6\sz, v6\sz, v4\sz // flat8in && fm
301 sqxtn_sz \tmp1, \tmp1\().8h, \tmp2\().8h, \sz // av_clip_int8(p1 - q1)
303 cmhs v7\sz, v0\sz, v7\sz // flat8out
305 bic v4\sz, v4\sz, v6\sz // fm && !flat8in
307 mvn v5\sz, v5\sz // !hev
309 and v7\sz, v7\sz, v6\sz // flat8out && flat8in && fm
311 and v5\sz, v5\sz, v4\sz // !hev && fm && !flat8in
313 mul_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp5\().8h, \tmp5\().8h, \sz // 3 * (q0 - p0)
314 bic \tmp1\sz, \tmp1\sz, v5\sz // if (!hev) av_clip_int8 = 0
316 saddw_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp1, \sz // 3 * (q0 - p0) [+ av_clip_int8(p1 - q1)]
318 sqxtn_sz \tmp1, \tmp3\().8h, \tmp4\().8h, \sz // f
320 bic v6\sz, v6\sz, v7\sz // fm && flat8in && !flat8out
323 sqadd \tmp3\sz, \tmp1\sz, v2\sz // FFMIN(f + 4, 127)
324 sqadd \tmp4\sz, \tmp1\sz, v3\sz // FFMIN(f + 3, 127)
325 uxtl_sz v0.8h, v1.8h, v23, \sz // p0
326 sshr \tmp3\sz, \tmp3\sz, #3 // f1
327 sshr \tmp4\sz, \tmp4\sz, #3 // f2
329 uxtl_sz v2.8h, v3.8h, v24, \sz // q0
330 saddw_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp4, \sz // p0 + f2
331 ssubw_sz v2.8h, v3.8h, v2.8h, v3.8h, \tmp3, \sz // q0 - f1
332 sqxtun_sz v0, v0.8h, v1.8h, \sz // out p0
333 sqxtun_sz v1, v2.8h, v3.8h, \sz // out q0
334 srshr \tmp3\sz, \tmp3\sz, #1 // f = (f1 + 1) >> 1
335 bit v23\sz, v0\sz, v4\sz // if (fm && !flat8in)
336 bit v24\sz, v1\sz, v4\sz
338 uxtl_sz v0.8h, v1.8h, v22, \sz // p1
339 uxtl_sz v2.8h, v3.8h, v25, \sz // q1
340 saddw_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp3, \sz // p1 + f
341 ssubw_sz v2.8h, v3.8h, v2.8h, v3.8h, \tmp3, \sz // q1 - f
342 sqxtun_sz v0, v0.8h, v1.8h, \sz // out p1
343 sqxtun_sz v2, v2.8h, v3.8h, \sz // out q1
344 bit v22\sz, v0\sz, v5\sz // if (!hev && fm && !flat8in)
345 bit v25\sz, v2\sz, v5\sz
353 // If no pixels need flat8in, jump to flat8out
354 // (or to a writeout of the inner 4 pixels, for wd=8)
358 uaddl_sz \tmp1\().8h, \tmp2\().8h, v20, v21, \sz
359 uaddl_sz \tmp3\().8h, \tmp4\().8h, v22, v25, \sz
360 uaddl_sz \tmp5\().8h, \tmp6\().8h, v20, v22, \sz
361 uaddl_sz \tmp7\().8h, \tmp8\().8h, v23, v26, \sz
362 add_sz v0.8h, v1.8h, \tmp1\().8h, \tmp2\().8h, \tmp1\().8h, \tmp2\().8h, \sz
363 uaddw_sz v0.8h, v1.8h, v0.8h, v1.8h, v23, \sz
364 uaddw_sz v0.8h, v1.8h, v0.8h, v1.8h, v24, \sz
365 add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp5\().8h, \tmp6\().8h, \sz
366 sub_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp1\().8h, \tmp2\().8h, \sz
367 sub_sz \tmp7\().8h, \tmp8\().8h, \tmp7\().8h, \tmp8\().8h, \tmp5\().8h, \tmp6\().8h, \sz
368 rshrn_sz v2, v0.8h, v1.8h, #3, \sz // out p2
370 add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp3\().8h, \tmp4\().8h, \sz
371 uaddl_sz \tmp1\().8h, \tmp2\().8h, v20, v23, \sz
372 uaddl_sz \tmp3\().8h, \tmp4\().8h, v24, v27, \sz
373 rshrn_sz v3, v0.8h, v1.8h, #3, \sz // out p1
375 add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp7\().8h, \tmp8\().8h, \sz
376 sub_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp1\().8h, \tmp2\().8h, \sz
377 uaddl_sz \tmp5\().8h, \tmp6\().8h, v21, v24, \sz
378 uaddl_sz \tmp7\().8h, \tmp8\().8h, v25, v27, \sz
379 rshrn_sz v4, v0.8h, v1.8h, #3, \sz // out p0
381 add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp3\().8h, \tmp4\().8h, \sz
382 sub_sz \tmp7\().8h, \tmp8\().8h, \tmp7\().8h, \tmp8\().8h, \tmp5\().8h, \tmp6\().8h, \sz
383 uaddl_sz \tmp1\().8h, \tmp2\().8h, v22, v25, \sz
384 uaddl_sz \tmp3\().8h, \tmp4\().8h, v26, v27, \sz
385 rshrn_sz v5, v0.8h, v1.8h, #3, \sz // out q0
387 add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp7\().8h, \tmp8\().8h, \sz
388 sub_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp1\().8h, \tmp2\().8h, \sz
389 rshrn_sz \tmp5, v0.8h, v1.8h, #3, \sz // out q1
391 add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp3\().8h, \tmp4\().8h, \sz
392 // The output here is written back into the input registers. This doesn't
393 // matter for the flat8part below, since we only update those pixels
394 // which won't be touched below.
395 bit v21\sz, v2\sz, v6\sz
396 bit v22\sz, v3\sz, v6\sz
397 bit v23\sz, v4\sz, v6\sz
398 rshrn_sz \tmp6, v0.8h, v1.8h, #3, \sz // out q2
399 bit v24\sz, v5\sz, v6\sz
400 bit v25\sz, \tmp5\sz, v6\sz
401 bit v26\sz, \tmp6\sz, v6\sz
405 orr v2\sz, v6\sz, v7\sz
411 // If no pixels needed flat8in nor flat8out, jump to a
412 // writeout of the inner 4 pixels
419 // If no pixels need flat8out, jump to a writeout of the inner 6 pixels
423 // This writes all outputs into v2-v17 (skipping v6 and v16).
424 // If this part is skipped, the output is read from v21-v26 (which is the input
426 ushll_sz v0.8h, v1.8h, v16, #3, \sz // 8 * v16
427 usubw_sz v0.8h, v1.8h, v0.8h, v1.8h, v16, \sz // 7 * v16
428 uaddw_sz v0.8h, v1.8h, v0.8h, v1.8h, v17, \sz
429 uaddl_sz v8.8h, v9.8h, v17, v18, \sz
430 uaddl_sz v10.8h, v11.8h, v19, v20, \sz
431 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v8.8h, v9.8h, \sz
432 uaddl_sz v8.8h, v9.8h, v16, v17, \sz
433 uaddl_sz v12.8h, v13.8h, v21, v22, \sz
434 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
435 uaddl_sz v10.8h, v11.8h, v18, v25, \sz
436 uaddl_sz v14.8h, v15.8h, v23, v24, \sz
437 sub_sz v10.8h, v11.8h, v10.8h, v11.8h, v8.8h, v9.8h, \sz
438 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v12.8h, v13.8h, \sz
439 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
440 uaddl_sz v12.8h, v13.8h, v16, v18, \sz
441 uaddl_sz v14.8h, v15.8h, v19, v26, \sz
442 rshrn_sz v2, v0.8h, v1.8h, #4, \sz
444 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
445 uaddl_sz v8.8h, v9.8h, v16, v19, \sz
446 uaddl_sz v10.8h, v11.8h, v20, v27, \sz
447 sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
448 bif v2\sz, v17\sz, v7\sz
449 rshrn_sz v3, v0.8h, v1.8h, #4, \sz
451 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
452 uaddl_sz v12.8h, v13.8h, v16, v20, \sz
453 uaddl_sz v14.8h, v15.8h, v21, v28, \sz
454 sub_sz v10.8h, v11.8h, v10.8h, v11.8h, v8.8h, v9.8h, \sz
455 bif v3\sz, v18\sz, v7\sz
456 rshrn_sz v4, v0.8h, v1.8h, #4, \sz
458 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
459 uaddl_sz v8.8h, v9.8h, v16, v21, \sz
460 uaddl_sz v10.8h, v11.8h, v22, v29, \sz
461 sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
462 bif v4\sz, v19\sz, v7\sz
463 rshrn_sz v5, v0.8h, v1.8h, #4, \sz
465 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
466 uaddl_sz v12.8h, v13.8h, v16, v22, \sz
467 uaddl_sz v14.8h, v15.8h, v23, v30, \sz
468 sub_sz v10.8h, v11.8h, v10.8h, v11.8h, v8.8h, v9.8h, \sz
469 bif v5\sz, v20\sz, v7\sz
470 rshrn_sz v6, v0.8h, v1.8h, #4, \sz
472 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
473 uaddl_sz v10.8h, v11.8h, v16, v23, \sz
474 sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
475 uaddl_sz v12.8h, v13.8h, v24, v31, \sz
476 bif v6\sz, v21\sz, v7\sz
477 rshrn_sz v8, v0.8h, v1.8h, #4, \sz
479 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
480 sub_sz v10.8h, v11.8h, v12.8h, v13.8h, v10.8h, v11.8h, \sz
481 uaddl_sz v12.8h, v13.8h, v17, v24, \sz
482 uaddl_sz v14.8h, v15.8h, v25, v31, \sz
483 bif v8\sz, v22\sz, v7\sz
484 rshrn_sz v9, v0.8h, v1.8h, #4, \sz
486 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
487 sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
488 uaddl_sz v12.8h, v13.8h, v26, v31, \sz
489 bif v9\sz, v23\sz, v7\sz
490 rshrn_sz v10, v0.8h, v1.8h, #4, \sz
492 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
493 uaddl_sz v14.8h, v15.8h, v18, v25, \sz
494 uaddl_sz v18.8h, v19.8h, v19, v26, \sz
495 sub_sz v12.8h, v13.8h, v12.8h, v13.8h, v14.8h, v15.8h, \sz
496 uaddl_sz v14.8h, v15.8h, v27, v31, \sz
497 bif v10\sz, v24\sz, v7\sz
498 rshrn_sz v11, v0.8h, v1.8h, #4, \sz
500 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v12.8h, v13.8h, \sz
501 uaddl_sz v12.8h, v13.8h, v20, v27, \sz
502 sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v18.8h, v19.8h, \sz
503 uaddl_sz v18.8h, v19.8h, v28, v31, \sz
504 bif v11\sz, v25\sz, v7\sz
505 sub_sz v18.8h, v19.8h, v18.8h, v19.8h, v12.8h, v13.8h, \sz
506 rshrn_sz v12, v0.8h, v1.8h, #4, \sz
508 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
509 uaddl_sz v14.8h, v15.8h, v21, v28, \sz
510 uaddl_sz v20.8h, v21.8h, v29, v31, \sz
511 bif v12\sz, v26\sz, v7\sz
512 rshrn_sz v13, v0.8h, v1.8h, #4, \sz
514 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v18.8h, v19.8h, \sz
515 sub_sz v20.8h, v21.8h, v20.8h, v21.8h, v14.8h, v15.8h, \sz
516 uaddl_sz v18.8h, v19.8h, v22, v29, \sz
517 uaddl_sz v22.8h, v23.8h, v30, v31, \sz
518 bif v13\sz, v27\sz, v7\sz
519 rshrn_sz v14, v0.8h, v1.8h, #4, \sz
521 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v20.8h, v21.8h, \sz
522 sub_sz v22.8h, v23.8h, v22.8h, v23.8h, v18.8h, v19.8h, \sz
523 bif v14\sz, v28\sz, v7\sz
524 rshrn_sz v15, v0.8h, v1.8h, #4, \sz
526 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v22.8h, v23.8h, \sz
527 bif v15\sz, v29\sz, v7\sz
528 rshrn_sz v17, v0.8h, v1.8h, #4, \sz
529 bif v17\sz, v30\sz, v7\sz
533 // For wd <= 8, we use v16-v19 and v28-v31 for temp registers,
534 // while we need those for inputs/outputs in wd=16 and use v8-v15
535 // for temp registers there instead.
536 function vp9_loop_filter_4
537 loop_filter 4, .8b, 0, v16, v17, v18, v19, v28, v29, v30, v31
543 function vp9_loop_filter_4_16b_mix_44
544 loop_filter 4, .16b, 44, v16, v17, v18, v19, v28, v29, v30, v31
550 function vp9_loop_filter_8
551 loop_filter 8, .8b, 0, v16, v17, v18, v19, v28, v29, v30, v31
561 function vp9_loop_filter_8_16b_mix
562 loop_filter 8, .16b, 88, v16, v17, v18, v19, v28, v29, v30, v31
572 function vp9_loop_filter_16
573 loop_filter 16, .8b, 0, v8, v9, v10, v11, v12, v13, v14, v15
583 ldp d8, d9, [sp], 0x10
584 ldp d10, d11, [sp], 0x10
585 ldp d12, d13, [sp], 0x10
586 ldp d14, d15, [sp], 0x10
590 function vp9_loop_filter_16_16b
591 loop_filter 16, .16b, 0, v8, v9, v10, v11, v12, v13, v14, v15
601 ldp d8, d9, [sp], 0x10
602 ldp d10, d11, [sp], 0x10
603 ldp d12, d13, [sp], 0x10
604 ldp d14, d15, [sp], 0x10
612 .macro loop_filter_4_16b_mix mix
613 bl vp9_loop_filter_4_16b_mix_\mix
621 .macro loop_filter_8_16b_mix mix
623 mov x11, #0xffffffff00000000
625 mov x11, #0x00000000ffffffff
627 mov x11, #0xffffffffffffffff
629 bl vp9_loop_filter_8_16b_mix
633 .macro loop_filter_16
634 bl vp9_loop_filter_16
640 .macro loop_filter_16_16b
641 bl vp9_loop_filter_16_16b
648 // The public functions in this file have got the following signature:
649 // void loop_filter(uint8_t *dst, ptrdiff_t stride, int mb_lim, int lim, int hev_thr);
651 function ff_vp9_loop_filter_v_4_8_neon, export=1
653 sub x9, x0, x1, lsl #2
654 ld1 {v20.8b}, [x9], x1 // p3
655 ld1 {v24.8b}, [x0], x1 // q0
656 ld1 {v21.8b}, [x9], x1 // p2
657 ld1 {v25.8b}, [x0], x1 // q1
658 ld1 {v22.8b}, [x9], x1 // p1
659 ld1 {v26.8b}, [x0], x1 // q2
660 ld1 {v23.8b}, [x9], x1 // p0
661 ld1 {v27.8b}, [x0], x1 // q3
662 sub x0, x0, x1, lsl #2
663 sub x9, x9, x1, lsl #1
667 st1 {v22.8b}, [x9], x1
668 st1 {v24.8b}, [x0], x1
669 st1 {v23.8b}, [x9], x1
670 st1 {v25.8b}, [x0], x1
675 function ff_vp9_loop_filter_v_44_16_neon, export=1
677 sub x9, x0, x1, lsl #2
678 ld1 {v20.16b}, [x9], x1 // p3
679 ld1 {v24.16b}, [x0], x1 // q0
680 ld1 {v21.16b}, [x9], x1 // p2
681 ld1 {v25.16b}, [x0], x1 // q1
682 ld1 {v22.16b}, [x9], x1 // p1
683 ld1 {v26.16b}, [x0], x1 // q2
684 ld1 {v23.16b}, [x9], x1 // p0
685 ld1 {v27.16b}, [x0], x1 // q3
686 sub x0, x0, x1, lsl #2
687 sub x9, x9, x1, lsl #1
689 loop_filter_4_16b_mix 44
691 st1 {v22.16b}, [x9], x1
692 st1 {v24.16b}, [x0], x1
693 st1 {v23.16b}, [x9], x1
694 st1 {v25.16b}, [x0], x1
699 function ff_vp9_loop_filter_h_4_8_neon, export=1
702 add x0, x9, x1, lsl #2
703 ld1 {v20.8b}, [x9], x1
704 ld1 {v24.8b}, [x0], x1
705 ld1 {v21.8b}, [x9], x1
706 ld1 {v25.8b}, [x0], x1
707 ld1 {v22.8b}, [x9], x1
708 ld1 {v26.8b}, [x0], x1
709 ld1 {v23.8b}, [x9], x1
710 ld1 {v27.8b}, [x0], x1
712 sub x9, x9, x1, lsl #2
713 sub x0, x0, x1, lsl #2
714 // Move x0/x9 forward by 2 pixels; we don't need to rewrite the
715 // outermost 2 pixels since they aren't changed.
719 transpose_8x8B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
723 // We only will write the mid 4 pixels back; after the loop filter,
724 // these are in v22, v23, v24, v25, ordered as rows (8x4 pixels).
725 // We need to transpose them to columns, done with a 4x8 transpose
726 // (which in practice is two 4x4 transposes of the two 4x4 halves
727 // of the 8x4 pixels; into 4x8 pixels).
728 transpose_4x8B v22, v23, v24, v25, v26, v27, v28, v29
729 st1 {v22.s}[0], [x9], x1
730 st1 {v22.s}[1], [x0], x1
731 st1 {v23.s}[0], [x9], x1
732 st1 {v23.s}[1], [x0], x1
733 st1 {v24.s}[0], [x9], x1
734 st1 {v24.s}[1], [x0], x1
735 st1 {v25.s}[0], [x9], x1
736 st1 {v25.s}[1], [x0], x1
741 function ff_vp9_loop_filter_h_44_16_neon, export=1
744 add x0, x9, x1, lsl #3
745 ld1 {v20.8b}, [x9], x1
746 ld1 {v20.d}[1], [x0], x1
747 ld1 {v21.8b}, [x9], x1
748 ld1 {v21.d}[1], [x0], x1
749 ld1 {v22.8b}, [x9], x1
750 ld1 {v22.d}[1], [x0], x1
751 ld1 {v23.8b}, [x9], x1
752 ld1 {v23.d}[1], [x0], x1
753 ld1 {v24.8b}, [x9], x1
754 ld1 {v24.d}[1], [x0], x1
755 ld1 {v25.8b}, [x9], x1
756 ld1 {v25.d}[1], [x0], x1
757 ld1 {v26.8b}, [x9], x1
758 ld1 {v26.d}[1], [x0], x1
759 ld1 {v27.8b}, [x9], x1
760 ld1 {v27.d}[1], [x0], x1
762 sub x9, x9, x1, lsl #3
763 sub x0, x0, x1, lsl #3
767 transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
769 loop_filter_4_16b_mix 44
771 transpose_4x16B v22, v23, v24, v25, v26, v27, v28, v29
773 st1 {v22.s}[0], [x9], x1
774 st1 {v22.s}[2], [x0], x1
775 st1 {v23.s}[0], [x9], x1
776 st1 {v23.s}[2], [x0], x1
777 st1 {v24.s}[0], [x9], x1
778 st1 {v24.s}[2], [x0], x1
779 st1 {v25.s}[0], [x9], x1
780 st1 {v25.s}[2], [x0], x1
781 st1 {v22.s}[1], [x9], x1
782 st1 {v22.s}[3], [x0], x1
783 st1 {v23.s}[1], [x9], x1
784 st1 {v23.s}[3], [x0], x1
785 st1 {v24.s}[1], [x9], x1
786 st1 {v24.s}[3], [x0], x1
787 st1 {v25.s}[1], [x9], x1
788 st1 {v25.s}[3], [x0], x1
793 function ff_vp9_loop_filter_v_8_8_neon, export=1
795 sub x9, x0, x1, lsl #2
796 ld1 {v20.8b}, [x9], x1 // p3
797 ld1 {v24.8b}, [x0], x1 // q0
798 ld1 {v21.8b}, [x9], x1 // p2
799 ld1 {v25.8b}, [x0], x1 // q1
800 ld1 {v22.8b}, [x9], x1 // p1
801 ld1 {v26.8b}, [x0], x1 // q2
802 ld1 {v23.8b}, [x9], x1 // p0
803 ld1 {v27.8b}, [x0], x1 // q3
804 sub x9, x9, x1, lsl #2
805 sub x0, x0, x1, lsl #2
810 st1 {v21.8b}, [x9], x1
811 st1 {v24.8b}, [x0], x1
812 st1 {v22.8b}, [x9], x1
813 st1 {v25.8b}, [x0], x1
814 st1 {v23.8b}, [x9], x1
815 st1 {v26.8b}, [x0], x1
819 sub x9, x0, x1, lsl #1
820 st1 {v22.8b}, [x9], x1
821 st1 {v24.8b}, [x0], x1
822 st1 {v23.8b}, [x9], x1
823 st1 {v25.8b}, [x0], x1
828 function ff_vp9_loop_filter_v_\mix\()_16_neon, export=1
830 sub x9, x0, x1, lsl #2
831 ld1 {v20.16b}, [x9], x1 // p3
832 ld1 {v24.16b}, [x0], x1 // q0
833 ld1 {v21.16b}, [x9], x1 // p2
834 ld1 {v25.16b}, [x0], x1 // q1
835 ld1 {v22.16b}, [x9], x1 // p1
836 ld1 {v26.16b}, [x0], x1 // q2
837 ld1 {v23.16b}, [x9], x1 // p0
838 ld1 {v27.16b}, [x0], x1 // q3
839 sub x9, x9, x1, lsl #2
840 sub x0, x0, x1, lsl #2
843 loop_filter_8_16b_mix \mix
845 st1 {v21.16b}, [x9], x1
846 st1 {v24.16b}, [x0], x1
847 st1 {v22.16b}, [x9], x1
848 st1 {v25.16b}, [x0], x1
849 st1 {v23.16b}, [x9], x1
850 st1 {v26.16b}, [x0], x1
854 sub x9, x0, x1, lsl #1
855 st1 {v22.16b}, [x9], x1
856 st1 {v24.16b}, [x0], x1
857 st1 {v23.16b}, [x9], x1
858 st1 {v25.16b}, [x0], x1
867 function ff_vp9_loop_filter_h_8_8_neon, export=1
870 add x0, x9, x1, lsl #2
871 ld1 {v20.8b}, [x9], x1
872 ld1 {v24.8b}, [x0], x1
873 ld1 {v21.8b}, [x9], x1
874 ld1 {v25.8b}, [x0], x1
875 ld1 {v22.8b}, [x9], x1
876 ld1 {v26.8b}, [x0], x1
877 ld1 {v23.8b}, [x9], x1
878 ld1 {v27.8b}, [x0], x1
880 sub x9, x9, x1, lsl #2
881 sub x0, x0, x1, lsl #2
883 transpose_8x8B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
887 // Even though only 6 pixels per row have been changed, we write the
888 // full 8 pixel registers.
889 transpose_8x8B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
891 st1 {v20.8b}, [x9], x1
892 st1 {v24.8b}, [x0], x1
893 st1 {v21.8b}, [x9], x1
894 st1 {v25.8b}, [x0], x1
895 st1 {v22.8b}, [x9], x1
896 st1 {v26.8b}, [x0], x1
897 st1 {v23.8b}, [x9], x1
898 st1 {v27.8b}, [x0], x1
902 // If we didn't need to do the flat8in part, we use the same writeback
903 // as in loop_filter_h_4_8.
906 transpose_4x8B v22, v23, v24, v25, v26, v27, v28, v29
907 st1 {v22.s}[0], [x9], x1
908 st1 {v22.s}[1], [x0], x1
909 st1 {v23.s}[0], [x9], x1
910 st1 {v23.s}[1], [x0], x1
911 st1 {v24.s}[0], [x9], x1
912 st1 {v24.s}[1], [x0], x1
913 st1 {v25.s}[0], [x9], x1
914 st1 {v25.s}[1], [x0], x1
919 function ff_vp9_loop_filter_h_\mix\()_16_neon, export=1
922 add x0, x9, x1, lsl #3
923 ld1 {v20.8b}, [x9], x1
924 ld1 {v20.d}[1], [x0], x1
925 ld1 {v21.8b}, [x9], x1
926 ld1 {v21.d}[1], [x0], x1
927 ld1 {v22.8b}, [x9], x1
928 ld1 {v22.d}[1], [x0], x1
929 ld1 {v23.8b}, [x9], x1
930 ld1 {v23.d}[1], [x0], x1
931 ld1 {v24.8b}, [x9], x1
932 ld1 {v24.d}[1], [x0], x1
933 ld1 {v25.8b}, [x9], x1
934 ld1 {v25.d}[1], [x0], x1
935 ld1 {v26.8b}, [x9], x1
936 ld1 {v26.d}[1], [x0], x1
937 ld1 {v27.8b}, [x9], x1
938 ld1 {v27.d}[1], [x0], x1
940 sub x9, x9, x1, lsl #3
941 sub x0, x0, x1, lsl #3
943 transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
945 loop_filter_8_16b_mix \mix
947 transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
949 st1 {v20.8b}, [x9], x1
950 st1 {v20.d}[1], [x0], x1
951 st1 {v21.8b}, [x9], x1
952 st1 {v21.d}[1], [x0], x1
953 st1 {v22.8b}, [x9], x1
954 st1 {v22.d}[1], [x0], x1
955 st1 {v23.8b}, [x9], x1
956 st1 {v23.d}[1], [x0], x1
957 st1 {v24.8b}, [x9], x1
958 st1 {v24.d}[1], [x0], x1
959 st1 {v25.8b}, [x9], x1
960 st1 {v25.d}[1], [x0], x1
961 st1 {v26.8b}, [x9], x1
962 st1 {v26.d}[1], [x0], x1
963 st1 {v27.8b}, [x9], x1
964 st1 {v27.d}[1], [x0], x1
970 transpose_4x16B v22, v23, v24, v25, v26, v27, v28, v29
971 st1 {v22.s}[0], [x9], x1
972 st1 {v22.s}[2], [x0], x1
973 st1 {v23.s}[0], [x9], x1
974 st1 {v23.s}[2], [x0], x1
975 st1 {v24.s}[0], [x9], x1
976 st1 {v24.s}[2], [x0], x1
977 st1 {v25.s}[0], [x9], x1
978 st1 {v25.s}[2], [x0], x1
979 st1 {v22.s}[1], [x9], x1
980 st1 {v22.s}[3], [x0], x1
981 st1 {v23.s}[1], [x9], x1
982 st1 {v23.s}[3], [x0], x1
983 st1 {v24.s}[1], [x9], x1
984 st1 {v24.s}[3], [x0], x1
985 st1 {v25.s}[1], [x9], x1
986 st1 {v25.s}[3], [x0], x1
995 function ff_vp9_loop_filter_v_16_8_neon, export=1
997 stp d14, d15, [sp, #-0x10]!
998 stp d12, d13, [sp, #-0x10]!
999 stp d10, d11, [sp, #-0x10]!
1000 stp d8, d9, [sp, #-0x10]!
1001 sub x9, x0, x1, lsl #3
1002 ld1 {v16.8b}, [x9], x1 // p7
1003 ld1 {v24.8b}, [x0], x1 // q0
1004 ld1 {v17.8b}, [x9], x1 // p6
1005 ld1 {v25.8b}, [x0], x1 // q1
1006 ld1 {v18.8b}, [x9], x1 // p5
1007 ld1 {v26.8b}, [x0], x1 // q2
1008 ld1 {v19.8b}, [x9], x1 // p4
1009 ld1 {v27.8b}, [x0], x1 // q3
1010 ld1 {v20.8b}, [x9], x1 // p3
1011 ld1 {v28.8b}, [x0], x1 // q4
1012 ld1 {v21.8b}, [x9], x1 // p2
1013 ld1 {v29.8b}, [x0], x1 // q5
1014 ld1 {v22.8b}, [x9], x1 // p1
1015 ld1 {v30.8b}, [x0], x1 // q6
1016 ld1 {v23.8b}, [x9], x1 // p0
1017 ld1 {v31.8b}, [x0], x1 // q7
1018 sub x9, x9, x1, lsl #3
1019 sub x0, x0, x1, lsl #3
1024 // If we did the flat8out part, we get the output in
1025 // v2-v17 (skipping v7 and v16). x9 points to x0 - 7 * stride,
1026 // store v2-v9 there, and v10-v17 into x0.
1027 st1 {v2.8b}, [x9], x1
1028 st1 {v10.8b}, [x0], x1
1029 st1 {v3.8b}, [x9], x1
1030 st1 {v11.8b}, [x0], x1
1031 st1 {v4.8b}, [x9], x1
1032 st1 {v12.8b}, [x0], x1
1033 st1 {v5.8b}, [x9], x1
1034 st1 {v13.8b}, [x0], x1
1035 st1 {v6.8b}, [x9], x1
1036 st1 {v14.8b}, [x0], x1
1037 st1 {v8.8b}, [x9], x1
1038 st1 {v15.8b}, [x0], x1
1039 st1 {v9.8b}, [x9], x1
1040 st1 {v17.8b}, [x0], x1
1042 ldp d8, d9, [sp], 0x10
1043 ldp d10, d11, [sp], 0x10
1044 ldp d12, d13, [sp], 0x10
1045 ldp d14, d15, [sp], 0x10
1048 add x9, x9, x1, lsl #2
1049 // If we didn't do the flat8out part, the output is left in the
1051 st1 {v21.8b}, [x9], x1
1052 st1 {v24.8b}, [x0], x1
1053 st1 {v22.8b}, [x9], x1
1054 st1 {v25.8b}, [x0], x1
1055 st1 {v23.8b}, [x9], x1
1056 st1 {v26.8b}, [x0], x1
1059 sub x9, x0, x1, lsl #1
1060 st1 {v22.8b}, [x9], x1
1061 st1 {v24.8b}, [x0], x1
1062 st1 {v23.8b}, [x9], x1
1063 st1 {v25.8b}, [x0], x1
1067 function ff_vp9_loop_filter_v_16_16_neon, export=1
1069 stp d14, d15, [sp, #-0x10]!
1070 stp d12, d13, [sp, #-0x10]!
1071 stp d10, d11, [sp, #-0x10]!
1072 stp d8, d9, [sp, #-0x10]!
1073 sub x9, x0, x1, lsl #3
1074 ld1 {v16.16b}, [x9], x1 // p7
1075 ld1 {v24.16b}, [x0], x1 // q0
1076 ld1 {v17.16b}, [x9], x1 // p6
1077 ld1 {v25.16b}, [x0], x1 // q1
1078 ld1 {v18.16b}, [x9], x1 // p5
1079 ld1 {v26.16b}, [x0], x1 // q2
1080 ld1 {v19.16b}, [x9], x1 // p4
1081 ld1 {v27.16b}, [x0], x1 // q3
1082 ld1 {v20.16b}, [x9], x1 // p3
1083 ld1 {v28.16b}, [x0], x1 // q4
1084 ld1 {v21.16b}, [x9], x1 // p2
1085 ld1 {v29.16b}, [x0], x1 // q5
1086 ld1 {v22.16b}, [x9], x1 // p1
1087 ld1 {v30.16b}, [x0], x1 // q6
1088 ld1 {v23.16b}, [x9], x1 // p0
1089 ld1 {v31.16b}, [x0], x1 // q7
1090 sub x9, x9, x1, lsl #3
1091 sub x0, x0, x1, lsl #3
1096 st1 {v2.16b}, [x9], x1
1097 st1 {v10.16b}, [x0], x1
1098 st1 {v3.16b}, [x9], x1
1099 st1 {v11.16b}, [x0], x1
1100 st1 {v4.16b}, [x9], x1
1101 st1 {v12.16b}, [x0], x1
1102 st1 {v5.16b}, [x9], x1
1103 st1 {v13.16b}, [x0], x1
1104 st1 {v6.16b}, [x9], x1
1105 st1 {v14.16b}, [x0], x1
1106 st1 {v8.16b}, [x9], x1
1107 st1 {v15.16b}, [x0], x1
1108 st1 {v9.16b}, [x9], x1
1109 st1 {v17.16b}, [x0], x1
1111 ldp d8, d9, [sp], 0x10
1112 ldp d10, d11, [sp], 0x10
1113 ldp d12, d13, [sp], 0x10
1114 ldp d14, d15, [sp], 0x10
1117 add x9, x9, x1, lsl #2
1118 st1 {v21.16b}, [x9], x1
1119 st1 {v24.16b}, [x0], x1
1120 st1 {v22.16b}, [x9], x1
1121 st1 {v25.16b}, [x0], x1
1122 st1 {v23.16b}, [x9], x1
1123 st1 {v26.16b}, [x0], x1
1126 sub x9, x0, x1, lsl #1
1127 st1 {v22.16b}, [x9], x1
1128 st1 {v24.16b}, [x0], x1
1129 st1 {v23.16b}, [x9], x1
1130 st1 {v25.16b}, [x0], x1
1134 function ff_vp9_loop_filter_h_16_8_neon, export=1
1136 stp d14, d15, [sp, #-0x10]!
1137 stp d12, d13, [sp, #-0x10]!
1138 stp d10, d11, [sp, #-0x10]!
1139 stp d8, d9, [sp, #-0x10]!
1141 ld1 {v16.8b}, [x9], x1
1142 ld1 {v24.8b}, [x0], x1
1143 ld1 {v17.8b}, [x9], x1
1144 ld1 {v25.8b}, [x0], x1
1145 ld1 {v18.8b}, [x9], x1
1146 ld1 {v26.8b}, [x0], x1
1147 ld1 {v19.8b}, [x9], x1
1148 ld1 {v27.8b}, [x0], x1
1149 ld1 {v20.8b}, [x9], x1
1150 ld1 {v28.8b}, [x0], x1
1151 ld1 {v21.8b}, [x9], x1
1152 ld1 {v29.8b}, [x0], x1
1153 ld1 {v22.8b}, [x9], x1
1154 ld1 {v30.8b}, [x0], x1
1155 ld1 {v23.8b}, [x9], x1
1156 ld1 {v31.8b}, [x0], x1
1157 sub x0, x0, x1, lsl #3
1158 sub x9, x9, x1, lsl #3
1160 // The 16x8 pixels read above is in two 8x8 blocks; the left
1161 // half in v16-v23, and the right half in v24-v31. Do two 8x8 transposes
1162 // of this, to get one column per register.
1163 transpose_8x8B v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
1164 transpose_8x8B v24, v25, v26, v27, v28, v29, v30, v31, v0, v1
1168 transpose_8x8B v16, v2, v3, v4, v5, v6, v8, v9, v0, v1
1169 transpose_8x8B v10, v11, v12, v13, v14, v15, v17, v31, v0, v1
1171 st1 {v16.8b}, [x9], x1
1172 st1 {v10.8b}, [x0], x1
1173 st1 {v2.8b}, [x9], x1
1174 st1 {v11.8b}, [x0], x1
1175 st1 {v3.8b}, [x9], x1
1176 st1 {v12.8b}, [x0], x1
1177 st1 {v4.8b}, [x9], x1
1178 st1 {v13.8b}, [x0], x1
1179 st1 {v5.8b}, [x9], x1
1180 st1 {v14.8b}, [x0], x1
1181 st1 {v6.8b}, [x9], x1
1182 st1 {v15.8b}, [x0], x1
1183 st1 {v8.8b}, [x9], x1
1184 st1 {v17.8b}, [x0], x1
1185 st1 {v9.8b}, [x9], x1
1186 st1 {v31.8b}, [x0], x1
1188 ldp d8, d9, [sp], 0x10
1189 ldp d10, d11, [sp], 0x10
1190 ldp d12, d13, [sp], 0x10
1191 ldp d14, d15, [sp], 0x10
1194 // The same writeback as in loop_filter_h_8_8
1196 add x0, x9, x1, lsl #2
1197 transpose_8x8B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
1199 st1 {v20.8b}, [x9], x1
1200 st1 {v24.8b}, [x0], x1
1201 st1 {v21.8b}, [x9], x1
1202 st1 {v25.8b}, [x0], x1
1203 st1 {v22.8b}, [x9], x1
1204 st1 {v26.8b}, [x0], x1
1205 st1 {v23.8b}, [x9], x1
1206 st1 {v27.8b}, [x0], x1
1209 // The same writeback as in loop_filter_h_4_8
1211 add x0, x9, x1, lsl #2
1212 transpose_4x8B v22, v23, v24, v25, v26, v27, v28, v29
1213 st1 {v22.s}[0], [x9], x1
1214 st1 {v22.s}[1], [x0], x1
1215 st1 {v23.s}[0], [x9], x1
1216 st1 {v23.s}[1], [x0], x1
1217 st1 {v24.s}[0], [x9], x1
1218 st1 {v24.s}[1], [x0], x1
1219 st1 {v25.s}[0], [x9], x1
1220 st1 {v25.s}[1], [x0], x1
1224 function ff_vp9_loop_filter_h_16_16_neon, export=1
1226 stp d14, d15, [sp, #-0x10]!
1227 stp d12, d13, [sp, #-0x10]!
1228 stp d10, d11, [sp, #-0x10]!
1229 stp d8, d9, [sp, #-0x10]!
1231 ld1 {v16.8b}, [x9], x1
1232 ld1 {v24.8b}, [x0], x1
1233 ld1 {v17.8b}, [x9], x1
1234 ld1 {v25.8b}, [x0], x1
1235 ld1 {v18.8b}, [x9], x1
1236 ld1 {v26.8b}, [x0], x1
1237 ld1 {v19.8b}, [x9], x1
1238 ld1 {v27.8b}, [x0], x1
1239 ld1 {v20.8b}, [x9], x1
1240 ld1 {v28.8b}, [x0], x1
1241 ld1 {v21.8b}, [x9], x1
1242 ld1 {v29.8b}, [x0], x1
1243 ld1 {v22.8b}, [x9], x1
1244 ld1 {v30.8b}, [x0], x1
1245 ld1 {v23.8b}, [x9], x1
1246 ld1 {v31.8b}, [x0], x1
1247 ld1 {v16.d}[1], [x9], x1
1248 ld1 {v24.d}[1], [x0], x1
1249 ld1 {v17.d}[1], [x9], x1
1250 ld1 {v25.d}[1], [x0], x1
1251 ld1 {v18.d}[1], [x9], x1
1252 ld1 {v26.d}[1], [x0], x1
1253 ld1 {v19.d}[1], [x9], x1
1254 ld1 {v27.d}[1], [x0], x1
1255 ld1 {v20.d}[1], [x9], x1
1256 ld1 {v28.d}[1], [x0], x1
1257 ld1 {v21.d}[1], [x9], x1
1258 ld1 {v29.d}[1], [x0], x1
1259 ld1 {v22.d}[1], [x9], x1
1260 ld1 {v30.d}[1], [x0], x1
1261 ld1 {v23.d}[1], [x9], x1
1262 ld1 {v31.d}[1], [x0], x1
1263 sub x0, x0, x1, lsl #4
1264 sub x9, x9, x1, lsl #4
1266 transpose_8x16B v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
1267 transpose_8x16B v24, v25, v26, v27, v28, v29, v30, v31, v0, v1
1271 transpose_8x16B v16, v2, v3, v4, v5, v6, v8, v9, v0, v1
1272 transpose_8x16B v10, v11, v12, v13, v14, v15, v17, v31, v0, v1
1274 st1 {v16.8b}, [x9], x1
1275 st1 {v10.8b}, [x0], x1
1276 st1 {v2.8b}, [x9], x1
1277 st1 {v11.8b}, [x0], x1
1278 st1 {v3.8b}, [x9], x1
1279 st1 {v12.8b}, [x0], x1
1280 st1 {v4.8b}, [x9], x1
1281 st1 {v13.8b}, [x0], x1
1282 st1 {v5.8b}, [x9], x1
1283 st1 {v14.8b}, [x0], x1
1284 st1 {v6.8b}, [x9], x1
1285 st1 {v15.8b}, [x0], x1
1286 st1 {v8.8b}, [x9], x1
1287 st1 {v17.8b}, [x0], x1
1288 st1 {v9.8b}, [x9], x1
1289 st1 {v31.8b}, [x0], x1
1290 st1 {v16.d}[1], [x9], x1
1291 st1 {v10.d}[1], [x0], x1
1292 st1 {v2.d}[1], [x9], x1
1293 st1 {v11.d}[1], [x0], x1
1294 st1 {v3.d}[1], [x9], x1
1295 st1 {v12.d}[1], [x0], x1
1296 st1 {v4.d}[1], [x9], x1
1297 st1 {v13.d}[1], [x0], x1
1298 st1 {v5.d}[1], [x9], x1
1299 st1 {v14.d}[1], [x0], x1
1300 st1 {v6.d}[1], [x9], x1
1301 st1 {v15.d}[1], [x0], x1
1302 st1 {v8.d}[1], [x9], x1
1303 st1 {v17.d}[1], [x0], x1
1304 st1 {v9.d}[1], [x9], x1
1305 st1 {v31.d}[1], [x0], x1
1307 ldp d8, d9, [sp], 0x10
1308 ldp d10, d11, [sp], 0x10
1309 ldp d12, d13, [sp], 0x10
1310 ldp d14, d15, [sp], 0x10
1314 add x0, x9, x1, lsl #3
1315 transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
1317 st1 {v20.8b}, [x9], x1
1318 st1 {v20.d}[1], [x0], x1
1319 st1 {v21.8b}, [x9], x1
1320 st1 {v21.d}[1], [x0], x1
1321 st1 {v22.8b}, [x9], x1
1322 st1 {v22.d}[1], [x0], x1
1323 st1 {v23.8b}, [x9], x1
1324 st1 {v23.d}[1], [x0], x1
1325 st1 {v24.8b}, [x9], x1
1326 st1 {v24.d}[1], [x0], x1
1327 st1 {v25.8b}, [x9], x1
1328 st1 {v25.d}[1], [x0], x1
1329 st1 {v26.8b}, [x9], x1
1330 st1 {v26.d}[1], [x0], x1
1331 st1 {v27.8b}, [x9], x1
1332 st1 {v27.d}[1], [x0], x1
1336 add x0, x9, x1, lsl #3
1337 transpose_4x16B v22, v23, v24, v25, v26, v27, v28, v29
1338 st1 {v22.s}[0], [x9], x1
1339 st1 {v22.s}[2], [x0], x1
1340 st1 {v23.s}[0], [x9], x1
1341 st1 {v23.s}[2], [x0], x1
1342 st1 {v24.s}[0], [x9], x1
1343 st1 {v24.s}[2], [x0], x1
1344 st1 {v25.s}[0], [x9], x1
1345 st1 {v25.s}[2], [x0], x1
1346 st1 {v22.s}[1], [x9], x1
1347 st1 {v22.s}[3], [x0], x1
1348 st1 {v23.s}[1], [x9], x1
1349 st1 {v23.s}[3], [x0], x1
1350 st1 {v24.s}[1], [x9], x1
1351 st1 {v24.s}[3], [x0], x1
1352 st1 {v25.s}[1], [x9], x1
1353 st1 {v25.s}[3], [x0], x1