/*
 * Copyright (c) 2016 Google Inc.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/aarch64/asm.S"
// All public functions in this file have the following signature:
// typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
//                             const uint8_t *ref, ptrdiff_t ref_stride,
//                             int h, int mx, int my);
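// h is the block height; mx and my select the horizontal and vertical
// subpixel filter phase (0 means no filtering in that direction, which is
// the case the plain copy/avg functions handle).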
function ff_vp9_copy64_aarch64, export=1
        ldp             x9,  x10, [x2, #32]
        ldp             x11, x12, [x2, #48]
        stp             x9,  x10, [x0, #32]
        stp             x11, x12, [x0, #48]
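// The avg functions blend the new prediction with what is already in dst:
// per byte, dst = (dst + ref + 1) >> 1, which is exactly what the urhadd
// (unsigned rounding halving add) instruction computes. Where x5/x6 appear
// below, they are assumed to be dst/src pointers for a second row, set up
// with doubled strides so two rows are handled per iteration.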
function ff_vp9_avg64_neon, export=1
        ld1             {v4.16b,  v5.16b,  v6.16b,  v7.16b},  [x2], x3
        ld1             {v0.16b,  v1.16b,  v2.16b,  v3.16b},  [x0], x1
        ld1             {v20.16b, v21.16b, v22.16b, v23.16b}, [x2], x3
        urhadd          v0.16b,  v0.16b,  v4.16b
        urhadd          v1.16b,  v1.16b,  v5.16b
        ld1             {v16.16b, v17.16b, v18.16b, v19.16b}, [x0], x1
        urhadd          v2.16b,  v2.16b,  v6.16b
        urhadd          v3.16b,  v3.16b,  v7.16b
        urhadd          v16.16b, v16.16b, v20.16b
        urhadd          v17.16b, v17.16b, v21.16b
        st1             {v0.16b,  v1.16b,  v2.16b,  v3.16b},  [x5], x1
        urhadd          v18.16b, v18.16b, v22.16b
        urhadd          v19.16b, v19.16b, v23.16b
        st1             {v16.16b, v17.16b, v18.16b, v19.16b}, [x5], x1
function ff_vp9_copy32_aarch64, export=1
function ff_vp9_avg32_neon, export=1
        ld1             {v2.16b, v3.16b}, [x2], x3
        ld1             {v0.16b, v1.16b}, [x0]
        urhadd          v0.16b, v0.16b, v2.16b
        urhadd          v1.16b, v1.16b, v3.16b
        st1             {v0.16b, v1.16b}, [x0], x1
function ff_vp9_copy16_neon, export=1
        ld1             {v0.16b}, [x2], x3
        ld1             {v1.16b}, [x6], x3
        ld1             {v2.16b}, [x2], x3
        ld1             {v3.16b}, [x6], x3
        st1             {v0.16b}, [x0], x1
        st1             {v1.16b}, [x5], x1
        st1             {v2.16b}, [x0], x1
        st1             {v3.16b}, [x5], x1
function ff_vp9_avg16_neon, export=1
        ld1             {v2.16b}, [x2], x3
        ld1             {v0.16b}, [x0], x1
        ld1             {v3.16b}, [x2], x3
        urhadd          v0.16b, v0.16b, v2.16b
        ld1             {v1.16b}, [x0], x1
        urhadd          v1.16b, v1.16b, v3.16b
        st1             {v0.16b}, [x5], x1
        st1             {v1.16b}, [x5], x1
function ff_vp9_copy8_neon, export=1
        ld1             {v0.8b}, [x2], x3
        ld1             {v1.8b}, [x2], x3
        st1             {v0.8b}, [x0], x1
        st1             {v1.8b}, [x0], x1
function ff_vp9_avg8_neon, export=1
        ld1             {v2.8b}, [x2], x3
        ld1             {v0.8b}, [x0], x1
        ld1             {v3.8b}, [x2], x3
        urhadd          v0.8b,  v0.8b,  v2.8b
        ld1             {v1.8b}, [x0], x1
        urhadd          v1.8b,  v1.8b,  v3.8b
        st1             {v0.8b}, [x5], x1
        st1             {v1.8b}, [x5], x1
function ff_vp9_copy4_neon, export=1
        ld1             {v0.s}[0], [x2], x3
        ld1             {v1.s}[0], [x2], x3
        st1             {v0.s}[0], [x0], x1
        ld1             {v2.s}[0], [x2], x3
        st1             {v1.s}[0], [x0], x1
        ld1             {v3.s}[0], [x2], x3
        st1             {v2.s}[0], [x0], x1
        st1             {v3.s}[0], [x0], x1
function ff_vp9_avg4_neon, export=1
        ld1             {v2.s}[0], [x2], x3
        ld1             {v0.s}[0], [x0], x1
        ld1             {v2.s}[1], [x2], x3
        ld1             {v0.s}[1], [x0], x1
        ld1             {v3.s}[0], [x2], x3
        ld1             {v1.s}[0], [x0], x1
        ld1             {v3.s}[1], [x2], x3
        ld1             {v1.s}[1], [x0], x1
        urhadd          v0.8b,  v0.8b,  v2.8b
        urhadd          v1.8b,  v1.8b,  v3.8b
        st1             {v0.s}[0], [x5], x1
        st1             {v0.s}[1], [x5], x1
        st1             {v1.s}[0], [x5], x1
        st1             {v1.s}[1], [x5], x1
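// Note that avg4 packs two 4-pixel rows into each d register (lanes 0
// and 1), so a single urhadd averages two rows at once.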
// Extract a vector from src1-src2 and src4-src5 (src1-src3 and src4-src6
// for size >= 16), and multiply-accumulate into dst1 and dst3 (or
// dst1-dst2 and dst3-dst4 for size >= 16).
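// The ext offset is 2*\offset bytes because the pixels have already been
// widened to 16 bits; each step slides the window right by \offset pixels,
// so one set of wide loads can feed all eight filter taps.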
.macro extmla dst1, dst2, dst3, dst4, src1, src2, src3, src4, src5, src6, offset, size
        ext             v20.16b, \src1, \src2, #(2*\offset)
        ext             v22.16b, \src4, \src5, #(2*\offset)
.if \size >= 16
        mla             \dst1, v20.8h, v0.h[\offset]
        ext             v21.16b, \src2, \src3, #(2*\offset)
        mla             \dst3, v22.8h, v0.h[\offset]
        ext             v23.16b, \src5, \src6, #(2*\offset)
        mla             \dst2, v21.8h, v0.h[\offset]
        mla             \dst4, v23.8h, v0.h[\offset]
.else
        mla             \dst1, v20.8h, v0.h[\offset]
        mla             \dst3, v22.8h, v0.h[\offset]
.endif
.endm
// The same as above, but instead of accumulating straight into the
// destination, use a temp register and accumulate with saturation.
.macro extmulqadd dst1, dst2, dst3, dst4, src1, src2, src3, src4, src5, src6, offset, size
        ext             v20.16b, \src1, \src2, #(2*\offset)
        ext             v22.16b, \src4, \src5, #(2*\offset)
.if \size >= 16
        mul             v20.8h, v20.8h, v0.h[\offset]
        ext             v21.16b, \src2, \src3, #(2*\offset)
        mul             v22.8h, v22.8h, v0.h[\offset]
        ext             v23.16b, \src5, \src6, #(2*\offset)
        mul             v21.8h, v21.8h, v0.h[\offset]
        mul             v23.8h, v23.8h, v0.h[\offset]
.else
        mul             v20.8h, v20.8h, v0.h[\offset]
        mul             v22.8h, v22.8h, v0.h[\offset]
.endif
        sqadd           \dst1, \dst1, v20.8h
        sqadd           \dst3, \dst3, v22.8h
.if \size >= 16
        sqadd           \dst2, \dst2, v21.8h
        sqadd           \dst4, \dst4, v23.8h
.endif
.endm
// Instantiate a horizontal filter function for the given size.
// This can work on 4, 8 or 16 pixels in parallel; for larger
// widths it will do 16 pixels at a time and loop horizontally.
// The actual width is passed in x5, the height in w4 and the
// filter coefficients in x9. idx2 is the index of the largest
// filter coefficient (3 or 4) and idx1 is the other one of them.
.macro do_8tap_h type, size, idx1, idx2
function \type\()_8tap_\size\()h_\idx1\idx2
        // Only size >= 16 loops horizontally and needs
        // a reduced dst stride.
        // size >= 16 loads two qwords and increments x2;
        // for size 4/8, one qword is enough, with no
        // post-increment.
        // Load the filter vector
        ld1             {v0.8h}, [x9]
        // Load src
.if \size >= 16
        ld1             {v4.8b,  v5.8b,  v6.8b},  [x2], #24
        ld1             {v16.8b, v17.8b, v18.8b}, [x7], #24
.else
        ld1             {v4.8b,  v5.8b},  [x2]
        ld1             {v16.8b, v17.8b}, [x7]
.endif
        // Accumulate, adding idx2 last with a separate
        // saturating add. The positive filter coefficients
        // for all indices except idx2 must add up to less
        // than 127 for this not to overflow.
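        // (The VP9 filter coefficients sum to 128, i.e. 7 fractional bits,
        // which is why the results are narrowed with a rounding right
        // shift by #7 below.)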
        mul             v1.8h,  v4.8h,  v0.h[0]
        mul             v24.8h, v16.8h, v0.h[0]
.if \size >= 16
        mul             v2.8h,  v5.8h,  v0.h[0]
        mul             v25.8h, v17.8h, v0.h[0]
.endif
        extmla          v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, 1,     \size
        extmla          v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, 2,     \size
        extmla          v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, \idx1, \size
        extmla          v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, 5,     \size
        extmla          v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, 6,     \size
        extmla          v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, 7,     \size
        extmulqadd      v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, \idx2, \size
        // Round, shift and saturate
        sqrshrun        v1.8b,   v1.8h,  #7
        sqrshrun        v24.8b,  v24.8h, #7
.if \size >= 16
        sqrshrun2       v1.16b,  v2.8h,  #7
        sqrshrun2       v24.16b, v25.8h, #7
.endif
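        // sqrshrun #7 computes (x + 64) >> 7 with unsigned saturation,
        // narrowing the 16-bit accumulators back to 8-bit pixels.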
        // Average into the existing dst rows for the avg variants
.ifc \type,avg
.if \size >= 16
        ld1             {v2.16b}, [x0]
        ld1             {v3.16b}, [x6]
        urhadd          v1.16b,  v1.16b,  v2.16b
        urhadd          v24.16b, v24.16b, v3.16b
.elseif \size == 8
        ld1             {v2.8b},  [x0]
        ld1             {v3.8b},  [x6]
        urhadd          v1.8b,  v1.8b,  v2.8b
        urhadd          v24.8b, v24.8b, v3.8b
.else
        ld1             {v2.s}[0], [x0]
        ld1             {v3.s}[0], [x6]
        urhadd          v1.8b,  v1.8b,  v2.8b
        urhadd          v24.8b, v24.8b, v3.8b
.endif
.endif
        // Store and loop horizontally (for size >= 16)
        st1             {v1.16b},  [x0], #16
        st1             {v24.16b}, [x6], #16
        ld1             {v6.16b},  [x2], #16
        ld1             {v18.16b}, [x7], #16
        uxtl2           v18.8h,  v18.16b
.macro do_8tap_h_size size
do_8tap_h put, \size, 3, 4
do_8tap_h avg, \size, 3, 4
do_8tap_h put, \size, 4, 3
do_8tap_h avg, \size, 4, 3
.endm
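// ff_vp9_subpel_filters is assumed to be laid out as three 256-byte banks
// (smooth, regular, sharp): 16 phases x 8 int16 coefficients each, so
// 256*\offset picks the bank and (mx or my) << 4 picks the phase row.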
.macro do_8tap_h_func type, filter, offset, size
function ff_vp9_\type\()_\filter\()\size\()_h_neon, export=1
        movrel          x6,  X(ff_vp9_subpel_filters), 256*\offset
        cmp             w5,  #8
        add             x9,  x6,  w5, uxtw #4
        mov             x5,  #\size
.if \size >= 16
        b.ge            \type\()_8tap_16h_34
        b               \type\()_8tap_16h_43
.else
        b.ge            \type\()_8tap_\size\()h_34
        b               \type\()_8tap_\size\()h_43
.endif
endfunc
.endm
.macro do_8tap_h_filters size
do_8tap_h_func put, regular, 1, \size
do_8tap_h_func avg, regular, 1, \size
do_8tap_h_func put, sharp,   2, \size
do_8tap_h_func avg, sharp,   2, \size
do_8tap_h_func put, smooth,  0, \size
do_8tap_h_func avg, smooth,  0, \size
.endm
// Round, shift and saturate, and store reg1-reg2 over 4 lines
.macro do_store4 reg1, reg2, tmp1, tmp2, type
        sqrshrun        \reg1\().8b, \reg1\().8h, #7
        sqrshrun        \reg2\().8b, \reg2\().8h, #7
.ifc \type,avg
        ld1             {\tmp1\().s}[0], [x7], x1
        ld1             {\tmp2\().s}[0], [x7], x1
        ld1             {\tmp1\().s}[1], [x7], x1
        ld1             {\tmp2\().s}[1], [x7], x1
        urhadd          \reg1\().8b, \reg1\().8b, \tmp1\().8b
        urhadd          \reg2\().8b, \reg2\().8b, \tmp2\().8b
.endif
        st1             {\reg1\().s}[0], [x0], x1
        st1             {\reg2\().s}[0], [x0], x1
        st1             {\reg1\().s}[1], [x0], x1
        st1             {\reg2\().s}[1], [x0], x1
.endm
// Round, shift and saturate, and store reg1-reg4
.macro do_store reg1, reg2, reg3, reg4, tmp1, tmp2, tmp3, tmp4, type
        sqrshrun        \reg1\().8b, \reg1\().8h, #7
        sqrshrun        \reg2\().8b, \reg2\().8h, #7
        sqrshrun        \reg3\().8b, \reg3\().8h, #7
        sqrshrun        \reg4\().8b, \reg4\().8h, #7
.ifc \type,avg
        ld1             {\tmp1\().8b}, [x7], x1
        ld1             {\tmp2\().8b}, [x7], x1
        ld1             {\tmp3\().8b}, [x7], x1
        ld1             {\tmp4\().8b}, [x7], x1
        urhadd          \reg1\().8b, \reg1\().8b, \tmp1\().8b
        urhadd          \reg2\().8b, \reg2\().8b, \tmp2\().8b
        urhadd          \reg3\().8b, \reg3\().8b, \tmp3\().8b
        urhadd          \reg4\().8b, \reg4\().8b, \tmp4\().8b
.endif
        st1             {\reg1\().8b}, [x0], x1
        st1             {\reg2\().8b}, [x0], x1
        st1             {\reg3\().8b}, [x0], x1
        st1             {\reg4\().8b}, [x0], x1
.endm
// Evaluate the filter twice in parallel, from the inputs src1-src9 into dst1-dst2
// (src1-src8 into dst1, src2-src9 into dst2), adding idx2 separately
// at the end with saturation. Indices 0 and 7 always have negative or zero
// coefficients, so they can be accumulated into tmp1-tmp2 together with the
// largest coefficient.
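// In C-like terms, each output pixel is computed roughly as (a sketch of
// the scheme, not literal code; filt[] is the 8-tap coefficient row):
//   acc = sum of src[k]*filt[k] for k not in {0, 7, idx2}   (mul/mla chain)
//   tmp = src[0]*filt[0] + src[7]*filt[7] + src[idx2]*filt[idx2]
//   out = saturate(acc + tmp)                               (sqadd)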
.macro convolve dst1, dst2, src1, src2, src3, src4, src5, src6, src7, src8, src9, idx1, idx2, tmp1, tmp2
        mul             \dst1\().8h, \src2\().8h, v0.h[1]
        mul             \dst2\().8h, \src3\().8h, v0.h[1]
        mul             \tmp1\().8h, \src1\().8h, v0.h[0]
        mul             \tmp2\().8h, \src2\().8h, v0.h[0]
        mla             \dst1\().8h, \src3\().8h, v0.h[2]
        mla             \dst2\().8h, \src4\().8h, v0.h[2]
.if \idx1 == 3
        mla             \dst1\().8h, \src4\().8h, v0.h[3]
        mla             \dst2\().8h, \src5\().8h, v0.h[3]
.else
        mla             \dst1\().8h, \src5\().8h, v0.h[4]
        mla             \dst2\().8h, \src6\().8h, v0.h[4]
.endif
        mla             \dst1\().8h, \src6\().8h, v0.h[5]
        mla             \dst2\().8h, \src7\().8h, v0.h[5]
        mla             \tmp1\().8h, \src8\().8h, v0.h[7]
        mla             \tmp2\().8h, \src9\().8h, v0.h[7]
        mla             \dst1\().8h, \src7\().8h, v0.h[6]
        mla             \dst2\().8h, \src8\().8h, v0.h[6]
.if \idx2 == 3
        mla             \tmp1\().8h, \src4\().8h, v0.h[3]
        mla             \tmp2\().8h, \src5\().8h, v0.h[3]
.else
        mla             \tmp1\().8h, \src5\().8h, v0.h[4]
        mla             \tmp2\().8h, \src6\().8h, v0.h[4]
.endif
        sqadd           \dst1\().8h, \dst1\().8h, \tmp1\().8h
        sqadd           \dst2\().8h, \dst2\().8h, \tmp2\().8h
.endm
// Load pixels and extend them to 16 bit
.macro loadl dst1, dst2, dst3, dst4
        ld1             {v1.8b}, [x2], x3
        ld1             {v2.8b}, [x2], x3
        ld1             {v3.8b}, [x2], x3
.ifnb \dst4
        ld1             {v4.8b}, [x2], x3
.endif
        uxtl            \dst1\().8h, v1.8b
        uxtl            \dst2\().8h, v2.8b
        uxtl            \dst3\().8h, v3.8b
.ifnb \dst4
        uxtl            \dst4\().8h, v4.8b
.endif
.endm
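// Widening to 16 bits gives the mul/mla chain in convolve enough headroom;
// the one coefficient large enough to push past the 16-bit range is added
// separately with a saturating sqadd, and do_store narrows back to 8 bits.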
// Instantiate a vertical filter function for filtering 8 pixels at a time.
// The height is passed in x4, the width in x5 and the filter coefficients
// in x6. idx2 is the index of the largest filter coefficient (3 or 4)
// and idx1 is the other one of them.
.macro do_8tap_8v type, idx1, idx2
function \type\()_8tap_8v_\idx1\idx2
        sub             x2,  x2,  x3, lsl #1
        loadl           v20, v21, v22, v23
        loadl           v24, v25, v26, v27
        convolve        v1,  v2,  v17, v18, v19, v20, v21, v22, v23, v24, v25, \idx1, \idx2, v5,  v6
        convolve        v3,  v4,  v19, v20, v21, v22, v23, v24, v25, v26, v27, \idx1, \idx2, v5,  v6
        do_store        v1,  v2,  v3,  v4,  v5,  v6,  v7,  v28, \type
        loadl           v16, v17, v18, v19
        convolve        v1,  v2,  v21, v22, v23, v24, v25, v26, v27, v16, v17, \idx1, \idx2, v5,  v6
        convolve        v3,  v4,  v23, v24, v25, v26, v27, v16, v17, v18, v19, \idx1, \idx2, v5,  v6
        do_store        v1,  v2,  v3,  v4,  v5,  v6,  v7,  v28, \type
        loadl           v20, v21, v22, v23
        convolve        v1,  v2,  v25, v26, v27, v16, v17, v18, v19, v20, v21, \idx1, \idx2, v5,  v6
        convolve        v3,  v4,  v27, v16, v17, v18, v19, v20, v21, v22, v23, \idx1, \idx2, v5,  v6
        do_store        v1,  v2,  v3,  v4,  v5,  v6,  v7,  v28, \type
        // x0 -= h * dst_stride
        // x2 -= h * src_stride
        // x2 -= 8 * src_stride
        sub             x2,  x2,  x3, lsl #3
        // x2 += 1 * src_stride
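        // Net effect: x2 is rewound by h + 7 rows (-h, -8, +1), the block
        // height plus the seven extra context rows the 8-tap filter reads,
        // ready for the next 8-pixel column.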
// Instantiate a vertical filter function for filtering a 4-pixel-wide
// slice. The first half of each register contains one row, while the second
// half contains the second-next row (which is also stored in the first
// half of the register two steps ahead). The convolution produces two
// outputs at a time: v17-v24 go into one, and v18-v25 into the other.
// The first half of the first output is the first output row, and the
// first half of the other output is the second output row. The second
// halves of the registers are rows 3 and 4.
// This is only designed to work for 4 or 8 output lines.
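// (One 8-lane convolve thus evaluates the filter for four 4-pixel output
// rows at once: rows 1 and 3 in one result, rows 2 and 4 in the other.)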
.macro do_8tap_4v type, idx1, idx2
function \type\()_8tap_4v_\idx1\idx2
        sub             x2,  x2,  x3, lsl #1
        ld1             {v1.s}[0],  [x2], x3
        ld1             {v2.s}[0],  [x2], x3
        ld1             {v3.s}[0],  [x2], x3
        ld1             {v4.s}[0],  [x2], x3
        ld1             {v5.s}[0],  [x2], x3
        ld1             {v6.s}[0],  [x2], x3
        trn1            v1.2s,  v1.2s,  v3.2s
        ld1             {v7.s}[0],  [x2], x3
        trn1            v2.2s,  v2.2s,  v4.2s
        ld1             {v26.s}[0], [x2], x3
        trn1            v3.2s,  v3.2s,  v5.2s
        ld1             {v27.s}[0], [x2], x3
        trn1            v4.2s,  v4.2s,  v6.2s
        ld1             {v28.s}[0], [x2], x3
        trn1            v5.2s,  v5.2s,  v7.2s
        ld1             {v29.s}[0], [x2], x3
        trn1            v6.2s,  v6.2s,  v26.2s
        trn1            v7.2s,  v7.2s,  v27.2s
        trn1            v26.2s, v26.2s, v28.2s
        trn1            v27.2s, v27.2s, v29.2s
        convolve        v1,  v2,  v17, v18, v19, v20, v21, v22, v23, v24, v25, \idx1, \idx2, v3,  v4
        do_store4       v1,  v2,  v5,  v6,  \type
        ld1             {v1.s}[0],  [x2], x3
        ld1             {v2.s}[0],  [x2], x3
        trn1            v28.2s, v28.2s, v1.2s
        trn1            v29.2s, v29.2s, v2.2s
        ld1             {v1.s}[1],  [x2], x3
        ld1             {v2.s}[1],  [x2], x3
        convolve        v1,  v2,  v21, v22, v23, v24, v25, v26, v27, v28, v29, \idx1, \idx2, v3,  v4
        do_store4       v1,  v2,  v5,  v6,  \type
.macro do_8tap_v_func type, filter, offset, size
function ff_vp9_\type\()_\filter\()\size\()_v_neon, export=1
        movrel          x5,  X(ff_vp9_subpel_filters), 256*\offset
        cmp             w6,  #8
        add             x6,  x5,  w6, uxtw #4
        mov             x5,  #\size
.if \size >= 8
        b.ge            \type\()_8tap_8v_34
        b               \type\()_8tap_8v_43
.else
        b.ge            \type\()_8tap_4v_34
        b               \type\()_8tap_4v_43
.endif
endfunc
.endm
.macro do_8tap_v_filters size
do_8tap_v_func put, regular, 1, \size
do_8tap_v_func avg, regular, 1, \size
do_8tap_v_func put, sharp,   2, \size
do_8tap_v_func avg, sharp,   2, \size
do_8tap_v_func put, smooth,  0, \size
do_8tap_v_func avg, smooth,  0, \size
.endm