2 * ARM NEON optimised DSP functions
3 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
@ void ff_clear_block_neon(int16_t *block)
@ Zeroes a 16-bit coefficient block using 128-bit aligned post-incremented
@ stores.  NOTE(review): body is elided in this view — only one store and
@ no zeroing of q0 or return is visible; confirm against the full file.
28 function ff_clear_block_neon, export=1
31 vst1.16 {q0}, [r0,:128]!
@ void ff_clear_blocks_neon(int16_t *blocks)
@ Same store pattern as ff_clear_block_neon, presumably over several
@ consecutive blocks (loop/count lines elided in this view — confirm).
36 function ff_clear_blocks_neon, export=1
39 vst1.16 {q0}, [r0,:128]!
@ 16x16 pixel copy/average loop fragment (enclosing .macro header elided).
@ Loads four 16-byte source rows from r1 with stride r2:
48 1: vld1.64 {d0, d1}, [r1], r2
49 vld1.64 {d2, d3}, [r1], r2
50 vld1.64 {d4, d5}, [r1], r2
52 vld1.64 {d6, d7}, [r1], r2
@ Loads four aligned rows via ip — presumably the destination rows for the
@ "avg" variant (averaging instructions elided; TODO confirm in full file):
57 vld1.64 {d16,d17}, [ip,:128], r2
59 vld1.64 {d18,d19}, [ip,:128], r2
61 vld1.64 {d20,d21}, [ip,:128], r2
63 vld1.64 {d22,d23}, [ip,:128], r2
@ Stores four result rows to the 16-byte-aligned destination r0, stride r2:
67 vst1.64 {d0, d1}, [r0,:128], r2
68 vst1.64 {d2, d3}, [r0,:128], r2
69 vst1.64 {d4, d5}, [r0,:128], r2
70 vst1.64 {d6, d7}, [r0,:128], r2
@ pixels16_x2: 16-wide horizontal half-pel interpolation.
@ The \vhadd parameter selects rounding (vrhadd.u8, default) vs truncating
@ (vhadd.u8) averaging — the _no_rnd instantiations below pass vhadd.u8.
@ Loads 17 bytes (3 d-regs) per row so the x+1 neighbour is available;
@ the vext/average lines are elided in this view.
75 .macro pixels16_x2 vhadd=vrhadd.u8
76 1: vld1.64 {d0-d2}, [r1], r2
77 vld1.64 {d4-d6}, [r1], r2
85 vst1.64 {d0, d1}, [r0,:128], r2
86 vst1.64 {d4, d5}, [r0,:128], r2
@ pixels16_y2: 16-wide vertical half-pel interpolation — averages each row
@ with the row below.  \vhadd selects rounding vs truncating average.
@ Interleaved averaging instructions are elided in this view.
91 .macro pixels16_y2 vhadd=vrhadd.u8
92 vld1.64 {d0, d1}, [r1], r2
93 vld1.64 {d2, d3}, [r1], r2
96 vld1.64 {d0, d1}, [r1], r2
98 vld1.64 {d2, d3}, [r1], r2
101 vst1.64 {d4, d5}, [r0,:128], r2
102 vst1.64 {d6, d7}, [r0,:128], r2
@ pixels16_xy2: 16-wide 2D (diagonal) half-pel interpolation.  Sums the
@ 2x2 neighbourhood in u16 accumulators, then narrows with \vshrn
@ (vrshrn.u16 = rounding; the no_rnd instantiation passes vshrn.u16).
@ Many interleaved lines (widening adds, bias, branch) are elided here.
107 .macro pixels16_xy2 vshrn=vrshrn.u16 no_rnd=0
108 vld1.64 {d0-d2}, [r1], r2
109 vld1.64 {d4-d6}, [r1], r2
@ Build the x+1 shifted copies of each 17-byte row:
115 vext.8 q1, q0, q1, #1
116 vext.8 q3, q2, q3, #1
122 vld1.64 {d0-d2}, [r1], r2
126 vadd.u16 q12, q12, q13
128 vext.8 q15, q0, q1, #1
129 vadd.u16 q1 , q10, q11
136 vld1.64 {d2-d4}, [r1], r2
137 vaddl.u8 q10, d1, d31
138 vst1.64 {d28,d29}, [r0,:128], r2
142 vadd.u16 q12, q12, q13
144 vext.8 q2, q1, q2, #1
145 vadd.u16 q0, q10, q11
153 vst1.64 {d30,d31}, [r0,:128], r2
@ 8x8 pixel copy/average loop fragment (enclosing .macro header elided).
@ Four 8-byte source rows from r1, stride r2:
159 1: vld1.64 {d0}, [r1], r2
160 vld1.64 {d1}, [r1], r2
161 vld1.64 {d2}, [r1], r2
163 vld1.64 {d3}, [r1], r2
@ Destination rows re-read for the averaging variant:
168 vld1.64 {d4}, [r0,:64], r2
170 vld1.64 {d5}, [r0,:64], r2
172 vld1.64 {d6}, [r0,:64], r2
174 vld1.64 {d7}, [r0,:64], r2
@ Rewind r0 by the four rows just read (4 * stride) before storing:
176 sub r0, r0, r2, lsl #2
179 vst1.64 {d0}, [r0,:64], r2
180 vst1.64 {d1}, [r0,:64], r2
181 vst1.64 {d2}, [r0,:64], r2
182 vst1.64 {d3}, [r0,:64], r2
@ pixels8_x2: 8-wide horizontal half-pel interpolation.  vext builds the
@ x+1 shifted row; \vhadd (rounding by default, vhadd.u8 for no_rnd)
@ averages it with the original.  Averaging lines are elided in this view.
187 .macro pixels8_x2 vhadd=vrhadd.u8
188 1: vld1.64 {d0, d1}, [r1], r2
189 vext.8 d1, d0, d1, #1
190 vld1.64 {d2, d3}, [r1], r2
191 vext.8 d3, d2, d3, #1
197 vst1.64 {d0}, [r0,:64], r2
198 vst1.64 {d1}, [r0,:64], r2
@ pixels8_y2: 8-wide vertical half-pel interpolation — each output row is
@ the \vhadd average of two consecutive source rows.  Interleaved
@ averaging/loop lines are elided in this view.
203 .macro pixels8_y2 vhadd=vrhadd.u8
204 vld1.64 {d0}, [r1], r2
205 vld1.64 {d1}, [r1], r2
208 vld1.64 {d0}, [r1], r2
210 vld1.64 {d1}, [r1], r2
213 vst1.64 {d4}, [r0,:64], r2
214 vst1.64 {d5}, [r0,:64], r2
@ pixels8_xy2: 8-wide 2D (diagonal) half-pel interpolation, u16
@ accumulation narrowed by \vshrn (rounding unless no_rnd=1).
@ Widening adds and the narrowing shifts are elided in this view.
219 .macro pixels8_xy2 vshrn=vrshrn.u16 no_rnd=0
220 vld1.64 {d0, d1}, [r1], r2
221 vld1.64 {d2, d3}, [r1], r2
@ x+1 shifted copies of each 9-byte row:
227 vext.8 d4, d0, d1, #1
228 vext.8 d6, d2, d3, #1
232 vld1.64 {d0, d1}, [r1], r2
235 vext.8 d4, d0, d1, #1
237 vadd.u16 q10, q10, q11
241 vld1.64 {d2, d3}, [r1], r2
245 vadd.u16 q10, q10, q11
247 vst1.64 {d5}, [r0,:64], r2
249 vext.8 d6, d2, d3, #1
251 vst1.64 {d7}, [r0,:64], r2
@ pixfunc: emit an exported function ff_<pfx><name><suf>_neon whose body
@ is the \name macro expanded with \rnd_op and \args (expansion lines
@ elided in this view).
256 .macro pixfunc pfx name suf rnd_op args:vararg
257 function ff_\pfx\name\suf\()_neon, export=1
@ pixfunc2: emit both variants of a pixel function — the plain one here,
@ and presumably the _no_rnd one on the elided line(s) of this macro.
262 .macro pixfunc2 pfx name args:vararg
264 pixfunc \pfx \name \args
@ Instantiate the put/avg pixel functions.  The h264 qpel16/8 mc00
@ functions are full-pel copies; their bodies (which presumably fall
@ through to or expand the matching pixels macro) are elided in this view.
267 function ff_put_h264_qpel16_mc00_neon, export=1
271 pixfunc put_ pixels16
@ _no_rnd variants use the truncating vhadd.u8 / vshrn.u16 forms:
272 pixfunc2 put_ pixels16_x2, _no_rnd, vhadd.u8
273 pixfunc2 put_ pixels16_y2, _no_rnd, vhadd.u8
274 pixfunc2 put_ pixels16_xy2, _no_rnd, vshrn.u16, 1
276 function ff_avg_h264_qpel16_mc00_neon, export=1
280 pixfunc avg_ pixels16,, 1
282 function ff_put_h264_qpel8_mc00_neon, export=1
287 pixfunc2 put_ pixels8_x2, _no_rnd, vhadd.u8
288 pixfunc2 put_ pixels8_y2, _no_rnd, vhadd.u8
289 pixfunc2 put_ pixels8_xy2, _no_rnd, vshrn.u16, 1
291 function ff_avg_h264_qpel8_mc00_neon, export=1
295 pixfunc avg_ pixels8,, 1
@ ff_put_pixels_clamped_neon(const int16_t *block, uint8_t *pixels, int line_size)
@ Reads the whole 8x8 int16 block (r0), then stores eight 8-byte rows of
@ clamped u8 pixels to r1 with stride r2.  The narrowing/saturating
@ conversion instructions between loads and stores are elided in this view.
297 function ff_put_pixels_clamped_neon, export=1
298 vld1.64 {d16-d19}, [r0,:128]!
300 vld1.64 {d20-d23}, [r0,:128]!
302 vld1.64 {d24-d27}, [r0,:128]!
304 vld1.64 {d28-d31}, [r0,:128]!
306 vst1.64 {d0}, [r1,:64], r2
308 vst1.64 {d1}, [r1,:64], r2
310 vst1.64 {d2}, [r1,:64], r2
312 vst1.64 {d3}, [r1,:64], r2
314 vst1.64 {d4}, [r1,:64], r2
315 vst1.64 {d5}, [r1,:64], r2
316 vst1.64 {d6}, [r1,:64], r2
317 vst1.64 {d7}, [r1,:64], r2
@ ff_put_signed_pixels_clamped_neon: like put_pixels_clamped, but for
@ signed input — presumably offset by 128 before the u8 store (the
@ conversion/offset instructions are elided in this view; confirm).
@ Loads and stores are software-pipelined: later block loads overlap
@ earlier row stores.
321 function ff_put_signed_pixels_clamped_neon, export=1
323 vld1.64 {d16-d17}, [r0,:128]!
325 vld1.64 {d18-d19}, [r0,:128]!
327 vld1.64 {d16-d17}, [r0,:128]!
329 vld1.64 {d18-d19}, [r0,:128]!
331 vld1.64 {d20-d21}, [r0,:128]!
333 vld1.64 {d22-d23}, [r0,:128]!
335 vst1.64 {d0}, [r1,:64], r2
337 vst1.64 {d1}, [r1,:64], r2
339 vst1.64 {d2}, [r1,:64], r2
341 vld1.64 {d24-d25}, [r0,:128]!
343 vld1.64 {d26-d27}, [r0,:128]!
346 vst1.64 {d3}, [r1,:64], r2
348 vst1.64 {d4}, [r1,:64], r2
350 vst1.64 {d5}, [r1,:64], r2
353 vst1.64 {d6}, [r1,:64], r2
354 vst1.64 {d7}, [r1,:64], r2
@ ff_add_pixels_clamped_neon: add an int16 coefficient block (r0) to u8
@ pixels (r1, stride r2), clamping to u8, writing via r3 (presumably a
@ copy of r1 — the setup and add/narrow instructions are elided here).
@ Heavily software-pipelined: pixel-row loads, block loads and result
@ stores are interleaved.
358 function ff_add_pixels_clamped_neon, export=1
360 vld1.64 {d16}, [r1,:64], r2
361 vld1.64 {d0-d1}, [r0,:128]!
363 vld1.64 {d17}, [r1,:64], r2
364 vld1.64 {d2-d3}, [r0,:128]!
366 vld1.64 {d18}, [r1,:64], r2
368 vld1.64 {d4-d5}, [r0,:128]!
370 vst1.64 {d0}, [r3,:64], r2
372 vld1.64 {d19}, [r1,:64], r2
373 vld1.64 {d6-d7}, [r0,:128]!
376 vst1.64 {d2}, [r3,:64], r2
377 vld1.64 {d16}, [r1,:64], r2
379 vld1.64 {d0-d1}, [r0,:128]!
381 vst1.64 {d4}, [r3,:64], r2
382 vld1.64 {d17}, [r1,:64], r2
383 vld1.64 {d2-d3}, [r0,:128]!
385 vst1.64 {d6}, [r3,:64], r2
387 vld1.64 {d18}, [r1,:64], r2
388 vld1.64 {d4-d5}, [r0,:128]!
390 vst1.64 {d0}, [r3,:64], r2
392 vld1.64 {d19}, [r1,:64], r2
394 vld1.64 {d6-d7}, [r0,:128]!
396 vst1.64 {d2}, [r3,:64], r2
398 vst1.64 {d4}, [r3,:64], r2
399 vst1.64 {d6}, [r3,:64], r2
@ ff_vector_fmul_neon(float *dst, const float *src0, const float *src1, int len)
@ Elementwise float multiply, unrolled 8 floats per iteration with a
@ software pipeline (loads for the next iteration overlap stores of the
@ previous results).  Multiply/branch/tail lines are elided in this view.
403 function ff_vector_fmul_neon, export=1
405 vld1.64 {d0-d3}, [r1,:128]!
406 vld1.64 {d4-d7}, [r2,:128]!
413 vld1.64 {d0-d1}, [r1,:128]!
414 vld1.64 {d4-d5}, [r2,:128]!
416 vld1.64 {d2-d3}, [r1,:128]!
417 vld1.64 {d6-d7}, [r2,:128]!
419 vst1.64 {d16-d19},[r0,:128]!
420 vld1.64 {d0-d1}, [r1,:128]!
421 vld1.64 {d4-d5}, [r2,:128]!
423 vld1.64 {d2-d3}, [r1,:128]!
424 vld1.64 {d6-d7}, [r2,:128]!
426 vst1.64 {d20-d23},[r0,:128]!
@ Pipeline drain paths:
430 2: vld1.64 {d0-d1}, [r1,:128]!
431 vld1.64 {d4-d5}, [r2,:128]!
432 vst1.64 {d16-d17},[r0,:128]!
434 vld1.64 {d2-d3}, [r1,:128]!
435 vld1.64 {d6-d7}, [r2,:128]!
436 vst1.64 {d18-d19},[r0,:128]!
438 3: vst1.64 {d16-d19},[r0,:128]!
@ ff_vector_fmul_window_neon: windowed overlap-add multiply.  Pointers
@ are set up so r2/r4 walk backwards (negative stride r5 after the
@ lsl-scaled offsets below) while r1/r3 walk forwards; results go to
@ both ends (r0 forwards, ip backwards).  Arithmetic and loop-control
@ lines are elided in this view — confirm details in the full file.
442 function ff_vector_fmul_window_neon, export=1
447 add r2, r2, r5, lsl #2
448 add r4, r3, r5, lsl #3
449 add ip, r0, r5, lsl #3
451 vld1.64 {d0,d1}, [r1,:128]!
452 vld1.64 {d2,d3}, [r2,:128], r5
453 vld1.64 {d4,d5}, [r3,:128]!
454 vld1.64 {d6,d7}, [r4,:128], r5
464 vld1.64 {d0,d1}, [r1,:128]!
466 vld1.64 {d18,d19},[r2,:128], r5
468 vld1.64 {d24,d25},[r3,:128]!
470 vld1.64 {d6,d7}, [r4,:128], r5
475 vst1.64 {d20,d21},[r0,:128]!
476 vst1.64 {d22,d23},[ip,:128], r5
@ Drain: final multiply-accumulate and stores.
478 2: vmla.f32 d22, d3, d7
484 vst1.64 {d20,d21},[r0,:128]!
485 vst1.64 {d22,d23},[ip,:128], r5
@ Vorbis inverse channel coupling (mag/ang -> left/right), built only when
@ the Vorbis decoder is enabled.  The visible core computes sum/difference
@ (vadd/vsub) of q11 with q2/q3 and streams results to r3/r12; the sign
@ logic producing q2/q3 and the closing #endif are elided in this view.
489 #if CONFIG_VORBIS_DECODER
490 function ff_vorbis_inverse_coupling_neon, export=1
497 vld1.32 {d24-d25},[r1,:128]!
498 vld1.32 {d22-d23},[r0,:128]!
504 vadd.f32 q12, q11, q2
505 vsub.f32 q11, q11, q3
506 1: vld1.32 {d2-d3}, [r1,:128]!
507 vld1.32 {d0-d1}, [r0,:128]!
511 vst1.32 {d24-d25},[r3, :128]!
512 vst1.32 {d22-d23},[r12,:128]!
519 vld1.32 {d24-d25},[r1,:128]!
520 vld1.32 {d22-d23},[r0,:128]!
524 vst1.32 {d2-d3}, [r3, :128]!
525 vst1.32 {d0-d1}, [r12,:128]!
528 vadd.f32 q12, q11, q2
529 vsub.f32 q11, q11, q3
@ Pipeline drain:
532 2: vst1.32 {d2-d3}, [r3, :128]!
533 vst1.32 {d0-d1}, [r12,:128]!
@ Final (non-post-incremented) iteration writes back in place via r0/r1:
537 3: vld1.32 {d2-d3}, [r1,:128]
538 vld1.32 {d0-d1}, [r0,:128]
546 vst1.32 {d2-d3}, [r0,:128]!
547 vst1.32 {d0-d1}, [r1,:128]!
@ ff_vector_fmul_scalar_neon(float *dst, const float *src, float mul, int len)
@ Multiplies a float vector by a scalar broadcast into q8.  The VFP
@ prefix macro presumably selects the hard-float-ABI path (scalar arrives
@ in d0) — confirm the NOVFP counterpart in the elided lines.
@ Main loop is unrolled 2x q-registers and software-pipelined.
552 function ff_vector_fmul_scalar_neon, export=1
555 VFP vdup.32 q8, d0[0]
559 vld1.32 {q0},[r1,:128]!
560 vld1.32 {q1},[r1,:128]!
561 1: vmul.f32 q0, q0, q8
562 vld1.32 {q2},[r1,:128]!
564 vld1.32 {q3},[r1,:128]!
566 vst1.32 {q0},[r0,:128]!
568 vst1.32 {q1},[r0,:128]!
571 vld1.32 {q0},[r1,:128]!
572 vst1.32 {q2},[r0,:128]!
573 vld1.32 {q1},[r1,:128]!
574 vst1.32 {q3},[r0,:128]!
@ Drain and short-tail paths:
576 2: vst1.32 {q2},[r0,:128]!
577 vst1.32 {q3},[r0,:128]!
581 3: vld1.32 {q0},[r1,:128]!
583 vst1.32 {q0},[r0,:128]!
@ ff_vector_fmul_sv_scalar_2_neon: multiply src by a scattered vector
@ (pointers fetched via r12) and a scalar, 2 floats at a time.
@ VFP/NOVFP pick the scalar source per float ABI: d0 (hardfp) or r3
@ (softfp).  Pointer-advance and multiply lines are elided in this view.
590 function ff_vector_fmul_sv_scalar_2_neon, export=1
591 VFP vdup.32 d16, d0[0]
592 NOVFP vdup.32 d16, r3
594 vld1.32 {d0},[r1,:64]!
595 vld1.32 {d1},[r1,:64]!
600 vld1.32 {d2},[r12,:64]
602 vld1.32 {d3},[r12,:64]
606 vld1.32 {d0},[r1,:64]!
607 vld1.32 {d1},[r1,:64]!
608 vst1.32 {d4},[r0,:64]!
609 vst1.32 {d5},[r0,:64]!
@ Drain:
611 2: vst1.32 {d4},[r0,:64]!
612 vst1.32 {d5},[r0,:64]!
@ ff_vector_fmul_sv_scalar_4_neon: as the _2_ variant but 4 floats per
@ scattered pointer; scalar broadcast into q10 from d0 (hardfp) or r3
@ (softfp).  Multiplies and pointer updates are elided in this view.
616 function ff_vector_fmul_sv_scalar_4_neon, export=1
617 VFP vdup.32 q10, d0[0]
618 NOVFP vdup.32 q10, r3
623 vld1.32 {q0},[r1,:128]!
624 vld1.32 {q2},[r1,:128]!
626 vld1.32 {q1},[r12,:128]
628 vld1.32 {q3},[r12,:128]
635 vld1.32 {q0},[r1,:128]!
636 vld1.32 {q2},[r1,:128]!
637 vst1.32 {q8},[r0,:128]!
638 vst1.32 {q9},[r0,:128]!
@ Drain:
640 2: vst1.32 {q8},[r0,:128]!
641 vst1.32 {q9},[r0,:128]!
@ Short tail (single 4-float group):
645 3: vld1.32 {q0},[r1,:128]!
647 vld1.32 {q1},[r12,:128]
650 vst1.32 {q0},[r0,:128]!
@ ff_sv_fmul_scalar_2_neon: multiply a scattered vector (2-float chunks
@ via pointers in r12) by a scalar broadcast into q8.  NOVFP path and
@ pointer updates are elided in this view.
656 function ff_sv_fmul_scalar_2_neon, export=1
659 VFP vdup.32 q8, d0[0]
662 vld1.32 {d0},[r12,:64]
664 vld1.32 {d1},[r12,:64]
665 1: vmul.f32 q1, q0, q8
669 vld1.32 {d0},[r12,:64]
671 vld1.32 {d1},[r12,:64]
672 vst1.32 {q1},[r0,:128]!
@ Drain:
674 2: vst1.32 {q1},[r0,:128]!
@ ff_sv_fmul_scalar_4_neon: as _2_ but 4-float chunks per scattered
@ pointer.  The multiply and loop control are elided in this view.
679 function ff_sv_fmul_scalar_4_neon, export=1
682 VFP vdup.32 q8, d0[0]
685 vld1.32 {q0},[r12,:128]
687 vst1.32 {q0},[r0,:128]!
@ ff_butterflies_float_neon(float *v1, float *v2, int len)
@ In-place butterfly: loads 4 floats from each vector and stores results
@ back to both (q1 -> r0, q2 -> r1).  The add/sub instructions between
@ load and store are elided in this view — presumably v1+=v2, v2=old-v2.
694 function ff_butterflies_float_neon, export=1
695 1: vld1.32 {q0},[r0,:128]
696 vld1.32 {q1},[r1,:128]
699 vst1.32 {q2},[r1,:128]!
700 vst1.32 {q1},[r0,:128]!
@ ff_scalarproduct_float_neon(const float *v1, const float *v2, int len)
@ Dot product, 4 floats per iteration; the accumulate and horizontal
@ reduction are elided here.  On softfp (NOVFP) the float result is
@ moved into r0 for return; hardfp presumably returns it in s0/d0.
706 function ff_scalarproduct_float_neon, export=1
708 1: vld1.32 {q0},[r0,:128]!
709 vld1.32 {q1},[r1,:128]!
715 NOVFP vmov.32 r0, d0[0]
@ ff_vector_fmul_reverse_neon(float *dst, const float *src0,
@                             const float *src1, int len)
@ dst[i] = src0[i] * src1[len-1-i].  r2 is advanced to the end of src1
@ (len*4 bytes) and then walked backwards with stride r12 (presumably
@ negative — set on an elided line); the reversal/multiply instructions
@ are also elided in this view.
719 function ff_vector_fmul_reverse_neon, export=1
720 add r2, r2, r3, lsl #2
723 vld1.32 {q0-q1}, [r1,:128]!
724 vld1.32 {q2-q3}, [r2,:128], r12
735 vld1.32 {q0-q1}, [r1,:128]!
736 vld1.32 {q2-q3}, [r2,:128], r12
737 vst1.32 {q8-q9}, [r0,:128]!
@ Drain:
739 2: vst1.32 {q8-q9}, [r0,:128]!
@ ff_vector_fmul_add_neon(float *dst, const float *src0,
@                         const float *src1, const float *src2, int len)
@ dst = src0*src1 + src2, 8 floats per iteration.  The multiplies
@ producing q10/q11 are elided; the visible vadds fold in src2 (q2/q3).
743 function ff_vector_fmul_add_neon, export=1
745 vld1.32 {q0-q1}, [r1,:128]!
746 vld1.32 {q8-q9}, [r2,:128]!
747 vld1.32 {q2-q3}, [r3,:128]!
750 1: vadd.f32 q12, q2, q10
751 vadd.f32 q13, q3, q11
757 vld1.32 {q0}, [r1,:128]!
758 vld1.32 {q8}, [r2,:128]!
760 vld1.32 {q1}, [r1,:128]!
761 vld1.32 {q9}, [r2,:128]!
763 vld1.32 {q2-q3}, [r3,:128]!
764 vst1.32 {q12-q13},[r0,:128]!
@ Drain:
766 2: vst1.32 {q12-q13},[r0,:128]!
@ ff_vector_clipf_neon(float *dst, const float *src, float min, float max, int len)
@ Clamps each float to [min, max]: q0 = min, q1 = max (hardfp: both
@ packed in d0).  vmax applies the lower bound; the matching vmin for
@ the upper bound is elided in this view.  Unrolled 8 floats/iteration.
770 function ff_vector_clipf_neon, export=1
771 VFP vdup.32 q1, d0[1]
772 VFP vdup.32 q0, d0[0]
776 vld1.f32 {q2},[r1,:128]!
778 vld1.f32 {q3},[r1,:128]!
780 1: vmax.f32 q8, q10, q0
784 vld1.f32 {q2},[r1,:128]!
786 vld1.f32 {q3},[r1,:128]!
788 vst1.f32 {q8},[r0,:128]!
789 vst1.f32 {q9},[r0,:128]!
@ Drain:
791 2: vst1.f32 {q8},[r0,:128]!
792 vst1.f32 {q9},[r0,:128]!
@ ff_apply_window_int16_neon(int16_t *dst, const int16_t *src,
@                            const int16_t *window, int len)
@ Applies a symmetric int16 window: r4/lr point at the mirrored halves
@ (len*2 bytes past src/dst) and walk backwards via r12 (presumably a
@ negative stride set on an elided line).  vqrdmulh.s16 is the Q15
@ saturating rounding doubling multiply; d2/d3 use swapped window halves
@ (d7/d6) for the mirrored samples.
796 function ff_apply_window_int16_neon, export=1
798 add r4, r1, r3, lsl #1
799 add lr, r0, r3, lsl #1
804 vld1.16 {q0}, [r1,:128]!
805 vld1.16 {q2}, [r2,:128]!
806 vld1.16 {q1}, [r4,:128], r12
808 vqrdmulh.s16 q0, q0, q2
809 vqrdmulh.s16 d2, d2, d7
810 vqrdmulh.s16 d3, d3, d6
811 vst1.16 {q0}, [r0,:128]!
812 vst1.16 {q1}, [lr,:128], r12
@ ff_vector_clip_int32_neon: clamp int32 vector elements to [min, max],
@ 8 ints per iteration.  The clamping (presumably vmax/vmin with
@ broadcast bounds) and loop control are elided in this view.
819 function ff_vector_clip_int32_neon, export=1
824 vld1.32 {q2-q3}, [r1,:128]!
829 vst1.32 {q2-q3}, [r0,:128]!