/*
 * Copyright (c) 2011 Janne Grunau <janne-libav@jannau.net>
 * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"
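
@ The RV40 luma quarter-pel interpolation uses a 6-tap low-pass filter:
@   dst[x] = (src[-2] + src[3] - 5 * (src[-1] + src[2])
@             + c0 * src[0] + c1 * src[1] + (1 << (shift - 1))) >> shift
@ with (c0, c1, shift) one of (52, 20, 6), (20, 52, 6) or (20, 20, 5),
@ selected by the subpel position. The macros below expect the two centre
@ taps in \rc1/\rc2 (normally d0/d1) and filter one (qpel_lowpass) or two
@ (qpel_lowpass_x2) rows of 8 pixels at a time.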

.macro  qpel_lowpass    r0,  r1,  rc1, rc2, shift
        vext.8          d25, \r0, \r1, #1       @ src[-1]
        vext.8          d26, \r0, \r1, #4       @ src[ 2]
        vext.8          d24, \r0, \r1, #5       @ src[ 3]
        vaddl.u8        q9,  d25, d26           @ src[-1] + src[ 2]
        vaddl.u8        q8,  \r0, d24           @ src[-2] + src[ 3]
        vext.8          d27, \r0, \r1, #2       @ src[ 0]
        vshl.s16        q12, q9,  #2
        vsub.s16        q8,  q8,  q9
        vext.8          d28, \r0, \r1, #3       @ src[ 1]
        vsub.s16        q8,  q8,  q12           @ -5 * (src[-1] + src[ 2])
        vmlal.u8        q8,  d27, \rc1
        vmlal.u8        q8,  d28, \rc2
        vqrshrun.s16    \r0, q8,  #\shift
.endm

.macro  qpel_lowpass_x2 r0,  r1,  r2,  r3,  rc1, rc2, shift
        vext.8          d25, \r0, \r1, #1       @ src[-1]
        vext.8          d26, \r0, \r1, #4       @ src[ 2]
        vext.8          d24, \r0, \r1, #5       @ src[ 3]
        vaddl.u8        q9,  d25, d26           @ src[-1] + src[ 2]
        vaddl.u8        q8,  \r0, d24           @ src[-2] + src[ 3]
        vext.8          d29, \r0, \r1, #2       @ src[ 0]
        vext.8          d28, \r0, \r1, #3       @ src[ 1]
        vshl.s16        q10, q9,  #2
        vext.8          \r1, \r2, \r3, #1       @ src[-1]
        vsub.s16        q8,  q8,  q9
        vext.8          d22, \r2, \r3, #4       @ src[ 2]
        vext.8          \r0, \r2, \r3, #5       @ src[ 3]
        vaddl.u8        q13, \r1, d22           @ src[-1] + src[ 2]
        vaddl.u8        q12, \r2, \r0           @ src[-2] + src[ 3]
        vsub.s16        q8,  q8,  q10           @ -5 * (src[-1] + src[ 2])
        vshl.s16        q10, q13, #2
        vsub.s16        q12, q12, q13
        vmlal.u8        q8,  d29, \rc1
        vmlal.u8        q8,  d28, \rc2
        vsub.s16        q12, q12, q10           @ -5 * (src[-1] + src[ 2])
        vext.8          d26, \r2, \r3, #2       @ src[ 0]
        vext.8          d27, \r2, \r3, #3       @ src[ 1]
        vmlal.u8        q12, d26, \rc1
        vmlal.u8        q12, d27, \rc2
        vqrshrun.s16    \r0, q8,  #\shift
        vqrshrun.s16    \r2, q12, #\shift
.endm
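
@ Horizontal pass used by the two-dimensional (h + v) cases: filters rows of
@ 8 pixels and stores them packed through r12 into a scratch buffer for the
@ vertical pass. Assumes r1 = src, r2 = source stride and r3 = row count for
@ the loop below; one extra row is filtered after the loop.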

.macro  rv40_qpel8_h    shift
function put_rv40_qpel8_h_lp_packed_s\shift\()_neon
1:
        vld1.8          {q2}, [r1], r2
        vld1.8          {q3}, [r1], r2
        qpel_lowpass_x2 d4,  d5,  d6,  d7,  d0,  d1,  \shift
        vst1.8          {d4}, [r12,:64]!
        vst1.8          {d6}, [r12,:64]!
        subs            r3,  r3,  #2
        bgt             1b
        vld1.8          {q2}, [r1]
        qpel_lowpass    d4,  d5,  d0,  d1,  \shift
        vst1.8          {d4}, [r12,:64]!
        bx              lr
endfunc
.endm
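
@ Vertical pass over the packed scratch buffer: loads 13 packed rows from r1,
@ transposes so that columns become rows, applies the same low-pass filter,
@ and transposes back before storing 8 rows to r0 with stride r2. The avg
@ variant averages the result with the existing destination pixels.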

.macro  rv40_qpel8_v    shift, type
function \type\()_rv40_qpel8_v_lp_packed_s\shift\()_neon
        vld1.64         {d2},  [r1,:64]!
        vld1.64         {d3},  [r1,:64]!
        vld1.64         {d4},  [r1,:64]!
        vld1.64         {d5},  [r1,:64]!
        vld1.64         {d6},  [r1,:64]!
        vld1.64         {d7},  [r1,:64]!
        vld1.64         {d8},  [r1,:64]!
        vld1.64         {d9},  [r1,:64]!
        vld1.64         {d10}, [r1,:64]!
        vld1.64         {d11}, [r1,:64]!
        vld1.64         {d12}, [r1,:64]!
        vld1.64         {d13}, [r1,:64]!
        vld1.64         {d14}, [r1,:64]!
        transpose_8x8   d2,  d3,  d4,  d5,  d6,  d7,  d8,  d9
        transpose_8x8   d10, d11, d12, d13, d14, d15, d30, d31
        qpel_lowpass_x2 d2,  d10, d3,  d11, d0,  d1,  \shift
        qpel_lowpass_x2 d4,  d12, d5,  d13, d0,  d1,  \shift
        qpel_lowpass_x2 d6,  d14, d7,  d15, d0,  d1,  \shift
        qpel_lowpass_x2 d8,  d30, d9,  d31, d0,  d1,  \shift
        transpose_8x8   d2,  d3,  d4,  d5,  d6,  d7,  d8,  d9
.ifc \type,avg
        vld1.64         {d12}, [r0,:64], r2
        vld1.64         {d13}, [r0,:64], r2
        vld1.64         {d14}, [r0,:64], r2
        vld1.64         {d15}, [r0,:64], r2
        vld1.64         {d16}, [r0,:64], r2
        vld1.64         {d17}, [r0,:64], r2
        vld1.64         {d18}, [r0,:64], r2
        vld1.64         {d19}, [r0,:64], r2
        sub             r0,  r0,  r2,  lsl #3
        vrhadd.u8       q1,  q1,  q6
        vrhadd.u8       q2,  q2,  q7
        vrhadd.u8       q3,  q3,  q8
        vrhadd.u8       q4,  q4,  q9
.endif
        vst1.64         {d2},  [r0,:64], r2
        vst1.64         {d3},  [r0,:64], r2
        vst1.64         {d4},  [r0,:64], r2
        vst1.64         {d5},  [r0,:64], r2
        vst1.64         {d6},  [r0,:64], r2
        vst1.64         {d7},  [r0,:64], r2
        vst1.64         {d8},  [r0,:64], r2
        vst1.64         {d9},  [r0,:64], r2
        bx              lr
endfunc
.endm

        rv40_qpel8_h    5
        rv40_qpel8_h    6
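
@ \type is "put" or "avg". Everything below expands to the exported
@ ff_<type>_rv40_qpel8/16_mcXY_neon motion-compensation functions
@ (r0 = dst, r1 = src, r2 = stride), where X/Y select the horizontal and
@ vertical quarter-pel position and thus the filter taps and rounding shift.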

.macro  rv40_qpel       type
function \type\()_rv40_qpel8_h_lowpass_neon
.ifc \type,avg
        mov             r12, r0
.endif
1:
        vld1.8          {q2}, [r1], r2
        vld1.8          {q3}, [r1], r2
        qpel_lowpass_x2 d4,  d5,  d6,  d7,  d0,  d1,  6
.ifc \type,avg
        vld1.8          {d3},  [r12,:64], r2
        vld1.8          {d16}, [r12,:64], r2
        vrhadd.u8       d4,  d4,  d3
        vrhadd.u8       d6,  d6,  d16
.endif
        vst1.8          {d4}, [r0,:64], r2
        vst1.8          {d6}, [r0,:64], r2
        subs            r3,  r3,  #2
        bgt             1b
        bx              lr
endfunc

function \type\()_rv40_qpel8_v_lowpass_neon
        vld1.64         {d2},  [r1], r2
        vld1.64         {d3},  [r1], r2
        vld1.64         {d4},  [r1], r2
        vld1.64         {d5},  [r1], r2
        vld1.64         {d6},  [r1], r2
        vld1.64         {d7},  [r1], r2
        vld1.64         {d8},  [r1], r2
        vld1.64         {d9},  [r1], r2
        vld1.64         {d10}, [r1], r2
        vld1.64         {d11}, [r1], r2
        vld1.64         {d12}, [r1], r2
        vld1.64         {d13}, [r1], r2
        vld1.64         {d14}, [r1]
        transpose_8x8   d2,  d3,  d4,  d5,  d6,  d7,  d8,  d9
        transpose_8x8   d10, d11, d12, d13, d14, d15, d30, d31
        qpel_lowpass_x2 d2,  d10, d3,  d11, d0,  d1,  6
        qpel_lowpass_x2 d4,  d12, d5,  d13, d0,  d1,  6
        qpel_lowpass_x2 d6,  d14, d7,  d15, d0,  d1,  6
        qpel_lowpass_x2 d8,  d30, d9,  d31, d0,  d1,  6
        transpose_8x8   d2,  d3,  d4,  d5,  d6,  d7,  d8,  d9
.ifc \type,avg
        vld1.64         {d12}, [r0,:64], r2
        vld1.64         {d13}, [r0,:64], r2
        vld1.64         {d14}, [r0,:64], r2
        vld1.64         {d15}, [r0,:64], r2
        vld1.64         {d16}, [r0,:64], r2
        vld1.64         {d17}, [r0,:64], r2
        vld1.64         {d18}, [r0,:64], r2
        vld1.64         {d19}, [r0,:64], r2
        sub             r0,  r0,  r2,  lsl #3
        vrhadd.u8       q1,  q1,  q6
        vrhadd.u8       q2,  q2,  q7
        vrhadd.u8       q3,  q3,  q8
        vrhadd.u8       q4,  q4,  q9
.endif
        vst1.64         {d2},  [r0,:64], r2
        vst1.64         {d3},  [r0,:64], r2
        vst1.64         {d4},  [r0,:64], r2
        vst1.64         {d5},  [r0,:64], r2
        vst1.64         {d6},  [r0,:64], r2
        vst1.64         {d7},  [r0,:64], r2
        vst1.64         {d8},  [r0,:64], r2
        vst1.64         {d9},  [r0,:64], r2
        bx              lr
endfunc

        rv40_qpel8_v    5, \type
        rv40_qpel8_v    6, \type

function ff_\type\()_rv40_qpel8_mc10_neon, export=1
        sub             r1,  r1,  #2
        mov             r3,  #8
        vmov.i8         d0,  #52                @ c0
        vmov.i8         d1,  #20                @ c1
        b               \type\()_rv40_qpel8_h_lowpass_neon
endfunc

function ff_\type\()_rv40_qpel8_mc30_neon, export=1
        sub             r1,  r1,  #2
        mov             r3,  #8
        vmov.i8         d0,  #20                @ c0
        vmov.i8         d1,  #52                @ c1
        b               \type\()_rv40_qpel8_h_lowpass_neon
endfunc

function ff_\type\()_rv40_qpel8_mc01_neon, export=1
        vmov.i8         d0,  #52
        vmov.i8         d1,  #20
        sub             r1,  r1,  r2,  lsl #1
        push            {lr}
        vpush           {d8-d15}
        bl              \type\()_rv40_qpel8_v_lowpass_neon
        vpop            {d8-d15}
        pop             {pc}
endfunc

function ff_\type\()_rv40_qpel8_mc11_neon, export=1
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #104          @ 13 rows of packed intermediate
        mov             r12, sp
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        mov             r3,  #12
        vmov.i8         d0,  #52
        vmov.i8         d1,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        mov             r1,  sp
        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
        add             sp,  sp,  #104
        vpop            {d8-d15}
        pop             {r4, pc}
endfunc

function ff_\type\()_rv40_qpel8_mc21_neon, export=1
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #104
        mov             r12, sp
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        mov             r3,  #12
        vmov.i8         d0,  #20
        vmov.i8         d1,  #20
        bl              put_rv40_qpel8_h_lp_packed_s5_neon
        mov             r1,  sp
        vmov.i8         d0,  #52                @ taps for the vertical pass
        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
        add             sp,  sp,  #104
        vpop            {d8-d15}
        pop             {r4, pc}
endfunc

function ff_\type\()_rv40_qpel8_mc31_neon, export=1
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #104
        mov             r12, sp
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        mov             r3,  #12
        vmov.i8         d0,  #20
        vmov.i8         d1,  #52
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        mov             r1,  sp
        vswp            d0,  d1                 @ taps for the vertical pass
        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
        add             sp,  sp,  #104
        vpop            {d8-d15}
        pop             {r4, pc}
endfunc

function ff_\type\()_rv40_qpel8_mc12_neon, export=1
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #104
        mov             r12, sp
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        mov             r3,  #12
        vmov.i8         d0,  #52
        vmov.i8         d1,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        mov             r1,  sp
        vmov.i8         d0,  #20                @ taps for the vertical pass
        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
        add             sp,  sp,  #104
        vpop            {d8-d15}
        pop             {r4, pc}
endfunc

function ff_\type\()_rv40_qpel8_mc22_neon, export=1
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #104
        mov             r12, sp
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        mov             r3,  #12
        vmov.i8         d0,  #20
        vmov.i8         d1,  #20
        bl              put_rv40_qpel8_h_lp_packed_s5_neon
        mov             r1,  sp
        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
        add             sp,  sp,  #104
        vpop            {d8-d15}
        pop             {r4, pc}
endfunc

function ff_\type\()_rv40_qpel8_mc32_neon, export=1
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #104
        mov             r12, sp
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        mov             r3,  #12
        vmov.i8         d0,  #20
        vmov.i8         d1,  #52
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        mov             r1,  sp
        vmov.i8         d1,  #20                @ taps for the vertical pass
        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
        add             sp,  sp,  #104
        vpop            {d8-d15}
        pop             {r4, pc}
endfunc

function ff_\type\()_rv40_qpel8_mc03_neon, export=1
        vmov.i8         d0,  #20
        vmov.i8         d1,  #52
        sub             r1,  r1,  r2,  lsl #1
        push            {lr}
        vpush           {d8-d15}
        bl              \type\()_rv40_qpel8_v_lowpass_neon
        vpop            {d8-d15}
        pop             {pc}
endfunc
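
@ The (3,3) position is approximated with the bilinear half-pel average
@ shared with the hpeldsp code instead of the 6-tap filter.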

function ff_\type\()_rv40_qpel8_mc33_neon, export=1
        mov             r3,  #8
        b               X(ff_\type\()_pixels8_xy2_neon)
endfunc

function ff_\type\()_rv40_qpel8_mc13_neon, export=1
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #104
        mov             r12, sp
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        mov             r3,  #12
        vmov.i8         d0,  #52
        vmov.i8         d1,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        mov             r1,  sp
        vswp            d0,  d1                 @ taps for the vertical pass
        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
        add             sp,  sp,  #104
        vpop            {d8-d15}
        pop             {r4, pc}
endfunc

function ff_\type\()_rv40_qpel8_mc23_neon, export=1
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #104
        mov             r12, sp
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        mov             r3,  #12
        vmov.i8         d0,  #20
        vmov.i8         d1,  #20
        bl              put_rv40_qpel8_h_lp_packed_s5_neon
        mov             r1,  sp
        vmov.i8         d1,  #52                @ taps for the vertical pass
        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
        add             sp,  sp,  #104
        vpop            {d8-d15}
        pop             {r4, pc}
endfunc
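
@ 16x16 variants: the 8-wide code above is run over both 8-pixel column
@ halves and both 8-row block halves, rewinding r0/r1 between the calls.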

function ff_\type\()_rv40_qpel16_mc10_neon, export=1
        vmov.i8         d0,  #52
        vmov.i8         d1,  #20
.L\type\()_rv40_qpel16_h:
        push            {lr}
        sub             r1,  r1,  #2
        mov             r3,  #16
        bl              \type\()_rv40_qpel8_h_lowpass_neon
        sub             r0,  r0,  r2,  lsl #4
        sub             r1,  r1,  r2,  lsl #4
        add             r0,  r0,  #8
        add             r1,  r1,  #8
        mov             r3,  #16
        pop             {lr}
        b               \type\()_rv40_qpel8_h_lowpass_neon
endfunc

function ff_\type\()_rv40_qpel16_mc30_neon, export=1
        vmov.i8         d0,  #20
        vmov.i8         d1,  #52
        b               .L\type\()_rv40_qpel16_h
endfunc

function ff_\type\()_rv40_qpel16_mc01_neon, export=1
        vmov.i8         d0,  #52
        vmov.i8         d1,  #20
.L\type\()_rv40_qpel16_v:
        sub             r1,  r1,  r2,  lsl #1
        push            {lr}
        vpush           {d8-d15}
        bl              \type\()_rv40_qpel8_v_lowpass_neon
        sub             r1,  r1,  r2,  lsl #2
        bl              \type\()_rv40_qpel8_v_lowpass_neon
        add             r0,  r0,  #8
        sub             r0,  r0,  r2,  lsl #4
        sub             r1,  r1,  r2,  lsl #4
        sub             r1,  r1,  r2,  lsl #2
        add             r1,  r1,  #8
        bl              \type\()_rv40_qpel8_v_lowpass_neon
        sub             r1,  r1,  r2,  lsl #2
        bl              \type\()_rv40_qpel8_v_lowpass_neon
        vpop            {d8-d15}
        pop             {pc}
endfunc

function ff_\type\()_rv40_qpel16_mc11_neon, export=1
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        vmov.i8         d0,  #52
        vmov.i8         d1,  #20
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #336          @ packed scratch: 2 x 21 rows
        mov             r12, sp
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        sub             r1,  r1,  r2,  lsl #4
        sub             r1,  r1,  r2,  lsl #2
        add             r1,  r1,  #8
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
.L\type\()_rv40_qpel16_v_s6:
        mov             r1,  sp
        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
        sub             r1,  r1,  #40
        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
        sub             r0,  r0,  r2,  lsl #4
        add             r0,  r0,  #8
        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
        sub             r1,  r1,  #40
        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
        add             sp,  sp,  #336
        vpop            {d8-d15}
        pop             {r4, pc}
endfunc

function ff_\type\()_rv40_qpel16_mc21_neon, export=1
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        vmov.i8         d0,  #20
        vmov.i8         d1,  #20
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #336
        mov             r12, sp
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s5_neon
        sub             r1,  r1,  r2,  lsl #4
        sub             r1,  r1,  r2,  lsl #2
        add             r1,  r1,  #8
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s5_neon
        vmov.i8         d0,  #52                @ taps for the vertical pass
        b               .L\type\()_rv40_qpel16_v_s6
endfunc

function ff_\type\()_rv40_qpel16_mc31_neon, export=1
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        vmov.i8         d0,  #20
        vmov.i8         d1,  #52
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #336
        mov             r12, sp
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        sub             r1,  r1,  r2,  lsl #4
        sub             r1,  r1,  r2,  lsl #2
        add             r1,  r1,  #8
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        vswp            d0,  d1                 @ taps for the vertical pass
        b               .L\type\()_rv40_qpel16_v_s6
endfunc

function ff_\type\()_rv40_qpel16_mc12_neon, export=1
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        vmov.i8         d0,  #52
        vmov.i8         d1,  #20
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #336
        mov             r12, sp
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        sub             r1,  r1,  r2,  lsl #4
        sub             r1,  r1,  r2,  lsl #2
        add             r1,  r1,  #8
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        vmov.i8         d0,  #20                @ taps for the vertical pass
.L\type\()_rv40_qpel16_v_s5:
        mov             r1,  sp
        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
        sub             r1,  r1,  #40
        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
        sub             r0,  r0,  r2,  lsl #4
        add             r0,  r0,  #8
        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
        sub             r1,  r1,  #40
        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
        add             sp,  sp,  #336
        vpop            {d8-d15}
        pop             {r4, pc}
endfunc

function ff_\type\()_rv40_qpel16_mc22_neon, export=1
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        vmov.i8         d0,  #20
        vmov.i8         d1,  #20
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #336
        mov             r12, sp
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s5_neon
        sub             r1,  r1,  r2,  lsl #4
        sub             r1,  r1,  r2,  lsl #2
        add             r1,  r1,  #8
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s5_neon
        b               .L\type\()_rv40_qpel16_v_s5
endfunc

function ff_\type\()_rv40_qpel16_mc32_neon, export=1
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        vmov.i8         d0,  #20
        vmov.i8         d1,  #52
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #336
        mov             r12, sp
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        sub             r1,  r1,  r2,  lsl #4
        sub             r1,  r1,  r2,  lsl #2
        add             r1,  r1,  #8
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        vmov.i8         d1,  #20                @ taps for the vertical pass
        b               .L\type\()_rv40_qpel16_v_s5
endfunc

function ff_\type\()_rv40_qpel16_mc03_neon, export=1
        vmov.i8         d0,  #20
        vmov.i8         d1,  #52
        b               .L\type\()_rv40_qpel16_v
endfunc

function ff_\type\()_rv40_qpel16_mc13_neon, export=1
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        vmov.i8         d0,  #52
        vmov.i8         d1,  #20
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #336
        mov             r12, sp
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        sub             r1,  r1,  r2,  lsl #4
        sub             r1,  r1,  r2,  lsl #2
        add             r1,  r1,  #8
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s6_neon
        vswp            d0,  d1                 @ taps for the vertical pass
        b               .L\type\()_rv40_qpel16_v_s6
endfunc

function ff_\type\()_rv40_qpel16_mc23_neon, export=1
        sub             r1,  r1,  r2,  lsl #1
        sub             r1,  r1,  #2
        vmov.i8         d0,  #20
        vmov.i8         d1,  #20
        push            {r4, lr}
        vpush           {d8-d15}
        sub             sp,  sp,  #336
        mov             r12, sp
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s5_neon
        sub             r1,  r1,  r2,  lsl #4
        sub             r1,  r1,  r2,  lsl #2
        add             r1,  r1,  #8
        mov             r3,  #20
        bl              put_rv40_qpel8_h_lp_packed_s5_neon
        vmov.i8         d1,  #52                @ taps for the vertical pass
        b               .L\type\()_rv40_qpel16_v_s6
endfunc

function ff_\type\()_rv40_qpel16_mc33_neon, export=1
        mov             r3,  #16
        b               X(ff_\type\()_pixels16_xy2_neon)
endfunc
.endm

        rv40_qpel       put
        rv40_qpel       avg
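
@ RV40 weighted prediction:
@   dst[x] = (((w2 * src1[x]) >> 9) + ((w1 * src2[x]) >> 9) + 0x10) >> 5
@ w2 multiplies src1 and w1 multiplies src2, matching the C reference in
@ rv40dsp.c. The weights are kept in d0[0]/d0[2]; the two shifts map to the
@ vshrn #9 and the rounding vrshrn #5 below.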

.macro  rv40_weight
        vmovl.u8        q8,  d2
        vmovl.u8        q9,  d3
        vmovl.u8        q10, d4
        vmovl.u8        q11, d5
        vmull.u16       q2,  d16, d0[2]
        vmull.u16       q3,  d17, d0[2]
        vmull.u16       q8,  d18, d0[2]
        vmull.u16       q9,  d19, d0[2]
        vmull.u16       q12, d20, d0[0]
        vmull.u16       q13, d21, d0[0]
        vmull.u16       q14, d22, d0[0]
        vmull.u16       q15, d23, d0[0]
        vshrn.i32       d4,  q2,  #9
        vshrn.i32       d5,  q3,  #9
        vshrn.i32       d6,  q8,  #9
        vshrn.i32       d7,  q9,  #9
        vshrn.i32       d16, q12, #9
        vshrn.i32       d17, q13, #9
        vshrn.i32       d18, q14, #9
        vshrn.i32       d19, q15, #9
        vadd.u16        q2,  q2,  q8
        vadd.u16        q3,  q3,  q9
        vrshrn.i16      d2,  q2,  #5
        vrshrn.i16      d3,  q3,  #5
.endm

/* void ff_rv40_weight_func_16_neon(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                                    int w1, int w2, int stride) */
function ff_rv40_weight_func_16_neon, export=1
        ldr             r12, [sp]               @ w2
        vmov.16         d0[0], r3               @ w1
        vmov.16         d0[2], r12
        ldr             r12, [sp, #4]           @ stride
        mov             r3,  #16
1:
        vld1.8          {q1}, [r1,:128], r12
        vld1.8          {q2}, [r2,:128], r12
        rv40_weight
        vst1.8          {q1}, [r0,:128], r12
        subs            r3,  r3,  #1
        bne             1b
        bx              lr
endfunc

/* void ff_rv40_weight_func_8_neon(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                                   int w1, int w2, int stride) */
function ff_rv40_weight_func_8_neon, export=1
        ldr             r12, [sp]               @ w2
        vmov.16         d0[0], r3               @ w1
        vmov.16         d0[2], r12
        ldr             r12, [sp, #4]           @ stride
        mov             r3,  #8
1:
        vld1.8          {d2}, [r1,:64], r12
        vld1.8          {d3}, [r1,:64], r12
        vld1.8          {d4}, [r2,:64], r12
        vld1.8          {d5}, [r2,:64], r12
        rv40_weight
        vst1.8          {d2}, [r0,:64], r12
        vst1.8          {d3}, [r0,:64], r12
        subs            r3,  r3,  #2
        bne             1b
        bx              lr
endfunc
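
@ Loop-filter strength decision: sums over 4-pixel groups on each side of
@ the edge are compared against the beta thresholds to decide whether and
@ how strongly to filter. The p1/q1 flags are stored through the int
@ pointers passed on the stack and the overall verdict is returned in r0.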

function ff_rv40_h_loop_filter_strength_neon, export=1
        pkhbt           r2,  r3,  r2,  lsl #18
        sub             r0,  r0,  r1,  lsl #1
        sub             r0,  r0,  r1
        vld1.32         {d4[]},  [r0,:32], r1   @ -3
        vld1.32         {d0[]},  [r0,:32], r1   @ -2
        vld1.32         {d4[1]}, [r0,:32], r1   @ -1
        vld1.32         {d5[]},  [r0,:32], r1   @  0
        vld1.32         {d1[]},  [r0,:32], r1   @  1
        vld1.32         {d5[0]}, [r0,:32], r1   @  2
        vpaddl.u8       q8,  q0                 @ -2, -2, -2, -2, 1, 1, 1, 1
        vpaddl.u8       q9,  q2                 @ -3, -3, -1, -1, 2, 2, 0, 0
        vdup.32         d30, r2                 @ beta2, beta << 2
        vpadd.u16       d16, d16, d17           @ -2, -2, 1, 1
        vpadd.u16       d18, d18, d19           @ -3, -1, 2, 0
        vabd.u16        d16, d18, d16
        vclt.u16        d16, d16, d30
        ldrd            r2,  r3,  [sp, #4]
        vshr.u32        q12, q12, #15
        vst1.32         {d24[1]}, [r2,:32]
        vst1.32         {d25[1]}, [r3,:32]

        ldrd            r2,  r3,  [sp, #4]
endfunc

function ff_rv40_v_loop_filter_strength_neon, export=1
        pkhbt           r2,  r3,  r2,  lsl #18
        vld1.8          {d0}, [r0], r1
        vld1.8          {d1}, [r0], r1
        vld1.8          {d2}, [r0], r1
        vld1.8          {d3}, [r0], r1
        vadd.u16        q0,  q0,  q1            @ -3, -2, -1, 0, 1, 2
        vext.16         q1,  q0,  q0,  #1       @ -2, -1, 0, 1, 2
        ldrd            r2,  r3,  [sp, #4]
        vext.16         d1,  d0,  d1,  #3
        vst1.32         {d2[1]}, [r2,:32]
        vst1.32         {d3[1]}, [r3,:32]
endfunc
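
@ Weak deblocking filter: a correction term is derived from the pixels
@ around the edge, clipped to +/-lim_p0q0 and applied to the two pixels
@ adjacent to the edge; p1 and q1 are additionally corrected within
@ +/-lim_p1 / +/-lim_q1 when their filter flags are set. alpha and beta
@ gate the whole operation.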

.macro  rv40_weak_loop_filter
        vdup.16         d30, r2                 @ filter_p1
        vdup.16         d31, r3                 @ filter_q1
        ldrd            r2,  r3,  [sp]
        vdup.16         d28, r2                 @ alpha
        vdup.16         d29, r3                 @ beta
        ldr             r12, [sp, #8]
        vdup.16         d25, r12                @ lim_p0q0
        ldrd            r2,  r3,  [sp, #12]
        vsubl.u8        q9,  d5,  d4            @ x, t
        vabdl.u8        q8,  d5,  d4            @ x, abs(t)
        vmov.i16        d22, #3
        vceq.i16        d16, d19, #0            @ !t
        vshl.s16        d19, d19, #2            @ t << 2
        vmul.u16        d18, d17, d28           @ alpha * abs(t)
        vand            d24, d30, d31           @ filter_p1 & filter_q1
        vsubl.u8        q1,  d0,  d4            @ p1p2, p1p0
        vsubl.u8        q3,  d1,  d5            @ q1q2, q1q0
        vshr.u16        d18, d18, #7
        vadd.i16        d22, d22, d24           @ 3 - (filter_p1 & filter_q1)
        vsubl.u8        q10, d0,  d1            @ src[-2] - src[1]
        vcle.u16        d18, d18, d22
        vneg.s16        d23, d25                @ -lim_p0q0
        vadd.s16        d19, d19, d20
        vbic            d16, d18, d16           @ t && u <= 3 - (fp1 & fq1)
        vtrn.32         d4,  d5                 @ -3, 2, -1, 0
        vrshr.s16       d19, d19, #3
        vswp            d3,  d6                 @ q1q2, p1p0
        vmin.s16        d19, d19, d25
        vadd.s16        q10, q1,  q3            @ p1p2 + p1p0, q1q2 + q1q0
        vmax.s16        d19, d19, d23           @ diff
        vabs.s16        q1,  q1                 @ abs(p1p2), abs(q1q2)
        vand            d18, d19, d16           @ diff
        vneg.s16        d19, d18                @ -diff
        vdup.16         d26, r3                 @ lim_p1
        vaddw.u8        q2,  q9,  d5            @ src[-1]+diff, src[0]-diff
        vhsub.s16       q11, q10, q9
        vqmovun.s16     d4,  q2                 @ -1, 0
        vdup.16         d27, r2                 @ lim_q1
        vtrn.32         d0,  d1                 @ -2, 1, -2, 1
        vqmovun.s16     d5,  q3                 @ -2, 1
.endm

function ff_rv40_h_weak_loop_filter_neon, export=1
        sub             r0,  r0,  r1,  lsl #1
        sub             r0,  r0,  r1
        vld1.32         {d4[]},  [r0,:32], r1   @ -3
        vld1.32         {d0[]},  [r0,:32], r1   @ -2
        vld1.32         {d4[1]}, [r0,:32], r1   @ -1
        vld1.32         {d5[]},  [r0,:32], r1   @  0
        vld1.32         {d1[]},  [r0,:32], r1   @  1
        vld1.32         {d5[0]}, [r0,:32]       @  2
        sub             r0,  r0,  r1,  lsl #2
        rv40_weak_loop_filter
        vst1.32         {d5[0]}, [r0,:32], r1
        vst1.32         {d4[0]}, [r0,:32], r1
        vst1.32         {d4[1]}, [r0,:32], r1
        vst1.32         {d5[1]}, [r0,:32], r1
        bx              lr
endfunc
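
@ Vertical-edge variant: gathers the pixels on each side of the edge with
@ 8-byte row loads through r12 and transposes them so the row-oriented
@ filter macro above can be reused; the filtered 4-pixel-wide strip is
@ written back with interleaved vst4 stores.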

function ff_rv40_v_weak_loop_filter_neon, export=1
        vld1.8          {d4}, [r12], r1
        vld1.8          {d5}, [r12], r1
        vld1.8          {d2}, [r12], r1
        vld1.8          {d3}, [r12], r1
        rv40_weak_loop_filter
        vst4.8          {d4[0], d5[0], d6[0], d7[0]}, [r0], r1
        vst4.8          {d4[1], d5[1], d6[1], d7[1]}, [r0], r1
        vst4.8          {d4[2], d5[2], d6[2], d7[2]}, [r0], r1
        vst4.8          {d4[3], d5[3], d6[3], d7[3]}, [r0], r1
        vst4.8          {d4[4], d5[4], d6[4], d7[4]}, [r0], r1
        vst4.8          {d4[5], d5[5], d6[5], d7[5]}, [r0], r1
        vst4.8          {d4[6], d5[6], d6[6], d7[6]}, [r0], r1
        vst4.8          {d4[7], d5[7], d6[7], d7[7]}, [r0], r1
        bx              lr
endfunc