@ ARMv4 optimized DSP utils
@ Copyright (c) 2004 AGAWA Koji <i (AT) atty (DOT) jp>
@
@ This file is part of Libav.
@
@ Libav is free software; you can redistribute it and/or
@ modify it under the terms of the GNU Lesser General Public
@ License as published by the Free Software Foundation; either
@ version 2.1 of the License, or (at your option) any later version.
@
@ Libav is distributed in the hope that it will be useful,
@ but WITHOUT ANY WARRANTY; without even the implied warranty of
@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
@ Lesser General Public License for more details.
@
@ You should have received a copy of the GNU Lesser General Public
@ License along with Libav; if not, write to the Free Software
@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

#include "libavutil/arm/asm.S"

#if !HAVE_ARMV5TE_EXTERNAL
.macro  ALIGN_QWORD_D shift, Rd0, Rd1, Rd2, Rd3, Rn0, Rn1, Rn2, Rn3, Rn4
        mov     \Rd0, \Rn0, lsr #(\shift * 8)
        mov     \Rd1, \Rn1, lsr #(\shift * 8)
        mov     \Rd2, \Rn2, lsr #(\shift * 8)
        mov     \Rd3, \Rn3, lsr #(\shift * 8)
        orr     \Rd0, \Rd0, \Rn1, lsl #(32 - \shift * 8)
        orr     \Rd1, \Rd1, \Rn2, lsl #(32 - \shift * 8)
        orr     \Rd2, \Rd2, \Rn3, lsl #(32 - \shift * 8)
        orr     \Rd3, \Rd3, \Rn4, lsl #(32 - \shift * 8)
.endm
.macro  ALIGN_DWORD shift, R0, R1, R2
        mov     \R0, \R0, lsr #(\shift * 8)
        orr     \R0, \R0, \R1, lsl #(32 - \shift * 8)
        mov     \R1, \R1, lsr #(\shift * 8)
        orr     \R1, \R1, \R2, lsl #(32 - \shift * 8)
.endm
.macro  ALIGN_DWORD_D shift, Rdst0, Rdst1, Rsrc0, Rsrc1, Rsrc2
        mov     \Rdst0, \Rsrc0, lsr #(\shift * 8)
        mov     \Rdst1, \Rsrc1, lsr #(\shift * 8)
        orr     \Rdst0, \Rdst0, \Rsrc1, lsl #(32 - (\shift * 8))
        orr     \Rdst1, \Rdst1, \Rsrc2, lsl #(32 - (\shift * 8))
.endm
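
@ The ALIGN_* macros above rebuild word-aligned data from words loaded
@ around an unaligned source pointer: each destination word combines the
@ tail of one loaded word with the head of the next.  Rough little-endian
@ C sketch (illustration only, not part of the original source), where
@ shift is the misalignment in bytes (1..3):
@
@     dst0 = (src0 >> (shift * 8)) | (src1 << (32 - shift * 8));
@     dst1 = (src1 >> (shift * 8)) | (src2 << (32 - shift * 8));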
.macro  RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask
        @ Rd = (Rn | Rm) - (((Rn ^ Rm) & ~0x01010101) >> 1)
        eor     \Rd0, \Rn0, \Rm0
        eor     \Rd1, \Rn1, \Rm1
        orr     \Rn0, \Rn0, \Rm0
        orr     \Rn1, \Rn1, \Rm1
        and     \Rd0, \Rd0, \Rmask
        and     \Rd1, \Rd1, \Rmask
        sub     \Rd0, \Rn0, \Rd0, lsr #1
        sub     \Rd1, \Rn1, \Rd1, lsr #1
.endm
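
@ For reference (illustrative C, not from the original source): the macro
@ averages four bytes per word with rounding up, without carries leaking
@ between byte lanes.  With Rmask = 0xFEFEFEFE (= ~0x01010101):
@
@     avg = (a | b) - (((a ^ b) & 0xFEFEFEFE) >> 1);
@     /* per byte this equals (a + b + 1) >> 1 */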
.macro  NO_RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask
        @ Rd = (Rn & Rm) + (((Rn ^ Rm) & ~0x01010101) >> 1)
        eor     \Rd0, \Rn0, \Rm0
        eor     \Rd1, \Rn1, \Rm1
        and     \Rn0, \Rn0, \Rm0
        and     \Rn1, \Rn1, \Rm1
        and     \Rd0, \Rd0, \Rmask
        and     \Rd1, \Rd1, \Rmask
        add     \Rd0, \Rn0, \Rd0, lsr #1
        add     \Rd1, \Rn1, \Rd1, lsr #1
.endm
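
@ Reference sketch of the truncating (no-round) variant, again illustrative
@ C rather than part of the original source:
@
@     avg = (a & b) + (((a ^ b) & 0xFEFEFEFE) >> 1);
@     /* per byte this equals (a + b) >> 1 */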
.macro  JMP_ALIGN tmp, reg
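
@ JMP_ALIGN dispatches on the low two bits of the pixel pointer, branching
@ to local label 1f, 2f, 3f or 4f so that each function below runs the loop
@ variant matching its source alignment (0-3).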
@ ----------------------------------------------------------------
function ff_put_pixels16_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
        ALIGN_QWORD_D   1, r9, r10, r11, r12, r4, r5, r6, r7, r8
        ALIGN_QWORD_D   2, r9, r10, r11, r12, r4, r5, r6, r7, r8
        ALIGN_QWORD_D   3, r9, r10, r11, r12, r4, r5, r6, r7, r8
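
@ At the C level the put_pixels* functions simply copy a block row by row
@ (illustrative sketch, not from the original source):
@
@     for (i = 0; i < h; i++) {
@         memcpy(block, pixels, 16);      /* 8 for the pixels8 variant */
@         block  += line_size;
@         pixels += line_size;
@     }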
@ ----------------------------------------------------------------
function ff_put_pixels8_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
        ALIGN_DWORD     1, r4, r5, r12
        ALIGN_DWORD     2, r4, r5, r12
        ALIGN_DWORD     3, r4, r5, r12
@ ----------------------------------------------------------------
function ff_put_pixels8_x2_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
        ALIGN_DWORD_D   1, r6, r7, r4, r5, r10
        RND_AVG32       r8, r9, r4, r5, r6, r7, r12
        ALIGN_DWORD_D   1, r6, r7, r4, r5, r10
        ALIGN_DWORD_D   2, r8, r9, r4, r5, r10
        RND_AVG32       r4, r5, r6, r7, r8, r9, r12
        ALIGN_DWORD_D   2, r6, r7, r4, r5, r10
        ALIGN_DWORD_D   3, r8, r9, r4, r5, r10
        RND_AVG32       r4, r5, r6, r7, r8, r9, r12
        ALIGN_DWORD_D   3, r6, r7, r4, r5, r10
        RND_AVG32       r8, r9, r6, r7, r5, r10, r12
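
@ The _x2 functions average each pixel with its right-hand neighbour,
@ rounding up.  Rough C equivalent (illustration only, not from the
@ original source):
@
@     for (i = 0; i < h; i++) {
@         for (j = 0; j < 8; j++)
@             block[j] = (pixels[j] + pixels[j + 1] + 1) >> 1;
@         block  += line_size;
@         pixels += line_size;
@     }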
function ff_put_no_rnd_pixels8_x2_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
        ALIGN_DWORD_D   1, r6, r7, r4, r5, r10
        NO_RND_AVG32    r8, r9, r4, r5, r6, r7, r12
        ALIGN_DWORD_D   1, r6, r7, r4, r5, r10
        ALIGN_DWORD_D   2, r8, r9, r4, r5, r10
        NO_RND_AVG32    r4, r5, r6, r7, r8, r9, r12
        ALIGN_DWORD_D   2, r6, r7, r4, r5, r10
        ALIGN_DWORD_D   3, r8, r9, r4, r5, r10
        NO_RND_AVG32    r4, r5, r6, r7, r8, r9, r12
        ALIGN_DWORD_D   3, r6, r7, r4, r5, r10
        NO_RND_AVG32    r8, r9, r6, r7, r5, r10, r12
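
@ The no_rnd variant differs only in rounding: per byte it computes
@ (pixels[j] + pixels[j + 1]) >> 1, i.e. ties round down (illustrative
@ note, not from the original source).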
@ ----------------------------------------------------------------
function ff_put_pixels8_y2_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
        RND_AVG32       r8, r9, r4, r5, r6, r7, r12
        RND_AVG32       r8, r9, r6, r7, r4, r5, r12
        ALIGN_DWORD     1, r4, r5, r6
        ALIGN_DWORD     1, r7, r8, r9
        RND_AVG32       r10, r11, r4, r5, r7, r8, r12
        ALIGN_DWORD     1, r4, r5, r6
        RND_AVG32       r10, r11, r7, r8, r4, r5, r12
        ALIGN_DWORD     2, r4, r5, r6
        ALIGN_DWORD     2, r7, r8, r9
        RND_AVG32       r10, r11, r4, r5, r7, r8, r12
        ALIGN_DWORD     2, r4, r5, r6
        RND_AVG32       r10, r11, r7, r8, r4, r5, r12
        ALIGN_DWORD     3, r4, r5, r6
        ALIGN_DWORD     3, r7, r8, r9
        RND_AVG32       r10, r11, r4, r5, r7, r8, r12
        ALIGN_DWORD     3, r4, r5, r6
        RND_AVG32       r10, r11, r7, r8, r4, r5, r12
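
@ The _y2 functions average each pixel with the one directly below it
@ (vertical half-pel).  Rough C equivalent (illustration only, not from
@ the original source):
@
@     for (i = 0; i < h; i++) {
@         for (j = 0; j < 8; j++)
@             block[j] = (pixels[j] + pixels[j + line_size] + 1) >> 1;
@         block  += line_size;
@         pixels += line_size;
@     }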
function ff_put_no_rnd_pixels8_y2_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
        NO_RND_AVG32    r8, r9, r4, r5, r6, r7, r12
        NO_RND_AVG32    r8, r9, r6, r7, r4, r5, r12
        ALIGN_DWORD     1, r4, r5, r6
        ALIGN_DWORD     1, r7, r8, r9
        NO_RND_AVG32    r10, r11, r4, r5, r7, r8, r12
        ALIGN_DWORD     1, r4, r5, r6
        NO_RND_AVG32    r10, r11, r7, r8, r4, r5, r12
        ALIGN_DWORD     2, r4, r5, r6
        ALIGN_DWORD     2, r7, r8, r9
        NO_RND_AVG32    r10, r11, r4, r5, r7, r8, r12
        ALIGN_DWORD     2, r4, r5, r6
        NO_RND_AVG32    r10, r11, r7, r8, r4, r5, r12
        ALIGN_DWORD     3, r4, r5, r6
        ALIGN_DWORD     3, r7, r8, r9
        NO_RND_AVG32    r10, r11, r4, r5, r7, r8, r12
        ALIGN_DWORD     3, r4, r5, r6
        NO_RND_AVG32    r10, r11, r7, r8, r4, r5, r12
@ ----------------------------------------------------------------
.macro  RND_XY2_IT align, rnd
        @ l1=  (a & 0x03030303) + (b & 0x03030303) ?(+ 0x02020202)
        @ h1= ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2)
        ALIGN_DWORD_D   1, r4, r5, r6, r7, r8
        ALIGN_DWORD_D   1, r4, r5, r8, r9, r10
        ALIGN_DWORD_D   2, r6, r7, r8, r9, r10
        ALIGN_DWORD_D   2, r4, r5, r8, r9, r10
        ALIGN_DWORD_D   3, r6, r7, r8, r9, r10
        ALIGN_DWORD_D   3, r4, r5, r5, r6, r7
        andeq   r14, r14, r14, \rnd #1
        ldr     r12, =0xfcfcfcfc >> 2
        and     r4, r12, r4, lsr #2
        and     r5, r12, r5, lsr #2
        and     r6, r12, r6, lsr #2
        and     r7, r12, r7, lsr #2
.macro  RND_XY2_EXPAND align, rnd
        RND_XY2_IT      \align, \rnd
        RND_XY2_IT      \align, \rnd
        and     r4, r14, r4, lsr #2
        and     r5, r14, r5, lsr #2
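
@ The _xy2 functions below average a 2x2 neighbourhood per output pixel.
@ Rough C equivalent (illustration only, not from the original source):
@
@     for (i = 0; i < h; i++) {
@         for (j = 0; j < 8; j++)
@             block[j] = (pixels[j]             + pixels[j + 1] +
@                         pixels[j + line_size] + pixels[j + line_size + 1] +
@                         2) >> 2;     /* +1 instead of +2 for the no_rnd case */
@         block  += line_size;
@         pixels += line_size;
@     }
@
@ To add four bytes per lane without inter-byte carries, each word is split
@ into its low two bits (mask 0x03030303) and its high six bits (shifted
@ right by 2 and masked with 0xfcfcfcfc >> 2), summed separately and then
@ recombined.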
function ff_put_pixels8_xy2_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
        push    {r4-r11,lr}             @ R14 is also called LR
1:      RND_XY2_EXPAND  0, lsl
2:      RND_XY2_EXPAND  1, lsl
3:      RND_XY2_EXPAND  2, lsl
4:      RND_XY2_EXPAND  3, lsl
function ff_put_no_rnd_pixels8_xy2_arm, export=1
        @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
        @ block = word aligned, pixels = unaligned
1:      RND_XY2_EXPAND  0, lsr
2:      RND_XY2_EXPAND  1, lsr
3:      RND_XY2_EXPAND  2, lsr
4:      RND_XY2_EXPAND  3, lsr
@ void ff_add_pixels_clamped_arm(int16_t *block, uint8_t *dest, int stride)
function ff_add_pixels_clamped_arm, export=1
        ldr     r4, [r1]                /* load dest */
        /* block[0] and block[1] */
        add     r8, r7, r8, lsr #8
        movne   r6, r5, lsr #24
        movne   r8, r7, lsr #24
        ldrsh   r5, [r0, #4]            /* moved from [A] */
        orr     r9, r9, r8, lsl #8
        /* block[2] and block[3] */
        and     r6, r4, #0xFF0000
        and     r8, r4, #0xFF000000
        add     r6, r5, r6, lsr #16
        add     r8, r7, r8, lsr #24
        movne   r6, r5, lsr #24
        movne   r8, r7, lsr #24
        orr     r9, r9, r6, lsl #16
        ldr     r4, [r1, #4]            /* moved from [B] */
        orr     r9, r9, r8, lsl #24
        ldrsh   r5, [r0, #8]            /* moved from [C] */
        /* block[4] and block[5] */
        add     r8, r7, r8, lsr #8
        movne   r6, r5, lsr #24
        movne   r8, r7, lsr #24
        ldrsh   r5, [r0, #12]           /* moved from [D] */
        orr     r9, r9, r8, lsl #8
        /* block[6] and block[7] */
        and     r6, r4, #0xFF0000
        and     r8, r4, #0xFF000000
        add     r6, r5, r6, lsr #16
        add     r8, r7, r8, lsr #24
        movne   r6, r5, lsr #24
        movne   r8, r7, lsr #24
        orr     r9, r9, r6, lsl #16
        add     r0, r0, #16             /* moved from [E] */
        orr     r9, r9, r8, lsl #24
        subs    r10, r10, #1            /* moved from [F] */
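
@ C reference for the function above (illustration only, not from the
@ original source): add the 16-bit IDCT coefficients to the destination
@ pixels and clamp each result to [0, 255]:
@
@     for (i = 0; i < 8; i++) {
@         for (j = 0; j < 8; j++) {
@             int v = dest[j] + block[j];
@             dest[j] = v < 0 ? 0 : v > 255 ? 255 : v;
@         }
@         block += 8;
@         dest  += stride;
@     }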