2 @ ARMv4 optimized DSP utils
3 @ Copyright (c) 2004 AGAWA Koji <i (AT) atty (DOT) jp>
5 @ This file is part of FFmpeg.
7 @ FFmpeg is free software; you can redistribute it and/or
8 @ modify it under the terms of the GNU Lesser General Public
9 @ License as published by the Free Software Foundation; either
10 @ version 2.1 of the License, or (at your option) any later version.
12 @ FFmpeg is distributed in the hope that it will be useful,
13 @ but WITHOUT ANY WARRANTY; without even the implied warranty of
14 @ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 @ Lesser General Public License for more details.
17 @ You should have received a copy of the GNU Lesser General Public
18 @ License along with FFmpeg; if not, write to the Free Software
19 @ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavutil/arm/asm.S"
25 #if HAVE_ARMV5TE_EXTERNAL
@ Memory-prefetch helper, built only when ARMv5TE is available (guard above).
@ NOTE(review): the function body is not visible in this chunk; presumably it
@ issues pld prefetches over the buffer -- confirm against the full file.
26 function ff_prefetch_arm, export=1
37 .macro ALIGN_QWORD_D shift, Rd0, Rd1, Rd2, Rd3, Rn0, Rn1, Rn2, Rn3, Rn4
@ Realign a 16-byte (quad-word) region that was loaded \shift bytes off a
@ word boundary: build Rd0..Rd3 from the five consecutive source words
@ Rn0..Rn4, discarding the low \shift bytes of each word and pulling the
@ missing bytes in from the following word.
@ First take the kept bytes of each source word...
38 mov \Rd0, \Rn0, lsr #(\shift * 8)
39 mov \Rd1, \Rn1, lsr #(\shift * 8)
40 mov \Rd2, \Rn2, lsr #(\shift * 8)
41 mov \Rd3, \Rn3, lsr #(\shift * 8)
@ ...then OR in the carried-over bytes of the next source word.
42 orr \Rd0, \Rd0, \Rn1, lsl #(32 - \shift * 8)
43 orr \Rd1, \Rd1, \Rn2, lsl #(32 - \shift * 8)
44 orr \Rd2, \Rd2, \Rn3, lsl #(32 - \shift * 8)
45 orr \Rd3, \Rd3, \Rn4, lsl #(32 - \shift * 8)
47 .macro ALIGN_DWORD shift, R0, R1, R2
@ In-place 8-byte variant of the realignment above: shift the pair R0:R1
@ right by \shift bytes, refilling the freed high bytes from R1 and R2.
@ R0 and R1 are overwritten; R2 supplies the trailing bytes only.
48 mov \R0, \R0, lsr #(\shift * 8)
49 orr \R0, \R0, \R1, lsl #(32 - \shift * 8)
50 mov \R1, \R1, lsr #(\shift * 8)
51 orr \R1, \R1, \R2, lsl #(32 - \shift * 8)
53 .macro ALIGN_DWORD_D shift, Rdst0, Rdst1, Rsrc0, Rsrc1, Rsrc2
@ Non-destructive 8-byte realignment: like ALIGN_DWORD, but writes the
@ result to a separate destination pair so the source words Rsrc0..Rsrc2
@ stay live for a second extraction at a different \shift (see the x2/xy2
@ call sites, which extract two shifted copies from one load).
54 mov \Rdst0, \Rsrc0, lsr #(\shift * 8)
55 mov \Rdst1, \Rsrc1, lsr #(\shift * 8)
56 orr \Rdst0, \Rdst0, \Rsrc1, lsl #(32 - (\shift * 8))
57 orr \Rdst1, \Rdst1, \Rsrc2, lsl #(32 - (\shift * 8))
60 .macro RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask
61 @ Rd = (Rn | Rm) - (((Rn ^ Rm) & ~0x01010101) >> 1)
@ Per-byte average of the two 8-byte rows Rn0:Rn1 and Rm0:Rm1, rounding up,
@ using the SWAR identity above.  Rmask holds ~0x01010101 (= 0xFEFEFEFE) so
@ that the >>1 cannot borrow a bit across byte lanes.
@ NOTE(review): the eor steps computing (Rn ^ Rm) into Rd0/Rd1, and the orr
@ folding Rm into Rn0/Rn1, fall on lines elided from this view -- by this
@ point Rd holds the xor and Rn holds the per-byte OR; confirm in full file.
68 and \Rd0, \Rd0, \Rmask
69 and \Rd1, \Rd1, \Rmask
70 sub \Rd0, \Rn0, \Rd0, lsr #1
71 sub \Rd1, \Rn1, \Rd1, lsr #1
74 .macro NO_RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask
75 @ Rd = (Rn & Rm) + (((Rn ^ Rm) & ~0x01010101) >> 1)
@ (comment sign fixed: the add instructions below implement '+', giving the
@ truncating / round-down per-byte average, vs. RND_AVG32's rounding-up form)
@ Rmask holds ~0x01010101 (= 0xFEFEFEFE) to stop the >>1 borrowing across
@ byte lanes.
@ NOTE(review): the eor/and steps producing (Rn ^ Rm) in Rd and (Rn & Rm) in
@ Rn fall on lines elided from this view -- confirm against the full file.
82 and \Rd0, \Rd0, \Rmask
83 and \Rd1, \Rd1, \Rmask
84 add \Rd0, \Rn0, \Rd0, lsr #1
85 add \Rd1, \Rn1, \Rd1, lsr #1
88 .macro JMP_ALIGN tmp, reg
@ Dispatch on the low 2 bits (byte alignment) of \reg, using \tmp as
@ scratch.  NOTE(review): the macro body is not visible in this chunk;
@ presumably it branches to the local labels 1:..4: that each caller
@ defines, one per alignment -- confirm against the full file.
99 @ ----------------------------------------------------------------
101 function ff_put_pixels16_arm, export=1
102 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
103 @ block = word aligned, pixels = unaligned
@ Straight 16-byte-wide copy.  The three ALIGN_QWORD_D sites below are the
@ per-alignment paths for a source that is 1, 2 or 3 bytes off a word
@ boundary: r4-r8 hold five loaded source words, r9-r12 receive the four
@ realigned output words.  (Loads/stores and the loop fall on elided lines.)
120 ALIGN_QWORD_D 1, r9, r10, r11, r12, r4, r5, r6, r7, r8
131 ALIGN_QWORD_D 2, r9, r10, r11, r12, r4, r5, r6, r7, r8
142 ALIGN_QWORD_D 3, r9, r10, r11, r12, r4, r5, r6, r7, r8
151 @ ----------------------------------------------------------------
153 function ff_put_pixels8_arm, export=1
154 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
155 @ block = word aligned, pixels = unaligned
@ Straight 8-byte-wide copy; same per-alignment structure as the 16-wide
@ version above, but using the in-place double-word realignment (r4:r5 =
@ data, r12 = trailing source word).
172 ALIGN_DWORD 1, r4, r5, r12
183 ALIGN_DWORD 2, r4, r5, r12
194 ALIGN_DWORD 3, r4, r5, r12
203 @ ----------------------------------------------------------------
205 function ff_put_pixels8_x2_arm, export=1
206 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
207 @ block = word aligned, pixels = unaligned
@ Horizontal (x2) half-pel: each output byte is the rounded average of a
@ source byte and its right neighbour.  Each alignment path extracts the
@ row at offset \shift and \shift+1 bytes (two ALIGN_DWORD_D where neither
@ offset is 0) and blends them with RND_AVG32; r12 holds the 0xFEFEFEFE
@ byte-lane mask used by the average.
215 ALIGN_DWORD_D 1, r6, r7, r4, r5, r10
217 RND_AVG32 r8, r9, r4, r5, r6, r7, r12
227 ALIGN_DWORD_D 1, r6, r7, r4, r5, r10
228 ALIGN_DWORD_D 2, r8, r9, r4, r5, r10
230 RND_AVG32 r4, r5, r6, r7, r8, r9, r12
240 ALIGN_DWORD_D 2, r6, r7, r4, r5, r10
241 ALIGN_DWORD_D 3, r8, r9, r4, r5, r10
243 RND_AVG32 r4, r5, r6, r7, r8, r9, r12
253 ALIGN_DWORD_D 3, r6, r7, r4, r5, r10
255 RND_AVG32 r8, r9, r6, r7, r5, r10, r12
264 function ff_put_no_rnd_pixels8_x2_arm, export=1
265 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
266 @ block = word aligned, pixels = unaligned
@ Same horizontal half-pel structure as ff_put_pixels8_x2_arm above, but
@ using NO_RND_AVG32, i.e. the truncating (round-down) per-byte average.
274 ALIGN_DWORD_D 1, r6, r7, r4, r5, r10
276 NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
286 ALIGN_DWORD_D 1, r6, r7, r4, r5, r10
287 ALIGN_DWORD_D 2, r8, r9, r4, r5, r10
289 NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
299 ALIGN_DWORD_D 2, r6, r7, r4, r5, r10
300 ALIGN_DWORD_D 3, r8, r9, r4, r5, r10
302 NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
312 ALIGN_DWORD_D 3, r6, r7, r4, r5, r10
314 NO_RND_AVG32 r8, r9, r6, r7, r5, r10, r12
323 @ ----------------------------------------------------------------
325 function ff_put_pixels8_y2_arm, export=1
326 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
327 @ block = word aligned, pixels = unaligned
@ Vertical (y2) half-pel: each output row is the rounded average of a source
@ row and the row below it.  The loop body is unrolled two rows deep: the
@ paired RND_AVG32 calls alternate which register pair holds the "previous"
@ row so only one new row needs loading per output row.  r12 = 0xFEFEFEFE
@ byte-lane mask; labelled paths handle source byte-alignments 1..3 via
@ ALIGN_DWORD.  (Loads, stores and branches fall on elided lines.)
339 RND_AVG32 r8, r9, r4, r5, r6, r7, r12
345 RND_AVG32 r8, r9, r6, r7, r4, r5, r12
356 ALIGN_DWORD 1, r4, r5, r6
360 ALIGN_DWORD 1, r7, r8, r9
361 RND_AVG32 r10, r11, r4, r5, r7, r8, r12
367 ALIGN_DWORD 1, r4, r5, r6
369 RND_AVG32 r10, r11, r7, r8, r4, r5, r12
379 ALIGN_DWORD 2, r4, r5, r6
383 ALIGN_DWORD 2, r7, r8, r9
384 RND_AVG32 r10, r11, r4, r5, r7, r8, r12
390 ALIGN_DWORD 2, r4, r5, r6
392 RND_AVG32 r10, r11, r7, r8, r4, r5, r12
402 ALIGN_DWORD 3, r4, r5, r6
406 ALIGN_DWORD 3, r7, r8, r9
407 RND_AVG32 r10, r11, r4, r5, r7, r8, r12
413 ALIGN_DWORD 3, r4, r5, r6
415 RND_AVG32 r10, r11, r7, r8, r4, r5, r12
423 function ff_put_no_rnd_pixels8_y2_arm, export=1
424 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
425 @ block = word aligned, pixels = unaligned
@ Same vertical half-pel structure as ff_put_pixels8_y2_arm above, but with
@ NO_RND_AVG32, i.e. the truncating (round-down) per-byte average.
437 NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
443 NO_RND_AVG32 r8, r9, r6, r7, r4, r5, r12
454 ALIGN_DWORD 1, r4, r5, r6
458 ALIGN_DWORD 1, r7, r8, r9
459 NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
465 ALIGN_DWORD 1, r4, r5, r6
467 NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
477 ALIGN_DWORD 2, r4, r5, r6
481 ALIGN_DWORD 2, r7, r8, r9
482 NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
488 ALIGN_DWORD 2, r4, r5, r6
490 NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
500 ALIGN_DWORD 3, r4, r5, r6
504 ALIGN_DWORD 3, r7, r8, r9
505 NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
511 ALIGN_DWORD 3, r4, r5, r6
513 NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
522 @ ----------------------------------------------------------------
523 .macro RND_XY2_IT align, rnd
524 @ l1= (a & 0x03030303) + (b & 0x03030303) ?(+ 0x02020202)
525 @ h1= ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2)
@ One row-step of the 2x2 (xy2) half-pel average of four neighbouring
@ pixels.  Each byte sum is split SWAR-style into a low-2-bits partial sum
@ (l) and a high-6-bits-shifted partial sum (h) so four bytes are averaged
@ per word without cross-byte carries; the optional 0x02020202 term is the
@ rounding bias.  The \align parameter picks which ALIGN_DWORD_D pairs below
@ extract the row at byte offsets \align and \align+1.
536 ALIGN_DWORD_D 1, r4, r5, r6, r7, r8
538 ALIGN_DWORD_D 1, r4, r5, r8, r9, r10
539 ALIGN_DWORD_D 2, r6, r7, r8, r9, r10
541 ALIGN_DWORD_D 2, r4, r5, r8, r9, r10
542 ALIGN_DWORD_D 3, r6, r7, r8, r9, r10
544 ALIGN_DWORD_D 3, r4, r5, r5, r6, r7
@ NOTE(review): \rnd is lsl at rounding call sites and lsr at no-rounding
@ ones; this conditional and adjusts the bias constant kept in r14 -- the
@ surrounding tst/cmp setting EQ falls on elided lines, confirm in full file.
553 andeq r14, r14, r14, \rnd #1
@ r12 = 0x3f3f3f3f: byte-lane mask applied after the >>2 of the high parts.
556 ldr r12, =0xfcfcfcfc >> 2
560 and r4, r12, r4, lsr #2
561 and r5, r12, r5, lsr #2
562 and r6, r12, r6, lsr #2
563 and r7, r12, r7, lsr #2
569 .macro RND_XY2_EXPAND align, rnd
@ Expands the xy2 row-step twice (the loop is software-pipelined across two
@ rows, reusing the bottom row of one step as the top row of the next), then
@ finishes the low-bits partial sums: >>2 and mask to byte lanes (r14 holds
@ the lane mask / rounding constant set up on elided lines).
570 RND_XY2_IT \align, \rnd
572 RND_XY2_IT \align, \rnd
579 and r4, r14, r4, lsr #2
580 and r5, r14, r5, lsr #2
590 function ff_put_pixels8_xy2_arm, export=1
591 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
592 @ block = word aligned, pixels = unaligned
@ 2x2 (xy2) half-pel interpolation with rounding: every output byte is the
@ rounded average of the 2x2 source neighbourhood.  Labels 1:-4: are the
@ four source-alignment paths; \rnd = lsl selects the rounded variant
@ inside RND_XY2_EXPAND.
594 push {r4-r11,lr} @ R14 is also called LR
596 1: RND_XY2_EXPAND 0, lsl
598 2: RND_XY2_EXPAND 1, lsl
600 3: RND_XY2_EXPAND 2, lsl
602 4: RND_XY2_EXPAND 3, lsl
606 function ff_put_no_rnd_pixels8_xy2_arm, export=1
607 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
608 @ block = word aligned, pixels = unaligned
@ Truncating (no-rounding) variant of ff_put_pixels8_xy2_arm: identical
@ per-alignment structure, but \rnd = lsr drops the rounding bias inside
@ RND_XY2_EXPAND.
612 1: RND_XY2_EXPAND 0, lsr
614 2: RND_XY2_EXPAND 1, lsr
616 3: RND_XY2_EXPAND 2, lsr
618 4: RND_XY2_EXPAND 3, lsr
622 @ void ff_add_pixels_clamped_arm(int16_t *block, uint8_t *dest, int stride)
623 function ff_add_pixels_clamped_arm, export=1
@ Adds each signed 16-bit coefficient in block (r0) to the matching
@ destination byte loaded from dest (r1), saturating to [0,255], and packs
@ four results at a time into r9 before storing.  Register roles visible
@ here: r4 = packed dest word, r5/r7 = sign-extended coefficients,
@ r6/r8 = per-byte sums, r10 = loop counter.
@ NOTE(review): the tst/cmp instructions whose flags the movne clamps below
@ depend on, plus the loads of r7 and the stores of r9, fall on lines elided
@ from this view -- confirm against the full file.
627 ldr r4, [r1] /* load dest */
628 /* block[0] and block[1]*/
634 add r8, r7, r8, lsr #8
@ movne ... lsr #24: overwrite the sum with the saturated value when the
@ preceding (elided) overflow test fired.
639 movne r6, r5, lsr #24
642 movne r8, r7, lsr #24
644 ldrsh r5, [r0, #4] /* moved from [A] */
645 orr r9, r9, r8, lsl #8
646 /* block[2] and block[3] */
649 and r6, r4, #0xFF0000
650 and r8, r4, #0xFF000000
651 add r6, r5, r6, lsr #16
652 add r8, r7, r8, lsr #24
657 movne r6, r5, lsr #24
660 movne r8, r7, lsr #24
661 orr r9, r9, r6, lsl #16
662 ldr r4, [r1, #4] /* moved from [B] */
663 orr r9, r9, r8, lsl #24
665 ldrsh r5, [r0, #8] /* moved from [C] */
670 /* block[4] and block[5] */
676 add r8, r7, r8, lsr #8
681 movne r6, r5, lsr #24
684 movne r8, r7, lsr #24
686 ldrsh r5, [r0, #12] /* moved from [D] */
687 orr r9, r9, r8, lsl #8
688 /* block[6] and block[7] */
691 and r6, r4, #0xFF0000
692 and r8, r4, #0xFF000000
693 add r6, r5, r6, lsr #16
694 add r8, r7, r8, lsr #24
699 movne r6, r5, lsr #24
702 movne r8, r7, lsr #24
703 orr r9, r9, r6, lsl #16
/* advance to the next row of 8 coefficients (8 * sizeof(int16_t)) */
704 add r0, r0, #16 /* moved from [E] */
705 orr r9, r9, r8, lsl #24
706 subs r10, r10, #1 /* moved from [F] */