/*
 * VP8 ARMv6 optimisations
 *
 * Copyright (c) 2010 Google Inc.
 * Copyright (c) 2010 Rob Clark <rob@ti.com>
 * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * This code was partially ported from libvpx, which uses this license:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 *   * Neither the name of Google nor the names of its contributors may
 *     be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "libavutil/arm/asm.S"

@ void vp8_luma_dc_wht(int16_t block[4][4][16], int16_t dc[16])
function ff_vp8_luma_dc_wht_armv6, export=1
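        @ Inverse 4x4 Walsh-Hadamard transform of the 16 luma DC
        @ coefficients. Each uadd16/usub16 butterflies two 16-bit columns
        @ at once; the pkhbt/pkhtb block below transposes between the
        @ vertical and horizontal passes, and the 3|3 word supplies the
        @ +3 bias for the final rounding >> 3.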
        uadd16 r12, r2, r8 @ t0[0,1]
        usub16 r2, r2, r8 @ t3[0,1]
        uadd16 r8, r4, r6 @ t1[0,1]
        usub16 r4, r4, r6 @ t2[0,1]
        uadd16 r6, r12, r8 @ dc0[0,1]
        usub16 r12, r12, r8 @ dc2[0,1]
        uadd16 r8, r2, r4 @ dc1[0,1]
        usub16 r2, r2, r4 @ dc3[0,1]
        uadd16 lr, r3, r9 @ t0[2,3]
        usub16 r3, r3, r9 @ t3[2,3]
        uadd16 r9, r5, r7 @ t1[2,3]
        usub16 r5, r5, r7 @ t2[2,3]
        uadd16 r7, lr, r9 @ dc0[2,3]
        usub16 lr, lr, r9 @ dc2[2,3]
        uadd16 r9, r3, r5 @ dc1[2,3]
        usub16 r3, r3, r5 @ dc3[2,3]
        orr r1, r1, #0x30000 @ 3 | 3 (round)
        pkhbt r4, r6, r8, lsl #16 @ dc{0,1}[0]
        pkhtb r6, r8, r6, asr #16 @ dc{0,1}[1]
        pkhbt r5, r12, r2, lsl #16 @ dc{2,3}[0]
        pkhtb r12, r2, r12, asr #16 @ dc{2,3}[1]
        pkhbt r8, r7, r9, lsl #16 @ dc{0,1}[2]
        pkhtb r7, r9, r7, asr #16 @ dc{0,1}[3]
        pkhbt r2, lr, r3, lsl #16 @ dc{2,3}[2]
        pkhtb lr, r3, lr, asr #16 @ dc{2,3}[3]
        uadd16 r9, r4, r7 @ t0[0,1]
        uadd16 r3, r5, lr @ t0[2,3]
        usub16 r4, r4, r7 @ t3[0,1]
        usub16 r5, r5, lr @ t3[2,3]
        uadd16 r7, r6, r8 @ t1[0,1]
        uadd16 lr, r12, r2 @ t1[2,3]
        usub16 r6, r6, r8 @ t2[0,1]
        usub16 r12, r12, r2 @ t2[2,3]
        uadd16 r8, r9, r7 @ block[0,1][0]
        uadd16 r2, r3, lr @ block[2,3][0]
        usub16 r9, r9, r7 @ block[0,1][2]
        usub16 r3, r3, lr @ block[2,3][2]
        uadd16 r7, r4, r6 @ block[0,1][1]
        uadd16 lr, r5, r12 @ block[2,3][1]
        usub16 r4, r4, r6 @ block[0,1][3]
        usub16 r5, r5, r12 @ block[2,3][3]
#if HAVE_ARMV6T2_EXTERNAL
        sbfx r12, r7, #3, #13
        sbfx r10, r4, #3, #13
        asr r6, #3 @ block[0][0]
        asr r12, #3 @ block[0][1]
        asr r1, #3 @ block[0][2]
        asr r10, #3 @ block[0][3]
        asr r8, r8, #19 @ block[1][0]
        asr r7, r7, #19 @ block[1][1]
        asr r9, r9, #19 @ block[1][2]
        asr r4, r4, #19 @ block[1][3]
        asr r6, r2, #19 @ block[3][0]
        asr r12, lr, #19 @ block[3][1]
        asr r1, r3, #19 @ block[3][2]
        asr r10, r5, #19 @ block[3][3]
#if HAVE_ARMV6T2_EXTERNAL
        asr r2, #3 @ block[2][0]
        asr lr, #3 @ block[2][1]
        asr r3, #3 @ block[2][2]
        asr r5, #3 @ block[2][3]

@ void vp8_luma_dc_wht_dc(int16_t block[4][4][16], int16_t dc[16])
function ff_vp8_luma_dc_wht_dc_armv6, export=1

@ void vp8_idct_add(uint8_t *dst, int16_t block[16], int stride)
function ff_vp8_idct_add_armv6, export=1
        movw r3, #20091 @ cospi8sqrt2minus1
        movw r4, #35468 @ sinpi8sqrt2
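        @ Q16 fixed-point IDCT constants: 20091/65536 ~= cos(pi/8)*sqrt(2) - 1
        @ and 35468/65536 ~= sin(pi/8)*sqrt(2); smulw{b,t} below multiplies
        @ 32x16 bits and keeps the top 32 bits of the product, i.e. a >> 16.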
        ldr r6, [r1, #8] @ i5 | i4 = block1[1] | block1[0]
        ldr lr, [r1, #16] @ i9 | i8 = block2[1] | block2[0]
        ldr r12, [r1, #24] @ i13 | i12 = block3[1] | block3[0]
        smulwt r9, r3, r6 @ ip[5] * cospi8sqrt2minus1
        smulwb r7, r3, r6 @ ip[4] * cospi8sqrt2minus1
        smulwt r10, r4, r6 @ ip[5] * sinpi8sqrt2
        smulwb r8, r4, r6 @ ip[4] * sinpi8sqrt2
        pkhbt r7, r7, r9, lsl #16 @ 5c | 4c
        smulwt r11, r3, r12 @ ip[13] * cospi8sqrt2minus1
        pkhbt r8, r8, r10, lsl #16 @ 5s | 4s = t2 first half
        uadd16 r6, r6, r7 @ 5c+5 | 4c+4 = t3 first half
        smulwb r9, r3, r12 @ ip[12] * cospi8sqrt2minus1
        smulwt r7, r4, r12 @ ip[13] * sinpi8sqrt2
        smulwb r10, r4, r12 @ ip[12] * sinpi8sqrt2
        pkhbt r9, r9, r11, lsl #16 @ 13c | 12c
        ldr r11, [r1] @ i1 | i0
        pkhbt r10, r10, r7, lsl #16 @ 13s | 12s = t3 second half
        uadd16 r7, r12, r9 @ 13c+13 | 12c+12 = t2 second half
        uadd16 r6, r6, r10 @ d = t3
        uadd16 r10, r11, lr @ a = t0
        usub16 r7, r8, r7 @ c = t2
        usub16 r8, r11, lr @ b = t1
        uadd16 r9, r10, r6 @ a+d = tmp{0,1}[0]
        usub16 r10, r10, r6 @ a-d = tmp{0,1}[3]
        uadd16 r6, r8, r7 @ b+c = tmp{0,1}[1]
        usub16 r7, r8, r7 @ b-c = tmp{0,1}[2]
        str r6, [r5, #8] @ o5 | o4
        str r7, [r5, #16] @ o9 | o8
        str r10, [r5, #24] @ o13 | o12
        str r9, [r5], #4 @ o1 | o0
        pop {r1, r6, r12, lr}
        smulwt r9, r3, r12 @ ip[5] * cospi8sqrt2minus1
        smulwt r7, r3, r1 @ ip[1] * cospi8sqrt2minus1
        smulwt r10, r4, r12 @ ip[5] * sinpi8sqrt2
        smulwt r8, r4, r1 @ ip[1] * sinpi8sqrt2
        pkhbt r11, r1, r12, lsl #16 @ i4 | i0 = t0/t1 first half
        pkhtb r1, r12, r1, asr #16 @ i5 | i1
        pkhbt r7, r7, r9, lsl #16 @ 5c | 1c
        pkhbt r8, r8, r10, lsl #16 @ 5s | 1s = t2 first half
        pkhbt r9, r6, lr, lsl #16 @ i6 | i2 = t0/t1 second half
        pkhtb r12, lr, r6, asr #16 @ i7 | i3
        uadd16 r1, r7, r1 @ 5c+5 | 1c+1 = t3 first half
        uadd16 r10, r11, r9 @ a = t0
        usub16 r9, r11, r9 @ b = t1
        smulwt r7, r3, r12 @ ip[7] * cospi8sqrt2minus1
        smulwb lr, r3, r12 @ ip[3] * cospi8sqrt2minus1
        smulwt r11, r4, r12 @ ip[7] * sinpi8sqrt2
        smulwb r6, r4, r12 @ ip[3] * sinpi8sqrt2
        pkhbt r7, lr, r7, lsl #16 @ 7c | 3c
        pkhbt r11, r6, r11, lsl #16 @ 7s | 3s = t3 second half
        uadd16 r12, r7, r12 @ 7c+7 | 3c+3 = t2 second half
        uadd16 r10, r10, r6 @ t0 + 4
        uadd16 r9, r9, r6 @ t1 + 4
        usub16 lr, r8, r12 @ c (o5 | o1) = t2
        uadd16 r12, r11, r1 @ d (o7 | o3) = t3
        usub16 r1, r9, lr @ b-c = dst{0,1}[2]
        uadd16 r7, r10, r12 @ a+d = dst{0,1}[0]
        usub16 r12, r10, r12 @ a-d = dst{0,1}[3]
        uadd16 r10, r9, lr @ b+c = dst{0,1}[1]
        asr lr, r1, #3 @ o[1][2]
        asr r9, r12, #3 @ o[1][3]
        pkhtb r8, lr, r7, asr #19 @ o[1][0,2]
        pkhtb r11, r9, r10, asr #19 @ o[1][1,3]
#if HAVE_ARMV6T2_EXTERNAL
        sbfx r10, r10, #3, #13
        asr r10, #3 @ o[0][1]
        pkhbt r7, r7, r1, lsl #13 @ o[0][0,2]
        pkhbt r10, r10, r12, lsl #13 @ o[0][1,3]
        uxtab16 r10, r10, lr, ror #8
        uxtab16 r11, r11, r9, ror #8
        orr r7, r7, r10, lsl #8
        orr r8, r8, r11, lsl #8
        str_post r7, r0, r2, lsl #1

@ void vp8_idct_dc_add(uint8_t *dst, int16_t block[16], int stride)
function ff_vp8_idct_dc_add_armv6, export=1
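        @ Adds a single rounded DC term to all 16 pixels of a 4x4 block:
        @ the value is duplicated into both halves of a word and added to
        @ the pixel bytes two at a time with uxtab16.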
        add r6, r0, r2, lsl #1
        pkhbt r3, r3, r3, lsl #16
        uxtab16 lr, r3, r5 @ a1+2 | a1+0
        uxtab16 r5, r3, r5, ror #8 @ a1+3 | a1+1
        uxtab16 r4, r3, r4, ror #8
        orr lr, lr, r5, lsl #8
        orr r12, r12, r4, lsl #8
        uxtab16 r5, r3, r5, ror #8
        uxtab16 r4, r3, r4, ror #8
        orr lr, lr, r5, lsl #8
        orr r12, r12, r4, lsl #8

@ void vp8_idct_dc_add4uv(uint8_t *dst, int16_t block[4][16], int stride)
function ff_vp8_idct_dc_add4uv_armv6, export=1
        bl X(ff_vp8_idct_dc_add_armv6)
        bl X(ff_vp8_idct_dc_add_armv6)
        add r0, r0, r2, lsl #2
        bl X(ff_vp8_idct_dc_add_armv6)
        bl X(ff_vp8_idct_dc_add_armv6)

@ void vp8_idct_dc_add4y(uint8_t *dst, int16_t block[4][16], int stride)
function ff_vp8_idct_dc_add4y_armv6, export=1
        bl X(ff_vp8_idct_dc_add_armv6)
        bl X(ff_vp8_idct_dc_add_armv6)
        bl X(ff_vp8_idct_dc_add_armv6)
        bl X(ff_vp8_idct_dc_add_armv6)
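
@ Transpose a 4x4 block of bytes: \i0-\i3 hold the input rows (and are
@ clobbered), \o0-\o3 receive the columns. The comments track which
@ row/column byte ends up where.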
.macro transpose o3, o2, o1, o0, i0, i1, i2, i3
        uxtb16 \o1, \i1 @ xx 12 xx 10
        uxtb16 \o0, \i0 @ xx 02 xx 00
        uxtb16 \o3, \i3 @ xx 32 xx 30
        uxtb16 \o2, \i2 @ xx 22 xx 20
        orr \o1, \o0, \o1, lsl #8 @ 12 02 10 00
        orr \o3, \o2, \o3, lsl #8 @ 32 22 30 20
        uxtb16 \i1, \i1, ror #8 @ xx 13 xx 11
        uxtb16 \i3, \i3, ror #8 @ xx 33 xx 31
        uxtb16 \i0, \i0, ror #8 @ xx 03 xx 01
        uxtb16 \i2, \i2, ror #8 @ xx 23 xx 21
        orr \i0, \i0, \i1, lsl #8 @ 13 03 11 01
        orr \i2, \i2, \i3, lsl #8 @ 33 23 31 21
        pkhtb \o2, \o3, \o1, asr #16 @ 32 22 12 02
        pkhbt \o0, \o1, \o3, lsl #16 @ 30 20 10 00
        pkhtb \o3, \i2, \i0, asr #16 @ 33 23 13 03
        pkhbt \o1, \i0, \i2, lsl #16 @ 31 21 11 01
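
@ Per-byte mask for the simple loop filter; roughly, in C:
@     mask = 2 * abs(p0 - q0) + abs(p1 - q1) / 2 <= flim ? 0xff : 0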
        uqsub8 r7, r3, r6 @ p1 - q1
        uqsub8 r8, r6, r3 @ q1 - p1
        uqsub8 r10, r4, r5 @ p0 - q0
        uqsub8 r9, r5, r4 @ q0 - p0
        orr r7, r7, r8 @ abs(p1 - q1)
        orr r9, r9, r10 @ abs(p0 - q0)
        uhadd8 r7, r7, lr @ abs(p1 - q1) >> 1
        uqadd8 r9, r9, r9 @ abs(p0 - q0) * 2
        uqadd8 r7, r7, r9 @ abs(p0 - q0)*2 + abs(p1-q1)/2
        usub8 r10, r12, r7 @ compare to flimit
        sel r10, r8, lr @ filter mask: F or 0
        qsub8 r3, r3, r6 @ vp8_filter = p1 - q1
        qsub8 r6, r5, r4 @ q0 - p0
        qadd8 r3, r3, r6 @ += q0 - p0
        lsr r7, r2, #5 @ 0x04040404
        qadd8 r3, r3, r6 @ += q0 - p0
        sub r9, r7, r2, lsr #7 @ 0x03030303
        qadd8 r3, r3, r6 @ vp8_filter = p1-q1 + 3*(q0-p0)
        and r3, r3, r10 @ vp8_filter &= mask
        qadd8 r9, r3, r9 @ Filter2 = vp8_filter + 3
        qadd8 r3, r3, r7 @ Filter1 = vp8_filter + 4
        shadd8 r9, r9, lr @ Filter2 >>= 3
        shadd8 r3, r3, lr @ Filter1 >>= 3
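        @ shadd8 with a zeroed operand halves each signed byte lane, so
        @ repeated halving adds give the per-lane arithmetic >> 3 that an
        @ ordinary shift cannot do on packed bytes.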
        qadd8 r4, r4, r9 @ u = p0 + Filter2
        qsub8 r5, r5, r3 @ u = q0 - Filter1
        eor r4, r4, r2 @ *op0 = u ^ 0x80
        eor r5, r5, r2 @ *oq0 = u ^ 0x80

@ void vp8_v_loop_filter16_simple(uint8_t *dst, int stride, int flim)
function ff_vp8_v_loop_filter16_simple_armv6, export=1
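        @ The simple filter adjusts only p0 and q0, the two pixels
        @ adjacent to the edge; flim is splatted to all four byte lanes.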
        orr r2, r2, r2, lsl #16
        orr r12, r2, r2, lsl #8
        ldr_nreg r3, r0, r1, lsl #1 @ p1
        ldr_nreg r4, r0, r1 @ p0
        ldr r6, [r0, r1] @ q1
A       str r4, [r0, -r1] @ op0

        uqsub8 r6, r9, r10 @ p3 - p2
        uqsub8 r7, r10, r9 @ p2 - p3
        uqsub8 r8, r10, r11 @ p2 - p1
        uqsub8 r10, r11, r10 @ p1 - p2
        orr r6, r6, r7 @ abs(p3-p2)
        orr r8, r8, r10 @ abs(p2-p1)
        uqsub8 lr, r6, r2 @ compare to limit
        uqsub8 r8, r8, r2 @ compare to limit
        uqsub8 r6, r11, r12 @ p1 - p0
        uqsub8 r7, r12, r11 @ p0 - p1
        orr r6, r6, r7 @ abs(p1-p0)
        uqsub8 r7, r6, r2 @ compare to limit
        uqsub8 r8, r6, r3 @ compare to thresh
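
@ Cross-edge part of the normal filter mask:
@ 2 * abs(p0 - q0) + abs(p1 - q1) / 2, compared against flimE below.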
.macro filter_mask_pq
        uqsub8 r6, r11, r10 @ p1 - q1
        uqsub8 r7, r10, r11 @ q1 - p1
        uqsub8 r11, r12, r9 @ p0 - q0
        uqsub8 r12, r9, r12 @ q0 - p0
        orr r6, r6, r7 @ abs(p1-q1)
        orr r12, r11, r12 @ abs(p0-q0)
        uqadd8 r12, r12, r12 @ abs(p0-q0) * 2
        and r6, r7, r6, lsr #1 @ abs(p1-q1) / 2
        uqadd8 r12, r12, r6 @ abs(p0-q0) * 2 + abs(p1-q1)/2

        ldr r10, [r0, r1] @ q1
        ldr_post r9, r0, r1, lsl #1 @ q0
        uqsub8 r7, r9, r10 @ q0 - q1
        uqsub8 r6, r10, r9 @ q1 - q0
        uqsub8 r12, r12, r4 @ compare to flimit
        uqsub8 r9, r11, r10 @ q2 - q1
        uqsub8 r10, r10, r11 @ q1 - q2
        ldr r12, [r0, r1] @ q3
        orr r6, r7, r6 @ abs(q1-q0)
        orr r10, r9, r10 @ abs(q2-q1)
        uqsub8 r9, r12, r11 @ q3 - q2
        uqsub8 r11, r11, r12 @ q2 - q3
        uqsub8 r7, r6, r2 @ compare to limit
        uqsub8 r10, r10, r2 @ compare to limit
        uqsub8 r6, r6, r3 @ compare to thresh
        orr r9, r9, r11 @ abs(q3-q2)
        uqsub8 r9, r9, r2 @ compare to limit
        sel lr, r11, r12 @ filter mask
        sub r0, r0, r1, lsl #1

        transpose r12, r11, r10, r9, r6, r7, r8, lr
        stm sp, {r8, r11, r12, lr}
        sub r0, r0, r1, lsl #2
        ldr_post r6, r0, r1, lsl #1

        transpose r12, r11, r10, r9, r6, r7, r8, lr
        uqsub8 r8, r12, r11 @ q3 - q2
        uqsub8 lr, r11, r12 @ q2 - q3
        uqsub8 r7, r9, r10 @ q0 - q1
        uqsub8 r6, r10, r9 @ q1 - q0
        uqsub8 r12, r11, r10 @ q2 - q1
        uqsub8 r11, r10, r11 @ q1 - q2
        orr r8, r8, lr @ abs(q3-q2)
        orr r6, r7, r6 @ abs(q1-q0)
        orr r11, r12, r11 @ abs(q2-q1)
        ldr lr, [sp, #12] @ load back (f)limit accumulator
        uqsub8 r8, r8, r2 @ compare to limit
        uqsub8 r7, r6, r2 @ compare to limit
        uqsub8 r11, r11, r2 @ compare to limit
        uqsub8 r8, r6, r3 @ compare to thresh
        ldr r12, [sp, #8] @ p1
        ldr r11, [sp, #4] @ p0
        uqsub8 r12, r12, r4 @ compare to flimit
        sel lr, r11, r10 @ filter mask
        mov32 r12, 0x80808080
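        @ XORing with 0x80 biases the unsigned pixels to signed bytes so
        @ the saturating qadd8/qsub8 filter arithmetic below works.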
        eor r11, r7, r12 @ ps1
        eor r8, r8, r12 @ ps0
        eor r9, r9, r12 @ qs0
        eor r10, r10, r12 @ qs1
        qsub8 r7, r11, r10 @ vp8_signed_char_clamp(ps1-qs1)
        qsub8 r8, r9, r8 @ vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
        and r7, r7, r6 @ vp8_filter &= hev
        lsr r10, r12, #5 @ 0x04040404
        sub r9, r10, r12, lsr #7 @ 0x03030303
        and r7, r7, lr @ vp8_filter &= mask
        mov r12, r7 @ Filter2
        and r7, r7, r6 @ Filter2 &= hev
        qadd8 lr, r7, r9 @ Filter2 = vp8_signed_char_clamp(vp8_filter+3)
        qadd8 r7, r7, r10 @ Filter1 = vp8_signed_char_clamp(vp8_filter+4)
        shadd8 lr, lr, r9 @ Filter2 >>= 3
        shadd8 r7, r7, r9 @ Filter1 >>= 3
        shadd8 lr, lr, r9 @ Filter2
        shadd8 r7, r7, r9 @ Filter1
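
@ filter_v/filter_h gather the pixels around a horizontal/vertical edge
@ and build the hev (high edge variance) mask for the common filter core.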
.macro filter_v inner
        orr r10, r6, r8 @ calculate vp8_hevmask
        ldr_nreg r7, r0, r1, lsl #1 @ p1
        ldr_nreg r8, r0, r1 @ p0
        sel r6, r12, r11 @ obtain vp8_hevmask
        ldr r10, [r0, r1] @ q1

.macro filter_h inner
        sel r6, r12, r11 @ hev mask
        ldr_nreg r12, r0, r1, lsl #1

        transpose r10, r9, r8, r7, r12, r11, r6, lr

        lsr r10, r10, #2 @ 0x01010101
        qadd8 r8, r8, lr @ u = vp8_signed_char_clamp(ps0 + Filter2)
        qsub8 r9, r9, r7 @ u = vp8_signed_char_clamp(qs0 - Filter1)
        sadd8 r7, r7, r10 @ vp8_filter += 1
        ldr r10, [sp, #8] @ qs1
        shadd8 r7, r7, lr @ vp8_filter >>= 1
        eor r8, r8, r12 @ *op0 = u ^ 0x80
        bic r7, r7, r6 @ vp8_filter &= ~hev
        qadd8 r11, r11, r7 @ u = vp8_signed_char_clamp(ps1 + vp8_filter)
        eor r9, r9, r12 @ *oq0 = u ^ 0x80
        qsub8 r10, r10, r7 @ u = vp8_signed_char_clamp(qs1 - vp8_filter)
        eor r11, r11, r12 @ *op1 = u ^ 0x80
        eor r10, r10, r12 @ *oq1 = u ^ 0x80

        sxtb16 r10, r12, ror #8
        smlabb r8, r6, lr, r7
        smlatb r6, r6, lr, r7
        smlabb r7, r10, lr, r7
        ssat r8, #8, r8, asr #7
        ssat r6, #8, r6, asr #7
        ssat r7, #8, r7, asr #7
        ssat r10, #8, r10, asr #7
        pkhbt r6, r8, r6, lsl #16
        pkhbt r10, r7, r10, lsl #16
        orr r10, r6, r10, lsl #8 @ u = vp8_signed_char_clamp((63 + Filter2 * 27)>>7)
        qsub8 r8, r9, r10 @ s = vp8_signed_char_clamp(qs0 - u)
        qadd8 r10, r11, r10 @ s = vp8_signed_char_clamp(ps0 + u)
        eor r8, r8, lr @ *oq0 = s ^ 0x80
        eor r10, r10, lr @ *op0 = s ^ 0x80

        bic r12, r12, r6 @ vp8_filter &= ~hev
        ldr r9, [sp, #8] @ qs1
        ldr r11, [sp, #12] @ ps1

function vp8_v_loop_filter_inner_armv6
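        @ Shared inner-edge vertical filter; the 16-pixel luma and paired
        @ 8-pixel chroma entry points below branch here.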
        orr r2, r2, r2, lsl #16
        orr r3, r3, r3, lsl #16
        orr r6, r6, r6, lsl #16
        orr r4, r2, r2, lsl #8 @ flimE
        orr r2, r3, r3, lsl #8 @ flimI
        orr r3, r6, r6, lsl #8 @ thresh
        sub r0, r0, r1, lsl #2
        ldr r10, [r0, r1] @ p2
        ldr_post r9, r0, r1, lsl #1 @ p3
        ldr r12, [r0, r1] @ p0
        ldr_post r11, r0, r1, lsl #1 @ p1
A       str r11, [r0, -r1, lsl #1] @ op1
A       str r8, [r0, -r1] @ op0
T       sub r0, r0, r1, lsl #1
T       str_post r11, r0, r1, lsl #1
        str r10, [r0, r1] @ oq1

function ff_vp8_v_loop_filter16_inner_armv6, export=1
        orr r2, r2, r2, lsl #16
        b vp8_v_loop_filter_inner_armv6

function ff_vp8_v_loop_filter8uv_inner_armv6, export=1
        push {r1, r4-r11, lr}
        orr r2, r3, r3, lsl #16
        b vp8_v_loop_filter_inner_armv6

function vp8_v_loop_filter_armv6
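        @ Macroblock-edge vertical filter: unlike the inner variant it
        @ also rewrites p2/q2 (see the op2/oq2 stores below).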
        orr r3, r3, r3, lsl #16
        orr r6, r6, r6, lsl #16
        orr r4, r2, r2, lsl #8 @ flimE
        orr r2, r3, r3, lsl #8 @ flimI
        orr r3, r6, r6, lsl #8 @ thresh
        sub r0, r0, r1, lsl #2
        ldr r10, [r0, r1] @ p2
        ldr_post r9, r0, r1, lsl #1 @ p3
        ldr r12, [r0, r1] @ p0
        ldr_post r11, r0, r1, lsl #1 @ p1
A       str r10, [r0, -r1] @ *op0
T       sub r0, r0, r1, lsl #1
A       str r10, [r0, -r1, lsl #1] @ *op1
T       str_post r10, r0, r1, lsl #1
        str r8, [r0, r1] @ *oq1
        ldr r9, [r0, r1, lsl #1] @ q2
A       ldr r11, [r0, -r1, lsl #2] @ p2
T       ldr_dpre r11, r0, r1, lsl #2
A       str r10, [r0, -r1, lsl #2] @ *op2
T       str_post r10, r0, r1, lsl #2
        str r8, [r0, r1] @ *oq2

function ff_vp8_v_loop_filter16_armv6, export=1
        orr r2, r2, r2, lsl #16
        b vp8_v_loop_filter_armv6

function ff_vp8_v_loop_filter8uv_armv6, export=1
        push {r1, r4-r11, lr}
        orr r2, r3, r3, lsl #16
        b vp8_v_loop_filter_armv6

@ void vp8_h_loop_filter16_simple(uint8_t *dst, int stride, int flim)
function ff_vp8_h_loop_filter16_simple_armv6, export=1
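        @ Horizontal filters work on a vertical edge: pixels are loaded
        @ across rows, transposed into column registers, run through the
        @ same filter core, then written back transposed.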
        orr r12, r2, r2, lsl #16
        orr r12, r12, r12, lsl #8
        ldr_post r7, r0, r1, lsl #1
        ldr_post r9, r0, r1, lsl #1

        transpose r6, r5, r4, r3, r7, r8, r9, r10

        sub r0, r0, r1, lsl #2
        uxtb16 r7, r4, ror #8
        uxtb16 r9, r5, ror #8
        orr r6, r6, r8, lsl #8
        orr r7, r7, r9, lsl #8

function vp8_h_loop_filter_inner_armv6
        orr r3, r3, r3, lsl #16
        orr r9, r9, r9, lsl #16
        orr r4, r2, r2, lsl #8 @ flimE
        orr r2, r3, r3, lsl #8 @ flimI
        orr r3, r9, r9, lsl #8 @ thresh
        ldr_post r6, r0, r1, lsl #1
        ldr_post r8, r0, r1, lsl #1

        transpose lr, r12, r7, r6, r11, r8, r9, r10

A       str r6, [r0, -r1, lsl #1]
T       sub r0, r0, r1, lsl #1
T       str_post r6, r0, r1, lsl #1
        add r0, r0, r1, lsl #1

function ff_vp8_h_loop_filter16_inner_armv6, export=1
        add r12, r0, r1, lsl #3
        orr r2, r2, r2, lsl #16
        b vp8_h_loop_filter_inner_armv6

function ff_vp8_h_loop_filter8uv_inner_armv6, export=1
        push {r1, r4-r11, lr}
        orr r2, r3, r3, lsl #16
        b vp8_h_loop_filter_inner_armv6

function vp8_h_loop_filter_armv6
        orr r3, r3, r3, lsl #16
        orr r9, r9, r9, lsl #16
        orr r4, r2, r2, lsl #8 @ flimE
        orr r2, r3, r3, lsl #8 @ flimI
        orr r3, r9, r9, lsl #8 @ thresh
        ldr_post r6, r0, r1, lsl #1
        ldr_post r8, r0, r1, lsl #1
        addeq r0, r0, r1, lsl #1
        sub r0, r0, r1, lsl #1
        uxtb16 r10, r10, ror #8
        uxtb16 r8, r8, ror #8
        orr r6, r6, r7, lsl #8
        orr r10, r10, r8, lsl #8
        strh_post r6, r0, r1
        strh_post r10, r0, r1
        strh_post r7, r0, r1
        strh_post r8, r0, r1
        sub r0, r0, r1, lsl #2
        ldrb r11, [r0, #-5] @ p2 for 1/7th difference
        strb r10, [r0, #-4] @ op1
        strb r8, [r0, #-1] @ oq1
        ldrb_post r9, r0, r1 @ q2 for 1/7th difference
        ldrb_post r7, r0, r1
        orr r11, r11, r6, lsl #8
        orr r9, r9, r7, lsl #8
        ldrb_post r7, r0, r1
        orr r11, r11, r6, lsl #16
        orr r9, r9, r7, lsl #16
        ldrb_post r7, r0, r1
        orr r11, r11, r6, lsl #24
        orr r9, r9, r7, lsl #24
        sub r0, r0, r1, lsl #2
        strb_post r8, r0, r1
        strb_post r8, r0, r1
        strb_post r8, r0, r1
        strb_post r8, r0, r1
        pop {r0, r4-r11, pc}

function ff_vp8_h_loop_filter16_armv6, export=1
        add r12, r0, r1, lsl #3
        orr r2, r2, r2, lsl #16
        b vp8_h_loop_filter_armv6

function ff_vp8_h_loop_filter8uv_armv6, export=1
        push {r1, r4-r11, lr}
        orr r2, r3, r3, lsl #16
        b vp8_h_loop_filter_armv6

@ void put_vp8_pixels16(uint8_t *dst, int dststride, uint8_t *src,
@                       int srcstride, int h, int mx, int my)
function ff_put_vp8_pixels16_armv6, export=1
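        @ Full-pel position (mx = my = 0): a straight 16xh copy, four
        @ 32-bit words per row.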
        ldr r12, [sp, #32] @ h
        strd r6, r7, [r0, #8]
        strd_post r4, r5, r0, r1
        strd r10, r11, [r0, #8]
        strd_post r8, r9, r0, r1

@ void put_vp8_pixels8(uint8_t *dst, int dststride, uint8_t *src,
@                      int srcstride, int h, int mx, int my)
function ff_put_vp8_pixels8_armv6, export=1
        ldr r12, [sp, #32] @ h
        ldr_post r10, r2, r3
        strd_post r4, r5, r0, r1
        strd_post r6, r7, r0, r1
        strd_post r8, r9, r0, r1
        strd_post r10, r11, r0, r1

@ void put_vp8_pixels4(uint8_t *dst, int dststride, uint8_t *src,
@                      int srcstride, int h, int mx, int my)
function ff_put_vp8_pixels4_armv6, export=1
        ldr r12, [sp, #0] @ h
@ note: the worst case sum of all 6-tap filter values * 255 is 0x7f80, so
@ 16-bit arithmetic can be used to apply the filters
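@ (each filter's taps sum to 128, and 128 * 255 = 0x7f80; the table name
@ gives the 1-based order in which the taps are stored: 1, 3, 2, 4, 5, 6,
@ padded with two zeros, matching the smlad pairings used below)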
const sixtap_filters_13245600, align=4
        .short 2, 108, -11, 36, -8, 1, 0, 0
        .short 3, 77, -16, 77, -16, 3, 0, 0
        .short 1, 36, -8, 108, -11, 2, 0, 0
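
@ 4-tap filters for the odd subpel positions, likewise stored in the
@ 1, 3, 2, 4 tap order encoded in the name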
const fourtap_filters_1324, align=4
        .short -6, 12, 123, -1
        .short -9, 50, 93, -6
        .short -6, 93, 50, -9
        .short -1, 123, 12, -6

.macro vp8_mc_1 name, size, hv
function ff_put_vp8_\name\size\()_\hv\()_armv6, export=1
        push {r1, r4-r11, lr}
        b vp8_put_\name\()_\hv\()_armv6 + 4
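        @ the "+ 4" skips the push at the head of the target function;
        @ the same registers were already saved above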

vp8_mc_1 epel, 16, h6
vp8_mc_1 epel, 16, v6
vp8_mc_1 epel, 8, h6
vp8_mc_1 epel, 8, v6
vp8_mc_1 epel, 8, h4
vp8_mc_1 epel, 8, v4
vp8_mc_1 epel, 4, h6
vp8_mc_1 epel, 4, v6
vp8_mc_1 epel, 4, h4
vp8_mc_1 epel, 4, v4

vp8_mc_1 bilin, 16, h
vp8_mc_1 bilin, 16, v
vp8_mc_1 bilin, 8, h
vp8_mc_1 bilin, 8, v
vp8_mc_1 bilin, 4, h
vp8_mc_1 bilin, 4, v

/* True relational expressions have the value -1 in the GNU assembler,
# define TMPSIZE \size * (8 + 8*(\size > 4) + \ytaps - 1)
# define TMPSIZE \size * (8 - 8*(\size > 4) + \ytaps - 1)
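@ i.e. block_width times (the maximum block height plus the \ytaps - 1
@ extra rows the vertical pass reads); for epel 16 h6v6 this reserves
@ 16 * 21 bytes of intermediate rows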
.macro vp8_mc_hv name, size, h, v, ytaps
function ff_put_vp8_\name\size\()_\h\v\()_armv6, export=1
        push {r0, r1, r4, lr}
        sub sp, sp, #TMPSIZE+16
        add lr, r0, #\ytaps-1
        sub r2, r2, r3, lsl #\ytaps >> 1 & 1
        bl vp8_put_\name\()_\h\()_armv6
        add r0, sp, #TMPSIZE+16
        ldr lr, [sp, #TMPSIZE+16+16]
        ldr r12, [sp, #TMPSIZE+16+16+8]
        add r2, sp, #16 + \size * (\ytaps / 2 - 1)
        bl vp8_put_\name\()_\v\()_armv6
        add sp, sp, #TMPSIZE+16+8

vp8_mc_hv epel, 16, h6, v6, 6
vp8_mc_hv epel, 8, h6, v6, 6
vp8_mc_hv epel, 8, h4, v6, 6
vp8_mc_hv epel, 8, h6, v4, 4
vp8_mc_hv epel, 8, h4, v4, 4
vp8_mc_hv epel, 4, h6, v6, 6
vp8_mc_hv epel, 4, h4, v6, 6
vp8_mc_hv epel, 4, h6, v4, 4
vp8_mc_hv epel, 4, h4, v4, 4

vp8_mc_hv bilin, 16, h, v, 2
vp8_mc_hv bilin, 8, h, v, 2
vp8_mc_hv bilin, 4, h, v, 2
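
@ sat4: pack four 16-bit filter results into one word of four saturated
@ unsigned bytes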
.macro sat4 r0, r1, r2, r3
        pkhbt \r0, \r0, \r2, lsl #9
        pkhbt \r1, \r1, \r3, lsl #9
        orr \r0, \r0, \r1, lsl #8

@ Calling convention for the inner MC functions:
@      r1      dst_stride - block_width

function vp8_put_epel_h6_armv6
        push {r1, r4-r11, lr}
        movrel lr, sixtap_filters_13245600 - 16
        add lr, lr, r12, lsl #3
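        @ r12 holds the subpel position (2, 4 or 6 here), so
        @ (r12 << 3) - 16 selects one of the three 16-byte filter rows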
        ldm lr, {r1, r3, lr}
        ldr r7, [r2, #5] @ src[5-8]
        ldr r6, [r2, #2] @ src[2-5]
        ldr r5, [r2], #4 @ src[0-3]
        pkhtb r7, r7, r7, asr #8 @ src[8,7,7,6]
        uxtb16 r9, r6, ror #8 @ src[5] | src[3]
        uxtb16 r6, r6 @ src[4] | src[2]
        uxtb16 r8, r5, ror #8 @ src[3] | src[1]
        uxtb16 r11, r7, ror #8 @ src[8] | src[7]
        uxtb16 r7, r7 @ src[7] | src[6]
        uxtb16 r5, r5 @ src[2] | src[0]
        smlad r5, r5, r1, r10 @ filter[0][0]
        smlad r11, r11, lr, r10 @ filter[3][2]
        smlad r12, r7, lr, r10 @ filter[2][2]
        smlad r10, r8, r1, r10 @ filter[1][0]
        smlad r5, r8, r3, r5 @ filter[0][1]
        smlad r11, r9, r1, r11 @ filter[3][0]
        smlad r12, r9, r3, r12 @ filter[2][1]
        pkhtb r9, r9, r6, asr #16 @ src[5] | src[4]
        smlad r10, r6, r3, r10 @ filter[1][1]
        pkhbt r7, r9, r7, lsl #16 @ src[6] | src[4]
        smlad r5, r9, lr, r5 @ filter[0][2]
        pkhtb r8, r7, r9, asr #16 @ src[6] | src[5]
        smlad r11, r7, r3, r11 @ filter[3][1]
        smlad r9, r8, lr, r10 @ filter[1][2]
        smlad r7, r6, r1, r12 @ filter[2][0]

        sat4 r5, r9, r7, r11

        ldm r4, {r4, r5, r12}
        pop {r1, r4-r11, pc}

function vp8_put_epel_v6_armv6
        push {r1, r4-r11, lr}
        movrel lr, sixtap_filters_13245600 - 16
        add lr, lr, r12, lsl #3
        add r1, r3, r3, lsl #1 @ stride * 3
        ldr_nreg r5, r2, r3 @ src[0,1,2,3 + stride * 1]
        ldr r6, [r2, r3] @ src[0,1,2,3 + stride * 3]
        ldr r7, [r2, r3, lsl #1] @ src[0,1,2,3 + stride * 4]
        ldr r8, [r2, r1] @ src[0,1,2,3 + stride * 5]
        uxtb16 r9, r5, ror #8 @ src[3 + s*1] | src[1 + s*1]
        uxtb16 r10, r6, ror #8 @ src[3 + s*3] | src[1 + s*3]
        uxtb16 r11, r7, ror #8 @ src[3 + s*4] | src[1 + s*4]
        uxtb16 r12, r8, ror #8 @ src[3 + s*5] | src[1 + s*5]
        uxtb16 r5, r5 @ src[2 + s*1] | src[0 + s*1]
        uxtb16 r6, r6 @ src[2 + s*3] | src[0 + s*3]
        uxtb16 r7, r7 @ src[2 + s*4] | src[0 + s*4]
        uxtb16 r8, r8 @ src[2 + s*5] | src[0 + s*5]
        pkhbt r1, r9, r10, lsl #16 @ src[1 + s*3] | src[1 + s*1]
        pkhtb r9, r10, r9, asr #16 @ src[3 + s*3] | src[3 + s*1]
        pkhbt r10, r11, r12, lsl #16 @ src[1 + s*5] | src[1 + s*4]
        pkhtb r11, r12, r11, asr #16 @ src[3 + s*5] | src[3 + s*4]
        pkhbt r12, r5, r6, lsl #16 @ src[0 + s*3] | src[0 + s*1]
        pkhtb r5, r6, r5, asr #16 @ src[2 + s*3] | src[2 + s*1]
        pkhbt r6, r7, r8, lsl #16 @ src[0 + s*5] | src[0 + s*4]
        pkhtb r7, r8, r7, asr #16 @ src[2 + s*5] | src[2 + s*4]
        smlad r12, r12, r8, r3 @ filter[0][1]
        smlad r1, r1, r8, r3 @ filter[1][1]
        smlad r5, r5, r8, r3 @ filter[2][1]
        smlad r9, r9, r8, r3 @ filter[3][1]
        smlad r12, r6, r8, r12 @ filter[0][2]
        smlad r1, r10, r8, r1 @ filter[1][2]
        ldr_nreg r6, r2, r3, lsl #1 @ src[0,1,2,3 + stride * 0]
        ldr r10, [r2], #4 @ src[0,1,2,3 + stride * 2]
        smlad r5, r7, r8, r5 @ filter[2][2]
        smlad r9, r11, r8, r9 @ filter[3][2]
        uxtb16 r7, r6, ror #8 @ src[3 + s*0] | src[1 + s*0]
        uxtb16 r11, r10, ror #8 @ src[3 + s*2] | src[1 + s*2]
        uxtb16 r6, r6 @ src[2 + s*0] | src[0 + s*0]
        uxtb16 r10, r10 @ src[2 + s*2] | src[0 + s*2]
        pkhbt r8, r7, r11, lsl #16 @ src[1 + s*2] | src[1 + s*0]
        pkhtb r7, r11, r7, asr #16 @ src[3 + s*2] | src[3 + s*0]
        pkhbt r11, r6, r10, lsl #16 @ src[0 + s*2] | src[0 + s*0]
        pkhtb r6, r10, r6, asr #16 @ src[2 + s*2] | src[2 + s*0]
        smlad r12, r11, r10, r12 @ filter[0][0]
        smlad r1, r8, r10, r1 @ filter[1][0]
        smlad r5, r6, r10, r5 @ filter[2][0]
        smlad r9, r7, r10, r9 @ filter[3][0]

        sat4 r12, r1, r5, r9

        ldrd r4, r5, [sp, #40]
        pop {r1, r4-r11, pc}

function vp8_put_epel_h4_armv6
        push {r1, r4-r11, lr}
        movrel lr, fourtap_filters_1324 - 4
        add lr, lr, r12, lsl #2
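        @ (r12 << 2) - 4 maps the odd subpel positions 1, 3, 5, 7 to the
        @ four 8-byte filter rows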
        uxtb16 r9, r9, ror #8 @ src[6] | src[4]
        uxtb16 r10, r8, ror #8 @ src[5] | src[3]
        uxtb16 r8, r8 @ src[4] | src[2]
        uxtb16 r11, r7, ror #8 @ src[3] | src[1]
        uxtb16 r7, r7 @ src[2] | src[0]
        smlad r9, r9, r6, r12 @ filter[3][1]
        smlad r7, r7, r5, r12 @ filter[0][0]
        smlad r9, r10, r5, r9 @ filter[3][0]
        smlad r10, r10, r6, r12 @ filter[2][1]
        smlad r12, r11, r5, r12 @ filter[1][0]
        smlad r7, r11, r6, r7 @ filter[0][1]
        smlad r10, r8, r5, r10 @ filter[2][0]
        smlad r12, r8, r6, r12 @ filter[1][1]

        sat4 r7, r12, r10, r9

        pop {r1, r4-r11, pc}

function vp8_put_epel_v4_armv6
        push {r1, r4-r11, lr}
        movrel lr, fourtap_filters_1324 - 4
        add lr, lr, r12, lsl #2
        ldr lr, [r2, r3, lsl #1]
        uxtb16 r8, lr, ror #8 @ src[3 + s*3] | src[1 + s*3]
        uxtb16 r9, r12, ror #8 @ src[3 + s*2] | src[1 + s*2]
        uxtb16 r3, r7, ror #8 @ src[3 + s*0] | src[1 + s*0]
        uxtb16 r1, r11, ror #8 @ src[3 + s*1] | src[1 + s*1]
        uxtb16 lr, lr @ src[2 + s*3] | src[0 + s*3]
        uxtb16 r12, r12 @ src[2 + s*2] | src[0 + s*2]
        uxtb16 r7, r7 @ src[2 + s*0] | src[0 + s*0]
        uxtb16 r11, r11 @ src[2 + s*1] | src[0 + s*1]
        pkhbt r10, r1, r8, lsl #16 @ src[1 + s*3] | src[1 + s*1]
        pkhtb r1, r8, r1, asr #16 @ src[3 + s*3] | src[3 + s*1]
        pkhbt r8, r3, r9, lsl #16 @ src[1 + s*2] | src[1 + s*0]
        pkhtb r3, r9, r3, asr #16 @ src[3 + s*2] | src[3 + s*0]
        pkhbt r9, r11, lr, lsl #16 @ src[0 + s*3] | src[0 + s*1]
        pkhtb r11, lr, r11, asr #16 @ src[2 + s*3] | src[2 + s*1]
        pkhbt lr, r7, r12, lsl #16 @ src[0 + s*2] | src[0 + s*0]
        pkhtb r7, r12, r7, asr #16 @ src[2 + s*2] | src[2 + s*0]
        smlad r9, r9, r6, r12 @ filter[0][1]
        smlad r10, r10, r6, r12 @ filter[1][1]
        smlad r11, r11, r6, r12 @ filter[2][1]
        smlad r1, r1, r6, r12 @ filter[3][1]
        smlad r9, lr, r5, r9 @ filter[0][0]
        smlad r10, r8, r5, r10 @ filter[1][0]
        smlad r11, r7, r5, r11 @ filter[2][0]
        smlad r1, r3, r5, r1 @ filter[3][0]

        sat4 r9, r10, r11, r1

        pop {r1, r4-r11, pc}

function vp8_put_bilin_h_armv6
        push {r1, r4-r11, lr}
        rsb r5, r12, r12, lsl #16
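        @ starts building the packed pair of bilinear weights (mx and
        @ 8 - mx) that smlad applies to each adjacent pixel pair below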
        pkhbt r6, r6, r7, lsl #16 @ src[1] | src[0]
        pkhbt r7, r7, r8, lsl #16 @ src[2] | src[1]
        pkhbt r8, r8, r9, lsl #16 @ src[3] | src[2]
        pkhbt r9, r9, lr, lsl #16 @ src[4] | src[3]
        smlad r6, r6, r5, r10
        smlad r7, r7, r5, r10
        smlad r8, r8, r5, r10
        smlad r9, r9, r5, r10
        pkhbt r6, r6, r8, lsl #13
        pkhbt r7, r7, r9, lsl #13
        orr r6, r6, r7, lsl #8

        pop {r1, r4-r11, pc}

function vp8_put_bilin_v_armv6
        push {r1, r4-r11, lr}
        rsb r5, r12, r12, lsl #16
        pkhbt r6, r6, r10, lsl #16
        pkhbt r7, r7, r11, lsl #16
        pkhbt r8, r8, lr, lsl #16
        pkhbt r9, r10, r9, lsl #16
        smlad r6, r6, r5, r10
        smlad r7, r7, r5, r10
        smlad r8, r8, r5, r10
        smlad r9, r9, r5, r10
        pkhbt r6, r6, r8, lsl #13
        pkhbt r7, r7, r9, lsl #13
        orr r6, r6, r7, lsl #8

        pop {r1, r4-r11, pc}