/* Scraped from: git.sesse.net Git - ffmpeg - libavcodec/arm/h264dsp_neon.S
 * Commit: Merge remote-tracking branch 'qatar/master'
 */
1 /*
2  * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 #include "asm.S"
22
@ Transpose an 8x8 byte matrix held one row per register in \r0..\r7.
@ Three vtrn passes (32-, 16-, then 8-bit element size) perform the full
@ transpose in-register; works on d- or q-register operands alike.
23         .macro transpose_8x8 r0 r1 r2 r3 r4 r5 r6 r7
24         vtrn.32         \r0, \r4
25         vtrn.32         \r1, \r5
26         vtrn.32         \r2, \r6
27         vtrn.32         \r3, \r7
28         vtrn.16         \r0, \r2
29         vtrn.16         \r1, \r3
30         vtrn.16         \r4, \r6
31         vtrn.16         \r5, \r7
32         vtrn.8          \r0, \r1
33         vtrn.8          \r2, \r3
34         vtrn.8          \r4, \r5
35         vtrn.8          \r6, \r7
36         .endm
37
@ Transpose 4x4 byte blocks held in \r0..\r3 (one row per register)
@ using two vtrn passes at 16- then 8-bit granularity.
38         .macro transpose_4x4 r0 r1 r2 r3
39         vtrn.16         \r0, \r2
40         vtrn.16         \r1, \r3
41         vtrn.8          \r0, \r1
42         vtrn.8          \r2, \r3
43         .endm
44
@ Pairwise swap of register contents: \r0<->\r4, \r1<->\r5, \r2<->\r6,
@ \r3<->\r7 (e.g. to exchange two register groups before/after a transpose).
45         .macro swap4 r0 r1 r2 r3 r4 r5 r6 r7
46         vswp            \r0, \r4
47         vswp            \r1, \r5
48         vswp            \r2, \r6
49         vswp            \r3, \r7
50         .endm
51
@ Transpose 4x4 blocks of 16-bit elements held in \r0..\r7, using vtrn at
@ 32- then 16-bit granularity on two independent groups (\r0..\r3, \r4..\r7).
52         .macro transpose16_4x4 r0 r1 r2 r3 r4 r5 r6 r7
53         vtrn.32         \r0, \r2
54         vtrn.32         \r1, \r3
55         vtrn.32         \r4, \r6
56         vtrn.32         \r5, \r7
57         vtrn.16         \r0, \r1
58         vtrn.16         \r2, \r3
59         vtrn.16         \r4, \r5
60         vtrn.16         \r6, \r7
61         .endm
62
63 /* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
@ 8-pixel-wide H.264 chroma motion compensation: bilinear interpolation with
@ the standard weights A=(8-x)(8-y), B=x(8-y), C=(8-x)y, D=xy, producing
@   dst = (A*s[0] + B*s[1] + C*s[stride] + D*s[stride+1] + 32) >> 6
@ \type is "put" (plain store) or "avg" (vrhadd-average with existing dst).
@ Two rows are produced per loop iteration in all three paths below.
64         .macro  h264_chroma_mc8 type
65 function ff_\type\()_h264_chroma_mc8_neon, export=1
66         push            {r4-r7, lr}
67         ldrd            r4,  [sp, #20]          @ r4 = x, r5 = y
68 .ifc \type,avg
69         mov             lr,  r0                 @ lr = second dst pointer for averaging
70 .endif
71         pld             [r1]
72         pld             [r1, r2]
73
@ Compute the four bilinear weights: r7 = D = x*y, r6 = C = 8y - xy,
@ ip = B = 8x - xy, r4 = A = xy - 8x - 8y + 64.  A/T variants keep the
@ flags update (needed for beq below) working in both ARM and Thumb mode.
74 A       muls            r7,  r4,  r5
75 T       mul             r7,  r4,  r5
76 T       cmp             r7,  #0
77         rsb             r6,  r7,  r5,  lsl #3
78         rsb             ip,  r7,  r4,  lsl #3
79         sub             r4,  r7,  r4,  lsl #3
80         sub             r4,  r4,  r5,  lsl #3
81         add             r4,  r4,  #64
82
83         beq             2f                      @ D == 0: x or y is zero, 1-D filter suffices
84
85         add             r5,  r1,  r2
86
87         vdup.8          d0,  r4                 @ d0 = A
88         lsl             r4,  r2,  #1
89         vdup.8          d1,  ip                 @ d1 = B
90         vld1.64         {d4, d5}, [r1], r4
91         vdup.8          d2,  r6                 @ d2 = C
92         vld1.64         {d6, d7}, [r5], r4
93         vdup.8          d3,  r7                 @ d3 = D
94
95         vext.8          d5,  d4,  d5,  #1       @ d5 = row shifted by one pixel
96         vext.8          d7,  d6,  d7,  #1
97
@ Full 2-D bilinear loop: two output rows per iteration.
98 1:      pld             [r5]
99         vmull.u8        q8,  d4,  d0
100         vmlal.u8        q8,  d5,  d1
101         vld1.64         {d4, d5}, [r1], r4
102         vmlal.u8        q8,  d6,  d2
103         vext.8          d5,  d4,  d5,  #1
104         vmlal.u8        q8,  d7,  d3
105         vmull.u8        q9,  d6,  d0
106         subs            r3,  r3,  #2
107         vmlal.u8        q9,  d7,  d1
108         vmlal.u8        q9,  d4,  d2
109         vmlal.u8        q9,  d5,  d3
110         vrshrn.u16      d16, q8,  #6            @ round and narrow: (sum + 32) >> 6
111         vld1.64         {d6, d7}, [r5], r4
112         pld             [r1]
113         vrshrn.u16      d17, q9,  #6
114 .ifc \type,avg
115         vld1.64         {d20}, [lr,:64], r2
116         vld1.64         {d21}, [lr,:64], r2
117         vrhadd.u8       q8,  q8,  q10           @ average with existing dst
118 .endif
119         vext.8          d7,  d6,  d7,  #1
120         vst1.64         {d16}, [r0,:64], r2
121         vst1.64         {d17}, [r0,:64], r2
122         bgt             1b
123
124         pop             {r4-r7, pc}
125
@ 1-D case: x == 0 or y == 0.  Fold the two surviving weights into
@ d0/d1 (ip += r6 gives 8x + 8y, one of which is zero).
126 2:      tst             r6,  r6                 @ C (8y) == 0 ?
127         add             ip,  ip,  r6
128         vdup.8          d0,  r4
129         vdup.8          d1,  ip
130
131         beq             4f                      @ y == 0: horizontal-only filter
132
@ Vertical-only filter (x == 0): blend each row with the one below it.
133         add             r5,  r1,  r2
134         lsl             r4,  r2,  #1
135         vld1.64         {d4}, [r1], r4
136         vld1.64         {d6}, [r5], r4
137
138 3:      pld             [r5]
139         vmull.u8        q8,  d4,  d0
140         vmlal.u8        q8,  d6,  d1
141         vld1.64         {d4}, [r1], r4
142         vmull.u8        q9,  d6,  d0
143         vmlal.u8        q9,  d4,  d1
144         vld1.64         {d6}, [r5], r4
145         vrshrn.u16      d16, q8,  #6
146         vrshrn.u16      d17, q9,  #6
147 .ifc \type,avg
148         vld1.64         {d20}, [lr,:64], r2
149         vld1.64         {d21}, [lr,:64], r2
150         vrhadd.u8       q8,  q8,  q10
151 .endif
152         subs            r3,  r3,  #2
153         pld             [r1]
154         vst1.64         {d16}, [r0,:64], r2
155         vst1.64         {d17}, [r0,:64], r2
156         bgt             3b
157
158         pop             {r4-r7, pc}
159
@ Horizontal-only filter (y == 0): blend each pixel with its right neighbour.
160 4:      vld1.64         {d4, d5}, [r1], r2
161         vld1.64         {d6, d7}, [r1], r2
162         vext.8          d5,  d4,  d5,  #1
163         vext.8          d7,  d6,  d7,  #1
164
165 5:      pld             [r1]
166         subs            r3,  r3,  #2
167         vmull.u8        q8,  d4,  d0
168         vmlal.u8        q8,  d5,  d1
169         vld1.64         {d4, d5}, [r1], r2
170         vmull.u8        q9,  d6,  d0
171         vmlal.u8        q9,  d7,  d1
172         pld             [r1]
173         vext.8          d5,  d4,  d5,  #1
174         vrshrn.u16      d16, q8,  #6
175         vrshrn.u16      d17, q9,  #6
176 .ifc \type,avg
177         vld1.64         {d20}, [lr,:64], r2
178         vld1.64         {d21}, [lr,:64], r2
179         vrhadd.u8       q8,  q8,  q10
180 .endif
181         vld1.64         {d6, d7}, [r1], r2
182         vext.8          d7,  d6,  d7,  #1
183         vst1.64         {d16}, [r0,:64], r2
184         vst1.64         {d17}, [r0,:64], r2
185         bgt             5b
186
187         pop             {r4-r7, pc}
188 endfunc
189         .endm
190
191 /* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
@ 4-pixel-wide H.264 chroma MC, same bilinear weighting as chroma_mc8
@ (A=(8-x)(8-y), B=x(8-y), C=(8-x)y, D=xy, (sum+32)>>6).  Two 4-pixel
@ rows are packed into one d-register (via vtrn.32) so each vmull/vmlal
@ processes both rows at once; the pairwise vadd.i16 then folds the two
@ partial products before narrowing.
192         .macro  h264_chroma_mc4 type
193 function ff_\type\()_h264_chroma_mc4_neon, export=1
194         push            {r4-r7, lr}
195         ldrd            r4,  [sp, #20]          @ r4 = x, r5 = y
196 .ifc \type,avg
197         mov             lr,  r0
198 .endif
199         pld             [r1]
200         pld             [r1, r2]
201
@ Same weight computation as mc8: r7=D, r6=C, ip=B, r4=A.
202 A       muls            r7,  r4,  r5
203 T       mul             r7,  r4,  r5
204 T       cmp             r7,  #0
205         rsb             r6,  r7,  r5,  lsl #3
206         rsb             ip,  r7,  r4,  lsl #3
207         sub             r4,  r7,  r4,  lsl #3
208         sub             r4,  r4,  r5,  lsl #3
209         add             r4,  r4,  #64
210
211         beq             2f                      @ D == 0: 1-D filter
212
213         add             r5,  r1,  r2
214
215         vdup.8          d0,  r4                 @ A
216         lsl             r4,  r2,  #1
217         vdup.8          d1,  ip                 @ B
218         vld1.64         {d4},     [r1], r4
219         vdup.8          d2,  r6                 @ C
220         vld1.64         {d6},     [r5], r4
221         vdup.8          d3,  r7                 @ D
222
@ Interleave row and row+1-pixel into one register so A/B (and C/D)
@ coefficients can be applied in a single multiply.
223         vext.8          d5,  d4,  d5,  #1
224         vext.8          d7,  d6,  d7,  #1
225         vtrn.32         d4,  d5
226         vtrn.32         d6,  d7
227
228         vtrn.32         d0,  d1                 @ pack A|B and C|D coefficient pairs
229         vtrn.32         d2,  d3
230
@ Full 2-D bilinear loop, two output rows per iteration.
231 1:      pld             [r5]
232         vmull.u8        q8,  d4,  d0
233         vmlal.u8        q8,  d6,  d2
234         vld1.64         {d4},     [r1], r4
235         vext.8          d5,  d4,  d5,  #1
236         vtrn.32         d4,  d5
237         vmull.u8        q9,  d6,  d0
238         vmlal.u8        q9,  d4,  d2
239         vld1.64         {d6},     [r5], r4
240         vadd.i16        d16, d16, d17           @ fold the interleaved partial sums
241         vadd.i16        d17, d18, d19
242         vrshrn.u16      d16, q8,  #6
243         subs            r3,  r3,  #2
244         pld             [r1]
245 .ifc \type,avg
246         vld1.32         {d20[0]}, [lr,:32], r2
247         vld1.32         {d20[1]}, [lr,:32], r2
248         vrhadd.u8       d16, d16, d20
249 .endif
250         vext.8          d7,  d6,  d7,  #1
251         vtrn.32         d6,  d7
252         vst1.32         {d16[0]}, [r0,:32], r2
253         vst1.32         {d16[1]}, [r0,:32], r2
254         bgt             1b
255
256         pop             {r4-r7, pc}
257
@ 1-D case: x == 0 or y == 0 (ip += r6 merges B and C; one is zero).
258 2:      tst             r6,  r6                 @ C (8y) == 0 ?
259         add             ip,  ip,  r6
260         vdup.8          d0,  r4
261         vdup.8          d1,  ip
262         vtrn.32         d0,  d1
263
264         beq             4f                      @ y == 0: horizontal-only
265
@ Vertical-only filter (x == 0), two rows packed per register.
266         vext.32         d1,  d0,  d1,  #1
267         add             r5,  r1,  r2
268         lsl             r4,  r2,  #1
269         vld1.32         {d4[0]},  [r1], r4
270         vld1.32         {d4[1]},  [r5], r4
271
272 3:      pld             [r5]
273         vmull.u8        q8,  d4,  d0
274         vld1.32         {d4[0]},  [r1], r4
275         vmull.u8        q9,  d4,  d1
276         vld1.32         {d4[1]},  [r5], r4
277         vadd.i16        d16, d16, d17
278         vadd.i16        d17, d18, d19
279         vrshrn.u16      d16, q8,  #6
280 .ifc \type,avg
281         vld1.32         {d20[0]}, [lr,:32], r2
282         vld1.32         {d20[1]}, [lr,:32], r2
283         vrhadd.u8       d16, d16, d20
284 .endif
285         subs            r3,  r3,  #2
286         pld             [r1]
287         vst1.32         {d16[0]}, [r0,:32], r2
288         vst1.32         {d16[1]}, [r0,:32], r2
289         bgt             3b
290
291         pop             {r4-r7, pc}
292
@ Horizontal-only filter (y == 0).
293 4:      vld1.64         {d4},     [r1], r2
294         vld1.64         {d6},     [r1], r2
295         vext.8          d5,  d4,  d5,  #1
296         vext.8          d7,  d6,  d7,  #1
297         vtrn.32         d4,  d5
298         vtrn.32         d6,  d7
299
300 5:      vmull.u8        q8,  d4,  d0
301         vmull.u8        q9,  d6,  d0
302         subs            r3,  r3,  #2
303         vld1.64         {d4},     [r1], r2
304         vext.8          d5,  d4,  d5,  #1
305         vtrn.32         d4,  d5
306         vadd.i16        d16, d16, d17
307         vadd.i16        d17, d18, d19
308         pld             [r1]
309         vrshrn.u16      d16, q8,  #6
310 .ifc \type,avg
311         vld1.32         {d20[0]}, [lr,:32], r2
312         vld1.32         {d20[1]}, [lr,:32], r2
313         vrhadd.u8       d16, d16, d20
314 .endif
315         vld1.64         {d6},     [r1], r2
316         vext.8          d7,  d6,  d7,  #1
317         vtrn.32         d6,  d7
318         pld             [r1]
319         vst1.32         {d16[0]}, [r0,:32], r2
320         vst1.32         {d16[1]}, [r0,:32], r2
321         bgt             5b
322
323         pop             {r4-r7, pc}
324 endfunc
325         .endm
326
@ 2-pixel-wide H.264 chroma MC (same bilinear weights as mc8/mc4).
@ Args: r0=dst, r1=src, r2=stride, r3=h, [sp,#16]=x, [sp,#20]=y.
@ When x == y == 0 the filter degenerates to a plain 2-byte copy
@ (put) or average (avg) — handled in the fast path at label 2.
327         .macro  h264_chroma_mc2 type
328 function ff_\type\()_h264_chroma_mc2_neon, export=1
329         push            {r4-r6, lr}
330         ldr             r4,  [sp, #16]          @ r4 = x
331         ldr             lr,  [sp, #20]          @ lr = y
332         pld             [r1]
333         pld             [r1, r2]
334         orrs            r5,  r4,  lr
335         beq             2f                      @ x == y == 0: copy/average only
336
@ Weights: r5 = D = x*y, r6 = C = 8y - xy, r12 = B = 8x - xy,
@ r4 = A = xy - 8x - 8y + 64; interleaved into d0/d1 by the vtrn.
337         mul             r5,  r4,  lr
338         rsb             r6,  r5,  lr,  lsl #3
339         rsb             r12, r5,  r4,  lsl #3
340         sub             r4,  r5,  r4,  lsl #3
341         sub             r4,  r4,  lr,  lsl #3
342         add             r4,  r4,  #64
343         vdup.8          d0,  r4
344         vdup.8          d2,  r12
345         vdup.8          d1,  r6
346         vdup.8          d3,  r5
347         vtrn.16         q0,  q1
@ Bilinear loop: gathers 3 source rows' 2-pixel pairs into q2, builds the
@ shifted copies with vext/vtrn, and emits two output rows per iteration.
348 1:
349         vld1.32         {d4[0]},  [r1], r2
350         vld1.32         {d4[1]},  [r1], r2
351         vrev64.32       d5,  d4
352         vld1.32         {d5[1]},  [r1]
353         vext.8          q3,  q2,  q2,  #1
354         vtrn.16         q2,  q3
355         vmull.u8        q8,  d4,  d0
356         vmlal.u8        q8,  d5,  d1
357 .ifc \type,avg
358         vld1.16         {d18[0]}, [r0,:16], r2
359         vld1.16         {d18[1]}, [r0,:16]
360         sub             r0,  r0,  r2
361 .endif
362         vtrn.32         d16, d17
363         vadd.i16        d16, d16, d17
364         vrshrn.u16      d16, q8,  #6            @ (sum + 32) >> 6
365 .ifc \type,avg
366         vrhadd.u8       d16, d16, d18
367 .endif
368         vst1.16         {d16[0]}, [r0,:16], r2
369         vst1.16         {d16[1]}, [r0,:16], r2
370         subs            r3,  r3,  #2
371         bgt             1b
372         pop             {r4-r6, pc}
@ Unfiltered path (x == y == 0): straight copy for put, vrhadd for avg,
@ two rows per iteration.
373 2:
374 .ifc \type,put
375         ldrh_post       r5,  r1,  r2
376         strh_post       r5,  r0,  r2
377         ldrh_post       r6,  r1,  r2
378         strh_post       r6,  r0,  r2
379 .else
380         vld1.16         {d16[0]}, [r1], r2
381         vld1.16         {d16[1]}, [r1], r2
382         vld1.16         {d18[0]}, [r0,:16], r2
383         vld1.16         {d18[1]}, [r0,:16]
384         sub             r0,  r0,  r2
385         vrhadd.u8       d16, d16, d18
386         vst1.16         {d16[0]}, [r0,:16], r2
387         vst1.16         {d16[1]}, [r0,:16], r2
388 .endif
389         subs            r3,  r3,  #2
390         bgt             2b
391         pop             {r4-r6, pc}
392 endfunc
393 .endm
394
@ Instantiate both variants of each chroma MC function:
@ "put" stores the result, "avg" averages it with the existing dst.
395         h264_chroma_mc8 put
396         h264_chroma_mc8 avg
397         h264_chroma_mc4 put
398         h264_chroma_mc4 avg
399         h264_chroma_mc2 put
400         h264_chroma_mc2 avg
401
402         /* H.264 loop filter */
403
@ Common prologue for all loop-filter entry points.
@ In: r2 = alpha, r3 = beta, [sp] = pointer to the 4-byte tc0 array.
@ Loads the 4 tc0 bytes into d24[0] and returns early (bx lr) when there
@ is nothing to filter: alpha == 0, beta == 0, or — judging by the
@ sign-bit AND cascade — when all four tc0 bytes are negative.
404         .macro h264_loop_filter_start
405         ldr             ip,  [sp]               @ ip = tc0 pointer
406         tst             r2,  r2                 @ alpha == 0 ?
407         ldr             ip,  [ip]               @ ip = 4 packed tc0 bytes
408         it              ne
409         tstne           r3,  r3                 @ ... or beta == 0 ?
410         vmov.32         d24[0], ip              @ d24[0] = tc0 for the filter macros
411         and             ip,  ip,  ip, lsl #16
412         it              eq
413         bxeq            lr                      @ bail out: alpha or beta is zero
414         ands            ip,  ip,  ip, lsl #8    @ N set iff sign bit set in every byte
415         it              lt
416         bxlt            lr                      @ bail out: all tc0 < 0
417         .endm
418
@ Core 16-pixel luma deblocking filter (normal, tc0-based mode).
@ In:  q10=p2 q9=p1 q8=p0 q0=q0 q1=q1 q2=q2 (one pixel line per lane),
@      r2=alpha, r3=beta, d24[0]=tc0 bytes (loaded by h264_loop_filter_start).
@ Out: q4 = filtered p1, q8 = filtered p0, q0 = filtered q0, q5 = filtered q1.
@ Clobbers q6/q7/q11..q15; callers vpush/vpop d8-d15 around this macro.
419         .macro h264_loop_filter_luma
420         vdup.8          q11, r2         @ alpha
421         vmovl.u8        q12, d24
422         vabd.u8         q6,  q8,  q0    @ abs(p0 - q0)
423         vmovl.u16       q12, d24
424         vabd.u8         q14, q9,  q8    @ abs(p1 - p0)
425         vsli.16         q12, q12, #8
426         vabd.u8         q15, q1,  q0    @ abs(q1 - q0)
427         vsli.32         q12, q12, #16   @ q12 = tc0 replicated to all 4 pixels per edge
428         vclt.u8         q6,  q6,  q11   @ < alpha
429         vdup.8          q11, r3         @ beta
430         vclt.s8         q7,  q12, #0    @ mask of edges with tc0 < 0 (skip)
431         vclt.u8         q14, q14, q11   @ < beta
432         vclt.u8         q15, q15, q11   @ < beta
433         vbic            q6,  q6,  q7    @ q6 = filter-this-pixel mask
434         vabd.u8         q4,  q10, q8    @ abs(p2 - p0)
435         vand            q6,  q6,  q14
436         vabd.u8         q5,  q2,  q0    @ abs(q2 - q0)
437         vclt.u8         q4,  q4,  q11   @ < beta  -> p1 also filtered
438         vand            q6,  q6,  q15
439         vclt.u8         q5,  q5,  q11   @ < beta  -> q1 also filtered
440         vand            q4,  q4,  q6
441         vand            q5,  q5,  q6
442         vand            q12, q12, q6
@ p1'/q1' candidates: clip (p2 + avg(p0,q0))/2 style updates into [pX-tc0, pX+tc0].
443         vrhadd.u8       q14, q8,  q0
444         vsub.i8         q6,  q12, q4    @ tc = tc0 + (ap<beta) + (aq<beta)
445         vqadd.u8        q7,  q9,  q12
446         vhadd.u8        q10, q10, q14
447         vsub.i8         q6,  q6,  q5
448         vhadd.u8        q14, q2,  q14
449         vmin.u8         q7,  q7,  q10
450         vqsub.u8        q11, q9,  q12
451         vqadd.u8        q2,  q1,  q12
452         vmax.u8         q7,  q7,  q11   @ q7 = new p1
453         vqsub.u8        q11, q1,  q12
454         vmin.u8         q14, q2,  q14
455         vmovl.u8        q2,  d0
456         vmax.u8         q14, q14, q11   @ q14 = new q1
@ delta = clip( ((q0 - p0) * 4 + (p1 - q1) + 4) >> 3, -tc, tc )
457         vmovl.u8        q10, d1
458         vsubw.u8        q2,  q2,  d16
459         vsubw.u8        q10, q10, d17
460         vshl.i16        q2,  q2,  #2
461         vshl.i16        q10, q10, #2
462         vaddw.u8        q2,  q2,  d18
463         vaddw.u8        q10, q10, d19
464         vsubw.u8        q2,  q2,  d2
465         vsubw.u8        q10, q10, d3
466         vrshrn.i16      d4,  q2,  #3
467         vrshrn.i16      d5,  q10, #3
468         vbsl            q4,  q7,  q9    @ select new/old p1 per-pixel
469         vbsl            q5,  q14, q1    @ select new/old q1 per-pixel
470         vneg.s8         q7,  q6
471         vmovl.u8        q14, d16
472         vmin.s8         q2,  q2,  q6    @ clamp delta to +tc
473         vmovl.u8        q6,  d17
474         vmax.s8         q2,  q2,  q7    @ clamp delta to -tc
475         vmovl.u8        q11, d0
476         vmovl.u8        q12, d1
@ p0' = sat(p0 + delta), q0' = sat(q0 - delta)
477         vaddw.s8        q14, q14, d4
478         vaddw.s8        q6,  q6,  d5
479         vsubw.s8        q11, q11, d4
480         vsubw.s8        q12, q12, d5
481         vqmovun.s16     d16, q14
482         vqmovun.s16     d17, q6
483         vqmovun.s16     d0,  q11
484         vqmovun.s16     d1,  q12
485         .endm
486
@ void ff_h264_v_loop_filter_luma_neon(uint8_t *pix, int stride,
@                                      int alpha, int beta, int8_t *tc0)
@ Vertical-edge orientation of the luma deblocker: rows are already
@ contiguous, so just load q0..q2 below and p2..p0 above the edge,
@ run the filter, and store the four modified rows back.
487 function ff_h264_v_loop_filter_luma_neon, export=1
488         h264_loop_filter_start
489
490         vld1.64         {d0, d1},  [r0,:128], r1        @ q0 row
491         vld1.64         {d2, d3},  [r0,:128], r1        @ q1 row
492         vld1.64         {d4, d5},  [r0,:128], r1        @ q2 row
493         sub             r0,  r0,  r1, lsl #2
494         sub             r0,  r0,  r1, lsl #1            @ rewind 6 rows, to p2
495         vld1.64         {d20,d21}, [r0,:128], r1        @ p2 row
496         vld1.64         {d18,d19}, [r0,:128], r1        @ p1 row
497         vld1.64         {d16,d17}, [r0,:128], r1        @ p0 row
498
499         vpush           {d8-d15}                        @ filter macro clobbers q4-q7
500
501         h264_loop_filter_luma
502
503         sub             r0,  r0,  r1, lsl #1
504         vst1.64         {d8, d9},  [r0,:128], r1        @ p1'
505         vst1.64         {d16,d17}, [r0,:128], r1        @ p0'
506         vst1.64         {d0, d1},  [r0,:128], r1        @ q0'
507         vst1.64         {d10,d11}, [r0,:128]            @ q1'
508
509         vpop            {d8-d15}
510         bx              lr
511 endfunc
512
@ void ff_h264_h_loop_filter_luma_neon(uint8_t *pix, int stride,
@                                      int alpha, int beta, int8_t *tc0)
@ Horizontal-edge orientation: loads a 16x8 block around the edge
@ (p3..q3 per row), transposes it so the filter can run edge-parallel,
@ then transposes the four modified lines back and stores them as
@ 4-byte columns at offset +2 (p1..q1) into each of the 16 rows.
513 function ff_h264_h_loop_filter_luma_neon, export=1
514         h264_loop_filter_start
515
516         sub             r0,  r0,  #4
517         vld1.64         {d6},  [r0], r1
518         vld1.64         {d20}, [r0], r1
519         vld1.64         {d18}, [r0], r1
520         vld1.64         {d16}, [r0], r1
521         vld1.64         {d0},  [r0], r1
522         vld1.64         {d2},  [r0], r1
523         vld1.64         {d4},  [r0], r1
524         vld1.64         {d26}, [r0], r1
525         vld1.64         {d7},  [r0], r1
526         vld1.64         {d21}, [r0], r1
527         vld1.64         {d19}, [r0], r1
528         vld1.64         {d17}, [r0], r1
529         vld1.64         {d1},  [r0], r1
530         vld1.64         {d3},  [r0], r1
531         vld1.64         {d5},  [r0], r1
532         vld1.64         {d27}, [r0], r1
533
534         transpose_8x8   q3, q10, q9, q8, q0, q1, q2, q13       @ rows -> columns
535
536         vpush           {d8-d15}
537
538         h264_loop_filter_luma
539
540         transpose_4x4   q4, q8, q0, q5          @ modified p1,p0,q0,q1 back to rows
541
542         sub             r0,  r0,  r1, lsl #4    @ rewind 16 rows
543         add             r0,  r0,  #2            @ columns p1..q1
544         vst1.32         {d8[0]},  [r0], r1
545         vst1.32         {d16[0]}, [r0], r1
546         vst1.32         {d0[0]},  [r0], r1
547         vst1.32         {d10[0]}, [r0], r1
548         vst1.32         {d8[1]},  [r0], r1
549         vst1.32         {d16[1]}, [r0], r1
550         vst1.32         {d0[1]},  [r0], r1
551         vst1.32         {d10[1]}, [r0], r1
552         vst1.32         {d9[0]},  [r0], r1
553         vst1.32         {d17[0]}, [r0], r1
554         vst1.32         {d1[0]},  [r0], r1
555         vst1.32         {d11[0]}, [r0], r1
556         vst1.32         {d9[1]},  [r0], r1
557         vst1.32         {d17[1]}, [r0], r1
558         vst1.32         {d1[1]},  [r0], r1
559         vst1.32         {d11[1]}, [r0], r1
560
561         vpop            {d8-d15}
562         bx              lr
563 endfunc
564
@ Core 8-pixel chroma deblocking filter (tc-based mode).
@ In:  d18=p1 d16=p0 d0=q0 d2=q1, r2=alpha, r3=beta,
@      d24[0]=tc0 bytes (from h264_loop_filter_start).
@ Out: d16 = filtered p0, d0 = filtered q0 (chroma never touches p1/q1).
@ delta = clip(((q0-p0)*4 + p1 - q1 + 4) >> 3, -tc, tc), gated by the
@ alpha/beta threshold mask in d26.
565         .macro h264_loop_filter_chroma
566         vdup.8          d22, r2         @ alpha
567         vmovl.u8        q12, d24
568         vabd.u8         d26, d16, d0    @ abs(p0 - q0)
569         vmovl.u8        q2,  d0
570         vabd.u8         d28, d18, d16   @ abs(p1 - p0)
571         vsubw.u8        q2,  q2,  d16
572         vsli.16         d24, d24, #8    @ replicate tc0 per pixel pair
573         vshl.i16        q2,  q2,  #2
574         vabd.u8         d30, d2,  d0    @ abs(q1 - q0)
575         vaddw.u8        q2,  q2,  d18
576         vclt.u8         d26, d26, d22   @ < alpha
577         vsubw.u8        q2,  q2,  d2
578         vdup.8          d22, r3         @ beta
579         vrshrn.i16      d4,  q2,  #3    @ raw delta
580         vclt.u8         d28, d28, d22   @ < beta
581         vclt.u8         d30, d30, d22   @ < beta
582         vmin.s8         d4,  d4,  d24   @ clamp delta to +tc
583         vneg.s8         d25, d24
584         vand            d26, d26, d28
585         vmax.s8         d4,  d4,  d25   @ clamp delta to -tc
586         vand            d26, d26, d30
587         vmovl.u8        q11, d0
588         vand            d4,  d4,  d26   @ zero delta where thresholds fail
589         vmovl.u8        q14, d16
590         vaddw.s8        q14, q14, d4    @ p0' = sat(p0 + delta)
591         vsubw.s8        q11, q11, d4    @ q0' = sat(q0 - delta)
592         vqmovun.s16     d16, q14
593         vqmovun.s16     d0,  q11
594         .endm
595
@ void ff_h264_v_loop_filter_chroma_neon(uint8_t *pix, int stride,
@                                        int alpha, int beta, int8_t *tc0)
@ Vertical-edge chroma deblocker: load p1,p0,q0,q1 rows around the edge,
@ filter, and store the two modified rows (p0', q0').
596 function ff_h264_v_loop_filter_chroma_neon, export=1
597         h264_loop_filter_start
598
599         sub             r0,  r0,  r1, lsl #1    @ rewind 2 rows, to p1
600         vld1.64         {d18}, [r0,:64], r1     @ p1
601         vld1.64         {d16}, [r0,:64], r1     @ p0
602         vld1.64         {d0},  [r0,:64], r1     @ q0
603         vld1.64         {d2},  [r0,:64]         @ q1
604
605         h264_loop_filter_chroma
606
607         sub             r0,  r0,  r1, lsl #1
608         vst1.64         {d16}, [r0,:64], r1     @ p0'
609         vst1.64         {d0},  [r0,:64], r1     @ q0'
610
611         bx              lr
612 endfunc
613
@ void ff_h264_h_loop_filter_chroma_neon(uint8_t *pix, int stride,
@                                        int alpha, int beta, int8_t *tc0)
@ Horizontal-edge chroma deblocker: gather the 4 pixels straddling the
@ edge from 8 rows, transpose 4x8 -> registers, filter, transpose back
@ and scatter the (possibly modified) 4-byte groups to the same rows.
614 function ff_h264_h_loop_filter_chroma_neon, export=1
615         h264_loop_filter_start
616
617         sub             r0,  r0,  #2            @ start at p1 column
618         vld1.32         {d18[0]}, [r0], r1
619         vld1.32         {d16[0]}, [r0], r1
620         vld1.32         {d0[0]},  [r0], r1
621         vld1.32         {d2[0]},  [r0], r1
622         vld1.32         {d18[1]}, [r0], r1
623         vld1.32         {d16[1]}, [r0], r1
624         vld1.32         {d0[1]},  [r0], r1
625         vld1.32         {d2[1]},  [r0], r1
626
@ In-register transpose: columns p1,p0,q0,q1 -> d18,d16,d0,d2.
627         vtrn.16         d18, d0
628         vtrn.16         d16, d2
629         vtrn.8          d18, d16
630         vtrn.8          d0,  d2
631
632         h264_loop_filter_chroma
633
@ Transpose back to row order before storing.
634         vtrn.16         d18, d0
635         vtrn.16         d16, d2
636         vtrn.8          d18, d16
637         vtrn.8          d0,  d2
638
639         sub             r0,  r0,  r1, lsl #3    @ rewind 8 rows
640         vst1.32         {d18[0]}, [r0], r1
641         vst1.32         {d16[0]}, [r0], r1
642         vst1.32         {d0[0]},  [r0], r1
643         vst1.32         {d2[0]},  [r0], r1
644         vst1.32         {d18[1]}, [r0], r1
645         vst1.32         {d16[1]}, [r0], r1
646         vst1.32         {d0[1]},  [r0], r1
647         vst1.32         {d2[1]},  [r0], r1
648
649         bx              lr
650 endfunc
651
652         /* H.264 qpel MC */
653
@ Load the 6-tap qpel filter constants into d6: r = (20 << 16) | 5, so
@ d6[0] (s16) = 5 and d6[1] (s16) = 20, used as vmls/vmla scalar operands
@ by the lowpass_8* macros below.  \r is a scratch core register.
654         .macro  lowpass_const r
655         movw            \r,  #5
656         movt            \r,  #20
657         vmov.32         d6[0], \r
658         .endm
659
@ Apply the H.264 6-tap qpel lowpass filter (1,-5,20,20,-5,1) horizontally
@ to two 8-pixel rows held in (\r0:\r1) and (\r2:\r3) (each row is 8+5
@ pixels across a d-register pair).  Requires lowpass_const in d6.
@ narrow=1: round+saturate >>5 into the 8-bit results \d0/\d1.
@ narrow=0: leave 16-bit intermediates in \d0/\d1 (for the 2-D hv case).
@ Clobbers q1, q2, q9, q10, d30, d31 (and q0/q8 when narrowing).
660         .macro  lowpass_8 r0, r1, r2, r3, d0, d1, narrow=1
661 .if \narrow
662         t0 .req q0
663         t1 .req q8
664 .else
665         t0 .req \d0
666         t1 .req \d1
667 .endif
@ Row 0: sum the +-2 taps (weight 20), +-1/+4 taps (weight -5) and ends.
668         vext.8          d2,  \r0, \r1, #2
669         vext.8          d3,  \r0, \r1, #3
670         vaddl.u8        q1,  d2,  d3
671         vext.8          d4,  \r0, \r1, #1
672         vext.8          d5,  \r0, \r1, #4
673         vaddl.u8        q2,  d4,  d5
674         vext.8          d30, \r0, \r1, #5
675         vaddl.u8        t0,  \r0, d30
676         vext.8          d18, \r2, \r3, #2
677         vmla.i16        t0,  q1,  d6[1]         @ += 20 * center taps
678         vext.8          d19, \r2, \r3, #3
679         vaddl.u8        q9,  d18, d19
680         vext.8          d20, \r2, \r3, #1
681         vmls.i16        t0,  q2,  d6[0]         @ -= 5 * outer taps
682         vext.8          d21, \r2, \r3, #4
683         vaddl.u8        q10, d20, d21
684         vext.8          d31, \r2, \r3, #5
685         vaddl.u8        t1,  \r2, d31
686         vmla.i16        t1,  q9,  d6[1]
687         vmls.i16        t1,  q10, d6[0]
688 .if \narrow
689         vqrshrun.s16    \d0, t0,  #5            @ (sum + 16) >> 5, saturated to u8
690         vqrshrun.s16    \d1, t1,  #5
691 .endif
692         .unreq  t0
693         .unreq  t1
694         .endm
695
@ Single-row variant of lowpass_8: 6-tap qpel filter on one 8-pixel row
@ held in \r0:\r1.  Requires lowpass_const in d6; clobbers q1, q2, d30
@ (and q0 when narrowing).  narrow=1 rounds >>5 into 8-bit \d0.
696         .macro  lowpass_8_1 r0, r1, d0, narrow=1
697 .if \narrow
698         t0 .req q0
699 .else
700         t0 .req \d0
701 .endif
702         vext.8          d2,  \r0, \r1, #2
703         vext.8          d3,  \r0, \r1, #3
704         vaddl.u8        q1,  d2,  d3
705         vext.8          d4,  \r0, \r1, #1
706         vext.8          d5,  \r0, \r1, #4
707         vaddl.u8        q2,  d4,  d5
708         vext.8          d30, \r0, \r1, #5
709         vaddl.u8        t0,  \r0, d30
710         vmla.i16        t0,  q1,  d6[1]         @ += 20 * center taps
711         vmls.i16        t0,  q2,  d6[0]         @ -= 5 * outer taps
712 .if \narrow
713         vqrshrun.s16    \d0, t0,  #5
714 .endif
715         .unreq  t0
716         .endm
717
@ 16-bit-input variant of the 6-tap filter, used for the second pass of
@ 2-D (hv) qpel interpolation: inputs are the un-narrowed 16-bit results
@ of a first lowpass pass.  The 20x weight is built as (x<<4)+(x<<2) and
@ 5x as (x<<2)+x; the 32-bit sums are rounded >>10 and saturated into
@ the 8-bit result \d.  \r1 is consumed (overwritten by the #5 vext).
718         .macro  lowpass_8.16 r0, r1, l0, h0, l1, h1, d
719         vext.16         q1,  \r0, \r1, #2
720         vext.16         q0,  \r0, \r1, #3
721         vaddl.s16       q9,  d2,  d0            @ center taps, low half
722         vext.16         q2,  \r0, \r1, #1
723         vaddl.s16       q1,  d3,  d1            @ center taps, high half
724         vext.16         q3,  \r0, \r1, #4
725         vaddl.s16       q10, d4,  d6            @ outer taps, low half
726         vext.16         \r1, \r0, \r1, #5
727         vaddl.s16       q2,  d5,  d7            @ outer taps, high half
728         vaddl.s16       q0,  \h0, \h1           @ end taps
729         vaddl.s16       q8,  \l0, \l1
730
@ *20 = *16 + *4 (low half)
731         vshl.i32        q3,  q9,  #4
732         vshl.i32        q9,  q9,  #2
733         vshl.i32        q15, q10, #2
734         vadd.i32        q9,  q9,  q3
735         vadd.i32        q10, q10, q15           @ *5 = *4 + *1
736
@ Same for the high half.
737         vshl.i32        q3,  q1,  #4
738         vshl.i32        q1,  q1,  #2
739         vshl.i32        q15, q2,  #2
740         vadd.i32        q1,  q1,  q3
741         vadd.i32        q2,  q2,  q15
742
743         vadd.i32        q9,  q9,  q8
744         vsub.i32        q9,  q9,  q10
745
746         vadd.i32        q1,  q1,  q0
747         vsub.i32        q1,  q1,  q2
748
749         vrshrn.s32      d18, q9,  #10           @ (sum + 512) >> 10
750         vrshrn.s32      d19, q1,  #10
751
752         vqmovun.s16     \d,  q9
753         .endm
754
@ 16-wide horizontal qpel lowpass whose output is stored packed with an
@ 8-byte dst stride (r3 = 8): two 8-wide passes over the left and right
@ halves into a contiguous buffer.  r4 is used to preserve lr across the
@ first bl; the second half tail-calls (b) with the original lr restored.
755 function put_h264_qpel16_h_lowpass_neon_packed
756         mov             r4,  lr
757         mov             ip,  #16                @ 16 rows
758         mov             r3,  #8                 @ packed dst stride
759         bl              put_h264_qpel8_h_lowpass_neon
760         sub             r1,  r1,  r2, lsl #4    @ rewind src 16 rows
761         add             r1,  r1,  #8            @ right half
762         mov             ip,  #16
763         mov             lr,  r4
764         b               put_h264_qpel8_h_lowpass_neon
765 endfunc
766
@ Horizontal 6-tap qpel lowpass, \type = put or avg.
@ qpel8 core: r0=dst, r1=src (r1 points 2 pixels left of the output
@ position), r2=src stride, r3=dst stride, ip=row count; needs
@ lowpass_const in d6.  The qpel16 wrapper runs the 8-wide core on the
@ left half, rewinds and shifts both pointers by 8, then FALLS THROUGH
@ into the 8-wide core for the right half (functions must stay adjacent).
767         .macro h264_qpel_h_lowpass type
768 function \type\()_h264_qpel16_h_lowpass_neon
769         push            {lr}
770         mov             ip,  #16
771         bl              \type\()_h264_qpel8_h_lowpass_neon
772         sub             r0,  r0,  r3, lsl #4
773         sub             r1,  r1,  r2, lsl #4
774         add             r0,  r0,  #8
775         add             r1,  r1,  #8
776         mov             ip,  #16
777         pop             {lr}
778 endfunc
@ (fallthrough into the 8-wide core below)
779
780 function \type\()_h264_qpel8_h_lowpass_neon
781 1:      vld1.64         {d0, d1},  [r1], r2
782         vld1.64         {d16,d17}, [r1], r2
783         subs            ip,  ip,  #2            @ two rows per iteration
784         lowpass_8       d0,  d1,  d16, d17, d0,  d16
785 .ifc \type,avg
786         vld1.8          {d2},     [r0,:64], r3
787         vrhadd.u8       d0,  d0,  d2
788         vld1.8          {d3},     [r0,:64]
789         vrhadd.u8       d16, d16, d3
790         sub             r0,  r0,  r3
791 .endif
792         vst1.64         {d0},     [r0,:64], r3
793         vst1.64         {d16},    [r0,:64], r3
794         bne             1b
795         bx              lr
796 endfunc
797         .endm
798
@ Instantiate the put and avg horizontal lowpass helpers.
799         h264_qpel_h_lowpass put
800         h264_qpel_h_lowpass avg
801
@ Horizontal qpel lowpass averaged with a second plane (the "l2" form used
@ for quarter-pel positions): result = avg(lowpass(src), src2), where
@ r3 = src2 and r2 serves as the common stride.  As with h264_qpel_h_lowpass,
@ the 16-wide wrapper handles the left half then FALLS THROUGH into the
@ 8-wide core for the right half.  ip = row count; needs lowpass_const in d6.
802         .macro h264_qpel_h_lowpass_l2 type
803 function \type\()_h264_qpel16_h_lowpass_l2_neon
804         push            {lr}
805         mov             ip,  #16
806         bl              \type\()_h264_qpel8_h_lowpass_l2_neon
807         sub             r0,  r0,  r2, lsl #4
808         sub             r1,  r1,  r2, lsl #4
809         sub             r3,  r3,  r2, lsl #4
810         add             r0,  r0,  #8
811         add             r1,  r1,  #8
812         add             r3,  r3,  #8
813         mov             ip,  #16
814         pop             {lr}
815 endfunc
@ (fallthrough into the 8-wide core below)
816
817 function \type\()_h264_qpel8_h_lowpass_l2_neon
818 1:      vld1.64         {d0, d1},  [r1], r2
819         vld1.64         {d16,d17}, [r1], r2
820         vld1.64         {d28},     [r3], r2     @ second source plane
821         vld1.64         {d29},     [r3], r2
822         subs            ip,  ip,  #2
823         lowpass_8       d0,  d1,  d16, d17, d0,  d1
824         vrhadd.u8       q0,  q0,  q14           @ merge with second plane
825 .ifc \type,avg
826         vld1.8          {d2},      [r0,:64], r2
827         vrhadd.u8       d0,  d0,  d2
828         vld1.8          {d3},      [r0,:64]
829         vrhadd.u8       d1,  d1,  d3
830         sub             r0,  r0,  r2
831 .endif
832         vst1.64         {d0},      [r0,:64], r2
833         vst1.64         {d1},      [r0,:64], r2
834         bne             1b
835         bx              lr
836 endfunc
837         .endm
838
@ Instantiate the put and avg l2 (averaging) horizontal lowpass helpers.
839         h264_qpel_h_lowpass_l2 put
840         h264_qpel_h_lowpass_l2 avg
841
@ 16-wide vertical qpel lowpass with packed (8-byte stride) output:
@ four 8x8 passes (top-left, bottom-left, top-right, bottom-right) into
@ a contiguous buffer.  r2 is the packed dst stride (#8), r3 the src
@ stride; lr is preserved in r4, and the last pass is a tail-call.
842 function put_h264_qpel16_v_lowpass_neon_packed
843         mov             r4,  lr
844         mov             r2,  #8                 @ packed dst stride
845         bl              put_h264_qpel8_v_lowpass_neon
846         sub             r1,  r1,  r3, lsl #2    @ rewind src 4 rows (filter overlap)
847         bl              put_h264_qpel8_v_lowpass_neon
848         sub             r1,  r1,  r3, lsl #4
849         sub             r1,  r1,  r3, lsl #2    @ rewind to top, shift to right half
850         add             r1,  r1,  #8
851         bl              put_h264_qpel8_v_lowpass_neon
852         sub             r1,  r1,  r3, lsl #2
853         mov             lr,  r4
854         b               put_h264_qpel8_v_lowpass_neon
855 endfunc
856
@ Vertical lowpass filter (half-pel interpolation in y).
@ \type is "put" (plain store) or "avg" (average with existing dst pixels).
@ In:  r0 = dst, r2 = dst stride, r1 = src, r3 = src stride.
857         .macro h264_qpel_v_lowpass type
@ 16x16 wrapper: four 8x8 quadrants.  Clobbers r4 (holds saved lr).
858 function \type\()_h264_qpel16_v_lowpass_neon
859         mov             r4,  lr
860         bl              \type\()_h264_qpel8_v_lowpass_neon
861         sub             r1,  r1,  r3, lsl #2    @ rewind src 4 rows (filter overlap)
862         bl              \type\()_h264_qpel8_v_lowpass_neon
863         sub             r0,  r0,  r2, lsl #4    @ dst back to the top row,
864         add             r0,  r0,  #8            @ right 8-column half
865         sub             r1,  r1,  r3, lsl #4    @ src likewise: up 20 rows,
866         sub             r1,  r1,  r3, lsl #2
867         add             r1,  r1,  #8            @ over by 8 columns
868         bl              \type\()_h264_qpel8_v_lowpass_neon
869         sub             r1,  r1,  r3, lsl #2
870         mov             lr,  r4                 @ restore lr; deliberately falls
871 endfunc                                         @ through into the 8x8 body below
872
@ 8x8 body: load 13 source rows (8 output rows + 5 rows of 6-tap overlap),
@ transpose so columns become rows, run the lowpass_8 filter macro
@ (defined earlier in this file) on each register pair, transpose back,
@ then store 8 rows (averaging with dst first for the avg variant).
873 function \type\()_h264_qpel8_v_lowpass_neon
874         vld1.64         {d8},  [r1], r3
875         vld1.64         {d10}, [r1], r3
876         vld1.64         {d12}, [r1], r3
877         vld1.64         {d14}, [r1], r3
878         vld1.64         {d22}, [r1], r3
879         vld1.64         {d24}, [r1], r3
880         vld1.64         {d26}, [r1], r3
881         vld1.64         {d28}, [r1], r3
882         vld1.64         {d9},  [r1], r3
883         vld1.64         {d11}, [r1], r3
884         vld1.64         {d13}, [r1], r3
885         vld1.64         {d15}, [r1], r3
886         vld1.64         {d23}, [r1]             @ 13th row, r1 not advanced
887
888         transpose_8x8   q4,  q5,  q6,  q7,  q11, q12, q13, q14
889         lowpass_8       d8,  d9,  d10, d11, d8,  d10
890         lowpass_8       d12, d13, d14, d15, d12, d14
891         lowpass_8       d22, d23, d24, d25, d22, d24
892         lowpass_8       d26, d27, d28, d29, d26, d28
893         transpose_8x8   d8,  d10, d12, d14, d22, d24, d26, d28
894
895 .ifc \type,avg
@ avg variant: round-halving average of the filtered rows with the
@ pixels already at dst, then rewind r0 for the stores below.
896         vld1.8          {d9},  [r0,:64], r2
897         vrhadd.u8       d8,  d8,  d9
898         vld1.8          {d11}, [r0,:64], r2
899         vrhadd.u8       d10, d10, d11
900         vld1.8          {d13}, [r0,:64], r2
901         vrhadd.u8       d12, d12, d13
902         vld1.8          {d15}, [r0,:64], r2
903         vrhadd.u8       d14, d14, d15
904         vld1.8          {d23}, [r0,:64], r2
905         vrhadd.u8       d22, d22, d23
906         vld1.8          {d25}, [r0,:64], r2
907         vrhadd.u8       d24, d24, d25
908         vld1.8          {d27}, [r0,:64], r2
909         vrhadd.u8       d26, d26, d27
910         vld1.8          {d29}, [r0,:64], r2
911         vrhadd.u8       d28, d28, d29
912         sub             r0,  r0,  r2,  lsl #3   @ back up 8 rows
913 .endif
914
915         vst1.64         {d8},  [r0,:64], r2
916         vst1.64         {d10}, [r0,:64], r2
917         vst1.64         {d12}, [r0,:64], r2
918         vst1.64         {d14}, [r0,:64], r2
919         vst1.64         {d22}, [r0,:64], r2
920         vst1.64         {d24}, [r0,:64], r2
921         vst1.64         {d26}, [r0,:64], r2
922         vst1.64         {d28}, [r0,:64], r2
923
924         bx              lr
925 endfunc
926         .endm
927
928         h264_qpel_v_lowpass put
929         h264_qpel_v_lowpass avg
930
@ Vertical lowpass averaged with a second prediction source (the "_l2"
@ variants: half-pel position needs (vfilter + other_pred + 1) >> 1).
@ In:  r0 = dst (stride r3), r1 = src (stride r3), ip = second source
@ (stride r2).  \type: "put" stores, "avg" additionally averages with dst.
931         .macro h264_qpel_v_lowpass_l2 type
@ 16x16 wrapper: four 8x8 quadrants; r0 strides by r3, ip strides by r2.
932 function \type\()_h264_qpel16_v_lowpass_l2_neon
933         mov             r4,  lr
934         bl              \type\()_h264_qpel8_v_lowpass_l2_neon
935         sub             r1,  r1,  r3, lsl #2    @ rewind src 4 rows (filter overlap)
936         bl              \type\()_h264_qpel8_v_lowpass_l2_neon
937         sub             r0,  r0,  r3, lsl #4    @ dst and second source back to
938         sub             ip,  ip,  r2, lsl #4    @ the top row ...
939         add             r0,  r0,  #8            @ ... right 8-column half
940         add             ip,  ip,  #8
941         sub             r1,  r1,  r3, lsl #4    @ src: up 20 rows, right by 8
942         sub             r1,  r1,  r3, lsl #2
943         add             r1,  r1,  #8
944         bl              \type\()_h264_qpel8_v_lowpass_l2_neon
945         sub             r1,  r1,  r3, lsl #2
946         mov             lr,  r4                 @ restore lr; falls through into
947 endfunc                                         @ the 8x8 body below
948
@ 8x8 body: vertical 13-row load + transpose + lowpass_8 + transpose back
@ (as in the plain v_lowpass), then vrhadd with 8 rows from the second
@ source at ip before the optional dst average and the stores.
949 function \type\()_h264_qpel8_v_lowpass_l2_neon
950         vld1.64         {d8},  [r1], r3
951         vld1.64         {d10}, [r1], r3
952         vld1.64         {d12}, [r1], r3
953         vld1.64         {d14}, [r1], r3
954         vld1.64         {d22}, [r1], r3
955         vld1.64         {d24}, [r1], r3
956         vld1.64         {d26}, [r1], r3
957         vld1.64         {d28}, [r1], r3
958         vld1.64         {d9},  [r1], r3
959         vld1.64         {d11}, [r1], r3
960         vld1.64         {d13}, [r1], r3
961         vld1.64         {d15}, [r1], r3
962         vld1.64         {d23}, [r1]             @ 13th row, r1 not advanced
963
964         transpose_8x8   q4,  q5,  q6,  q7,  q11, q12, q13, q14
965         lowpass_8       d8,  d9,  d10, d11, d8,  d9
966         lowpass_8       d12, d13, d14, d15, d12, d13
967         lowpass_8       d22, d23, d24, d25, d22, d23
968         lowpass_8       d26, d27, d28, d29, d26, d27
969         transpose_8x8   d8,  d9,  d12, d13, d22, d23, d26, d27
970
@ Average with the second prediction source (loads interleaved with the
@ vrhadd ops to hide load latency).
971         vld1.64         {d0},  [ip], r2
972         vld1.64         {d1},  [ip], r2
973         vld1.64         {d2},  [ip], r2
974         vld1.64         {d3},  [ip], r2
975         vld1.64         {d4},  [ip], r2
976         vrhadd.u8       q0,  q0,  q4
977         vld1.64         {d5},  [ip], r2
978         vrhadd.u8       q1,  q1,  q6
979         vld1.64         {d10}, [ip], r2
980         vrhadd.u8       q2,  q2,  q11
981         vld1.64         {d11}, [ip], r2
982         vrhadd.u8       q5,  q5,  q13
983
984 .ifc \type,avg
@ avg variant: additionally average with the pixels already at dst.
985         vld1.8          {d16}, [r0,:64], r3
986         vrhadd.u8       d0,  d0,  d16
987         vld1.8          {d17}, [r0,:64], r3
988         vrhadd.u8       d1,  d1,  d17
989         vld1.8          {d16}, [r0,:64], r3
990         vrhadd.u8       d2,  d2,  d16
991         vld1.8          {d17}, [r0,:64], r3
992         vrhadd.u8       d3,  d3,  d17
993         vld1.8          {d16}, [r0,:64], r3
994         vrhadd.u8       d4,  d4,  d16
995         vld1.8          {d17}, [r0,:64], r3
996         vrhadd.u8       d5,  d5,  d17
997         vld1.8          {d16}, [r0,:64], r3
998         vrhadd.u8       d10, d10, d16
999         vld1.8          {d17}, [r0,:64], r3
1000         vrhadd.u8       d11, d11, d17
1001         sub             r0,  r0,  r3,  lsl #3  @ back up 8 rows
1002 .endif
1003
1004         vst1.64         {d0},  [r0,:64], r3
1005         vst1.64         {d1},  [r0,:64], r3
1006         vst1.64         {d2},  [r0,:64], r3
1007         vst1.64         {d3},  [r0,:64], r3
1008         vst1.64         {d4},  [r0,:64], r3
1009         vst1.64         {d5},  [r0,:64], r3
1010         vst1.64         {d10}, [r0,:64], r3
1011         vst1.64         {d11}, [r0,:64], r3
1012
1013         bx              lr
1014 endfunc
1015         .endm
1016
1017         h264_qpel_v_lowpass_l2 put
1018         h264_qpel_v_lowpass_l2 avg
1019
@ Shared first stage of the 8x8 horizontal+vertical (center half-pel)
@ lowpass: horizontally filters 13 source rows to 16-bit precision into
@ the scratch buffer at r4, then applies the vertical filter on top.
@ In:  r1 = src, r3 = src stride, r4 = 16-byte-aligned scratch buffer.
@ Out: the filtered 8x8 block left in d12-d15 / d8-d11 (row order used
@ by the callers' stores).  Clobbers ip, q0-q15; r4 is advanced.
1020 function put_h264_qpel8_hv_lowpass_neon_top
1021         lowpass_const   ip              @ load filter constants (macro, earlier in file)
1022         mov             ip,  #12        @ row counter: 12 rows in the loop
@ Horizontal pass: 2 rows per iteration, kept at 16 bits (narrow=0),
@ written sequentially to the scratch buffer.
1023 1:      vld1.64         {d0, d1},  [r1], r3
1024         vld1.64         {d16,d17}, [r1], r3
1025         subs            ip,  ip,  #2
1026         lowpass_8       d0,  d1,  d16, d17, q11, q12, narrow=0
1027         vst1.64         {d22-d25}, [r4,:128]!
1028         bne             1b
1029
@ 13th row: filtered but kept in q12 rather than stored.
1030         vld1.64         {d0, d1},  [r1]
1031         lowpass_8_1     d0,  d1,  q12, narrow=0
1032
@ Walk the scratch buffer backwards (stride -16) reloading the 12 stored
@ rows into q0-q10/q15.
1033         mov             ip,  #-16
1034         add             r4,  r4,  ip
1035         vld1.64         {d30,d31}, [r4,:128], ip
1036         vld1.64         {d20,d21}, [r4,:128], ip
1037         vld1.64         {d18,d19}, [r4,:128], ip
1038         vld1.64         {d16,d17}, [r4,:128], ip
1039         vld1.64         {d14,d15}, [r4,:128], ip
1040         vld1.64         {d12,d13}, [r4,:128], ip
1041         vld1.64         {d10,d11}, [r4,:128], ip
1042         vld1.64         {d8, d9},  [r4,:128], ip
1043         vld1.64         {d6, d7},  [r4,:128], ip
1044         vld1.64         {d4, d5},  [r4,:128], ip
1045         vld1.64         {d2, d3},  [r4,:128], ip
1046         vld1.64         {d0, d1},  [r4,:128]
1047
@ Transpose the 16-bit data in two register groups (swap4 +
@ transpose16_4x4, defined at the top of this file) so the vertical
@ filter below can run along register rows.
1048         swap4           d1,  d3,  d5,  d7,  d8,  d10, d12, d14
1049         transpose16_4x4 q0,  q1,  q2,  q3,  q4,  q5,  q6,  q7
1050
1051         swap4           d17, d19, d21, d31, d24, d26, d28, d22
1052         transpose16_4x4 q8,  q9,  q10, q15, q12, q13, q14, q11
1053
@ Spill half of the transposed rows back to the scratch buffer; they are
@ reloaded two at a time below once registers free up.
1054         vst1.64         {d30,d31}, [r4,:128]!
1055         vst1.64         {d6, d7},  [r4,:128]!
1056         vst1.64         {d20,d21}, [r4,:128]!
1057         vst1.64         {d4, d5},  [r4,:128]!
1058         vst1.64         {d18,d19}, [r4,:128]!
1059         vst1.64         {d2, d3},  [r4,:128]!
1060         vst1.64         {d16,d17}, [r4,:128]!
1061         vst1.64         {d0, d1},  [r4,:128]
1062
@ Vertical pass on the rows still in registers (lowpass_8.16 = 16-bit
@ input variant of the filter; results narrow into d8-d11).
1063         lowpass_8.16    q4,  q12, d8,  d9,  d24, d25, d8
1064         lowpass_8.16    q5,  q13, d10, d11, d26, d27, d9
1065         lowpass_8.16    q6,  q14, d12, d13, d28, d29, d10
1066         lowpass_8.16    q7,  q11, d14, d15, d22, d23, d11
1067
@ Vertical pass on the spilled rows, reloaded backwards (ip = -16);
@ results narrow into d12-d15.
1068         vld1.64         {d16,d17}, [r4,:128], ip
1069         vld1.64         {d30,d31}, [r4,:128], ip
1070         lowpass_8.16    q8,  q15, d16, d17, d30, d31, d12
1071         vld1.64         {d16,d17}, [r4,:128], ip
1072         vld1.64         {d30,d31}, [r4,:128], ip
1073         lowpass_8.16    q8,  q15, d16, d17, d30, d31, d13
1074         vld1.64         {d16,d17}, [r4,:128], ip
1075         vld1.64         {d30,d31}, [r4,:128], ip
1076         lowpass_8.16    q8,  q15, d16, d17, d30, d31, d14
1077         vld1.64         {d16,d17}, [r4,:128], ip
1078         vld1.64         {d30,d31}, [r4,:128]
1079         lowpass_8.16    q8,  q15, d16, d17, d30, d31, d15
1080
@ Final transpose back to pixel row order for the caller's stores.
1081         transpose_8x8   d12, d13, d14, d15, d8,  d9,  d10, d11
1082
1083         bx              lr
1084 endfunc
1085
@ 8x8 center half-pel (horizontal+vertical lowpass).  Runs the shared
@ "_top" stage, then stores the result rows d12-d15/d8-d11 to dst.
@ In:  r0 = dst, r2 = dst stride, plus the _top inputs (r1, r3, r4).
@ Uses r10 to preserve lr across the bl.
1086         .macro h264_qpel8_hv_lowpass type
1087 function \type\()_h264_qpel8_hv_lowpass_neon
1088         mov             r10, lr
1089         bl              put_h264_qpel8_hv_lowpass_neon_top
1090 .ifc \type,avg
@ avg variant: round-halving average with the pixels already at dst.
1091         vld1.8          {d0},      [r0,:64], r2
1092         vrhadd.u8       d12, d12, d0
1093         vld1.8          {d1},      [r0,:64], r2
1094         vrhadd.u8       d13, d13, d1
1095         vld1.8          {d2},      [r0,:64], r2
1096         vrhadd.u8       d14, d14, d2
1097         vld1.8          {d3},      [r0,:64], r2
1098         vrhadd.u8       d15, d15, d3
1099         vld1.8          {d4},      [r0,:64], r2
1100         vrhadd.u8       d8,  d8,  d4
1101         vld1.8          {d5},      [r0,:64], r2
1102         vrhadd.u8       d9,  d9,  d5
1103         vld1.8          {d6},      [r0,:64], r2
1104         vrhadd.u8       d10, d10, d6
1105         vld1.8          {d7},      [r0,:64], r2
1106         vrhadd.u8       d11, d11, d7
1107         sub             r0,  r0,  r2,  lsl #3  @ back up 8 rows
1108 .endif
1109
1110         vst1.64         {d12},     [r0,:64], r2
1111         vst1.64         {d13},     [r0,:64], r2
1112         vst1.64         {d14},     [r0,:64], r2
1113         vst1.64         {d15},     [r0,:64], r2
1114         vst1.64         {d8},      [r0,:64], r2
1115         vst1.64         {d9},      [r0,:64], r2
1116         vst1.64         {d10},     [r0,:64], r2
1117         vst1.64         {d11},     [r0,:64], r2
1118
1119         mov             lr,  r10                @ restore caller's return address
1120         bx              lr
1121 endfunc
1122         .endm
1123
1124         h264_qpel8_hv_lowpass put
1125         h264_qpel8_hv_lowpass avg
1126
@ 8x8 horizontal+vertical lowpass averaged with a second prediction
@ source (quarter-pel positions neighbouring the center).
@ In:  r0 = dst (stride r3), r2 = second source (packed, 16-byte rows),
@ plus the _top inputs (r1, r3, r4).  r10 preserves lr across the bl.
1127         .macro h264_qpel8_hv_lowpass_l2 type
1128 function \type\()_h264_qpel8_hv_lowpass_l2_neon
1129         mov             r10, lr
1130         bl              put_h264_qpel8_hv_lowpass_neon_top
1131
@ Average the hv-filtered rows (d12-d15/d8-d11, as q6,q7,q4,q5) with the
@ second prediction source, loads interleaved to hide latency.
1132         vld1.64         {d0, d1},  [r2,:128]!
1133         vld1.64         {d2, d3},  [r2,:128]!
1134         vrhadd.u8       q0,  q0,  q6
1135         vld1.64         {d4, d5},  [r2,:128]!
1136         vrhadd.u8       q1,  q1,  q7
1137         vld1.64         {d6, d7},  [r2,:128]!
1138         vrhadd.u8       q2,  q2,  q4
1139         vrhadd.u8       q3,  q3,  q5
1140 .ifc \type,avg
@ avg variant: additionally average with the pixels already at dst.
1141         vld1.8          {d16},     [r0,:64], r3
1142         vrhadd.u8       d0,  d0,  d16
1143         vld1.8          {d17},     [r0,:64], r3
1144         vrhadd.u8       d1,  d1,  d17
1145         vld1.8          {d18},     [r0,:64], r3
1146         vrhadd.u8       d2,  d2,  d18
1147         vld1.8          {d19},     [r0,:64], r3
1148         vrhadd.u8       d3,  d3,  d19
1149         vld1.8          {d20},     [r0,:64], r3
1150         vrhadd.u8       d4,  d4,  d20
1151         vld1.8          {d21},     [r0,:64], r3
1152         vrhadd.u8       d5,  d5,  d21
1153         vld1.8          {d22},     [r0,:64], r3
1154         vrhadd.u8       d6,  d6,  d22
1155         vld1.8          {d23},     [r0,:64], r3
1156         vrhadd.u8       d7,  d7,  d23
1157         sub             r0,  r0,  r3,  lsl #3  @ back up 8 rows
1158 .endif
1159         vst1.64         {d0},      [r0,:64], r3
1160         vst1.64         {d1},      [r0,:64], r3
1161         vst1.64         {d2},      [r0,:64], r3
1162         vst1.64         {d3},      [r0,:64], r3
1163         vst1.64         {d4},      [r0,:64], r3
1164         vst1.64         {d5},      [r0,:64], r3
1165         vst1.64         {d6},      [r0,:64], r3
1166         vst1.64         {d7},      [r0,:64], r3
1167
1168         mov             lr,  r10                @ restore caller's return address
1169         bx              lr
1170 endfunc
1171         .endm
1172
1173         h264_qpel8_hv_lowpass_l2 put
1174         h264_qpel8_hv_lowpass_l2 avg
1175
@ 16x16 horizontal+vertical lowpass, built from four 8x8 quadrant calls.
@ r9 preserves lr across the intermediate bl's; the last quadrant is a
@ tail call (b) using the restored lr.
1176         .macro h264_qpel16_hv type
1177 function \type\()_h264_qpel16_hv_lowpass_neon
1178         mov             r9,  lr
1179         bl              \type\()_h264_qpel8_hv_lowpass_neon
1180         sub             r1,  r1,  r3, lsl #2    @ rewind src 4 rows (filter overlap)
1181         bl              \type\()_h264_qpel8_hv_lowpass_neon
1182         sub             r1,  r1,  r3, lsl #4    @ src: up 20 rows, right 8 columns
1183         sub             r1,  r1,  r3, lsl #2
1184         add             r1,  r1,  #8
1185         sub             r0,  r0,  r2, lsl #4    @ dst: top row, right 8 columns
1186         add             r0,  r0,  #8
1187         bl              \type\()_h264_qpel8_hv_lowpass_neon
1188         sub             r1,  r1,  r3, lsl #2
1189         mov             lr,  r9
1190         b               \type\()_h264_qpel8_hv_lowpass_neon
1191 endfunc
1192
@ 16x16 hv lowpass averaged with a second (packed) prediction source
@ located 256 bytes below the scratch buffer r4; same quadrant layout,
@ but dst strides by r3 here.
1193 function \type\()_h264_qpel16_hv_lowpass_l2_neon
1194         mov             r9,  lr
1195         sub             r2,  r4,  #256          @ r2 = packed second source
1196         bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
1197         sub             r1,  r1,  r3, lsl #2
1198         bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
1199         sub             r1,  r1,  r3, lsl #4
1200         sub             r1,  r1,  r3, lsl #2
1201         add             r1,  r1,  #8
1202         sub             r0,  r0,  r3, lsl #4
1203         add             r0,  r0,  #8
1204         bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
1205         sub             r1,  r1,  r3, lsl #2
1206         mov             lr,  r9
1207         b               \type\()_h264_qpel8_hv_lowpass_l2_neon
1208 endfunc
1209         .endm
1210
1211         h264_qpel16_hv put
1212         h264_qpel16_hv avg
1213
@ Exported 8x8 quarter-pel MC entry points, one per (x,y) quarter-pel
@ position "mcXY".  C signature: void func(uint8_t *dst /*r0*/,
@ const uint8_t *src /*r1*/, int stride /*r2*/).
@ They set up registers and dispatch to the lowpass workers above.
@ Lines prefixed A / T are assembled only for ARM / Thumb mode
@ respectively (macros from asm.S).  "ldrd r0, [r11], #8" reloads the
@ r0/r1 pair saved by the entry push.
1214         .macro h264_qpel8 type
@ mc10: horizontal half-pel averaged with the full-pel pixels (r3 = src).
1215 function ff_\type\()_h264_qpel8_mc10_neon, export=1
1216         lowpass_const   r3
1217         mov             r3,  r1
1218         sub             r1,  r1,  #2            @ back up 2 columns (filter taps)
1219         mov             ip,  #8                 @ 8 rows
1220         b               \type\()_h264_qpel8_h_lowpass_l2_neon
1221 endfunc
1222
@ mc20: pure horizontal half-pel.
1223 function ff_\type\()_h264_qpel8_mc20_neon, export=1
1224         lowpass_const   r3
1225         sub             r1,  r1,  #2
1226         mov             r3,  r2
1227         mov             ip,  #8
1228         b               \type\()_h264_qpel8_h_lowpass_neon
1229 endfunc
1230
@ mc30: horizontal half-pel averaged with the pixels at src+1.
1231 function ff_\type\()_h264_qpel8_mc30_neon, export=1
1232         lowpass_const   r3
1233         add             r3,  r1,  #1
1234         sub             r1,  r1,  #2
1235         mov             ip,  #8
1236         b               \type\()_h264_qpel8_h_lowpass_l2_neon
1237 endfunc
1238
@ mc01: vertical half-pel averaged with full-pel rows (ip = second
@ source; mc03 branches here with ip = src + stride).
1239 function ff_\type\()_h264_qpel8_mc01_neon, export=1
1240         push            {lr}
1241         mov             ip,  r1
1242 \type\()_h264_qpel8_mc01:
1243         lowpass_const   r3
1244         mov             r3,  r2
1245         sub             r1,  r1,  r2, lsl #1    @ back up 2 rows (filter taps)
1246         vpush           {d8-d15}                @ callee-saved NEON regs
1247         bl              \type\()_h264_qpel8_v_lowpass_l2_neon
1248         vpop            {d8-d15}
1249         pop             {pc}
1250 endfunc
1251
@ mc11 (also target of mc31/mc13/mc33): horizontal half-pel into a
@ 64-byte aligned stack buffer, then vertical half-pel averaged with it.
1252 function ff_\type\()_h264_qpel8_mc11_neon, export=1
1253         push            {r0, r1, r11, lr}
1254 \type\()_h264_qpel8_mc11:
1255         lowpass_const   r3
1256         mov             r11, sp                 @ r11 = frame pointer / saved sp
1257 A       bic             sp,  sp,  #15           @ 16-byte align the stack
1258 T       bic             r0,  r11, #15
1259 T       mov             sp,  r0
1260         sub             sp,  sp,  #64           @ 8x8 temp buffer
1261         mov             r0,  sp
1262         sub             r1,  r1,  #2
1263         mov             r3,  #8
1264         mov             ip,  #8
1265         vpush           {d8-d15}
1266         bl              put_h264_qpel8_h_lowpass_neon
1267         ldrd            r0,  [r11], #8          @ reload saved dst/src (r0, r1)
1268         mov             r3,  r2
1269         add             ip,  sp,  #64           @ ip = temp buffer (above vpush area)
1270         sub             r1,  r1,  r2, lsl #1
1271         mov             r2,  #8                 @ temp buffer stride
1272         bl              \type\()_h264_qpel8_v_lowpass_l2_neon
1273         vpop            {d8-d15}
1274         mov             sp,  r11
1275         pop             {r11, pc}
1276 endfunc
1277
@ mc21 (also target of mc23): horizontal half-pel into a stack buffer,
@ then hv lowpass averaged with it (buffer also holds hv scratch).
1278 function ff_\type\()_h264_qpel8_mc21_neon, export=1
1279         push            {r0, r1, r4, r10, r11, lr}
1280 \type\()_h264_qpel8_mc21:
1281         lowpass_const   r3
1282         mov             r11, sp
1283 A       bic             sp,  sp,  #15
1284 T       bic             r0,  r11, #15
1285 T       mov             sp,  r0
1286         sub             sp,  sp,  #(8*8+16*12)  @ 8x8 temp + 16-bit hv scratch
1287         sub             r1,  r1,  #2
1288         mov             r3,  #8
1289         mov             r0,  sp
1290         mov             ip,  #8
1291         vpush           {d8-d15}
1292         bl              put_h264_qpel8_h_lowpass_neon
1293         mov             r4,  r0                 @ r4 = end of h-filtered buffer
1294         ldrd            r0,  [r11], #8          @ reload saved dst/src
1295         sub             r1,  r1,  r2, lsl #1
1296         sub             r1,  r1,  #2
1297         mov             r3,  r2
1298         sub             r2,  r4,  #64           @ r2 = start of h-filtered buffer
1299         bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
1300         vpop            {d8-d15}
1301         mov             sp,  r11
1302         pop             {r4, r10, r11, pc}
1303 endfunc
1304
@ mc31: same as mc11 but the saved src (for the average) is src+1.
1305 function ff_\type\()_h264_qpel8_mc31_neon, export=1
1306         add             r1,  r1,  #1
1307         push            {r0, r1, r11, lr}
1308         sub             r1,  r1,  #1
1309         b               \type\()_h264_qpel8_mc11
1310 endfunc
1311
@ mc02: pure vertical half-pel.
1312 function ff_\type\()_h264_qpel8_mc02_neon, export=1
1313         push            {lr}
1314         lowpass_const   r3
1315         sub             r1,  r1,  r2, lsl #1
1316         mov             r3,  r2
1317         vpush           {d8-d15}
1318         bl              \type\()_h264_qpel8_v_lowpass_neon
1319         vpop            {d8-d15}
1320         pop             {pc}
1321 endfunc
1322
@ mc12 (also target of mc32): vertical half-pel into a stack buffer,
@ then hv lowpass averaged with it.
1323 function ff_\type\()_h264_qpel8_mc12_neon, export=1
1324         push            {r0, r1, r4, r10, r11, lr}
1325 \type\()_h264_qpel8_mc12:
1326         lowpass_const   r3
1327         mov             r11, sp
1328 A       bic             sp,  sp,  #15
1329 T       bic             r0,  r11, #15
1330 T       mov             sp,  r0
1331         sub             sp,  sp,  #(8*8+16*12)  @ 8x8 temp + 16-bit hv scratch
1332         sub             r1,  r1,  r2, lsl #1
1333         mov             r3,  r2
1334         mov             r2,  #8
1335         mov             r0,  sp
1336         vpush           {d8-d15}
1337         bl              put_h264_qpel8_v_lowpass_neon
1338         mov             r4,  r0                 @ r4 = end of v-filtered buffer
1339         ldrd            r0,  [r11], #8          @ reload saved dst/src
1340         sub             r1,  r1,  r3, lsl #1
1341         sub             r1,  r1,  #2
1342         sub             r2,  r4,  #64           @ r2 = start of v-filtered buffer
1343         bl              \type\()_h264_qpel8_hv_lowpass_l2_neon
1344         vpop            {d8-d15}
1345         mov             sp,  r11
1346         pop             {r4, r10, r11, pc}
1347 endfunc
1348
@ mc22: pure center half-pel (hv lowpass, scratch buffer in r4).
1349 function ff_\type\()_h264_qpel8_mc22_neon, export=1
1350         push            {r4, r10, r11, lr}
1351         mov             r11, sp
1352 A       bic             sp,  sp,  #15
1353 T       bic             r4,  r11, #15
1354 T       mov             sp,  r4
1355         sub             r1,  r1,  r2, lsl #1
1356         sub             r1,  r1,  #2
1357         mov             r3,  r2
1358         sub             sp,  sp,  #(16*12)      @ 16-bit hv scratch buffer
1359         mov             r4,  sp
1360         vpush           {d8-d15}
1361         bl              \type\()_h264_qpel8_hv_lowpass_neon
1362         vpop            {d8-d15}
1363         mov             sp,  r11
1364         pop             {r4, r10, r11, pc}
1365 endfunc
1366
@ mc32: mc12 with the vertical source shifted right by one column.
1367 function ff_\type\()_h264_qpel8_mc32_neon, export=1
1368         push            {r0, r1, r4, r10, r11, lr}
1369         add             r1,  r1,  #1
1370         b               \type\()_h264_qpel8_mc12
1371 endfunc
1372
@ mc03: mc01 averaging with the row below (ip = src + stride).
1373 function ff_\type\()_h264_qpel8_mc03_neon, export=1
1374         push            {lr}
1375         add             ip,  r1,  r2
1376         b               \type\()_h264_qpel8_mc01
1377 endfunc
1378
@ mc13: mc11 with the horizontal pass taken one row down.
1379 function ff_\type\()_h264_qpel8_mc13_neon, export=1
1380         push            {r0, r1, r11, lr}
1381         add             r1,  r1,  r2
1382         b               \type\()_h264_qpel8_mc11
1383 endfunc
1384
@ mc23: mc21 with the horizontal pass taken one row down.
1385 function ff_\type\()_h264_qpel8_mc23_neon, export=1
1386         push            {r0, r1, r4, r10, r11, lr}
1387         add             r1,  r1,  r2
1388         b               \type\()_h264_qpel8_mc21
1389 endfunc
1390
@ mc33: mc11 with the saved average source at src+1 and the horizontal
@ pass taken one row down.
1391 function ff_\type\()_h264_qpel8_mc33_neon, export=1
1392         add             r1,  r1,  #1
1393         push            {r0, r1, r11, lr}
1394         add             r1,  r1,  r2
1395         sub             r1,  r1,  #1
1396         b               \type\()_h264_qpel8_mc11
1397 endfunc
1398         .endm
1399
1400         h264_qpel8 put
1401         h264_qpel8 avg
1402
@ Exported 16x16 quarter-pel MC entry points, mirroring the 8x8 set
@ above but with 16-wide workers and larger stack temporaries.
@ C signature: void func(uint8_t *dst /*r0*/, const uint8_t *src /*r1*/,
@ int stride /*r2*/).  A / T prefixed lines assemble only for ARM /
@ Thumb mode respectively (macros from asm.S).
1403         .macro h264_qpel16 type
@ mc10: horizontal half-pel averaged with full-pel pixels (r3 = src).
1404 function ff_\type\()_h264_qpel16_mc10_neon, export=1
1405         lowpass_const   r3
1406         mov             r3,  r1
1407         sub             r1,  r1,  #2            @ back up 2 columns (filter taps)
1408         b               \type\()_h264_qpel16_h_lowpass_l2_neon
1409 endfunc
1410
@ mc20: pure horizontal half-pel.
1411 function ff_\type\()_h264_qpel16_mc20_neon, export=1
1412         lowpass_const   r3
1413         sub             r1,  r1,  #2
1414         mov             r3,  r2
1415         b               \type\()_h264_qpel16_h_lowpass_neon
1416 endfunc
1417
@ mc30: horizontal half-pel averaged with the pixels at src+1.
1418 function ff_\type\()_h264_qpel16_mc30_neon, export=1
1419         lowpass_const   r3
1420         add             r3,  r1,  #1
1421         sub             r1,  r1,  #2
1422         b               \type\()_h264_qpel16_h_lowpass_l2_neon
1423 endfunc
1424
@ mc01: vertical half-pel averaged with full-pel rows (ip = second
@ source; mc03 branches here with ip = src + stride).
1425 function ff_\type\()_h264_qpel16_mc01_neon, export=1
1426         push            {r4, lr}
1427         mov             ip,  r1
1428 \type\()_h264_qpel16_mc01:
1429         lowpass_const   r3
1430         mov             r3,  r2
1431         sub             r1,  r1,  r2, lsl #1    @ back up 2 rows (filter taps)
1432         vpush           {d8-d15}                @ callee-saved NEON regs
1433         bl              \type\()_h264_qpel16_v_lowpass_l2_neon
1434         vpop            {d8-d15}
1435         pop             {r4, pc}
1436 endfunc
1437
@ mc11 (also target of mc31/mc13/mc33): horizontal half-pel into a
@ 256-byte stack buffer, then vertical half-pel averaged with it.
1438 function ff_\type\()_h264_qpel16_mc11_neon, export=1
1439         push            {r0, r1, r4, r11, lr}
1440 \type\()_h264_qpel16_mc11:
1441         lowpass_const   r3
1442         mov             r11, sp                 @ r11 = frame pointer / saved sp
1443 A       bic             sp,  sp,  #15           @ 16-byte align the stack
1444 T       bic             r0,  r11, #15
1445 T       mov             sp,  r0
1446         sub             sp,  sp,  #256          @ 16x16 temp buffer
1447         mov             r0,  sp
1448         sub             r1,  r1,  #2
1449         mov             r3,  #16
1450         vpush           {d8-d15}
1451         bl              put_h264_qpel16_h_lowpass_neon
1452         ldrd            r0,  [r11], #8          @ reload saved dst/src (r0, r1)
1453         mov             r3,  r2
1454         add             ip,  sp,  #64           @ ip = temp buffer (above vpush area)
1455         sub             r1,  r1,  r2, lsl #1
1456         mov             r2,  #16                @ temp buffer stride
1457         bl              \type\()_h264_qpel16_v_lowpass_l2_neon
1458         vpop            {d8-d15}
1459         mov             sp,  r11
1460         pop             {r4, r11, pc}
1461 endfunc
1462
@ mc21 (also target of mc23): packed horizontal half-pel into a stack
@ buffer, then 16x16 hv lowpass averaged with it.
1463 function ff_\type\()_h264_qpel16_mc21_neon, export=1
1464         push            {r0, r1, r4-r5, r9-r11, lr}
1465 \type\()_h264_qpel16_mc21:
1466         lowpass_const   r3
1467         mov             r11, sp
1468 A       bic             sp,  sp,  #15
1469 T       bic             r0,  r11, #15
1470 T       mov             sp,  r0
1471         sub             sp,  sp,  #(16*16+16*12)        @ 16x16 temp + hv scratch
1472         sub             r1,  r1,  #2
1473         mov             r0,  sp
1474         vpush           {d8-d15}
1475         bl              put_h264_qpel16_h_lowpass_neon_packed
1476         mov             r4,  r0                 @ r4 = scratch for hv lowpass
1477         ldrd            r0,  [r11], #8          @ reload saved dst/src
1478         sub             r1,  r1,  r2, lsl #1
1479         sub             r1,  r1,  #2
1480         mov             r3,  r2
1481         bl              \type\()_h264_qpel16_hv_lowpass_l2_neon
1482         vpop            {d8-d15}
1483         mov             sp,  r11
1484         pop             {r4-r5, r9-r11, pc}
1485 endfunc
1486
@ mc31: same as mc11 but the saved average source is src+1.
1487 function ff_\type\()_h264_qpel16_mc31_neon, export=1
1488         add             r1,  r1,  #1
1489         push            {r0, r1, r4, r11, lr}
1490         sub             r1,  r1,  #1
1491         b               \type\()_h264_qpel16_mc11
1492 endfunc
1493
@ mc02: pure vertical half-pel.
1494 function ff_\type\()_h264_qpel16_mc02_neon, export=1
1495         push            {r4, lr}
1496         lowpass_const   r3
1497         sub             r1,  r1,  r2, lsl #1
1498         mov             r3,  r2
1499         vpush           {d8-d15}
1500         bl              \type\()_h264_qpel16_v_lowpass_neon
1501         vpop            {d8-d15}
1502         pop             {r4, pc}
1503 endfunc
1504
@ mc12 (also target of mc32): packed vertical half-pel into a stack
@ buffer, then 16x16 hv lowpass averaged with it.
1505 function ff_\type\()_h264_qpel16_mc12_neon, export=1
1506         push            {r0, r1, r4-r5, r9-r11, lr}
1507 \type\()_h264_qpel16_mc12:
1508         lowpass_const   r3
1509         mov             r11, sp
1510 A       bic             sp,  sp,  #15
1511 T       bic             r0,  r11, #15
1512 T       mov             sp,  r0
1513         sub             sp,  sp,  #(16*16+16*12)        @ 16x16 temp + hv scratch
1514         sub             r1,  r1,  r2, lsl #1
1515         mov             r0,  sp
1516         mov             r3,  r2
1517         vpush           {d8-d15}
1518         bl              put_h264_qpel16_v_lowpass_neon_packed
1519         mov             r4,  r0                 @ r4 = scratch for hv lowpass
1520         ldrd            r0,  [r11], #8          @ reload saved dst/src
1521         sub             r1,  r1,  r3, lsl #1
1522         sub             r1,  r1,  #2
1523         mov             r2,  r3
1524         bl              \type\()_h264_qpel16_hv_lowpass_l2_neon
1525         vpop            {d8-d15}
1526         mov             sp,  r11
1527         pop             {r4-r5, r9-r11, pc}
1528 endfunc
1529
@ mc22: pure center half-pel (hv lowpass, scratch buffer in r4).
1530 function ff_\type\()_h264_qpel16_mc22_neon, export=1
1531         push            {r4, r9-r11, lr}
1532         lowpass_const   r3
1533         mov             r11, sp
1534 A       bic             sp,  sp,  #15
1535 T       bic             r4,  r11, #15
1536 T       mov             sp,  r4
1537         sub             r1,  r1,  r2, lsl #1
1538         sub             r1,  r1,  #2
1539         mov             r3,  r2
1540         sub             sp,  sp,  #(16*12)      @ 16-bit hv scratch buffer
1541         mov             r4,  sp
1542         vpush           {d8-d15}
1543         bl              \type\()_h264_qpel16_hv_lowpass_neon
1544         vpop            {d8-d15}
1545         mov             sp,  r11
1546         pop             {r4, r9-r11, pc}
1547 endfunc
1548
@ mc32: mc12 with the vertical source shifted right by one column.
1549 function ff_\type\()_h264_qpel16_mc32_neon, export=1
1550         push            {r0, r1, r4-r5, r9-r11, lr}
1551         add             r1,  r1,  #1
1552         b               \type\()_h264_qpel16_mc12
1553 endfunc
1554
@ mc03: mc01 averaging with the row below (ip = src + stride).
1555 function ff_\type\()_h264_qpel16_mc03_neon, export=1
1556         push            {r4, lr}
1557         add             ip,  r1,  r2
1558         b               \type\()_h264_qpel16_mc01
1559 endfunc
1560
@ mc13: mc11 with the horizontal pass taken one row down.
1561 function ff_\type\()_h264_qpel16_mc13_neon, export=1
1562         push            {r0, r1, r4, r11, lr}
1563         add             r1,  r1,  r2
1564         b               \type\()_h264_qpel16_mc11
1565 endfunc
1566
@ mc23: mc21 with the horizontal pass taken one row down.
1567 function ff_\type\()_h264_qpel16_mc23_neon, export=1
1568         push            {r0, r1, r4-r5, r9-r11, lr}
1569         add             r1,  r1,  r2
1570         b               \type\()_h264_qpel16_mc21
1571 endfunc
1572
@ mc33: mc11 with the saved average source at src+1 and the horizontal
@ pass taken one row down.
1573 function ff_\type\()_h264_qpel16_mc33_neon, export=1
1574         add             r1,  r1,  #1
1575         push            {r0, r1, r4, r11, lr}
1576         add             r1,  r1,  r2
1577         sub             r1,  r1,  #1
1578         b               \type\()_h264_qpel16_mc11
1579 endfunc
1580         .endm
1581
1582         h264_qpel16 put
1583         h264_qpel16 avg
1584
1585 @ Biweighted prediction
1586
@ 16-wide biweighted prediction body, expanded by biweight_func with
@ \macs/\macd = vmlal.u8 or vmlsl.u8 depending on the weights' signs.
@ In (set up by biweight_func, below): r0/r1 = the two sources,
@ r2 = stride, r3 = height, r4/r5 = |weights|, r6 = dst,
@ q8 = rounding offset, q9 = bitwise-NOT of log2_denom (negative shift
@ for vshl.s16, i.e. an arithmetic right shift).
@ Processes 2 rows of 16 pixels per iteration.
1587         .macro  biweight_16 macs, macd
1588         vdup.8          d0,  r4                 @ d0 = weight for src0
1589         vdup.8          d1,  r5                 @ d1 = weight for src1
1590         vmov            q2,  q8                 @ init accumulators with offset
1591         vmov            q3,  q8
1592 1:      subs            r3,  r3,  #2            @ two rows per iteration
1593         vld1.8          {d20-d21},[r0,:128], r2
1594         \macd           q2,  d0,  d20           @ acc +/- w0 * src0
1595         pld             [r0]
1596         \macd           q3,  d0,  d21
1597         vld1.8          {d22-d23},[r1,:128], r2
1598         \macs           q2,  d1,  d22           @ acc +/- w1 * src1
1599         pld             [r1]
1600         \macs           q3,  d1,  d23
1601         vmov            q12, q8                 @ second row's accumulators
1602         vld1.8          {d28-d29},[r0,:128], r2
1603         vmov            q13, q8
1604         \macd           q12, d0,  d28
1605         pld             [r0]
1606         \macd           q13, d0,  d29
1607         vld1.8          {d30-d31},[r1,:128], r2
1608         \macs           q12, d1,  d30
1609         pld             [r1]
1610         \macs           q13, d1,  d31
1611         vshl.s16        q2,  q2,  q9            @ >> (log2_denom+1), via neg shift
1612         vshl.s16        q3,  q3,  q9
1613         vqmovun.s16     d4,  q2                 @ saturate/narrow to u8
1614         vqmovun.s16     d5,  q3
1615         vshl.s16        q12, q12, q9
1616         vshl.s16        q13, q13, q9
1617         vqmovun.s16     d24, q12
1618         vqmovun.s16     d25, q13
1619         vmov            q3,  q8                 @ re-init for next iteration
1620         vst1.8          {d4- d5}, [r6,:128], r2
1621         vmov            q2,  q8
1622         vst1.8          {d24-d25},[r6,:128], r2
1623         bne             1b
1624         pop             {r4-r6, pc}
1625         .endm
1626
@ 8-wide biweighted prediction body; same register contract as
@ biweight_16 above (r0/r1 = sources, r2 = stride, r3 = height,
@ r4/r5 = |weights|, r6 = dst, q8 = offset, q9 = negative shift).
@ Processes 2 rows of 8 pixels per iteration.
1627         .macro  biweight_8 macs, macd
1628         vdup.8          d0,  r4                 @ d0 = weight for src0
1629         vdup.8          d1,  r5                 @ d1 = weight for src1
1630         vmov            q1,  q8                 @ init accumulators with offset
1631         vmov            q10, q8
1632 1:      subs            r3,  r3,  #2            @ two rows per iteration
1633         vld1.8          {d4},[r0,:64], r2
1634         \macd           q1,  d0,  d4            @ acc +/- w0 * src0
1635         pld             [r0]
1636         vld1.8          {d5},[r1,:64], r2
1637         \macs           q1,  d1,  d5            @ acc +/- w1 * src1
1638         pld             [r1]
1639         vld1.8          {d6},[r0,:64], r2
1640         \macd           q10, d0,  d6
1641         pld             [r0]
1642         vld1.8          {d7},[r1,:64], r2
1643         \macs           q10, d1,  d7
1644         pld             [r1]
1645         vshl.s16        q1,  q1,  q9            @ arithmetic right shift via q9
1646         vqmovun.s16     d2,  q1                 @ saturate/narrow to u8
1647         vshl.s16        q10, q10, q9
1648         vqmovun.s16     d4,  q10
1649         vmov            q10, q8                 @ re-init for next iteration
1650         vst1.8          {d2},[r6,:64], r2
1651         vmov            q1,  q8
1652         vst1.8          {d4},[r6,:64], r2
1653         bne             1b
1654         pop             {r4-r6, pc}
1655         .endm
1656
@ 4-wide biweighted prediction body; same register contract as the
@ wider variants.  Packs two 4-pixel rows into each d register and
@ processes 4 rows per iteration, with a 2-row tail at label 2: when
@ the height is not a multiple of 4 (blt taken after the first pair).
1657         .macro  biweight_4 macs, macd
1658         vdup.8          d0,  r4                 @ d0 = weight for src0
1659         vdup.8          d1,  r5                 @ d1 = weight for src1
1660         vmov            q1,  q8                 @ init accumulators with offset
1661         vmov            q10, q8
1662 1:      subs            r3,  r3,  #4            @ four rows per iteration
1663         vld1.32         {d4[0]},[r0,:32], r2    @ two rows of src0 -> d4
1664         vld1.32         {d4[1]},[r0,:32], r2
1665         \macd           q1,  d0,  d4            @ acc +/- w0 * src0
1666         pld             [r0]
1667         vld1.32         {d5[0]},[r1,:32], r2    @ two rows of src1 -> d5
1668         vld1.32         {d5[1]},[r1,:32], r2
1669         \macs           q1,  d1,  d5            @ acc +/- w1 * src1
1670         pld             [r1]
1671         blt             2f                      @ only 2 rows left -> tail
1672         vld1.32         {d6[0]},[r0,:32], r2
1673         vld1.32         {d6[1]},[r0,:32], r2
1674         \macd           q10, d0,  d6
1675         pld             [r0]
1676         vld1.32         {d7[0]},[r1,:32], r2
1677         vld1.32         {d7[1]},[r1,:32], r2
1678         \macs           q10, d1,  d7
1679         pld             [r1]
1680         vshl.s16        q1,  q1,  q9            @ arithmetic right shift via q9
1681         vqmovun.s16     d2,  q1                 @ saturate/narrow to u8
1682         vshl.s16        q10, q10, q9
1683         vqmovun.s16     d4,  q10
1684         vmov            q10, q8                 @ re-init for next iteration
1685         vst1.32         {d2[0]},[r6,:32], r2
1686         vst1.32         {d2[1]},[r6,:32], r2
1687         vmov            q1,  q8
1688         vst1.32         {d4[0]},[r6,:32], r2
1689         vst1.32         {d4[1]},[r6,:32], r2
1690         bne             1b
1691         pop             {r4-r6, pc}
@ Tail: shift/narrow/store the final two rows only.
1692 2:      vshl.s16        q1,  q1,  q9
1693         vqmovun.s16     d2,  q1
1694         vst1.32         {d2[0]},[r6,:32], r2
1695         vst1.32         {d2[1]},[r6,:32], r2
1696         pop             {r4-r6, pc}
1697         .endm
1698
@ Instantiate ff_biweight_h264_pixels_\w\()_neon.
@ Args: r0 = dst/src0, r1 = src1, r2 = stride, r3 = height, then on the
@ stack: log2_denom, weightd, weights, offset (presumably matching the
@ H.264 biweight prototype -- confirm against h264dsp.h).
@ The weights are signed; NEON has only unsigned widening multiplies
@ here, so the sign combination is classified into lr and dispatched to
@ a biweight_\w expansion using vmlal/vmlsl with the magnitudes.
@ Each expansion ends in pop {r4-r6, pc}, so the cases do not fall
@ through into each other.
        .macro  biweight_func w
function ff_biweight_h264_pixels_\w\()_neon, export=1
        push            {r4-r6, lr}
        ldr             r12, [sp, #16]          @ r12 = log2_denom (5th arg)
        add             r4,  sp,  #20
        ldm             r4,  {r4-r6}            @ r4/r5/r6 = weightd/weights/offset
        lsr             lr,  r4,  #31           @ sign bit of weightd
        add             r6,  r6,  #1
        eors            lr,  lr,  r5,  lsr #30  @ lr: 0 both>=0, 1 weightd<0,
        orr             r6,  r6,  #1            @     2 both<0,   3 weights<0
        vdup.16         q9,  r12
        lsl             r6,  r6,  r12           @ r6 = ((offset+1)|1) << log2_denom
        vmvn            q9,  q9                 @ q9 = ~log2_denom = -(log2_denom+1)
        vdup.16         q8,  r6                 @ q8 = rounding/offset term
        mov             r6,  r0                 @ r6 = store pointer (dst)
        beq             10f
        subs            lr,  lr,  #1
        beq             20f
        subs            lr,  lr,  #1
        beq             30f
        b               40f
10:     biweight_\w     vmlal.u8, vmlal.u8      @ both weights >= 0
20:     rsb             r4,  r4,  #0            @ weightd < 0: negate to magnitude
        biweight_\w     vmlal.u8, vmlsl.u8
30:     rsb             r4,  r4,  #0            @ both < 0
        rsb             r5,  r5,  #0
        biweight_\w     vmlsl.u8, vmlsl.u8
40:     rsb             r5,  r5,  #0            @ weights < 0
        biweight_\w     vmlsl.u8, vmlal.u8
endfunc
        .endm
1730
@ Emit ff_biweight_h264_pixels_{16,8,4}_neon.
        biweight_func   16
        biweight_func   8
        biweight_func   4
1734
@ Weighted prediction
1736
@ Weighted prediction, 16-pixel-wide block, two rows per iteration,
@ applied in place (r4 starts equal to r0).
@ \add is vhadd/vhsub/vadd/vsub.s16, chosen by weight_func from the
@ weight's sign and log2_denom; r12 holds the weight magnitude.
@ Entry (set up by weight_func): r0 = block (reads), r1 = stride,
@ r2 = height (even), r4 = block (writes), q8 = offset term,
@ q9 = shift count for vrshl.s16 (negative = rounding shift right).
        .macro  weight_16 add
        vdup.8          d0,  r12                @ broadcast |weight|
1:      subs            r2,  r2,  #2            @ two rows per pass
        vld1.8          {d20-d21},[r0,:128], r1
        vmull.u8        q2,  d0,  d20           @ row 0, low 8 pixels
        pld             [r0]
        vmull.u8        q3,  d0,  d21           @ row 0, high 8 pixels
        vld1.8          {d28-d29},[r0,:128], r1
        vmull.u8        q12, d0,  d28           @ row 1, low
        pld             [r0]
        vmull.u8        q13, d0,  d29           @ row 1, high
        \add            q2,  q8,  q2            @ +/- offset term
        vrshl.s16       q2,  q2,  q9            @ rounding shift right
        \add            q3,  q8,  q3
        vrshl.s16       q3,  q3,  q9
        vqmovun.s16     d4,  q2                 @ saturate to unsigned 8-bit
        vqmovun.s16     d5,  q3
        \add            q12, q8,  q12
        vrshl.s16       q12, q12, q9
        \add            q13, q8,  q13
        vrshl.s16       q13, q13, q9
        vqmovun.s16     d24, q12
        vqmovun.s16     d25, q13
        vst1.8          {d4- d5}, [r4,:128], r1
        vst1.8          {d24-d25},[r4,:128], r1
        bne             1b
        pop             {r4, pc}
        .endm
1765
@ Weighted prediction, 8-pixel-wide block, two rows per iteration.
@ Same register contract as weight_16 (in place; r4 starts equal to r0).
        .macro  weight_8 add
        vdup.8          d0,  r12                @ broadcast |weight|
1:      subs            r2,  r2,  #2            @ two rows per pass
        vld1.8          {d4},[r0,:64], r1
        vmull.u8        q1,  d0,  d4            @ weight * row 0
        pld             [r0]
        vld1.8          {d6},[r0,:64], r1
        vmull.u8        q10, d0,  d6            @ weight * row 1
        \add            q1,  q8,  q1            @ +/- offset term
        pld             [r0]
        vrshl.s16       q1,  q1,  q9            @ rounding shift right
        vqmovun.s16     d2,  q1                 @ saturate to unsigned 8-bit
        \add            q10, q8,  q10
        vrshl.s16       q10, q10, q9
        vqmovun.s16     d4,  q10
        vst1.8          {d2},[r4,:64], r1
        vst1.8          {d4},[r4,:64], r1
        bne             1b
        pop             {r4, pc}
        .endm
1786
@ Weighted prediction, 4-pixel-wide block, four rows per iteration.
@ Same register contract as weight_16.  If height is not a multiple of
@ 4, subs leaves r2 negative after the first two rows and the tail at
@ 2: flushes just those two rows.
        .macro  weight_4 add
        vdup.8          d0,  r12                @ broadcast |weight|
        vmov            q1,  q8
        vmov            q10, q8
1:      subs            r2,  r2,  #4
        vld1.32         {d4[0]},[r0,:32], r1    @ rows 0-1
        vld1.32         {d4[1]},[r0,:32], r1
        vmull.u8        q1,  d0,  d4
        pld             [r0]
        blt             2f                      @ only two rows left
        vld1.32         {d6[0]},[r0,:32], r1    @ rows 2-3
        vld1.32         {d6[1]},[r0,:32], r1
        vmull.u8        q10, d0,  d6
        pld             [r0]
        \add            q1,  q8,  q1            @ +/- offset term
        vrshl.s16       q1,  q1,  q9            @ rounding shift right
        vqmovun.s16     d2,  q1                 @ saturate to unsigned 8-bit
        \add            q10, q8,  q10
        vrshl.s16       q10, q10, q9
        vqmovun.s16     d4,  q10
        vmov            q10, q8
        vst1.32         {d2[0]},[r4,:32], r1
        vst1.32         {d2[1]},[r4,:32], r1
        vmov            q1,  q8
        vst1.32         {d4[0]},[r4,:32], r1
        vst1.32         {d4[1]},[r4,:32], r1
        bne             1b
        pop             {r4, pc}
2:      \add            q1,  q8,  q1            @ tail: flush rows 0-1 only
        vrshl.s16       q1,  q1,  q9
        vqmovun.s16     d2,  q1
        vst1.32         {d2[0]},[r4,:32], r1
        vst1.32         {d2[1]},[r4,:32], r1
        pop             {r4, pc}
        .endm
1822
@ Instantiate ff_weight_h264_pixels_\w\()_neon.
@ Args: r0 = block, r1 = stride, r2 = height, r3 = log2_denom, then on
@ the stack: weight, offset (presumably matching the H.264 weight
@ prototype -- confirm against h264dsp.h).
@ Two paths: for log2_denom > 1 the halving add/sub (vhadd/vhsub)
@ contributes one bit of the shift, so q9 = 1 - log2_denom; for
@ log2_denom <= 1 plain add/sub is used with q9 = -log2_denom.  Within
@ each path a negative weight is negated to its magnitude and the
@ subtracting variant is selected.  The numeric local label 10: is
@ deliberately reused; each "blt 10f" binds to the next 10: forward.
@ Every weight_\w expansion ends in pop {r4, pc}, so no fall-through.
        .macro  weight_func w
function ff_weight_h264_pixels_\w\()_neon, export=1
        push            {r4, lr}
        ldr             r12, [sp, #8]           @ r12 = weight
        ldr             r4,  [sp, #12]          @ r4  = offset
        cmp             r3,  #1
        lsl             r4,  r4,  r3
        vdup.16         q8,  r4                 @ q8 = offset << log2_denom
        mov             r4,  r0                 @ r4 = store pointer (dst)
        ble             20f                     @ log2_denom <= 1: plain-add path
        rsb             lr,  r3,  #1
        vdup.16         q9,  lr                 @ q9 = 1 - log2_denom
        cmp             r12, #0
        blt             10f
        weight_\w       vhadd.s16               @ weight >= 0
10:     rsb             r12, r12, #0            @ weight < 0: use magnitude
        weight_\w       vhsub.s16
20:     rsb             lr,  r3,  #0
        vdup.16         q9,  lr                 @ q9 = -log2_denom
        cmp             r12, #0
        blt             10f
        weight_\w       vadd.s16                @ weight >= 0
10:     rsb             r12, r12, #0            @ weight < 0: use magnitude
        weight_\w       vsub.s16
endfunc
        .endm
1849
@ Emit ff_weight_h264_pixels_{16,8,4}_neon.
        weight_func     16
        weight_func     8
        weight_func     4