arm: Use the matching endfunc macro instead of the assembler directive directly
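
The changes below parameterize the H.264 chroma motion-compensation macros with a codec argument so the same NEON code can also serve RV40 and VC-1; the per-codec difference is only the rounding added to the 16-bit accumulator before the final shift, and the loops are additionally restructured to use a single source pointer instead of the r5/r4 double-stride pair. A minimal C sketch of that rounding step, with an illustrative name (round_mc is not an FFmpeg function):

/*
 * Sketch of the rounding the codec parameter selects: the H.264 path
 * keeps the rounding narrow shift (vrshrn.u16 #6, i.e. +32 then >>6),
 * the VC-1 path adds a constant 28, and the RV40 path adds a bias
 * loaded from the rv40bias table for the current x/y subpel position.
 */
static inline unsigned char round_mc(unsigned acc, unsigned bias)
{
    return (unsigned char)((acc + bias) >> 6);
}
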
diff --git a/libavcodec/arm/h264cmc_neon.S b/libavcodec/arm/h264cmc_neon.S
index e10adaca108174ebae1f886513737013de4e96b1..3183dd8d82d38caa882b8ab073d83e74af27a7f7 100644
--- a/libavcodec/arm/h264cmc_neon.S
+++ b/libavcodec/arm/h264cmc_neon.S
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include "asm.S"
+#include "libavutil/arm/asm.S"
 
 /* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
-.macro  h264_chroma_mc8 type
-function ff_\type\()_h264_chroma_mc8_neon, export=1
+.macro  h264_chroma_mc8 type, codec=h264
+function ff_\type\()_\codec\()_chroma_mc8_neon, export=1
         push            {r4-r7, lr}
-        ldrd            r4,  [sp, #20]
+        ldrd            r4,  r5,  [sp, #20]
   .ifc \type,avg
         mov             lr,  r0
   .endif
         pld             [r1]
         pld             [r1, r2]
 
+  .ifc \codec,rv40
+        movrel          r6,  rv40bias
+        lsr             r7,  r5,  #1
+        add             r6,  r6,  r7,  lsl #3
+        lsr             r7,  r4,  #1
+        add             r6,  r6,  r7,  lsl #1
+        vld1.16         {d22[],d23[]}, [r6,:16]
+  .endif
+  .ifc \codec,vc1
+        vmov.u16        q11, #28
+  .endif
+
 A       muls            r7,  r4,  r5
 T       mul             r7,  r4,  r5
 T       cmp             r7,  #0
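
For reference against the chroma_mc8 prototype comment above, a scalar sketch of the bilinear filter these loops implement (chroma_mc8_ref is an illustrative name, not an FFmpeg symbol). The d0..d3 coefficient registers correspond to A, B, C and D below (d3 = x*y comes from the muls above); the +32 is the H.264 rounding that the new .else branches replace with the VC-1/RV40 bias.

static void chroma_mc8_ref(unsigned char *dst, const unsigned char *src,
                           int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y), B = x * (8 - y);
    const int C = (8 - x) * y,       D = x * y;
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = (unsigned char)((A * src[j] + B * src[j + 1] +
                                      C * src[j + stride] +
                                      D * src[j + stride + 1] + 32) >> 6);
        dst += stride;
        src += stride;
    }
}
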
@@ -42,24 +54,20 @@ T       cmp             r7,  #0
 
         beq             2f
 
-        add             r5,  r1,  r2
-
         vdup.8          d0,  r4
-        lsl             r4,  r2,  #1
         vdup.8          d1,  r12
-        vld1.8          {d4, d5}, [r1], r4
+        vld1.8          {d4, d5}, [r1], r2
         vdup.8          d2,  r6
-        vld1.8          {d6, d7}, [r5], r4
         vdup.8          d3,  r7
-
         vext.8          d5,  d4,  d5,  #1
-        vext.8          d7,  d6,  d7,  #1
 
-1:      pld             [r5]
+1:      vld1.8          {d6, d7}, [r1], r2
         vmull.u8        q8,  d4,  d0
         vmlal.u8        q8,  d5,  d1
-        vld1.8          {d4, d5}, [r1], r4
+        vext.8          d7,  d6,  d7,  #1
+        vld1.8          {d4, d5}, [r1], r2
         vmlal.u8        q8,  d6,  d2
+        pld             [r1]
         vext.8          d5,  d4,  d5,  #1
         vmlal.u8        q8,  d7,  d3
         vmull.u8        q9,  d6,  d0
@@ -67,16 +75,21 @@ T       cmp             r7,  #0
         vmlal.u8        q9,  d7,  d1
         vmlal.u8        q9,  d4,  d2
         vmlal.u8        q9,  d5,  d3
+        pld             [r1, r2]
+  .ifc \codec,h264
         vrshrn.u16      d16, q8,  #6
-        vld1.8          {d6, d7}, [r5], r4
-        pld             [r1]
         vrshrn.u16      d17, q9,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vadd.u16        q9,  q9,  q11
+        vshrn.u16       d16, q8,  #6
+        vshrn.u16       d17, q9,  #6
+  .endif
   .ifc \type,avg
         vld1.8          {d20}, [lr,:64], r2
         vld1.8          {d21}, [lr,:64], r2
         vrhadd.u8       q8,  q8,  q10
   .endif
-        vext.8          d7,  d6,  d7,  #1
         vst1.8          {d16}, [r0,:64], r2
         vst1.8          {d17}, [r0,:64], r2
         bgt             1b
@@ -90,27 +103,31 @@ T       cmp             r7,  #0
 
         beq             4f
 
-        add             r5,  r1,  r2
-        lsl             r4,  r2,  #1
-        vld1.8          {d4}, [r1], r4
-        vld1.8          {d6}, [r5], r4
+        vld1.8          {d4}, [r1], r2
 
-3:      pld             [r5]
+3:      vld1.8          {d6}, [r1], r2
         vmull.u8        q8,  d4,  d0
         vmlal.u8        q8,  d6,  d1
-        vld1.8          {d4}, [r1], r4
+        vld1.8          {d4}, [r1], r2
         vmull.u8        q9,  d6,  d0
         vmlal.u8        q9,  d4,  d1
-        vld1.8          {d6}, [r5], r4
+        pld             [r1]
+  .ifc \codec,h264
         vrshrn.u16      d16, q8,  #6
         vrshrn.u16      d17, q9,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vadd.u16        q9,  q9,  q11
+        vshrn.u16       d16, q8,  #6
+        vshrn.u16       d17, q9,  #6
+  .endif
+        pld             [r1, r2]
   .ifc \type,avg
         vld1.8          {d20}, [lr,:64], r2
         vld1.8          {d21}, [lr,:64], r2
         vrhadd.u8       q8,  q8,  q10
   .endif
         subs            r3,  r3,  #2
-        pld             [r1]
         vst1.8          {d16}, [r0,:64], r2
         vst1.8          {d17}, [r0,:64], r2
         bgt             3b
@@ -121,44 +138,58 @@ T       cmp             r7,  #0
         vld1.8          {d6, d7}, [r1], r2
         vext.8          d5,  d4,  d5,  #1
         vext.8          d7,  d6,  d7,  #1
-
-5:      pld             [r1]
+        pld             [r1]
         subs            r3,  r3,  #2
         vmull.u8        q8,  d4,  d0
         vmlal.u8        q8,  d5,  d1
-        vld1.8          {d4, d5}, [r1], r2
         vmull.u8        q9,  d6,  d0
         vmlal.u8        q9,  d7,  d1
-        pld             [r1]
-        vext.8          d5,  d4,  d5,  #1
+        pld             [r1, r2]
+  .ifc \codec,h264
         vrshrn.u16      d16, q8,  #6
         vrshrn.u16      d17, q9,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vadd.u16        q9,  q9,  q11
+        vshrn.u16       d16, q8,  #6
+        vshrn.u16       d17, q9,  #6
+  .endif
   .ifc \type,avg
         vld1.8          {d20}, [lr,:64], r2
         vld1.8          {d21}, [lr,:64], r2
         vrhadd.u8       q8,  q8,  q10
   .endif
-        vld1.8          {d6, d7}, [r1], r2
-        vext.8          d7,  d6,  d7,  #1
         vst1.8          {d16}, [r0,:64], r2
         vst1.8          {d17}, [r0,:64], r2
-        bgt             5b
+        bgt             4b
 
         pop             {r4-r7, pc}
 endfunc
 .endm
 
 /* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
-.macro  h264_chroma_mc4 type
-function ff_\type\()_h264_chroma_mc4_neon, export=1
+.macro  h264_chroma_mc4 type, codec=h264
+function ff_\type\()_\codec\()_chroma_mc4_neon, export=1
         push            {r4-r7, lr}
-        ldrd            r4,  [sp, #20]
+        ldrd            r4,  r5,  [sp, #20]
   .ifc \type,avg
         mov             lr,  r0
   .endif
         pld             [r1]
         pld             [r1, r2]
 
+  .ifc \codec,rv40
+        movrel          r6,  rv40bias
+        lsr             r7,  r5,  #1
+        add             r6,  r6,  r7,  lsl #3
+        lsr             r7,  r4,  #1
+        add             r6,  r6,  r7,  lsl #1
+        vld1.16         {d22[],d23[]}, [r6,:16]
+  .endif
+  .ifc \codec,vc1
+        vmov.u16        q11, #28
+  .endif
+
 A       muls            r7,  r4,  r5
 T       mul             r7,  r4,  r5
 T       cmp             r7,  #0
@@ -170,45 +201,44 @@ T       cmp             r7,  #0
 
         beq             2f
 
-        add             r5,  r1,  r2
-
         vdup.8          d0,  r4
-        lsl             r4,  r2,  #1
         vdup.8          d1,  r12
-        vld1.8          {d4},     [r1], r4
+        vld1.8          {d4},     [r1], r2
         vdup.8          d2,  r6
-        vld1.8          {d6},     [r5], r4
         vdup.8          d3,  r7
 
         vext.8          d5,  d4,  d5,  #1
-        vext.8          d7,  d6,  d7,  #1
         vtrn.32         d4,  d5
-        vtrn.32         d6,  d7
 
         vtrn.32         d0,  d1
         vtrn.32         d2,  d3
 
-1:      pld             [r5]
+1:      vld1.8          {d6},     [r1], r2
+        vext.8          d7,  d6,  d7,  #1
+        vtrn.32         d6,  d7
         vmull.u8        q8,  d4,  d0
         vmlal.u8        q8,  d6,  d2
-        vld1.8          {d4},     [r1], r4
+        vld1.8          {d4},     [r1], r2
         vext.8          d5,  d4,  d5,  #1
         vtrn.32         d4,  d5
+        pld             [r1]
         vmull.u8        q9,  d6,  d0
         vmlal.u8        q9,  d4,  d2
-        vld1.8          {d6},     [r5], r4
         vadd.i16        d16, d16, d17
         vadd.i16        d17, d18, d19
+  .ifc \codec,h264
         vrshrn.u16      d16, q8,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vshrn.u16       d16, q8,  #6
+  .endif
         subs            r3,  r3,  #2
-        pld             [r1]
+        pld             [r1, r2]
   .ifc \type,avg
         vld1.32         {d20[0]}, [lr,:32], r2
         vld1.32         {d20[1]}, [lr,:32], r2
         vrhadd.u8       d16, d16, d20
   .endif
-        vext.8          d7,  d6,  d7,  #1
-        vtrn.32         d6,  d7
         vst1.32         {d16[0]}, [r0,:32], r2
         vst1.32         {d16[1]}, [r0,:32], r2
         bgt             1b
@@ -224,26 +254,28 @@ T       cmp             r7,  #0
         beq             4f
 
         vext.32         d1,  d0,  d1,  #1
-        add             r5,  r1,  r2
-        lsl             r4,  r2,  #1
-        vld1.32         {d4[0]},  [r1], r4
-        vld1.32         {d4[1]},  [r5], r4
+        vld1.32         {d4[0]},  [r1], r2
 
-3:      pld             [r5]
+3:      vld1.32         {d4[1]},  [r1], r2
         vmull.u8        q8,  d4,  d0
-        vld1.32         {d4[0]},  [r1], r4
+        vld1.32         {d4[0]},  [r1], r2
         vmull.u8        q9,  d4,  d1
-        vld1.32         {d4[1]},  [r5], r4
         vadd.i16        d16, d16, d17
         vadd.i16        d17, d18, d19
+        pld             [r1]
+  .ifc \codec,h264
         vrshrn.u16      d16, q8,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vshrn.u16       d16, q8,  #6
+  .endif
   .ifc \type,avg
         vld1.32         {d20[0]}, [lr,:32], r2
         vld1.32         {d20[1]}, [lr,:32], r2
         vrhadd.u8       d16, d16, d20
   .endif
         subs            r3,  r3,  #2
-        pld             [r1]
+        pld             [r1, r2]
         vst1.32         {d16[0]}, [r0,:32], r2
         vst1.32         {d16[1]}, [r0,:32], r2
         bgt             3b
@@ -256,29 +288,27 @@ T       cmp             r7,  #0
         vext.8          d7,  d6,  d7,  #1
         vtrn.32         d4,  d5
         vtrn.32         d6,  d7
-
-5:      vmull.u8        q8,  d4,  d0
+        vmull.u8        q8,  d4,  d0
         vmull.u8        q9,  d6,  d0
         subs            r3,  r3,  #2
-        vld1.8          {d4},     [r1], r2
-        vext.8          d5,  d4,  d5,  #1
-        vtrn.32         d4,  d5
         vadd.i16        d16, d16, d17
         vadd.i16        d17, d18, d19
         pld             [r1]
+  .ifc \codec,h264
         vrshrn.u16      d16, q8,  #6
+  .else
+        vadd.u16        q8,  q8,  q11
+        vshrn.u16       d16, q8,  #6
+  .endif
   .ifc \type,avg
         vld1.32         {d20[0]}, [lr,:32], r2
         vld1.32         {d20[1]}, [lr,:32], r2
         vrhadd.u8       d16, d16, d20
   .endif
-        vld1.8          {d6},     [r1], r2
-        vext.8          d7,  d6,  d7,  #1
-        vtrn.32         d6,  d7
         pld             [r1]
         vst1.32         {d16[0]}, [r0,:32], r2
         vst1.32         {d16[1]}, [r0,:32], r2
-        bgt             5b
+        bgt             4b
 
         pop             {r4-r7, pc}
 endfunc
@@ -358,3 +388,24 @@ endfunc
         h264_chroma_mc4 avg
         h264_chroma_mc2 put
         h264_chroma_mc2 avg
+
+#if CONFIG_RV40_DECODER
+const   rv40bias
+        .short           0, 16, 32, 16
+        .short          32, 28, 32, 28
+        .short           0, 32, 16, 32
+        .short          32, 28, 32, 28
+endconst
+
+        h264_chroma_mc8 put, rv40
+        h264_chroma_mc8 avg, rv40
+        h264_chroma_mc4 put, rv40
+        h264_chroma_mc4 avg, rv40
+#endif
+
+#if CONFIG_VC1_DECODER
+        h264_chroma_mc8 put, vc1
+        h264_chroma_mc8 avg, vc1
+        h264_chroma_mc4 put, vc1
+        h264_chroma_mc4 avg, vc1
+#endif
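
A hedged C sketch of the rv40bias lookup performed at the top of the rv40 variants (rv40bias_ref and rv40_chroma_bias are illustrative names, not FFmpeg symbols): the row offset is (y>>1)*8 bytes and the column offset (x>>1)*2 bytes, matching the lsr/add sequence before the vld1.16 broadcast into q11.

static const short rv40bias_ref[4][4] = {
    {  0, 16, 32, 16 },
    { 32, 28, 32, 28 },
    {  0, 32, 16, 32 },
    { 32, 28, 32, 28 },
};

static inline int rv40_chroma_bias(int x, int y)
{
    /* row selected by y>>1, column by x>>1, as in the assembly above */
    return rv40bias_ref[y >> 1][x >> 1];
}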