#if HAVE_INLINE_ASM
/* MULL: (int64_t)a * b >> shift, computed on the full 64-bit product so
 * the intermediate cannot overflow.  smull leaves the signed 64-bit
 * product in lo:hi; the lsr/lsl pair then recombines bits
 * [shift, shift+31] of that product into hi, which is returned.
 * NOTE(review): assumes 0 < shift < 32 — lsl by 32-shift with shift == 0
 * would be out of range; verify against callers. */
# define MULL MULL
static inline av_const int MULL(int a, int b, unsigned shift)
{
    int lo, hi;
    __asm__("smull %0, %1, %2, %3 \n\t"
            "mov %0, %0, lsr %4 \n\t"
            "add %1, %0, %1, lsl %5 \n\t"
            : "=&r"(lo), "=&r"(hi)
            : "r"(b), "r"(a), "ir"(shift), "ir"(32-shift));
    return hi;
}
/* MULH: high 32 bits of the signed 64-bit product a * b. */
#define MULH MULH
#if HAVE_ARMV6
/* ARMv6+: smmul computes the high word directly in one instruction. */
static inline av_const int MULH(int a, int b)
{
    int r;
    __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}
#else
/* Pre-ARMv6 fallback: full 64-bit multiply, discard the low word.
 * Early-clobber ("=&r") keeps the outputs out of the input registers,
 * which smull requires. */
static inline av_const int MULH(int a, int b)
{
    int lo, hi;
    __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));
    return hi;
}
#endif
/* MUL64: full signed 64-bit product of two 32-bit ints.
 * smull writes the low and high product words into hl[0]/hl[1]; the
 * union reassembles them into one 64-bit value.
 * NOTE(review): relies on hl[0] being the low half of x, i.e. on the
 * little-endian layout of the ARM targets this header is built for —
 * confirm if big-endian ARM is ever a target. */
static inline av_const int64_t MUL64(int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x;
    __asm__ ("smull %0, %1, %2, %3"
             : "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
#define MUL64 MUL64
-
/* MAC64: d + (int64_t)a * b.  smlal accumulates the 64-bit product
 * into the lo:hi pair seeded from d ("+r" makes both halves in/out).
 * NOTE(review): like MUL64, assumes hl[0] is the low half of the
 * 64-bit union member (little-endian layout). */
static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x = { d };
    __asm__ ("smlal %0, %1, %2, %3"
             : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
/* Give the function d-updating semantics: MAC64(d, a, b) => d += a*b. */
#define MAC64(d, a, b) ((d) = MAC64(d, a, b))
/* MLS64: d -= a * b, via MAC64 with the first factor negated. */
#define MLS64(d, a, b) MAC64(d, -(a), b)
#if HAVE_ARMV5TE
/*
 * mid_pred: return the median of a, b and c, branch-free via
 * conditionally executed moves:
 *   - after the first cmp/movgt pair, m = max(a, b) and a = min(a, b);
 *   - movle then raises a to max(min(a, b), c);
 *   - the final movgt lowers m to min(max(a, b), a), i.e. the median.
 *
 * Fixes over the previous revision:
 *   - "cc" added to the clobber list: the cmp instructions alter the
 *     condition flags, which GCC otherwise assumes the asm preserves.
 *   - IT blocks ("itt gt" / "it le" / "it gt") inserted so the
 *     conditional mov instructions also assemble in Thumb-2 mode,
 *     where conditional execution requires a preceding IT instruction
 *     (they are ignored-compatible in ARM mode via unified syntax).
 *   - "volatile" dropped: the asm has no side effects beyond its
 *     outputs, so the optimizer is free to CSE or drop unused calls.
 */
static inline av_const int mid_pred(int a, int b, int c)
{
    int m;
    __asm__ (
        "mov   %0, %2  \n\t"
        "cmp   %1, %2  \n\t"
        "itt   gt      \n\t"
        "movgt %0, %1  \n\t"
        "movgt %1, %2  \n\t"
        "cmp   %1, %3  \n\t"
        "it    le      \n\t"
        "movle %1, %3  \n\t"
        "cmp   %0, %1  \n\t"
        "it    gt      \n\t"
        "movgt %0, %1  \n\t"
        : "=&r"(m), "+r"(a)
        : "r"(b), "r"(c)
        : "cc");
    return m;
}