X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=common%2Fx86%2Futil.h;h=51018df11d03ea446b0678f5987e776d1ee0c251;hb=c82c7374938f4342971adf8b2495c3a1bbe621c4;hp=03050da0592d86766b50498eb28199f205d970d0;hpb=6d12fae91a5faa4f82917f5caaed4ddad39ac591;p=x264

diff --git a/common/x86/util.h b/common/x86/util.h
index 03050da0..51018df1 100644
--- a/common/x86/util.h
+++ b/common/x86/util.h
@@ -1,7 +1,7 @@
 /*****************************************************************************
- * mc.h: h264 encoder library
+ * util.h: x86 inline asm
  *****************************************************************************
- * Copyright (C) 2008 x264 Project
+ * Copyright (C) 2008-2016 x264 project
  *
  * Authors: Fiona Glaser
  *          Loren Merritt
@@ -19,19 +19,30 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
  *****************************************************************************/
 
 #ifndef X264_X86_UTIL_H
 #define X264_X86_UTIL_H
 
-#ifdef __GNUC__
-
 #ifdef __SSE__
 #include <xmmintrin.h>
+
+#undef M128_ZERO
+#define M128_ZERO ((__m128){0,0,0,0})
+#define x264_union128_t x264_union128_sse_t
+typedef union { __m128 i; uint64_t a[2]; uint32_t b[4]; uint16_t c[8]; uint8_t d[16]; } MAY_ALIAS x264_union128_sse_t;
+#if HAVE_VECTOREXT
+typedef uint32_t v4si __attribute__((vector_size (16)));
 #endif
+#endif // __SSE__
 
-#define x264_median_mv x264_median_mv_mmxext
-static ALWAYS_INLINE void x264_median_mv_mmxext( int16_t *dst, int16_t *a, int16_t *b, int16_t *c )
+#if HAVE_X86_INLINE_ASM && HAVE_MMX
+
+#define x264_median_mv x264_median_mv_mmx2
+static ALWAYS_INLINE void x264_median_mv_mmx2( int16_t *dst, int16_t *a, int16_t *b, int16_t *c )
 {
     asm(
         "movd %1, %%mm0 \n"
@@ -48,8 +59,8 @@ static ALWAYS_INLINE void x264_median_mv_mmxext( int16_t *dst, int16_t *a, int16
     );
 }
 
-#define x264_predictor_difference x264_predictor_difference_mmxext
-static ALWAYS_INLINE int x264_predictor_difference_mmxext( int16_t (*mvc)[2], intptr_t i_mvc )
+#define x264_predictor_difference x264_predictor_difference_mmx2
+static ALWAYS_INLINE int x264_predictor_difference_mmx2( int16_t (*mvc)[2], intptr_t i_mvc )
 {
     int sum;
     static const uint64_t pw_1 = 0x0001000100010001ULL;
@@ -84,16 +95,18 @@ static ALWAYS_INLINE int x264_predictor_difference_mmxext( int16_t (*mvc)[2], in
     return sum;
 }
 
-#define x264_cabac_mvd_sum x264_cabac_mvd_sum_mmxext
-static ALWAYS_INLINE uint16_t x264_cabac_mvd_sum_mmxext(uint8_t *mvdleft, uint8_t *mvdtop)
+#define x264_cabac_mvd_sum x264_cabac_mvd_sum_mmx2
+static ALWAYS_INLINE uint16_t x264_cabac_mvd_sum_mmx2(uint8_t *mvdleft, uint8_t *mvdtop)
 {
     static const uint64_t pb_2 = 0x0202020202020202ULL;
     static const uint64_t pb_32 = 0x2020202020202020ULL;
+    static const uint64_t pb_33 = 0x2121212121212121ULL;
     int amvd;
     asm(
         "movd %1, %%mm0 \n"
         "movd %2, %%mm1 \n"
-        "paddb %%mm1, %%mm0 \n"
+        "paddusb %%mm1, %%mm0 \n"
+        "pminub %5, %%mm0 \n"
         "pxor %%mm2, %%mm2 \n"
         "movq %%mm0, %%mm1 \n"
         "pcmpgtb %3, %%mm0 \n"
@@ -103,55 +116,138 @@ static ALWAYS_INLINE uint16_t x264_cabac_mvd_sum_mmxext(uint8_t *mvdleft, uint8_
         "movd %%mm2, %0 \n"
         :"=r"(amvd)
         :"m"(M16( mvdleft )),"m"(M16( mvdtop )),
-         "m"(pb_2),"m"(pb_32)
+         "m"(pb_2),"m"(pb_32),"m"(pb_33)
     );
     return amvd;
 }
 
-#define x264_predictor_roundclip x264_predictor_roundclip_mmxext
-static void ALWAYS_INLINE x264_predictor_roundclip_mmxext( int16_t (*dst)[2], int16_t (*mvc)[2], int i_mvc, int mv_x_min, int mv_x_max, int mv_y_min, int mv_y_max )
+#define x264_predictor_clip x264_predictor_clip_mmx2
+static int ALWAYS_INLINE x264_predictor_clip_mmx2( int16_t (*dst)[2], int16_t (*mvc)[2], int i_mvc, int16_t mv_limit[2][2], uint32_t pmv )
 {
-    uint32_t mv_min = pack16to32_mask( mv_x_min, mv_y_min );
-    uint32_t mv_max = pack16to32_mask( mv_x_max, mv_y_max );
-    static const uint64_t pw_2 = 0x0002000200020002ULL;
-    intptr_t i = i_mvc;
+    static const uint32_t pd_32 = 0x20;
+    intptr_t tmp = (intptr_t)mv_limit, mvc_max = i_mvc, i = 0;
+
     asm(
-        "movd %2, %%mm5 \n"
-        "movd %3, %%mm6 \n"
-        "movq %4, %%mm7 \n"
-        "punpckldq %%mm5, %%mm5 \n"
-        "punpckldq %%mm6, %%mm6 \n"
-        "test $1, %0 \n"
-        "jz 1f \n"
-        "movd -4(%6,%0,4), %%mm0 \n"
-        "paddw %%mm7, %%mm0 \n"
-        "psraw $2, %%mm0 \n"
-        "pmaxsw %%mm5, %%mm0 \n"
-        "pminsw %%mm6, %%mm0 \n"
-        "movd %%mm0, -4(%5,%0,4) \n"
-        "dec %0 \n"
-        "jz 2f \n"
-        "1: \n"
-        "movq -8(%6,%0,4), %%mm0 \n"
-        "paddw %%mm7, %%mm0 \n"
-        "psraw $2, %%mm0 \n"
-        "pmaxsw %%mm5, %%mm0 \n"
-        "pminsw %%mm6, %%mm0 \n"
-        "movq %%mm0, -8(%5,%0,4) \n"
-        "sub $2, %0 \n"
-        "jnz 1b \n"
-        "2: \n"
-        :"+r"(i), "=m"(M64( dst ))
-        :"g"(mv_min), "g"(mv_max), "m"(pw_2), "r"(dst), "r"(mvc), "m"(M64( mvc ))
+        "movq (%2), %%mm5 \n"
+        "movd %6, %%mm3 \n"
+        "psllw $2, %%mm5 \n" // Convert to subpel
+        "pshufw $0xEE, %%mm5, %%mm6 \n"
+        "dec %k3 \n"
+        "jz 2f \n" // if( i_mvc == 1 ) {do the last iteration}
+        "punpckldq %%mm3, %%mm3 \n"
+        "punpckldq %%mm5, %%mm5 \n"
+        "movd %7, %%mm4 \n"
+        "lea (%0,%3,4), %3 \n"
+        "1: \n"
+        "movq (%0), %%mm0 \n"
+        "add $8, %0 \n"
+        "movq %%mm3, %%mm1 \n"
+        "pxor %%mm2, %%mm2 \n"
+        "pcmpeqd %%mm0, %%mm1 \n" // mv == pmv
+        "pcmpeqd %%mm0, %%mm2 \n" // mv == 0
+        "por %%mm1, %%mm2 \n" // (mv == pmv || mv == 0) * -1
+        "pmovmskb %%mm2, %k2 \n" // (mv == pmv || mv == 0) * 0xf
+        "pmaxsw %%mm5, %%mm0 \n"
+        "pminsw %%mm6, %%mm0 \n"
+        "pand %%mm4, %%mm2 \n" // (mv0 == pmv || mv0 == 0) * 32
+        "psrlq %%mm2, %%mm0 \n" // drop mv0 if it's skipped
+        "movq %%mm0, (%5,%4,4) \n"
+        "and $24, %k2 \n"
+        "add $2, %4 \n"
+        "add $8, %k2 \n"
+        "shr $4, %k2 \n" // (4-val)>>1
+        "sub %2, %4 \n" // +1 for each valid motion vector
+        "cmp %3, %0 \n"
+        "jl 1b \n"
+        "jg 3f \n" // if( i == i_mvc - 1 ) {do the last iteration}
+
+        /* Do the last iteration */
+        "2: \n"
+        "movd (%0), %%mm0 \n"
+        "pxor %%mm2, %%mm2 \n"
+        "pcmpeqd %%mm0, %%mm3 \n"
+        "pcmpeqd %%mm0, %%mm2 \n"
+        "por %%mm3, %%mm2 \n"
+        "pmovmskb %%mm2, %k2 \n"
+        "pmaxsw %%mm5, %%mm0 \n"
+        "pminsw %%mm6, %%mm0 \n"
+        "movd %%mm0, (%5,%4,4) \n"
+        "inc %4 \n"
+        "and $1, %k2 \n"
+        "sub %2, %4 \n" // output += !(mv == pmv || mv == 0)
+        "3: \n"
+        :"+r"(mvc), "=m"(M64( dst )), "+r"(tmp), "+r"(mvc_max), "+r"(i)
+        :"r"(dst), "g"(pmv), "m"(pd_32), "m"(M64( mvc ))
     );
+    return i;
 }
 
-#ifdef __SSE__
-#undef M128_ZERO
-#define M128_ZERO ((__m128){0,0,0,0})
-#define x264_union128_t x264_union128_sse_t
-typedef union { __m128 i; uint64_t a[2]; uint32_t b[4]; uint16_t c[8]; uint8_t d[16]; } MAY_ALIAS x264_union128_sse_t;
-#endif
+/* Same as the above, except we do (mv + 2) >> 2 on the input. */
+#define x264_predictor_roundclip x264_predictor_roundclip_mmx2
+static int ALWAYS_INLINE x264_predictor_roundclip_mmx2( int16_t (*dst)[2], int16_t (*mvc)[2], int i_mvc, int16_t mv_limit[2][2], uint32_t pmv )
+{
+    static const uint64_t pw_2 = 0x0002000200020002ULL;
+    static const uint32_t pd_32 = 0x20;
+    intptr_t tmp = (intptr_t)mv_limit, mvc_max = i_mvc, i = 0;
+
+    asm(
+        "movq (%2), %%mm5 \n"
+        "movq %6, %%mm7 \n"
+        "movd %7, %%mm3 \n"
+        "pshufw $0xEE, %%mm5, %%mm6 \n"
+        "dec %k3 \n"
+        "jz 2f \n"
+        "punpckldq %%mm3, %%mm3 \n"
+        "punpckldq %%mm5, %%mm5 \n"
+        "movd %8, %%mm4 \n"
+        "lea (%0,%3,4), %3 \n"
+        "1: \n"
+        "movq (%0), %%mm0 \n"
+        "add $8, %0 \n"
+        "paddw %%mm7, %%mm0 \n"
+        "psraw $2, %%mm0 \n"
+        "movq %%mm3, %%mm1 \n"
+        "pxor %%mm2, %%mm2 \n"
+        "pcmpeqd %%mm0, %%mm1 \n"
+        "pcmpeqd %%mm0, %%mm2 \n"
+        "por %%mm1, %%mm2 \n"
+        "pmovmskb %%mm2, %k2 \n"
+        "pmaxsw %%mm5, %%mm0 \n"
+        "pminsw %%mm6, %%mm0 \n"
+        "pand %%mm4, %%mm2 \n"
+        "psrlq %%mm2, %%mm0 \n"
+        "movq %%mm0, (%5,%4,4) \n"
+        "and $24, %k2 \n"
+        "add $2, %4 \n"
+        "add $8, %k2 \n"
+        "shr $4, %k2 \n"
+        "sub %2, %4 \n"
+        "cmp %3, %0 \n"
+        "jl 1b \n"
+        "jg 3f \n"
+
+        /* Do the last iteration */
+        "2: \n"
+        "movd (%0), %%mm0 \n"
+        "paddw %%mm7, %%mm0 \n"
+        "psraw $2, %%mm0 \n"
+        "pxor %%mm2, %%mm2 \n"
+        "pcmpeqd %%mm0, %%mm3 \n"
+        "pcmpeqd %%mm0, %%mm2 \n"
+        "por %%mm3, %%mm2 \n"
+        "pmovmskb %%mm2, %k2 \n"
+        "pmaxsw %%mm5, %%mm0 \n"
+        "pminsw %%mm6, %%mm0 \n"
+        "movd %%mm0, (%5,%4,4) \n"
+        "inc %4 \n"
+        "and $1, %k2 \n"
+        "sub %2, %4 \n"
+        "3: \n"
+        :"+r"(mvc), "=m"(M64( dst )), "+r"(tmp), "+r"(mvc_max), "+r"(i)
+        :"r"(dst), "m"(pw_2), "g"(pmv), "m"(pd_32), "m"(M64( mvc ))
+    );
+    return i;
+}
 
 #endif
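
Note on the cabac_mvd_sum hunk above: the old paddb could wrap when the left and top MVD magnitudes were both large, and a wrapped byte is negative under pcmpgtb's signed compare, which could misclassify the context. paddusb saturates the per-byte sum and pminub caps it at 33, so the compares against 2 and 32 always see a small non-negative value. A scalar C sketch of the resulting behavior (a reconstruction from the asm, not code from the patch; the function name is illustrative):

    #include <stdint.h>

    /* Sketch: what x264_cabac_mvd_sum_mmx2 computes, one byte per MV component. */
    static uint16_t cabac_mvd_sum_ref( const uint8_t *mvdleft, const uint8_t *mvdtop )
    {
        uint16_t amvd = 0;
        for( int i = 0; i < 2; i++ )        /* i = 0: x component, i = 1: y component */
        {
            unsigned sum = mvdleft[i] + mvdtop[i];
            if( sum > 33 )
                sum = 33;                   /* paddusb + pminub pb_33 */
            unsigned ctx = (sum > 2) + (sum > 32); /* pcmpgtb pb_2 / pb_32, then psubb */
            amvd |= ctx << (8 * i);         /* pack the two per-component contexts */
        }
        return amvd;
    }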
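Likewise, a scalar sketch of the contract of the new x264_predictor_clip_mmx2 (again a reconstruction from the asm; the helper names are illustrative, and the real code processes two MVs per iteration and compacts skipped entries with a variable psrlq instead of branching): each candidate MV is clipped to mv_limit scaled to subpel, candidates equal to pmv or to zero are dropped, and the number of surviving predictors is returned. x264_predictor_roundclip_mmx2 differs only in first rounding each input from subpel to fullpel with (mv + 2) >> 2; accordingly it omits the psllw $2 that scales mv_limit.

    #include <stdint.h>

    static int16_t clip3( int v, int lo, int hi ) { return v < lo ? lo : v > hi ? hi : v; }

    /* Sketch: behavioral model of x264_predictor_clip_mmx2. */
    static int predictor_clip_ref( int16_t (*dst)[2], int16_t (*mvc)[2], int i_mvc,
                                   const int16_t mv_limit[2][2], uint32_t pmv )
    {
        int n = 0;
        for( int i = 0; i < i_mvc; i++ )
        {
            /* Pack x|y into one dword, matching the pcmpeqd tests. */
            uint32_t mv = (uint16_t)mvc[i][0] | ((uint32_t)(uint16_t)mvc[i][1] << 16);
            if( mv == pmv || mv == 0 )      /* skip the predicted MV and the zero MV */
                continue;
            /* pmaxsw/pminsw against {min,max} converted to subpel (psllw $2) */
            dst[n][0] = clip3( mvc[i][0], 4 * mv_limit[0][0], 4 * mv_limit[1][0] );
            dst[n][1] = clip3( mvc[i][1], 4 * mv_limit[0][1], 4 * mv_limit[1][1] );
            n++;
        }
        return n;
    }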