/*****************************************************************************
* transforms_yuvmmx.h: MMX YUV transformation assembly
*****************************************************************************
- * Copyright (C) 1999-2004 VideoLAN
+ * Copyright (C) 1999-2004 the VideoLAN team
* $Id$
*
* Authors: Olie Lho <ollie@sis.com.tw>
USED_U64(mmx_mask_fc) = 0xfcfcfcfcfcfcfcfcULL;
#undef USED_U64
+/* Use RIP-relative code in PIC mode on amd64 */
+#if defined(__x86_64__) && defined(__PIC__)
+# define G "(%%rip)"
+#else
+# define G
+#endif
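+
+/* Illustration: with G defined, string pasting turns the template fragment
+ *     "psubsw mmx_80w"G", %%mm0"
+ * into
+ *     "psubsw mmx_80w(%%rip), %%mm0"
+ * so the assembler sees  psubsw mmx_80w(%rip), %mm0  and the constant tables
+ * are addressed RIP-relative, as position-independent code on x86-64
+ * requires; on other targets G is empty and the plain absolute symbol
+ * reference is kept. */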
+
#define MMX_INIT_16 " \n\
movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
#movl $0, (%3) # cache preload for image \n\
"
+#define INTRINSICS_INIT_16 \
+    tmp64 = *(uint32_t *)p_u; \
+    mm0 = (__m64)tmp64;              /* Load 4 Cb  00 00 00 00 u3 u2 u1 u0 */ \
+    tmp64 = *(uint32_t *)p_v; \
+    mm1 = (__m64)tmp64;              /* Load 4 Cr  00 00 00 00 v3 v2 v1 v0 */ \
+    mm4 = (__m64)(uint64_t)0;        /* zero mm4 */ \
+    mm6 = (__m64)*(uint64_t *)p_y;   /* Load 8 Y   Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ \
+    /* *(uint16_t *)p_buffer = 0; */ /* cache preload for image */
+
#define MMX_INIT_16_GRAY " \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
#movl $0, (%3) # cache preload for image \n\
movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"
+#define INTRINSICS_INIT_32 \
+    tmp64 = *(uint32_t *)p_u; \
+    mm0 = (__m64)tmp64;              /* Load 4 Cb  00 00 00 00 u3 u2 u1 u0 */ \
+    *(uint16_t *)p_buffer = 0;       /* cache preload for image */ \
+    tmp64 = *(uint32_t *)p_v; \
+    mm1 = (__m64)tmp64;              /* Load 4 Cr  00 00 00 00 v3 v2 v1 v0 */ \
+    mm4 = (__m64)(uint64_t)0;        /* zero mm4 */ \
+    mm6 = (__m64)*(uint64_t *)p_y;   /* Load 8 Y   Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+
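+/* Sketch (an assumption, inferred from how the INTRINSICS_* macros
+ * dereference their operands) of the locals a caller is expected to have
+ * in scope:
+ *
+ *     #include <mmintrin.h>
+ *     __m64     mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7;
+ *     uint64_t  tmp64;
+ *     uint8_t  *p_y, *p_u, *p_v;    luma / chroma source pointers
+ *     uint16_t *p_buffer;           packed output (uint32_t * for the
+ *                                   32 bpp unpack variant)
+ */
+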
/*
* Do the multiply part of the conversion for even and odd pixels,
* register usage:
# convert the chroma part \n\
punpcklbw %%mm4, %%mm0 # scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
punpcklbw %%mm4, %%mm1 # scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
-psubsw mmx_80w, %%mm0 # Cb -= 128 \n\
-psubsw mmx_80w, %%mm1 # Cr -= 128 \n\
+psubsw mmx_80w"G", %%mm0 # Cb -= 128 \n\
+psubsw mmx_80w"G", %%mm1 # Cr -= 128 \n\
psllw $3, %%mm0 # Promote precision \n\
psllw $3, %%mm1 # Promote precision \n\
movq %%mm0, %%mm2 # Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
movq %%mm1, %%mm3 # Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
-pmulhw mmx_U_green, %%mm2 # Mul Cb with green coeff -> Cb green \n\
-pmulhw mmx_V_green, %%mm3 # Mul Cr with green coeff -> Cr green \n\
-pmulhw mmx_U_blue, %%mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
-pmulhw mmx_V_red, %%mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
+pmulhw mmx_U_green"G", %%mm2 # Mul Cb with green coeff -> Cb green \n\
+pmulhw mmx_V_green"G", %%mm3 # Mul Cr with green coeff -> Cr green \n\
+pmulhw mmx_U_blue"G", %%mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
+pmulhw mmx_V_red"G", %%mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
paddsw %%mm3, %%mm2 # Cb green + Cr green -> Cgreen \n\
\n\
# convert the luma part \n\
-psubusb mmx_10w, %%mm6 # Y -= 16 \n\
+psubusb mmx_10w"G", %%mm6 # Y -= 16 \n\
movq %%mm6, %%mm7 # Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
-pand mmx_00ffw, %%mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
+pand mmx_00ffw"G", %%mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
psrlw $8, %%mm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 \n\
psllw $3, %%mm6 # Promote precision \n\
psllw $3, %%mm7 # Promote precision \n\
-pmulhw mmx_Y_coeff, %%mm6 # Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 \n\
-pmulhw mmx_Y_coeff, %%mm7 # Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
+pmulhw mmx_Y_coeff"G", %%mm6 # Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 \n\
+pmulhw mmx_Y_coeff"G", %%mm7 # Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
"
+#define INTRINSICS_YUV_MUL \
+ mm0 = _mm_unpacklo_pi8(mm0, mm4); \
+ mm1 = _mm_unpacklo_pi8(mm1, mm4); \
+ mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w); \
+ mm1 = _mm_subs_pi16(mm1, (__m64)mmx_80w); \
+ mm0 = _mm_slli_pi16(mm0, 3); \
+ mm1 = _mm_slli_pi16(mm1, 3); \
+ mm2 = mm0; \
+ mm3 = mm1; \
+ mm2 = _mm_mulhi_pi16(mm2, (__m64)mmx_U_green); \
+ mm3 = _mm_mulhi_pi16(mm3, (__m64)mmx_V_green); \
+ mm0 = _mm_mulhi_pi16(mm0, (__m64)mmx_U_blue); \
+ mm1 = _mm_mulhi_pi16(mm1, (__m64)mmx_V_red); \
+ mm2 = _mm_adds_pi16(mm2, mm3); \
+ \
+ mm6 = _mm_subs_pu8(mm6, (__m64)mmx_10w); \
+ mm7 = mm6; \
+ mm6 = _mm_and_si64(mm6, (__m64)mmx_00ffw); \
+ mm7 = _mm_srli_pi16(mm7, 8); \
+ mm6 = _mm_slli_pi16(mm6, 3); \
+ mm7 = _mm_slli_pi16(mm7, 3); \
+ mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff); \
+ mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);
+
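+/* Fixed-point note: the operands are promoted with _mm_slli_pi16(x, 3) and
+ * _mm_mulhi_pi16 keeps the high 16 bits of the 32-bit product, so each
+ * multiply above computes
+ *     ((value << 3) * coeff) >> 16  ==  (value * coeff) / 8192
+ * i.e. the mmx_* coefficient tables hold conversion factors scaled by 2^13. */
+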
/*
* Do the addition part of the conversion for even and odd pixels,
* register usage:
punpcklbw %%mm5, %%mm2 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
"
+#define INTRINSICS_YUV_ADD \
+ mm3 = mm0; \
+ mm4 = mm1; \
+ mm5 = mm2; \
+ mm0 = _mm_adds_pi16(mm0, mm6); \
+ mm3 = _mm_adds_pi16(mm3, mm7); \
+ mm1 = _mm_adds_pi16(mm1, mm6); \
+ mm4 = _mm_adds_pi16(mm4, mm7); \
+ mm2 = _mm_adds_pi16(mm2, mm6); \
+ mm5 = _mm_adds_pi16(mm5, mm7); \
+ \
+ mm0 = _mm_packs_pu16(mm0, mm0); \
+ mm1 = _mm_packs_pu16(mm1, mm1); \
+ mm2 = _mm_packs_pu16(mm2, mm2); \
+ \
+ mm3 = _mm_packs_pu16(mm3, mm3); \
+ mm4 = _mm_packs_pu16(mm4, mm4); \
+ mm5 = _mm_packs_pu16(mm5, mm5); \
+ \
+ mm0 = _mm_unpacklo_pi8(mm0, mm3); \
+ mm1 = _mm_unpacklo_pi8(mm1, mm4); \
+ mm2 = _mm_unpacklo_pi8(mm2, mm5);
+
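+/* The addition step adds the chroma terms to the even and odd luma samples
+ * with signed saturation, packs the results back to bytes with unsigned
+ * saturation (clamping to [0, 255]), and re-interleaves the even/odd halves
+ * into pixel order: mm0 holds B, mm1 holds R, mm2 holds G. */
+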
/*
* Grayscale case, only use Y
*/
#define MMX_YUV_GRAY " \n\
# convert the luma part \n\
-psubusb mmx_10w, %%mm6 \n\
+psubusb mmx_10w"G", %%mm6 \n\
movq %%mm6, %%mm7 \n\
-pand mmx_00ffw, %%mm6 \n\
+pand mmx_00ffw"G", %%mm6 \n\
psrlw $8, %%mm7 \n\
psllw $3, %%mm6 \n\
psllw $3, %%mm7 \n\
-pmulhw mmx_Y_coeff, %%mm6 \n\
-pmulhw mmx_Y_coeff, %%mm7 \n\
+pmulhw mmx_Y_coeff"G", %%mm6 \n\
+pmulhw mmx_Y_coeff"G", %%mm7 \n\
packuswb %%mm6, %%mm6 \n\
packuswb %%mm7, %%mm7 \n\
punpcklbw %%mm7, %%mm6 \n\
#define MMX_UNPACK_16_GRAY " \n\
movq %%mm6, %%mm5 \n\
-pand mmx_mask_f8, %%mm6 \n\
-pand mmx_mask_fc, %%mm5 \n\
+pand mmx_mask_f8"G", %%mm6 \n\
+pand mmx_mask_fc"G", %%mm5 \n\
movq %%mm6, %%mm7 \n\
psrlw $3, %%mm7 \n\
pxor %%mm3, %%mm3 \n\
#define MMX_UNPACK_15 " \n\
# mask unneeded bits off \n\
-pand mmx_mask_f8, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
+pand mmx_mask_f8"G", %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
-pand mmx_mask_f8, %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
-pand mmx_mask_f8, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
+pand mmx_mask_f8"G", %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
+pand mmx_mask_f8"G", %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $1,%%mm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq %%mm0, %%mm5 # Copy B7-B0 \n\
movq %%mm5, 8(%3) # store pixel 4-7 \n\
"
+#define INTRINSICS_UNPACK_15 \
+ mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
+ mm0 = _mm_srli_pi16(mm0, 3); \
+ mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8); \
+ mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
+ mm1 = _mm_srli_pi16(mm1, 1); \
+ mm4 = (__m64)(uint64_t)0; \
+ mm5 = mm0; \
+ mm7 = mm2; \
+ \
+ mm2 = _mm_unpacklo_pi8(mm2, mm4); \
+ mm0 = _mm_unpacklo_pi8(mm0, mm1); \
+ mm2 = _mm_slli_pi16(mm2, 2); \
+ mm0 = _mm_or_si64(mm0, mm2); \
+ tmp64 = *(uint64_t *)(p_y + 8); \
+ mm6 = (__m64)tmp64; \
+ *(uint64_t *)p_buffer = (uint64_t)mm0; \
+ \
+ mm7 = _mm_unpackhi_pi8(mm7, mm4); \
+ mm5 = _mm_unpackhi_pi8(mm5, mm1); \
+ mm7 = _mm_slli_pi16(mm7, 2); \
+ tmp64 = (uint64_t)*(uint32_t *)(p_u + 4); \
+ mm0 = (__m64)tmp64; \
+ mm5 = _mm_or_si64(mm5, mm7); \
+ tmp64 = (uint64_t)*(uint32_t *)(p_v + 4); \
+ mm1 = (__m64)tmp64; \
+ *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
+
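+/* Net effect of the masks and shifts above: each output word is a 15-bit
+ * RGB 5:5:5 pixel,
+ *     bit 15   bits 14-10   bits 9-5   bits 4-0
+ *     0        R7..R3       G7..G3     B7..B3
+ */
+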
/*
* convert RGB plane to RGB 16 bits,
* mm0 -> B, mm1 -> R, mm2 -> G,
#define MMX_UNPACK_16 " \n\
# mask unneeded bits off \n\
-pand mmx_mask_f8, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
-pand mmx_mask_fc, %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
-pand mmx_mask_f8, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
+pand mmx_mask_f8"G", %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
+pand mmx_mask_fc"G", %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
+pand mmx_mask_f8"G", %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
pxor %%mm4, %%mm4 # zero mm4 \n\
movq %%mm0, %%mm5 # Copy B7-B0 \n\
movq %%mm5, 8(%3) # store pixel 4-7 \n\
"
+#define INTRINSICS_UNPACK_16 \
+ mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
+ mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc); \
+ mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
+ mm0 = _mm_srli_pi16(mm0, 3); \
+ mm4 = (__m64)(uint64_t)0; \
+ mm5 = mm0; \
+ mm7 = mm2; \
+ \
+ mm2 = _mm_unpacklo_pi8(mm2, mm4); \
+ mm0 = _mm_unpacklo_pi8(mm0, mm1); \
+ mm2 = _mm_slli_pi16(mm2, 3); \
+ mm0 = _mm_or_si64(mm0, mm2); \
+ tmp64 = *(uint64_t *)(p_y + 8); \
+ mm6 = (__m64)tmp64; \
+ *(uint64_t *)p_buffer = (uint64_t)mm0; \
+ \
+ mm7 = _mm_unpackhi_pi8(mm7, mm4); \
+ mm5 = _mm_unpackhi_pi8(mm5, mm1); \
+ mm7 = _mm_slli_pi16(mm7, 3); \
+ tmp64 = (uint64_t)*(uint32_t *)(p_u + 4); \
+ mm0 = (__m64)tmp64; \
+ mm5 = _mm_or_si64(mm5, mm7); \
+ tmp64 = (uint64_t)*(uint32_t *)(p_v + 4); \
+ mm1 = (__m64)tmp64; \
+ *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
+
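+/* Net effect of the masks and shifts above: each output word is a 16-bit
+ * RGB 5:6:5 pixel,
+ *     bits 15-11   bits 10-5   bits 4-0
+ *     R7..R3       G7..G2      B7..B3
+ */
+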
/*
* convert RGB plane to RGB packed format,
* mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
#movq 8(%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
"
+#define INTRINSICS_UNPACK_32 \
+ mm3 = (__m64)(uint64_t)0; \
+ mm6 = mm0; \
+ mm7 = mm1; \
+ mm4 = mm0; \
+ mm5 = mm1; \
+ mm6 = _mm_unpacklo_pi8(mm6, mm2); \
+ mm7 = _mm_unpacklo_pi8(mm7, mm3); \
+ mm6 = _mm_unpacklo_pi16(mm6, mm7); \
+ *(uint64_t *)p_buffer = (uint64_t)mm6; \
+ mm6 = mm0; \
+ mm6 = _mm_unpacklo_pi8(mm6, mm2); \
+ mm6 = _mm_unpackhi_pi16(mm6, mm7); \
+ *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6; \
+ mm4 = _mm_unpackhi_pi8(mm4, mm2); \
+ mm5 = _mm_unpackhi_pi8(mm5, mm3); \
+ mm4 = _mm_unpacklo_pi16(mm4, mm5); \
+ *(uint64_t *)(p_buffer + 4) = (uint64_t)mm4; \
+ mm4 = mm0; \
+ mm4 = _mm_unpackhi_pi8(mm4, mm2); \
+ mm4 = _mm_unpackhi_pi16(mm4, mm5); \
+    *(uint64_t *)(p_buffer + 6) = (uint64_t)mm4;
+
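+/* Each 32-bit pixel is written as the byte sequence B, G, R, 0 in memory
+ * (0x00RRGGBB when read as a little-endian uint32_t), which follows from
+ * interleaving mm0 (B), mm2 (G), mm1 (R) and the zeroed mm3 above. */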