X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=modules%2Fvideo_chroma%2Fi420_rgb_mmx.h;h=1c2f9bb5b02e1ecd676e8288cd41967ade9d7381;hb=467472bcecd6bae797dea03543ca105a61a8f6c2;hp=2a2ed8e9207def1fdebad0b5b73592e802c6d9c4;hpb=ae7273d20f50ce62af7f7a27c86103813b0cf92d;p=vlc

diff --git a/modules/video_chroma/i420_rgb_mmx.h b/modules/video_chroma/i420_rgb_mmx.h
index 2a2ed8e920..1c2f9bb5b0 100644
--- a/modules/video_chroma/i420_rgb_mmx.h
+++ b/modules/video_chroma/i420_rgb_mmx.h
@@ -5,7 +5,7 @@
  * $Id$
  *
  * Authors: Olie Lho
- *          Gaël Hendryckx
+ *          Gaël Hendryckx
  *          Samuel Hocevar
  *
  * This program is free software; you can redistribute it and/or modify
@@ -20,7 +20,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
  *****************************************************************************/

 /* hope these constant values are cache line aligned */
@@ -45,6 +45,13 @@ USED_U64(mmx_mask_f8) = 0xf8f8f8f8f8f8f8f8ULL;
 USED_U64(mmx_mask_fc) = 0xfcfcfcfcfcfcfcfcULL;
 #undef USED_U64

+/* Use RIP-relative code in PIC mode on amd64 */
+#if defined(__x86_64__) && defined(__PIC__)
+# define G "(%%rip)"
+#else
+# define G
+#endif
+
 #define MMX_INIT_16 " \n\
 movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
 movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
@@ -96,27 +103,27 @@ movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
 # convert the chroma part \n\
 punpcklbw %%mm4, %%mm0 # scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
 punpcklbw %%mm4, %%mm1 # scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
-psubsw mmx_80w, %%mm0 # Cb -= 128 \n\
-psubsw mmx_80w, %%mm1 # Cr -= 128 \n\
+psubsw mmx_80w"G", %%mm0 # Cb -= 128 \n\
+psubsw mmx_80w"G", %%mm1 # Cr -= 128 \n\
 psllw $3, %%mm0 # Promote precision \n\
 psllw $3, %%mm1 # Promote precision \n\
 movq %%mm0, %%mm2 # Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
 movq %%mm1, %%mm3 # Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
-pmulhw mmx_U_green, %%mm2 # Mul Cb with green coeff -> Cb green \n\
-pmulhw mmx_V_green, %%mm3 # Mul Cr with green coeff -> Cr green \n\
-pmulhw mmx_U_blue, %%mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
-pmulhw mmx_V_red, %%mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
+pmulhw mmx_U_green"G", %%mm2 # Mul Cb with green coeff -> Cb green \n\
+pmulhw mmx_V_green"G", %%mm3 # Mul Cr with green coeff -> Cr green \n\
+pmulhw mmx_U_blue"G", %%mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
+pmulhw mmx_V_red"G", %%mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
 paddsw %%mm3, %%mm2 # Cb green + Cr green -> Cgreen \n\
 \n\
 # convert the luma part \n\
-psubusb mmx_10w, %%mm6 # Y -= 16 \n\
+psubusb mmx_10w"G", %%mm6 # Y -= 16 \n\
 movq %%mm6, %%mm7 # Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
-pand mmx_00ffw, %%mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
+pand mmx_00ffw"G", %%mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
 psrlw $8, %%mm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 \n\
 psllw $3, %%mm6 # Promote precision \n\
 psllw $3, %%mm7 # Promote precision \n\
-pmulhw mmx_Y_coeff, %%mm6 # Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 \n\
-pmulhw mmx_Y_coeff, %%mm7 # Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
+pmulhw mmx_Y_coeff"G", %%mm6 # Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 \n\
+pmulhw mmx_Y_coeff"G", %%mm7 # Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
 "

 #define INTRINSICS_YUV_MUL \
@@ -208,14 +215,14 @@ punpcklbw %%mm5, %%mm2 # G7 G6 G5 G4 G3 G2 G1 G0 \n\

 #define MMX_YUV_GRAY " \n\
 # convert the luma part \n\
-psubusb mmx_10w, %%mm6 \n\
+psubusb mmx_10w"G", %%mm6 \n\
 movq %%mm6, %%mm7 \n\
-pand mmx_00ffw, %%mm6 \n\
+pand mmx_00ffw"G", %%mm6 \n\
 psrlw $8, %%mm7 \n\
 psllw $3, %%mm6 \n\
 psllw $3, %%mm7 \n\
-pmulhw mmx_Y_coeff, %%mm6 \n\
-pmulhw mmx_Y_coeff, %%mm7 \n\
+pmulhw mmx_Y_coeff"G", %%mm6 \n\
+pmulhw mmx_Y_coeff"G", %%mm7 \n\
 packuswb %%mm6, %%mm6 \n\
 packuswb %%mm7, %%mm7 \n\
 punpcklbw %%mm7, %%mm6 \n\
@@ -223,8 +230,8 @@ punpcklbw %%mm7, %%mm6 \n\

 #define MMX_UNPACK_16_GRAY " \n\
 movq %%mm6, %%mm5 \n\
-pand mmx_mask_f8, %%mm6 \n\
-pand mmx_mask_fc, %%mm5 \n\
+pand mmx_mask_f8"G", %%mm6 \n\
+pand mmx_mask_fc"G", %%mm5 \n\
 movq %%mm6, %%mm7 \n\
 psrlw $3, %%mm7 \n\
 pxor %%mm3, %%mm3 \n\
@@ -253,10 +260,10 @@ movq %%mm2, 8(%3) \n\

 #define MMX_UNPACK_15 " \n\
 # mask unneeded bits off \n\
-pand mmx_mask_f8, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
+pand mmx_mask_f8"G", %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
 psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
-pand mmx_mask_f8, %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
-pand mmx_mask_f8, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
+pand mmx_mask_f8"G", %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
+pand mmx_mask_f8"G", %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
 psrlw $1,%%mm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
 pxor %%mm4, %%mm4 # zero mm4 \n\
 movq %%mm0, %%mm5 # Copy B7-B0 \n\
@@ -317,9 +324,9 @@ movq %%mm5, 8(%3) # store pixel 4-7 \n\

 #define MMX_UNPACK_16 " \n\
 # mask unneeded bits off \n\
-pand mmx_mask_f8, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
-pand mmx_mask_fc, %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
-pand mmx_mask_f8, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
+pand mmx_mask_f8"G", %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
+pand mmx_mask_fc"G", %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
+pand mmx_mask_f8"G", %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
 psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
 pxor %%mm4, %%mm4 # zero mm4 \n\
 movq %%mm0, %%mm5 # Copy B7-B0 \n\
@@ -356,15 +363,18 @@ movq %%mm5, 8(%3) # store pixel 4-7 \n\
 mm0 = _mm_unpacklo_pi8(mm0, mm1); \
 mm2 = _mm_slli_pi16(mm2, 3); \
 mm0 = _mm_or_si64(mm0, mm2); \
-mm6 = (__m64)*(uint64_t *)(p_y + 8); \
+tmp64 = *(uint64_t *)(p_y + 8); \
+mm6 = (__m64)tmp64; \
 *(uint64_t *)p_buffer = (uint64_t)mm0; \
 \
 mm7 = _mm_unpackhi_pi8(mm7, mm4); \
 mm5 = _mm_unpackhi_pi8(mm5, mm1); \
 mm7 = _mm_slli_pi16(mm7, 3); \
-mm0 = (__m64)(uint64_t)*(uint32_t *)(p_u + 4); \
+tmp64 = (uint64_t)*(uint32_t *)(p_u + 4); \
+mm0 = (__m64)tmp64; \
 mm5 = _mm_or_si64(mm5, mm7); \
-mm1 = (__m64)(uint64_t)*(uint32_t *)(p_v + 4); \
+tmp64 = (uint64_t)*(uint32_t *)(p_v + 4); \
+mm1 = (__m64)tmp64; \
 *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5; /*
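
Note on the "G" macro hunks: on x86-64, position-independent code cannot
reference a data symbol through a 32-bit absolute address, so an operand such
as "psubsw mmx_80w, %%mm0" inside these asm strings yields a relocation
(typically R_X86_64_32S) that the linker rejects when building a shared
object. Appending "(%%rip)" to every constant reference switches them to
RIP-relative addressing, which is PIC-safe, while leaving the i386 build
unchanged. The standalone sketch below shows the same pattern outside VLC;
the file name, the symbol mask00ff (modelled on mmx_00ffw above) and the
function keep_low_bytes are illustrative, not part of this patch:

/* rip_demo.c - build with: gcc -O2 -fPIC -shared rip_demo.c (x86-64 only) */
#include <stdint.h>

#if defined(__x86_64__) && defined(__PIC__)
# define G "(%%rip)"   /* PIC: address constants relative to RIP          */
#else
# define G             /* non-PIC: plain absolute addressing assembles    */
#endif

/* Pin the asm-level symbol name and keep the constant alive, as the
 * USED_U64() macro at the top of i420_rgb_mmx.h does for its tables.     */
static const uint64_t mask00ff __asm__ ("mask00ff")
    __attribute__ ((used)) = 0x00ff00ff00ff00ffULL;

uint64_t keep_low_bytes (uint64_t x)
{
    /* Expands to "andq mask00ff(%rip), %0" under -fPIC; without the G
     * suffix the linker would refuse the absolute relocation in a .so.   */
    __asm__ ("andq mask00ff" G ", %0" : "+r" (x));
    return x;
}

The constants could instead be passed through "m" operand constraints,
letting the compiler pick PIC-safe addressing itself, but that would mean
reworking the operand list of every asm block; the string suffix leaves the
existing macros untouched apart from the added "G".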
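The final hunk touches the intrinsics variant rather than the asm strings:
direct casts of a loaded integer to __m64, such as
"mm6 = (__m64)*(uint64_t *)(p_y + 8)", are split into a load through a
uint64_t temporary followed by the cast. The loaded bits and the destination
type are unchanged; plausibly this works around compilers that reject casting
a dereferenced expression straight to the MMX vector type in one step. A
minimal sketch of the two forms (the helper name load_y8 is illustrative):

#include <stdint.h>
#include <mmintrin.h>

/* Both forms move the same 8 bytes of packed Y samples into an MMX value;
 * the patched form merely routes them through a scalar temporary.        */
static __m64 load_y8 (const uint8_t *p_y)
{
    /* old form: return (__m64)*(const uint64_t *)p_y;                    */
    uint64_t tmp64 = *(const uint64_t *)p_y;   /* scalar 64-bit load      */
    return (__m64)tmp64;                       /* reinterpret as __m64    */
}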