static const uint64_t ff_pb_3 attribute_used __attribute__ ((aligned(8))) = 0x0303030303030303ULL;
static const uint64_t ff_pb_7 attribute_used __attribute__ ((aligned(8))) = 0x0707070707070707ULL;
static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
+static const uint64_t ff_pb_A1 attribute_used __attribute__ ((aligned(8))) = 0xA1A1A1A1A1A1A1A1ULL;
+static const uint64_t ff_pb_5F attribute_used __attribute__ ((aligned(8))) = 0x5F5F5F5F5F5F5F5FULL;
static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;
#define JUMPALIGN() __asm __volatile (ASMALIGN(3)::)
/* 3Dnow specific */
#define DEF(x) x ## _3dnow
-/* for Athlons PAVGUSB is prefered */
+/* for Athlons PAVGUSB is preferred */
#define PAVGB "pavgusb"
#include "dsputil_mmx_avg.h"
"punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
"punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\
+#define TRANSPOSE4(a,b,c,d,t)\
+ SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
+ SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
+ SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
+ SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
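+/* after TRANSPOSE4 the transposed rows are left in a, d, t and c (b is
+ * clobbered), which is why the STORE4 calls below pass a, d, t, c */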
+
/***********************************/
/* standard MMX */
LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\
-#define MMABS(a,z)\
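+/* per-word absolute value without an abs instruction: z = (a < 0) ? -1 : 0,
+ * then a ^= z; a -= z two's-complement negates the negative words */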
+#define MMABS_MMX(a,z)\
"pxor " #z ", " #z " \n\t"\
"pcmpgtw " #a ", " #z " \n\t"\
"pxor " #z ", " #a " \n\t"\
"psubw " #z ", " #a " \n\t"
-#define MMABS_SUM(a,z, sum)\
- "pxor " #z ", " #z " \n\t"\
- "pcmpgtw " #a ", " #z " \n\t"\
- "pxor " #z ", " #a " \n\t"\
- "psubw " #z ", " #a " \n\t"\
- "paddusw " #a ", " #sum " \n\t"
-
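+/* MMX2 variant: z = -a, then pmaxsw keeps max(a, -a) = |a| */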
#define MMABS_MMX2(a,z)\
"pxor " #z ", " #z " \n\t"\
"psubw " #a ", " #z " \n\t"\
"pmaxsw " #z ", " #a " \n\t"
-#define MMABS_SUM_MMX2(a,z, sum)\
- "pxor " #z ", " #z " \n\t"\
- "psubw " #a ", " #z " \n\t"\
- "pmaxsw " #z ", " #a " \n\t"\
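+/* MMABS_SUM_*: absolute value of a, then a per-word add into sum with
+ * unsigned saturation (paddusw), so overflow clamps at 0xFFFF instead of
+ * wrapping */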
+#define MMABS_SUM_MMX(a,z, sum)\
+ MMABS_MMX(a,z)\
"paddusw " #a ", " #sum " \n\t"
-#define TRANSPOSE4(a,b,c,d,t)\
- SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
- SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
- SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
- SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
+#define MMABS_SUM_MMX2(a,z, sum)\
+ MMABS_MMX2(a,z)\
+ "paddusw " #a ", " #sum " \n\t"
#define LOAD4(o, a, b, c, d)\
"movq "#o"(%1), " #a " \n\t"\
"movq "#c", "#o"+32(%1) \n\t"\
"movq "#d", "#o"+48(%1) \n\t"\
-static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
- DECLARE_ALIGNED_8(uint64_t, temp[16]);
- int sum=0;
-
- assert(h==8);
-
- diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
-
- asm volatile(
- LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
- LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
-
- HADAMARD48
-
- "movq %%mm7, 112(%1) \n\t"
-
- TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
- STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
-
- "movq 112(%1), %%mm7 \n\t"
- TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
- STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
-
- LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
- LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
-
- HADAMARD48
-
- "movq %%mm7, 120(%1) \n\t"
-
- TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
- STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
-
- "movq 120(%1), %%mm7 \n\t"
- TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
- "movq %%mm7, %%mm5 \n\t"//FIXME remove
- "movq %%mm6, %%mm7 \n\t"
- "movq %%mm0, %%mm6 \n\t"
-// STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
-
- LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
-// LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
-
- HADAMARD48
- "movq %%mm7, 64(%1) \n\t"
- MMABS(%%mm0, %%mm7)
- MMABS_SUM(%%mm1, %%mm7, %%mm0)
- MMABS_SUM(%%mm2, %%mm7, %%mm0)
- MMABS_SUM(%%mm3, %%mm7, %%mm0)
- MMABS_SUM(%%mm4, %%mm7, %%mm0)
- MMABS_SUM(%%mm5, %%mm7, %%mm0)
- MMABS_SUM(%%mm6, %%mm7, %%mm0)
- "movq 64(%1), %%mm1 \n\t"
- MMABS_SUM(%%mm1, %%mm7, %%mm0)
- "movq %%mm0, 64(%1) \n\t"
-
- LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
- LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
-
- HADAMARD48
- "movq %%mm7, (%1) \n\t"
- MMABS(%%mm0, %%mm7)
- MMABS_SUM(%%mm1, %%mm7, %%mm0)
- MMABS_SUM(%%mm2, %%mm7, %%mm0)
- MMABS_SUM(%%mm3, %%mm7, %%mm0)
- MMABS_SUM(%%mm4, %%mm7, %%mm0)
- MMABS_SUM(%%mm5, %%mm7, %%mm0)
- MMABS_SUM(%%mm6, %%mm7, %%mm0)
- "movq (%1), %%mm1 \n\t"
- MMABS_SUM(%%mm1, %%mm7, %%mm0)
- "movq 64(%1), %%mm1 \n\t"
- MMABS_SUM(%%mm1, %%mm7, %%mm0)
-
- "movq %%mm0, %%mm1 \n\t"
- "psrlq $32, %%mm0 \n\t"
- "paddusw %%mm1, %%mm0 \n\t"
- "movq %%mm0, %%mm1 \n\t"
- "psrlq $16, %%mm0 \n\t"
- "paddusw %%mm1, %%mm0 \n\t"
- "movd %%mm0, %0 \n\t"
-
- : "=r" (sum)
- : "r"(temp)
- );
- return sum&0xFFFF;
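+/* HSUM_*: fold the four words of a into one saturating 16-bit sum in the low
+ * word of dst (t is scratch), hence the sum&0xFFFF below; the MMX2 version
+ * uses pshufw instead of the movq/psrlq pairs */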
+#define HSUM_MMX(a, t, dst)\
+ "movq "#a", "#t" \n\t"\
+ "psrlq $32, "#a" \n\t"\
+ "paddusw "#t", "#a" \n\t"\
+ "movq "#a", "#t" \n\t"\
+ "psrlq $16, "#a" \n\t"\
+ "paddusw "#t", "#a" \n\t"\
+ "movd "#a", "#dst" \n\t"\
+
+#define HSUM_MMX2(a, t, dst)\
+ "pshufw $0x0E, "#a", "#t" \n\t"\
+ "paddusw "#t", "#a" \n\t"\
+ "pshufw $0x01, "#a", "#t" \n\t"\
+ "paddusw "#t", "#a" \n\t"\
+ "movd "#a", "#dst" \n\t"\
+
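+/* Template for the 8x8 Hadamard-transformed difference (SATD): the two
+ * sources are differenced into temp, HADAMARD48 is applied in two passes
+ * with a transpose in between, and the absolute values are accumulated and
+ * folded with HSUM. It is expanded once per CPU type below, with
+ * MMABS/MMABS_SUM/HSUM bound to the matching primitives. */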
+#define HADAMARD8_DIFF_MMX(cpu) \
+static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
+ DECLARE_ALIGNED_8(uint64_t, temp[16]);\
+ int sum=0;\
+\
+ assert(h==8);\
+\
+ diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);\
+\
+ asm volatile(\
+ LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)\
+ LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)\
+\
+ HADAMARD48\
+\
+ "movq %%mm7, 112(%1) \n\t"\
+\
+ TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\
+ STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)\
+\
+ "movq 112(%1), %%mm7 \n\t"\
+ TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\
+ STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)\
+\
+ LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)\
+ LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)\
+\
+ HADAMARD48\
+\
+ "movq %%mm7, 120(%1) \n\t"\
+\
+ TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\
+ STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)\
+\
+ "movq 120(%1), %%mm7 \n\t"\
+ TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\
+ "movq %%mm7, %%mm5 \n\t"/*FIXME remove*/\
+ "movq %%mm6, %%mm7 \n\t"\
+ "movq %%mm0, %%mm6 \n\t"\
+\
+ LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)\
+\
+ HADAMARD48\
+ "movq %%mm7, 64(%1) \n\t"\
+ MMABS(%%mm0, %%mm7)\
+ MMABS_SUM(%%mm1, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm2, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm3, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm4, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm5, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm6, %%mm7, %%mm0)\
+ "movq 64(%1), %%mm1 \n\t"\
+ MMABS_SUM(%%mm1, %%mm7, %%mm0)\
+ "movq %%mm0, 64(%1) \n\t"\
+\
+ LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)\
+ LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)\
+\
+ HADAMARD48\
+ "movq %%mm7, (%1) \n\t"\
+ MMABS(%%mm0, %%mm7)\
+ MMABS_SUM(%%mm1, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm2, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm3, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm4, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm5, %%mm7, %%mm0)\
+ MMABS_SUM(%%mm6, %%mm7, %%mm0)\
+ "movq (%1), %%mm1 \n\t"\
+ MMABS_SUM(%%mm1, %%mm7, %%mm0)\
+ "movq 64(%1), %%mm1 \n\t"\
+ MMABS_SUM(%%mm1, %%mm7, %%mm0)\
+\
+ HSUM(%%mm0, %%mm1, %0)\
+\
+ : "=r" (sum)\
+ : "r"(temp)\
+ );\
+ return sum&0xFFFF;\
}
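+/* instantiate the template: bind the per-CPU primitives, expand, then #undef
+ * them so the next variant can rebind */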
-static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
- DECLARE_ALIGNED_8(uint64_t, temp[16]);
- int sum=0;
+#define MMABS(a,z) MMABS_MMX(a,z)
+#define MMABS_SUM(a,z,sum) MMABS_SUM_MMX(a,z,sum)
+#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
+HADAMARD8_DIFF_MMX(mmx)
+#undef MMABS
+#undef MMABS_SUM
+#undef HSUM
- assert(h==8);
+#define MMABS(a,z) MMABS_MMX2(a,z)
+#define MMABS_SUM(a,z,sum) MMABS_SUM_MMX2(a,z,sum)
+#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
+HADAMARD8_DIFF_MMX(mmx2)
+#undef MMABS
+#undef MMABS_SUM
+#undef HSUM
- diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
+WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
+WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
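+/* sum of squared differences between an int8_t and an int16_t array,
+ * i.e. sum((pix2[i]-pix1[i])^2); size appears to be assumed to be a
+ * positive multiple of 8 */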
+static int ssd_int8_vs_int16_mmx(int8_t *pix1, int16_t *pix2, int size){
+ int sum;
+ long i=size;
asm volatile(
- LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
- LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
-
- HADAMARD48
-
- "movq %%mm7, 112(%1) \n\t"
-
- TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
- STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
-
- "movq 112(%1), %%mm7 \n\t"
- TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
- STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
-
- LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
- LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
-
- HADAMARD48
-
- "movq %%mm7, 120(%1) \n\t"
-
- TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
- STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
-
- "movq 120(%1), %%mm7 \n\t"
- TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
- "movq %%mm7, %%mm5 \n\t"//FIXME remove
- "movq %%mm6, %%mm7 \n\t"
- "movq %%mm0, %%mm6 \n\t"
-// STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
-
- LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
-// LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
-
- HADAMARD48
- "movq %%mm7, 64(%1) \n\t"
- MMABS_MMX2(%%mm0, %%mm7)
- MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
- "movq 64(%1), %%mm1 \n\t"
- MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
- "movq %%mm0, 64(%1) \n\t"
-
- LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
- LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
-
- HADAMARD48
- "movq %%mm7, (%1) \n\t"
- MMABS_MMX2(%%mm0, %%mm7)
- MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
- MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
- "movq (%1), %%mm1 \n\t"
- MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
- "movq 64(%1), %%mm1 \n\t"
- MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
-
- "pshufw $0x0E, %%mm0, %%mm1 \n\t"
- "paddusw %%mm1, %%mm0 \n\t"
- "pshufw $0x01, %%mm0, %%mm1 \n\t"
- "paddusw %%mm1, %%mm0 \n\t"
- "movd %%mm0, %0 \n\t"
-
- : "=r" (sum)
- : "r"(temp)
+ "pxor %%mm4, %%mm4 \n"
+ "1: \n"
+ "sub $8, %0 \n"
+ "movq (%2,%0), %%mm2 \n"
+ "movq (%3,%0,2), %%mm0 \n"
+ "movq 8(%3,%0,2), %%mm1 \n"
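+        /* spread the 8 int8 samples into the high byte of each word (the
+           stale low bytes of %%mm3 are irrelevant) and sign-extend them
+           back down with the arithmetic shift */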
+ "punpckhbw %%mm2, %%mm3 \n"
+ "punpcklbw %%mm2, %%mm2 \n"
+ "psraw $8, %%mm3 \n"
+ "psraw $8, %%mm2 \n"
+ "psubw %%mm3, %%mm1 \n"
+ "psubw %%mm2, %%mm0 \n"
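+        /* square each word difference, pair-sum into dwords (pmaddwd) and
+           accumulate in %%mm4 */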
+ "pmaddwd %%mm1, %%mm1 \n"
+ "pmaddwd %%mm0, %%mm0 \n"
+ "paddd %%mm1, %%mm4 \n"
+ "paddd %%mm0, %%mm4 \n"
+ "jg 1b \n"
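+        /* fold the two dword partial sums in %%mm4 */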
+ "movq %%mm4, %%mm3 \n"
+ "psrlq $32, %%mm3 \n"
+ "paddd %%mm3, %%mm4 \n"
+ "movd %%mm4, %1 \n"
+ :"+r"(i), "=r"(sum)
+ :"r"(pix1), "r"(pix2)
);
- return sum&0xFFFF;
+ return sum;
}
-
-WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
-WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
#endif //CONFIG_ENCODERS
#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
#endif /* CONFIG_ENCODERS */
#define PREFETCH(name, op) \
-void name(void *mem, int stride, int h){\
+static void name(void *mem, int stride, int h){\
const uint8_t *p= mem;\
do{\
asm volatile(#op" %0" :: "m"(*p));\
/* XXX: those functions should be suppressed ASAP when all IDCTs are
converted */
+#ifdef CONFIG_GPL
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
ff_mmx_idct (block);
ff_mmxext_idct (block);
add_pixels_clamped_mmx(block, dest, line_size);
}
+#endif
static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
{
ff_vp3_idct_sse2(block);
ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
}
-void float_to_int16_3dnow(int16_t *dst, const float *src, int len){
+static void float_to_int16_3dnow(int16_t *dst, const float *src, int len){
// not bit-exact: pf2id uses different rounding than C and SSE
int i;
for(i=0; i<len; i+=4) {
}
asm volatile("femms");
}
-void float_to_int16_sse(int16_t *dst, const float *src, int len){
+static void float_to_int16_sse(int16_t *dst, const float *src, int len){
int i;
for(i=0; i<len; i+=4) {
asm volatile(
asm volatile("emms");
}
-#ifdef CONFIG_SNOW_ENCODER
+#ifdef CONFIG_SNOW_DECODER
extern void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width);
extern void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width);
extern void ff_snow_vertical_compose97i_sse2(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
extern void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
-extern void ff_snow_inner_add_yblock_sse2(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
+extern void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
-extern void ff_snow_inner_add_yblock_mmx(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
+extern void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
#endif
}
c->add_8x8basis= add_8x8basis_mmx;
+ c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;
+
#endif //CONFIG_ENCODERS
c->h263_v_loop_filter= h263_v_loop_filter_mmx;
c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
}
-#ifdef CONFIG_SNOW_ENCODER
+#ifdef CONFIG_SNOW_DECODER
if(mm_flags & MM_SSE2){
c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;