* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
*/
-#include "../dsputil.h"
-#include "../simple_idct.h"
-#include "../mpegvideo.h"
+#include "dsputil.h"
+#include "dsputil_mmx.h"
+#include "simple_idct.h"
+#include "mpegvideo.h"
#include "x86_cpu.h"
#include "mmx.h"
+#include "vp3dsp_mmx.h"
+#include "vp3dsp_sse2.h"
+#include "h263.h"
//#undef NDEBUG
//#include <assert.h>
int mm_flags; /* multimedia extension flags */
/* pixel operations */
-static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
-static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
-static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_wone) = 0x0001000100010001ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_wabs) = 0xFFFFFFFFFFFFFFFFULL;
-static const uint64_t ff_pdw_80000000[2] attribute_used __attribute__ ((aligned(16))) =
+DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};
-static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
-static const uint64_t ff_pw_3 attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
-static const uint64_t ff_pw_4 attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
-static const uint64_t ff_pw_5 attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
-static const uint64_t ff_pw_8 attribute_used __attribute__ ((aligned(8))) = 0x0008000800080008ULL;
-static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
-static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
-static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL;
-static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;
-
-static const uint64_t ff_pb_1 attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
-static const uint64_t ff_pb_3 attribute_used __attribute__ ((aligned(8))) = 0x0303030303030303ULL;
-static const uint64_t ff_pb_7 attribute_used __attribute__ ((aligned(8))) = 0x0707070707070707ULL;
-static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
-static const uint64_t ff_pb_A1 attribute_used __attribute__ ((aligned(8))) = 0xA1A1A1A1A1A1A1A1ULL;
-static const uint64_t ff_pb_5F attribute_used __attribute__ ((aligned(8))) = 0x5F5F5F5F5F5F5F5FULL;
-static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3 ) = 0x0003000300030003ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4 ) = 0x0004000400040004ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_5 ) = 0x0005000500050005ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_8 ) = 0x0008000800080008ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_16 ) = 0x0010001000100010ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_32 ) = 0x0020002000200020ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_64 ) = 0x0040004000400040ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
+DECLARE_ALIGNED_16(const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
+
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1 ) = 0x0101010101010101ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3 ) = 0x0303030303030303ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7 ) = 0x0707070707070707ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_5F ) = 0x5F5F5F5F5F5F5F5FULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
+DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
+
+DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
+DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };
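/* A rough sketch of what the DECLARE_ALIGNED_* wrappers above expand to
   (an assumption about their exact shape; the real definitions live in
   dsputil.h and also cover compilers without __attribute__):

   #define DECLARE_ALIGNED_8(t, v)  t v __attribute__ ((aligned (8)))
   #define DECLARE_ALIGNED_16(t, v) t v __attribute__ ((aligned (16)))

   The constants also lose "static" so they can be shared with the new
   vp3/vc1 files through dsputil_mmx.h, which is what makes the old
   attribute_used boilerplate unnecessary. */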
#define JUMPALIGN() __asm __volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)
"paddb %%" #regd ", %%" #regd " \n\t" ::)
#ifndef PIC
-#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
-#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
+#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
+#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared libraries it is better to generate the constants in registers than to load them from memory
// pcmpeqd -> -1
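/* In the PIC case the constants are instead synthesized in registers, since
   inline asm cannot easily reference globals position-independently. A
   sketch of the trick hinted at above (pcmpeqd yields all-ones, which is
   then shifted into the wanted pattern):

   #define MOVQ_BONE(regd) \
       __asm __volatile ( \
       "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
       "psrlw $15, %%" #regd " \n\t" \
       "packuswb %%" #regd ", %%" #regd " \n\t" ::)  // -> 0x0101010101010101

   #define MOVQ_WTWO(regd) \
       __asm __volatile ( \
       "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
       "psrlw $15, %%" #regd " \n\t" \
       "psllw $1, %%" #regd " \n\t" ::)              // -> 0x0002000200020002
*/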
/* 3Dnow specific */
#define DEF(x) x ## _3dnow
-/* for Athlons PAVGUSB is preferred */
#define PAVGB "pavgusb"
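/* pavgusb (3DNow) and pavgb (MMX2) both compute (a+b+1)>>1 per unsigned
   byte, so dsputil_mmx_avg.h can be instantiated for either ISA simply by
   redefining DEF() and PAVGB before including it. */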
#include "dsputil_mmx_avg.h"
"paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
+ if(ENABLE_ANY_H263) {
const int strength= ff_h263_loop_filter_strength[qscale];
asm volatile(
"+m" (*(uint64_t*)(src + 1*stride))
: "g" (2*strength), "m"(ff_pb_FC)
);
+ }
}
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
}
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
+ if(ENABLE_ANY_H263) {
const int strength= ff_h263_loop_filter_strength[qscale];
uint64_t temp[4] __attribute__ ((aligned(8)));
uint8_t *btemp= (uint8_t*)temp;
"r" ((long) stride ),
"r" ((long)(3*stride))
);
+ }
}
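/* ENABLE_ANY_H263 is a compile-time constant, so when no H.263-family codec
   is configured the compiler drops the guarded bodies as dead code. Unlike
   the #ifdef style used elsewhere in this file, the code stays visible to
   the parser and type checker in every configuration. */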
#ifdef CONFIG_ENCODERS
"mov"#m1" "#mm"0, %0 \n\t"\
DIFF_PIXELS_1(m0, mm##7, mm##0, (%1,%3,4), (%2,%3,4))\
"mov"#m1" %0, "#mm"0 \n\t"\
- : "=m"(temp), "+r"(p1b), "+r"(p2b)\
+ : "+m"(temp), "+r"(p1b), "+r"(p2b)\
: "r"((long)stride), "r"((long)stride*3)\
);\
}
+ //the "+m"(temp) is needed as gcc 2.95 sometimes fails to compile "=m"(temp)
#define DIFF_PIXELS_4x8(p1,p2,stride,temp) DIFF_PIXELS_8(d, q, %%mm, p1, p2, stride, temp)
#define DIFF_PIXELS_8x8(p1,p2,stride,temp) DIFF_PIXELS_8(q, dqa, %%xmm, p1, p2, stride, temp)
#undef HSUM
#undef DCT_SAD
-static int ssd_int8_vs_int16_mmx(int8_t *pix1, int16_t *pix2, int size){
+static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){
int sum;
long i=size;
asm volatile(
}
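/* What the MMX loop above computes, as a scalar sketch (mirroring the C
   fallback; the _ref name is hypothetical): */
static int ssd_int8_vs_int16_ref(const int8_t *pix1, const int16_t *pix2,
                                 int size)
{
    int score = 0, i;
    for (i = 0; i < size; i++)          /* sum of squared differences */
        score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    return score;
}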
#ifdef CONFIG_ENCODERS
-static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
- long i=0;
- assert(FFABS(scale) < 256);
- scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
+#define PHADDD(a, t)\
+ "movq "#a", "#t" \n\t"\
+ "psrlq $32, "#a" \n\t"\
+ "paddd "#t", "#a" \n\t"
+/*
+ pmulhw: dst[0-15]=(src[0-15]*dst[0-15])[16-31]
+ pmulhrw: dst[0-15]=(src[0-15]*dst[0-15] + 0x8000)[16-31]
+ pmulhrsw: dst[0-15]=(src[0-15]*dst[0-15] + 0x4000)[15-30]
+ */
+#define PMULHRW(x, y, s, o)\
+ "pmulhw " #s ", "#x " \n\t"\
+ "pmulhw " #s ", "#y " \n\t"\
+ "paddw " #o ", "#x " \n\t"\
+ "paddw " #o ", "#y " \n\t"\
+ "psraw $1, "#x " \n\t"\
+ "psraw $1, "#y " \n\t"
+#define DEF(x) x ## _mmx
+#define SET_RND MOVQ_WONE
+#define SCALE_OFFSET 1
+
+#include "dsputil_mmx_qns.h"
- asm volatile(
- "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
- "psrlw $15, %%mm6 \n\t" // 1w
- "pxor %%mm7, %%mm7 \n\t"
- "movd %4, %%mm5 \n\t"
- "punpcklwd %%mm5, %%mm5 \n\t"
- "punpcklwd %%mm5, %%mm5 \n\t"
- "1: \n\t"
- "movq (%1, %0), %%mm0 \n\t"
- "movq 8(%1, %0), %%mm1 \n\t"
- "pmulhw %%mm5, %%mm0 \n\t"
- "pmulhw %%mm5, %%mm1 \n\t"
- "paddw %%mm6, %%mm0 \n\t"
- "paddw %%mm6, %%mm1 \n\t"
- "psraw $1, %%mm0 \n\t"
- "psraw $1, %%mm1 \n\t"
- "paddw (%2, %0), %%mm0 \n\t"
- "paddw 8(%2, %0), %%mm1 \n\t"
- "psraw $6, %%mm0 \n\t"
- "psraw $6, %%mm1 \n\t"
- "pmullw (%3, %0), %%mm0 \n\t"
- "pmullw 8(%3, %0), %%mm1 \n\t"
- "pmaddwd %%mm0, %%mm0 \n\t"
- "pmaddwd %%mm1, %%mm1 \n\t"
- "paddd %%mm1, %%mm0 \n\t"
- "psrld $4, %%mm0 \n\t"
- "paddd %%mm0, %%mm7 \n\t"
- "add $16, %0 \n\t"
- "cmp $128, %0 \n\t" //FIXME optimize & bench
- " jb 1b \n\t"
- "movq %%mm7, %%mm6 \n\t"
- "psrlq $32, %%mm7 \n\t"
- "paddd %%mm6, %%mm7 \n\t"
- "psrld $2, %%mm7 \n\t"
- "movd %%mm7, %0 \n\t"
+#undef DEF
+#undef SET_RND
+#undef SCALE_OFFSET
+#undef PMULHRW
- : "+r" (i)
- : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
- );
- return i;
-}
+#define DEF(x) x ## _3dnow
+#define SET_RND(x)
+#define SCALE_OFFSET 0
+#define PMULHRW(x, y, s, o)\
+ "pmulhrw " #s ", "#x " \n\t"\
+ "pmulhrw " #s ", "#y " \n\t"
-static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
- long i=0;
+#include "dsputil_mmx_qns.h"
+
+#undef DEF
+#undef SET_RND
+#undef SCALE_OFFSET
+#undef PMULHRW
+
+#ifdef HAVE_SSSE3
+#undef PHADDD
+#define DEF(x) x ## _ssse3
+#define SET_RND(x)
+#define SCALE_OFFSET -1
+#define PHADDD(a, t)\
+ "pshufw $0x0E, "#a", "#t" \n\t"\
+ "paddd "#t", "#a" \n\t" /* faster than phaddd on core2 */
+#define PMULHRW(x, y, s, o)\
+ "pmulhrsw " #s ", "#x " \n\t"\
+ "pmulhrsw " #s ", "#y " \n\t"
+
+#include "dsputil_mmx_qns.h"
+
+#undef DEF
+#undef SET_RND
+#undef SCALE_OFFSET
+#undef PMULHRW
+#undef PHADDD
+#endif //HAVE_SSSE3
- if(FFABS(scale) < 256){
- scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
- asm volatile(
- "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
- "psrlw $15, %%mm6 \n\t" // 1w
- "movd %3, %%mm5 \n\t"
- "punpcklwd %%mm5, %%mm5 \n\t"
- "punpcklwd %%mm5, %%mm5 \n\t"
- "1: \n\t"
- "movq (%1, %0), %%mm0 \n\t"
- "movq 8(%1, %0), %%mm1 \n\t"
- "pmulhw %%mm5, %%mm0 \n\t"
- "pmulhw %%mm5, %%mm1 \n\t"
- "paddw %%mm6, %%mm0 \n\t"
- "paddw %%mm6, %%mm1 \n\t"
- "psraw $1, %%mm0 \n\t"
- "psraw $1, %%mm1 \n\t"
- "paddw (%2, %0), %%mm0 \n\t"
- "paddw 8(%2, %0), %%mm1 \n\t"
- "movq %%mm0, (%2, %0) \n\t"
- "movq %%mm1, 8(%2, %0) \n\t"
- "add $16, %0 \n\t"
- "cmp $128, %0 \n\t" //FIXME optimize & bench
- " jb 1b \n\t"
-
- : "+r" (i)
- : "r"(basis), "r"(rem), "g"(scale)
- );
- }else{
- for(i=0; i<8*8; i++){
- rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
- }
- }
-}
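/* Scalar sketch of what the templated QNS functions compute (mirroring the
   removed MMX code and C fallback above; the _ref name is hypothetical,
   BASIS_SHIFT/RECON_SHIFT come from the encoder headers). add_8x8basis
   applies the same rescaled basis to rem in place, as the removed fallback
   shows. */
static int try_8x8basis_ref(int16_t rem[64], int16_t weight[64],
                            int16_t basis[64], int scale)
{
    int i;
    unsigned int sum = 0;
    for (i = 0; i < 64; i++) {
        /* rescale one basis coefficient and add it to the residual */
        int b = (rem[i] + ((basis[i]*scale +
                 (1 << (BASIS_SHIFT - RECON_SHIFT - 1)))
                 >> (BASIS_SHIFT - RECON_SHIFT))) >> RECON_SHIFT;
        /* accumulate the weighted squared error */
        sum += (weight[i]*b) * (weight[i]*b) >> 4;
    }
    return sum >> 2;
}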
#endif /* CONFIG_ENCODERS */
#define PREFETCH(name, op) \
avg_pixels16_mmx(dst, src, stride, 16);
}
+/* VC1 specific */
+void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);
+
+void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
+ put_pixels8_mmx(dst, src, stride, 8);
+}
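/* mc00 is the no-subpel case of the VC-1 motion compensation, i.e. a plain
   8x8 block copy, which is why it can reuse put_pixels8_mmx directly. */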
+
/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);
-void ff_vp3_idct_sse2(int16_t *input_data);
-void ff_vp3_idct_mmx(int16_t *data);
-void ff_vp3_dsp_init_mmx(void);
-
/* XXX: these functions should be removed as soon as all IDCTs are converted */
#ifdef CONFIG_GPL
add_pixels_clamped_mmx(block, dest, line_size);
}
#endif
-static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
-{
- ff_vp3_idct_sse2(block);
- put_signed_pixels_clamped_mmx(block, dest, line_size);
-}
-static void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block)
-{
- ff_vp3_idct_sse2(block);
- add_pixels_clamped_mmx(block, dest, line_size);
-}
-static void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block)
-{
- ff_vp3_idct_mmx(block);
- put_signed_pixels_clamped_mmx(block, dest, line_size);
-}
-static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
-{
- ff_vp3_idct_mmx(block);
- add_pixels_clamped_mmx(block, dest, line_size);
-}
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
ff_idct_xvid_mmx (block);
}
}
+#ifdef CONFIG_ENCODERS
+static void apply_welch_window_sse2(const int32_t *data, int len, double *w_data)
+{
+ double c = 2.0 / (len-1.0);
+ int n2 = len>>1;
+ long i = -n2*sizeof(int32_t);
+ long j = n2*sizeof(int32_t);
+ asm volatile(
+ "movsd %0, %%xmm7 \n\t"
+ "movapd %1, %%xmm6 \n\t"
+ "movapd %2, %%xmm5 \n\t"
+ "movlhps %%xmm7, %%xmm7 \n\t"
+ "subpd %%xmm5, %%xmm7 \n\t"
+ "addsd %%xmm6, %%xmm7 \n\t"
+ ::"m"(c), "m"(*ff_pd_1), "m"(*ff_pd_2)
+ );
+#define WELCH(MOVPD)\
+ asm volatile(\
+ "1: \n\t"\
+ "movapd %%xmm7, %%xmm1 \n\t"\
+ "mulpd %%xmm1, %%xmm1 \n\t"\
+ "movapd %%xmm6, %%xmm0 \n\t"\
+ "subpd %%xmm1, %%xmm0 \n\t"\
+ "pshufd $0x4e, %%xmm0, %%xmm1 \n\t"\
+ "cvtpi2pd (%4,%0), %%xmm2 \n\t"\
+ "cvtpi2pd (%5,%1), %%xmm3 \n\t"\
+ "mulpd %%xmm0, %%xmm2 \n\t"\
+ "mulpd %%xmm1, %%xmm3 \n\t"\
+ "movapd %%xmm2, (%2,%0,2) \n\t"\
+ MOVPD" %%xmm3, (%3,%1,2) \n\t"\
+ "subpd %%xmm5, %%xmm7 \n\t"\
+ "sub $8, %1 \n\t"\
+ "add $8, %0 \n\t"\
+ "jl 1b \n\t"\
+ :"+&r"(i), "+&r"(j)\
+ :"r"(w_data+n2), "r"(w_data+len-2-n2),\
+ "r"(data+n2), "r"(data+len-2-n2)\
+ );
+ if(len&1)
+ WELCH("movupd")
+ else
+ WELCH("movapd")
+#undef WELCH
+}
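/* Scalar sketch of the window being vectorized: a Welch window,
   w(x) = 1 - x*x with x running linearly from -1 to 1 across the block
   (the _ref name is hypothetical and the endpoint convention approximate;
   the SSE2 version above works outward from the middle, two doubles at a
   time): */
static void apply_welch_window_ref(const int32_t *data, int len, double *w_data)
{
    int i;
    double c = 2.0 / (len - 1.0);
    for (i = 0; i < len; i++) {
        double x = i*c - 1.0;
        w_data[i] = data[i] * (1.0 - x*x);
    }
}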
+
+static void flac_compute_autocorr_sse2(const int32_t *data, int len, int lag,
+ double *autoc)
+{
+ double tmp[len + lag + 2];
+ double *data1 = tmp + lag;
+ int j;
+
+ if((long)data1 & 15)
+ data1++;
+
+ apply_welch_window_sse2(data, len, data1);
+
+ for(j=0; j<lag; j++)
+ data1[j-lag]= 0.0;
+ data1[len] = 0.0;
+
+ for(j=0; j<lag; j+=2){
+ long i = -len*sizeof(double);
+ if(j == lag-2) {
+ asm volatile(
+ "movsd %6, %%xmm0 \n\t"
+ "movsd %6, %%xmm1 \n\t"
+ "movsd %6, %%xmm2 \n\t"
+ "1: \n\t"
+ "movapd (%4,%0), %%xmm3 \n\t"
+ "movupd -8(%5,%0), %%xmm4 \n\t"
+ "movapd (%5,%0), %%xmm5 \n\t"
+ "mulpd %%xmm3, %%xmm4 \n\t"
+ "mulpd %%xmm3, %%xmm5 \n\t"
+ "mulpd -16(%5,%0), %%xmm3 \n\t"
+ "addpd %%xmm4, %%xmm1 \n\t"
+ "addpd %%xmm5, %%xmm0 \n\t"
+ "addpd %%xmm3, %%xmm2 \n\t"
+ "add $16, %0 \n\t"
+ "jl 1b \n\t"
+ "movhlps %%xmm0, %%xmm3 \n\t"
+ "movhlps %%xmm1, %%xmm4 \n\t"
+ "movhlps %%xmm2, %%xmm5 \n\t"
+ "addsd %%xmm3, %%xmm0 \n\t"
+ "addsd %%xmm4, %%xmm1 \n\t"
+ "addsd %%xmm5, %%xmm2 \n\t"
+ "movsd %%xmm0, %1 \n\t"
+ "movsd %%xmm1, %2 \n\t"
+ "movsd %%xmm2, %3 \n\t"
+ :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1]), "=m"(autoc[j+2])
+ :"r"(data1+len), "r"(data1+len-j), "m"(*ff_pd_1)
+ );
+ } else {
+ asm volatile(
+ "movsd %5, %%xmm0 \n\t"
+ "movsd %5, %%xmm1 \n\t"
+ "1: \n\t"
+ "movapd (%3,%0), %%xmm3 \n\t"
+ "movupd -8(%4,%0), %%xmm4 \n\t"
+ "mulpd %%xmm3, %%xmm4 \n\t"
+ "mulpd (%4,%0), %%xmm3 \n\t"
+ "addpd %%xmm4, %%xmm1 \n\t"
+ "addpd %%xmm3, %%xmm0 \n\t"
+ "add $16, %0 \n\t"
+ "jl 1b \n\t"
+ "movhlps %%xmm0, %%xmm3 \n\t"
+ "movhlps %%xmm1, %%xmm4 \n\t"
+ "addsd %%xmm3, %%xmm0 \n\t"
+ "addsd %%xmm4, %%xmm1 \n\t"
+ "movsd %%xmm0, %1 \n\t"
+ "movsd %%xmm1, %2 \n\t"
+ :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1])
+ :"r"(data1+len), "r"(data1+len-j), "m"(*ff_pd_1)
+ );
+ }
+ }
+}
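/* Scalar sketch of the autocorrelation being vectorized (the SSE2 code does
   two or three lags per pass; each accumulator starts at 1.0, matching the
   ff_pd_1 loads above; the _ref name is hypothetical): */
static void flac_compute_autocorr_ref(const double *data1, int len, int lag,
                                      double *autoc)
{
    int i, j;
    for (j = 0; j <= lag; j++) {
        double sum = 1.0;
        for (i = 0; i < len; i++)
            sum += data1[i] * data1[i - j];  /* data1[-lag..-1] are zeroed */
        autoc[j] = sum;
    }
}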
+#endif // CONFIG_ENCODERS
+
static void vector_fmul_3dnow(float *dst, const float *src, int len){
long i = (len-4)*4;
asm volatile(
#endif //CONFIG_ENCODERS
- c->h263_v_loop_filter= h263_v_loop_filter_mmx;
- c->h263_h_loop_filter= h263_h_loop_filter_mmx;
+ if (ENABLE_ANY_H263) {
+ c->h263_v_loop_filter= h263_v_loop_filter_mmx;
+ c->h263_h_loop_filter= h263_h_loop_filter_mmx;
+ }
c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx;
c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
-#ifdef CONFIG_CAVS_DECODER
+ if (ENABLE_CAVS_DECODER)
ff_cavsdsp_init_mmx2(c, avctx);
-#endif
+
+ if (ENABLE_VC1_DECODER || ENABLE_WMV3_DECODER)
+ ff_vc1dsp_init_mmx(c, avctx);
#ifdef CONFIG_ENCODERS
c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
c->sum_abs_dctelem= sum_abs_dctelem_sse2;
c->hadamard8_diff[0]= hadamard8_diff16_sse2;
c->hadamard8_diff[1]= hadamard8_diff_sse2;
+ c->flac_compute_autocorr = flac_compute_autocorr_sse2;
}
#ifdef HAVE_SSSE3
if(mm_flags & MM_SSSE3){
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->try_8x8basis= try_8x8basis_ssse3;
+ }
+ c->add_8x8basis= add_8x8basis_ssse3;
c->sum_abs_dctelem= sum_abs_dctelem_ssse3;
c->hadamard8_diff[0]= hadamard8_diff16_ssse3;
c->hadamard8_diff[1]= hadamard8_diff_ssse3;
#endif
#ifdef CONFIG_SNOW_DECODER
- if(mm_flags & MM_SSE2){
+    if(mm_flags & MM_SSE2 & 0){ /* the "& 0" disables this path */
c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
+#ifdef HAVE_7REGS
c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
+#endif
c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
}
else{
+ if(mm_flags & MM_MMXEXT){
c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
+#ifdef HAVE_7REGS
c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
+#endif
+ }
c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
}
#endif
if(mm_flags & MM_3DNOW){
+#ifdef CONFIG_ENCODERS
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->try_8x8basis= try_8x8basis_3dnow;
+ }
+ c->add_8x8basis= add_8x8basis_3dnow;
+#endif //CONFIG_ENCODERS
c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
c->vector_fmul = vector_fmul_3dnow;
if(!(avctx->flags & CODEC_FLAG_BITEXACT))