--- /dev/null
+++ b/mmx.h
+/* mmx.h
+
+ MultiMedia eXtensions GCC interface library for IA32.
+
+ To use this library, simply include this header file
+ and compile with GCC. You MUST have inlining enabled
+ in order for mmx_ok() to work; this can be done by
+ simply using -O on the GCC command line.
+
+ Compiling with -DMMX_TRACE will cause detailed trace
+ output to be sent to stderr for each mmx operation.
+ This adds lots of code, and obviously slows execution to
+ a crawl, but can be very useful for debugging.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY
+ EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT
+ LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS FOR ANY PARTICULAR PURPOSE.
+
+ 1997-99 by H. Dietz and R. Fisher
+
+ Notes:
+ It appears that the latest gas has the pand problem fixed, therefore
+ I'll undefine BROKEN_PAND by default.
+*/
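+
+/* A minimal usage sketch (everything below is this header's own API;
+   remember that mmx_ok() needs inlining, i.e. at least -O):
+
+	#include "mmx.h"
+
+	if( mmx_ok() )
+	{
+		mmx_t a, b;
+		a.q = 0x0001000100010001LL;	// note the LL suffix
+		b.q = 0x0004000300020001LL;
+		paddw( a, b );			// b.w[i] += a.w[i], 4 words at once
+		emms();				// restore FPU state before float code
+	}
+*/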
+
+#ifndef _MMX_H
+#define _MMX_H
+
+
+/* Warning: at this writing, the version of GAS packaged
+ with most Linux distributions does not handle the
+ parallel AND operation mnemonic correctly. If the
+ symbol BROKEN_PAND is defined, a slower alternative
+ coding will be used. If execution of mmxtest results
+ in an illegal instruction fault, define this symbol.
+*/
+#undef BROKEN_PAND
+
+
+/* The type of a value that fits in an MMX register
+ (note that long long constant values MUST be suffixed
+ by LL and unsigned long long values by ULL, lest
+ they be truncated by the compiler)
+*/
+typedef union {
+ long long q; /* Quadword (64-bit) value */
+ unsigned long long uq; /* Unsigned Quadword */
+ int d[2]; /* 2 Doubleword (32-bit) values */
+ unsigned int ud[2]; /* 2 Unsigned Doubleword */
+ short w[4]; /* 4 Word (16-bit) values */
+ unsigned short uw[4]; /* 4 Unsigned Word */
+ char b[8]; /* 8 Byte (8-bit) values */
+ unsigned char ub[8]; /* 8 Unsigned Byte */
+	float	s[2];	/* 2 Single-precision (32-bit) values */
+} __attribute__ ((aligned (8))) mmx_t; /* On an 8-byte (64-bit) boundary */
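+
+/* For example, the same 64 bits can be read back through any member
+   (a sketch; IA32 is little-endian, so index 0 is the lowest-order field):
+
+	mmx_t v;
+	v.uq = 0x8000700060005000ULL;	// ULL suffix, as noted above
+	// now v.uw[0] == 0x5000 and v.uw[3] == 0x8000
+*/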
+
+
+
+/* Function to test if multimedia instructions are supported...
+*/
+inline extern int
+mm_support(void)
+{
+ /* Returns 1 if MMX instructions are supported,
+ 3 if Cyrix MMX and Extended MMX instructions are supported
+ 5 if AMD MMX and 3DNow! instructions are supported
+ 0 if hardware does not support any of these
+ */
+ register int rval = 0;
+
+ __asm__ __volatile__ (
+ /* See if CPUID instruction is supported ... */
+ /* ... Get copies of EFLAGS into eax and ecx */
+ "pushf\n\t"
+ "popl %%eax\n\t"
+ "movl %%eax, %%ecx\n\t"
+
+ /* ... Toggle the ID bit in one copy and store */
+ /* to the EFLAGS reg */
+ "xorl $0x200000, %%eax\n\t"
+ "push %%eax\n\t"
+ "popf\n\t"
+
+ /* ... Get the (hopefully modified) EFLAGS */
+ "pushf\n\t"
+ "popl %%eax\n\t"
+
+ /* ... Compare and test result */
+ "xorl %%eax, %%ecx\n\t"
+ "testl $0x200000, %%ecx\n\t"
+ "jz NotSupported1\n\t" /* CPUID not supported */
+
+
+ /* Get standard CPUID information, and
+ go to a specific vendor section */
+ "movl $0, %%eax\n\t"
+ "cpuid\n\t"
+
+ /* Check for Intel */
+ "cmpl $0x756e6547, %%ebx\n\t"
+ "jne TryAMD\n\t"
+ "cmpl $0x49656e69, %%edx\n\t"
+ "jne TryAMD\n\t"
+	"cmpl $0x6c65746e, %%ecx\n\t"
+ "jne TryAMD\n\t"
+ "jmp Intel\n\t"
+
+ /* Check for AMD */
+ "\nTryAMD:\n\t"
+ "cmpl $0x68747541, %%ebx\n\t"
+ "jne TryCyrix\n\t"
+ "cmpl $0x69746e65, %%edx\n\t"
+ "jne TryCyrix\n\t"
+	"cmpl $0x444d4163, %%ecx\n\t"
+ "jne TryCyrix\n\t"
+ "jmp AMD\n\t"
+
+ /* Check for Cyrix */
+ "\nTryCyrix:\n\t"
+ "cmpl $0x69727943, %%ebx\n\t"
+ "jne NotSupported2\n\t"
+ "cmpl $0x736e4978, %%edx\n\t"
+ "jne NotSupported3\n\t"
+ "cmpl $0x64616574, %%ecx\n\t"
+ "jne NotSupported4\n\t"
+ /* Drop through to Cyrix... */
+
+
+ /* Cyrix Section */
+ /* See if extended CPUID level 80000001 is supported */
+ /* The value of CPUID/80000001 for the 6x86MX is undefined
+ according to the Cyrix CPU Detection Guide (Preliminary
+ Rev. 1.01 table 1), so we'll check the value of eax for
+ CPUID/0 to see if standard CPUID level 2 is supported.
+ According to the table, the only CPU which supports level
+ 2 is also the only one which supports extended CPUID levels.
+ */
+ "cmpl $0x2, %%eax\n\t"
+ "jne MMXtest\n\t" /* Use standard CPUID instead */
+
+ /* Extended CPUID supported (in theory), so get extended
+ features */
+ "movl $0x80000001, %%eax\n\t"
+ "cpuid\n\t"
+	"testl $0x00800000, %%edx\n\t"	/* Test for MMX */
+	"jz NotSupported5\n\t"		/* MMX not supported */
+	"testl $0x01000000, %%edx\n\t"	/* Test for Ext'd MMX */
+ "jnz EMMXSupported\n\t"
+	"movl $1, %0\n\n\t"		/* MMX Supported */
+	"jmp Return\n\n"
+	"EMMXSupported:\n\t"
+	"movl $3, %0\n\n\t"		/* EMMX and MMX Supported */
+ "jmp Return\n\t"
+
+
+ /* AMD Section */
+ "AMD:\n\t"
+
+ /* See if extended CPUID is supported */
+ "movl $0x80000000, %%eax\n\t"
+ "cpuid\n\t"
+ "cmpl $0x80000000, %%eax\n\t"
+	"jbe MMXtest\n\t"	/* Use standard CPUID instead (unsigned compare) */
+
+ /* Extended CPUID supported, so get extended features */
+ "movl $0x80000001, %%eax\n\t"
+ "cpuid\n\t"
+ "testl $0x00800000, %%edx\n\t" /* Test for MMX */
+ "jz NotSupported6\n\t" /* MMX not supported */
+ "testl $0x80000000, %%edx\n\t" /* Test for 3DNow! */
+ "jnz ThreeDNowSupported\n\t"
+	"movl $1, %0\n\n\t"		/* MMX Supported */
+	"jmp Return\n\n"
+	"ThreeDNowSupported:\n\t"
+	"movl $5, %0\n\n\t"		/* 3DNow! and MMX Supported */
+ "jmp Return\n\t"
+
+
+ /* Intel Section */
+ "Intel:\n\t"
+
+ /* Check for MMX */
+ "MMXtest:\n\t"
+ "movl $1, %%eax\n\t"
+ "cpuid\n\t"
+ "testl $0x00800000, %%edx\n\t" /* Test for MMX */
+ "jz NotSupported7\n\t" /* MMX Not supported */
+	"movl $1, %0\n\n\t"		/* MMX Supported */
+ "jmp Return\n\t"
+
+ /* Nothing supported */
+ "\nNotSupported1:\n\t"
+	"#movl $101, %0\n\n\t"
+	"\nNotSupported2:\n\t"
+	"#movl $102, %0\n\n\t"
+	"\nNotSupported3:\n\t"
+	"#movl $103, %0\n\n\t"
+	"\nNotSupported4:\n\t"
+	"#movl $104, %0\n\n\t"
+	"\nNotSupported5:\n\t"
+	"#movl $105, %0\n\n\t"
+	"\nNotSupported6:\n\t"
+	"#movl $106, %0\n\n\t"
+	"\nNotSupported7:\n\t"
+	"#movl $107, %0\n\n\t"
+	"movl $0, %0\n\n\t"
+
+ "Return:\n\t"
+ : "=a" (rval)
+ : /* no input */
+	: "ebx", "ecx", "edx"
+ );
+
+ /* Return */
+ return(rval);
+}
+
+/* Function to test if mmx instructions are supported...
+*/
+inline extern int
+mmx_ok(void)
+{
+ /* Returns 1 if MMX instructions are supported, 0 otherwise */
+ return ( mm_support() & 0x1 );
+}
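+
+/* The vendor extensions can be tested the same way (a sketch; the
+   values follow the return codes documented in mm_support()):
+
+	int mm = mm_support();
+	int has_mmx   = mm & 0x1;
+	int has_emmx  = (mm == 3);	// Cyrix Extended MMX
+	int has_3dnow = (mm == 5);	// AMD 3DNow!
+*/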
+
+
+/* Helper functions for the instruction macros that follow...
+ (note that memory-to-register, m2r, instructions are nearly
+ as efficient as register-to-register, r2r, instructions;
+ however, memory-to-memory instructions are really simulated
+ as a convenience, and are only 1/3 as efficient)
+*/
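+
+/* For instance, the three forms of a 4x16 parallel add (msrc and mdst
+   being mmx_t variables in this sketch):
+
+	paddw_m2r( msrc, mm1 );	// mm1 += 4 words loaded from msrc
+	paddw_r2r( mm2, mm1 );	// mm1 += mm2
+	paddw( msrc, mdst );	// staged through mm0: movq/paddw/movq,
+				// hence the 1/3 efficiency noted above
+*/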
+#ifdef MMX_TRACE
+
+/* Include the stuff for printing a trace to stderr...
+*/
+
+#include <stdio.h>
+
+#define mmx_i2r(op, imm, reg) \
+ { \
+ mmx_t mmx_trace; \
+ mmx_trace.uq = (imm); \
+ fprintf(stderr, #op "_i2r(" #imm "=0x%08x%08x, ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ ("movq %%" #reg ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #reg "=0x%08x%08x) => ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ (#op " %0, %%" #reg \
+ : /* nothing */ \
+ : "X" (imm)); \
+ __asm__ __volatile__ ("movq %%" #reg ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #reg "=0x%08x%08x\n", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ }
+
+#define mmx_m2r(op, mem, reg) \
+ { \
+ mmx_t mmx_trace; \
+ mmx_trace = (mem); \
+ fprintf(stderr, #op "_m2r(" #mem "=0x%08x%08x, ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ ("movq %%" #reg ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #reg "=0x%08x%08x) => ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ (#op " %0, %%" #reg \
+ : /* nothing */ \
+ : "X" (mem)); \
+ __asm__ __volatile__ ("movq %%" #reg ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #reg "=0x%08x%08x\n", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ }
+
+#define mmx_r2m(op, reg, mem) \
+ { \
+ mmx_t mmx_trace; \
+ __asm__ __volatile__ ("movq %%" #reg ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #op "_r2m(" #reg "=0x%08x%08x, ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ mmx_trace = (mem); \
+ fprintf(stderr, #mem "=0x%08x%08x) => ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ (#op " %%" #reg ", %0" \
+ : "=X" (mem) \
+ : /* nothing */ ); \
+ mmx_trace = (mem); \
+ fprintf(stderr, #mem "=0x%08x%08x\n", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ }
+
+#define mmx_r2r(op, regs, regd) \
+ { \
+ mmx_t mmx_trace; \
+ __asm__ __volatile__ ("movq %%" #regs ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #op "_r2r(" #regs "=0x%08x%08x, ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ ("movq %%" #regd ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #regd "=0x%08x%08x) => ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ (#op " %" #regs ", %" #regd); \
+ __asm__ __volatile__ ("movq %%" #regd ", %0" \
+ : "=X" (mmx_trace) \
+ : /* nothing */ ); \
+ fprintf(stderr, #regd "=0x%08x%08x\n", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ }
+
+#define mmx_m2m(op, mems, memd) \
+ { \
+ mmx_t mmx_trace; \
+ mmx_trace = (mems); \
+ fprintf(stderr, #op "_m2m(" #mems "=0x%08x%08x, ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ mmx_trace = (memd); \
+ fprintf(stderr, #memd "=0x%08x%08x) => ", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ __asm__ __volatile__ ("movq %0, %%mm0\n\t" \
+ #op " %1, %%mm0\n\t" \
+ "movq %%mm0, %0" \
+ : "=X" (memd) \
+ : "X" (mems)); \
+ mmx_trace = (memd); \
+ fprintf(stderr, #memd "=0x%08x%08x\n", \
+ mmx_trace.d[1], mmx_trace.d[0]); \
+ }
+
+#else
+
+/* These macros are a lot simpler without the tracing...
+*/
+
+#define mmx_i2r(op, imm, reg) \
+ __asm__ __volatile__ (#op " %0, %%" #reg \
+ : /* nothing */ \
+ : "X" (imm) )
+
+#define mmx_m2r(op, mem, reg) \
+ __asm__ __volatile__ (#op " %0, %%" #reg \
+ : /* nothing */ \
+ : "X" (mem))
+
+#define mmx_r2m(op, reg, mem) \
+ __asm__ __volatile__ (#op " %%" #reg ", %0" \
+ : "=X" (mem) \
+ : /* nothing */ )
+
+#define mmx_r2r(op, regs, regd) \
+ __asm__ __volatile__ (#op " %" #regs ", %" #regd)
+
+#define mmx_m2m(op, mems, memd) \
+ __asm__ __volatile__ ("movq %0, %%mm0\n\t" \
+ #op " %1, %%mm0\n\t" \
+ "movq %%mm0, %0" \
+ : "=X" (memd) \
+ : "X" (mems))
+
+#endif
+
+
+/* 1x64 MOVe Quadword
+ (this is both a load and a store...
+ in fact, it is the only way to store)
+*/
+#define movq_m2r(var, reg) mmx_m2r(movq, var, reg)
+#define movq_r2m(reg, var) mmx_r2m(movq, reg, var)
+#define movq_r2r(regs, regd) mmx_r2r(movq, regs, regd)
+#define movq(vars, vard) \
+ __asm__ __volatile__ ("movq %1, %%mm0\n\t" \
+ "movq %%mm0, %0" \
+ : "=X" (vard) \
+ : "X" (vars))
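+
+/* e.g., a register-mediated copy of one quadword (a sketch; src and
+   dst are mmx_t variables):
+
+	movq_m2r( src, mm1 );	// load 8 bytes into mm1
+	movq_r2m( mm1, dst );	// store them back out
+*/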
+
+
+/* 1x32 MOVe Doubleword
+ (like movq, this is both load and store...
+ but is most useful for moving things between
+ mmx registers and ordinary registers)
+*/
+#define movd_m2r(var, reg) mmx_m2r(movd, var, reg)
+#define movd_r2m(reg, var) mmx_r2m(movd, reg, var)
+#define movd_r2r(regs, regd) mmx_r2r(movd, regs, regd)
+#define movd(vars, vard) \
+ __asm__ __volatile__ ("movd %1, %%mm0\n\t" \
+ "movd %%mm0, %0" \
+ : "=X" (vard) \
+ : "X" (vars))
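+
+/* e.g., loading a 32-bit integer into the low half of an mmx register
+   (a sketch; i is a plain int):
+
+	int i = 42;
+	movd_m2r( i, mm1 );	// mm1.d[0] = i, and mm1.d[1] is zeroed
+*/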
+
+
+/* 2x32, 4x16, and 8x8 Parallel ADDs
+*/
+#define paddd_m2r(var, reg) mmx_m2r(paddd, var, reg)
+#define paddd_r2r(regs, regd) mmx_r2r(paddd, regs, regd)
+#define paddd(vars, vard) mmx_m2m(paddd, vars, vard)
+
+#define paddw_m2r(var, reg) mmx_m2r(paddw, var, reg)
+#define paddw_r2r(regs, regd) mmx_r2r(paddw, regs, regd)
+#define paddw(vars, vard) mmx_m2m(paddw, vars, vard)
+
+#define paddb_m2r(var, reg) mmx_m2r(paddb, var, reg)
+#define paddb_r2r(regs, regd) mmx_r2r(paddb, regs, regd)
+#define paddb(vars, vard) mmx_m2m(paddb, vars, vard)
+
+
+/* 4x16 and 8x8 Parallel ADDs using Saturation arithmetic
+*/
+#define paddsw_m2r(var, reg) mmx_m2r(paddsw, var, reg)
+#define paddsw_r2r(regs, regd) mmx_r2r(paddsw, regs, regd)
+#define paddsw(vars, vard) mmx_m2m(paddsw, vars, vard)
+
+#define paddsb_m2r(var, reg) mmx_m2r(paddsb, var, reg)
+#define paddsb_r2r(regs, regd) mmx_r2r(paddsb, regs, regd)
+#define paddsb(vars, vard) mmx_m2m(paddsb, vars, vard)
+
+
+/* 4x16 and 8x8 Parallel ADDs using Unsigned Saturation arithmetic
+*/
+#define paddusw_m2r(var, reg) mmx_m2r(paddusw, var, reg)
+#define paddusw_r2r(regs, regd) mmx_r2r(paddusw, regs, regd)
+#define paddusw(vars, vard) mmx_m2m(paddusw, vars, vard)
+
+#define paddusb_m2r(var, reg) mmx_m2r(paddusb, var, reg)
+#define paddusb_r2r(regs, regd) mmx_r2r(paddusb, regs, regd)
+#define paddusb(vars, vard) mmx_m2m(paddusb, vars, vard)
+
+
+/* 2x32, 4x16, and 8x8 Parallel SUBs
+*/
+#define psubd_m2r(var, reg) mmx_m2r(psubd, var, reg)
+#define psubd_r2r(regs, regd) mmx_r2r(psubd, regs, regd)
+#define psubd(vars, vard) mmx_m2m(psubd, vars, vard)
+
+#define psubw_m2r(var, reg) mmx_m2r(psubw, var, reg)
+#define psubw_r2r(regs, regd) mmx_r2r(psubw, regs, regd)
+#define psubw(vars, vard) mmx_m2m(psubw, vars, vard)
+
+#define psubb_m2r(var, reg) mmx_m2r(psubb, var, reg)
+#define psubb_r2r(regs, regd) mmx_r2r(psubb, regs, regd)
+#define psubb(vars, vard) mmx_m2m(psubb, vars, vard)
+
+
+/* 4x16 and 8x8 Parallel SUBs using Saturation arithmetic
+*/
+#define psubsw_m2r(var, reg) mmx_m2r(psubsw, var, reg)
+#define psubsw_r2r(regs, regd) mmx_r2r(psubsw, regs, regd)
+#define psubsw(vars, vard) mmx_m2m(psubsw, vars, vard)
+
+#define psubsb_m2r(var, reg) mmx_m2r(psubsb, var, reg)
+#define psubsb_r2r(regs, regd) mmx_r2r(psubsb, regs, regd)
+#define psubsb(vars, vard) mmx_m2m(psubsb, vars, vard)
+
+
+/* 4x16 and 8x8 Parallel SUBs using Unsigned Saturation arithmetic
+*/
+#define psubusw_m2r(var, reg) mmx_m2r(psubusw, var, reg)
+#define psubusw_r2r(regs, regd) mmx_r2r(psubusw, regs, regd)
+#define psubusw(vars, vard) mmx_m2m(psubusw, vars, vard)
+
+#define psubusb_m2r(var, reg) mmx_m2r(psubusb, var, reg)
+#define psubusb_r2r(regs, regd) mmx_r2r(psubusb, regs, regd)
+#define psubusb(vars, vard) mmx_m2m(psubusb, vars, vard)
+
+
+/* 4x16 Parallel MULs giving Low 4x16 portions of results
+*/
+#define pmullw_m2r(var, reg) mmx_m2r(pmullw, var, reg)
+#define pmullw_r2r(regs, regd) mmx_r2r(pmullw, regs, regd)
+#define pmullw(vars, vard) mmx_m2m(pmullw, vars, vard)
+
+
+/* 4x16 Parallel MULs giving High 4x16 portions of results
+*/
+#define pmulhw_m2r(var, reg) mmx_m2r(pmulhw, var, reg)
+#define pmulhw_r2r(regs, regd) mmx_r2r(pmulhw, regs, regd)
+#define pmulhw(vars, vard) mmx_m2m(pmulhw, vars, vard)
+
+
+/* 4x16->2x32 Parallel Mul-ADD
+ (muls like pmullw, then adds adjacent 16-bit fields
+ in the multiply result to make the final 2x32 result)
+*/
+#define pmaddwd_m2r(var, reg) mmx_m2r(pmaddwd, var, reg)
+#define pmaddwd_r2r(regs, regd) mmx_r2r(pmaddwd, regs, regd)
+#define pmaddwd(vars, vard) mmx_m2m(pmaddwd, vars, vard)
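+
+/* Concretely: with words {a0,a1,a2,a3} in the destination and
+   {b0,b1,b2,b3} in the source, pmaddwd leaves the two doublewords
+   {a0*b0 + a1*b1, a2*b2 + a3*b3} -- the core step of a dot product.
+*/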
+
+
+/* 1x64 bitwise AND
+*/
+#ifdef BROKEN_PAND
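+/* The workaround avoids the pand mnemonic entirely: pandn computes
+   dest = (~dest) & src, so a first pandn against all ones inverts
+   dest, and a second pandn with the real operand then yields
+   dest = dest_orig & src. */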
+#define pand_m2r(var, reg) \
+ { \
+ mmx_m2r(pandn, (mmx_t) -1LL, reg); \
+ mmx_m2r(pandn, var, reg); \
+ }
+#define pand_r2r(regs, regd) \
+ { \
+ mmx_m2r(pandn, (mmx_t) -1LL, regd); \
+		mmx_r2r(pandn, regs, regd); \
+ }
+#define pand(vars, vard) \
+ { \
+ movq_m2r(vard, mm0); \
+ mmx_m2r(pandn, (mmx_t) -1LL, mm0); \
+ mmx_m2r(pandn, vars, mm0); \
+ movq_r2m(mm0, vard); \
+ }
+#else
+#define pand_m2r(var, reg) mmx_m2r(pand, var, reg)
+#define pand_r2r(regs, regd) mmx_r2r(pand, regs, regd)
+#define pand(vars, vard) mmx_m2m(pand, vars, vard)
+#endif
+
+
+/* 1x64 bitwise AND with NOT of the destination
+   (i.e., dest = (~dest) & src)
+*/
+#define pandn_m2r(var, reg) mmx_m2r(pandn, var, reg)
+#define pandn_r2r(regs, regd) mmx_r2r(pandn, regs, regd)
+#define pandn(vars, vard) mmx_m2m(pandn, vars, vard)
+
+
+/* 1x64 bitwise OR
+*/
+#define por_m2r(var, reg) mmx_m2r(por, var, reg)
+#define por_r2r(regs, regd) mmx_r2r(por, regs, regd)
+#define por(vars, vard) mmx_m2m(por, vars, vard)
+
+
+/* 1x64 bitwise eXclusive OR
+*/
+#define pxor_m2r(var, reg) mmx_m2r(pxor, var, reg)
+#define pxor_r2r(regs, regd) mmx_r2r(pxor, regs, regd)
+#define pxor(vars, vard) mmx_m2m(pxor, vars, vard)
+
+
+/* 2x32, 4x16, and 8x8 Parallel CoMPare for EQuality
+ (resulting fields are either 0 or -1)
+*/
+#define pcmpeqd_m2r(var, reg) mmx_m2r(pcmpeqd, var, reg)
+#define pcmpeqd_r2r(regs, regd) mmx_r2r(pcmpeqd, regs, regd)
+#define pcmpeqd(vars, vard) mmx_m2m(pcmpeqd, vars, vard)
+
+#define pcmpeqw_m2r(var, reg) mmx_m2r(pcmpeqw, var, reg)
+#define pcmpeqw_r2r(regs, regd) mmx_r2r(pcmpeqw, regs, regd)
+#define pcmpeqw(vars, vard) mmx_m2m(pcmpeqw, vars, vard)
+
+#define pcmpeqb_m2r(var, reg) mmx_m2r(pcmpeqb, var, reg)
+#define pcmpeqb_r2r(regs, regd) mmx_r2r(pcmpeqb, regs, regd)
+#define pcmpeqb(vars, vard) mmx_m2m(pcmpeqb, vars, vard)
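+
+/* A typical masking idiom (a sketch; mm3 holds the data to keep):
+
+	pcmpeqb_r2r( mm2, mm1 );	// mm1.b[i] = (mm1.b[i] == mm2.b[i]) ? 0xff : 0
+	pand_r2r( mm3, mm1 );		// keep mm3's bytes only where they matched
+*/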
+
+
+/* 2x32, 4x16, and 8x8 Parallel CoMPare for Greater Than
+ (resulting fields are either 0 or -1)
+*/
+#define pcmpgtd_m2r(var, reg) mmx_m2r(pcmpgtd, var, reg)
+#define pcmpgtd_r2r(regs, regd) mmx_r2r(pcmpgtd, regs, regd)
+#define pcmpgtd(vars, vard) mmx_m2m(pcmpgtd, vars, vard)
+
+#define pcmpgtw_m2r(var, reg) mmx_m2r(pcmpgtw, var, reg)
+#define pcmpgtw_r2r(regs, regd) mmx_r2r(pcmpgtw, regs, regd)
+#define pcmpgtw(vars, vard) mmx_m2m(pcmpgtw, vars, vard)
+
+#define pcmpgtb_m2r(var, reg) mmx_m2r(pcmpgtb, var, reg)
+#define pcmpgtb_r2r(regs, regd) mmx_r2r(pcmpgtb, regs, regd)
+#define pcmpgtb(vars, vard) mmx_m2m(pcmpgtb, vars, vard)
+
+
+/* 1x64, 2x32, and 4x16 Parallel Shift Left Logical
+*/
+#define psllq_i2r(imm, reg) mmx_i2r(psllq, imm, reg)
+#define psllq_m2r(var, reg) mmx_m2r(psllq, var, reg)
+#define psllq_r2r(regs, regd) mmx_r2r(psllq, regs, regd)
+#define psllq(vars, vard) mmx_m2m(psllq, vars, vard)
+
+#define pslld_i2r(imm, reg) mmx_i2r(pslld, imm, reg)
+#define pslld_m2r(var, reg) mmx_m2r(pslld, var, reg)
+#define pslld_r2r(regs, regd) mmx_r2r(pslld, regs, regd)
+#define pslld(vars, vard) mmx_m2m(pslld, vars, vard)
+
+#define psllw_i2r(imm, reg) mmx_i2r(psllw, imm, reg)
+#define psllw_m2r(var, reg) mmx_m2r(psllw, var, reg)
+#define psllw_r2r(regs, regd) mmx_r2r(psllw, regs, regd)
+#define psllw(vars, vard) mmx_m2m(psllw, vars, vard)
+
+
+/* 1x64, 2x32, and 4x16 Parallel Shift Right Logical
+*/
+#define psrlq_i2r(imm, reg) mmx_i2r(psrlq, imm, reg)
+#define psrlq_m2r(var, reg) mmx_m2r(psrlq, var, reg)
+#define psrlq_r2r(regs, regd) mmx_r2r(psrlq, regs, regd)
+#define psrlq(vars, vard) mmx_m2m(psrlq, vars, vard)
+
+#define psrld_i2r(imm, reg) mmx_i2r(psrld, imm, reg)
+#define psrld_m2r(var, reg) mmx_m2r(psrld, var, reg)
+#define psrld_r2r(regs, regd) mmx_r2r(psrld, regs, regd)
+#define psrld(vars, vard) mmx_m2m(psrld, vars, vard)
+
+#define psrlw_i2r(imm, reg) mmx_i2r(psrlw, imm, reg)
+#define psrlw_m2r(var, reg) mmx_m2r(psrlw, var, reg)
+#define psrlw_r2r(regs, regd) mmx_r2r(psrlw, regs, regd)
+#define psrlw(vars, vard) mmx_m2m(psrlw, vars, vard)
+
+
+/* 2x32 and 4x16 Parallel Shift Right Arithmetic
+*/
+#define psrad_i2r(imm, reg) mmx_i2r(psrad, imm, reg)
+#define psrad_m2r(var, reg) mmx_m2r(psrad, var, reg)
+#define psrad_r2r(regs, regd) mmx_r2r(psrad, regs, regd)
+#define psrad(vars, vard) mmx_m2m(psrad, vars, vard)
+
+#define psraw_i2r(imm, reg) mmx_i2r(psraw, imm, reg)
+#define psraw_m2r(var, reg) mmx_m2r(psraw, var, reg)
+#define psraw_r2r(regs, regd) mmx_r2r(psraw, regs, regd)
+#define psraw(vars, vard) mmx_m2m(psraw, vars, vard)
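+
+/* The arithmetic shift is what makes rounded averages cheap: after
+   summing two word vectors, add a bias of 1 per word and shift right
+   by 1 (a sketch; bias is an mmx_t holding 0x0001000100010001LL):
+
+	paddw_r2r( mm2, mm1 );	// mm1.w[i] = a + b
+	paddw_m2r( bias, mm1 );	// mm1.w[i] = a + b + 1
+	psraw_i2r( 1, mm1 );	// mm1.w[i] = (a + b + 1) >> 1
+*/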
+
+
+/* 2x32->4x16 and 4x16->8x8 PACK and Signed Saturate
+ (packs source and dest fields into dest in that order)
+*/
+#define packssdw_m2r(var, reg) mmx_m2r(packssdw, var, reg)
+#define packssdw_r2r(regs, regd) mmx_r2r(packssdw, regs, regd)
+#define packssdw(vars, vard) mmx_m2m(packssdw, vars, vard)
+
+#define packsswb_m2r(var, reg) mmx_m2r(packsswb, var, reg)
+#define packsswb_r2r(regs, regd) mmx_r2r(packsswb, regs, regd)
+#define packsswb(vars, vard) mmx_m2m(packsswb, vars, vard)
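+
+/* e.g., packsswb_r2r( mm2, mm1 ) saturates mm1's four words into the
+   low four bytes of the result and mm2's four words into the high
+   four bytes. */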
+
+
+/* 4x16->8x8 PACK and Unsigned Saturate
+ (packs source and dest fields into dest in that order)
+*/
+#define packuswb_m2r(var, reg) mmx_m2r(packuswb, var, reg)
+#define packuswb_r2r(regs, regd) mmx_r2r(packuswb, regs, regd)
+#define packuswb(vars, vard) mmx_m2m(packuswb, vars, vard)
+
+
+/* 2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK Low
+ (interleaves low half of dest with low half of source
+ as padding in each result field)
+*/
+#define punpckldq_m2r(var, reg) mmx_m2r(punpckldq, var, reg)
+#define punpckldq_r2r(regs, regd) mmx_r2r(punpckldq, regs, regd)
+#define punpckldq(vars, vard) mmx_m2m(punpckldq, vars, vard)
+
+#define punpcklwd_m2r(var, reg) mmx_m2r(punpcklwd, var, reg)
+#define punpcklwd_r2r(regs, regd) mmx_r2r(punpcklwd, regs, regd)
+#define punpcklwd(vars, vard) mmx_m2m(punpcklwd, vars, vard)
+
+#define punpcklbw_m2r(var, reg) mmx_m2r(punpcklbw, var, reg)
+#define punpcklbw_r2r(regs, regd) mmx_r2r(punpcklbw, regs, regd)
+#define punpcklbw(vars, vard) mmx_m2m(punpcklbw, vars, vard)
+
+
+/* 2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK High
+ (interleaves high half of dest with high half of source
+ as padding in each result field)
+*/
+#define punpckhdq_m2r(var, reg) mmx_m2r(punpckhdq, var, reg)
+#define punpckhdq_r2r(regs, regd) mmx_r2r(punpckhdq, regs, regd)
+#define punpckhdq(vars, vard) mmx_m2m(punpckhdq, vars, vard)
+
+#define punpckhwd_m2r(var, reg) mmx_m2r(punpckhwd, var, reg)
+#define punpckhwd_r2r(regs, regd) mmx_r2r(punpckhwd, regs, regd)
+#define punpckhwd(vars, vard) mmx_m2m(punpckhwd, vars, vard)
+
+#define punpckhbw_m2r(var, reg) mmx_m2r(punpckhbw, var, reg)
+#define punpckhbw_r2r(regs, regd) mmx_r2r(punpckhbw, regs, regd)
+#define punpckhbw(vars, vard) mmx_m2m(punpckhbw, vars, vard)
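+
+/* A common use is zero-extending 8 bytes into 8 words split across two
+   registers (a sketch; src is an mmx_t, and this is exactly the pattern
+   the MMX motion-compensation code uses):
+
+	pxor_r2r( mm0, mm0 );		// mm0 = 0
+	movq_m2r( src, mm1 );		// load 8 bytes
+	movq_r2r( mm1, mm2 );		// copy them
+	punpcklbw_r2r( mm0, mm1 );	// mm1 = low 4 bytes widened to words
+	punpckhbw_r2r( mm0, mm2 );	// mm2 = high 4 bytes widened to words
+*/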
+
+
+/* Empty MMX State
+   (used to clean up when going from mmx to float use
+   of the registers that are shared by both; note that
+   there is no float-to-mmx operation needed, because
+   only the float tag word info is corruptible)
+*/
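+
+/* e.g., the tail of any routine that mixes MMX with floating point
+   (a sketch):
+
+	... mmx work ...
+	emms();			// mark all FP tag-word entries empty
+	f = 0.5f * g;		// float code is safe again
+*/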
+#ifdef MMX_TRACE
+
+#define emms() \
+ { \
+ fprintf(stderr, "emms()\n"); \
+ __asm__ __volatile__ ("emms"); \
+ }
+
+#else
+
+#define emms() __asm__ __volatile__ ("emms")
+
+#endif
+
+#endif
+
--- /dev/null
+++ b/vdec_motion_inner_mmx.c
+/*****************************************************************************
+ * vdec_motion_inner_mmx.c : motion compensation inner routines optimized in
+ * MMX
+ *****************************************************************************
+ * Copyright (C) 1999, 2000 VideoLAN
+ *
+ * Authors:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
+ *****************************************************************************/
+
+/*****************************************************************************
+ * Preamble
+ *****************************************************************************/
+#include "defs.h"
+
+#include <sys/types.h> /* on BSD, uio.h needs types.h */
+#include <sys/uio.h> /* for input.h */
+
+#include "config.h"
+#include "common.h"
+#include "threads.h"
+#include "mtime.h"
+#include "plugins.h"
+
+#include "intf_msg.h"
+
+#include "input.h"
+#include "decoder_fifo.h"
+#include "video.h"
+#include "video_output.h"
+
+#include "vdec_idct.h"
+#include "video_decoder.h"
+#include "vdec_motion.h"
+
+#include "vpar_blocks.h"
+#include "vpar_headers.h"
+#include "vpar_synchro.h"
+#include "video_parser.h"
+#include "video_fifo.h"
+
+#include "mmx.h"
+
+/* OK, I know, this code has been taken from livid's mpeg2dec --Meuuh */
+
+/* Some rounding constants */
+mmx_t round1 = {0x0001000100010001LL};
+mmx_t round4 = {0x0002000200020002LL};
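+
+/* round1 biases each 16-bit lane by 1 before a 1-bit arithmetic shift,
+   giving (a + b + 1) / 2; round4 biases by 2 before a 2-bit shift,
+   giving (a + b + c + d + 2) / 4. */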
+
+/*
+ * Useful functions
+ */
+
+static __inline__ void MMXZeroReg( void )
+{
+ /* load 0 into mm0 */
+ pxor_r2r(mm0,mm0);
+}
+
+static __inline__ void MMXAverage2( u8 *dst, u8 *src1, u8 *src2 )
+{
+ //
+ // *dst = clip_to_u8((*src1 + *src2 + 1)/2);
+ //
+
+    // mm0 is assumed to be zero: callers invoke MMXZeroReg() first
+
+ movq_m2r(*src1,mm1); // load 8 src1 bytes
+ movq_r2r(mm1,mm2); // copy 8 src1 bytes
+
+ movq_m2r(*src2,mm3); // load 8 src2 bytes
+ movq_r2r(mm3,mm4); // copy 8 src2 bytes
+
+ punpcklbw_r2r(mm0,mm1); // unpack low src1 bytes
+ punpckhbw_r2r(mm0,mm2); // unpack high src1 bytes
+
+ punpcklbw_r2r(mm0,mm3); // unpack low src2 bytes
+ punpckhbw_r2r(mm0,mm4); // unpack high src2 bytes
+
+ paddw_r2r(mm3,mm1); // add lows to mm1
+ paddw_m2r(round1,mm1);
+ psraw_i2r(1,mm1); // /2
+
+ paddw_r2r(mm4,mm2); // add highs to mm2
+ paddw_m2r(round1,mm2);
+ psraw_i2r(1,mm2); // /2
+
+ packuswb_r2r(mm2,mm1); // pack (w/ saturation)
+ movq_r2m(mm1,*dst); // store result in dst
+}
+
+static __inline__ void MMXInterpAverage2( u8 *dst, u8 *src1, u8 *src2 )
+{
+ //
+ // *dst = clip_to_u8((*dst + (*src1 + *src2 + 1)/2 + 1)/2);
+ //
+
+    // mm0 is assumed to be zero: callers invoke MMXZeroReg() first
+
+ movq_m2r(*dst,mm1); // load 8 dst bytes
+ movq_r2r(mm1,mm2); // copy 8 dst bytes
+
+ movq_m2r(*src1,mm3); // load 8 src1 bytes
+ movq_r2r(mm3,mm4); // copy 8 src1 bytes
+
+ movq_m2r(*src2,mm5); // load 8 src2 bytes
+ movq_r2r(mm5,mm6); // copy 8 src2 bytes
+
+ punpcklbw_r2r(mm0,mm1); // unpack low dst bytes
+ punpckhbw_r2r(mm0,mm2); // unpack high dst bytes
+
+ punpcklbw_r2r(mm0,mm3); // unpack low src1 bytes
+ punpckhbw_r2r(mm0,mm4); // unpack high src1 bytes
+
+ punpcklbw_r2r(mm0,mm5); // unpack low src2 bytes
+ punpckhbw_r2r(mm0,mm6); // unpack high src2 bytes
+
+ paddw_r2r(mm5,mm3); // add lows
+ paddw_m2r(round1,mm3);
+ psraw_i2r(1,mm3); // /2
+
+ paddw_r2r(mm6,mm4); // add highs
+ paddw_m2r(round1,mm4);
+ psraw_i2r(1,mm4); // /2
+
+ paddw_r2r(mm3,mm1); // add lows
+ paddw_m2r(round1,mm1);
+ psraw_i2r(1,mm1); // /2
+
+ paddw_r2r(mm4,mm2); // add highs
+ paddw_m2r(round1,mm2);
+ psraw_i2r(1,mm2); // /2
+
+ packuswb_r2r(mm2,mm1); // pack (w/ saturation)
+ movq_r2m(mm1,*dst); // store result in dst
+}
+
+static __inline__ void MMXAverage4( u8 *dst, u8 *src1, u8 *src2, u8 *src3,
+ u8 *src4 )
+{
+ //
+ // *dst = clip_to_u8((*src1 + *src2 + *src3 + *src4 + 2)/4);
+ //
+
+    // mm0 is assumed to be zero: callers invoke MMXZeroReg() first
+
+ movq_m2r(*src1,mm1); // load 8 src1 bytes
+ movq_r2r(mm1,mm2); // copy 8 src1 bytes
+
+ punpcklbw_r2r(mm0,mm1); // unpack low src1 bytes
+ punpckhbw_r2r(mm0,mm2); // unpack high src1 bytes
+
+ movq_m2r(*src2,mm3); // load 8 src2 bytes
+ movq_r2r(mm3,mm4); // copy 8 src2 bytes
+
+ punpcklbw_r2r(mm0,mm3); // unpack low src2 bytes
+ punpckhbw_r2r(mm0,mm4); // unpack high src2 bytes
+
+ paddw_r2r(mm3,mm1); // add lows
+ paddw_r2r(mm4,mm2); // add highs
+
+ // now have partials in mm1 and mm2
+
+ movq_m2r(*src3,mm3); // load 8 src3 bytes
+ movq_r2r(mm3,mm4); // copy 8 src3 bytes
+
+ punpcklbw_r2r(mm0,mm3); // unpack low src3 bytes
+ punpckhbw_r2r(mm0,mm4); // unpack high src3 bytes
+
+ paddw_r2r(mm3,mm1); // add lows
+ paddw_r2r(mm4,mm2); // add highs
+
+ movq_m2r(*src4,mm5); // load 8 src4 bytes
+ movq_r2r(mm5,mm6); // copy 8 src4 bytes
+
+ punpcklbw_r2r(mm0,mm5); // unpack low src4 bytes
+ punpckhbw_r2r(mm0,mm6); // unpack high src4 bytes
+
+ paddw_r2r(mm5,mm1); // add lows
+ paddw_r2r(mm6,mm2); // add highs
+
+ // now have subtotal in mm1 and mm2
+
+ paddw_m2r(round4,mm1);
+ psraw_i2r(2,mm1); // /4
+ paddw_m2r(round4,mm2);
+ psraw_i2r(2,mm2); // /4
+
+ packuswb_r2r(mm2,mm1); // pack (w/ saturation)
+ movq_r2m(mm1,*dst); // store result in dst
+}
+
+static __inline__ void MMXInterpAverage4( u8 *dst, u8 *src1, u8 *src2,
+ u8 *src3, u8 *src4 )
+{
+ //
+ // *dst = clip_to_u8((*dst + (*src1 + *src2 + *src3 + *src4 + 2)/4 + 1)/2);
+ //
+
+    // mm0 is assumed to be zero: callers invoke MMXZeroReg() first
+
+ movq_m2r(*src1,mm1); // load 8 src1 bytes
+ movq_r2r(mm1,mm2); // copy 8 src1 bytes
+
+ punpcklbw_r2r(mm0,mm1); // unpack low src1 bytes
+ punpckhbw_r2r(mm0,mm2); // unpack high src1 bytes
+
+ movq_m2r(*src2,mm3); // load 8 src2 bytes
+ movq_r2r(mm3,mm4); // copy 8 src2 bytes
+
+ punpcklbw_r2r(mm0,mm3); // unpack low src2 bytes
+ punpckhbw_r2r(mm0,mm4); // unpack high src2 bytes
+
+ paddw_r2r(mm3,mm1); // add lows
+ paddw_r2r(mm4,mm2); // add highs
+
+ // now have partials in mm1 and mm2
+
+ movq_m2r(*src3,mm3); // load 8 src3 bytes
+ movq_r2r(mm3,mm4); // copy 8 src3 bytes
+
+ punpcklbw_r2r(mm0,mm3); // unpack low src3 bytes
+ punpckhbw_r2r(mm0,mm4); // unpack high src3 bytes
+
+ paddw_r2r(mm3,mm1); // add lows
+ paddw_r2r(mm4,mm2); // add highs
+
+ movq_m2r(*src4,mm5); // load 8 src4 bytes
+ movq_r2r(mm5,mm6); // copy 8 src4 bytes
+
+ punpcklbw_r2r(mm0,mm5); // unpack low src4 bytes
+ punpckhbw_r2r(mm0,mm6); // unpack high src4 bytes
+
+ paddw_r2r(mm5,mm1); // add lows
+ paddw_r2r(mm6,mm2); // add highs
+
+ paddw_m2r(round4,mm1);
+ psraw_i2r(2,mm1); // /4
+ paddw_m2r(round4,mm2);
+ psraw_i2r(2,mm2); // /4
+
+ // now have subtotal/4 in mm1 and mm2
+
+ movq_m2r(*dst,mm3); // load 8 dst bytes
+ movq_r2r(mm3,mm4); // copy 8 dst bytes
+
+ punpcklbw_r2r(mm0,mm3); // unpack low dst bytes
+ punpckhbw_r2r(mm0,mm4); // unpack high dst bytes
+
+ paddw_r2r(mm3,mm1); // add lows
+ paddw_r2r(mm4,mm2); // add highs
+
+ paddw_m2r(round1,mm1);
+ psraw_i2r(1,mm1); // /2
+ paddw_m2r(round1,mm2);
+ psraw_i2r(1,mm2); // /2
+
+ // now have end value in mm1 and mm2
+
+ packuswb_r2r(mm2,mm1); // pack (w/ saturation)
+ movq_r2m(mm1,*dst); // store result in dst
+}
+
+
+/*
+ * Actual Motion compensation
+ */
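+/* Each __MotionComponent_* macro below token-pastes one specialized
+   function per (width,height) pair; for instance,
+   __MotionComponent_x_y_copy(16,8) defines
+   MotionComponent_x_y_copy_16_8( p_src, p_dest, i_stride ). */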
+
+#define __MotionComponent_x_y_copy(width,height) \
+void MotionComponent_x_y_copy_##width##_##height(yuv_data_t * p_src, \
+ yuv_data_t * p_dest, \
+ int i_stride) \
+{ \
+ int i_y; \
+ \
+ MMXZeroReg(); \
+ \
+ for( i_y = 0; i_y < height; i_y ++ ) \
+ { \
+ movq_m2r( *p_src, mm1 ); /* load 8 ref bytes */ \
+ movq_r2m( mm1, *p_dest ); /* store 8 bytes at curr */ \
+ \
+ if( width == 16 ) \
+ { \
+ movq_m2r( *(p_src + 8), mm1 ); /* load 8 ref bytes */ \
+ movq_r2m( mm1, *(p_dest + 8) ); /* store 8 bytes at curr */ \
+ } \
+ \
+ p_dest += i_stride; \
+ p_src += i_stride; \
+ } \
+}
+
+#define __MotionComponent_X_y_copy(width,height) \
+void MotionComponent_X_y_copy_##width##_##height(yuv_data_t * p_src, \
+ yuv_data_t * p_dest, \
+ int i_stride) \
+{ \
+ int i_y; \
+ \
+ MMXZeroReg(); \
+ \
+ for( i_y = 0; i_y < height; i_y ++ ) \
+ { \
+ MMXAverage2( p_dest, p_src, p_src + 1 ); \
+ \
+ if( width == 16 ) \
+ { \
+ MMXAverage2( p_dest + 8, p_src + 8, p_src + 9 ); \
+ } \
+ \
+ p_dest += i_stride; \
+ p_src += i_stride; \
+ } \
+}
+
+#define __MotionComponent_x_Y_copy(width,height) \
+void MotionComponent_x_Y_copy_##width##_##height(yuv_data_t * p_src, \
+ yuv_data_t * p_dest, \
+ int i_stride, \
+ int i_step) \
+{ \
+ int i_y; \
+ yuv_data_t * p_next_src = p_src + i_step; \
+ \
+ MMXZeroReg(); \
+ \
+ for( i_y = 0; i_y < height; i_y ++ ) \
+ { \
+ MMXAverage2( p_dest, p_src, p_next_src ); \
+ \
+ if( width == 16 ) \
+ { \
+ MMXAverage2( p_dest + 8, p_src + 8, p_next_src + 8 ); \
+ } \
+ \
+ p_dest += i_stride; \
+ p_src += i_stride; \
+ p_next_src += i_stride; \
+ } \
+}
+
+#define __MotionComponent_X_Y_copy(width,height) \
+void MotionComponent_X_Y_copy_##width##_##height(yuv_data_t * p_src, \
+ yuv_data_t * p_dest, \
+ int i_stride, \
+ int i_step) \
+{ \
+ int i_y; \
+ yuv_data_t * p_next_src = p_src + i_step; \
+ \
+ MMXZeroReg(); \
+ \
+ for( i_y = 0; i_y < height; i_y ++ ) \
+ { \
+ MMXAverage4( p_dest, p_src, p_src + 1, p_next_src, p_next_src + 1 );\
+ \
+ if( width == 16 ) \
+ { \
+ MMXAverage4( p_dest + 8, p_src + 8, p_src + 9, \
+ p_next_src + 8, p_next_src + 9 ); \
+ } \
+ \
+ p_dest += i_stride; \
+ p_src += i_stride; \
+ p_next_src += i_stride; \
+ } \
+}
+
+#define __MotionComponent_x_y_avg(width,height) \
+void MotionComponent_x_y_avg_##width##_##height(yuv_data_t * p_src, \
+ yuv_data_t * p_dest, \
+ int i_stride) \
+{ \
+ int i_y; \
+ \
+ MMXZeroReg(); \
+ \
+ for( i_y = 0; i_y < height; i_y ++ ) \
+ { \
+ MMXAverage2( p_dest, p_dest, p_src ); \
+ \
+ if( width == 16 ) \
+ { \
+ MMXAverage2( p_dest + 8, p_dest + 8, p_src + 8 ); \
+ } \
+ \
+ p_dest += i_stride; \
+ p_src += i_stride; \
+ } \
+}
+
+#define __MotionComponent_X_y_avg(width,height) \
+void MotionComponent_X_y_avg_##width##_##height(yuv_data_t * p_src, \
+ yuv_data_t * p_dest, \
+ int i_stride) \
+{ \
+ int i_y; \
+ \
+ MMXZeroReg(); \
+ \
+ for( i_y = 0; i_y < height; i_y ++ ) \
+ { \
+ MMXInterpAverage2( p_dest, p_src, p_src + 1 ); \
+ \
+ if( width == 16 ) \
+ { \
+            MMXInterpAverage2( p_dest + 8, p_src + 8, p_src + 9 );      \
+ } \
+ \
+ p_dest += i_stride; \
+ p_src += i_stride; \
+ } \
+}
+
+#define __MotionComponent_x_Y_avg(width,height) \
+void MotionComponent_x_Y_avg_##width##_##height(yuv_data_t * p_src, \
+ yuv_data_t * p_dest, \
+ int i_stride, \
+ int i_step) \
+{ \
+ int i_x, i_y; \
+ unsigned int i_dummy; \
+ \
+ for( i_y = 0; i_y < height; i_y ++ ) \
+ { \
+ for( i_x = 0; i_x < width; i_x++ ) \
+ { \
+ i_dummy = \
+ p_dest[i_x] + ((unsigned int)(p_src[i_x] \
+ + p_src[i_x + i_step] \
+ + 1) >> 1); \
+ p_dest[i_x] = (i_dummy + 1) >> 1; \
+ } \
+ p_dest += i_stride; \
+ p_src += i_stride; \
+ } \
+}
+
+#define __MotionComponent_X_Y_avg(width,height) \
+void MotionComponent_X_Y_avg_##width##_##height(yuv_data_t * p_src, \
+ yuv_data_t * p_dest, \
+ int i_stride, \
+ int i_step) \
+{ \
+ int i_y; \
+ yuv_data_t * p_next_src = p_src + i_step; \
+ \
+ MMXZeroReg(); \
+ \
+ for( i_y = 0; i_y < height; i_y ++ ) \
+ { \
+ MMXInterpAverage4( p_dest, p_src, p_src + 1, p_next_src, \
+ p_next_src + 1 ); \
+ \
+ if( width == 16 ) \
+ { \
+ MMXInterpAverage4( p_dest + 8, p_src + 8, p_src + 9, \
+ p_next_src + 8, p_next_src + 9 ); \
+ } \
+ \
+ p_dest += i_stride; \
+ p_src += i_stride; \
+ p_next_src += i_stride; \
+ } \
+}
+
+#define __MotionComponents(width,height) \
+__MotionComponent_x_y_copy(width,height) \
+__MotionComponent_X_y_copy(width,height) \
+__MotionComponent_x_Y_copy(width,height) \
+__MotionComponent_X_Y_copy(width,height) \
+__MotionComponent_x_y_avg(width,height) \
+__MotionComponent_X_y_avg(width,height) \
+__MotionComponent_x_Y_avg(width,height) \
+__MotionComponent_X_Y_avg(width,height)
+
+__MotionComponents (16,16) /* 444, 422, 420 */
+__MotionComponents (16,8) /* 444, 422, 420 */
+__MotionComponents (8,8) /* 422, 420 */
+__MotionComponents (8,4) /* 420 */
+#if 0
+__MotionComponents (8,16) /* 422 */
+#endif