/*
- * rgb2rgb.c, Software RGB to RGB convertor
- * pluralize by Software PAL8 to RGB convertor
- * Software YUV to YUV convertor
- * Software YUV to RGB convertor
- * Written by Nick Kurshev.
- * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
- * lot of big-endian byteorder fixes by Alex Beregszaszi
+ * software RGB to RGB converter
+ * pluralize by software PAL8 to RGB converter
+ * software YUV to YUV converter
+ * software YUV to RGB converter
+ * Written by Nick Kurshev.
+ * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
+ * lots of big-endian byte order fixes by Alex Beregszaszi
*
* This file is part of FFmpeg.
*
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
- * the C code (not assembly, mmx, ...) of this file can be used
- * under the LGPL license too
+ * The C code (not assembly, MMX, ...) of this file can be used
+ * under the LGPL license.
*/
#include <stddef.h>
-#include <inttypes.h> /* for __WORDSIZE */
-
-#ifndef __WORDSIZE
-// #warning You have misconfigured system and probably will lose performance!
-#define __WORDSIZE MP_WORDSIZE
-#endif
#undef PREFETCH
#undef MOVNTQ
#undef PREFETCHW
#undef PAVGB
-#ifdef HAVE_SSE2
+#if HAVE_SSE2
#define MMREG_SIZE 16
#else
#define MMREG_SIZE 8
#endif
-#ifdef HAVE_3DNOW
+#if HAVE_AMD3DNOW
#define PREFETCH "prefetch"
#define PREFETCHW "prefetchw"
#define PAVGB "pavgusb"
-#elif defined ( HAVE_MMX2 )
+#elif HAVE_MMX2
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
#define PAVGB "pavgb"
#else
-#ifdef __APPLE__
-#define PREFETCH "#"
-#define PREFETCHW "#"
-#else
#define PREFETCH " # nop"
#define PREFETCHW " # nop"
#endif
-#endif
-#ifdef HAVE_3DNOW
-/* On K6 femms is faster of emms. On K7 femms is directly mapped on emms. */
+#if HAVE_AMD3DNOW
+/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
#endif
-#ifdef HAVE_MMX2
+#if HAVE_MMX2
#define MOVNTQ "movntq"
#define SFENCE "sfence"
#else
#define SFENCE " # nop"
#endif
-static inline void RENAME(rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size)
+static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long src_size)
{
uint8_t *dest = dst;
const uint8_t *s = src;
const uint8_t *end;
- #ifdef HAVE_MMX
+ #if HAVE_MMX
const uint8_t *mm_end;
#endif
end = s + src_size;
- #ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+ #if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 23;
- __asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory");
+ __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory");
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"punpckldq 3%1, %%mm0 \n\t"
"punpckldq 15%1, %%mm2 \n\t"
"movd 18%1, %%mm3 \n\t"
"punpckldq 21%1, %%mm3 \n\t"
- "pand %%mm7, %%mm0 \n\t"
- "pand %%mm7, %%mm1 \n\t"
- "pand %%mm7, %%mm2 \n\t"
- "pand %%mm7, %%mm3 \n\t"
+ "por %%mm7, %%mm0 \n\t"
+ "por %%mm7, %%mm1 \n\t"
+ "por %%mm7, %%mm2 \n\t"
+ "por %%mm7, %%mm3 \n\t"
MOVNTQ" %%mm0, %0 \n\t"
MOVNTQ" %%mm1, 8%0 \n\t"
MOVNTQ" %%mm2, 16%0 \n\t"
dest += 32;
s += 24;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
#ifdef WORDS_BIGENDIAN
/* RGB24 (= R,G,B) -> RGB32 (= A,B,G,R) */
- *dest++ = 0;
+ *dest++ = 255;
*dest++ = s[2];
*dest++ = s[1];
*dest++ = s[0];
*dest++ = *s++;
*dest++ = *s++;
*dest++ = *s++;
- *dest++ = 0;
+ *dest++ = 255;
#endif
}
}
-static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size)
+static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{
uint8_t *dest = dst;
const uint8_t *s = src;
const uint8_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint8_t *mm_end;
#endif
end = s + src_size;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 31;
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t"
"movq 8%1, %%mm1 \n\t"
dest += 24;
s += 32;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
}
/*
- Original by Strepto/Astral
- ported to gcc & bugfixed : A'rpi
+ original by Strepto/Astral
+ ported to gcc & bugfixed: A'rpi
MMX2, 3DNOW optimization by Nick Kurshev
- 32bit c version, and and&add trick by Michael Niedermayer
+ 32-bit C version, and and&add trick by Michael Niedermayer
*/
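/*
  A minimal scalar sketch of the and&add trick (hypothetical helper, not
  part of this file): in RGB15 bit 15 is clear, so adding the masked R+G
  fields to themselves shifts them left one bit without any carry escaping
  the pixel, which widens G from 5 to 6 bits.

      static inline uint16_t rgb15to16_pixel(uint16_t x)
      {
          // x        = 0RRRRRGGGGGBBBBB
          // x&0x7FE0 = 0RRRRRGGGGG00000; adding it doubles (shifts) R and G
          return (x & 0x7FFF) + (x & 0x7FE0);  // -> RRRRRGGGGG0BBBBB
      }

  The 32-bit loop below applies the same identity to two packed pixels at
  once; clearing bit 15 of each pixel first keeps any carry from crossing
  the pixel boundary.
*/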
-static inline void RENAME(rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size)
+static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_size)
{
register const uint8_t* s=src;
register uint8_t* d=dst;
register const uint8_t *end;
const uint8_t *mm_end;
end = s + src_size;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s));
- __asm __volatile("movq %0, %%mm4"::"m"(mask15s));
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*s));
+ __asm__ volatile("movq %0, %%mm4"::"m"(mask15s));
mm_end = end - 15;
while (s<mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t"
"movq 8%1, %%mm2 \n\t"
d+=16;
s+=16;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
mm_end = end - 3;
while (s < mm_end)
{
- register unsigned x= *((uint32_t *)s);
+ register unsigned x= *((const uint32_t *)s);
*((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
d+=4;
s+=4;
}
if (s < end)
{
- register unsigned short x= *((uint16_t *)s);
+ register unsigned short x= *((const uint16_t *)s);
*((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0);
}
}
-static inline void RENAME(rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size)
+static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size)
{
register const uint8_t* s=src;
register uint8_t* d=dst;
register const uint8_t *end;
const uint8_t *mm_end;
end = s + src_size;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s));
- __asm __volatile("movq %0, %%mm7"::"m"(mask15rg));
- __asm __volatile("movq %0, %%mm6"::"m"(mask15b));
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*s));
+ __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg));
+ __asm__ volatile("movq %0, %%mm6"::"m"(mask15b));
mm_end = end - 15;
while (s<mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t"
"movq 8%1, %%mm2 \n\t"
d+=16;
s+=16;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
mm_end = end - 3;
while (s < mm_end)
{
- register uint32_t x= *((uint32_t *)s);
+ register uint32_t x= *((const uint32_t*)s);
*((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
s+=4;
d+=4;
}
if (s < end)
{
- register uint16_t x= *((uint16_t *)s);
+ register uint16_t x= *((const uint16_t*)s);
*((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
s+=2;
d+=2;
{
const uint8_t *s = src;
const uint8_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#ifdef HAVE_MMX
+#if HAVE_MMX
mm_end = end - 15;
-#if 1 //is faster only if multiplies are reasonable fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
- asm volatile(
+#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
+ __asm__ volatile(
"movq %3, %%mm5 \n\t"
"movq %4, %%mm6 \n\t"
"movq %5, %%mm7 \n\t"
: "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
);
#else
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
+ __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
::"m"(red_16mask),"m"(green_16mask));
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"movd 4%1, %%mm3 \n\t"
s += 16;
}
#endif
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
- register int rgb = *(uint32_t*)s; s += 4;
+ register int rgb = *(const uint32_t*)s; s += 4;
*d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8);
}
}
{
const uint8_t *s = src;
const uint8_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
::"m"(red_16mask),"m"(green_16mask));
mm_end = end - 15;
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"movd 4%1, %%mm3 \n\t"
d += 4;
s += 16;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
- register int rgb = *(uint32_t*)s; s += 4;
+ register int rgb = *(const uint32_t*)s; s += 4;
*d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19);
}
}
{
const uint8_t *s = src;
const uint8_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#ifdef HAVE_MMX
+#if HAVE_MMX
mm_end = end - 15;
-#if 1 //is faster only if multiplies are reasonable fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
- asm volatile(
+#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
+ __asm__ volatile(
"movq %3, %%mm5 \n\t"
"movq %4, %%mm6 \n\t"
"movq %5, %%mm7 \n\t"
: "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
);
#else
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
+ __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
::"m"(red_15mask),"m"(green_15mask));
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"movd 4%1, %%mm3 \n\t"
s += 16;
}
#endif
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
- register int rgb = *(uint32_t*)s; s += 4;
+ register int rgb = *(const uint32_t*)s; s += 4;
*d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9);
}
}
{
const uint8_t *s = src;
const uint8_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
::"m"(red_15mask),"m"(green_15mask));
mm_end = end - 15;
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"movd 4%1, %%mm3 \n\t"
d += 4;
s += 16;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
- register int rgb = *(uint32_t*)s; s += 4;
+ register int rgb = *(const uint32_t*)s; s += 4;
*d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
}
}
-static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size)
+static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size)
{
const uint8_t *s = src;
const uint8_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
::"m"(red_16mask),"m"(green_16mask));
mm_end = end - 11;
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"movd 3%1, %%mm3 \n\t"
d += 4;
s += 12;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
}
}
-static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size)
+static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size)
{
const uint8_t *s = src;
const uint8_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
::"m"(red_16mask),"m"(green_16mask));
mm_end = end - 15;
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"movd 3%1, %%mm3 \n\t"
d += 4;
s += 12;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
}
}
-static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size)
+static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size)
{
const uint8_t *s = src;
const uint8_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
::"m"(red_15mask),"m"(green_15mask));
mm_end = end - 11;
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"movd 3%1, %%mm3 \n\t"
d += 4;
s += 12;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
}
}
-static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size)
+static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size)
{
const uint8_t *s = src;
const uint8_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
::"m"(red_15mask),"m"(green_15mask));
mm_end = end - 15;
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"movd 3%1, %%mm3 \n\t"
d += 4;
s += 12;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
}
/*
- I use here less accurate approximation by simply
- left-shifting the input
- value and filling the low order bits with
- zeroes. This method improves png's
- compression but this scheme cannot reproduce white exactly, since it does not
- generate an all-ones maximum value; the net effect is to darken the
+ I use a less accurate approximation here by simply left-shifting the input
+ value and filling the low-order bits with zeroes. This method improves PNG
+ compression, but this scheme cannot reproduce white exactly, since it does
+ not generate an all-ones maximum value; the net effect is to darken the
image slightly.
A better method would be "left bit replication":
----------------
1 1 0 1 1 1 1 0
|=======| |===|
- | Leftmost Bits Repeated to Fill Open Bits
+ | leftmost bits repeated to fill open bits
|
- Original Bits
+ original bits
*/
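/*
  A minimal sketch of that left bit replication for one 5-bit channel
  (hypothetical helper, not part of this file): refilling the opened low
  bits from the leftmost bits maps 0x1F to 0xFF, so white stays white.

      static inline uint8_t expand5to8(uint8_t c5)
      {
          // VWXYZ -> VWXYZVWX: top three bits replicated into the low three
          return (uint8_t)((c5 << 3) | (c5 >> 2));
      }

  The conversions below deliberately keep the simpler zero fill (c5 << 3),
  trading exact white for the compression benefit described above.
*/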
-static inline void RENAME(rgb15to24)(const uint8_t *src, uint8_t *dst, long src_size)
+static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{
const uint16_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint16_t *mm_end;
#endif
- uint8_t *d = (uint8_t *)dst;
- const uint16_t *s = (uint16_t *)src;
+ uint8_t *d = dst;
+ const uint16_t *s = (const uint16_t*)src;
end = s + src_size/2;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 7;
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t"
"movq %1, %%mm1 \n\t"
:"=m"(*d)
:"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null)
:"memory");
- /* Borrowed 32 to 24 */
- __asm __volatile(
+ /* borrowed 32 to 24 */
+ __asm__ volatile(
"movq %%mm0, %%mm4 \n\t"
"movq %%mm3, %%mm5 \n\t"
"movq %%mm6, %%mm0 \n\t"
d += 24;
s += 8;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
}
}
-static inline void RENAME(rgb16to24)(const uint8_t *src, uint8_t *dst, long src_size)
+static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{
const uint16_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint16_t *mm_end;
#endif
uint8_t *d = (uint8_t *)dst;
const uint16_t *s = (const uint16_t *)src;
end = s + src_size/2;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 7;
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t"
"movq %1, %%mm1 \n\t"
:"=m"(*d)
:"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null)
:"memory");
- /* Borrowed 32 to 24 */
- __asm __volatile(
+ /* borrowed 32 to 24 */
+ __asm__ volatile(
"movq %%mm0, %%mm4 \n\t"
"movq %%mm3, %%mm5 \n\t"
"movq %%mm6, %%mm0 \n\t"
d += 24;
s += 8;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
}
}
+/*
+ * mm0 = 00 B3 00 B2 00 B1 00 B0
+ * mm1 = 00 G3 00 G2 00 G1 00 G0
+ * mm2 = 00 R3 00 R2 00 R1 00 R0
+ * mm6 = FF FF FF FF FF FF FF FF
+ * mm7 = 00 00 00 00 00 00 00 00
+ */
+#define PACK_RGB32 \
+ "packuswb %%mm7, %%mm0 \n\t" /* 00 00 00 00 B3 B2 B1 B0 */ \
+ "packuswb %%mm7, %%mm1 \n\t" /* 00 00 00 00 G3 G2 G1 G0 */ \
+ "packuswb %%mm7, %%mm2 \n\t" /* 00 00 00 00 R3 R2 R1 R0 */ \
+ "punpcklbw %%mm1, %%mm0 \n\t" /* G3 B3 G2 B2 G1 B1 G0 B0 */ \
+ "punpcklbw %%mm6, %%mm2 \n\t" /* FF R3 FF R2 FF R1 FF R0 */ \
+ "movq %%mm0, %%mm3 \n\t" \
+ "punpcklwd %%mm2, %%mm0 \n\t" /* FF R1 G1 B1 FF R0 G0 B0 */ \
+ "punpckhwd %%mm2, %%mm3 \n\t" /* FF R3 G3 B3 FF R2 G2 B2 */ \
+ MOVNTQ" %%mm0, %0 \n\t" \
+ MOVNTQ" %%mm3, 8%0 \n\t" \
+
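+/*
+ * A rough scalar equivalent of PACK_RGB32 for one pixel (hypothetical,
+ * illustration only): the punpck sequence stores the bytes B G R FF in
+ * memory, i.e. 0xFFRRGGBB when read as a little-endian uint32_t.
+ *
+ *     static inline void pack_rgb32_pixel(uint8_t *d, uint8_t b, uint8_t g, uint8_t r)
+ *     {
+ *         d[0] = b;    // punpcklbw %%mm1, %%mm0 pairs each B with its G
+ *         d[1] = g;
+ *         d[2] = r;    // punpcklbw %%mm6, %%mm2 pairs each R with FF
+ *         d[3] = 0xFF; // mm6 is all ones (pcmpeqd), giving opaque alpha
+ *     }
+ */
+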
static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size)
{
const uint16_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint16_t *mm_end;
#endif
- uint8_t *d = (uint8_t *)dst;
+ uint8_t *d = dst;
const uint16_t *s = (const uint16_t *)src;
end = s + src_size/2;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
- __asm __volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
+ __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
+ __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
mm_end = end - 3;
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t"
"movq %1, %%mm1 \n\t"
"psllq $3, %%mm0 \n\t"
"psrlq $2, %%mm1 \n\t"
"psrlq $7, %%mm2 \n\t"
- "movq %%mm0, %%mm3 \n\t"
- "movq %%mm1, %%mm4 \n\t"
- "movq %%mm2, %%mm5 \n\t"
- "punpcklwd %%mm7, %%mm0 \n\t"
- "punpcklwd %%mm7, %%mm1 \n\t"
- "punpcklwd %%mm7, %%mm2 \n\t"
- "punpckhwd %%mm7, %%mm3 \n\t"
- "punpckhwd %%mm7, %%mm4 \n\t"
- "punpckhwd %%mm7, %%mm5 \n\t"
- "psllq $8, %%mm1 \n\t"
- "psllq $16, %%mm2 \n\t"
- "por %%mm1, %%mm0 \n\t"
- "por %%mm2, %%mm0 \n\t"
- "psllq $8, %%mm4 \n\t"
- "psllq $16, %%mm5 \n\t"
- "por %%mm4, %%mm3 \n\t"
- "por %%mm5, %%mm3 \n\t"
- MOVNTQ" %%mm0, %0 \n\t"
- MOVNTQ" %%mm3, 8%0 \n\t"
+ PACK_RGB32
:"=m"(*d)
:"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r)
:"memory");
d += 16;
s += 4;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
-#if 0 //slightly slower on athlon
+#if 0 //slightly slower on Athlon
int bgr= *s++;
*((uint32_t*)d)++ = ((bgr&0x1F)<<3) + ((bgr&0x3E0)<<6) + ((bgr&0x7C00)<<9);
#else
register uint16_t bgr;
bgr = *s++;
#ifdef WORDS_BIGENDIAN
- *d++ = 0;
+ *d++ = 255;
*d++ = (bgr&0x7C00)>>7;
*d++ = (bgr&0x3E0)>>2;
*d++ = (bgr&0x1F)<<3;
*d++ = (bgr&0x1F)<<3;
*d++ = (bgr&0x3E0)>>2;
*d++ = (bgr&0x7C00)>>7;
- *d++ = 0;
+ *d++ = 255;
#endif
#endif
static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size)
{
const uint16_t *end;
-#ifdef HAVE_MMX
+#if HAVE_MMX
const uint16_t *mm_end;
#endif
- uint8_t *d = (uint8_t *)dst;
- const uint16_t *s = (uint16_t *)src;
+ uint8_t *d = dst;
+ const uint16_t *s = (const uint16_t*)src;
end = s + src_size/2;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
- __asm __volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
+#if HAVE_MMX
+ __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
+ __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
+ __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
mm_end = end - 3;
while (s < mm_end)
{
- __asm __volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t"
"movq %1, %%mm1 \n\t"
"psllq $3, %%mm0 \n\t"
"psrlq $3, %%mm1 \n\t"
"psrlq $8, %%mm2 \n\t"
- "movq %%mm0, %%mm3 \n\t"
- "movq %%mm1, %%mm4 \n\t"
- "movq %%mm2, %%mm5 \n\t"
- "punpcklwd %%mm7, %%mm0 \n\t"
- "punpcklwd %%mm7, %%mm1 \n\t"
- "punpcklwd %%mm7, %%mm2 \n\t"
- "punpckhwd %%mm7, %%mm3 \n\t"
- "punpckhwd %%mm7, %%mm4 \n\t"
- "punpckhwd %%mm7, %%mm5 \n\t"
- "psllq $8, %%mm1 \n\t"
- "psllq $16, %%mm2 \n\t"
- "por %%mm1, %%mm0 \n\t"
- "por %%mm2, %%mm0 \n\t"
- "psllq $8, %%mm4 \n\t"
- "psllq $16, %%mm5 \n\t"
- "por %%mm4, %%mm3 \n\t"
- "por %%mm5, %%mm3 \n\t"
- MOVNTQ" %%mm0, %0 \n\t"
- MOVNTQ" %%mm3, 8%0 \n\t"
+ PACK_RGB32
:"=m"(*d)
:"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r)
:"memory");
d += 16;
s += 4;
}
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
#endif
while (s < end)
{
register uint16_t bgr;
bgr = *s++;
#ifdef WORDS_BIGENDIAN
- *d++ = 0;
+ *d++ = 255;
*d++ = (bgr&0xF800)>>8;
*d++ = (bgr&0x7E0)>>3;
*d++ = (bgr&0x1F)<<3;
*d++ = (bgr&0x1F)<<3;
*d++ = (bgr&0x7E0)>>3;
*d++ = (bgr&0xF800)>>8;
- *d++ = 0;
+ *d++ = 255;
#endif
}
}
static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size)
{
- long idx = 15 - src_size;
- uint8_t *s = (uint8_t *) src-idx, *d = dst-idx;
-#ifdef HAVE_MMX
- __asm __volatile(
+ x86_reg idx = 15 - src_size;
+ const uint8_t *s = src-idx;
+ uint8_t *d = dst-idx;
+#if HAVE_MMX
+ __asm__ volatile(
"test %0, %0 \n\t"
"jns 2f \n\t"
PREFETCH" (%1, %0) \n\t"
PREFETCH" 32(%1, %0) \n\t"
"movq (%1, %0), %%mm0 \n\t"
"movq 8(%1, %0), %%mm1 \n\t"
-# ifdef HAVE_MMX2
+# if HAVE_MMX2
"pshufw $177, %%mm0, %%mm3 \n\t"
"pshufw $177, %%mm1, %%mm5 \n\t"
"pand %%mm7, %%mm0 \n\t"
: "memory");
#endif
for (; idx<15; idx+=4) {
- register int v = *(uint32_t *)&s[idx], g = v & 0xff00ff00;
+ register int v = *(const uint32_t *)&s[idx], g = v & 0xff00ff00;
v &= 0xff00ff;
*(uint32_t *)&d[idx] = (v>>16) + g + (v<<16);
}
static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{
unsigned i;
-#ifdef HAVE_MMX
- long mmx_size= 23 - src_size;
- asm volatile (
+#if HAVE_MMX
+ x86_reg mmx_size= 23 - src_size;
+ __asm__ volatile (
"test %%"REG_a", %%"REG_a" \n\t"
"jns 2f \n\t"
"movq "MANGLE(mask24r)", %%mm5 \n\t"
: "r" (src-mmx_size), "r"(dst-mmx_size)
);
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ __asm__ volatile(SFENCE:::"memory");
+ __asm__ volatile(EMMS:::"memory");
- if (mmx_size==23) return; //finihsed, was multiple of 8
+ if (mmx_size==23) return; //finished, was multiple of 8
src+= src_size;
dst+= src_size;
long lumStride, long chromStride, long dstStride, long vertLumPerChroma)
{
long y;
- const long chromWidth= width>>1;
+ const x86_reg chromWidth= width>>1;
for (y=0; y<height; y++)
{
-#ifdef HAVE_MMX
-//FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
- asm volatile(
+#if HAVE_MMX
+//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
+ __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t"
ASMALIGN(4)
"1: \n\t"
);
#else
-#if defined ARCH_ALPHA && defined HAVE_MVI
+#if ARCH_ALPHA && HAVE_MVI
#define pl2yuy2(n) \
y1 = yc[n]; \
y2 = yc2[n]; \
u = uc[n]; \
v = vc[n]; \
- asm("unpkbw %1, %0" : "=r"(y1) : "r"(y1)); \
- asm("unpkbw %1, %0" : "=r"(y2) : "r"(y2)); \
- asm("unpkbl %1, %0" : "=r"(u) : "r"(u)); \
- asm("unpkbl %1, %0" : "=r"(v) : "r"(v)); \
+ __asm__("unpkbw %1, %0" : "=r"(y1) : "r"(y1)); \
+ __asm__("unpkbw %1, %0" : "=r"(y2) : "r"(y2)); \
+ __asm__("unpkbl %1, %0" : "=r"(u) : "r"(u)); \
+ __asm__("unpkbl %1, %0" : "=r"(v) : "r"(v)); \
yuv1 = (u << 8) + (v << 24); \
yuv2 = yuv1 + y2; \
yuv1 += y1; \
uint64_t y1, y2, yuv1, yuv2;
uint64_t u, v;
/* Prefetch */
- asm("ldq $31,64(%0)" :: "r"(yc));
- asm("ldq $31,64(%0)" :: "r"(yc2));
- asm("ldq $31,64(%0)" :: "r"(uc));
- asm("ldq $31,64(%0)" :: "r"(vc));
+ __asm__("ldq $31,64(%0)" :: "r"(yc));
+ __asm__("ldq $31,64(%0)" :: "r"(yc2));
+ __asm__("ldq $31,64(%0)" :: "r"(uc));
+ __asm__("ldq $31,64(%0)" :: "r"(vc));
pl2yuy2(0);
pl2yuy2(1);
ysrc += lumStride;
dst += dstStride;
-#elif __WORDSIZE >= 64
+#elif HAVE_FAST_64BIT
int i;
uint64_t *ldst = (uint64_t *) dst;
const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
}
#endif
#endif
- if ((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
+ if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1)
{
usrc += chromStride;
vsrc += chromStride;
ysrc += lumStride;
dst += dstStride;
}
-#ifdef HAVE_MMX
-asm( EMMS" \n\t"
+#if HAVE_MMX
+__asm__( EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
#endif
}
/**
- *
- * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
- * problem for anyone then tell me, and ill fix it)
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
*/
static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
long width, long height,
long lumStride, long chromStride, long dstStride, long vertLumPerChroma)
{
long y;
- const long chromWidth= width>>1;
+ const x86_reg chromWidth= width>>1;
for (y=0; y<height; y++)
{
-#ifdef HAVE_MMX
-//FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
- asm volatile(
+#if HAVE_MMX
+//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
+ __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t"
ASMALIGN(4)
"1: \n\t"
: "%"REG_a
);
#else
-//FIXME adapt the alpha asm code from yv12->yuy2
+//FIXME adapt the Alpha ASM code from yv12->yuy2
-#if __WORDSIZE >= 64
+#if HAVE_FAST_64BIT
int i;
uint64_t *ldst = (uint64_t *) dst;
const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
(vc[0] << 8) + (yc[1] << 0);
#else
*idst++ = uc[0] + (yc[0] << 8) +
- (vc[0] << 16) + (yc[1] << 24);
+ (vc[0] << 16) + (yc[1] << 24);
#endif
yc += 2;
uc++;
}
#endif
#endif
- if ((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
+ if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1)
{
usrc += chromStride;
vsrc += chromStride;
ysrc += lumStride;
dst += dstStride;
}
-#ifdef HAVE_MMX
-asm( EMMS" \n\t"
+#if HAVE_MMX
+__asm__( EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
#endif
}
/**
- *
- * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
- * problem for anyone then tell me, and ill fix it)
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
*/
static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
long width, long height,
}
/**
- *
- * width should be a multiple of 16
+ * Width should be a multiple of 16.
+ */
+static inline void RENAME(yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ long lumStride, long chromStride, long dstStride)
+{
+ RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
+}
+
+/**
+ * Width should be a multiple of 16.
*/
static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
long width, long height,
}
/**
- *
- * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
- * problem for anyone then tell me, and ill fix it)
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
*/
static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
long width, long height,
long lumStride, long chromStride, long srcStride)
{
long y;
- const long chromWidth= width>>1;
+ const x86_reg chromWidth= width>>1;
for (y=0; y<height; y+=2)
{
-#ifdef HAVE_MMX
- asm volatile(
+#if HAVE_MMX
+ __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
ydst += lumStride;
src += srcStride;
- asm volatile(
+ __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t"
ASMALIGN(4)
"1: \n\t"
ydst += lumStride;
src += srcStride;
}
-#ifdef HAVE_MMX
-asm volatile( EMMS" \n\t"
+#if HAVE_MMX
+__asm__ volatile( EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
#endif
dst+= dstStride;
for (y=1; y<srcHeight; y++){
-#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
- const long mmxSize= srcWidth&~15;
- asm volatile(
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+ const x86_reg mmxSize= srcWidth&~15;
+ __asm__ volatile(
"mov %4, %%"REG_a" \n\t"
"1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
);
#else
- const long mmxSize=1;
+ const x86_reg mmxSize=1;
#endif
dst[0 ]= (3*src[0] + src[srcStride])>>2;
dst[dstStride]= ( src[0] + 3*src[srcStride])>>2;
}
#endif
-#ifdef HAVE_MMX
-asm volatile( EMMS" \n\t"
+#if HAVE_MMX
+__asm__ volatile( EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
#endif
}
/**
- *
- * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
- * problem for anyone then tell me, and ill fix it)
- * chrominance data is only taken from every secound line others are ignored FIXME write HQ version
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ * Chrominance data is only taken from every second line; others are ignored.
+ * FIXME: Write HQ version.
*/
static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
long width, long height,
long lumStride, long chromStride, long srcStride)
{
long y;
- const long chromWidth= width>>1;
+ const x86_reg chromWidth= width>>1;
for (y=0; y<height; y+=2)
{
-#ifdef HAVE_MMX
- asm volatile(
- "xorl %%eax, %%eax \n\t"
+#if HAVE_MMX
+ __asm__ volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
ASMALIGN(4)
"1: \n\t"
- PREFETCH" 64(%0, %%eax, 4) \n\t"
- "movq (%0, %%eax, 4), %%mm0 \n\t" // UYVY UYVY(0)
- "movq 8(%0, %%eax, 4), %%mm1 \n\t" // UYVY UYVY(4)
+ PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
+ "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0)
+ "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(4)
"movq %%mm0, %%mm2 \n\t" // UYVY UYVY(0)
"movq %%mm1, %%mm3 \n\t" // UYVY UYVY(4)
"pand %%mm7, %%mm0 \n\t" // U0V0 U0V0(0)
"packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
"packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
- MOVNTQ" %%mm2, (%1, %%eax, 2) \n\t"
+ MOVNTQ" %%mm2, (%1, %%"REG_a", 2) \n\t"
- "movq 16(%0, %%eax, 4), %%mm1 \n\t" // UYVY UYVY(8)
- "movq 24(%0, %%eax, 4), %%mm2 \n\t" // UYVY UYVY(12)
+ "movq 16(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(8)
+ "movq 24(%0, %%"REG_a", 4), %%mm2 \n\t" // UYVY UYVY(12)
"movq %%mm1, %%mm3 \n\t" // UYVY UYVY(8)
"movq %%mm2, %%mm4 \n\t" // UYVY UYVY(12)
"pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(8)
"packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
"packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
- MOVNTQ" %%mm3, 8(%1, %%eax, 2) \n\t"
+ MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2) \n\t"
"movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
"movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
"packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
"packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
- MOVNTQ" %%mm0, (%3, %%eax) \n\t"
- MOVNTQ" %%mm2, (%2, %%eax) \n\t"
+ MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
+ MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
- "addl $8, %%eax \n\t"
- "cmpl %4, %%eax \n\t"
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
" jb 1b \n\t"
::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
- : "memory", "%eax"
+ : "memory", "%"REG_a
);
ydst += lumStride;
src += srcStride;
- asm volatile(
- "xorl %%eax, %%eax \n\t"
+ __asm__ volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
ASMALIGN(4)
"1: \n\t"
- PREFETCH" 64(%0, %%eax, 4) \n\t"
- "movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
- "movq 8(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(4)
- "movq 16(%0, %%eax, 4), %%mm2 \n\t" // YUYV YUYV(8)
- "movq 24(%0, %%eax, 4), %%mm3 \n\t" // YUYV YUYV(12)
+ PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
+ "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
+ "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
+ "movq 16(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(8)
+ "movq 24(%0, %%"REG_a", 4), %%mm3 \n\t" // YUYV YUYV(12)
"psrlw $8, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
"psrlw $8, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
"psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
"packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
"packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
- MOVNTQ" %%mm0, (%1, %%eax, 2) \n\t"
- MOVNTQ" %%mm2, 8(%1, %%eax, 2) \n\t"
+ MOVNTQ" %%mm0, (%1, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2) \n\t"
- "addl $8, %%eax \n\t"
- "cmpl %4, %%eax \n\t"
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
" jb 1b \n\t"
::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
- : "memory", "%eax"
+ : "memory", "%"REG_a
);
#else
long i;
ydst += lumStride;
src += srcStride;
}
-#ifdef HAVE_MMX
-asm volatile( EMMS" \n\t"
+#if HAVE_MMX
+__asm__ volatile( EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
#endif
}
/**
- *
- * height should be a multiple of 2 and width should be a multiple of 2 (if this is a
- * problem for anyone then tell me, and ill fix it)
- * chrominance data is only taken from every secound line others are ignored in the C version FIXME write HQ version
+ * Height should be a multiple of 2 and width should be a multiple of 2.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ * Chrominance data is only taken from every second line;
+ * others are ignored in the C version.
+ * FIXME: Write HQ version.
*/
static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
long width, long height,
long lumStride, long chromStride, long srcStride)
{
long y;
- const long chromWidth= width>>1;
-#ifdef HAVE_MMX
+ const x86_reg chromWidth= width>>1;
+#if HAVE_MMX
for (y=0; y<height-2; y+=2)
{
long i;
for (i=0; i<2; i++)
{
- asm volatile(
+ __asm__ volatile(
"mov %2, %%"REG_a" \n\t"
- "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
- "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "movq "MANGLE(ff_bgr2YCoeff)", %%mm6 \n\t"
+ "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
ASMALIGN(4)
"psraw $7, %%mm4 \n\t"
"packuswb %%mm4, %%mm0 \n\t"
- "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
+ "paddusb "MANGLE(ff_bgr2YOffset)", %%mm0 \n\t"
MOVNTQ" %%mm0, (%1, %%"REG_a") \n\t"
"add $8, %%"REG_a" \n\t"
" js 1b \n\t"
- : : "r" (src+width*3), "r" (ydst+width), "g" (-width)
+ : : "r" (src+width*3), "r" (ydst+width), "g" ((x86_reg)-width)
: "%"REG_a, "%"REG_d
);
ydst += lumStride;
src += srcStride;
}
src -= srcStride*2;
- asm volatile(
+ __asm__ volatile(
"mov %4, %%"REG_a" \n\t"
- "movq "MANGLE(w1111)", %%mm5 \n\t"
- "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
+ "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
+ "movq "MANGLE(ff_bgr2UCoeff)", %%mm6 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
"add %%"REG_d", %%"REG_d" \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%"REG_d") \n\t"
PREFETCH" 64(%1, %%"REG_d") \n\t"
-#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
+#if HAVE_MMX2 || HAVE_AMD3DNOW
"movq (%0, %%"REG_d"), %%mm0 \n\t"
"movq (%1, %%"REG_d"), %%mm1 \n\t"
"movq 6(%0, %%"REG_d"), %%mm2 \n\t"
"psrlw $2, %%mm0 \n\t"
"psrlw $2, %%mm2 \n\t"
#endif
- "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
- "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
+ "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
"pmaddwd %%mm0, %%mm1 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
"psraw $7, %%mm0 \n\t"
-#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
+#if HAVE_MMX2 || HAVE_AMD3DNOW
"movq 12(%0, %%"REG_d"), %%mm4 \n\t"
"movq 12(%1, %%"REG_d"), %%mm1 \n\t"
"movq 18(%0, %%"REG_d"), %%mm2 \n\t"
"paddw %%mm1, %%mm5 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm5, %%mm2 \n\t"
- "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
"psrlw $2, %%mm4 \n\t"
"psrlw $2, %%mm2 \n\t"
#endif
- "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
- "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
+ "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
"pmaddwd %%mm4, %%mm1 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"punpckldq %%mm4, %%mm0 \n\t"
"punpckhdq %%mm4, %%mm1 \n\t"
"packsswb %%mm1, %%mm0 \n\t"
- "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
+ "paddb "MANGLE(ff_bgr2UVOffset)", %%mm0 \n\t"
"movd %%mm0, (%2, %%"REG_a") \n\t"
"punpckhdq %%mm0, %%mm0 \n\t"
"movd %%mm0, (%3, %%"REG_a") \n\t"
src += srcStride*2;
}
- asm volatile( EMMS" \n\t"
+ __asm__ volatile( EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
#else
}
}
-void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest,
+static void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest,
long width, long height, long src1Stride,
long src2Stride, long dstStride){
long h;
{
long w;
-#ifdef HAVE_MMX
-#ifdef HAVE_SSE2
- asm(
+#if HAVE_MMX
+#if HAVE_SSE2
+ __asm__(
"xor %%"REG_a", %%"REG_a" \n\t"
"1: \n\t"
PREFETCH" 64(%1, %%"REG_a") \n\t"
"add $16, %%"REG_a" \n\t"
"cmp %3, %%"REG_a" \n\t"
" jb 1b \n\t"
- ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
+ ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
: "memory", "%"REG_a""
);
#else
- asm(
+ __asm__(
"xor %%"REG_a", %%"REG_a" \n\t"
"1: \n\t"
PREFETCH" 64(%1, %%"REG_a") \n\t"
"add $16, %%"REG_a" \n\t"
"cmp %3, %%"REG_a" \n\t"
" jb 1b \n\t"
- ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
+ ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
: "memory", "%"REG_a
);
#endif
src1 += src1Stride;
src2 += src2Stride;
}
-#ifdef HAVE_MMX
- asm(
+#if HAVE_MMX
+ __asm__(
EMMS" \n\t"
SFENCE" \n\t"
::: "memory"
long srcStride1, long srcStride2,
long dstStride1, long dstStride2)
{
- long y,x,w,h;
+ x86_reg y;
+ long x,w,h;
w=width/2; h=height/2;
-#ifdef HAVE_MMX
- asm volatile(
+#if HAVE_MMX
+ __asm__ volatile(
PREFETCH" %0 \n\t"
PREFETCH" %1 \n\t"
::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");
const uint8_t* s1=src1+srcStride1*(y>>1);
uint8_t* d=dst1+dstStride1*y;
x=0;
-#ifdef HAVE_MMX
+#if HAVE_MMX
for (;x<w-31;x+=32)
{
- asm volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t"
"movq 8%1, %%mm2 \n\t"
const uint8_t* s2=src2+srcStride2*(y>>1);
uint8_t* d=dst2+dstStride2*y;
x=0;
-#ifdef HAVE_MMX
+#if HAVE_MMX
for (;x<w-31;x+=32)
{
- asm volatile(
+ __asm__ volatile(
PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t"
"movq 8%1, %%mm2 \n\t"
#endif
for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
}
-#ifdef HAVE_MMX
- asm(
+#if HAVE_MMX
+ __asm__(
EMMS" \n\t"
SFENCE" \n\t"
::: "memory"
long srcStride1, long srcStride2,
long srcStride3, long dstStride)
{
- long y,x,w,h;
+ x86_reg x;
+ long y,w,h;
w=width/2; h=height;
for (y=0;y<h;y++){
const uint8_t* yp=src1+srcStride1*y;
const uint8_t* vp=src3+srcStride3*(y>>2);
uint8_t* d=dst+dstStride*y;
x=0;
-#ifdef HAVE_MMX
+#if HAVE_MMX
for (;x<w-7;x+=8)
{
- asm volatile(
+ __asm__ volatile(
PREFETCH" 32(%1, %0) \n\t"
PREFETCH" 32(%2, %0) \n\t"
PREFETCH" 32(%3, %0) \n\t"
d[8*x+7] = vp[x];
}
}
-#ifdef HAVE_MMX
- asm(
+#if HAVE_MMX
+ __asm__(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
+#endif
+}
+
+static void RENAME(extract_even)(const uint8_t *src, uint8_t *dst, x86_reg count)
+{
+ dst += count;
+ src += 2*count;
+ count= - count;
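+    /* Counting from -count up toward zero lets the MMX loop below end with
+       "add $16, %0; js 1b", testing the sign flag instead of a compare. */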
+
+#if HAVE_MMX
+ if(count <= -16){
+ count += 15;
+ __asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $8, %%mm7 \n\t"
+ "1: \n\t"
+ "movq -30(%1, %0, 2), %%mm0 \n\t"
+ "movq -22(%1, %0, 2), %%mm1 \n\t"
+ "movq -14(%1, %0, 2), %%mm2 \n\t"
+ "movq -6(%1, %0, 2), %%mm3 \n\t"
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "pand %%mm7, %%mm3 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm2 \n\t"
+ MOVNTQ" %%mm0,-15(%2, %0) \n\t"
+ MOVNTQ" %%mm2,- 7(%2, %0) \n\t"
+ "add $16, %0 \n\t"
+ " js 1b \n\t"
+ : "+r"(count)
+ : "r"(src), "r"(dst)
+ );
+ count -= 15;
+ }
+#endif
+ while(count<0){
+ dst[count]= src[2*count];
+ count++;
+ }
+}
+
+static void RENAME(extract_even2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
+{
+ dst0+= count;
+ dst1+= count;
+ src += 4*count;
+ count= - count;
+#if HAVE_MMX
+ if(count <= -8){
+ count += 7;
+ __asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $8, %%mm7 \n\t"
+ "1: \n\t"
+ "movq -28(%1, %0, 4), %%mm0 \n\t"
+ "movq -20(%1, %0, 4), %%mm1 \n\t"
+ "movq -12(%1, %0, 4), %%mm2 \n\t"
+ "movq -4(%1, %0, 4), %%mm3 \n\t"
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "pand %%mm7, %%mm3 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "psrlw $8, %%mm2 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm7, %%mm3 \n\t"
+ "packuswb %%mm2, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm1 \n\t"
+ MOVNTQ" %%mm0,- 7(%3, %0) \n\t"
+ MOVNTQ" %%mm1,- 7(%2, %0) \n\t"
+ "add $8, %0 \n\t"
+ " js 1b \n\t"
+ : "+r"(count)
+ : "r"(src), "r"(dst0), "r"(dst1)
+ );
+ count -= 7;
+ }
+#endif
+ while(count<0){
+ dst0[count]= src[4*count+0];
+ dst1[count]= src[4*count+2];
+ count++;
+ }
+}
+
+static void RENAME(extract_even2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
+{
+ dst0 += count;
+ dst1 += count;
+ src0 += 4*count;
+ src1 += 4*count;
+ count= - count;
+#ifdef PAVGB
+ if(count <= -8){
+ count += 7;
+ __asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $8, %%mm7 \n\t"
+ "1: \n\t"
+ "movq -28(%1, %0, 4), %%mm0 \n\t"
+ "movq -20(%1, %0, 4), %%mm1 \n\t"
+ "movq -12(%1, %0, 4), %%mm2 \n\t"
+ "movq -4(%1, %0, 4), %%mm3 \n\t"
+ PAVGB" -28(%2, %0, 4), %%mm0 \n\t"
+ PAVGB" -20(%2, %0, 4), %%mm1 \n\t"
+ PAVGB" -12(%2, %0, 4), %%mm2 \n\t"
+ PAVGB" - 4(%2, %0, 4), %%mm3 \n\t"
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "pand %%mm7, %%mm3 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "psrlw $8, %%mm2 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm7, %%mm3 \n\t"
+ "packuswb %%mm2, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm1 \n\t"
+ MOVNTQ" %%mm0,- 7(%4, %0) \n\t"
+ MOVNTQ" %%mm1,- 7(%3, %0) \n\t"
+ "add $8, %0 \n\t"
+ " js 1b \n\t"
+ : "+r"(count)
+ : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
+ );
+ count -= 7;
+ }
+#endif
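+    /* Note: PAVGB rounds the average up ((a+b+1)>>1), while this scalar
+       tail truncates ((a+b)>>1), so the two paths can differ by 1 LSB. */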
+ while(count<0){
+ dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
+ dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
+ count++;
+ }
+}
+
+static void RENAME(extract_odd2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
+{
+ dst0+= count;
+ dst1+= count;
+ src += 4*count;
+ count= - count;
+#if HAVE_MMX
+ if(count <= -8){
+ count += 7;
+ __asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $8, %%mm7 \n\t"
+ "1: \n\t"
+ "movq -28(%1, %0, 4), %%mm0 \n\t"
+ "movq -20(%1, %0, 4), %%mm1 \n\t"
+ "movq -12(%1, %0, 4), %%mm2 \n\t"
+ "movq -4(%1, %0, 4), %%mm3 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "psrlw $8, %%mm1 \n\t"
+ "psrlw $8, %%mm2 \n\t"
+ "psrlw $8, %%mm3 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "psrlw $8, %%mm2 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm7, %%mm3 \n\t"
+ "packuswb %%mm2, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm1 \n\t"
+ MOVNTQ" %%mm0,- 7(%3, %0) \n\t"
+ MOVNTQ" %%mm1,- 7(%2, %0) \n\t"
+ "add $8, %0 \n\t"
+ " js 1b \n\t"
+ : "+r"(count)
+ : "r"(src), "r"(dst0), "r"(dst1)
+ );
+ count -= 7;
+ }
+#endif
+ src++;
+ while(count<0){
+ dst0[count]= src[4*count+0];
+ dst1[count]= src[4*count+2];
+ count++;
+ }
+}
+
+static void RENAME(extract_odd2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
+{
+ dst0 += count;
+ dst1 += count;
+ src0 += 4*count;
+ src1 += 4*count;
+ count= - count;
+#ifdef PAVGB
+ if(count <= -8){
+ count += 7;
+ __asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $8, %%mm7 \n\t"
+ "1: \n\t"
+ "movq -28(%1, %0, 4), %%mm0 \n\t"
+ "movq -20(%1, %0, 4), %%mm1 \n\t"
+ "movq -12(%1, %0, 4), %%mm2 \n\t"
+ "movq -4(%1, %0, 4), %%mm3 \n\t"
+ PAVGB" -28(%2, %0, 4), %%mm0 \n\t"
+ PAVGB" -20(%2, %0, 4), %%mm1 \n\t"
+ PAVGB" -12(%2, %0, 4), %%mm2 \n\t"
+ PAVGB" - 4(%2, %0, 4), %%mm3 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "psrlw $8, %%mm1 \n\t"
+ "psrlw $8, %%mm2 \n\t"
+ "psrlw $8, %%mm3 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "psrlw $8, %%mm2 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm7, %%mm3 \n\t"
+ "packuswb %%mm2, %%mm0 \n\t"
+ "packuswb %%mm3, %%mm1 \n\t"
+ MOVNTQ" %%mm0,- 7(%4, %0) \n\t"
+ MOVNTQ" %%mm1,- 7(%3, %0) \n\t"
+ "add $8, %0 \n\t"
+ " js 1b \n\t"
+ : "+r"(count)
+ : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
+ );
+ count -= 7;
+ }
+#endif
+ src0++;
+ src1++;
+ while(count<0){
+ dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
+ dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
+ count++;
+ }
+}
+
+static void RENAME(yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride)
+{
+ long y;
+ const long chromWidth= -((-width)>>1);
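+    /* -((-width)>>1) is a branch-free ceil(width/2): the arithmetic right
+       shift rounds toward minus infinity, so e.g. width = 5 gives
+       -((-5)>>1) = -(-3) = 3 chroma samples per line. */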
+
+ for (y=0; y<height; y++){
+ RENAME(extract_even)(src, ydst, width);
+ if(y&1){
+ RENAME(extract_odd2avg)(src-srcStride, src, udst, vdst, chromWidth);
+ udst+= chromStride;
+ vdst+= chromStride;
+ }
+
+ src += srcStride;
+ ydst+= lumStride;
+ }
+#if HAVE_MMX
+ __asm__(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
+#endif
+}
+
+static void RENAME(yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride)
+{
+ long y;
+ const long chromWidth= -((-width)>>1);
+
+ for (y=0; y<height; y++){
+ RENAME(extract_even)(src, ydst, width);
+ RENAME(extract_odd2)(src, udst, vdst, chromWidth);
+
+ src += srcStride;
+ ydst+= lumStride;
+ udst+= chromStride;
+ vdst+= chromStride;
+ }
+#if HAVE_MMX
+ __asm__(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
+#endif
+}
+
+static void RENAME(uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride)
+{
+ long y;
+ const long chromWidth= -((-width)>>1);
+
+ for (y=0; y<height; y++){
+ RENAME(extract_even)(src+1, ydst, width);
+ if(y&1){
+ RENAME(extract_even2avg)(src-srcStride, src, udst, vdst, chromWidth);
+ udst+= chromStride;
+ vdst+= chromStride;
+ }
+
+ src += srcStride;
+ ydst+= lumStride;
+ }
+#if HAVE_MMX
+ __asm__(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
+#endif
+}
+
+static void RENAME(uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride)
+{
+ long y;
+ const long chromWidth= -((-width)>>1);
+
+ for (y=0; y<height; y++){
+ RENAME(extract_even)(src+1, ydst, width);
+ RENAME(extract_even2)(src, udst, vdst, chromWidth);
+
+ src += srcStride;
+ ydst+= lumStride;
+ udst+= chromStride;
+ vdst+= chromStride;
+ }
+#if HAVE_MMX
+ __asm__(
EMMS" \n\t"
SFENCE" \n\t"
::: "memory"
static inline void RENAME(rgb2rgb_init)(void){
rgb15to16 = RENAME(rgb15to16);
- rgb15to24 = RENAME(rgb15to24);
+ rgb15tobgr24 = RENAME(rgb15tobgr24);
rgb15to32 = RENAME(rgb15to32);
- rgb16to24 = RENAME(rgb16to24);
+ rgb16tobgr24 = RENAME(rgb16tobgr24);
rgb16to32 = RENAME(rgb16to32);
rgb16to15 = RENAME(rgb16to15);
- rgb24to16 = RENAME(rgb24to16);
- rgb24to15 = RENAME(rgb24to15);
- rgb24to32 = RENAME(rgb24to32);
+ rgb24tobgr16 = RENAME(rgb24tobgr16);
+ rgb24tobgr15 = RENAME(rgb24tobgr15);
+ rgb24tobgr32 = RENAME(rgb24tobgr32);
rgb32to16 = RENAME(rgb32to16);
rgb32to15 = RENAME(rgb32to15);
- rgb32to24 = RENAME(rgb32to24);
- rgb24tobgr15 = RENAME(rgb24tobgr15);
- rgb24tobgr16 = RENAME(rgb24tobgr16);
+ rgb32tobgr24 = RENAME(rgb32tobgr24);
+ rgb24to15 = RENAME(rgb24to15);
+ rgb24to16 = RENAME(rgb24to16);
rgb24tobgr24 = RENAME(rgb24tobgr24);
rgb32tobgr32 = RENAME(rgb32tobgr32);
rgb32tobgr16 = RENAME(rgb32tobgr16);
yv12toyuy2 = RENAME(yv12toyuy2);
yv12touyvy = RENAME(yv12touyvy);
yuv422ptoyuy2 = RENAME(yuv422ptoyuy2);
+ yuv422ptouyvy = RENAME(yuv422ptouyvy);
yuy2toyv12 = RENAME(yuy2toyv12);
-// uyvytoyv12 = RENAME(uyvytoyv12);
// yvu9toyv12 = RENAME(yvu9toyv12);
planar2x = RENAME(planar2x);
rgb24toyv12 = RENAME(rgb24toyv12);
interleaveBytes = RENAME(interleaveBytes);
vu9_to_vu12 = RENAME(vu9_to_vu12);
yvu9_to_yuy2 = RENAME(yvu9_to_yuy2);
+
+ uyvytoyuv420 = RENAME(uyvytoyuv420);
+ uyvytoyuv422 = RENAME(uyvytoyuv422);
+ yuyvtoyuv420 = RENAME(yuyvtoyuv420);
+ yuyvtoyuv422 = RENAME(yuyvtoyuv422);
}