/*
supported Input formats: YV12, I420/IYUV, YUY2, UYVY, BGR32, BGR24, BGR16, BGR15, RGB32, RGB24, Y8/Y800, YVU9/IF09
- supported output formats: YV12, I420/IYUV, YUY2, {BGR,RGB}{1,4,8,15,16,24,32}, Y8/Y800, YVU9/IF09
+ supported output formats: YV12, I420/IYUV, YUY2, UYVY, {BGR,RGB}{1,4,8,15,16,24,32}, Y8/Y800, YVU9/IF09
{BGR,RGB}{1,4,8,15,16} support dithering
unscaled special converters (YV12=I420=IYUV, Y800=Y8)
#include <string.h>
#include <math.h>
#include <stdio.h>
+#include <unistd.h>
#include "../config.h"
#include "../mangle.h"
#include <assert.h>
#else
#include <stdlib.h>
#endif
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+#endif
#include "swscale.h"
#include "swscale_internal.h"
#include "../cpudetect.h"
#include "../libvo/img_format.h"
#include "rgb2rgb.h"
#include "../libvo/fastmemcpy.h"
-#include "../mp_msg.h"
-
-#define MSG_WARN(args...) mp_msg(MSGT_SWS,MSGL_WARN, ##args )
-#define MSG_FATAL(args...) mp_msg(MSGT_SWS,MSGL_FATAL, ##args )
-#define MSG_ERR(args...) mp_msg(MSGT_SWS,MSGL_ERR, ##args )
-#define MSG_V(args...) mp_msg(MSGT_SWS,MSGL_V, ##args )
-#define MSG_DBG2(args...) mp_msg(MSGT_SWS,MSGL_DBG2, ##args )
-#define MSG_INFO(args...) mp_msg(MSGT_SWS,MSGL_INFO, ##args )
#undef MOVNTQ
#undef PAVGB
//FIXME replace this with something faster
#define isPlanarYUV(x) ((x)==IMGFMT_YV12 || (x)==IMGFMT_YVU9 \
+ || (x)==IMGFMT_NV12 || (x)==IMGFMT_NV21 \
|| (x)==IMGFMT_444P || (x)==IMGFMT_422P || (x)==IMGFMT_411P)
#define isYUV(x) ((x)==IMGFMT_UYVY || (x)==IMGFMT_YUY2 || isPlanarYUV(x))
#define isGray(x) ((x)==IMGFMT_Y800)
|| (x)==IMGFMT_RGB32|| (x)==IMGFMT_RGB24\
|| (x)==IMGFMT_Y800 || (x)==IMGFMT_YVU9\
|| (x)==IMGFMT_444P || (x)==IMGFMT_422P || (x)==IMGFMT_411P)
-#define isSupportedOut(x) ((x)==IMGFMT_YV12 || (x)==IMGFMT_YUY2\
+#define isSupportedOut(x) ((x)==IMGFMT_YV12 || (x)==IMGFMT_YUY2 || (x)==IMGFMT_UYVY\
|| (x)==IMGFMT_444P || (x)==IMGFMT_422P || (x)==IMGFMT_411P\
|| isRGB(x) || isBGR(x)\
+ || (x)==IMGFMT_NV12 || (x)==IMGFMT_NV21\
|| (x)==IMGFMT_Y800 || (x)==IMGFMT_YVU9)
#define isPacked(x) ((x)==IMGFMT_YUY2 || (x)==IMGFMT_UYVY ||isRGB(x) || isBGR(x))
#define RV ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
#define RU ((int)(-0.148*(1<<RGB2YUV_SHIFT)+0.5))
-extern int verbose; // defined in mplayer.c
extern const int32_t Inverse_Table_6_9[8][4];
/*
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
-#ifdef ARCH_X86
-#define CAN_COMPILE_X86_ASM
-#endif
-
-#ifdef CAN_COMPILE_X86_ASM
-static uint64_t __attribute__((aligned(8))) bF8= 0xF8F8F8F8F8F8F8F8LL;
-static uint64_t __attribute__((aligned(8))) bFC= 0xFCFCFCFCFCFCFCFCLL;
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
+static uint64_t attribute_used __attribute__((aligned(8))) bF8= 0xF8F8F8F8F8F8F8F8LL;
+static uint64_t attribute_used __attribute__((aligned(8))) bFC= 0xFCFCFCFCFCFCFCFCLL;
static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
-static uint64_t __attribute__((aligned(8))) w02= 0x0002000200020002LL;
-static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
-static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
-static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;
-static uint64_t __attribute__((aligned(8))) bm01010101=0x00FF00FF00FF00FFLL;
+static uint64_t attribute_used __attribute__((aligned(8))) w02= 0x0002000200020002LL;
+static uint64_t attribute_used __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
+static uint64_t attribute_used __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
+static uint64_t attribute_used __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;
+static uint64_t attribute_used __attribute__((aligned(8))) bm01010101=0x00FF00FF00FF00FFLL;
-static volatile uint64_t __attribute__((aligned(8))) b5Dither;
-static volatile uint64_t __attribute__((aligned(8))) g5Dither;
-static volatile uint64_t __attribute__((aligned(8))) g6Dither;
-static volatile uint64_t __attribute__((aligned(8))) r5Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) b5Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) g5Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) g6Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) r5Dither;
static uint64_t __attribute__((aligned(8))) dither4[2]={
0x0103010301030103LL,
0x0004000400040004LL,};
static uint64_t __attribute__((aligned(8))) b16Mask= 0x001F001F001F001FLL;
-static uint64_t __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
-static uint64_t __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
+static uint64_t attribute_used __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
+static uint64_t attribute_used __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
static uint64_t __attribute__((aligned(8))) b15Mask= 0x001F001F001F001FLL;
-static uint64_t __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
-static uint64_t __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;
+static uint64_t attribute_used __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
+static uint64_t attribute_used __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;
-static uint64_t __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFLL;
-static uint64_t __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00LL;
-static uint64_t __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000LL;
+static uint64_t attribute_used __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFLL;
+static uint64_t attribute_used __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00LL;
+static uint64_t attribute_used __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000LL;
#ifdef FAST_BGR2YV12
-static const uint64_t bgr2YCoeff __attribute__((aligned(8))) = 0x000000210041000DULL;
-static const uint64_t bgr2UCoeff __attribute__((aligned(8))) = 0x0000FFEEFFDC0038ULL;
-static const uint64_t bgr2VCoeff __attribute__((aligned(8))) = 0x00000038FFD2FFF8ULL;
+static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000000210041000DULL;
+static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000FFEEFFDC0038ULL;
+static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00000038FFD2FFF8ULL;
#else
-static const uint64_t bgr2YCoeff __attribute__((aligned(8))) = 0x000020E540830C8BULL;
-static const uint64_t bgr2UCoeff __attribute__((aligned(8))) = 0x0000ED0FDAC23831ULL;
-static const uint64_t bgr2VCoeff __attribute__((aligned(8))) = 0x00003831D0E6F6EAULL;
+static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000020E540830C8BULL;
+static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000ED0FDAC23831ULL;
+static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00003831D0E6F6EAULL;
#endif
-static const uint64_t bgr2YOffset __attribute__((aligned(8))) = 0x1010101010101010ULL;
-static const uint64_t bgr2UVOffset __attribute__((aligned(8)))= 0x8080808080808080ULL;
-static const uint64_t w1111 __attribute__((aligned(8))) = 0x0001000100010001ULL;
+static const uint64_t bgr2YOffset attribute_used __attribute__((aligned(8))) = 0x1010101010101010ULL;
+static const uint64_t bgr2UVOffset attribute_used __attribute__((aligned(8)))= 0x8080808080808080ULL;
+static const uint64_t w1111 attribute_used __attribute__((aligned(8))) = 0x0001000100010001ULL;
#endif
// clipping helper table for C implementations:
static unsigned char clip_table[768];
-//global sws_flags from the command line
-int sws_flags=2;
-
-//global srcFilter
-SwsFilter src_filter= {NULL, NULL, NULL, NULL};
-
-float sws_lum_gblur= 0.0;
-float sws_chr_gblur= 0.0;
-int sws_chr_vshift= 0;
-int sws_chr_hshift= 0;
-float sws_chr_sharpen= 0.0;
-float sws_lum_sharpen= 0.0;
-
-/* cpuCaps combined from cpudetect and whats actually compiled in
- (if there is no support for something compiled in it wont appear here) */
-static CpuCaps cpuCaps;
-
-int (*swScale)(SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[])=NULL;
-
static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b);
extern const uint8_t dither_2x2_4[2][8];
extern const uint8_t dither_8x8_73[8][8];
extern const uint8_t dither_8x8_220[8][8];
-#ifdef CAN_COMPILE_X86_ASM
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
void in_asm_used_var_warning_killer()
{
volatile int i= bF8+bFC+w10+
}
#endif
-static int testFormat[]={
-IMGFMT_YVU9,
-IMGFMT_YV12,
-//IMGFMT_IYUV,
-IMGFMT_I420,
-IMGFMT_BGR15,
-IMGFMT_BGR16,
-IMGFMT_BGR24,
-IMGFMT_BGR32,
-IMGFMT_RGB24,
-IMGFMT_RGB32,
-//IMGFMT_Y8,
-IMGFMT_Y800,
-//IMGFMT_YUY2,
-0
-};
-
-static uint64_t getSSD(uint8_t *src1, uint8_t *src2, int stride1, int stride2, int w, int h){
- int x,y;
- uint64_t ssd=0;
-
- for(y=0; y<h; y++){
- for(x=0; x<w; x++){
- int d= src1[x + y*stride1] - src2[x + y*stride2];
- ssd+= d*d;
- }
- }
- return ssd;
-}
-
-// test by ref -> src -> dst -> out & compare out against ref
-// ref & out are YV12
-static void doTest(uint8_t *ref[3], int refStride[3], int w, int h, int srcFormat, int dstFormat,
- int srcW, int srcH, int dstW, int dstH, int flags){
- uint8_t *src[3];
- uint8_t *dst[3];
- uint8_t *out[3];
- int srcStride[3], dstStride[3];
+static inline void yuv2yuvXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
+{
+	//FIXME optimize (quickly written, not optimized yet)
int i;
- uint64_t ssdY, ssdU, ssdV;
- SwsContext *srcContext, *dstContext, *outContext;
-
- for(i=0; i<3; i++){
- // avoid stride % bpp != 0
- if(srcFormat==IMGFMT_RGB24 || srcFormat==IMGFMT_BGR24)
- srcStride[i]= srcW*3;
- else
- srcStride[i]= srcW*4;
-
- if(dstFormat==IMGFMT_RGB24 || dstFormat==IMGFMT_BGR24)
- dstStride[i]= dstW*3;
- else
- dstStride[i]= dstW*4;
-
- src[i]= malloc(srcStride[i]*srcH);
- dst[i]= malloc(dstStride[i]*dstH);
- out[i]= malloc(refStride[i]*h);
- }
-
- srcContext= sws_getContext(w, h, IMGFMT_YV12, srcW, srcH, srcFormat, flags, NULL, NULL);
- dstContext= sws_getContext(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flags, NULL, NULL);
- outContext= sws_getContext(dstW, dstH, dstFormat, w, h, IMGFMT_YV12, flags, NULL, NULL);
- if(srcContext==NULL ||dstContext==NULL ||outContext==NULL){
- printf("Failed allocating swsContext\n");
- goto end;
- }
-// printf("test %X %X %X -> %X %X %X\n", (int)ref[0], (int)ref[1], (int)ref[2],
-// (int)src[0], (int)src[1], (int)src[2]);
-
- srcContext->swScale(srcContext, ref, refStride, 0, h , src, srcStride);
- dstContext->swScale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
- outContext->swScale(outContext, dst, dstStride, 0, dstH, out, refStride);
-
- ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h);
- ssdU= getSSD(ref[1], out[1], refStride[1], refStride[1], (w+1)>>1, (h+1)>>1);
- ssdV= getSSD(ref[2], out[2], refStride[2], refStride[2], (w+1)>>1, (h+1)>>1);
-
- if(isGray(srcFormat) || isGray(dstFormat)) ssdU=ssdV=0; //FIXME check that output is really gray
-
- ssdY/= w*h;
- ssdU/= w*h/4;
- ssdV/= w*h/4;
-
- if(ssdY>100 || ssdU>50 || ssdV>50){
- printf(" %s %dx%d -> %s %4dx%4d flags=%2d SSD=%5lld,%5lld,%5lld\n",
- vo_format_name(srcFormat), srcW, srcH,
- vo_format_name(dstFormat), dstW, dstH,
- flags,
- ssdY, ssdU, ssdV);
- }
+ for(i=0; i<dstW; i++)
+ {
+ int val=1<<18;
+ int j;
+ for(j=0; j<lumFilterSize; j++)
+ val += lumSrc[j][i] * lumFilter[j];
- end:
-
- sws_freeContext(srcContext);
- sws_freeContext(dstContext);
- sws_freeContext(outContext);
-
- for(i=0; i<3; i++){
- free(src[i]);
- free(dst[i]);
- free(out[i]);
+ dest[i]= MIN(MAX(val>>19, 0), 255);
}
-}
-static void selfTest(uint8_t *src[3], int stride[3], int w, int h){
- int srcFormat, dstFormat, srcFormatIndex, dstFormatIndex;
- int srcW, srcH, dstW, dstH;
- int flags;
-
- for(srcFormatIndex=0; ;srcFormatIndex++){
- srcFormat= testFormat[srcFormatIndex];
- if(!srcFormat) break;
- for(dstFormatIndex=0; ;dstFormatIndex++){
- dstFormat= testFormat[dstFormatIndex];
- if(!dstFormat) break;
- if(!isSupportedOut(dstFormat)) continue;
-printf("%s -> %s\n",
- vo_format_name(srcFormat),
- vo_format_name(dstFormat));
-
- srcW= w+w/3;
- srcH= h+h/3;
- for(dstW=w; dstW<w*2; dstW+= dstW/3){
- for(dstH=h; dstH<h*2; dstH+= dstH/3){
- for(flags=1; flags<33; flags*=2)
- doTest(src, stride, w, h, srcFormat, dstFormat,
- srcW, srcH, dstW, dstH, flags);
- }
+ if(uDest != NULL)
+ for(i=0; i<chrDstW; i++)
+ {
+ int u=1<<18;
+ int v=1<<18;
+ int j;
+ for(j=0; j<chrFilterSize; j++)
+ {
+ u += chrSrc[j][i] * chrFilter[j];
+ v += chrSrc[j][i + 2048] * chrFilter[j];
}
+
+ uDest[i]= MIN(MAX(u>>19, 0), 255);
+ vDest[i]= MIN(MAX(v>>19, 0), 255);
}
- }
}
-static inline void yuv2yuvXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
- int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
+/* C output stage for NV12/NV21: applies the vertical luma/chroma filters
+   and writes the luma plane to dest and interleaved chroma to uDest
+   (NV12: U,V byte order; otherwise V,U). uDest==NULL skips chroma. */
+static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+                            int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+                            uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
{
//FIXME Optimize (just quickly writen not opti..)
int i;
for(i=0; i<dstW; i++)
{
- int val=0;
+ int val=1<<18;
int j;
for(j=0; j<lumFilterSize; j++)
val += lumSrc[j][i] * lumFilter[j];
dest[i]= MIN(MAX(val>>19, 0), 255);
}
- if(uDest != NULL)
+ if(uDest == NULL)
+ return;
+
+ if(dstFormat == IMGFMT_NV12)
for(i=0; i<chrDstW; i++)
{
- int u=0;
- int v=0;
+ int u=1<<18;
+ int v=1<<18;
int j;
for(j=0; j<chrFilterSize; j++)
{
v += chrSrc[j][i + 2048] * chrFilter[j];
}
- uDest[i]= MIN(MAX(u>>19, 0), 255);
- vDest[i]= MIN(MAX(v>>19, 0), 255);
+ uDest[2*i]= MIN(MAX(u>>19, 0), 255);
+ uDest[2*i+1]= MIN(MAX(v>>19, 0), 255);
}
-}
+ else
+ for(i=0; i<chrDstW; i++)
+ {
+ int u=1<<18;
+ int v=1<<18;
+ int j;
+ for(j=0; j<chrFilterSize; j++)
+ {
+ u += chrSrc[j][i] * chrFilter[j];
+ v += chrSrc[j][i + 2048] * chrFilter[j];
+ }
+ uDest[2*i]= MIN(MAX(v>>19, 0), 255);
+ uDest[2*i+1]= MIN(MAX(u>>19, 0), 255);
+ }
+}
#define YSCALE_YUV_2_PACKEDX_C(type) \
for(i=0; i<(dstW>>1); i++){\
int j;\
- int Y1=0;\
- int Y2=0;\
- int U=0;\
- int V=0;\
+ int Y1=1<<18;\
+ int Y2=1<<18;\
+ int U=1<<18;\
+ int V=1<<18;\
type *r, *b, *g;\
const int i2= 2*i;\
\
((uint8_t*)dest)[3]= r[Y2];\
((uint8_t*)dest)[4]= g[Y2];\
((uint8_t*)dest)[5]= b[Y2];\
- ((uint8_t*)dest)+=6;\
+ dest+=6;\
}\
break;\
case IMGFMT_BGR24:\
((uint8_t*)dest)[3]= b[Y2];\
((uint8_t*)dest)[4]= g[Y2];\
((uint8_t*)dest)[5]= r[Y2];\
- ((uint8_t*)dest)+=6;\
+ dest+=6;\
}\
break;\
case IMGFMT_RGB16:\
acc+= acc + g[((buf0[i+6]*yalpha1+buf1[i+6]*yalpha)>>19) + d128[6]];\
acc+= acc + g[((buf0[i+7]*yalpha1+buf1[i+7]*yalpha)>>19) + d128[7]];\
((uint8_t*)dest)[0]= acc;\
- ((uint8_t*)dest)++;\
+ dest++;\
}\
\
/*\
((uint8_t*)dest)[2*i2+3]= V;\
} \
break;\
+ case IMGFMT_UYVY:\
+ func2\
+ ((uint8_t*)dest)[2*i2+0]= U;\
+ ((uint8_t*)dest)[2*i2+1]= Y1;\
+ ((uint8_t*)dest)[2*i2+2]= V;\
+ ((uint8_t*)dest)[2*i2+3]= Y2;\
+ } \
+ break;\
}\
((uint8_t*)dest)[3]= r[Y2];
((uint8_t*)dest)[4]= g[Y2];
((uint8_t*)dest)[5]= b[Y2];
- ((uint8_t*)dest)+=6;
+ dest+=6;
}
break;
case IMGFMT_BGR24:
((uint8_t*)dest)[3]= b[Y2];
((uint8_t*)dest)[4]= g[Y2];
((uint8_t*)dest)[5]= r[Y2];
- ((uint8_t*)dest)+=6;
+ dest+=6;
}
break;
case IMGFMT_RGB16:
int acc=0;
for(i=0; i<dstW-1; i+=2){
int j;
- int Y1=0;
- int Y2=0;
+ int Y1=1<<18;
+ int Y2=1<<18;
for(j=0; j<lumFilterSize; j++)
{
acc+= acc + g[Y2+d128[(i+1)&7]];
if((i&7)==6){
((uint8_t*)dest)[0]= acc;
- ((uint8_t*)dest)++;
+ dest++;
}
}
}
((uint8_t*)dest)[2*i2+3]= V;
}
break;
+ case IMGFMT_UYVY:
+ YSCALE_YUV_2_PACKEDX_C(void)
+ ((uint8_t*)dest)[2*i2+0]= U;
+ ((uint8_t*)dest)[2*i2+1]= Y1;
+ ((uint8_t*)dest)[2*i2+2]= V;
+ ((uint8_t*)dest)[2*i2+3]= Y2;
+ }
+ break;
}
}
#define COMPILE_C
#endif
-#ifdef CAN_COMPILE_X86_ASM
+#ifdef ARCH_POWERPC
+#ifdef HAVE_ALTIVEC
+#define COMPILE_ALTIVEC
+#endif //HAVE_ALTIVEC
+#endif //ARCH_POWERPC
+
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
#if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
#define COMPILE_MMX
#if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
#define COMPILE_3DNOW
#endif
-#endif //CAN_COMPILE_X86_ASM
+#endif //ARCH_X86 || ARCH_X86_64
#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
+#undef HAVE_ALTIVEC
#define RENAME(a) a ## _C
#include "swscale_template.c"
#endif
-#ifdef CAN_COMPILE_X86_ASM
+#ifdef ARCH_POWERPC
+#ifdef COMPILE_ALTIVEC
+#undef RENAME
+#define HAVE_ALTIVEC
+#define RENAME(a) a ## _altivec
+#include "swscale_template.c"
+#endif
+#endif //ARCH_POWERPC
+
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
//X86 versions
/*
#include "swscale_template.c"
#endif
-#endif //CAN_COMPILE_X86_ASM
-
-// minor note: the HAVE_xyz is messed up after that line so dont use it
-
-
-// old global scaler, dont use for new code
-// will use sws_flags from the command line
-void SwScale_YV12slice(unsigned char* src[], int srcStride[], int srcSliceY ,
- int srcSliceH, uint8_t* dst[], int dstStride, int dstbpp,
- int srcW, int srcH, int dstW, int dstH){
-
- static SwsContext *context=NULL;
- int dstFormat;
- int dstStride3[3]= {dstStride, dstStride>>1, dstStride>>1};
-
- switch(dstbpp)
- {
- case 8 : dstFormat= IMGFMT_Y8; break;
- case 12: dstFormat= IMGFMT_YV12; break;
- case 15: dstFormat= IMGFMT_BGR15; break;
- case 16: dstFormat= IMGFMT_BGR16; break;
- case 24: dstFormat= IMGFMT_BGR24; break;
- case 32: dstFormat= IMGFMT_BGR32; break;
- default: return;
- }
-
- if(!context) context=sws_getContextFromCmdLine(srcW, srcH, IMGFMT_YV12, dstW, dstH, dstFormat);
-
- context->swScale(context, src, srcStride, srcSliceY, srcSliceH, dst, dstStride3);
-}
-
-void sws_getFlagsAndFilterFromCmdLine(int *flags, SwsFilter **srcFilterParam, SwsFilter **dstFilterParam)
-{
- static int firstTime=1;
- *flags=0;
-
-#ifdef ARCH_X86
- if(gCpuCaps.hasMMX)
- asm volatile("emms\n\t"::: "memory"); //FIXME this shouldnt be required but it IS (even for non mmx versions)
-#endif
- if(firstTime)
- {
- firstTime=0;
- *flags= SWS_PRINT_INFO;
- }
- else if(verbose>1) *flags= SWS_PRINT_INFO;
-
- if(src_filter.lumH) sws_freeVec(src_filter.lumH);
- if(src_filter.lumV) sws_freeVec(src_filter.lumV);
- if(src_filter.chrH) sws_freeVec(src_filter.chrH);
- if(src_filter.chrV) sws_freeVec(src_filter.chrV);
-
- if(sws_lum_gblur!=0.0){
- src_filter.lumH= sws_getGaussianVec(sws_lum_gblur, 3.0);
- src_filter.lumV= sws_getGaussianVec(sws_lum_gblur, 3.0);
- }else{
- src_filter.lumH= sws_getIdentityVec();
- src_filter.lumV= sws_getIdentityVec();
- }
-
- if(sws_chr_gblur!=0.0){
- src_filter.chrH= sws_getGaussianVec(sws_chr_gblur, 3.0);
- src_filter.chrV= sws_getGaussianVec(sws_chr_gblur, 3.0);
- }else{
- src_filter.chrH= sws_getIdentityVec();
- src_filter.chrV= sws_getIdentityVec();
- }
-
- if(sws_chr_sharpen!=0.0){
- SwsVector *g= sws_getConstVec(-1.0, 3);
- SwsVector *id= sws_getConstVec(10.0/sws_chr_sharpen, 1);
- g->coeff[1]=2.0;
- sws_addVec(id, g);
- sws_convVec(src_filter.chrH, id);
- sws_convVec(src_filter.chrV, id);
- sws_freeVec(g);
- sws_freeVec(id);
- }
-
- if(sws_lum_sharpen!=0.0){
- SwsVector *g= sws_getConstVec(-1.0, 3);
- SwsVector *id= sws_getConstVec(10.0/sws_lum_sharpen, 1);
- g->coeff[1]=2.0;
- sws_addVec(id, g);
- sws_convVec(src_filter.lumH, id);
- sws_convVec(src_filter.lumV, id);
- sws_freeVec(g);
- sws_freeVec(id);
- }
-
- if(sws_chr_hshift)
- sws_shiftVec(src_filter.chrH, sws_chr_hshift);
-
- if(sws_chr_vshift)
- sws_shiftVec(src_filter.chrV, sws_chr_vshift);
+#endif //ARCH_X86 || ARCH_X86_64
- sws_normalizeVec(src_filter.chrH, 1.0);
- sws_normalizeVec(src_filter.chrV, 1.0);
- sws_normalizeVec(src_filter.lumH, 1.0);
- sws_normalizeVec(src_filter.lumV, 1.0);
-
- if(verbose > 1) sws_printVec(src_filter.chrH);
- if(verbose > 1) sws_printVec(src_filter.lumH);
-
- switch(sws_flags)
- {
- case 0: *flags|= SWS_FAST_BILINEAR; break;
- case 1: *flags|= SWS_BILINEAR; break;
- case 2: *flags|= SWS_BICUBIC; break;
- case 3: *flags|= SWS_X; break;
- case 4: *flags|= SWS_POINT; break;
- case 5: *flags|= SWS_AREA; break;
- case 6: *flags|= SWS_BICUBLIN; break;
- case 7: *flags|= SWS_GAUSS; break;
- case 8: *flags|= SWS_SINC; break;
- case 9: *flags|= SWS_LANCZOS; break;
- case 10:*flags|= SWS_SPLINE; break;
- default:*flags|= SWS_BILINEAR; break;
- }
-
- *srcFilterParam= &src_filter;
- *dstFilterParam= NULL;
-}
-
-// will use sws_flags & src_filter (from cmd line)
-SwsContext *sws_getContextFromCmdLine(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat)
-{
- int flags;
- SwsFilter *dstFilterParam, *srcFilterParam;
- sws_getFlagsAndFilterFromCmdLine(&flags, &srcFilterParam, &dstFilterParam);
-
- return sws_getContext(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flags, srcFilterParam, dstFilterParam);
-}
+// minor note: the HAVE_xyz is messed up after that line so don't use it
static double getSplineCoeff(double a, double b, double c, double d, double dist)
{
static inline void initFilter(int16_t **outFilter, int16_t **filterPos, int *outFilterSize, int xInc,
int srcW, int dstW, int filterAlign, int one, int flags,
- SwsVector *srcFilter, SwsVector *dstFilter)
+ SwsVector *srcFilter, SwsVector *dstFilter, double param[2])
{
int i;
int filterSize;
int minFilterSize;
double *filter=NULL;
double *filter2=NULL;
-#ifdef ARCH_X86
- if(gCpuCaps.hasMMX)
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
+ if(flags & SWS_CPU_CAPS_MMX)
asm volatile("emms\n\t"::: "memory"); //FIXME this shouldnt be required but it IS (even for non mmx versions)
#endif
double xDstInSrc;
double sizeFactor, filterSizeInSrc;
const double xInc1= (double)xInc / (double)(1<<16);
- int param= (flags&SWS_PARAM_MASK)>>SWS_PARAM_SHIFT;
if (flags&SWS_BICUBIC) sizeFactor= 4.0;
else if(flags&SWS_X) sizeFactor= 8.0;
else if(flags&SWS_AREA) sizeFactor= 1.0; //downscale only, for upscale it is bilinear
else if(flags&SWS_GAUSS) sizeFactor= 8.0; // infinite ;)
- else if(flags&SWS_LANCZOS) sizeFactor= param ? 2.0*param : 6.0;
+ else if(flags&SWS_LANCZOS) sizeFactor= param[0] != SWS_PARAM_DEFAULT ? 2.0*param[0] : 6.0;
else if(flags&SWS_SINC) sizeFactor= 20.0; // infinite ;)
else if(flags&SWS_SPLINE) sizeFactor= 20.0; // infinite ;)
else if(flags&SWS_BILINEAR) sizeFactor= 2.0;
double coeff;
if(flags & SWS_BICUBIC)
{
- double A= param ? -param*0.01 : -0.60;
-
- // Equation is from VirtualDub
- if(d<1.0)
- coeff = (1.0 - (A+3.0)*d*d + (A+2.0)*d*d*d);
+ double B= param[0] != SWS_PARAM_DEFAULT ? param[0] : 0.0;
+ double C= param[1] != SWS_PARAM_DEFAULT ? param[1] : 0.6;
+
+ if(d<1.0)
+ coeff = (12-9*B-6*C)*d*d*d + (-18+12*B+6*C)*d*d + 6-2*B;
else if(d<2.0)
- coeff = (-4.0*A + 8.0*A*d - 5.0*A*d*d + A*d*d*d);
+ coeff = (-B-6*C)*d*d*d + (6*B+30*C)*d*d + (-12*B-48*C)*d +8*B+24*C;
else
coeff=0.0;
}
}*/
else if(flags & SWS_X)
{
- double A= param ? param*0.1 : 1.0;
+ double A= param[0] != SWS_PARAM_DEFAULT ? param[0] : 1.0;
if(d<1.0)
coeff = cos(d*PI);
}
else if(flags & SWS_GAUSS)
{
- double p= param ? param*0.1 : 3.0;
+ double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
coeff = pow(2.0, - p*d*d);
}
else if(flags & SWS_SINC)
}
else if(flags & SWS_LANCZOS)
{
- double p= param ? param : 3.0;
+ double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
coeff = d ? sin(d*PI)*sin(d*PI/p)/(d*d*PI*PI/p) : 1.0;
if(d>p) coeff=0;
}
if(cutOff > SWS_MAX_REDUCE_CUTOFF) break;
- /* preserve Monotonicity because the core cant handle the filter otherwise */
+ /* preserve Monotonicity because the core can't handle the filter otherwise */
if(i<dstW-1 && (*filterPos)[i] >= (*filterPos)[i+1]) break;
// Move filter coeffs left
if(min>minFilterSize) minFilterSize= min;
}
+ if (flags & SWS_CPU_CAPS_ALTIVEC) {
+ // we can handle the special case 4,
+ // so we don't want to go to the full 8
+ if (minFilterSize < 5)
+ filterAlign = 4;
+
+	  // We really don't want to waste time on useless
+	  // computation, so fall back on the scalar C code
+	  // for very small filters: vectorizing is worth it
+	  // only for decent-sized vectors.
+ if (minFilterSize < 3)
+ filterAlign = 1;
+ }
+
ASSERT(minFilterSize > 0)
filterSize= (minFilterSize +(filterAlign-1)) & (~(filterAlign-1));
ASSERT(filterSize > 0)
for(i=0; i<dstW; i++)
{
int j;
+ double error=0;
double sum=0;
double scale= one;
+
for(j=0; j<filterSize; j++)
{
sum+= filter[i*filterSize + j];
scale/= sum;
for(j=0; j<*outFilterSize; j++)
{
- (*outFilter)[i*(*outFilterSize) + j]= (int)(filter[i*filterSize + j]*scale);
+ double v= filter[i*filterSize + j]*scale + error;
+ int intV= floor(v + 0.5);
+ (*outFilter)[i*(*outFilterSize) + j]= intV;
+ error = v - intV;
}
}
free(filter);
}
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *filter, int32_t *filterPos, int numSplits)
{
uint8_t *fragmentA;
- int imm8OfPShufW1A;
- int imm8OfPShufW2A;
- int fragmentLengthA;
+ long imm8OfPShufW1A;
+ long imm8OfPShufW2A;
+ long fragmentLengthA;
uint8_t *fragmentB;
- int imm8OfPShufW1B;
- int imm8OfPShufW2B;
- int fragmentLengthB;
+ long imm8OfPShufW1B;
+ long imm8OfPShufW2B;
+ long fragmentLengthB;
int fragmentPos;
int xpos, i;
"jmp 9f \n\t"
// Begin
"0: \n\t"
- "movq (%%edx, %%eax), %%mm3 \n\t"
- "movd (%%ecx, %%esi), %%mm0 \n\t"
- "movd 1(%%ecx, %%esi), %%mm1 \n\t"
+ "movq (%%"REG_d", %%"REG_a"), %%mm3\n\t"
+ "movd (%%"REG_c", %%"REG_S"), %%mm0\n\t"
+ "movd 1(%%"REG_c", %%"REG_S"), %%mm1\n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"pshufw $0xFF, %%mm1, %%mm1 \n\t"
"pshufw $0xFF, %%mm0, %%mm0 \n\t"
"2: \n\t"
"psubw %%mm1, %%mm0 \n\t"
- "movl 8(%%ebx, %%eax), %%esi \n\t"
+ "movl 8(%%"REG_b", %%"REG_a"), %%esi\n\t"
"pmullw %%mm3, %%mm0 \n\t"
"psllw $7, %%mm1 \n\t"
"paddw %%mm1, %%mm0 \n\t"
- "movq %%mm0, (%%edi, %%eax) \n\t"
+ "movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
- "addl $8, %%eax \n\t"
+ "add $8, %%"REG_a" \n\t"
// End
"9: \n\t"
// "int $3\n\t"
- "leal 0b, %0 \n\t"
- "leal 1b, %1 \n\t"
- "leal 2b, %2 \n\t"
- "decl %1 \n\t"
- "decl %2 \n\t"
- "subl %0, %1 \n\t"
- "subl %0, %2 \n\t"
- "leal 9b, %3 \n\t"
- "subl %0, %3 \n\t"
+ "lea 0b, %0 \n\t"
+ "lea 1b, %1 \n\t"
+ "lea 2b, %2 \n\t"
+ "dec %1 \n\t"
+ "dec %2 \n\t"
+ "sub %0, %1 \n\t"
+ "sub %0, %2 \n\t"
+ "lea 9b, %3 \n\t"
+ "sub %0, %3 \n\t"
:"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
"jmp 9f \n\t"
// Begin
"0: \n\t"
- "movq (%%edx, %%eax), %%mm3 \n\t"
- "movd (%%ecx, %%esi), %%mm0 \n\t"
+ "movq (%%"REG_d", %%"REG_a"), %%mm3\n\t"
+ "movd (%%"REG_c", %%"REG_S"), %%mm0\n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"pshufw $0xFF, %%mm0, %%mm1 \n\t"
"1: \n\t"
"pshufw $0xFF, %%mm0, %%mm0 \n\t"
"2: \n\t"
"psubw %%mm1, %%mm0 \n\t"
- "movl 8(%%ebx, %%eax), %%esi \n\t"
+ "movl 8(%%"REG_b", %%"REG_a"), %%esi\n\t"
"pmullw %%mm3, %%mm0 \n\t"
"psllw $7, %%mm1 \n\t"
"paddw %%mm1, %%mm0 \n\t"
- "movq %%mm0, (%%edi, %%eax) \n\t"
+ "movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
- "addl $8, %%eax \n\t"
+ "add $8, %%"REG_a" \n\t"
// End
"9: \n\t"
// "int $3\n\t"
- "leal 0b, %0 \n\t"
- "leal 1b, %1 \n\t"
- "leal 2b, %2 \n\t"
- "decl %1 \n\t"
- "decl %2 \n\t"
- "subl %0, %1 \n\t"
- "subl %0, %2 \n\t"
- "leal 9b, %3 \n\t"
- "subl %0, %3 \n\t"
+ "lea 0b, %0 \n\t"
+ "lea 1b, %1 \n\t"
+ "lea 2b, %2 \n\t"
+ "dec %1 \n\t"
+ "dec %2 \n\t"
+ "sub %0, %1 \n\t"
+ "sub %0, %2 \n\t"
+ "lea 9b, %3 \n\t"
+ "sub %0, %3 \n\t"
:"=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B),
}
filterPos[i/2]= xpos>>16; // needed to jump to the next part
}
-#endif // ARCH_X86
-
-//FIXME remove
-void SwScale_Init(){
-}
+#endif // ARCH_X86 || ARCH_X86_64
static void globalInit(){
// generating tables:
int c= MIN(MAX(i-256, 0), 255);
clip_table[i]=c;
}
+}
-cpuCaps= gCpuCaps;
-
+/* pick the swScale implementation matching the SWS_CPU_CAPS_* bits in
+   flags; without RUNTIME_CPUDETECT the single compiled-in variant is
+   returned regardless of flags */
+static SwsFunc getSwsFunc(int flags){
+
#ifdef RUNTIME_CPUDETECT
-#ifdef CAN_COMPILE_X86_ASM
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
// ordered per speed fasterst first
- if(gCpuCaps.hasMMX2)
- swScale= swScale_MMX2;
- else if(gCpuCaps.has3DNow)
- swScale= swScale_3DNow;
- else if(gCpuCaps.hasMMX)
- swScale= swScale_MMX;
+ if(flags & SWS_CPU_CAPS_MMX2)
+ return swScale_MMX2;
+ else if(flags & SWS_CPU_CAPS_3DNOW)
+ return swScale_3DNow;
+ else if(flags & SWS_CPU_CAPS_MMX)
+ return swScale_MMX;
else
- swScale= swScale_C;
+ return swScale_C;
#else
- swScale= swScale_C;
- cpuCaps.hasMMX2 = cpuCaps.hasMMX = cpuCaps.has3DNow = 0;
+#ifdef ARCH_POWERPC
+ if(flags & SWS_CPU_CAPS_ALTIVEC)
+ return swScale_altivec;
+ else
+ return swScale_C;
+#endif
+ return swScale_C;
#endif
#else //RUNTIME_CPUDETECT
#ifdef HAVE_MMX2
- swScale= swScale_MMX2;
- cpuCaps.has3DNow = 0;
+ return swScale_MMX2;
#elif defined (HAVE_3DNOW)
- swScale= swScale_3DNow;
- cpuCaps.hasMMX2 = 0;
+ return swScale_3DNow;
#elif defined (HAVE_MMX)
- swScale= swScale_MMX;
- cpuCaps.hasMMX2 = cpuCaps.has3DNow = 0;
+ return swScale_MMX;
+#elif defined (HAVE_ALTIVEC)
+ return swScale_altivec;
#else
- swScale= swScale_C;
- cpuCaps.hasMMX2 = cpuCaps.hasMMX = cpuCaps.has3DNow = 0;
+ return swScale_C;
#endif
#endif //!RUNTIME_CPUDETECT
}
uint8_t *dstPtr= dst;
for(i=0; i<srcSliceH; i++)
{
- memcpy(dstPtr, srcPtr, srcStride[0]);
+ memcpy(dstPtr, srcPtr, c->srcW);
srcPtr+= srcStride[0];
dstPtr+= dstStride[0];
}
}
- dst = dstParam[1] + dstStride[1]*srcSliceY;
- interleaveBytes( src[1],src[2],dst,c->srcW,srcSliceH,srcStride[1],srcStride[2],dstStride[0] );
+ dst = dstParam[1] + dstStride[1]*srcSliceY/2;
+ if (c->dstFormat == IMGFMT_NV12)
+ interleaveBytes( src[1],src[2],dst,c->srcW/2,srcSliceH/2,srcStride[1],srcStride[2],dstStride[0] );
+ else
+ interleaveBytes( src[2],src[1],dst,c->srcW/2,srcSliceH/2,srcStride[2],srcStride[1],dstStride[0] );
return srcSliceH;
}
return srcSliceH;
}
+/* unscaled converter: planar YUV (YV12-style src[0..2]) -> packed UYVY.
+   Converts srcSliceH lines starting at srcSliceY into dstParam[0] and
+   returns the number of lines processed. */
+static int PlanarToUyvyWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+             int srcSliceH, uint8_t* dstParam[], int dstStride[]){
+	/* offset into the packed destination plane for this slice */
+	uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+
+	yv12touyvy( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+
+	return srcSliceH;
+}
+
/* {RGB,BGR}{15,16,24,32} -> {RGB,BGR}{15,16,24,32} */
static int rgb2rgbWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dst[], int dstStride[]){
sortedStride[1]= stride[2];
sortedStride[2]= stride[1];
}
- else if(isPacked(format) || isGray(format))
+ else if(isPacked(format) || isGray(format) || format == IMGFMT_Y8)
{
sortedP[0]= p[0];
sortedP[1]=
sortedStride[0]= stride[0];
sortedStride[1]= stride[1];
sortedStride[2]= stride[2];
+ }
+ else if(format == IMGFMT_NV12 || format == IMGFMT_NV21)
+ {
+ sortedP[0]= p[0];
+ sortedP[1]= p[1];
+ sortedP[2]= NULL;
+ sortedStride[0]= stride[0];
+ sortedStride[1]= stride[1];
+ sortedStride[2]= 0;
}else{
MSG_ERR("internal error in orderYUV\n");
}
break;
case IMGFMT_YV12:
case IMGFMT_Y800: //FIXME remove after different subsamplings are fully implemented
+ case IMGFMT_NV12:
+ case IMGFMT_NV21:
*h=1;
*v=1;
break;
yuv2rgb_c_init_tables(c, inv_table, srcRange, brightness, contrast, saturation);
//FIXME factorize
-
+
+#ifdef HAVE_ALTIVEC
+ yuv2rgb_altivec_init_tables (c, inv_table, brightness, contrast, saturation);
+#endif
return 0;
}
}
SwsContext *sws_getContext(int srcW, int srcH, int origSrcFormat, int dstW, int dstH, int origDstFormat, int flags,
- SwsFilter *srcFilter, SwsFilter *dstFilter){
+ SwsFilter *srcFilter, SwsFilter *dstFilter, double *param){
SwsContext *c;
int i;
- int usesFilter;
+ int usesVFilter, usesHFilter;
int unscaled, needsDither;
int srcFormat, dstFormat;
SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
-#ifdef ARCH_X86
- if(gCpuCaps.hasMMX)
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
+ if(flags & SWS_CPU_CAPS_MMX)
asm volatile("emms\n\t"::: "memory");
#endif
- if(swScale==NULL) globalInit();
- /* avoid dupplicate Formats, so we dont need to check to much */
+#ifndef RUNTIME_CPUDETECT //ensure that the flags match the compiled variant if cpudetect is off
+ flags &= ~(SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2|SWS_CPU_CAPS_3DNOW|SWS_CPU_CAPS_ALTIVEC);
+#ifdef HAVE_MMX2
+ flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2;
+#elif defined (HAVE_3DNOW)
+ flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_3DNOW;
+#elif defined (HAVE_MMX)
+ flags |= SWS_CPU_CAPS_MMX;
+#elif defined (HAVE_ALTIVEC)
+ flags |= SWS_CPU_CAPS_ALTIVEC;
+#endif
+#endif
+ if(clip_table[512] != 255) globalInit();
+ if(rgb15to16 == NULL) sws_rgb2rgb_init(flags);
+
+	/* avoid duplicate Formats, so we don't need to check too much */
srcFormat = remove_dup_fourcc(origSrcFormat);
dstFormat = remove_dup_fourcc(origDstFormat);
c->srcFormat= srcFormat;
c->origDstFormat= origDstFormat;
c->origSrcFormat= origSrcFormat;
-
- usesFilter=0;
- if(dstFilter->lumV!=NULL && dstFilter->lumV->length>1) usesFilter=1;
- if(dstFilter->lumH!=NULL && dstFilter->lumH->length>1) usesFilter=1;
- if(dstFilter->chrV!=NULL && dstFilter->chrV->length>1) usesFilter=1;
- if(dstFilter->chrH!=NULL && dstFilter->chrH->length>1) usesFilter=1;
- if(srcFilter->lumV!=NULL && srcFilter->lumV->length>1) usesFilter=1;
- if(srcFilter->lumH!=NULL && srcFilter->lumH->length>1) usesFilter=1;
- if(srcFilter->chrV!=NULL && srcFilter->chrV->length>1) usesFilter=1;
- if(srcFilter->chrH!=NULL && srcFilter->chrH->length>1) usesFilter=1;
+ c->vRounder= 4* 0x0001000100010001ULL;
+
+ usesHFilter= usesVFilter= 0;
+ if(dstFilter->lumV!=NULL && dstFilter->lumV->length>1) usesVFilter=1;
+ if(dstFilter->lumH!=NULL && dstFilter->lumH->length>1) usesHFilter=1;
+ if(dstFilter->chrV!=NULL && dstFilter->chrV->length>1) usesVFilter=1;
+ if(dstFilter->chrH!=NULL && dstFilter->chrH->length>1) usesHFilter=1;
+ if(srcFilter->lumV!=NULL && srcFilter->lumV->length>1) usesVFilter=1;
+ if(srcFilter->lumH!=NULL && srcFilter->lumH->length>1) usesHFilter=1;
+ if(srcFilter->chrV!=NULL && srcFilter->chrV->length>1) usesVFilter=1;
+ if(srcFilter->chrH!=NULL && srcFilter->chrH->length>1) usesHFilter=1;
getSubSampleFactors(&c->chrSrcHSubSample, &c->chrSrcVSubSample, srcFormat);
getSubSampleFactors(&c->chrDstHSubSample, &c->chrDstVSubSample, dstFormat);
if((isBGR(srcFormat) || isRGB(srcFormat)) && !(flags&SWS_FULL_CHR_H_INP))
c->chrSrcHSubSample=1;
+ if(param){
+ c->param[0] = param[0];
+ c->param[1] = param[1];
+ }else{
+ c->param[0] =
+ c->param[1] = SWS_PARAM_DEFAULT;
+ }
+
c->chrIntHSubSample= c->chrDstHSubSample;
c->chrIntVSubSample= c->chrSrcVSubSample;
sws_setColorspaceDetails(c, Inverse_Table_6_9[SWS_CS_DEFAULT], 0, Inverse_Table_6_9[SWS_CS_DEFAULT] /* FIXME*/, 0, 0, 1<<16, 1<<16);
/* unscaled special Cases */
- if(unscaled && !usesFilter)
+ if(unscaled && !usesHFilter && !usesVFilter)
{
/* yv12_to_nv12 */
- if(srcFormat == IMGFMT_YV12 && dstFormat == IMGFMT_NV12)
+ if(srcFormat == IMGFMT_YV12 && (dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21))
{
c->swScale= PlanarToNV12Wrapper;
}
c->swScale= rgb2rgbWrapper;
/* yv12_to_yuy2 */
- if(srcFormat == IMGFMT_YV12 && dstFormat == IMGFMT_YUY2)
+ if(srcFormat == IMGFMT_YV12 &&
+ (dstFormat == IMGFMT_YUY2 || dstFormat == IMGFMT_UYVY))
{
- c->swScale= PlanarToYuy2Wrapper;
+ if (dstFormat == IMGFMT_YUY2)
+ c->swScale= PlanarToYuy2Wrapper;
+ else
+ c->swScale= PlanarToUyvyWrapper;
}
}
+#ifdef HAVE_ALTIVEC
+ if ((c->flags & SWS_CPU_CAPS_ALTIVEC) &&
+ ((srcFormat == IMGFMT_YV12 &&
+ (dstFormat == IMGFMT_YUY2 || dstFormat == IMGFMT_UYVY)))) {
+ // unscaled YV12 -> packed YUV, we want speed
+ if (dstFormat == IMGFMT_YUY2)
+ c->swScale= yv12toyuy2_unscaled_altivec;
+ else
+ c->swScale= yv12touyvy_unscaled_altivec;
+ }
+#endif
+
/* simple copy */
if( srcFormat == dstFormat
|| (isPlanarYUV(srcFormat) && isGray(dstFormat))
}
}
- if(cpuCaps.hasMMX2)
+ if(flags & SWS_CPU_CAPS_MMX2)
{
c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0;
if(!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR))
if(flags&SWS_PRINT_INFO)
MSG_INFO("SwScaler: output Width is not a multiple of 32 -> no MMX2 scaler\n");
}
+ if(usesHFilter) c->canMMX2BeUsed=0;
}
else
c->canMMX2BeUsed=0;
c->lumXInc+= 20;
c->chrXInc+= 20;
}
- //we dont use the x86asm scaler if mmx is available
- else if(cpuCaps.hasMMX)
+ //we don't use the x86asm scaler if mmx is available
+ else if(flags & SWS_CPU_CAPS_MMX)
{
c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20;
c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20;
/* precalculate horizontal scaler filter coefficients */
{
- const int filterAlign= cpuCaps.hasMMX ? 4 : 1;
+ const int filterAlign=
+ (flags & SWS_CPU_CAPS_MMX) ? 4 :
+ (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+ 1;
initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc,
srcW , dstW, filterAlign, 1<<14,
(flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags,
- srcFilter->lumH, dstFilter->lumH);
+ srcFilter->lumH, dstFilter->lumH, c->param);
initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc,
c->chrSrcW, c->chrDstW, filterAlign, 1<<14,
(flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
- srcFilter->chrH, dstFilter->chrH);
+ srcFilter->chrH, dstFilter->chrH, c->param);
-#ifdef ARCH_X86
-// cant downscale !!!
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
+// can't downscale !!!
if(c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR))
{
+#define MAX_FUNNY_CODE_SIZE 10000
+#ifdef MAP_ANONYMOUS
+ c->funnyYCode = (uint8_t*)mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ c->funnyUVCode = (uint8_t*)mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+#else
+ c->funnyYCode = (uint8_t*)memalign(32, MAX_FUNNY_CODE_SIZE);
+ c->funnyUVCode = (uint8_t*)memalign(32, MAX_FUNNY_CODE_SIZE);
+#endif
+
c->lumMmx2Filter = (int16_t*)memalign(8, (dstW /8+8)*sizeof(int16_t));
c->chrMmx2Filter = (int16_t*)memalign(8, (c->chrDstW /4+8)*sizeof(int16_t));
c->lumMmx2FilterPos= (int32_t*)memalign(8, (dstW /2/8+8)*sizeof(int32_t));
/* precalculate vertical scaler filter coefficients */
- initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
- srcH , dstH, 1, (1<<12)-4,
- (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags,
- srcFilter->lumV, dstFilter->lumV);
- initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
- c->chrSrcH, c->chrDstH, 1, (1<<12)-4,
- (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
- srcFilter->chrV, dstFilter->chrV);
-
- // Calculate Buffer Sizes so that they wont run out while handling these damn slices
+ {
+ const int filterAlign=
+ (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+ 1;
+
+ initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
+ srcH , dstH, filterAlign, (1<<12)-4,
+ (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags,
+ srcFilter->lumV, dstFilter->lumV, c->param);
+ initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
+ c->chrSrcH, c->chrDstH, filterAlign, (1<<12)-4,
+ (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
+ srcFilter->chrV, dstFilter->chrV, c->param);
+ }
+
+ // Calculate Buffer Sizes so that they won't run out while handling these damn slices
c->vLumBufSize= c->vLumFilterSize;
c->vChrBufSize= c->vChrFilterSize;
for(i=0; i<dstH; i++)
int chrI= i*c->chrDstH / dstH;
int nextSlice= MAX(c->vLumFilterPos[i ] + c->vLumFilterSize - 1,
((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<c->chrSrcVSubSample));
- nextSlice&= ~3; // Slices start at boundaries which are divisable through 4
+
+ nextSlice>>= c->chrSrcVSubSample;
+ nextSlice<<= c->chrSrcVSubSample;
if(c->vLumFilterPos[i ] + c->vLumBufSize < nextSlice)
c->vLumBufSize= nextSlice - c->vLumFilterPos[i ];
if(c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice>>c->chrSrcVSubSample))
MSG_INFO("from %s to %s ",
vo_format_name(srcFormat), vo_format_name(dstFormat));
- if(cpuCaps.hasMMX2)
+ if(flags & SWS_CPU_CAPS_MMX2)
MSG_INFO("using MMX2\n");
- else if(cpuCaps.has3DNow)
+ else if(flags & SWS_CPU_CAPS_3DNOW)
MSG_INFO("using 3DNOW\n");
- else if(cpuCaps.hasMMX)
+ else if(flags & SWS_CPU_CAPS_MMX)
MSG_INFO("using MMX\n");
- else
+ else if(flags & SWS_CPU_CAPS_ALTIVEC)
+ MSG_INFO("using AltiVec\n");
+ else
MSG_INFO("using C\n");
}
- if((flags & SWS_PRINT_INFO) && verbose>0)
+ if(flags & SWS_PRINT_INFO)
{
- if(cpuCaps.hasMMX)
+ if(flags & SWS_CPU_CAPS_MMX)
{
if(c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR))
MSG_V("SwScaler: using FAST_BILINEAR MMX2 scaler for horizontal scaling\n");
}
else
{
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
MSG_V("SwScaler: using X86-Asm scaler for horizontal scaling\n");
#else
if(flags & SWS_FAST_BILINEAR)
if(isPlanarYUV(dstFormat))
{
if(c->vLumFilterSize==1)
- MSG_V("SwScaler: using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", cpuCaps.hasMMX ? "MMX" : "C");
+ MSG_V("SwScaler: using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
else
- MSG_V("SwScaler: using n-tap %s scaler for vertical scaling (YV12 like)\n", cpuCaps.hasMMX ? "MMX" : "C");
+ MSG_V("SwScaler: using n-tap %s scaler for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
}
else
{
if(c->vLumFilterSize==1 && c->vChrFilterSize==2)
MSG_V("SwScaler: using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n"
- "SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n",cpuCaps.hasMMX ? "MMX" : "C");
+ "SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n",(flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
else if(c->vLumFilterSize==2 && c->vChrFilterSize==2)
- MSG_V("SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C");
+ MSG_V("SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
else
- MSG_V("SwScaler: using n-tap %s scaler for vertical scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C");
+ MSG_V("SwScaler: using n-tap %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
}
if(dstFormat==IMGFMT_BGR24)
MSG_V("SwScaler: using %s YV12->BGR24 Converter\n",
- cpuCaps.hasMMX2 ? "MMX2" : (cpuCaps.hasMMX ? "MMX" : "C"));
+ (flags & SWS_CPU_CAPS_MMX2) ? "MMX2" : ((flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"));
else if(dstFormat==IMGFMT_BGR32)
- MSG_V("SwScaler: using %s YV12->BGR32 Converter\n", cpuCaps.hasMMX ? "MMX" : "C");
+ MSG_V("SwScaler: using %s YV12->BGR32 Converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
else if(dstFormat==IMGFMT_BGR16)
- MSG_V("SwScaler: using %s YV12->BGR16 Converter\n", cpuCaps.hasMMX ? "MMX" : "C");
+ MSG_V("SwScaler: using %s YV12->BGR16 Converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
else if(dstFormat==IMGFMT_BGR15)
- MSG_V("SwScaler: using %s YV12->BGR15 Converter\n", cpuCaps.hasMMX ? "MMX" : "C");
+ MSG_V("SwScaler: using %s YV12->BGR15 Converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
MSG_V("SwScaler: %dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
}
- if((flags & SWS_PRINT_INFO) && verbose>1)
+ if(flags & SWS_PRINT_INFO)
{
MSG_DBG2("SwScaler:Lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc);
c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH, c->chrXInc, c->chrYInc);
}
- c->swScale= swScale;
+ c->swScale= getSwsFunc(flags);
return c;
}
/**
- * swscale warper, so we dont need to export the SwsContext.
+ * swscale wrapper, so we don't need to export the SwsContext.
 * assumes planar YUV to be in YUV order instead of YVU
 */
int sws_scale_ordered(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
			   int srcSliceH, uint8_t* dst[], int dstStride[]){
-	c->swScale(c, src, srcStride, srcSliceY, srcSliceH, dst, dstStride);
+	//copy strides, so they can safely be modified
+	//(presumably the selected swScale implementation rewrites them in place -- protects the caller's arrays)
+	int srcStride2[3]= {srcStride[0], srcStride[1], srcStride[2]};
+	int dstStride2[3]= {dstStride[0], dstStride[1], dstStride[2]};
+	return c->swScale(c, src, srcStride2, srcSliceY, srcSliceH, dst, dstStride2);
}
/**
- * swscale warper, so we dont need to export the SwsContext
+ * swscale wrapper, so we don't need to export the SwsContext
*/
int sws_scale(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStrideParam[]){
sws_orderYUV(c->origSrcFormat, src, srcStride, srcParam, srcStrideParam);
sws_orderYUV(c->origDstFormat, dst, dstStride, dstParam, dstStrideParam);
//printf("sws: slice %d %d\n", srcSliceY, srcSliceH);
- c->swScale(c, src, srcStride, srcSliceY, srcSliceH, dst, dstStride);
+
+ return c->swScale(c, src, srcStride, srcSliceY, srcSliceH, dst, dstStride);
+}
+
+/**
+ * Builds a SwsFilter from simple scalar blur/sharpen/shift parameters.
+ * Each non-zero parameter contributes a convolution vector; all four
+ * vectors are normalized to 1.0 before return.
+ * Returns NULL on allocation failure; the caller owns the result and
+ * releases it with sws_freeFilter().
+ */
+SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
+				float lumaSharpen, float chromaSharpen,
+				float chromaHShift, float chromaVShift,
+				int verbose)
+{
+	SwsFilter *filter= malloc(sizeof(SwsFilter));
+
+	if(!filter) return NULL; // was dereferenced unchecked before
+
+	if(lumaGBlur!=0.0){
+		filter->lumH= sws_getGaussianVec(lumaGBlur, 3.0);
+		filter->lumV= sws_getGaussianVec(lumaGBlur, 3.0);
+	}else{
+		filter->lumH= sws_getIdentityVec();
+		filter->lumV= sws_getIdentityVec();
+	}
+
+	if(chromaGBlur!=0.0){
+		filter->chrH= sws_getGaussianVec(chromaGBlur, 3.0);
+		filter->chrV= sws_getGaussianVec(chromaGBlur, 3.0);
+	}else{
+		filter->chrH= sws_getIdentityVec();
+		filter->chrV= sws_getIdentityVec();
+	}
+
+	if(chromaSharpen!=0.0){
+		/* sharpen kernel: identity scaled by 10/strength plus a {-1,2,-1} edge kernel */
+		SwsVector *g= sws_getConstVec(-1.0, 3);
+		SwsVector *id= sws_getConstVec(10.0/chromaSharpen, 1);
+		g->coeff[1]=2.0;
+		sws_addVec(id, g);
+		sws_convVec(filter->chrH, id);
+		sws_convVec(filter->chrV, id);
+		sws_freeVec(g);
+		sws_freeVec(id);
+	}
+
+	if(lumaSharpen!=0.0){
+		SwsVector *g= sws_getConstVec(-1.0, 3);
+		SwsVector *id= sws_getConstVec(10.0/lumaSharpen, 1);
+		g->coeff[1]=2.0;
+		sws_addVec(id, g);
+		sws_convVec(filter->lumH, id);
+		sws_convVec(filter->lumV, id);
+		sws_freeVec(g);
+		sws_freeVec(id);
+	}
+
+	/* shifts are rounded to the nearest integer tap position
+	   NOTE(review): +0.5 rounding misbehaves for negative shifts -- confirm intended */
+	if(chromaHShift != 0.0)
+		sws_shiftVec(filter->chrH, (int)(chromaHShift+0.5));
+
+	if(chromaVShift != 0.0)
+		sws_shiftVec(filter->chrV, (int)(chromaVShift+0.5));
+
+	sws_normalizeVec(filter->chrH, 1.0);
+	sws_normalizeVec(filter->chrV, 1.0);
+	sws_normalizeVec(filter->lumH, 1.0);
+	sws_normalizeVec(filter->lumV, 1.0);
+
+	if(verbose) sws_printVec(filter->chrH);
+	if(verbose) sws_printVec(filter->lumH);
+
+	return filter;
+}
/**
* returns a normalized gaussian curve used to filter stuff
* quality=3 is high quality, lowwer is lowwer quality
*/
-
SwsVector *sws_getGaussianVec(double variance, double quality){
const int length= (int)(variance*quality + 0.5) | 1;
int i;
free(a);
}
+/* Frees a SwsFilter and its four convolution vectors
+   (e.g. one returned by sws_getDefaultFilter).  NULL is a no-op. */
+void sws_freeFilter(SwsFilter *filter){
+	if(!filter) return;
+
+	/* guard each vector: sws_freeVec may not accept NULL -- TODO confirm */
+	if(filter->lumH) sws_freeVec(filter->lumH);
+	if(filter->lumV) sws_freeVec(filter->lumV);
+	if(filter->chrH) sws_freeVec(filter->chrH);
+	if(filter->chrV) sws_freeVec(filter->chrV);
+	free(filter);
+}
+
+
void sws_freeContext(SwsContext *c){
int i;
if(!c) return;
if(c->hChrFilterPos) free(c->hChrFilterPos);
c->hChrFilterPos = NULL;
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#ifdef MAP_ANONYMOUS
+ if(c->funnyYCode) munmap(c->funnyYCode, MAX_FUNNY_CODE_SIZE);
+ if(c->funnyUVCode) munmap(c->funnyUVCode, MAX_FUNNY_CODE_SIZE);
+#else
+ if(c->funnyYCode) free(c->funnyYCode);
+ if(c->funnyUVCode) free(c->funnyUVCode);
+#endif
+ c->funnyYCode=NULL;
+ c->funnyUVCode=NULL;
+#endif
+
if(c->lumMmx2Filter) free(c->lumMmx2Filter);
c->lumMmx2Filter=NULL;
if(c->chrMmx2Filter) free(c->chrMmx2Filter);