2 // Software scaling and colorspace conversion routines for MPlayer
// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
5 // current version mostly by Michael Niedermayer (michaelni@gmx.at)
6 // the parts written by michael are under GNU GPL
10 //#include <stdio.h> //FOR DEBUG ONLY
11 #include "../config.h"
13 #include "../cpudetect.h"
22 //disables the unscaled height version
25 #define RET 0xC3 //near return opcode
known BUGS with known cause (no bug reports please!, but patches are welcome :) )
30 horizontal MMX2 scaler reads 1-7 samples too much (might cause a sig11)
32 Supported output formats BGR15 BGR16 BGR24 BGR32
BGR15 & BGR16 MMX versions support dithering
34 Special versions: fast Y 1:1 scaling (no interpolation in y direction)
more intelligent misalignment avoidance for the horizontal scaler
40 change the distance of the u & v buffer
how to differentiate between x86 and C at runtime?! (using C for now)
/* Generic arithmetic helper macros.  Being macros they work for any
 * numeric type, but each argument may be evaluated twice -- never pass
 * expressions with side effects (e.g. ABS(i++)). */
#define ABS(a) ((a) <= 0 ? (-(a)) : (a))
#define MIN(a,b) ((b) < (a) ? (b) : (a))
#define MAX(a,b) ((b) > (a) ? (b) : (a))
49 #define CAN_COMPILE_X86_ASM
52 #ifdef CAN_COMPILE_X86_ASM
/* Fixed-point YUV->RGB coefficients, each 16-bit value replicated across
   the four word lanes of a 64-bit MMX operand.  The coefficient values
   correspond to the yuvtab_* lookup tables declared further below
   (0x2568 luma scale, 0x40cf U coefficient, etc.). */
static uint64_t __attribute__((aligned(8))) yCoeff= 0x2568256825682568LL;
static uint64_t __attribute__((aligned(8))) vrCoeff= 0x3343334333433343LL;
static uint64_t __attribute__((aligned(8))) ubCoeff= 0x40cf40cf40cf40cfLL;
static uint64_t __attribute__((aligned(8))) vgCoeff= 0xE5E2E5E2E5E2E5E2LL;
static uint64_t __attribute__((aligned(8))) ugCoeff= 0xF36EF36EF36EF36ELL;
/* per-byte masks keeping the top 5 (0xF8) / 6 (0xFC) bits of each 8-bit
   channel -- presumably used when reducing to 15/16 bpp; verify in asm */
static uint64_t __attribute__((aligned(8))) bF8= 0xF8F8F8F8F8F8F8F8LL;
static uint64_t __attribute__((aligned(8))) bFC= 0xFCFCFCFCFCFCFCFCLL;
/* small per-word constants (bias/rounding values referenced by the asm) */
static uint64_t __attribute__((aligned(8))) w400= 0x0400040004000400LL;
static uint64_t __attribute__((aligned(8))) w80= 0x0080008000800080LL;
static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
/* partial-qword byte masks, named after their byte-enable bit pattern */
static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;
/* Dither patterns for the 5-bit (b5/g5/r5) and 6-bit (g6) channels of the
   15/16 bpp output paths.  Presumably rewritten per output line by the asm
   code (writers are not visible in this view -- confirm); volatile so the
   compiler does not cache them across the inline-asm blocks. */
static volatile uint64_t __attribute__((aligned(8))) b5Dither;
static volatile uint64_t __attribute__((aligned(8))) g5Dither;
static volatile uint64_t __attribute__((aligned(8))) g6Dither;
static volatile uint64_t __attribute__((aligned(8))) r5Dither;
/* Two-entry dither source patterns for 4- and 8-level dithering.
   NOTE(review): each initializer shows only one element of two in this
   view -- the first element appears to be missing; confirm against the
   upstream source before relying on these values. */
static uint64_t __attribute__((aligned(8))) dither4[2]={
0x0200020002000200LL,};

static uint64_t __attribute__((aligned(8))) dither8[2]={
0x0004000400040004LL,};
/* Channel-extraction masks for packed 16 bpp (5-6-5) pixels,
   four pixels per qword. */
static uint64_t __attribute__((aligned(8))) b16Mask= 0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
static uint64_t __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
/* Channel-extraction masks for packed 15 bpp (5-5-5) pixels. */
static uint64_t __attribute__((aligned(8))) b15Mask= 0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
static uint64_t __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;

/* Byte-shuffling masks used by the 24 bpp (BGR24) output code. */
static uint64_t __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFLL;
static uint64_t __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00LL;
static uint64_t __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000LL;
/* Scratch qword plus the current luma/chroma interpolation factors;
   kept in file-scope memory so the inline asm can address them by
   symbol name (presumably written before each asm block -- the writers
   are not visible in this view). */
static uint64_t __attribute__((aligned(8))) temp0;
static uint64_t __attribute__((aligned(8))) asm_yalpha1;
static uint64_t __attribute__((aligned(8))) asm_uvalpha1;
// temporary storage for 4 yuv lines:
// 16bit for now (mmx likes it more compact)
static uint16_t __attribute__((aligned(8))) pix_buf_y[4][2048];
static uint16_t __attribute__((aligned(8))) pix_buf_uv[2][2048*2];
/* NOTE(review): the duplicate unaligned definitions below look like the
   non-x86 fallback branch of an #ifdef whose '#else'/'#endif' lines are
   not visible in this view -- confirm the preprocessor structure against
   the upstream source. */
static uint16_t pix_buf_y[4][2048];
static uint16_t pix_buf_uv[2][2048*2];
// clipping helper table for C implementations:
// (index i represents the value i-256; filled at init time, see the
// table-generation code below, where e.g. clip_table[i+512]=255)
static unsigned char clip_table[768];

/* clip_table pre-shifted/masked into the bit positions of the
   blue/green/red channels of 16 bpp (5-6-5) and 15 bpp (5-5-5) pixels;
   filled from clip_table by the init code below. */
static unsigned short clip_table16b[768];
static unsigned short clip_table16g[768];
static unsigned short clip_table16r[768];
static unsigned short clip_table15b[768];
static unsigned short clip_table15g[768];
static unsigned short clip_table15r[768];
// yuv->rgb conversion tables (filled by the init code below):
static int yuvtab_2568[256];	// (0x2568*(i- 16)) + (256<<13)
static int yuvtab_3343[256];	//  0x3343*(i-128)
static int yuvtab_0c92[256];	// -0x0c92*(i-128)
static int yuvtab_1a1e[256];	// -0x1a1e*(i-128)
static int yuvtab_40cf[256];	//  0x40cf*(i-128)
#ifdef CAN_COMPILE_X86_ASM
/* Buffers for runtime-generated horizontal-scaler code (the "funny" code)
   used by the MMX2 path -- see the RET opcode definition near the top.
   NOTE(review): the matching #endif is not visible in this view. */
static uint8_t funnyYCode[10000];
static uint8_t funnyUVCode[10000];

/* Nonzero when the MMX2 horizontal scaler can be used for the current
   scaling parameters (presumably decided at init time -- the writer is
   not visible in this view). */
static int canMMX2BeUsed=0;
128 #ifdef CAN_COMPILE_X86_ASM
129 void in_asm_used_var_warning_killer()
131 int i= yCoeff+vrCoeff+ubCoeff+vgCoeff+ugCoeff+bF8+bFC+w400+w80+w10+
132 bm00001111+bm00000111+bm11111000+b16Mask+g16Mask+r16Mask+b15Mask+g15Mask+r15Mask+temp0+asm_yalpha1+ asm_uvalpha1+
//Note: we have C, X86, MMX, MMX2 and 3DNOW versions; there is no 3DNOW+MMX2 one
140 #if !defined (HAVE_MMX) || defined (RUNTIME_CPUDETECT)
144 #ifdef CAN_COMPILE_X86_ASM
146 #if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
150 #if defined (HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
154 #if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
155 #define COMPILE_3DNOW
157 #endif //CAN_COMPILE_X86_ASM
169 #define RENAME(a) a ## _C
170 #include "swscale_template.c"
173 #ifdef CAN_COMPILE_X86_ASM
182 #define RENAME(a) a ## _X86
183 #include "swscale_template.c"
192 #define RENAME(a) a ## _MMX
193 #include "swscale_template.c"
203 #define RENAME(a) a ## _MMX2
204 #include "swscale_template.c"
214 #define RENAME(a) a ## _3DNow
215 #include "swscale_template.c"
218 #endif //CAN_COMPILE_X86_ASM
// minor note: the HAVE_xyz defines are messed up after this line, so don't use them
223 // *** bilinear scaling and yuv->rgb or yuv->yuv conversion of yv12 slices:
224 // *** Note: it's called multiple times while decoding a frame, first time y==0
225 // *** Designed to upscale, but may work for downscale too.
226 // switching the cpu type during a sliced drawing can have bad effects, like sig11
/* Public entry point: scales/converts one horizontal slice of a YV12
 * frame, dispatching to the fastest available implementation.  With
 * RUNTIME_CPUDETECT the choice is made per call from gCpuCaps
 * (MMX2 > 3DNow > MMX > C); otherwise exactly one variant was selected
 * at compile time via the HAVE_* defines.
 * srcptr[]/stride[]: Y,U,V source plane pointers and strides;
 * srcSliceY/srcSliceH: vertical position and height of this slice;
 * dstptr[]/dststride/dstbpp: destination buffer description;
 * srcW,srcH -> dstW,dstH: overall scaling geometry.
 * NOTE(review): several dispatch lines (e.g. the leading
 * 'if(gCpuCaps.hasMMX2)', the final 'else', and an '#ifdef HAVE_MMX2'
 * before the #else branch) plus the closing brace are not visible in
 * this view -- the control structure below is incomplete as shown. */
void SwScale_YV12slice(unsigned char* srcptr[],int stride[], int srcSliceY ,
int srcSliceH, uint8_t* dstptr[], int dststride, int dstbpp,
int srcW, int srcH, int dstW, int dstH){
#ifdef RUNTIME_CPUDETECT
#ifdef CAN_COMPILE_X86_ASM
// ordered per speed, fastest first
SwScale_YV12slice_MMX2(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
else if(gCpuCaps.has3DNow)
SwScale_YV12slice_3DNow(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
else if(gCpuCaps.hasMMX)
SwScale_YV12slice_MMX(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
SwScale_YV12slice_C(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
SwScale_YV12slice_C(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
#else //RUNTIME_CPUDETECT
SwScale_YV12slice_MMX2(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
#elif defined (HAVE_3DNOW)
SwScale_YV12slice_3DNow(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
#elif defined (HAVE_MMX)
SwScale_YV12slice_MMX(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
SwScale_YV12slice_C(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
#endif //!RUNTIME_CPUDETECT
260 // generating tables:
265 clip_table[i+512]=255;
266 yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
267 yuvtab_3343[i]=0x3343*(i-128);
268 yuvtab_0c92[i]=-0x0c92*(i-128);
269 yuvtab_1a1e[i]=-0x1a1e*(i-128);
270 yuvtab_40cf[i]=0x40cf*(i-128);
275 int v= clip_table[i];
276 clip_table16b[i]= v>>3;
277 clip_table16g[i]= (v<<3)&0x07E0;
278 clip_table16r[i]= (v<<8)&0xF800;
279 clip_table15b[i]= v>>3;
280 clip_table15g[i]= (v<<2)&0x03E0;
281 clip_table15r[i]= (v<<7)&0x7C00;