2 Copyright (C) 2001-2002 Michael Niedermayer <michaelni@gmx.at>
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 supported Input formats: YV12 (grayscale soon too)
21 supported output formats: YV12, BGR15, BGR16, BGR24, BGR32 (grayscale soon too)
28 #include "../config.h"
29 #include "../mangle.h"
34 #include "../cpudetect.h"
35 #include "../libvo/img_format.h"
#define RET 0xC3 //near return opcode (written into generated scaler code; see initMMX2HScaler)
// Deliberately crashes (NULL write) after printing, so a debugger stops right here.
// NOTE: intentionally NOT wrapped in do{}while(0) — call sites in this file use
// ASSERT(x) without a trailing semicolon, which a do/while form would break.
#define ASSERT(x) if(!(x)) { printf("ASSERT " #x " failed\n"); *((int*)0)=0; }
#define PI 3.14159265358979323846
extern int verbose; // defined in mplayer.c
63 known BUGS with known cause (no bugreports please!, but patches are welcome :) )
64 horizontal fast_bilinear MMX2 scaler reads 1-7 samples too much (might cause a sig11)
66 Supported output formats BGR15 BGR16 BGR24 BGR32 YV12
BGR15 & BGR16 MMX versions support dithering
68 Special versions: fast Y 1:1 scaling (no interpolation in y direction)
more intelligent misalignment avoidance for the horizontal scaler
72 change the distance of the u & v buffer
73 write special vertical cubic upscale version
74 Optimize C code (yv12 / minmax)
75 add support for packed pixel yuv input & output
76 add support for Y8 input & output
77 add BGR4 output support
78 add BGR32 / BGR24 input support
// Classic min/max/abs macros. WARNING: arguments are evaluated twice —
// never pass expressions with side effects (e.g. MIN(i++, n)).
#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#define CAN_COMPILE_X86_ASM

#ifdef CAN_COMPILE_X86_ASM
// Constants used from the inline-assembly (MMX) code paths. Each 64-bit value
// packs four identical 16-bit words so one MMX register covers 4 pixels.
// The *Coeff values are fixed-point YUV->RGB conversion coefficients
// (presumably matching the yuvtab_* tables below — names carry the hex value).
static uint64_t __attribute__((aligned(8))) yCoeff= 0x2568256825682568LL;
static uint64_t __attribute__((aligned(8))) vrCoeff= 0x3343334333433343LL;
static uint64_t __attribute__((aligned(8))) ubCoeff= 0x40cf40cf40cf40cfLL;
static uint64_t __attribute__((aligned(8))) vgCoeff= 0xE5E2E5E2E5E2E5E2LL;
static uint64_t __attribute__((aligned(8))) ugCoeff= 0xF36EF36EF36EF36ELL;
// Byte masks used when packing to 16/15 bpp (keep top 5/6 bits per component).
static uint64_t __attribute__((aligned(8))) bF8= 0xF8F8F8F8F8F8F8F8LL;
static uint64_t __attribute__((aligned(8))) bFC= 0xFCFCFCFCFCFCFCFCLL;
// Small packed-word constants (rounding / bias values for the asm code).
static uint64_t __attribute__((aligned(8))) w400= 0x0400040004000400LL;
static uint64_t __attribute__((aligned(8))) w80= 0x0080008000800080LL;
static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
static uint64_t __attribute__((aligned(8))) w02= 0x0002000200020002LL;
// Byte-lane masks (name encodes which of the 8 bytes are selected).
static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;

// Per-frame dither offsets for 15/16 bpp output; volatile because they are
// read and updated from inline assembly.
static volatile uint64_t __attribute__((aligned(8))) b5Dither;
static volatile uint64_t __attribute__((aligned(8))) g5Dither;
static volatile uint64_t __attribute__((aligned(8))) g6Dither;
static volatile uint64_t __attribute__((aligned(8))) r5Dither;

// 2-row ordered-dither matrices: dither4 for 5-bit channels, dither8 for 6-bit.
static uint64_t __attribute__((aligned(8))) dither4[2]={
0x0103010301030103LL,
0x0200020002000200LL,};

static uint64_t __attribute__((aligned(8))) dither8[2]={
0x0602060206020602LL,
0x0004000400040004LL,};

// Component masks for packed RGB565 / RGB555 pixels.
static uint64_t __attribute__((aligned(8))) b16Mask= 0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
static uint64_t __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
static uint64_t __attribute__((aligned(8))) b15Mask= 0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
static uint64_t __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;

// Shuffle/merge masks for packing pixels into 24bpp (BGR24) output.
static uint64_t __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFLL;
static uint64_t __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00LL;
static uint64_t __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000LL;

// Scratch cells written before entering asm (per-call alpha values).
static uint64_t __attribute__((aligned(8))) asm_yalpha1;
static uint64_t __attribute__((aligned(8))) asm_uvalpha1;
// clipping helper table for C implementations:
// index range [0,767] maps i -> clamp(i-256, 0, 255); filled in globalInit().
static unsigned char clip_table[768];

// Pre-shifted/pre-masked clip tables producing ready-to-OR RGB565 / RGB555
// component fields (derived from clip_table in globalInit()).
static unsigned short clip_table16b[768];
static unsigned short clip_table16g[768];
static unsigned short clip_table16r[768];
static unsigned short clip_table15b[768];
static unsigned short clip_table15g[768];
static unsigned short clip_table15r[768];

// yuv->rgb conversion tables (fixed-point coefficient * component):
static int yuvtab_2568[256];
static int yuvtab_3343[256];
static int yuvtab_0c92[256];
static int yuvtab_1a1e[256];
static int yuvtab_40cf[256];
// Needed for cubic scaler to catch overflows
// (same tables but with 256 entries of headroom on each side, index +256).
static int clip_yuvtab_2568[768];
static int clip_yuvtab_3343[768];
static int clip_yuvtab_0c92[768];
static int clip_yuvtab_1a1e[768];
static int clip_yuvtab_40cf[768];
//global sws_flags from the command line
// Global source-filter vectors built by SwScale_YV12slice from the sws_*
// command-line tuning variables below.
SwsFilter src_filter= {NULL, NULL, NULL, NULL};

float sws_lum_gblur= 0.0;   // gaussian blur strength, luminance
float sws_chr_gblur= 0.0;   // gaussian blur strength, chrominance
int sws_chr_vshift= 0;      // chroma vertical shift (pixels)
int sws_chr_hshift= 0;      // chroma horizontal shift (pixels)
float sws_chr_sharpen= 0.0; // sharpen strength, chrominance
float sws_lum_sharpen= 0.0; // sharpen strength, luminance

/* cpuCaps combined from cpudetect and what's actually compiled in
   (if there is no support for something compiled in it won't appear here) */
static CpuCaps cpuCaps;

// Function pointer to the selected scaler implementation (C/X86/MMX/MMX2/3DNow);
// set in globalInit(), NULL until then.
void (*swScale)(SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY,
                int srcSliceH, uint8_t* dst[], int dstStride[])=NULL;

static SwsVector *getConvVec(SwsVector *a, SwsVector *b);
#ifdef CAN_COMPILE_X86_ASM
// References every constant that is only used from inline assembly in a
// volatile sum, so the compiler neither warns about nor discards them.
// Never meant to be called for its result.
void in_asm_used_var_warning_killer()
volatile int i= yCoeff+vrCoeff+ubCoeff+vgCoeff+ugCoeff+bF8+bFC+w400+w80+w10+
bm00001111+bm00000111+bm11111000+b16Mask+g16Mask+r16Mask+b15Mask+g15Mask+r15Mask+asm_yalpha1+ asm_uvalpha1+
M24A+M24B+M24C+w02 + b5Dither+g5Dither+r5Dither+g6Dither+dither4[0]+dither8[0];
// Vertical scaling, C reference version, YV12 output: for each output pixel,
// accumulate lumFilterSize (resp. chrFilterSize) filter taps over the source
// line buffers, then shift the ~19-bit fixed-point sum down and clamp to 0..255.
// chrSrc stores U at [i] and V at [i+2048] in the same buffer (fixed offset).
static inline void yuv2yuvXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                               int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                               uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW)
//FIXME Optimize (just quickly written, not optimized)
// Luminance: full output width.
for(i=0; i<dstW; i++)
for(j=0; j<lumFilterSize; j++)
val += lumSrc[j][i] * lumFilter[j];
dest[i]= MIN(MAX(val>>19, 0), 255);
// Chrominance: half width (4:2:0 subsampling).
for(i=0; i<(dstW>>1); i++)
for(j=0; j<chrFilterSize; j++)
u += chrSrc[j][i] * chrFilter[j];
v += chrSrc[j][i + 2048] * chrFilter[j];
uDest[i]= MIN(MAX(u>>19, 0), 255);
vDest[i]= MIN(MAX(v>>19, 0), 255);
// Vertical scaling + YUV->RGB conversion, C reference version. Processes two
// horizontally adjacent pixels (Y1,Y2) per shared chroma sample (U,V), i.e.
// dstW/2 iterations. The clip_yuvtab_* tables give fixed-point contributions
// with clamping headroom (index bias +256); final values are shifted >>13 and
// pushed through the clip tables. Supports BGR32/BGR24/BGR16/BGR15 output;
// 16/15 bpp additionally apply a 2x2 ordered dither.
static inline void yuv2rgbXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                               int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                               uint8_t *dest, int dstW, int dstFormat)
if(dstFormat==IMGFMT_BGR32)
{
    for(i=0; i<(dstW>>1); i++){
        // Accumulate vertical filter taps for the two luma samples...
        for(j=0; j<lumFilterSize; j++)
            Y1 += lumSrc[j][2*i] * lumFilter[j];
            Y2 += lumSrc[j][2*i+1] * lumFilter[j];
        // ...and the shared chroma pair (V lives at offset +2048 in chrSrc).
        for(j=0; j<chrFilterSize; j++)
            U += chrSrc[j][i] * chrFilter[j];
            V += chrSrc[j][i+2048] * chrFilter[j];
        Y1= clip_yuvtab_2568[ (Y1>>19) + 256 ];
        Y2= clip_yuvtab_2568[ (Y2>>19) + 256 ];
        // Per-channel chroma contributions (B from U, G from U&V, R from V).
        Cb= clip_yuvtab_40cf[U+ 256];
        Cg= clip_yuvtab_1a1e[V+ 256] + yuvtab_0c92[U+ 256];
        Cr= clip_yuvtab_3343[V+ 256];
        // 4 bytes per pixel, BGR order, byte 3/7 (alpha) left untouched.
        dest[8*i+0]=clip_table[((Y1 + Cb) >>13)];
        dest[8*i+1]=clip_table[((Y1 + Cg) >>13)];
        dest[8*i+2]=clip_table[((Y1 + Cr) >>13)];
        dest[8*i+4]=clip_table[((Y2 + Cb) >>13)];
        dest[8*i+5]=clip_table[((Y2 + Cg) >>13)];
        dest[8*i+6]=clip_table[((Y2 + Cr) >>13)];
else if(dstFormat==IMGFMT_BGR24)
{
    // Same math as BGR32 but packed 3 bytes/pixel; dest is advanced instead
    // of indexed (the advancing lines fall outside this view).
    for(i=0; i<(dstW>>1); i++){
        for(j=0; j<lumFilterSize; j++)
            Y1 += lumSrc[j][2*i] * lumFilter[j];
            Y2 += lumSrc[j][2*i+1] * lumFilter[j];
        for(j=0; j<chrFilterSize; j++)
            U += chrSrc[j][i] * chrFilter[j];
            V += chrSrc[j][i+2048] * chrFilter[j];
        Y1= clip_yuvtab_2568[ (Y1>>19) + 256 ];
        Y2= clip_yuvtab_2568[ (Y2>>19) + 256 ];
        Cb= clip_yuvtab_40cf[U+ 256];
        Cg= clip_yuvtab_1a1e[V+ 256] + yuvtab_0c92[U+ 256];
        Cr= clip_yuvtab_3343[V+ 256];
        dest[0]=clip_table[((Y1 + Cb) >>13)];
        dest[1]=clip_table[((Y1 + Cg) >>13)];
        dest[2]=clip_table[((Y1 + Cr) >>13)];
        dest[3]=clip_table[((Y2 + Cb) >>13)];
        dest[4]=clip_table[((Y2 + Cg) >>13)];
        dest[5]=clip_table[((Y2 + Cr) >>13)];
else if(dstFormat==IMGFMT_BGR16)
{
    // Dithered RGB565. The static dither offsets are toggled (XOR) each call
    // so alternating lines use complementary 2x2 ordered-dither values;
    // the const-0 variants below are the non-dithered (#else) build.
    static int ditherb1=1<<14;
    static int ditherg1=1<<13;
    static int ditherr1=2<<14;
    static int ditherb2=3<<14;
    static int ditherg2=3<<13;
    static int ditherr2=0<<14;

    ditherb1 ^= (1^2)<<14;
    ditherg1 ^= (1^2)<<13;
    ditherr1 ^= (1^2)<<14;
    ditherb2 ^= (3^0)<<14;
    ditherg2 ^= (3^0)<<13;
    ditherr2 ^= (3^0)<<14;

    const int ditherb1=0;
    const int ditherg1=0;
    const int ditherr1=0;
    const int ditherb2=0;
    const int ditherg2=0;
    const int ditherr2=0;

    for(i=0; i<(dstW>>1); i++){
        for(j=0; j<lumFilterSize; j++)
            Y1 += lumSrc[j][2*i] * lumFilter[j];
            Y2 += lumSrc[j][2*i+1] * lumFilter[j];
        for(j=0; j<chrFilterSize; j++)
            U += chrSrc[j][i] * chrFilter[j];
            V += chrSrc[j][i+2048] * chrFilter[j];
        Y1= clip_yuvtab_2568[ (Y1>>19) + 256 ];
        Y2= clip_yuvtab_2568[ (Y2>>19) + 256 ];
        Cb= clip_yuvtab_40cf[U+ 256];
        Cg= clip_yuvtab_1a1e[V+ 256] + yuvtab_0c92[U+ 256];
        Cr= clip_yuvtab_3343[V+ 256];
        // clip_table16* already return pre-positioned 5/6/5-bit fields;
        // OR them together into one 16-bit pixel.
        ((uint16_t*)dest)[2*i] =
            clip_table16b[(Y1 + Cb + ditherb1) >>13] |
            clip_table16g[(Y1 + Cg + ditherg1) >>13] |
            clip_table16r[(Y1 + Cr + ditherr1) >>13];
        ((uint16_t*)dest)[2*i+1] =
            clip_table16b[(Y2 + Cb + ditherb2) >>13] |
            clip_table16g[(Y2 + Cg + ditherg2) >>13] |
            clip_table16r[(Y2 + Cr + ditherr2) >>13];
else if(dstFormat==IMGFMT_BGR15)
{
    // Dithered RGB555 — identical structure to BGR16, but green uses 5 bits
    // so its dither offsets use <<14 like the other channels.
    static int ditherb1=1<<14;
    static int ditherg1=1<<14;
    static int ditherr1=2<<14;
    static int ditherb2=3<<14;
    static int ditherg2=3<<14;
    static int ditherr2=0<<14;

    ditherb1 ^= (1^2)<<14;
    ditherg1 ^= (1^2)<<14;
    ditherr1 ^= (1^2)<<14;
    ditherb2 ^= (3^0)<<14;
    ditherg2 ^= (3^0)<<14;
    ditherr2 ^= (3^0)<<14;

    const int ditherb1=0;
    const int ditherg1=0;
    const int ditherr1=0;
    const int ditherb2=0;
    const int ditherg2=0;
    const int ditherr2=0;

    for(i=0; i<(dstW>>1); i++){
        for(j=0; j<lumFilterSize; j++)
            Y1 += lumSrc[j][2*i] * lumFilter[j];
            Y2 += lumSrc[j][2*i+1] * lumFilter[j];
        for(j=0; j<chrFilterSize; j++)
            U += chrSrc[j][i] * chrFilter[j];
            V += chrSrc[j][i+2048] * chrFilter[j];
        Y1= clip_yuvtab_2568[ (Y1>>19) + 256 ];
        Y2= clip_yuvtab_2568[ (Y2>>19) + 256 ];
        Cb= clip_yuvtab_40cf[U+ 256];
        Cg= clip_yuvtab_1a1e[V+ 256] + yuvtab_0c92[U+ 256];
        Cr= clip_yuvtab_3343[V+ 256];
        ((uint16_t*)dest)[2*i] =
            clip_table15b[(Y1 + Cb + ditherb1) >>13] |
            clip_table15g[(Y1 + Cg + ditherg1) >>13] |
            clip_table15r[(Y1 + Cr + ditherr1) >>13];
        ((uint16_t*)dest)[2*i+1] =
            clip_table15b[(Y2 + Cb + ditherb2) >>13] |
            clip_table15g[(Y2 + Cg + ditherg2) >>13] |
            clip_table15r[(Y2 + Cr + ditherr2) >>13];
//Note: we have C, X86, MMX, MMX2, 3DNOW versions; there's no 3DNOW+MMX2 one
432 #if !defined (HAVE_MMX) || defined (RUNTIME_CPUDETECT)
436 #ifdef CAN_COMPILE_X86_ASM
438 #if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
442 #if defined (HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
446 #if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
447 #define COMPILE_3DNOW
449 #endif //CAN_COMPILE_X86_ASM
459 #define RENAME(a) a ## _C
460 #include "swscale_template.c"
463 #ifdef CAN_COMPILE_X86_ASM
472 #define RENAME(a) a ## _X86
473 #include "swscale_template.c"
481 #define RENAME(a) a ## _MMX
482 #include "swscale_template.c"
491 #define RENAME(a) a ## _MMX2
492 #include "swscale_template.c"
501 #define RENAME(a) a ## _3DNow
502 #include "swscale_template.c"
505 #endif //CAN_COMPILE_X86_ASM
507 // minor note: the HAVE_xyz is messed up after that line so dont use it
// old global scaler, don't use for new code, unless it uses only the stuff from the command line
// will use sws_flags from the command line
// Legacy wrapper: keeps one static SwsContext, builds src_filter from the
// global sws_* tuning variables on first use, maps dstbpp to an IMGFMT and
// sws_flags to SWS_* flags, then forwards to the selected swScale().
void SwScale_YV12slice(unsigned char* src[], int srcStride[], int srcSliceY ,
                       int srcSliceH, uint8_t* dst[], int dstStride, int dstbpp,
                       int srcW, int srcH, int dstW, int dstH){

static SwsContext *context=NULL;
static int firstTime=1;
// YV12 chroma planes have half the stride of luma.
int dstStride3[3]= {dstStride, dstStride>>1, dstStride>>1};

asm volatile("emms\n\t"::: "memory"); //FIXME this shouldn't be required but it IS (even for non mmx versions)

flags= SWS_PRINT_INFO;

// Rebuild the global source filter: drop any previous vectors first.
if(src_filter.lumH) freeVec(src_filter.lumH);
if(src_filter.lumV) freeVec(src_filter.lumV);
if(src_filter.chrH) freeVec(src_filter.chrH);
if(src_filter.chrV) freeVec(src_filter.chrV);

// Luma gaussian blur (or identity when disabled).
if(sws_lum_gblur!=0.0){
    src_filter.lumH= getGaussianVec(sws_lum_gblur, 3.0);
    src_filter.lumV= getGaussianVec(sws_lum_gblur, 3.0);
    src_filter.lumH= getIdentityVec();
    src_filter.lumV= getIdentityVec();

// Chroma gaussian blur (or identity when disabled).
if(sws_chr_gblur!=0.0){
    src_filter.chrH= getGaussianVec(sws_chr_gblur, 3.0);
    src_filter.chrV= getGaussianVec(sws_chr_gblur, 3.0);
    src_filter.chrH= getIdentityVec();
    src_filter.chrV= getIdentityVec();

// Sharpen = convolve with (identity*strength - blur) style kernel.
if(sws_chr_sharpen!=0.0){
    SwsVector *g= getConstVec(-1.0, 3);
    SwsVector *id= getConstVec(10.0/sws_chr_sharpen, 1);
    convVec(src_filter.chrH, id);
    convVec(src_filter.chrV, id);

if(sws_lum_sharpen!=0.0){
    SwsVector *g= getConstVec(-1.0, 3);
    SwsVector *id= getConstVec(10.0/sws_lum_sharpen, 1);
    convVec(src_filter.lumH, id);
    convVec(src_filter.lumV, id);

// Chroma position shift, then renormalize all vectors to sum 1.0.
shiftVec(src_filter.chrH, sws_chr_hshift);
shiftVec(src_filter.chrV, sws_chr_vshift);

normalizeVec(src_filter.chrH, 1.0);
normalizeVec(src_filter.chrV, 1.0);
normalizeVec(src_filter.lumH, 1.0);
normalizeVec(src_filter.lumV, 1.0);

if(verbose > 1) printVec(src_filter.chrH);
if(verbose > 1) printVec(src_filter.lumH);

// Map destination bits-per-pixel to an image format.
case 8 : dstFormat= IMGFMT_Y8; break;
case 12: dstFormat= IMGFMT_YV12; break;
case 15: dstFormat= IMGFMT_BGR15; break;
case 16: dstFormat= IMGFMT_BGR16; break;
case 24: dstFormat= IMGFMT_BGR24; break;
case 32: dstFormat= IMGFMT_BGR32; break;

// Map the command-line sws_flags value to scaler-algorithm flags.
case 0: flags|= SWS_FAST_BILINEAR; break;
case 1: flags|= SWS_BILINEAR; break;
case 2: flags|= SWS_BICUBIC; break;
case 3: flags|= SWS_X; break;
case 4: flags|= SWS_POINT; break;
default:flags|= SWS_BILINEAR; break;

if(!context) context=getSwsContext(srcW, srcH, IMGFMT_YV12, dstW, dstH, dstFormat, flags, &src_filter, NULL);

swScale(context, src, srcStride, srcSliceY, srcSliceH, dst, dstStride3);
// Builds a horizontal (or vertical) scaling filter bank.
// Outputs: *outFilter  — int16 fixed-point coefficients, dstW rows of
//                        *outFilterSize taps (size rounded up to filterAlign),
//          *filterPos  — per-output-pixel start index into the source line,
//          *outFilterSize — final aligned tap count.
// xInc is the 16.16 fixed-point source step; 'one' is the fixed-point value
// coefficients of each row are normalized to sum to.
// Pipeline: pick base filter (unscaled / point / bilinear / bicubic / down-
// scale) -> convolve with optional src/dst filters -> shrink near-zero taps
// -> clamp filterPos to [0, srcW-size] -> normalize and store as int16.
static inline void initFilter(int16_t **outFilter, int16_t **filterPos, int *outFilterSize, int xInc,
                              int srcW, int dstW, int filterAlign, int one, int flags,
                              SwsVector *srcFilter, SwsVector *dstFilter)
double *filter2=NULL;

asm volatile("emms\n\t"::: "memory"); //FIXME this shouldn't be required but it IS (even for non mmx versions)

*filterPos = (int16_t*)memalign(8, dstW*sizeof(int16_t));
if(ABS(xInc - 0x10000) <10) // unscaled
{
    // 1:1 copy: single-tap identity filter.
    filter= (double*)memalign(8, dstW*sizeof(double)*filterSize);
    for(i=0; i<dstW*filterSize; i++) filter[i]=0;
    for(i=0; i<dstW; i++)
        filter[i*filterSize]=1;
else if(flags&SWS_POINT) // lame looking point sampling mode
{
    filter= (double*)memalign(8, dstW*sizeof(double)*filterSize);
    // Center of destination pixel 0 in 16.16 source coordinates.
    xDstInSrc= xInc/2 - 0x8000;
    for(i=0; i<dstW; i++)
        int xx= (xDstInSrc>>16) - (filterSize>>1) + 1;
else if(xInc <= (1<<16) || (flags&SWS_FAST_BILINEAR)) // upscale
{
    if (flags&SWS_BICUBIC) filterSize= 4;
    else if(flags&SWS_X ) filterSize= 4;
    // printf("%d %d %d\n", filterSize, srcW, dstW);
    filter= (double*)memalign(8, dstW*sizeof(double)*filterSize);

    xDstInSrc= xInc/2 - 0x8000;
    for(i=0; i<dstW; i++)
        int xx= (xDstInSrc>>16) - (filterSize>>1) + 1;
        if((flags & SWS_BICUBIC) || (flags & SWS_X))
            // d = fractional distance of the dst sample from source tap xx+1.
            double d= ABS(((xx+1)<<16) - xDstInSrc)/(double)(1<<16);
            if(flags & SWS_BICUBIC){
                // Equation is from VirtualDub
                y1 = ( + A*d - 2.0*A*d*d + A*d*d*d);
                y2 = (+ 1.0 - (A+3.0)*d*d + (A+2.0)*d*d*d);
                y3 = ( - A*d + (2.0*A+3.0)*d*d - (A+2.0)*d*d*d);
                y4 = ( + A*d*d - A*d*d*d);
            // cubic interpolation (derived it myself)
            y1 = ( -2.0*d + 3.0*d*d - 1.0*d*d*d)/6.0;
            y2 = (6.0 -3.0*d - 6.0*d*d + 3.0*d*d*d)/6.0;
            y3 = ( +6.0*d + 3.0*d*d - 3.0*d*d*d)/6.0;
            y4 = ( -1.0*d + 1.0*d*d*d)/6.0;

            // printf("%d %d %d \n", coeff, (int)d, xDstInSrc);
            filter[i*filterSize + 0]= y1;
            filter[i*filterSize + 1]= y2;
            filter[i*filterSize + 2]= y3;
            filter[i*filterSize + 3]= y4;
            // printf("%1.3f %1.3f %1.3f %1.3f %1.3f\n",d , y1, y2, y3, y4);
        // Bilinear branch: triangle (1-d) weights per tap.
        for(j=0; j<filterSize; j++)
            double d= ABS((xx<<16) - xDstInSrc)/(double)(1<<16);
            double coeff= 1.0 - d;
            // printf("%d %d %d \n", coeff, (int)d, xDstInSrc);
            filter[i*filterSize + j]= coeff;
// Downscale: widen the kernel proportionally to the scale ratio.
if(flags&SWS_BICUBIC) filterSize= (int)ceil(1 + 4.0*srcW / (double)dstW);
else if(flags&SWS_X) filterSize= (int)ceil(1 + 4.0*srcW / (double)dstW);
else filterSize= (int)ceil(1 + 2.0*srcW / (double)dstW);
// printf("%d %d %d\n", *filterSize, srcW, dstW);
filter= (double*)memalign(8, dstW*sizeof(double)*filterSize);

xDstInSrc= xInc/2 - 0x8000;
for(i=0; i<dstW; i++)
    int xx= (int)((double)xDstInSrc/(double)(1<<16) - (filterSize-1)*0.5 + 0.5);
    for(j=0; j<filterSize; j++)
        // Distance measured in source-step units so the kernel scales with xInc.
        double d= ABS((xx<<16) - xDstInSrc)/(double)xInc;
        if((flags & SWS_BICUBIC) || (flags & SWS_X))
            // Equation is from VirtualDub
            coeff = (1.0 - (A+3.0)*d*d + (A+2.0)*d*d*d);
            coeff = (-4.0*A + 8.0*A*d - 5.0*A*d*d + A*d*d*d);
/* else if(flags & SWS_X)
        // printf("%1.3f %d %d \n", coeff, (int)d, xDstInSrc);
        filter[i*filterSize + j]= coeff;

/* apply src & dst Filter to filter -> filter2
   (convolution grows the support by each filter's length-1) */
filter2Size= filterSize;
if(srcFilter) filter2Size+= srcFilter->length - 1;
if(dstFilter) filter2Size+= dstFilter->length - 1;
filter2= (double*)memalign(8, filter2Size*dstW*sizeof(double));

for(i=0; i<dstW; i++)
    SwsVector scaleFilter;
    // Wrap this row's coefficients as a SwsVector so getConvVec can be reused.
    scaleFilter.coeff= filter + i*filterSize;
    scaleFilter.length= filterSize;
    if(srcFilter) outVec= getConvVec(srcFilter, &scaleFilter);
    else outVec= &scaleFilter;
    ASSERT(outVec->length == filter2Size)
    for(j=0; j<outVec->length; j++)
        filter2[i*filter2Size + j]= outVec->coeff[j];
    // Re-center filterPos after the support grew.
    (*filterPos)[i]+= (filterSize-1)/2 - (filter2Size-1)/2;
    if(outVec != &scaleFilter) freeVec(outVec);
free(filter); filter=NULL;

/* try to reduce the filter-size (step1 find size and shift left) */
// Assume it's near normalized (*0.5 or *2.0 is ok but * 0.001 is not)
for(i=dstW-1; i>=0; i--)
    int min= filter2Size;
    /* get rid of near zero elements on the left by shifting left */
    for(j=0; j<filter2Size; j++)
        cutOff += ABS(filter2[i*filter2Size]);
        if(cutOff > SWS_MAX_REDUCE_CUTOFF) break;
        /* preserve Monotonicity because the core can't handle the filter otherwise */
        if(i<dstW-1 && (*filterPos)[i] >= (*filterPos)[i+1]) break;
        // Move filter coeffs left
        for(k=1; k<filter2Size; k++)
            filter2[i*filter2Size + k - 1]= filter2[i*filter2Size + k];
        filter2[i*filter2Size + k - 1]= 0.0;
    /* count near zeros on the right */
    for(j=filter2Size-1; j>0; j--)
        cutOff += ABS(filter2[i*filter2Size + j]);
        if(cutOff > SWS_MAX_REDUCE_CUTOFF) break;
    if(min>minFilterSize) minFilterSize= min;

/* try to reduce the filter-size (step2 reduce it) */
for(i=0; i<dstW; i++)
    // Compact rows in place to the reduced tap count.
    for(j=0; j<minFilterSize; j++)
        filter2[i*minFilterSize + j]= filter2[i*filter2Size + j];
if((flags&SWS_PRINT_INFO) && verbose)
    printf("SwScaler: reducing filtersize %d -> %d\n", filter2Size, minFilterSize);
filter2Size= minFilterSize;
ASSERT(filter2Size > 0)

//FIXME try to align filterpos if possible
// Clamp filter positions at the image borders, folding out-of-range taps
// onto the nearest valid tap so row sums are preserved.
for(i=0; i<dstW; i++)
    if((*filterPos)[i] < 0)
        // Move filter coeffs left to compensate for filterPos
        for(j=1; j<filter2Size; j++)
            int left= MAX(j + (*filterPos)[i], 0);
            filter2[i*filter2Size + left] += filter2[i*filter2Size + j];
            filter2[i*filter2Size + j]=0;
    if((*filterPos)[i] + filter2Size > srcW)
        int shift= (*filterPos)[i] + filter2Size - srcW;
        // Move filter coeffs right to compensate for filterPos
        for(j=filter2Size-2; j>=0; j--)
            int right= MIN(j + shift, filter2Size-1);
            filter2[i*filter2Size +right] += filter2[i*filter2Size +j];
            filter2[i*filter2Size +j]=0;
        (*filterPos)[i]= srcW - filter2Size;

// Round the tap count up to filterAlign (MMX code wants multiples of 4).
*outFilterSize= (filter2Size +(filterAlign-1)) & (~(filterAlign-1));
*outFilter= (int16_t*)memalign(8, *outFilterSize*dstW*sizeof(int16_t));
memset(*outFilter, 0, *outFilterSize*dstW*sizeof(int16_t));

/* Normalize & Store in outFilter */
for(i=0; i<dstW; i++)
    for(j=0; j<filter2Size; j++)
        sum+= filter2[i*filter2Size + j];
    for(j=0; j<filter2Size; j++)
        (*outFilter)[i*(*outFilterSize) + j]= (int)(filter2[i*filter2Size + j]*scale);
// Generates a specialized MMX2 horizontal scaling routine at runtime:
// an asm "fragment" template (between the labels captured below) is copied
// once per 8 destination pixels into funnyCode, with the two pshufw
// immediates patched per-group to select the right source samples, and a
// RET opcode appended. The generated code is later jumped to by the scaler.
static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode)
// create an optimized horizontal scaling routine
"movq (%%esi), %%mm0 \n\t" //FIXME Alignment
"movq %%mm0, %%mm1 \n\t"
"psrlq $8, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"addw %%bx, %%cx \n\t" //2*xalpha += (4*lumXInc)&0xFFFF
"pshufw $0xFF, %%mm1, %%mm1 \n\t"
"adcl %%edx, %%esi \n\t" //xx+= (4*lumXInc)>>16 + carry
"pshufw $0xFF, %%mm0, %%mm0 \n\t"
"psrlw $9, %%mm3 \n\t"
"psubw %%mm1, %%mm0 \n\t"
"pmullw %%mm3, %%mm0 \n\t"
"paddw %%mm6, %%mm2 \n\t" // 2*alpha += xpos&0xFFFF
"psllw $7, %%mm1 \n\t"
"paddw %%mm1, %%mm0 \n\t"
"movq %%mm0, (%%edi, %%eax) \n\t"
"addl $8, %%eax \n\t"
// Outputs: address of the template, byte offsets of the two pshufw
// immediates inside it, and the template's length in bytes.
:"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
"=r" (fragmentLength)

xpos= 0; //lumXInc/2 - 0x8000; // difference between pixel centers
for(i=0; i<dstW/8; i++)
    // a..d: integer source-pixel offsets (relative to xx) of the 4 samples
    // this fragment instance reads; encoded into the pshufw selector.
    int b=((xpos+xInc)>>16) - xx;
    int c=((xpos+xInc*2)>>16) - xx;
    int d=((xpos+xInc*3)>>16) - xx;

    memcpy(funnyCode + fragmentLength*i/4, fragment, fragmentLength);

    funnyCode[fragmentLength*i/4 + imm8OfPShufW1]=
    funnyCode[fragmentLength*i/4 + imm8OfPShufW2]=
    a | (b<<2) | (c<<4) | (d<<6);

    // if we don't need to read 8 bytes then don't :), reduces the chance of
    // crossing a cache line
    if(d<3) funnyCode[fragmentLength*i/4 + 1]= 0x6E;

funnyCode[fragmentLength*(i+4)/4]= RET;
// One-time initialization: fills the clip/yuv lookup tables, then selects the
// best swScale implementation for this CPU (runtime-detected or compile-time).
static void globalInit(){
// generating tables:
for(i=0; i<768; i++){
    // c = i clamped to 0..255; clip_yuvtab_* get the full 768-entry range
    // (headroom for overflow), yuvtab_* the clamped 256-entry core.
    int c= MIN(MAX(i-256, 0), 255);
    yuvtab_2568[c]= clip_yuvtab_2568[i]=(0x2568*(c-16))+(256<<13);
    yuvtab_3343[c]= clip_yuvtab_3343[i]=0x3343*(c-128);
    yuvtab_0c92[c]= clip_yuvtab_0c92[i]=-0x0c92*(c-128);
    yuvtab_1a1e[c]= clip_yuvtab_1a1e[i]=-0x1a1e*(c-128);
    yuvtab_40cf[c]= clip_yuvtab_40cf[i]=0x40cf*(c-128);

// Derive the pre-shifted RGB565/RGB555 component tables from clip_table.
for(i=0; i<768; i++)
    int v= clip_table[i];
    clip_table16b[i]= v>>3;
    clip_table16g[i]= (v<<3)&0x07E0;
    clip_table16r[i]= (v<<8)&0xF800;
    clip_table15b[i]= v>>3;
    clip_table15g[i]= (v<<2)&0x03E0;
    clip_table15r[i]= (v<<7)&0x7C00;

#ifdef RUNTIME_CPUDETECT
#ifdef CAN_COMPILE_X86_ASM
// ordered per speed fastest first
if(gCpuCaps.hasMMX2)
    swScale= swScale_MMX2;
else if(gCpuCaps.has3DNow)
    swScale= swScale_3DNow;
else if(gCpuCaps.hasMMX)
    swScale= swScale_MMX;
cpuCaps.hasMMX2 = cpuCaps.hasMMX = cpuCaps.has3DNow = 0;
#else //RUNTIME_CPUDETECT
// Compile-time selection: exactly one variant was built in.
swScale= swScale_MMX2;
cpuCaps.has3DNow = 0;
#elif defined (HAVE_3DNOW)
swScale= swScale_3DNow;
cpuCaps.hasMMX2 = 0;
#elif defined (HAVE_MMX)
swScale= swScale_MMX;
cpuCaps.hasMMX2 = cpuCaps.has3DNow = 0;
cpuCaps.hasMMX2 = cpuCaps.hasMMX = cpuCaps.has3DNow = 0;
#endif //!RUNTIME_CPUDETECT
// Allocates and fully initializes an SwsContext for srcW x srcH srcFormat ->
// dstW x dstH dstFormat scaling: computes 16.16 increments, precalculates
// horizontal and vertical filter banks (initFilter), sizes/allocates the
// line ring buffers, packs MMX filter data, and optionally prints a summary.
// Returns NULL on invalid dimensions.
SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat, int flags,
                          SwsFilter *srcFilter, SwsFilter *dstFilter){
// YV12 needs 16-byte aligned strides, packed RGB only 8.
const int widthAlign= dstFormat==IMGFMT_YV12 ? 16 : 8;
SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};

asm volatile("emms\n\t"::: "memory");

// Lazy global init on first context creation.
if(swScale==NULL) globalInit();

if(srcW<1 || srcH<1 || dstW<1 || dstH<1) return NULL;

if(dstStride[0]%widthAlign !=0 )
    if(flags & SWS_PRINT_INFO)
        fprintf(stderr, "SwScaler: Warning: dstStride is not a multiple of %d!\n"
                "SwScaler: ->cannot do aligned memory acesses anymore\n",

if(!dstFilter) dstFilter= &dummyFilter;
if(!srcFilter) srcFilter= &dummyFilter;

c= memalign(64, sizeof(SwsContext));
memset(c, 0, sizeof(SwsContext));

// 16.16 fixed-point source step per destination pixel (rounded).
c->lumXInc= ((srcW<<16) + (dstW>>1))/dstW;
c->lumYInc= ((srcH<<16) + (dstH>>1))/dstH;

c->dstFormat= dstFormat;
c->srcFormat= srcFormat;

// The generated MMX2 scaler needs dstW%32==0 and srcW%16==0 and no downscale.
c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0;
if(!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR))
    if(flags&SWS_PRINT_INFO)
        fprintf(stderr, "SwScaler: output Width is not a multiple of 32 -> no MMX2 scaler\n");

// match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
// but only for the FAST_BILINEAR mode otherwise do correct scaling
// n-2 is the last chrominance sample available
// this is not perfect, but no one should notice the difference, the more correct variant
// would be like the vertical one, but that would require some special code for the
// first and last pixel
if(flags&SWS_FAST_BILINEAR)
    if(c->canMMX2BeUsed) c->lumXInc+= 20;
    //we don't use the x86asm scaler if mmx is available
    else if(cpuCaps.hasMMX) c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20;

/* set chrXInc & chrDstW */
// FULL_UV_IPOL for RGB output keeps chroma at full horizontal resolution.
if((flags&SWS_FULL_UV_IPOL) && dstFormat!=IMGFMT_YV12)
    c->chrXInc= c->lumXInc>>1, c->chrDstW= dstW;
    c->chrXInc= c->lumXInc, c->chrDstW= (dstW+1)>>1;

/* set chrYInc & chrDstH */
if(dstFormat==IMGFMT_YV12) c->chrYInc= c->lumYInc, c->chrDstH= (dstH+1)>>1;
else c->chrYInc= c->lumYInc>>1, c->chrDstH= dstH;

/* precalculate horizontal scaler filter coefficients */
// MMX horizontal code consumes taps 4 at a time, hence filterAlign 4.
const int filterAlign= cpuCaps.hasMMX ? 4 : 1;

initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc,
           srcW , dstW, filterAlign, 1<<14, flags,
           srcFilter->lumH, dstFilter->lumH);
initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc,
           (srcW+1)>>1, c->chrDstW, filterAlign, 1<<14, flags,
           srcFilter->chrH, dstFilter->chrH);

// can't downscale !!!
if(c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR))
    initMMX2HScaler( dstW, c->lumXInc, c->funnyYCode);
    initMMX2HScaler(c->chrDstW, c->chrXInc, c->funnyUVCode);
} // Init Horizontal stuff

/* precalculate vertical scaler filter coefficients */
initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
           srcH , dstH, 1, (1<<12)-4, flags,
           srcFilter->lumV, dstFilter->lumV);
initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
           (srcH+1)>>1, c->chrDstH, 1, (1<<12)-4, flags,
           srcFilter->chrV, dstFilter->chrV);

// Calculate Buffer Sizes so that they won't run out while handling these damn slices
c->vLumBufSize= c->vLumFilterSize;
c->vChrBufSize= c->vChrFilterSize;
for(i=0; i<dstH; i++)
    int chrI= i*c->chrDstH / dstH;
    // Furthest source line any output line up to i may need.
    int nextSlice= MAX(c->vLumFilterPos[i ] + c->vLumFilterSize - 1,
                       ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<1));
    nextSlice&= ~1; // Slices start at even boundaries
    if(c->vLumFilterPos[i ] + c->vLumBufSize < nextSlice)
        c->vLumBufSize= nextSlice - c->vLumFilterPos[i ];
    if(c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice>>1))
        c->vChrBufSize= (nextSlice>>1) - c->vChrFilterPos[chrI];

// allocate pixbufs (we use dynamic allocation because otherwise we would need to
// allocate several megabytes to handle all possible cases)
// Pointer arrays are doubled so the ring buffer can be addressed without wrap checks.
c->lumPixBuf= (int16_t**)memalign(4, c->vLumBufSize*2*sizeof(int16_t*));
c->chrPixBuf= (int16_t**)memalign(4, c->vChrBufSize*2*sizeof(int16_t*));
for(i=0; i<c->vLumBufSize; i++)
    c->lumPixBuf[i]= c->lumPixBuf[i+c->vLumBufSize]= (uint16_t*)memalign(8, 4000);
for(i=0; i<c->vChrBufSize; i++)
    c->chrPixBuf[i]= c->chrPixBuf[i+c->vChrBufSize]= (uint16_t*)memalign(8, 8000);

//try to avoid drawing green stuff between the right end and the stride end
// (64 is the neutral chroma value at this buffer's scale)
for(i=0; i<c->vLumBufSize; i++) memset(c->lumPixBuf[i], 0, 4000);
for(i=0; i<c->vChrBufSize; i++) memset(c->chrPixBuf[i], 64, 8000);

ASSERT(c->chrDstH <= dstH)

// pack filter data for mmx code
// (each int16 coefficient replicated 4x so a movq loads one tap for 4 pixels)
c->lumMmxFilter= (int16_t*)memalign(8, c->vLumFilterSize* dstH*4*sizeof(int16_t));
c->chrMmxFilter= (int16_t*)memalign(8, c->vChrFilterSize*c->chrDstH*4*sizeof(int16_t));
for(i=0; i<c->vLumFilterSize*dstH; i++)
    c->lumMmxFilter[4*i]=c->lumMmxFilter[4*i+1]=c->lumMmxFilter[4*i+2]=c->lumMmxFilter[4*i+3]=
for(i=0; i<c->vChrFilterSize*c->chrDstH; i++)
    c->chrMmxFilter[4*i]=c->chrMmxFilter[4*i+1]=c->chrMmxFilter[4*i+2]=c->chrMmxFilter[4*i+3]=

// Informational output (scaler type, output format, cpu features used).
if(flags&SWS_PRINT_INFO)
    char *dither= " dithered";
    if(flags&SWS_FAST_BILINEAR)
        fprintf(stderr, "\nSwScaler: FAST_BILINEAR scaler ");
    else if(flags&SWS_BILINEAR)
        fprintf(stderr, "\nSwScaler: BILINEAR scaler ");
    else if(flags&SWS_BICUBIC)
        fprintf(stderr, "\nSwScaler: BICUBIC scaler ");
    else if(flags&SWS_POINT)
        fprintf(stderr, "\nSwScaler: POINT scaler ");
        fprintf(stderr, "\nSwScaler: ehh flags invalid?! ");
    if(dstFormat==IMGFMT_BGR15)
        fprintf(stderr, "with%s BGR15 output ", dither);
    else if(dstFormat==IMGFMT_BGR16)
        fprintf(stderr, "with%s BGR16 output ", dither);
    else if(dstFormat==IMGFMT_BGR24)
        fprintf(stderr, "with BGR24 output ");
    else if(dstFormat==IMGFMT_BGR32)
        fprintf(stderr, "with BGR32 output ");
    else if(dstFormat==IMGFMT_YV12)
        fprintf(stderr, "with YV12 output ");
        fprintf(stderr, "without output ");
        fprintf(stderr, "using MMX2\n");
    else if(cpuCaps.has3DNow)
        fprintf(stderr, "using 3DNOW\n");
    else if(cpuCaps.hasMMX)
        fprintf(stderr, "using MMX\n");
        fprintf(stderr, "using C\n");

// Verbose details about the chosen per-stage implementations.
if((flags & SWS_PRINT_INFO) && verbose)
    if(c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR))
        printf("SwScaler: using FAST_BILINEAR MMX2 scaler for horizontal scaling\n");
    if(c->hLumFilterSize==4)
        printf("SwScaler: using 4-tap MMX scaler for horizontal luminance scaling\n");
    else if(c->hLumFilterSize==8)
        printf("SwScaler: using 8-tap MMX scaler for horizontal luminance scaling\n");
        printf("SwScaler: using n-tap MMX scaler for horizontal luminance scaling\n");
    if(c->hChrFilterSize==4)
        printf("SwScaler: using 4-tap MMX scaler for horizontal chrominance scaling\n");
    else if(c->hChrFilterSize==8)
        printf("SwScaler: using 8-tap MMX scaler for horizontal chrominance scaling\n");
        printf("SwScaler: using n-tap MMX scaler for horizontal chrominance scaling\n");
        printf("SwScaler: using X86-Asm scaler for horizontal scaling\n");
    if(flags & SWS_FAST_BILINEAR)
        printf("SwScaler: using FAST_BILINEAR C scaler for horizontal scaling\n");
        printf("SwScaler: using C scaler for horizontal scaling\n");
    if(dstFormat==IMGFMT_YV12)
        if(c->vLumFilterSize==1)
            printf("SwScaler: using 1-tap %s \"scaler\" for vertical scaling (YV12)\n", cpuCaps.hasMMX ? "MMX" : "C");
            printf("SwScaler: using n-tap %s scaler for vertical scaling (YV12)\n", cpuCaps.hasMMX ? "MMX" : "C");
        if(c->vLumFilterSize==1 && c->vChrFilterSize==2)
            printf("SwScaler: using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n"
                   "SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n",cpuCaps.hasMMX ? "MMX" : "C");
        else if(c->vLumFilterSize==2 && c->vChrFilterSize==2)
            printf("SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C");
            printf("SwScaler: using n-tap %s scaler for vertical scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C");
    if(dstFormat==IMGFMT_BGR24)
        printf("SwScaler: using %s YV12->BGR24 Converter\n",
               cpuCaps.hasMMX2 ? "MMX2" : (cpuCaps.hasMMX ? "MMX" : "C"));
    else if(dstFormat==IMGFMT_BGR32)
        printf("SwScaler: using %s YV12->BGR32 Converter\n", cpuCaps.hasMMX ? "MMX" : "C");
    else if(dstFormat==IMGFMT_BGR16)
        printf("SwScaler: using %s YV12->BGR16 Converter\n", cpuCaps.hasMMX ? "MMX" : "C");
    else if(dstFormat==IMGFMT_BGR15)
        printf("SwScaler: using %s YV12->BGR15 Converter\n", cpuCaps.hasMMX ? "MMX" : "C");
    printf("SwScaler: %dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
/**
 * returns a normalized gaussian curve used to filter stuff
 * quality=3 is high quality, lower is lower quality
 */
1328 SwsVector *getGaussianVec(double variance, double quality){
1329 const int length= (int)(variance*quality + 0.5) | 1;
1331 double *coeff= memalign(sizeof(double), length*sizeof(double));
1332 double middle= (length-1)*0.5;
1333 SwsVector *vec= malloc(sizeof(SwsVector));
1336 vec->length= length;
1338 for(i=0; i<length; i++)
1340 double dist= i-middle;
1341 coeff[i]= exp( -dist*dist/(2*variance*variance) ) / sqrt(2*variance*PI);
1344 normalizeVec(vec, 1.0);
1349 SwsVector *getConstVec(double c, int length){
1351 double *coeff= memalign(sizeof(double), length*sizeof(double));
1352 SwsVector *vec= malloc(sizeof(SwsVector));
1355 vec->length= length;
1357 for(i=0; i<length; i++)
1364 SwsVector *getIdentityVec(void){
1365 double *coeff= memalign(sizeof(double), sizeof(double));
1366 SwsVector *vec= malloc(sizeof(SwsVector));
1375 void normalizeVec(SwsVector *a, double height){
1380 for(i=0; i<a->length; i++)
1385 for(i=0; i<a->length; i++)
1386 a->coeff[i]*= height;
1389 void scaleVec(SwsVector *a, double scalar){
1392 for(i=0; i<a->length; i++)
1393 a->coeff[i]*= scalar;
1396 static SwsVector *getConvVec(SwsVector *a, SwsVector *b){
1397 int length= a->length + b->length - 1;
1398 double *coeff= memalign(sizeof(double), length*sizeof(double));
1400 SwsVector *vec= malloc(sizeof(SwsVector));
1403 vec->length= length;
1405 for(i=0; i<length; i++) coeff[i]= 0.0;
1407 for(i=0; i<a->length; i++)
1409 for(j=0; j<b->length; j++)
1411 coeff[i+j]+= a->coeff[i]*b->coeff[j];
1418 static SwsVector *sumVec(SwsVector *a, SwsVector *b){
1419 int length= MAX(a->length, b->length);
1420 double *coeff= memalign(sizeof(double), length*sizeof(double));
1422 SwsVector *vec= malloc(sizeof(SwsVector));
1425 vec->length= length;
1427 for(i=0; i<length; i++) coeff[i]= 0.0;
1429 for(i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
1430 for(i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]+= b->coeff[i];
1435 static SwsVector *diffVec(SwsVector *a, SwsVector *b){
1436 int length= MAX(a->length, b->length);
1437 double *coeff= memalign(sizeof(double), length*sizeof(double));
1439 SwsVector *vec= malloc(sizeof(SwsVector));
1442 vec->length= length;
1444 for(i=0; i<length; i++) coeff[i]= 0.0;
1446 for(i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
1447 for(i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]-= b->coeff[i];
/* shifts the vector to the left, or to the right if "shift" is negative */
1453 static SwsVector *getShiftedVec(SwsVector *a, int shift){
1454 int length= a->length + ABS(shift)*2;
1455 double *coeff= memalign(sizeof(double), length*sizeof(double));
1457 SwsVector *vec= malloc(sizeof(SwsVector));
1460 vec->length= length;
1462 for(i=0; i<length; i++) coeff[i]= 0.0;
1464 for(i=0; i<a->length; i++)
1466 coeff[i + (length-1)/2 - (a->length-1)/2 - shift]= a->coeff[i];
1472 void shiftVec(SwsVector *a, int shift){
1473 SwsVector *shifted= getShiftedVec(a, shift);
1475 a->coeff= shifted->coeff;
1476 a->length= shifted->length;
1480 void addVec(SwsVector *a, SwsVector *b){
1481 SwsVector *sum= sumVec(a, b);
1483 a->coeff= sum->coeff;
1484 a->length= sum->length;
1488 void subVec(SwsVector *a, SwsVector *b){
1489 SwsVector *diff= diffVec(a, b);
1491 a->coeff= diff->coeff;
1492 a->length= diff->length;
1496 void convVec(SwsVector *a, SwsVector *b){
1497 SwsVector *conv= getConvVec(a, b);
1499 a->coeff= conv->coeff;
1500 a->length= conv->length;
1504 SwsVector *cloneVec(SwsVector *a){
1505 double *coeff= memalign(sizeof(double), a->length*sizeof(double));
1507 SwsVector *vec= malloc(sizeof(SwsVector));
1510 vec->length= a->length;
1512 for(i=0; i<a->length; i++) coeff[i]= a->coeff[i];
1517 void printVec(SwsVector *a){
1523 for(i=0; i<a->length; i++)
1524 if(a->coeff[i]>max) max= a->coeff[i];
1526 for(i=0; i<a->length; i++)
1527 if(a->coeff[i]<min) min= a->coeff[i];
1531 for(i=0; i<a->length; i++)
1533 int x= (int)((a->coeff[i]-min)*60.0/range +0.5);
1534 printf("%1.3f ", a->coeff[i]);
1535 for(;x>0; x--) printf(" ");
1540 void freeVec(SwsVector *a){
1542 if(a->coeff) free(a->coeff);
1548 void freeSwsContext(SwsContext *c){
1555 for(i=0; i<c->vLumBufSize*2; i++)
1557 if(c->lumPixBuf[i]) free(c->lumPixBuf[i]);
1558 c->lumPixBuf[i]=NULL;
1566 for(i=0; i<c->vChrBufSize*2; i++)
1568 if(c->chrPixBuf[i]) free(c->chrPixBuf[i]);
1569 c->chrPixBuf[i]=NULL;
1575 if(c->vLumFilter) free(c->vLumFilter);
1576 c->vLumFilter = NULL;
1577 if(c->vChrFilter) free(c->vChrFilter);
1578 c->vChrFilter = NULL;
1579 if(c->hLumFilter) free(c->hLumFilter);
1580 c->hLumFilter = NULL;
1581 if(c->hChrFilter) free(c->hChrFilter);
1582 c->hChrFilter = NULL;
1584 if(c->vLumFilterPos) free(c->vLumFilterPos);
1585 c->vLumFilterPos = NULL;
1586 if(c->vChrFilterPos) free(c->vChrFilterPos);
1587 c->vChrFilterPos = NULL;
1588 if(c->hLumFilterPos) free(c->hLumFilterPos);
1589 c->hLumFilterPos = NULL;
1590 if(c->hChrFilterPos) free(c->hChrFilterPos);
1591 c->hChrFilterPos = NULL;
1593 if(c->lumMmxFilter) free(c->lumMmxFilter);
1594 c->lumMmxFilter = NULL;
1595 if(c->chrMmxFilter) free(c->chrMmxFilter);
1596 c->chrMmxFilter = NULL;