X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fdsputil.c;h=a9661eb2b5ef65a486a581d6c57811c90f66d8b3;hb=cc615d2ce8fd3dc651a9fc2cc9c522afd706fc06;hp=40f03394363df06159d791a30431540a8b774320;hpb=eb75a69818c15272e94de9aba8e87a2c3535437a;p=ffmpeg diff --git a/libavcodec/dsputil.c b/libavcodec/dsputil.c index 40f03394363..a9661eb2b5e 100644 --- a/libavcodec/dsputil.c +++ b/libavcodec/dsputil.c @@ -29,9 +29,9 @@ #include "avcodec.h" #include "dsputil.h" -#include "mpegvideo.h" #include "simple_idct.h" #include "faandct.h" +#include "faanidct.h" #include "h263.h" #include "snow.h" @@ -41,9 +41,19 @@ void ff_spatial_dwt(int *buffer, int width, int height, int stride, int type, in /* vorbis.c */ void vorbis_inverse_coupling(float *mag, float *ang, int blocksize); +/* flacenc.c */ +void ff_flac_compute_autocorr(const int32_t *data, int len, int lag, double *autoc); + +/* pngdec.c */ +void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp); + uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP] = {0, }; uint32_t ff_squareTbl[512] = {0, }; +// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size +#define pb_7f (~0UL/255 * 0x7f) +#define pb_80 (~0UL/255 * 0x80) + const uint8_t ff_zigzag_direct[64] = { 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, @@ -141,6 +151,32 @@ static const uint8_t simple_mmx_permutation[64]={ 0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F, }; +static const uint8_t idct_sse2_row_perm[8] = {0, 4, 1, 5, 2, 6, 3, 7}; + +void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){ + int i; + int end; + + st->scantable= src_scantable; + + for(i=0; i<64; i++){ + int j; + j = src_scantable[i]; + st->permutated[i] = permutation[j]; +#ifdef ARCH_POWERPC + st->inverse[j] = i; +#endif + } + + end=-1; + for(i=0; i<64; i++){ + int j; + j = st->permutated[i]; + if(j>end) end=j; + st->raster_end[i]= end; + } +} + static int pix_sum_c(uint8_t * pix, int line_size) { int s, i, j; @@ -211,7 +247,7 @@ static int pix_norm1_c(uint8_t * pix, int line_size) return s; } -static void bswap_buf(uint32_t *dst, uint32_t *src, int w){ +static void bswap_buf(uint32_t *dst, const uint32_t *src, int w){ int i; for(i=0; i+8<=w; i+=8){ @@ -393,6 +429,106 @@ int w97_32_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){ } #endif +/* draw the edges of width 'w' of an image of size width, height */ +//FIXME check that this is ok for mpeg4 interlaced +static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w) +{ + uint8_t *ptr, *last_line; + int i; + + last_line = buf + (height - 1) * wrap; + for(i=0;i= h){ + src+= (h-1-src_y)*linesize; + src_y=h-1; + }else if(src_y<=-block_h){ + src+= (1-block_h-src_y)*linesize; + src_y=1-block_h; + } + if(src_x>= w){ + src+= (w-1-src_x); + src_x=w-1; + }else if(src_x<=-block_w){ + src+= (1-block_w-src_x); + src_x=1-block_w; + } + + start_y= FFMAX(0, -src_y); + start_x= FFMAX(0, -src_x); + end_y= FFMIN(block_h, h-src_y); + end_x= FFMIN(block_w, w-src_x); + + // copy existing part + for(y=start_y; y>1));\ pixels+=line_size;\ block +=line_size;\ @@ -630,8 +766,8 @@ static void OPNAME ## _pixels_x2_c(uint8_t *block, const uint8_t *pixels, int li {\ int i;\ for(i=0; i>1));\ pixels+=line_size;\ block +=line_size;\ @@ -642,8 +778,8 @@ static void OPNAME ## _no_rnd_pixels_y2_c(uint8_t *block, const uint8_t *pixels, {\ int i;\ for(i=0; i>1));\ pixels+=line_size;\ block +=line_size;\ @@ -654,8 +790,8 @@ static void OPNAME ## 
_pixels_y2_c(uint8_t *block, const uint8_t *pixels, int li {\ int i;\ for(i=0; i>1));\ pixels+=line_size;\ block +=line_size;\ @@ -665,8 +801,8 @@ static void OPNAME ## _pixels_y2_c(uint8_t *block, const uint8_t *pixels, int li static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\ {\ int i;\ - const uint64_t a= LD64(pixels );\ - const uint64_t b= LD64(pixels+1);\ + const uint64_t a= AV_RN64(pixels );\ + const uint64_t b= AV_RN64(pixels+1);\ uint64_t l0= (a&0x0303030303030303ULL)\ + (b&0x0303030303030303ULL)\ + 0x0202020202020202ULL;\ @@ -676,8 +812,8 @@ static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int l \ pixels+=line_size;\ for(i=0; i>2)\ @@ -685,8 +821,8 @@ static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int l OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\ pixels+=line_size;\ block +=line_size;\ - a= LD64(pixels );\ - b= LD64(pixels+1);\ + a= AV_RN64(pixels );\ + b= AV_RN64(pixels+1);\ l0= (a&0x0303030303030303ULL)\ + (b&0x0303030303030303ULL)\ + 0x0202020202020202ULL;\ @@ -701,8 +837,8 @@ static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int l static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\ {\ int i;\ - const uint64_t a= LD64(pixels );\ - const uint64_t b= LD64(pixels+1);\ + const uint64_t a= AV_RN64(pixels );\ + const uint64_t b= AV_RN64(pixels+1);\ uint64_t l0= (a&0x0303030303030303ULL)\ + (b&0x0303030303030303ULL)\ + 0x0101010101010101ULL;\ @@ -712,8 +848,8 @@ static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels \ pixels+=line_size;\ for(i=0; i>2)\ @@ -721,8 +857,8 @@ static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\ pixels+=line_size;\ block +=line_size;\ - a= LD64(pixels );\ - b= LD64(pixels+1);\ + a= AV_RN64(pixels );\ + b= AV_RN64(pixels+1);\ l0= (a&0x0303030303030303ULL)\ + (b&0x0303030303030303ULL)\ + 0x0101010101010101ULL;\ @@ -749,7 +885,7 @@ CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels_xy2_c, static void OPNAME ## _pixels2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h){\ int i;\ for(i=0; i>2)\ + ((d&0xFCFCFCFCUL)>>2);\ OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\ - a= LD32(&src1[i*src_stride1+4]);\ - b= LD32(&src2[i*src_stride2+4]);\ - c= LD32(&src3[i*src_stride3+4]);\ - d= LD32(&src4[i*src_stride4+4]);\ + a= AV_RN32(&src1[i*src_stride1+4]);\ + b= AV_RN32(&src2[i*src_stride2+4]);\ + c= AV_RN32(&src3[i*src_stride3+4]);\ + d= AV_RN32(&src4[i*src_stride4+4]);\ l0= (a&0x03030303UL)\ + (b&0x03030303UL)\ + 0x02020202UL;\ @@ -910,10 +1046,10 @@ static inline void OPNAME ## _no_rnd_pixels8_l4(uint8_t *dst, const uint8_t *src int i;\ for(i=0; i>2)\ + ((d&0xFCFCFCFCUL)>>2);\ OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\ - a= LD32(&src1[i*src_stride1+4]);\ - b= LD32(&src2[i*src_stride2+4]);\ - c= LD32(&src3[i*src_stride3+4]);\ - d= LD32(&src4[i*src_stride4+4]);\ + a= AV_RN32(&src1[i*src_stride1+4]);\ + b= AV_RN32(&src2[i*src_stride2+4]);\ + c= AV_RN32(&src3[i*src_stride3+4]);\ + d= AV_RN32(&src4[i*src_stride4+4]);\ l0= (a&0x03030303UL)\ + (b&0x03030303UL)\ + 0x01010101UL;\ @@ -987,8 +1123,8 @@ static inline void OPNAME ## _pixels2_xy2_c(uint8_t *block, const uint8_t *pixel static inline void OPNAME ## _pixels4_xy2_c(uint8_t *block, const uint8_t *pixels, 
int line_size, int h)\ {\ int i;\ - const uint32_t a= LD32(pixels );\ - const uint32_t b= LD32(pixels+1);\ + const uint32_t a= AV_RN32(pixels );\ + const uint32_t b= AV_RN32(pixels+1);\ uint32_t l0= (a&0x03030303UL)\ + (b&0x03030303UL)\ + 0x02020202UL;\ @@ -998,8 +1134,8 @@ static inline void OPNAME ## _pixels4_xy2_c(uint8_t *block, const uint8_t *pixel \ pixels+=line_size;\ for(i=0; i>2)\ @@ -1007,8 +1143,8 @@ static inline void OPNAME ## _pixels4_xy2_c(uint8_t *block, const uint8_t *pixel OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\ pixels+=line_size;\ block +=line_size;\ - a= LD32(pixels );\ - b= LD32(pixels+1);\ + a= AV_RN32(pixels );\ + b= AV_RN32(pixels+1);\ l0= (a&0x03030303UL)\ + (b&0x03030303UL)\ + 0x02020202UL;\ @@ -1025,8 +1161,8 @@ static inline void OPNAME ## _pixels8_xy2_c(uint8_t *block, const uint8_t *pixel int j;\ for(j=0; j<2; j++){\ int i;\ - const uint32_t a= LD32(pixels );\ - const uint32_t b= LD32(pixels+1);\ + const uint32_t a= AV_RN32(pixels );\ + const uint32_t b= AV_RN32(pixels+1);\ uint32_t l0= (a&0x03030303UL)\ + (b&0x03030303UL)\ + 0x02020202UL;\ @@ -1036,8 +1172,8 @@ static inline void OPNAME ## _pixels8_xy2_c(uint8_t *block, const uint8_t *pixel \ pixels+=line_size;\ for(i=0; i>2)\ @@ -1045,8 +1181,8 @@ static inline void OPNAME ## _pixels8_xy2_c(uint8_t *block, const uint8_t *pixel OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\ pixels+=line_size;\ block +=line_size;\ - a= LD32(pixels );\ - b= LD32(pixels+1);\ + a= AV_RN32(pixels );\ + b= AV_RN32(pixels+1);\ l0= (a&0x03030303UL)\ + (b&0x03030303UL)\ + 0x02020202UL;\ @@ -1066,8 +1202,8 @@ static inline void OPNAME ## _no_rnd_pixels8_xy2_c(uint8_t *block, const uint8_t int j;\ for(j=0; j<2; j++){\ int i;\ - const uint32_t a= LD32(pixels );\ - const uint32_t b= LD32(pixels+1);\ + const uint32_t a= AV_RN32(pixels );\ + const uint32_t b= AV_RN32(pixels+1);\ uint32_t l0= (a&0x03030303UL)\ + (b&0x03030303UL)\ + 0x01010101UL;\ @@ -1077,8 +1213,8 @@ static inline void OPNAME ## _no_rnd_pixels8_xy2_c(uint8_t *block, const uint8_t \ pixels+=line_size;\ for(i=0; i>2)\ @@ -1086,8 +1222,8 @@ static inline void OPNAME ## _no_rnd_pixels8_xy2_c(uint8_t *block, const uint8_t OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\ pixels+=line_size;\ block +=line_size;\ - a= LD32(pixels );\ - b= LD32(pixels+1);\ + a= AV_RN32(pixels );\ + b= AV_RN32(pixels+1);\ l0= (a&0x03030303UL)\ + (b&0x03030303UL)\ + 0x01010101UL;\ @@ -1437,12 +1573,22 @@ static void OPNAME ## h264_chroma_mc2_c(uint8_t *dst/*align 8*/, uint8_t *src/*a \ assert(x<8 && y<8 && x>=0 && y>=0);\ \ - for(i=0; i=0 && y>=0);\ \ - for(i=0; i=0 && y>=0);\ \ - for(i=0; iblock_last_index[0/*FIXME*/]= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i); s->dct_unquantize_inter(s, temp, 0, s->qscale); - simple_idct(temp); //FIXME + ff_simple_idct(temp); //FIXME for(i=0; i<64; i++) sum+= (temp[i]-bak[i])*(temp[i]-bak[i]); @@ -3505,7 +3698,7 @@ static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int DECLARE_ALIGNED_8 (uint64_t, aligned_bak[stride]); DCTELEM * const temp= (DCTELEM*)aligned_temp; uint8_t * const bak= (uint8_t*)aligned_bak; - int i, last, run, bits, level, distoration, start_i; + int i, last, run, bits, level, distortion, start_i; const int esc_length= s->ac_esc_length; uint8_t * length; uint8_t * last_length; @@ -3572,9 +3765,9 @@ static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int s->dsp.idct_add(bak, stride, temp); - distoration= s->dsp.sse[1](NULL, bak, src1, 
stride, 8); + distortion= s->dsp.sse[1](NULL, bak, src1, stride, 8); - return distoration + ((bits*s->qscale*s->qscale*109 + 64)>>7); + return distortion + ((bits*s->qscale*s->qscale*109 + 64)>>7); } static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){ @@ -3707,16 +3900,16 @@ static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2, return score; } -WARPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c) -WARPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c) -WARPER8_16_SQ(dct_sad8x8_c, dct_sad16_c) +WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c) +WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c) +WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c) #ifdef CONFIG_GPL -WARPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c) +WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c) #endif -WARPER8_16_SQ(dct_max8x8_c, dct_max16_c) -WARPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c) -WARPER8_16_SQ(rd8x8_c, rd16_c) -WARPER8_16_SQ(bit8x8_c, bit16_c) +WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c) +WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c) +WRAPPER8_16_SQ(rd8x8_c, rd16_c) +WRAPPER8_16_SQ(bit8x8_c, bit16_c) static void vector_fmul_c(float *dst, const float *src, int len){ int i; @@ -3737,22 +3930,158 @@ void ff_vector_fmul_add_add_c(float *dst, const float *src0, const float *src1, dst[i*step] = src0[i] * src1[i] + src2[i] + src3; } -void ff_float_to_int16_c(int16_t *dst, const float *src, int len){ +void ff_vector_fmul_window_c(float *dst, const float *src0, const float *src1, const float *win, float add_bias, int len){ + int i,j; + dst += len; + win += len; + src0+= len; + for(i=-len, j=len-1; i<0; i++, j--) { + float s0 = src0[i]; + float s1 = src1[j]; + float wi = win[i]; + float wj = win[j]; + dst[i] = s0*wj - s1*wi + add_bias; + dst[j] = s0*wi + s1*wj + add_bias; + } +} + +static av_always_inline int float_to_int16_one(const float *src){ + int_fast32_t tmp = *(const int32_t*)src; + if(tmp & 0xf0000){ + tmp = (0x43c0ffff - tmp)>>31; + // is this faster on some gcc/cpu combinations? +// if(tmp > 0x43c0ffff) tmp = 0xFFFF; +// else tmp = 0; + } + return tmp - 0x8000; +} + +void ff_float_to_int16_c(int16_t *dst, const float *src, long len){ int i; - for(i=0; i>31; - // is this faster on some gcc/cpu combinations? 
-// if(tmp > 0x43c0ffff) tmp = 0xFFFF; -// else tmp = 0; + for(i=0; i> shift; + + return res; +} + +#define W0 2048 +#define W1 2841 /* 2048*sqrt (2)*cos (1*pi/16) */ +#define W2 2676 /* 2048*sqrt (2)*cos (2*pi/16) */ +#define W3 2408 /* 2048*sqrt (2)*cos (3*pi/16) */ +#define W4 2048 /* 2048*sqrt (2)*cos (4*pi/16) */ +#define W5 1609 /* 2048*sqrt (2)*cos (5*pi/16) */ +#define W6 1108 /* 2048*sqrt (2)*cos (6*pi/16) */ +#define W7 565 /* 2048*sqrt (2)*cos (7*pi/16) */ + +static void wmv2_idct_row(short * b) +{ + int s1,s2; + int a0,a1,a2,a3,a4,a5,a6,a7; + /*step 1*/ + a1 = W1*b[1]+W7*b[7]; + a7 = W7*b[1]-W1*b[7]; + a5 = W5*b[5]+W3*b[3]; + a3 = W3*b[5]-W5*b[3]; + a2 = W2*b[2]+W6*b[6]; + a6 = W6*b[2]-W2*b[6]; + a0 = W0*b[0]+W0*b[4]; + a4 = W0*b[0]-W0*b[4]; + /*step 2*/ + s1 = (181*(a1-a5+a7-a3)+128)>>8;//1,3,5,7, + s2 = (181*(a1-a5-a7+a3)+128)>>8; + /*step 3*/ + b[0] = (a0+a2+a1+a5 + (1<<7))>>8; + b[1] = (a4+a6 +s1 + (1<<7))>>8; + b[2] = (a4-a6 +s2 + (1<<7))>>8; + b[3] = (a0-a2+a7+a3 + (1<<7))>>8; + b[4] = (a0-a2-a7-a3 + (1<<7))>>8; + b[5] = (a4-a6 -s2 + (1<<7))>>8; + b[6] = (a4+a6 -s1 + (1<<7))>>8; + b[7] = (a0+a2-a1-a5 + (1<<7))>>8; +} +static void wmv2_idct_col(short * b) +{ + int s1,s2; + int a0,a1,a2,a3,a4,a5,a6,a7; + /*step 1, with extended precision*/ + a1 = (W1*b[8*1]+W7*b[8*7] + 4)>>3; + a7 = (W7*b[8*1]-W1*b[8*7] + 4)>>3; + a5 = (W5*b[8*5]+W3*b[8*3] + 4)>>3; + a3 = (W3*b[8*5]-W5*b[8*3] + 4)>>3; + a2 = (W2*b[8*2]+W6*b[8*6] + 4)>>3; + a6 = (W6*b[8*2]-W2*b[8*6] + 4)>>3; + a0 = (W0*b[8*0]+W0*b[8*4] )>>3; + a4 = (W0*b[8*0]-W0*b[8*4] )>>3; + /*step 2*/ + s1 = (181*(a1-a5+a7-a3)+128)>>8; + s2 = (181*(a1-a5-a7+a3)+128)>>8; + /*step 3*/ + b[8*0] = (a0+a2+a1+a5 + (1<<13))>>14; + b[8*1] = (a4+a6 +s1 + (1<<13))>>14; + b[8*2] = (a4-a6 +s2 + (1<<13))>>14; + b[8*3] = (a0-a2+a7+a3 + (1<<13))>>14; + + b[8*4] = (a0-a2-a7-a3 + (1<<13))>>14; + b[8*5] = (a4-a6 -s2 + (1<<13))>>14; + b[8*6] = (a4+a6 -s1 + (1<<13))>>14; + b[8*7] = (a0+a2-a1-a5 + (1<<13))>>14; +} +void ff_wmv2_idct_c(short * block){ + int i; + + for(i=0;i<64;i+=8){ + wmv2_idct_row(block+i); + } + for(i=0;i<8;i++){ + wmv2_idct_col(block+i); + } +} /* XXX: those functions should be suppressed ASAP when all IDCTs are converted */ +static void ff_wmv2_idct_put_c(uint8_t *dest, int line_size, DCTELEM *block) +{ + ff_wmv2_idct_c(block); + put_pixels_clamped_c(block, dest, line_size); +} +static void ff_wmv2_idct_add_c(uint8_t *dest, int line_size, DCTELEM *block) +{ + ff_wmv2_idct_c(block); + add_pixels_clamped_c(block, dest, line_size); +} static void ff_jref_idct_put(uint8_t *dest, int line_size, DCTELEM *block) { j_rev_dct (block); @@ -3829,7 +4158,8 @@ int ff_check_alignment(void){ av_log(NULL, AV_LOG_ERROR, "Compiler did not align stack variables. Libavcodec has been miscompiled\n" "and may be very slow or crash. This is not a bug in libavcodec,\n" - "but in the compiler. Do not report crashes to FFmpeg developers.\n"); + "but in the compiler. 
You may try recompiling using gcc >= 4.2.\n" + "Do not report crashes to FFmpeg developers.\n"); #endif did_fail=1; } @@ -3891,10 +4221,20 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->idct_add= ff_vp3_idct_add_c; c->idct = ff_vp3_idct_c; c->idct_permutation_type= FF_NO_IDCT_PERM; + }else if(avctx->idct_algo==FF_IDCT_WMV2){ + c->idct_put= ff_wmv2_idct_put_c; + c->idct_add= ff_wmv2_idct_add_c; + c->idct = ff_wmv2_idct_c; + c->idct_permutation_type= FF_NO_IDCT_PERM; + }else if(avctx->idct_algo==FF_IDCT_FAAN){ + c->idct_put= ff_faanidct_put; + c->idct_add= ff_faanidct_add; + c->idct = ff_faanidct; + c->idct_permutation_type= FF_NO_IDCT_PERM; }else{ //accurate/default - c->idct_put= simple_idct_put; - c->idct_add= simple_idct_add; - c->idct = simple_idct; + c->idct_put= ff_simple_idct_put; + c->idct_add= ff_simple_idct_add; + c->idct = ff_simple_idct; c->idct_permutation_type= FF_NO_IDCT_PERM; } } @@ -4042,14 +4382,19 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->biweight_h264_pixels_tab[8]= biweight_h264_pixels2x4_c; c->biweight_h264_pixels_tab[9]= biweight_h264_pixels2x2_c; + c->draw_edges = draw_edges_c; + #ifdef CONFIG_CAVS_DECODER ff_cavsdsp_init(c,avctx); #endif #if defined(CONFIG_VC1_DECODER) || defined(CONFIG_WMV3_DECODER) ff_vc1dsp_init(c,avctx); #endif +#if defined(CONFIG_WMV2_DECODER) || defined(CONFIG_VC1_DECODER) || defined(CONFIG_WMV3_DECODER) + ff_intrax8dsp_init(c,avctx); +#endif #if defined(CONFIG_H264_ENCODER) - ff_h264dsp_init(c,avctx); + ff_h264dspenc_init(c,avctx); #endif c->put_mspel_pixels_tab[0]= put_mspel8_mc00_c; @@ -4096,9 +4441,13 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c; c->add_bytes= add_bytes_c; + c->add_bytes_l2= add_bytes_l2_c; c->diff_bytes= diff_bytes_c; c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_c; c->bswap_buf= bswap_buf; +#ifdef CONFIG_PNG_DECODER + c->add_png_paeth_prediction= ff_add_png_paeth_prediction; +#endif c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_c; c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_c; @@ -4109,8 +4458,8 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->h264_loop_filter_strength= NULL; if (ENABLE_ANY_H263) { - c->h263_h_loop_filter= h263_h_loop_filter_c; - c->h263_v_loop_filter= h263_v_loop_filter_c; + c->h263_h_loop_filter= h263_h_loop_filter_c; + c->h263_v_loop_filter= h263_v_loop_filter_c; } c->h261_loop_filter= h261_loop_filter_c; @@ -4126,11 +4475,19 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx) #ifdef CONFIG_VORBIS_DECODER c->vorbis_inverse_coupling = vorbis_inverse_coupling; +#endif +#ifdef CONFIG_FLAC_ENCODER + c->flac_compute_autocorr = ff_flac_compute_autocorr; #endif c->vector_fmul = vector_fmul_c; c->vector_fmul_reverse = vector_fmul_reverse_c; c->vector_fmul_add_add = ff_vector_fmul_add_add_c; + c->vector_fmul_window = ff_vector_fmul_window_c; c->float_to_int16 = ff_float_to_int16_c; + c->float_to_int16_interleave = ff_float_to_int16_interleave_c; + c->add_int16 = add_int16_c; + c->sub_int16 = sub_int16_c; + c->scalarproduct_int16 = scalarproduct_int16_c; c->shrink[0]= ff_img_copy_plane; c->shrink[1]= ff_shrink22; @@ -4145,7 +4502,7 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx) if (ENABLE_MMX) dsputil_init_mmx (c, avctx); if (ENABLE_ARMV4L) dsputil_init_armv4l(c, avctx); if (ENABLE_MLIB) dsputil_init_mlib (c, avctx); - if (ENABLE_SPARC) dsputil_init_vis (c, avctx); + if (ENABLE_VIS) dsputil_init_vis (c, avctx); if (ENABLE_ALPHA) dsputil_init_alpha (c, avctx); if 
(ENABLE_POWERPC) dsputil_init_ppc (c, avctx);
     if (ENABLE_MMI) dsputil_init_mmi (c, avctx);
@@ -4180,6 +4537,10 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx)
         for(i=0; i<64; i++)
             c->idct_permutation[i]= (i&0x24) | ((i&3)<<3) | ((i>>3)&3);
         break;
+    case FF_SSE2_IDCT_PERM:
+        for(i=0; i<64; i++)
+            c->idct_permutation[i]= (i&0x38) | idct_sse2_row_perm[i&7];
+        break;
     default:
         av_log(avctx, AV_LOG_ERROR, "Internal error, IDCT permutation not set\n");
     }
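
The FF_SSE2_IDCT_PERM case above builds a coefficient permutation that keeps each 8-coefficient row in place (i & 0x38) and reorders the columns within the row through idct_sse2_row_perm[] = {0, 4, 1, 5, 2, 6, 3, 7}, presumably to match the register layout the SSE2 IDCT expects. A minimal standalone sketch of the same table construction (the function and file layout here are mine, not FFmpeg's):

#include <stdio.h>

/* Rebuilds the FF_SSE2_IDCT_PERM permutation exactly as the diff does:
 * bits 3..5 (the row) are kept, the column is shuffled through
 * idct_sse2_row_perm. */
static const unsigned char idct_sse2_row_perm[8] = { 0, 4, 1, 5, 2, 6, 3, 7 };

int main(void)
{
    unsigned char perm[64];
    for (int i = 0; i < 64; i++)
        perm[i] = (i & 0x38) | idct_sse2_row_perm[i & 7];

    /* e.g. index 10 (row 1, column 2) maps to 8 | idct_sse2_row_perm[2] == 9 */
    for (int i = 0; i < 64; i++)
        printf("%2d%c", perm[i], (i & 7) == 7 ? '\n' : ' ');
    return 0;
}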
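
Elsewhere in the diff, float_to_int16_one() converts audio samples by reading the float's IEEE-754 bit pattern directly: 0x43C00000 is the encoding of 384.0f, and floats in [384.0, 386.0) map linearly onto the bit patterns 0x43C00000..0x43C0FFFF, so for a sample pre-biased into that window the low 16 bits minus 0x8000 are the PCM value, while the (tmp & 0xf0000) test catches values that overshoot the window. The sketch below assumes the conventional bias of 385.0f (the diff only shows the add_bias parameter of ff_vector_fmul_window_c, not its value) and uses memcpy instead of the original pointer cast; function names are mine:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same clamping bit trick as float_to_int16_one() in the diff; memcpy
 * replaces the pointer cast purely to sidestep aliasing in this sketch.
 * Like the original, the >>31 relies on arithmetic right shift. */
static int float_to_int16_sketch(float f)
{
    int32_t tmp;
    memcpy(&tmp, &f, sizeof(tmp));       /* reinterpret the float's bits   */
    if (tmp & 0xf0000)                   /* outside 0x43C0xxxx: clamp      */
        tmp = (0x43c0ffff - tmp) >> 31;  /* all-ones (high) or zero (low)  */
    return (int16_t)(tmp - 0x8000);      /* low 16 bits carry the sample   */
}

int main(void)
{
    /* Samples assumed pre-biased by 385.0f: 385.0f encodes 0 and each
     * 1/32768 step moves the result by 1. */
    const float biased[4] = { 385.0f, 385.5f, 384.0f, 386.5f };
    for (int i = 0; i < 4; i++)          /* expect 0, 16384, -32768, 32767 */
        printf("%g -> %d\n", biased[i], float_to_int16_sketch(biased[i]));
    return 0;
}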
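
The new pb_7f / pb_80 constants near the top of the diff build a machine word full of 0x7f or 0x80 bytes (~0UL/255 is 0x0101...01, as the accompanying comment says). They are the standard masks for SWAR byte-wise arithmetic on a whole long at a time. The routine that actually uses them (add_bytes_c and friends) is not visible in this excerpt, so the following is only an illustrative sketch of the usual identity these masks enable, not FFmpeg's exact code:

#include <stdio.h>
#include <string.h>

/* Same definitions as in the diff: a run of 0x7f / 0x80 bytes sized to the
 * native long. */
#define pb_7f (~0UL / 255 * 0x7f)
#define pb_80 (~0UL / 255 * 0x80)

/* Hypothetical use: add two buffers byte-wise, one word at a time, without
 * letting carries spill from one byte into the next.  The low 7 bits of each
 * byte add carry-free; the top bit is fixed up with the XOR of the operands'
 * top bits. */
static void add_bytes_swar(unsigned char *dst, const unsigned char *src, int n)
{
    int i = 0;
    for (; i + (int)sizeof(long) <= n; i += sizeof(long)) {
        unsigned long a, b, sum;
        memcpy(&a, dst + i, sizeof(long));
        memcpy(&b, src + i, sizeof(long));
        sum = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
        memcpy(dst + i, &sum, sizeof(long));
    }
    for (; i < n; i++)                    /* scalar tail */
        dst[i] += src[i];
}

int main(void)
{
    unsigned char d[8] = { 0x10, 0x7f, 0x80, 0xff, 0x01, 0x40, 0xc0, 0x00 };
    unsigned char s[8] = { 0x10, 0x01, 0x80, 0x01, 0xff, 0xc0, 0x40, 0x00 };
    add_bytes_swar(d, s, 8);
    for (int i = 0; i < 8; i++)
        printf("%02x ", d[i]);            /* expect 20 80 00 00 00 00 00 00 */
    printf("\n");
    return 0;
}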