X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fdsputil.c;h=eb9e1ada155e094e60426c7470f9e192f9d3395e;hb=36d6b545a1a5309b3d9223b0db40ad2879817af5;hp=a2a313abf6fa3dd6df5dc1cdb605067fa03febe9;hpb=0752cd39d27d08cb9530a0450bfa8e54913bfa69;p=ffmpeg

diff --git a/libavcodec/dsputil.c b/libavcodec/dsputil.c
index a2a313abf6f..eb9e1ada155 100644
--- a/libavcodec/dsputil.c
+++ b/libavcodec/dsputil.c
@@ -23,7 +23,7 @@
  */
 
 /**
- * @file libavcodec/dsputil.c
+ * @file
  * DSP utils
  */
 
@@ -33,27 +33,13 @@
 #include "faandct.h"
 #include "faanidct.h"
 #include "mathops.h"
-#include "snow.h"
 #include "mpegvideo.h"
 #include "config.h"
-
-/* snow.c */
-void ff_spatial_dwt(int *buffer, int width, int height, int stride, int type, int decomposition_count);
-
-/* vorbis.c */
-void vorbis_inverse_coupling(float *mag, float *ang, int blocksize);
-
-/* ac3dec.c */
-void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len);
-
-/* lpc.c */
-void ff_lpc_compute_autocorr(const int32_t *data, int len, int lag, double *autoc);
-
-/* pngdec.c */
-void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp);
-
-/* eaidct.c */
-void ff_ea_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block);
+#include "lpc.h"
+#include "ac3dec.h"
+#include "vorbis.h"
+#include "png.h"
+#include "vp8dsp.h"
 
 uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP] = {0, };
 uint32_t ff_squareTbl[512] = {0, };
@@ -87,7 +73,7 @@ const uint8_t ff_zigzag248_direct[64] = {
 };
 
 /* not permutated inverse zigzag_direct + 1 for MMX quantizer */
-DECLARE_ALIGNED_16(uint16_t, inv_zigzag_direct16[64]);
+DECLARE_ALIGNED(16, uint16_t, inv_zigzag_direct16)[64];
 
 const uint8_t ff_alternate_horizontal_scan[64] = {
     0,  1,  2,  3,  8,  9, 16, 17,
@@ -343,102 +329,6 @@ static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
     return s;
 }
 
-
-#if CONFIG_SNOW_ENCODER //dwt is in snow.c
-static inline int w_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int w, int h, int type){
-    int s, i, j;
-    const int dec_count= w==8 ? 3 : 4;
-    int tmp[32*32];
-    int level, ori;
-    static const int scale[2][2][4][4]={
-      {
-        {
-            // 9/7 8x8 dec=3
-            {268, 239, 239, 213},
-            {  0, 224, 224, 152},
-            {  0, 135, 135, 110},
-        },{
-            // 9/7 16x16 or 32x32 dec=4
-            {344, 310, 310, 280},
-            {  0, 320, 320, 228},
-            {  0, 175, 175, 136},
-            {  0, 129, 129, 102},
-        }
-      },{
-        {
-            // 5/3 8x8 dec=3
-            {275, 245, 245, 218},
-            {  0, 230, 230, 156},
-            {  0, 138, 138, 113},
-        },{
-            // 5/3 16x16 or 32x32 dec=4
-            {352, 317, 317, 286},
-            {  0, 328, 328, 233},
-            {  0, 180, 180, 140},
-            {  0, 132, 132, 105},
-        }
-      }
-    };
-
-    for (i = 0; i < h; i++) {
-        for (j = 0; j < w; j+=4) {
-            tmp[32*i+j+0] = (pix1[j+0] - pix2[j+0])<<4;
-            tmp[32*i+j+1] = (pix1[j+1] - pix2[j+1])<<4;
-            tmp[32*i+j+2] = (pix1[j+2] - pix2[j+2])<<4;
-            tmp[32*i+j+3] = (pix1[j+3] - pix2[j+3])<<4;
-        }
-        pix1 += line_size;
-        pix2 += line_size;
-    }
-
-    ff_spatial_dwt(tmp, w, h, 32, type, dec_count);
-
-    s=0;
-    assert(w==h);
-    for(level=0; level<dec_count; level++){
-        for(ori= level ? 1 : 0; ori<4; ori++){
-            int size= w>>(dec_count-level);
-            int sx= (ori&1) ? size : 0;
-            int stride= 32<<(dec_count-level);
-            int sy= (ori&2) ? stride>>1 : 0;
-
-            for(i=0; i<size; i++){
-                for(j=0; j<size; j++){
-                    int v= tmp[sx + sy + i*stride + j] * scale[type][dec_count-3][level][ori];
-                    s += FFABS(v);
-                }
-            }
-        }
-    }
-    assert(s>=0);
-    return s>>9;
-}
-
-static int w53_8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){
-    return w_c(v, pix1, pix2, line_size,  8, h, 1);
-}
-
-static int w97_8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){
-    return w_c(v, pix1, pix2, line_size,  8, h, 0);
-}
-
-static int w53_16_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){
-    return w_c(v, pix1, pix2, line_size, 16, h, 1);
-}
-
-static int w97_16_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){
-    return w_c(v, pix1, pix2, line_size, 16, h, 0);
-}
-
-int w53_32_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){
-    return w_c(v, pix1, pix2, line_size, 32, h, 1);
-}
-
-int w97_32_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){
-    return w_c(v, pix1, pix2, line_size, 32, h, 0);
-}
-#endif
-
 /* draw the edges of width 'w' of an image of size width, height */
 //FIXME check that this is ok for mpeg4 interlaced
 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
@@ -469,7 +359,7 @@ static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
 }
 
 /**
- * Copies a rectangular area of samples to a temporary buffer and replicates the boarder samples.
+ * Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
  * @param buf destination buffer
  * @param src source buffer
  * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
@@ -480,7 +370,7 @@ static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
 * @param w width of the source buffer
 * @param h height of the source buffer
 */
-void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
+void ff_emulated_edge_mc(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
                     int src_x, int src_y, int w, int h){
     int x, y;
     int start_y, start_x, end_y, end_x;
@@ -656,6 +546,27 @@ static void put_signed_pixels_clamped_c(const DCTELEM *block,
     }
 }
 
+static void put_pixels_nonclamped_c(const DCTELEM *block, uint8_t *restrict pixels,
+                                    int line_size)
+{
+    int i;
+
+    /* read the pixels */
+    for(i=0;i<8;i++) {
+        pixels[0] = block[0];
+        pixels[1] = block[1];
+        pixels[2] = block[2];
+        pixels[3] = block[3];
+        pixels[4] = block[4];
+        pixels[5] = block[5];
+        pixels[6] = block[6];
+        pixels[7] = block[7];
+
+        pixels += line_size;
+        block += 8;
+    }
+}
+
 static void add_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
                                  int line_size)
 {
@@ -747,6 +658,42 @@ static int sum_abs_dctelem_c(DCTELEM *block)
     return sum;
 }
 
+static void fill_block16_c(uint8_t *block, uint8_t value, int line_size, int h)
+{
+    int i;
+
+    for (i = 0; i < h; i++) {
+        memset(block, value, 16);
+        block += line_size;
+    }
+}
+
+static void fill_block8_c(uint8_t *block, uint8_t value, int line_size, int h)
+{
+    int i;
+
+    for (i = 0; i < h; i++) {
+        memset(block, value, 8);
+        block += line_size;
+    }
+}
+
+static void scale_block_c(const uint8_t src[64]/*align 8*/, uint8_t *dst/*align 8*/, int linesize)
+{
+    int i, j;
+    uint16_t *dst1 = (uint16_t *) dst;
+    uint16_t *dst2 = (uint16_t *)(dst + linesize);
+
+    for (j = 0; j < 8; j++) {
+        for (i = 0; i < 8; i++) {
+            dst1[i] = dst2[i] = src[i] * 0x0101;
+        }
+        src += 8;
+        dst1 += linesize;
+        dst2 += linesize;
+    }
+}
+
 #if 0
 
 #define PIXOP2(OPNAME, OP) \
@@ -999,7 +946,7 @@ static inline void OPNAME ## _pixels8_y2_c(uint8_t *block, const uint8_t *pixels
     OPNAME ## 
_pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\ }\ \ -static inline void OPNAME ## _pixels8_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\ +static inline void OPNAME ## _pixels8_l4(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\ int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\ int i;\ for(i=0; i>2)&0x0F0F0F0FUL));\ }\ }\ -static inline void OPNAME ## _pixels16_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\ +static inline void OPNAME ## _pixels16_l4(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\ int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\ OPNAME ## _pixels8_l4(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\ OPNAME ## _pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\ }\ -static inline void OPNAME ## _no_rnd_pixels16_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\ +static inline void OPNAME ## _no_rnd_pixels16_l4(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\ int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\ OPNAME ## _no_rnd_pixels8_l4(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\ OPNAME ## _no_rnd_pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\ @@ -2651,76 +2598,6 @@ H264_MC(avg_, 16) #undef op2_put #endif -#define op_scale1(x) block[x] = av_clip_uint8( (block[x]*weight + offset) >> log2_denom ) -#define op_scale2(x) dst[x] = av_clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1)) -#define H264_WEIGHT(W,H) \ -static void weight_h264_pixels ## W ## x ## H ## _c(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \ - int y; \ - offset <<= log2_denom; \ - if(log2_denom) offset += 1<<(log2_denom-1); \ - for(y=0; y> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] ); - tc++; - } - if( FFABS( q2 - q0 ) < beta ) { - pix[ xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] ); - tc++; - } - - i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); - pix[-xstride] = av_clip_uint8( p0 + i_delta ); /* p0' */ - pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */ - } - pix += ystride; - } - } -} -static void h264_v_loop_filter_luma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) -{ - h264_loop_filter_luma_c(pix, stride, 1, alpha, beta, tc0); -} -static void h264_h_loop_filter_luma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) -{ - h264_loop_filter_luma_c(pix, 1, stride, alpha, beta, tc0); -} - -static inline void h264_loop_filter_luma_intra_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta) -{ - int d; - for( d = 0; d < 16; d++ ) { - const int p2 = pix[-3*xstride]; - const int p1 = pix[-2*xstride]; - const int p0 = pix[-1*xstride]; - - const int q0 = pix[ 0*xstride]; - const int q1 = pix[ 1*xstride]; - const int q2 = pix[ 2*xstride]; - - if( FFABS( p0 - q0 ) < alpha && - FFABS( p1 - p0 ) < beta && - FFABS( q1 - q0 ) < beta ) { - - if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){ - if( FFABS( p2 - p0 ) < beta) - { - const int 
p3 = pix[-4*xstride]; - /* p0', p1', p2' */ - pix[-1*xstride] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3; - pix[-2*xstride] = ( p2 + p1 + p0 + q0 + 2 ) >> 2; - pix[-3*xstride] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3; - } else { - /* p0' */ - pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; - } - if( FFABS( q2 - q0 ) < beta) - { - const int q3 = pix[3*xstride]; - /* q0', q1', q2' */ - pix[0*xstride] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3; - pix[1*xstride] = ( p0 + q0 + q1 + q2 + 2 ) >> 2; - pix[2*xstride] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3; - } else { - /* q0' */ - pix[0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2; - } - }else{ - /* p0', q0' */ - pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; - pix[ 0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2; - } - } - pix += ystride; - } -} -static void h264_v_loop_filter_luma_intra_c(uint8_t *pix, int stride, int alpha, int beta) -{ - h264_loop_filter_luma_intra_c(pix, stride, 1, alpha, beta); -} -static void h264_h_loop_filter_luma_intra_c(uint8_t *pix, int stride, int alpha, int beta) -{ - h264_loop_filter_luma_intra_c(pix, 1, stride, alpha, beta); -} - -static inline void h264_loop_filter_chroma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0) -{ - int i, d; - for( i = 0; i < 4; i++ ) { - const int tc = tc0[i]; - if( tc <= 0 ) { - pix += 2*ystride; - continue; - } - for( d = 0; d < 2; d++ ) { - const int p0 = pix[-1*xstride]; - const int p1 = pix[-2*xstride]; - const int q0 = pix[0]; - const int q1 = pix[1*xstride]; - - if( FFABS( p0 - q0 ) < alpha && - FFABS( p1 - p0 ) < beta && - FFABS( q1 - q0 ) < beta ) { - - int delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); - - pix[-xstride] = av_clip_uint8( p0 + delta ); /* p0' */ - pix[0] = av_clip_uint8( q0 - delta ); /* q0' */ - } - pix += ystride; - } - } -} -static void h264_v_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) -{ - h264_loop_filter_chroma_c(pix, stride, 1, alpha, beta, tc0); -} -static void h264_h_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) -{ - h264_loop_filter_chroma_c(pix, 1, stride, alpha, beta, tc0); -} - -static inline void h264_loop_filter_chroma_intra_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta) -{ - int d; - for( d = 0; d < 8; d++ ) { - const int p0 = pix[-1*xstride]; - const int p1 = pix[-2*xstride]; - const int q0 = pix[0]; - const int q1 = pix[1*xstride]; - - if( FFABS( p0 - q0 ) < alpha && - FFABS( p1 - p0 ) < beta && - FFABS( q1 - q0 ) < beta ) { - - pix[-xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */ - pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */ - } - pix += ystride; - } -} -static void h264_v_loop_filter_chroma_intra_c(uint8_t *pix, int stride, int alpha, int beta) -{ - h264_loop_filter_chroma_intra_c(pix, stride, 1, alpha, beta); -} -static void h264_h_loop_filter_chroma_intra_c(uint8_t *pix, int stride, int alpha, int beta) -{ - h264_loop_filter_chroma_intra_c(pix, 1, stride, alpha, beta); -} - static inline int pix_abs16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; @@ -3500,7 +3189,7 @@ void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){ case FF_CMP_NSSE: cmp[i]= c->nsse[i]; break; -#if CONFIG_SNOW_ENCODER +#if CONFIG_DWT case FF_CMP_W53: cmp[i]= c->w53[i]; break; @@ -3632,35 +3321,42 @@ static int add_hfyu_left_prediction_c(uint8_t *dst, const uint8_t *src, int w, i #define B 3 #define G 2 #define R 1 +#define A 0 #else #define B 0 #define G 1 #define R 2 +#define A 3 #endif -static void 
add_hfyu_left_prediction_bgr32_c(uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue){ +static void add_hfyu_left_prediction_bgr32_c(uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha){ int i; - int r,g,b; + int r,g,b,a; r= *red; g= *green; b= *blue; + a= *alpha; for(i=0; iintra_scantable.permutated; - DECLARE_ALIGNED_16(uint64_t, aligned_temp[sizeof(DCTELEM)*64/8]); - DECLARE_ALIGNED_16(uint64_t, aligned_src1[8]); - DECLARE_ALIGNED_16(uint64_t, aligned_src2[8]); - DCTELEM * const temp= (DCTELEM*)aligned_temp; - uint8_t * const lsrc1 = (uint8_t*)aligned_src1; - uint8_t * const lsrc2 = (uint8_t*)aligned_src2; + LOCAL_ALIGNED_16(DCTELEM, temp, [64]); + LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]); + LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]); int i, last, run, bits, level, distortion, start_i; const int esc_length= s->ac_esc_length; uint8_t * length; @@ -3965,8 +3655,7 @@ static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){ MpegEncContext * const s= (MpegEncContext *)c; const uint8_t *scantable= s->intra_scantable.permutated; - DECLARE_ALIGNED_16(uint64_t, aligned_temp[sizeof(DCTELEM)*64/8]); - DCTELEM * const temp= (DCTELEM*)aligned_temp; + LOCAL_ALIGNED_16(DCTELEM, temp, [64]); int i, last, run, bits, level, start_i; const int esc_length= s->ac_esc_length; uint8_t * length; @@ -4299,7 +3988,7 @@ void ff_float_to_int16_interleave_c(int16_t *dst, const float **src, long len, i } } -static int32_t scalarproduct_int16_c(int16_t * v1, int16_t * v2, int order, int shift) +static int32_t scalarproduct_int16_c(const int16_t * v1, const int16_t * v2, int order, int shift) { int res = 0; @@ -4309,7 +3998,7 @@ static int32_t scalarproduct_int16_c(int16_t * v1, int16_t * v2, int order, int return res; } -static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul) +static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul) { int res = 0; while (order--) { @@ -4471,7 +4160,7 @@ av_cold void dsputil_static_init(void) int ff_check_alignment(void){ static int did_fail=0; - DECLARE_ALIGNED_16(int, aligned); + DECLARE_ALIGNED(16, int, aligned); if((intptr_t)&aligned & 15){ if(!did_fail){ @@ -4555,6 +4244,11 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) }else if(CONFIG_EATGQ_DECODER && avctx->idct_algo==FF_IDCT_EA) { c->idct_put= ff_ea_idct_put_c; c->idct_permutation_type= FF_NO_IDCT_PERM; + }else if(CONFIG_BINK_DECODER && avctx->idct_algo==FF_IDCT_BINK) { + c->idct = ff_bink_idct_c; + c->idct_add = ff_bink_idct_add_c; + c->idct_put = ff_bink_idct_put_c; + c->idct_permutation_type = FF_NO_IDCT_PERM; }else{ //accurate/default c->idct_put= ff_simple_idct_put; c->idct_add= ff_simple_idct_add; @@ -4563,21 +4257,11 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) } } - if (CONFIG_H264_DECODER) { - c->h264_idct_add= ff_h264_idct_add_c; - c->h264_idct8_add= ff_h264_idct8_add_c; - c->h264_idct_dc_add= ff_h264_idct_dc_add_c; - c->h264_idct8_dc_add= ff_h264_idct8_dc_add_c; - c->h264_idct_add16 = ff_h264_idct_add16_c; - c->h264_idct8_add4 = ff_h264_idct8_add4_c; - c->h264_idct_add8 = ff_h264_idct_add8_c; - c->h264_idct_add16intra= ff_h264_idct_add16intra_c; - } - c->get_pixels = get_pixels_c; c->diff_pixels = diff_pixels_c; c->put_pixels_clamped = put_pixels_clamped_c; c->put_signed_pixels_clamped = 
put_signed_pixels_clamped_c; + c->put_pixels_nonclamped = put_pixels_nonclamped_c; c->add_pixels_clamped = add_pixels_clamped_c; c->add_pixels8 = add_pixels8_c; c->add_pixels4 = add_pixels4_c; @@ -4589,6 +4273,10 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->pix_sum = pix_sum_c; c->pix_norm1 = pix_norm1_c; + c->fill_block_tab[0] = fill_block16_c; + c->fill_block_tab[1] = fill_block8_c; + c->scale_block = scale_block_c; + /* TODO [0] 16 [1] 8 */ c->pix_abs[0][0] = pix_abs16_c; c->pix_abs[0][1] = pix_abs16_x2_c; @@ -4691,27 +4379,6 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_no_rnd_vc1_chroma_mc8_c; c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_no_rnd_vc1_chroma_mc8_c; - c->weight_h264_pixels_tab[0]= weight_h264_pixels16x16_c; - c->weight_h264_pixels_tab[1]= weight_h264_pixels16x8_c; - c->weight_h264_pixels_tab[2]= weight_h264_pixels8x16_c; - c->weight_h264_pixels_tab[3]= weight_h264_pixels8x8_c; - c->weight_h264_pixels_tab[4]= weight_h264_pixels8x4_c; - c->weight_h264_pixels_tab[5]= weight_h264_pixels4x8_c; - c->weight_h264_pixels_tab[6]= weight_h264_pixels4x4_c; - c->weight_h264_pixels_tab[7]= weight_h264_pixels4x2_c; - c->weight_h264_pixels_tab[8]= weight_h264_pixels2x4_c; - c->weight_h264_pixels_tab[9]= weight_h264_pixels2x2_c; - c->biweight_h264_pixels_tab[0]= biweight_h264_pixels16x16_c; - c->biweight_h264_pixels_tab[1]= biweight_h264_pixels16x8_c; - c->biweight_h264_pixels_tab[2]= biweight_h264_pixels8x16_c; - c->biweight_h264_pixels_tab[3]= biweight_h264_pixels8x8_c; - c->biweight_h264_pixels_tab[4]= biweight_h264_pixels8x4_c; - c->biweight_h264_pixels_tab[5]= biweight_h264_pixels4x8_c; - c->biweight_h264_pixels_tab[6]= biweight_h264_pixels4x4_c; - c->biweight_h264_pixels_tab[7]= biweight_h264_pixels4x2_c; - c->biweight_h264_pixels_tab[8]= biweight_h264_pixels2x4_c; - c->biweight_h264_pixels_tab[9]= biweight_h264_pixels2x2_c; - c->draw_edges = draw_edges_c; #if CONFIG_CAVS_DECODER @@ -4775,11 +4442,8 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->vsse[5]= vsse_intra8_c; c->nsse[0]= nsse16_c; c->nsse[1]= nsse8_c; -#if CONFIG_SNOW_ENCODER - c->w53[0]= w53_16_c; - c->w53[1]= w53_8_c; - c->w97[0]= w97_16_c; - c->w97[1]= w97_8_c; +#if CONFIG_DWT + ff_dsputil_init_dwt(c); #endif c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c; @@ -4796,16 +4460,6 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->add_png_paeth_prediction= ff_add_png_paeth_prediction; #endif - c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_c; - c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_c; - c->h264_v_loop_filter_luma_intra= h264_v_loop_filter_luma_intra_c; - c->h264_h_loop_filter_luma_intra= h264_h_loop_filter_luma_intra_c; - c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_c; - c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_c; - c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_c; - c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_c; - c->h264_loop_filter_strength= NULL; - if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) { c->h263_h_loop_filter= h263_h_loop_filter_c; c->h263_v_loop_filter= h263_v_loop_filter_c; @@ -4814,6 +4468,7 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) if (CONFIG_VP3_DECODER) { c->vp3_h_loop_filter= ff_vp3_h_loop_filter_c; c->vp3_v_loop_filter= ff_vp3_v_loop_filter_c; + c->vp3_idct_dc_add= ff_vp3_idct_dc_add_c; } if (CONFIG_VP6_DECODER) { c->vp6_filter_diag4= ff_vp6_filter_diag4_c; @@ 
-4824,12 +4479,6 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->try_8x8basis= try_8x8basis_c; c->add_8x8basis= add_8x8basis_c; -#if CONFIG_SNOW_DECODER - c->vertical_compose97i = ff_snow_vertical_compose97i; - c->horizontal_compose97i = ff_snow_horizontal_compose97i; - c->inner_add_yblock = ff_snow_inner_add_yblock; -#endif - #if CONFIG_VORBIS_DECODER c->vorbis_inverse_coupling = vorbis_inverse_coupling; #endif @@ -4886,6 +4535,16 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->avg_2tap_qpel_pixels_tab[0][i]= c->avg_h264_qpel_pixels_tab[0][i]; } + c->put_rv30_tpel_pixels_tab[0][0] = c->put_h264_qpel_pixels_tab[0][0]; + c->put_rv30_tpel_pixels_tab[1][0] = c->put_h264_qpel_pixels_tab[1][0]; + c->avg_rv30_tpel_pixels_tab[0][0] = c->avg_h264_qpel_pixels_tab[0][0]; + c->avg_rv30_tpel_pixels_tab[1][0] = c->avg_h264_qpel_pixels_tab[1][0]; + + c->put_rv40_qpel_pixels_tab[0][0] = c->put_h264_qpel_pixels_tab[0][0]; + c->put_rv40_qpel_pixels_tab[1][0] = c->put_h264_qpel_pixels_tab[1][0]; + c->avg_rv40_qpel_pixels_tab[0][0] = c->avg_h264_qpel_pixels_tab[0][0]; + c->avg_rv40_qpel_pixels_tab[1][0] = c->avg_h264_qpel_pixels_tab[1][0]; + switch(c->idct_permutation_type){ case FF_NO_IDCT_PERM: for(i=0; i<64; i++)