X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=common%2Fdct.c;h=2816ce0dee18342952ffcb58781d0553da740d85;hb=d7ccd89f1bea53c8c524f8e6eb963d57defb6813;hp=3a2d9161ec5e46b9dab59b02ba95fe4d28bb7d61;hpb=205a032c22467c90c26d33ed9ab23d60461e57c1;p=x264 diff --git a/common/dct.c b/common/dct.c index 3a2d9161..2816ce0d 100644 --- a/common/dct.c +++ b/common/dct.c @@ -1,10 +1,11 @@ /***************************************************************************** - * dct.c: h264 encoder library + * dct.c: transform and zigzag ***************************************************************************** - * Copyright (C) 2003-2008 x264 project + * Copyright (C) 2003-2015 x264 project * * Authors: Loren Merritt * Laurent Aimar + * Henrik Gramner * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,142 +20,234 @@ * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. + * + * This program is also available under a commercial proprietary license. + * For more information, contact us at licensing@x264.com. *****************************************************************************/ #include "common.h" -#ifdef HAVE_MMX +#if HAVE_MMX # include "x86/dct.h" #endif -#ifdef ARCH_PPC +#if ARCH_PPC # include "ppc/dct.h" #endif +#if ARCH_ARM +# include "arm/dct.h" +#endif +#if ARCH_AARCH64 +# include "aarch64/dct.h" +#endif -int x264_dct4_weight2_zigzag[2][16]; -int x264_dct8_weight2_zigzag[2][64]; - -/* - * XXX For all dct dc : input could be equal to output so ... - */ - -static void dct4x4dc( int16_t d[4][4] ) +/* the inverse of the scaling factors introduced by 8x8 fdct */ +/* uint32 is for the asm implementation of trellis. the actual values fit in uint16. */ +#define W(i) (i==0 ? FIX8(1.0000) :\ + i==1 ? FIX8(0.8859) :\ + i==2 ? FIX8(1.6000) :\ + i==3 ? FIX8(0.9415) :\ + i==4 ? FIX8(1.2651) :\ + i==5 ? FIX8(1.1910) :0) +const uint32_t x264_dct8_weight_tab[64] = { + W(0), W(3), W(4), W(3), W(0), W(3), W(4), W(3), + W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1), + W(4), W(5), W(2), W(5), W(4), W(5), W(2), W(5), + W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1), + + W(0), W(3), W(4), W(3), W(0), W(3), W(4), W(3), + W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1), + W(4), W(5), W(2), W(5), W(4), W(5), W(2), W(5), + W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1) +}; +#undef W + +#define W(i) (i==0 ? FIX8(1.76777) :\ + i==1 ? FIX8(1.11803) :\ + i==2 ? FIX8(0.70711) :0) +const uint32_t x264_dct4_weight_tab[16] = { + W(0), W(1), W(0), W(1), + W(1), W(2), W(1), W(2), + W(0), W(1), W(0), W(1), + W(1), W(2), W(1), W(2) +}; +#undef W + +/* inverse squared */ +#define W(i) (i==0 ? FIX8(3.125) :\ + i==1 ? FIX8(1.25) :\ + i==2 ? FIX8(0.5) :0) +const uint32_t x264_dct4_weight2_tab[16] = { + W(0), W(1), W(0), W(1), + W(1), W(2), W(1), W(2), + W(0), W(1), W(0), W(1), + W(1), W(2), W(1), W(2) +}; +#undef W + +#define W(i) (i==0 ? FIX8(1.00000) :\ + i==1 ? FIX8(0.78487) :\ + i==2 ? FIX8(2.56132) :\ + i==3 ? FIX8(0.88637) :\ + i==4 ? FIX8(1.60040) :\ + i==5 ? 
FIX8(1.41850) :0) +const uint32_t x264_dct8_weight2_tab[64] = { + W(0), W(3), W(4), W(3), W(0), W(3), W(4), W(3), + W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1), + W(4), W(5), W(2), W(5), W(4), W(5), W(2), W(5), + W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1), + + W(0), W(3), W(4), W(3), W(0), W(3), W(4), W(3), + W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1), + W(4), W(5), W(2), W(5), W(4), W(5), W(2), W(5), + W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1) +}; +#undef W + + +static void dct4x4dc( dctcoef d[16] ) { - int16_t tmp[4][4]; - int s01, s23; - int d01, d23; - int i; + dctcoef tmp[16]; - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { - s01 = d[i][0] + d[i][1]; - d01 = d[i][0] - d[i][1]; - s23 = d[i][2] + d[i][3]; - d23 = d[i][2] - d[i][3]; - - tmp[0][i] = s01 + s23; - tmp[1][i] = s01 - s23; - tmp[2][i] = d01 - d23; - tmp[3][i] = d01 + d23; + int s01 = d[i*4+0] + d[i*4+1]; + int d01 = d[i*4+0] - d[i*4+1]; + int s23 = d[i*4+2] + d[i*4+3]; + int d23 = d[i*4+2] - d[i*4+3]; + + tmp[0*4+i] = s01 + s23; + tmp[1*4+i] = s01 - s23; + tmp[2*4+i] = d01 - d23; + tmp[3*4+i] = d01 + d23; } - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { - s01 = tmp[i][0] + tmp[i][1]; - d01 = tmp[i][0] - tmp[i][1]; - s23 = tmp[i][2] + tmp[i][3]; - d23 = tmp[i][2] - tmp[i][3]; - - d[i][0] = ( s01 + s23 + 1 ) >> 1; - d[i][1] = ( s01 - s23 + 1 ) >> 1; - d[i][2] = ( d01 - d23 + 1 ) >> 1; - d[i][3] = ( d01 + d23 + 1 ) >> 1; + int s01 = tmp[i*4+0] + tmp[i*4+1]; + int d01 = tmp[i*4+0] - tmp[i*4+1]; + int s23 = tmp[i*4+2] + tmp[i*4+3]; + int d23 = tmp[i*4+2] - tmp[i*4+3]; + + d[i*4+0] = ( s01 + s23 + 1 ) >> 1; + d[i*4+1] = ( s01 - s23 + 1 ) >> 1; + d[i*4+2] = ( d01 - d23 + 1 ) >> 1; + d[i*4+3] = ( d01 + d23 + 1 ) >> 1; } } -static void idct4x4dc( int16_t d[4][4] ) +static void idct4x4dc( dctcoef d[16] ) { - int16_t tmp[4][4]; - int s01, s23; - int d01, d23; - int i; + dctcoef tmp[16]; - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { - s01 = d[i][0] + d[i][1]; - d01 = d[i][0] - d[i][1]; - s23 = d[i][2] + d[i][3]; - d23 = d[i][2] - d[i][3]; - - tmp[0][i] = s01 + s23; - tmp[1][i] = s01 - s23; - tmp[2][i] = d01 - d23; - tmp[3][i] = d01 + d23; + int s01 = d[i*4+0] + d[i*4+1]; + int d01 = d[i*4+0] - d[i*4+1]; + int s23 = d[i*4+2] + d[i*4+3]; + int d23 = d[i*4+2] - d[i*4+3]; + + tmp[0*4+i] = s01 + s23; + tmp[1*4+i] = s01 - s23; + tmp[2*4+i] = d01 - d23; + tmp[3*4+i] = d01 + d23; } - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { - s01 = tmp[i][0] + tmp[i][1]; - d01 = tmp[i][0] - tmp[i][1]; - s23 = tmp[i][2] + tmp[i][3]; - d23 = tmp[i][2] - tmp[i][3]; - - d[i][0] = s01 + s23; - d[i][1] = s01 - s23; - d[i][2] = d01 - d23; - d[i][3] = d01 + d23; + int s01 = tmp[i*4+0] + tmp[i*4+1]; + int d01 = tmp[i*4+0] - tmp[i*4+1]; + int s23 = tmp[i*4+2] + tmp[i*4+3]; + int d23 = tmp[i*4+2] - tmp[i*4+3]; + + d[i*4+0] = s01 + s23; + d[i*4+1] = s01 - s23; + d[i*4+2] = d01 - d23; + d[i*4+3] = d01 + d23; } } -static inline void pixel_sub_wxh( int16_t *diff, int i_size, - uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 ) +static void dct2x4dc( dctcoef dct[8], dctcoef dct4x4[8][16] ) +{ + int a0 = dct4x4[0][0] + dct4x4[1][0]; + int a1 = dct4x4[2][0] + dct4x4[3][0]; + int a2 = dct4x4[4][0] + dct4x4[5][0]; + int a3 = dct4x4[6][0] + dct4x4[7][0]; + int a4 = dct4x4[0][0] - dct4x4[1][0]; + int a5 = dct4x4[2][0] - dct4x4[3][0]; + int a6 = dct4x4[4][0] - dct4x4[5][0]; + int a7 = dct4x4[6][0] - dct4x4[7][0]; + int b0 = a0 + a1; + int b1 = a2 + a3; + int b2 = a4 + a5; + int b3 = a6 + a7; + int b4 = a0 - a1; 
+ int b5 = a2 - a3; + int b6 = a4 - a5; + int b7 = a6 - a7; + dct[0] = b0 + b1; + dct[1] = b2 + b3; + dct[2] = b0 - b1; + dct[3] = b2 - b3; + dct[4] = b4 - b5; + dct[5] = b6 - b7; + dct[6] = b4 + b5; + dct[7] = b6 + b7; + dct4x4[0][0] = 0; + dct4x4[1][0] = 0; + dct4x4[2][0] = 0; + dct4x4[3][0] = 0; + dct4x4[4][0] = 0; + dct4x4[5][0] = 0; + dct4x4[6][0] = 0; + dct4x4[7][0] = 0; +} + +static inline void pixel_sub_wxh( dctcoef *diff, int i_size, + pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 ) { - int y, x; - for( y = 0; y < i_size; y++ ) + for( int y = 0; y < i_size; y++ ) { - for( x = 0; x < i_size; x++ ) - { + for( int x = 0; x < i_size; x++ ) diff[x + y*i_size] = pix1[x] - pix2[x]; - } pix1 += i_pix1; pix2 += i_pix2; } } -static void sub4x4_dct( int16_t dct[4][4], uint8_t *pix1, uint8_t *pix2 ) +static void sub4x4_dct( dctcoef dct[16], pixel *pix1, pixel *pix2 ) { - int16_t d[4][4]; - int16_t tmp[4][4]; - int i; + dctcoef d[16]; + dctcoef tmp[16]; - pixel_sub_wxh( (int16_t*)d, 4, pix1, FENC_STRIDE, pix2, FDEC_STRIDE ); + pixel_sub_wxh( d, 4, pix1, FENC_STRIDE, pix2, FDEC_STRIDE ); - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { - const int s03 = d[i][0] + d[i][3]; - const int s12 = d[i][1] + d[i][2]; - const int d03 = d[i][0] - d[i][3]; - const int d12 = d[i][1] - d[i][2]; - - tmp[0][i] = s03 + s12; - tmp[1][i] = 2*d03 + d12; - tmp[2][i] = s03 - s12; - tmp[3][i] = d03 - 2*d12; + int s03 = d[i*4+0] + d[i*4+3]; + int s12 = d[i*4+1] + d[i*4+2]; + int d03 = d[i*4+0] - d[i*4+3]; + int d12 = d[i*4+1] - d[i*4+2]; + + tmp[0*4+i] = s03 + s12; + tmp[1*4+i] = 2*d03 + d12; + tmp[2*4+i] = s03 - s12; + tmp[3*4+i] = d03 - 2*d12; } - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { - const int s03 = tmp[i][0] + tmp[i][3]; - const int s12 = tmp[i][1] + tmp[i][2]; - const int d03 = tmp[i][0] - tmp[i][3]; - const int d12 = tmp[i][1] - tmp[i][2]; - - dct[i][0] = s03 + s12; - dct[i][1] = 2*d03 + d12; - dct[i][2] = s03 - s12; - dct[i][3] = d03 - 2*d12; + int s03 = tmp[i*4+0] + tmp[i*4+3]; + int s12 = tmp[i*4+1] + tmp[i*4+2]; + int d03 = tmp[i*4+0] - tmp[i*4+3]; + int d12 = tmp[i*4+1] - tmp[i*4+2]; + + dct[i*4+0] = s03 + s12; + dct[i*4+1] = 2*d03 + d12; + dct[i*4+2] = s03 - s12; + dct[i*4+3] = d03 - 2*d12; } } -static void sub8x8_dct( int16_t dct[4][4][4], uint8_t *pix1, uint8_t *pix2 ) +static void sub8x8_dct( dctcoef dct[4][16], pixel *pix1, pixel *pix2 ) { sub4x4_dct( dct[0], &pix1[0], &pix2[0] ); sub4x4_dct( dct[1], &pix1[4], &pix2[4] ); @@ -162,7 +255,7 @@ static void sub8x8_dct( int16_t dct[4][4][4], uint8_t *pix1, uint8_t *pix2 ) sub4x4_dct( dct[3], &pix1[4*FENC_STRIDE+4], &pix2[4*FDEC_STRIDE+4] ); } -static void sub16x16_dct( int16_t dct[16][4][4], uint8_t *pix1, uint8_t *pix2 ) +static void sub16x16_dct( dctcoef dct[16][16], pixel *pix1, pixel *pix2 ) { sub8x8_dct( &dct[ 0], &pix1[0], &pix2[0] ); sub8x8_dct( &dct[ 4], &pix1[8], &pix2[8] ); @@ -170,74 +263,112 @@ static void sub16x16_dct( int16_t dct[16][4][4], uint8_t *pix1, uint8_t *pix2 ) sub8x8_dct( &dct[12], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] ); } -static int sub4x4_dct_dc( uint8_t *pix1, uint8_t *pix2 ) +static int sub4x4_dct_dc( pixel *pix1, pixel *pix2 ) { - int16_t d[4][4]; int sum = 0; - - pixel_sub_wxh( (int16_t*)d, 4, pix1, FENC_STRIDE, pix2, FDEC_STRIDE ); - - sum += d[0][0] + d[0][1] + d[0][2] + d[0][3]; - sum += d[1][0] + d[1][1] + d[1][2] + d[1][3]; - sum += d[2][0] + d[2][1] + d[2][2] + d[2][3]; - sum += d[3][0] + d[3][1] + d[3][2] + d[3][3]; - + for( int i=0; i<4; i++, pix1 += FENC_STRIDE, pix2 += 
FDEC_STRIDE ) + sum += pix1[0] + pix1[1] + pix1[2] + pix1[3] + - pix2[0] - pix2[1] - pix2[2] - pix2[3]; return sum; } -static void sub8x8_dct_dc( int16_t dct[2][2], uint8_t *pix1, uint8_t *pix2 ) +static void sub8x8_dct_dc( dctcoef dct[4], pixel *pix1, pixel *pix2 ) { - dct[0][0] = sub4x4_dct_dc( &pix1[0], &pix2[0] ); - dct[0][1] = sub4x4_dct_dc( &pix1[4], &pix2[4] ); - dct[1][0] = sub4x4_dct_dc( &pix1[4*FENC_STRIDE+0], &pix2[4*FDEC_STRIDE+0] ); - dct[1][1] = sub4x4_dct_dc( &pix1[4*FENC_STRIDE+4], &pix2[4*FDEC_STRIDE+4] ); + dct[0] = sub4x4_dct_dc( &pix1[0], &pix2[0] ); + dct[1] = sub4x4_dct_dc( &pix1[4], &pix2[4] ); + dct[2] = sub4x4_dct_dc( &pix1[4*FENC_STRIDE+0], &pix2[4*FDEC_STRIDE+0] ); + dct[3] = sub4x4_dct_dc( &pix1[4*FENC_STRIDE+4], &pix2[4*FDEC_STRIDE+4] ); + + /* 2x2 DC transform */ + int d0 = dct[0] + dct[1]; + int d1 = dct[2] + dct[3]; + int d2 = dct[0] - dct[1]; + int d3 = dct[2] - dct[3]; + dct[0] = d0 + d1; + dct[1] = d0 - d1; + dct[2] = d2 + d3; + dct[3] = d2 - d3; } -static void add4x4_idct( uint8_t *p_dst, int16_t dct[4][4] ) +static void sub8x16_dct_dc( dctcoef dct[8], pixel *pix1, pixel *pix2 ) { - int16_t d[4][4]; - int16_t tmp[4][4]; - int x, y; - int i; + int a0 = sub4x4_dct_dc( &pix1[ 0*FENC_STRIDE+0], &pix2[ 0*FDEC_STRIDE+0] ); + int a1 = sub4x4_dct_dc( &pix1[ 0*FENC_STRIDE+4], &pix2[ 0*FDEC_STRIDE+4] ); + int a2 = sub4x4_dct_dc( &pix1[ 4*FENC_STRIDE+0], &pix2[ 4*FDEC_STRIDE+0] ); + int a3 = sub4x4_dct_dc( &pix1[ 4*FENC_STRIDE+4], &pix2[ 4*FDEC_STRIDE+4] ); + int a4 = sub4x4_dct_dc( &pix1[ 8*FENC_STRIDE+0], &pix2[ 8*FDEC_STRIDE+0] ); + int a5 = sub4x4_dct_dc( &pix1[ 8*FENC_STRIDE+4], &pix2[ 8*FDEC_STRIDE+4] ); + int a6 = sub4x4_dct_dc( &pix1[12*FENC_STRIDE+0], &pix2[12*FDEC_STRIDE+0] ); + int a7 = sub4x4_dct_dc( &pix1[12*FENC_STRIDE+4], &pix2[12*FDEC_STRIDE+4] ); + + /* 2x4 DC transform */ + int b0 = a0 + a1; + int b1 = a2 + a3; + int b2 = a4 + a5; + int b3 = a6 + a7; + int b4 = a0 - a1; + int b5 = a2 - a3; + int b6 = a4 - a5; + int b7 = a6 - a7; + a0 = b0 + b1; + a1 = b2 + b3; + a2 = b4 + b5; + a3 = b6 + b7; + a4 = b0 - b1; + a5 = b2 - b3; + a6 = b4 - b5; + a7 = b6 - b7; + dct[0] = a0 + a1; + dct[1] = a2 + a3; + dct[2] = a0 - a1; + dct[3] = a2 - a3; + dct[4] = a4 - a5; + dct[5] = a6 - a7; + dct[6] = a4 + a5; + dct[7] = a6 + a7; +} - for( i = 0; i < 4; i++ ) - { - const int s02 = dct[0][i] + dct[2][i]; - const int d02 = dct[0][i] - dct[2][i]; - const int s13 = dct[1][i] + (dct[3][i]>>1); - const int d13 = (dct[1][i]>>1) - dct[3][i]; +static void add4x4_idct( pixel *p_dst, dctcoef dct[16] ) +{ + dctcoef d[16]; + dctcoef tmp[16]; - tmp[i][0] = s02 + s13; - tmp[i][1] = d02 + d13; - tmp[i][2] = d02 - d13; - tmp[i][3] = s02 - s13; + for( int i = 0; i < 4; i++ ) + { + int s02 = dct[0*4+i] + dct[2*4+i]; + int d02 = dct[0*4+i] - dct[2*4+i]; + int s13 = dct[1*4+i] + (dct[3*4+i]>>1); + int d13 = (dct[1*4+i]>>1) - dct[3*4+i]; + + tmp[i*4+0] = s02 + s13; + tmp[i*4+1] = d02 + d13; + tmp[i*4+2] = d02 - d13; + tmp[i*4+3] = s02 - s13; } - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { - const int s02 = tmp[0][i] + tmp[2][i]; - const int d02 = tmp[0][i] - tmp[2][i]; - const int s13 = tmp[1][i] + (tmp[3][i]>>1); - const int d13 = (tmp[1][i]>>1) - tmp[3][i]; - - d[0][i] = ( s02 + s13 + 32 ) >> 6; - d[1][i] = ( d02 + d13 + 32 ) >> 6; - d[2][i] = ( d02 - d13 + 32 ) >> 6; - d[3][i] = ( s02 - s13 + 32 ) >> 6; + int s02 = tmp[0*4+i] + tmp[2*4+i]; + int d02 = tmp[0*4+i] - tmp[2*4+i]; + int s13 = tmp[1*4+i] + (tmp[3*4+i]>>1); + int d13 = (tmp[1*4+i]>>1) - tmp[3*4+i]; + + d[0*4+i] 
= ( s02 + s13 + 32 ) >> 6; + d[1*4+i] = ( d02 + d13 + 32 ) >> 6; + d[2*4+i] = ( d02 - d13 + 32 ) >> 6; + d[3*4+i] = ( s02 - s13 + 32 ) >> 6; } - for( y = 0; y < 4; y++ ) + for( int y = 0; y < 4; y++ ) { - for( x = 0; x < 4; x++ ) - { - p_dst[x] = x264_clip_uint8( p_dst[x] + d[y][x] ); - } + for( int x = 0; x < 4; x++ ) + p_dst[x] = x264_clip_pixel( p_dst[x] + d[y*4+x] ); p_dst += FDEC_STRIDE; } } -static void add8x8_idct( uint8_t *p_dst, int16_t dct[4][4][4] ) +static void add8x8_idct( pixel *p_dst, dctcoef dct[4][16] ) { add4x4_idct( &p_dst[0], dct[0] ); add4x4_idct( &p_dst[4], dct[1] ); @@ -245,7 +376,7 @@ static void add8x8_idct( uint8_t *p_dst, int16_t dct[4][4][4] ) add4x4_idct( &p_dst[4*FDEC_STRIDE+4], dct[3] ); } -static void add16x16_idct( uint8_t *p_dst, int16_t dct[16][4][4] ) +static void add16x16_idct( pixel *p_dst, dctcoef dct[16][16] ) { add8x8_idct( &p_dst[0], &dct[0] ); add8x8_idct( &p_dst[8], &dct[4] ); @@ -258,22 +389,22 @@ static void add16x16_idct( uint8_t *p_dst, int16_t dct[16][4][4] ) ****************************************************************************/ #define DCT8_1D {\ - const int s07 = SRC(0) + SRC(7);\ - const int s16 = SRC(1) + SRC(6);\ - const int s25 = SRC(2) + SRC(5);\ - const int s34 = SRC(3) + SRC(4);\ - const int a0 = s07 + s34;\ - const int a1 = s16 + s25;\ - const int a2 = s07 - s34;\ - const int a3 = s16 - s25;\ - const int d07 = SRC(0) - SRC(7);\ - const int d16 = SRC(1) - SRC(6);\ - const int d25 = SRC(2) - SRC(5);\ - const int d34 = SRC(3) - SRC(4);\ - const int a4 = d16 + d25 + (d07 + (d07>>1));\ - const int a5 = d07 - d34 - (d25 + (d25>>1));\ - const int a6 = d07 + d34 - (d16 + (d16>>1));\ - const int a7 = d16 - d25 + (d34 + (d34>>1));\ + int s07 = SRC(0) + SRC(7);\ + int s16 = SRC(1) + SRC(6);\ + int s25 = SRC(2) + SRC(5);\ + int s34 = SRC(3) + SRC(4);\ + int a0 = s07 + s34;\ + int a1 = s16 + s25;\ + int a2 = s07 - s34;\ + int a3 = s16 - s25;\ + int d07 = SRC(0) - SRC(7);\ + int d16 = SRC(1) - SRC(6);\ + int d25 = SRC(2) - SRC(5);\ + int d34 = SRC(3) - SRC(4);\ + int a4 = d16 + d25 + (d07 + (d07>>1));\ + int a5 = d07 - d34 - (d25 + (d25>>1));\ + int a6 = d07 + d34 - (d16 + (d16>>1));\ + int a7 = d16 - d25 + (d34 + (d34>>1));\ DST(0) = a0 + a1 ;\ DST(1) = a4 + (a7>>2);\ DST(2) = a2 + (a3>>1);\ @@ -284,29 +415,28 @@ static void add16x16_idct( uint8_t *p_dst, int16_t dct[16][4][4] ) DST(7) = (a4>>2) - a7 ;\ } -static void sub8x8_dct8( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 ) +static void sub8x8_dct8( dctcoef dct[64], pixel *pix1, pixel *pix2 ) { - int i; - int16_t tmp[8][8]; + dctcoef tmp[64]; - pixel_sub_wxh( (int16_t*)tmp, 8, pix1, FENC_STRIDE, pix2, FDEC_STRIDE ); + pixel_sub_wxh( tmp, 8, pix1, FENC_STRIDE, pix2, FDEC_STRIDE ); -#define SRC(x) tmp[x][i] -#define DST(x) tmp[x][i] - for( i = 0; i < 8; i++ ) +#define SRC(x) tmp[x*8+i] +#define DST(x) tmp[x*8+i] + for( int i = 0; i < 8; i++ ) DCT8_1D #undef SRC #undef DST -#define SRC(x) tmp[i][x] -#define DST(x) dct[x][i] - for( i = 0; i < 8; i++ ) +#define SRC(x) tmp[i*8+x] +#define DST(x) dct[x*8+i] + for( int i = 0; i < 8; i++ ) DCT8_1D #undef SRC #undef DST } -static void sub16x16_dct8( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *pix2 ) +static void sub16x16_dct8( dctcoef dct[4][64], pixel *pix1, pixel *pix2 ) { sub8x8_dct8( dct[0], &pix1[0], &pix2[0] ); sub8x8_dct8( dct[1], &pix1[8], &pix2[8] ); @@ -315,22 +445,22 @@ static void sub16x16_dct8( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *pix2 ) } #define IDCT8_1D {\ - const int a0 = SRC(0) + SRC(4);\ - const int a2 = SRC(0) 
- SRC(4);\ - const int a4 = (SRC(2)>>1) - SRC(6);\ - const int a6 = (SRC(6)>>1) + SRC(2);\ - const int b0 = a0 + a6;\ - const int b2 = a2 + a4;\ - const int b4 = a2 - a4;\ - const int b6 = a0 - a6;\ - const int a1 = -SRC(3) + SRC(5) - SRC(7) - (SRC(7)>>1);\ - const int a3 = SRC(1) + SRC(7) - SRC(3) - (SRC(3)>>1);\ - const int a5 = -SRC(1) + SRC(7) + SRC(5) + (SRC(5)>>1);\ - const int a7 = SRC(3) + SRC(5) + SRC(1) + (SRC(1)>>1);\ - const int b1 = (a7>>2) + a1;\ - const int b3 = a3 + (a5>>2);\ - const int b5 = (a3>>2) - a5;\ - const int b7 = a7 - (a1>>2);\ + int a0 = SRC(0) + SRC(4);\ + int a2 = SRC(0) - SRC(4);\ + int a4 = (SRC(2)>>1) - SRC(6);\ + int a6 = (SRC(6)>>1) + SRC(2);\ + int b0 = a0 + a6;\ + int b2 = a2 + a4;\ + int b4 = a2 - a4;\ + int b6 = a0 - a6;\ + int a1 = -SRC(3) + SRC(5) - SRC(7) - (SRC(7)>>1);\ + int a3 = SRC(1) + SRC(7) - SRC(3) - (SRC(3)>>1);\ + int a5 = -SRC(1) + SRC(7) + SRC(5) + (SRC(5)>>1);\ + int a7 = SRC(3) + SRC(5) + SRC(1) + (SRC(1)>>1);\ + int b1 = (a7>>2) + a1;\ + int b3 = a3 + (a5>>2);\ + int b5 = (a3>>2) - a5;\ + int b7 = a7 - (a1>>2);\ DST(0, b0 + b7);\ DST(1, b2 + b5);\ DST(2, b4 + b3);\ @@ -341,28 +471,26 @@ static void sub16x16_dct8( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *pix2 ) DST(7, b0 - b7);\ } -static void add8x8_idct8( uint8_t *dst, int16_t dct[8][8] ) +static void add8x8_idct8( pixel *dst, dctcoef dct[64] ) { - int i; - - dct[0][0] += 32; // rounding for the >>6 at the end + dct[0] += 32; // rounding for the >>6 at the end -#define SRC(x) dct[x][i] -#define DST(x,rhs) dct[x][i] = (rhs) - for( i = 0; i < 8; i++ ) +#define SRC(x) dct[x*8+i] +#define DST(x,rhs) dct[x*8+i] = (rhs) + for( int i = 0; i < 8; i++ ) IDCT8_1D #undef SRC #undef DST -#define SRC(x) dct[i][x] -#define DST(x,rhs) dst[i + x*FDEC_STRIDE] = x264_clip_uint8( dst[i + x*FDEC_STRIDE] + ((rhs) >> 6) ); - for( i = 0; i < 8; i++ ) +#define SRC(x) dct[i*8+x] +#define DST(x,rhs) dst[i + x*FDEC_STRIDE] = x264_clip_pixel( dst[i + x*FDEC_STRIDE] + ((rhs) >> 6) ); + for( int i = 0; i < 8; i++ ) IDCT8_1D #undef SRC #undef DST } -static void add16x16_idct8( uint8_t *dst, int16_t dct[4][8][8] ) +static void add16x16_idct8( pixel *dst, dctcoef dct[4][64] ) { add8x8_idct8( &dst[0], dct[0] ); add8x8_idct8( &dst[8], dct[1] ); @@ -370,36 +498,34 @@ static void add16x16_idct8( uint8_t *dst, int16_t dct[4][8][8] ) add8x8_idct8( &dst[8*FDEC_STRIDE+8], dct[3] ); } -static void inline add4x4_idct_dc( uint8_t *p_dst, int16_t dc ) +static void inline add4x4_idct_dc( pixel *p_dst, dctcoef dc ) { - int i; dc = (dc + 32) >> 6; - for( i = 0; i < 4; i++, p_dst += FDEC_STRIDE ) + for( int i = 0; i < 4; i++, p_dst += FDEC_STRIDE ) { - p_dst[0] = x264_clip_uint8( p_dst[0] + dc ); - p_dst[1] = x264_clip_uint8( p_dst[1] + dc ); - p_dst[2] = x264_clip_uint8( p_dst[2] + dc ); - p_dst[3] = x264_clip_uint8( p_dst[3] + dc ); + p_dst[0] = x264_clip_pixel( p_dst[0] + dc ); + p_dst[1] = x264_clip_pixel( p_dst[1] + dc ); + p_dst[2] = x264_clip_pixel( p_dst[2] + dc ); + p_dst[3] = x264_clip_pixel( p_dst[3] + dc ); } } -static void add8x8_idct_dc( uint8_t *p_dst, int16_t dct[2][2] ) +static void add8x8_idct_dc( pixel *p_dst, dctcoef dct[4] ) { - add4x4_idct_dc( &p_dst[0], dct[0][0] ); - add4x4_idct_dc( &p_dst[4], dct[0][1] ); - add4x4_idct_dc( &p_dst[4*FDEC_STRIDE+0], dct[1][0] ); - add4x4_idct_dc( &p_dst[4*FDEC_STRIDE+4], dct[1][1] ); + add4x4_idct_dc( &p_dst[0], dct[0] ); + add4x4_idct_dc( &p_dst[4], dct[1] ); + add4x4_idct_dc( &p_dst[4*FDEC_STRIDE+0], dct[2] ); + add4x4_idct_dc( &p_dst[4*FDEC_STRIDE+4], dct[3] ); } 
-static void add16x16_idct_dc( uint8_t *p_dst, int16_t dct[4][4] ) +static void add16x16_idct_dc( pixel *p_dst, dctcoef dct[16] ) { - int i; - for( i = 0; i < 4; i++, p_dst += 4*FDEC_STRIDE ) + for( int i = 0; i < 4; i++, dct += 4, p_dst += 4*FDEC_STRIDE ) { - add4x4_idct_dc( &p_dst[ 0], dct[i][0] ); - add4x4_idct_dc( &p_dst[ 4], dct[i][1] ); - add4x4_idct_dc( &p_dst[ 8], dct[i][2] ); - add4x4_idct_dc( &p_dst[12], dct[i][3] ); + add4x4_idct_dc( &p_dst[ 0], dct[0] ); + add4x4_idct_dc( &p_dst[ 4], dct[1] ); + add4x4_idct_dc( &p_dst[ 8], dct[2] ); + add4x4_idct_dc( &p_dst[12], dct[3] ); } } @@ -417,6 +543,8 @@ void x264_dct_init( int cpu, x264_dct_function_t *dctf ) dctf->add8x8_idct = add8x8_idct; dctf->add8x8_idct_dc = add8x8_idct_dc; + dctf->sub8x16_dct_dc = sub8x16_dct_dc; + dctf->sub16x16_dct = sub16x16_dct; dctf->add16x16_idct = add16x16_idct; dctf->add16x16_idct_dc = add16x16_idct_dc; @@ -430,18 +558,63 @@ void x264_dct_init( int cpu, x264_dct_function_t *dctf ) dctf->dct4x4dc = dct4x4dc; dctf->idct4x4dc = idct4x4dc; -#ifdef HAVE_MMX + dctf->dct2x4dc = dct2x4dc; + +#if HIGH_BIT_DEPTH +#if HAVE_MMX + if( cpu&X264_CPU_MMX ) + { + dctf->sub4x4_dct = x264_sub4x4_dct_mmx; + dctf->sub8x8_dct = x264_sub8x8_dct_mmx; + dctf->sub16x16_dct = x264_sub16x16_dct_mmx; + } + if( cpu&X264_CPU_SSE2 ) + { + dctf->add4x4_idct = x264_add4x4_idct_sse2; + dctf->dct4x4dc = x264_dct4x4dc_sse2; + dctf->idct4x4dc = x264_idct4x4dc_sse2; + dctf->sub8x8_dct8 = x264_sub8x8_dct8_sse2; + dctf->sub16x16_dct8 = x264_sub16x16_dct8_sse2; + dctf->add8x8_idct = x264_add8x8_idct_sse2; + dctf->add16x16_idct = x264_add16x16_idct_sse2; + dctf->add8x8_idct8 = x264_add8x8_idct8_sse2; + dctf->add16x16_idct8 = x264_add16x16_idct8_sse2; + dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_sse2; + dctf->add8x8_idct_dc = x264_add8x8_idct_dc_sse2; + dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_sse2; + dctf->add16x16_idct_dc= x264_add16x16_idct_dc_sse2; + } + if( cpu&X264_CPU_SSE4 ) + { + dctf->sub8x8_dct8 = x264_sub8x8_dct8_sse4; + dctf->sub16x16_dct8 = x264_sub16x16_dct8_sse4; + } + if( cpu&X264_CPU_AVX ) + { + dctf->add4x4_idct = x264_add4x4_idct_avx; + dctf->dct4x4dc = x264_dct4x4dc_avx; + dctf->idct4x4dc = x264_idct4x4dc_avx; + dctf->sub8x8_dct8 = x264_sub8x8_dct8_avx; + dctf->sub16x16_dct8 = x264_sub16x16_dct8_avx; + dctf->add8x8_idct = x264_add8x8_idct_avx; + dctf->add16x16_idct = x264_add16x16_idct_avx; + dctf->add8x8_idct8 = x264_add8x8_idct8_avx; + dctf->add16x16_idct8 = x264_add16x16_idct8_avx; + dctf->add8x8_idct_dc = x264_add8x8_idct_dc_avx; + dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_avx; + dctf->add16x16_idct_dc= x264_add16x16_idct_dc_avx; + } +#endif // HAVE_MMX +#else // !HIGH_BIT_DEPTH +#if HAVE_MMX if( cpu&X264_CPU_MMX ) { dctf->sub4x4_dct = x264_sub4x4_dct_mmx; dctf->add4x4_idct = x264_add4x4_idct_mmx; - dctf->add8x8_idct_dc = x264_add8x8_idct_dc_mmx; - dctf->add16x16_idct_dc = x264_add16x16_idct_dc_mmx; - dctf->dct4x4dc = x264_dct4x4dc_mmx; dctf->idct4x4dc = x264_idct4x4dc_mmx; - dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_mmxext; + dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_mmx2; -#ifndef ARCH_X86_64 +#if !ARCH_X86_64 dctf->sub8x8_dct = x264_sub8x8_dct_mmx; dctf->sub16x16_dct = x264_sub16x16_dct_mmx; dctf->add8x8_idct = x264_add8x8_idct_mmx; @@ -454,34 +627,87 @@ void x264_dct_init( int cpu, x264_dct_function_t *dctf ) #endif } + if( cpu&X264_CPU_MMX2 ) + { + dctf->dct4x4dc = x264_dct4x4dc_mmx2; + dctf->add8x8_idct_dc = x264_add8x8_idct_dc_mmx2; + dctf->add16x16_idct_dc = x264_add16x16_idct_dc_mmx2; + } + if( cpu&X264_CPU_SSE2 ) { 
dctf->sub8x8_dct8 = x264_sub8x8_dct8_sse2; dctf->sub16x16_dct8 = x264_sub16x16_dct8_sse2; dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_sse2; + dctf->sub8x16_dct_dc= x264_sub8x16_dct_dc_sse2; dctf->add8x8_idct8 = x264_add8x8_idct8_sse2; dctf->add16x16_idct8= x264_add16x16_idct8_sse2; - dctf->sub8x8_dct = x264_sub8x8_dct_sse2; - dctf->sub16x16_dct = x264_sub16x16_dct_sse2; - dctf->add8x8_idct = x264_add8x8_idct_sse2; - dctf->add16x16_idct = x264_add16x16_idct_sse2; - dctf->add16x16_idct_dc = x264_add16x16_idct_dc_sse2; + if( !(cpu&X264_CPU_SSE2_IS_SLOW) ) + { + dctf->sub8x8_dct = x264_sub8x8_dct_sse2; + dctf->sub16x16_dct = x264_sub16x16_dct_sse2; + dctf->add8x8_idct = x264_add8x8_idct_sse2; + dctf->add16x16_idct = x264_add16x16_idct_sse2; + dctf->add16x16_idct_dc = x264_add16x16_idct_dc_sse2; + } } - if( cpu&X264_CPU_SSSE3 ) + if( (cpu&X264_CPU_SSSE3) && !(cpu&X264_CPU_SSE2_IS_SLOW) ) { - dctf->sub4x4_dct = x264_sub4x4_dct_ssse3; - dctf->sub8x8_dct = x264_sub8x8_dct_ssse3; - dctf->sub16x16_dct = x264_sub16x16_dct_ssse3; - dctf->sub8x8_dct8 = x264_sub8x8_dct8_ssse3; - dctf->sub16x16_dct8 = x264_sub16x16_dct8_ssse3; - dctf->add8x8_idct_dc = x264_add8x8_idct_dc_ssse3; - dctf->add16x16_idct_dc = x264_add16x16_idct_dc_ssse3; + dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_ssse3; + if( !(cpu&X264_CPU_SLOW_ATOM) ) + { + dctf->sub4x4_dct = x264_sub4x4_dct_ssse3; + dctf->sub8x8_dct = x264_sub8x8_dct_ssse3; + dctf->sub16x16_dct = x264_sub16x16_dct_ssse3; + dctf->sub8x8_dct8 = x264_sub8x8_dct8_ssse3; + dctf->sub16x16_dct8 = x264_sub16x16_dct8_ssse3; + if( !(cpu&X264_CPU_SLOW_PSHUFB) ) + { + dctf->add8x8_idct_dc = x264_add8x8_idct_dc_ssse3; + dctf->add16x16_idct_dc = x264_add16x16_idct_dc_ssse3; + } + } + } + + if( cpu&X264_CPU_SSE4 ) + dctf->add4x4_idct = x264_add4x4_idct_sse4; + + if( cpu&X264_CPU_AVX ) + { + dctf->add4x4_idct = x264_add4x4_idct_avx; + dctf->add8x8_idct = x264_add8x8_idct_avx; + dctf->add16x16_idct = x264_add16x16_idct_avx; + dctf->add8x8_idct8 = x264_add8x8_idct8_avx; + dctf->add16x16_idct8 = x264_add16x16_idct8_avx; + dctf->add16x16_idct_dc = x264_add16x16_idct_dc_avx; + dctf->sub8x8_dct = x264_sub8x8_dct_avx; + dctf->sub16x16_dct = x264_sub16x16_dct_avx; + dctf->sub8x8_dct8 = x264_sub8x8_dct8_avx; + dctf->sub16x16_dct8 = x264_sub16x16_dct8_avx; + } + + if( cpu&X264_CPU_XOP ) + { + dctf->sub8x8_dct = x264_sub8x8_dct_xop; + dctf->sub16x16_dct = x264_sub16x16_dct_xop; + } + + if( cpu&X264_CPU_AVX2 ) + { + dctf->add8x8_idct = x264_add8x8_idct_avx2; + dctf->add16x16_idct = x264_add16x16_idct_avx2; + dctf->sub8x8_dct = x264_sub8x8_dct_avx2; + dctf->sub16x16_dct = x264_sub16x16_dct_avx2; + dctf->add16x16_idct_dc = x264_add16x16_idct_dc_avx2; +#if ARCH_X86_64 + dctf->sub16x16_dct8 = x264_sub16x16_dct8_avx2; +#endif } #endif //HAVE_MMX -#ifdef ARCH_PPC +#if HAVE_ALTIVEC if( cpu&X264_CPU_ALTIVEC ) { dctf->sub4x4_dct = x264_sub4x4_dct_altivec; @@ -499,23 +725,38 @@ void x264_dct_init( int cpu, x264_dct_function_t *dctf ) dctf->add16x16_idct8= x264_add16x16_idct8_altivec; } #endif -} -void x264_dct_init_weights( void ) -{ - int i, j; - for( j=0; j<2; j++ ) +#if HAVE_ARMV6 || ARCH_AARCH64 + if( cpu&X264_CPU_NEON ) { - for( i=0; i<16; i++ ) - x264_dct4_weight2_zigzag[j][i] = x264_dct4_weight2_tab[ x264_zigzag_scan4[j][i] ]; - for( i=0; i<64; i++ ) - x264_dct8_weight2_zigzag[j][i] = x264_dct8_weight2_tab[ x264_zigzag_scan8[j][i] ]; + dctf->sub4x4_dct = x264_sub4x4_dct_neon; + dctf->sub8x8_dct = x264_sub8x8_dct_neon; + dctf->sub16x16_dct = x264_sub16x16_dct_neon; + dctf->add8x8_idct_dc = 
x264_add8x8_idct_dc_neon; + dctf->add16x16_idct_dc = x264_add16x16_idct_dc_neon; + dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_neon; + dctf->dct4x4dc = x264_dct4x4dc_neon; + dctf->idct4x4dc = x264_idct4x4dc_neon; + + dctf->add4x4_idct = x264_add4x4_idct_neon; + dctf->add8x8_idct = x264_add8x8_idct_neon; + dctf->add16x16_idct = x264_add16x16_idct_neon; + + dctf->sub8x8_dct8 = x264_sub8x8_dct8_neon; + dctf->sub16x16_dct8 = x264_sub16x16_dct8_neon; + + dctf->add8x8_idct8 = x264_add8x8_idct8_neon; + dctf->add16x16_idct8= x264_add16x16_idct8_neon; +#if ARCH_AARCH64 + dctf->sub8x16_dct_dc= x264_sub8x16_dct_dc_neon; +#endif } +#endif +#endif // HIGH_BIT_DEPTH } -// gcc pessimizes multi-dimensional arrays here, even with constant indices -#define ZIG(i,y,x) level[i] = dct[0][x*8+y]; +#define ZIG(i,y,x) level[i] = dct[x*8+y]; #define ZIGZAG8_FRAME\ ZIG( 0,0,0) ZIG( 1,0,1) ZIG( 2,1,0) ZIG( 3,2,0)\ ZIG( 4,1,1) ZIG( 5,0,2) ZIG( 6,0,3) ZIG( 7,1,2)\ @@ -553,42 +794,41 @@ void x264_dct_init_weights( void ) ZIG(60,4,7) ZIG(61,5,7) ZIG(62,6,7) ZIG(63,7,7) #define ZIGZAG4_FRAME\ - ZIG( 0,0,0) ZIG( 1,0,1) ZIG( 2,1,0) ZIG( 3,2,0)\ + ZIGDC( 0,0,0) ZIG( 1,0,1) ZIG( 2,1,0) ZIG( 3,2,0)\ ZIG( 4,1,1) ZIG( 5,0,2) ZIG( 6,0,3) ZIG( 7,1,2)\ ZIG( 8,2,1) ZIG( 9,3,0) ZIG(10,3,1) ZIG(11,2,2)\ ZIG(12,1,3) ZIG(13,2,3) ZIG(14,3,2) ZIG(15,3,3) #define ZIGZAG4_FIELD\ - ZIG( 0,0,0) ZIG( 1,1,0) ZIG( 2,0,1) ZIG( 3,2,0)\ + ZIGDC( 0,0,0) ZIG( 1,1,0) ZIG( 2,0,1) ZIG( 3,2,0)\ ZIG( 4,3,0) ZIG( 5,1,1) ZIG( 6,2,1) ZIG( 7,3,1)\ ZIG( 8,0,2) ZIG( 9,1,2) ZIG(10,2,2) ZIG(11,3,2)\ ZIG(12,0,3) ZIG(13,1,3) ZIG(14,2,3) ZIG(15,3,3) -static void zigzag_scan_8x8_frame( int16_t level[64], int16_t dct[8][8] ) +static void zigzag_scan_8x8_frame( dctcoef level[64], dctcoef dct[64] ) { ZIGZAG8_FRAME } -static void zigzag_scan_8x8_field( int16_t level[64], int16_t dct[8][8] ) +static void zigzag_scan_8x8_field( dctcoef level[64], dctcoef dct[64] ) { ZIGZAG8_FIELD } #undef ZIG -#define ZIG(i,y,x) level[i] = dct[0][x*4+y]; +#define ZIG(i,y,x) level[i] = dct[x*4+y]; +#define ZIGDC(i,y,x) ZIG(i,y,x) -static void zigzag_scan_4x4_frame( int16_t level[16], int16_t dct[4][4] ) +static void zigzag_scan_4x4_frame( dctcoef level[16], dctcoef dct[16] ) { ZIGZAG4_FRAME } -static void zigzag_scan_4x4_field( int16_t level[16], int16_t dct[4][4] ) +static void zigzag_scan_4x4_field( dctcoef level[16], dctcoef dct[16] ) { - *(uint32_t*)level = *(uint32_t*)dct; + memcpy( level, dct, 2 * sizeof(dctcoef) ); ZIG(2,0,1) ZIG(3,2,0) ZIG(4,3,0) ZIG(5,1,1) - *(uint32_t*)(level+6) = *(uint32_t*)(*dct+6); - *(uint64_t*)(level+8) = *(uint64_t*)(*dct+8); - *(uint64_t*)(level+12) = *(uint64_t*)(*dct+12); + memcpy( level+6, dct+6, 10 * sizeof(dctcoef) ); } #undef ZIG @@ -596,55 +836,88 @@ static void zigzag_scan_4x4_field( int16_t level[16], int16_t dct[4][4] ) int oe = x+y*FENC_STRIDE;\ int od = x+y*FDEC_STRIDE;\ level[i] = p_src[oe] - p_dst[od];\ + nz |= level[i];\ } #define COPY4x4\ - *(uint32_t*)(p_dst+0*FDEC_STRIDE) = *(uint32_t*)(p_src+0*FENC_STRIDE);\ - *(uint32_t*)(p_dst+1*FDEC_STRIDE) = *(uint32_t*)(p_src+1*FENC_STRIDE);\ - *(uint32_t*)(p_dst+2*FDEC_STRIDE) = *(uint32_t*)(p_src+2*FENC_STRIDE);\ - *(uint32_t*)(p_dst+3*FDEC_STRIDE) = *(uint32_t*)(p_src+3*FENC_STRIDE); + CPPIXEL_X4( p_dst+0*FDEC_STRIDE, p_src+0*FENC_STRIDE );\ + CPPIXEL_X4( p_dst+1*FDEC_STRIDE, p_src+1*FENC_STRIDE );\ + CPPIXEL_X4( p_dst+2*FDEC_STRIDE, p_src+2*FENC_STRIDE );\ + CPPIXEL_X4( p_dst+3*FDEC_STRIDE, p_src+3*FENC_STRIDE ); +#define CPPIXEL_X8(dst,src) ( CPPIXEL_X4(dst,src), CPPIXEL_X4(dst+4,src+4) ) #define 
COPY8x8\ - *(uint64_t*)(p_dst+0*FDEC_STRIDE) = *(uint64_t*)(p_src+0*FENC_STRIDE);\ - *(uint64_t*)(p_dst+1*FDEC_STRIDE) = *(uint64_t*)(p_src+1*FENC_STRIDE);\ - *(uint64_t*)(p_dst+2*FDEC_STRIDE) = *(uint64_t*)(p_src+2*FENC_STRIDE);\ - *(uint64_t*)(p_dst+3*FDEC_STRIDE) = *(uint64_t*)(p_src+3*FENC_STRIDE);\ - *(uint64_t*)(p_dst+4*FDEC_STRIDE) = *(uint64_t*)(p_src+4*FENC_STRIDE);\ - *(uint64_t*)(p_dst+5*FDEC_STRIDE) = *(uint64_t*)(p_src+5*FENC_STRIDE);\ - *(uint64_t*)(p_dst+6*FDEC_STRIDE) = *(uint64_t*)(p_src+6*FENC_STRIDE);\ - *(uint64_t*)(p_dst+7*FDEC_STRIDE) = *(uint64_t*)(p_src+7*FENC_STRIDE); + CPPIXEL_X8( p_dst+0*FDEC_STRIDE, p_src+0*FENC_STRIDE );\ + CPPIXEL_X8( p_dst+1*FDEC_STRIDE, p_src+1*FENC_STRIDE );\ + CPPIXEL_X8( p_dst+2*FDEC_STRIDE, p_src+2*FENC_STRIDE );\ + CPPIXEL_X8( p_dst+3*FDEC_STRIDE, p_src+3*FENC_STRIDE );\ + CPPIXEL_X8( p_dst+4*FDEC_STRIDE, p_src+4*FENC_STRIDE );\ + CPPIXEL_X8( p_dst+5*FDEC_STRIDE, p_src+5*FENC_STRIDE );\ + CPPIXEL_X8( p_dst+6*FDEC_STRIDE, p_src+6*FENC_STRIDE );\ + CPPIXEL_X8( p_dst+7*FDEC_STRIDE, p_src+7*FENC_STRIDE ); + +static int zigzag_sub_4x4_frame( dctcoef level[16], const pixel *p_src, pixel *p_dst ) +{ + int nz = 0; + ZIGZAG4_FRAME + COPY4x4 + return !!nz; +} + +static int zigzag_sub_4x4_field( dctcoef level[16], const pixel *p_src, pixel *p_dst ) +{ + int nz = 0; + ZIGZAG4_FIELD + COPY4x4 + return !!nz; +} -static void zigzag_sub_4x4_frame( int16_t level[16], const uint8_t *p_src, uint8_t *p_dst ) +#undef ZIGDC +#define ZIGDC(i,y,x) {\ + int oe = x+y*FENC_STRIDE;\ + int od = x+y*FDEC_STRIDE;\ + *dc = p_src[oe] - p_dst[od];\ + level[0] = 0;\ +} + +static int zigzag_sub_4x4ac_frame( dctcoef level[16], const pixel *p_src, pixel *p_dst, dctcoef *dc ) { + int nz = 0; ZIGZAG4_FRAME COPY4x4 + return !!nz; } -static void zigzag_sub_4x4_field( int16_t level[16], const uint8_t *p_src, uint8_t *p_dst ) +static int zigzag_sub_4x4ac_field( dctcoef level[16], const pixel *p_src, pixel *p_dst, dctcoef *dc ) { + int nz = 0; ZIGZAG4_FIELD COPY4x4 + return !!nz; } -static void zigzag_sub_8x8_frame( int16_t level[64], const uint8_t *p_src, uint8_t *p_dst ) +static int zigzag_sub_8x8_frame( dctcoef level[64], const pixel *p_src, pixel *p_dst ) { + int nz = 0; ZIGZAG8_FRAME COPY8x8 + return !!nz; } -static void zigzag_sub_8x8_field( int16_t level[64], const uint8_t *p_src, uint8_t *p_dst ) +static int zigzag_sub_8x8_field( dctcoef level[64], const pixel *p_src, pixel *p_dst ) { + int nz = 0; ZIGZAG8_FIELD COPY8x8 + return !!nz; } #undef ZIG #undef COPY4x4 -static void zigzag_interleave_8x8_cavlc( int16_t *dst, int16_t *src, uint8_t *nnz ) +static void zigzag_interleave_8x8_cavlc( dctcoef *dst, dctcoef *src, uint8_t *nnz ) { - int i,j; - for( i=0; i<4; i++ ) + for( int i = 0; i < 4; i++ ) { int nz = 0; - for( j=0; j<16; j++ ) + for( int j = 0; j < 16; j++ ) { nz |= src[i+j*4]; dst[i*16+j] = src[i+j*4]; @@ -653,57 +926,150 @@ static void zigzag_interleave_8x8_cavlc( int16_t *dst, int16_t *src, uint8_t *nn } } -void x264_zigzag_init( int cpu, x264_zigzag_function_t *pf, int b_interlaced ) +void x264_zigzag_init( int cpu, x264_zigzag_function_t *pf_progressive, x264_zigzag_function_t *pf_interlaced ) { - if( b_interlaced ) + pf_interlaced->scan_8x8 = zigzag_scan_8x8_field; + pf_progressive->scan_8x8 = zigzag_scan_8x8_frame; + pf_interlaced->scan_4x4 = zigzag_scan_4x4_field; + pf_progressive->scan_4x4 = zigzag_scan_4x4_frame; + pf_interlaced->sub_8x8 = zigzag_sub_8x8_field; + pf_progressive->sub_8x8 = zigzag_sub_8x8_frame; + pf_interlaced->sub_4x4 = zigzag_sub_4x4_field; 
+ pf_progressive->sub_4x4 = zigzag_sub_4x4_frame; + pf_interlaced->sub_4x4ac = zigzag_sub_4x4ac_field; + pf_progressive->sub_4x4ac = zigzag_sub_4x4ac_frame; + +#if HIGH_BIT_DEPTH +#if HAVE_MMX + if( cpu&X264_CPU_SSE2 ) { - pf->scan_8x8 = zigzag_scan_8x8_field; - pf->scan_4x4 = zigzag_scan_4x4_field; - pf->sub_8x8 = zigzag_sub_8x8_field; - pf->sub_4x4 = zigzag_sub_4x4_field; -#ifdef HAVE_MMX - if( cpu&X264_CPU_MMXEXT ) - pf->scan_4x4 = x264_zigzag_scan_4x4_field_mmxext; -#endif - -#ifdef ARCH_PPC - if( cpu&X264_CPU_ALTIVEC ) - pf->scan_4x4 = x264_zigzag_scan_4x4_field_altivec; -#endif + pf_interlaced->scan_4x4 = x264_zigzag_scan_4x4_field_sse2; + pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_sse2; + pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_sse2; } - else - { - pf->scan_8x8 = zigzag_scan_8x8_frame; - pf->scan_4x4 = zigzag_scan_4x4_frame; - pf->sub_8x8 = zigzag_sub_8x8_frame; - pf->sub_4x4 = zigzag_sub_4x4_frame; -#ifdef HAVE_MMX - if( cpu&X264_CPU_MMX ) - pf->scan_4x4 = x264_zigzag_scan_4x4_frame_mmx; - if( cpu&X264_CPU_MMXEXT ) - pf->scan_8x8 = x264_zigzag_scan_8x8_frame_mmxext; - if( cpu&X264_CPU_SSE2_IS_FAST ) - pf->scan_8x8 = x264_zigzag_scan_8x8_frame_sse2; - if( cpu&X264_CPU_SSSE3 ) - { - pf->sub_4x4 = x264_zigzag_sub_4x4_frame_ssse3; - pf->scan_8x8 = x264_zigzag_scan_8x8_frame_ssse3; - if( cpu&X264_CPU_SHUFFLE_IS_FAST ) - pf->scan_4x4 = x264_zigzag_scan_4x4_frame_ssse3; - } + if( cpu&X264_CPU_SSE4 ) + pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_sse4; + if( cpu&X264_CPU_AVX ) + pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_avx; +#if ARCH_X86_64 + if( cpu&X264_CPU_AVX ) + { + pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_avx; + pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_avx; + } +#endif // ARCH_X86_64 +#endif // HAVE_MMX +#else +#if HAVE_MMX + if( cpu&X264_CPU_MMX ) + pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_mmx; + if( cpu&X264_CPU_MMX2 ) + { + pf_interlaced->scan_4x4 = x264_zigzag_scan_4x4_field_mmx2; + pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_mmx2; + pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_mmx2; + } + if( cpu&X264_CPU_SSE2_IS_FAST ) + pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_sse2; + if( cpu&X264_CPU_SSSE3 ) + { + pf_interlaced->sub_4x4 = x264_zigzag_sub_4x4_field_ssse3; + pf_progressive->sub_4x4 = x264_zigzag_sub_4x4_frame_ssse3; + pf_interlaced->sub_4x4ac = x264_zigzag_sub_4x4ac_field_ssse3; + pf_progressive->sub_4x4ac= x264_zigzag_sub_4x4ac_frame_ssse3; + pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_ssse3; + if( !(cpu&X264_CPU_SLOW_SHUFFLE) ) + pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_ssse3; + } + if( cpu&X264_CPU_AVX ) + { + pf_interlaced->sub_4x4 = x264_zigzag_sub_4x4_field_avx; + pf_progressive->sub_4x4 = x264_zigzag_sub_4x4_frame_avx; +#if ARCH_X86_64 + pf_interlaced->sub_4x4ac = x264_zigzag_sub_4x4ac_field_avx; + pf_progressive->sub_4x4ac= x264_zigzag_sub_4x4ac_frame_avx; #endif - -#ifdef ARCH_PPC - if( cpu&X264_CPU_ALTIVEC ) - pf->scan_4x4 = x264_zigzag_scan_4x4_frame_altivec; + pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_avx; + } + if( cpu&X264_CPU_XOP ) + { + pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_xop; + pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_xop; + pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_xop; + } +#endif // HAVE_MMX +#if HAVE_ALTIVEC + if( cpu&X264_CPU_ALTIVEC ) + { + pf_interlaced->scan_4x4 = x264_zigzag_scan_4x4_field_altivec; + pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_altivec; + 
} #endif +#if HAVE_ARMV6 || ARCH_AARCH64 + if( cpu&X264_CPU_NEON ) + { + pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_neon; +#if ARCH_AARCH64 + pf_interlaced->scan_4x4 = x264_zigzag_scan_4x4_field_neon; + pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_neon; + pf_interlaced->sub_4x4 = x264_zigzag_sub_4x4_field_neon; + pf_interlaced->sub_4x4ac = x264_zigzag_sub_4x4ac_field_neon; + pf_interlaced->sub_8x8 = x264_zigzag_sub_8x8_field_neon; + pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_neon; + pf_progressive->sub_4x4 = x264_zigzag_sub_4x4_frame_neon; + pf_progressive->sub_4x4ac = x264_zigzag_sub_4x4ac_frame_neon; + pf_progressive->sub_8x8 = x264_zigzag_sub_8x8_frame_neon; +#endif // ARCH_AARCH64 } +#endif // HAVE_ARMV6 || ARCH_AARCH64 +#endif // HIGH_BIT_DEPTH - pf->interleave_8x8_cavlc = zigzag_interleave_8x8_cavlc; -#ifdef HAVE_MMX + pf_interlaced->interleave_8x8_cavlc = + pf_progressive->interleave_8x8_cavlc = zigzag_interleave_8x8_cavlc; +#if HAVE_MMX +#if HIGH_BIT_DEPTH + if( cpu&X264_CPU_SSE2 ) + { + pf_interlaced->interleave_8x8_cavlc = + pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_sse2; + } + if( cpu&X264_CPU_AVX ) + { + pf_interlaced->interleave_8x8_cavlc = + pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx; + } +#else if( cpu&X264_CPU_MMX ) - pf->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_mmx; - if( cpu&X264_CPU_SHUFFLE_IS_FAST ) - pf->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_sse2; + { + pf_interlaced->interleave_8x8_cavlc = + pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_mmx; + } + if( (cpu&X264_CPU_SSE2) && !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SSE2_IS_SLOW)) ) + { + pf_interlaced->interleave_8x8_cavlc = + pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_sse2; + } + + if( cpu&X264_CPU_AVX ) + { + pf_interlaced->interleave_8x8_cavlc = + pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx; + } + + if( cpu&X264_CPU_AVX2 ) + { + pf_interlaced->interleave_8x8_cavlc = + pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx2; + } +#endif // HIGH_BIT_DEPTH #endif +#if !HIGH_BIT_DEPTH +#if ARCH_AARCH64 + if( cpu&X264_CPU_NEON ) + { + pf_interlaced->interleave_8x8_cavlc = + pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_neon; + } +#endif // ARCH_AARCH64 +#endif // !HIGH_BIT_DEPTH }
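
A note on the new DC transforms: the "2x2 DC transform" butterfly added to
sub8x8_dct_dc (and the 2x4 variant in dct2x4dc and sub8x16_dct_dc) is a
Hadamard transform, which is its own inverse up to a power-of-two scale.
A minimal standalone check, not part of the patch (main() and the test
values are arbitrary):

    #include <assert.h>
    #include <stdio.h>

    /* Same butterfly as the "2x2 DC transform" in sub8x8_dct_dc. */
    static void hadamard2x2( int d[4] )
    {
        int d0 = d[0] + d[1];
        int d1 = d[2] + d[3];
        int d2 = d[0] - d[1];
        int d3 = d[2] - d[3];
        d[0] = d0 + d1;
        d[1] = d0 - d1;
        d[2] = d2 + d3;
        d[3] = d2 - d3;
    }

    int main( void )
    {
        int dc[4]  = { 17, -3, 42, 8 };  /* arbitrary DC values */
        int ref[4] = { 17, -3, 42, 8 };

        hadamard2x2( dc );  /* forward, as in sub8x8_dct_dc */
        hadamard2x2( dc );  /* applying it again undoes it, scaled by 4 */

        for( int i = 0; i < 4; i++ )
            assert( dc[i] == 4*ref[i] );
        printf( "2x2 DC butterfly is self-inverse up to a factor of 4\n" );
        return 0;
    }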
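
On how these tables are consumed: x264_dct_init fills an x264_dct_function_t
with the best implementation for the given CPU flags, and callers only ever go
through the function pointers, which is what makes the C references above and
the MMX/SSE2/SSSE3/SSE4/AVX/XOP/AVX2, AltiVec and NEON versions
interchangeable. Note also that the x264_dct4_weight2_tab entries are the
element-wise squares of the x264_dct4_weight_tab entries (3.125 = 1.76777^2,
1.25 = 1.11803^2, 0.5 = 0.70711^2), matching the "inverse squared" comment.
A usage sketch, assuming x264's internal headers: x264_cpu_detect,
FENC_STRIDE/FDEC_STRIDE and ALIGNED_ARRAY_16 come from the x264 tree, and
transform_block is a name invented for this example:

    #include "common/common.h"

    static void transform_block( pixel *fenc, pixel *fdec )
    {
        x264_dct_function_t dctf;
        x264_dct_init( x264_cpu_detect(), &dctf );

        ALIGNED_ARRAY_16( dctcoef, dct,[16] );

        /* fenc/fdec are assumed to use FENC_STRIDE/FDEC_STRIDE, as the
         * transform functions hardcode those strides. */
        dctf.sub4x4_dct( dct, fenc, fdec );

        /* In the real encoder, quantization and dequantization sit between
         * these two calls; they supply the scale factors the integer
         * transform omits, which is what the weight tables above describe. */
        dctf.add4x4_idct( fdec, dct );
    }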
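
Finally, zigzag_interleave_8x8_cavlc: it splits the 64 coefficients of an
8x8 block into four interleaved 16-coefficient lists so CAVLC, which only
codes 4x4 scans, can handle 8x8 transforms, and it records a nonzero flag
per list. A standalone sketch of the reference loop; the diff elides the
tail of the function between hunks, so the final nnz store here is an
assumption, and main() with its test pattern is invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef int16_t dctcoef;  /* 8-bit depth; HIGH_BIT_DEPTH uses a wider type */

    static void interleave_8x8_cavlc( dctcoef *dst, dctcoef *src, uint8_t *nnz )
    {
        for( int i = 0; i < 4; i++ )
        {
            int nz = 0;
            for( int j = 0; j < 16; j++ )
            {
                nz |= src[i+j*4];
                dst[i*16+j] = src[i+j*4];
            }
            nnz[i] = !!nz;  /* assumed: the elided tail reduces nz to a 0/1 flag */
        }
    }

    int main( void )
    {
        dctcoef src[64], dst[64];
        uint8_t nnz[4];

        for( int i = 0; i < 64; i++ )
            src[i] = (i % 4 == 1) ? 7 : 0;  /* put nonzeros in list 1 only */

        interleave_8x8_cavlc( dst, src, nnz );
        printf( "nnz: %d %d %d %d\n", nnz[0], nnz[1], nnz[2], nnz[3] );  /* 0 1 0 0 */
        return 0;
    }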