git.sesse.net Git - ffmpeg/commitdiff
Merge commit '8d27bf1cff35be406b0fd89d832e1852d4c573bc'
author Michael Niedermayer <michaelni@gmx.at>
Wed, 27 Aug 2014 19:20:39 +0000 (21:20 +0200)
committer Michael Niedermayer <michaelni@gmx.at>
Wed, 27 Aug 2014 19:20:39 +0000 (21:20 +0200)
* commit '8d27bf1cff35be406b0fd89d832e1852d4c573bc':
  x86: xvid: K&R formatting cosmetics

Conflicts:
libavcodec/x86/xvididct_sse2.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
libavcodec/x86/xvididct_mmx.c
libavcodec/x86/xvididct_sse2.c

index 55ad1df54fb41ea67e7f85ab31d1c3eb069c4280,e371142974cd9cc238041bb254557117ee61510e..f4bb39fe91dce6277a45c150024144c82a279887
   *
   * conversion to gcc syntax by Michael Niedermayer
   *
 - * This file is part of Libav.
 + * This file is part of FFmpeg.
   *
 - * Libav is free software; you can redistribute it and/or
 + * FFmpeg is free software; you can redistribute it and/or
   * modify it under the terms of the GNU Lesser General Public
   * License as published by the Free Software Foundation; either
   * version 2.1 of the License, or (at your option) any later version.
   *
 - * Libav is distributed in the hope that it will be useful,
 + * FFmpeg is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * Lesser General Public License for more details.
   *
   * You should have received a copy of the GNU Lesser General Public License
 - * along with Libav; if not, write to the Free Software Foundation,
 + * along with FFmpeg; if not, write to the Free Software Foundation,
   * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
  #include <inttypes.h>
  
  #include "config.h"
- #include "libavcodec/avcodec.h"
  #include "libavutil/mem.h"
  
+ #include "libavcodec/avcodec.h"
  #include "idctdsp.h"
  #include "xvididct.h"
  
  #if HAVE_MMX_INLINE
  
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  // Various memory constants (trigonometric values or rounding values)
- //-----------------------------------------------------------------------------
- DECLARE_ALIGNED(8, static const int16_t, tg_1_16)[4*4] = {
-   13036,13036,13036,13036,        // tg * (2<<16) + 0.5
-   27146,27146,27146,27146,        // tg * (2<<16) + 0.5
-   -21746,-21746,-21746,-21746,    // tg * (2<<16) + 0.5
-   23170,23170,23170,23170};       // cos * (2<<15) + 0.5
- DECLARE_ALIGNED(8, static const int32_t, rounder_0)[2*8] = {
-   65536,65536,
-   3597,3597,
-   2260,2260,
-   1203,1203,
-   0,0,
-   120,120,
-   512,512,
-   512,512};
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
+ DECLARE_ALIGNED(8, static const int16_t, tg_1_16)[4 * 4] = {
+      13036,  13036,  13036,  13036, // tg * (2 << 16) + 0.5
+      27146,  27146,  27146,  27146, // tg * (2 << 16) + 0.5
+     -21746, -21746, -21746, -21746, // tg * (2 << 16) + 0.5
+      23170,  23170,  23170,  23170
+ };                                  // cos * (2 << 15) + 0.5
+ DECLARE_ALIGNED(8, static const int32_t, rounder_0)[2 * 8] = {
+     65536, 65536,
+     3597,   3597,
+     2260,   2260,
+     1203,   1203,
+     0,         0,
+     120,     120,
+     512,     512,
+     512, 512
+ };
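
The fixed-point scalings here are easiest to read as powers of two: despite the "(2 << 16)" wording in the comments, the stored values match tan * 2^16 (and cos * 2^15 for the last row), i.e. 13036 = round(tan(pi/16) * 65536) and 23170 = round(cos(pi/4) * 32768). A minimal standalone C check (illustrative only, not part of the patch context):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        printf("%d\n", (int) lrint(tan(M_PI / 16) * 65536));     // 13036
        printf("%d\n", (int) lrint(tan(2 * M_PI / 16) * 65536)); // 27146
        // 43790 wraps to -21746 when stored in int16_t:
        printf("%d\n", (int16_t) lrint(tan(3 * M_PI / 16) * 65536));
        printf("%d\n", (int) lrint(cos(M_PI / 4) * 32768));      // 23170
        return 0;
    }
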
+ // -----------------------------------------------------------------------------
  //
  // The first stage iDCT 8x8 - inverse DCTs of rows
  //
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  // The 8-point inverse DCT direct algorithm
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  //
  // static const short w[32] = {
- //       FIX(cos_4_16),  FIX(cos_2_16),  FIX(cos_4_16),  FIX(cos_6_16),
- //       FIX(cos_4_16),  FIX(cos_6_16), -FIX(cos_4_16), -FIX(cos_2_16),
- //       FIX(cos_4_16), -FIX(cos_6_16), -FIX(cos_4_16),  FIX(cos_2_16),
- //       FIX(cos_4_16), -FIX(cos_2_16),  FIX(cos_4_16), -FIX(cos_6_16),
- //       FIX(cos_1_16),  FIX(cos_3_16),  FIX(cos_5_16),  FIX(cos_7_16),
- //       FIX(cos_3_16), -FIX(cos_7_16), -FIX(cos_1_16), -FIX(cos_5_16),
- //       FIX(cos_5_16), -FIX(cos_1_16),  FIX(cos_7_16),  FIX(cos_3_16),
- //       FIX(cos_7_16), -FIX(cos_5_16),  FIX(cos_3_16), -FIX(cos_1_16) };
+ //     FIX(cos_4_16),  FIX(cos_2_16),  FIX(cos_4_16),  FIX(cos_6_16),
+ //     FIX(cos_4_16),  FIX(cos_6_16), -FIX(cos_4_16), -FIX(cos_2_16),
+ //     FIX(cos_4_16), -FIX(cos_6_16), -FIX(cos_4_16),  FIX(cos_2_16),
+ //     FIX(cos_4_16), -FIX(cos_2_16),  FIX(cos_4_16), -FIX(cos_6_16),
+ //     FIX(cos_1_16),  FIX(cos_3_16),  FIX(cos_5_16),  FIX(cos_7_16),
+ //     FIX(cos_3_16), -FIX(cos_7_16), -FIX(cos_1_16), -FIX(cos_5_16),
+ //     FIX(cos_5_16), -FIX(cos_1_16),  FIX(cos_7_16),  FIX(cos_3_16),
+ //     FIX(cos_7_16), -FIX(cos_5_16),  FIX(cos_3_16), -FIX(cos_1_16) };
  //
  // #define DCT_8_INV_ROW(x, y)
  // {
- //       int a0, a1, a2, a3, b0, b1, b2, b3;
+ //     int a0, a1, a2, a3, b0, b1, b2, b3;
  //
- //       a0 =x[0]*w[0]+x[2]*w[1]+x[4]*w[2]+x[6]*w[3];
- //       a1 =x[0]*w[4]+x[2]*w[5]+x[4]*w[6]+x[6]*w[7];
- //       a2 = x[0] * w[ 8] + x[2] * w[ 9] + x[4] * w[10] + x[6] * w[11];
- //       a3 = x[0] * w[12] + x[2] * w[13] + x[4] * w[14] + x[6] * w[15];
- //       b0 = x[1] * w[16] + x[3] * w[17] + x[5] * w[18] + x[7] * w[19];
- //       b1 = x[1] * w[20] + x[3] * w[21] + x[5] * w[22] + x[7] * w[23];
- //       b2 = x[1] * w[24] + x[3] * w[25] + x[5] * w[26] + x[7] * w[27];
- //       b3 = x[1] * w[28] + x[3] * w[29] + x[5] * w[30] + x[7] * w[31];
+ //     a0 = x[0] * w[0]  + x[2] * w[1]  + x[4] * w[2]  + x[6] * w[3];
+ //     a1 = x[0] * w[4]  + x[2] * w[5]  + x[4] * w[6]  + x[6] * w[7];
+ //     a2 = x[0] * w[8]  + x[2] * w[9]  + x[4] * w[10] + x[6] * w[11];
+ //     a3 = x[0] * w[12] + x[2] * w[13] + x[4] * w[14] + x[6] * w[15];
+ //     b0 = x[1] * w[16] + x[3] * w[17] + x[5] * w[18] + x[7] * w[19];
+ //     b1 = x[1] * w[20] + x[3] * w[21] + x[5] * w[22] + x[7] * w[23];
+ //     b2 = x[1] * w[24] + x[3] * w[25] + x[5] * w[26] + x[7] * w[27];
+ //     b3 = x[1] * w[28] + x[3] * w[29] + x[5] * w[30] + x[7] * w[31];
  //
- //       y[0] = SHIFT_ROUND ( a0 + b0 );
- //       y[1] = SHIFT_ROUND ( a1 + b1 );
- //       y[2] = SHIFT_ROUND ( a2 + b2 );
- //       y[3] = SHIFT_ROUND ( a3 + b3 );
- //       y[4] = SHIFT_ROUND ( a3 - b3 );
- //       y[5] = SHIFT_ROUND ( a2 - b2 );
- //       y[6] = SHIFT_ROUND ( a1 - b1 );
- //       y[7] = SHIFT_ROUND ( a0 - b0 );
+ //     y[0] = SHIFT_ROUND(a0 + b0);
+ //     y[1] = SHIFT_ROUND(a1 + b1);
+ //     y[2] = SHIFT_ROUND(a2 + b2);
+ //     y[3] = SHIFT_ROUND(a3 + b3);
+ //     y[4] = SHIFT_ROUND(a3 - b3);
+ //     y[5] = SHIFT_ROUND(a2 - b2);
+ //     y[6] = SHIFT_ROUND(a1 - b1);
+ //     y[7] = SHIFT_ROUND(a0 - b0);
  // }
  //
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  //
  // In this implementation the outputs of the iDCT-1D are multiplied
- //       for rows 0,4 - by cos_4_16,
- //       for rows 1,7 - by cos_1_16,
- //       for rows 2,6 - by cos_2_16,
- //       for rows 3,5 - by cos_3_16
- // and are shifted to the left for better accuracy
+ //     for rows 0,4 - by cos_4_16,
+ //     for rows 1,7 - by cos_1_16,
+ //     for rows 2,6 - by cos_2_16,
+ //     for rows 3,5 - by cos_3_16
+ // and are shifted to the left for better accuracy.
  //
  // For the constants used,
- //       FIX(float_const) = (short) (float_const * (1<<15) + 0.5)
+ //     FIX(float_const) = (short) (float_const * (1 << 15) + 0.5)
  //
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
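
The commented row algorithm maps directly to scalar C. A minimal sketch, assuming SHIFT_ROUND(v) is an arithmetic shift right by 11, with the rounding bias folded into the rounder value that the assembly adds to the even sums. Note that the MMX tables below interleave the weights differently (as their "w06 w04 w02 w00" comments show); this reference uses the plain w[32] layout from the comment above:

    #include <stdint.h>

    static void dct_8_inv_row_ref(const int16_t *x, int16_t *y,
                                  const int16_t *w, int32_t rnd)
    {
        int32_t a[4], b[4];

        for (int i = 0; i < 4; i++) {
            a[i] = x[0] * w[4 * i]          + x[2] * w[4 * i + 1] +
                   x[4] * w[4 * i + 2]      + x[6] * w[4 * i + 3] + rnd;
            b[i] = x[1] * w[16 + 4 * i]     + x[3] * w[16 + 4 * i + 1] +
                   x[5] * w[16 + 4 * i + 2] + x[7] * w[16 + 4 * i + 3];
        }
        for (int i = 0; i < 4; i++) {
            y[i]     = (a[i] + b[i]) >> 11; // y0..y3 = SHIFT_ROUND(a + b)
            y[7 - i] = (a[i] - b[i]) >> 11; // y7..y4 = SHIFT_ROUND(a - b)
        }
    }
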
  
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  // Tables for mmx processors
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  
  // Table for rows 0,4 - constants are multiplied by cos_4_16
- DECLARE_ALIGNED(8, static const int16_t, tab_i_04_mmx)[32*4] = {
-   16384,16384,16384,-16384,       // movq-> w06 w04 w02 w00
-   21407,8867,8867,-21407,         // w07 w05 w03 w01
-   16384,-16384,16384,16384,       // w14 w12 w10 w08
-   -8867,21407,-21407,-8867,       // w15 w13 w11 w09
-   22725,12873,19266,-22725,       // w22 w20 w18 w16
-   19266,4520,-4520,-12873,        // w23 w21 w19 w17
-   12873,4520,4520,19266,          // w30 w28 w26 w24
-   -22725,19266,-12873,-22725,     // w31 w29 w27 w25
+ DECLARE_ALIGNED(8, static const int16_t, tab_i_04_mmx)[32 * 4] = {
+      16384,  16384,  16384, -16384, // movq-> w06 w04 w02 w00
+      21407,   8867,   8867, -21407, // w07 w05 w03 w01
+      16384, -16384,  16384,  16384, // w14 w12 w10 w08
+      -8867,  21407, -21407,  -8867, // w15 w13 w11 w09
+      22725,  12873,  19266, -22725, // w22 w20 w18 w16
+      19266,   4520,  -4520, -12873, // w23 w21 w19 w17
+      12873,   4520,   4520,  19266, // w30 w28 w26 w24
+     -22725,  19266, -12873, -22725, // w31 w29 w27 w25
  // Table for rows 1,7 - constants are multiplied by cos_1_16
-   22725,22725,22725,-22725,       // movq-> w06 w04 w02 w00
-   29692,12299,12299,-29692,       // w07 w05 w03 w01
-   22725,-22725,22725,22725,       // w14 w12 w10 w08
-   -12299,29692,-29692,-12299,     // w15 w13 w11 w09
-   31521,17855,26722,-31521,       // w22 w20 w18 w16
-   26722,6270,-6270,-17855,        // w23 w21 w19 w17
-   17855,6270,6270,26722,          // w30 w28 w26 w24
-   -31521,26722,-17855,-31521,     // w31 w29 w27 w25
+      22725,  22725,  22725, -22725, // movq-> w06 w04 w02 w00
+      29692,  12299,  12299, -29692, // w07 w05 w03 w01
+      22725, -22725,  22725,  22725, // w14 w12 w10 w08
+     -12299,  29692, -29692, -12299, // w15 w13 w11 w09
+      31521,  17855,  26722, -31521, // w22 w20 w18 w16
+      26722,   6270,  -6270, -17855, // w23 w21 w19 w17
+      17855,   6270,   6270,  26722, // w30 w28 w26 w24
+     -31521,  26722, -17855, -31521, // w31 w29 w27 w25
  // Table for rows 2,6 - constants are multiplied by cos_2_16
-   21407,21407,21407,-21407,       // movq-> w06 w04 w02 w00
-   27969,11585,11585,-27969,       // w07 w05 w03 w01
-   21407,-21407,21407,21407,       // w14 w12 w10 w08
-   -11585,27969,-27969,-11585,     // w15 w13 w11 w09
-   29692,16819,25172,-29692,       // w22 w20 w18 w16
-   25172,5906,-5906,-16819,        // w23 w21 w19 w17
-   16819,5906,5906,25172,          // w30 w28 w26 w24
-   -29692,25172,-16819,-29692,     // w31 w29 w27 w25
+      21407,  21407,  21407, -21407, // movq-> w06 w04 w02 w00
+      27969,  11585,  11585, -27969, // w07 w05 w03 w01
+      21407, -21407,  21407,  21407, // w14 w12 w10 w08
+     -11585,  27969, -27969, -11585, // w15 w13 w11 w09
+      29692,  16819,  25172, -29692, // w22 w20 w18 w16
+      25172,   5906,  -5906, -16819, // w23 w21 w19 w17
+      16819,   5906,   5906,  25172, // w30 w28 w26 w24
+     -29692,  25172, -16819, -29692, // w31 w29 w27 w25
  // Table for rows 3,5 - constants are multiplied by cos_3_16
-   19266,19266,19266,-19266,       // movq-> w06 w04 w02 w00
-   25172,10426,10426,-25172,       // w07 w05 w03 w01
-   19266,-19266,19266,19266,       // w14 w12 w10 w08
-   -10426,25172,-25172,-10426,     // w15 w13 w11 w09
-   26722,15137,22654,-26722,       // w22 w20 w18 w16
-   22654,5315,-5315,-15137,        // w23 w21 w19 w17
-   15137,5315,5315,22654,          // w30 w28 w26 w24
-   -26722,22654,-15137,-26722,     // w31 w29 w27 w25
+      19266,  19266,  19266, -19266, // movq-> w06 w04 w02 w00
+      25172,  10426,  10426, -25172, // w07 w05 w03 w01
+      19266, -19266,  19266,  19266, // w14 w12 w10 w08
+     -10426,  25172, -25172, -10426, // w15 w13 w11 w09
+      26722,  15137,  22654, -26722, // w22 w20 w18 w16
+      22654,   5315,  -5315, -15137, // w23 w21 w19 w17
+      15137,   5315,   5315,  22654, // w30 w28 w26 w24
+     -26722,  22654, -15137, -26722, // w31 w29 w27 w25
  };
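
The interleaved word order in these tables ("w06 w04 w02 w00" per movq load) exists so that pmaddwd can compute two dot products per instruction: it multiplies adjacent int16 pairs and sums each pair into an int32. A small sketch of that semantic, using the SSE2 intrinsic _mm_madd_epi16 in place of the MMX pmaddwd (values are arbitrary):

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        // Lanes listed low to high; a register dump displays them high to
        // low, which is where the "w06 w04 w02 w00" comments come from.
        __m128i w = _mm_setr_epi16(1, 2, 3, 4, 1, 2, 3, 4);
        __m128i x = _mm_setr_epi16(10, 20, 10, 20, 30, 40, 30, 40);
        __m128i r = _mm_madd_epi16(x, w);

        int32_t out[4];
        _mm_storeu_si128((__m128i *) out, r);
        // out[0] = 10*1 + 20*2, out[1] = 10*3 + 20*4, ...
        printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); // 50 110 110 250
        return 0;
    }
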
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  // Tables for xmm processors
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  
  // %3 for rows 0,4 - constants are multiplied by cos_4_16
- DECLARE_ALIGNED(8, static const int16_t, tab_i_04_xmm)[32*4] = {
-   16384,21407,16384,8867,      // movq-> w05 w04 w01 w00
-   16384,8867,-16384,-21407,    // w07 w06 w03 w02
-   16384,-8867,16384,-21407,    // w13 w12 w09 w08
-   -16384,21407,16384,-8867,    // w15 w14 w11 w10
-   22725,19266,19266,-4520,     // w21 w20 w17 w16
-   12873,4520,-22725,-12873,    // w23 w22 w19 w18
-   12873,-22725,4520,-12873,    // w29 w28 w25 w24
-   4520,19266,19266,-22725,     // w31 w30 w27 w26
+ DECLARE_ALIGNED(8, static const int16_t, tab_i_04_xmm)[32 * 4] = {
+      16384,  21407,  16384,   8867, // movq-> w05 w04 w01 w00
+      16384,   8867, -16384, -21407, // w07 w06 w03 w02
+      16384,  -8867,  16384, -21407, // w13 w12 w09 w08
+     -16384,  21407,  16384,  -8867, // w15 w14 w11 w10
+      22725,  19266,  19266,  -4520, // w21 w20 w17 w16
+      12873,   4520, -22725, -12873, // w23 w22 w19 w18
+      12873, -22725,   4520, -12873, // w29 w28 w25 w24
+       4520,  19266,  19266, -22725, // w31 w30 w27 w26
  // %3 for rows 1,7 - constants are multiplied by cos_1_16
-   22725,29692,22725,12299,     // movq-> w05 w04 w01 w00
-   22725,12299,-22725,-29692,   // w07 w06 w03 w02
-   22725,-12299,22725,-29692,   // w13 w12 w09 w08
-   -22725,29692,22725,-12299,   // w15 w14 w11 w10
-   31521,26722,26722,-6270,     // w21 w20 w17 w16
-   17855,6270,-31521,-17855,    // w23 w22 w19 w18
-   17855,-31521,6270,-17855,    // w29 w28 w25 w24
-   6270,26722,26722,-31521,     // w31 w30 w27 w26
+      22725,  29692,  22725,  12299, // movq-> w05 w04 w01 w00
+      22725,  12299, -22725, -29692, // w07 w06 w03 w02
+      22725, -12299,  22725, -29692, // w13 w12 w09 w08
+     -22725,  29692,  22725, -12299, // w15 w14 w11 w10
+      31521,  26722,  26722,  -6270, // w21 w20 w17 w16
+      17855,   6270, -31521, -17855, // w23 w22 w19 w18
+      17855, -31521,   6270, -17855, // w29 w28 w25 w24
+       6270,  26722,  26722, -31521, // w31 w30 w27 w26
  // %3 for rows 2,6 - constants are multiplied by cos_2_16
-   21407,27969,21407,11585,     // movq-> w05 w04 w01 w00
-   21407,11585,-21407,-27969,   // w07 w06 w03 w02
-   21407,-11585,21407,-27969,   // w13 w12 w09 w08
-   -21407,27969,21407,-11585,   // w15 w14 w11 w10
-   29692,25172,25172,-5906,     // w21 w20 w17 w16
-   16819,5906,-29692,-16819,    // w23 w22 w19 w18
-   16819,-29692,5906,-16819,    // w29 w28 w25 w24
-   5906,25172,25172,-29692,     // w31 w30 w27 w26
+      21407,  27969,  21407,  11585, // movq-> w05 w04 w01 w00
+      21407,  11585, -21407, -27969, // w07 w06 w03 w02
+      21407, -11585,  21407, -27969, // w13 w12 w09 w08
+     -21407,  27969,  21407, -11585, // w15 w14 w11 w10
+      29692,  25172,  25172,  -5906, // w21 w20 w17 w16
+      16819,   5906, -29692, -16819, // w23 w22 w19 w18
+      16819, -29692,   5906, -16819, // w29 w28 w25 w24
+       5906,  25172,  25172, -29692, // w31 w30 w27 w26
  // %3 for rows 3,5 - constants are multiplied by cos_3_16
-   19266,25172,19266,10426,     // movq-> w05 w04 w01 w00
-   19266,10426,-19266,-25172,   // w07 w06 w03 w02
-   19266,-10426,19266,-25172,   // w13 w12 w09 w08
-   -19266,25172,19266,-10426,   // w15 w14 w11 w10
-   26722,22654,22654,-5315,     // w21 w20 w17 w16
-   15137,5315,-26722,-15137,    // w23 w22 w19 w18
-   15137,-26722,5315,-15137,    // w29 w28 w25 w24
-   5315,22654,22654,-26722,     // w31 w30 w27 w26
+      19266,  25172,  19266,  10426, // movq-> w05 w04 w01 w00
+      19266,  10426, -19266, -25172, // w07 w06 w03 w02
+      19266, -10426,  19266, -25172, // w13 w12 w09 w08
+     -19266,  25172,  19266, -10426, // w15 w14 w11 w10
+      26722,  22654,  22654,  -5315, // w21 w20 w17 w16
+      15137,   5315, -26722, -15137, // w23 w22 w19 w18
+      15137, -26722,   5315, -15137, // w29 w28 w25 w24
+       5315,  22654,  22654, -26722, // w31 w30 w27 w26
  };
- //=============================================================================
+ // =============================================================================
  // Helper macros for the code
- //=============================================================================
+ // =============================================================================
  
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  // DCT_8_INV_ROW_MMX(INP, OUT, TABLE, ROUNDER)
- //-----------------------------------------------------------------------------
- #define DCT_8_INV_ROW_MMX(A1,A2,A3,A4)\
-   "movq " #A1 ",%%mm0              \n\t"/* 0 ; x3 x2 x1 x0*/\
-   "movq 8+" #A1 ",%%mm1            \n\t"/* 1 ; x7 x6 x5 x4*/\
-   "movq %%mm0,%%mm2            \n\t"/* 2  ; x3 x2 x1 x0*/\
-   "movq " #A3 ",%%mm3              \n\t"/* 3 ; w06 w04 w02 w00*/\
-   "punpcklwd %%mm1,%%mm0       \n\t"/* x5 x1 x4 x0*/\
-   "movq %%mm0,%%mm5            \n\t"/* 5 ; x5 x1 x4 x0*/\
-   "punpckldq %%mm0,%%mm0       \n\t"/* x4 x0 x4 x0*/\
-   "movq 8+" #A3 ",%%mm4            \n\t"/* 4 ; w07 w05 w03 w01*/\
-   "punpckhwd %%mm1,%%mm2       \n\t"/* 1 ; x7 x3 x6 x2*/\
-   "pmaddwd %%mm0,%%mm3         \n\t"/* x4*w06+x0*w04 x4*w02+x0*w00*/\
-   "movq %%mm2,%%mm6            \n\t"/* 6 ; x7 x3 x6 x2*/\
-   "movq 32+" #A3 ",%%mm1           \n\t"/* 1 ; w22 w20 w18 w16*/\
-   "punpckldq %%mm2,%%mm2       \n\t"/* x6 x2 x6 x2*/\
-   "pmaddwd %%mm2,%%mm4         \n\t"/* x6*w07+x2*w05 x6*w03+x2*w01*/\
-   "punpckhdq %%mm5,%%mm5       \n\t"/* x5 x1 x5 x1*/\
-   "pmaddwd 16+" #A3 ",%%mm0        \n\t"/* x4*w14+x0*w12 x4*w10+x0*w08*/\
-   "punpckhdq %%mm6,%%mm6       \n\t"/* x7 x3 x7 x3*/\
-   "movq 40+" #A3 ",%%mm7           \n\t"/* 7 ; w23 w21 w19 w17*/\
-   "pmaddwd %%mm5,%%mm1         \n\t"/* x5*w22+x1*w20 x5*w18+x1*w16*/\
-   "paddd " #A4 ",%%mm3             \n\t"/* +%4*/\
-   "pmaddwd %%mm6,%%mm7         \n\t"/* x7*w23+x3*w21 x7*w19+x3*w17*/\
-   "pmaddwd 24+" #A3 ",%%mm2        \n\t"/* x6*w15+x2*w13 x6*w11+x2*w09*/\
-   "paddd %%mm4,%%mm3           \n\t"/* 4 ; a1=sum(even1) a0=sum(even0)*/\
-   "pmaddwd 48+" #A3 ",%%mm5        \n\t"/* x5*w30+x1*w28 x5*w26+x1*w24*/\
-   "movq %%mm3,%%mm4            \n\t"/* 4 ; a1 a0*/\
-   "pmaddwd 56+" #A3 ",%%mm6        \n\t"/* x7*w31+x3*w29 x7*w27+x3*w25*/\
-   "paddd %%mm7,%%mm1           \n\t"/* 7 ; b1=sum(odd1) b0=sum(odd0)*/\
-   "paddd " #A4 ",%%mm0             \n\t"/* +%4*/\
-   "psubd %%mm1,%%mm3           \n\t"/* a1-b1 a0-b0*/\
-   "psrad $11,%%mm3 \n\t"/* y6=a1-b1 y7=a0-b0*/\
-   "paddd %%mm4,%%mm1           \n\t"/* 4 ; a1+b1 a0+b0*/\
-   "paddd %%mm2,%%mm0           \n\t"/* 2 ; a3=sum(even3) a2=sum(even2)*/\
-   "psrad $11,%%mm1 \n\t"/* y1=a1+b1 y0=a0+b0*/\
-   "paddd %%mm6,%%mm5           \n\t"/* 6 ; b3=sum(odd3) b2=sum(odd2)*/\
-   "movq %%mm0,%%mm4            \n\t"/* 4 ; a3 a2*/\
-   "paddd %%mm5,%%mm0           \n\t"/* a3+b3 a2+b2*/\
-   "psubd %%mm5,%%mm4           \n\t"/* 5 ; a3-b3 a2-b2*/\
-   "psrad $11,%%mm0 \n\t"/* y3=a3+b3 y2=a2+b2*/\
-   "psrad $11,%%mm4 \n\t"/* y4=a3-b3 y5=a2-b2*/\
-   "packssdw %%mm0,%%mm1        \n\t"/* 0 ; y3 y2 y1 y0*/\
-   "packssdw %%mm3,%%mm4        \n\t"/* 3 ; y6 y7 y4 y5*/\
-   "movq %%mm4,%%mm7            \n\t"/* 7 ; y6 y7 y4 y5*/\
-   "psrld $16,%%mm4            \n\t"/* 0 y6 0 y4*/\
-   "pslld $16,%%mm7            \n\t"/* y7 0 y5 0*/\
-   "movq %%mm1," #A2 "              \n\t"/* 1 ; save y3 y2 y1 y0*/\
-   "por %%mm4,%%mm7             \n\t"/* 4 ; y7 y6 y5 y4*/\
-   "movq %%mm7,8            +" #A2 "\n\t"/* 7 ; save y7 y6 y5 y4*/\
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
+ #define DCT_8_INV_ROW_MMX(A1, A2, A3, A4)                                       \
+     "movq       "#A1", %%mm0    \n\t" /* 0 ; x3 x2 x1 x0 */                     \
+     "movq     8+"#A1", %%mm1    \n\t" /* 1 ; x7 x6 x5 x4 */                     \
+     "movq       %%mm0, %%mm2    \n\t" /* 2 ; x3 x2 x1 x0 */                     \
+     "movq       "#A3", %%mm3    \n\t" /* 3 ; w06 w04 w02 w00 */                 \
+     "punpcklwd  %%mm1, %%mm0    \n\t" /* x5 x1 x4 x0 */                         \
+     "movq       %%mm0, %%mm5    \n\t" /* 5 ; x5 x1 x4 x0 */                     \
+     "punpckldq  %%mm0, %%mm0    \n\t" /* x4 x0 x4 x0 */                         \
+     "movq     8+"#A3", %%mm4    \n\t" /* 4 ; w07 w05 w03 w01 */                 \
+     "punpckhwd  %%mm1, %%mm2    \n\t" /* 1 ; x7 x3 x6 x2 */                     \
+     "pmaddwd    %%mm0, %%mm3    \n\t" /* x4*w06+x0*w04 x4*w02+x0*w00 */         \
+     "movq       %%mm2, %%mm6    \n\t" /* 6 ; x7 x3 x6 x2 */                     \
+     "movq    32+"#A3", %%mm1    \n\t" /* 1 ; w22 w20 w18 w16 */                 \
+     "punpckldq  %%mm2, %%mm2    \n\t" /* x6 x2 x6 x2 */                         \
+     "pmaddwd    %%mm2, %%mm4    \n\t" /* x6*w07+x2*w05 x6*w03+x2*w01 */         \
+     "punpckhdq  %%mm5, %%mm5    \n\t" /* x5 x1 x5 x1 */                         \
+     "pmaddwd 16+"#A3", %%mm0    \n\t" /* x4*w14+x0*w12 x4*w10+x0*w08 */         \
+     "punpckhdq  %%mm6, %%mm6    \n\t" /* x7 x3 x7 x3 */                         \
+     "movq 40+   "#A3", %%mm7    \n\t" /* 7 ; w23 w21 w19 w17 */                 \
+     "pmaddwd    %%mm5, %%mm1    \n\t" /* x5*w22+x1*w20 x5*w18+x1*w16 */         \
+     "paddd      "#A4", %%mm3    \n\t" /* +%4 */                                 \
+     "pmaddwd    %%mm6, %%mm7    \n\t" /* x7*w23+x3*w21 x7*w19+x3*w17 */         \
+     "pmaddwd 24+"#A3", %%mm2    \n\t" /* x6*w15+x2*w13 x6*w11+x2*w09 */         \
+     "paddd      %%mm4, %%mm3    \n\t" /* 4 ; a1=sum(even1) a0=sum(even0) */     \
+     "pmaddwd 48+"#A3", %%mm5    \n\t" /* x5*w30+x1*w28 x5*w26+x1*w24 */         \
+     "movq       %%mm3, %%mm4    \n\t" /* 4 ; a1 a0 */                           \
+     "pmaddwd 56+"#A3", %%mm6    \n\t" /* x7*w31+x3*w29 x7*w27+x3*w25 */         \
+     "paddd      %%mm7, %%mm1    \n\t" /* 7 ; b1=sum(odd1) b0=sum(odd0) */       \
+     "paddd      "#A4", %%mm0    \n\t" /* +%4 */                                 \
+     "psubd      %%mm1, %%mm3    \n\t" /* a1-b1 a0-b0 */                         \
+     "psrad        $11, %%mm3    \n\t" /* y6=a1-b1 y7=a0-b0 */                   \
+     "paddd      %%mm4, %%mm1    \n\t" /* 4 ; a1+b1 a0+b0 */                     \
+     "paddd      %%mm2, %%mm0    \n\t" /* 2 ; a3=sum(even3) a2=sum(even2) */     \
+     "psrad        $11, %%mm1    \n\t" /* y1=a1+b1 y0=a0+b0 */                   \
+     "paddd      %%mm6, %%mm5    \n\t" /* 6 ; b3=sum(odd3) b2=sum(odd2) */       \
+     "movq       %%mm0, %%mm4    \n\t" /* 4 ; a3 a2 */                           \
+     "paddd      %%mm5, %%mm0    \n\t" /* a3+b3 a2+b2 */                         \
+     "psubd      %%mm5, %%mm4    \n\t" /* 5 ; a3-b3 a2-b2 */                     \
+     "psrad        $11, %%mm0    \n\t" /* y3=a3+b3 y2=a2+b2 */                   \
+     "psrad        $11, %%mm4    \n\t" /* y4=a3-b3 y5=a2-b2 */                   \
+     "packssdw   %%mm0, %%mm1    \n\t" /* 0 ; y3 y2 y1 y0 */                     \
+     "packssdw   %%mm3, %%mm4    \n\t" /* 3 ; y6 y7 y4 y5 */                     \
+     "movq       %%mm4, %%mm7    \n\t" /* 7 ; y6 y7 y4 y5 */                     \
+     "psrld        $16, %%mm4    \n\t" /* 0 y6 0 y4 */                           \
+     "pslld        $16, %%mm7    \n\t" /* y7 0 y5 0 */                           \
+     "movq       %%mm1, "#A2"    \n\t" /* 1 ; save y3 y2 y1 y0 */                \
+     "por        %%mm4, %%mm7    \n\t" /* 4 ; y7 y6 y5 y4 */                     \
+     "movq       %%mm7, 8+"#A2"  \n\t" /* 7 ; save y7 y6 y5 y4 */                \
+ // -----------------------------------------------------------------------------
  // DCT_8_INV_ROW_XMM(INP, OUT, TABLE, ROUNDER)
- //-----------------------------------------------------------------------------
- #define DCT_8_INV_ROW_XMM(A1,A2,A3,A4)\
-   "movq " #A1 ",%%mm0                  \n\t"/* 0     ; x3 x2 x1 x0*/\
-   "movq 8+" #A1 ",%%mm1                \n\t"/* 1     ; x7 x6 x5 x4*/\
-   "movq %%mm0,%%mm2                \n\t"/* 2     ; x3 x2 x1 x0*/\
-   "movq " #A3 ",%%mm3                  \n\t"/* 3     ; w05 w04 w01 w00*/\
-   "pshufw $0x88,%%mm0,%%mm0        \n\t"/* x2 x0 x2 x0*/\
-   "movq 8+" #A3 ",%%mm4                \n\t"/* 4     ; w07 w06 w03 w02*/\
-   "movq %%mm1,%%mm5                \n\t"/* 5     ; x7 x6 x5 x4*/\
-   "pmaddwd %%mm0,%%mm3             \n\t"/* x2*w05+x0*w04 x2*w01+x0*w00*/\
-   "movq 32+" #A3 ",%%mm6               \n\t"/* 6     ; w21 w20 w17 w16*/\
-   "pshufw $0x88,%%mm1,%%mm1        \n\t"/* x6 x4 x6 x4*/\
-   "pmaddwd %%mm1,%%mm4             \n\t"/* x6*w07+x4*w06 x6*w03+x4*w02*/\
-   "movq 40+" #A3 ",%%mm7               \n\t"/* 7    ; w23 w22 w19 w18*/\
-   "pshufw $0xdd,%%mm2,%%mm2        \n\t"/* x3 x1 x3 x1*/\
-   "pmaddwd %%mm2,%%mm6             \n\t"/* x3*w21+x1*w20 x3*w17+x1*w16*/\
-   "pshufw $0xdd,%%mm5,%%mm5        \n\t"/* x7 x5 x7 x5*/\
-   "pmaddwd %%mm5,%%mm7             \n\t"/* x7*w23+x5*w22 x7*w19+x5*w18*/\
-   "paddd " #A4 ",%%mm3                 \n\t"/* +%4*/\
-   "pmaddwd 16+" #A3 ",%%mm0            \n\t"/* x2*w13+x0*w12 x2*w09+x0*w08*/\
-   "paddd %%mm4,%%mm3               \n\t"/* 4     ; a1=sum(even1) a0=sum(even0)*/\
-   "pmaddwd 24+" #A3 ",%%mm1            \n\t"/* x6*w15+x4*w14 x6*w11+x4*w10*/\
-   "movq %%mm3,%%mm4                \n\t"/* 4     ; a1 a0*/\
-   "pmaddwd 48+" #A3 ",%%mm2            \n\t"/* x3*w29+x1*w28 x3*w25+x1*w24*/\
-   "paddd %%mm7,%%mm6               \n\t"/* 7     ; b1=sum(odd1) b0=sum(odd0)*/\
-   "pmaddwd 56+" #A3 ",%%mm5            \n\t"/* x7*w31+x5*w30 x7*w27+x5*w26*/\
-   "paddd %%mm6,%%mm3               \n\t"/* a1+b1 a0+b0*/\
-   "paddd " #A4 ",%%mm0                 \n\t"/* +%4*/\
-   "psrad $11,%%mm3     \n\t"/* y1=a1+b1 y0=a0+b0*/\
-   "paddd %%mm1,%%mm0               \n\t"/* 1     ; a3=sum(even3) a2=sum(even2)*/\
-   "psubd %%mm6,%%mm4               \n\t"/* 6     ; a1-b1 a0-b0*/\
-   "movq %%mm0,%%mm7                \n\t"/* 7     ; a3 a2*/\
-   "paddd %%mm5,%%mm2               \n\t"/* 5     ; b3=sum(odd3) b2=sum(odd2)*/\
-   "paddd %%mm2,%%mm0               \n\t"/* a3+b3 a2+b2*/\
-   "psrad $11,%%mm4     \n\t"/* y6=a1-b1 y7=a0-b0*/\
-   "psubd %%mm2,%%mm7               \n\t"/* 2     ; a3-b3 a2-b2*/\
-   "psrad $11,%%mm0     \n\t"/* y3=a3+b3 y2=a2+b2*/\
-   "psrad $11,%%mm7     \n\t"/* y4=a3-b3 y5=a2-b2*/\
-   "packssdw %%mm0,%%mm3            \n\t"/* 0     ; y3 y2 y1 y0*/\
-   "packssdw %%mm4,%%mm7            \n\t"/* 4     ; y6 y7 y4 y5*/\
-   "movq %%mm3, " #A2 "                  \n\t"/* 3     ; save y3 y2 y1 y0*/\
-   "pshufw $0xb1,%%mm7,%%mm7        \n\t"/* y7 y6 y5 y4*/\
-   "movq %%mm7,8                +" #A2 "\n\t"/* 7     ; save y7 y6 y5 y4*/\
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
+ #define DCT_8_INV_ROW_XMM(A1, A2, A3, A4)                                       \
+     "movq       "#A1", %%mm0        \n\t" /* 0 ; x3 x2 x1 x0 */                 \
+     "movq     8+"#A1", %%mm1        \n\t" /* 1 ; x7 x6 x5 x4 */                 \
+     "movq       %%mm0, %%mm2        \n\t" /* 2 ; x3 x2 x1 x0 */                 \
+     "movq       "#A3", %%mm3        \n\t" /* 3 ; w05 w04 w01 w00 */             \
+     "pshufw     $0x88, %%mm0, %%mm0 \n\t" /* x2 x0 x2 x0 */                     \
+     "movq     8+"#A3", %%mm4        \n\t" /* 4 ; w07 w06 w03 w02 */             \
+     "movq       %%mm1, %%mm5        \n\t" /* 5 ; x7 x6 x5 x4 */                 \
+     "pmaddwd    %%mm0, %%mm3        \n\t" /* x2*w05+x0*w04 x2*w01+x0*w00 */     \
+     "movq    32+"#A3", %%mm6        \n\t" /* 6 ; w21 w20 w17 w16 */             \
+     "pshufw     $0x88, %%mm1, %%mm1 \n\t" /* x6 x4 x6 x4 */                     \
+     "pmaddwd    %%mm1, %%mm4        \n\t" /* x6*w07+x4*w06 x6*w03+x4*w02 */     \
+     "movq    40+"#A3", %%mm7        \n\t" /* 7; w23 w22 w19 w18 */              \
+     "pshufw     $0xdd, %%mm2, %%mm2 \n\t" /* x3 x1 x3 x1 */                     \
+     "pmaddwd    %%mm2, %%mm6        \n\t" /* x3*w21+x1*w20 x3*w17+x1*w16 */     \
+     "pshufw     $0xdd, %%mm5, %%mm5 \n\t" /* x7 x5 x7 x5 */                     \
+     "pmaddwd    %%mm5, %%mm7        \n\t" /* x7*w23+x5*w22 x7*w19+x5*w18 */     \
+     "paddd      "#A4", %%mm3        \n\t" /* +%4 */                             \
+     "pmaddwd 16+"#A3", %%mm0        \n\t" /* x2*w13+x0*w12 x2*w09+x0*w08 */     \
+     "paddd      %%mm4, %%mm3        \n\t" /* 4 ; a1=sum(even1) a0=sum(even0) */ \
+     "pmaddwd 24+"#A3", %%mm1        \n\t" /* x6*w15+x4*w14 x6*w11+x4*w10 */     \
+     "movq       %%mm3, %%mm4        \n\t" /* 4 ; a1 a0 */                       \
+     "pmaddwd 48+"#A3", %%mm2        \n\t" /* x3*w29+x1*w28 x3*w25+x1*w24 */     \
+     "paddd      %%mm7, %%mm6        \n\t" /* 7 ; b1=sum(odd1) b0=sum(odd0) */   \
+     "pmaddwd 56+"#A3", %%mm5        \n\t" /* x7*w31+x5*w30 x7*w27+x5*w26 */     \
+     "paddd      %%mm6, %%mm3        \n\t" /* a1+b1 a0+b0 */                     \
+     "paddd      "#A4", %%mm0        \n\t" /* +%4 */                             \
+     "psrad        $11, %%mm3        \n\t" /* y1=a1+b1 y0=a0+b0 */               \
+     "paddd      %%mm1, %%mm0        \n\t" /* 1 ; a3=sum(even3) a2=sum(even2) */ \
+     "psubd      %%mm6, %%mm4        \n\t" /* 6 ; a1-b1 a0-b0 */                 \
+     "movq       %%mm0, %%mm7        \n\t" /* 7 ; a3 a2 */                       \
+     "paddd      %%mm5, %%mm2        \n\t" /* 5 ; b3=sum(odd3) b2=sum(odd2) */   \
+     "paddd      %%mm2, %%mm0        \n\t" /* a3+b3 a2+b2 */                     \
+     "psrad        $11, %%mm4        \n\t" /* y6=a1-b1 y7=a0-b0 */               \
+     "psubd      %%mm2, %%mm7        \n\t" /* 2 ; a3-b3 a2-b2 */                 \
+     "psrad        $11, %%mm0        \n\t" /* y3=a3+b3 y2=a2+b2 */               \
+     "psrad        $11, %%mm7        \n\t" /* y4=a3-b3 y5=a2-b2 */               \
+     "packssdw   %%mm0, %%mm3        \n\t" /* 0 ; y3 y2 y1 y0 */                 \
+     "packssdw   %%mm4, %%mm7        \n\t" /* 4 ; y6 y7 y4 y5 */                 \
+     "movq       %%mm3, "#A2"        \n\t" /* 3 ; save y3 y2 y1 y0 */            \
+     "pshufw     $0xb1, %%mm7, %%mm7 \n\t" /* y7 y6 y5 y4 */                     \
+     "movq       %%mm7, 8+"#A2"      \n\t" /* 7 ; save y7 y6 y5 y4 */            \
+ // -----------------------------------------------------------------------------
  //
  // The first stage DCT 8x8 - forward DCTs of columns
  //
  // for rows 3,5 - on cos_3_16
  // and are shifted to the left for better accuracy
  //
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  //
  // The 8-point scaled forward DCT algorithm (26a8m)
  //
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  //
- // #define DCT_8_FRW_COL(x, y)
- //{
- // short t0, t1, t2, t3, t4, t5, t6, t7;
- // short tp03, tm03, tp12, tm12, tp65, tm65;
- // short tp465, tm465, tp765, tm765;
+ //#define DCT_8_FRW_COL(x, y)
+ // {
+ //     short t0, t1, t2, t3, t4, t5, t6, t7;
+ //     short tp03, tm03, tp12, tm12, tp65, tm65;
+ //     short tp465, tm465, tp765, tm765;
  //
- // t0 = LEFT_SHIFT ( x[0] + x[7] );
- // t1 = LEFT_SHIFT ( x[1] + x[6] );
- // t2 = LEFT_SHIFT ( x[2] + x[5] );
- // t3 = LEFT_SHIFT ( x[3] + x[4] );
- // t4 = LEFT_SHIFT ( x[3] - x[4] );
- // t5 = LEFT_SHIFT ( x[2] - x[5] );
- // t6 = LEFT_SHIFT ( x[1] - x[6] );
- // t7 = LEFT_SHIFT ( x[0] - x[7] );
+ //     t0 = LEFT_SHIFT(x[0] + x[7]);
+ //     t1 = LEFT_SHIFT(x[1] + x[6]);
+ //     t2 = LEFT_SHIFT(x[2] + x[5]);
+ //     t3 = LEFT_SHIFT(x[3] + x[4]);
+ //     t4 = LEFT_SHIFT(x[3] - x[4]);
+ //     t5 = LEFT_SHIFT(x[2] - x[5]);
+ //     t6 = LEFT_SHIFT(x[1] - x[6]);
+ //     t7 = LEFT_SHIFT(x[0] - x[7]);
  //
- // tp03 = t0 + t3;
- // tm03 = t0 - t3;
- // tp12 = t1 + t2;
- // tm12 = t1 - t2;
+ //     tp03 = t0 + t3;
+ //     tm03 = t0 - t3;
+ //     tp12 = t1 + t2;
+ //     tm12 = t1 - t2;
  //
- // y[0] = tp03 + tp12;
- // y[4] = tp03 - tp12;
+ //     y[0] = tp03 + tp12;
+ //     y[4] = tp03 - tp12;
  //
- // y[2] = tm03 + tm12 * tg_2_16;
- // y[6] = tm03 * tg_2_16 - tm12;
+ //     y[2] = tm03 + tm12 * tg_2_16;
+ //     y[6] = tm03 * tg_2_16 - tm12;
  //
- // tp65 =(t6 +t5 )*cos_4_16;
- // tm65 =(t6 -t5 )*cos_4_16;
+ //     tp65 = (t6 + t5) * cos_4_16;
+ //     tm65 = (t6 - t5) * cos_4_16;
  //
- // tp765 = t7 + tp65;
- // tm765 = t7 - tp65;
- // tp465 = t4 + tm65;
- // tm465 = t4 - tm65;
+ //     tp765 = t7 + tp65;
+ //     tm765 = t7 - tp65;
+ //     tp465 = t4 + tm65;
+ //     tm465 = t4 - tm65;
  //
- // y[1] = tp765 + tp465 * tg_1_16;
- // y[7] = tp765 * tg_1_16 - tp465;
- // y[5] = tm765 * tg_3_16 + tm465;
- // y[3] = tm765 - tm465 * tg_3_16;
- //}
+ //     y[1] = tp765 + tp465 * tg_1_16;
+ //     y[7] = tp765 * tg_1_16 - tp465;
+ //     y[5] = tm765 * tg_3_16 + tm465;
+ //     y[3] = tm765 - tm465 * tg_3_16;
+ // }
  //
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
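
This comment block documents Xvid's forward column DCT, which is not implemented in this file; it survives purely as reference documentation. For readability, the same pseudo-code as plain float C (a sketch: the LEFT_SHIFT fixed-point scaling is dropped and the tg_*/cos_* constants are used as plain tangents/cosine):

    #include <math.h>

    static void dct_8_frw_col_ref(const float *x, float *y)
    {
        const float tg_1_16  = tanf(1 * (float) M_PI / 16);
        const float tg_2_16  = tanf(2 * (float) M_PI / 16);
        const float tg_3_16  = tanf(3 * (float) M_PI / 16);
        const float cos_4_16 = cosf(4 * (float) M_PI / 16);

        float t0 = x[0] + x[7], t7 = x[0] - x[7];
        float t1 = x[1] + x[6], t6 = x[1] - x[6];
        float t2 = x[2] + x[5], t5 = x[2] - x[5];
        float t3 = x[3] + x[4], t4 = x[3] - x[4];

        float tp03 = t0 + t3, tm03 = t0 - t3;
        float tp12 = t1 + t2, tm12 = t1 - t2;

        y[0] = tp03 + tp12;
        y[4] = tp03 - tp12;
        y[2] = tm03 + tm12 * tg_2_16;
        y[6] = tm03 * tg_2_16 - tm12;

        float tp65 = (t6 + t5) * cos_4_16;
        float tm65 = (t6 - t5) * cos_4_16;

        float tp765 = t7 + tp65, tm765 = t7 - tp65;
        float tp465 = t4 + tm65, tm465 = t4 - tm65;

        y[1] = tp765 + tp465 * tg_1_16;
        y[7] = tp765 * tg_1_16 - tp465;
        y[5] = tm765 * tg_3_16 + tm465;
        y[3] = tm765 - tm465 * tg_3_16;
    }
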
  
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  // DCT_8_INV_COL(INP, OUT)
- //-----------------------------------------------------------------------------
- #define DCT_8_INV_COL(A1,A2)\
-   "movq 2*8(%3),%%mm0\n\t"\
-   "movq 16*3+" #A1 ",%%mm3\n\t"\
-   "movq %%mm0,%%mm1            \n\t"/* tg_3_16*/\
-   "movq 16*5+" #A1 ",%%mm5\n\t"\
-   "pmulhw %%mm3,%%mm0          \n\t"/* x3*(tg_3_16-1)*/\
-   "movq (%3),%%mm4\n\t"\
-   "pmulhw %%mm5,%%mm1          \n\t"/* x5*(tg_3_16-1)*/\
-   "movq 16*7+" #A1 ",%%mm7\n\t"\
-   "movq %%mm4,%%mm2            \n\t"/* tg_1_16*/\
-   "movq 16*1+" #A1 ",%%mm6\n\t"\
-   "pmulhw %%mm7,%%mm4          \n\t"/* x7*tg_1_16*/\
-   "paddsw %%mm3,%%mm0          \n\t"/* x3*tg_3_16*/\
-   "pmulhw %%mm6,%%mm2          \n\t"/* x1*tg_1_16*/\
-   "paddsw %%mm3,%%mm1          \n\t"/* x3+x5*(tg_3_16-1)*/\
-   "psubsw %%mm5,%%mm0          \n\t"/* x3*tg_3_16-x5 = tm35*/\
-   "movq 3*8(%3),%%mm3\n\t"\
-   "paddsw %%mm5,%%mm1          \n\t"/* x3+x5*tg_3_16 = tp35*/\
-   "paddsw %%mm6,%%mm4          \n\t"/* x1+tg_1_16*x7 = tp17*/\
-   "psubsw %%mm7,%%mm2          \n\t"/* x1*tg_1_16-x7 = tm17*/\
-   "movq %%mm4,%%mm5            \n\t"/* tp17*/\
-   "movq %%mm2,%%mm6            \n\t"/* tm17*/\
-   "paddsw %%mm1,%%mm5          \n\t"/* tp17+tp35 = b0*/\
-   "psubsw %%mm0,%%mm6          \n\t"/* tm17-tm35 = b3*/\
-   "psubsw %%mm1,%%mm4          \n\t"/* tp17-tp35 = t1*/\
-   "paddsw %%mm0,%%mm2          \n\t"/* tm17+tm35 = t2*/\
-   "movq 1*8(%3),%%mm7\n\t"\
-   "movq %%mm4,%%mm1            \n\t"/* t1*/\
-   "movq %%mm5,3*16         +" #A2 "\n\t"/* save b0*/\
-   "paddsw %%mm2,%%mm1          \n\t"/* t1+t2*/\
-   "movq %%mm6,5*16         +" #A2 "\n\t"/* save b3*/\
-   "psubsw %%mm2,%%mm4          \n\t"/* t1-t2*/\
-   "movq 2*16+" #A1 ",%%mm5\n\t"\
-   "movq %%mm7,%%mm0            \n\t"/* tg_2_16*/\
-   "movq 6*16+" #A1 ",%%mm6\n\t"\
-   "pmulhw %%mm5,%%mm0          \n\t"/* x2*tg_2_16*/\
-   "pmulhw %%mm6,%%mm7          \n\t"/* x6*tg_2_16*/\
-   "pmulhw %%mm3,%%mm1          \n\t"/* ocos_4_16*(t1+t2) = b1/2*/\
-   "movq 0*16+" #A1 ",%%mm2\n\t"\
-   "pmulhw %%mm3,%%mm4          \n\t"/* ocos_4_16*(t1-t2) = b2/2*/\
-   "psubsw %%mm6,%%mm0          \n\t"/* t2*tg_2_16-x6 = tm26*/\
-   "movq %%mm2,%%mm3            \n\t"/* x0*/\
-   "movq 4*16+" #A1 ",%%mm6\n\t"\
-   "paddsw %%mm5,%%mm7          \n\t"/* x2+x6*tg_2_16 = tp26*/\
-   "paddsw %%mm6,%%mm2          \n\t"/* x0+x4 = tp04*/\
-   "psubsw %%mm6,%%mm3          \n\t"/* x0-x4 = tm04*/\
-   "movq %%mm2,%%mm5            \n\t"/* tp04*/\
-   "movq %%mm3,%%mm6            \n\t"/* tm04*/\
-   "psubsw %%mm7,%%mm2          \n\t"/* tp04-tp26 = a3*/\
-   "paddsw %%mm0,%%mm3          \n\t"/* tm04+tm26 = a1*/\
-   "paddsw %%mm1,%%mm1          \n\t"/* b1*/\
-   "paddsw %%mm4,%%mm4          \n\t"/* b2*/\
-   "paddsw %%mm7,%%mm5          \n\t"/* tp04+tp26 = a0*/\
-   "psubsw %%mm0,%%mm6          \n\t"/* tm04-tm26 = a2*/\
-   "movq %%mm3,%%mm7            \n\t"/* a1*/\
-   "movq %%mm6,%%mm0            \n\t"/* a2*/\
-   "paddsw %%mm1,%%mm3          \n\t"/* a1+b1*/\
-   "paddsw %%mm4,%%mm6          \n\t"/* a2+b2*/\
-   "psraw $6,%%mm3 \n\t"/* dst1*/\
-   "psubsw %%mm1,%%mm7          \n\t"/* a1-b1*/\
-   "psraw $6,%%mm6 \n\t"/* dst2*/\
-   "psubsw %%mm4,%%mm0          \n\t"/* a2-b2*/\
-   "movq 3*16+" #A2 ",%%mm1         \n\t"/* load b0*/\
-   "psraw $6,%%mm7 \n\t"/* dst6*/\
-   "movq %%mm5,%%mm4            \n\t"/* a0*/\
-   "psraw $6,%%mm0 \n\t"/* dst5*/\
-   "movq %%mm3,1*16+" #A2 "\n\t"\
-   "paddsw %%mm1,%%mm5          \n\t"/* a0+b0*/\
-   "movq %%mm6,2*16+" #A2 "\n\t"\
-   "psubsw %%mm1,%%mm4          \n\t"/* a0-b0*/\
-   "movq 5*16+" #A2 ",%%mm3         \n\t"/* load b3*/\
-   "psraw $6,%%mm5 \n\t"/* dst0*/\
-   "movq %%mm2,%%mm6            \n\t"/* a3*/\
-   "psraw $6,%%mm4 \n\t"/* dst7*/\
-   "movq %%mm0,5*16+" #A2 "\n\t"\
-   "paddsw %%mm3,%%mm2          \n\t"/* a3+b3*/\
-   "movq %%mm7,6*16+" #A2 "\n\t"\
-   "psubsw %%mm3,%%mm6          \n\t"/* a3-b3*/\
-   "movq %%mm5,0*16+" #A2 "\n\t"\
-   "psraw $6,%%mm2 \n\t"/* dst3*/\
-   "movq %%mm4,7*16+" #A2 "\n\t"\
-   "psraw $6,%%mm6 \n\t"/* dst4*/\
-   "movq %%mm2,3*16+" #A2 "\n\t"\
-   "movq %%mm6,4*16+" #A2 "\n\t"
- //=============================================================================
+ // -----------------------------------------------------------------------------
+ #define DCT_8_INV_COL(A1, A2)                                                   \
+     "movq    2*8(%3), %%mm0         \n\t"                                       \
+     "movq 16*3+"#A1", %%mm3         \n\t"                                       \
+     "movq      %%mm0, %%mm1         \n\t" /* tg_3_16 */                         \
+     "movq 16*5+"#A1", %%mm5         \n\t"                                       \
+     "pmulhw    %%mm3, %%mm0         \n\t" /* x3*(tg_3_16-1) */                  \
+     "movq       (%3), %%mm4         \n\t"                                       \
+     "pmulhw    %%mm5, %%mm1         \n\t" /* x5*(tg_3_16-1) */                  \
+     "movq 16*7+"#A1", %%mm7         \n\t"                                       \
+     "movq      %%mm4, %%mm2         \n\t" /* tg_1_16 */                         \
+     "movq 16*1+"#A1", %%mm6         \n\t"                                       \
+     "pmulhw    %%mm7, %%mm4         \n\t" /* x7*tg_1_16 */                      \
+     "paddsw    %%mm3, %%mm0         \n\t" /* x3*tg_3_16 */                      \
+     "pmulhw    %%mm6, %%mm2         \n\t" /* x1*tg_1_16 */                      \
+     "paddsw    %%mm3, %%mm1         \n\t" /* x3+x5*(tg_3_16-1) */               \
+     "psubsw    %%mm5, %%mm0         \n\t" /* x3*tg_3_16-x5 = tm35 */            \
+     "movq    3*8(%3), %%mm3         \n\t"                                       \
+     "paddsw    %%mm5, %%mm1         \n\t" /* x3+x5*tg_3_16 = tp35 */            \
+     "paddsw    %%mm6, %%mm4         \n\t" /* x1+tg_1_16*x7 = tp17 */            \
+     "psubsw    %%mm7, %%mm2         \n\t" /* x1*tg_1_16-x7 = tm17 */            \
+     "movq      %%mm4, %%mm5         \n\t" /* tp17 */                            \
+     "movq      %%mm2, %%mm6         \n\t" /* tm17 */                            \
+     "paddsw    %%mm1, %%mm5         \n\t" /* tp17+tp35 = b0 */                  \
+     "psubsw    %%mm0, %%mm6         \n\t" /* tm17-tm35 = b3 */                  \
+     "psubsw    %%mm1, %%mm4         \n\t" /* tp17-tp35 = t1 */                  \
+     "paddsw    %%mm0, %%mm2         \n\t" /* tm17+tm35 = t2 */                  \
+     "movq    1*8(%3), %%mm7         \n\t"                                       \
+     "movq      %%mm4, %%mm1         \n\t" /* t1 */                              \
+     "movq      %%mm5, 3*16+"#A2"    \n\t" /* save b0 */                         \
+     "paddsw    %%mm2, %%mm1         \n\t" /* t1+t2 */                           \
+     "movq      %%mm6, 5*16+"#A2"    \n\t" /* save b3 */                         \
+     "psubsw    %%mm2, %%mm4         \n\t" /* t1-t2 */                           \
+     "movq 2*16+"#A1", %%mm5         \n\t"                                       \
+     "movq      %%mm7, %%mm0         \n\t" /* tg_2_16 */                         \
+     "movq 6*16+"#A1", %%mm6         \n\t"                                       \
+     "pmulhw    %%mm5, %%mm0         \n\t" /* x2*tg_2_16 */                      \
+     "pmulhw    %%mm6, %%mm7         \n\t" /* x6*tg_2_16 */                      \
+     "pmulhw    %%mm3, %%mm1         \n\t" /* ocos_4_16*(t1+t2) = b1/2 */        \
+     "movq 0*16+"#A1", %%mm2         \n\t"                                       \
+     "pmulhw    %%mm3, %%mm4         \n\t" /* ocos_4_16*(t1-t2) = b2/2 */        \
+     "psubsw    %%mm6, %%mm0         \n\t" /* t2*tg_2_16-x6 = tm26 */            \
+     "movq      %%mm2, %%mm3         \n\t" /* x0 */                              \
+     "movq 4*16+"#A1", %%mm6         \n\t"                                       \
+     "paddsw    %%mm5, %%mm7         \n\t" /* x2+x6*tg_2_16 = tp26 */            \
+     "paddsw    %%mm6, %%mm2         \n\t" /* x0+x4 = tp04 */                    \
+     "psubsw    %%mm6, %%mm3         \n\t" /* x0-x4 = tm04 */                    \
+     "movq      %%mm2, %%mm5         \n\t" /* tp04 */                            \
+     "movq      %%mm3, %%mm6         \n\t" /* tm04 */                            \
+     "psubsw    %%mm7, %%mm2         \n\t" /* tp04-tp26 = a3 */                  \
+     "paddsw    %%mm0, %%mm3         \n\t" /* tm04+tm26 = a1 */                  \
+     "paddsw    %%mm1, %%mm1         \n\t" /* b1 */                              \
+     "paddsw    %%mm4, %%mm4         \n\t" /* b2 */                              \
+     "paddsw    %%mm7, %%mm5         \n\t" /* tp04+tp26 = a0 */                  \
+     "psubsw    %%mm0, %%mm6         \n\t" /* tm04-tm26 = a2 */                  \
+     "movq      %%mm3, %%mm7         \n\t" /* a1 */                              \
+     "movq      %%mm6, %%mm0         \n\t" /* a2 */                              \
+     "paddsw    %%mm1, %%mm3         \n\t" /* a1+b1 */                           \
+     "paddsw    %%mm4, %%mm6         \n\t" /* a2+b2 */                           \
+     "psraw        $6, %%mm3         \n\t" /* dst1 */                            \
+     "psubsw    %%mm1, %%mm7         \n\t" /* a1-b1 */                           \
+     "psraw        $6, %%mm6         \n\t" /* dst2 */                            \
+     "psubsw    %%mm4, %%mm0         \n\t" /* a2-b2 */                           \
+     "movq 3*16+"#A2", %%mm1         \n\t" /* load b0 */                         \
+     "psraw        $6, %%mm7         \n\t" /* dst6 */                            \
+     "movq      %%mm5, %%mm4         \n\t" /* a0 */                              \
+     "psraw        $6, %%mm0         \n\t" /* dst5 */                            \
+     "movq      %%mm3, 1*16+"#A2"    \n\t"                                       \
+     "paddsw    %%mm1, %%mm5         \n\t" /* a0+b0 */                           \
+     "movq      %%mm6, 2*16+"#A2"    \n\t"                                       \
+     "psubsw    %%mm1, %%mm4         \n\t" /* a0-b0 */                           \
+     "movq 5*16+"#A2", %%mm3         \n\t" /* load b3 */                         \
+     "psraw        $6, %%mm5         \n\t" /* dst0 */                            \
+     "movq      %%mm2, %%mm6         \n\t" /* a3 */                              \
+     "psraw        $6, %%mm4         \n\t" /* dst7 */                            \
+     "movq      %%mm0, 5*16+"#A2"    \n\t"                                       \
+     "paddsw    %%mm3, %%mm2         \n\t" /* a3+b3 */                           \
+     "movq      %%mm7, 6*16+"#A2"    \n\t"                                       \
+     "psubsw    %%mm3, %%mm6         \n\t" /* a3-b3 */                           \
+     "movq      %%mm5, 0*16+"#A2"    \n\t"                                       \
+     "psraw        $6, %%mm2         \n\t" /* dst3 */                            \
+     "movq      %%mm4, 7*16+"#A2"    \n\t"                                       \
+     "psraw        $6, %%mm6         \n\t" /* dst4 */                            \
+     "movq      %%mm2, 3*16+"#A2"    \n\t"                                       \
+     "movq      %%mm6, 4*16+"#A2"    \n\t"                                       \
+ // =============================================================================
  // Code
- //=============================================================================
+ // =============================================================================
  
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  // void idct_mmx(int16_t block[64]);
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  
  void ff_xvid_idct_mmx(short *block)
  {
-     __asm__ volatile(
-             //# Process each row
-     DCT_8_INV_ROW_MMX(0*16(%0), 0*16(%0), 64*0(%2), 8*0(%1))
-     DCT_8_INV_ROW_MMX(1*16(%0), 1*16(%0), 64*1(%2), 8*1(%1))
-     DCT_8_INV_ROW_MMX(2*16(%0), 2*16(%0), 64*2(%2), 8*2(%1))
-     DCT_8_INV_ROW_MMX(3*16(%0), 3*16(%0), 64*3(%2), 8*3(%1))
-     DCT_8_INV_ROW_MMX(4*16(%0), 4*16(%0), 64*0(%2), 8*4(%1))
-     DCT_8_INV_ROW_MMX(5*16(%0), 5*16(%0), 64*3(%2), 8*5(%1))
-     DCT_8_INV_ROW_MMX(6*16(%0), 6*16(%0), 64*2(%2), 8*6(%1))
-     DCT_8_INV_ROW_MMX(7*16(%0), 7*16(%0), 64*1(%2), 8*7(%1))
-             //# Process the columns (4 at a time)
-     DCT_8_INV_COL(0(%0), 0(%0))
-     DCT_8_INV_COL(8(%0), 8(%0))
-     :: "r"(block), "r"(rounder_0), "r"(tab_i_04_mmx), "r"(tg_1_16));
+     __asm__ volatile (
+         // # Process each row
+         DCT_8_INV_ROW_MMX(0 * 16(%0), 0 * 16(%0), 64 * 0(%2), 8 * 0(%1))
+         DCT_8_INV_ROW_MMX(1 * 16(%0), 1 * 16(%0), 64 * 1(%2), 8 * 1(%1))
+         DCT_8_INV_ROW_MMX(2 * 16(%0), 2 * 16(%0), 64 * 2(%2), 8 * 2(%1))
+         DCT_8_INV_ROW_MMX(3 * 16(%0), 3 * 16(%0), 64 * 3(%2), 8 * 3(%1))
+         DCT_8_INV_ROW_MMX(4 * 16(%0), 4 * 16(%0), 64 * 0(%2), 8 * 4(%1))
+         DCT_8_INV_ROW_MMX(5 * 16(%0), 5 * 16(%0), 64 * 3(%2), 8 * 5(%1))
+         DCT_8_INV_ROW_MMX(6 * 16(%0), 6 * 16(%0), 64 * 2(%2), 8 * 6(%1))
+         DCT_8_INV_ROW_MMX(7 * 16(%0), 7 * 16(%0), 64 * 1(%2), 8 * 7(%1))
+         // # Process the columns (4 at a time)
+         DCT_8_INV_COL(0(%0), 0(%0))
+         DCT_8_INV_COL(8(%0), 8(%0))
+         :: "r" (block), "r" (rounder_0), "r" (tab_i_04_mmx), "r" (tg_1_16));
  }
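
ff_xvid_idct_mmx transforms a row-major 8x8 block of int16_t coefficients in place: eight row passes, then two column passes covering four columns each. Note the table offsets above: rows 0 and 4 share table 0 (cos_4_16), rows 1 and 7 table 1 (cos_1_16), rows 2 and 6 table 2, rows 3 and 5 table 3, exactly the pairing described in the row-pass comments. A minimal usage sketch (an x86 build with MMX inline asm is assumed; DECLARE_ALIGNED comes from libavutil/mem.h):

    #include <stdint.h>
    #include <string.h>
    #include "libavutil/mem.h"

    static void idct_one_block(void)
    {
        DECLARE_ALIGNED(16, int16_t, block)[64];

        memset(block, 0, sizeof(block));
        block[0] = 64;              // DC-only input...
        ff_xvid_idct_mmx(block);    // ...yields a (near) flat spatial block
    }
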
  
  void ff_xvid_idct_mmx_put(uint8_t *dest, int line_size, int16_t *block)
@@@ -508,28 -510,27 +510,27 @@@ void ff_xvid_idct_mmx_add(uint8_t *dest
  
  #if HAVE_MMXEXT_INLINE
  
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  // void idct_xmm(int16_t block[64]);
- //-----------------------------------------------------------------------------
+ // -----------------------------------------------------------------------------
  
  void ff_xvid_idct_mmxext(short *block)
  {
-     __asm__ volatile(
-             //# Process each row
-     DCT_8_INV_ROW_XMM(0*16(%0), 0*16(%0), 64*0(%2), 8*0(%1))
-     DCT_8_INV_ROW_XMM(1*16(%0), 1*16(%0), 64*1(%2), 8*1(%1))
-     DCT_8_INV_ROW_XMM(2*16(%0), 2*16(%0), 64*2(%2), 8*2(%1))
-     DCT_8_INV_ROW_XMM(3*16(%0), 3*16(%0), 64*3(%2), 8*3(%1))
-     DCT_8_INV_ROW_XMM(4*16(%0), 4*16(%0), 64*0(%2), 8*4(%1))
-     DCT_8_INV_ROW_XMM(5*16(%0), 5*16(%0), 64*3(%2), 8*5(%1))
-     DCT_8_INV_ROW_XMM(6*16(%0), 6*16(%0), 64*2(%2), 8*6(%1))
-     DCT_8_INV_ROW_XMM(7*16(%0), 7*16(%0), 64*1(%2), 8*7(%1))
-             //# Process the columns (4 at a time)
-     DCT_8_INV_COL(0(%0), 0(%0))
-     DCT_8_INV_COL(8(%0), 8(%0))
-     :: "r"(block), "r"(rounder_0), "r"(tab_i_04_xmm), "r"(tg_1_16));
+     __asm__ volatile (
+         // # Process each row
+         DCT_8_INV_ROW_XMM(0 * 16(%0), 0 * 16(%0), 64 * 0(%2), 8 * 0(%1))
+         DCT_8_INV_ROW_XMM(1 * 16(%0), 1 * 16(%0), 64 * 1(%2), 8 * 1(%1))
+         DCT_8_INV_ROW_XMM(2 * 16(%0), 2 * 16(%0), 64 * 2(%2), 8 * 2(%1))
+         DCT_8_INV_ROW_XMM(3 * 16(%0), 3 * 16(%0), 64 * 3(%2), 8 * 3(%1))
+         DCT_8_INV_ROW_XMM(4 * 16(%0), 4 * 16(%0), 64 * 0(%2), 8 * 4(%1))
+         DCT_8_INV_ROW_XMM(5 * 16(%0), 5 * 16(%0), 64 * 3(%2), 8 * 5(%1))
+         DCT_8_INV_ROW_XMM(6 * 16(%0), 6 * 16(%0), 64 * 2(%2), 8 * 6(%1))
+         DCT_8_INV_ROW_XMM(7 * 16(%0), 7 * 16(%0), 64 * 1(%2), 8 * 7(%1))
+         // # Process the columns (4 at a time)
+         DCT_8_INV_COL(0(%0), 0(%0))
+         DCT_8_INV_COL(8(%0), 8(%0))
+         :: "r" (block), "r" (rounder_0), "r" (tab_i_04_xmm), "r" (tg_1_16));
  }
  
  void ff_xvid_idct_mmxext_put(uint8_t *dest, int line_size, int16_t *block)
index 508458eafbe92b6917fb39c78582fe719f29c537,d4f01693ea3171ff7ba2095bd554c0538e769024..50a2f99736038833573e68b2f6069d6108ef6c09
@@@ -9,7 -9,7 +9,7 @@@
   *
   * Originally from dct/x86_asm/fdct_sse2_skal.asm in Xvid.
   *
 - * This file is part of Libav.
 + * This file is part of FFmpeg.
   *
   * Vertical pass is an implementation of the scheme:
   *  Loeffler C., Ligtenberg A., and Moschytz C.S.:
   *
   * More details at http://skal.planet-d.net/coding/dct.html
   *
 - * Libav is free software; you can redistribute it and/or
 + * FFmpeg is free software; you can redistribute it and/or
   * modify it under the terms of the GNU Lesser General Public
   * License as published by the Free Software Foundation; either
   * version 2.1 of the License, or (at your option) any later version.
   *
 - * Libav is distributed in the hope that it will be useful,
 + * FFmpeg is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * Lesser General Public License for more details.
   *
   * You should have received a copy of the GNU Lesser General Public License
 - * along with Libav; if not, write to the Free Software Foundation,
 + * along with FFmpeg; if not, write to the Free Software Foundation,
   * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
 -#include "libavutil/internal.h"
  #include "libavutil/mem.h"
  #include "libavutil/x86/asm.h"
  
   * @brief SSE2 IDCT compatible with the Xvid IDCT
   */
  
- #define X8(x)     x,x,x,x,x,x,x,x
+ #define X8(x) x, x, x, x, x, x, x, x
  
- DECLARE_ASM_CONST(16, int16_t, tan1)[] = {X8(13036)}; // tan( pi/16)
- DECLARE_ASM_CONST(16, int16_t, tan2)[] = {X8(27146)}; // tan(2pi/16) = sqrt(2)-1
- DECLARE_ASM_CONST(16, int16_t, tan3)[] = {X8(43790)}; // tan(3pi/16)-1
- DECLARE_ASM_CONST(16, int16_t, sqrt2)[]= {X8(23170)}; // 0.5/sqrt(2)
- DECLARE_ASM_CONST(8,  uint8_t, m127)[] = {X8(127)};
+ DECLARE_ASM_CONST(16, int16_t, tan1)[]  = { X8(13036) }; // tan( pi/16)
+ DECLARE_ASM_CONST(16, int16_t, tan2)[]  = { X8(27146) }; // tan(2pi/16) = sqrt(2)-1
+ DECLARE_ASM_CONST(16, int16_t, tan3)[]  = { X8(43790) }; // tan(3pi/16)-1
+ DECLARE_ASM_CONST(16, int16_t, sqrt2)[] = { X8(23170) }; // 0.5/sqrt(2)
+ DECLARE_ASM_CONST(8,  uint8_t, m127)[]  = { X8(127) };
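
tan3 is worth a second look: round(tan(3*pi/16) * 65536) = 43790, which wraps to -21746 in int16_t, so the table effectively stores (tan(3*pi/16) - 1) in Q16. Since pmulhw computes (x * t) >> 16, the column pass gets x * tan3 by following each pmulhw with a paddsw that adds x back in. A scalar check of that identity (values arbitrary):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const int16_t tan3_m1 = (int16_t) 43790; // == -21746, (tan(3pi/16)-1) in Q16
        const int16_t x       = 1000;

        int16_t hi = (int16_t) (((int32_t) x * tan3_m1) >> 16);  // pmulhw
        printf("%d vs %f\n", hi + x, x * tan(3 * M_PI / 16));    // 668 vs 668.18
        return 0;
    }
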
  
  DECLARE_ASM_CONST(16, int16_t, iTab1)[] = {
-  0x4000, 0x539f, 0xc000, 0xac61, 0x4000, 0xdd5d, 0x4000, 0xdd5d,
-  0x4000, 0x22a3, 0x4000, 0x22a3, 0xc000, 0x539f, 0x4000, 0xac61,
-  0x3249, 0x11a8, 0x4b42, 0xee58, 0x11a8, 0x4b42, 0x11a8, 0xcdb7,
-  0x58c5, 0x4b42, 0xa73b, 0xcdb7, 0x3249, 0xa73b, 0x4b42, 0xa73b
+     0x4000, 0x539f, 0xc000, 0xac61, 0x4000, 0xdd5d, 0x4000, 0xdd5d,
+     0x4000, 0x22a3, 0x4000, 0x22a3, 0xc000, 0x539f, 0x4000, 0xac61,
+     0x3249, 0x11a8, 0x4b42, 0xee58, 0x11a8, 0x4b42, 0x11a8, 0xcdb7,
+     0x58c5, 0x4b42, 0xa73b, 0xcdb7, 0x3249, 0xa73b, 0x4b42, 0xa73b
  };
  
  DECLARE_ASM_CONST(16, int16_t, iTab2)[] = {
-  0x58c5, 0x73fc, 0xa73b, 0x8c04, 0x58c5, 0xcff5, 0x58c5, 0xcff5,
-  0x58c5, 0x300b, 0x58c5, 0x300b, 0xa73b, 0x73fc, 0x58c5, 0x8c04,
-  0x45bf, 0x187e, 0x6862, 0xe782, 0x187e, 0x6862, 0x187e, 0xba41,
-  0x7b21, 0x6862, 0x84df, 0xba41, 0x45bf, 0x84df, 0x6862, 0x84df
+     0x58c5, 0x73fc, 0xa73b, 0x8c04, 0x58c5, 0xcff5, 0x58c5, 0xcff5,
+     0x58c5, 0x300b, 0x58c5, 0x300b, 0xa73b, 0x73fc, 0x58c5, 0x8c04,
+     0x45bf, 0x187e, 0x6862, 0xe782, 0x187e, 0x6862, 0x187e, 0xba41,
+     0x7b21, 0x6862, 0x84df, 0xba41, 0x45bf, 0x84df, 0x6862, 0x84df
  };
  
  DECLARE_ASM_CONST(16, int16_t, iTab3)[] = {
-  0x539f, 0x6d41, 0xac61, 0x92bf, 0x539f, 0xd2bf, 0x539f, 0xd2bf,
-  0x539f, 0x2d41, 0x539f, 0x2d41, 0xac61, 0x6d41, 0x539f, 0x92bf,
-  0x41b3, 0x1712, 0x6254, 0xe8ee, 0x1712, 0x6254, 0x1712, 0xbe4d,
-  0x73fc, 0x6254, 0x8c04, 0xbe4d, 0x41b3, 0x8c04, 0x6254, 0x8c04
+     0x539f, 0x6d41, 0xac61, 0x92bf, 0x539f, 0xd2bf, 0x539f, 0xd2bf,
+     0x539f, 0x2d41, 0x539f, 0x2d41, 0xac61, 0x6d41, 0x539f, 0x92bf,
+     0x41b3, 0x1712, 0x6254, 0xe8ee, 0x1712, 0x6254, 0x1712, 0xbe4d,
+     0x73fc, 0x6254, 0x8c04, 0xbe4d, 0x41b3, 0x8c04, 0x6254, 0x8c04
  };
  
  DECLARE_ASM_CONST(16, int16_t, iTab4)[] = {
-  0x4b42, 0x6254, 0xb4be, 0x9dac, 0x4b42, 0xd746, 0x4b42, 0xd746,
-  0x4b42, 0x28ba, 0x4b42, 0x28ba, 0xb4be, 0x6254, 0x4b42, 0x9dac,
-  0x3b21, 0x14c3, 0x587e, 0xeb3d, 0x14c3, 0x587e, 0x14c3, 0xc4df,
-  0x6862, 0x587e, 0x979e, 0xc4df, 0x3b21, 0x979e, 0x587e, 0x979e
+     0x4b42, 0x6254, 0xb4be, 0x9dac, 0x4b42, 0xd746, 0x4b42, 0xd746,
+     0x4b42, 0x28ba, 0x4b42, 0x28ba, 0xb4be, 0x6254, 0x4b42, 0x9dac,
+     0x3b21, 0x14c3, 0x587e, 0xeb3d, 0x14c3, 0x587e, 0x14c3, 0xc4df,
+     0x6862, 0x587e, 0x979e, 0xc4df, 0x3b21, 0x979e, 0x587e, 0x979e
  };
  
  DECLARE_ASM_CONST(16, int32_t, walkenIdctRounders)[] = {
-  65536, 65536, 65536, 65536,
-   3597,  3597,  3597,  3597,
-   2260,  2260,  2260,  2260,
-   1203,  1203,  1203,  1203,
-    120,   120,   120,   120,
-    512,   512,   512,   512
+     65536, 65536, 65536, 65536,
+      3597,  3597,  3597,  3597,
+      2260,  2260,  2260,  2260,
+      1203,  1203,  1203,  1203,
+       120,   120,   120,   120,
+       512,   512,   512,   512
  };
  
  // Temporary storage before the column pass
  
  #endif
  
 -#define ROUND(x) "paddd   "MANGLE(x)
 +#define ROUND(x) "paddd   "x
  
  #define JZ(reg, to)                         \
      "testl     "reg","reg"            \n\t" \
      "pmovmskb %%mm1, "reg"            \n\t"
  
  #define TEST_TWO_ROWS(row1, row2, reg1, reg2, clear1, clear2) \
-     clear1                                  \
-     clear2                                  \
-     "movq     "row1", %%mm1           \n\t" \
-     "por    8+"row1", %%mm1           \n\t" \
-     "movq     "row2", %%mm2           \n\t" \
-     "por    8+"row2", %%mm2           \n\t" \
-     "paddusb   %%mm0, %%mm1           \n\t" \
-     "paddusb   %%mm0, %%mm2           \n\t" \
-     "pmovmskb  %%mm1, "reg1"          \n\t" \
+     clear1                                                    \
+     clear2                                                    \
+     "movq     "row1", %%mm1           \n\t"                   \
+     "por    8+"row1", %%mm1           \n\t"                   \
+     "movq     "row2", %%mm2           \n\t"                   \
+     "por    8+"row2", %%mm2           \n\t"                   \
+     "paddusb   %%mm0, %%mm1           \n\t"                   \
+     "paddusb   %%mm0, %%mm2           \n\t"                   \
+     "pmovmskb  %%mm1, "reg1"          \n\t"                   \
      "pmovmskb  %%mm2, "reg2"          \n\t"
  
- ///IDCT pass on rows.
- #define iMTX_MULT(src, table, rounder, put) \
-     "movdqa        "src", %%xmm3      \n\t" \
-     "movdqa       %%xmm3, %%xmm0      \n\t" \
+ /// IDCT pass on rows.
+ #define iMTX_MULT(src, table, rounder, put)            \
+     "movdqa        "src", %%xmm3      \n\t"            \
+     "movdqa       %%xmm3, %%xmm0      \n\t"            \
      "pshufd   $0x11, %%xmm3, %%xmm1   \n\t" /* 4602 */ \
      "punpcklqdq   %%xmm0, %%xmm0      \n\t" /* 0246 */ \
-     "pmaddwd     "table", %%xmm0      \n\t" \
-     "pmaddwd  16+"table", %%xmm1      \n\t" \
+     "pmaddwd     "table", %%xmm0      \n\t"            \
+     "pmaddwd  16+"table", %%xmm1      \n\t"            \
      "pshufd   $0xBB, %%xmm3, %%xmm2   \n\t" /* 5713 */ \
      "punpckhqdq   %%xmm3, %%xmm3      \n\t" /* 1357 */ \
-     "pmaddwd  32+"table", %%xmm2      \n\t" \
-     "pmaddwd  48+"table", %%xmm3      \n\t" \
-     "paddd        %%xmm1, %%xmm0      \n\t" \
-     "paddd        %%xmm3, %%xmm2      \n\t" \
-     rounder",     %%xmm0              \n\t" \
-     "movdqa       %%xmm2, %%xmm3      \n\t" \
-     "paddd        %%xmm0, %%xmm2      \n\t" \
-     "psubd        %%xmm3, %%xmm0      \n\t" \
-     "psrad           $11, %%xmm2      \n\t" \
-     "psrad           $11, %%xmm0      \n\t" \
-     "packssdw     %%xmm0, %%xmm2      \n\t" \
-     put                                     \
+     "pmaddwd  32+"table", %%xmm2      \n\t"            \
+     "pmaddwd  48+"table", %%xmm3      \n\t"            \
+     "paddd        %%xmm1, %%xmm0      \n\t"            \
+     "paddd        %%xmm3, %%xmm2      \n\t"            \
+     rounder",     %%xmm0              \n\t"            \
+     "movdqa       %%xmm2, %%xmm3      \n\t"            \
+     "paddd        %%xmm0, %%xmm2      \n\t"            \
+     "psubd        %%xmm3, %%xmm0      \n\t"            \
+     "psrad           $11, %%xmm2      \n\t"            \
+     "psrad           $11, %%xmm0      \n\t"            \
+     "packssdw     %%xmm0, %%xmm2      \n\t"            \
+     put                                                \
      "1:                               \n\t"
  
  #define iLLM_HEAD                           \
      "movdqa   "MANGLE(tan3)", "TAN3"  \n\t" \
      "movdqa   "MANGLE(tan1)", "TAN1"  \n\t" \
  
- ///IDCT pass on columns.
+ /// IDCT pass on columns.
  #define iLLM_PASS(dct)                      \
      "movdqa   "TAN3", %%xmm1          \n\t" \
      "movdqa   "TAN1", %%xmm3          \n\t" \
      "movdqa   "REG4", 4*16("dct")     \n\t" \
      "movdqa   %%xmm7, 7*16("dct")     \n\t"
  
- ///IDCT pass on columns, assuming rows 4-7 are zero.
+ /// IDCT pass on columns, assuming rows 4-7 are zero.
  #define iLLM_PASS_SPARSE(dct)               \
      "pmulhw   %%xmm4, "TAN3"          \n\t" \
      "paddsw   %%xmm4, "TAN3"          \n\t" \
      "movdqa   %%xmm6, 4*16("dct")     \n\t" \
      "movdqa   "SREG2", 7*16("dct")    \n\t"
  
 -inline void ff_xvid_idct_sse2(short *block)
 +av_extern_inline void ff_xvid_idct_sse2(short *block)
  {
-     __asm__ volatile(
-     "movq     "MANGLE(m127)", %%mm0                              \n\t"
-     iMTX_MULT("(%0)",     MANGLE(iTab1), ROUND(MANGLE(walkenIdctRounders)),      PUT_EVEN(ROW0))
-     iMTX_MULT("1*16(%0)", MANGLE(iTab2), ROUND("1*16+"MANGLE(walkenIdctRounders)), PUT_ODD(ROW1))
-     iMTX_MULT("2*16(%0)", MANGLE(iTab3), ROUND("2*16+"MANGLE(walkenIdctRounders)), PUT_EVEN(ROW2))
-     TEST_TWO_ROWS("3*16(%0)", "4*16(%0)", "%%eax", "%%ecx", CLEAR_ODD(ROW3), CLEAR_EVEN(ROW4))
-     JZ("%%eax", "1f")
-     iMTX_MULT("3*16(%0)", MANGLE(iTab4), ROUND("3*16+"MANGLE(walkenIdctRounders)), PUT_ODD(ROW3))
-     TEST_TWO_ROWS("5*16(%0)", "6*16(%0)", "%%eax", "%%edx", CLEAR_ODD(ROW5), CLEAR_EVEN(ROW6))
-     TEST_ONE_ROW("7*16(%0)", "%%esi", CLEAR_ODD(ROW7))
-     iLLM_HEAD
-     ".p2align 4 \n\t"
-     JNZ("%%ecx", "2f")
-     JNZ("%%eax", "3f")
-     JNZ("%%edx", "4f")
-     JNZ("%%esi", "5f")
-     iLLM_PASS_SPARSE("%0")
-     "jmp 6f                                                      \n\t"
-     "2:                                                          \n\t"
-     iMTX_MULT("4*16(%0)", MANGLE(iTab1), "#", PUT_EVEN(ROW4))
-     "3:                                                          \n\t"
-     iMTX_MULT("5*16(%0)", MANGLE(iTab4), ROUND("4*16+"MANGLE(walkenIdctRounders)), PUT_ODD(ROW5))
-     JZ("%%edx", "1f")
-     "4:                                                          \n\t"
-     iMTX_MULT("6*16(%0)", MANGLE(iTab3), ROUND("5*16+"MANGLE(walkenIdctRounders)), PUT_EVEN(ROW6))
-     JZ("%%esi", "1f")
-     "5:                                                          \n\t"
-     iMTX_MULT("7*16(%0)", MANGLE(iTab2), ROUND("5*16+"MANGLE(walkenIdctRounders)), PUT_ODD(ROW7))
+     __asm__ volatile (
+         "movq     "MANGLE (m127) ", %%mm0                              \n\t"
 -        iMTX_MULT("(%0)",     MANGLE(iTab1), ROUND(walkenIdctRounders),          PUT_EVEN(ROW0))
 -        iMTX_MULT("1*16(%0)", MANGLE(iTab2), ROUND(walkenIdctRounders + 1 * 16), PUT_ODD(ROW1))
 -        iMTX_MULT("2*16(%0)", MANGLE(iTab3), ROUND(walkenIdctRounders + 2 * 16), PUT_EVEN(ROW2))
++        iMTX_MULT("(%0)",     MANGLE(iTab1), ROUND(MANGLE(walkenIdctRounders)),        PUT_EVEN(ROW0))
++        iMTX_MULT("1*16(%0)", MANGLE(iTab2), ROUND("1*16+"MANGLE(walkenIdctRounders)), PUT_ODD(ROW1))
++        iMTX_MULT("2*16(%0)", MANGLE(iTab3), ROUND("2*16+"MANGLE(walkenIdctRounders)), PUT_EVEN(ROW2))
+         TEST_TWO_ROWS("3*16(%0)", "4*16(%0)", "%%eax", "%%ecx", CLEAR_ODD(ROW3), CLEAR_EVEN(ROW4))
+         JZ("%%eax", "1f")
 -        iMTX_MULT("3*16(%0)", MANGLE(iTab4), ROUND(walkenIdctRounders + 3 * 16), PUT_ODD(ROW3))
++        iMTX_MULT("3*16(%0)", MANGLE(iTab4), ROUND("3*16+"MANGLE(walkenIdctRounders)), PUT_ODD(ROW3))
+         TEST_TWO_ROWS("5*16(%0)", "6*16(%0)", "%%eax", "%%edx", CLEAR_ODD(ROW5), CLEAR_EVEN(ROW6))
+         TEST_ONE_ROW("7*16(%0)", "%%esi", CLEAR_ODD(ROW7))
+         iLLM_HEAD
+         ".p2align 4 \n\t"
+         JNZ("%%ecx", "2f")
+         JNZ("%%eax", "3f")
+         JNZ("%%edx", "4f")
+         JNZ("%%esi", "5f")
+         iLLM_PASS_SPARSE("%0")
+         "jmp 6f                                                      \n\t"
+         "2:                                                          \n\t"
+         iMTX_MULT("4*16(%0)", MANGLE(iTab1), "#", PUT_EVEN(ROW4))
+         "3:                                                          \n\t"
 -        iMTX_MULT("5*16(%0)", MANGLE(iTab4), ROUND(walkenIdctRounders + 4 * 16), PUT_ODD(ROW5))
++        iMTX_MULT("5*16(%0)", MANGLE(iTab4), ROUND("4*16+"MANGLE(walkenIdctRounders)), PUT_ODD(ROW5))
+         JZ("%%edx", "1f")
+         "4:                                                          \n\t"
 -        iMTX_MULT("6*16(%0)", MANGLE(iTab3), ROUND(walkenIdctRounders + 5 * 16), PUT_EVEN(ROW6))
++        iMTX_MULT("6*16(%0)", MANGLE(iTab3), ROUND("5*16+"MANGLE(walkenIdctRounders)), PUT_EVEN(ROW6))
+         JZ("%%esi", "1f")
+         "5:                                                          \n\t"
 -        iMTX_MULT("7*16(%0)", MANGLE(iTab2), ROUND(walkenIdctRounders + 5 * 16), PUT_ODD(ROW7))
++        iMTX_MULT("7*16(%0)", MANGLE(iTab2), ROUND("5*16+"MANGLE(walkenIdctRounders)), PUT_ODD(ROW7))
  #if ARCH_X86_32
-     iLLM_HEAD
+         iLLM_HEAD
  #endif
-     iLLM_PASS("%0")
-     "6:                                                          \n\t"
-     : "+r"(block)
-     : NAMED_CONSTRAINTS_ARRAY(m127,iTab1,walkenIdctRounders,iTab2,iTab3,iTab4,tan3,tan1,tan2,sqrt2)
-     : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" ,
-                    "%xmm4" , "%xmm5" , "%xmm6" , "%xmm7" ,)
+         iLLM_PASS("%0")
+         "6:                                                          \n\t"
+         : "+r" (block)
 -        :
++        : NAMED_CONSTRAINTS_ARRAY(m127,iTab1,walkenIdctRounders,iTab2,iTab3,iTab4,tan3,tan1,tan2,sqrt2)
+         : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",
+                        "%xmm4", "%xmm5", "%xmm6", "%xmm7", )
  #if ARCH_X86_64
-       XMM_CLOBBERS("%xmm8" , "%xmm9" , "%xmm10", "%xmm11",
-                    "%xmm12", "%xmm13", "%xmm14",)
+           XMM_CLOBBERS("%xmm8", "%xmm9", "%xmm10", "%xmm11",
+                        "%xmm12", "%xmm13", "%xmm14", )
  #endif
-       "%eax", "%ecx", "%edx", "%esi", "memory"
-     );
+           "%eax", "%ecx", "%edx", "%esi", "memory");
  }
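
The label ladder above is a sparseness dispatch: rows 0-2 are always transformed, row 3 only when its test fires, and if rows 4-7 all test zero the cheaper iLLM_PASS_SPARSE column pass runs in place of iLLM_PASS. (Row 4 is the one whose rounder argument is the literal "#", commenting the paddd out entirely, since its bias is zero; rows 6 and 7 both reuse the 5*16 rounder offset.) A rough C model of the flow, with stub stand-ins for the asm macros and the fall-through micro-details simplified:

#include <stdint.h>

/* Stand-in stubs so the model is self-contained. */
static int row_is_zero(const int16_t *row)            /* TEST_*_ROW(S)    */
{
    for (int i = 0; i < 8; i++)
        if (row[i])
            return 0;
    return 1;
}
static void row_pass(int16_t *blk, int r) { (void) blk; (void) r; } /* iMTX_MULT */
static void col_pass(int16_t *blk)        { (void) blk; } /* iLLM_PASS        */
static void col_sparse(int16_t *blk)      { (void) blk; } /* iLLM_PASS_SPARSE */

static void idct_dispatch_model(int16_t *blk)
{
    int nz = 0;

    row_pass(blk, 0);
    row_pass(blk, 1);
    row_pass(blk, 2);
    if (!row_is_zero(blk + 8 * 3))
        row_pass(blk, 3);

    for (int r = 4; r < 8; r++)
        if (!row_is_zero(blk + 8 * r)) {
            row_pass(blk, r);
            nz = 1;
        }

    if (nz)
        col_pass(blk);        /* full column pass          */
    else
        col_sparse(blk);      /* rows 4-7 known to be zero */
}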
  
  void ff_xvid_idct_sse2_put(uint8_t *dest, int line_size, short *block)