1 /*****************************************************************************
2 * idctmmxext.c : MMX EXT IDCT module
3 *****************************************************************************
4 * Copyright (C) 1999-2001 VideoLAN
5 * $Id: idctmmxext.c,v 1.22 2002/04/21 11:23:03 gbazin Exp $
7 * Authors: Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
8 * Michel Lespinasse <walken@zoy.org>
9 * Peter Gubanov <peter@elecard.net.ru>
10 * (from the LiViD project)
11 * Christophe Massiot <massiot@via.ecp.fr>
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
26 *****************************************************************************/
28 /*****************************************************************************
30 *****************************************************************************/
34 #include <videolan/vlc.h>
39 #include "block_mmx.h"
41 /*****************************************************************************
43 *****************************************************************************/
/* Forward declaration: fills in this module's IDCT capability entry points
 * (the definition is supplied by the idct_decl.h include at end of file). */
44 static void idct_getfunctions( function_list_t * p_function_list );
46 /*****************************************************************************
47 * Build configuration tree.
48 *****************************************************************************/
/* VLC module declaration macros. Capability IDCT at priority 200 with an
 * MMXEXT CPU requirement: auto-selected on MMX-EXT machines ahead of the
 * plain-C / plain-MMX IDCT modules (which register lower priorities).
 * NOTE(review): the MODULE_CONFIG/ACTIVATE START markers are elided from
 * this view — confirm against the full file. */
53 SET_DESCRIPTION( _("MMX EXT IDCT module") )
54 ADD_CAPABILITY( IDCT, 200 )
55 ADD_REQUIREMENT( MMXEXT )
56 ADD_SHORTCUT( "mmxext" )
57 ADD_SHORTCUT( "idctmmxext" )
/* Activation hook: register our IDCT entry points with the module core. */
61 idct_getfunctions( &p_module->p_functions->idct );
/* Nothing needs tearing down on deactivation. */
64 MODULE_DEACTIVATE_START
65 MODULE_DEACTIVATE_STOP
67 /* Following functions are local */
69 /*****************************************************************************
70 * NormScan : This IDCT uses reordered coeffs, so we patch the scan table
71 *****************************************************************************/
72 static void NormScan( u8 ppi_scan[2][64] )
/* Rewrite each scan-table entry: keep the row bits (mask 0x38) and rotate
 * the three column bits — bits 2..1 shift down one, bit 0 moves up to bit 2.
 * Applied identically to both scan orders ([0] and [1]) so coefficients land
 * in the interleaved layout the row pass below expects.
 * NOTE(review): the local declarations and the load of `j` (presumably the
 * original scan-table entry) are elided from this view — confirm in full file. */
76 for( i = 0; i < 64; i++ )
79 ppi_scan[0][i] = (j & 0x38) | ((j & 6) >> 1) | ((j & 1) << 2);
82 ppi_scan[1][i] = (j & 0x38) | ((j & 6) >> 1) | ((j & 1) << 2);
86 /*****************************************************************************
88 *****************************************************************************/
/* round(): turn a floating-point bias into a fixed-point rounding constant
 * scaled by 1<<ROW_SHIFT (the +0.5 makes the (int) cast round to nearest). */
92 #define round(bias) ((int)(((bias)+0.5) * (1<<ROW_SHIFT)))
/* rounder(): duplicate that constant into both 32-bit lanes of an MMX qword,
 * ready for paddd against the two packed accumulators in Row(). */
93 #define rounder(bias) {round (bias), round (bias)}
/* table(): build one interleaved coefficient table for a pair of rows, laid
 * out to match the pmaddwd operand order used by RowHead/Row/RowMid.
 * NOTE(review): the macro's continuation lines are elided from this view. */
95 #define table(c1,c2,c3,c4,c5,c6,c7) { c4, c2, -c4, -c2, \
/*****************************************************************************
 * RowHead : start the 1-D row transform on the 8 coefficients at row+offset.
 * Loads the even half (x0 x2 x4 x6) and odd half (x1 x3 x5 x7) and issues the
 * first multiply-accumulate. The MMX register state it leaves behind
 * (mm0/mm2/mm4/mm5/mm6) is consumed by the following Row() call — these
 * __inline__ helpers are fragments of one hand-scheduled instruction stream
 * and must not be reordered.
 *****************************************************************************/
104 static __inline__ void RowHead( dctelem_t * row, int offset, dctelem_t * table )
106 movq_m2r (*(row+offset), mm2); // mm2 = x6 x4 x2 x0
108 movq_m2r (*(row+offset+4), mm5); // mm5 = x7 x5 x3 x1
109 movq_r2r (mm2, mm0); // mm0 = x6 x4 x2 x0
111 movq_m2r (*table, mm3); // mm3 = -C2 -C4 C2 C4
112 movq_r2r (mm5, mm6); // mm6 = x7 x5 x3 x1
114 movq_m2r (*(table+4), mm4); // mm4 = C6 C4 C6 C4
115 pmaddwd_r2r (mm0, mm3); // mm3 = -C4*x4-C2*x6 C4*x0+C2*x2
/* pshufw is the MMX-EXT instruction that makes this module require MMXEXT. */
117 pshufw_r2r (mm2, mm2, 0x4e); // mm2 = x2 x0 x6 x4
/*****************************************************************************
 * Row : middle of the row butterfly. Continues from the register state left
 * by RowHead()/RowMid(): forms the even-part sums a0..a3 and odd-part sums
 * b0..b3 via pmaddwd against the interleaved coefficient table, adds the
 * per-row rounding bias, and produces y6 y7 (mm3) and y1 y0 (mm1) already
 * shifted; mm0/mm4 hold a2/a3 ± b2/b3 for RowMid()/RowTail() to finish.
 *****************************************************************************/
120 static __inline__ void Row( dctelem_t * table, s32 * rounder )
122 movq_m2r (*(table+8), mm1); // mm1 = -C5 -C1 C3 C1
123 pmaddwd_r2r (mm2, mm4); // mm4 = C4*x0+C6*x2 C4*x4+C6*x6
125 pmaddwd_m2r (*(table+16), mm0); // mm0 = C4*x4-C6*x6 C4*x0-C6*x2
126 pshufw_r2r (mm6, mm6, 0x4e); // mm6 = x3 x1 x7 x5
128 movq_m2r (*(table+12), mm7); // mm7 = -C7 C3 C7 C5
129 pmaddwd_r2r (mm5, mm1); // mm1 = -C1*x5-C5*x7 C1*x1+C3*x3
131 paddd_m2r (*rounder, mm3); // mm3 += rounder
132 pmaddwd_r2r (mm6, mm7); // mm7 = C3*x1-C7*x3 C5*x5+C7*x7
134 pmaddwd_m2r (*(table+20), mm2); // mm2 = C4*x0-C2*x2 -C4*x4+C2*x6
135 paddd_r2r (mm4, mm3); // mm3 = a1 a0 + rounder
137 pmaddwd_m2r (*(table+24), mm5); // mm5 = C3*x5-C1*x7 C5*x1-C1*x3
138 movq_r2r (mm3, mm4); // mm4 = a1 a0 + rounder
140 pmaddwd_m2r (*(table+28), mm6); // mm6 = C7*x1-C5*x3 C7*x5+C3*x7
141 paddd_r2r (mm7, mm1); // mm1 = b1 b0
143 paddd_m2r (*rounder, mm0); // mm0 += rounder
144 psubd_r2r (mm1, mm3); // mm3 = a1-b1 a0-b0 + rounder
146 psrad_i2r (ROW_SHIFT, mm3); // mm3 = y6 y7
147 paddd_r2r (mm4, mm1); // mm1 = a1+b1 a0+b0 + rounder
149 paddd_r2r (mm2, mm0); // mm0 = a3 a2 + rounder
150 psrad_i2r (ROW_SHIFT, mm1); // mm1 = y1 y0
152 paddd_r2r (mm6, mm5); // mm5 = b3 b2
153 movq_r2r (mm0, mm4); // mm4 = a3 a2 + rounder
155 paddd_r2r (mm5, mm0); // mm0 = a3+b3 a2+b2 + rounder
156 psubd_r2r (mm5, mm4); // mm4 = a3-b3 a2-b2 + rounder
/*****************************************************************************
 * RowTail : finish the final row pair. Shifts the remaining accumulators
 * down by ROW_SHIFT, packs the four 32-bit results back to 16-bit, swaps
 * y6 y7 / y4 y5 into natural order with pshufw, and stores y0..y7 at
 * row+store. Consumes the mm0/mm1/mm3/mm4 state left by the last Row().
 *****************************************************************************/
159 static __inline__ void RowTail( dctelem_t * row, int store )
161 psrad_i2r (ROW_SHIFT, mm0); // mm0 = y3 y2
163 psrad_i2r (ROW_SHIFT, mm4); // mm4 = y4 y5
165 packssdw_r2r (mm0, mm1); // mm1 = y3 y2 y1 y0
167 packssdw_r2r (mm3, mm4); // mm4 = y6 y7 y4 y5
169 movq_r2m (mm1, *(row+store)); // save y3 y2 y1 y0
/* 0xb1 swaps adjacent word pairs: (y6 y7 y4 y5) -> (y7 y6 y5 y4). */
170 pshufw_r2r (mm4, mm4, 0xb1); // mm4 = y7 y6 y5 y4
173 movq_r2m (mm4, *(row+store+4)); // save y7 y6 y5 y4
/*****************************************************************************
 * RowMid : pipeline splice between two row passes — stores the finished
 * results of the previous pair at row+store (same tail work as RowTail)
 * while simultaneously loading and priming the next pair at row+offset
 * (same head work as RowHead). Interleaving the two hides load/store
 * latency; instruction order is load-bearing, do not reorder.
 *****************************************************************************/
176 static __inline__ void RowMid( dctelem_t * row, int store,
177 int offset, dctelem_t * table)
179 movq_m2r (*(row+offset), mm2); // mm2 = x6 x4 x2 x0
180 psrad_i2r (ROW_SHIFT, mm0); // mm0 = y3 y2
182 movq_m2r (*(row+offset+4), mm5); // mm5 = x7 x5 x3 x1
183 psrad_i2r (ROW_SHIFT, mm4); // mm4 = y4 y5
185 packssdw_r2r (mm0, mm1); // mm1 = y3 y2 y1 y0
186 movq_r2r (mm5, mm6); // mm6 = x7 x5 x3 x1
188 packssdw_r2r (mm3, mm4); // mm4 = y6 y7 y4 y5
189 movq_r2r (mm2, mm0); // mm0 = x6 x4 x2 x0
191 movq_r2m (mm1, *(row+store)); // save y3 y2 y1 y0
192 pshufw_r2r (mm4, mm4, 0xb1); // mm4 = y7 y6 y5 y4
194 movq_m2r (*table, mm3); // mm3 = -C2 -C4 C2 C4
195 movq_r2m (mm4, *(row+store+4)); // save y7 y6 y5 y4
197 pmaddwd_r2r (mm0, mm3); // mm3 = -C4*x4-C2*x6 C4*x0+C2*x2
199 movq_m2r (*(table+4), mm4); // mm4 = C6 C4 C6 C4
200 pshufw_r2r (mm2, mm2, 0x4e); // mm2 = x2 x0 x6 x4
/*****************************************************************************
 * Col : 1-D column transform over four columns at once (16-bit SIMD lanes),
 * starting at col+offset; called for offsets 0 and 4 to cover all 8 columns.
 * Classic even/odd butterfly: u/v intermediate pairs, even part a0..a3,
 * odd part b0..b3, then y[k] = (a ± b) >> COL_SHIFT stored back in place.
 * Only two spill slots (scratch0/scratch1) are needed beyond mm0-mm7.
 * NOTE(review): T1/T2/T3/C4 are presumably fixed-point tangent/cosine
 * constants defined earlier in the full file — confirm; their definitions
 * are not visible in this view.
 *****************************************************************************/
203 static __inline__ void Col( dctelem_t * col, int offset )
/* Constants replicated across all four 16-bit lanes for pmulhw. */
210 static short _T1[] ATTR_ALIGN(8) = {T1,T1,T1,T1};
211 static short _T2[] ATTR_ALIGN(8) = {T2,T2,T2,T2};
212 static short _T3[] ATTR_ALIGN(8) = {T3,T3,T3,T3};
213 static short _C4[] ATTR_ALIGN(8) = {C4,C4,C4,C4};
/* Spill space for b3 and b0 while all eight MMX registers are busy. */
214 static mmx_t scratch0, scratch1;
216 /* column code adapted from peter gubanov */
217 /* http://www.elecard.com/peter/idct.shtml */
219 movq_m2r (*_T1, mm0); // mm0 = T1
221 movq_m2r (*(col+offset+1*8), mm1); // mm1 = x1
222 movq_r2r (mm0, mm2); // mm2 = T1
224 movq_m2r (*(col+offset+7*8), mm4); // mm4 = x7
225 pmulhw_r2r (mm1, mm0); // mm0 = T1*x1
227 movq_m2r (*_T3, mm5); // mm5 = T3
228 pmulhw_r2r (mm4, mm2); // mm2 = T1*x7
230 movq_m2r (*(col+offset+5*8), mm6); // mm6 = x5
231 movq_r2r (mm5, mm7); // mm7 = T3-1
233 movq_m2r (*(col+offset+3*8), mm3); // mm3 = x3
234 psubsw_r2r (mm4, mm0); // mm0 = v17
236 movq_m2r (*_T2, mm4); // mm4 = T2
237 pmulhw_r2r (mm3, mm5); // mm5 = (T3-1)*x3
239 paddsw_r2r (mm2, mm1); // mm1 = u17
240 pmulhw_r2r (mm6, mm7); // mm7 = (T3-1)*x5
244 movq_r2r (mm4, mm2); // mm2 = T2
/* pmulhw keeps only the high 16 bits, i.e. multiplies by (T3-1); adding the
 * operand back reconstructs the full T3*x product. Same trick for T2 below. */
245 paddsw_r2r (mm3, mm5); // mm5 = T3*x3
247 pmulhw_m2r (*(col+offset+2*8), mm4);// mm4 = T2*x2
248 paddsw_r2r (mm6, mm7); // mm7 = T3*x5
250 psubsw_r2r (mm6, mm5); // mm5 = v35
251 paddsw_r2r (mm3, mm7); // mm7 = u35
253 movq_m2r (*(col+offset+6*8), mm3); // mm3 = x6
254 movq_r2r (mm0, mm6); // mm6 = v17
256 pmulhw_r2r (mm3, mm2); // mm2 = T2*x6
257 psubsw_r2r (mm5, mm0); // mm0 = b3
259 psubsw_r2r (mm3, mm4); // mm4 = v26
260 paddsw_r2r (mm6, mm5); // mm5 = v12
262 movq_r2m (mm0, scratch0); // save b3
263 movq_r2r (mm1, mm6); // mm6 = u17
265 paddsw_m2r (*(col+offset+2*8), mm2);// mm2 = u26
266 paddsw_r2r (mm7, mm6); // mm6 = b0
268 psubsw_r2r (mm7, mm1); // mm1 = u12
269 movq_r2r (mm1, mm7); // mm7 = u12
271 movq_m2r (*(col+offset+0*8), mm3); // mm3 = x0
272 paddsw_r2r (mm5, mm1); // mm1 = u12+v12
274 movq_m2r (*_C4, mm0); // mm0 = C4/2
275 psubsw_r2r (mm5, mm7); // mm7 = u12-v12
277 movq_r2m (mm6, scratch1); // save b0
/* b1/b2 come out halved here (pmulhw by C4/2); doubled back below. */
278 pmulhw_r2r (mm0, mm1); // mm1 = b1/2
280 movq_r2r (mm4, mm6); // mm6 = v26
281 pmulhw_r2r (mm0, mm7); // mm7 = b2/2
283 movq_m2r (*(col+offset+4*8), mm5); // mm5 = x4
284 movq_r2r (mm3, mm0); // mm0 = x0
286 psubsw_r2r (mm5, mm3); // mm3 = v04
287 paddsw_r2r (mm5, mm0); // mm0 = u04
289 paddsw_r2r (mm3, mm4); // mm4 = a1
290 movq_r2r (mm0, mm5); // mm5 = u04
292 psubsw_r2r (mm6, mm3); // mm3 = a2
293 paddsw_r2r (mm2, mm5); // mm5 = a0
295 paddsw_r2r (mm1, mm1); // mm1 = b1
296 psubsw_r2r (mm2, mm0); // mm0 = a3
298 paddsw_r2r (mm7, mm7); // mm7 = b2
299 movq_r2r (mm3, mm2); // mm2 = a2
301 movq_r2r (mm4, mm6); // mm6 = a1
302 paddsw_r2r (mm7, mm3); // mm3 = a2+b2
304 psraw_i2r (COL_SHIFT, mm3); // mm3 = y2
305 paddsw_r2r (mm1, mm4); // mm4 = a1+b1
307 psraw_i2r (COL_SHIFT, mm4); // mm4 = y1
308 psubsw_r2r (mm1, mm6); // mm6 = a1-b1
310 movq_m2r (scratch1, mm1); // mm1 = b0
311 psubsw_r2r (mm7, mm2); // mm2 = a2-b2
313 psraw_i2r (COL_SHIFT, mm6); // mm6 = y6
314 movq_r2r (mm5, mm7); // mm7 = a0
316 movq_r2m (mm4, *(col+offset+1*8)); // save y1
317 psraw_i2r (COL_SHIFT, mm2); // mm2 = y5
319 movq_r2m (mm3, *(col+offset+2*8)); // save y2
320 paddsw_r2r (mm1, mm5); // mm5 = a0+b0
322 movq_m2r (scratch0, mm4); // mm4 = b3
323 psubsw_r2r (mm1, mm7); // mm7 = a0-b0
325 psraw_i2r (COL_SHIFT, mm5); // mm5 = y0
326 movq_r2r (mm0, mm3); // mm3 = a3
328 movq_r2m (mm2, *(col+offset+5*8)); // save y5
329 psubsw_r2r (mm4, mm3); // mm3 = a3-b3
331 psraw_i2r (COL_SHIFT, mm7); // mm7 = y7
332 paddsw_r2r (mm0, mm4); // mm4 = a3+b3
334 movq_r2m (mm5, *(col+offset+0*8)); // save y0
335 psraw_i2r (COL_SHIFT, mm3); // mm3 = y4
337 movq_r2m (mm6, *(col+offset+6*8)); // save y6
338 psraw_i2r (COL_SHIFT, mm4); // mm4 = y3
340 movq_r2m (mm7, *(col+offset+7*8)); // save y7
342 movq_r2m (mm3, *(col+offset+4*8)); // save y4
344 movq_r2m (mm4, *(col+offset+3*8)); // save y3
/* Per-frequency rounding biases fed to Row(), one qword per row pair; each
 * trailing comment gives the closed-form value in terms of the DCT cosines.
 * rounder0 additionally folds in the column pass's final rounding
 * (half of 1<<COL_SHIFT) so the column code can use a plain shift. */
348 static s32 rounder0[] ATTR_ALIGN(8) =
349 rounder ((1 << (COL_SHIFT - 1)) - 0.5);
350 static s32 rounder4[] ATTR_ALIGN(8) = rounder (0);
351 static s32 rounder1[] ATTR_ALIGN(8) =
352 rounder (1.25683487303); // C1*(C1/C4+C1+C7)/2
353 static s32 rounder7[] ATTR_ALIGN(8) =
354 rounder (-0.25); // C1*(C7/C4+C7-C1)/2
355 static s32 rounder2[] ATTR_ALIGN(8) =
356 rounder (0.60355339059); // C2 * (C6+C2)/2
357 static s32 rounder6[] ATTR_ALIGN(8) =
358 rounder (-0.25); // C2 * (C6-C2)/2
359 static s32 rounder3[] ATTR_ALIGN(8) =
360 rounder (0.087788325588); // C3*(-C3/C4+C3+C5)/2
361 static s32 rounder5[] ATTR_ALIGN(8) =
362 rounder (-0.441341716183); // C3*(-C5/C4+C5-C3)/2
/*****************************************************************************
 * IDCT : full 2-D 8x8 inverse DCT on p_block, in place.
 * Row pass runs over the 8 rows in the pipelined order 0,4,1,7,2,6,3,5 —
 * rows sharing a coefficient table are consecutive so each table is loaded
 * once, and RowMid overlaps one row's stores with the next row's loads.
 * Table constants are round(16384 * Ck) scaled cosine values (AP-922 style).
 * NOTE(review): the column-pass calls (Col at offsets 0 and 4) appear to be
 * elided from this view — they follow RowTail in the full file; confirm.
 *****************************************************************************/
364 static void IDCT( dctelem_t * p_block )
366 static dctelem_t table04[] ATTR_ALIGN(16) =
367 table (22725, 21407, 19266, 16384, 12873, 8867, 4520);
368 static dctelem_t table17[] ATTR_ALIGN(16) =
369 table (31521, 29692, 26722, 22725, 17855, 12299, 6270);
370 static dctelem_t table26[] ATTR_ALIGN(16) =
371 table (29692, 27969, 25172, 21407, 16819, 11585, 5906);
372 static dctelem_t table35[] ATTR_ALIGN(16) =
373 table (26722, 25172, 22654, 19266, 15137, 10426, 5315);
375 RowHead( p_block, 0*8, table04 );
376 Row( table04, rounder0 );
377 RowMid( p_block, 0*8, 4*8, table04 );
378 Row( table04, rounder4 );
379 RowMid( p_block, 4*8, 1*8, table17 );
380 Row( table17, rounder1 );
381 RowMid( p_block, 1*8, 7*8, table17 );
382 Row( table17, rounder7 );
383 RowMid( p_block, 7*8, 2*8, table26 );
384 Row( table26, rounder2 );
385 RowMid( p_block, 2*8, 6*8, table26 );
386 Row( table26, rounder6 );
387 RowMid( p_block, 6*8, 3*8, table35 );
388 Row( table35, rounder3 );
389 RowMid( p_block, 3*8, 5*8, table35 );
390 Row( table35, rounder5 );
391 RowTail( p_block, 5*8);
/*****************************************************************************
 * RestoreCPUState : leave MMX mode. The emms instruction resets the FPU tag
 * word so the x87 register stack (aliased onto mm0-mm7) is usable again;
 * it must run after the MMX code above, before any floating-point math.
 *****************************************************************************/
397 static __inline__ void RestoreCPUState( )
399 /* reenables the FPU */
400 __asm__ __volatile__ ("emms");
403 #include "idct_sparse.h"
404 #include "idct_decl.h"