1 /*****************************************************************************
2 * vdec_idctmmx.S : MMX IDCT implementation
3 *****************************************************************************
4 * Copyright (C) 1999, 2000 VideoLAN
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
21 *****************************************************************************/
24 * the input data is transposed and each 16 bit element in the 8x8 matrix
26 * for example in 11...1110000 format
27 * If the iDCT is of I macroblock then 0.5 needs to be added to the DC Component
28 * (element[0][0] of the matrix)
/* preSC: 8x8 table of 16-bit constants, one per coefficient position.
 * Presumably the per-coefficient prescale multiplied into the input via
 * pmulhw before the 1-D passes below -- TODO confirm against the full file
 * (the code that indexes preSC is not visible in this chunk).
 * NOTE(review): rows 5-7 break the symmetry of rows 0-4 (e.g. the second
 * half of row 5 holds doubled values: 25746 = 2*12873); this matches the
 * historical table, so verify before "fixing" it. */
34 preSC: .short 16384,22725,21407,19266,16384,12873,8867,4520
35 .short 22725,31521,29692,26722,22725,17855,12299,6270
36 .short 21407,29692,27969,25172,21407,16819,11585,5906
37 .short 19266,26722,25172,22654,19266,15137,10426,5315
38 .short 16384,22725,21407,19266,16384,12873,8867,4520
39 .short 12873,17855,16819,15137,25746,20228,13933,7103
40 .short 17734,24598,23170,20853,17734,13933,9597,4892
41 .short 18081,25080,23624,21261,18081,14206,9785,4988
/* 64-bit MMX constants: each pair of .long values forms one mmword of
 * four 16-bit lanes.  The replicated 16-bit words are the fixed-point
 * multipliers quoted in decimal by the code comments below:
 *   0x5a82 = 23170,  0x539f = 21407,  0x4546 = 17734,  0x61f8 = 25080
 * x0005000200010001 is the additive rounding correction applied to the
 * V15 term (see the 'magic constant' note near its paddw use).
 * NOTE(review): the label lines and the data for x0040000000000000 are
 * not visible in this chunk of the file. */
44 .type x0005000200010001,@object
45 .size x0005000200010001,8
47 .long 0x00010001,0x00050002
49 .type x0040000000000000,@object
50 .size x0040000000000000,8
54 .type x5a825a825a825a82,@object
55 .size x5a825a825a825a82,8
57 .long 0x5a825a82, 0x5a825a82
59 .type x539f539f539f539f,@object
60 .size x539f539f539f539f,8
62 .long 0x539f539f,0x539f539f
64 .type x4546454645464546,@object
65 .size x4546454645464546,8
67 .long 0x45464546,0x45464546
69 .type x61f861f861f861f8,@object
70 .size x61f861f861f861f8,8
72 .long 0x61f861f8,0x61f861f8
/* scratch1/3/5/7: 8-byte spill slots used to park the odd output rows
 * (out1/out3/out5/out7 -- see 'movq %mm2, scratch1' etc. below) while the
 * mm registers are needed for other work.
 * NOTE(review): only the .type directives are visible here; the labels,
 * sizes and storage reservations are elsewhere in the file. */
74 .type scratch1,@object
79 .type scratch3,@object
84 .type scratch5,@object
89 .type scratch7,@object
/* vdec_IDCT -- in-place iDCT of one 8x8 block of 16-bit coefficients (MMX).
 * NOTE(review): the function label and prologue are not visible in this
 * chunk.  From the code below: %esi points at the 8x8 16-bit block, read
 * and rewritten in place (one mmword = 4 shorts, rows at 8*k(%esi));
 * %ecx points at a second 8x8 16-bit operand combined via pmulhw with the
 * matching row of (%esi) -- presumably the preSC table above; confirm
 * against the full file.  The Vnn/tnn names in the comments track the
 * intermediate values of the scaled-iDCT flowgraph. */
100 /* this seems to annoy the compiler in -g mode, is it normal ? */
102 .type vdec_IDCT,@function
161 /* column 0: even part
162 * use V4, V12, V0, V8 to produce V22..V25
164 movq 8*12(%ecx), %mm0 /* maybe the first mul can be done together */
165 /* with the dequantization in iHuff module */
166 pmulhw 8*12(%esi), %mm0 /* V12 */
168 pmulhw 8*4(%esi), %mm1 /* V4 */
170 psraw $1, %mm0 /* t64=t66 */
171 pmulhw (%esi), %mm3 /* V0 */
172 movq 8*8(%ecx), %mm5 /* duplicate V4 */
173 movq %mm1, %mm2 /* added 11/1/96 */
174 pmulhw 8*8(%esi),%mm5 /* V8 */
175 psubsw %mm0, %mm1 /* V16 */
176 pmulhw x5a825a825a825a82, %mm1 /* 23170 ->V18 */
177 paddsw %mm0, %mm2 /* V17 */
178 movq %mm2, %mm0 /* duplicate V17 */
179 psraw $1, %mm2 /* t75=t82 */
180 psraw $2, %mm0 /* t72 */
181 movq %mm3, %mm4 /* duplicate V0 */
182 paddsw %mm5, %mm3 /* V19 */
183 psubsw %mm5, %mm4 /* V20 ;mm5 free */
184 /* moved from the block below */
185 movq 8*10(%ecx), %mm7
186 psraw $1, %mm3 /* t74=t81 */
187 movq %mm3, %mm6 /* duplicate t74=t81 */
188 psraw $2, %mm4 /* t77=t79 */
189 psubsw %mm0, %mm1 /* V21 ; mm0 free */
190 paddsw %mm2, %mm3 /* V22 */
191 movq %mm1, %mm5 /* duplicate V21 */
192 paddsw %mm4, %mm1 /* V23 */
193 movq %mm3, 8*4(%esi) /* V22 */
194 psubsw %mm5, %mm4 /* V24; mm5 free */
195 movq %mm1, 8*12(%esi) /* V23 */
196 psubsw %mm2, %mm6 /* V25; mm2 free */
197 movq %mm4, (%esi) /* V24 */
198 /* keep mm6 alive all along the next block */
199 /* movq %mm6, 8*8(%esi) V25 */
/* Column 0, odd part.  The pmulhw operands x539f…/x4546…/x61f8…/x5a82…
 * are the replicated 16-bit multipliers 21407/17734/25080/23170 declared
 * above.  Loads/duplicates tagged "moved from the next block" are hoisted
 * here by hand to fill pipeline slots -- do not reorder. */
200 /* column 0: odd part
201 * use V2, V6, V10, V14 to produce V31, V39, V40, V41
203 /* moved above: movq 8*10(%ecx), %mm7 */
205 pmulhw 8*10(%esi), %mm7 /* V10 */
207 pmulhw 8*6(%esi), %mm0 /* V6 */
209 movq %mm7, %mm3 /* duplicate V10 */
210 pmulhw 8*2(%esi), %mm5 /* V2 */
211 movq 8*14(%ecx), %mm4
212 psubsw %mm0, %mm7 /* V26 */
213 pmulhw 8*14(%esi), %mm4 /* V14 */
214 paddsw %mm0, %mm3 /* V29 ; free mm0 */
215 movq %mm7, %mm1 /* duplicate V26 */
216 psraw $1, %mm3 /* t91=t94 */
217 pmulhw x539f539f539f539f,%mm7 /* V33 */
218 psraw $1, %mm1 /* t96 */
219 movq %mm5, %mm0 /* duplicate V2 */
220 psraw $2, %mm4 /* t85=t87 */
221 paddsw %mm4,%mm5 /* V27 */
222 psubsw %mm4, %mm0 /* V28 ; free mm4 */
223 movq %mm0, %mm2 /* duplicate V28 */
224 psraw $1, %mm5 /* t90=t93 */
225 pmulhw x4546454645464546,%mm0 /* V35 */
226 psraw $1, %mm2 /* t97 */
227 movq %mm5, %mm4 /* duplicate t90=t93 */
228 psubsw %mm2, %mm1 /* V32 ; free mm2 */
229 pmulhw x61f861f861f861f8,%mm1 /* V36 */
230 psllw $1, %mm7 /* t107 */
231 paddsw %mm3, %mm5 /* V31 */
232 psubsw %mm3, %mm4 /* V30 ; free mm3 */
233 pmulhw x5a825a825a825a82,%mm4 /* V34 */
235 psubsw %mm1, %mm0 /* V38 */
236 psubsw %mm7, %mm1 /* V37 ; free mm7 */
237 psllw $1, %mm1 /* t114 */
238 /* move from the next block */
239 movq %mm6, %mm3 /* duplicate V25 */
240 /* move from the next block */
241 movq 8*4(%esi), %mm7 /* V22 */
242 psllw $1, %mm0 /* t110 */
243 psubsw %mm5, %mm0 /* V39 (mm5 needed for next block) */
244 psllw $2, %mm4 /* t112 */
245 /* moved from the next block */
246 movq 8*12(%esi), %mm2 /* V23 */
247 psubsw %mm0, %mm4 /* V40 */
248 paddsw %mm4, %mm1 /* V41; free mm0 */
249 /* moved from the next block */
250 psllw $1, %mm2 /* t117=t125 */
/* Column 0, output butterfly: combine the even (V22..V25) and odd
 * (V31/V39/V40/V41) halves into the tm0..tm14 rows, stored back into the
 * even row slots of (%esi).  The starred lines recap instructions that
 * were hoisted into the previous section. */
251 /* column 0: output butterfly */
253 * movq %mm6, %mm3 duplicate V25
254 * movq 8*4(%esi), %mm7 V22
255 * movq 8*12(%esi), %mm2 V23
256 * psllw $1, %mm2 t117=t125
258 psubsw %mm1, %mm6 /* tm6 */
259 paddsw %mm1, %mm3 /* tm8; free mm1 */
260 movq %mm7, %mm1 /* duplicate V22 */
261 paddsw %mm5, %mm7 /* tm0 */
262 movq %mm3, 8*8(%esi) /* tm8; free mm3 */
263 psubsw %mm5, %mm1 /* tm14; free mm5 */
264 movq %mm6, 8*6(%esi) /* tm6; free mm6 */
265 movq %mm2, %mm3 /* duplicate t117=t125 */
266 movq (%esi), %mm6 /* V24 */
267 paddsw %mm0, %mm2 /* tm2 */
268 movq %mm7, (%esi) /* tm0; free mm7 */
269 psubsw %mm0, %mm3 /* tm12; free mm0 */
270 movq %mm1, 8*14(%esi) /* tm14; free mm1 */
271 psllw $1, %mm6 /* t119=t123 */
272 movq %mm2, 8*2(%esi) /* tm2; free mm2 */
273 movq %mm6, %mm0 /* duplicate t119=t123 */
274 movq %mm3, 8*12(%esi) /* tm12; free mm3 */
275 paddsw %mm4, %mm6 /* tm4 */
276 /* moved from next block */
278 psubsw %mm4, %mm0 /* tm10; free mm4 */
279 /* moved from next block */
280 pmulhw 8*5(%esi), %mm1 /* V5 */
281 movq %mm6, 8*4(%esi) /* tm4; free mm6 */
282 movq %mm0, 8*10(%esi) /* tm10; free mm0 */
/* Column 1 (the odd row slots 8*1,8*3,...,8*15 of the block), even part.
 * Same structure as the column-0 even part; results V56..V58 stored,
 * V59 kept live in mm7 for the following section. */
283 /* column 1: even part
284 * use V5, V13, V1, V9 to produce V56..V59
286 /* moved to prev block:
287 * movq 8*5(%ecx), %mm1
288 * pmulhw 8*5(%esi), %mm1 V5
290 movq 8*13(%ecx), %mm7
291 psllw $1, %mm1 /* t128=t130 */
292 pmulhw 8*13(%esi), %mm7 /* V13 */
293 movq %mm1, %mm2 /* duplicate t128=t130 */
295 pmulhw 8(%esi), %mm3 /* V1 */
297 psubsw %mm7, %mm1 /* V50 */
298 pmulhw 8*9(%esi), %mm5 /* V9 */
299 paddsw %mm7, %mm2 /* V51 */
300 pmulhw x5a825a825a825a82, %mm1 /* 23170 ->V52 */
301 movq %mm2, %mm6 /* duplicate V51 */
302 psraw $1, %mm2 /* t138=t144 */
303 movq %mm3, %mm4 /* duplicate V1 */
304 psraw $2, %mm6 /* t136 */
305 paddsw %mm5, %mm3 /* V53 */
306 psubsw %mm5, %mm4 /* V54 ;mm5 free */
307 movq %mm3, %mm7 /* duplicate V53 */
308 /* moved from next block */
309 movq 8*11(%ecx), %mm0
310 psraw $1, %mm4 /* t140=t142 */
311 psubsw %mm6, %mm1 /* V55 ; mm6 free */
312 paddsw %mm2, %mm3 /* V56 */
313 movq %mm4, %mm5 /* duplicate t140=t142 */
314 paddsw %mm1, %mm4 /* V57 */
315 movq %mm3, 8*5(%esi) /* V56 */
316 psubsw %mm1, %mm5 /* V58; mm1 free */
317 movq %mm4, 8*13(%esi) /* V57 */
318 psubsw %mm2, %mm7 /* V59; mm2 free */
319 movq %mm5, 8*9(%esi) /* V58 */
320 /* keep mm7 alive all along the next block
321 * movq %mm7, 8(%esi) V59
323 * movq 8*11(%ecx), %mm0
/* Column 1, odd part (V7/V11/V15/V3 inputs).  Note the explicit rounding
 * correction added to the V15 product via x0005000200010001. */
325 pmulhw 8*11(%esi), %mm0 /* V11 */
327 pmulhw 8*7(%esi), %mm6 /* V7 */
328 movq 8*15(%ecx), %mm4
329 movq %mm0, %mm3 /* duplicate V11 */
330 pmulhw 8*15(%esi), %mm4 /* V15 */
332 psllw $1, %mm6 /* t146=t152 */
333 pmulhw 8*3(%esi), %mm5 /* V3 */
334 paddsw %mm6, %mm0 /* V63 */
335 /* note that V15 computation has a correction step:
336 * this is a 'magic' constant that rebiases the results to be closer to the
337 * expected result. this magic constant can be refined to reduce the error
338 * even more by doing the correction step in a later stage when the number
339 * is actually multiplied by 16
341 paddw x0005000200010001, %mm4
342 psubsw %mm6, %mm3 /* V60 ; free mm6 */
343 psraw $1, %mm0 /* t154=t156 */
344 movq %mm3, %mm1 /* duplicate V60 */
345 pmulhw x539f539f539f539f, %mm1 /* V67 */
346 movq %mm5, %mm6 /* duplicate V3 */
347 psraw $2, %mm4 /* t148=t150 */
348 paddsw %mm4, %mm5 /* V61 */
349 psubsw %mm4, %mm6 /* V62 ; free mm4 */
350 movq %mm5, %mm4 /* duplicate V61 */
351 psllw $1, %mm1 /* t169 */
352 paddsw %mm0, %mm5 /* V65 -> result */
353 psubsw %mm0, %mm4 /* V64 ; free mm0 */
354 pmulhw x5a825a825a825a82, %mm4 /* V68 */
355 psraw $1, %mm3 /* t158 */
356 psubsw %mm6, %mm3 /* V66 */
357 movq %mm5, %mm2 /* duplicate V65 */
358 pmulhw x61f861f861f861f8, %mm3 /* V70 */
359 psllw $1, %mm6 /* t165 */
360 pmulhw x4546454645464546, %mm6 /* V69 */
361 psraw $1, %mm2 /* t172 */
362 /* moved from next block */
363 movq 8*5(%esi), %mm0 /* V56 */
364 psllw $1, %mm4 /* t174 */
365 /* moved from next block */
366 psraw $1, %mm0 /* t177=t188 */
368 psubsw %mm3, %mm6 /* V72 */
369 psubsw %mm1, %mm3 /* V71 ; free mm1 */
370 psubsw %mm2, %mm6 /* V73 ; free mm2 */
371 /* moved from next block */
372 psraw $1, %mm5 /* t178=t189 */
373 psubsw %mm6, %mm4 /* V74 */
374 /* moved from next block */
375 movq %mm0, %mm1 /* duplicate t177=t188 */
376 paddsw %mm4, %mm3 /* V75 */
377 /* moved from next block */
378 paddsw %mm5, %mm0 /* tm1 */
/* Column 1, output butterfly: produce tm1..tm15 (odd row slots).  Some
 * results are kept in registers for the upcoming transpose instead of
 * being stored (see the "save the store" note below). */
388 * free mm0, mm1 & mm2
390 * movq 8*5(%esi), %mm0 V56
391 * psllw $1, %mm0 t177=t188 ! new !!
392 * psllw $1, %mm5 t178=t189 ! new !!
393 * movq %mm0, %mm1 duplicate t177=t188
394 * paddsw %mm5, %mm0 tm1
396 movq 8*13(%esi), %mm2 /* V57 */
397 psubsw %mm5, %mm1 /* tm15; free mm5 */
398 movq %mm0, 8(%esi) /* tm1; free mm0 */
399 psraw $1, %mm7 /* t182=t184 ! new !! */
400 /* save the store as used directly in the transpose
401 * movq %mm1, 120(%esi) tm15; free mm1
403 movq %mm7, %mm5 /* duplicate t182=t184 */
404 psubsw %mm3, %mm7 /* tm7 */
405 paddsw %mm3, %mm5 /* tm9; free mm3 */
406 movq 8*9(%esi), %mm0 /* V58 */
407 movq %mm2, %mm3 /* duplicate V57 */
408 movq %mm7, 8*7(%esi) /* tm7; free mm7 */
409 psubsw %mm6, %mm3 /* tm13 */
410 paddsw %mm6, %mm2 /* tm3 ; free mm6 */
411 /* moved up from the transpose */
413 /* moved up from the transpose */
415 movq %mm0, %mm6 /* duplicate V58 */
416 movq %mm2, 8*3(%esi) /* tm3; free mm2 */
417 paddsw %mm4, %mm0 /* tm5 */
418 psubsw %mm4, %mm6 /* tm11; free mm4 */
419 /* moved up from the transpose */
421 movq %mm0, 8*5(%esi) /* tm5; free mm0 */
422 /* moved up from the transpose */
/* First half of the 8x8 transpose between the two 1-D passes, done as
 * four 4x4 quadrant swaps (diagram below).  Half-mmword movd stores are
 * used where possible; several loads for the M3 part are interleaved. */
424 /* transpose - M4 part
425 * --------- ---------
426 * | M1 | M2 | | M1'| M3'|
427 * --------- --> ---------
428 * | M3 | M4 | | M2'| M4'|
429 * --------- ---------
430 * Two alternatives: use full mmword approach so the following code can be
431 * scheduled before the transpose is done without stores, or use the faster
432 * half mmword stores (when possible)
434 movd %mm3, 8*9+4(%esi) /* MS part of tmt9 */
436 movd %mm7, 8*13+4(%esi) /* MS part of tmt13 */
438 movd %mm5, 8*9(%esi) /* LS part of tmt9 */
439 punpckhdq %mm3, %mm5 /* free mm3 */
440 movd %mm2, 8*13(%esi) /* LS part of tmt13 */
441 punpckhdq %mm7, %mm2 /* free mm7 */
442 /* moved up from the M3 transpose */
444 /* moved up from the M3 transpose */
445 movq 8*10(%esi), %mm1
446 /* moved up from the M3 transpose */
448 /* shuffle the rest of the data, and write it with 2 mmword writes */
449 movq %mm5, 8*11(%esi) /* tmt11 */
450 /* moved up from the M3 transpose */
452 movq %mm2, 8*15(%esi) /* tmt15 */
453 /* moved up from the M3 transpose */
455 /* transpose - M3 part
456 * moved up to previous code section
457 * movq 8*8(%esi), %mm0
458 * movq 8*10(%esi), %mm1
460 * punpcklwd %mm1, %mm0
461 * punpckhwd %mm1, %mm3
463 movq 8*12(%esi), %mm6
464 movq 8*14(%esi), %mm4
466 /* shuffle the data and write the lower parts of the transposed in 4 dwords */
471 punpckhwd %mm4, %mm2 /* free mm4 */
472 punpckldq %mm6, %mm0 /* free mm6 */
473 /* moved from next block */
474 movq 8*13(%esi), %mm4 /* tmt13 */
476 punpckhdq %mm2, %mm7 /* free mm2 */
477 /* moved from next block */
478 movq %mm3, %mm5 /* duplicate tmt5 */
/* Second 1-D pass, column 1, even part, operating on the transposed
 * intermediates tmt1/tmt5/tmt9/tmt13. */
479 /* column 1: even part (after transpose)
481 * movq %mm3, %mm5 duplicate tmt5
482 * movq 8*13(%esi), %mm4 tmt13
484 psubsw %mm4, %mm3 /* V134 */
485 pmulhw x5a825a825a825a82, %mm3 /* 23170 ->V136 */
486 movq 8*9(%esi), %mm6 /* tmt9 */
487 paddsw %mm4, %mm5 /* V135 ; mm4 free */
488 movq %mm0, %mm4 /* duplicate tmt1 */
489 paddsw %mm6, %mm0 /* V137 */
490 psubsw %mm6, %mm4 /* V138 ; mm6 free */
491 psllw $2, %mm3 /* t290 */
492 psubsw %mm5, %mm3 /* V139 */
493 movq %mm0, %mm6 /* duplicate V137 */
494 paddsw %mm5, %mm0 /* V140 */
495 movq %mm4, %mm2 /* duplicate V138 */
496 paddsw %mm3, %mm2 /* V141 */
497 psubsw %mm3, %mm4 /* V142 ; mm3 free */
498 movq %mm0, 8*9(%esi) /* V140 */
499 psubsw %mm5, %mm6 /* V143 ; mm5 free */
500 /* moved from next block */
501 movq 8*11(%esi), %mm0 /* tmt11 */
502 movq %mm2, 8*13(%esi) /* V141 */
503 /* moved from next block */
504 movq %mm0, %mm2 /* duplicate tmt11 */
/* Second 1-D pass, column 1, odd part (tmt3/tmt7/tmt11/tmt15).  The nop
 * below is a deliberate scheduling workaround -- keep it. */
505 /* column 1: odd part (after transpose) */
506 /* moved up to the prev block
507 * movq 8*11(%esi), %mm0 tmt11
508 * movq %mm0, %mm2 duplicate tmt11
510 movq 8*15(%esi), %mm5 /* tmt15 */
511 psubsw %mm7, %mm0 /* V144 */
512 movq %mm0, %mm3 /* duplicate V144 */
513 paddsw %mm7, %mm2 /* V147 ; free mm7 */
514 pmulhw x539f539f539f539f, %mm0 /* 21407-> V151 */
515 movq %mm1, %mm7 /* duplicate tmt3 */
516 paddsw %mm5, %mm7 /* V145 */
517 psubsw %mm5, %mm1 /* V146 ; free mm5 */
518 psubsw %mm1, %mm3 /* V150 */
519 movq %mm7, %mm5 /* duplicate V145 */
520 pmulhw x4546454645464546, %mm1 /* 17734-> V153 */
521 psubsw %mm2, %mm5 /* V148 */
522 pmulhw x61f861f861f861f8, %mm3 /* 25080-> V154 */
523 psllw $2, %mm0 /* t311 */
524 pmulhw x5a825a825a825a82, %mm5 /* 23170-> V152 */
525 paddsw %mm2, %mm7 /* V149 ; free mm2 */
526 psllw $1, %mm1 /* t313 */
527 nop /* without the nop - freeze here for one clock */
528 movq %mm3, %mm2 /* duplicate V154 */
529 psubsw %mm0, %mm3 /* V155 ; free mm0 */
530 psubsw %mm2, %mm1 /* V156 ; free mm2 */
531 /* moved from the next block */
532 movq %mm6, %mm2 /* duplicate V143 */
533 /* moved from the next block */
534 movq 8*13(%esi), %mm0 /* V141 */
535 psllw $1, %mm1 /* t315 */
536 psubsw %mm7, %mm1 /* V157 (keep V149) */
537 psllw $2, %mm5 /* t317 */
538 psubsw %mm1, %mm5 /* V158 */
539 psllw $1, %mm3 /* t319 */
540 paddsw %mm5, %mm3 /* V159 */
/* Second pass, column 1, output butterfly: emit the odd-numbered output
 * rows.  out1/out3/out5/out7 go to the scratchN spill slots (restored at
 * the very end) so their mm registers can be reused immediately. */
541 /* column 1: output butterfly (after transform)
542 * moved to the prev block
543 * movq %mm6, %mm2 duplicate V143
544 * movq 8*13(%esi), %mm0 V141
546 psubsw %mm3, %mm2 /* V163 */
547 paddsw %mm3, %mm6 /* V164 ; free mm3 */
548 movq %mm4, %mm3 /* duplicate V142 */
549 psubsw %mm5, %mm4 /* V165 ; free mm5 */
550 movq %mm2, scratch7 /* out7 */
553 paddsw %mm5, %mm3 /* V162 */
554 movq 8*9(%esi), %mm2 /* V140 */
555 movq %mm0, %mm5 /* duplicate V141 */
556 /* in order not to percolate this line up,
557 * we read 72(%esi) very near to this location
559 movq %mm6, 8*9(%esi) /* out9 */
560 paddsw %mm1, %mm0 /* V161 */
561 movq %mm3, scratch5 /* out5 */
562 psubsw %mm1, %mm5 /* V166 ; free mm1 */
563 movq %mm4, 8*11(%esi) /* out11 */
565 movq %mm0, scratch3 /* out3 */
566 movq %mm2, %mm4 /* duplicate V140 */
567 movq %mm5, 8*13(%esi) /* out13 */
568 paddsw %mm7, %mm2 /* V160 */
569 /* moved from the next block */
571 psubsw %mm7, %mm4 /* V167 ; free mm7 */
572 /* moved from the next block */
575 movq %mm2, scratch1 /* out1 */
576 /* moved from the next block */
578 movq %mm4, 8*15(%esi) /* out15 */
579 /* moved from the next block */
/* Second half of the quadrant transpose (M2 and M1 parts), producing
 * tmt0..tmt14 for the column-0 second pass. */
581 /* transpose - M2 parts
582 * moved up to the prev block
584 * movq 8*3(%esi), %mm7
586 * punpcklwd %mm7, %mm0
592 /* shuffle the data and write the lower parts of the transposed in 4 dwords */
593 movd %mm0, 8*8(%esi) /* LS part of tmt8 */
595 movd %mm1, 8*12(%esi) /* LS part of tmt12 */
597 movd %mm5, 8*8+4(%esi) /* MS part of tmt8 */
598 punpckhdq %mm5, %mm0 /* tmt10 */
599 movd %mm3, 8*12+4(%esi) /* MS part of tmt12 */
600 punpckhdq %mm3, %mm1 /* tmt14 */
601 /* transpose - M1 parts */
608 punpckhwd %mm2, %mm6 /* free mm2 */
611 punpckhwd %mm4, %mm3 /* free mm4 */
614 punpckldq %mm5, %mm7 /* tmt0 */
615 punpckhdq %mm5, %mm2 /* tmt2 ; free mm5 */
616 /* shuffle the rest of the data, and write it with 2 mmword writes */
617 punpckldq %mm3, %mm6 /* tmt4 */
618 /* moved from next block */
619 movq %mm2, %mm5 /* duplicate tmt2 */
620 punpckhdq %mm3, %mm4 /* tmt6 ; free mm3 */
621 /* moved from next block */
622 movq %mm0, %mm3 /* duplicate tmt10 */
628 psubsw %mm4, %mm0 /* V110 */
629 paddsw %mm4, %mm3 /* V113 ; free mm4 */
630 movq %mm0, %mm4 /* duplicate V110 */
631 paddsw %mm1, %mm2 /* V111 */
632 pmulhw x539f539f539f539f, %mm0 /* 21407-> V117 */
633 psubsw %mm1, %mm5 /* V112 ; free mm1 */
634 psubsw %mm5, %mm4 /* V116 */
635 movq %mm2, %mm1 /* duplicate V111 */
636 pmulhw x4546454645464546, %mm5 /* 17734-> V119 */
637 psubsw %mm3, %mm2 /* V114 */
638 pmulhw x61f861f861f861f8, %mm4 /* 25080-> V120 */
639 paddsw %mm3, %mm1 /* V115 ; free mm3 */
640 pmulhw x5a825a825a825a82, %mm2 /* 23170-> V118 */
641 psllw $2, %mm0 /* t266 */
642 movq %mm1, (%esi) /* save V115 */
643 psllw $1, %mm5 /* t268 */
644 psubsw %mm4, %mm5 /* V122 */
645 psubsw %mm0, %mm4 /* V121 ; free mm0 */
646 psllw $1, %mm5 /* t270 */
647 psubsw %mm1, %mm5 /* V123 ; free mm1 */
648 psllw $2, %mm2 /* t272 */
649 psubsw %mm5, %mm2 /* V124 (keep V123) */
650 psllw $1, %mm4 /* t274 */
651 movq %mm5, 8*2(%esi) /* save V123 ; free mm5 */
652 paddsw %mm2, %mm4 /* V125 (keep V124) */
/* Second pass, column 0, even part (tmt0/tmt4/tmt8/tmt12). */
653 /* column 0: even part (after transpose) */
654 movq 8*12(%esi), %mm0 /* tmt12 */
655 movq %mm6, %mm3 /* duplicate tmt4 */
656 psubsw %mm0, %mm6 /* V100 */
657 paddsw %mm0, %mm3 /* V101 ; free mm0 */
658 pmulhw x5a825a825a825a82, %mm6 /* 23170 ->V102 */
659 movq %mm7, %mm5 /* duplicate tmt0 */
660 movq 8*8(%esi), %mm1 /* tmt8 */
661 paddsw %mm1, %mm7 /* V103 */
662 psubsw %mm1, %mm5 /* V104 ; free mm1 */
663 movq %mm7, %mm0 /* duplicate V103 */
664 psllw $2, %mm6 /* t245 */
665 paddsw %mm3, %mm7 /* V106 */
666 movq %mm5, %mm1 /* duplicate V104 */
667 psubsw %mm3, %mm6 /* V105 */
668 psubsw %mm3, %mm0 /* V109; free mm3 */
669 paddsw %mm6, %mm5 /* V107 */
670 psubsw %mm6, %mm1 /* V108 ; free mm6 */
/* Second pass, column 0, output butterfly: emit the even-numbered output
 * rows, then restore out1/out3/out5/out7 from the scratchN spill slots
 * into their final row positions.
 * NOTE(review): the loads from scratch3/scratch5/scratch7 are on lines
 * not visible in this chunk (see the starred recap below). */
671 /* column 0: output butterfly (after transform) */
672 movq %mm1, %mm3 /* duplicate V108 */
673 paddsw %mm2, %mm1 /* out4 */
675 psubsw %mm2, %mm3 /* out10 ; free mm2 */
677 movq %mm0, %mm6 /* duplicate V109 */
678 movq %mm1, 8*4(%esi) /* out4 ; free mm1 */
679 psubsw %mm4, %mm0 /* out6 */
680 movq %mm3, 8*10(%esi) /* out10 ; free mm3 */
682 paddsw %mm4, %mm6 /* out8 ; free mm4 */
683 movq %mm7, %mm1 /* duplicate V106 */
684 movq %mm0, 8*6(%esi) /* out6 ; free mm0 */
686 movq (%esi), %mm4 /* V115 */
687 movq %mm6, 8*8(%esi) /* out8 ; free mm6 */
688 movq %mm5, %mm2 /* duplicate V107 */
689 movq 8*2(%esi), %mm3 /* V123 */
690 paddsw %mm4, %mm7 /* out0 */
691 /* moved up from next block */
694 /* moved up from next block */
696 psubsw %mm4, %mm1 /* out14 ; free mm4 */
697 paddsw %mm3, %mm5 /* out2 */
699 movq %mm7, (%esi) /* out0 ; free mm7 */
701 movq %mm1, 8*14(%esi) /* out14 ; free mm1 */
702 psubsw %mm3, %mm2 /* out12 ; free mm3 */
703 movq %mm5, 8*2(%esi) /* out2 ; free mm5 */
705 /* moved up to the prev block */
707 /* moved up to the prev block */
709 movq %mm2, 8*12(%esi) /* out12 ; free mm2 */
710 /* moved up to the prev block */
712 /* move back the data to its correct place
713 * moved up to the prev block
714 * movq scratch3, %mm0
715 * movq scratch5, %mm6
716 * movq scratch7, %mm4
722 movq %mm0, 8*3(%esi) /* out3 */
724 movq %mm6, 8*5(%esi) /* out5 */
725 movq %mm4, 8*7(%esi) /* out7 */
726 movq %mm1, 8(%esi) /* out1 */
/* Final full-matrix transpose.  Register roles from the code below:
 * %ebx = row stride in bytes (8 shorts * 2 = 16? -- set to $8 here, so
 * presumably 8 bytes per half-row; TODO confirm, the loop setup lines
 * between these are not visible), %esi/%edi = current 4x4 tile pointers,
 * %ecx = 3 * stride used to reach the fourth line, %eax/%edx = inner and
 * outer loop counters.  .L1 transposes a diagonal 4x4 tile in place. */
727 /* transpose matrix */
728 movl $8, %ebx /* ebx is x_size */
729 movl %esi, %edi /* pointer to the matrix */
734 subl $4, %eax /* eax is inner loop variable */
735 addl %ebx, %ecx /* ecx is 6*row size */
736 movl %eax, %edx /* edx is the outer loop variable */
737 .L1: movq (%esi), %mm0 /* first line */
738 movq (%esi,%ebx,4), %mm2 /* third line */
739 movq %mm0, %mm6 /* copy first line */
740 punpcklwd (%esi,%ebx,2), %mm0 /* interleave first and second lines */
741 movq %mm2, %mm7 /* copy third line */
742 punpcklwd (%esi,%ecx), %mm2 /* interleave third and fourth lines */
743 movq %mm0, %mm4 /* copy first intermediate result */
744 movq (%esi,%ebx,2), %mm1 /* second line */
745 /* the next line 'punpcklwd %mm2, %mm0' inverted two pixels. */
746 /* punpckldq make printing cleaner */
747 punpckldq %mm2, %mm0 /* interleave to produce result 1 */
748 movq (%esi,%ecx), %mm3 /* fourth line */
749 punpckhdq %mm2, %mm4 /* interleave to produce result 2 */
750 movq %mm0, (%esi) /* write result 1 */
751 punpckhwd %mm1, %mm6 /* interleave first and second lines */
752 movq %mm4, (%esi,%ebx,2) /* write result 2 */
753 punpckhwd %mm3, %mm7 /* interleave 3rd and 4th lines */
754 movq %mm6, %mm5 /* copy first intermediate result */
755 punpckldq %mm7, %mm6 /* interleave to produce result 3 */
756 leal (%edi,%ebx,8), %edi /* point to 4x4 set 4 rows down */
757 punpckhdq %mm7, %mm5 /* interleave to produce result 4 */
758 movq %mm6, (%esi,%ebx,4) /* write result 3 */
759 movq %mm5, (%esi,%ecx) /* write result 4 */
760 /* check to see if number of rows left is zero */
762 /* last time through you are done and ready to exit */
/* .L2: swap-transpose a pair of off-diagonal 4x4 tiles (the tile at
 * 8(%esi) and its mirror at (%edi)) in one pass, then advance the tile
 * pointers and loop counters.
 * NOTE(review): the branch instructions that close these loops and the
 * function epilogue fall on lines not visible in this chunk. */
764 .L2: movq 8(%esi), %mm0 /* first line */
765 movq 8(%esi,%ebx,4), %mm2 /* third line */
766 movq %mm0, %mm6 /* copy first line */
767 punpcklwd 8(%esi,%ebx,2), %mm0 /* interleave first and second lines */
768 movq %mm2, %mm7 /* copy third line */
769 punpcklwd 8(%esi,%ecx), %mm2 /* interleave 3rd and 4th lines */
770 movq %mm0, %mm4 /* copy first intermediate */
771 movq (%edi), %mm1 /* first line */
772 punpckldq %mm2, %mm0 /* interleave to produce 1st result */
773 movq (%edi,%ebx,4), %mm3 /* third line */
774 punpckhdq %mm2, %mm4 /* interleave to produce 2nd result */
775 punpckhwd 8(%esi,%ebx,2), %mm6 /* interleave 1st and 2nd lines */
776 movq %mm1, %mm2 /* copy first line */
777 punpckhwd 8(%esi,%ecx), %mm7 /* interleave 3rd and 4th lines */
778 movq %mm6, %mm5 /* copy first intermediate */
779 movq %mm0, (%edi) /* write result 1 */
780 punpckhdq %mm7, %mm5 /* produce third result */
781 punpcklwd (%edi,%ebx,2), %mm1 /* interleave 1st and 2nd lines */
782 movq %mm3, %mm0 /* copy third line */
783 punpckhwd (%edi,%ebx,2), %mm2 /* interleave 1st and 2nd lines */
784 movq %mm4, (%edi,%ebx,2) /* write result 2 */
785 punpckldq %mm7, %mm6 /* produce fourth result */
786 punpcklwd (%edi,%ecx), %mm3 /* interleave 3rd and 4th lines */
787 movq %mm1, %mm4 /* copy first intermediate */
788 movq %mm6, (%edi,%ebx,4) /* write result 3 */
790 punpckhwd (%edi,%ecx), %mm0 /* interleave 3rd and 4th lines */
791 movq %mm2, %mm6 /* copy second intermediate */
792 movq %mm5, (%edi,%ecx) /* write result 4 */
793 punpckhdq %mm3, %mm4 /* produce second result */
794 movq %mm1, 8(%esi) /* write result 5 */
795 punpckldq %mm0, %mm2 /* produce third result */
796 movq %mm4, 8(%esi,%ebx,2) /* write result 6 */
797 punpckhdq %mm0, %mm6 /* produce fourth result */
798 movq %mm2, 8(%esi,%ebx,4) /* write result 7 */
799 movq %mm6, 8(%esi,%ecx) /* write result 8 */
800 /* increment %esi to point to next 4x4 block in same row */
802 /* increment %edi to point to next 4x4 block below current */
803 leal (%edi,%ebx,8), %edi
804 sub $4, %eax /* decrement inner loop var */
806 /* %edi points to start of second row in block just finished */
808 leal 8(%esi,%ebx,8), %esi
810 /* subtract the number of bytes in last row */
811 /* now we point to spot where row=col */
812 subl $8, %edx /* sub 4 from row number */
816 /* reset x_size to outer loop variable to start new row */
828 .size vdec_IDCT,.Lfe1-vdec_IDCT