git.sesse.net Git - qscale/commitdiff
Reformat some of the assembler code.
author	sgunderson@bigfoot.com <>
Sun, 3 Feb 2008 19:01:12 +0000 (20:01 +0100)
committer	sgunderson@bigfoot.com <>
Sun, 3 Feb 2008 19:01:12 +0000 (20:01 +0100)
qscale.c

index 992bf7d24e3aae137f887bd3215876db66921ed5..8dafced51c5f49ef7802d4748cbd0ab615721c4b 100644 (file)
--- a/qscale.c
+++ b/qscale.c
@@ -225,78 +225,78 @@ void vscale(unsigned char *pix, float *npix, unsigned w, unsigned h, unsigned nh
                         */
                        asm (
                                /* clear */
-                               "pxor %%xmm0, %%xmm0    \n"
-                               "pxor %%xmm1, %%xmm1    \n"
-                               "pxor %%xmm2, %%xmm2    \n"
-                               "pxor %%xmm3, %%xmm3    \n"
+                               "pxor %%xmm0, %%xmm0          \n"
+                               "pxor %%xmm1, %%xmm1          \n"
+                               "pxor %%xmm2, %%xmm2          \n"
+                               "pxor %%xmm3, %%xmm3          \n"
 
                                /* main loop */
-                               ".lbl:                   \n"
+                               ".lbl:                        \n"
                                
                                /* a zero is useful during unpacking */
-                               "pxor %%xmm4, %%xmm4     \n"
+                               "pxor %%xmm4, %%xmm4          \n"
                                
                                /* fetch all 16 source bytes */
-                               "movups (%0), %%xmm5     \n"
-                               "prefetcht0 (%0,%3,4)    \n"
+                               "movups (%0), %%xmm5          \n"
+                               "prefetcht0 (%0,%3,4)         \n"
 
                                /* unpack into words (xmm5, xmm7) */
-                               "movaps %%xmm5, %%xmm7    \n"
-                               "punpcklbw %%xmm4, %%xmm5 \n"
-                               "punpckhbw %%xmm4, %%xmm7 \n"
+                               "movaps %%xmm5, %%xmm7        \n"
+                               "punpcklbw %%xmm4, %%xmm5     \n"
+                               "punpckhbw %%xmm4, %%xmm7     \n"
 
                                /* unpack xmm5 into dwords (xmm5, xmm6) */
-                               "movaps %%xmm5, %%xmm6    \n"
-                               "punpcklwd %%xmm4, %%xmm5 \n"
-                               "punpckhwd %%xmm4, %%xmm6 \n"
+                               "movaps %%xmm5, %%xmm6        \n"
+                               "punpcklwd %%xmm4, %%xmm5     \n"
+                               "punpckhwd %%xmm4, %%xmm6     \n"
 
                                /* convert xmm5, xmm6 to floats */
-                               "cvtdq2ps %%xmm5, %%xmm5 \n"
-                               "cvtdq2ps %%xmm6, %%xmm6 \n"
+                               "cvtdq2ps %%xmm5, %%xmm5      \n"
+                               "cvtdq2ps %%xmm6, %%xmm6      \n"
 
                                /* fetch the coefficient */
-                               "movss (%2), %%xmm4      \n"
-                               "shufps $0x0, %%xmm4, %%xmm4 \n"
+                               "movss (%2), %%xmm4           \n"
+                               "shufps $0x0, %%xmm4, %%xmm4  \n"
 
                                /* do the muls for xmm5 and xmm6 */
-                               "mulps %%xmm4, %%xmm5    \n"
-                               "mulps %%xmm4, %%xmm6    \n"
-                               "addps %%xmm5, %%xmm0    \n"
-                               "addps %%xmm6, %%xmm1    \n"
+                               "mulps %%xmm4, %%xmm5         \n"
+                               "mulps %%xmm4, %%xmm6         \n"
+                               "addps %%xmm5, %%xmm0         \n"
+                               "addps %%xmm6, %%xmm1         \n"
 
                                /* get the zero back again */
-                               "pxor %%xmm4, %%xmm4     \n"
+                               "pxor %%xmm4, %%xmm4          \n"
 
                                /* unpack xmm7 into dwords (xmm7, xmm6) */
-                               "movaps %%xmm7, %%xmm6    \n"
-                               "punpcklwd %%xmm4, %%xmm7 \n"
-                               "punpckhwd %%xmm4, %%xmm6 \n"
+                               "movaps %%xmm7, %%xmm6        \n"
+                               "punpcklwd %%xmm4, %%xmm7     \n"
+                               "punpckhwd %%xmm4, %%xmm6     \n"
 
                                /* convert xmm7, xmm6 to floats */
-                               "cvtdq2ps %%xmm7, %%xmm7 \n"
-                               "cvtdq2ps %%xmm6, %%xmm6 \n"
+                               "cvtdq2ps %%xmm7, %%xmm7      \n"
+                               "cvtdq2ps %%xmm6, %%xmm6      \n"
 
                                /* fetch the coefficient */
-                               "movss (%2), %%xmm4      \n"
-                               "shufps $0x0, %%xmm4, %%xmm4 \n"
+                               "movss (%2), %%xmm4           \n"
+                               "shufps $0x0, %%xmm4, %%xmm4  \n"
 
                                /* do the second set of muls */
-                               "mulps %%xmm4, %%xmm7    \n"
-                               "mulps %%xmm4, %%xmm6    \n"
-                               "addps %%xmm7, %%xmm2    \n"
-                               "addps %%xmm6, %%xmm3    \n"
+                               "mulps %%xmm4, %%xmm7         \n"
+                               "mulps %%xmm4, %%xmm6         \n"
+                               "addps %%xmm7, %%xmm2         \n"
+                               "addps %%xmm6, %%xmm3         \n"
 
                                /* move along, and loop */
-                               "add $4, %2              \n"
-                               "add %3, %0              \n"
-                               "dec %1                  \n"
-                               "jnz .lbl                \n"
+                               "add $4, %2                   \n"
+                               "add %3, %0                   \n"
+                               "dec %1                       \n"
+                               "jnz .lbl                     \n"
 
                                /* store the values */
-                               "movaps %%xmm0, (%4)     \n"
-                               "movaps %%xmm1, 16(%4)   \n"
-                               "movaps %%xmm2, 32(%4)   \n"
-                               "movaps %%xmm3, 48(%4)   \n"
+                               "movaps %%xmm0, (%4)          \n"
+                               "movaps %%xmm1, 16(%4)        \n"
+                               "movaps %%xmm2, 32(%4)        \n"
+                               "movaps %%xmm3, 48(%4)        \n"
                                : :
                                "r" (&sptr[pd[y].start * w]),        /* 0: srcptr base */
                                "r" (pd[y].end - pd[y].start + 1),   /* 1: filter len */