git.sesse.net Git - qscale/blobdiff - qscale.c
Err, fixed more libqscale.h silliness.
index 54985cc7e6beb47640cbd5fc6c64eac7e26487d7..d89c8b9463d9a34cc367a22f72f4e4b9f75a7b7d 100644
--- a/qscale.c
+++ b/qscale.c
@@ -1,3 +1,21 @@
+/*
+ * qscale: Quick, high-quality JPEG-to-JPEG scaler.
+ * Copyright (C) 2008 Steinar H. Gunderson <sgunderson@bigfoot.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
+ */
+
 #include <stdio.h>
 #include <malloc.h>
 #include <math.h>
@@ -4,14 +22,30 @@
 #include <stdlib.h>
 #include "jpeglib.h"
 
+/* The number of pixels to process at a time when scaling vertically. */
+#define CACHE_LINE_FACTOR 16
+
+/* Whether to use SSE for horizontal scaling or not (requires SSE3). */
+#define USE_HORIZONTAL_SSE 1
+
+/* Whether to use SSE for vertical scaling or not (requires only SSE1). */
+#define USE_VERTICAL_SSE 1
+
+#if USE_VERTICAL_SSE
+#undef CACHE_LINE_FACTOR
 #define CACHE_LINE_FACTOR 16
+#endif
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264
+#endif
 
 double sinc(double x)
 {
-       static const double cutoff = 1.220703668e-4;  // sqrt(sqrt(eps))
+       static const double cutoff = 1.220703668e-4;  /* sqrt(sqrt(eps)) */
 
        if (abs(x) < cutoff) {
-               // For small |x|, use Taylor series instead
+               /* For small |x|, use Taylor series instead */
                const double x2 = x * x;
                const double x4 = x2 * x2;
 
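This hunk adds the compile-time SSE switches and an M_PI fallback and converts the comments to C89 style. For context on sinc(): the small-|x| branch (its return statement falls outside the hunk) presumably evaluates the truncated Maclaurin series sin(x)/x ~ 1 - x^2/6 + x^4/120, and with the cutoff at sqrt(sqrt(eps)) ~ 1.22e-4 the truncation error of that series sits far below double-precision round-off while avoiding the 0/0 at x = 0. Note that abs() here is the integer abs() from <stdlib.h>, so the test effectively widens to |x| < 1 rather than |x| < cutoff; fabs() would match the comment, although the three-term series is still accurate to within about 2e-4 over that wider range.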
@@ -38,7 +72,7 @@ struct pix_desc {
        unsigned startcoeff;
 };
 
-void hscale(float *pix, unsigned char *npix, unsigned w, unsigned h, unsigned nw, unsigned dstride)
+void hscale(float *pix, unsigned char *npix, unsigned w, unsigned h, unsigned nw, unsigned sstride, unsigned dstride)
 {
        struct pix_desc *pd = (struct pix_desc *)malloc(nw * sizeof(struct pix_desc));
        int size_coeffs = 8;
@@ -62,10 +96,11 @@ void hscale(float *pix, unsigned char *npix, unsigned w, unsigned h, unsigned nw
                        end = w - 1;
                }
 
-               // round up so we get a multiple of four for the SSE code
+#if USE_HORIZONTAL_SSE
+               /* round up so we get a multiple of four for the SSE code */
                int num = (end - start + 1);
                if (num % 4 != 0) {
-                       // prefer aligning it if possible
+                       /* prefer aligning it if possible */
                        if (start % 4 != 0 && start % 4 <= num % 4) {
                                num += start % 4;
                                start -= start % 4;
@@ -74,6 +109,7 @@ void hscale(float *pix, unsigned char *npix, unsigned w, unsigned h, unsigned nw
                                end += 4 - (num % 4);
                        }
                }
+#endif
 
                pd[x].start = start;
                pd[x].end = end;
@@ -86,7 +122,7 @@ void hscale(float *pix, unsigned char *npix, unsigned w, unsigned h, unsigned nw
                                size_coeffs <<= 1;
                                coeffs = (float *)realloc(coeffs, size_coeffs * sizeof(float));
                        }
-               
+
                        coeffs[num_coeffs++] = f;
                        sum += f;
                }
@@ -95,39 +131,54 @@ void hscale(float *pix, unsigned char *npix, unsigned w, unsigned h, unsigned nw
                        coeffs[pd[x].startcoeff + sx - start] /= sum;
                }
        }
-       
+
        for (y = 0; y < h; ++y) {
-               float *sptr = pix + y*w;
+               float *sptr = pix + y*sstride;
                unsigned char *dptr = npix + y*dstride;
                unsigned char ch;
                for (x = 0; x < nw; ++x) {
+#if USE_HORIZONTAL_SSE
+                       int result;
                        float acc;
+                       long tmp;
                        static const float low = 0.0, high = 255.0;
-                       asm (
-                               "pxor %0, %0               \n"
-                               "xor %%eax, %%eax          \n"
-                               ".lbl2:                    \n"
-                               "movups (%2,%%eax),%%xmm1  \n"
-                               "movups (%1,%%eax),%%xmm2  \n"
+                       __asm__ (
+                               "pxor %1, %1               \n"
+                               "xor %2, %2                \n"
+                               "0:                        \n"
+                               "movups (%4,%2),%%xmm1     \n"
+                               "movups (%3,%2),%%xmm2     \n"
                                "mulps %%xmm2,%%xmm1       \n"
-                               "addps %%xmm1,%0           \n"
-                               "addl $16,%%eax            \n"
-                               "dec %3                    \n"
-                               "jnz .lbl2                 \n"
-                               "haddps %0,%0              \n"
-                               "haddps %0,%0              \n"
-                               "maxss %4,%0               \n"
-                               "minss %5,%0               \n"
-                               : "=x" (acc)
+                               "addps %%xmm1,%1           \n"
+                               "add $16,%2                \n"
+                               "dec %5                    \n"
+                               "jnz 0b                    \n"
+                               "haddps %1,%1              \n"
+                               "haddps %1,%1              \n"
+                               "maxss %6,%1               \n"
+                               "minss %7,%1               \n"
+                               "cvtss2si %1,%0            \n"
+                               : "=r" (result),
+                                 "=&x" (acc),
+                                 "=&r" (tmp)
                                : "r" (&coeffs[pd[x].startcoeff]),
                                  "r" (&sptr[pd[x].start]),
                                  "r" ((pd[x].end - pd[x].start + 1)/4),
                                  "m" (low),
                                  "m" (high)
-                               : "eax", "xmm1", "xmm2"
+                               : "memory", "xmm1", "xmm2"
                        );
 
-#if 0
+                       *dptr++ = (unsigned char)result;
+#else
+                       float acc = 0.0;
+                       float *cf = &coeffs[pd[x].startcoeff];
+                       unsigned sx;
+                       
+                       for (sx = pd[x].start; sx <= pd[x].end; ++sx) {
+                               acc += sptr[sx] * *cf++;
+                       }
+
                        if (acc < 0.0)
                                ch = 0;
                        else if (acc > 255.0)
@@ -136,7 +187,6 @@ void hscale(float *pix, unsigned char *npix, unsigned w, unsigned h, unsigned nw
                                ch = (unsigned char)acc;
                        *dptr++ = ch;
 #endif
-                       *dptr++ = (unsigned char)acc;
                }
                ch = dptr[-1];
                for ( ; x < dstride; ++x) {
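Two details of the rewritten horizontal-scale asm are easy to miss. The hard-coded %%eax offset and the named label .lbl2 are replaced by a compiler-chosen scratch register (tmp, declared long so it is also valid as an index register on x86-64) and the numeric local label 0:, referenced as 0b; a named label is emitted verbatim for every instantiation of the asm, so it breaks if the compiler ever duplicates the block by inlining or unrolling, while numeric locals are safe to repeat. A minimal stand-alone illustration of the label pattern, not taken from this file:

        /* Hypothetical example: the numeric local label "0:" (jumped to as
         * "0b", i.e. the nearest "0" looking backwards) may appear in every
         * copy of the asm the compiler emits, unlike a named label. */
        static inline unsigned count_down(unsigned n)
        {
                __asm__ volatile (
                        "0:          \n"
                        "dec %0      \n"
                        "jnz 0b      \n"
                        : "+r" (n));   /* assumes n > 0 */
                return n;
        }

The other change is that the clamped sum is now converted inside the asm (cvtss2si into the new result output) instead of being cast from float in C, and the scalar code that used to sit under #if 0 becomes a live #else fallback with its own store.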
@@ -194,112 +244,112 @@ void vscale(unsigned char *pix, float *npix, unsigned w, unsigned h, unsigned nh
                unsigned char *sptr = pix + x;
                float *dptr = npix + x;
                for (y = 0; y < nh; ++y) {
-#if 0
-                       int i;
-                       float acc[CACHE_LINE_FACTOR];
-                       for (i = 0; i < CACHE_LINE_FACTOR; ++i)
-                               acc[i] = 0.0;
-                       float *cf = &coeffs[pd[y].startcoeff];
-                       unsigned sy;
-               
-                       for (sy = pd[y].start; sy <= pd[y].end; ++sy) {
-                               for (i = 0; i < CACHE_LINE_FACTOR; ++i) {
-                                       acc[i] += sptr[sy * w + i] * *cf;
-                               }
-                               ++cf;
-                       }
-
-                       for (i = 0; i < CACHE_LINE_FACTOR; ++i) {
-                               dptr[i] = acc[i];
-                       }
-#else
+#if USE_VERTICAL_SSE
                        /*
                         * xmm0 - xmm3: acc[0..15]
                         * xmm4: current filter coefficient
                         * xmm5, xmm6, xmm7: scratchpad
                         */
-                       asm (
+                       __asm__ (
                                /* clear */
-                               "pxor %%xmm0, %%xmm0    \n"
-                               "pxor %%xmm1, %%xmm1    \n"
-                               "pxor %%xmm2, %%xmm2    \n"
-                               "pxor %%xmm3, %%xmm3    \n"
+                               "pxor %%xmm0, %%xmm0          \n"
+                               "pxor %%xmm1, %%xmm1          \n"
+                               "pxor %%xmm2, %%xmm2          \n"
+                               "pxor %%xmm3, %%xmm3          \n"
 
                                /* main loop */
-                               ".lbl:                   \n"
+                               "0:                           \n"
                                
                                /* a zero is useful during unpacking */
-                               "pxor %%xmm4, %%xmm4     \n"
+                               "pxor %%xmm4, %%xmm4          \n"
                                
                                /* fetch all 16 source bytes */
-                               "movups (%0), %%xmm5     \n"
-                               "prefetcht0 (%0,%3,4)    \n"
+                               "movups (%0), %%xmm5          \n"
+                               "prefetcht0 (%0,%3,4)         \n"
 
                                /* unpack into words (xmm5, xmm7) */
-                               "movaps %%xmm5, %%xmm7    \n"
-                               "punpcklbw %%xmm4, %%xmm5 \n"
-                               "punpckhbw %%xmm4, %%xmm7 \n"
+                               "movaps %%xmm5, %%xmm7        \n"
+                               "punpcklbw %%xmm4, %%xmm5     \n"
+                               "punpckhbw %%xmm4, %%xmm7     \n"
 
                                /* unpack xmm5 into dwords (xmm5, xmm6) */
-                               "movaps %%xmm5, %%xmm6    \n"
-                               "punpcklwd %%xmm4, %%xmm5 \n"
-                               "punpckhwd %%xmm4, %%xmm6 \n"
+                               "movaps %%xmm5, %%xmm6        \n"
+                               "punpcklwd %%xmm4, %%xmm5     \n"
+                               "punpckhwd %%xmm4, %%xmm6     \n"
 
                                /* convert xmm5, xmm6 to floats */
-                               "cvtdq2ps %%xmm5, %%xmm5 \n"
-                               "cvtdq2ps %%xmm6, %%xmm6 \n"
+                               "cvtdq2ps %%xmm5, %%xmm5      \n"
+                               "cvtdq2ps %%xmm6, %%xmm6      \n"
 
                                /* fetch the coefficient */
-                               "movss (%2), %%xmm4      \n"
-                               "shufps $0x0, %%xmm4, %%xmm4 \n"
+                               "movss (%2), %%xmm4           \n"
+                               "shufps $0x0, %%xmm4, %%xmm4  \n"
 
                                /* do the muls for xmm5 and xmm6 */
-                               "mulps %%xmm4, %%xmm5    \n"
-                               "mulps %%xmm4, %%xmm6    \n"
-                               "addps %%xmm5, %%xmm0    \n"
-                               "addps %%xmm6, %%xmm1    \n"
+                               "mulps %%xmm4, %%xmm5         \n"
+                               "mulps %%xmm4, %%xmm6         \n"
+                               "addps %%xmm5, %%xmm0         \n"
+                               "addps %%xmm6, %%xmm1         \n"
 
                                /* get the zero back again */
-                               "pxor %%xmm4, %%xmm4     \n"
+                               "pxor %%xmm4, %%xmm4          \n"
 
                                /* unpack xmm7 into dwords (xmm7, xmm6) */
-                               "movaps %%xmm7, %%xmm6    \n"
-                               "punpcklwd %%xmm4, %%xmm7 \n"
-                               "punpckhwd %%xmm4, %%xmm6 \n"
+                               "movaps %%xmm7, %%xmm6        \n"
+                               "punpcklwd %%xmm4, %%xmm7     \n"
+                               "punpckhwd %%xmm4, %%xmm6     \n"
 
                                /* convert xmm7, xmm6 to floats */
-                               "cvtdq2ps %%xmm7, %%xmm7 \n"
-                               "cvtdq2ps %%xmm6, %%xmm6 \n"
+                               "cvtdq2ps %%xmm7, %%xmm7      \n"
+                               "cvtdq2ps %%xmm6, %%xmm6      \n"
 
                                /* fetch the coefficient */
-                               "movss (%2), %%xmm4      \n"
-                               "shufps $0x0, %%xmm4, %%xmm4 \n"
+                               "movss (%2), %%xmm4           \n"
+                               "shufps $0x0, %%xmm4, %%xmm4  \n"
 
                                /* do the second set of muls */
-                               "mulps %%xmm4, %%xmm7    \n"
-                               "mulps %%xmm4, %%xmm6    \n"
-                               "addps %%xmm7, %%xmm2    \n"
-                               "addps %%xmm6, %%xmm3    \n"
+                               "mulps %%xmm4, %%xmm7         \n"
+                               "mulps %%xmm4, %%xmm6         \n"
+                               "addps %%xmm7, %%xmm2         \n"
+                               "addps %%xmm6, %%xmm3         \n"
 
                                /* move along, and loop */
-                               "add $4, %2              \n"
-                               "add %3, %0              \n"
-                               "dec %1                  \n"
-                               "jnz .lbl                \n"
+                               "add $4, %2                   \n"
+                               "add %3, %0                   \n"
+                               "dec %1                       \n"
+                               "jnz 0b                       \n"
 
                                /* store the values */
-                               "movaps %%xmm0, (%4)     \n"
-                               "movaps %%xmm1, 16(%4)   \n"
-                               "movaps %%xmm2, 32(%4)   \n"
-                               "movaps %%xmm3, 48(%4)   \n"
+                               "movaps %%xmm0, (%4)          \n"
+                               "movaps %%xmm1, 16(%4)        \n"
+                               "movaps %%xmm2, 32(%4)        \n"
+                               "movaps %%xmm3, 48(%4)        \n"
                                : :
                                "r" (&sptr[pd[y].start * w]),        /* 0: srcptr base */
                                "r" (pd[y].end - pd[y].start + 1),   /* 1: filter len */
                                "r" (&coeffs[pd[y].startcoeff]),     /* 2: coeffs base */
-                               "r" (w),                             /* 3: stride */
+                               "r" ((long)w),                       /* 3: stride */
                                "r" (dptr)                           /* 4: dstptr base */
                                : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
                        );
+#else
+                       int i;
+                       float acc[CACHE_LINE_FACTOR];
+                       for (i = 0; i < CACHE_LINE_FACTOR; ++i)
+                               acc[i] = 0.0;
+                       float *cf = &coeffs[pd[y].startcoeff];
+                       unsigned sy;
+               
+                       for (sy = pd[y].start; sy <= pd[y].end; ++sy) {
+                               for (i = 0; i < CACHE_LINE_FACTOR; ++i) {
+                                       acc[i] += sptr[sy * w + i] * *cf;
+                               }
+                               ++cf;
+                       }
+
+                       for (i = 0; i < CACHE_LINE_FACTOR; ++i) {
+                               dptr[i] = acc[i];
+                       }
 #endif
                        dptr += dstride;
                }
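Apart from re-indenting the asm strings and the same .lbl -> 0:/0b label change, the functional change in this hunk is the "r" ((long)w) stride operand. w is an unsigned int, so without the cast the compiler substitutes a 32-bit register name for %3, and "add %3, %0" / "(%0,%3,4)" would then mix a 32-bit index with 64-bit pointer registers, which the assembler rejects on x86-64; widening the operand to long keeps the block building on both 32- and 64-bit targets. The same idea in isolation, as a hypothetical sketch:

        /* Hypothetical: a 32-bit element index used for addressing inside
         * inline asm is passed as long so that %2 names a full-width
         * register on LP64 targets. */
        static inline float load_at(const float *p, unsigned idx)
        {
                float f;
                __asm__ ("movss (%1,%2,4), %0"
                         : "=x" (f)
                         : "r" (p), "r" ((long)idx));
                return f;
        }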
@@ -327,13 +377,27 @@ void vscale(unsigned char *pix, float *npix, unsigned w, unsigned h, unsigned nh
 
 int main(int argc, char **argv)
 {
+       /* user-settable parameters */
        unsigned nominal_w = atoi(argv[1]);
        unsigned nominal_h = atoi(argv[2]);
-
        unsigned samp_h0 = 2, samp_v0 = 2;
        unsigned samp_h1 = 1, samp_v1 = 1;
        unsigned samp_h2 = 1, samp_v2 = 1;
-       unsigned max_samp_h = 2, max_samp_v = 2;
+       unsigned jpeg_quality = 85;
+       /* end */
+
+       unsigned max_samp_h, max_samp_v;
+       max_samp_h = samp_h0;
+       if (samp_h1 > max_samp_h)
+               max_samp_h = samp_h1;
+       if (samp_h2 > max_samp_h)
+               max_samp_h = samp_h2;
+       
+       max_samp_v = samp_v0;
+       if (samp_v1 > max_samp_v)
+               max_samp_v = samp_v1;
+       if (samp_v2 > max_samp_v)
+               max_samp_v = samp_v2;
 
        unsigned nw0 = nominal_w * samp_h0 / max_samp_h, nh0 = nominal_h * samp_v0 / max_samp_v;
        unsigned nw1 = nominal_w * samp_h1 / max_samp_h, nh1 = nominal_h * samp_v1 / max_samp_v;
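With the defaults kept here (2x2 sampling for Y and 1x1 for both chroma planes, i.e. 4:2:0 output), the computed maxima are max_samp_h = max_samp_v = 2, so the per-plane target sizes work out the same as with the old hard-coded pair:

        nw0 = nominal_w * 2 / 2 = nominal_w        nh0 = nominal_h
        nw1 = nw2 = nominal_w * 1 / 2              nh1 = nh2 = nominal_h / 2

The gain is that editing any of the samp_* settings above no longer requires remembering to update a separate max_samp_h/max_samp_v pair by hand.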
@@ -352,6 +416,15 @@ int main(int argc, char **argv)
        dinfo.raw_data_out = TRUE;
        jpeg_start_decompress(&dinfo);
 
+       unsigned w0 = dinfo.image_width * dinfo.comp_info[0].h_samp_factor / dinfo.max_h_samp_factor;
+       unsigned h0 = dinfo.image_height * dinfo.comp_info[0].v_samp_factor / dinfo.max_v_samp_factor;
+
+       unsigned w1 = dinfo.image_width * dinfo.comp_info[1].h_samp_factor / dinfo.max_h_samp_factor;
+       unsigned h1 = dinfo.image_height * dinfo.comp_info[1].v_samp_factor / dinfo.max_v_samp_factor;
+
+       unsigned w2 = dinfo.image_width * dinfo.comp_info[2].h_samp_factor / dinfo.max_h_samp_factor;
+       unsigned h2 = dinfo.image_height * dinfo.comp_info[2].v_samp_factor / dinfo.max_v_samp_factor;
+
        fprintf(stderr, "Scaling using Lanczos filter:\n");
        fprintf(stderr, "  Y component: %ux%u -> %ux%u\n", dinfo.comp_info[0].width_in_blocks * DCTSIZE, dinfo.comp_info[0].height_in_blocks * DCTSIZE, nw0, nh0);
        fprintf(stderr, "  Cb component: %ux%u -> %ux%u\n", dinfo.comp_info[1].width_in_blocks * DCTSIZE, dinfo.comp_info[1].height_in_blocks * DCTSIZE, nw1, nh1);
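The new w0..h2 are the real pixel dimensions of each decoded plane (image size scaled by that component's sampling factors), as opposed to the width_in_blocks * DCTSIZE and height_in_blocks * DCTSIZE figures used for the buffers and the stderr report, which libjpeg rounds up to whole 8x8 blocks. For a hypothetical 1000x600 4:2:0 input:

        Y:      w0 x h0 = 1000 x 600     block-padded buffer = 1000 x 600
        Cb/Cr:  w1 x h1 =  500 x 300     block-padded buffer =  504 x 304

Feeding h0..h2 (and, below, w0..w2) into the scalers keeps those padding rows and columns out of the filter input.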
@@ -387,23 +460,23 @@ int main(int argc, char **argv)
 
        {
                float *npix = (float*)memalign(16, dinfo.comp_info[0].width_in_blocks * DCTSIZE * nh0 * sizeof(float)); 
-               vscale(data_y, npix, dinfo.comp_info[0].width_in_blocks * DCTSIZE, dinfo.comp_info[0].height_in_blocks * DCTSIZE, nh0, dinfo.comp_info[0].width_in_blocks * DCTSIZE);
-               data_ny = (unsigned char *)malloc(nw0 * stride0);
-               hscale(npix, data_ny, dinfo.comp_info[0].width_in_blocks * DCTSIZE, nh0, nw0, stride0);
+               vscale(data_y, npix, dinfo.comp_info[0].width_in_blocks * DCTSIZE, h0, nh0, dinfo.comp_info[0].width_in_blocks * DCTSIZE);
+               data_ny = (unsigned char *)malloc(nh0 * stride0);
+               hscale(npix, data_ny, w0, nh0, nw0, dinfo.comp_info[0].width_in_blocks * DCTSIZE, stride0);
                free(npix);
        }
        {
                float *npix = (float*)memalign(16, dinfo.comp_info[1].width_in_blocks * DCTSIZE * nh1 * sizeof(float)); 
-               vscale(data_cr, npix, dinfo.comp_info[1].width_in_blocks * DCTSIZE, dinfo.comp_info[1].height_in_blocks * DCTSIZE, nh1, dinfo.comp_info[1].width_in_blocks * DCTSIZE);
-               data_ncr = (unsigned char *)malloc(nw1 * stride1);
-               hscale(npix, data_ncr, dinfo.comp_info[1].width_in_blocks * DCTSIZE, nh1, nw1, stride1);
+               vscale(data_cr, npix, dinfo.comp_info[1].width_in_blocks * DCTSIZE, h1, nh1, dinfo.comp_info[1].width_in_blocks * DCTSIZE);
+               data_ncr = (unsigned char *)malloc(nh1 * stride1);
+               hscale(npix, data_ncr, w1, nh1, nw1, dinfo.comp_info[1].width_in_blocks * DCTSIZE, stride1);
                free(npix);
        }
        {
                float *npix = (float*)memalign(16, dinfo.comp_info[2].width_in_blocks * DCTSIZE * nh2 * sizeof(float)); 
-               vscale(data_cb, npix, dinfo.comp_info[2].width_in_blocks * DCTSIZE, dinfo.comp_info[2].height_in_blocks * DCTSIZE, nh2, dinfo.comp_info[2].width_in_blocks * DCTSIZE);
-               data_ncb = (unsigned char *)malloc(nw2 * stride2);
-               hscale(npix, data_ncb, dinfo.comp_info[2].width_in_blocks * DCTSIZE, nh2, nw2, stride2);
+               vscale(data_cb, npix, dinfo.comp_info[2].width_in_blocks * DCTSIZE, h2, nh2, dinfo.comp_info[2].width_in_blocks * DCTSIZE);
+               data_ncb = (unsigned char *)malloc(nh2 * stride2);
+               hscale(npix, data_ncb, w2, nh2, nw2, dinfo.comp_info[2].width_in_blocks * DCTSIZE, stride2);
                free(npix);
        }
        jpeg_destroy_decompress(&dinfo);
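Besides passing the real plane sizes and a separate source stride into vscale() and hscale(), this hunk fixes the size of the scaled output buffers: hscale() writes nh rows of dstride bytes each, so the Y buffer needs nh0 * stride0 bytes rather than nw0 * stride0. The old expression merely over-allocated for wide targets, but under-allocated whenever the output is taller than it is wide; a 600x800 target, for example, would have reserved 600 * stride0 bytes and then written 800 rows.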
@@ -414,7 +487,7 @@ int main(int argc, char **argv)
        jpeg_stdio_dest(&cinfo, stdout);
        cinfo.input_components = 3;
        jpeg_set_defaults(&cinfo);
-       jpeg_set_quality(&cinfo, 85, FALSE);
+       jpeg_set_quality(&cinfo, jpeg_quality, FALSE);
        cinfo.image_width = nominal_w;
        cinfo.image_height = nominal_h;
        cinfo.raw_data_in = TRUE;
@@ -439,7 +512,7 @@ int main(int argc, char **argv)
                int i;
 
                for (i = 0; i < max_lines; ++i) {
-                       // simple edge extension
+                       /* simple edge extension */
                        int yline = i + blocks*DCTSIZE*cinfo.comp_info[0].v_samp_factor;
                        if (yline > nh0 - 1)
                                yline = nh0 - 1;
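This last loop presumably builds the row pointers handed to jpeg_write_raw_data(), which consumes whole iMCU rows, i.e. v_samp_factor * DCTSIZE = 16 luma lines per strip with the 2x2 sampling used here; near the bottom of the image yline can therefore run past the scaled height, and clamping it to nh0 - 1 simply repeats the last real row into that padding. With nh0 = 601, for instance, the final strip covers lines 592..607, and lines 601..607 all re-read row 600.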