8 /* The number of pixels to process at a time when scaling vertically. */
9 #define CACHE_LINE_FACTOR 16
11 /* Whether to use SSE for horizontal scaling or not (requires SSE3). */
12 #define USE_HORIZONTAL_SSE 1
14 /* Whether to use SSE for vertical scaling or not (requires only SSE1). */
15 #define USE_VERTICAL_SSE 1
18 #undef CACHE_LINE_FACTOR
19 #define CACHE_LINE_FACTOR 16
23 #define M_PI 3.14159265358979323846264
26 qscale_img *qscale_load_jpeg(const char *filename)
28 FILE *file = fopen(filename, "rb");
34 img = qscale_load_jpeg_from_stdio(file);
40 qscale_img *qscale_load_jpeg_from_stdio(FILE *file)
42 qscale_img *img = (qscale_img *)malloc(sizeof(qscale_img));
47 img->data_y = img->data_cb = img->data_cr = NULL;
49 /* FIXME: Better error handling here (ie., return NULL). */
50 struct jpeg_decompress_struct dinfo;
51 struct jpeg_error_mgr jerr;
52 dinfo.err = jpeg_std_error(&jerr);
53 jpeg_create_decompress(&dinfo);
54 jpeg_stdio_src(&dinfo, stdin);
55 jpeg_read_header(&dinfo, TRUE);
56 dinfo.raw_data_out = TRUE;
57 jpeg_start_decompress(&dinfo);
59 /* We do not handle anything but YCbCr images (yet?). */
60 if (dinfo.num_components != 3) {
65 img->width = dinfo.image_width;
66 img->height = dinfo.image_height;
68 img->w0 = dinfo.image_width * dinfo.comp_info[0].h_samp_factor / dinfo.max_h_samp_factor;
69 img->h0 = dinfo.image_height * dinfo.comp_info[0].v_samp_factor / dinfo.max_v_samp_factor;
71 img->w1 = dinfo.image_width * dinfo.comp_info[1].h_samp_factor / dinfo.max_h_samp_factor;
72 img->h1 = dinfo.image_height * dinfo.comp_info[1].v_samp_factor / dinfo.max_v_samp_factor;
74 img->w2 = dinfo.image_width * dinfo.comp_info[2].h_samp_factor / dinfo.max_h_samp_factor;
75 img->h2 = dinfo.image_height * dinfo.comp_info[2].v_samp_factor / dinfo.max_v_samp_factor;
77 img->samp_h0 = dinfo.comp_info[0].h_samp_factor;
78 img->samp_v0 = dinfo.comp_info[0].v_samp_factor;
80 img->samp_h1 = dinfo.comp_info[1].h_samp_factor;
81 img->samp_v1 = dinfo.comp_info[1].v_samp_factor;
83 img->samp_h2 = dinfo.comp_info[2].h_samp_factor;
84 img->samp_v2 = dinfo.comp_info[2].v_samp_factor;
86 img->data_y = (JSAMPLE*)memalign(16, dinfo.comp_info[0].height_in_blocks * dinfo.comp_info[0].width_in_blocks * DCTSIZE * DCTSIZE);
87 img->data_cb = (JSAMPLE*)memalign(16, dinfo.comp_info[1].height_in_blocks * dinfo.comp_info[1].width_in_blocks * DCTSIZE * DCTSIZE);
88 img->data_cr = (JSAMPLE*)memalign(16, dinfo.comp_info[2].height_in_blocks * dinfo.comp_info[2].width_in_blocks * DCTSIZE * DCTSIZE);
90 if (img->data_y == NULL || img->data_cb == NULL || img->data_cr == NULL) {
95 int total_lines = 0, blocks = 0;
96 while (total_lines < dinfo.comp_info[0].height_in_blocks * DCTSIZE) {
97 unsigned max_lines = dinfo.max_v_samp_factor * DCTSIZE;
99 JSAMPROW y_row_ptrs[max_lines];
100 JSAMPROW cb_row_ptrs[max_lines];
101 JSAMPROW cr_row_ptrs[max_lines];
102 JSAMPROW* ptrs[] = { y_row_ptrs, cb_row_ptrs, cr_row_ptrs };
105 for (i = 0; i < max_lines; ++i) {
106 y_row_ptrs[i] = img->data_y + (i+blocks*DCTSIZE*dinfo.comp_info[0].v_samp_factor) * dinfo.comp_info[0].width_in_blocks * DCTSIZE;
107 cb_row_ptrs[i] = img->data_cb + (i+blocks*DCTSIZE*dinfo.comp_info[1].v_samp_factor) * dinfo.comp_info[1].width_in_blocks * DCTSIZE;
108 cr_row_ptrs[i] = img->data_cr + (i+blocks*DCTSIZE*dinfo.comp_info[2].v_samp_factor) * dinfo.comp_info[2].width_in_blocks * DCTSIZE;
111 total_lines += max_lines;
114 if (jpeg_read_raw_data(&dinfo, ptrs, max_lines) == 0)
118 jpeg_destroy_decompress(&dinfo);
122 void qscale_destroy(qscale_img *img)
131 static double sinc(double x)
133 static const double cutoff = 1.220703668e-4; /* sqrt(sqrt(eps)) */
135 if (abs(x) < cutoff) {
136 /* For small |x|, use Taylor series instead */
137 const double x2 = x * x;
138 const double x4 = x2 * x2;
140 return 1.0 - x2 / 6.0 + x4 / 120.0;
146 static double lanczos_tap(double x)
148 if (x < -3.0 || x > 3.0)
151 return sinc(-x*M_PI) * sinc(-x*M_PI / 3.0);
153 return sinc(x*M_PI) * sinc(x*M_PI / 3.0);
/*
 * Horizontal resampling pass: filters "h" rows of float pixels in "pix"
 * (row stride "sstride") from width "w" down/up to width "nw", writing
 * clamped 8-bit rows into "npix" (row stride "dstride") with a Lanczos3
 * filter. Coefficients are precomputed once per destination column.
 */
static void hscale(float *pix, unsigned char *npix, unsigned w, unsigned h, unsigned nw, unsigned sstride, unsigned dstride)
	/* One descriptor per destination column: source window [start, end]
	 * and the offset of its coefficients in the shared pool. */
	struct pix_desc *pd = (struct pix_desc *)malloc(nw * sizeof(struct pix_desc));
	float *coeffs = (float *)malloc(size_coeffs * sizeof(float));

	double sf = (double)w / (double)nw;
	/* Lanczos3 support is three lobes; widened by sf when minifying. */
	double support = (w > nw) ? (3.0 * sf) : (3.0 / sf);

	/* calculate the filter */
	for (x = 0; x < nw; ++x) {
		/* Source columns contributing to destination column x. */
		int start = ceil(x * sf - support);
		int end = floor(x * sf + support);
#if USE_HORIZONTAL_SSE
		/* round up so we get a multiple of four for the SSE code */
		int num = (end - start + 1);
		/* prefer aligning it if possible */
		if (start % 4 != 0 && start % 4 <= num % 4) {
		end += 4 - (num % 4);
		pd[x].startcoeff = num_coeffs;
		for (sx = start; sx <= end; ++sx) {
			/* Filter phase: source units when minifying (w > nw),
			 * destination units when magnifying. */
			double nd = (w > nw) ? (sx/sf - x) : (sx - x*sf);
			double f = lanczos_tap(nd);
			/* Grow the coefficient pool on demand.
			 * NOTE(review): "p = realloc(p, ...)" leaks the old block
			 * if realloc fails — assign to a temporary first. */
			if (num_coeffs == size_coeffs) {
			coeffs = (float *)realloc(coeffs, size_coeffs * sizeof(float));
			coeffs[num_coeffs++] = f;
		/* Normalize the window so the taps sum to one (preserves DC). */
		for (sx = start; sx <= end; ++sx) {
			coeffs[pd[x].startcoeff + sx - start] /= sum;

	/* Apply the filter row by row. */
	for (y = 0; y < h; ++y) {
		float *sptr = pix + y*sstride;
		unsigned char *dptr = npix + y*dstride;
		for (x = 0; x < nw; ++x) {
#if USE_HORIZONTAL_SSE
			/* Clamp bounds for the SSE path. */
			static const float low = 0.0, high = 255.0;
			/* Four coefficients times four source pixels at a time;
			 * unaligned loads since the window start is arbitrary. */
			"movups (%4,%2),%%xmm1 \n"
			"movups (%3,%2),%%xmm2 \n"
			"mulps %%xmm2,%%xmm1 \n"
			: "r" (&coeffs[pd[x].startcoeff]),
			"r" (&sptr[pd[x].start]),
			"r" ((pd[x].end - pd[x].start + 1)/4),
			: "memory", "xmm1", "xmm2"
			*dptr++ = (unsigned char)result;
			/* Scalar fallback: straight dot product over the window. */
			float *cf = &coeffs[pd[x].startcoeff];
			for (sx = pd[x].start; sx <= pd[x].end; ++sx) {
				acc += sptr[sx] * *cf++;
			/* Clamp to [0, 255] before narrowing to a byte. */
			else if (acc > 255.0)
			ch = (unsigned char)acc;
	/* Zero-pad the destination row out to its full stride. */
	for ( ; x < dstride; ++x) {
/*
 * Vertical resampling pass: filters columns of 8-bit pixels in "pix"
 * (row stride "w") from height "h" to height "nh", writing float columns
 * into "npix" with a Lanczos3 filter. The SSE path processes
 * CACHE_LINE_FACTOR (16) adjacent columns per pass for cache locality.
 * NOTE(review): the "dstride" parameter is unused in the visible lines —
 * confirm against the full source.
 */
static void vscale(unsigned char *pix, float *npix, unsigned w, unsigned h, unsigned nh, unsigned dstride)
	/* Per-destination-row filter window, mirroring hscale's setup. */
	struct pix_desc *pd = (struct pix_desc *)malloc(nh * sizeof(struct pix_desc));
	float *coeffs = (float *)malloc(size_coeffs * sizeof(float));

	double sf = (double)h / (double)nh;
	double support = (h > nh) ? (3.0 * sf) : (3.0 / sf);

	/* calculate the filter */
	for (y = 0; y < nh; ++y) {
		int start = ceil(y * sf - support);
		int end = floor(y * sf + support);
		pd[y].startcoeff = num_coeffs;
		for (sy = start; sy <= end; ++sy) {
			double nd = (h > nh) ? (sy/sf - y) : (sy - y*sf);
			double f = lanczos_tap(nd);
			/* NOTE(review): same realloc overwrite-on-failure pattern
			 * as in hscale — leaks the old block if realloc fails. */
			if (num_coeffs == size_coeffs) {
			coeffs = (float *)realloc(coeffs, size_coeffs * sizeof(float));
			coeffs[num_coeffs++] = f;
		/* Normalize each window so the taps sum to one. */
		for (sy = start; sy <= end; ++sy) {
			coeffs[pd[y].startcoeff + sy - start] /= sum;

#if CACHE_LINE_FACTOR > 1
	/* SIMD path: 16 adjacent columns at a time, whole multiple only. */
	for (x = 0; x < (w/CACHE_LINE_FACTOR) * CACHE_LINE_FACTOR; x += CACHE_LINE_FACTOR) {
		unsigned char *sptr = pix + x;
		float *dptr = npix + x;
		for (y = 0; y < nh; ++y) {
			 * xmm0 - xmm3: acc[0..15]
			 * xmm4: current filter coefficient
			 * xmm5, xmm6, xmm7: scratchpad
			/* Clear the four float accumulators (4 lanes each). */
			"pxor %%xmm0, %%xmm0 \n"
			"pxor %%xmm1, %%xmm1 \n"
			"pxor %%xmm2, %%xmm2 \n"
			"pxor %%xmm3, %%xmm3 \n"
			/* a zero is useful during unpacking */
			"pxor %%xmm4, %%xmm4 \n"
			/* fetch all 16 source bytes */
			"movups (%0), %%xmm5 \n"
			"prefetcht0 (%0,%3,4) \n"
			/* unpack into words (xmm5, xmm7) */
			"movaps %%xmm5, %%xmm7 \n"
			"punpcklbw %%xmm4, %%xmm5 \n"
			"punpckhbw %%xmm4, %%xmm7 \n"
			/* unpack xmm5 into dwords (xmm5, xmm6) */
			"movaps %%xmm5, %%xmm6 \n"
			"punpcklwd %%xmm4, %%xmm5 \n"
			"punpckhwd %%xmm4, %%xmm6 \n"
			/* convert xmm5, xmm6 to floats */
			"cvtdq2ps %%xmm5, %%xmm5 \n"
			"cvtdq2ps %%xmm6, %%xmm6 \n"
			/* fetch the coefficient */
			"movss (%2), %%xmm4 \n"
			"shufps $0x0, %%xmm4, %%xmm4 \n"
			/* do the muls for xmm5 and xmm6 */
			"mulps %%xmm4, %%xmm5 \n"
			"mulps %%xmm4, %%xmm6 \n"
			"addps %%xmm5, %%xmm0 \n"
			"addps %%xmm6, %%xmm1 \n"
			/* get the zero back again */
			"pxor %%xmm4, %%xmm4 \n"
			/* unpack xmm7 into dwords (xmm7, xmm6) */
			"movaps %%xmm7, %%xmm6 \n"
			"punpcklwd %%xmm4, %%xmm7 \n"
			"punpckhwd %%xmm4, %%xmm6 \n"
			/* convert xmm7, xmm6 to floats */
			"cvtdq2ps %%xmm7, %%xmm7 \n"
			"cvtdq2ps %%xmm6, %%xmm6 \n"
			/* fetch the coefficient */
			"movss (%2), %%xmm4 \n"
			"shufps $0x0, %%xmm4, %%xmm4 \n"
			/* do the second set of muls */
			"mulps %%xmm4, %%xmm7 \n"
			"mulps %%xmm4, %%xmm6 \n"
			"addps %%xmm7, %%xmm2 \n"
			"addps %%xmm6, %%xmm3 \n"
			/* move along, and loop */
			/* store the values */
			"movaps %%xmm0, (%4) \n"
			"movaps %%xmm1, 16(%4) \n"
			"movaps %%xmm2, 32(%4) \n"
			"movaps %%xmm3, 48(%4) \n"
			: "r" (&sptr[pd[y].start * w]), /* 0: srcptr base */
			"r" (pd[y].end - pd[y].start + 1), /* 1: filter len */
			"r" (&coeffs[pd[y].startcoeff]), /* 2: coeffs base */
			"r" ((long)w), /* 3: stride */
			"r" (dptr) /* 4: dstptr base */
			: "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
			/* Scalar equivalent of the asm above: 16 accumulators. */
			float acc[CACHE_LINE_FACTOR];
			for (i = 0; i < CACHE_LINE_FACTOR; ++i)
			float *cf = &coeffs[pd[y].startcoeff];
			for (sy = pd[y].start; sy <= pd[y].end; ++sy) {
				for (i = 0; i < CACHE_LINE_FACTOR; ++i) {
					/* One coefficient per source row, applied to all
					 * 16 columns; cf presumably advances once per sy
					 * outside this inner loop — TODO confirm. */
					acc[i] += sptr[sy * w + i] * *cf;
			for (i = 0; i < CACHE_LINE_FACTOR; ++i) {
	/* NOTE(review): the two loop headers below appear to belong to
	 * different preprocessor branches — leftover columns after the
	 * 16-wide pass vs. the full non-SIMD fallback. Confirm against the
	 * complete source. */
	for (x = (x/CACHE_LINE_FACTOR)*CACHE_LINE_FACTOR; x < w; ++x) {
	for (x = 0; x < w; ++x) {
		unsigned char *sptr = pix + x;
		float *dptr = npix + x;
		for (y = 0; y < nh; ++y) {
			float *cf = &coeffs[pd[y].startcoeff];
			/* Plain one-column dot product down the source rows. */
			for (sy = pd[y].start; sy <= pd[y].end; ++sy) {
				acc += sptr[sy * w] * *cf++;
/*
 * Scale "src" to width x height with the requested per-component sampling
 * factors, returning a newly allocated image. Each plane is scaled in two
 * separable passes: vscale (bytes -> floats, vertical) then hscale
 * (floats -> bytes, horizontal).
 * NOTE(review): "scaling_filter" is unused in the visible lines — confirm.
 */
qscale_img *qscale_scale(qscale_img *src, unsigned width, unsigned height, unsigned samp_h0, unsigned samp_v0, unsigned samp_h1, unsigned samp_v1, unsigned samp_h2, unsigned samp_v2, enum qscale_scaling_filter scaling_filter)
	qscale_img *dst = (qscale_img *)malloc(sizeof(qscale_img));
	dst->height = height;

	/* The largest sampling factors define full resolution; each plane's
	 * size is scaled down relative to them. */
	unsigned max_samp_h, max_samp_v;
	max_samp_h = samp_h0;
	if (samp_h1 > max_samp_h)
		max_samp_h = samp_h1;
	if (samp_h2 > max_samp_h)
		max_samp_h = samp_h2;
	max_samp_v = samp_v0;
	if (samp_v1 > max_samp_v)
		max_samp_v = samp_v1;
	if (samp_v2 > max_samp_v)
		max_samp_v = samp_v2;

	dst->w0 = width * samp_h0 / max_samp_h;
	dst->h0 = height * samp_v0 / max_samp_v;

	dst->w1 = width * samp_h1 / max_samp_h;
	dst->h1 = height * samp_v1 / max_samp_v;

	dst->w2 = width * samp_h2 / max_samp_h;
	dst->h2 = height * samp_v2 / max_samp_v;

	dst->samp_h0 = samp_h0;
	dst->samp_v0 = samp_v0;

	dst->samp_h1 = samp_h1;
	dst->samp_v1 = samp_v1;

	dst->samp_h2 = samp_h2;
	dst->samp_v2 = samp_v2;

	/* Row strides rounded up to a whole number of DCT blocks (DCTSIZE
	 * is a power of two, so the AND mask rounds up correctly). */
	unsigned dstride0 = (dst->w0 + DCTSIZE-1) & ~(DCTSIZE-1);
	unsigned dstride1 = (dst->w1 + DCTSIZE-1) & ~(DCTSIZE-1);
	unsigned dstride2 = (dst->w2 + DCTSIZE-1) & ~(DCTSIZE-1);

	unsigned sstride0 = (src->w0 + DCTSIZE-1) & ~(DCTSIZE-1);
	unsigned sstride1 = (src->w1 + DCTSIZE-1) & ~(DCTSIZE-1);
	unsigned sstride2 = (src->w2 + DCTSIZE-1) & ~(DCTSIZE-1);

	/* FIXME: handle out-of-memory gracefully */

	/* Luma plane (component 0). */
	float *npix = (float*)memalign(16, sstride0 * dst->h0 * sizeof(float));
	vscale(src->data_y, npix, sstride0, src->h0, dst->h0, sstride0);
	dst->data_y = (unsigned char *)malloc(dst->h0 * dstride0);
	hscale(npix, dst->data_y, src->w0, dst->h0, dst->w0, sstride0, dstride0);

	/* NOTE(review): data_cr is scaled with the component-1 (w1/h1)
	 * geometry and data_cb with component-2 below, while the loader
	 * allocates data_cb from comp_info[1] and data_cr from comp_info[2].
	 * Harmless when Cb and Cr share sampling factors (the usual case),
	 * but verify the pairing is intentional. */
	float *npix = (float*)memalign(16, sstride1 * dst->h1 * sizeof(float));
	vscale(src->data_cr, npix, sstride1, src->h1, dst->h1, sstride1);
	dst->data_cr = (unsigned char *)malloc(dst->h1 * dstride1);
	hscale(npix, dst->data_cr, src->w1, dst->h1, dst->w1, sstride1, dstride1);

	float *npix = (float*)memalign(16, sstride2 * dst->h2 * sizeof(float));
	vscale(src->data_cb, npix, sstride2, src->h2, dst->h2, sstride2);
	dst->data_cb = (unsigned char *)malloc(dst->h2 * dstride2);
	hscale(npix, dst->data_cb, src->w2, dst->h2, dst->w2, sstride2, dstride2);