/*
 * qscale: Quick, high-quality JPEG-to-JPEG scaler.
 * Copyright (C) 2008 Steinar H. Gunderson <sgunderson@bigfoot.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <malloc.h>
#include "jpeglib.h"

#include "libqscale.h"
/* The number of pixels to process at a time when scaling vertically. */
#define CACHE_LINE_FACTOR 16

/* Whether to use SSE for horizontal scaling or not (requires SSE3). */
#define USE_HORIZONTAL_SSE 1

/* Whether to use SSE for vertical scaling or not (requires only SSE1). */
#define USE_VERTICAL_SSE 1

#if USE_VERTICAL_SSE
#undef CACHE_LINE_FACTOR
#define CACHE_LINE_FACTOR 16
#endif

#ifndef M_PI
#define M_PI 3.14159265358979323846264
#endif

#if USE_VERTICAL_SSE || USE_HORIZONTAL_SSE
typedef float v4sf __attribute__((vector_size(16)));
typedef int v4si __attribute__((vector_size(16)));
typedef short v8hi __attribute__((vector_size(16)));
typedef char v16qi __attribute__((vector_size(16)));
#endif
qscale_img *qscale_load_jpeg(const char *filename)
{
	qscale_img *img;
	FILE *file = fopen(filename, "rb");
	if (file == NULL) {
		return NULL;
	}

	img = qscale_load_jpeg_from_stdio(file);

	fclose(file);
	return img;
}
qscale_img *qscale_load_jpeg_from_stdio(FILE *file)
{
	qscale_img *img = (qscale_img *)malloc(sizeof(qscale_img));
	if (img == NULL) {
		return NULL;
	}

	img->data_y = img->data_cb = img->data_cr = NULL;

	/* FIXME: Better error handling here (ie., return NULL). */
	struct jpeg_decompress_struct dinfo;
	struct jpeg_error_mgr jerr;
	dinfo.err = jpeg_std_error(&jerr);
	jpeg_create_decompress(&dinfo);
	jpeg_stdio_src(&dinfo, file);
	jpeg_read_header(&dinfo, TRUE);
	dinfo.raw_data_out = TRUE;
	jpeg_start_decompress(&dinfo);
	if (dinfo.num_components != 1 && dinfo.num_components != 3) {
		fprintf(stderr, "Unsupported number of components: %d\n", dinfo.num_components);
		exit(1);
	}
	img->num_components = dinfo.num_components;
	img->width = dinfo.image_width;
	img->height = dinfo.image_height;

	img->w0 = dinfo.image_width * dinfo.comp_info[0].h_samp_factor / dinfo.max_h_samp_factor;
	img->h0 = dinfo.image_height * dinfo.comp_info[0].v_samp_factor / dinfo.max_v_samp_factor;

	if (img->num_components == 3) {
		img->w1 = dinfo.image_width * dinfo.comp_info[1].h_samp_factor / dinfo.max_h_samp_factor;
		img->h1 = dinfo.image_height * dinfo.comp_info[1].v_samp_factor / dinfo.max_v_samp_factor;

		img->w2 = dinfo.image_width * dinfo.comp_info[2].h_samp_factor / dinfo.max_h_samp_factor;
		img->h2 = dinfo.image_height * dinfo.comp_info[2].v_samp_factor / dinfo.max_v_samp_factor;
	}

	img->samp_h0 = dinfo.comp_info[0].h_samp_factor;
	img->samp_v0 = dinfo.comp_info[0].v_samp_factor;

	if (img->num_components == 3) {
		img->samp_h1 = dinfo.comp_info[1].h_samp_factor;
		img->samp_v1 = dinfo.comp_info[1].v_samp_factor;

		img->samp_h2 = dinfo.comp_info[2].h_samp_factor;
		img->samp_v2 = dinfo.comp_info[2].v_samp_factor;
	}
	img->data_y = (JSAMPLE*)memalign(16, dinfo.comp_info[0].height_in_blocks * dinfo.comp_info[0].width_in_blocks * DCTSIZE * DCTSIZE);
	if (img->data_y == NULL) {
		fprintf(stderr, "Couldn't allocate memory for the Y plane\n");
		exit(1);
	}

	if (img->num_components == 3) {
		img->data_cb = (JSAMPLE*)memalign(16, dinfo.comp_info[1].height_in_blocks * dinfo.comp_info[1].width_in_blocks * DCTSIZE * DCTSIZE);
		img->data_cr = (JSAMPLE*)memalign(16, dinfo.comp_info[2].height_in_blocks * dinfo.comp_info[2].width_in_blocks * DCTSIZE * DCTSIZE);
		if (img->data_cb == NULL || img->data_cr == NULL) {
			fprintf(stderr, "Couldn't allocate memory for the chroma planes\n");
			exit(1);
		}
	}
	int total_lines = 0, blocks = 0;
	while (total_lines < dinfo.comp_info[0].height_in_blocks * DCTSIZE) {
		unsigned max_lines = dinfo.max_v_samp_factor * DCTSIZE;

		JSAMPROW y_row_ptrs[max_lines];
		JSAMPROW cb_row_ptrs[max_lines];
		JSAMPROW cr_row_ptrs[max_lines];
		JSAMPROW* ptrs[] = { y_row_ptrs, cb_row_ptrs, cr_row_ptrs };
		unsigned i;

		for (i = 0; i < max_lines; ++i) {
			y_row_ptrs[i] = img->data_y + (i+blocks*DCTSIZE*dinfo.comp_info[0].v_samp_factor) * dinfo.comp_info[0].width_in_blocks * DCTSIZE;
			if (img->num_components == 3) {
				cb_row_ptrs[i] = img->data_cb + (i+blocks*DCTSIZE*dinfo.comp_info[1].v_samp_factor) * dinfo.comp_info[1].width_in_blocks * DCTSIZE;
				cr_row_ptrs[i] = img->data_cr + (i+blocks*DCTSIZE*dinfo.comp_info[2].v_samp_factor) * dinfo.comp_info[2].width_in_blocks * DCTSIZE;
			}
		}

		total_lines += max_lines;
		++blocks;

		if (jpeg_read_raw_data(&dinfo, ptrs, max_lines) == 0)
			break;
	}

	jpeg_destroy_decompress(&dinfo);
	return img;
}
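/*
 * A note on the read loop above: libjpeg's raw-data interface delivers a
 * whole iMCU row per jpeg_read_raw_data() call, i.e. max_v_samp_factor *
 * DCTSIZE luma scanlines (and correspondingly fewer chroma scanlines),
 * which is why the row-pointer arrays are sized for exactly that many
 * lines and rebuilt on every iteration.
 */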
void qscale_destroy(qscale_img *img)
{
	free(img->data_y);
	free(img->data_cb);
	free(img->data_cr);
	free(img);
}
static double sinc(double x)
{
	static const double cutoff = 1.220703668e-4;  /* sqrt(sqrt(eps)) */

	if (fabs(x) < cutoff) {
		/* For small |x|, use Taylor series instead */
		const double x2 = x * x;
		const double x4 = x2 * x2;

		return 1.0 - x2 / 6.0 + x4 / 120.0;
	} else {
		return sin(x) / x;
	}
}
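/*
 * The cutoff above is roughly DBL_EPSILON^(1/4): below it, the truncated
 * series 1 - x^2/6 + x^4/120 already agrees with sin(x)/x to full double
 * precision (the first omitted term, x^6/5040, is far below machine
 * epsilon), and it sidesteps the 0/0 that a direct evaluation would hit
 * at x == 0.
 */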
static double lanczos_tap(double x)
{
	if (x < -3.0 || x > 3.0)
		return 0.0;
	if (x < 0.0)
		return sinc(-x*M_PI) * sinc(-x*M_PI / 3.0);
	else
		return sinc(x*M_PI) * sinc(x*M_PI / 3.0);
}
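/*
 * Both branches above evaluate the same symmetric Lanczos-3 kernel,
 *
 *     L(x) = sinc(pi x) * sinc(pi x / 3)   for |x| < 3,  0 otherwise;
 *
 * the sign flip in the negative branch simply mirrors x to |x|.
 */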
static double mitchell_tap(double x)
{
	const double b = 1.0 / 3.0;
	const double c = 1.0 / 3.0;
	const double p0 = (  6.0 -  2.0*b          ) / 6.0;
	const double p2 = (-18.0 + 12.0*b +  6.0*c) / 6.0;
	const double p3 = ( 12.0 -  9.0*b -  6.0*c) / 6.0;
	const double q0 = (          8.0*b + 24.0*c) / 6.0;
	const double q1 = (        -12.0*b - 48.0*c) / 6.0;
	const double q2 = (          6.0*b + 30.0*c) / 6.0;
	const double q3 = (        -      b -  6.0*c) / 6.0;

	if (x < -2.0) {
		return 0.0;
	} else if (x < -1.0) {
		return q0 - x * (q1 - x * (q2 - x * q3));
	} else if (x < 0.0) {
		return p0 + x * x * (p2 - x * p3);
	} else if (x < 1.0) {
		return p0 + x * x * (p2 + x * p3);
	} else if (x < 2.0) {
		return q0 + x * (q1 + x * (q2 + x * q3));
	} else {
		return 0.0;
	}
}
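/*
 * The p/q coefficients above are the Mitchell-Netravali cubic with
 * B = C = 1/3:
 *
 *          { ((12-9B-6C)|x|^3 + (-18+12B+6C)|x|^2 + (6-2B)) / 6,            |x| < 1
 *   k(x) = { ((-B-6C)|x|^3 + (6B+30C)|x|^2 + (-12B-48C)|x| + (8B+24C)) / 6, 1 <= |x| < 2
 *          { 0                                                              otherwise
 *
 * evaluated with Horner's rule; the negative branches fold in |x| by
 * flipping the sign of the odd powers.
 */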
struct pix_desc {
	unsigned start, end;
	unsigned startcoeff;
};

static void hscale(float *pix, unsigned char *npix, unsigned w, unsigned h, unsigned nw, unsigned sstride, unsigned dstride, enum qscale_scaling_filter scaling_filter)
{
	struct pix_desc *pd = (struct pix_desc *)malloc(nw * sizeof(struct pix_desc));
	int size_coeffs = 8;
	float *coeffs = (float *)malloc(size_coeffs * sizeof(float));
	int num_coeffs = 0;
	int x, y;
	double sf = (double)w / (double)nw;
	double support;

	if (scaling_filter == LANCZOS) {
		support = (w > nw) ? (3.0 * sf) : (3.0 / sf);
	} else {  /* Mitchell */
		support = (w > nw) ? (2.0 * sf) : (2.0 / sf);
	}
	/* calculate the filter */
	for (x = 0; x < nw; ++x) {
		int start = ceil(x * sf - support);
		int end = floor(x * sf + support);
		int sx;
		double sum = 0.0;

		if (start < 0)
			start = 0;
		if (end > (int)w - 1)
			end = (int)w - 1;

#if USE_HORIZONTAL_SSE
		/* round up so we get a multiple of four for the SSE code */
		int num = (end - start + 1);
		if (num % 4 != 0) {
			/* prefer aligning it if possible */
			if (start % 4 != 0 && start % 4 <= num % 4) {
				num += start % 4;
				start -= start % 4;
			}
			if (num % 4 != 0) {
				end += 4 - (num % 4);
			}
		}
#endif

		pd[x].start = start;
		pd[x].end = end;
		pd[x].startcoeff = num_coeffs;
		for (sx = start; sx <= end; ++sx) {
			double nd = (w > nw) ? (sx/sf - x) : (sx - x*sf);
			double f;
			if (scaling_filter == LANCZOS) {
				f = lanczos_tap(nd);
			} else {  /* Mitchell */
				f = mitchell_tap(nd);
			}
			if (num_coeffs == size_coeffs) {
				size_coeffs <<= 1;
				coeffs = (float *)realloc(coeffs, size_coeffs * sizeof(float));
			}

			coeffs[num_coeffs++] = f;
			sum += f;
		}

		for (sx = start; sx <= end; ++sx) {
			coeffs[pd[x].startcoeff + sx - start] /= sum;
		}
	}
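	/*
	 * Each output pixel's taps have just been normalized to sum to one, so
	 * the filter keeps unit DC gain (and thus overall brightness) even where
	 * its support was clipped against the image edges.
	 */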
	for (y = 0; y < h; ++y) {
		float *sptr = pix + y*sstride;
		unsigned char *dptr = npix + y*dstride;
		unsigned char ch;
		for (x = 0; x < nw; ++x) {
#if USE_HORIZONTAL_SSE
			v4sf acc = { 0.0f, 0.0f, 0.0f, 0.0f };
			static const v4sf low = { 0.0f, 0.0f, 0.0f, 0.0f };
			static const v4sf high = { 255.0f, 255.0f, 255.0f, 255.0f };
			int result;
			int i;

			const float *sptr_xmm = &sptr[pd[x].start];
			const float *coeffptr = &coeffs[pd[x].startcoeff];
			const int filter_len = (pd[x].end - pd[x].start + 1) / 4;

			for (i = 0; i < filter_len; ++i) {
				v4sf pixels = __builtin_ia32_loadups(&sptr_xmm[i * 4]);
				v4sf coeffs = __builtin_ia32_loadups(&coeffptr[i * 4]);
				acc = __builtin_ia32_addps(acc, __builtin_ia32_mulps(pixels, coeffs));
			}
			acc = __builtin_ia32_haddps(acc, acc);
			acc = __builtin_ia32_haddps(acc, acc);
			acc = __builtin_ia32_maxss(acc, low);
			acc = __builtin_ia32_minss(acc, high);
			result = __builtin_ia32_cvtss2si(acc);

			*dptr++ = (unsigned char)result;
#else
			float acc = 0.0;
			float *cf = &coeffs[pd[x].startcoeff];
			unsigned sx;

			for (sx = pd[x].start; sx <= pd[x].end; ++sx) {
				acc += sptr[sx] * *cf++;
			}

			if (acc < 0.0)
				ch = 0;
			else if (acc > 255.0)
				ch = 255;
			else
				ch = (unsigned char)acc;
			*dptr++ = ch;
#endif
		}

		/* pad out to the destination stride with copies of the last pixel */
		ch = dptr[-1];
		for ( ; x < dstride; ++x) {
			*dptr++ = ch;
		}
	}

	free(pd);
	free(coeffs);
}
static void vscale(unsigned char *pix, float *npix, unsigned w, unsigned h, unsigned nh, unsigned dstride, enum qscale_scaling_filter scaling_filter)
{
	struct pix_desc *pd = (struct pix_desc *)malloc(nh * sizeof(struct pix_desc));
	int size_coeffs = 8;
	float *coeffs = (float *)malloc(size_coeffs * sizeof(float));
	int num_coeffs = 0;
	int x, y;
	double sf = (double)h / (double)nh;
	double support;

	if (scaling_filter == LANCZOS) {
		support = (h > nh) ? (3.0 * sf) : (3.0 / sf);
	} else {  /* Mitchell */
		support = (h > nh) ? (2.0 * sf) : (2.0 / sf);
	}
	/* calculate the filter */
	for (y = 0; y < nh; ++y) {
		int start = ceil(y * sf - support);
		int end = floor(y * sf + support);
		int sy;
		double sum = 0.0;

		if (start < 0)
			start = 0;
		if (end > (int)h - 1)
			end = (int)h - 1;

		pd[y].start = start;
		pd[y].end = end;
		pd[y].startcoeff = num_coeffs;

		for (sy = start; sy <= end; ++sy) {
			double nd = (h > nh) ? (sy/sf - y) : (sy - y*sf);
			double f;
			if (scaling_filter == LANCZOS) {
				f = lanczos_tap(nd);
			} else {  /* Mitchell */
				f = mitchell_tap(nd);
			}
			if (num_coeffs == size_coeffs) {
				size_coeffs <<= 1;
				coeffs = (float *)realloc(coeffs, size_coeffs * sizeof(float));
			}

			coeffs[num_coeffs++] = f;
			sum += f;
		}

		for (sy = start; sy <= end; ++sy) {
			coeffs[pd[y].startcoeff + sy - start] /= sum;
		}
	}
#if CACHE_LINE_FACTOR > 1
	for (x = 0; x < (w/CACHE_LINE_FACTOR) * CACHE_LINE_FACTOR; x += CACHE_LINE_FACTOR) {
		unsigned char *sptr = pix + x;
		float *dptr = npix + x;
		for (y = 0; y < nh; ++y) {
#if USE_VERTICAL_SSE
			/* A zero is useful during unpacking. */
			static const v4sf zero = { 0.0f, 0.0f, 0.0f, 0.0f };
			const unsigned char *sptr_xmm = &sptr[pd[y].start * w];
			const float *coeffptr = &coeffs[pd[y].startcoeff];
			const int filter_len = pd[y].end - pd[y].start + 1;
			int i;

			v4sf acc0 = { 0.0f, 0.0f, 0.0f, 0.0f };
			v4sf acc1 = { 0.0f, 0.0f, 0.0f, 0.0f };
			v4sf acc2 = { 0.0f, 0.0f, 0.0f, 0.0f };
			v4sf acc3 = { 0.0f, 0.0f, 0.0f, 0.0f };

			for (i = 0; i < filter_len; ++i, ++coeffptr, sptr_xmm += w) {
				__builtin_prefetch(sptr_xmm + w, 0);
				v16qi src = (v16qi)__builtin_ia32_loadups((float*)sptr_xmm);

				// unpack into words
				v8hi src_lo = (v8hi)__builtin_ia32_punpcklbw128(src, (v16qi)zero);
				v8hi src_hi = (v8hi)__builtin_ia32_punpckhbw128(src, (v16qi)zero);

				// unpack into dwords, convert to floats
				v4si src0_i = (v4si)__builtin_ia32_punpcklwd128(src_lo, (v8hi)zero);
				v4si src1_i = (v4si)__builtin_ia32_punpckhwd128(src_lo, (v8hi)zero);
				v4si src2_i = (v4si)__builtin_ia32_punpcklwd128(src_hi, (v8hi)zero);
				v4si src3_i = (v4si)__builtin_ia32_punpckhwd128(src_hi, (v8hi)zero);

				v4sf src0 = __builtin_ia32_cvtdq2ps(src0_i);
				v4sf src1 = __builtin_ia32_cvtdq2ps(src1_i);
				v4sf src2 = __builtin_ia32_cvtdq2ps(src2_i);
				v4sf src3 = __builtin_ia32_cvtdq2ps(src3_i);

				// fetch the coefficient, and replicate it
				v4sf coeff = { *coeffptr, *coeffptr, *coeffptr, *coeffptr };

				// do the actual muladds
				acc0 = __builtin_ia32_addps(acc0, __builtin_ia32_mulps(src0, coeff));
				acc1 = __builtin_ia32_addps(acc1, __builtin_ia32_mulps(src1, coeff));
				acc2 = __builtin_ia32_addps(acc2, __builtin_ia32_mulps(src2, coeff));
				acc3 = __builtin_ia32_addps(acc3, __builtin_ia32_mulps(src3, coeff));
			}

			*(v4sf *)(&dptr[0]) = acc0;
			*(v4sf *)(&dptr[4]) = acc1;
			*(v4sf *)(&dptr[8]) = acc2;
			*(v4sf *)(&dptr[12]) = acc3;
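			/*
			 * The stores above compile to aligned 16-byte SSE stores: npix is
			 * allocated with memalign(16), x advances in steps of
			 * CACHE_LINE_FACTOR floats, and the dstride passed by the callers
			 * in this file is always rounded up to a multiple of DCTSIZE
			 * floats, so dptr stays 16-byte aligned.
			 */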
#else
			int i;
			float acc[CACHE_LINE_FACTOR];
			for (i = 0; i < CACHE_LINE_FACTOR; ++i)
				acc[i] = 0.0;
			float *cf = &coeffs[pd[y].startcoeff];
			unsigned sy;

			for (sy = pd[y].start; sy <= pd[y].end; ++sy) {
				for (i = 0; i < CACHE_LINE_FACTOR; ++i) {
					acc[i] += sptr[sy * w + i] * *cf;
				}
				++cf;
			}

			for (i = 0; i < CACHE_LINE_FACTOR; ++i) {
				dptr[i] = acc[i];
			}
#endif
			dptr += dstride;
		}
	}
	for (x = (x/CACHE_LINE_FACTOR)*CACHE_LINE_FACTOR; x < w; ++x) {
#else
	for (x = 0; x < w; ++x) {
#endif
		unsigned char *sptr = pix + x;
		float *dptr = npix + x;
		for (y = 0; y < nh; ++y) {
			float acc = 0.0;
			float *cf = &coeffs[pd[y].startcoeff];
			unsigned sy;

			for (sy = pd[y].start; sy <= pd[y].end; ++sy) {
				acc += sptr[sy * w] * *cf++;
			}

			*dptr = acc;
			dptr += dstride;
		}
	}

	free(pd);
	free(coeffs);
}
qscale_img *qscale_clone(const qscale_img *img)
{
	qscale_img *dst = (qscale_img *)malloc(sizeof(qscale_img));
	if (dst == NULL) {
		return NULL;
	}

	*dst = *img;

	unsigned dstride0 = (dst->w0 + DCTSIZE-1) & ~(DCTSIZE-1);
	unsigned dstride1 = (dst->w1 + DCTSIZE-1) & ~(DCTSIZE-1);
	unsigned dstride2 = (dst->w2 + DCTSIZE-1) & ~(DCTSIZE-1);

	/* FIXME: handle out-of-memory gracefully */
	if (img->data_y != NULL) {
		dst->data_y = (unsigned char *)malloc(dst->h0 * dstride0);
		memcpy(dst->data_y, img->data_y, dst->h0 * dstride0);
	}
	if (img->data_cb != NULL) {
		dst->data_cb = (unsigned char *)malloc(dst->h1 * dstride1);
		memcpy(dst->data_cb, img->data_cb, dst->h1 * dstride1);
	}
	if (img->data_cr != NULL) {
		dst->data_cr = (unsigned char *)malloc(dst->h2 * dstride2);
		memcpy(dst->data_cr, img->data_cr, dst->h2 * dstride2);
	}

	return dst;
}
qscale_img *qscale_scale(qscale_img *src, unsigned width, unsigned height, unsigned samp_h0, unsigned samp_v0, unsigned samp_h1, unsigned samp_v1, unsigned samp_h2, unsigned samp_v2, enum qscale_scaling_filter scaling_filter)
{
	qscale_img *dst = (qscale_img *)malloc(sizeof(qscale_img));
	if (dst == NULL) {
		return NULL;
	}

	dst->data_y = dst->data_cb = dst->data_cr = NULL;
	dst->width = width;
	dst->height = height;
	dst->num_components = src->num_components;

	unsigned max_samp_h, max_samp_v;
	max_samp_h = samp_h0;
	if (src->num_components == 3) {
		if (samp_h1 > max_samp_h)
			max_samp_h = samp_h1;
		if (samp_h2 > max_samp_h)
			max_samp_h = samp_h2;
	}

	max_samp_v = samp_v0;
	if (src->num_components == 3) {
		if (samp_v1 > max_samp_v)
			max_samp_v = samp_v1;
		if (samp_v2 > max_samp_v)
			max_samp_v = samp_v2;
	}

	dst->w0 = width * samp_h0 / max_samp_h;
	dst->h0 = height * samp_v0 / max_samp_v;

	if (src->num_components == 3) {
		dst->w1 = width * samp_h1 / max_samp_h;
		dst->h1 = height * samp_v1 / max_samp_v;

		dst->w2 = width * samp_h2 / max_samp_h;
		dst->h2 = height * samp_v2 / max_samp_v;
	}

	dst->samp_h0 = samp_h0;
	dst->samp_v0 = samp_v0;

	if (src->num_components == 3) {
		dst->samp_h1 = samp_h1;
		dst->samp_v1 = samp_v1;

		dst->samp_h2 = samp_h2;
		dst->samp_v2 = samp_v2;
	}
	unsigned dstride0 = (dst->w0 + DCTSIZE-1) & ~(DCTSIZE-1);
	unsigned dstride1 = (dst->w1 + DCTSIZE-1) & ~(DCTSIZE-1);
	unsigned dstride2 = (dst->w2 + DCTSIZE-1) & ~(DCTSIZE-1);

	unsigned sstride0 = (src->w0 + DCTSIZE-1) & ~(DCTSIZE-1);
	unsigned sstride1 = (src->w1 + DCTSIZE-1) & ~(DCTSIZE-1);
	unsigned sstride2 = (src->w2 + DCTSIZE-1) & ~(DCTSIZE-1);
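	/*
	 * Every plane is stored with its width rounded up to a whole number of
	 * 8-pixel DCT blocks (the "& ~(DCTSIZE-1)" above), which is what
	 * libjpeg's raw-data interface expects. The scaling itself is separable:
	 * vscale() first resamples the columns of a plane into a 16-byte-aligned
	 * float buffer, then hscale() resamples the rows of that buffer and
	 * clamps the result back to 8-bit samples.
	 */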
	/* FIXME: handle out-of-memory gracefully */
	{
		float *npix = (float*)memalign(16, sstride0 * dst->h0 * sizeof(float));
		vscale(src->data_y, npix, sstride0, src->h0, dst->h0, sstride0, scaling_filter);
		dst->data_y = (unsigned char *)malloc(dst->h0 * dstride0);
		hscale(npix, dst->data_y, src->w0, dst->h0, dst->w0, sstride0, dstride0, scaling_filter);
		free(npix);
	}
	if (src->num_components == 3) {
		{
			float *npix = (float*)memalign(16, sstride1 * dst->h1 * sizeof(float));
			vscale(src->data_cr, npix, sstride1, src->h1, dst->h1, sstride1, scaling_filter);
			dst->data_cr = (unsigned char *)malloc(dst->h1 * dstride1);
			hscale(npix, dst->data_cr, src->w1, dst->h1, dst->w1, sstride1, dstride1, scaling_filter);
			free(npix);
		}
		{
			float *npix = (float*)memalign(16, sstride2 * dst->h2 * sizeof(float));
			vscale(src->data_cb, npix, sstride2, src->h2, dst->h2, sstride2, scaling_filter);
			dst->data_cb = (unsigned char *)malloc(dst->h2 * dstride2);
			hscale(npix, dst->data_cb, src->w2, dst->h2, dst->w2, sstride2, dstride2, scaling_filter);
			free(npix);
		}
	}

	return dst;
}
int qscale_save_jpeg(const qscale_img *img, const char *filename, unsigned jpeg_quality, enum qscale_jpeg_mode jpeg_mode)
{
	FILE *file = fopen(filename, "wb");
	if (file == NULL) {
		return -1;
	}

	int err = qscale_save_jpeg_to_stdio(img, file, jpeg_quality, jpeg_mode);

	fclose(file);
	return err;
}
int qscale_save_jpeg_to_stdio(const qscale_img *img, FILE *file, unsigned jpeg_quality, enum qscale_jpeg_mode jpeg_mode)
{
	struct jpeg_compress_struct cinfo;
	struct jpeg_error_mgr jerr;
	cinfo.err = jpeg_std_error(&jerr);
	jpeg_create_compress(&cinfo);
	jpeg_stdio_dest(&cinfo, file);
	cinfo.input_components = img->num_components;
	jpeg_set_defaults(&cinfo);
	jpeg_set_quality(&cinfo, jpeg_quality, FALSE);

	if (jpeg_mode == PROGRESSIVE) {
		jpeg_simple_progression(&cinfo);
	}

	cinfo.image_width = img->width;
	cinfo.image_height = img->height;
	cinfo.raw_data_in = TRUE;
	if (img->num_components == 3) {
		jpeg_set_colorspace(&cinfo, JCS_YCbCr);
	} else {
		jpeg_set_colorspace(&cinfo, JCS_GRAYSCALE);
	}
	cinfo.comp_info[0].h_samp_factor = img->samp_h0;
	cinfo.comp_info[0].v_samp_factor = img->samp_v0;
	if (img->num_components == 3) {
		cinfo.comp_info[1].h_samp_factor = img->samp_h1;
		cinfo.comp_info[1].v_samp_factor = img->samp_v1;
		cinfo.comp_info[2].h_samp_factor = img->samp_h2;
		cinfo.comp_info[2].v_samp_factor = img->samp_v2;
	}
	jpeg_start_compress(&cinfo, TRUE);

	unsigned dstride0 = (img->w0 + DCTSIZE-1) & ~(DCTSIZE-1);
	unsigned dstride1 = (img->w1 + DCTSIZE-1) & ~(DCTSIZE-1);
	unsigned dstride2 = (img->w2 + DCTSIZE-1) & ~(DCTSIZE-1);

	int total_lines = 0, blocks = 0;
	while (total_lines < cinfo.comp_info[0].height_in_blocks * DCTSIZE) {
		unsigned max_lines = cinfo.max_v_samp_factor * DCTSIZE;

		JSAMPROW y_row_ptrs[max_lines];
		JSAMPROW cb_row_ptrs[max_lines];
		JSAMPROW cr_row_ptrs[max_lines];
		JSAMPROW* ptrs[] = { y_row_ptrs, cb_row_ptrs, cr_row_ptrs };
		unsigned i;

		for (i = 0; i < max_lines; ++i) {
			/* simple edge extension */
			int yline = i + blocks*DCTSIZE*cinfo.comp_info[0].v_samp_factor;
			if (yline > img->h0 - 1)
				yline = img->h0 - 1;

			y_row_ptrs[i] = img->data_y + yline * dstride0;

			if (img->num_components == 3) {
				int cbline = i + blocks*DCTSIZE*cinfo.comp_info[1].v_samp_factor;
				if (cbline > img->h1 - 1)
					cbline = img->h1 - 1;

				int crline = i + blocks*DCTSIZE*cinfo.comp_info[2].v_samp_factor;
				if (crline > img->h2 - 1)
					crline = img->h2 - 1;

				cb_row_ptrs[i] = img->data_cb + cbline * dstride1;
				cr_row_ptrs[i] = img->data_cr + crline * dstride2;
			}
		}

		total_lines += max_lines;
		++blocks;

		jpeg_write_raw_data(&cinfo, ptrs, max_lines);
	}
	jpeg_finish_compress(&cinfo);
	jpeg_destroy_compress(&cinfo);

	return 0;
}
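/*
 * Typical use of this library, as a sketch (error handling omitted;
 * filenames and parameters are just examples):
 *
 *     qscale_img *src = qscale_load_jpeg("in.jpg");
 *     qscale_img *dst = qscale_scale(src, 640, 480,
 *                                    2, 2, 1, 1, 1, 1,   // 4:2:0 subsampling
 *                                    LANCZOS);
 *     qscale_save_jpeg(dst, "out.jpg", 85, PROGRESSIVE);
 *     qscale_destroy(src);
 *     qscale_destroy(dst);
 */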