2 * High quality image resampling with polyphase filters
3 * Copyright (c) 2001 Fabrice Bellard.
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 * High quality image resampling with polyphase filters.
32 #include "libvo/fastmemcpy.h"
/* Compile-time parameters of the polyphase resampler.
   NOTE(review): PHASE_BITS, NB_TAPS and FILTER_BITS are used below but their
   #defines are not visible in this chunk — confirm they precede this point. */
35 #define NB_COMPONENTS 3 /* three colour planes (this file resamples YUV420P) */
38 #define NB_PHASES (1 << PHASE_BITS) /* number of sub-pixel filter phases */
40 #define FCENTER 1 /* index of the center of the filter */
41 //#define TEST 1 /* Test it */
43 #define POS_FRAC_BITS 16 /* source positions are 16.16 fixed point */
44 #define POS_FRAC (1 << POS_FRAC_BITS) /* one whole source pixel in fixed point */
45 /* 6 bits precision is needed for MMX */
48 #define LINE_BUF_HEIGHT (NB_TAPS * 4) /* height of the horizontally-filtered line ring buffer */
/* Per-instance resampler state, created by img_resample_full_init().
   NOTE(review): the struct is truncated in this view — the closing brace and
   members referenced later in the file (line_buf, h_incr, v_incr) are not
   visible here. */
50 struct ImgReSampleContext {
51 int iwidth, iheight, owidth, oheight; /* full input and output image sizes */
52 int topBand, bottomBand, leftBand, rightBand; /* cropping removed from the source, in pixels */
53 int padtop, padbottom, padleft, padright; /* padding added around the output, in pixels */
54 int pad_owidth, pad_oheight; /* output size with the padding subtracted */
56 DECLARE_ALIGNED_8(int16_t, h_filters[NB_PHASES][NB_TAPS]); /* horizontal filters */
57 DECLARE_ALIGNED_8(int16_t, v_filters[NB_PHASES][NB_TAPS]); /* vertical filters */
/* Coefficient generator (defined elsewhere, presumably libavcodec): fills
   'filter' with phase_count polyphase kernels of tap_count taps each, scaled
   by 'scale'. NOTE(review): declared here rather than via a header — keep the
   signature in sync with the defining translation unit. */
61 void av_build_filter(int16_t *filter, double factor, int tap_count, int phase_count, int scale, int type);
/* Return the filter phase index (0..NB_PHASES-1) for a 16.16 fixed-point
   source position: the top PHASE_BITS bits of the fractional part. */
63 static inline int get_phase(int pos)
65 return ((pos) >> (POS_FRAC_BITS - PHASE_BITS)) & ((1 << PHASE_BITS) - 1);
68 /* This function must be optimized */
/* Horizontal resampling fast path: steps a 16.16 fixed-point position
   'src_pos' through the source line by 'src_incr' per output pixel and takes
   an NB_TAPS dot product per pixel. Assumes every tap read stays inside the
   line; edge cases are handled by h_resample_slow(). */
69 static void h_resample_fast(uint8_t *dst, int dst_width, const uint8_t *src,
70 int src_width, int src_start, int src_incr,
73 int src_pos, phase, sum, i;
78 for(i=0;i<dst_width;i++) {
/* bounds check — presumably compiled only under TEST in the full file */
81 if ((src_pos >> POS_FRAC_BITS) < 0 ||
82 (src_pos >> POS_FRAC_BITS) > (src_width - NB_TAPS))
85 s = src + (src_pos >> POS_FRAC_BITS); /* integer part selects the first tap */
86 phase = get_phase(src_pos); /* fractional part selects the filter phase */
87 filter = filters + phase * NB_TAPS;
/* unrolled 4-tap dot product (NB_TAPS == 4 variant) */
89 sum = s[0] * filter[0] +
/* generic variant for other tap counts */
97 for(j=0;j<NB_TAPS;j++)
98 sum += s[j] * filter[j];
101 sum = sum >> FILTER_BITS; /* remove the coefficient scaling */
112 /* This function must be optimized */
/* Vertical resampling scalar path: for each output pixel, combines NB_TAPS
   source lines spaced 'wrap' bytes apart with one fixed filter phase. */
113 static void v_resample(uint8_t *dst, int dst_width, const uint8_t *src,
114 int wrap, int16_t *filter)
120 for(i=0;i<dst_width;i++) {
/* unrolled 4-tap vertical dot product (NB_TAPS == 4 variant) */
122 sum = s[0 * wrap] * filter[0] +
123 s[1 * wrap] * filter[1] +
124 s[2 * wrap] * filter[2] +
125 s[3 * wrap] * filter[3];
/* generic variant for other tap counts */
132 for(j=0;j<NB_TAPS;j++) {
133 sum += s1[0] * filter[j];
138 sum = sum >> FILTER_BITS; /* remove the coefficient scaling */
151 #include "i386/mmx.h"
/* FILTER4: load 4 source bytes for the current src_pos into 'reg', widen to
   16-bit, multiply-accumulate against the phase's filter row with pmaddwd,
   shift the 32-bit sums down by FILTER_BITS, and advance src_pos.
   NOTE(review): several continuation lines of this macro are not visible in
   this chunk. */
153 #define FILTER4(reg) \
155 s = src + (src_pos >> POS_FRAC_BITS);\
156 phase = get_phase(src_pos);\
157 filter = filters + phase * NB_TAPS;\
159 punpcklbw_r2r(mm7, reg);\
160 movq_m2r(*filter, mm6);\
161 pmaddwd_r2r(reg, mm6);\
164 paddd_r2r(mm6, reg);\
165 psrad_i2r(FILTER_BITS, reg);\
166 src_pos += src_incr;\
/* debug helper: spill an MMX register to memory and print it */
169 #define DUMP(reg) movq_r2m(reg, tmp); printf(#reg "=%016Lx\n", tmp.uq);
171 /* XXX: do four pixels at a time */
/* MMX horizontal fast path (NB_TAPS == 4): main loop produces 4 output
   pixels per iteration via FILTER4 into mm0..mm3, then packs each result
   back down to unsigned bytes; the tail loop handles the remainder one
   pixel at a time. */
172 static void h_resample_fast4_mmx(uint8_t *dst, int dst_width,
173 const uint8_t *src, int src_width,
174 int src_start, int src_incr, int16_t *filters)
184 while (dst_width >= 4) {
/* saturate each 32-bit sum to an unsigned byte (mm7 is presumably zero) */
191 packuswb_r2r(mm7, mm0);
192 packuswb_r2r(mm7, mm1);
193 packuswb_r2r(mm7, mm3);
194 packuswb_r2r(mm7, mm2);
/* leftover pixels, one at a time */
206 while (dst_width > 0) {
208 packuswb_r2r(mm7, mm0);
/* MMX vertical path (NB_TAPS == 4): processes 4 output pixels per iteration.
   Each of the 4 source lines is loaded, widened to 16-bit, and multiplied by
   its (pre-splatted) coefficient row; sums are shifted by FILTER_BITS, packed
   to bytes and stored 4 at a time. A scalar tail handles the remainder. */
217 static void v_resample4_mmx(uint8_t *dst, int dst_width, const uint8_t *src,
218 int wrap, int16_t *filter)
235 while (dst_width >= 4) {
236 movq_m2r(s[0 * wrap], mm0);
237 punpcklbw_r2r(mm7, mm0); /* widen bytes to words (mm7 presumably zero) */
238 movq_m2r(s[1 * wrap], mm1);
239 punpcklbw_r2r(mm7, mm1);
240 movq_m2r(s[2 * wrap], mm2);
241 punpcklbw_r2r(mm7, mm2);
242 movq_m2r(s[3 * wrap], mm3);
243 punpcklbw_r2r(mm7, mm3);
/* per-line coefficient multiplies; coefs[] is presumably filter[] splatted
   across all four lanes — the setup code is not visible in this chunk */
245 pmullw_m2r(coefs[0], mm0);
246 pmullw_m2r(coefs[1], mm1);
247 pmullw_m2r(coefs[2], mm2);
248 pmullw_m2r(coefs[3], mm3);
253 psraw_i2r(FILTER_BITS, mm0); /* remove coefficient scaling */
255 packuswb_r2r(mm7, mm0); /* saturate words down to bytes */
258 *(uint32_t *)dst = tmp.ud[0]; /* store 4 output pixels at once */
/* scalar tail for the remaining (dst_width % 4) pixels */
263 while (dst_width > 0) {
264 sum = s[0 * wrap] * filter[0] +
265 s[1 * wrap] * filter[1] +
266 s[2 * wrap] * filter[2] +
267 s[3 * wrap] * filter[3];
268 sum = sum >> FILTER_BITS;
/* Scalar/vector overlay unions used by the AltiVec path below.
   NOTE(review): the union declarations are truncated in this chunk — only one
   member of each is visible. */
284 vector unsigned char v;
289 vector signed short v;
/* AltiVec vertical path (NB_TAPS == 4, FILTER_BITS <= 6): resamples 16 output
   pixels per iteration with vec_madds. Scalar code aligns dst to 16 bytes
   first and mops up the tail, clamping each sum to 0..255. */
293 void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
294 int wrap, int16_t *filter)
298 vector unsigned char *tv, tmp, dstv, zero;
299 vec_ss_t srchv[4], srclv[4], fv[4];
300 vector signed short zeros, sumhv, sumlv;
306 The vec_madds later on does an implicit >>15 on the result.
307 Since FILTER_BITS is 8, and we have 15 bits of magnitude in
308 a signed short, we have just enough bits to pre-shift our
309 filter constants <<7 to compensate for vec_madds.
311 fv[i].s[0] = filter[i] << (15-FILTER_BITS);
312 fv[i].v = vec_splat(fv[i].v, 0); /* broadcast coefficient to all lanes */
315 zero = vec_splat_u8(0);
316 zeros = vec_splat_s16(0);
320 When we're resampling, we'd ideally like both our input buffers,
321 and output buffers to be 16-byte aligned, so we can do both aligned
322 reads and writes. Sadly we can't always have this at the moment, so
323 we opt for aligned writes, as unaligned writes have a huge overhead.
324 To do this, do enough scalar resamples to get dst 16-byte aligned.
326 i = (-(int)dst) & 0xf; /* bytes until dst is 16-byte aligned */
328 sum = s[0 * wrap] * filter[0] +
329 s[1 * wrap] * filter[1] +
330 s[2 * wrap] * filter[2] +
331 s[3 * wrap] * filter[3];
332 sum = sum >> FILTER_BITS;
333 if (sum<0) sum = 0; else if (sum>255) sum=255; /* clamp to byte range */
341 /* Do our altivec resampling on 16 pixels at once. */
342 while(dst_width>=16) {
344 Read 16 (potentially unaligned) bytes from each of
345 4 lines into 4 vectors, and split them into shorts.
346 Interleave the multipy/accumulate for the resample
347 filter with the loads to hide the 3 cycle latency
350 tv = (vector unsigned char *) &s[0 * wrap];
/* NOTE(review): vec_lvsl(0, &s[i * wrap]) uses 'i' here where the other
   three lines use their own line index — looks like it should be
   &s[0 * wrap]; verify against the upstream source before changing. */
351 tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[i * wrap]));
352 srchv[0].v = (vector signed short) vec_mergeh(zero, tmp);
353 srclv[0].v = (vector signed short) vec_mergel(zero, tmp);
354 sumhv = vec_madds(srchv[0].v, fv[0].v, zeros);
355 sumlv = vec_madds(srclv[0].v, fv[0].v, zeros);
357 tv = (vector unsigned char *) &s[1 * wrap];
358 tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[1 * wrap]));
359 srchv[1].v = (vector signed short) vec_mergeh(zero, tmp);
360 srclv[1].v = (vector signed short) vec_mergel(zero, tmp);
361 sumhv = vec_madds(srchv[1].v, fv[1].v, sumhv);
362 sumlv = vec_madds(srclv[1].v, fv[1].v, sumlv);
364 tv = (vector unsigned char *) &s[2 * wrap];
365 tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[2 * wrap]));
366 srchv[2].v = (vector signed short) vec_mergeh(zero, tmp);
367 srclv[2].v = (vector signed short) vec_mergel(zero, tmp);
368 sumhv = vec_madds(srchv[2].v, fv[2].v, sumhv);
369 sumlv = vec_madds(srclv[2].v, fv[2].v, sumlv);
371 tv = (vector unsigned char *) &s[3 * wrap];
372 tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[3 * wrap]));
373 srchv[3].v = (vector signed short) vec_mergeh(zero, tmp);
374 srclv[3].v = (vector signed short) vec_mergel(zero, tmp);
375 sumhv = vec_madds(srchv[3].v, fv[3].v, sumhv);
376 sumlv = vec_madds(srclv[3].v, fv[3].v, sumlv);
379 Pack the results into our destination vector,
380 and do an aligned write of that back to memory.
382 dstv = vec_packsu(sumhv, sumlv) ;
383 vec_st(dstv, 0, (vector unsigned char *) dst);
391 If there are any leftover pixels, resample them
392 with the slow scalar method.
395 sum = s[0 * wrap] * filter[0] +
396 s[1 * wrap] * filter[1] +
397 s[2 * wrap] * filter[2] +
398 s[3 * wrap] * filter[3];
399 sum = sum >> FILTER_BITS;
400 if (sum<0) sum = 0; else if (sum>255) sum=255; /* clamp to byte range */
409 /* slow version to handle limit cases. Does not need optimisation */
/* Horizontal resampling edge path: same dot product as h_resample_fast, but
   each tap is bounds-checked against the source line so positions before the
   start or past the end are handled (apparently by clamping — the branch
   bodies are not fully visible in this chunk). */
410 static void h_resample_slow(uint8_t *dst, int dst_width,
411 const uint8_t *src, int src_width,
412 int src_start, int src_incr, int16_t *filters)
414 int src_pos, phase, sum, j, v, i;
415 const uint8_t *s, *src_end;
418 src_end = src + src_width; /* one past the last valid source pixel */
420 for(i=0;i<dst_width;i++) {
421 s = src + (src_pos >> POS_FRAC_BITS);
422 phase = get_phase(src_pos);
423 filter = filters + phase * NB_TAPS;
425 for(j=0;j<NB_TAPS;j++) {
/* per-tap edge handling: 'v' is the (possibly clamped) sample value */
428 else if (s >= src_end)
432 sum += v * filter[j];
435 sum = sum >> FILTER_BITS; /* remove coefficient scaling */
/* Horizontal resampling dispatcher: splits the output line into three spans —
   a left edge handled by h_resample_slow, a middle span where all taps are
   in-bounds (MMX fast path when available, scalar fast path otherwise), and a
   right edge handled by h_resample_slow again. */
446 static void h_resample(uint8_t *dst, int dst_width, const uint8_t *src,
447 int src_width, int src_start, int src_incr,
/* number of output pixels whose source position is still negative */
453 n = (0 - src_start + src_incr - 1) / src_incr;
454 h_resample_slow(dst, n, src, src_width, src_start, src_incr, filters);
457 src_start += n * src_incr;
459 src_end = src_start + dst_width * src_incr;
/* does the span run past the last position where all NB_TAPS fit? */
460 if (src_end > ((src_width - NB_TAPS) << POS_FRAC_BITS)) {
461 n = (((src_width - NB_TAPS + 1) << POS_FRAC_BITS) - 1 - src_start) /
467 if ((mm_flags & MM_MMX) && NB_TAPS == 4)
468 h_resample_fast4_mmx(dst, n,
469 src, src_width, src_start, src_incr, filters);
472 h_resample_fast(dst, n,
473 src, src_width, src_start, src_incr, filters);
/* right edge: whatever remains goes through the slow path */
477 src_start += n * src_incr;
478 h_resample_slow(dst, dst_width,
479 src, src_width, src_start, src_incr, filters);
/* Resample one image plane. For each output row, horizontally filter any
   newly-needed source lines into a ring buffer of LINE_BUF_HEIGHT lines,
   then apply the vertical filter across NB_TAPS buffered lines to produce
   the output row. Out-of-range source rows are replicated at the edges. */
483 static void component_resample(ImgReSampleContext *s,
484 uint8_t *output, int owrap, int owidth, int oheight,
485 uint8_t *input, int iwrap, int iwidth, int iheight)
487 int src_y, src_y1, last_src_y, ring_y, phase_y, y1, y;
488 uint8_t *new_line, *src_line;
/* start before the image so the filter is primed with replicated lines */
490 last_src_y = - FCENTER - 1;
491 /* position of the bottom of the filter in the source image */
492 src_y = (last_src_y + NB_TAPS) * POS_FRAC;
493 ring_y = NB_TAPS; /* position in ring buffer */
494 for(y=0;y<oheight;y++) {
495 /* apply horizontal filter on new lines from input if needed */
496 src_y1 = src_y >> POS_FRAC_BITS;
497 while (last_src_y < src_y1) {
498 if (++ring_y >= LINE_BUF_HEIGHT + NB_TAPS)
501 /* handle limit conditions : replicate line (slightly
502 inefficient because we filter multiple times) */
506 } else if (y1 >= iheight) {
509 src_line = input + y1 * iwrap;
510 new_line = s->line_buf + ring_y * owidth;
511 /* apply filter and handle limit cases correctly */
512 h_resample(new_line, owidth,
513 src_line, iwidth, - FCENTER * POS_FRAC, s->h_incr,
514 &s->h_filters[0][0]);
515 /* handle ring buffer wrapping: mirror the line into the low copy */
516 if (ring_y >= LINE_BUF_HEIGHT) {
517 memcpy(s->line_buf + (ring_y - LINE_BUF_HEIGHT) * owidth,
521 /* apply vertical filter */
522 phase_y = get_phase(src_y);
524 /* MMX path deactivated (&& 0) because of loss of precision */
525 if ((mm_flags & MM_MMX) && NB_TAPS == 4 && 0)
526 v_resample4_mmx(output, owidth,
527 s->line_buf + (ring_y - NB_TAPS + 1) * owidth, owidth,
528 &s->v_filters[phase_y][0]);
/* AltiVec path needs FILTER_BITS <= 6 headroom for vec_madds */
532 if ((mm_flags & MM_ALTIVEC) && NB_TAPS == 4 && FILTER_BITS <= 6)
533 v_resample16_altivec(output, owidth,
534 s->line_buf + (ring_y - NB_TAPS + 1) * owidth, owidth,
535 &s->v_filters[phase_y][0]);
/* portable scalar fallback */
538 v_resample(output, owidth,
539 s->line_buf + (ring_y - NB_TAPS + 1) * owidth, owidth,
540 &s->v_filters[phase_y][0]);
/* Convenience constructor: full-frame resampling with no cropping and no
   padding. Thin wrapper around img_resample_full_init(). */
548 ImgReSampleContext *img_resample_init(int owidth, int oheight,
549 int iwidth, int iheight)
551 return img_resample_full_init(owidth, oheight, iwidth, iheight,
552 0, 0, 0, 0, 0, 0, 0, 0);
/* Allocate and configure a resampling context mapping a cropped region of an
   iwidth x iheight source onto a padded owidth x oheight destination.
   Returns NULL on invalid dimensions or allocation failure (error paths are
   only partially visible in this chunk). Free with img_resample_close(). */
555 ImgReSampleContext *img_resample_full_init(int owidth, int oheight,
556 int iwidth, int iheight,
557 int topBand, int bottomBand,
558 int leftBand, int rightBand,
559 int padtop, int padbottom,
560 int padleft, int padright)
562 ImgReSampleContext *s;
564 if (!owidth || !oheight || !iwidth || !iheight)
567 s = av_mallocz(sizeof(ImgReSampleContext));
/* guard the multiplication below against unsigned overflow */
570 if((unsigned)owidth >= UINT_MAX / (LINE_BUF_HEIGHT + NB_TAPS))
572 s->line_buf = av_mallocz(owidth * (LINE_BUF_HEIGHT + NB_TAPS));
577 s->oheight = oheight;
579 s->iheight = iheight;
581 s->topBand = topBand;
582 s->bottomBand = bottomBand;
583 s->leftBand = leftBand;
584 s->rightBand = rightBand;
587 s->padbottom = padbottom;
588 s->padleft = padleft;
589 s->padright = padright;
/* effective output area once padding is excluded */
591 s->pad_owidth = owidth - (padleft + padright);
592 s->pad_oheight = oheight - (padtop + padbottom);
/* 16.16 fixed-point source step per output pixel, per axis */
594 s->h_incr = ((iwidth - leftBand - rightBand) * POS_FRAC) / s->pad_owidth;
595 s->v_incr = ((iheight - topBand - bottomBand) * POS_FRAC) / s->pad_oheight;
/* build the polyphase coefficient tables for each axis */
597 av_build_filter(&s->h_filters[0][0], (float) s->pad_owidth /
598 (float) (iwidth - leftBand - rightBand), NB_TAPS, NB_PHASES, 1<<FILTER_BITS, 0);
599 av_build_filter(&s->v_filters[0][0], (float) s->pad_oheight /
600 (float) (iheight - topBand - bottomBand), NB_TAPS, NB_PHASES, 1<<FILTER_BITS, 0);
/* Resample a full picture: run component_resample() on each of the 3 planes.
   Plane 0 (luma) is full resolution; planes 1 and 2 (chroma) are subsampled
   by 2 in both directions (shift = 1), i.e. 4:2:0 layout. */
608 void img_resample(ImgReSampleContext *s,
609 AVPicture *output, const AVPicture *input)
615 shift = (i == 0) ? 0 : 1; /* chroma planes are half-size */
/* destination pointer offset past the padding area */
617 optr = output->data[i] + (((output->linesize[i] *
618 s->padtop) + s->padleft) >> shift);
620 component_resample(s, optr, output->linesize[i],
621 s->pad_owidth >> shift, s->pad_oheight >> shift,
/* source pointer offset past the cropped bands */
622 input->data[i] + (input->linesize[i] *
623 (s->topBand >> shift)) + (s->leftBand >> shift),
624 input->linesize[i], ((s->iwidth - s->leftBand -
625 s->rightBand) >> shift),
626 (s->iheight - s->topBand - s->bottomBand) >> shift);
/* Release a context created by img_resample_init()/img_resample_full_init():
   frees the line ring buffer (and presumably the context itself — the rest of
   the body is not visible in this chunk). */
630 void img_resample_close(ImgReSampleContext *s)
632 av_free(s->line_buf);
/* Minimal swscale-compatible constructor built on ImgReSampleContext.
   Only YUV420P rescaling is supported natively; other formats are converted
   through YUV420P at sws_scale() time. When sizes match, no real resampler is
   built — a bare context just records the dimensions for later checks.
   NOTE(review): flags, srcFilter, dstFilter and param appear unused in the
   visible code. Returns NULL on allocation failure. */
636 struct SwsContext *sws_getContext(int srcW, int srcH, int srcFormat,
637 int dstW, int dstH, int dstFormat,
638 int flags, SwsFilter *srcFilter,
639 SwsFilter *dstFilter, double *param)
641 struct SwsContext *ctx;
643 ctx = av_malloc(sizeof(struct SwsContext));
645 av_log(NULL, AV_LOG_ERROR, "Cannot allocate a resampling context!\n");
650 if ((srcH != dstH) || (srcW != dstW)) {
651 if ((srcFormat != PIX_FMT_YUV420P) || (dstFormat != PIX_FMT_YUV420P)) {
652 av_log(NULL, AV_LOG_INFO, "PIX_FMT_YUV420P will be used as an intermediate format for rescaling\n");
654 ctx->resampling_ctx = img_resample_init(dstW, dstH, srcW, srcH);
/* same-size case: dummy context, dimensions recorded for sws_scale checks */
656 ctx->resampling_ctx = av_malloc(sizeof(ImgReSampleContext));
657 ctx->resampling_ctx->iheight = srcH;
658 ctx->resampling_ctx->iwidth = srcW;
659 ctx->resampling_ctx->oheight = dstH;
660 ctx->resampling_ctx->owidth = dstW;
662 ctx->src_pix_fmt = srcFormat;
663 ctx->dst_pix_fmt = dstFormat;
/* Destroy a context from sws_getContext(). A real resampler (sizes differ)
   is torn down via img_resample_close(); the same-size dummy context was a
   bare av_malloc and is freed directly. */
668 void sws_freeContext(struct SwsContext *ctx)
670 if ((ctx->resampling_ctx->iwidth != ctx->resampling_ctx->owidth) ||
671 (ctx->resampling_ctx->iheight != ctx->resampling_ctx->oheight)) {
672 img_resample_close(ctx->resampling_ctx);
674 av_free(ctx->resampling_ctx);
/* swscale-compatible scaling entry point. Pipeline: (1) if sizes differ,
   convert the source to YUV420P when needed (temp buffer buf1), rescale with
   img_resample() (temp buffer buf2 when the destination is not YUV420P);
   (2) convert the result to the destination format, or plain-copy when the
   formats already match. Temporary buffers are presumably freed on the exit
   path, which is not visible in this chunk. */
679 int sws_scale(struct SwsContext *ctx, uint8_t* src[], int srcStride[],
680 int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[])
682 AVPicture src_pict, dst_pict;
684 AVPicture picture_format_temp;
685 AVPicture picture_resample_temp, *formatted_picture, *resampled_picture;
686 uint8_t *buf1 = NULL, *buf2 = NULL;
687 enum PixelFormat current_pix_fmt;
/* wrap the raw plane pointers/strides in AVPicture structs */
689 for (i = 0; i < 3; i++) {
690 src_pict.data[i] = src[i];
691 src_pict.linesize[i] = srcStride[i];
692 dst_pict.data[i] = dst[i];
693 dst_pict.linesize[i] = dstStride[i];
695 if ((ctx->resampling_ctx->iwidth != ctx->resampling_ctx->owidth) ||
696 (ctx->resampling_ctx->iheight != ctx->resampling_ctx->oheight)) {
697 /* We have to rescale the picture, but only YUV420P rescaling is supported... */
699 if (ctx->src_pix_fmt != PIX_FMT_YUV420P) {
702 /* create temporary picture for rescaling input*/
703 size = avpicture_get_size(PIX_FMT_YUV420P, ctx->resampling_ctx->iwidth, ctx->resampling_ctx->iheight);
704 buf1 = av_malloc(size);
709 formatted_picture = &picture_format_temp;
710 avpicture_fill((AVPicture*)formatted_picture, buf1,
711 PIX_FMT_YUV420P, ctx->resampling_ctx->iwidth, ctx->resampling_ctx->iheight);
713 if (img_convert((AVPicture*)formatted_picture, PIX_FMT_YUV420P,
714 &src_pict, ctx->src_pix_fmt,
715 ctx->resampling_ctx->iwidth, ctx->resampling_ctx->iheight) < 0) {
717 av_log(NULL, AV_LOG_ERROR, "pixel format conversion not handled\n");
/* source is already YUV420P: rescale it in place */
722 formatted_picture = &src_pict;
725 if (ctx->dst_pix_fmt != PIX_FMT_YUV420P) {
728 /* create temporary picture for rescaling output*/
729 size = avpicture_get_size(PIX_FMT_YUV420P, ctx->resampling_ctx->owidth, ctx->resampling_ctx->oheight);
730 buf2 = av_malloc(size);
735 resampled_picture = &picture_resample_temp;
736 avpicture_fill((AVPicture*)resampled_picture, buf2,
737 PIX_FMT_YUV420P, ctx->resampling_ctx->owidth, ctx->resampling_ctx->oheight);
/* destination is YUV420P: rescale straight into it */
740 resampled_picture = &dst_pict;
743 /* ...and finally rescale!!! */
744 img_resample(ctx->resampling_ctx, resampled_picture, formatted_picture);
745 current_pix_fmt = PIX_FMT_YUV420P;
/* no rescale needed: pass the source straight to the conversion stage */
747 resampled_picture = &src_pict;
748 current_pix_fmt = ctx->src_pix_fmt;
751 if (current_pix_fmt != ctx->dst_pix_fmt) {
752 if (img_convert(&dst_pict, ctx->dst_pix_fmt,
753 resampled_picture, current_pix_fmt,
754 ctx->resampling_ctx->owidth, ctx->resampling_ctx->oheight) < 0) {
756 av_log(NULL, AV_LOG_ERROR, "pixel format conversion not handled\n");
/* same format and not already written to dst: plain copy */
761 } else if (resampled_picture != &dst_pict) {
762 img_copy(&dst_pict, resampled_picture, current_pix_fmt,
763 ctx->resampling_ctx->owidth, ctx->resampling_ctx->oheight);
/* Static buffers for the built-in TEST harness: the generated source image
   and two resampled outputs for cross-checking. NOTE(review): XSIZE/YSIZE/
   XSIZE1/YSIZE1 are defined outside this visible chunk. */
779 uint8_t img[XSIZE * YSIZE];
784 uint8_t img1[XSIZE1 * YSIZE1];
785 uint8_t img2[XSIZE1 * YSIZE1];
/* TEST helper: dump an 8-bit grayscale buffer as a binary (P5) PGM file.
   NOTE(review): fopen's result is used unchecked in the visible code, and
   fclose is not visible — acceptable for a test harness only. */
787 void save_pgm(const char *filename, uint8_t *img, int xsize, int ysize)
791 f=fopen(filename,"w");
792 fprintf(f,"P5\n%d %d\n%d\n", xsize, ysize, 255);
793 fwrite(img,1, xsize * ysize,f);
/* poison fprintf after the test helper so library code uses av_log instead */
795 #define fprintf please_use_av_log
/* TEST helper: print every phase of a polyphase filter bank, with
   coefficients rescaled from fixed point (/256.0, i.e. FILTER_BITS == 8). */
798 static void dump_filter(int16_t *filter)
802 for(ph=0;ph<NB_PHASES;ph++) {
803 av_log(NULL, AV_LOG_INFO, "%2d: ", ph);
804 for(i=0;i<NB_TAPS;i++) {
805 av_log(NULL, AV_LOG_INFO, " %5.2f", filter[ph * NB_TAPS + i] / 256.0);
807 av_log(NULL, AV_LOG_INFO, "\n");
815 int main(int argc, char **argv)
817 int x, y, v, i, xsize, ysize;
818 ImgReSampleContext *s;
819 float fact, factors[] = { 1/2.0, 3.0/4.0, 1.0, 4.0/3.0, 16.0/9.0, 2.0 };
822 /* build test image */
823 for(y=0;y<YSIZE;y++) {
824 for(x=0;x<XSIZE;x++) {
825 if (x < XSIZE/2 && y < YSIZE/2) {
826 if (x < XSIZE/4 && y < YSIZE/4) {
832 } else if (x < XSIZE/4) {
837 } else if (y < XSIZE/4) {
849 if (((x+3) % 4) <= 1 &&
856 } else if (x < XSIZE/2) {
857 v = ((x - (XSIZE/2)) * 255) / (XSIZE/2);
858 } else if (y < XSIZE/2) {
859 v = ((y - (XSIZE/2)) * 255) / (XSIZE/2);
861 v = ((x + y - XSIZE) * 255) / XSIZE;
863 img[(YSIZE - y) * XSIZE + (XSIZE - x)] = v;
866 save_pgm("/tmp/in.pgm", img, XSIZE, YSIZE);
867 for(i=0;i<sizeof(factors)/sizeof(float);i++) {
869 xsize = (int)(XSIZE * fact);
870 ysize = (int)((YSIZE - 100) * fact);
871 s = img_resample_full_init(xsize, ysize, XSIZE, YSIZE, 50 ,50, 0, 0, 0, 0, 0, 0);
872 av_log(NULL, AV_LOG_INFO, "Factor=%0.2f\n", fact);
873 dump_filter(&s->h_filters[0][0]);
874 component_resample(s, img1, xsize, xsize, ysize,
875 img + 50 * XSIZE, XSIZE, XSIZE, YSIZE - 100);
876 img_resample_close(s);
878 snprintf(buf, sizeof(buf), "/tmp/out%d.pgm", i);
879 save_pgm(buf, img1, xsize, ysize);
884 av_log(NULL, AV_LOG_INFO, "MMX test\n");
886 xsize = (int)(XSIZE * fact);
887 ysize = (int)(YSIZE * fact);
889 s = img_resample_init(xsize, ysize, XSIZE, YSIZE);
890 component_resample(s, img1, xsize, xsize, ysize,
891 img, XSIZE, XSIZE, YSIZE);
894 s = img_resample_init(xsize, ysize, XSIZE, YSIZE);
895 component_resample(s, img2, xsize, xsize, ysize,
896 img, XSIZE, XSIZE, YSIZE);
897 if (memcmp(img1, img2, xsize * ysize) != 0) {
898 av_log(NULL, AV_LOG_ERROR, "mmx error\n");
901 av_log(NULL, AV_LOG_INFO, "MMX OK\n");