 * High quality image resampling with polyphase filters
 * Copyright (c) 2001 Fabrice Bellard.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * High quality image resampling with polyphase filters.
31 #define NB_COMPONENTS 3
34 #define NB_PHASES (1 << PHASE_BITS)
36 #define FCENTER 1 /* index of the center of the filter */
37 //#define TEST 1 /* Test it */
39 #define POS_FRAC_BITS 16
40 #define POS_FRAC (1 << POS_FRAC_BITS)
41 /* 6 bits precision is needed for MMX */
44 #define LINE_BUF_HEIGHT (NB_TAPS * 4)
/* NOTE(review): these appear to be members of a struct (presumably the
 * compatibility "struct SwsContext") whose opening line is outside this
 * chunk — confirm against the full file. They hold the wrapped resampling
 * context plus the source/destination pixel formats. */
47 struct ImgReSampleContext *resampling_ctx;
48 enum PixelFormat src_pix_fmt, dst_pix_fmt;
/* State for one polyphase resampling job: input/output geometry, crop
 * bands, padding, and the precomputed per-phase filter coefficient tables
 * (8-byte aligned so the MMX code paths can load them with movq). */
51 struct ImgReSampleContext {
52 int iwidth, iheight, owidth, oheight;
53 int topBand, bottomBand, leftBand, rightBand;
54 int padtop, padbottom, padleft, padright;
/* output size remaining after padding is subtracted (see *_full_init) */
55 int pad_owidth, pad_oheight;
57 DECLARE_ALIGNED_8(int16_t, h_filters[NB_PHASES][NB_TAPS]); /* horizontal filters */
58 DECLARE_ALIGNED_8(int16_t, v_filters[NB_PHASES][NB_TAPS]); /* vertical filters */
62 void av_build_filter(int16_t *filter, double factor, int tap_count, int phase_count, int scale, int type);
/* Extract the PHASE_BITS-bit filter phase index from a fixed-point
 * source position (POS_FRAC_BITS fractional bits). */
64 static inline int get_phase(int pos)
66 return ((pos) >> (POS_FRAC_BITS - PHASE_BITS)) & ((1 << PHASE_BITS) - 1);
69 /* This function must be optimized */
/* Fast horizontal resampler: walks the output in fixed point and applies
 * the NB_TAPS-tap filter for the current phase. The range check below
 * guards against the tap window running off either end of the line; the
 * edge-safe work itself is done by h_resample_slow(). */
70 static void h_resample_fast(uint8_t *dst, int dst_width, const uint8_t *src,
71 int src_width, int src_start, int src_incr,
74 int src_pos, phase, sum, i;
79 for(i=0;i<dst_width;i++) {
82 if ((src_pos >> POS_FRAC_BITS) < 0 ||
83 (src_pos >> POS_FRAC_BITS) > (src_width - NB_TAPS))
86 s = src + (src_pos >> POS_FRAC_BITS);
87 phase = get_phase(src_pos);
88 filter = filters + phase * NB_TAPS;
/* unrolled 4-tap dot product (generic NB_TAPS loop variant below) */
90 sum = s[0] * filter[0] +
98 for(j=0;j<NB_TAPS;j++)
99 sum += s[j] * filter[j];
102 sum = sum >> FILTER_BITS; /* drop the FILTER_BITS fractional bits */
113 /* This function must be optimized */
/* Vertical resampler: computes one output line from NB_TAPS source lines
 * spaced 'wrap' bytes apart, using a single filter phase for the whole
 * line (unrolled 4-tap form plus a generic NB_TAPS loop variant). */
114 static void v_resample(uint8_t *dst, int dst_width, const uint8_t *src,
115 int wrap, int16_t *filter)
121 for(i=0;i<dst_width;i++) {
123 sum = s[0 * wrap] * filter[0] +
124 s[1 * wrap] * filter[1] +
125 s[2 * wrap] * filter[2] +
126 s[3 * wrap] * filter[3];
133 for(j=0;j<NB_TAPS;j++) {
134 sum += s1[0] * filter[j];
139 sum = sum >> FILTER_BITS; /* back to 8-bit pixel range */
152 #include "i386/mmx.h"
/* FILTER4: MMX helper — load the 4 source bytes at the current fixed-point
 * position, widen to words, multiply-accumulate against the 4 filter taps
 * (pmaddwd), shift by FILTER_BITS and advance src_pos. Result in 'reg'.
 * DUMP (below): debug helper that spills an MMX register and prints it. */
154 #define FILTER4(reg) \
156 s = src + (src_pos >> POS_FRAC_BITS);\
157 phase = get_phase(src_pos);\
158 filter = filters + phase * NB_TAPS;\
160 punpcklbw_r2r(mm7, reg);\
161 movq_m2r(*filter, mm6);\
162 pmaddwd_r2r(reg, mm6);\
165 paddd_r2r(mm6, reg);\
166 psrad_i2r(FILTER_BITS, reg);\
167 src_pos += src_incr;\
170 #define DUMP(reg) movq_r2m(reg, tmp); printf(#reg "=%016"PRIx64"\n", tmp.uq);
172 /* XXX: do four pixels at a time */
/* MMX horizontal fast path: the main loop produces 4 output pixels per
 * iteration via FILTER4, packs the 32-bit sums down to saturated bytes,
 * then a tail loop handles the remaining (<4) pixels one at a time. */
173 static void h_resample_fast4_mmx(uint8_t *dst, int dst_width,
174 const uint8_t *src, int src_width,
175 int src_start, int src_incr, int16_t *filters)
185 while (dst_width >= 4) {
/* saturate the four accumulated sums down to unsigned bytes */
192 packuswb_r2r(mm7, mm0);
193 packuswb_r2r(mm7, mm1);
194 packuswb_r2r(mm7, mm3);
195 packuswb_r2r(mm7, mm2);
/* tail: one pixel per iteration */
207 while (dst_width > 0) {
209 packuswb_r2r(mm7, mm0);
/* MMX vertical resampler (4 taps): per iteration, load bytes from the 4
 * source lines, widen to words, multiply by the broadcast coefficients,
 * sum, shift by FILTER_BITS, pack with saturation and store 4 bytes.
 * A scalar tail loop mirrors v_resample() for leftover pixels. */
218 static void v_resample4_mmx(uint8_t *dst, int dst_width, const uint8_t *src,
219 int wrap, int16_t *filter)
236 while (dst_width >= 4) {
237 movq_m2r(s[0 * wrap], mm0);
238 punpcklbw_r2r(mm7, mm0);
239 movq_m2r(s[1 * wrap], mm1);
240 punpcklbw_r2r(mm7, mm1);
241 movq_m2r(s[2 * wrap], mm2);
242 punpcklbw_r2r(mm7, mm2);
243 movq_m2r(s[3 * wrap], mm3);
244 punpcklbw_r2r(mm7, mm3);
/* one pmullw per tap against the pre-broadcast coefficient vectors */
246 pmullw_m2r(coefs[0], mm0);
247 pmullw_m2r(coefs[1], mm1);
248 pmullw_m2r(coefs[2], mm2);
249 pmullw_m2r(coefs[3], mm3);
254 psraw_i2r(FILTER_BITS, mm0);
256 packuswb_r2r(mm7, mm0);
259 *(uint32_t *)dst = tmp.ud[0];
/* scalar tail: same 4-tap sum as v_resample() */
264 while (dst_width > 0) {
265 sum = s[0 * wrap] * filter[0] +
266 s[1 * wrap] * filter[1] +
267 s[2 * wrap] * filter[2] +
268 s[3 * wrap] * filter[3];
269 sum = sum >> FILTER_BITS;
281 #endif /* HAVE_MMX */
/* NOTE(review): these look like members of the unions (vec_uc_t/vec_ss_t,
 * judging by their use in v_resample16_altivec below) used to access
 * AltiVec vector elements as scalars — the union headers are not visible
 * in this chunk; confirm against the full file. */
285 vector unsigned char v;
290 vector signed short v;
/* AltiVec vertical resampler: 16 output pixels per iteration. Filter taps
 * are pre-shifted <<(15-FILTER_BITS) so that vec_madds' implicit >>15
 * matches the scalar path's >>FILTER_BITS. Scalar code handles the
 * unaligned head (to get dst 16-byte aligned) and the tail. */
294 void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
295 int wrap, int16_t *filter)
299 vector unsigned char *tv, tmp, dstv, zero;
300 vec_ss_t srchv[4], srclv[4], fv[4];
301 vector signed short zeros, sumhv, sumlv;
307 The vec_madds later on does an implicit >>15 on the result.
308 Since FILTER_BITS is 8, and we have 15 bits of magnitude in
309 a signed short, we have just enough bits to pre-shift our
310 filter constants <<7 to compensate for vec_madds.
/* broadcast each pre-shifted tap across a whole vector */
312 fv[i].s[0] = filter[i] << (15-FILTER_BITS);
313 fv[i].v = vec_splat(fv[i].v, 0);
316 zero = vec_splat_u8(0);
317 zeros = vec_splat_s16(0);
321 When we're resampling, we'd ideally like both our input buffers,
322 and output buffers to be 16-byte aligned, so we can do both aligned
323 reads and writes. Sadly we can't always have this at the moment, so
324 we opt for aligned writes, as unaligned writes have a huge overhead.
325 To do this, do enough scalar resamples to get dst 16-byte aligned.
327 i = (-(int)dst) & 0xf;
329 sum = s[0 * wrap] * filter[0] +
330 s[1 * wrap] * filter[1] +
331 s[2 * wrap] * filter[2] +
332 s[3 * wrap] * filter[3];
333 sum = sum >> FILTER_BITS;
334 if (sum<0) sum = 0; else if (sum>255) sum=255;
342 /* Do our altivec resampling on 16 pixels at once. */
343 while(dst_width>=16) {
345 Read 16 (potentially unaligned) bytes from each of
346 4 lines into 4 vectors, and split them into shorts.
347 Interleave the multipy/accumulate for the resample
348 filter with the loads to hide the 3 cycle latency
/* tap 0: unaligned load via vec_perm/vec_lvsl, then madds into the sums */
351 tv = (vector unsigned char *) &s[0 * wrap];
352 tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[i * wrap]));
353 srchv[0].v = (vector signed short) vec_mergeh(zero, tmp);
354 srclv[0].v = (vector signed short) vec_mergel(zero, tmp);
355 sumhv = vec_madds(srchv[0].v, fv[0].v, zeros);
356 sumlv = vec_madds(srclv[0].v, fv[0].v, zeros);
358 tv = (vector unsigned char *) &s[1 * wrap];
359 tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[1 * wrap]));
360 srchv[1].v = (vector signed short) vec_mergeh(zero, tmp);
361 srclv[1].v = (vector signed short) vec_mergel(zero, tmp);
362 sumhv = vec_madds(srchv[1].v, fv[1].v, sumhv);
363 sumlv = vec_madds(srclv[1].v, fv[1].v, sumlv);
365 tv = (vector unsigned char *) &s[2 * wrap];
366 tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[2 * wrap]));
367 srchv[2].v = (vector signed short) vec_mergeh(zero, tmp);
368 srclv[2].v = (vector signed short) vec_mergel(zero, tmp);
369 sumhv = vec_madds(srchv[2].v, fv[2].v, sumhv);
370 sumlv = vec_madds(srclv[2].v, fv[2].v, sumlv);
372 tv = (vector unsigned char *) &s[3 * wrap];
373 tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[3 * wrap]));
374 srchv[3].v = (vector signed short) vec_mergeh(zero, tmp);
375 srclv[3].v = (vector signed short) vec_mergel(zero, tmp);
376 sumhv = vec_madds(srchv[3].v, fv[3].v, sumhv);
377 sumlv = vec_madds(srclv[3].v, fv[3].v, sumlv);
380 Pack the results into our destination vector,
381 and do an aligned write of that back to memory.
383 dstv = vec_packsu(sumhv, sumlv) ;
384 vec_st(dstv, 0, (vector unsigned char *) dst);
392 If there are any leftover pixels, resample them
393 with the slow scalar method.
396 sum = s[0 * wrap] * filter[0] +
397 s[1 * wrap] * filter[1] +
398 s[2 * wrap] * filter[2] +
399 s[3 * wrap] * filter[3];
400 sum = sum >> FILTER_BITS;
401 if (sum<0) sum = 0; else if (sum>255) sum=255;
408 #endif /* HAVE_ALTIVEC */
410 /* slow version to handle limit cases. Does not need optimisation */
/* Edge-safe horizontal resampler: for each tap, the source pointer is
 * checked against [src, src_end) (the two guarded branches below) so
 * pixels near the line boundaries are handled correctly. */
411 static void h_resample_slow(uint8_t *dst, int dst_width,
412 const uint8_t *src, int src_width,
413 int src_start, int src_incr, int16_t *filters)
415 int src_pos, phase, sum, j, v, i;
416 const uint8_t *s, *src_end;
419 src_end = src + src_width;
421 for(i=0;i<dst_width;i++) {
422 s = src + (src_pos >> POS_FRAC_BITS);
423 phase = get_phase(src_pos);
424 filter = filters + phase * NB_TAPS;
426 for(j=0;j<NB_TAPS;j++) {
429 else if (s >= src_end)
433 sum += v * filter[j];
436 sum = sum >> FILTER_BITS;
/* Horizontal dispatch: uses h_resample_slow() for output pixels whose tap
 * window would read before the start or past the end of the source line,
 * and the fast path (MMX variant when available and NB_TAPS == 4) for the
 * safe middle section. */
447 static void h_resample(uint8_t *dst, int dst_width, const uint8_t *src,
448 int src_width, int src_start, int src_incr,
/* number of leading output pixels whose window starts before the line */
454 n = (0 - src_start + src_incr - 1) / src_incr;
455 h_resample_slow(dst, n, src, src_width, src_start, src_incr, filters);
458 src_start += n * src_incr;
460 src_end = src_start + dst_width * src_incr;
461 if (src_end > ((src_width - NB_TAPS) << POS_FRAC_BITS)) {
462 n = (((src_width - NB_TAPS + 1) << POS_FRAC_BITS) - 1 - src_start) /
468 if ((mm_flags & MM_MMX) && NB_TAPS == 4)
469 h_resample_fast4_mmx(dst, n,
470 src, src_width, src_start, src_incr, filters);
473 h_resample_fast(dst, n,
474 src, src_width, src_start, src_incr, filters);
/* trailing edge pixels: back to the slow, clamped path */
478 src_start += n * src_incr;
479 h_resample_slow(dst, dst_width,
480 src, src_width, src_start, src_incr, filters);
/* Resample one image plane: horizontally filter source lines on demand
 * into a ring buffer of LINE_BUF_HEIGHT + NB_TAPS pre-scaled lines, then
 * vertically filter NB_TAPS buffered lines per output line. Edge lines
 * are replicated; positions advance in POS_FRAC fixed point. */
484 static void component_resample(ImgReSampleContext *s,
485 uint8_t *output, int owrap, int owidth, int oheight,
486 uint8_t *input, int iwrap, int iwidth, int iheight)
488 int src_y, src_y1, last_src_y, ring_y, phase_y, y1, y;
489 uint8_t *new_line, *src_line;
491 last_src_y = - FCENTER - 1;
492 /* position of the bottom of the filter in the source image */
493 src_y = (last_src_y + NB_TAPS) * POS_FRAC;
494 ring_y = NB_TAPS; /* position in ring buffer */
495 for(y=0;y<oheight;y++) {
496 /* apply horizontal filter on new lines from input if needed */
497 src_y1 = src_y >> POS_FRAC_BITS;
498 while (last_src_y < src_y1) {
499 if (++ring_y >= LINE_BUF_HEIGHT + NB_TAPS)
502 /* handle limit conditions : replicate line (slightly
503 inefficient because we filter multiple times) */
507 } else if (y1 >= iheight) {
510 src_line = input + y1 * iwrap;
511 new_line = s->line_buf + ring_y * owidth;
512 /* apply filter and handle limit cases correctly */
513 h_resample(new_line, owidth,
514 src_line, iwidth, - FCENTER * POS_FRAC, s->h_incr,
515 &s->h_filters[0][0]);
516 /* handle ring buffer wraping */
517 if (ring_y >= LINE_BUF_HEIGHT) {
518 memcpy(s->line_buf + (ring_y - LINE_BUF_HEIGHT) * owidth,
522 /* apply vertical filter */
523 phase_y = get_phase(src_y);
525 /* desactivated MMX because loss of precision */
/* NOTE(review): the "&& 0" below deliberately disables the MMX vertical
 * path (precision loss, per the comment above) — do not "fix" it. */
526 if ((mm_flags & MM_MMX) && NB_TAPS == 4 && 0)
527 v_resample4_mmx(output, owidth,
528 s->line_buf + (ring_y - NB_TAPS + 1) * owidth, owidth,
529 &s->v_filters[phase_y][0]);
533 if ((mm_flags & MM_ALTIVEC) && NB_TAPS == 4 && FILTER_BITS <= 6)
534 v_resample16_altivec(output, owidth,
535 s->line_buf + (ring_y - NB_TAPS + 1) * owidth, owidth,
536 &s->v_filters[phase_y][0]);
539 v_resample(output, owidth,
540 s->line_buf + (ring_y - NB_TAPS + 1) * owidth, owidth,
541 &s->v_filters[phase_y][0]);
/* Convenience constructor: full init with no crop bands and no padding. */
549 ImgReSampleContext *img_resample_init(int owidth, int oheight,
550 int iwidth, int iheight)
552 return img_resample_full_init(owidth, oheight, iwidth, iheight,
553 0, 0, 0, 0, 0, 0, 0, 0);
/* Allocate and initialize a resampling context with crop bands (pixels
 * removed from the source edges) and output padding. Builds the
 * horizontal/vertical polyphase filter tables via av_build_filter() and
 * the fixed-point position increments. Returns NULL on bad arguments or
 * allocation failure (error paths not fully visible in this chunk). */
556 ImgReSampleContext *img_resample_full_init(int owidth, int oheight,
557 int iwidth, int iheight,
558 int topBand, int bottomBand,
559 int leftBand, int rightBand,
560 int padtop, int padbottom,
561 int padleft, int padright)
563 ImgReSampleContext *s;
565 if (!owidth || !oheight || !iwidth || !iheight)
568 s = av_mallocz(sizeof(ImgReSampleContext));
/* guard the line-buffer size multiplication against overflow */
571 if((unsigned)owidth >= UINT_MAX / (LINE_BUF_HEIGHT + NB_TAPS))
573 s->line_buf = av_mallocz(owidth * (LINE_BUF_HEIGHT + NB_TAPS));
578 s->oheight = oheight;
580 s->iheight = iheight;
582 s->topBand = topBand;
583 s->bottomBand = bottomBand;
584 s->leftBand = leftBand;
585 s->rightBand = rightBand;
588 s->padbottom = padbottom;
589 s->padleft = padleft;
590 s->padright = padright;
/* effective output size once padding is excluded */
592 s->pad_owidth = owidth - (padleft + padright);
593 s->pad_oheight = oheight - (padtop + padbottom);
/* fixed-point source step per output pixel (POS_FRAC = 1.0) */
595 s->h_incr = ((iwidth - leftBand - rightBand) * POS_FRAC) / s->pad_owidth;
596 s->v_incr = ((iheight - topBand - bottomBand) * POS_FRAC) / s->pad_oheight;
598 av_build_filter(&s->h_filters[0][0], (float) s->pad_owidth /
599 (float) (iwidth - leftBand - rightBand), NB_TAPS, NB_PHASES, 1<<FILTER_BITS, 0);
600 av_build_filter(&s->v_filters[0][0], (float) s->pad_oheight /
601 (float) (iheight - topBand - bottomBand), NB_TAPS, NB_PHASES, 1<<FILTER_BITS, 0);
/* Resample a whole picture plane by plane. Plane 0 uses shift 0, the
 * others shift 1 — presumably the planes are YUV420P luma + half-size
 * chroma (the loop header is not visible in this chunk; confirm). Crop
 * bands offset the input pointer, padding offsets the output pointer. */
609 void img_resample(ImgReSampleContext *s,
610 AVPicture *output, const AVPicture *input)
616 shift = (i == 0) ? 0 : 1;
/* start of the padded output region for this plane */
618 optr = output->data[i] + (((output->linesize[i] *
619 s->padtop) + s->padleft) >> shift);
621 component_resample(s, optr, output->linesize[i],
622 s->pad_owidth >> shift, s->pad_oheight >> shift,
623 input->data[i] + (input->linesize[i] *
624 (s->topBand >> shift)) + (s->leftBand >> shift),
625 input->linesize[i], ((s->iwidth - s->leftBand -
626 s->rightBand) >> shift),
627 (s->iheight - s->topBand - s->bottomBand) >> shift);
/* Free a resampling context and its line ring buffer. */
631 void img_resample_close(ImgReSampleContext *s)
633 av_free(s->line_buf);
/* Compatibility shim for the swscale API. Allocates a SwsContext; when a
 * rescale is needed, a real ImgReSampleContext is created (warning that
 * YUV420P is used as intermediate format if the endpoints differ),
 * otherwise a bare context just records the geometry and pixel formats.
 * flags/srcFilter/dstFilter/param are accepted but not visibly used here. */
637 struct SwsContext *sws_getContext(int srcW, int srcH, int srcFormat,
638 int dstW, int dstH, int dstFormat,
639 int flags, SwsFilter *srcFilter,
640 SwsFilter *dstFilter, double *param)
642 struct SwsContext *ctx;
644 ctx = av_malloc(sizeof(struct SwsContext));
646 av_log(NULL, AV_LOG_ERROR, "Cannot allocate a resampling context!\n");
651 if ((srcH != dstH) || (srcW != dstW)) {
652 if ((srcFormat != PIX_FMT_YUV420P) || (dstFormat != PIX_FMT_YUV420P)) {
653 av_log(NULL, AV_LOG_INFO, "PIX_FMT_YUV420P will be used as an intermediate format for rescaling\n");
655 ctx->resampling_ctx = img_resample_init(dstW, dstH, srcW, srcH);
/* same size: no resampler needed, but keep the geometry for sws_scale */
657 ctx->resampling_ctx = av_malloc(sizeof(ImgReSampleContext));
658 ctx->resampling_ctx->iheight = srcH;
659 ctx->resampling_ctx->iwidth = srcW;
660 ctx->resampling_ctx->oheight = dstH;
661 ctx->resampling_ctx->owidth = dstW;
663 ctx->src_pix_fmt = srcFormat;
664 ctx->dst_pix_fmt = dstFormat;
/* Free a SwsContext: use img_resample_close() when a real resampler was
 * created (sizes differ), plain av_free() for the bare geometry-only
 * context allocated by sws_getContext() otherwise. */
669 void sws_freeContext(struct SwsContext *ctx)
673 if ((ctx->resampling_ctx->iwidth != ctx->resampling_ctx->owidth) ||
674 (ctx->resampling_ctx->iheight != ctx->resampling_ctx->oheight)) {
675 img_resample_close(ctx->resampling_ctx);
677 av_free(ctx->resampling_ctx);
 * Checks whether the context is still valid, or reallocates a new one.
 * If context is NULL, just calls sws_getContext() to get a new one.
 * Otherwise, checks whether the parameters match those already saved in
 * the context. If they do, returns the current context.
 * Otherwise, frees the context and gets a new one.
 *
 * Be warned that srcFilter and dstFilter are not checked; they are
 * assumed to remain valid.
/* Return 'ctx' unchanged if its cached geometry and pixel formats match
 * the requested ones; otherwise free it and build a fresh context via
 * sws_getContext(). srcFilter/dstFilter are not compared. */
693 struct SwsContext *sws_getCachedContext(struct SwsContext *ctx,
694 int srcW, int srcH, int srcFormat,
695 int dstW, int dstH, int dstFormat, int flags,
696 SwsFilter *srcFilter, SwsFilter *dstFilter, double *param)
699 if ((ctx->resampling_ctx->iwidth != srcW) ||
700 (ctx->resampling_ctx->iheight != srcH) ||
701 (ctx->src_pix_fmt != srcFormat) ||
702 (ctx->resampling_ctx->owidth != dstW) ||
703 (ctx->resampling_ctx->oheight != dstH) ||
704 (ctx->dst_pix_fmt != dstFormat))
706 sws_freeContext(ctx);
711 return sws_getContext(srcW, srcH, srcFormat,
712 dstW, dstH, dstFormat, flags,
713 srcFilter, dstFilter, param);
/* Compatibility sws_scale: when a rescale is needed, convert the input to
 * YUV420P if necessary (temporary buffer buf1), rescale with
 * img_resample(), then convert/copy to the destination format (temporary
 * buffer buf2 when the destination is not YUV420P). Same-size pictures
 * skip the resampler and only go through format conversion/copy. */
718 int sws_scale(struct SwsContext *ctx, uint8_t* src[], int srcStride[],
719 int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[])
721 AVPicture src_pict, dst_pict;
723 AVPicture picture_format_temp;
724 AVPicture picture_resample_temp, *formatted_picture, *resampled_picture;
725 uint8_t *buf1 = NULL, *buf2 = NULL;
726 enum PixelFormat current_pix_fmt;
/* wrap the raw plane pointers/strides into AVPictures */
728 for (i = 0; i < 4; i++) {
729 src_pict.data[i] = src[i];
730 src_pict.linesize[i] = srcStride[i];
731 dst_pict.data[i] = dst[i];
732 dst_pict.linesize[i] = dstStride[i];
734 if ((ctx->resampling_ctx->iwidth != ctx->resampling_ctx->owidth) ||
735 (ctx->resampling_ctx->iheight != ctx->resampling_ctx->oheight)) {
736 /* We have to rescale the picture, but only YUV420P rescaling is supported... */
738 if (ctx->src_pix_fmt != PIX_FMT_YUV420P) {
741 /* create temporary picture for rescaling input*/
742 size = avpicture_get_size(PIX_FMT_YUV420P, ctx->resampling_ctx->iwidth, ctx->resampling_ctx->iheight);
743 buf1 = av_malloc(size);
748 formatted_picture = &picture_format_temp;
749 avpicture_fill((AVPicture*)formatted_picture, buf1,
750 PIX_FMT_YUV420P, ctx->resampling_ctx->iwidth, ctx->resampling_ctx->iheight);
752 if (img_convert((AVPicture*)formatted_picture, PIX_FMT_YUV420P,
753 &src_pict, ctx->src_pix_fmt,
754 ctx->resampling_ctx->iwidth, ctx->resampling_ctx->iheight) < 0) {
756 av_log(NULL, AV_LOG_ERROR, "pixel format conversion not handled\n");
/* input is already YUV420P: feed it to the resampler directly */
761 formatted_picture = &src_pict;
764 if (ctx->dst_pix_fmt != PIX_FMT_YUV420P) {
767 /* create temporary picture for rescaling output*/
768 size = avpicture_get_size(PIX_FMT_YUV420P, ctx->resampling_ctx->owidth, ctx->resampling_ctx->oheight);
769 buf2 = av_malloc(size);
774 resampled_picture = &picture_resample_temp;
775 avpicture_fill((AVPicture*)resampled_picture, buf2,
776 PIX_FMT_YUV420P, ctx->resampling_ctx->owidth, ctx->resampling_ctx->oheight);
779 resampled_picture = &dst_pict;
782 /* ...and finally rescale!!! */
783 img_resample(ctx->resampling_ctx, resampled_picture, formatted_picture);
784 current_pix_fmt = PIX_FMT_YUV420P;
/* no rescale needed: pass the source through */
786 resampled_picture = &src_pict;
787 current_pix_fmt = ctx->src_pix_fmt;
790 if (current_pix_fmt != ctx->dst_pix_fmt) {
791 if (img_convert(&dst_pict, ctx->dst_pix_fmt,
792 resampled_picture, current_pix_fmt,
793 ctx->resampling_ctx->owidth, ctx->resampling_ctx->oheight) < 0) {
795 av_log(NULL, AV_LOG_ERROR, "pixel format conversion not handled\n");
800 } else if (resampled_picture != &dst_pict) {
801 av_picture_copy(&dst_pict, resampled_picture, current_pix_fmt,
802 ctx->resampling_ctx->owidth, ctx->resampling_ctx->oheight);
/* TEST harness data: synthetic input image plus two output buffers
 * (img2 is used to cross-check the MMX path against the C path). */
819 uint8_t img[XSIZE * YSIZE];
824 uint8_t img1[XSIZE1 * YSIZE1];
825 uint8_t img2[XSIZE1 * YSIZE1];
/* Write an 8-bit grayscale image as a binary PGM ("P5") file.
 * NOTE(review): the fopen() result is used without a NULL check, and
 * fwrite/fclose results are not checked — acceptable for a test tool. */
827 void save_pgm(const char *filename, uint8_t *img, int xsize, int ysize)
831 f=fopen(filename,"w");
832 fprintf(f,"P5\n%d %d\n%d\n", xsize, ysize, 255);
833 fwrite(img,1, xsize * ysize,f);
835 #define fprintf please_use_av_log
/* Debug helper: print every phase's NB_TAPS coefficients, scaled back to
 * floating point by /256.0 (i.e. 1<<FILTER_BITS with FILTER_BITS == 8). */
838 static void dump_filter(int16_t *filter)
842 for(ph=0;ph<NB_PHASES;ph++) {
843 av_log(NULL, AV_LOG_INFO, "%2d: ", ph);
844 for(i=0;i<NB_TAPS;i++) {
845 av_log(NULL, AV_LOG_INFO, " %5.2f", filter[ph * NB_TAPS + i] / 256.0);
847 av_log(NULL, AV_LOG_INFO, "\n");
/* TEST driver: build a synthetic gradient/checker image, resample it at
 * several scale factors (writing /tmp/in.pgm and /tmp/outN.pgm), then —
 * under HAVE_MMX — compare an MMX-enabled run against the plain C run
 * byte-for-byte. */
855 int main(int argc, char **argv)
857 int x, y, v, i, xsize, ysize;
858 ImgReSampleContext *s;
859 float fact, factors[] = { 1/2.0, 3.0/4.0, 1.0, 4.0/3.0, 16.0/9.0, 2.0 };
862 /* build test image */
863 for(y=0;y<YSIZE;y++) {
864 for(x=0;x<XSIZE;x++) {
865 if (x < XSIZE/2 && y < YSIZE/2) {
866 if (x < XSIZE/4 && y < YSIZE/4) {
872 } else if (x < XSIZE/4) {
877 } else if (y < XSIZE/4) {
889 if (((x+3) % 4) <= 1 &&
896 } else if (x < XSIZE/2) {
897 v = ((x - (XSIZE/2)) * 255) / (XSIZE/2);
898 } else if (y < XSIZE/2) {
899 v = ((y - (XSIZE/2)) * 255) / (XSIZE/2);
901 v = ((x + y - XSIZE) * 255) / XSIZE;
903 img[(YSIZE - y) * XSIZE + (XSIZE - x)] = v;
906 save_pgm("/tmp/in.pgm", img, XSIZE, YSIZE);
/* resample at each test factor, with 50-pixel top/bottom crop bands */
907 for(i=0;i<sizeof(factors)/sizeof(float);i++) {
909 xsize = (int)(XSIZE * fact);
910 ysize = (int)((YSIZE - 100) * fact);
911 s = img_resample_full_init(xsize, ysize, XSIZE, YSIZE, 50 ,50, 0, 0, 0, 0, 0, 0);
912 av_log(NULL, AV_LOG_INFO, "Factor=%0.2f\n", fact);
913 dump_filter(&s->h_filters[0][0]);
914 component_resample(s, img1, xsize, xsize, ysize,
915 img + 50 * XSIZE, XSIZE, XSIZE, YSIZE - 100);
916 img_resample_close(s);
918 snprintf(buf, sizeof(buf), "/tmp/out%d.pgm", i);
919 save_pgm(buf, img1, xsize, ysize);
/* MMX vs C consistency check (HAVE_MMX only) */
924 av_log(NULL, AV_LOG_INFO, "MMX test\n");
926 xsize = (int)(XSIZE * fact);
927 ysize = (int)(YSIZE * fact);
929 s = img_resample_init(xsize, ysize, XSIZE, YSIZE);
930 component_resample(s, img1, xsize, xsize, ysize,
931 img, XSIZE, XSIZE, YSIZE);
934 s = img_resample_init(xsize, ysize, XSIZE, YSIZE);
935 component_resample(s, img2, xsize, xsize, ysize,
936 img, XSIZE, XSIZE, YSIZE);
937 if (memcmp(img1, img2, xsize * ysize) != 0) {
938 av_log(NULL, AV_LOG_ERROR, "mmx error\n");
941 av_log(NULL, AV_LOG_INFO, "MMX OK\n");
942 #endif /* HAVE_MMX */