;*****************************************************************************
;* SIMD-optimized motion compensation estimation
;*****************************************************************************
;* Copyright (c) 2000, 2001 Fabrice Bellard
;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************
%include "libavutil/x86/x86util.asm"

SECTION .text
%macro DIFF_PIXELS_1 4
    movh            %1, %3      ; %1 = 8 samples from pix1
    movh            %2, %4      ; %2 = 8 samples from pix2
    punpcklbw       %2, %1      ; words: pix1 byte in high half, pix2 in low
    punpcklbw       %1, %1      ; words: pix1 byte in both halves
    psubw           %1, %2      ; (a*257) - (a*256 + b) = a - b, as signed words
%endmacro
; %1=uint8_t *pix1, %2=uint8_t *pix2, %3=static offset, %4=stride, %5=stride*3
; %6=temporary storage location
; this macro requires mmsize bytes of (aligned) stack space at %6 (except on SSE+x86-64)
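;
; As a rough C sketch (illustrative only, not part of the build), one
; DIFF_PIXELS_8 invocation computes an 8x8 block of signed word differences,
; with row y landing in register m<y>:
;
;   // "off" stands for the static offset %3
;   int16_t diff[8][8];
;   for (int y = 0; y < 8; y++)
;       for (int x = 0; x < 8; x++)
;           diff[y][x] = pix1[y * stride + off + x] - pix2[y * stride + off + x];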
%macro DIFF_PIXELS_8 6
    DIFF_PIXELS_1   m0, m7, [%1     +%3], [%2     +%3]
    DIFF_PIXELS_1   m1, m7, [%1+%4  +%3], [%2+%4  +%3]
    DIFF_PIXELS_1   m2, m7, [%1+%4*2+%3], [%2+%4*2+%3]
    add             %1, %5
    add             %2, %5
    DIFF_PIXELS_1   m3, m7, [%1     +%3], [%2     +%3]
    DIFF_PIXELS_1   m4, m7, [%1+%4  +%3], [%2+%4  +%3]
    DIFF_PIXELS_1   m5, m7, [%1+%4*2+%3], [%2+%4*2+%3]
    DIFF_PIXELS_1   m6, m7, [%1+%5  +%3], [%2+%5  +%3]
%ifdef m8 ; x86-64: a spare register is available as scratch
    DIFF_PIXELS_1   m7, m8, [%1+%4*4+%3], [%2+%4*4+%3]
%else     ; x86-32: spill m0 to the temporary storage instead
    mova          [%6], m0
    DIFF_PIXELS_1   m7, m0, [%1+%4*4+%3], [%2+%4*4+%3]
    mova            m0, [%6]
%endif
    sub             %1, %5
    sub             %2, %5
%endmacro
%macro HADAMARD8 0
    SUMSUB_BADC       w, 0, 1, 2, 3
    SUMSUB_BADC       w, 4, 5, 6, 7
    SUMSUB_BADC       w, 0, 2, 1, 3
    SUMSUB_BADC       w, 4, 6, 5, 7
    SUMSUB_BADC       w, 0, 4, 1, 5
    SUMSUB_BADC       w, 2, 6, 3, 7
%endmacro
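
; The six SUMSUB_BADC passes above are three butterfly stages (strides 1, 2, 4)
; across registers m0..m7, i.e. an unnormalized 8-point Hadamard transform on
; each word lane. The outputs are sign/order-permuted relative to the textbook
; transform, which is irrelevant once absolute values are summed. A rough C
; sketch of the same butterfly network (illustrative only):
;
;   void hadamard8(int16_t v[8])
;   {
;       for (int step = 1; step < 8; step *= 2)
;           for (int i = 0; i < 8; i += 2 * step)
;               for (int j = i; j < i + step; j++) {
;                   int16_t a = v[j], b = v[j + step];
;                   v[j]        = a + b;
;                   v[j + step] = a - b;
;               }
;   }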
%macro ABS_SUM_8x8_64 1
    ABS2            m0, m1, m8, m9
    ABS2_SUM        m2, m3, m8, m9, m0, m1
    ABS2_SUM        m4, m5, m8, m9, m0, m1
    ABS2_SUM        m6, m7, m8, m9, m0, m1
    paddusw         m0, m1
%endmacro
%macro ABS_SUM_8x8_32 1

; FIXME: HSUM saturates at 64k, while an 8x8 hadamard or dct block can get up to
; about 100k on extreme inputs. But that's very unlikely to occur in natural video,
; and it's even more unlikely to not have any alternative mvs/modes with lower cost.
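;
; What HSUM %1, %2, %3 produces, roughly (illustrative C; the real macro does
; pairwise saturating adds, but the saturation behaviour is the same idea):
;
;   unsigned hsum(const uint16_t *lane, int nlanes) // 4 lanes mmx, 8 sse2
;   {
;       unsigned sum = 0;
;       for (int i = 0; i < nlanes; i++) {
;           unsigned t = sum + lane[i];
;           sum = t > 0xFFFF ? 0xFFFF : t;  // paddusw saturates at 64k
;       }
;       return sum;
;   }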
%elif cpuflag(mmxext)
%macro STORE4 5
    mova [%1+mmsize*0], %2
    mova [%1+mmsize*1], %3
    mova [%1+mmsize*2], %4
    mova [%1+mmsize*3], %5
%endmacro

%macro LOAD4 5
    mova            %2, [%1+mmsize*0]
    mova            %3, [%1+mmsize*1]
    mova            %4, [%1+mmsize*2]
    mova            %5, [%1+mmsize*3]
%endmacro
%macro hadamard8_16_wrapper 2
cglobal hadamard8_diff, 4, 4, %1
%assign pad %2*mmsize-(4+stack_offset&(mmsize-1))
    call hadamard8x8_diff %+ SUFFIX

cglobal hadamard8_diff16, 5, 6, %1
%assign pad %2*mmsize-(4+stack_offset&(mmsize-1))
    call hadamard8x8_diff %+ SUFFIX

    call hadamard8x8_diff %+ SUFFIX

    call hadamard8x8_diff %+ SUFFIX

    call hadamard8x8_diff %+ SUFFIX
%macro HADAMARD8_DIFF 0-1
%if cpuflag(sse2)
hadamard8x8_diff %+ SUFFIX:
    lea             r0, [r3*3]
    DIFF_PIXELS_8   r1, r2, 0, r3, r0, rsp+gprsize
    HADAMARD8
%if ARCH_X86_64
    TRANSPOSE8x8W   0, 1, 2, 3, 4, 5, 6, 7, 8
%else
    TRANSPOSE8x8W   0, 1, 2, 3, 4, 5, 6, 7, [rsp+gprsize], [rsp+mmsize+gprsize]
%endif
    HADAMARD8
    ABS_SUM_8x8     rsp+gprsize

hadamard8_16_wrapper %1, 3
%elif cpuflag(mmx)
ALIGN 16
; int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1,
;                               uint8_t *src2, ptrdiff_t stride, int h)
; r0 = void *s = unused, int h = unused (always 8)
; note how r1, r2 and r3 are not clobbered in this function, so 16x16
; can simply call this 2x2 times (and that's why we access rsp+gprsize
; everywhere, which is the rsp of the calling func)
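; in C terms (illustrative sketch, names hypothetical):
;
;   int hadamard8_diff16(const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h)
;   {
;       int sum = hadamard8x8_diff(s1,     s2,     stride)
;               + hadamard8x8_diff(s1 + 8, s2 + 8, stride);
;       if (h == 16) {
;           s1 += 8 * stride;
;           s2 += 8 * stride;
;           sum += hadamard8x8_diff(s1,     s2,     stride)
;                + hadamard8x8_diff(s1 + 8, s2 + 8, stride);
;       }
;       return sum;
;   }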
hadamard8x8_diff %+ SUFFIX:
    lea                          r0, [r3*3]

    ; first 4x8 pixels
    DIFF_PIXELS_8                r1, r2,  0, r3, r0, rsp+gprsize+0x60
    HADAMARD8
    mova         [rsp+gprsize+0x60], m7
    TRANSPOSE4x4W                 0,  1,  2,  3,  7
    STORE4              rsp+gprsize, m0, m1, m2, m3
    mova                         m7, [rsp+gprsize+0x60]
    TRANSPOSE4x4W                 4,  5,  6,  7,  0
    STORE4         rsp+gprsize+0x40, m4, m5, m6, m7

    ; second 4x8 pixels
    DIFF_PIXELS_8                r1, r2,  4, r3, r0, rsp+gprsize+0x60
    HADAMARD8
    mova         [rsp+gprsize+0x60], m7
    TRANSPOSE4x4W                 0,  1,  2,  3,  7
    STORE4         rsp+gprsize+0x20, m0, m1, m2, m3
    mova                         m7, [rsp+gprsize+0x60]
    TRANSPOSE4x4W                 4,  5,  6,  7,  0

    LOAD4          rsp+gprsize+0x40, m0, m1, m2, m3
    HADAMARD8
    ABS_SUM_8x8_32 rsp+gprsize+0x60
    mova         [rsp+gprsize+0x60], m0

    LOAD4          rsp+gprsize     , m0, m1, m2, m3
    LOAD4          rsp+gprsize+0x20, m4, m5, m6, m7
    HADAMARD8
    ABS_SUM_8x8_32 rsp+gprsize
    paddusw                      m0, [rsp+gprsize+0x60]

    HSUM                         m0, m1, eax
    and                         eax, 0xFFFF
    ret

hadamard8_16_wrapper 0, 14
%endif
%endmacro
%if ARCH_X86_64
%define ABS_SUM_8x8 ABS_SUM_8x8_64
%else
%define ABS_SUM_8x8 ABS_SUM_8x8_32
%endif

INIT_XMM ssse3
%define ABS_SUM_8x8 ABS_SUM_8x8_64
; int ff_sse*_*(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
;               ptrdiff_t line_size, int h)
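;
; Illustrative C reference of what sse%1 computes (cf. sse16_c()/sse8_c() in
; libavcodec/me_cmp.c; this sketch is for exposition, not part of the build):
;
;   int sse(const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t stride,
;           int h, int w /* = %1 */)
;   {
;       int sum = 0;
;       for (int y = 0; y < h; y++) {
;           for (int x = 0; x < w; x++) {
;               int d = pix1[x] - pix2[x];
;               sum += d * d;
;           }
;           pix1 += stride;
;           pix2 += stride;
;       }
;       return sum;
;   }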
%macro SUM_SQUARED_ERRORS 1
cglobal sse%1, 5,5,8, v, pix1, pix2, lsize, h
    pxor      m0, m0             ; mm0 = 0
    pxor      m7, m7             ; mm7 holds the sum

.next2lines: ; FIXME why are these unaligned movs? pix1[] is aligned
    movu      m1, [pix1q]        ; m1 = pix1[0][0-15], [0-7] for mmx
    movu      m2, [pix2q]        ; m2 = pix2[0][0-15], [0-7] for mmx
%if %1 == mmsize
    movu      m3, [pix1q+lsizeq] ; m3 = pix1[1][0-15], [0-7] for mmx
    movu      m4, [pix2q+lsizeq] ; m4 = pix2[1][0-15], [0-7] for mmx
%else ; %1 / 2 == mmsize; mmx only
    mova      m3, [pix1q+8]      ; m3 = pix1[0][8-15]
    mova      m4, [pix2q+8]      ; m4 = pix2[0][8-15]
%endif
    ; todo: mm1-mm2, mm3-mm4
    ; algo: subtract mm1 from mm2 with saturation and vice versa
    ;       OR the result to get the absolute difference
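    ; worked example: a=3, b=10: (3-10) saturates to 0, (10-3) = 7, 0|7 = 7 = |3-10|
    ; (psubusb clamps at zero, so exactly one of the two subtractions survives)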
    ; now convert to 16-bit vectors so we can square them
    punpcklbw m1, m0             ; mm1 now spread over (mm1,mm2)
    punpcklbw m3, m0             ; mm4 now spread over (mm3,mm4)
    lea       pix1q, [pix1q + 2*lsizeq]
    lea       pix2q, [pix2q + 2*lsizeq]

    movd      eax, m7            ; return value
INIT_MMX mmx
SUM_SQUARED_ERRORS 16

INIT_XMM sse2
SUM_SQUARED_ERRORS 16
;-----------------------------------------------
;int ff_sum_abs_dctelem(int16_t *block)
;-----------------------------------------------
; %1 = number of xmm registers used
; %2 = number of inline loops
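;
; Illustrative C reference (cf. sum_abs_dctelem_c() in libavcodec/me_cmp.c;
; sketch only):
;
;   int sum_abs_dctelem(const int16_t *block)
;   {
;       int sum = 0;
;       for (int i = 0; i < 64; i++)
;           sum += abs(block[i]);
;       return sum;
;   }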
%macro SUM_ABS_DCTELEM 2
cglobal sum_abs_dctelem, 1, 1, %1, block
    pxor      m0, m0
    pxor      m1, m1
%assign %%i 0
%rep %2
    mova      m2, [blockq+mmsize*(0+%%i)]
    mova      m3, [blockq+mmsize*(1+%%i)]
    mova      m4, [blockq+mmsize*(2+%%i)]
    mova      m5, [blockq+mmsize*(3+%%i)]
;------------------------------------------------------------------------------
; int ff_hf_noise*_mmx(uint8_t *pix1, ptrdiff_t lsize, int h)
;------------------------------------------------------------------------------
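;
; Roughly, hf_noise%1 measures high-frequency content as a sum of absolute
; second-order differences. An illustrative C sketch, modeled on the nsse*_c()
; reference in libavcodec/me_cmp.c (treat as an approximation of the asm):
;
;   int hf_noise(const uint8_t *pix, ptrdiff_t lsize, int h, int w /* 8 or 16 */)
;   {
;       int sum = 0;
;       for (int y = 0; y + 1 < h; y++) {
;           for (int x = 0; x + 1 < w; x++)
;               sum += abs(pix[x] - pix[x + 1] - pix[x + lsize] + pix[x + lsize + 1]);
;           pix += lsize;
;       }
;       return sum;
;   }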
%macro HF_NOISE_PART1 5

%macro HF_NOISE_PART2 4
cglobal hf_noise%1, 3,3,0, pix1, lsize, h

    HF_NOISE_PART1 %1, 0, 1, 2, 3

    HF_NOISE_PART1 %1, 4, 1, 5, 3
    HF_NOISE_PART2     0, 2, 4, 5

    HF_NOISE_PART1 %1, 0, 1, 2, 3
    HF_NOISE_PART2     4, 5, 0, 2

    HF_NOISE_PART1 %1, 4, 1, 5, 3
    HF_NOISE_PART2     0, 2, 4, 5

    movd      eax, m0            ; eax = result of hf_noise8;
    REP_RET                      ; return eax;
;---------------------------------------------------------------------------------------
;int ff_sad_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
;---------------------------------------------------------------------------------------
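;
; Illustrative C reference (cf. pix_abs16_c()/pix_abs8_c() in
; libavcodec/me_cmp.c; sketch only):
;
;   int sad(const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t stride,
;           int h, int w /* = %1 */)
;   {
;       int sum = 0;
;       for (int y = 0; y < h; y++) {
;           for (int x = 0; x < w; x++)
;               sum += abs(pix1[x] - pix2[x]);
;           pix1 += stride;
;           pix2 += stride;
;       }
;       return sum;
;   }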
cglobal sad%1, 5, 5, 3, v, pix1, pix2, stride, h
    movu      m1, [pix2q+strideq]
    psadbw    m1, [pix1q+strideq]

    movu      m1, [pix2q+strideq+8]
    psadbw    m1, [pix1q+strideq+8]

    lea       pix1q, [pix1q+strideq*2]
    lea       pix2q, [pix2q+strideq*2]

    movu      m1, [pix2q+strideq]
    psadbw    m1, [pix1q+strideq]

    movu      m1, [pix2q+strideq+8]
    psadbw    m1, [pix1q+strideq+8]
;------------------------------------------------------------------------------------------
;int ff_sad_x2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
;------------------------------------------------------------------------------------------
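;
; Same inner loop as the plain SAD reference above, except pix2 is first
; interpolated halfway towards its right neighbour (half-pel x, rounding up,
; which is exactly what pavgb computes); illustrative only:
;
;   sum += abs(pix1[x] - ((pix2[x] + pix2[x + 1] + 1) >> 1));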
cglobal sad%1_x2, 5, 5, 5, v, pix1, pix2, stride, h
    movu      m2, [pix2q+strideq]

    movu      m4, [pix2q+strideq+1]

    pavgb     m2, [pix2q+strideq+1]

    psadbw    m2, [pix1q+strideq]

    movu      m2, [pix2q+strideq+8]
    pavgb     m2, [pix2q+strideq+9]

    psadbw    m2, [pix1q+strideq+8]

    lea       pix1q, [pix1q+2*strideq]
    lea       pix2q, [pix2q+2*strideq]

    movu      m2, [pix2q+strideq]

    movu      m4, [pix2q+strideq+1]

    pavgb     m2, [pix2q+strideq+1]

    psadbw    m2, [pix1q+strideq]

    movu      m2, [pix2q+strideq+8]
    pavgb     m2, [pix2q+strideq+9]

    psadbw    m2, [pix1q+strideq+8]
;------------------------------------------------------------------------------------------
;int ff_sad_y2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
;------------------------------------------------------------------------------------------
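;
; Same inner loop as the plain SAD reference above, except pix2 is first
; interpolated halfway towards the sample one line below (half-pel y,
; rounding up, as pavgb does); illustrative only:
;
;   sum += abs(pix1[x] - ((pix2[x] + pix2[x + stride] + 1) >> 1));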
cglobal sad%1_y2, 5, 5, 4, v, pix1, pix2, stride, h
    movu      m0, [pix2q+strideq]
    movu      m3, [pix2q+2*strideq]

    psadbw    m0, [pix1q+strideq]

    movu      m5, [pix2q+strideq+8]
    movu      m6, [pix2q+2*strideq+8]

    psadbw    m5, [pix1q+strideq+8]

    lea       pix1q, [pix1q+2*strideq]
    lea       pix2q, [pix2q+2*strideq]

    movu      m3, [pix2q+strideq]

    psadbw    m2, [pix1q+strideq]

    movu      m6, [pix2q+strideq+8]

    psadbw    m5, [pix1q+strideq+8]
;-------------------------------------------------------------------------------------------
;int ff_sad_approx_xy2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
;-------------------------------------------------------------------------------------------
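;
; "approx" because the 2x2 average is built from chained pavgb ops, each of
; which rounds up, so the result can be 1 above the exactly rounded average.
; The exact inner loop being approximated (cf. pix_abs16_xy2_c(); sketch only):
;
;   sum += abs(pix1[x] - ((pix2[x] + pix2[x + 1] +
;                          pix2[x + stride] + pix2[x + stride + 1] + 2) >> 2));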
%macro SAD_APPROX_XY2 1
cglobal sad%1_approx_xy2, 5, 5, 7, v, pix1, pix2, stride, h
    movu      m0, [pix2q+strideq]
    movu      m3, [pix2q+2*strideq]

    movu      m6, [pix2q+strideq+1]
    movu      m2, [pix2q+2*strideq+1]

    pavgb     m0, [pix2q+strideq+1]
    pavgb     m3, [pix2q+2*strideq+1]

    psadbw    m0, [pix1q+strideq]

    movu      m6, [pix2q+strideq+8]
    movu      m7, [pix2q+2*strideq+8]
    pavgb     m5, [pix2q+1+8]
    pavgb     m6, [pix2q+strideq+1+8]
    pavgb     m7, [pix2q+2*strideq+1+8]

    psadbw    m6, [pix1q+strideq+8]

    lea       pix1q, [pix1q+2*strideq]
    lea       pix2q, [pix2q+2*strideq]

    movu      m3, [pix2q+strideq]

    movu      m6, [pix2q+strideq+1]

    pavgb     m3, [pix2q+strideq+1]

    psadbw    m2, [pix1q+strideq]

    movu      m7, [pix2q+strideq+8]
    pavgb     m6, [pix2q+8+1]
    pavgb     m7, [pix2q+strideq+8+1]

    psadbw    m6, [pix1q+strideq+8]
;--------------------------------------------------------------------
;int ff_vsad_intra(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
;                  ptrdiff_t line_size, int h);
;--------------------------------------------------------------------
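;
; Vertical-gradient SAD of pix1 alone; pix2 is unused. Illustrative C
; reference (cf. vsad_intra16_c() in libavcodec/me_cmp.c; sketch only):
;
;   int vsad_intra(const uint8_t *pix1, ptrdiff_t lsize, int h, int w)
;   {
;       int sum = 0;
;       for (int y = 0; y + 1 < h; y++) {
;           for (int x = 0; x < w; x++)
;               sum += abs(pix1[x] - pix1[x + lsize]);
;           pix1 += lsize;
;       }
;       return sum;
;   }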
cglobal vsad_intra%1, 5, 5, 3, v, pix1, pix2, lsize, h

    mova      m2, [pix1q+lsizeq]

    mova      m2, [pix1q+lsizeq]

    mova      m4, [pix1q+lsizeq+8]

    lea       pix1q, [pix1q + 2*lsizeq]

    mova      m2, [pix1q+lsizeq]

    mova      m2, [pix1q+lsizeq]
    mova      m4, [pix1q+lsizeq+8]
;---------------------------------------------------------------------
;int ff_vsad_approx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
;                   ptrdiff_t line_size, int h);
;---------------------------------------------------------------------
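;
; Vertical SAD of the difference plane pix1 - pix2; "approx" because the byte
; differences are formed with wrapping psubb. The exact version being
; approximated (cf. vsad16_c() in libavcodec/me_cmp.c; sketch only):
;
;   sum += abs(pix1[x] - pix2[x] - pix1[x + lsize] + pix2[x + lsize]);
;   // accumulated over y in [0, h-1) and x in [0, w)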
cglobal vsad%1_approx, 5, 5, 5, v, pix1, pix2, lsize, h

%if %1 == mmsize ; vsad8_mmxext, vsad16_sse2
    mova      m4, [pix1q+lsizeq]

    movu      m2, [pix2q+lsizeq]

    psubb     m4, [pix2q+lsizeq]

%else ; vsad16_mmxext

    mova      m4, [pix1q+lsizeq]
    mova      m5, [pix1q+lsizeq+8]
    psubb     m4, [pix2q+lsizeq]
    psubb     m5, [pix2q+lsizeq+8]
%endif

    lea       pix1q, [pix1q + 2*lsizeq]
    lea       pix2q, [pix2q + 2*lsizeq]

%if %1 == mmsize ; vsad8_mmxext, vsad16_sse2

    mova      m4, [pix1q+lsizeq]
    movu      m3, [pix2q+lsizeq]

%else ; vsad16_mmxext

    mova      m4, [pix1q+lsizeq]
    mova      m5, [pix1q+lsizeq+8]
    psubb     m4, [pix2q+lsizeq]
    psubb     m5, [pix2q+lsizeq+8]
%endif