;******************************************************************************
;* x86-optimized horizontal line scaling functions
;* Copyright (c) 2011 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86util.asm"

SECTION_RODATA

max_19bit_int: times 4 dd 0x7ffff
max_19bit_flt: times 4 dd 524287.0
minshort:      times 8 dw 0x8000
unicoeff:      times 4 dd 0x20000000
SECTION .text

;-----------------------------------------------------------------------------
; horizontal line scaling
;
; void hscale<source_width>to<intermediate_nbits>_<filterSize>_<opt>
;                               (SwsContext *c, int{16,32}_t *dst,
;                                int dstW, const uint{8,16}_t *src,
;                                const int16_t *filter,
;                                const int32_t *filterPos, int filterSize);
;
; Scale one horizontal line. The input is either 8 or 16 bits wide
; ($source_width can be 8, 9, 10, 14 or 16; the difference is whether we
; have to downscale before multiplying). The filter is 14 bits. The output
; is either 15 bits (in int16_t) or 19 bits (in int32_t), as given by
; $intermediate_nbits. Each output pixel is generated from $filterSize input
; pixels; the position of the first one is given in filterPos[nOutputPixel].
;-----------------------------------------------------------------------------
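; As a rough scalar reference for what each variant computes -- a sketch
; only, with simplified types, and without the exact rounding and clipping
; the SIMD code below performs:
;
;     for (i = 0; i < dstW; i++) {
;         int val = 0, pos = filterPos[i];
;         for (j = 0; j < filterSize; j++)
;             val += (int)src[pos + j] * filter[i * filterSize + j];
;         dst[i] = val >> (14 + source_width - intermediate_nbits);
;     }
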
; SCALE_FUNC source_width, intermediate_nbits, filtersize, filtersuffix, opt, n_args, n_xmm
%macro SCALE_FUNC 7
cglobal hscale%1to%2_%4_%5, %6, 7, %7
        mova          m2, [max_19bit_int]
        mova          m2, [max_19bit_int]
        mova          m2, [max_19bit_flt]
%endif ; mmx/sse2/ssse3/sse4
        shl           r2, 1                     ; this allows *16 (i.e. now *8) in lea instructions for the 8-tap filter
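        ; (why the pre-shift: lea can scale an index register by at most 8,
        ;  but the 8-tap filter stores 8 int16_t coefficients -- 16 bytes --
        ;  per output pixel; with r2 pre-doubled, [r4+r2*8] reaches 16 bytes
        ;  per original pixel index without an extra shift in the loop)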
        lea           r1, [r1+r2*(2>>r2shr)]
        lea           r1, [r1+r2*(4>>r2shr)]
        lea           r5, [r5+r2*(4>>r2shr)]
%if %3 == 4 ; filterSize == 4 scaling
        ; load 2x4 or 4x4 source pixels into m0/m1
        mov32         r0, dword [r5+r2*4+0]     ; filterPos[0]
        mov32         r6, dword [r5+r2*4+4]     ; filterPos[1]
        movlh         m0, [r3+r0*srcmul]        ; src[filterPos[0] + {0,1,2,3}]
        movlh         m1, [r3+r6*srcmul]        ; src[filterPos[1] + {0,1,2,3}]
        movhps        m0, [r3+r6*srcmul]        ; src[filterPos[1] + {0,1,2,3}]
        movd          m4, [r3+r6*srcmul]        ; src[filterPos[1] + {0,1,2,3}]
        mov32         r0, dword [r5+r2*4+8]     ; filterPos[2]
        mov32         r6, dword [r5+r2*4+12]    ; filterPos[3]
        movlh         m1, [r3+r0*srcmul]        ; src[filterPos[2] + {0,1,2,3}]
        movhps        m1, [r3+r6*srcmul]        ; src[filterPos[3] + {0,1,2,3}]
        movd          m5, [r3+r6*srcmul]        ; src[filterPos[3] + {0,1,2,3}]
%endif ; %1 == 8 && %5 <= ssse3
%endif ; mmsize == 8/16
        punpcklbw     m0, m3                    ; byte -> word
        punpcklbw     m1, m3                    ; byte -> word
        ; multiply with filter coefficients
%if %1 == 16 ; pmaddwd multiplies signed words, so move unsigned -> signed;
             ; we'll add back 0x8000 * sum(coeffs) after the horizontal add
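        ; (the identity behind this trick: with x' = x - 0x8000,
        ;  sum(x'[j]*c[j]) = sum(x[j]*c[j]) - 0x8000*sum(c[j]); since the
        ;  14-bit filter is normalized so that sum(c[j]) == 1 << 14, the
        ;  correction to add back is the constant 0x8000 << 14 = 0x20000000,
        ;  i.e. the unicoeff constant above)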
        pmaddwd       m0, [r4+r2*8+mmsize*0]    ; *= filter[{0,1,..,6,7}]
        pmaddwd       m1, [r4+r2*8+mmsize*1]    ; *= filter[{8,9,..,14,15}]
        ; add up horizontally (4 srcpix * 4 coefficients -> 1 dstpix)
%if mmsize == 8 ; mmx
        shufps        m0, m1, 10001000b
        shufps        m4, m1, 11011101b
        phaddd        m0, m1                    ; filter[{ 0, 1, 2, 3}]*src[filterPos[0]+{0,1,2,3}],
                                                ; filter[{ 4, 5, 6, 7}]*src[filterPos[1]+{0,1,2,3}],
                                                ; filter[{ 8, 9,10,11}]*src[filterPos[2]+{0,1,2,3}],
                                                ; filter[{12,13,14,15}]*src[filterPos[3]+{0,1,2,3}]
%endif ; mmx/sse2/ssse3/sse4
%else ; %3 == 8, i.e. filterSize == 8 scaling
        ; load 2x8 or 4x8 source pixels into m0, m1, m4 and m5
        mov32         r0, dword [r5+r2*2+0]     ; filterPos[0]
        mov32         r6, dword [r5+r2*2+4]     ; filterPos[1]
        movbh         m0, [r3+ r0   *srcmul]    ; src[filterPos[0] + {0,1,2,3,4,5,6,7}]
        movbh         m1, [r3+(r0+4)*srcmul]    ; src[filterPos[0] + {4,5,6,7}]
        movbh         m4, [r3+ r6   *srcmul]    ; src[filterPos[1] + {0,1,2,3}]
        movbh         m5, [r3+(r6+4)*srcmul]    ; src[filterPos[1] + {4,5,6,7}]
        movbh         m1, [r3+ r6   *srcmul]    ; src[filterPos[1] + {0,1,2,3,4,5,6,7}]
        mov32         r0, dword [r5+r2*2+8]     ; filterPos[2]
        mov32         r6, dword [r5+r2*2+12]    ; filterPos[3]
        movbh         m4, [r3+ r0   *srcmul]    ; src[filterPos[2] + {0,1,2,3,4,5,6,7}]
        movbh         m5, [r3+ r6   *srcmul]    ; src[filterPos[3] + {0,1,2,3,4,5,6,7}]
%endif ; mmsize == 8/16
        punpcklbw     m0, m3                    ; byte -> word
        punpcklbw     m1, m3                    ; byte -> word
        punpcklbw     m4, m3                    ; byte -> word
        punpcklbw     m5, m3                    ; byte -> word
%if %1 == 16 ; pmaddwd multiplies signed words, so move unsigned -> signed;
             ; we'll add back 0x8000 * sum(coeffs) after the horizontal add
        pmaddwd       m0, [r4+r2*8+mmsize*0]    ; *= filter[{0,1,..,6,7}]
        pmaddwd       m1, [r4+r2*8+mmsize*1]    ; *= filter[{8,9,..,14,15}]
        pmaddwd       m4, [r4+r2*8+mmsize*2]    ; *= filter[{16,17,..,22,23}]
        pmaddwd       m5, [r4+r2*8+mmsize*3]    ; *= filter[{24,25,..,30,31}]
        ; add up horizontally (8 srcpix * 8 coefficients -> 1 dstpix)
        ; emulate horizontal add as transpose + vertical add
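        ; (sketch of the idea: each of m0/m1/m4/m5 holds 4 dwords that must
        ;  be summed into one dstpix; after a 4x4 dword transpose, the j-th
        ;  transposed row holds element j of all four accumulators, so three
        ;  vertical paddd leave {sum(m0),sum(m1),sum(m4),sum(m5)} in one
        ;  register -- one horizontal sum per destination pixel)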
        ; FIXME if we rearrange the filter in pairs of 4, we can
        ; load pixels likewise and use 2 x paddd + phaddd instead
        ; of 3 x phaddd here, faster on older cpus
        phaddd        m0, m4                    ; filter[{ 0, 1,..., 6, 7}]*src[filterPos[0]+{0,1,...,6,7}],
                                                ; filter[{ 8, 9,...,14,15}]*src[filterPos[1]+{0,1,...,6,7}],
                                                ; filter[{16,17,...,22,23}]*src[filterPos[2]+{0,1,...,6,7}],
                                                ; filter[{24,25,...,30,31}]*src[filterPos[3]+{0,1,...,6,7}]
%endif ; mmx/sse2/ssse3/sse4
%else ; %3 == X, i.e. any filterSize scaling
%else ; %4 == X || %4 == X8
        movsxd        r6, r6d                   ; filterSize
        lea           r12, [r3+(r6-r6sub)*srcmul] ; &src[filterSize&~4]
        lea           r0, [r3+(r6-r6sub)*srcmul]  ; &src[filterSize&~4]
        mov32         r0, dword [r5+r2*4+0]     ; filterPos[0]
        mov32         r1x, dword [r5+r2*4+4]    ; filterPos[1]
        ; FIXME maybe do 4px/iteration on x86-64 (x86-32 wouldn't have enough regs)?
        ; load 2x4 (mmx) or 2x8 (sse) source pixels into m0/m1 -> m4/m5
        movbh         m0, [src_reg+r0         *srcmul] ; src[filterPos[0] + {0,1,2,3(,4,5,6,7)}]
        movbh         m1, [src_reg+(r1x+r6sub)*srcmul] ; src[filterPos[1] + {0,1,2,3(,4,5,6,7)}]
%if %1 == 16 ; pmaddwd multiplies signed words, so move unsigned -> signed;
             ; we'll add back 0x8000 * sum(coeffs) after the horizontal add
        pmaddwd       m0, [r4]                  ; filter[{0,1,2,3(,4,5,6,7)}]
        pmaddwd       m1, [r4+(r6+r6sub)*2]     ; filter[filterSize+{0,1,2,3(,4,5,6,7)}]
        add           src_reg, srcmul*mmsize/2
        cmp           src_reg, filter2          ; while (src += 4) < &src[filterSize]
        mov32         r1x, dword [r5+r2*4+4]    ; filterPos[1]
        movlh         m0, [src_reg+r0*srcmul]   ; split last 4 srcpx of dstpx[0]
        sub           r1x, r6                   ; and first 4 srcpx of dstpx[1]
        movhps        m0, [src_reg+(r1x+r6sub)*srcmul]
        movd          m1, [src_reg+(r1x+r6sub)*srcmul]
%endif ; %1 == 8 && %5 <= ssse3
%if %1 == 16 ; pmaddwd multiplies signed words, so move unsigned -> signed;
             ; we'll add back 0x8000 * sum(coeffs) after the horizontal add
        lea           r4, [r4+(r6+r6sub)*2]
%if mmsize == 8 ; mmx
%endif ; sse2/ssse3/sse4
        pshufd        m4, m4, 11011000b
%endif ; sse2/ssse3/sse4
%endif ; mmsize == 8/16
%if %1 == 16 ; add 0x8000 * sum(coeffs), i.e. back from signed -> unsigned
        psrad         m0, 14 + %1 - %2
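        ; (bit accounting for the shift above: %1-bit samples times the
        ;  14-bit filter accumulate to %1+14 significant bits, so shifting
        ;  right by 14 + %1 - %2 leaves exactly the %2 intermediate bits;
        ;  e.g. 8-bit input to 15-bit output shifts by 14+8-15 = 7)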
        movh          [r1+r2*(2>>r2shr)], m0
        PMINSD_MMX    m0, m2, m4
%endif ; mmx/sse2/ssse3/sse4
        mova          [r1+r2*(4>>r2shr)], m0
        add           r2, (mmsize<<r2shr)/4     ; both 8tap and 4tap really only do 4 pixels (or for mmx: 2 pixels)
                                                ; per iteration; see "shl r2, 1" above for why we do this
%endmacro

; SCALE_FUNCS source_width, intermediate_nbits, opt, n_xmm
%macro SCALE_FUNCS 4
SCALE_FUNC %1, %2, 4, 4,  %3, 6, %4
SCALE_FUNC %1, %2, 8, 8,  %3, 6, %4
SCALE_FUNC %1, %2, X, X,  %3, 7, %4
SCALE_FUNC %1, %2, X, X4, %3, 7, %4
SCALE_FUNC %1, %2, X, X8, %3, 7, %4
%endmacro

; SCALE_FUNCS2 opt, 8_xmm_args, 9to10_xmm_args, 16_xmm_args
%macro SCALE_FUNCS2 4
SCALE_FUNCS  8, 15, %1, %2
SCALE_FUNCS  9, 15, %1, %3
SCALE_FUNCS 10, 15, %1, %3
SCALE_FUNCS 14, 15, %1, %3
SCALE_FUNCS 16, 15, %1, %4

SCALE_FUNCS  8, 19, %1, %2
SCALE_FUNCS  9, 19, %1, %3
SCALE_FUNCS 10, 19, %1, %3
SCALE_FUNCS 14, 19, %1, %3
SCALE_FUNCS 16, 19, %1, %4
%endmacro

SCALE_FUNCS2 mmx,   0, 0, 0
SCALE_FUNCS2 sse2,  6, 7, 8
SCALE_FUNCS2 ssse3, 6, 6, 8
SCALE_FUNCS2 sse4,  6, 6, 8
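
; Each SCALE_FUNCS2 line above emits the full set of width/bit-depth/tap
; variants for one instruction set; e.g. the sse2 line yields functions
; such as ff_hscale8to15_4_sse2 and ff_hscale16to19_X8_sse2 (assuming the
; usual ff_ cglobal name prefix), from which swscale picks at runtime based
; on CPU flags, filter size and pixel depth.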