1 ;******************************************************************************
2 ;* VC1 DSP optimizations
3 ;* Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
4 ;* Copyright (c) 2009 David Conrad
6 ;* This file is part of FFmpeg.
8 ;* FFmpeg is free software; you can redistribute it and/or
9 ;* modify it under the terms of the GNU Lesser General Public
10 ;* License as published by the Free Software Foundation; either
11 ;* version 2.1 of the License, or (at your option) any later version.
13 ;* FFmpeg is distributed in the hope that it will be useful,
14 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 ;* Lesser General Public License for more details.
18 ;* You should have received a copy of the GNU Lesser General Public
19 ;* License along with FFmpeg; if not, write to the Free Software
20 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 ;******************************************************************************
23 %include "libavutil/x86/x86util.asm"
31 ; dst_low, dst_high (src), zero
32 ; zero-extends one vector from 8 to 16 bits
; NOTE(review): the two comment lines above document the UNPACK_8TO16 macro,
; whose %macro line falls outside this chunk of the file.
39 %macro STORE_4_WORDS 6
; Scatter-stores four 16-bit words from the source register (arg 5) to the
; four destination addresses given as args 1-4; arg 6 selects a temp register
; or word offset. Body is not visible in this chunk -- semantics inferred
; from the call sites in VC1_H_LOOP_FILTER below; verify against full file.
62 ; in: p1 p0 q0 q1, clobbers p0
63 ; out: p1 = (2*(p1 - q1) - 5*(p0 - q0) + 4) >> 3
; Core of the VC-1 in-loop deblocking filter for one group of pixels:
; computes the a0 term above, decides per-pixel whether to filter, and
; clips the correction d. Many interior lines (computation of a1/a2/a3,
; d and clip) are outside this view -- do not rely on register roles
; beyond what the inline comments below state.
64 %macro VC1_LOOP_FILTER_A0 4
84 pcmpgtw m6, m3 ; if (a2 < a0 || a1 < a0)
86 pmullw m3, [pw_5] ; 5*(a3 - a0)
88 psraw m2, 3 ; abs(d/8)
89 pxor m7, m3 ; d_sign ^= a0_sign
97 pcmpgtw m3, m4 ; if (a0 < pq)
104 pxor m3, m7 ; d_sign ^ clip_sign
106 pminsw m2, m4 ; min(d, clip)
108 pand m6, m4 ; filt3 (C return value)
110 ; each set of 4 pixels is not filtered if the 3rd is not
121 pand m3, m2 ; d final
132 ; 1st param: size of filter
133 ; 2nd param: mov suffix equivalent to the filter size
; Vertical loop filter (filters across a horizontal block edge). Runs the
; A0 filter core three times -- presumably once per a1/a2/a3 neighbourhood,
; as in the C reference; the surrounding loads/stores are on lines not
; visible in this chunk.
134 %macro VC1_V_LOOP_FILTER 2
145 VC1_LOOP_FILTER_A0 m6, m4, m7, m0
151 VC1_LOOP_FILTER_A0 m7, m4, m1, m2
157 VC1_LOOP_FILTER_A0 m5, m2, m3, m4
164 ; 1st param: size of filter
165 ; NOTE: UNPACK_8TO16 zero-extends this many 8-bit values held in half a register
166 ; 2nd (optional) param: temp register to use for storing words
; Horizontal loop filter (filters across a vertical block edge): transposes
; the pixel columns into rows, widens them to 16 bits, runs the A0 filter
; core three times, then scatter-stores the filtered words back column-wise.
; Interior lines (loads, %if branches around the stores) are missing from
; this chunk.
167 %macro VC1_H_LOOP_FILTER 1-2
173 TRANSPOSE4x4B 0, 1, 2, 3, 4
187 TRANSPOSE4x4W 0, 1, 2, 3, 4
191 UNPACK_8TO16 bw, 6, 0, 5
192 UNPACK_8TO16 bw, 7, 1, 5
193 VC1_LOOP_FILTER_A0 m6, m0, m7, m1
194 UNPACK_8TO16 bw, 4, 2, 5
195 mova m0, m1 ; m0 = p0
196 VC1_LOOP_FILTER_A0 m7, m1, m4, m2
197 UNPACK_8TO16 bw, 1, 3, 5
199 VC1_LOOP_FILTER_A0 m5, m2, m1, m3
; Store back at dst-1 for 4 consecutive rows; r3/r4 presumably hold
; 3*stride and dst+4*stride respectively -- set up on missing lines; verify.
205 STORE_4_WORDS [r0-1], [r0+r1-1], [r0+2*r1-1], [r0+r3-1], m0, %2
208 STORE_4_WORDS [r4-1], [r4+r1-1], [r4+2*r1-1], [r4+r3-1], m0, %2
211 STORE_4_WORDS [r0-1], [r0+r1-1], [r0+2*r1-1], [r0+r3-1], m0, 0
212 STORE_4_WORDS [r4-1], [r4+r1-1], [r4+2*r1-1], [r4+r3-1], m0, 4
; Common register/pointer setup for the vertical filters (body not visible
; in this chunk).
217 %macro START_V_FILTER 0
; Common setup for the horizontal filters; takes one parameter (body not
; visible in this chunk).
225 %macro START_H_FILTER 1
; Shared 4-pixel-wide vertical filter body; called by the public 4- and
; 8-pixel entry points below (the 8-pixel ones call it twice).
234 cglobal vc1_v_loop_filter_internal
235 VC1_V_LOOP_FILTER 4, d
; Shared 4-pixel-wide horizontal filter body; called by the public 4- and
; 8-pixel entry points below. Uses r4 as the word-store temp register.
238 cglobal vc1_h_loop_filter_internal
239 VC1_H_LOOP_FILTER 4, r4
242 ; void ff_vc1_v_loop_filter4_mmxext(uint8_t *src, int stride, int pq)
243 cglobal vc1_v_loop_filter4, 3,5,0
; Per-call setup (START_V_FILTER etc.) is on lines not visible in this chunk.
245 call vc1_v_loop_filter_internal
248 ; void ff_vc1_h_loop_filter4_mmxext(uint8_t *src, int stride, int pq)
249 cglobal vc1_h_loop_filter4, 3,5,0
; Per-call setup (START_H_FILTER etc.) is on lines not visible in this chunk.
251 call vc1_h_loop_filter_internal
254 ; void ff_vc1_v_loop_filter8_mmxext(uint8_t *src, int stride, int pq)
255 cglobal vc1_v_loop_filter8, 3,5,0
; Filters the 8-pixel edge as two 4-pixel halves; the pointer advance
; between the two calls is on a line not visible in this chunk.
257 call vc1_v_loop_filter_internal
260 call vc1_v_loop_filter_internal
263 ; void ff_vc1_h_loop_filter8_mmxext(uint8_t *src, int stride, int pq)
264 cglobal vc1_h_loop_filter8, 3,5,0
; Filters the 8-pixel edge as two 4-pixel halves; the pointer advance
; between the two calls is on a line not visible in this chunk.
266 call vc1_h_loop_filter_internal
268 call vc1_h_loop_filter_internal
276 ; void ff_vc1_v_loop_filter8_sse2(uint8_t *src, int stride, int pq)
; SSE2 version: wide enough registers to filter all 8 pixels in one pass
; (8 xmm registers requested in the cglobal declaration).
277 cglobal vc1_v_loop_filter8, 3,5,8
279 VC1_V_LOOP_FILTER 8, q
282 ; void ff_vc1_h_loop_filter8_sse2(uint8_t *src, int stride, int pq)
; SSE2 version; r5 is the extra temp register for the word stores.
283 cglobal vc1_h_loop_filter8, 3,6,8
285 VC1_H_LOOP_FILTER 8, r5
289 ; void ff_vc1_v_loop_filter4_ssse3(uint8_t *src, int stride, int pq)
; Same macro body as the mmxext version; instruction selection differs via
; the INIT_* mode active at this point (set on lines not visible here).
290 cglobal vc1_v_loop_filter4, 3,5,0
292 VC1_V_LOOP_FILTER 4, d
295 ; void ff_vc1_h_loop_filter4_ssse3(uint8_t *src, int stride, int pq)
; Same macro body as the mmxext version; instruction selection differs via
; the INIT_* mode active at this point (set on lines not visible here).
296 cglobal vc1_h_loop_filter4, 3,5,0
298 VC1_H_LOOP_FILTER 4, r4
302 ; void ff_vc1_v_loop_filter8_ssse3(uint8_t *src, int stride, int pq)
; SSSE3 variant of the one-pass 8-pixel vertical filter.
303 cglobal vc1_v_loop_filter8, 3,5,8
305 VC1_V_LOOP_FILTER 8, q
308 ; void ff_vc1_h_loop_filter8_ssse3(uint8_t *src, int stride, int pq)
; SSSE3 variant of the one-pass 8-pixel horizontal filter.
309 cglobal vc1_h_loop_filter8, 3,6,8
311 VC1_H_LOOP_FILTER 8, r5
315 ; void ff_vc1_h_loop_filter8_sse4(uint8_t *src, int stride, int pq)
; SSE4 variant; note it needs one fewer GPR than the sse2/ssse3 versions
; (5 vs 6) -- presumably the word stores use a different mechanism here.
; Body is on lines not visible in this chunk.
316 cglobal vc1_h_loop_filter8, 3,5,8
322 ; Compute the rounder 32-r or 8-r and unpack it to m7
323 %macro LOAD_ROUNDER_MMX 1 ; round
329 %macro SHIFT2_LINE 5 ; off, r0, r1, r2, r3
; Produces one 16-bit output line of the shift2 vertical filter used by
; vc1_put_ver_16b_shift2 below: loads the next source rows (stride_neg2 /
; strideq offsets), filters, and stores at dst+off. The arithmetic between
; the loads and the store is on lines not visible in this chunk.
331 movh m%2, [srcq + stride_neg2]
334 movh m%5, [srcq + strideq]
340 movu [dstq + %1], m%3
345 ; void ff_vc1_put_ver_16b_shift2_mmx(int16_t *dst, const uint8_t *src,
346 ; x86_reg stride, int rnd, int64_t shift)
347 ; Sacrificing m6 makes it possible to pipeline loads from src
; NOTE(review): the two cglobal declarations below are alternative
; prologues selected by a %if (x86-32 vs x86-64) whose guard lines are
; not visible in this chunk -- only one is assembled.
349 cglobal vc1_put_ver_16b_shift2, 3,6,0, dst, src, stride
350 DECLARE_REG_TMP 3, 4, 5
352 %define shift qword r4m
354 cglobal vc1_put_ver_16b_shift2, 4,7,0, dst, src, stride
355 DECLARE_REG_TMP 4, 5, 6
357 ; We need shift either in memory or in a mm reg as it's used in psraw
358 ; On WIN64, the arg is already on the stack
359 ; On UNIX64, m5 doesn't seem to be used
367 %define stride_neg2 t0q
368 %define stride_9minus4 t1q
; stride_neg2: per its name, ends up holding -2*stride (a negating
; instruction between the mov and add below is missing from this view;
; verify against the full file).
370 mov stride_neg2, strideq
372 add stride_neg2, stride_neg2
373 lea stride_9minus4, [strideq * 9 - 4]
; Unrolled 8-line body; register args rotate 1-2-3-4 so each line's
; result feeds the next, and dst offsets advance 24 bytes per line.
384 SHIFT2_LINE 0, 1, 2, 3, 4
385 SHIFT2_LINE 24, 2, 3, 4, 1
386 SHIFT2_LINE 48, 3, 4, 1, 2
387 SHIFT2_LINE 72, 4, 1, 2, 3
388 SHIFT2_LINE 96, 1, 2, 3, 4
389 SHIFT2_LINE 120, 2, 3, 4, 1
390 SHIFT2_LINE 144, 3, 4, 1, 2
391 SHIFT2_LINE 168, 4, 1, 2, 3
; Rewind src to the start of the next column strip.
392 sub srcq, stride_9minus4
397 %endif ; HAVE_MMX_INLINE