1 ;******************************************************************************
2 ;* Copyright (c) 2010 David Conrad
4 ;* This file is part of FFmpeg.
6 ;* FFmpeg is free software; you can redistribute it and/or
7 ;* modify it under the terms of the GNU Lesser General Public
8 ;* License as published by the Free Software Foundation; either
9 ;* version 2.1 of the License, or (at your option) any later version.
11 ;* FFmpeg is distributed in the hope that it will be useful,
12 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 ;* Lesser General Public License for more details.
16 ;* You should have received a copy of the GNU Lesser General Public
17 ;* License along with FFmpeg; if not, write to the Free Software
18 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 ;******************************************************************************
21 %include "libavutil/x86/x86util.asm"
; 16-byte SIMD constant: the value 128 replicated into every byte. In this
; kind of DSP code it is typically XORed/added to convert between signed and
; unsigned bytes — confirm at its use sites (not visible in this excerpt).
; The enclosing section directive (.rodata) is also outside this excerpt.
28 pb_128: times 16 db 128
46 ; dirac_hpel_filter_v_sse2(uint8_t *dst, uint8_t *src, int stride, int width);
; Vertical half-pel interpolation filter. The %1 suffix means this cglobal
; sits inside a %macro body (the macro header is outside this excerpt).
; Declares: 4 named args, 6 GPRs, 8 vector regs; src0q/stridex3q are scratch.
47 cglobal dirac_hpel_filter_v_%1, 4,6,8, dst, src, stride, width, src0, stridex3
; Precompute 3*stride so src[3*stride] fits a single addressing mode.
49 lea stridex3q, [3*strideq]
; NOTE(review): this is a sampled excerpt — lines between the fragments below
; (loop labels, the accumulate/shift/pack steps, stores, RET) are elided; do
; not assume adjacent lines here were adjacent in the original file.
; UNPACK_ADD is a helper macro defined elsewhere in this file; it appears to
; unpack two byte sources to words and add them (a/u flags presumably select
; aligned/unaligned loads) — confirm against its definition.
; Innermost taps: src[0] + src[1] (one row above/below the half-pel point).
54 UNPACK_ADD m0, m1, [srcq], [srcq + strideq], a,a
58 ; 3*( ... + src[-2] + src[3])
59 UNPACK_ADD m2, m3, [src0q + strideq], [srcq + stridex3q], a,a
65 ; ... - 7*(src[-1] + src[2])
66 UNPACK_ADD m2, m3, [src0q + strideq*2], [srcq + strideq*2], a,a
72 ; ... - (src[-3] + src[4])
73 UNPACK_ADD m2, m3, [src0q], [srcq + strideq*4], a,a
90 ; dirac_hpel_filter_h_sse2(uint8_t *dst, uint8_t *src, int width);
; Horizontal half-pel filter: same symmetric tap pattern as the vertical
; variant, but neighbors are at byte offsets (+/-1..4) instead of stride
; multiples, hence the unaligned (u,u) loads.
91 cglobal dirac_hpel_filter_h_%1, 3,3,8, dst, src, width
; Round width down to a whole number of vector registers; the loop presumably
; walks widthq downward — loop-control lines are elided from this excerpt.
94 and widthd, ~(mmsize-1)
; NOTE(review): sampled excerpt — lines between the fragments below (the
; accumulate/scale/pack arithmetic) are not visible here.
; Innermost taps: src[0] + src[1].
97 UNPACK_ADD m0, m1, [srcq + widthq], [srcq + widthq + 1], u,u
101 ; 3*( ... + src[-2] + src[3])
102 UNPACK_ADD m2, m3, [srcq + widthq - 2], [srcq + widthq + 3], u,u
108 ; ... - 7*(src[-1] + src[2])
109 UNPACK_ADD m2, m3, [srcq + widthq - 1], [srcq + widthq + 2], u,u
115 ; ... - (src[-3] + src[4])
116 UNPACK_ADD m2, m3, [srcq + widthq - 3], [srcq + widthq + 4], u,u
; Aligned store of one vector of filtered output at dst[width].
125 mova [dstq + widthq], m0
132 ; void put_signed_rect_clamped(uint8_t *dst, int dst_stride, int16_t *src, int src_stride, int width, int height)
; (comment fixed: the function declared below is the *signed* variant; the
; clamp to signed 8-bit comes from packsswb saturation.)
133 cglobal put_signed_rect_clamped_%1, 5,9,3, dst, dst_stride, src, src_stride, w, dst2, src2
; Sign-extend the 32-bit stride arguments to full 64-bit registers. The %if
; guard around these (they are only needed/legal on x86-64) is outside this
; excerpt — confirm.
139 movsxd dst_strideq, dst_strided
140 movsxd src_strideq, src_strided
; NOTE(review): sampled excerpt — loop labels, the second-row stores, and the
; row counter update are elided between the fragments below.
; Two rows per iteration: src2/dst2 track the row below src/dst. src is
; int16_t, so one row of src is src_stride*2 bytes.
152 lea src2q, [srcq+src_strideq*2]
153 lea dst2q, [dstq+dst_strideq]
; Load two vectors per row and pack int16 -> int8 with signed saturation
; (this saturation is the "clamped" in the function name).
156 mova m1, [srcq +2*wq]
157 mova m2, [src2q+2*wq]
158 packsswb m1, [srcq +2*wq+mmsize]
159 packsswb m2, [src2q+2*wq+mmsize]
; Advance by two rows: 2 * (src_stride*2) bytes for int16 src, 2*dst_stride
; bytes for the byte destination.
166 lea srcq, [srcq+src_strideq*4]
167 lea dstq, [dstq+dst_strideq*2]
175 ; void add_rect_clamped(uint8_t *dst, uint16_t *src, int stride, int16_t *idwt, int idwt_stride, int width, int height)
; Adds IDWT coefficients to a 16-bit source rectangle and stores the result
; clamped into dst. The %1 suffix means this sits inside a %macro body.
176 cglobal add_rect_clamped_%1, 7,9,3, dst, src, stride, idwt, idwt_stride, w, h
; Sign-extend the 32-bit stride arguments (x86-64; the surrounding %if guard
; is outside this excerpt — confirm).
182 movsxd strideq, strided
183 movsxd idwt_strideq, idwt_strided
; NOTE(review): sampled excerpt — the loop structure and the clamp/pack/store
; to dst are elided between the fragments below.
; Unaligned loads of one row of 16-bit source data (two vectors wide)...
193 movu m1, [srcq +2*wq] ; FIXME: ensure alignment
196 movu m2, [srcq +2*wq+mmsize] ; FIXME: ensure alignment
; ...plus the corresponding IDWT coefficients (packed 16-bit add; wraparound
; here is presumably handled by the elided clamping step — confirm).
199 paddw m1, [idwtq+2*wq]
200 paddw m2, [idwtq+2*wq+mmsize]
; Advance one row in each 16-bit plane: strides are in elements, so the byte
; step is 2*stride.
205 lea srcq, [srcq + 2*strideq]
207 lea idwtq, [idwtq+ 2*idwt_strideq]
215 ; void add_obmc(uint16_t *dst, uint8_t *src, int stride, uint8_t *obmc_weight, int yblen)
; Overlapped block motion compensation: accumulates obmc-weighted source
; pixels into a 16-bit destination. %1 is presumably the block width and %2
; the ISA suffix (the %macro header is outside this excerpt — confirm).
216 cglobal add_dirac_obmc%1_%2, 6,6,5, dst, src, stride, obmc, yblen
; NOTE(review): sampled excerpt — the per-row loop over `i`, the weighting
; multiply/adds, and the low-half load/store are elided; `i` is defined in the
; elided lines (likely a %assign/%rep counter) — confirm.
; Read-modify-write of the second vector of the 16-bit destination row.
232 movu m3, [dstq+2*i+mmsize]
236 movu [dstq+2*i+mmsize], m1
; Row advance: src is uint8_t (step = stride bytes), dst is uint16_t
; (step = 2*stride bytes).
239 lea srcq, [srcq+strideq]
240 lea dstq, [dstq+2*strideq]