1 ;******************************************************************************
2 ;* SIMD lossless video DSP utils
3 ;* Copyright (c) 2008 Loren Merritt
4 ;* Copyright (c) 2014 Michael Niedermayer
6 ;* This file is part of FFmpeg.
8 ;* FFmpeg is free software; you can redistribute it and/or
9 ;* modify it under the terms of the GNU Lesser General Public
10 ;* License as published by the Free Software Foundation; either
11 ;* version 2.1 of the License, or (at your option) any later version.
13 ;* FFmpeg is distributed in the hope that it will be useful,
14 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 ;* Lesser General Public License for more details.
18 ;* You should have received a copy of the GNU Lesser General Public
19 ;* License along with FFmpeg; if not, write to the Free Software
20 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 ;******************************************************************************
23 %include "libavutil/x86/x86util.asm"
; Shuffle/constant tables used by the int16 left-prediction routines below.
; NOTE(review): excerpt — the SECTION_RODATA / alignment directive that
; should precede these constants is not visible here; confirm in full file.
27 pb_ef: times 8 db 14,15 ; repeated byte indices of word 7 (bytes 14,15)
28 pb_67: times 8 db 6, 7 ; repeated byte indices of word 3 (bytes 6,7)
; pshufb masks: a -1 (0xFF) byte has its high bit set, so pshufb zeroes
; that destination lane; non-negative bytes select source byte positions.
29 pb_zzzz2323zzzzabab: db -1,-1,-1,-1, 2, 3, 2, 3,-1,-1,-1,-1,10,11,10,11
30 pb_zzzzzzzz67676767: db -1,-1,-1,-1,-1,-1,-1,-1, 6, 7, 6, 7, 6, 7, 6, 7
34 %macro INT16_LOOP 2 ; %1 = a/u (aligned/unaligned), %2 = add/sub
; Shared loop body for the add_int16 / diff_int16 entry points: applies a
; 16-bit %2 (add/sub) across vectors using mov%1 (mova/movu) accesses.
; NOTE(review): excerpt — loop setup, the first-vector loads, the actual
; padd/psub+mask operations, tail handling and %endmacro are not visible
; in this view; comments below cover only the visible lines.
; Second-vector loads (offset +mmsize), presumably a 2x-unrolled iteration:
; the "add" caller reads src/dst, the "diff" caller reads src1/src2.
69 mov%1 m2, [srcq+wq+mmsize]
70 mov%1 m3, [dstq+wq+mmsize]
74 mov%1 m2, [src1q+wq+mmsize]
75 mov%1 m3, [src2q+wq+mmsize]
; Store the second processed vector back to dst.
82 mov%1 [dstq+wq+mmsize], m2
; int16 add/diff entry points. cglobal (from x86inc, via x86util.asm) emits
; the symbol, loads the named args (dst, src, ...) into registers and
; provides the *q/*d aliases used by INT16_LOOP.
; NOTE(review): each symbol appears twice — presumably one definition per
; SIMD flavour (e.g. INIT_MMX / INIT_XMM blocks) — but the INIT_* directives
; and the INT16_LOOP invocations forming the bodies are not visible in this
; excerpt; confirm against the full file.
90 cglobal add_int16, 4,4,5, dst, src, mask, w, tmp
94 cglobal add_int16, 4,4,5, dst, src, mask, w, tmp
104 cglobal diff_int16, 5,5,5, dst, src1, src2, mask, w, tmp
108 cglobal diff_int16, 5,5,5, dst, src1, src2, mask, w, tmp
120 %macro ADD_HFYU_LEFT_LOOP_INT16 2 ; %1 = dst alignment (a/u), %2 = src alignment (a/u)
; Loop body for left (previous-sample) prediction on 16-bit samples.
; NOTE(review): excerpt — the body (presumably a running prefix-sum with
; the carried "left" value, using the pshufb masks above) is not visible
; here except for this store; movhps writes the upper 8 bytes of m0.
146 movhps [dstq+wq+8], m0
161 ; int add_hfyu_left_pred_int16(uint16_t *dst, const uint16_t *src, unsigned mask, int w, int left)
163 cglobal add_hfyu_left_pred_int16, 4,4,8, dst, src, mask, w, left
; NOTE(review): excerpt — prologue and shuffle-constant setup lines between
; the visible ones are missing from this view.
166 mova m3, [pb_zzzz2323zzzzabab] ; pshufb mask (-1 bytes -> zeroed lanes)
171 ADD_HFYU_LEFT_LOOP_INT16 a, a ; aligned-dst / aligned-src loop
; Second definition of the same symbol — presumably a different SIMD
; flavour under an INIT_* block not visible in this excerpt.
174 cglobal add_hfyu_left_pred_int16, 4,4,8, dst, src, mask, w, left
176 mova m4, [pb_zzzzzzzz67676767] ; pshufb mask broadcasting word 3
177 mova m3, [pb_zzzz2323zzzzabab]
; Alignment dispatch: instantiate the loop variant matching the runtime
; alignment of dst/src (branching logic between these lines not visible).
186 ADD_HFYU_LEFT_LOOP_INT16 a, a
188 ADD_HFYU_LEFT_LOOP_INT16 u, a
190 ADD_HFYU_LEFT_LOOP_INT16 u, u
192 ; void add_hfyu_median_pred_int16(uint16_t *dst, const uint16_t *top, const uint16_t *diff, unsigned mask, int w, int *left, int *left_top)
194 cglobal add_hfyu_median_pred_int16, 7,7,0, dst, top, diff, mask, w, left, left_top
; MMX (mm*) median prediction on 16-bit samples:
;   pred = median(left, top, left + top - top_left); dst = pred + diff
; NOTE(review): excerpt — the loop structure and most of the body are not
; visible in this view; comments below describe only the visible lines.
200 movd mm4, [left_topq] ; load initial top-left (tl) context
205 psubw mm0, mm4 ; t-tl
217 psubw mm0, mm4 ; t-tl
223 paddw mm4, mm3 ; t-tl+l (the gradient predictor l+t-tl)
229 pmaxsw mm3, mm5 ; median
230 paddw mm3, mm2 ; +residual (add the diff term)
; Tail: reload the last 16-bit sample (-2 = one uint16_t back) to update
; the *left / *left_top context for the next call.
251 movzx r2d, word [dstq-2]
253 movzx r2d, word [topq-2]
257 cglobal sub_hfyu_median_pred_int16, 7,7,0, dst, src1, src2, mask, w, left, left_top
; Inverse of add_hfyu_median_pred_int16: emits residuals
;   dst = src2 - median_predictor(src1, src2, ...)
; NOTE(review): excerpt — loop control and the median computation between
; the visible lines are missing from this view.
265 movd mm6, [left_topq] ; load initial top-left context
; Current 8-byte groups of both sources; maskq appears to be reused as the
; running byte offset here — TODO confirm against the full loop.
271 movq mm1, [src1q + maskq]
272 movq mm3, [src2q + maskq]
284 movq [dstq + maskq], mm3 ; store residual group
; Previous-sample loads (-2 = one uint16_t back) for the predictors.
286 movq mm0, [src1q + maskq - 2]
287 movq mm2, [src2q + maskq - 2]
; Epilogue: write back final left/left_top context from the last samples.
290 movzx maskd, word [src1q + wq - 2]
291 mov [left_topq], maskd
292 movzx maskd, word [src2q + wq - 2]