1 ;******************************************************************************
2 ;* Copyright Nick Kurshev
3 ;* Copyright Michael (michaelni@gmx.at)
4 ;* Copyright 2018 Jokyo Images
5 ;* Copyright Ivo van Poorten
7 ;* This file is part of FFmpeg.
9 ;* FFmpeg is free software; you can redistribute it and/or
10 ;* modify it under the terms of the GNU Lesser General Public
11 ;* License as published by the Free Software Foundation; either
12 ;* version 2.1 of the License, or (at your option) any later version.
14 ;* FFmpeg is distributed in the hope that it will be useful,
15 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
16 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 ;* Lesser General Public License for more details.
19 ;* You should have received a copy of the GNU Lesser General Public
20 ;* License along with FFmpeg; if not, write to the Free Software
21 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 ;******************************************************************************
24 %include "libavutil/x86/x86util.asm"
; 0x00FF repeated as 8 words: keeps the low byte of every word, used by the
; non-pshufb (MMX-era) path of shuffle_bytes_2103 to isolate alternate bytes.
28 pb_mask_shuffle2103_mmx times 8 dw 255
; pshufb control vectors: each 4-index group permutes one 32-bit pixel, so the
; same pattern repeats four times to cover a 16-byte lane. The name encodes the
; byte order produced, e.g. 2103 swaps bytes 0 and 2 (R<->B in RGBA/BGRA).
29 pb_shuffle2103: db 2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15
30 pb_shuffle0321: db 0, 3, 2, 1, 4, 7, 6, 5, 8, 11, 10, 9, 12, 15, 14, 13
31 pb_shuffle1230: db 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
32 pb_shuffle3012: db 3, 0, 1, 2, 7, 4, 5, 6, 11, 8, 9, 10, 15, 12, 13, 14
33 pb_shuffle3210: db 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
38 ; %1 dst ; %2 src ; %3 shift
47 ;------------------------------------------------------------------------------
48 ; shuffle_bytes_2103_mmxext (const uint8_t *src, uint8_t *dst, int src_size)
49 ;------------------------------------------------------------------------------
; void shuffle_bytes_2103(const uint8_t *src, uint8_t *dst, int src_size)
; Copies src to dst while swapping bytes 0 and 2 of every 4-byte pixel
; (BGRA <-> RGBA). Uses 5 GPRs / 8 vector regs (see cglobal signature).
; NOTE(review): this listing is a fragment — the loop labels, counter setup
; and most of the SIMD body are not visible here.
51 cglobal shuffle_bytes_2103, 3, 5, 8, src, dst, w, tmp, x
52 mova m6, [pb_mask_shuffle2103_mmx]       ; m6 = 0x00FF words (byte-isolation mask)
; Scalar tail: one 4-byte pixel per iteration, bytes written in 2,1,0,3 order.
68 mov tmpb, [srcq + wq + 2]
69 mov [dstq+wq + 0], tmpb                  ; dst[0] = src[2]
70 mov tmpb, [srcq + wq + 1]
71 mov [dstq+wq + 1], tmpb                  ; dst[1] = src[1]
72 mov tmpb, [srcq + wq + 0]
73 mov [dstq+wq + 2], tmpb                  ; dst[2] = src[0]
74 mov tmpb, [srcq + wq + 3]
75 mov [dstq+wq + 3], tmpb                  ; dst[3] = src[3] (alpha untouched)
80 ;check if src_size < mmsize * 2
; Second half of a 2*mmsize-per-iteration SIMD store (first half not in view).
101 movu [dstq+wq + 8], m1
109 ;------------------------------------------------------------------------------
110 ; shuffle_bytes_## (const uint8_t *src, uint8_t *dst, int src_size)
111 ;------------------------------------------------------------------------------
; SHUFFLE_BYTES %1,%2,%3,%4 — emits shuffle_bytes_%1%2%3%4():
; void shuffle_bytes_NNNN(const uint8_t *src, uint8_t *dst, int src_size)
; Rearranges the four bytes of every 32-bit pixel into the order given by the
; macro arguments, using the matching pb_shuffle table with pshufb.
; NOTE(review): fragment — the SIMD loop and %endmacro are not visible here.
113 %macro SHUFFLE_BYTES 4
114 cglobal shuffle_bytes_%1%2%3%4, 3, 5, 2, src, dst, w, tmp, x
115 VBROADCASTI128 m0, [pb_shuffle%1%2%3%4]  ; m0 = pshufb control, same in both lanes
; Scalar tail: permute one 4-byte pixel per iteration.
128 mov tmpb, [srcq + wq + %1]
129 mov [dstq+wq + 0], tmpb                  ; dst[0] = src[%1]
130 mov tmpb, [srcq + wq + %2]
131 mov [dstq+wq + 1], tmpb                  ; dst[1] = src[%2]
132 mov tmpb, [srcq + wq + %3]
133 mov [dstq+wq + 2], tmpb                  ; dst[2] = src[%3]
134 mov tmpb, [srcq + wq + %4]
135 mov [dstq+wq + 3], tmpb                  ; dst[3] = src[%4]
140 ;check if src_size < mmsize
; Instantiate one shuffle function per supported byte permutation.
156 SHUFFLE_BYTES 2, 1, 0, 3                 ; shuffle_bytes_2103 (swap R<->B)
157 SHUFFLE_BYTES 0, 3, 2, 1                 ; shuffle_bytes_0321
158 SHUFFLE_BYTES 1, 2, 3, 0                 ; shuffle_bytes_1230
159 SHUFFLE_BYTES 3, 0, 1, 2                 ; shuffle_bytes_3012
160 SHUFFLE_BYTES 3, 2, 1, 0                 ; shuffle_bytes_3210 (full reverse)
162 ;-----------------------------------------------------------------------------------------------
163 ; uyvytoyuv422(uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
164 ; const uint8_t *src, int width, int height,
165 ; int lumStride, int chromStride, int srcStride)
166 ;-----------------------------------------------------------------------------------------------
; UYVY_TO_YUV422 — emits uyvytoyuv422(): de-interleaves packed UYVY into
; planar Y, U and V buffers, one image row at a time.
; NOTE(review): fragment — loop labels, the m1 mask load (m1 is used as the
; 0x00FF byte mask below) and %endmacro are not visible in this listing.
167 %macro UYVY_TO_YUV422 0
168 cglobal uyvytoyuv422, 9, 14, 8, ydst, udst, vdst, src, w, h, lum_stride, chrom_stride, src_stride, wtwo, whalf, tmp, x, back_w
; Sign-extend the 32-bit stride arguments to pointer width where needed.
174 movsxdifnidn lum_strideq, lum_strided
175 movsxdifnidn chrom_strideq, chrom_strided
176 movsxdifnidn src_strideq, src_strided
180 shr whalfq, 1 ; whalf = width / 2
182 lea srcq, [srcq + wq * 2]                ; src advances 2 bytes per output luma pixel
190 add wtwoq, wtwoq ; wtwo = width * 2
196 ;calc scalar loop count
197 and xq, mmsize * 2 - 1                   ; x = pixels left over after the SIMD loop
; Scalar loop: one UYVY quad -> 2 Y, 1 U, 1 V per iteration.
201 mov tmpb, [srcq + wtwoq + 0]
202 mov [udstq + whalfq], tmpb               ; U
204 mov tmpb, [srcq + wtwoq + 1]
205 mov [ydstq + wq], tmpb                   ; Y0
207 mov tmpb, [srcq + wtwoq + 2]
208 mov [vdstq + whalfq], tmpb               ; V
210 mov tmpb, [srcq + wtwoq + 3]
211 mov [ydstq + wq + 1], tmpb               ; Y1
219 ; check if simd loop is needed
; SIMD body: load 4*mmsize bytes of packed UYVY per iteration.
224 movu m2, [srcq + wtwoq ]
225 movu m3, [srcq + wtwoq + mmsize ]
226 movu m4, [srcq + wtwoq + mmsize * 2]
227 movu m5, [srcq + wtwoq + mmsize * 3]
; Extract luma: shift Y bytes into even positions, mask, then pack.
230 RSHIFT_COPY m6, m2, 1 ; UYVY UYVY -> YVYU YVY...
231 pand m6, m1; YxYx YxYx...
233 RSHIFT_COPY m7, m3, 1 ; UYVY UYVY -> YVYU YVY...
234 pand m7, m1 ; YxYx YxYx...
236 packuswb m6, m7 ; YYYY YYYY...
237 movu [ydstq + wq], m6
240 RSHIFT_COPY m6, m4, 1 ; UYVY UYVY -> YVYU YVY...
241 pand m6, m1; YxYx YxYx...
243 RSHIFT_COPY m7, m5, 1 ; UYVY UYVY -> YVYU YVY...
244 pand m7, m1 ; YxYx YxYx...
246 packuswb m6, m7 ; YYYY YYYY...
247 movu [ydstq + wq + mmsize], m6
; Extract chroma: keep even bytes (U and V interleaved), pack down.
250 pand m2, m1 ; UxVx...
251 pand m3, m1 ; UxVx...
252 pand m4, m1 ; UxVx...
253 pand m5, m1 ; UxVx...
255 packuswb m2, m3 ; UVUV...
256 packuswb m4, m5 ; UVUV...
; Split interleaved UV: mask for U plane, word-shift for V plane.
259 pand m6, m2, m1 ; UxUx...
260 pand m7, m4, m1 ; UxUx...
262 packuswb m6, m7 ; UUUU
263 movu [udstq + whalfq], m6
267 psrlw m2, 8 ; VxVx...
268 psrlw m4, 8 ; VxVx...
269 packuswb m2, m4 ; VVVV
270 movu [vdstq + whalfq], m2
273 add wtwoq, mmsize * 4                    ; consumed 4*mmsize src bytes this iteration
; End of row: step all pointers to the next line by their strides.
278 add srcq, src_strideq
279 add ydstq, lum_strideq
280 add udstq, chrom_strideq
281 add vdstq, chrom_strideq
283 ;restore initial state of line variable
287 shr whalfq, 1 ; whalf = width / 2