1 ;******************************************************************************
2 ;* Copyright Nick Kurshev
3 ;* Copyright Michael (michaelni@gmx.at)
4 ;* Copyright 2018 Jokyo Images
6 ;* This file is part of FFmpeg.
8 ;* FFmpeg is free software; you can redistribute it and/or
9 ;* modify it under the terms of the GNU Lesser General Public
10 ;* License as published by the Free Software Foundation; either
11 ;* version 2.1 of the License, or (at your option) any later version.
13 ;* FFmpeg is distributed in the hope that it will be useful,
14 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 ;* Lesser General Public License for more details.
18 ;* You should have received a copy of the GNU Lesser General Public
19 ;* License along with FFmpeg; if not, write to the Free Software
20 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 ;******************************************************************************
23 %include "libavutil/x86/x86util.asm"
; pshufb/vpshufb control vectors: within each 16-byte lane, dst byte i is
; taken from src byte table[i]; the pattern repeats every 4 bytes, i.e. one
; 32-bit pixel is permuted at a time.
pb_shuffle2103: db 2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15 ; swap bytes 0 and 2 of each pixel
pb_shuffle0321: db 0, 3, 2, 1, 4, 7, 6, 5, 8, 11, 10, 9, 12, 15, 14, 13 ; swap bytes 1 and 3 of each pixel
pb_shuffle1230: db 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12 ; dst[i] = src[(i+1) & 3]
pb_shuffle3012: db 3, 0, 1, 2, 7, 4, 5, 6, 11, 8, 9, 10, 15, 12, 13, 14 ; dst[i] = src[(i+3) & 3]
pb_shuffle3210: db 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 ; reverse all 4 bytes of each pixel
; RSHIFT_COPY arguments: %1 = dst, %2 = src, %3 = shift amount
; (the macro body itself is not visible in this chunk)
45 ;------------------------------------------------------------------------------
46 ; shuffle_bytes_## (const uint8_t *src, uint8_t *dst, int src_size)
47 ;------------------------------------------------------------------------------
; void shuffle_bytes_%1%2%3%4(const uint8_t *src, uint8_t *dst, int src_size)
;
; Permutes every 4-byte group of src into dst: output byte k of each group is
; taken from input byte %[k+1] of the same group (k = 0..3).
; NOTE(review): the SIMD main loop, its labels/branches and %endmacro fall
; outside this chunk; only the setup and the scalar tail body are visible.
%macro SHUFFLE_BYTES 4
; 3 arguments, 5 GPRs (src, dst, w, tmp, x), 2 vector registers
cglobal shuffle_bytes_%1%2%3%4, 3, 5, 2, src, dst, w, tmp, x
    ; replicate the 16-byte shuffle control into every 128-bit lane of m0
    VBROADCASTI128    m0, [pb_shuffle%1%2%3%4]

    ; scalar tail: copy one 4-byte pixel per iteration, byte by byte
    mov tmpb, [srcq + wq + %1]      ; dst byte 0 <- src byte %1
    mov [dstq+wq + 0], tmpb
    mov tmpb, [srcq + wq + %2]      ; dst byte 1 <- src byte %2
    mov [dstq+wq + 1], tmpb
    mov tmpb, [srcq + wq + %3]      ; dst byte 2 <- src byte %3
    mov [dstq+wq + 2], tmpb
    mov tmpb, [srcq + wq + %4]      ; dst byte 3 <- src byte %4
    mov [dstq+wq + 3], tmpb

; check if src_size < mmsize (guard for the vector path; branch not visible here)
; Instantiate one shuffle_bytes_XXXX entry point per supported byte
; permutation (the INIT_XMM/INIT_YMM ISA dispatch surrounding these
; instantiations is not visible in this chunk).
SHUFFLE_BYTES 2, 1, 0, 3
SHUFFLE_BYTES 0, 3, 2, 1
SHUFFLE_BYTES 1, 2, 3, 0
SHUFFLE_BYTES 3, 0, 1, 2
SHUFFLE_BYTES 3, 2, 1, 0
98 ;-----------------------------------------------------------------------------------------------
99 ; uyvytoyuv422(uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
100 ; const uint8_t *src, int width, int height,
101 ; int lumStride, int chromStride, int srcStride)
102 ;-----------------------------------------------------------------------------------------------
; Deinterleave packed UYVY 4:2:2 (byte order per quad: U, Y0, V, Y1) into
; planar Y, U and V buffers, line by line.
; NOTE(review): many interior lines of this macro (loop labels, branches,
; width negation / counter setup, the mask register m1 initialization and
; %endmacro) are not visible in this chunk; comments below only describe
; the visible instructions.
%macro UYVY_TO_YUV422 0
; 9 arguments, 14 GPRs, 8 vector registers
cglobal uyvytoyuv422, 9, 14, 8, ydst, udst, vdst, src, w, h, lum_stride, chrom_stride, src_stride, wtwo, whalf, tmp, x, back_w
    ; sign-extend the 32-bit int stride arguments to pointer width when needed
    movsxdifnidn lum_strideq, lum_strided
    movsxdifnidn chrom_strideq, chrom_strided
    movsxdifnidn src_strideq, src_strided

    shr whalfq, 1 ; whalf = width / 2 (chroma samples per line)

    ; advance src by w*2 bytes (UYVY stores 2 bytes per luma sample);
    ; presumably the loops below index with negative offsets — confirm,
    ; the counter-negation lines are not visible here
    lea srcq, [srcq + wq * 2]

    add wtwoq, wtwoq ; wtwo = width * 2

    ;calc scalar loop count (pixels left over after the vector loop)
    and xq, mmsize * 2 - 1

    ; scalar tail: demux one UYVY quad (U, Y0, V, Y1) per iteration
    mov tmpb, [srcq + wtwoq + 0]    ; U
    mov [udstq + whalfq], tmpb

    mov tmpb, [srcq + wtwoq + 1]    ; Y0
    mov [ydstq + wq], tmpb

    mov tmpb, [srcq + wtwoq + 2]    ; V
    mov [vdstq + whalfq], tmpb

    mov tmpb, [srcq + wtwoq + 3]    ; Y1
    mov [ydstq + wq + 1], tmpb

    ; check if simd loop is needed
    ; vector body: 4 input vectors -> 2 luma vectors + 1 U + 1 V vector.
    ; m1 is presumably a per-word 0x00FF byte mask set outside this view
    ; (consistent with the Yx/Ux/Vx comments below) — confirm.
    movu m2, [srcq + wtwoq ]
    movu m3, [srcq + wtwoq + mmsize ]
    movu m4, [srcq + wtwoq + mmsize * 2]
    movu m5, [srcq + wtwoq + mmsize * 3]

    ; luma extraction: shift Y bytes down to even positions, mask, pack
    RSHIFT_COPY m6, m2, 1 ; UYVY UYVY -> YVYU YVY...
    pand m6, m1; YxYx YxYx...

    RSHIFT_COPY m7, m3, 1 ; UYVY UYVY -> YVYU YVY...
    pand m7, m1 ; YxYx YxYx...

    packuswb m6, m7 ; YYYY YYYY...
    movu [ydstq + wq], m6

    RSHIFT_COPY m6, m4, 1 ; UYVY UYVY -> YVYU YVY...
    pand m6, m1; YxYx YxYx...

    RSHIFT_COPY m7, m5, 1 ; UYVY UYVY -> YVYU YVY...
    pand m7, m1 ; YxYx YxYx...

    packuswb m6, m7 ; YYYY YYYY...
    movu [ydstq + wq + mmsize], m6

    ; chroma extraction: keep only even-positioned bytes (U and V)
    pand m2, m1 ; UxVx...
    pand m3, m1 ; UxVx...
    pand m4, m1 ; UxVx...
    pand m5, m1 ; UxVx...

    packuswb m2, m3 ; UVUV...
    packuswb m4, m5 ; UVUV...

    ; split interleaved UV: mask selects U (even bytes)...
    pand m6, m2, m1 ; UxUx...
    pand m7, m4, m1 ; UxUx...

    packuswb m6, m7 ; UUUU
    movu [udstq + whalfq], m6

    ; ...word-shift selects V (odd bytes)
    psrlw m2, 8 ; VxVx...
    psrlw m4, 8 ; VxVx...
    packuswb m2, m4 ; VVVV
    movu [vdstq + whalfq], m2

    add wtwoq, mmsize * 4 ; consumed 4 vectors of input this iteration

    ; advance all planes to the next line
    add srcq, src_strideq
    add ydstq, lum_strideq
    add udstq, chrom_strideq
    add vdstq, chrom_strideq

    ;restore initial state of line variable
    shr whalfq, 1 ; whalf = width / 2