/*****************************************************************************
 * copy.c: Fast YV12/NV12 copy
 *****************************************************************************
 * Copyright (C) 2010 Laurent Aimar
 *
 * Authors: Laurent Aimar <fenrir _AT_ videolan _DOT_ org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <vlc_common.h>
#include <vlc_cpu.h>
#include <vlc_picture.h>

#include <assert.h>

#include "copy.h"
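
/* The SSE2 fast paths below copy surfaces in two passes: rows are first
 * streamed from the (possibly uncacheable) source surface into a small
 * 16-byte aligned bounce buffer (copy_cache_t), then copied or
 * de-interleaved from that buffer into the destination picture with
 * ordinary cacheable accesses. */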

int CopyInitCache(copy_cache_t *cache, unsigned width)
{
#ifdef CAN_COMPILE_SSE2
    /* Bounce buffer for at least one padded row (4096 bytes minimum),
     * rounded up to a multiple of 16 so SSE accesses stay aligned */
    cache->size = __MAX((width + 0x0f) & ~ 0x0f, 4096);
    cache->buffer = vlc_memalign(16, cache->size);
    if (!cache->buffer)
        return VLC_EGENERIC;
#else
    (void) cache; (void) width;
#endif
    return VLC_SUCCESS;
}

void CopyCleanCache(copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    vlc_free(cache->buffer);
    cache->buffer = NULL;
    cache->size   = 0;
#else
    (void) cache;
#endif
}
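
/* Typical cache lifecycle, as seen from a caller (illustrative sketch
 * only; `fmt', `planes' and `pitches' are hypothetical caller-side
 * variables):
 *
 *     copy_cache_t cache;
 *     if (CopyInitCache(&cache, fmt->i_width))
 *         return VLC_EGENERIC;
 *     // for each decoded frame:
 *     CopyFromNv12(picture, planes, pitches, fmt->i_width, fmt->i_height,
 *                  &cache);
 *     // on close:
 *     CopyCleanCache(&cache);
 */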

#ifdef CAN_COMPILE_SSE2

/* Copy 64 bytes from srcp to dstp, loading the data with the SSE>=2
 * instruction "load" and storing it with the SSE>=2 instruction "store".
 */
#define COPY64(dstp, srcp, load, store) \
    asm volatile (                      \
        load "  0(%[src]), %%xmm1\n"    \
        load " 16(%[src]), %%xmm2\n"    \
        load " 32(%[src]), %%xmm3\n"    \
        load " 48(%[src]), %%xmm4\n"    \
        store " %%xmm1,  0(%[dst])\n"   \
        store " %%xmm2, 16(%[dst])\n"   \
        store " %%xmm3, 32(%[dst])\n"   \
        store " %%xmm4, 48(%[dst])\n"   \
        : : [dst]"r"(dstp), [src]"r"(srcp) : "memory", "xmm1", "xmm2", "xmm3", "xmm4")
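
/* COPY64 takes the load and store mnemonics as parameters: the callers
 * below pair "movntdqa" (streaming load, SSE4.1) or "movdqa" (aligned
 * load) with "movdqa", "movdqu" or "movntdq" stores, depending on buffer
 * alignment and on whether the destination should bypass the cache. */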

#ifndef __SSE4_1__
# undef  vlc_CPU_SSE4_1
# define vlc_CPU_SSE4_1() ((cpu & VLC_CPU_SSE4_1) != 0)
#endif

#ifndef __SSSE3__
# undef  vlc_CPU_SSSE3
# define vlc_CPU_SSSE3() ((cpu & VLC_CPU_SSSE3) != 0)
#endif

#ifndef __SSE2__
# undef  vlc_CPU_SSE2
# define vlc_CPU_SSE2() ((cpu & VLC_CPU_SSE2) != 0)
#endif
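
/* Unless the whole file is already compiled for the instruction set in
 * question, the redefinitions above turn the vlc_CPU_*() checks into
 * plain tests of the `cpu' capability mask that each caller fetched once
 * with vlc_CPU(), keeping the hot loops free of per-call CPU probes. */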

/* Optimized copy from "Uncacheable Speculative Write Combining" memory
 * as used by some video surfaces.
 * XXX It is really efficient only when SSE4.1 is available.
 */
static void CopyFromUswc(uint8_t *dst, size_t dst_pitch,
                         const uint8_t *src, size_t src_pitch,
                         unsigned width, unsigned height,
                         unsigned cpu)
{
    assert(((intptr_t)dst & 0x0f) == 0 && (dst_pitch & 0x0f) == 0);
    asm volatile ("mfence");

    for (unsigned y = 0; y < height; y++) {
        const unsigned unaligned = (-(uintptr_t)src) & 0x0f;
        unsigned x = 0;

        /* Copy leading bytes until src is 16-byte aligned */
        for (; x < unaligned; x++)
            dst[x] = src[x];

#ifdef CAN_COMPILE_SSE4_1
        if (vlc_CPU_SSE4_1()) {
            if (!unaligned)
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movntdqa", "movdqa");
            else
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movntdqa", "movdqu");
        } else
#endif
        {
            if (!unaligned)
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movdqa", "movdqa");
            else
                for (; x+63 < width; x += 64)
                    COPY64(&dst[x], &src[x], "movdqa", "movdqu");
        }

        /* Copy the trailing bytes */
        for (; x < width; x++)
            dst[x] = src[x];

        src += src_pitch;
        dst += dst_pitch;
    }
}
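
/* Copy rows from the 16-byte aligned cache to the destination plane,
 * using non-temporal stores (movntdq) when the destination is aligned so
 * the copied pixels do not evict useful data from the CPU caches. */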
static void Copy2d(uint8_t *dst, size_t dst_pitch,
                   const uint8_t *src, size_t src_pitch,
                   unsigned width, unsigned height)
{
    assert(((intptr_t)src & 0x0f) == 0 && (src_pitch & 0x0f) == 0);
    asm volatile ("mfence");

    for (unsigned y = 0; y < height; y++) {
        unsigned x = 0;
        bool unaligned = ((intptr_t)dst & 0x0f) != 0;
        if (!unaligned) {
            for (; x+63 < width; x += 64)
                COPY64(&dst[x], &src[x], "movdqa", "movntdq");
        } else {
            for (; x+63 < width; x += 64)
                COPY64(&dst[x], &src[x], "movdqa", "movdqu");
        }
        for (; x < width; x++)
            dst[x] = src[x];

        src += src_pitch;
        dst += dst_pitch;
    }
}
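
/* De-interleave an NV12-style UV plane into separate U and V planes.
 * The SSSE3 path splits each 16-byte block with a single pshufb against
 * the `shuffle' constant; the plain SSE2 path gets the same effect with
 * mask/shift/pack (pand, psrlw, packuswb) at a higher instruction count. */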
static void SSE_SplitUV(uint8_t *dstu, size_t dstu_pitch,
                        uint8_t *dstv, size_t dstv_pitch,
                        const uint8_t *src, size_t src_pitch,
                        unsigned width, unsigned height, unsigned cpu)
{
    const uint8_t shuffle[] = { 0, 2, 4, 6, 8, 10, 12, 14,
                                1, 3, 5, 7, 9, 11, 13, 15 };
    const uint8_t mask[] = { 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
                             0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00 };

    assert(((intptr_t)src & 0x0f) == 0 && (src_pitch & 0x0f) == 0);
    asm volatile ("mfence");

    for (unsigned y = 0; y < height; y++) {
        unsigned x = 0;

#define LOAD64 \
    "movdqa  0(%[src]), %%xmm0\n" \
    "movdqa 16(%[src]), %%xmm1\n" \
    "movdqa 32(%[src]), %%xmm2\n" \
    "movdqa 48(%[src]), %%xmm3\n"

#define STORE2X32 \
    "movq   %%xmm0,  0(%[dst1])\n" \
    "movq   %%xmm1,  8(%[dst1])\n" \
    "movhpd %%xmm0,  0(%[dst2])\n" \
    "movhpd %%xmm1,  8(%[dst2])\n" \
    "movq   %%xmm2, 16(%[dst1])\n" \
    "movq   %%xmm3, 24(%[dst1])\n" \
    "movhpd %%xmm2, 16(%[dst2])\n" \
    "movhpd %%xmm3, 24(%[dst2])\n"

#ifdef CAN_COMPILE_SSSE3
        if (vlc_CPU_SSSE3()) {
            for (x = 0; x < (width & ~31); x += 32) {
                asm volatile (
                    "movdqu (%[shuffle]), %%xmm7\n"
                    LOAD64
                    "pshufb  %%xmm7, %%xmm0\n"
                    "pshufb  %%xmm7, %%xmm1\n"
                    "pshufb  %%xmm7, %%xmm2\n"
                    "pshufb  %%xmm7, %%xmm3\n"
                    STORE2X32
                    : : [dst1]"r"(&dstu[x]), [dst2]"r"(&dstv[x]), [src]"r"(&src[2*x]), [shuffle]"r"(shuffle) : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm7");
            }
        } else
#endif
        {
            /* Keep even (U) bytes with pand, shift odd (V) bytes down
             * with psrlw, then repack both halves with packuswb */
            for (x = 0; x < (width & ~31); x += 32) {
                asm volatile (
                    "movdqu (%[mask]), %%xmm7\n"
                    LOAD64
                    "movdqa   %%xmm0, %%xmm4\n"
                    "movdqa   %%xmm1, %%xmm5\n"
                    "movdqa   %%xmm2, %%xmm6\n"
                    "psrlw    $8,     %%xmm0\n"
                    "psrlw    $8,     %%xmm1\n"
                    "pand     %%xmm7, %%xmm4\n"
                    "pand     %%xmm7, %%xmm5\n"
                    "pand     %%xmm7, %%xmm6\n"
                    "packuswb %%xmm4, %%xmm0\n"
                    "packuswb %%xmm5, %%xmm1\n"
                    "pand     %%xmm3, %%xmm7\n"
                    "psrlw    $8,     %%xmm2\n"
                    "psrlw    $8,     %%xmm3\n"
                    "packuswb %%xmm6, %%xmm2\n"
                    "packuswb %%xmm7, %%xmm3\n"
                    STORE2X32
                    : : [dst2]"r"(&dstu[x]), [dst1]"r"(&dstv[x]), [src]"r"(&src[2*x]), [mask]"r"(mask) : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7");
            }
        }
#undef STORE2X32
#undef LOAD64

        for (; x < width; x++) {
            dstu[x] = src[2*x+0];
            dstv[x] = src[2*x+1];
        }
        src  += src_pitch;
        dstu += dstu_pitch;
        dstv += dstv_pitch;
    }
}
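
/* Copy a whole plane through the bounce buffer, hstep rows at a time:
 * each iteration streams up to hstep rows into the cache with
 * CopyFromUswc(), then writes them out with Copy2d(). w16 is the row
 * width rounded up to 16 bytes so every cached row stays aligned. */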
static void SSE_CopyPlane(uint8_t *dst, size_t dst_pitch,
                          const uint8_t *src, size_t src_pitch,
                          uint8_t *cache, size_t cache_size,
                          unsigned width, unsigned height, unsigned cpu)
{
    const unsigned w16 = (width+15) & ~15;
    const unsigned hstep = cache_size / w16;
    assert(hstep > 0);

    for (unsigned y = 0; y < height; y += hstep) {
        const unsigned hblock = __MIN(hstep, height - y);

        /* Copy a bunch of lines into our cache */
        CopyFromUswc(cache, w16,
                     src, src_pitch,
                     width, hblock, cpu);

        /* Copy from our cache to the destination */
        Copy2d(dst, dst_pitch,
               cache, w16,
               width, hblock);

        src += src_pitch * hblock;
        dst += dst_pitch * hblock;
    }
    asm volatile ("mfence");
}
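
/* Same tiling scheme as SSE_CopyPlane(), except that the cached rows
 * hold interleaved UV samples (hence the 2*width pitch) and are split
 * into two planar outputs by SSE_SplitUV(). */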
static void SSE_SplitPlanes(uint8_t *dstu, size_t dstu_pitch,
                            uint8_t *dstv, size_t dstv_pitch,
                            const uint8_t *src, size_t src_pitch,
                            uint8_t *cache, size_t cache_size,
                            unsigned width, unsigned height, unsigned cpu)
{
    const unsigned w2_16 = (2*width+15) & ~15;
    const unsigned hstep = cache_size / w2_16;
    assert(hstep > 0);

    for (unsigned y = 0; y < height; y += hstep) {
        const unsigned hblock = __MIN(hstep, height - y);

        /* Copy a bunch of lines into our cache */
        CopyFromUswc(cache, w2_16, src, src_pitch,
                     2*width, hblock, cpu);

        /* Split the cached lines into the U and V destinations */
        SSE_SplitUV(dstu, dstu_pitch, dstv, dstv_pitch,
                    cache, w2_16, width, hblock, cpu);

        src  += src_pitch  * hblock;
        dstu += dstu_pitch * hblock;
        dstv += dstv_pitch * hblock;
    }
    asm volatile ("mfence");
}
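
/* Note the swapped chroma destinations below: U samples go to dst->p[2]
 * and V samples to dst->p[1], matching the plane order of a YV12
 * destination picture (Y, V, U). */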
static void SSE_CopyFromNv12(picture_t *dst,
                             uint8_t *src[2], size_t src_pitch[2],
                             unsigned width, unsigned height,
                             copy_cache_t *cache, unsigned cpu)
{
    SSE_CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
                  src[0], src_pitch[0],
                  cache->buffer, cache->size,
                  width, height, cpu);
    SSE_SplitPlanes(dst->p[2].p_pixels, dst->p[2].i_pitch,
                    dst->p[1].p_pixels, dst->p[1].i_pitch,
                    src[1], src_pitch[1],
                    cache->buffer, cache->size,
                    width/2, height/2, cpu);
    asm volatile ("emms");
}

static void SSE_CopyFromYv12(picture_t *dst,
                             uint8_t *src[3], size_t src_pitch[3],
                             unsigned width, unsigned height,
                             copy_cache_t *cache, unsigned cpu)
{
    for (unsigned n = 0; n < 3; n++) {
        /* The chroma planes are half the luma size in both dimensions */
        const unsigned d = n > 0 ? 2 : 1;
        SSE_CopyPlane(dst->p[n].p_pixels, dst->p[n].i_pitch,
                      src[n], src_pitch[n],
                      cache->buffer, cache->size,
                      width/d, height/d, cpu);
    }
    asm volatile ("emms");
}

#undef COPY64

#endif /* CAN_COMPILE_SSE2 */
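
/* Plain C fallbacks, used when SSE2 is not available at compile time or
 * at run time. */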
static void CopyPlane(uint8_t *dst, size_t dst_pitch,
                      const uint8_t *src, size_t src_pitch,
                      unsigned width, unsigned height)
{
    for (unsigned y = 0; y < height; y++) {
        memcpy(dst, src, width);
        src += src_pitch;
        dst += dst_pitch;
    }
}

static void SplitPlanes(uint8_t *dstu, size_t dstu_pitch,
                        uint8_t *dstv, size_t dstv_pitch,
                        const uint8_t *src, size_t src_pitch,
                        unsigned width, unsigned height)
{
    for (unsigned y = 0; y < height; y++) {
        for (unsigned x = 0; x < width; x++) {
            dstu[x] = src[2*x+0];
            dstv[x] = src[2*x+1];
        }
        src  += src_pitch;
        dstu += dstu_pitch;
        dstv += dstv_pitch;
    }
}
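
/* Public entry points: dispatch to the SSE2 fast path when the CPU
 * supports it at run time, otherwise fall back to the memcpy-based
 * routines above. */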
void CopyFromNv12(picture_t *dst, uint8_t *src[2], size_t src_pitch[2],
                  unsigned width, unsigned height,
                  copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromNv12(dst, src, src_pitch, width, height,
                                cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0],
              width, height);
    SplitPlanes(dst->p[2].p_pixels, dst->p[2].i_pitch,
                dst->p[1].p_pixels, dst->p[1].i_pitch,
                src[1], src_pitch[1],
                width/2, height/2);
}

void CopyFromYv12(picture_t *dst, uint8_t *src[3], size_t src_pitch[3],
                  unsigned width, unsigned height,
                  copy_cache_t *cache)
{
#ifdef CAN_COMPILE_SSE2
    unsigned cpu = vlc_CPU();
    if (vlc_CPU_SSE2())
        return SSE_CopyFromYv12(dst, src, src_pitch, width, height,
                                cache, cpu);
#else
    (void) cache;
#endif

    CopyPlane(dst->p[0].p_pixels, dst->p[0].i_pitch,
              src[0], src_pitch[0], width, height);
    CopyPlane(dst->p[1].p_pixels, dst->p[1].i_pitch,
              src[1], src_pitch[1], width / 2, height / 2);
    CopyPlane(dst->p[2].p_pixels, dst->p[2].i_pitch,
              src[2], src_pitch[2], width / 2, height / 2);
}