3 * rgb2rgb.c, Software RGB to RGB converter
4 * pluralize by Software PAL8 to RGB converter
5 * Software YUV to YUV converter
6 * Software YUV to RGB converter
7 * Written by Nick Kurshev.
8 * palette stuff & yuv stuff by Michael
11 #include "../config.h"
13 #include "../mmx_defs.h"
/*
 * 8-byte-aligned bit-mask constants; each is loaded into an MMX register
 * with a single movq by the conversion loops below.  Every qword holds the
 * same mask replicated per pixel lane (two 32bit lanes or four 16bit lanes).
 */
static const uint64_t mask32   __attribute__((aligned(8))) = 0x00FFFFFF00FFFFFFULL; /* keep low 24 bits of each 32bit pixel */
static const uint64_t mask24l  __attribute__((aligned(8))) = 0x0000000000FFFFFFULL; /* low 24bit lane of a qword */
static const uint64_t mask24h  __attribute__((aligned(8))) = 0x0000FFFFFF000000ULL; /* middle 24bit lane of a qword */
static const uint64_t mask15b  __attribute__((aligned(8))) = 0x001F001F001F001FULL; /* 00000000 00011111 xxB */
static const uint64_t mask15rg __attribute__((aligned(8))) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000 RGx */
static const uint64_t mask15s  __attribute__((aligned(8))) = 0xFFE0FFE0FFE0FFE0ULL; /* everything but the blue field */
/* channel masks for packing two bgr32 pixels (one per dword) into rgb565 */
static const uint64_t red_16mask   __attribute__((aligned(8))) = 0x0000f8000000f800ULL;
static const uint64_t green_16mask __attribute__((aligned(8))) = 0x000007e0000007e0ULL;
static const uint64_t blue_16mask  __attribute__((aligned(8))) = 0x0000001f0000001fULL;
/*
 * BUGFIX: both 32bit halves must hold the same per-pixel mask, because the
 * rgb32to15/rgb24to15 loops keep one pixel per dword.  The previous values
 * (0x00007c000000f800 and 0x000003e0000007e0) still had the rgb565 masks in
 * their low halves, corrupting every even output pixel.
 */
static const uint64_t red_15mask   __attribute__((aligned(8))) = 0x00007c0000007c00ULL;
static const uint64_t green_15mask __attribute__((aligned(8))) = 0x000003e0000003e0ULL;
static const uint64_t blue_15mask  __attribute__((aligned(8))) = 0x0000001f0000001fULL;
/* rgb24to32: expand packed 24bit pixels to 32bit pixels (4th byte zeroed).
 * src_size is in bytes.  The MMX path below converts 8 pixels (24 src bytes
 * -> 32 dst bytes) per iteration; this view is a partial excerpt, some of
 * the surrounding loop/setup code is not visible here. */
30 void rgb24to32(const uint8_t *src,uint8_t *dst,unsigned src_size)
33 const uint8_t *s = src;
/* prime the cache before entering the conversion loop */
40 __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
/* round the end pointer down to a whole number of 4-register chunks */
41 mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*4))*(MMREG_SIZE*4));
/* mm7 = mask32: keeps only the low 24 bits of each 32bit lane */
42 __asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory");
/* never let the MMX loop read past the end of the source buffer */
43 if(mm_end == end) mm_end -= MMREG_SIZE*4;
/* each movd+punpckldq pair packs two 24bit pixels into one qword; the
 * byte-stream offsets are 0/3, 6/9, 12/15 and 18/21 */
49 "punpckldq 3%1, %%mm0\n\t"
51 "punpckldq 9%1, %%mm1\n\t"
52 "movd 12%1, %%mm2\n\t"
53 "punpckldq 15%1, %%mm2\n\t"
54 "movd 18%1, %%mm3\n\t"
55 "punpckldq 21%1, %%mm3\n\t"
/* zero the stray 4th byte that leaked in from the neighbouring pixel */
56 "pand %%mm7, %%mm0\n\t"
57 "pand %%mm7, %%mm1\n\t"
58 "pand %%mm7, %%mm2\n\t"
59 "pand %%mm7, %%mm3\n\t"
/* non-temporal stores: dst is write-only here, so bypass the cache */
60 MOVNTQ" %%mm0, %0\n\t"
61 MOVNTQ" %%mm1, 8%0\n\t"
62 MOVNTQ" %%mm2, 16%0\n\t"
/* flush write-combining buffers, then leave MMX state for the FPU */
70 __asm __volatile(SFENCE:::"memory");
71 __asm __volatile(EMMS:::"memory");
/* rgb32to24: drop the pad byte of every 32bit pixel, producing packed 24bit
 * pixels.  The MMX path repacks 2 qwords (4 src pixels) per iteration;
 * parts of the loop and setup code fall outside this excerpt. */
82 void rgb32to24(const uint8_t *src,uint8_t *dst,unsigned src_size)
85 const uint8_t *s = src;
92 __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
/* round end down to a whole number of 2-register chunks for the MMX loop */
93 mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
/* NOTE(review): mask24l/mask24h are presumably loaded into mm7/mm6 by the
 * movq lines not visible here — inferred from the pand usage below */
97 ::"m"(mask24l),"m"(mask24h):"memory");
103 "movq 8%1, %%mm1\n\t"
104 "movq %%mm0, %%mm2\n\t"
105 "movq %%mm1, %%mm3\n\t"
/* shift a copy right by 8 so the second pixel's 24 bits close the gap left
 * by the first pixel's discarded pad byte */
106 "psrlq $8, %%mm2\n\t"
107 "psrlq $8, %%mm3\n\t"
108 "pand %%mm7, %%mm0\n\t"
109 "pand %%mm7, %%mm1\n\t"
110 "pand %%mm6, %%mm2\n\t"
111 "pand %%mm6, %%mm3\n\t"
112 "por %%mm2, %%mm0\n\t"
113 "por %%mm3, %%mm1\n\t"
/* non-temporal store: dst is write-only */
114 MOVNTQ" %%mm0, %0\n\t"
122 __asm __volatile(SFENCE:::"memory");
123 __asm __volatile(EMMS:::"memory");
135 Original by Strepto/Astral
136 ported to gcc & bugfixed: A'rpi
137 MMX2, 3DNOW optimization by Nick Kurshev
138 32bit C version, and the and&add trick by Michael Niedermayer
/* rgb15to16: convert 15bit (x1r5g5b5) pixels to 16bit (r5g6b5): blue stays
 * in place, red and green move up by one bit.  Three variants follow — an
 * MMX loop, a 16bit-at-a-time C loop and a 32bit-at-a-time C loop; the
 * #ifdef lines selecting between them are not part of this excerpt. */
140 void rgb15to16(const uint8_t *src,uint8_t *dst,unsigned src_size)
/* walk both buffers from the end via a shared negative offset register */
143 register const char* s=src+src_size;
144 register char* d=dst+src_size;
145 register int offs=-src_size;
146 __asm __volatile(PREFETCH" %0"::"m"(*(s+offs)));
155 "movq 8%1, %%mm2\n\t"
156 "movq %%mm0, %%mm1\n\t"
157 "movq %%mm2, %%mm3\n\t"
/* NOTE(review): mm4 presumably holds mask15rg (loaded in setup not shown).
 * x + (x & 0x7FE0) doubles the red/green bits, i.e. shifts them left by 1 —
 * the same and&add trick as the 32bit C variant below */
158 "pand %%mm4, %%mm0\n\t"
159 "pand %%mm4, %%mm2\n\t"
160 "paddw %%mm1, %%mm0\n\t"
161 "paddw %%mm3, %%mm2\n\t"
162 MOVNTQ" %%mm0, %0\n\t"
169 __asm __volatile(SFENCE:::"memory");
170 __asm __volatile(EMMS:::"memory");
/* 16bit-at-a-time C variant */
173 const uint16_t *s1=( uint16_t * )src;
174 uint16_t *d1=( uint16_t * )dst;
/* NOTE(review): assigns a uint8_t* expression to a uint16_t* without a cast
 * — relies on the compiler merely warning about the pointer mismatch */
175 uint16_t *e=((uint8_t *)s1)+src_size;
177 register int x=*( s1++ );
/* bit layout of the two fields being separated: */
180 0111 1111 1110 0000=0x7FE0
181 00000000000001 1111=0x001F */
182 *( d1++ )=( x&0x001F )|( ( x&0x7FE0 )<<1 );
/* 32bit-at-a-time C variant: two pixels per iteration */
185 const unsigned *s1=( unsigned * )src;
186 unsigned *d1=( unsigned * )dst;
188 int size= src_size>>2;
189 for(i=0; i<size; i++)
191 register int x= s1[i];
192 // d1[i] = x + (x&0x7FE07FE0); //faster but need msbit =0 which might not allways be true
/* and&add trick: adding the masked RG bits to themselves shifts them left
 * one bit; the 0x7FFF7FFF mask keeps the carry out of the next pixel */
193 d1[i] = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
201 * Palette is assumed to contain bgr32
/**
 * Expands 8bit palette indices into 32bit pixels.
 * The palette is assumed to hold 256 four-byte (bgr32) entries; every output
 * pixel is the four palette bytes selected by the corresponding src index.
 */
void palette8torgb32(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
{
	unsigned i;
	for(i=0; i<num_pixels; i++)
	{
		/* copy the 4-byte palette entry; byte-wise copy stores exactly the
		 * same memory image as a 32bit word copy, on any endianness */
		const uint8_t *entry= palette + 4*src[i];
		dst[4*i+0]= entry[0];
		dst[4*i+1]= entry[1];
		dst[4*i+2]= entry[2];
		dst[4*i+3]= entry[3];
	}
}
211 * Palette is assumed to contain bgr32
/**
 * Expands 8bit palette indices into packed 24bit pixels.
 * The palette holds 256 four-byte (bgr32) entries; only the first three
 * bytes of each entry are emitted, so the output advances by 3 per pixel.
 */
void palette8torgb24(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
{
	unsigned i;
	/*
	a 32bit store per pixel would be faster, but it writes 1 byte too much
	and might cause alignment issues on some architectures:
		((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
	*/
	for(i=0; i<num_pixels; i++)
	{
		const uint8_t *entry= palette + 4*src[i];
		dst[3*i+0]= entry[0];
		dst[3*i+1]= entry[1];
		dst[3*i+2]= entry[2];
	}
}
/* rgb32to16: pack 32bit pixels (memory order B,G,R,pad — see the C fallback
 * below) into 16bit r5g6b5.  The MMX loop converts 4 pixels per iteration.
 * NOTE(review): mm7/mm6 are assumed to hold red_16mask/green_16mask, loaded
 * by setup code not visible here; blue_16mask is applied via operand %2. */
231 void rgb32to16(const uint8_t *src, uint8_t *dst, unsigned src_size)
234 const uint8_t *s = src;
235 const uint8_t *end,*mm_end;
236 uint16_t *d = (uint16_t *)dst;
/* the MMX loop only handles whole 2-register chunks */
238 mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
239 __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
243 ::"m"(red_16mask),"m"(green_16mask));
/* two pixels per register, one per dword */
249 "movd 4%1, %%mm3\n\t"
250 "punpckldq 8%1, %%mm0\n\t"
251 "punpckldq 12%1, %%mm3\n\t"
252 "movq %%mm0, %%mm1\n\t"
253 "movq %%mm0, %%mm2\n\t"
254 "movq %%mm3, %%mm4\n\t"
255 "movq %%mm3, %%mm5\n\t"
/* blue: >>3 (the pand with %2/blue_16mask is in lines not shown here) */
256 "psrlq $3, %%mm0\n\t"
257 "psrlq $3, %%mm3\n\t"
/* green: >>5 lands the bits in the 0x07e0 field */
260 "psrlq $5, %%mm1\n\t"
261 "psrlq $5, %%mm4\n\t"
262 "pand %%mm6, %%mm1\n\t"
263 "pand %%mm6, %%mm4\n\t"
/* red: >>8 lands the bits in the 0xf800 field */
264 "psrlq $8, %%mm2\n\t"
265 "psrlq $8, %%mm5\n\t"
266 "pand %%mm7, %%mm2\n\t"
267 "pand %%mm7, %%mm5\n\t"
268 "por %%mm1, %%mm0\n\t"
269 "por %%mm4, %%mm3\n\t"
270 "por %%mm2, %%mm0\n\t"
271 "por %%mm5, %%mm3\n\t"
/* merge the two pixel pairs into one qword of four 16bit pixels */
272 "psllq $16, %%mm3\n\t"
273 "por %%mm3, %%mm0\n\t"
274 MOVNTQ" %%mm0, %0\n\t"
275 :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
/* scalar tail for the pixels the MMX loop could not cover */
284 *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
286 __asm __volatile(SFENCE:::"memory");
287 __asm __volatile(EMMS:::"memory");
/* plain C fallback (non-MMX build) */
289 unsigned j,i,num_pixels=src_size/4;
290 uint16_t *d = (uint16_t *)dst;
291 for(i=0,j=0; j<num_pixels; i+=4,j++)
293 const int b= src[i+0];
294 const int g= src[i+1];
295 const int r= src[i+2];
297 d[j]= (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
/* rgb32to15: pack 32bit pixels (memory order B,G,R,pad — see the C fallback
 * below) into 15bit x1r5g5b5.  Same structure as rgb32to16 but with the
 * 15bit shift amounts (3/6/9) and masks.  NOTE(review): mm7/mm6 assumed to
 * hold red_15mask/green_15mask from setup code not visible here. */
302 void rgb32to15(const uint8_t *src, uint8_t *dst, unsigned src_size)
305 const uint8_t *s = src;
306 const uint8_t *end,*mm_end;
307 uint16_t *d = (uint16_t *)dst;
/* the MMX loop only handles whole 2-register chunks */
309 mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
310 __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
314 ::"m"(red_15mask),"m"(green_15mask));
/* two pixels per register, one per dword */
320 "movd 4%1, %%mm3\n\t"
321 "punpckldq 8%1, %%mm0\n\t"
322 "punpckldq 12%1, %%mm3\n\t"
323 "movq %%mm0, %%mm1\n\t"
324 "movq %%mm0, %%mm2\n\t"
325 "movq %%mm3, %%mm4\n\t"
326 "movq %%mm3, %%mm5\n\t"
/* blue: >>3 (the pand with %2/blue_15mask is in lines not shown here) */
327 "psrlq $3, %%mm0\n\t"
328 "psrlq $3, %%mm3\n\t"
/* green: >>6 lands the bits in the 0x03e0 field */
331 "psrlq $6, %%mm1\n\t"
332 "psrlq $6, %%mm4\n\t"
333 "pand %%mm6, %%mm1\n\t"
334 "pand %%mm6, %%mm4\n\t"
/* red: >>9 lands the bits in the 0x7c00 field */
335 "psrlq $9, %%mm2\n\t"
336 "psrlq $9, %%mm5\n\t"
337 "pand %%mm7, %%mm2\n\t"
338 "pand %%mm7, %%mm5\n\t"
339 "por %%mm1, %%mm0\n\t"
340 "por %%mm4, %%mm3\n\t"
341 "por %%mm2, %%mm0\n\t"
342 "por %%mm5, %%mm3\n\t"
/* merge the two pixel pairs into one qword of four 16bit pixels */
343 "psllq $16, %%mm3\n\t"
344 "por %%mm3, %%mm0\n\t"
345 MOVNTQ" %%mm0, %0\n\t"
346 :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
/* scalar tail for the pixels the MMX loop could not cover */
355 *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
357 __asm __volatile(SFENCE:::"memory");
358 __asm __volatile(EMMS:::"memory");
/* plain C fallback (non-MMX build) */
360 unsigned j,i,num_pixels=src_size/4;
361 uint16_t *d = (uint16_t *)dst;
362 for(i=0,j=0; j<num_pixels; i+=4,j++)
364 const int b= src[i+0];
365 const int g= src[i+1];
366 const int r= src[i+2];
368 d[j]= (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
/* rgb24to16: pack 24bit pixels (memory order B,G,R — see the C fallback
 * below) into 16bit r5g6b5.  Same conversion core as rgb32to16, but the two
 * pixels of each register are fetched from byte offsets 0/3 and 6/9.
 * NOTE(review): mm7/mm6 assumed to hold red_16mask/green_16mask from setup
 * code not visible here; blue_16mask is applied via operand %2. */
373 void rgb24to16(const uint8_t *src, uint8_t *dst, unsigned src_size)
376 const uint8_t *s = src;
377 const uint8_t *end,*mm_end;
378 uint16_t *d = (uint16_t *)dst;
380 mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
381 __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
385 ::"m"(red_16mask),"m"(green_16mask));
/* never let the 4-byte movd/punpckldq loads read past the source buffer */
386 if(mm_end == end) mm_end -= MMREG_SIZE*2;
/* two 24bit pixels per register, from offsets 0/3 and 6/9 */
392 "movd 3%1, %%mm3\n\t"
393 "punpckldq 6%1, %%mm0\n\t"
394 "punpckldq 9%1, %%mm3\n\t"
395 "movq %%mm0, %%mm1\n\t"
396 "movq %%mm0, %%mm2\n\t"
397 "movq %%mm3, %%mm4\n\t"
398 "movq %%mm3, %%mm5\n\t"
/* blue: >>3 (the pand with %2/blue_16mask is in lines not shown here) */
399 "psrlq $3, %%mm0\n\t"
400 "psrlq $3, %%mm3\n\t"
/* green: >>5 lands the bits in the 0x07e0 field */
403 "psrlq $5, %%mm1\n\t"
404 "psrlq $5, %%mm4\n\t"
405 "pand %%mm6, %%mm1\n\t"
406 "pand %%mm6, %%mm4\n\t"
/* red: >>8 lands the bits in the 0xf800 field */
407 "psrlq $8, %%mm2\n\t"
408 "psrlq $8, %%mm5\n\t"
409 "pand %%mm7, %%mm2\n\t"
410 "pand %%mm7, %%mm5\n\t"
411 "por %%mm1, %%mm0\n\t"
412 "por %%mm4, %%mm3\n\t"
413 "por %%mm2, %%mm0\n\t"
414 "por %%mm5, %%mm3\n\t"
/* merge the two pixel pairs into one qword of four 16bit pixels */
415 "psllq $16, %%mm3\n\t"
416 "por %%mm3, %%mm0\n\t"
417 MOVNTQ" %%mm0, %0\n\t"
418 :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
/* scalar tail for the pixels the MMX loop could not cover */
427 *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
429 __asm __volatile(SFENCE:::"memory");
430 __asm __volatile(EMMS:::"memory");
/* plain C fallback (non-MMX build) */
432 unsigned j,i,num_pixels=src_size/3;
433 uint16_t *d = (uint16_t *)dst;
434 for(i=0,j=0; j<num_pixels; i+=3,j++)
436 const int b= src[i+0];
437 const int g= src[i+1];
438 const int r= src[i+2];
440 d[j]= (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
/* rgb24to15: pack 24bit pixels (memory order B,G,R — see the C fallback
 * below) into 15bit x1r5g5b5.  Same structure as rgb24to16 with the 15bit
 * shift amounts (3/6/9) and masks.  NOTE(review): mm7/mm6 assumed to hold
 * red_15mask/green_15mask from setup code not visible here. */
445 void rgb24to15(const uint8_t *src, uint8_t *dst, unsigned src_size)
448 const uint8_t *s = src;
449 const uint8_t *end,*mm_end;
450 uint16_t *d = (uint16_t *)dst;
452 mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
453 __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
457 ::"m"(red_15mask),"m"(green_15mask));
/* never let the 4-byte movd/punpckldq loads read past the source buffer */
458 if(mm_end == end) mm_end -= MMREG_SIZE*2;
/* two 24bit pixels per register, from offsets 0/3 and 6/9 */
464 "movd 3%1, %%mm3\n\t"
465 "punpckldq 6%1, %%mm0\n\t"
466 "punpckldq 9%1, %%mm3\n\t"
467 "movq %%mm0, %%mm1\n\t"
468 "movq %%mm0, %%mm2\n\t"
469 "movq %%mm3, %%mm4\n\t"
470 "movq %%mm3, %%mm5\n\t"
/* blue: >>3 (the pand with %2/blue_15mask is in lines not shown here) */
471 "psrlq $3, %%mm0\n\t"
472 "psrlq $3, %%mm3\n\t"
/* green: >>6 lands the bits in the 0x03e0 field */
475 "psrlq $6, %%mm1\n\t"
476 "psrlq $6, %%mm4\n\t"
477 "pand %%mm6, %%mm1\n\t"
478 "pand %%mm6, %%mm4\n\t"
/* red: >>9 lands the bits in the 0x7c00 field */
479 "psrlq $9, %%mm2\n\t"
480 "psrlq $9, %%mm5\n\t"
481 "pand %%mm7, %%mm2\n\t"
482 "pand %%mm7, %%mm5\n\t"
483 "por %%mm1, %%mm0\n\t"
484 "por %%mm4, %%mm3\n\t"
485 "por %%mm2, %%mm0\n\t"
486 "por %%mm5, %%mm3\n\t"
/* merge the two pixel pairs into one qword of four 16bit pixels */
487 "psllq $16, %%mm3\n\t"
488 "por %%mm3, %%mm0\n\t"
489 MOVNTQ" %%mm0, %0\n\t"
490 :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
/* scalar tail for the pixels the MMX loop could not cover */
499 *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
501 __asm __volatile(SFENCE:::"memory");
502 __asm __volatile(EMMS:::"memory");
/* plain C fallback (non-MMX build) */
504 unsigned j,i,num_pixels=src_size/3;
505 uint16_t *d = (uint16_t *)dst;
506 for(i=0,j=0; j<num_pixels; i+=3,j++)
508 const int b= src[i+0];
509 const int g= src[i+1];
510 const int r= src[i+2];
512 d[j]= (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
518 * Palette is assumed to contain bgr16, see rgb32to16 to convert the palette
/**
 * Expands 8bit palette indices into 16bit pixels.
 * The palette is assumed to hold 256 bgr16 entries (see rgb32to16 for
 * converting a bgr32 palette); each output word is a straight table lookup.
 */
void palette8torgb16(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
{
	unsigned i;
	uint16_t *d= (uint16_t *)dst;
	const uint16_t *pal= (const uint16_t *)palette;
	for(i=0; i<num_pixels; i++)
		d[i]= pal[ src[i] ];
}
528 * Palette is assumed to contain bgr15, see rgb32to15 to convert the palette
/**
 * Expands 8bit palette indices into 15bit pixels.
 * The palette is assumed to hold 256 bgr15 entries (see rgb32to15 for
 * converting a bgr32 palette); each output word is a straight table lookup.
 */
void palette8torgb15(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
{
	unsigned i;
	uint16_t *d= (uint16_t *)dst;
	const uint16_t *pal= (const uint16_t *)palette;
	for(i=0; i<num_pixels; i++)
		d[i]= pal[ src[i] ];
}
538 * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
539 * problem for anyone then tell me, and I'll fix it)
/* yv12toyuy2: interleave planar YV12 (full-resolution Y plane plus
 * horizontally 2x-subsampled U and V planes) into packed YUY2.  The MMX
 * loop emits 16 Y / 8 U / 8 V samples (32 output bytes) per iteration;
 * %%eax counts chroma samples up to chromWidth (%4).  Loop labels and the
 * per-row pointer/stride stepping are outside this excerpt. */
541 void yv12toyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
542 unsigned int width, unsigned int height,
543 unsigned int lumStride, unsigned int chromStride, unsigned int dstStride)
/* two luma samples share one U,V pair */
546 const int chromWidth= width>>1;
547 for(y=0; y<height; y++)
550 //FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
552 "xorl %%eax, %%eax \n\t"
554 PREFETCH" 32(%1, %%eax, 2) \n\t"
555 PREFETCH" 32(%2, %%eax) \n\t"
556 PREFETCH" 32(%3, %%eax) \n\t"
557 "movq (%2, %%eax), %%mm0 \n\t" // U(0)
558 "movq %%mm0, %%mm2 \n\t" // U(0)
559 "movq (%3, %%eax), %%mm1 \n\t" // V(0)
/* build interleaved UVUV pairs from the separate U and V bytes */
560 "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
561 "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
563 "movq (%1, %%eax,2), %%mm3 \n\t" // Y(0)
564 "movq 8(%1, %%eax,2), %%mm5 \n\t" // Y(8)
565 "movq %%mm3, %%mm4 \n\t" // Y(0)
566 "movq %%mm5, %%mm6 \n\t" // Y(8)
/* interleave the Y bytes with the UV pairs -> YUYV packed pixels */
567 "punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0)
568 "punpckhbw %%mm0, %%mm4 \n\t" // YUYV YUYV(4)
569 "punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8)
570 "punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12)
/* non-temporal stores: 4 output bytes per chroma sample, hence the *4 */
572 MOVNTQ" %%mm3, (%0, %%eax, 4) \n\t"
573 MOVNTQ" %%mm4, 8(%0, %%eax, 4) \n\t"
574 MOVNTQ" %%mm5, 16(%0, %%eax, 4) \n\t"
575 MOVNTQ" %%mm6, 24(%0, %%eax, 4) \n\t"
577 "addl $8, %%eax \n\t"
578 "cmpl %4, %%eax \n\t"
580 ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "r" (chromWidth)
/* plain C fallback: one packed YUYV quad per chroma sample */
585 for(i=0; i<chromWidth; i++)
587 dst[4*i+0] = ysrc[2*i+0];
588 dst[4*i+1] = usrc[i];
589 dst[4*i+2] = ysrc[2*i+1];
590 dst[4*i+3] = vsrc[i];
610 * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
611 * problem for anyone then tell me, and I'll fix it)
613 void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
614 unsigned int width, unsigned int height,
615 unsigned int lumStride, unsigned int chromStride, unsigned int srcStride)
618 const int chromWidth= width>>1;
619 for(y=0; y<height; y+=2)
623 "xorl %%eax, %%eax \n\t"
624 "pcmpeqw %%mm7, %%mm7 \n\t"
625 "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
627 PREFETCH" 64(%0, %%eax, 4) \n\t"
628 "movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
629 "movq 8(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(4)
630 "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0)
631 "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4)
632 "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0)
633 "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4)
634 "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
635 "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
636 "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
637 "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
639 MOVNTQ" %%mm2, (%1, %%eax, 2) \n\t"
641 "movq 16(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(8)
642 "movq 24(%0, %%eax, 4), %%mm2 \n\t" // YUYV YUYV(12)
643 "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8)
644 "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12)
645 "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8)
646 "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12)
647 "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
648 "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
649 "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
650 "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
652 MOVNTQ" %%mm3, 8(%1, %%eax, 2) \n\t"
654 "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
655 "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
656 "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
657 "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
658 "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
659 "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
660 "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
661 "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
663 MOVNTQ" %%mm0, (%3, %%eax) \n\t"
664 MOVNTQ" %%mm2, (%2, %%eax) \n\t"
666 "addl $8, %%eax \n\t"
667 "cmpl %4, %%eax \n\t"
669 ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "r" (chromWidth)
674 "xorl %%eax, %%eax \n\t"
676 PREFETCH" 64(%0, %%eax, 4) \n\t"
677 "movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
678 "movq 8(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(4)
679 "movq 16(%0, %%eax, 4), %%mm2 \n\t" // YUYV YUYV(8)
680 "movq 24(%0, %%eax, 4), %%mm3 \n\t" // YUYV YUYV(12)
681 "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
682 "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
683 "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
684 "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
685 "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
686 "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
688 MOVNTQ" %%mm0, (%1, %%eax, 2) \n\t"
689 MOVNTQ" %%mm2, 8(%1, %%eax, 2) \n\t"
691 "addl $8, %%eax \n\t"
692 "cmpl %4, %%eax \n\t"
695 ::"r"(src+srcStride), "r"(ydst+lumStride), "r"(udst), "r"(vdst), "r" (chromWidth)
700 for(i=0; i<chromWidth; i++)
702 ydst[2*i+0] = src[4*i+0];
703 udst[i] = src[4*i+1];
704 ydst[2*i+1] = src[4*i+2];
705 vdst[i] = src[4*i+3];
710 for(i=0; i<chromWidth; i++)
712 ydst[2*i+0] = src[4*i+0];
713 ydst[2*i+1] = src[4*i+2];