2 * yuv2rgb.c, Software YUV to RGB converter
4 * Copyright (C) 1999, Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
7 * Functions broken out from display_x11.c and several new modes
8 * added by Håkan Hjort <d95hjort@dtek.chalmers.se>
10 * 15 & 16 bpp support by Franck Sicard <Franck.Sicard@solsoft.fr>
12 * This file is part of mpeg2dec, a free MPEG-2 video decoder
14 * mpeg2dec is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
19 * mpeg2dec is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
24 * You should have received a copy of the GNU General Public License
25 * along with mpeg2dec; see the file COPYING. If not, write to
26 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
28 * MMX/MMX2 Template stuff from Michael Niedermayer (michaelni@gmx.at) (needed for fast movntq support)
29 * 1,4,8bpp support by Michael Niedermayer (michaelni@gmx.at)
37 //#include "video_out.h"
39 #include "../cpudetect.h"
40 #include "../mangle.h"
41 #include "../mp_msg.h"
44 #include "yuv2rgb_mlib.c"
47 #define DITHER1XBPP // only for mmx
50 #define CAN_COMPILE_X86_ASM
/* 2x2 ordered-dither threshold matrices. The byte patterns match the MMX
 * `dither4` / `dither8` uint64 tables further down in this file.
 * NOTE(review): the closing "};" of each initializer is not visible in this
 * excerpt. */
53 const uint8_t __attribute__((aligned(8))) dither_2x2_4[2][8]={
54 { 1, 3, 1, 3, 1, 3, 1, 3, },
55 { 2, 0, 2, 0, 2, 0, 2, 0, },
/* Same 2x2 pattern scaled up (thresholds 0..6 in steps of 2). */
58 const uint8_t __attribute__((aligned(8))) dither_2x2_8[2][8]={
59 { 6, 2, 6, 2, 6, 2, 6, 2, },
60 { 0, 4, 0, 4, 0, 4, 0, 4, },
/* 8x8 ordered-dither matrix, thresholds 0..31. Indexed as `d32` by
 * yuv2rgb_c_8_ordered_dither below. */
63 const uint8_t __attribute__((aligned(8))) dither_8x8_32[8][8]={
64 { 17, 9, 23, 15, 16, 8, 22, 14, },
65 { 5, 29, 3, 27, 4, 28, 2, 26, },
66 { 21, 13, 19, 11, 20, 12, 18, 10, },
67 { 0, 24, 6, 30, 1, 25, 7, 31, },
68 { 16, 8, 22, 14, 17, 9, 23, 15, },
69 { 4, 28, 2, 26, 5, 29, 3, 27, },
70 { 20, 12, 18, 10, 21, 13, 19, 11, },
71 { 1, 25, 7, 31, 0, 24, 6, 30, },
/* 8x8 ordered-dither matrix, thresholds 0..63. */
75 const uint8_t __attribute__((aligned(8))) dither_8x8_64[8][8]={
76 { 0, 48, 12, 60, 3, 51, 15, 63, },
77 { 32, 16, 44, 28, 35, 19, 47, 31, },
78 { 8, 56, 4, 52, 11, 59, 7, 55, },
79 { 40, 24, 36, 20, 43, 27, 39, 23, },
80 { 2, 50, 14, 62, 1, 49, 13, 61, },
81 { 34, 18, 46, 30, 33, 17, 45, 29, },
82 { 10, 58, 6, 54, 9, 57, 5, 53, },
83 { 42, 26, 38, 22, 41, 25, 37, 21, },
/* 8x8 ordered-dither matrix, thresholds 0..72. Indexed as `d64` by the
 * 8bpp/4bpp dithering converters below. */
87 const uint8_t __attribute__((aligned(8))) dither_8x8_73[8][8]={
88 { 0, 55, 14, 68, 3, 58, 17, 72, },
89 { 37, 18, 50, 32, 40, 22, 54, 35, },
90 { 9, 64, 5, 59, 13, 67, 8, 63, },
91 { 46, 27, 41, 23, 49, 31, 44, 26, },
92 { 2, 57, 16, 71, 1, 56, 15, 70, },
93 { 39, 21, 52, 34, 38, 19, 51, 33, },
94 { 11, 66, 7, 62, 10, 65, 6, 60, },
95 { 48, 30, 43, 25, 47, 29, 42, 24, },
/* 8x8 ordered-dither matrix, even thresholds 0..126. */
99 const uint8_t __attribute__((aligned(8))) dither_8x8_128[8][8]={
100 { 68, 36, 92, 60, 66, 34, 90, 58, },
101 { 20, 116, 12, 108, 18, 114, 10, 106, },
102 { 84, 52, 76, 44, 82, 50, 74, 42, },
103 { 0, 96, 24, 120, 6, 102, 30, 126, },
104 { 64, 32, 88, 56, 70, 38, 94, 62, },
105 { 16, 112, 8, 104, 22, 118, 14, 110, },
106 { 80, 48, 72, 40, 86, 54, 78, 46, },
107 { 4, 100, 28, 124, 2, 98, 26, 122, },
/* 8x8 ordered-dither matrix, thresholds up to 217 ("220 levels"). Indexed
 * as `d128` by the 4bpp/1bpp dithering converters below. */
112 const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
113 {117, 62, 158, 103, 113, 58, 155, 100, },
114 { 34, 199, 21, 186, 31, 196, 17, 182, },
115 {144, 89, 131, 76, 141, 86, 127, 72, },
116 { 0, 165, 41, 206, 10, 175, 52, 217, },
117 {110, 55, 151, 96, 120, 65, 162, 107, },
118 { 28, 193, 14, 179, 38, 203, 24, 189, },
119 {138, 83, 124, 69, 148, 93, 134, 79, },
120 { 7, 172, 48, 213, 3, 168, 45, 210, },
/* NOTE(review): this and the two tables that follow reuse the identifier
 * `dither_8x8_220`; as shown they would be multiply-defined. They are
 * presumably alternative variants selected/disabled by preprocessor
 * conditionals in lines elided from this excerpt — verify against the
 * full file. */
123 // tries to correct a gamma of 1.5
124 const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
125 { 0, 143, 18, 200, 2, 156, 25, 215, },
126 { 78, 28, 125, 64, 89, 36, 138, 74, },
127 { 10, 180, 3, 161, 16, 195, 8, 175, },
128 {109, 51, 93, 38, 121, 60, 105, 47, },
129 { 1, 152, 23, 210, 0, 147, 20, 205, },
130 { 85, 33, 134, 71, 81, 30, 130, 67, },
131 { 14, 190, 6, 171, 12, 185, 5, 166, },
132 {117, 57, 101, 44, 113, 54, 97, 41, },
/* NOTE(review): gamma-2.0 variant reusing the identifier `dither_8x8_220`;
 * presumably disabled via preprocessor conditionals elided from this
 * excerpt — verify against the full file. */
135 // tries to correct a gamma of 2.0
136 const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
137 { 0, 124, 8, 193, 0, 140, 12, 213, },
138 { 55, 14, 104, 42, 66, 19, 119, 52, },
139 { 3, 168, 1, 145, 6, 187, 3, 162, },
140 { 86, 31, 70, 21, 99, 39, 82, 28, },
141 { 0, 134, 11, 206, 0, 129, 9, 200, },
142 { 62, 17, 114, 48, 58, 16, 109, 45, },
143 { 5, 181, 2, 157, 4, 175, 1, 151, },
144 { 95, 36, 78, 26, 90, 34, 74, 24, },
/* NOTE(review): gamma-2.5 variant reusing the identifier `dither_8x8_220`;
 * presumably disabled via preprocessor conditionals elided from this
 * excerpt — verify against the full file. */
147 // tries to correct a gamma of 2.5
148 const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
149 { 0, 107, 3, 187, 0, 125, 6, 212, },
150 { 39, 7, 86, 28, 49, 11, 102, 36, },
151 { 1, 158, 0, 131, 3, 180, 1, 151, },
152 { 68, 19, 52, 12, 81, 25, 64, 17, },
153 { 0, 119, 5, 203, 0, 113, 4, 195, },
154 { 45, 9, 96, 33, 42, 8, 91, 30, },
155 { 2, 172, 1, 144, 2, 165, 0, 137, },
156 { 77, 23, 60, 15, 72, 21, 56, 14, },
/* x86-only global constants referenced by name from the inline-asm code in
 * yuv2rgb_template.c (so their names and linkage must not change). */
160 #ifdef CAN_COMPILE_X86_ASM
162 /* hope these constant values are cache line aligned */
/* Packed-word constants for the YUV->RGB arithmetic.
 * NOTE(review): these four lack an LL suffix; wide hex literals are given a
 * sufficiently wide type by C99, but older compilers may warn — verify. */
163 uint64_t __attribute__((aligned(8))) mmx_80w = 0x0080008000800080;
164 uint64_t __attribute__((aligned(8))) mmx_10w = 0x1010101010101010;
165 uint64_t __attribute__((aligned(8))) mmx_00ffw = 0x00ff00ff00ff00ff;
166 uint64_t __attribute__((aligned(8))) mmx_Y_coeff = 0x253f253f253f253f;
168 /* hope these constant values are cache line aligned */
/* Chroma coefficients (U->green/blue, V->red/green) as packed signed words. */
169 uint64_t __attribute__((aligned(8))) mmx_U_green = 0xf37df37df37df37d;
170 uint64_t __attribute__((aligned(8))) mmx_U_blue = 0x4093409340934093;
171 uint64_t __attribute__((aligned(8))) mmx_V_red = 0x3312331233123312;
172 uint64_t __attribute__((aligned(8))) mmx_V_green = 0xe5fce5fce5fce5fc;
174 /* hope these constant values are cache line aligned */
/* Bit masks keeping the top 5 (red/blue) and top 6 (green) bits per byte,
 * for packing into 15/16 bpp. */
175 uint64_t __attribute__((aligned(8))) mmx_redmask = 0xf8f8f8f8f8f8f8f8;
176 uint64_t __attribute__((aligned(8))) mmx_grnmask = 0xfcfcfcfcfcfcfcfc;
/* Byte-select masks used by the 24 bpp packing code. */
178 uint64_t __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFLL;
179 uint64_t __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00LL;
180 uint64_t __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000LL;
182 // the volatile is required because gcc otherwise optimizes some writes away not knowing that these
183 // are read in the asm block
/* Per-row dither values written by C code and read inside asm blocks. */
184 volatile uint64_t __attribute__((aligned(8))) b5Dither;
185 volatile uint64_t __attribute__((aligned(8))) g5Dither;
186 volatile uint64_t __attribute__((aligned(8))) g6Dither;
187 volatile uint64_t __attribute__((aligned(8))) r5Dither;
/* Packed forms of dither_2x2_4 above (rows {1,3,...} and {2,0,...}). */
189 uint64_t __attribute__((aligned(8))) dither4[2]={
190 0x0103010301030103LL,
191 0x0200020002000200LL,};
/* Packed forms of dither_2x2_8 above (rows {6,2,...} and {0,4,...}). */
193 uint64_t __attribute__((aligned(8))) dither8[2]={
194 0x0602060206020602LL,
195 0x0004000400040004LL,};
/* Instantiate the converter template twice: once with every function name
 * suffixed _MMX, once suffixed _MMX2 (the MMX2 build differs via other
 * macros defined in lines elided from this excerpt).
 * NOTE(review): an `#undef RENAME` between the two inclusions is presumably
 * present in the elided lines — verify, otherwise this is a redefinition. */
206 #define RENAME(a) a ## _MMX
207 #include "yuv2rgb_template.c"
215 #define RENAME(a) a ## _MMX2
216 #include "yuv2rgb_template.c"
218 #endif // CAN_COMPILE_X86_ASM
/* Index into Inverse_Table_6_9 below; 6 selects the ITU-R 624-4 B,G /
 * SMPTE 170M coefficient set. */
220 uint32_t matrix_coefficients = 6;
/* YCbCr->RGB coefficient sets {crv, cbu, cgu, cgv}, indexed by the MPEG-2
 * matrix_coefficients value; rows annotated per the MPEG-2 semantics. */
222 const int32_t Inverse_Table_6_9[8][4] = {
223 {117504, 138453, 13954, 34903}, /* no sequence_display_extension */
224 {117504, 138453, 13954, 34903}, /* ITU-R Rec. 709 (1990) */
225 {104597, 132201, 25675, 53279}, /* unspecified */
226 {104597, 132201, 25675, 53279}, /* reserved */
227 {104448, 132798, 24759, 53109}, /* FCC */
228 {104597, 132201, 25675, 53279}, /* ITU-R Rec. 624-4 System B, G */
229 {104597, 132201, 25675, 53279}, /* SMPTE 170M */
230 {117579, 136230, 16907, 35559} /* SMPTE 240M (1987) */
/* Forward declaration; fills the four caller-provided 256-entry tables.
 * Note table_gV really is an int[] of byte offsets, not pointers (see the
 * final fill loop at the end of yuv2rgb_c_init). */
233 void *yuv2rgb_c_init (unsigned bpp, int mode, void *table_rV[256], void *table_gU[256], int table_gV[256], void *table_bU[256]);
/* Public entry point; set by yuv2rgb_init() to the fastest available
 * implementation (MMX2/MMX/mlib/C). */
235 yuv2rgb_fun yuv2rgb= NULL;
/* Row-pair worker the generic C wrapper dispatches to: converts two luma
 * rows sharing one chroma row (args: py_1, py_2, pu, pv, dst_1, dst_2,
 * h_size, v_pos). */
237 static void (* yuv2rgb_c_internal) (uint8_t *, uint8_t *,
238 uint8_t *, uint8_t *,
239 void *, void *, int, int);
/* Generic C converter: walks the image two rows at a time (4:2:0 chroma is
 * shared between row pairs) and hands each pair to yuv2rgb_c_internal.
 * NOTE(review): the surrounding loop and stride advancement for py/pu/pv
 * are in lines elided from this excerpt. */
241 static void yuv2rgb_c (void * dst, uint8_t * py,
242 uint8_t * pu, uint8_t * pv,
243 unsigned h_size, unsigned v_size,
244 unsigned rgb_stride, unsigned y_stride, unsigned uv_stride)
249 yuv2rgb_c_internal (py, py + y_stride, pu, pv, dst, dst + rgb_stride,
255 dst += 2 * rgb_stride;
/* Global lookup tables for the C path: one pointer per possible chroma
 * byte value. (The matching `table_gV` int array is declared in a line
 * elided from this excerpt.) */
259 void * table_rV[256];
260 void * table_gU[256];
262 void * table_bU[256];
/* Selects a converter implementation for the given bpp/mode, trying in
 * order: MMX2, MMX, mlib, then the plain C fallback. Each accelerated init
 * may return NULL, in which case the next candidate is tried. */
264 void yuv2rgb_init (unsigned bpp, int mode)
267 #ifdef CAN_COMPILE_X86_ASM
270 if (yuv2rgb == NULL /*&& (config.flags & VO_MMX_ENABLE)*/) {
271 yuv2rgb = yuv2rgb_init_MMX2 (bpp, mode);
273 mp_msg(MSGT_SWS,MSGL_INFO,"Using MMX2 for colorspace transform\n");
275 mp_msg(MSGT_SWS,MSGL_WARN,"Cannot init MMX2 colorspace transform\n");
278 else if(gCpuCaps.hasMMX)
280 if (yuv2rgb == NULL /*&& (config.flags & VO_MMX_ENABLE)*/) {
281 yuv2rgb = yuv2rgb_init_MMX (bpp, mode);
283 mp_msg(MSGT_SWS,MSGL_INFO,"Using MMX for colorspace transform\n");
285 mp_msg(MSGT_SWS,MSGL_WARN,"Cannot init MMX colorspace transform\n");
290 if (yuv2rgb == NULL /*&& (config.flags & VO_MLIB_ENABLE)*/) {
291 yuv2rgb = yuv2rgb_init_mlib (bpp, mode);
293 mp_msg(MSGT_SWS,MSGL_INFO,"Using mlib for colorspace transform\n");
/* Last resort: table-driven C implementation (always succeeds). */
296 if (yuv2rgb == NULL) {
297 mp_msg(MSGT_SWS,MSGL_INFO,"No accelerated colorspace conversion found\n");
298 yuv2rgb_c_init (bpp, mode, table_rV, table_gU, table_gV, table_bU);
299 yuv2rgb = (yuv2rgb_fun)yuv2rgb_c;
/* Pixel-store macro fragments shared by the C converters. The common RGB(i)
 * lookup combines the green table pointer with the table_gV byte offset;
 * the DST* macros then write two horizontal pixels per call, summing the
 * per-component table entries (each table already holds the component
 * pre-shifted into its bit position). Macro heads and some continuation
 * lines are elided from this excerpt. */
307 g = table_gU[U] + table_gV[V]; \
312 dst_1[2*i] = r[Y] + g[Y] + b[Y]; \
314 dst_1[2*i+1] = r[Y] + g[Y] + b[Y];
/* Same for the second (lower) row of the pair. */
318 dst_2[2*i] = r[Y] + g[Y] + b[Y]; \
320 dst_2[2*i+1] = r[Y] + g[Y] + b[Y];
/* 24 bpp RGB order: 3 bytes per pixel, two pixels per call. */
324 dst_1[6*i] = r[Y]; dst_1[6*i+1] = g[Y]; dst_1[6*i+2] = b[Y]; \
326 dst_1[6*i+3] = r[Y]; dst_1[6*i+4] = g[Y]; dst_1[6*i+5] = b[Y];
330 dst_2[6*i] = r[Y]; dst_2[6*i+1] = g[Y]; dst_2[6*i+2] = b[Y]; \
332 dst_2[6*i+3] = r[Y]; dst_2[6*i+4] = g[Y]; dst_2[6*i+5] = b[Y];
/* 24 bpp BGR order: identical except the byte order is reversed. */
336 dst_1[6*i] = b[Y]; dst_1[6*i+1] = g[Y]; dst_1[6*i+2] = r[Y]; \
338 dst_1[6*i+3] = b[Y]; dst_1[6*i+4] = g[Y]; dst_1[6*i+5] = r[Y];
342 dst_2[6*i] = b[Y]; dst_2[6*i+1] = g[Y]; dst_2[6*i+2] = r[Y]; \
344 dst_2[6*i+3] = b[Y]; dst_2[6*i+4] = g[Y]; dst_2[6*i+5] = r[Y];
/* 32 bpp C converter for one row pair; body largely elided in this
 * excerpt. r/g/b point into the uint32_t lookup tables built by
 * yuv2rgb_c_init. */
346 static void yuv2rgb_c_32 (uint8_t * py_1, uint8_t * py_2,
347 uint8_t * pu, uint8_t * pv,
348 void * _dst_1, void * _dst_2, int h_size, int v_pos)
351 uint32_t * r, * g, * b;
352 uint32_t * dst_1, * dst_2;
384 // This is very near from the yuv2rgb_c_32 code
/* 24 bpp RGB-order converter for one row pair; body largely elided in this
 * excerpt. */
385 static void yuv2rgb_c_24_rgb (uint8_t * py_1, uint8_t * py_2,
386 uint8_t * pu, uint8_t * pv,
387 void * _dst_1, void * _dst_2, int h_size, int v_pos)
390 uint8_t * r, * g, * b;
391 uint8_t * dst_1, * dst_2;
423 // only trivial mods from yuv2rgb_c_24_rgb
/* 24 bpp BGR-order converter for one row pair; body largely elided in this
 * excerpt. */
424 static void yuv2rgb_c_24_bgr (uint8_t * py_1, uint8_t * py_2,
425 uint8_t * pu, uint8_t * pv,
426 void * _dst_1, void * _dst_2, int h_size, int v_pos)
429 uint8_t * r, * g, * b;
430 uint8_t * dst_1, * dst_2;
462 // This is exactly the same code as yuv2rgb_c_32 except for the types of
463 // r, g, b, dst_1, dst_2
/* 15/16 bpp converter for one row pair; body largely elided in this
 * excerpt. */
464 static void yuv2rgb_c_16 (uint8_t * py_1, uint8_t * py_2,
465 uint8_t * pu, uint8_t * pv,
466 void * _dst_1, void * _dst_2, int h_size, int v_pos)
469 uint16_t * r, * g, * b;
470 uint16_t * dst_1, * dst_2;
502 // This is exactly the same code as yuv2rgb_c_32 except for the types of
503 // r, g, b, dst_1, dst_2
/* Undithered 8 bpp converter for one row pair; body largely elided in this
 * excerpt. */
504 static void yuv2rgb_c_8 (uint8_t * py_1, uint8_t * py_2,
505 uint8_t * pu, uint8_t * pv,
506 void * _dst_1, void * _dst_2, int h_size, int v_pos)
509 uint8_t * r, * g, * b;
510 uint8_t * dst_1, * dst_2;
542 // r, g, b, dst_1, dst_2
/* 8 bpp converter with ordered dithering. Each Y sample is biased by a
 * per-pixel dither threshold before the table lookup: d32 (32 levels) for
 * red/green, d64 (dither_8x8_73) for blue. v_pos selects the matrix row. */
543 static void yuv2rgb_c_8_ordered_dither (uint8_t * py_1, uint8_t * py_2,
544 uint8_t * pu, uint8_t * pv,
545 void * _dst_1, void * _dst_2, int h_size, int v_pos)
548 uint8_t * r, * g, * b;
549 uint8_t * dst_1, * dst_2;
556 const uint8_t *d32= dither_8x8_32[v_pos&7];
557 const uint8_t *d64= dither_8x8_73[v_pos&7];
/* Write two dithered pixels of the upper row (o is the column offset). */
558 #define DST1bpp8(i,o) \
560 dst_1[2*i] = r[Y+d32[0+o]] + g[Y+d32[0+o]] + b[Y+d64[0+o]]; \
562 dst_1[2*i+1] = r[Y+d32[1+o]] + g[Y+d32[1+o]] + b[Y+d64[1+o]];
/* Same for the lower row, using the next dither-matrix row (+8). */
564 #define DST2bpp8(i,o) \
566 dst_2[2*i] = r[Y+d32[8+o]] + g[Y+d32[8+o]] + b[Y+d64[8+o]]; \
568 dst_2[2*i+1] = r[Y+d32[9+o]] + g[Y+d32[9+o]] + b[Y+d64[9+o]];
597 // This is exactly the same code as yuv2rgb_c_32 except for the types of
598 // r, g, b, dst_1, dst_2
/* Undithered 4 bpp converter: packs two pixels per output byte via `acc`
 * (low nibble first, second pixel shifted into the high nibble). Macro
 * heads and the store of `acc` are elided from this excerpt. */
599 static void yuv2rgb_c_4 (uint8_t * py_1, uint8_t * py_2,
600 uint8_t * pu, uint8_t * pv,
601 void * _dst_1, void * _dst_2, int h_size, int v_pos)
604 uint8_t * r, * g, * b;
605 uint8_t * dst_1, * dst_2;
615 acc = r[Y] + g[Y] + b[Y]; \
617 acc |= (r[Y] + g[Y] + b[Y])<<4;\
622 acc = r[Y] + g[Y] + b[Y]; \
624 acc |= (r[Y] + g[Y] + b[Y])<<4;\
/* 4 bpp converter with ordered dithering: d128 (dither_8x8_220) biases the
 * red/blue lookups, d64 (dither_8x8_73) the green lookup; two pixels are
 * packed per output byte via `acc`. */
652 static void yuv2rgb_c_4_ordered_dither (uint8_t * py_1, uint8_t * py_2,
653 uint8_t * pu, uint8_t * pv,
654 void * _dst_1, void * _dst_2, int h_size, int v_pos)
657 uint8_t * r, * g, * b;
658 uint8_t * dst_1, * dst_2;
665 const uint8_t *d64= dither_8x8_73[v_pos&7];
666 const uint8_t *d128=dither_8x8_220[v_pos&7];
/* Pack two dithered upper-row pixels into one byte. */
669 #define DST1bpp4(i,o) \
671 acc = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \
673 acc |= (r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]])<<4;\
676 #define DST2bpp4(i,o) \
678 acc = r[Y+d128[8+o]] + g[Y+d64[8+o]] + b[Y+d128[8+o]]; \
680 acc |= (r[Y+d128[9+o]] + g[Y+d64[9+o]] + b[Y+d128[9+o]])<<4;\
709 // This is exactly the same code as yuv2rgb_c_32 except for the types of
710 // r, g, b, dst_1, dst_2
/* 4 bpp "byte per pixel" variant (one output byte per pixel, low nibble
 * used); body largely elided in this excerpt. */
711 static void yuv2rgb_c_4b (uint8_t * py_1, uint8_t * py_2,
712 uint8_t * pu, uint8_t * pv,
713 void * _dst_1, void * _dst_2, int h_size, int v_pos)
716 uint8_t * r, * g, * b;
717 uint8_t * dst_1, * dst_2;
/* Dithered variant of yuv2rgb_c_4b: same dither sources as the packed 4 bpp
 * path (d128 for red/blue, d64 for green) but one output byte per pixel. */
749 static void yuv2rgb_c_4b_ordered_dither (uint8_t * py_1, uint8_t * py_2,
750 uint8_t * pu, uint8_t * pv,
751 void * _dst_1, void * _dst_2, int h_size, int v_pos)
754 uint8_t * r, * g, * b;
755 uint8_t * dst_1, * dst_2;
762 const uint8_t *d64= dither_8x8_73[v_pos&7];
763 const uint8_t *d128=dither_8x8_220[v_pos&7];
/* Two dithered upper-row pixels, one byte each. */
765 #define DST1bpp4b(i,o) \
767 dst_1[2*i] = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \
769 dst_1[2*i+1] = r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]];
/* Lower row uses the next dither-matrix row (+8). */
771 #define DST2bpp4b(i,o) \
773 dst_2[2*i] = r[Y+d128[8+o]] + g[Y+d64[8+o]] + b[Y+d128[8+o]]; \
775 dst_2[2*i+1] = r[Y+d128[9+o]] + g[Y+d64[9+o]] + b[Y+d128[9+o]];
/* 1 bpp (monochrome) converter with ordered dithering. Only luma is used:
 * g is fixed to the neutral-chroma (U=V=128) green table, and each output
 * bit is shifted into the out_1/out_2 accumulators MSB-first
 * (out += out + bit). */
803 static void yuv2rgb_c_1_ordered_dither (uint8_t * py_1, uint8_t * py_2,
804 uint8_t * pu, uint8_t * pv,
805 void * _dst_1, void * _dst_2, int h_size, int v_pos)
809 uint8_t * dst_1, * dst_2;
814 g= table_gU[128] + table_gV[128];
817 const uint8_t *d128=dither_8x8_220[v_pos&7];
818 char out_1=0, out_2=0;
/* Append two dithered upper-row bits to the accumulator. */
820 #define DST1bpp1(i,o) \
822 out_1+= out_1 + g[Y+d128[0+o]]; \
824 out_1+= out_1 + g[Y+d128[1+o]];
/* Same for the lower row (dither rows +8). */
826 #define DST2bpp1(i,o) \
828 out_2+= out_2 + g[Y+d128[8+o]]; \
830 out_2+= out_2 + g[Y+d128[9+o]];
/* Round-to-nearest integer division; negative dividends are rounded
 * symmetrically (away from truncation bias). The branch selecting between
 * the two returns is elided from this excerpt — presumably on the sign of
 * the dividend. */
857 static int div_round (int dividend, int divisor)
860 return (dividend + (divisor>>1)) / divisor;
862 return -((-dividend + (divisor>>1)) / divisor);
/* Builds the lookup tables used by the table-driven C converters and
 * selects yuv2rgb_c_internal for the requested bpp/mode. The four
 * caller-provided arrays get one entry per possible chroma byte; table_gV
 * holds plain byte offsets (int) that are added to a table_gU pointer.
 * Returns the malloc'd table block (control flow around the switch on bpp
 * is elided from this excerpt). */
865 void *yuv2rgb_c_init (unsigned bpp, int mode, void *table_rV[256], void *table_gU[256], int table_gV[256], void *table_bU[256])
868 uint8_t table_Y[1024];
869 uint32_t *table_32 = 0;
870 uint16_t *table_16 = 0;
871 uint8_t *table_8 = 0;
872 uint8_t *table_332 = 0;
873 uint8_t *table_121 = 0;
874 uint8_t *table_1 = 0;
876 void *table_r = 0, *table_g = 0, *table_b = 0;
/* Coefficients for the selected matrix; green contributions are negated. */
879 int crv = Inverse_Table_6_9[matrix_coefficients][0];
880 int cbu = Inverse_Table_6_9[matrix_coefficients][1];
881 int cgu = -Inverse_Table_6_9[matrix_coefficients][2];
882 int cgv = -Inverse_Table_6_9[matrix_coefficients][3];
/* Clamped luma ramp, centred at index 384 so negative offsets stay
 * in-bounds. NOTE(review): the scale constant 76309 is presumably
 * (255/219)<<16 for the 16..235 studio range — verify. */
884 for (i = 0; i < 1024; i++) {
887 j = (76309 * (i - 384 - 16) + 32768) >> 16;
888 j = (j < 0) ? 0 : ((j > 255) ? 255 : j);
/* --- 32 bpp: each table entry holds one component pre-shifted so the
 * converters can simply add r+g+b. RGB vs BGR is handled by swapping the
 * red/blue shift amounts. --- */
894 yuv2rgb_c_internal = yuv2rgb_c_32;
896 table_start= table_32 = malloc ((197 + 2*682 + 256 + 132) * sizeof (uint32_t));
898 entry_size = sizeof (uint32_t);
899 table_r = table_32 + 197;
900 table_b = table_32 + 197 + 685;
901 table_g = table_32 + 197 + 2*682;
903 for (i = -197; i < 256+197; i++)
904 ((uint32_t *)table_r)[i] = table_Y[i+384] << ((mode==MODE_RGB) ? 16 : 0);
905 for (i = -132; i < 256+132; i++)
906 ((uint32_t *)table_g)[i] = table_Y[i+384] << 8;
907 for (i = -232; i < 256+232; i++)
908 ((uint32_t *)table_b)[i] = table_Y[i+384] << ((mode==MODE_RGB) ? 0 : 16);
/* --- 24 bpp: component order is handled inside the converters, so a
 * single shared byte table suffices. The inverted `!=` test below is the
 * deliberate replacement for the commented-out line above it. --- */
912 // yuv2rgb_c_internal = (mode==MODE_RGB) ? yuv2rgb_c_24_rgb : yuv2rgb_c_24_bgr;
913 yuv2rgb_c_internal = (mode!=MODE_RGB) ? yuv2rgb_c_24_rgb : yuv2rgb_c_24_bgr;
915 table_start= table_8 = malloc ((256 + 2*232) * sizeof (uint8_t));
917 entry_size = sizeof (uint8_t);
/* r, g and b all alias the same clamped-Y table here. */
918 table_r = table_g = table_b = table_8 + 232;
920 for (i = -232; i < 256+232; i++)
921 ((uint8_t * )table_b)[i] = table_Y[i+384];
/* --- 15/16 bpp: 5-6-5 (16 bpp) or 5-5-5 (15 bpp) packing; red/blue swap
 * shift positions depending on RGB vs BGR mode. --- */
926 yuv2rgb_c_internal = yuv2rgb_c_16;
928 table_start= table_16 = malloc ((197 + 2*682 + 256 + 132) * sizeof (uint16_t));
930 entry_size = sizeof (uint16_t);
931 table_r = table_16 + 197;
932 table_b = table_16 + 197 + 685;
933 table_g = table_16 + 197 + 2*682;
935 for (i = -197; i < 256+197; i++) {
936 int j = table_Y[i+384] >> 3;
938 if (mode == MODE_RGB)
939 j <<= ((bpp==16) ? 11 : 10);
941 ((uint16_t *)table_r)[i] = j;
943 for (i = -132; i < 256+132; i++) {
944 int j = table_Y[i+384] >> ((bpp==16) ? 2 : 3);
946 ((uint16_t *)table_g)[i] = j << 5;
948 for (i = -232; i < 256+232; i++) {
949 int j = table_Y[i+384] >> 3;
951 if (mode == MODE_BGR)
952 j <<= ((bpp==16) ? 11 : 10);
954 ((uint16_t *)table_b)[i] = j;
/* --- 8 bpp: 3-3-2 palette quantization (red/green to 8 levels via /36,
 * blue to 4 levels via /85), with the dithering converter. --- */
959 yuv2rgb_c_internal = yuv2rgb_c_8_ordered_dither; //yuv2rgb_c_8;
961 table_start= table_332 = malloc ((197 + 2*682 + 256 + 132) * sizeof (uint8_t));
963 entry_size = sizeof (uint8_t);
964 table_r = table_332 + 197;
965 table_b = table_332 + 197 + 685;
966 table_g = table_332 + 197 + 2*682;
968 for (i = -197; i < 256+197; i++) {
969 int j = (table_Y[i+384 - 16] + 18)/36;
971 if (mode == MODE_RGB)
974 ((uint8_t *)table_r)[i] = j;
976 for (i = -132; i < 256+132; i++) {
977 int j = (table_Y[i+384 - 16] + 18)/36;
979 if (mode == MODE_BGR)
982 ((uint8_t *)table_g)[i] = j << 2;
984 for (i = -232; i < 256+232; i++) {
985 int j = (table_Y[i+384 - 37] + 43)/85;
987 if (mode == MODE_BGR)
990 ((uint8_t *)table_b)[i] = j;
/* --- 4 bpp: 1-2-1 quantization (red/blue to 2 levels, green to 4);
 * packed vs byte-per-pixel converter chosen in elided branches. --- */
996 yuv2rgb_c_internal = yuv2rgb_c_4_ordered_dither; //yuv2rgb_c_4;
998 yuv2rgb_c_internal = yuv2rgb_c_4b_ordered_dither; //yuv2rgb_c_4;
1000 table_start= table_121 = malloc ((197 + 2*682 + 256 + 132) * sizeof (uint8_t));
1002 entry_size = sizeof (uint8_t);
1003 table_r = table_121 + 197;
1004 table_b = table_121 + 197 + 685;
1005 table_g = table_121 + 197 + 2*682;
1007 for (i = -197; i < 256+197; i++) {
1008 int j = table_Y[i+384 - 110] >> 7;
1010 if (mode == MODE_RGB)
1013 ((uint8_t *)table_r)[i] = j;
1015 for (i = -132; i < 256+132; i++) {
1016 int j = (table_Y[i+384 - 37]+ 43)/85;
1018 ((uint8_t *)table_g)[i] = j << 1;
1020 for (i = -232; i < 256+232; i++) {
1021 int j =table_Y[i+384 - 110] >> 7;
1023 if (mode == MODE_BGR)
1026 ((uint8_t *)table_b)[i] = j;
/* --- 1 bpp: luma-only threshold; only the green table is used (red/blue
 * are NULL) and the converter emits one bit per pixel. --- */
1031 yuv2rgb_c_internal = yuv2rgb_c_1_ordered_dither;
1033 table_start= table_1 = malloc (256*2 * sizeof (uint8_t));
1035 entry_size = sizeof (uint8_t);
1037 table_r = table_b = NULL;
1039 for (i = 0; i < 256+256; i++) {
1040 int j = table_Y[i + 384 - 110]>>7;
1042 ((uint8_t *)table_g)[i] = j;
1048 mp_msg(MSGT_SWS,MSGL_ERR,"%ibpp not supported by yuv2rgb\n", bpp);
/* Final fill: per-chroma-value pointers offset by the scaled coefficient.
 * table_gV stores byte offsets (already multiplied by entry_size) that the
 * converters add to a table_gU pointer. */
1052 for (i = 0; i < 256; i++) {
1053 table_rV[i] = table_r + entry_size * div_round (crv * (i-128), 76309);
1054 table_gU[i] = table_g + entry_size * div_round (cgu * (i-128), 76309);
1055 table_gV[i] = entry_size * div_round (cgv * (i-128), 76309);
1056 table_bU[i] = table_b + entry_size * div_round (cbu * (i-128), 76309);