2 * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 #include "libavutil/attributes.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/bswap.h"
30 #include "libavutil/cpu.h"
31 #include "libavutil/intreadwrite.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
37 #include "swscale_internal.h"
39 DECLARE_ALIGNED(8, const uint8_t, ff_dither_2x2_4)[][8] = {
40 { 1, 3, 1, 3, 1, 3, 1, 3, },
41 { 2, 0, 2, 0, 2, 0, 2, 0, },
42 { 1, 3, 1, 3, 1, 3, 1, 3, },
45 DECLARE_ALIGNED(8, const uint8_t, ff_dither_2x2_8)[][8] = {
46 { 6, 2, 6, 2, 6, 2, 6, 2, },
47 { 0, 4, 0, 4, 0, 4, 0, 4, },
48 { 6, 2, 6, 2, 6, 2, 6, 2, },
51 DECLARE_ALIGNED(8, const uint8_t, ff_dither_4x4_16)[][8] = {
52 { 8, 4, 11, 7, 8, 4, 11, 7, },
53 { 2, 14, 1, 13, 2, 14, 1, 13, },
54 { 10, 6, 9, 5, 10, 6, 9, 5, },
55 { 0, 12, 3, 15, 0, 12, 3, 15, },
56 { 8, 4, 11, 7, 8, 4, 11, 7, },
59 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_32)[][8] = {
60 { 17, 9, 23, 15, 16, 8, 22, 14, },
61 { 5, 29, 3, 27, 4, 28, 2, 26, },
62 { 21, 13, 19, 11, 20, 12, 18, 10, },
63 { 0, 24, 6, 30, 1, 25, 7, 31, },
64 { 16, 8, 22, 14, 17, 9, 23, 15, },
65 { 4, 28, 2, 26, 5, 29, 3, 27, },
66 { 20, 12, 18, 10, 21, 13, 19, 11, },
67 { 1, 25, 7, 31, 0, 24, 6, 30, },
68 { 17, 9, 23, 15, 16, 8, 22, 14, },
71 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_73)[][8] = {
72 { 0, 55, 14, 68, 3, 58, 17, 72, },
73 { 37, 18, 50, 32, 40, 22, 54, 35, },
74 { 9, 64, 5, 59, 13, 67, 8, 63, },
75 { 46, 27, 41, 23, 49, 31, 44, 26, },
76 { 2, 57, 16, 71, 1, 56, 15, 70, },
77 { 39, 21, 52, 34, 38, 19, 51, 33, },
78 { 11, 66, 7, 62, 10, 65, 6, 60, },
79 { 48, 30, 43, 25, 47, 29, 42, 24, },
80 { 0, 55, 14, 68, 3, 58, 17, 72, },
#if 1
84 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
85 {117, 62, 158, 103, 113, 58, 155, 100, },
86 { 34, 199, 21, 186, 31, 196, 17, 182, },
87 {144, 89, 131, 76, 141, 86, 127, 72, },
88 { 0, 165, 41, 206, 10, 175, 52, 217, },
89 {110, 55, 151, 96, 120, 65, 162, 107, },
90 { 28, 193, 14, 179, 38, 203, 24, 189, },
91 {138, 83, 124, 69, 148, 93, 134, 79, },
92 { 7, 172, 48, 213, 3, 168, 45, 210, },
93 {117, 62, 158, 103, 113, 58, 155, 100, },
#elif 1
96 // tries to correct a gamma of 1.5
97 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
98 { 0, 143, 18, 200, 2, 156, 25, 215, },
99 { 78, 28, 125, 64, 89, 36, 138, 74, },
100 { 10, 180, 3, 161, 16, 195, 8, 175, },
101 {109, 51, 93, 38, 121, 60, 105, 47, },
102 { 1, 152, 23, 210, 0, 147, 20, 205, },
103 { 85, 33, 134, 71, 81, 30, 130, 67, },
104 { 14, 190, 6, 171, 12, 185, 5, 166, },
105 {117, 57, 101, 44, 113, 54, 97, 41, },
106 { 0, 143, 18, 200, 2, 156, 25, 215, },
#elif 1
109 // tries to correct a gamma of 2.0
110 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
111 { 0, 124, 8, 193, 0, 140, 12, 213, },
112 { 55, 14, 104, 42, 66, 19, 119, 52, },
113 { 3, 168, 1, 145, 6, 187, 3, 162, },
114 { 86, 31, 70, 21, 99, 39, 82, 28, },
115 { 0, 134, 11, 206, 0, 129, 9, 200, },
116 { 62, 17, 114, 48, 58, 16, 109, 45, },
117 { 5, 181, 2, 157, 4, 175, 1, 151, },
118 { 95, 36, 78, 26, 90, 34, 74, 24, },
119 { 0, 124, 8, 193, 0, 140, 12, 213, },
#else
122 // tries to correct a gamma of 2.5
123 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
124 { 0, 107, 3, 187, 0, 125, 6, 212, },
125 { 39, 7, 86, 28, 49, 11, 102, 36, },
126 { 1, 158, 0, 131, 3, 180, 1, 151, },
127 { 68, 19, 52, 12, 81, 25, 64, 17, },
128 { 0, 119, 5, 203, 0, 113, 4, 195, },
129 { 45, 9, 96, 33, 42, 8, 91, 30, },
130 { 2, 172, 1, 144, 2, 165, 0, 137, },
131 { 77, 23, 60, 15, 72, 21, 56, 14, },
132 { 0, 107, 3, 187, 0, 125, 6, 212, },
#endif
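/* A note on the tables above (a description of the data, not a normative
 * spec): each dither matrix carries one extra row repeating its first row,
 * so code that indexes with [y & 1], [y & 3] or [y & 7] can also read the
 * following row without wrapping. The _4/_8/_16/_32/_73/_220 suffixes name
 * the approximate value range each table spans, matched to the error range
 * of the component depth being dithered. Only one ff_dither_8x8_220
 * variant is compiled in; the gamma-corrected alternatives are selected
 * with the preprocessor conditionals above. */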
136 #define output_pixel(pos, val, bias, signedness) \
138 AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
140 AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
143 static av_always_inline void
144 yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW,
145 int big_endian, int output_bits)
149 av_assert0(output_bits == 16);
151 for (i = 0; i < dstW; i++) {
152 int val = src[i] + (1 << (shift - 1));
153 output_pixel(&dest[i], val, 0, uint);
157 static av_always_inline void
158 yuv2planeX_16_c_template(const int16_t *filter, int filterSize,
159 const int32_t **src, uint16_t *dest, int dstW,
160 int big_endian, int output_bits)
164 av_assert0(output_bits == 16);
166 for (i = 0; i < dstW; i++) {
167 int val = 1 << (shift - 1);
170 /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
171 * filters (or anything with negative coeffs), the range can be slightly
172 * wider in both directions. To account for this overflow, we subtract
173 * a constant so it always fits in the signed range (assuming a
174 * reasonable filterSize), and re-add that at the end. */
176 for (j = 0; j < filterSize; j++)
177 val += src[j][i] * (unsigned)filter[j];
179 output_pixel(&dest[i], val, 0x8000, int);
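/* Worked example of the bias trick above, assuming this template's usual
 * shift of 15 (the shift initialization is not shown here): the pixel is
 * written as 0x8000 + av_clip_int16(val >> 15), which is equivalent to
 * av_clip_uint16((val >> 15) + 0x8000) but keeps every intermediate value
 * inside the signed 32-bit range that the (unsigned) multiply in the
 * accumulation above relies on. */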
185 #define output_pixel(pos, val) \
187 AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
189 AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
192 static av_always_inline void
193 yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW,
194 int big_endian, int output_bits)
197 int shift = 15 - output_bits;
199 for (i = 0; i < dstW; i++) {
200 int val = src[i] + (1 << (shift - 1));
201 output_pixel(&dest[i], val);
205 static av_always_inline void
206 yuv2planeX_10_c_template(const int16_t *filter, int filterSize,
207 const int16_t **src, uint16_t *dest, int dstW,
208 int big_endian, int output_bits)
211 int shift = 11 + 16 - output_bits;
213 for (i = 0; i < dstW; i++) {
214 int val = 1 << (shift - 1);
217 for (j = 0; j < filterSize; j++)
218 val += src[j][i] * filter[j];
220 output_pixel(&dest[i], val);
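/* Shift accounting for the 9-14 bit paths, as a worked example: scaled
 * samples are 15-bit, so yuv2plane1 uses shift = 15 - output_bits (5 for
 * 10-bit output), while the filtered sum in yuv2planeX also carries the
 * 12-bit filter scale (4096 = 1 << 12), hence shift = 11 + 16 - output_bits
 * (17 for 10-bit): a roughly 27-bit sum shifted right by 17 lands back in
 * [0, 1023] after the clip in output_pixel(). */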
226 #define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
227 static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
228 uint8_t *dest, int dstW, \
229 const uint8_t *dither, int offset)\
231 yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
232 (uint16_t *) dest, dstW, is_be, bits); \
234 static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
235 const int16_t **src, uint8_t *dest, int dstW, \
236 const uint8_t *dither, int offset)\
238 yuv2planeX_## template_size ## _c_template(filter, \
239 filterSize, (const typeX_t **) src, \
240 (uint16_t *) dest, dstW, is_be, bits); \
242 yuv2NBPS( 9, BE, 1, 10, int16_t)
243 yuv2NBPS( 9, LE, 0, 10, int16_t)
244 yuv2NBPS(10, BE, 1, 10, int16_t)
245 yuv2NBPS(10, LE, 0, 10, int16_t)
246 yuv2NBPS(12, BE, 1, 10, int16_t)
247 yuv2NBPS(12, LE, 0, 10, int16_t)
248 yuv2NBPS(14, BE, 1, 10, int16_t)
249 yuv2NBPS(14, LE, 0, 10, int16_t)
250 yuv2NBPS(16, BE, 1, 16, int32_t)
251 yuv2NBPS(16, LE, 0, 16, int32_t)
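/* For reference, yuv2NBPS(10, BE, 1, 10, int16_t) above expands to
 * (a sketch, modulo whitespace):
 *
 *     static void yuv2plane1_10BE_c(const int16_t *src, uint8_t *dest,
 *                                   int dstW, const uint8_t *dither,
 *                                   int offset)
 *     {
 *         yuv2plane1_10_c_template((const int16_t *) src,
 *                                  (uint16_t *) dest, dstW, 1, 10);
 *     }
 *
 * plus the matching yuv2planeX_10BE_c; bit depth and endianness become
 * compile-time constants, so each variant inlines and constant-folds the
 * shared template. */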
253 static void yuv2planeX_8_c(const int16_t *filter, int filterSize,
254 const int16_t **src, uint8_t *dest, int dstW,
255 const uint8_t *dither, int offset)
258 for (i=0; i<dstW; i++) {
259 int val = dither[(i + offset) & 7] << 12;
261 for (j=0; j<filterSize; j++)
262 val += src[j][i] * filter[j];
264 dest[i]= av_clip_uint8(val>>19);
268 static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW,
269 const uint8_t *dither, int offset)
272 for (i=0; i<dstW; i++) {
273 int val = (src[i] + dither[(i + offset) & 7]) >> 7;
274 dest[i]= av_clip_uint8(val);
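/* Worked example for the unscaled 8-bit path above: luma arrives as 15-bit
 * fixed point, so for mid-gray src[i] = 16384 and dither value 3 the output
 * is (16384 + 3) >> 7 = 128; the 8x8 dither table injects sub-LSB noise in
 * place of plain rounding before the truncation. */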
278 static void yuv2nv12cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterSize,
279 const int16_t **chrUSrc, const int16_t **chrVSrc,
280 uint8_t *dest, int chrDstW)
282 enum AVPixelFormat dstFormat = c->dstFormat;
283 const uint8_t *chrDither = c->chrDither8;
286 if (dstFormat == AV_PIX_FMT_NV12)
287 for (i=0; i<chrDstW; i++) {
288 int u = chrDither[i & 7] << 12;
289 int v = chrDither[(i + 3) & 7] << 12;
291 for (j=0; j<chrFilterSize; j++) {
292 u += chrUSrc[j][i] * chrFilter[j];
293 v += chrVSrc[j][i] * chrFilter[j];
296 dest[2*i]= av_clip_uint8(u>>19);
297 dest[2*i+1]= av_clip_uint8(v>>19);
300 for (i=0; i<chrDstW; i++) {
301 int u = chrDither[i & 7] << 12;
302 int v = chrDither[(i + 3) & 7] << 12;
304 for (j=0; j<chrFilterSize; j++) {
305 u += chrUSrc[j][i] * chrFilter[j];
306 v += chrVSrc[j][i] * chrFilter[j];
309 dest[2*i]= av_clip_uint8(v>>19);
310 dest[2*i+1]= av_clip_uint8(u>>19);
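/* The two loops above differ only in chroma byte order: NV12 interleaves
 * the chroma plane as U0 V0 U1 V1 ..., while the else branch (NV21) writes
 * V0 U0 V1 U1 ... Both share the 12-bit filter accumulation and the >> 19
 * renormalization used by the planar 8-bit case. */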
314 #define accumulate_bit(acc, val) \
317 #define output_pixel(pos, acc) \
318 if (target == AV_PIX_FMT_MONOBLACK) { \
324 static av_always_inline void
325 yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter,
326 const int16_t **lumSrc, int lumFilterSize,
327 const int16_t *chrFilter, const int16_t **chrUSrc,
328 const int16_t **chrVSrc, int chrFilterSize,
329 const int16_t **alpSrc, uint8_t *dest, int dstW,
330 int y, enum AVPixelFormat target)
332 const uint8_t * const d128 = ff_dither_8x8_220[y&7];
337 for (i = 0; i < dstW; i += 2) {
342 for (j = 0; j < lumFilterSize; j++) {
343 Y1 += lumSrc[j][i] * lumFilter[j];
344 Y2 += lumSrc[j][i+1] * lumFilter[j];
348 if ((Y1 | Y2) & 0x100) {
349 Y1 = av_clip_uint8(Y1);
350 Y2 = av_clip_uint8(Y2);
352 if (c->dither == SWS_DITHER_ED) {
353 Y1 += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
354 c->dither_error[0][i] = err;
355 acc = 2*acc + (Y1 >= 128);
358 err = Y2 + ((7*Y1 + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4);
359 c->dither_error[0][i+1] = Y1;
360 acc = 2*acc + (err >= 128);
363 accumulate_bit(acc, Y1 + d128[(i + 0) & 7]);
364 accumulate_bit(acc, Y2 + d128[(i + 1) & 7]);
367 output_pixel(*dest++, acc);
370 c->dither_error[0][i] = err;
373 output_pixel(*dest, acc);
377 static av_always_inline void
378 yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2],
379 const int16_t *ubuf[2], const int16_t *vbuf[2],
380 const int16_t *abuf[2], uint8_t *dest, int dstW,
381 int yalpha, int uvalpha, int y,
382 enum AVPixelFormat target)
384 const int16_t *buf0 = buf[0], *buf1 = buf[1];
385 const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
386 int yalpha1 = 4096 - yalpha;
388 av_assert2(yalpha <= 4096U);
390 if (c->dither == SWS_DITHER_ED) {
393 for (i = 0; i < dstW; i +=2) {
396 Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
397 Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
398 c->dither_error[0][i] = err;
399 acc = 2*acc + (Y >= 128);
402 err = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
403 err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
404 c->dither_error[0][i+1] = Y;
405 acc = 2*acc + (err >= 128);
409 output_pixel(*dest++, acc);
411 c->dither_error[0][i] = err;
413 for (i = 0; i < dstW; i += 8) {
416 Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
417 accumulate_bit(acc, Y + d128[0]);
418 Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
419 accumulate_bit(acc, Y + d128[1]);
420 Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
421 accumulate_bit(acc, Y + d128[2]);
422 Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
423 accumulate_bit(acc, Y + d128[3]);
424 Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
425 accumulate_bit(acc, Y + d128[4]);
426 Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
427 accumulate_bit(acc, Y + d128[5]);
428 Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
429 accumulate_bit(acc, Y + d128[6]);
430 Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
431 accumulate_bit(acc, Y + d128[7]);
433 output_pixel(*dest++, acc);
438 static av_always_inline void
439 yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0,
440 const int16_t *ubuf[2], const int16_t *vbuf[2],
441 const int16_t *abuf0, uint8_t *dest, int dstW,
442 int uvalpha, int y, enum AVPixelFormat target)
444 const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
447 if (c->dither == SWS_DITHER_ED) {
450 for (i = 0; i < dstW; i +=2) {
453 Y = ((buf0[i + 0] + 64) >> 7);
454 Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
455 c->dither_error[0][i] = err;
456 acc = 2*acc + (Y >= 128);
459 err = ((buf0[i + 1] + 64) >> 7);
460 err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
461 c->dither_error[0][i+1] = Y;
462 acc = 2*acc + (err >= 128);
466 output_pixel(*dest++, acc);
468 c->dither_error[0][i] = err;
470 for (i = 0; i < dstW; i += 8) {
472 accumulate_bit(acc, ((buf0[i + 0] + 64) >> 7) + d128[0]);
473 accumulate_bit(acc, ((buf0[i + 1] + 64) >> 7) + d128[1]);
474 accumulate_bit(acc, ((buf0[i + 2] + 64) >> 7) + d128[2]);
475 accumulate_bit(acc, ((buf0[i + 3] + 64) >> 7) + d128[3]);
476 accumulate_bit(acc, ((buf0[i + 4] + 64) >> 7) + d128[4]);
477 accumulate_bit(acc, ((buf0[i + 5] + 64) >> 7) + d128[5]);
478 accumulate_bit(acc, ((buf0[i + 6] + 64) >> 7) + d128[6]);
479 accumulate_bit(acc, ((buf0[i + 7] + 64) >> 7) + d128[7]);
481 output_pixel(*dest++, acc);
487 #undef accumulate_bit
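/* A reading of the SWS_DITHER_ED branches in the mono functions above:
 * they implement Floyd-Steinberg error diffusion with the classic
 * 7/16, 1/16, 5/16, 3/16 weights: 7*err is the neighbor to the left,
 * c->dither_error[0][i..i+2] carries the three contributions diffused
 * down from the previous line, and the "+ 8 - 256" term folds the >> 4
 * rounding bias together with this implementation's offset convention
 * before the >= 128 threshold decides the output bit. */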
489 #define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
490 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
491 const int16_t **lumSrc, int lumFilterSize, \
492 const int16_t *chrFilter, const int16_t **chrUSrc, \
493 const int16_t **chrVSrc, int chrFilterSize, \
494 const int16_t **alpSrc, uint8_t *dest, int dstW, \
497 name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
498 chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
499 alpSrc, dest, dstW, y, fmt); \
502 static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
503 const int16_t *ubuf[2], const int16_t *vbuf[2], \
504 const int16_t *abuf[2], uint8_t *dest, int dstW, \
505 int yalpha, int uvalpha, int y) \
507 name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
508 dest, dstW, yalpha, uvalpha, y, fmt); \
511 static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
512 const int16_t *ubuf[2], const int16_t *vbuf[2], \
513 const int16_t *abuf0, uint8_t *dest, int dstW, \
514 int uvalpha, int y) \
516 name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
517 abuf0, dest, dstW, uvalpha, \
521 YUV2PACKEDWRAPPER(yuv2mono,, white, AV_PIX_FMT_MONOWHITE)
522 YUV2PACKEDWRAPPER(yuv2mono,, black, AV_PIX_FMT_MONOBLACK)
524 #define output_pixels(pos, Y1, U, Y2, V) \
525 if (target == AV_PIX_FMT_YUYV422) { \
526 dest[pos + 0] = Y1; \
528 dest[pos + 2] = Y2; \
530 } else if (target == AV_PIX_FMT_YVYU422) { \
531 dest[pos + 0] = Y1; \
533 dest[pos + 2] = Y2; \
535 } else { /* AV_PIX_FMT_UYVY422 */ \
537 dest[pos + 1] = Y1; \
539 dest[pos + 3] = Y2; \
542 static av_always_inline void
543 yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
544 const int16_t **lumSrc, int lumFilterSize,
545 const int16_t *chrFilter, const int16_t **chrUSrc,
546 const int16_t **chrVSrc, int chrFilterSize,
547 const int16_t **alpSrc, uint8_t *dest, int dstW,
548 int y, enum AVPixelFormat target)
552 for (i = 0; i < ((dstW + 1) >> 1); i++) {
559 for (j = 0; j < lumFilterSize; j++) {
560 Y1 += lumSrc[j][i * 2] * lumFilter[j];
561 Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
563 for (j = 0; j < chrFilterSize; j++) {
564 U += chrUSrc[j][i] * chrFilter[j];
565 V += chrVSrc[j][i] * chrFilter[j];
571 if ((Y1 | Y2 | U | V) & 0x100) {
572 Y1 = av_clip_uint8(Y1);
573 Y2 = av_clip_uint8(Y2);
574 U = av_clip_uint8(U);
575 V = av_clip_uint8(V);
577 output_pixels(4*i, Y1, U, Y2, V);
581 static av_always_inline void
582 yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
583 const int16_t *ubuf[2], const int16_t *vbuf[2],
584 const int16_t *abuf[2], uint8_t *dest, int dstW,
585 int yalpha, int uvalpha, int y,
586 enum AVPixelFormat target)
588 const int16_t *buf0 = buf[0], *buf1 = buf[1],
589 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
590 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
591 int yalpha1 = 4096 - yalpha;
592 int uvalpha1 = 4096 - uvalpha;
594 av_assert2(yalpha <= 4096U);
595 av_assert2(uvalpha <= 4096U);
597 for (i = 0; i < ((dstW + 1) >> 1); i++) {
598 int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
599 int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
600 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
601 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
603 if ((Y1 | Y2 | U | V) & 0x100) {
604 Y1 = av_clip_uint8(Y1);
605 Y2 = av_clip_uint8(Y2);
606 U = av_clip_uint8(U);
607 V = av_clip_uint8(V);
610 output_pixels(i * 4, Y1, U, Y2, V);
614 static av_always_inline void
615 yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
616 const int16_t *ubuf[2], const int16_t *vbuf[2],
617 const int16_t *abuf0, uint8_t *dest, int dstW,
618 int uvalpha, int y, enum AVPixelFormat target)
620 const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
623 if (uvalpha < 2048) {
624 for (i = 0; i < ((dstW + 1) >> 1); i++) {
625 int Y1 = (buf0[i * 2 ]+64) >> 7;
626 int Y2 = (buf0[i * 2 + 1]+64) >> 7;
627 int U = (ubuf0[i] +64) >> 7;
628 int V = (vbuf0[i] +64) >> 7;
630 if ((Y1 | Y2 | U | V) & 0x100) {
631 Y1 = av_clip_uint8(Y1);
632 Y2 = av_clip_uint8(Y2);
633 U = av_clip_uint8(U);
634 V = av_clip_uint8(V);
637 Y1 = av_clip_uint8(Y1);
638 Y2 = av_clip_uint8(Y2);
639 U = av_clip_uint8(U);
640 V = av_clip_uint8(V);
642 output_pixels(i * 4, Y1, U, Y2, V);
645 const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
646 for (i = 0; i < ((dstW + 1) >> 1); i++) {
647 int Y1 = (buf0[i * 2 ] + 64) >> 7;
648 int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
649 int U = (ubuf0[i] + ubuf1[i]+128) >> 8;
650 int V = (vbuf0[i] + vbuf1[i]+128) >> 8;
652 if ((Y1 | Y2 | U | V) & 0x100) {
653 Y1 = av_clip_uint8(Y1);
654 Y2 = av_clip_uint8(Y2);
655 U = av_clip_uint8(U);
656 V = av_clip_uint8(V);
659 Y1 = av_clip_uint8(Y1);
660 Y2 = av_clip_uint8(Y2);
661 U = av_clip_uint8(U);
662 V = av_clip_uint8(V);
664 output_pixels(i * 4, Y1, U, Y2, V);
671 YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
672 YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
673 YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)
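/* Byte layout written by output_pixels() for one two-pixel group at
 * pos = 4*i (standard packed 4:2:2, one U/V pair shared by two lumas):
 *     YUYV422: Y1 U Y2 V
 *     YVYU422: Y1 V Y2 U
 *     UYVY422: U Y1 V Y2 */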
675 #define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? R : B)
676 #define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? B : R)
677 #define output_pixel(pos, val) \
678 if (isBE(target)) { \
684 static av_always_inline void
685 yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter,
686 const int32_t **lumSrc, int lumFilterSize,
687 const int16_t *chrFilter, const int32_t **chrUSrc,
688 const int32_t **chrVSrc, int chrFilterSize,
689 const int32_t **alpSrc, uint16_t *dest, int dstW,
690 int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
693 int A1 = 0xffff<<14, A2 = 0xffff<<14;
695 for (i = 0; i < ((dstW + 1) >> 1); i++) {
697 int Y1 = -0x40000000;
698 int Y2 = -0x40000000;
699 int U = -(128 << 23); // 19
700 int V = -(128 << 23);
703 for (j = 0; j < lumFilterSize; j++) {
704 Y1 += lumSrc[j][i * 2] * (unsigned)lumFilter[j];
705 Y2 += lumSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
707 for (j = 0; j < chrFilterSize; j++) {
708 U += chrUSrc[j][i] * (unsigned)chrFilter[j];
709 V += chrVSrc[j][i] * (unsigned)chrFilter[j];
715 for (j = 0; j < lumFilterSize; j++) {
716 A1 += alpSrc[j][i * 2] * (unsigned)lumFilter[j];
717 A2 += alpSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
725 // 8 bits: 12+15=27; 16 bits: 12+19=31
733 // 8 bits: 27 -> 17 bits, 16 bits: 31 - 14 = 17 bits
734 Y1 -= c->yuv2rgb_y_offset;
735 Y2 -= c->yuv2rgb_y_offset;
736 Y1 *= c->yuv2rgb_y_coeff;
737 Y2 *= c->yuv2rgb_y_coeff;
740 // 8 bits: 17 + 13 bits = 30 bits, 16 bits: 17 + 13 bits = 30 bits
742 R = V * c->yuv2rgb_v2r_coeff;
743 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
744 B = U * c->yuv2rgb_u2b_coeff;
746 // 8 bits: 30 - 22 = 8 bits, 16 bits: 30 bits - 14 = 16 bits
747 output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
748 output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
749 output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
751 output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
752 output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
753 output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
754 output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
755 output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
758 output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
759 output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
760 output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
766 static av_always_inline void
767 yuv2rgba64_2_c_template(SwsContext *c, const int32_t *buf[2],
768 const int32_t *ubuf[2], const int32_t *vbuf[2],
769 const int32_t *abuf[2], uint16_t *dest, int dstW,
770 int yalpha, int uvalpha, int y,
771 enum AVPixelFormat target, int hasAlpha, int eightbytes)
773 const int32_t *buf0 = buf[0], *buf1 = buf[1],
774 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
775 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
776 *abuf0 = hasAlpha ? abuf[0] : NULL,
777 *abuf1 = hasAlpha ? abuf[1] : NULL;
778 int yalpha1 = 4096 - yalpha;
779 int uvalpha1 = 4096 - uvalpha;
781 int A1 = 0xffff<<14, A2 = 0xffff<<14;
783 av_assert2(yalpha <= 4096U);
784 av_assert2(uvalpha <= 4096U);
786 for (i = 0; i < ((dstW + 1) >> 1); i++) {
787 int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
788 int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
789 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
790 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
793 Y1 -= c->yuv2rgb_y_offset;
794 Y2 -= c->yuv2rgb_y_offset;
795 Y1 *= c->yuv2rgb_y_coeff;
796 Y2 *= c->yuv2rgb_y_coeff;
800 R = V * c->yuv2rgb_v2r_coeff;
801 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
802 B = U * c->yuv2rgb_u2b_coeff;
805 A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 1;
806 A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 1;
812 output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
813 output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
814 output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
816 output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
817 output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
818 output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
819 output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
820 output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
823 output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
824 output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
825 output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
831 static av_always_inline void
832 yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0,
833 const int32_t *ubuf[2], const int32_t *vbuf[2],
834 const int32_t *abuf0, uint16_t *dest, int dstW,
835 int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
837 const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
839 int A1 = 0xffff<<14, A2= 0xffff<<14;
841 if (uvalpha < 2048) {
842 for (i = 0; i < ((dstW + 1) >> 1); i++) {
843 int Y1 = (buf0[i * 2] ) >> 2;
844 int Y2 = (buf0[i * 2 + 1]) >> 2;
845 int U = (ubuf0[i] - (128 << 11)) >> 2;
846 int V = (vbuf0[i] - (128 << 11)) >> 2;
849 Y1 -= c->yuv2rgb_y_offset;
850 Y2 -= c->yuv2rgb_y_offset;
851 Y1 *= c->yuv2rgb_y_coeff;
852 Y2 *= c->yuv2rgb_y_coeff;
857 A1 = abuf0[i * 2 ] << 11;
858 A2 = abuf0[i * 2 + 1] << 11;
864 R = V * c->yuv2rgb_v2r_coeff;
865 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
866 B = U * c->yuv2rgb_u2b_coeff;
868 output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
869 output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
870 output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
872 output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
873 output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
874 output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
875 output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
876 output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
879 output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
880 output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
881 output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
886 const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
887 int A1 = 0xffff<<14, A2 = 0xffff<<14;
888 for (i = 0; i < ((dstW + 1) >> 1); i++) {
889 int Y1 = (buf0[i * 2] ) >> 2;
890 int Y2 = (buf0[i * 2 + 1]) >> 2;
891 int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
892 int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
895 Y1 -= c->yuv2rgb_y_offset;
896 Y2 -= c->yuv2rgb_y_offset;
897 Y1 *= c->yuv2rgb_y_coeff;
898 Y2 *= c->yuv2rgb_y_coeff;
903 A1 = abuf0[i * 2 ] << 11;
904 A2 = abuf0[i * 2 + 1] << 11;
910 R = V * c->yuv2rgb_v2r_coeff;
911 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
912 B = U * c->yuv2rgb_u2b_coeff;
914 output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
915 output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
916 output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
918 output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
919 output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
920 output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
921 output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
922 output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
925 output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
926 output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
927 output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
934 static av_always_inline void
935 yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
936 const int32_t **lumSrc, int lumFilterSize,
937 const int16_t *chrFilter, const int32_t **chrUSrc,
938 const int32_t **chrVSrc, int chrFilterSize,
939 const int32_t **alpSrc, uint16_t *dest, int dstW,
940 int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
945 for (i = 0; i < dstW; i++) {
948 int U = -(128 << 23); // 19
949 int V = -(128 << 23);
952 for (j = 0; j < lumFilterSize; j++) {
953 Y += lumSrc[j][i] * (unsigned)lumFilter[j];
955 for (j = 0; j < chrFilterSize; j++) {
956 U += chrUSrc[j][i] * (unsigned)chrFilter[j];
957 V += chrVSrc[j][i] * (unsigned)chrFilter[j];
962 for (j = 0; j < lumFilterSize; j++) {
963 A += alpSrc[j][i] * (unsigned)lumFilter[j];
969 // 8 bits: 12+15=27; 16 bits: 12+19=31
975 // 8 bits: 27 -> 17 bits, 16 bits: 31 - 14 = 17 bits
976 Y -= c->yuv2rgb_y_offset;
977 Y *= c->yuv2rgb_y_coeff;
979 // 8 bits: 17 + 13 bits = 30 bits, 16 bits: 17 + 13 bits = 30 bits
981 R = V * c->yuv2rgb_v2r_coeff;
982 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
983 B = U * c->yuv2rgb_u2b_coeff;
985 // 8 bits: 30 - 22 = 8 bits, 16 bits: 30 - 14 = 16 bits
986 output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
987 output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
988 output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
990 output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
998 static av_always_inline void
999 yuv2rgba64_full_2_c_template(SwsContext *c, const int32_t *buf[2],
1000 const int32_t *ubuf[2], const int32_t *vbuf[2],
1001 const int32_t *abuf[2], uint16_t *dest, int dstW,
1002 int yalpha, int uvalpha, int y,
1003 enum AVPixelFormat target, int hasAlpha, int eightbytes)
1005 const int32_t *buf0 = buf[0], *buf1 = buf[1],
1006 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1007 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1008 *abuf0 = hasAlpha ? abuf[0] : NULL,
1009 *abuf1 = hasAlpha ? abuf[1] : NULL;
1010 int yalpha1 = 4096 - yalpha;
1011 int uvalpha1 = 4096 - uvalpha;
1015 av_assert2(yalpha <= 4096U);
1016 av_assert2(uvalpha <= 4096U);
1018 for (i = 0; i < dstW; i++) {
1019 int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 14;
1020 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
1021 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
1024 Y -= c->yuv2rgb_y_offset;
1025 Y *= c->yuv2rgb_y_coeff;
1028 R = V * c->yuv2rgb_v2r_coeff;
1029 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1030 B = U * c->yuv2rgb_u2b_coeff;
1033 A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 1;
1038 output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1039 output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1040 output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1042 output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1050 static av_always_inline void
1051 yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0,
1052 const int32_t *ubuf[2], const int32_t *vbuf[2],
1053 const int32_t *abuf0, uint16_t *dest, int dstW,
1054 int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1056 const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1060 if (uvalpha < 2048) {
1061 for (i = 0; i < dstW; i++) {
1062 int Y = (buf0[i]) >> 2;
1063 int U = (ubuf0[i] - (128 << 11)) >> 2;
1064 int V = (vbuf0[i] - (128 << 11)) >> 2;
1067 Y -= c->yuv2rgb_y_offset;
1068 Y *= c->yuv2rgb_y_coeff;
1077 R = V * c->yuv2rgb_v2r_coeff;
1078 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1079 B = U * c->yuv2rgb_u2b_coeff;
1081 output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1082 output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1083 output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1085 output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1092 const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1094 for (i = 0; i < dstW; i++) {
1095 int Y = (buf0[i] ) >> 2;
1096 int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
1097 int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
1100 Y -= c->yuv2rgb_y_offset;
1101 Y *= c->yuv2rgb_y_coeff;
1110 R = V * c->yuv2rgb_v2r_coeff;
1111 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1112 B = U * c->yuv2rgb_u2b_coeff;
1114 output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1115 output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1116 output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1118 output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1131 #define YUV2PACKED16WRAPPER(name, base, ext, fmt, hasAlpha, eightbytes) \
1132 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
1133 const int16_t **_lumSrc, int lumFilterSize, \
1134 const int16_t *chrFilter, const int16_t **_chrUSrc, \
1135 const int16_t **_chrVSrc, int chrFilterSize, \
1136 const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
1139 const int32_t **lumSrc = (const int32_t **) _lumSrc, \
1140 **chrUSrc = (const int32_t **) _chrUSrc, \
1141 **chrVSrc = (const int32_t **) _chrVSrc, \
1142 **alpSrc = (const int32_t **) _alpSrc; \
1143 uint16_t *dest = (uint16_t *) _dest; \
1144 name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
1145 chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
1146 alpSrc, dest, dstW, y, fmt, hasAlpha, eightbytes); \
1149 static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
1150 const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
1151 const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
1152 int yalpha, int uvalpha, int y) \
1154 const int32_t **buf = (const int32_t **) _buf, \
1155 **ubuf = (const int32_t **) _ubuf, \
1156 **vbuf = (const int32_t **) _vbuf, \
1157 **abuf = (const int32_t **) _abuf; \
1158 uint16_t *dest = (uint16_t *) _dest; \
1159 name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
1160 dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha, eightbytes); \
1163 static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
1164 const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
1165 const int16_t *_abuf0, uint8_t *_dest, int dstW, \
1166 int uvalpha, int y) \
1168 const int32_t *buf0 = (const int32_t *) _buf0, \
1169 **ubuf = (const int32_t **) _ubuf, \
1170 **vbuf = (const int32_t **) _vbuf, \
1171 *abuf0 = (const int32_t *) _abuf0; \
1172 uint16_t *dest = (uint16_t *) _dest; \
1173 name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
1174 dstW, uvalpha, y, fmt, hasAlpha, eightbytes); \
1177 YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48be, AV_PIX_FMT_RGB48BE, 0, 0)
1178 YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48le, AV_PIX_FMT_RGB48LE, 0, 0)
1179 YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48be, AV_PIX_FMT_BGR48BE, 0, 0)
1180 YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48le, AV_PIX_FMT_BGR48LE, 0, 0)
1181 YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64be, AV_PIX_FMT_RGBA64BE, 1, 1)
1182 YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64le, AV_PIX_FMT_RGBA64LE, 1, 1)
1183 YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64be, AV_PIX_FMT_RGBA64BE, 0, 1)
1184 YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64le, AV_PIX_FMT_RGBA64LE, 0, 1)
1185 YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64be, AV_PIX_FMT_BGRA64BE, 1, 1)
1186 YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64le, AV_PIX_FMT_BGRA64LE, 1, 1)
1187 YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64be, AV_PIX_FMT_BGRA64BE, 0, 1)
1188 YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64le, AV_PIX_FMT_BGRA64LE, 0, 1)
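/* In the wrappers above the two trailing flags are hasAlpha and
 * eightbytes: eightbytes = 1 selects 8-byte pixels (four 16-bit
 * components, RGBA64/BGRA64) instead of 6-byte RGB48/BGR48, and the
 * rgbx/bgrx variants (hasAlpha = 0, eightbytes = 1) write the alpha word
 * saturated to 0xffff instead of filtering it from alpSrc/abuf. R_B and
 * B_R swap the red/blue table roles so one template serves both RGB and
 * BGR component orders. */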
1190 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48be_full, AV_PIX_FMT_RGB48BE, 0, 0)
1191 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48le_full, AV_PIX_FMT_RGB48LE, 0, 0)
1192 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48be_full, AV_PIX_FMT_BGR48BE, 0, 0)
1193 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48le_full, AV_PIX_FMT_BGR48LE, 0, 0)
1194 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64be_full, AV_PIX_FMT_RGBA64BE, 1, 1)
1195 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64le_full, AV_PIX_FMT_RGBA64LE, 1, 1)
1196 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64be_full, AV_PIX_FMT_RGBA64BE, 0, 1)
1197 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64le_full, AV_PIX_FMT_RGBA64LE, 0, 1)
1198 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64be_full, AV_PIX_FMT_BGRA64BE, 1, 1)
1199 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64le_full, AV_PIX_FMT_BGRA64LE, 1, 1)
1200 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64be_full, AV_PIX_FMT_BGRA64BE, 0, 1)
1201 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64le_full, AV_PIX_FMT_BGRA64LE, 0, 1)
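/* The *_full variants above run the loop once per output pixel with its
 * own U/V sample, i.e. full horizontal chroma resolution, instead of
 * sharing one chroma pair between two lumas; this is the path typically
 * selected for full-chroma-interpolation (SWS_FULL_CHR_H_INT) output. */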
1204 * Write out 2 RGB pixels in the target pixel format. This function takes an
1205 * R/G/B LUT as generated by ff_yuv2rgb_c_init_tables(), which takes care of
1206 * things like endianness conversion and shifting. The caller takes care of
1207 * setting the correct offset in these tables from the chroma (U/V) values.
1208 * This function then uses the luminance (Y1/Y2) values to write out the
1209 * correct RGB values into the destination buffer.
1211 static av_always_inline void
1212 yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2,
1213 unsigned A1, unsigned A2,
1214 const void *_r, const void *_g, const void *_b, int y,
1215 enum AVPixelFormat target, int hasAlpha)
1217 if (target == AV_PIX_FMT_ARGB || target == AV_PIX_FMT_RGBA ||
1218 target == AV_PIX_FMT_ABGR || target == AV_PIX_FMT_BGRA) {
1219 uint32_t *dest = (uint32_t *) _dest;
1220 const uint32_t *r = (const uint32_t *) _r;
1221 const uint32_t *g = (const uint32_t *) _g;
1222 const uint32_t *b = (const uint32_t *) _b;
1225 int sh = hasAlpha ? ((target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24) : 0;
1227 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
1228 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
1231 int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;
1233 av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0);
1234 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
1235 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
1237 #if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
1238 int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;
1240 av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0xFF);
1242 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
1243 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
1246 } else if (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) {
1247 uint8_t *dest = (uint8_t *) _dest;
1248 const uint8_t *r = (const uint8_t *) _r;
1249 const uint8_t *g = (const uint8_t *) _g;
1250 const uint8_t *b = (const uint8_t *) _b;
1252 #define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
1253 #define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)
1255 dest[i * 6 + 0] = r_b[Y1];
1256 dest[i * 6 + 1] = g[Y1];
1257 dest[i * 6 + 2] = b_r[Y1];
1258 dest[i * 6 + 3] = r_b[Y2];
1259 dest[i * 6 + 4] = g[Y2];
1260 dest[i * 6 + 5] = b_r[Y2];
1263 } else if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565 ||
1264 target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555 ||
1265 target == AV_PIX_FMT_RGB444 || target == AV_PIX_FMT_BGR444) {
1266 uint16_t *dest = (uint16_t *) _dest;
1267 const uint16_t *r = (const uint16_t *) _r;
1268 const uint16_t *g = (const uint16_t *) _g;
1269 const uint16_t *b = (const uint16_t *) _b;
1270 int dr1, dg1, db1, dr2, dg2, db2;
1272 if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565) {
1273 dr1 = ff_dither_2x2_8[ y & 1 ][0];
1274 dg1 = ff_dither_2x2_4[ y & 1 ][0];
1275 db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
1276 dr2 = ff_dither_2x2_8[ y & 1 ][1];
1277 dg2 = ff_dither_2x2_4[ y & 1 ][1];
1278 db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
1279 } else if (target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555) {
1280 dr1 = ff_dither_2x2_8[ y & 1 ][0];
1281 dg1 = ff_dither_2x2_8[ y & 1 ][1];
1282 db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
1283 dr2 = ff_dither_2x2_8[ y & 1 ][1];
1284 dg2 = ff_dither_2x2_8[ y & 1 ][0];
1285 db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
1287 dr1 = ff_dither_4x4_16[ y & 3 ][0];
1288 dg1 = ff_dither_4x4_16[ y & 3 ][1];
1289 db1 = ff_dither_4x4_16[(y & 3) ^ 3][0];
1290 dr2 = ff_dither_4x4_16[ y & 3 ][1];
1291 dg2 = ff_dither_4x4_16[ y & 3 ][0];
1292 db2 = ff_dither_4x4_16[(y & 3) ^ 3][1];
1295 dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
1296 dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
1297 } else /* 8/4 bits */ {
1298 uint8_t *dest = (uint8_t *) _dest;
1299 const uint8_t *r = (const uint8_t *) _r;
1300 const uint8_t *g = (const uint8_t *) _g;
1301 const uint8_t *b = (const uint8_t *) _b;
1302 int dr1, dg1, db1, dr2, dg2, db2;
1304 if (target == AV_PIX_FMT_RGB8 || target == AV_PIX_FMT_BGR8) {
1305 const uint8_t * const d64 = ff_dither_8x8_73[y & 7];
1306 const uint8_t * const d32 = ff_dither_8x8_32[y & 7];
1307 dr1 = dg1 = d32[(i * 2 + 0) & 7];
1308 db1 = d64[(i * 2 + 0) & 7];
1309 dr2 = dg2 = d32[(i * 2 + 1) & 7];
1310 db2 = d64[(i * 2 + 1) & 7];
1312 const uint8_t * const d64 = ff_dither_8x8_73 [y & 7];
1313 const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
1314 dr1 = db1 = d128[(i * 2 + 0) & 7];
1315 dg1 = d64[(i * 2 + 0) & 7];
1316 dr2 = db2 = d128[(i * 2 + 1) & 7];
1317 dg2 = d64[(i * 2 + 1) & 7];
1320 if (target == AV_PIX_FMT_RGB4 || target == AV_PIX_FMT_BGR4) {
1321 dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
1322 ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
1324 dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
1325 dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
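/* Illustrative walk-through of the LUT composition above, with
 * hypothetical table contents: for RGB565 the tables hold pre-shifted
 * partial pixels, e.g. r[Y1 + dr1] = 0xB800 (bits 15..11),
 * g[Y1 + dg1] = 0x05E0 (bits 10..5), b[Y1 + db1] = 0x0013 (bits 4..0);
 * plain addition composes the final pixel because the tables are built
 * so the per-component bit fields never overlap. */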
1330 static av_always_inline void
1331 yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
1332 const int16_t **lumSrc, int lumFilterSize,
1333 const int16_t *chrFilter, const int16_t **chrUSrc,
1334 const int16_t **chrVSrc, int chrFilterSize,
1335 const int16_t **alpSrc, uint8_t *dest, int dstW,
1336 int y, enum AVPixelFormat target, int hasAlpha)
1340 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1346 const void *r, *g, *b;
1348 for (j = 0; j < lumFilterSize; j++) {
1349 Y1 += lumSrc[j][i * 2] * lumFilter[j];
1350 Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
1352 for (j = 0; j < chrFilterSize; j++) {
1353 U += chrUSrc[j][i] * chrFilter[j];
1354 V += chrVSrc[j][i] * chrFilter[j];
1363 for (j = 0; j < lumFilterSize; j++) {
1364 A1 += alpSrc[j][i * 2 ] * lumFilter[j];
1365 A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
1369 if ((A1 | A2) & 0x100) {
1370 A1 = av_clip_uint8(A1);
1371 A2 = av_clip_uint8(A2);
1375 r = c->table_rV[V + YUVRGB_TABLE_HEADROOM];
1376 g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]);
1377 b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1379 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1380 r, g, b, y, target, hasAlpha);
1384 static av_always_inline void
1385 yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
1386 const int16_t *ubuf[2], const int16_t *vbuf[2],
1387 const int16_t *abuf[2], uint8_t *dest, int dstW,
1388 int yalpha, int uvalpha, int y,
1389 enum AVPixelFormat target, int hasAlpha)
1391 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1392 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1393 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1394 *abuf0 = hasAlpha ? abuf[0] : NULL,
1395 *abuf1 = hasAlpha ? abuf[1] : NULL;
1396 int yalpha1 = 4096 - yalpha;
1397 int uvalpha1 = 4096 - uvalpha;
1399 av_assert2(yalpha <= 4096U);
1400 av_assert2(uvalpha <= 4096U);
1402 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1403 int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
1404 int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
1405 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
1406 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
1408 const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1409 *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
1410 *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1413 A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19;
1414 A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
1415 A1 = av_clip_uint8(A1);
1416 A2 = av_clip_uint8(A2);
1419 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1420 r, g, b, y, target, hasAlpha);
1424 static av_always_inline void
1425 yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
1426 const int16_t *ubuf[2], const int16_t *vbuf[2],
1427 const int16_t *abuf0, uint8_t *dest, int dstW,
1428 int uvalpha, int y, enum AVPixelFormat target,
1431 const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1434 if (uvalpha < 2048) {
1435 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1436 int Y1 = (buf0[i * 2 ] + 64) >> 7;
1437 int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
1438 int U = (ubuf0[i] + 64) >> 7;
1439 int V = (vbuf0[i] + 64) >> 7;
1441 const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1442 *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
1443 *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1446 A1 = abuf0[i * 2 ] * 255 + 16384 >> 15;
1447 A2 = abuf0[i * 2 + 1] * 255 + 16384 >> 15;
1448 A1 = av_clip_uint8(A1);
1449 A2 = av_clip_uint8(A2);
1452 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1453 r, g, b, y, target, hasAlpha);
1456 const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1457 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1458 int Y1 = (buf0[i * 2 ] + 64) >> 7;
1459 int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
1460 int U = (ubuf0[i] + ubuf1[i] + 128) >> 8;
1461 int V = (vbuf0[i] + vbuf1[i] + 128) >> 8;
1463 const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1464 *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
1465 *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1468 A1 = (abuf0[i * 2 ] + 64) >> 7;
1469 A2 = (abuf0[i * 2 + 1] + 64) >> 7;
1470 A1 = av_clip_uint8(A1);
1471 A2 = av_clip_uint8(A2);
1474 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1475 r, g, b, y, target, hasAlpha);
1480 #define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
1481 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
1482 const int16_t **lumSrc, int lumFilterSize, \
1483 const int16_t *chrFilter, const int16_t **chrUSrc, \
1484 const int16_t **chrVSrc, int chrFilterSize, \
1485 const int16_t **alpSrc, uint8_t *dest, int dstW, \
1488 name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
1489 chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
1490 alpSrc, dest, dstW, y, fmt, hasAlpha); \
1493 #define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
1494 YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
1495 static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
1496 const int16_t *ubuf[2], const int16_t *vbuf[2], \
1497 const int16_t *abuf[2], uint8_t *dest, int dstW, \
1498 int yalpha, int uvalpha, int y) \
1500 name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
1501 dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
1504 #define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
1505 YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
1506 static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
1507 const int16_t *ubuf[2], const int16_t *vbuf[2], \
1508 const int16_t *abuf0, uint8_t *dest, int dstW, \
1509 int uvalpha, int y) \
1511 name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
1512 dstW, uvalpha, y, fmt, hasAlpha); \
1516 YUV2RGBWRAPPER(yuv2rgb,, 32_1, AV_PIX_FMT_RGB32_1, CONFIG_SWSCALE_ALPHA && c->needAlpha)
1517 YUV2RGBWRAPPER(yuv2rgb,, 32, AV_PIX_FMT_RGB32, CONFIG_SWSCALE_ALPHA && c->needAlpha)
1519 #if CONFIG_SWSCALE_ALPHA
1520 YUV2RGBWRAPPER(yuv2rgb,, a32_1, AV_PIX_FMT_RGB32_1, 1)
1521 YUV2RGBWRAPPER(yuv2rgb,, a32, AV_PIX_FMT_RGB32, 1)
1523 YUV2RGBWRAPPER(yuv2rgb,, x32_1, AV_PIX_FMT_RGB32_1, 0)
1524 YUV2RGBWRAPPER(yuv2rgb,, x32, AV_PIX_FMT_RGB32, 0)
1526 YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
1527 YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
1528 YUV2RGBWRAPPER(yuv2rgb,, 16, AV_PIX_FMT_RGB565, 0)
1529 YUV2RGBWRAPPER(yuv2rgb,, 15, AV_PIX_FMT_RGB555, 0)
1530 YUV2RGBWRAPPER(yuv2rgb,, 12, AV_PIX_FMT_RGB444, 0)
1531 YUV2RGBWRAPPER(yuv2rgb,, 8, AV_PIX_FMT_RGB8, 0)
1532 YUV2RGBWRAPPER(yuv2rgb,, 4, AV_PIX_FMT_RGB4, 0)
1533 YUV2RGBWRAPPER(yuv2rgb,, 4b, AV_PIX_FMT_RGB4_BYTE, 0)
1535 static av_always_inline void yuv2rgb_write_full(SwsContext *c,
1536 uint8_t *dest, int i, int Y, int A, int U, int V,
1537 int y, enum AVPixelFormat target, int hasAlpha, int err[4])
1540 int isrgb8 = target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8;
1542 Y -= c->yuv2rgb_y_offset;
1543 Y *= c->yuv2rgb_y_coeff;
1545 R = Y + V*c->yuv2rgb_v2r_coeff;
1546 G = Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
1547 B = Y + U*c->yuv2rgb_u2b_coeff;
1548 if ((R | G | B) & 0xC0000000) {
1549 R = av_clip_uintp2(R, 30);
1550 G = av_clip_uintp2(G, 30);
1551 B = av_clip_uintp2(B, 30);
1555 case AV_PIX_FMT_ARGB:
1556 dest[0] = hasAlpha ? A : 255;
1561 case AV_PIX_FMT_RGB24:
1566 case AV_PIX_FMT_RGBA:
1570 dest[3] = hasAlpha ? A : 255;
1572 case AV_PIX_FMT_ABGR:
1573 dest[0] = hasAlpha ? A : 255;
1578 case AV_PIX_FMT_BGR24:
1583 case AV_PIX_FMT_BGRA:
1587 dest[3] = hasAlpha ? A : 255;
1589 case AV_PIX_FMT_BGR4_BYTE:
1590 case AV_PIX_FMT_RGB4_BYTE:
1591 case AV_PIX_FMT_BGR8:
1592 case AV_PIX_FMT_RGB8:
1596 switch (c->dither) {
1598 case SWS_DITHER_AUTO:
1603 R += (7*err[0] + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2])>>4;
1604 G += (7*err[1] + 1*c->dither_error[1][i] + 5*c->dither_error[1][i+1] + 3*c->dither_error[1][i+2])>>4;
1605 B += (7*err[2] + 1*c->dither_error[2][i] + 5*c->dither_error[2][i+1] + 3*c->dither_error[2][i+2])>>4;
1606 c->dither_error[0][i] = err[0];
1607 c->dither_error[1][i] = err[1];
1608 c->dither_error[2][i] = err[2];
1609 r = R >> (isrgb8 ? 5 : 7);
1610 g = G >> (isrgb8 ? 5 : 6);
1611 b = B >> (isrgb8 ? 6 : 7);
1612 r = av_clip(r, 0, isrgb8 ? 7 : 1);
1613 g = av_clip(g, 0, isrgb8 ? 7 : 3);
1614 b = av_clip(b, 0, isrgb8 ? 3 : 1);
1615 err[0] = R - r*(isrgb8 ? 36 : 255);
1616 err[1] = G - g*(isrgb8 ? 36 : 85);
1617 err[2] = B - b*(isrgb8 ? 85 : 255);
1619 case SWS_DITHER_A_DITHER:
1621 /* see http://pippin.gimp.org/a_dither/ for details/origin */
1622 #define A_DITHER(u,v) (((((u)+((v)*236))*119)&0xff))
1623 r = (((R >> 19) + A_DITHER(i,y) -96)>>8);
1624 g = (((G >> 19) + A_DITHER(i + 17,y) - 96)>>8);
1625 b = (((B >> 20) + A_DITHER(i + 17*2,y) -96)>>8);
1626 r = av_clip_uintp2(r, 3);
1627 g = av_clip_uintp2(g, 3);
1628 b = av_clip_uintp2(b, 2);
1630 r = (((R >> 21) + A_DITHER(i,y)-256)>>8);
1631 g = (((G >> 19) + A_DITHER(i + 17,y)-256)>>8);
1632 b = (((B >> 21) + A_DITHER(i + 17*2,y)-256)>>8);
1633 r = av_clip_uintp2(r, 1);
1634 g = av_clip_uintp2(g, 2);
1635 b = av_clip_uintp2(b, 1);
1638 case SWS_DITHER_X_DITHER:
1640 /* see http://pippin.gimp.org/a_dither/ for details/origin */
1641 #define X_DITHER(u,v) (((((u)^((v)*237))*181)&0x1ff)/2)
1642 r = (((R >> 19) + X_DITHER(i,y) - 96)>>8);
1643 g = (((G >> 19) + X_DITHER(i + 17,y) - 96)>>8);
1644 b = (((B >> 20) + X_DITHER(i + 17*2,y) - 96)>>8);
1645 r = av_clip_uintp2(r, 3);
1646 g = av_clip_uintp2(g, 3);
1647 b = av_clip_uintp2(b, 2);
1649 r = (((R >> 21) + X_DITHER(i,y)-256)>>8);
1650 g = (((G >> 19) + X_DITHER(i + 17,y)-256)>>8);
1651 b = (((B >> 21) + X_DITHER(i + 17*2,y)-256)>>8);
1652 r = av_clip_uintp2(r, 1);
1653 g = av_clip_uintp2(g, 2);
1654 b = av_clip_uintp2(b, 1);
1660 if(target == AV_PIX_FMT_BGR4_BYTE) {
1661 dest[0] = r + 2*g + 8*b;
1662 } else if(target == AV_PIX_FMT_RGB4_BYTE) {
1663 dest[0] = b + 2*g + 8*r;
1664 } else if(target == AV_PIX_FMT_BGR8) {
1665 dest[0] = r + 8*g + 64*b;
1666 } else if(target == AV_PIX_FMT_RGB8) {
1667 dest[0] = b + 4*g + 32*r;
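/* Bit packing in the branch above, as a reading of the constants: RGB8's
 * "b + 4*g + 32*r" places blue in bits 1..0, green in 4..2 and red in
 * 7..5 (BGR8 mirrors this with "r + 8*g + 64*b"), while RGB4_BYTE and
 * BGR4_BYTE store one 1-2-1-bit pixel per byte, "b + 2*g + 8*r" and the
 * mirrored "r + 2*g + 8*b" respectively. */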
1674 static av_always_inline void
1675 yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
1676 const int16_t **lumSrc, int lumFilterSize,
1677 const int16_t *chrFilter, const int16_t **chrUSrc,
1678 const int16_t **chrVSrc, int chrFilterSize,
1679 const int16_t **alpSrc, uint8_t *dest,
1680 int dstW, int y, enum AVPixelFormat target, int hasAlpha)
1683 int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
1685 int A = 0; //init to silence warning
1687 if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
1688 || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
1691 for (i = 0; i < dstW; i++) {
1694 int U = (1<<9)-(128 << 19);
1695 int V = (1<<9)-(128 << 19);
1697 for (j = 0; j < lumFilterSize; j++) {
1698 Y += lumSrc[j][i] * lumFilter[j];
1700 for (j = 0; j < chrFilterSize; j++) {
1701 U += chrUSrc[j][i] * chrFilter[j];
1702 V += chrVSrc[j][i] * chrFilter[j];
1709 for (j = 0; j < lumFilterSize; j++) {
1710 A += alpSrc[j][i] * lumFilter[j];
1714 A = av_clip_uint8(A);
1716 yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
1719 c->dither_error[0][i] = err[0];
1720 c->dither_error[1][i] = err[1];
1721 c->dither_error[2][i] = err[2];
1724 static av_always_inline void
1725 yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2],
1726 const int16_t *ubuf[2], const int16_t *vbuf[2],
1727 const int16_t *abuf[2], uint8_t *dest, int dstW,
1728 int yalpha, int uvalpha, int y,
1729 enum AVPixelFormat target, int hasAlpha)
1731 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1732 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1733 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1734 *abuf0 = hasAlpha ? abuf[0] : NULL,
1735 *abuf1 = hasAlpha ? abuf[1] : NULL;
1736 int yalpha1 = 4096 - yalpha;
1737 int uvalpha1 = 4096 - uvalpha;
1739 int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
1741 int A = 0; // init to silence warning
1743 av_assert2(yalpha <= 4096U);
1744 av_assert2(uvalpha <= 4096U);
1746 if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
1747 || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
1750 for (i = 0; i < dstW; i++) {
1751 int Y = ( buf0[i] * yalpha1 + buf1[i] * yalpha ) >> 10; //FIXME rounding
1752 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha-(128 << 19)) >> 10;
1753 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha-(128 << 19)) >> 10;
1756 A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha + (1<<18)) >> 19;
1758 A = av_clip_uint8(A);
1761 yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
1764 c->dither_error[0][i] = err[0];
1765 c->dither_error[1][i] = err[1];
1766 c->dither_error[2][i] = err[2];
1769 static av_always_inline void
1770 yuv2rgb_full_1_c_template(SwsContext *c, const int16_t *buf0,
1771 const int16_t *ubuf[2], const int16_t *vbuf[2],
1772 const int16_t *abuf0, uint8_t *dest, int dstW,
1773 int uvalpha, int y, enum AVPixelFormat target,
1776 const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1778 int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
1781 if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
1782 || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
1785 if (uvalpha < 2048) {
1786 int A = 0; //init to silence warning
1787 for (i = 0; i < dstW; i++) {
1788 int Y = buf0[i] << 2;
1789 int U = (ubuf0[i] - (128<<7)) * 4;
1790 int V = (vbuf0[i] - (128<<7)) * 4;
1793 A = (abuf0[i] + 64) >> 7;
1795 A = av_clip_uint8(A);
1798 yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
1802 const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1803 int A = 0; //init to silence warning
1804 for (i = 0; i < dstW; i++) {
1805 int Y = buf0[i] << 2;
1806 int U = (ubuf0[i] + ubuf1[i] - (128<<8)) << 1;
1807 int V = (vbuf0[i] + vbuf1[i] - (128<<8)) << 1;
1810 A = (abuf0[i] + 64) >> 7;
1812 A = av_clip_uint8(A);
1815 yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
1820 c->dither_error[0][i] = err[0];
1821 c->dither_error[1][i] = err[1];
1822 c->dither_error[2][i] = err[2];
1826 YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->needAlpha)
1827 YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->needAlpha)
1828 YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->needAlpha)
1829 YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->needAlpha)
1831 #if CONFIG_SWSCALE_ALPHA
1832 YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, 1)
1833 YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, 1)
1834 YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, 1)
1835 YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, 1)
1837 YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
1838 YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
1839 YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
1840 YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
1842 YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
1843 YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
1845 YUV2RGBWRAPPER(yuv2, rgb_full, bgr4_byte_full, AV_PIX_FMT_BGR4_BYTE, 0)
1846 YUV2RGBWRAPPER(yuv2, rgb_full, rgb4_byte_full, AV_PIX_FMT_RGB4_BYTE, 0)
1847 YUV2RGBWRAPPER(yuv2, rgb_full, bgr8_full, AV_PIX_FMT_BGR8, 0)
1848 YUV2RGBWRAPPER(yuv2, rgb_full, rgb8_full, AV_PIX_FMT_RGB8, 0)
static void
yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter,
                  const int16_t **lumSrc, int lumFilterSize,
                  const int16_t *chrFilter, const int16_t **chrUSrc,
                  const int16_t **chrVSrc, int chrFilterSize,
                  const int16_t **alpSrc, uint8_t **dest,
                  int dstW, int y)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
    int i;
    int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrc;
    uint16_t **dest16 = (uint16_t**)dest;
    int SH = 22 + 8 - desc->comp[0].depth;
    int A = 0; // init to silence warning

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = 1 << 9;
        int U = (1 << 9) - (128 << 19);
        int V = (1 << 9) - (128 << 19);
        int R, G, B;

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }

        Y >>= 10;
        U >>= 10;
        V >>= 10;

        if (hasAlpha) {
            A = 1 << 18;

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * lumFilter[j];

            A >>= 19;

            if (A & 0x100)
                A = av_clip_uint8(A);
        }

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;
        Y += 1 << 21;
        R = Y + V * c->yuv2rgb_v2r_coeff;
        G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = Y +                            U * c->yuv2rgb_u2b_coeff;

        if ((R | G | B) & 0xC0000000) {
            R = av_clip_uintp2(R, 30);
            G = av_clip_uintp2(G, 30);
            B = av_clip_uintp2(B, 30);
        }

        if (SH != 22) {
            dest16[0][i] = G >> SH;
            dest16[1][i] = B >> SH;
            dest16[2][i] = R >> SH;
            if (hasAlpha)
                dest16[3][i] = A << (SH - 8);
        } else {
            dest[0][i] = G >> 22;
            dest[1][i] = B >> 22;
            dest[2][i] = R >> 22;
            if (hasAlpha)
                dest[3][i] = A;
        }
    }
    if (SH != 22 && (!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
        for (i = 0; i < dstW; i++) {
            dest16[0][i] = av_bswap16(dest16[0][i]);
            dest16[1][i] = av_bswap16(dest16[1][i]);
            dest16[2][i] = av_bswap16(dest16[2][i]);
            if (hasAlpha)
                dest16[3][i] = av_bswap16(dest16[3][i]);
        }
    }
}

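/*
 * Gray + alpha (YA8) writers: luma is rounded down from the 15-bit
 * intermediates to 8 bits and interleaved with alpha; a missing alpha
 * plane is written as fully opaque (255).
 */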
static void
yuv2ya8_1_c(SwsContext *c, const int16_t *buf0,
            const int16_t *ubuf[2], const int16_t *vbuf[2],
            const int16_t *abuf0, uint8_t *dest, int dstW,
            int uvalpha, int y)
{
    int hasAlpha = !!abuf0;
    int i;

    for (i = 0; i < dstW; i++) {
        int Y = (buf0[i] + 64) >> 7;
        int A;

        Y = av_clip_uint8(Y);

        if (hasAlpha) {
            A = (abuf0[i] + 64) >> 7;
            if (A & 0x100)
                A = av_clip_uint8(A);
        }

        dest[i * 2    ] = Y;
        dest[i * 2 + 1] = hasAlpha ? A : 255;
    }
}

static void
yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2],
            const int16_t *ubuf[2], const int16_t *vbuf[2],
            const int16_t *abuf[2], uint8_t *dest, int dstW,
            int yalpha, int uvalpha, int y)
{
    int hasAlpha = abuf && abuf[0] && abuf[1];
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1 = 4096 - yalpha;
    int i;

    av_assert2(yalpha <= 4096U);

    for (i = 0; i < dstW; i++) {
        int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 19;
        int A;

        Y = av_clip_uint8(Y);

        if (hasAlpha) {
            A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 19;
            A = av_clip_uint8(A);
        }

        dest[i * 2    ] = Y;
        dest[i * 2 + 1] = hasAlpha ? A : 255;
    }
}

static void
yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter,
            const int16_t **lumSrc, int lumFilterSize,
            const int16_t *chrFilter, const int16_t **chrUSrc,
            const int16_t **chrVSrc, int chrFilterSize,
            const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
{
    int hasAlpha = !!alpSrc;
    int i;

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = 1 << 18, A = 1 << 18;

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * lumFilter[j];

        Y >>= 19;
        if (Y & 0x100)
            Y = av_clip_uint8(Y);

        if (hasAlpha) {
            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * lumFilter[j];

            A >>= 19;
            if (A & 0x100)
                A = av_clip_uint8(A);
        }

        dest[2 * i    ] = Y;
        dest[2 * i + 1] = hasAlpha ? A : 255;
    }
}

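/*
 * Packed 16-bit AYUV, little endian. The _lumSrc/_chrUSrc/... arguments
 * actually carry 32-bit high-depth intermediates, hence the casts below;
 * each component is filtered, clipped around the 0x8000 midpoint and
 * stored with AV_WL16. Missing alpha is written as opaque (65535).
 */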
static void
yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter,
                 const int16_t **_lumSrc, int lumFilterSize,
                 const int16_t *chrFilter, const int16_t **_chrUSrc,
                 const int16_t **_chrVSrc, int chrFilterSize,
                 const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
{
    const int32_t **lumSrc  = (const int32_t **) _lumSrc,
                  **chrUSrc = (const int32_t **) _chrUSrc,
                  **chrVSrc = (const int32_t **) _chrVSrc,
                  **alpSrc  = (const int32_t **) _alpSrc;
    int hasAlpha = !!alpSrc;
    int i;

    for (i = 0; i < dstW; i++) {
        int Y = 1 << 14, U = 1 << 14;
        int V = 1 << 14, A = 1 << 14;
        int j;

        Y -= 0x40000000;
        U -= 0x40000000;
        V -= 0x40000000;
        A -= 0x40000000;

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * (unsigned)lumFilter[j];

        for (j = 0; j < chrFilterSize; j++)
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];

        for (j = 0; j < chrFilterSize; j++)
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];

        if (hasAlpha)
            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * (unsigned)lumFilter[j];

        Y = 0x8000 + av_clip_int16(Y >> 15);
        U = 0x8000 + av_clip_int16(U >> 15);
        V = 0x8000 + av_clip_int16(V >> 15);
        A = 0x8000 + av_clip_int16(A >> 15);

        AV_WL16(dest + 8 * i,     hasAlpha ? A : 65535);
        AV_WL16(dest + 8 * i + 2, Y);
        AV_WL16(dest + 8 * i + 4, U);
        AV_WL16(dest + 8 * i + 6, V);
    }
}

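/*
 * Pick the output functions for the destination format: planar writers
 * by component depth and endianness first, then the packed/any writers,
 * with separate tables for the full-chroma-interpolation path
 * (SWS_FULL_CHR_H_INT) and the default path.
 */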
av_cold void ff_sws_init_output_funcs(SwsContext *c,
                                      yuv2planar1_fn *yuv2plane1,
                                      yuv2planarX_fn *yuv2planeX,
                                      yuv2interleavedX_fn *yuv2nv12cX,
                                      yuv2packed1_fn *yuv2packed1,
                                      yuv2packed2_fn *yuv2packed2,
                                      yuv2packedX_fn *yuv2packedX,
                                      yuv2anyX_fn *yuv2anyX)
{
    enum AVPixelFormat dstFormat = c->dstFormat;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);

    if (is16BPS(dstFormat)) {
        *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
        *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;
    } else if (is9_OR_10BPS(dstFormat)) {
        if (desc->comp[0].depth == 9) {
            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;
        } else if (desc->comp[0].depth == 10) {
            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;
        } else if (desc->comp[0].depth == 12) {
            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_c : yuv2planeX_12LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_c : yuv2plane1_12LE_c;
        } else if (desc->comp[0].depth == 14) {
            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_c : yuv2planeX_14LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_c : yuv2plane1_14LE_c;
        } else
            av_assert0(0);
    } else {
        *yuv2plane1 = yuv2plane1_8_c;
        *yuv2planeX = yuv2planeX_8_c;
        if (dstFormat == AV_PIX_FMT_NV12 || dstFormat == AV_PIX_FMT_NV21)
            *yuv2nv12cX = yuv2nv12cX_c;
    }

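    /*
     * Full horizontal chroma interpolation: the *_full_* writers take
     * chroma at luma resolution and dither the low-depth RGB formats
     * with the error-diffusion buffers seen above.
     */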
    if (c->flags & SWS_FULL_CHR_H_INT) {
        switch (dstFormat) {
        case AV_PIX_FMT_RGBA:
#if CONFIG_SMALL
            *yuv2packedX = yuv2rgba32_full_X_c;
            *yuv2packed2 = yuv2rgba32_full_2_c;
            *yuv2packed1 = yuv2rgba32_full_1_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packedX = yuv2rgba32_full_X_c;
                *yuv2packed2 = yuv2rgba32_full_2_c;
                *yuv2packed1 = yuv2rgba32_full_1_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2rgbx32_full_X_c;
                *yuv2packed2 = yuv2rgbx32_full_2_c;
                *yuv2packed1 = yuv2rgbx32_full_1_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_ARGB:
#if CONFIG_SMALL
            *yuv2packedX = yuv2argb32_full_X_c;
            *yuv2packed2 = yuv2argb32_full_2_c;
            *yuv2packed1 = yuv2argb32_full_1_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packedX = yuv2argb32_full_X_c;
                *yuv2packed2 = yuv2argb32_full_2_c;
                *yuv2packed1 = yuv2argb32_full_1_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2xrgb32_full_X_c;
                *yuv2packed2 = yuv2xrgb32_full_2_c;
                *yuv2packed1 = yuv2xrgb32_full_1_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_BGRA:
#if CONFIG_SMALL
            *yuv2packedX = yuv2bgra32_full_X_c;
            *yuv2packed2 = yuv2bgra32_full_2_c;
            *yuv2packed1 = yuv2bgra32_full_1_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packedX = yuv2bgra32_full_X_c;
                *yuv2packed2 = yuv2bgra32_full_2_c;
                *yuv2packed1 = yuv2bgra32_full_1_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2bgrx32_full_X_c;
                *yuv2packed2 = yuv2bgrx32_full_2_c;
                *yuv2packed1 = yuv2bgrx32_full_1_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_ABGR:
#if CONFIG_SMALL
            *yuv2packedX = yuv2abgr32_full_X_c;
            *yuv2packed2 = yuv2abgr32_full_2_c;
            *yuv2packed1 = yuv2abgr32_full_1_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packedX = yuv2abgr32_full_X_c;
                *yuv2packed2 = yuv2abgr32_full_2_c;
                *yuv2packed1 = yuv2abgr32_full_1_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2xbgr32_full_X_c;
                *yuv2packed2 = yuv2xbgr32_full_2_c;
                *yuv2packed1 = yuv2xbgr32_full_1_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_RGBA64LE:
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packedX = yuv2rgba64le_full_X_c;
                *yuv2packed2 = yuv2rgba64le_full_2_c;
                *yuv2packed1 = yuv2rgba64le_full_1_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2rgbx64le_full_X_c;
                *yuv2packed2 = yuv2rgbx64le_full_2_c;
                *yuv2packed1 = yuv2rgbx64le_full_1_c;
            }
            break;
        case AV_PIX_FMT_RGBA64BE:
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packedX = yuv2rgba64be_full_X_c;
                *yuv2packed2 = yuv2rgba64be_full_2_c;
                *yuv2packed1 = yuv2rgba64be_full_1_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2rgbx64be_full_X_c;
                *yuv2packed2 = yuv2rgbx64be_full_2_c;
                *yuv2packed1 = yuv2rgbx64be_full_1_c;
            }
            break;
        case AV_PIX_FMT_BGRA64LE:
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packedX = yuv2bgra64le_full_X_c;
                *yuv2packed2 = yuv2bgra64le_full_2_c;
                *yuv2packed1 = yuv2bgra64le_full_1_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2bgrx64le_full_X_c;
                *yuv2packed2 = yuv2bgrx64le_full_2_c;
                *yuv2packed1 = yuv2bgrx64le_full_1_c;
            }
            break;
        case AV_PIX_FMT_BGRA64BE:
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packedX = yuv2bgra64be_full_X_c;
                *yuv2packed2 = yuv2bgra64be_full_2_c;
                *yuv2packed1 = yuv2bgra64be_full_1_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2bgrx64be_full_X_c;
                *yuv2packed2 = yuv2bgrx64be_full_2_c;
                *yuv2packed1 = yuv2bgrx64be_full_1_c;
            }
            break;
        case AV_PIX_FMT_RGB24:
            *yuv2packedX = yuv2rgb24_full_X_c;
            *yuv2packed2 = yuv2rgb24_full_2_c;
            *yuv2packed1 = yuv2rgb24_full_1_c;
            break;
        case AV_PIX_FMT_BGR24:
            *yuv2packedX = yuv2bgr24_full_X_c;
            *yuv2packed2 = yuv2bgr24_full_2_c;
            *yuv2packed1 = yuv2bgr24_full_1_c;
            break;
        case AV_PIX_FMT_RGB48LE:
            *yuv2packedX = yuv2rgb48le_full_X_c;
            *yuv2packed2 = yuv2rgb48le_full_2_c;
            *yuv2packed1 = yuv2rgb48le_full_1_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            *yuv2packedX = yuv2bgr48le_full_X_c;
            *yuv2packed2 = yuv2bgr48le_full_2_c;
            *yuv2packed1 = yuv2bgr48le_full_1_c;
            break;
        case AV_PIX_FMT_RGB48BE:
            *yuv2packedX = yuv2rgb48be_full_X_c;
            *yuv2packed2 = yuv2rgb48be_full_2_c;
            *yuv2packed1 = yuv2rgb48be_full_1_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            *yuv2packedX = yuv2bgr48be_full_X_c;
            *yuv2packed2 = yuv2bgr48be_full_2_c;
            *yuv2packed1 = yuv2bgr48be_full_1_c;
            break;
        case AV_PIX_FMT_BGR4_BYTE:
            *yuv2packedX = yuv2bgr4_byte_full_X_c;
            *yuv2packed2 = yuv2bgr4_byte_full_2_c;
            *yuv2packed1 = yuv2bgr4_byte_full_1_c;
            break;
        case AV_PIX_FMT_RGB4_BYTE:
            *yuv2packedX = yuv2rgb4_byte_full_X_c;
            *yuv2packed2 = yuv2rgb4_byte_full_2_c;
            *yuv2packed1 = yuv2rgb4_byte_full_1_c;
            break;
        case AV_PIX_FMT_BGR8:
            *yuv2packedX = yuv2bgr8_full_X_c;
            *yuv2packed2 = yuv2bgr8_full_2_c;
            *yuv2packed1 = yuv2bgr8_full_1_c;
            break;
        case AV_PIX_FMT_RGB8:
            *yuv2packedX = yuv2rgb8_full_X_c;
            *yuv2packed2 = yuv2rgb8_full_2_c;
            *yuv2packed1 = yuv2rgb8_full_1_c;
            break;
        case AV_PIX_FMT_GBRP:
        case AV_PIX_FMT_GBRP9BE:
        case AV_PIX_FMT_GBRP9LE:
        case AV_PIX_FMT_GBRP10BE:
        case AV_PIX_FMT_GBRP10LE:
        case AV_PIX_FMT_GBRP12BE:
        case AV_PIX_FMT_GBRP12LE:
        case AV_PIX_FMT_GBRP14BE:
        case AV_PIX_FMT_GBRP14LE:
        case AV_PIX_FMT_GBRP16BE:
        case AV_PIX_FMT_GBRP16LE:
        case AV_PIX_FMT_GBRAP:
            *yuv2anyX = yuv2gbrp_full_X_c;
            break;
        }
        if (!*yuv2packedX && !*yuv2anyX)
            goto YUV_PACKED;
    } else {
        YUV_PACKED:
        switch (dstFormat) {
        case AV_PIX_FMT_RGBA64LE:
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packed1 = yuv2rgba64le_1_c;
                *yuv2packed2 = yuv2rgba64le_2_c;
                *yuv2packedX = yuv2rgba64le_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packed1 = yuv2rgbx64le_1_c;
                *yuv2packed2 = yuv2rgbx64le_2_c;
                *yuv2packedX = yuv2rgbx64le_X_c;
            }
            break;
        case AV_PIX_FMT_RGBA64BE:
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packed1 = yuv2rgba64be_1_c;
                *yuv2packed2 = yuv2rgba64be_2_c;
                *yuv2packedX = yuv2rgba64be_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packed1 = yuv2rgbx64be_1_c;
                *yuv2packed2 = yuv2rgbx64be_2_c;
                *yuv2packedX = yuv2rgbx64be_X_c;
            }
            break;
        case AV_PIX_FMT_BGRA64LE:
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packed1 = yuv2bgra64le_1_c;
                *yuv2packed2 = yuv2bgra64le_2_c;
                *yuv2packedX = yuv2bgra64le_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packed1 = yuv2bgrx64le_1_c;
                *yuv2packed2 = yuv2bgrx64le_2_c;
                *yuv2packedX = yuv2bgrx64le_X_c;
            }
            break;
        case AV_PIX_FMT_BGRA64BE:
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packed1 = yuv2bgra64be_1_c;
                *yuv2packed2 = yuv2bgra64be_2_c;
                *yuv2packedX = yuv2bgra64be_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packed1 = yuv2bgrx64be_1_c;
                *yuv2packed2 = yuv2bgrx64be_2_c;
                *yuv2packedX = yuv2bgrx64be_X_c;
            }
            break;
        case AV_PIX_FMT_RGB48LE:
            *yuv2packed1 = yuv2rgb48le_1_c;
            *yuv2packed2 = yuv2rgb48le_2_c;
            *yuv2packedX = yuv2rgb48le_X_c;
            break;
        case AV_PIX_FMT_RGB48BE:
            *yuv2packed1 = yuv2rgb48be_1_c;
            *yuv2packed2 = yuv2rgb48be_2_c;
            *yuv2packedX = yuv2rgb48be_X_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            *yuv2packed1 = yuv2bgr48le_1_c;
            *yuv2packed2 = yuv2bgr48le_2_c;
            *yuv2packedX = yuv2bgr48le_X_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            *yuv2packed1 = yuv2bgr48be_1_c;
            *yuv2packed2 = yuv2bgr48be_2_c;
            *yuv2packedX = yuv2bgr48be_X_c;
            break;
        case AV_PIX_FMT_RGB32:
        case AV_PIX_FMT_BGR32:
#if CONFIG_SMALL
            *yuv2packed1 = yuv2rgb32_1_c;
            *yuv2packed2 = yuv2rgb32_2_c;
            *yuv2packedX = yuv2rgb32_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packed1 = yuv2rgba32_1_c;
                *yuv2packed2 = yuv2rgba32_2_c;
                *yuv2packedX = yuv2rgba32_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packed1 = yuv2rgbx32_1_c;
                *yuv2packed2 = yuv2rgbx32_2_c;
                *yuv2packedX = yuv2rgbx32_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_RGB32_1:
        case AV_PIX_FMT_BGR32_1:
#if CONFIG_SMALL
            *yuv2packed1 = yuv2rgb32_1_1_c;
            *yuv2packed2 = yuv2rgb32_1_2_c;
            *yuv2packedX = yuv2rgb32_1_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                *yuv2packed1 = yuv2rgba32_1_1_c;
                *yuv2packed2 = yuv2rgba32_1_2_c;
                *yuv2packedX = yuv2rgba32_1_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packed1 = yuv2rgbx32_1_1_c;
                *yuv2packed2 = yuv2rgbx32_1_2_c;
                *yuv2packedX = yuv2rgbx32_1_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_RGB24:
            *yuv2packed1 = yuv2rgb24_1_c;
            *yuv2packed2 = yuv2rgb24_2_c;
            *yuv2packedX = yuv2rgb24_X_c;
            break;
        case AV_PIX_FMT_BGR24:
            *yuv2packed1 = yuv2bgr24_1_c;
            *yuv2packed2 = yuv2bgr24_2_c;
            *yuv2packedX = yuv2bgr24_X_c;
            break;
        case AV_PIX_FMT_RGB565LE:
        case AV_PIX_FMT_RGB565BE:
        case AV_PIX_FMT_BGR565LE:
        case AV_PIX_FMT_BGR565BE:
            *yuv2packed1 = yuv2rgb16_1_c;
            *yuv2packed2 = yuv2rgb16_2_c;
            *yuv2packedX = yuv2rgb16_X_c;
            break;
        case AV_PIX_FMT_RGB555LE:
        case AV_PIX_FMT_RGB555BE:
        case AV_PIX_FMT_BGR555LE:
        case AV_PIX_FMT_BGR555BE:
            *yuv2packed1 = yuv2rgb15_1_c;
            *yuv2packed2 = yuv2rgb15_2_c;
            *yuv2packedX = yuv2rgb15_X_c;
            break;
        case AV_PIX_FMT_RGB444LE:
        case AV_PIX_FMT_RGB444BE:
        case AV_PIX_FMT_BGR444LE:
        case AV_PIX_FMT_BGR444BE:
            *yuv2packed1 = yuv2rgb12_1_c;
            *yuv2packed2 = yuv2rgb12_2_c;
            *yuv2packedX = yuv2rgb12_X_c;
            break;
        case AV_PIX_FMT_RGB8:
        case AV_PIX_FMT_BGR8:
            *yuv2packed1 = yuv2rgb8_1_c;
            *yuv2packed2 = yuv2rgb8_2_c;
            *yuv2packedX = yuv2rgb8_X_c;
            break;
        case AV_PIX_FMT_RGB4:
        case AV_PIX_FMT_BGR4:
            *yuv2packed1 = yuv2rgb4_1_c;
            *yuv2packed2 = yuv2rgb4_2_c;
            *yuv2packedX = yuv2rgb4_X_c;
            break;
        case AV_PIX_FMT_RGB4_BYTE:
        case AV_PIX_FMT_BGR4_BYTE:
            *yuv2packed1 = yuv2rgb4b_1_c;
            *yuv2packed2 = yuv2rgb4b_2_c;
            *yuv2packedX = yuv2rgb4b_X_c;
            break;
        }
    }
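    /*
     * Writers selected regardless of SWS_FULL_CHR_H_INT: mono, packed
     * 4:2:2, gray+alpha and 16-bit AYUV output.
     */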
    switch (dstFormat) {
    case AV_PIX_FMT_MONOWHITE:
        *yuv2packed1 = yuv2monowhite_1_c;
        *yuv2packed2 = yuv2monowhite_2_c;
        *yuv2packedX = yuv2monowhite_X_c;
        break;
    case AV_PIX_FMT_MONOBLACK:
        *yuv2packed1 = yuv2monoblack_1_c;
        *yuv2packed2 = yuv2monoblack_2_c;
        *yuv2packedX = yuv2monoblack_X_c;
        break;
    case AV_PIX_FMT_YUYV422:
        *yuv2packed1 = yuv2yuyv422_1_c;
        *yuv2packed2 = yuv2yuyv422_2_c;
        *yuv2packedX = yuv2yuyv422_X_c;
        break;
    case AV_PIX_FMT_YVYU422:
        *yuv2packed1 = yuv2yvyu422_1_c;
        *yuv2packed2 = yuv2yvyu422_2_c;
        *yuv2packedX = yuv2yvyu422_X_c;
        break;
    case AV_PIX_FMT_UYVY422:
        *yuv2packed1 = yuv2uyvy422_1_c;
        *yuv2packed2 = yuv2uyvy422_2_c;
        *yuv2packedX = yuv2uyvy422_X_c;
        break;
    case AV_PIX_FMT_YA8:
        *yuv2packed1 = yuv2ya8_1_c;
        *yuv2packed2 = yuv2ya8_2_c;
        *yuv2packedX = yuv2ya8_X_c;
        break;
    case AV_PIX_FMT_AYUV64LE:
        *yuv2packedX = yuv2ayuv64le_X_c;
        break;
    }
}