2 * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 #include "libavutil/attributes.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/bswap.h"
30 #include "libavutil/cpu.h"
31 #include "libavutil/intreadwrite.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
37 #include "swscale_internal.h"
39 DECLARE_ALIGNED(8, const uint8_t, ff_dither_2x2_4)[][8] = {
40 { 1, 3, 1, 3, 1, 3, 1, 3, },
41 { 2, 0, 2, 0, 2, 0, 2, 0, },
42 { 1, 3, 1, 3, 1, 3, 1, 3, },
45 DECLARE_ALIGNED(8, const uint8_t, ff_dither_2x2_8)[][8] = {
46 { 6, 2, 6, 2, 6, 2, 6, 2, },
47 { 0, 4, 0, 4, 0, 4, 0, 4, },
48 { 6, 2, 6, 2, 6, 2, 6, 2, },
51 DECLARE_ALIGNED(8, const uint8_t, ff_dither_4x4_16)[][8] = {
52 { 8, 4, 11, 7, 8, 4, 11, 7, },
53 { 2, 14, 1, 13, 2, 14, 1, 13, },
54 { 10, 6, 9, 5, 10, 6, 9, 5, },
55 { 0, 12, 3, 15, 0, 12, 3, 15, },
56 { 8, 4, 11, 7, 8, 4, 11, 7, },
59 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_32)[][8] = {
60 { 17, 9, 23, 15, 16, 8, 22, 14, },
61 { 5, 29, 3, 27, 4, 28, 2, 26, },
62 { 21, 13, 19, 11, 20, 12, 18, 10, },
63 { 0, 24, 6, 30, 1, 25, 7, 31, },
64 { 16, 8, 22, 14, 17, 9, 23, 15, },
65 { 4, 28, 2, 26, 5, 29, 3, 27, },
66 { 20, 12, 18, 10, 21, 13, 19, 11, },
67 { 1, 25, 7, 31, 0, 24, 6, 30, },
68 { 17, 9, 23, 15, 16, 8, 22, 14, },
71 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_73)[][8] = {
72 { 0, 55, 14, 68, 3, 58, 17, 72, },
73 { 37, 18, 50, 32, 40, 22, 54, 35, },
74 { 9, 64, 5, 59, 13, 67, 8, 63, },
75 { 46, 27, 41, 23, 49, 31, 44, 26, },
76 { 2, 57, 16, 71, 1, 56, 15, 70, },
77 { 39, 21, 52, 34, 38, 19, 51, 33, },
78 { 11, 66, 7, 62, 10, 65, 6, 60, },
79 { 48, 30, 43, 25, 47, 29, 42, 24, },
80 { 0, 55, 14, 68, 3, 58, 17, 72, },
84 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
85 {117, 62, 158, 103, 113, 58, 155, 100, },
86 { 34, 199, 21, 186, 31, 196, 17, 182, },
87 {144, 89, 131, 76, 141, 86, 127, 72, },
88 { 0, 165, 41, 206, 10, 175, 52, 217, },
89 {110, 55, 151, 96, 120, 65, 162, 107, },
90 { 28, 193, 14, 179, 38, 203, 24, 189, },
91 {138, 83, 124, 69, 148, 93, 134, 79, },
92 { 7, 172, 48, 213, 3, 168, 45, 210, },
93 {117, 62, 158, 103, 113, 58, 155, 100, },
96 // tries to correct a gamma of 1.5
97 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
98 { 0, 143, 18, 200, 2, 156, 25, 215, },
99 { 78, 28, 125, 64, 89, 36, 138, 74, },
100 { 10, 180, 3, 161, 16, 195, 8, 175, },
101 {109, 51, 93, 38, 121, 60, 105, 47, },
102 { 1, 152, 23, 210, 0, 147, 20, 205, },
103 { 85, 33, 134, 71, 81, 30, 130, 67, },
104 { 14, 190, 6, 171, 12, 185, 5, 166, },
105 {117, 57, 101, 44, 113, 54, 97, 41, },
106 { 0, 143, 18, 200, 2, 156, 25, 215, },
109 // tries to correct a gamma of 2.0
110 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
111 { 0, 124, 8, 193, 0, 140, 12, 213, },
112 { 55, 14, 104, 42, 66, 19, 119, 52, },
113 { 3, 168, 1, 145, 6, 187, 3, 162, },
114 { 86, 31, 70, 21, 99, 39, 82, 28, },
115 { 0, 134, 11, 206, 0, 129, 9, 200, },
116 { 62, 17, 114, 48, 58, 16, 109, 45, },
117 { 5, 181, 2, 157, 4, 175, 1, 151, },
118 { 95, 36, 78, 26, 90, 34, 74, 24, },
119 { 0, 124, 8, 193, 0, 140, 12, 213, },
122 // tries to correct a gamma of 2.5
123 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
124 { 0, 107, 3, 187, 0, 125, 6, 212, },
125 { 39, 7, 86, 28, 49, 11, 102, 36, },
126 { 1, 158, 0, 131, 3, 180, 1, 151, },
127 { 68, 19, 52, 12, 81, 25, 64, 17, },
128 { 0, 119, 5, 203, 0, 113, 4, 195, },
129 { 45, 9, 96, 33, 42, 8, 91, 30, },
130 { 2, 172, 1, 144, 2, 165, 0, 137, },
131 { 77, 23, 60, 15, 72, 21, 56, 14, },
132 { 0, 107, 3, 187, 0, 125, 6, 212, },
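/* Usage sketch (illustrative): the ordered-dither matrices above are indexed
 * by output position and added to a sample or LUT index before it is
 * quantized, roughly
 *     d   = ff_dither_8x8_220[y & 7][x & 7];
 *     out = quantize(val + d);
 * The numeric suffix (_4, _8, _16, _32, _73, _220) roughly reflects the
 * amplitude of the dither values, i.e. the quantization step it has to hide;
 * the extra ff_dither_8x8_220 definitions marked "gamma" are alternative,
 * gamma-corrected versions of the same matrix. */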
136 #define output_pixel(pos, val, bias, signedness) \
138 AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
140 AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
143 static av_always_inline void
144 yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW,
145 int big_endian, int output_bits)
149 av_assert0(output_bits == 16);
151 for (i = 0; i < dstW; i++) {
152 int val = src[i] + (1 << (shift - 1));
153 output_pixel(&dest[i], val, 0, uint);
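/* Worked example, assuming shift == 3 on this 16-bit path: the intermediates
 * carry 19 significant bits, so adding 1 << (shift - 1) == 4 before the >> 3
 * in output_pixel() rounds to nearest when reducing to 16 bits, e.g.
 * src[i] = 0x7FFFC -> (0x7FFFC + 4) >> 3 = 0x10000, clipped to 0xFFFF. */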
157 static av_always_inline void
158 yuv2planeX_16_c_template(const int16_t *filter, int filterSize,
159 const int32_t **src, uint16_t *dest, int dstW,
160 int big_endian, int output_bits)
164 av_assert0(output_bits == 16);
166 for (i = 0; i < dstW; i++) {
167 int val = 1 << (shift - 1);
170 /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
171 * filters (or anything with negative coeffs), the range can be slightly
172 * wider in both directions. To account for this overflow, we subtract
173 * a constant so it always fits in the signed range (assuming a
174 * reasonable filterSize), and re-add that at the end. */
176 for (j = 0; j < filterSize; j++)
177 val += src[j][i] * (unsigned)filter[j];
179 output_pixel(&dest[i], val, 0x8000, int);
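/* Sketch of the bias trick from the comment above, assuming shift == 15:
 * pre-biasing the accumulator by -0x40000000 becomes -0x8000 after the >> 15
 * in output_pixel(), av_clip_int16() then clips in the signed range, and the
 * 0x8000 bias argument adds the offset back, giving the unsigned 16-bit
 * result without overflowing a signed 32-bit accumulator. */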
183 static void yuv2p016cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
184 const int16_t *chrFilter, int chrFilterSize,
185 const int16_t **chrUSrc, const int16_t **chrVSrc,
186 uint8_t *dest8, int chrDstW)
188 uint16_t *dest = (uint16_t*)dest8;
189 const int32_t **uSrc = (const int32_t **)chrUSrc;
190 const int32_t **vSrc = (const int32_t **)chrVSrc;
192 int big_endian = dstFormat == AV_PIX_FMT_P016BE;
195 for (i = 0; i < chrDstW; i++) {
196 int u = 1 << (shift - 1);
197 int v = 1 << (shift - 1);
199 /* See yuv2planeX_16_c_template for details. */
202 for (j = 0; j < chrFilterSize; j++) {
203 u += uSrc[j][i] * (unsigned)chrFilter[j];
204 v += vSrc[j][i] * (unsigned)chrFilter[j];
207 output_pixel(&dest[2*i] , u, 0x8000, int);
208 output_pixel(&dest[2*i+1], v, 0x8000, int);
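/* P016 is semi-planar like NV12 but with 16 bits per sample: the chroma plane
 * interleaves U and V words (dest[2*i] = U, dest[2*i+1] = V), with the byte
 * order selected by AV_PIX_FMT_P016BE vs. AV_PIX_FMT_P016LE. */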
212 static av_always_inline void
213 yuv2plane1_float_c_template(const int32_t *src, float *dest, int dstW)
215 static const int big_endian = HAVE_BIGENDIAN;
216 static const int shift = 3;
217 static const float float_mult = 1.0f / 65535.0f;
221 for (i = 0; i < dstW; ++i){
222 val = src[i] + (1 << (shift - 1));
223 output_pixel(&val_uint, val, 0, uint);
224 dest[i] = float_mult * (float)val_uint;
228 static av_always_inline void
229 yuv2plane1_float_bswap_c_template(const int32_t *src, uint32_t *dest, int dstW)
231 static const int big_endian = HAVE_BIGENDIAN;
232 static const int shift = 3;
233 static const float float_mult = 1.0f / 65535.0f;
237 for (i = 0; i < dstW; ++i){
238 val = src[i] + (1 << (shift - 1));
239 output_pixel(&val_uint, val, 0, uint);
240 dest[i] = av_bswap32(av_float2int(float_mult * (float)val_uint));
244 static av_always_inline void
245 yuv2planeX_float_c_template(const int16_t *filter, int filterSize, const int32_t **src,
246 float *dest, int dstW)
248 static const int big_endian = HAVE_BIGENDIAN;
249 static const int shift = 15;
250 static const float float_mult = 1.0f / 65535.0f;
254 for (i = 0; i < dstW; ++i){
255 val = (1 << (shift - 1)) - 0x40000000;
256 for (j = 0; j < filterSize; ++j){
257 val += src[j][i] * (unsigned)filter[j];
259 output_pixel(&val_uint, val, 0x8000, int);
260 dest[i] = float_mult * (float)val_uint;
264 static av_always_inline void
265 yuv2planeX_float_bswap_c_template(const int16_t *filter, int filterSize, const int32_t **src,
266 uint32_t *dest, int dstW)
268 static const int big_endian = HAVE_BIGENDIAN;
269 static const int shift = 15;
270 static const float float_mult = 1.0f / 65535.0f;
274 for (i = 0; i < dstW; ++i){
275 val = (1 << (shift - 1)) - 0x40000000;
276 for (j = 0; j < filterSize; ++j){
277 val += src[j][i] * (unsigned)filter[j];
279 output_pixel(&val_uint, val, 0x8000, int);
280 dest[i] = av_bswap32(av_float2int(float_mult * (float)val_uint));
284 #define yuv2plane1_float(template, dest_type, BE_LE) \
285 static void yuv2plane1_float ## BE_LE ## _c(const int16_t *src, uint8_t *dest, int dstW, \
286 const uint8_t *dither, int offset) \
288 template((const int32_t *)src, (dest_type *)dest, dstW); \
291 #define yuv2planeX_float(template, dest_type, BE_LE) \
292 static void yuv2planeX_float ## BE_LE ## _c(const int16_t *filter, int filterSize, \
293 const int16_t **src, uint8_t *dest, int dstW, \
294 const uint8_t *dither, int offset) \
296 template(filter, filterSize, (const int32_t **)src, (dest_type *)dest, dstW); \
300 yuv2plane1_float(yuv2plane1_float_c_template, float, BE)
301 yuv2plane1_float(yuv2plane1_float_bswap_c_template, uint32_t, LE)
302 yuv2planeX_float(yuv2planeX_float_c_template, float, BE)
303 yuv2planeX_float(yuv2planeX_float_bswap_c_template, uint32_t, LE)
305 yuv2plane1_float(yuv2plane1_float_c_template, float, LE)
306 yuv2plane1_float(yuv2plane1_float_bswap_c_template, uint32_t, BE)
307 yuv2planeX_float(yuv2planeX_float_c_template, float, LE)
308 yuv2planeX_float(yuv2planeX_float_bswap_c_template, uint32_t, BE)
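/* The float writers normalize the 16-bit intermediate to [0.0, 1.0] via
 * float_mult = 1.0f / 65535.0f (e.g. a clipped value of 65535 maps to exactly
 * 1.0f); the _bswap variants byte-swap the IEEE-754 bit pattern so that both
 * little- and big-endian float output can be produced on either kind of
 * host. */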
313 #define output_pixel(pos, val) \
315 AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
317 AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
320 static av_always_inline void
321 yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW,
322 int big_endian, int output_bits)
325 int shift = 15 - output_bits;
327 for (i = 0; i < dstW; i++) {
328 int val = src[i] + (1 << (shift - 1));
329 output_pixel(&dest[i], val);
333 static av_always_inline void
334 yuv2planeX_10_c_template(const int16_t *filter, int filterSize,
335 const int16_t **src, uint16_t *dest, int dstW,
336 int big_endian, int output_bits)
339 int shift = 11 + 16 - output_bits;
341 for (i = 0; i < dstW; i++) {
342 int val = 1 << (shift - 1);
345 for (j = 0; j < filterSize; j++)
346 val += src[j][i] * filter[j];
348 output_pixel(&dest[i], val);
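/* Worked example: for output_bits == 10 the filtered path uses
 * shift = 11 + 16 - 10 = 17, consistent with 15-bit samples times 12-bit
 * filter coefficients (15 + 12 = 27 fractional bits, 27 - 17 = 10), while the
 * unfiltered path above uses shift = 15 - 10 = 5. */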
354 #define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
355 static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
356 uint8_t *dest, int dstW, \
357 const uint8_t *dither, int offset)\
359 yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
360 (uint16_t *) dest, dstW, is_be, bits); \
362 static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
363 const int16_t **src, uint8_t *dest, int dstW, \
364 const uint8_t *dither, int offset)\
366 yuv2planeX_## template_size ## _c_template(filter, \
367 filterSize, (const typeX_t **) src, \
368 (uint16_t *) dest, dstW, is_be, bits); \
370 yuv2NBPS( 9, BE, 1, 10, int16_t)
371 yuv2NBPS( 9, LE, 0, 10, int16_t)
372 yuv2NBPS(10, BE, 1, 10, int16_t)
373 yuv2NBPS(10, LE, 0, 10, int16_t)
374 yuv2NBPS(12, BE, 1, 10, int16_t)
375 yuv2NBPS(12, LE, 0, 10, int16_t)
376 yuv2NBPS(14, BE, 1, 10, int16_t)
377 yuv2NBPS(14, LE, 0, 10, int16_t)
378 yuv2NBPS(16, BE, 1, 16, int32_t)
379 yuv2NBPS(16, LE, 0, 16, int32_t)
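/* 9-, 10-, 12- and 14-bit output all share the "10" template and operate on
 * int16_t intermediates; only the 16-bit variants switch to the "16" template
 * with int32_t intermediates, where the wider accumulator is needed. */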
381 static void yuv2planeX_8_c(const int16_t *filter, int filterSize,
382 const int16_t **src, uint8_t *dest, int dstW,
383 const uint8_t *dither, int offset)
386 for (i=0; i<dstW; i++) {
387 int val = dither[(i + offset) & 7] << 12;
389 for (j=0; j<filterSize; j++)
390 val += src[j][i] * filter[j];
392 dest[i]= av_clip_uint8(val>>19);
396 static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW,
397 const uint8_t *dither, int offset)
400 for (i=0; i<dstW; i++) {
401 int val = (src[i] + dither[(i + offset) & 7]) >> 7;
402 dest[i]= av_clip_uint8(val);
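/* The plain 8-bit writer drops 7 fractional bits; instead of a fixed +64
 * rounding term it adds a per-pixel value from the 8-entry dither table
 * (rotated by 'offset'):
 *     dest[i] = av_clip_uint8((src[i] + dither[(i + offset) & 7]) >> 7); */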
406 static void yuv2nv12cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
407 const int16_t *chrFilter, int chrFilterSize,
408 const int16_t **chrUSrc, const int16_t **chrVSrc,
409 uint8_t *dest, int chrDstW)
413 if (dstFormat == AV_PIX_FMT_NV12 ||
414 dstFormat == AV_PIX_FMT_NV24)
415 for (i=0; i<chrDstW; i++) {
416 int u = chrDither[i & 7] << 12;
417 int v = chrDither[(i + 3) & 7] << 12;
419 for (j=0; j<chrFilterSize; j++) {
420 u += chrUSrc[j][i] * chrFilter[j];
421 v += chrVSrc[j][i] * chrFilter[j];
424 dest[2*i]= av_clip_uint8(u>>19);
425 dest[2*i+1]= av_clip_uint8(v>>19);
428 for (i=0; i<chrDstW; i++) {
429 int u = chrDither[i & 7] << 12;
430 int v = chrDither[(i + 3) & 7] << 12;
432 for (j=0; j<chrFilterSize; j++) {
433 u += chrUSrc[j][i] * chrFilter[j];
434 v += chrVSrc[j][i] * chrFilter[j];
437 dest[2*i]= av_clip_uint8(v>>19);
438 dest[2*i+1]= av_clip_uint8(u>>19);
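/* For NV12/NV24 the interleaved chroma plane is ordered U,V; the else branch
 * handles the swapped variants (e.g. NV21/NV42) and writes V first. The
 * (i + 3) offset into chrDither presumably just decorrelates the U and V
 * dither patterns. */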
443 #define output_pixel(pos, val) \
445 AV_WB16(pos, av_clip_uintp2(val >> shift, 10) << 6); \
447 AV_WL16(pos, av_clip_uintp2(val >> shift, 10) << 6); \
450 static void yuv2p010l1_c(const int16_t *src,
451 uint16_t *dest, int dstW,
457 for (i = 0; i < dstW; i++) {
458 int val = src[i] + (1 << (shift - 1));
459 output_pixel(&dest[i], val);
463 static void yuv2p010lX_c(const int16_t *filter, int filterSize,
464 const int16_t **src, uint16_t *dest, int dstW,
470 for (i = 0; i < dstW; i++) {
471 int val = 1 << (shift - 1);
473 for (j = 0; j < filterSize; j++)
474 val += src[j][i] * filter[j];
476 output_pixel(&dest[i], val);
480 static void yuv2p010cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
481 const int16_t *chrFilter, int chrFilterSize,
482 const int16_t **chrUSrc, const int16_t **chrVSrc,
483 uint8_t *dest8, int chrDstW)
485 uint16_t *dest = (uint16_t*)dest8;
487 int big_endian = dstFormat == AV_PIX_FMT_P010BE;
490 for (i = 0; i < chrDstW; i++) {
491 int u = 1 << (shift - 1);
492 int v = 1 << (shift - 1);
494 for (j = 0; j < chrFilterSize; j++) {
495 u += chrUSrc[j][i] * chrFilter[j];
496 v += chrVSrc[j][i] * chrFilter[j];
499 output_pixel(&dest[2*i] , u);
500 output_pixel(&dest[2*i+1], v);
504 static void yuv2p010l1_LE_c(const int16_t *src,
505 uint8_t *dest, int dstW,
506 const uint8_t *dither, int offset)
508 yuv2p010l1_c(src, (uint16_t*)dest, dstW, 0);
511 static void yuv2p010l1_BE_c(const int16_t *src,
512 uint8_t *dest, int dstW,
513 const uint8_t *dither, int offset)
515 yuv2p010l1_c(src, (uint16_t*)dest, dstW, 1);
518 static void yuv2p010lX_LE_c(const int16_t *filter, int filterSize,
519 const int16_t **src, uint8_t *dest, int dstW,
520 const uint8_t *dither, int offset)
522 yuv2p010lX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 0);
525 static void yuv2p010lX_BE_c(const int16_t *filter, int filterSize,
526 const int16_t **src, uint8_t *dest, int dstW,
527 const uint8_t *dither, int offset)
529 yuv2p010lX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 1);
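/* P010 keeps only 10 significant bits: output_pixel() clips with
 * av_clip_uintp2(val >> shift, 10) and shifts left by 6 so the payload sits in
 * the most significant bits of each 16-bit word, e.g. a clipped value of 0x3FF
 * is stored as 0xFFC0. */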
535 #define accumulate_bit(acc, val) \
538 #define output_pixel(pos, acc) \
539 if (target == AV_PIX_FMT_MONOBLACK) { \
545 static av_always_inline void
546 yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter,
547 const int16_t **lumSrc, int lumFilterSize,
548 const int16_t *chrFilter, const int16_t **chrUSrc,
549 const int16_t **chrVSrc, int chrFilterSize,
550 const int16_t **alpSrc, uint8_t *dest, int dstW,
551 int y, enum AVPixelFormat target)
553 const uint8_t * const d128 = ff_dither_8x8_220[y&7];
558 for (i = 0; i < dstW; i += 2) {
563 for (j = 0; j < lumFilterSize; j++) {
564 Y1 += lumSrc[j][i] * lumFilter[j];
565 Y2 += lumSrc[j][i+1] * lumFilter[j];
569 if ((Y1 | Y2) & 0x100) {
570 Y1 = av_clip_uint8(Y1);
571 Y2 = av_clip_uint8(Y2);
573 if (c->dither == SWS_DITHER_ED) {
574 Y1 += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
575 c->dither_error[0][i] = err;
576 acc = 2*acc + (Y1 >= 128);
579 err = Y2 + ((7*Y1 + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4);
580 c->dither_error[0][i+1] = Y1;
581 acc = 2*acc + (err >= 128);
584 accumulate_bit(acc, Y1 + d128[(i + 0) & 7]);
585 accumulate_bit(acc, Y2 + d128[(i + 1) & 7]);
588 output_pixel(*dest++, acc);
591 c->dither_error[0][i] = err;
594 output_pixel(*dest, acc);
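/* The SWS_DITHER_ED branch above is Floyd-Steinberg-style error diffusion for
 * 1-bit output: quantization error is spread with weights 7/16 from the pixel
 * to the left and 1/16, 5/16, 3/16 from the previous line (the +8 rounds the
 * >> 4), while the default branch simply adds ff_dither_8x8_220 thresholds
 * before accumulating each bit. */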
598 static av_always_inline void
599 yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2],
600 const int16_t *ubuf[2], const int16_t *vbuf[2],
601 const int16_t *abuf[2], uint8_t *dest, int dstW,
602 int yalpha, int uvalpha, int y,
603 enum AVPixelFormat target)
605 const int16_t *buf0 = buf[0], *buf1 = buf[1];
606 const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
607 int yalpha1 = 4096 - yalpha;
609 av_assert2(yalpha <= 4096U);
611 if (c->dither == SWS_DITHER_ED) {
614 for (i = 0; i < dstW; i +=2) {
617 Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
618 Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
619 c->dither_error[0][i] = err;
620 acc = 2*acc + (Y >= 128);
623 err = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
624 err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
625 c->dither_error[0][i+1] = Y;
626 acc = 2*acc + (err >= 128);
630 output_pixel(*dest++, acc);
632 c->dither_error[0][i] = err;
634 for (i = 0; i < dstW; i += 8) {
637 Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
638 accumulate_bit(acc, Y + d128[0]);
639 Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
640 accumulate_bit(acc, Y + d128[1]);
641 Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
642 accumulate_bit(acc, Y + d128[2]);
643 Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
644 accumulate_bit(acc, Y + d128[3]);
645 Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
646 accumulate_bit(acc, Y + d128[4]);
647 Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
648 accumulate_bit(acc, Y + d128[5]);
649 Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
650 accumulate_bit(acc, Y + d128[6]);
651 Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
652 accumulate_bit(acc, Y + d128[7]);
654 output_pixel(*dest++, acc);
659 static av_always_inline void
660 yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0,
661 const int16_t *ubuf[2], const int16_t *vbuf[2],
662 const int16_t *abuf0, uint8_t *dest, int dstW,
663 int uvalpha, int y, enum AVPixelFormat target)
665 const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
668 if (c->dither == SWS_DITHER_ED) {
671 for (i = 0; i < dstW; i +=2) {
674 Y = ((buf0[i + 0] + 64) >> 7);
675 Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
676 c->dither_error[0][i] = err;
677 acc = 2*acc + (Y >= 128);
680 err = ((buf0[i + 1] + 64) >> 7);
681 err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
682 c->dither_error[0][i+1] = Y;
683 acc = 2*acc + (err >= 128);
687 output_pixel(*dest++, acc);
689 c->dither_error[0][i] = err;
691 for (i = 0; i < dstW; i += 8) {
693 accumulate_bit(acc, ((buf0[i + 0] + 64) >> 7) + d128[0]);
694 accumulate_bit(acc, ((buf0[i + 1] + 64) >> 7) + d128[1]);
695 accumulate_bit(acc, ((buf0[i + 2] + 64) >> 7) + d128[2]);
696 accumulate_bit(acc, ((buf0[i + 3] + 64) >> 7) + d128[3]);
697 accumulate_bit(acc, ((buf0[i + 4] + 64) >> 7) + d128[4]);
698 accumulate_bit(acc, ((buf0[i + 5] + 64) >> 7) + d128[5]);
699 accumulate_bit(acc, ((buf0[i + 6] + 64) >> 7) + d128[6]);
700 accumulate_bit(acc, ((buf0[i + 7] + 64) >> 7) + d128[7]);
702 output_pixel(*dest++, acc);
708 #undef accumulate_bit
710 #define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
711 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
712 const int16_t **lumSrc, int lumFilterSize, \
713 const int16_t *chrFilter, const int16_t **chrUSrc, \
714 const int16_t **chrVSrc, int chrFilterSize, \
715 const int16_t **alpSrc, uint8_t *dest, int dstW, \
718 name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
719 chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
720 alpSrc, dest, dstW, y, fmt); \
723 static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
724 const int16_t *ubuf[2], const int16_t *vbuf[2], \
725 const int16_t *abuf[2], uint8_t *dest, int dstW, \
726 int yalpha, int uvalpha, int y) \
728 name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
729 dest, dstW, yalpha, uvalpha, y, fmt); \
732 static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
733 const int16_t *ubuf[2], const int16_t *vbuf[2], \
734 const int16_t *abuf0, uint8_t *dest, int dstW, \
735 int uvalpha, int y) \
737 name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
738 abuf0, dest, dstW, uvalpha, \
742 YUV2PACKEDWRAPPER(yuv2mono,, white, AV_PIX_FMT_MONOWHITE)
743 YUV2PACKEDWRAPPER(yuv2mono,, black, AV_PIX_FMT_MONOBLACK)
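/* MONOWHITE and MONOBLACK only differ in bit polarity (0 is white in the
 * former and black in the latter), so they share the same templates and the
 * mono output_pixel() macro presumably just inverts the accumulated byte for
 * one of them. */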
745 #define output_pixels(pos, Y1, U, Y2, V) \
746 if (target == AV_PIX_FMT_YUYV422) { \
747 dest[pos + 0] = Y1; \
749 dest[pos + 2] = Y2; \
751 } else if (target == AV_PIX_FMT_YVYU422) { \
752 dest[pos + 0] = Y1; \
754 dest[pos + 2] = Y2; \
756 } else { /* AV_PIX_FMT_UYVY422 */ \
758 dest[pos + 1] = Y1; \
760 dest[pos + 3] = Y2; \
763 static av_always_inline void
764 yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
765 const int16_t **lumSrc, int lumFilterSize,
766 const int16_t *chrFilter, const int16_t **chrUSrc,
767 const int16_t **chrVSrc, int chrFilterSize,
768 const int16_t **alpSrc, uint8_t *dest, int dstW,
769 int y, enum AVPixelFormat target)
773 for (i = 0; i < ((dstW + 1) >> 1); i++) {
780 for (j = 0; j < lumFilterSize; j++) {
781 Y1 += lumSrc[j][i * 2] * lumFilter[j];
782 Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
784 for (j = 0; j < chrFilterSize; j++) {
785 U += chrUSrc[j][i] * chrFilter[j];
786 V += chrVSrc[j][i] * chrFilter[j];
792 if ((Y1 | Y2 | U | V) & 0x100) {
793 Y1 = av_clip_uint8(Y1);
794 Y2 = av_clip_uint8(Y2);
795 U = av_clip_uint8(U);
796 V = av_clip_uint8(V);
798 output_pixels(4*i, Y1, U, Y2, V);
802 static av_always_inline void
803 yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
804 const int16_t *ubuf[2], const int16_t *vbuf[2],
805 const int16_t *abuf[2], uint8_t *dest, int dstW,
806 int yalpha, int uvalpha, int y,
807 enum AVPixelFormat target)
809 const int16_t *buf0 = buf[0], *buf1 = buf[1],
810 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
811 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
812 int yalpha1 = 4096 - yalpha;
813 int uvalpha1 = 4096 - uvalpha;
815 av_assert2(yalpha <= 4096U);
816 av_assert2(uvalpha <= 4096U);
818 for (i = 0; i < ((dstW + 1) >> 1); i++) {
819 int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
820 int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
821 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
822 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
824 if ((Y1 | Y2 | U | V) & 0x100) {
825 Y1 = av_clip_uint8(Y1);
826 Y2 = av_clip_uint8(Y2);
827 U = av_clip_uint8(U);
828 V = av_clip_uint8(V);
831 output_pixels(i * 4, Y1, U, Y2, V);
835 static av_always_inline void
836 yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
837 const int16_t *ubuf[2], const int16_t *vbuf[2],
838 const int16_t *abuf0, uint8_t *dest, int dstW,
839 int uvalpha, int y, enum AVPixelFormat target)
841 const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
844 if (uvalpha < 2048) {
845 for (i = 0; i < ((dstW + 1) >> 1); i++) {
846 int Y1 = (buf0[i * 2 ]+64) >> 7;
847 int Y2 = (buf0[i * 2 + 1]+64) >> 7;
848 int U = (ubuf0[i] +64) >> 7;
849 int V = (vbuf0[i] +64) >> 7;
851 if ((Y1 | Y2 | U | V) & 0x100) {
852 Y1 = av_clip_uint8(Y1);
853 Y2 = av_clip_uint8(Y2);
854 U = av_clip_uint8(U);
855 V = av_clip_uint8(V);
858 output_pixels(i * 4, Y1, U, Y2, V);
861 const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
862 for (i = 0; i < ((dstW + 1) >> 1); i++) {
863 int Y1 = (buf0[i * 2 ] + 64) >> 7;
864 int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
865 int U = (ubuf0[i] + ubuf1[i]+128) >> 8;
866 int V = (vbuf0[i] + vbuf1[i]+128) >> 8;
868 if ((Y1 | Y2 | U | V) & 0x100) {
869 Y1 = av_clip_uint8(Y1);
870 Y2 = av_clip_uint8(Y2);
871 U = av_clip_uint8(U);
872 V = av_clip_uint8(V);
875 output_pixels(i * 4, Y1, U, Y2, V);
882 YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
883 YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
884 YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)
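/* Byte order per two-pixel group, as laid out by output_pixels() above:
 *     YUYV422: Y0 U Y1 V
 *     YVYU422: Y0 V Y1 U
 *     UYVY422: U Y0 V Y1
 * One U/V pair is shared by two luma samples, which is why these loops run
 * (dstW + 1) >> 1 times. */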
886 #define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? R : B)
887 #define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? B : R)
888 #define output_pixel(pos, val) \
889 if (isBE(target)) { \
895 static av_always_inline void
896 yuv2ya16_X_c_template(SwsContext *c, const int16_t *lumFilter,
897 const int32_t **lumSrc, int lumFilterSize,
898 const int16_t *chrFilter, const int32_t **unused_chrUSrc,
899 const int32_t **unused_chrVSrc, int unused_chrFilterSize,
900 const int32_t **alpSrc, uint16_t *dest, int dstW,
901 int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
903 int hasAlpha = !!alpSrc;
906 for (i = 0; i < dstW; i++) {
911 for (j = 0; j < lumFilterSize; j++)
912 Y += lumSrc[j][i] * lumFilter[j];
915 Y += (1<<3) + 0x8000;
916 Y = av_clip_uint16(Y);
919 A = -0x40000000 + (1<<14);
920 for (j = 0; j < lumFilterSize; j++)
921 A += alpSrc[j][i] * lumFilter[j];
925 A = av_clip_uint16(A);
928 output_pixel(&dest[2 * i ], Y);
929 output_pixel(&dest[2 * i + 1], A);
933 static av_always_inline void
934 yuv2ya16_2_c_template(SwsContext *c, const int32_t *buf[2],
935 const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2],
936 const int32_t *abuf[2], uint16_t *dest, int dstW,
937 int yalpha, int unused_uvalpha, int y,
938 enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
940 int hasAlpha = abuf && abuf[0] && abuf[1];
941 const int32_t *buf0 = buf[0], *buf1 = buf[1],
942 *abuf0 = hasAlpha ? abuf[0] : NULL,
943 *abuf1 = hasAlpha ? abuf[1] : NULL;
944 int yalpha1 = 4096 - yalpha;
947 av_assert2(yalpha <= 4096U);
949 for (i = 0; i < dstW; i++) {
950 int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 15;
953 Y = av_clip_uint16(Y);
956 A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 15;
957 A = av_clip_uint16(A);
960 output_pixel(&dest[2 * i ], Y);
961 output_pixel(&dest[2 * i + 1], hasAlpha ? A : 65535);
965 static av_always_inline void
966 yuv2ya16_1_c_template(SwsContext *c, const int32_t *buf0,
967 const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2],
968 const int32_t *abuf0, uint16_t *dest, int dstW,
969 int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
971 int hasAlpha = !!abuf0;
974 for (i = 0; i < dstW; i++) {
975 int Y = buf0[i] >> 3;/* 19 - 16 */
978 Y = av_clip_uint16(Y);
983 A = av_clip_uint16(A);
986 output_pixel(&dest[2 * i ], Y);
987 output_pixel(&dest[2 * i + 1], hasAlpha ? A : 65535);
991 static av_always_inline void
992 yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter,
993 const int32_t **lumSrc, int lumFilterSize,
994 const int16_t *chrFilter, const int32_t **chrUSrc,
995 const int32_t **chrVSrc, int chrFilterSize,
996 const int32_t **alpSrc, uint16_t *dest, int dstW,
997 int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1000 int A1 = 0xffff<<14, A2 = 0xffff<<14;
1002 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1004 int Y1 = -0x40000000;
1005 int Y2 = -0x40000000;
1006 int U = -(128 << 23); // 19
1007 int V = -(128 << 23);
1010 for (j = 0; j < lumFilterSize; j++) {
1011 Y1 += lumSrc[j][i * 2] * (unsigned)lumFilter[j];
1012 Y2 += lumSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
1014 for (j = 0; j < chrFilterSize; j++) {
1015 U += chrUSrc[j][i] * (unsigned)chrFilter[j];
1016 V += chrVSrc[j][i] * (unsigned)chrFilter[j];
1022 for (j = 0; j < lumFilterSize; j++) {
1023 A1 += alpSrc[j][i * 2] * (unsigned)lumFilter[j];
1024 A2 += alpSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
1032 // 8 bits: 12+15=27; 16 bits: 12+19=31
1040 // 8 bits: 27 -> 17 bits, 16 bits: 31 - 14 = 17 bits
1041 Y1 -= c->yuv2rgb_y_offset;
1042 Y2 -= c->yuv2rgb_y_offset;
1043 Y1 *= c->yuv2rgb_y_coeff;
1044 Y2 *= c->yuv2rgb_y_coeff;
1045 Y1 += 1 << 13; // 21
1047 // 8 bits: 17 + 13 bits = 30 bits, 16 bits: 17 + 13 bits = 30 bits
1049 R = V * c->yuv2rgb_v2r_coeff;
1050 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1051 B = U * c->yuv2rgb_u2b_coeff;
1053 // 8 bits: 30 - 22 = 8 bits, 16 bits: 30 bits - 14 = 16 bits
1054 output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
1055 output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
1056 output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
1058 output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1059 output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
1060 output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
1061 output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
1062 output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1065 output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
1066 output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
1067 output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
1073 static av_always_inline void
1074 yuv2rgba64_2_c_template(SwsContext *c, const int32_t *buf[2],
1075 const int32_t *ubuf[2], const int32_t *vbuf[2],
1076 const int32_t *abuf[2], uint16_t *dest, int dstW,
1077 int yalpha, int uvalpha, int y,
1078 enum AVPixelFormat target, int hasAlpha, int eightbytes)
1080 const int32_t *buf0 = buf[0], *buf1 = buf[1],
1081 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1082 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1083 *abuf0 = hasAlpha ? abuf[0] : NULL,
1084 *abuf1 = hasAlpha ? abuf[1] : NULL;
1085 int yalpha1 = 4096 - yalpha;
1086 int uvalpha1 = 4096 - uvalpha;
1088 int A1 = 0xffff<<14, A2 = 0xffff<<14;
1090 av_assert2(yalpha <= 4096U);
1091 av_assert2(uvalpha <= 4096U);
1093 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1094 int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
1095 int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
1096 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
1097 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
1100 Y1 -= c->yuv2rgb_y_offset;
1101 Y2 -= c->yuv2rgb_y_offset;
1102 Y1 *= c->yuv2rgb_y_coeff;
1103 Y2 *= c->yuv2rgb_y_coeff;
1107 R = V * c->yuv2rgb_v2r_coeff;
1108 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1109 B = U * c->yuv2rgb_u2b_coeff;
1112 A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 1;
1113 A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 1;
1119 output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
1120 output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
1121 output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
1123 output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1124 output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
1125 output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
1126 output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
1127 output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1130 output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
1131 output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
1132 output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
1138 static av_always_inline void
1139 yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0,
1140 const int32_t *ubuf[2], const int32_t *vbuf[2],
1141 const int32_t *abuf0, uint16_t *dest, int dstW,
1142 int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1144 const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1146 int A1 = 0xffff<<14, A2= 0xffff<<14;
1148 if (uvalpha < 2048) {
1149 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1150 int Y1 = (buf0[i * 2] ) >> 2;
1151 int Y2 = (buf0[i * 2 + 1]) >> 2;
1152 int U = (ubuf0[i] - (128 << 11)) >> 2;
1153 int V = (vbuf0[i] - (128 << 11)) >> 2;
1156 Y1 -= c->yuv2rgb_y_offset;
1157 Y2 -= c->yuv2rgb_y_offset;
1158 Y1 *= c->yuv2rgb_y_coeff;
1159 Y2 *= c->yuv2rgb_y_coeff;
1164 A1 = abuf0[i * 2 ] << 11;
1165 A2 = abuf0[i * 2 + 1] << 11;
1171 R = V * c->yuv2rgb_v2r_coeff;
1172 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1173 B = U * c->yuv2rgb_u2b_coeff;
1175 output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
1176 output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
1177 output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
1179 output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1180 output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
1181 output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
1182 output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
1183 output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1186 output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
1187 output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
1188 output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
1193 const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1194 int A1 = 0xffff<<14, A2 = 0xffff<<14;
1195 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1196 int Y1 = (buf0[i * 2] ) >> 2;
1197 int Y2 = (buf0[i * 2 + 1]) >> 2;
1198 int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
1199 int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
1202 Y1 -= c->yuv2rgb_y_offset;
1203 Y2 -= c->yuv2rgb_y_offset;
1204 Y1 *= c->yuv2rgb_y_coeff;
1205 Y2 *= c->yuv2rgb_y_coeff;
1210 A1 = abuf0[i * 2 ] << 11;
1211 A2 = abuf0[i * 2 + 1] << 11;
1217 R = V * c->yuv2rgb_v2r_coeff;
1218 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1219 B = U * c->yuv2rgb_u2b_coeff;
1221 output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
1222 output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
1223 output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
1225 output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1226 output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
1227 output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
1228 output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
1229 output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1232 output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
1233 output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
1234 output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
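/* The *_full_* variants below emit one pixel per iteration with its own U/V
 * sample (full horizontal chroma resolution), whereas the functions above
 * share one chroma pair between two horizontally adjacent pixels. */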
1241 static av_always_inline void
1242 yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
1243 const int32_t **lumSrc, int lumFilterSize,
1244 const int16_t *chrFilter, const int32_t **chrUSrc,
1245 const int32_t **chrVSrc, int chrFilterSize,
1246 const int32_t **alpSrc, uint16_t *dest, int dstW,
1247 int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1252 for (i = 0; i < dstW; i++) {
1254 int Y = -0x40000000;
1255 int U = -(128 << 23); // 19
1256 int V = -(128 << 23);
1259 for (j = 0; j < lumFilterSize; j++) {
1260 Y += lumSrc[j][i] * (unsigned)lumFilter[j];
1262 for (j = 0; j < chrFilterSize; j++) {
1263 U += chrUSrc[j][i] * (unsigned)chrFilter[j];
1264 V += chrVSrc[j][i] * (unsigned)chrFilter[j];
1269 for (j = 0; j < lumFilterSize; j++) {
1270 A += alpSrc[j][i] * (unsigned)lumFilter[j];
1276 // 8 bits: 12+15=27; 16 bits: 12+19=31
1282 // 8bit: 27 -> 17bit, 16bit: 31 - 14 = 17bit
1283 Y -= c->yuv2rgb_y_offset;
1284 Y *= c->yuv2rgb_y_coeff;
1286 // 8bit: 17 + 13bit = 30bit, 16bit: 17 + 13bit = 30bit
1288 R = V * c->yuv2rgb_v2r_coeff;
1289 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1290 B = U * c->yuv2rgb_u2b_coeff;
1292 // 8bit: 30 - 22 = 8bit, 16bit: 30bit - 14 = 16bit
1293 output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1294 output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1295 output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1297 output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1305 static av_always_inline void
1306 yuv2rgba64_full_2_c_template(SwsContext *c, const int32_t *buf[2],
1307 const int32_t *ubuf[2], const int32_t *vbuf[2],
1308 const int32_t *abuf[2], uint16_t *dest, int dstW,
1309 int yalpha, int uvalpha, int y,
1310 enum AVPixelFormat target, int hasAlpha, int eightbytes)
1312 const int32_t *buf0 = buf[0], *buf1 = buf[1],
1313 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1314 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1315 *abuf0 = hasAlpha ? abuf[0] : NULL,
1316 *abuf1 = hasAlpha ? abuf[1] : NULL;
1317 int yalpha1 = 4096 - yalpha;
1318 int uvalpha1 = 4096 - uvalpha;
1322 av_assert2(yalpha <= 4096U);
1323 av_assert2(uvalpha <= 4096U);
1325 for (i = 0; i < dstW; i++) {
1326 int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 14;
1327 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
1328 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
1331 Y -= c->yuv2rgb_y_offset;
1332 Y *= c->yuv2rgb_y_coeff;
1335 R = V * c->yuv2rgb_v2r_coeff;
1336 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1337 B = U * c->yuv2rgb_u2b_coeff;
1340 A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 1;
1345 output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1346 output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1347 output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1349 output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1357 static av_always_inline void
1358 yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0,
1359 const int32_t *ubuf[2], const int32_t *vbuf[2],
1360 const int32_t *abuf0, uint16_t *dest, int dstW,
1361 int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1363 const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1367 if (uvalpha < 2048) {
1368 for (i = 0; i < dstW; i++) {
1369 int Y = (buf0[i]) >> 2;
1370 int U = (ubuf0[i] - (128 << 11)) >> 2;
1371 int V = (vbuf0[i] - (128 << 11)) >> 2;
1374 Y -= c->yuv2rgb_y_offset;
1375 Y *= c->yuv2rgb_y_coeff;
1384 R = V * c->yuv2rgb_v2r_coeff;
1385 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1386 B = U * c->yuv2rgb_u2b_coeff;
1388 output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1389 output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1390 output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1392 output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1399 const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1401 for (i = 0; i < dstW; i++) {
1402 int Y = (buf0[i] ) >> 2;
1403 int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
1404 int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
1407 Y -= c->yuv2rgb_y_offset;
1408 Y *= c->yuv2rgb_y_coeff;
1417 R = V * c->yuv2rgb_v2r_coeff;
1418 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1419 B = U * c->yuv2rgb_u2b_coeff;
1421 output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1422 output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1423 output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1425 output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1438 #define YUV2PACKED16WRAPPER(name, base, ext, fmt, hasAlpha, eightbytes) \
1439 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
1440 const int16_t **_lumSrc, int lumFilterSize, \
1441 const int16_t *chrFilter, const int16_t **_chrUSrc, \
1442 const int16_t **_chrVSrc, int chrFilterSize, \
1443 const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
1446 const int32_t **lumSrc = (const int32_t **) _lumSrc, \
1447 **chrUSrc = (const int32_t **) _chrUSrc, \
1448 **chrVSrc = (const int32_t **) _chrVSrc, \
1449 **alpSrc = (const int32_t **) _alpSrc; \
1450 uint16_t *dest = (uint16_t *) _dest; \
1451 name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
1452 chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
1453 alpSrc, dest, dstW, y, fmt, hasAlpha, eightbytes); \
1456 static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
1457 const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
1458 const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
1459 int yalpha, int uvalpha, int y) \
1461 const int32_t **buf = (const int32_t **) _buf, \
1462 **ubuf = (const int32_t **) _ubuf, \
1463 **vbuf = (const int32_t **) _vbuf, \
1464 **abuf = (const int32_t **) _abuf; \
1465 uint16_t *dest = (uint16_t *) _dest; \
1466 name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
1467 dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha, eightbytes); \
1470 static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
1471 const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
1472 const int16_t *_abuf0, uint8_t *_dest, int dstW, \
1473 int uvalpha, int y) \
1475 const int32_t *buf0 = (const int32_t *) _buf0, \
1476 **ubuf = (const int32_t **) _ubuf, \
1477 **vbuf = (const int32_t **) _vbuf, \
1478 *abuf0 = (const int32_t *) _abuf0; \
1479 uint16_t *dest = (uint16_t *) _dest; \
1480 name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
1481 dstW, uvalpha, y, fmt, hasAlpha, eightbytes); \
1484 YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48be, AV_PIX_FMT_RGB48BE, 0, 0)
1485 YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48le, AV_PIX_FMT_RGB48LE, 0, 0)
1486 YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48be, AV_PIX_FMT_BGR48BE, 0, 0)
1487 YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48le, AV_PIX_FMT_BGR48LE, 0, 0)
1488 YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64be, AV_PIX_FMT_RGBA64BE, 1, 1)
1489 YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64le, AV_PIX_FMT_RGBA64LE, 1, 1)
1490 YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64be, AV_PIX_FMT_RGBA64BE, 0, 1)
1491 YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64le, AV_PIX_FMT_RGBA64LE, 0, 1)
1492 YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64be, AV_PIX_FMT_BGRA64BE, 1, 1)
1493 YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64le, AV_PIX_FMT_BGRA64LE, 1, 1)
1494 YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64be, AV_PIX_FMT_BGRA64BE, 0, 1)
1495 YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64le, AV_PIX_FMT_BGRA64LE, 0, 1)
1496 YUV2PACKED16WRAPPER(yuv2, ya16, ya16be, AV_PIX_FMT_YA16BE, 1, 0)
1497 YUV2PACKED16WRAPPER(yuv2, ya16, ya16le, AV_PIX_FMT_YA16LE, 1, 0)
1499 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48be_full, AV_PIX_FMT_RGB48BE, 0, 0)
1500 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48le_full, AV_PIX_FMT_RGB48LE, 0, 0)
1501 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48be_full, AV_PIX_FMT_BGR48BE, 0, 0)
1502 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48le_full, AV_PIX_FMT_BGR48LE, 0, 0)
1503 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64be_full, AV_PIX_FMT_RGBA64BE, 1, 1)
1504 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64le_full, AV_PIX_FMT_RGBA64LE, 1, 1)
1505 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64be_full, AV_PIX_FMT_RGBA64BE, 0, 1)
1506 YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64le_full, AV_PIX_FMT_RGBA64LE, 0, 1)
1507 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64be_full, AV_PIX_FMT_BGRA64BE, 1, 1)
1508 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64le_full, AV_PIX_FMT_BGRA64LE, 1, 1)
1509 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64be_full, AV_PIX_FMT_BGRA64BE, 0, 1)
1510 YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64le_full, AV_PIX_FMT_BGRA64LE, 0, 1)
1513 * Write out 2 RGB pixels in the target pixel format. This function takes an
1514 * R/G/B LUT as generated by ff_yuv2rgb_c_init_tables(), which takes care of
1515 * things like endianness conversion and shifting. The caller takes care of
1516 * setting the correct offset in these tables from the chroma (U/V) values.
1517 * This function then uses the luminance (Y1/Y2) values to write out the
1518 * correct RGB values into the destination buffer.
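 *
 * Illustrative composition for the 32-bit RGB/BGR targets: each LUT entry
 * already holds its channel shifted to its final bit position, so writing a
 * pixel reduces to adding non-overlapping fields, roughly
 * dest[i * 2] = r[Y1] + g[Y1] + b[Y1] (plus alpha where present).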
1520 static av_always_inline void
1521 yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2,
1522 unsigned A1, unsigned A2,
1523 const void *_r, const void *_g, const void *_b, int y,
1524 enum AVPixelFormat target, int hasAlpha)
1526 if (target == AV_PIX_FMT_ARGB || target == AV_PIX_FMT_RGBA ||
1527 target == AV_PIX_FMT_ABGR || target == AV_PIX_FMT_BGRA) {
1528 uint32_t *dest = (uint32_t *) _dest;
1529 const uint32_t *r = (const uint32_t *) _r;
1530 const uint32_t *g = (const uint32_t *) _g;
1531 const uint32_t *b = (const uint32_t *) _b;
1534 int sh = hasAlpha ? ((target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24) : 0;
1536 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
1537 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
1540 int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;
1542 av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0);
1543 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
1544 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
1546 #if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
1547 int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;
1549 av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0xFF);
1551 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
1552 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
1555 } else if (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) {
1556 uint8_t *dest = (uint8_t *) _dest;
1557 const uint8_t *r = (const uint8_t *) _r;
1558 const uint8_t *g = (const uint8_t *) _g;
1559 const uint8_t *b = (const uint8_t *) _b;
1561 #define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
1562 #define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)
1564 dest[i * 6 + 0] = r_b[Y1];
1565 dest[i * 6 + 1] = g[Y1];
1566 dest[i * 6 + 2] = b_r[Y1];
1567 dest[i * 6 + 3] = r_b[Y2];
1568 dest[i * 6 + 4] = g[Y2];
1569 dest[i * 6 + 5] = b_r[Y2];
1572 } else if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565 ||
1573 target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555 ||
1574 target == AV_PIX_FMT_RGB444 || target == AV_PIX_FMT_BGR444) {
1575 uint16_t *dest = (uint16_t *) _dest;
1576 const uint16_t *r = (const uint16_t *) _r;
1577 const uint16_t *g = (const uint16_t *) _g;
1578 const uint16_t *b = (const uint16_t *) _b;
1579 int dr1, dg1, db1, dr2, dg2, db2;
1581 if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565) {
1582 dr1 = ff_dither_2x2_8[ y & 1 ][0];
1583 dg1 = ff_dither_2x2_4[ y & 1 ][0];
1584 db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
1585 dr2 = ff_dither_2x2_8[ y & 1 ][1];
1586 dg2 = ff_dither_2x2_4[ y & 1 ][1];
1587 db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
1588 } else if (target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555) {
1589 dr1 = ff_dither_2x2_8[ y & 1 ][0];
1590 dg1 = ff_dither_2x2_8[ y & 1 ][1];
1591 db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
1592 dr2 = ff_dither_2x2_8[ y & 1 ][1];
1593 dg2 = ff_dither_2x2_8[ y & 1 ][0];
1594 db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
1596 dr1 = ff_dither_4x4_16[ y & 3 ][0];
1597 dg1 = ff_dither_4x4_16[ y & 3 ][1];
1598 db1 = ff_dither_4x4_16[(y & 3) ^ 3][0];
1599 dr2 = ff_dither_4x4_16[ y & 3 ][1];
1600 dg2 = ff_dither_4x4_16[ y & 3 ][0];
1601 db2 = ff_dither_4x4_16[(y & 3) ^ 3][1];
1604 dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
1605 dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
1606 } else if (target == AV_PIX_FMT_X2RGB10) {
1607 uint32_t *dest = (uint32_t *) _dest;
1608 const uint32_t *r = (const uint32_t *) _r;
1609 const uint32_t *g = (const uint32_t *) _g;
1610 const uint32_t *b = (const uint32_t *) _b;
1611 dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
1612 dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
1613 } else /* 8/4 bits */ {
1614 uint8_t *dest = (uint8_t *) _dest;
1615 const uint8_t *r = (const uint8_t *) _r;
1616 const uint8_t *g = (const uint8_t *) _g;
1617 const uint8_t *b = (const uint8_t *) _b;
1618 int dr1, dg1, db1, dr2, dg2, db2;
1620 if (target == AV_PIX_FMT_RGB8 || target == AV_PIX_FMT_BGR8) {
1621 const uint8_t * const d64 = ff_dither_8x8_73[y & 7];
1622 const uint8_t * const d32 = ff_dither_8x8_32[y & 7];
1623 dr1 = dg1 = d32[(i * 2 + 0) & 7];
1624 db1 = d64[(i * 2 + 0) & 7];
1625 dr2 = dg2 = d32[(i * 2 + 1) & 7];
1626 db2 = d64[(i * 2 + 1) & 7];
1628 const uint8_t * const d64 = ff_dither_8x8_73 [y & 7];
1629 const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
1630 dr1 = db1 = d128[(i * 2 + 0) & 7];
1631 dg1 = d64[(i * 2 + 0) & 7];
1632 dr2 = db2 = d128[(i * 2 + 1) & 7];
1633 dg2 = d64[(i * 2 + 1) & 7];
1636 if (target == AV_PIX_FMT_RGB4 || target == AV_PIX_FMT_BGR4) {
1637 dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
1638 ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
1640 dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
1641 dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
1646 static av_always_inline void
1647 yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
1648 const int16_t **lumSrc, int lumFilterSize,
1649 const int16_t *chrFilter, const int16_t **chrUSrc,
1650 const int16_t **chrVSrc, int chrFilterSize,
1651 const int16_t **alpSrc, uint8_t *dest, int dstW,
1652 int y, enum AVPixelFormat target, int hasAlpha)
1656 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1662 const void *r, *g, *b;
1664 for (j = 0; j < lumFilterSize; j++) {
1665 Y1 += lumSrc[j][i * 2] * lumFilter[j];
1666 Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
1668 for (j = 0; j < chrFilterSize; j++) {
1669 U += chrUSrc[j][i] * chrFilter[j];
1670 V += chrVSrc[j][i] * chrFilter[j];
1679 for (j = 0; j < lumFilterSize; j++) {
1680 A1 += alpSrc[j][i * 2 ] * lumFilter[j];
1681 A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
1685 if ((A1 | A2) & 0x100) {
1686 A1 = av_clip_uint8(A1);
1687 A2 = av_clip_uint8(A2);
1691 r = c->table_rV[V + YUVRGB_TABLE_HEADROOM];
1692 g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]);
1693 b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1695 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1696 r, g, b, y, target, hasAlpha);
1700 static av_always_inline void
1701 yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
1702 const int16_t *ubuf[2], const int16_t *vbuf[2],
1703 const int16_t *abuf[2], uint8_t *dest, int dstW,
1704 int yalpha, int uvalpha, int y,
1705 enum AVPixelFormat target, int hasAlpha)
1707 const int16_t *buf0 = buf[0], *buf1 = buf[1],
1708 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1709 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1710 *abuf0 = hasAlpha ? abuf[0] : NULL,
1711 *abuf1 = hasAlpha ? abuf[1] : NULL;
1712 int yalpha1 = 4096 - yalpha;
1713 int uvalpha1 = 4096 - uvalpha;
1715 av_assert2(yalpha <= 4096U);
1716 av_assert2(uvalpha <= 4096U);
1718 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1719 int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
1720 int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
1721 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
1722 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
1724 const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1725 *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
1726 *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1729 A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19;
1730 A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
1731 A1 = av_clip_uint8(A1);
1732 A2 = av_clip_uint8(A2);
1735 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1736 r, g, b, y, target, hasAlpha);
1740 static av_always_inline void
1741 yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
1742 const int16_t *ubuf[2], const int16_t *vbuf[2],
1743 const int16_t *abuf0, uint8_t *dest, int dstW,
1744 int uvalpha, int y, enum AVPixelFormat target,
1747 const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1750 if (uvalpha < 2048) {
1751 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1752 int Y1 = (buf0[i * 2 ] + 64) >> 7;
1753 int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
1754 int U = (ubuf0[i] + 64) >> 7;
1755 int V = (vbuf0[i] + 64) >> 7;
1757 const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1758 *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
1759 *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1762 A1 = abuf0[i * 2 ] * 255 + 16384 >> 15;
1763 A2 = abuf0[i * 2 + 1] * 255 + 16384 >> 15;
1764 A1 = av_clip_uint8(A1);
1765 A2 = av_clip_uint8(A2);
1768 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1769 r, g, b, y, target, hasAlpha);
1772 const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1773 for (i = 0; i < ((dstW + 1) >> 1); i++) {
1774 int Y1 = (buf0[i * 2 ] + 64) >> 7;
1775 int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
1776 int U = (ubuf0[i] + ubuf1[i] + 128) >> 8;
1777 int V = (vbuf0[i] + vbuf1[i] + 128) >> 8;
1779 const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1780 *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
1781 *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1784 A1 = (abuf0[i * 2 ] + 64) >> 7;
1785 A2 = (abuf0[i * 2 + 1] + 64) >> 7;
1786 A1 = av_clip_uint8(A1);
1787 A2 = av_clip_uint8(A2);
1790 yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1791 r, g, b, y, target, hasAlpha);
1796 #define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
1797 static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
1798 const int16_t **lumSrc, int lumFilterSize, \
1799 const int16_t *chrFilter, const int16_t **chrUSrc, \
1800 const int16_t **chrVSrc, int chrFilterSize, \
1801 const int16_t **alpSrc, uint8_t *dest, int dstW, \
1804 name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
1805 chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
1806 alpSrc, dest, dstW, y, fmt, hasAlpha); \
1809 #define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
1810 YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
1811 static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
1812 const int16_t *ubuf[2], const int16_t *vbuf[2], \
1813 const int16_t *abuf[2], uint8_t *dest, int dstW, \
1814 int yalpha, int uvalpha, int y) \
1816 name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
1817 dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
1820 #define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
1821 YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
1822 static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
1823 const int16_t *ubuf[2], const int16_t *vbuf[2], \
1824 const int16_t *abuf0, uint8_t *dest, int dstW, \
1825 int uvalpha, int y) \
1827 name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
1828 dstW, uvalpha, y, fmt, hasAlpha); \
1832 YUV2RGBWRAPPER(yuv2rgb,, 32_1, AV_PIX_FMT_RGB32_1, CONFIG_SWSCALE_ALPHA && c->needAlpha)
1833 YUV2RGBWRAPPER(yuv2rgb,, 32, AV_PIX_FMT_RGB32, CONFIG_SWSCALE_ALPHA && c->needAlpha)
1835 #if CONFIG_SWSCALE_ALPHA
1836 YUV2RGBWRAPPER(yuv2rgb,, a32_1, AV_PIX_FMT_RGB32_1, 1)
1837 YUV2RGBWRAPPER(yuv2rgb,, a32, AV_PIX_FMT_RGB32, 1)
1839 YUV2RGBWRAPPER(yuv2rgb,, x32_1, AV_PIX_FMT_RGB32_1, 0)
1840 YUV2RGBWRAPPER(yuv2rgb,, x32, AV_PIX_FMT_RGB32, 0)
1842 YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
1843 YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
1844 YUV2RGBWRAPPER(yuv2rgb,, 16, AV_PIX_FMT_RGB565, 0)
1845 YUV2RGBWRAPPER(yuv2rgb,, 15, AV_PIX_FMT_RGB555, 0)
1846 YUV2RGBWRAPPER(yuv2rgb,, 12, AV_PIX_FMT_RGB444, 0)
1847 YUV2RGBWRAPPER(yuv2rgb,, 8, AV_PIX_FMT_RGB8, 0)
1848 YUV2RGBWRAPPER(yuv2rgb,, 4, AV_PIX_FMT_RGB4, 0)
1849 YUV2RGBWRAPPER(yuv2rgb,, 4b, AV_PIX_FMT_RGB4_BYTE, 0)
1850 YUV2RGBWRAPPER(yuv2, rgb, x2rgb10, AV_PIX_FMT_X2RGB10, 0)
1852 static av_always_inline void yuv2rgb_write_full(SwsContext *c,
1853 uint8_t *dest, int i, int Y, int A, int U, int V,
1854 int y, enum AVPixelFormat target, int hasAlpha, int err[4])
1857 int isrgb8 = target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8;
1859 Y -= c->yuv2rgb_y_offset;
1860 Y *= c->yuv2rgb_y_coeff;
1862 R = (unsigned)Y + V*c->yuv2rgb_v2r_coeff;
1863 G = (unsigned)Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
1864 B = (unsigned)Y + U*c->yuv2rgb_u2b_coeff;
1865 if ((R | G | B) & 0xC0000000) {
1866 R = av_clip_uintp2(R, 30);
1867 G = av_clip_uintp2(G, 30);
1868 B = av_clip_uintp2(B, 30);
1872 case AV_PIX_FMT_ARGB:
1873 dest[0] = hasAlpha ? A : 255;
1878 case AV_PIX_FMT_RGB24:
1883 case AV_PIX_FMT_RGBA:
1887 dest[3] = hasAlpha ? A : 255;
1889 case AV_PIX_FMT_ABGR:
1890 dest[0] = hasAlpha ? A : 255;
1895 case AV_PIX_FMT_BGR24:
1900 case AV_PIX_FMT_BGRA:
1904 dest[3] = hasAlpha ? A : 255;
1906 case AV_PIX_FMT_BGR4_BYTE:
1907 case AV_PIX_FMT_RGB4_BYTE:
1908 case AV_PIX_FMT_BGR8:
1909 case AV_PIX_FMT_RGB8:
1913 switch (c->dither) {
1915 case SWS_DITHER_AUTO:
1920 R += (7*err[0] + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2])>>4;
1921 G += (7*err[1] + 1*c->dither_error[1][i] + 5*c->dither_error[1][i+1] + 3*c->dither_error[1][i+2])>>4;
1922 B += (7*err[2] + 1*c->dither_error[2][i] + 5*c->dither_error[2][i+1] + 3*c->dither_error[2][i+2])>>4;
1923 c->dither_error[0][i] = err[0];
1924 c->dither_error[1][i] = err[1];
1925 c->dither_error[2][i] = err[2];
1926 r = R >> (isrgb8 ? 5 : 7);
1927 g = G >> (isrgb8 ? 5 : 6);
1928 b = B >> (isrgb8 ? 6 : 7);
1929 r = av_clip(r, 0, isrgb8 ? 7 : 1);
1930 g = av_clip(g, 0, isrgb8 ? 7 : 3);
1931 b = av_clip(b, 0, isrgb8 ? 3 : 1);
1932 err[0] = R - r*(isrgb8 ? 36 : 255);
1933 err[1] = G - g*(isrgb8 ? 36 : 85);
1934 err[2] = B - b*(isrgb8 ? 85 : 255);
1936 case SWS_DITHER_A_DITHER:
1938 /* see http://pippin.gimp.org/a_dither/ for details/origin */
1939 #define A_DITHER(u,v) (((((u)+((v)*236))*119)&0xff))
1940 r = (((R >> 19) + A_DITHER(i,y) -96)>>8);
1941 g = (((G >> 19) + A_DITHER(i + 17,y) - 96)>>8);
1942 b = (((B >> 20) + A_DITHER(i + 17*2,y) -96)>>8);
1943 r = av_clip_uintp2(r, 3);
1944 g = av_clip_uintp2(g, 3);
1945 b = av_clip_uintp2(b, 2);
1947 r = (((R >> 21) + A_DITHER(i,y)-256)>>8);
1948 g = (((G >> 19) + A_DITHER(i + 17,y)-256)>>8);
1949 b = (((B >> 21) + A_DITHER(i + 17*2,y)-256)>>8);
1950 r = av_clip_uintp2(r, 1);
1951 g = av_clip_uintp2(g, 2);
1952 b = av_clip_uintp2(b, 1);
1955 case SWS_DITHER_X_DITHER:
1957 /* see http://pippin.gimp.org/a_dither/ for details/origin */
1958 #define X_DITHER(u,v) (((((u)^((v)*237))*181)&0x1ff)/2)
1959 r = (((R >> 19) + X_DITHER(i,y) - 96)>>8);
1960 g = (((G >> 19) + X_DITHER(i + 17,y) - 96)>>8);
1961 b = (((B >> 20) + X_DITHER(i + 17*2,y) - 96)>>8);
1962 r = av_clip_uintp2(r, 3);
1963 g = av_clip_uintp2(g, 3);
1964 b = av_clip_uintp2(b, 2);
1966 r = (((R >> 21) + X_DITHER(i,y)-256)>>8);
1967 g = (((G >> 19) + X_DITHER(i + 17,y)-256)>>8);
1968 b = (((B >> 21) + X_DITHER(i + 17*2,y)-256)>>8);
1969 r = av_clip_uintp2(r, 1);
1970 g = av_clip_uintp2(g, 2);
1971 b = av_clip_uintp2(b, 1);
1977 if(target == AV_PIX_FMT_BGR4_BYTE) {
1978 dest[0] = r + 2*g + 8*b;
1979 } else if(target == AV_PIX_FMT_RGB4_BYTE) {
1980 dest[0] = b + 2*g + 8*r;
1981 } else if(target == AV_PIX_FMT_BGR8) {
1982 dest[0] = r + 8*g + 64*b;
1983 } else if(target == AV_PIX_FMT_RGB8) {
1984 dest[0] = b + 4*g + 32*r;
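/*
 * Summary of the low-depth packing above: with SWS_DITHER_ED the quantisation
 * error of each pixel is diffused with Floyd-Steinberg-like weights (7 from
 * the previous pixel via err[], plus 1/5/3 from the previous line via
 * c->dither_error[], all divided by 16), while A_DITHER/X_DITHER instead add
 * a position-dependent ordered-dither offset before truncation.  The reduced
 * r/g/b values are then packed into a single byte per pixel, e.g. for
 * AV_PIX_FMT_RGB8 (blue in the 2 low bits, green in the middle 3, red in the
 * 3 top bits):
 *
 *   r = 7, g = 7, b = 3  ->  dest[0] = 3 + 4*7 + 32*7 = 255
 */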
1991 static av_always_inline void
1992 yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
1993 const int16_t **lumSrc, int lumFilterSize,
1994 const int16_t *chrFilter, const int16_t **chrUSrc,
1995 const int16_t **chrVSrc, int chrFilterSize,
1996 const int16_t **alpSrc, uint8_t *dest,
1997 int dstW, int y, enum AVPixelFormat target, int hasAlpha)
2000 int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
2002 int A = 0; //init to silence warning
2004 if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
2005 || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
2008 for (i = 0; i < dstW; i++) {
2011 int U = (1<<9)-(128 << 19);
2012 int V = (1<<9)-(128 << 19);
2014 for (j = 0; j < lumFilterSize; j++) {
2015 Y += lumSrc[j][i] * lumFilter[j];
2017 for (j = 0; j < chrFilterSize; j++) {
2018 U += chrUSrc[j][i] * chrFilter[j];
2019 V += chrVSrc[j][i] * chrFilter[j];
2026 for (j = 0; j < lumFilterSize; j++) {
2027 A += alpSrc[j][i] * lumFilter[j];
2031 A = av_clip_uint8(A);
2033 yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2036 c->dither_error[0][i] = err[0];
2037 c->dither_error[1][i] = err[1];
2038 c->dither_error[2][i] = err[2];
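/*
 * yuv2rgb_full_X_c_template() is the general vertical-filter path for
 * full-chroma RGB output: per pixel it accumulates, in 32-bit fixed point,
 *
 *   U = (1 << 9) - (128 << 19) + sum_j chrUSrc[j][i] * chrFilter[j]
 *   V = (1 << 9) - (128 << 19) + sum_j chrVSrc[j][i] * chrFilter[j]
 *
 * (the 128 offset recentres chroma around zero, the 1 << 9 is a rounding
 * bias for the later downshift), with Y accumulated analogously from lumSrc
 * and A, when present, from alpSrc.  The results are handed to
 * yuv2rgb_write_full(), which applies the YUV->RGB matrix, clipping and
 * dithering.  err[] carries the error-diffusion state across the row and is
 * flushed to c->dither_error[] once the line is finished.
 */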
2041 static av_always_inline void
2042 yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2],
2043 const int16_t *ubuf[2], const int16_t *vbuf[2],
2044 const int16_t *abuf[2], uint8_t *dest, int dstW,
2045 int yalpha, int uvalpha, int y,
2046 enum AVPixelFormat target, int hasAlpha)
2048 const int16_t *buf0 = buf[0], *buf1 = buf[1],
2049 *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
2050 *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
2051 *abuf0 = hasAlpha ? abuf[0] : NULL,
2052 *abuf1 = hasAlpha ? abuf[1] : NULL;
2053 int yalpha1 = 4096 - yalpha;
2054 int uvalpha1 = 4096 - uvalpha;
2056 int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
2058 int A = 0; // init to silence warning
2060 av_assert2(yalpha <= 4096U);
2061 av_assert2(uvalpha <= 4096U);
2063 if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
2064 || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
2067 for (i = 0; i < dstW; i++) {
2068 int Y = ( buf0[i] * yalpha1 + buf1[i] * yalpha ) >> 10; //FIXME rounding
2069 int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha-(128 << 19)) >> 10;
2070 int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha-(128 << 19)) >> 10;
2073 A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha + (1<<18)) >> 19;
2075 A = av_clip_uint8(A);
2078 yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2081 c->dither_error[0][i] = err[0];
2082 c->dither_error[1][i] = err[1];
2083 c->dither_error[2][i] = err[2];
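/*
 * The _2_c variant blends exactly two input lines with 12-bit weights:
 * yalpha/uvalpha are in [0, 4096], yalpha1 = 4096 - yalpha, so e.g.
 *
 *   Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 10
 *
 * reduces to Y = 4 * buf0[i] when yalpha == 0 and to an equal-weight mix for
 * yalpha == 2048, matching the scaling used by the single-line _1_c path.
 */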
2086 static av_always_inline void
2087 yuv2rgb_full_1_c_template(SwsContext *c, const int16_t *buf0,
2088 const int16_t *ubuf[2], const int16_t *vbuf[2],
2089 const int16_t *abuf0, uint8_t *dest, int dstW,
2090 int uvalpha, int y, enum AVPixelFormat target,
2091 int hasAlpha)
2093 const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
2095 int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
2098 if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
2099 || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
2102 if (uvalpha < 2048) {
2103 int A = 0; //init to silence warning
2104 for (i = 0; i < dstW; i++) {
2105 int Y = buf0[i] * 4;
2106 int U = (ubuf0[i] - (128<<7)) * 4;
2107 int V = (vbuf0[i] - (128<<7)) * 4;
2110 A = (abuf0[i] + 64) >> 7;
2112 A = av_clip_uint8(A);
2115 yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2119 const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
2120 int A = 0; //init to silence warning
2121 for (i = 0; i < dstW; i++) {
2122 int Y = buf0[i] * 4;
2123 int U = (ubuf0[i] + ubuf1[i] - (128<<8)) * 2;
2124 int V = (vbuf0[i] + vbuf1[i] - (128<<8)) * 2;
2127 A = (abuf0[i] + 64) >> 7;
2129 A = av_clip_uint8(A);
2132 yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2137 c->dither_error[0][i] = err[0];
2138 c->dither_error[1][i] = err[1];
2139 c->dither_error[2][i] = err[2];
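/*
 * The _1_c variant works from a single pre-scaled line.  For chroma it keys
 * on uvalpha: below 2048 the chroma lines are used as-is, otherwise ubuf[0]
 * and ubuf[1] are averaged, e.g.
 *
 *   U = (ubuf0[i] + ubuf1[i] - (128 << 8)) * 2
 *
 * which for ubuf0[i] == ubuf1[i] == u equals the unaveraged
 * (u - (128 << 7)) * 4, so both branches feed yuv2rgb_write_full() at the
 * same scale.
 */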
2143 YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->needAlpha)
2144 YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->needAlpha)
2145 YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->needAlpha)
2146 YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->needAlpha)
2148 #if CONFIG_SWSCALE_ALPHA
2149 YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, 1)
2150 YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, 1)
2151 YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, 1)
2152 YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, 1)
2154 YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
2155 YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
2156 YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
2157 YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
2159 YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
2160 YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
2162 YUV2RGBWRAPPER(yuv2, rgb_full, bgr4_byte_full, AV_PIX_FMT_BGR4_BYTE, 0)
2163 YUV2RGBWRAPPER(yuv2, rgb_full, rgb4_byte_full, AV_PIX_FMT_RGB4_BYTE, 0)
2164 YUV2RGBWRAPPER(yuv2, rgb_full, bgr8_full, AV_PIX_FMT_BGR8, 0)
2165 YUV2RGBWRAPPER(yuv2, rgb_full, rgb8_full, AV_PIX_FMT_RGB8, 0)
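/*
 * As with the non-full-chroma wrappers earlier, the rgb_full instantiations
 * come in two flavours: a size-optimised (CONFIG_SMALL) build keeps a single
 * variant whose alpha handling is decided at run time through c->needAlpha,
 * while the regular build generates separate alpha (hasAlpha = 1) and
 * no-alpha (hasAlpha = 0) specialisations so ff_sws_init_output_funcs()
 * below can pick the cheaper one.
 */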
2168 yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter,
2169 const int16_t **lumSrc, int lumFilterSize,
2170 const int16_t *chrFilter, const int16_t **chrUSrc,
2171 const int16_t **chrVSrc, int chrFilterSize,
2172 const int16_t **alpSrc, uint8_t **dest,
2175 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
2177 int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrc;
2178 uint16_t **dest16 = (uint16_t**)dest;
2179 int SH = 22 + 8 - desc->comp[0].depth;
2180 int A = 0; // init to silence warning
2182 for (i = 0; i < dstW; i++) {
2185 int U = (1 << 9) - (128 << 19);
2186 int V = (1 << 9) - (128 << 19);
2189 for (j = 0; j < lumFilterSize; j++)
2190 Y += lumSrc[j][i] * lumFilter[j];
2192 for (j = 0; j < chrFilterSize; j++) {
2193 U += chrUSrc[j][i] * chrFilter[j];
2194 V += chrVSrc[j][i] * chrFilter[j];
2204 for (j = 0; j < lumFilterSize; j++)
2205 A += alpSrc[j][i] * lumFilter[j];
2208 A = av_clip_uintp2(A, 27);
2211 Y -= c->yuv2rgb_y_offset;
2212 Y *= c->yuv2rgb_y_coeff;
2214 R = Y + V * c->yuv2rgb_v2r_coeff;
2215 G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
2216 B = Y + U * c->yuv2rgb_u2b_coeff;
2218 if ((R | G | B) & 0xC0000000) {
2219 R = av_clip_uintp2(R, 30);
2220 G = av_clip_uintp2(G, 30);
2221 B = av_clip_uintp2(B, 30);
2225 dest16[0][i] = G >> SH;
2226 dest16[1][i] = B >> SH;
2227 dest16[2][i] = R >> SH;
2229 dest16[3][i] = A >> (SH - 3);
2231 dest[0][i] = G >> 22;
2232 dest[1][i] = B >> 22;
2233 dest[2][i] = R >> 22;
2235 dest[3][i] = A >> 19;
2238 if (SH != 22 && (!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
2239 for (i = 0; i < dstW; i++) {
2240 dest16[0][i] = av_bswap16(dest16[0][i]);
2241 dest16[1][i] = av_bswap16(dest16[1][i]);
2242 dest16[2][i] = av_bswap16(dest16[2][i]);
2244 dest16[3][i] = av_bswap16(dest16[3][i]);
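/*
 * yuv2gbrp_full_X_c() covers the planar GBR(A) formats below 16 bits:
 * G, B, R (and optionally A) go to separate planes in that order, and
 * SH = 22 + 8 - depth rescales the 30-bit clipped intermediate to the
 * target depth (e.g. depth 10 -> SH = 20, leaving a 10-bit sample), with
 * plain 8-bit planes taking the >> 22 path.  When the requested endianness
 * differs from the host's, the already-written 16-bit samples are
 * byte-swapped in a second pass.
 */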
2250 yuv2gbrp16_full_X_c(SwsContext *c, const int16_t *lumFilter,
2251 const int16_t **lumSrcx, int lumFilterSize,
2252 const int16_t *chrFilter, const int16_t **chrUSrcx,
2253 const int16_t **chrVSrcx, int chrFilterSize,
2254 const int16_t **alpSrcx, uint8_t **dest,
2257 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
2259 int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrcx;
2260 uint16_t **dest16 = (uint16_t**)dest;
2261 const int32_t **lumSrc = (const int32_t**)lumSrcx;
2262 const int32_t **chrUSrc = (const int32_t**)chrUSrcx;
2263 const int32_t **chrVSrc = (const int32_t**)chrVSrcx;
2264 const int32_t **alpSrc = (const int32_t**)alpSrcx;
2266 for (i = 0; i < dstW; i++) {
2268 int Y = -0x40000000;
2269 int U = -(128 << 23);
2270 int V = -(128 << 23);
2273 for (j = 0; j < lumFilterSize; j++)
2274 Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2276 for (j = 0; j < chrFilterSize; j++) {
2277 U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2278 V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2289 for (j = 0; j < lumFilterSize; j++)
2290 A += alpSrc[j][i] * (unsigned)lumFilter[j];
2296 Y -= c->yuv2rgb_y_offset;
2297 Y *= c->yuv2rgb_y_coeff;
2299 R = V * c->yuv2rgb_v2r_coeff;
2300 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
2301 B = U * c->yuv2rgb_u2b_coeff;
2303 R = av_clip_uintp2(Y + R, 30);
2304 G = av_clip_uintp2(Y + G, 30);
2305 B = av_clip_uintp2(Y + B, 30);
2307 dest16[0][i] = G >> 14;
2308 dest16[1][i] = B >> 14;
2309 dest16[2][i] = R >> 14;
2311 dest16[3][i] = av_clip_uintp2(A, 30) >> 14;
2313 if ((!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
2314 for (i = 0; i < dstW; i++) {
2315 dest16[0][i] = av_bswap16(dest16[0][i]);
2316 dest16[1][i] = av_bswap16(dest16[1][i]);
2317 dest16[2][i] = av_bswap16(dest16[2][i]);
2319 dest16[3][i] = av_bswap16(dest16[3][i]);
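/*
 * The 16-bit planar variant receives its vertical-filter input as packed
 * int32_t (the int16_t ** parameters are casts), accumulates the filter
 * taps with unsigned multiplies to keep intermediate overflow well defined,
 * clips the Y + R/G/B sums to 30 bits and shifts by 14 to produce the final
 * 16-bit plane samples, again byte-swapping afterwards if the target
 * endianness is not the native one.
 */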
2325 yuv2gbrpf32_full_X_c(SwsContext *c, const int16_t *lumFilter,
2326 const int16_t **lumSrcx, int lumFilterSize,
2327 const int16_t *chrFilter, const int16_t **chrUSrcx,
2328 const int16_t **chrVSrcx, int chrFilterSize,
2329 const int16_t **alpSrcx, uint8_t **dest,
2332 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
2334 int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrcx;
2335 uint32_t **dest32 = (uint32_t**)dest;
2336 const int32_t **lumSrc = (const int32_t**)lumSrcx;
2337 const int32_t **chrUSrc = (const int32_t**)chrUSrcx;
2338 const int32_t **chrVSrc = (const int32_t**)chrVSrcx;
2339 const int32_t **alpSrc = (const int32_t**)alpSrcx;
2340 static const float float_mult = 1.0f / 65535.0f;
2342 for (i = 0; i < dstW; i++) {
2344 int Y = -0x40000000;
2345 int U = -(128 << 23);
2346 int V = -(128 << 23);
2349 for (j = 0; j < lumFilterSize; j++)
2350 Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2352 for (j = 0; j < chrFilterSize; j++) {
2353 U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2354 V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2365 for (j = 0; j < lumFilterSize; j++)
2366 A += alpSrc[j][i] * (unsigned)lumFilter[j];
2372 Y -= c->yuv2rgb_y_offset;
2373 Y *= c->yuv2rgb_y_coeff;
2375 R = V * c->yuv2rgb_v2r_coeff;
2376 G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
2377 B = U * c->yuv2rgb_u2b_coeff;
2379 R = av_clip_uintp2(Y + R, 30);
2380 G = av_clip_uintp2(Y + G, 30);
2381 B = av_clip_uintp2(Y + B, 30);
2383 dest32[0][i] = av_float2int(float_mult * (float)(G >> 14));
2384 dest32[1][i] = av_float2int(float_mult * (float)(B >> 14));
2385 dest32[2][i] = av_float2int(float_mult * (float)(R >> 14));
2387 dest32[3][i] = av_float2int(float_mult * (float)(av_clip_uintp2(A, 30) >> 14));
2389 if ((!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
2390 for (i = 0; i < dstW; i++) {
2391 dest32[0][i] = av_bswap32(dest32[0][i]);
2392 dest32[1][i] = av_bswap32(dest32[1][i]);
2393 dest32[2][i] = av_bswap32(dest32[2][i]);
2395 dest32[3][i] = av_bswap32(dest32[3][i]);
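/*
 * The float variant shares the same accumulation, then maps the 30-bit
 * clipped value into [0.0, 1.0]: (value >> 14) is a 16-bit sample that is
 * scaled by 1/65535 and stored bit-exactly with av_float2int(), so a
 * full-scale sample (G >> 14 == 65535) maps to 1.0f.
 */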
2401 yuv2ya8_1_c(SwsContext *c, const int16_t *buf0,
2402 const int16_t *ubuf[2], const int16_t *vbuf[2],
2403 const int16_t *abuf0, uint8_t *dest, int dstW,
2406 int hasAlpha = !!abuf0;
2409 for (i = 0; i < dstW; i++) {
2410 int Y = (buf0[i] + 64) >> 7;
2413 Y = av_clip_uint8(Y);
2416 A = (abuf0[i] + 64) >> 7;
2418 A = av_clip_uint8(A);
2422 dest[i * 2 + 1] = hasAlpha ? A : 255;
2427 yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2],
2428 const int16_t *ubuf[2], const int16_t *vbuf[2],
2429 const int16_t *abuf[2], uint8_t *dest, int dstW,
2430 int yalpha, int uvalpha, int y)
2432 int hasAlpha = abuf && abuf[0] && abuf[1];
2433 const int16_t *buf0 = buf[0], *buf1 = buf[1],
2434 *abuf0 = hasAlpha ? abuf[0] : NULL,
2435 *abuf1 = hasAlpha ? abuf[1] : NULL;
2436 int yalpha1 = 4096 - yalpha;
2439 av_assert2(yalpha <= 4096U);
2441 for (i = 0; i < dstW; i++) {
2442 int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 19;
2445 Y = av_clip_uint8(Y);
2448 A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 19;
2449 A = av_clip_uint8(A);
2453 dest[i * 2 + 1] = hasAlpha ? A : 255;
2458 yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter,
2459 const int16_t **lumSrc, int lumFilterSize,
2460 const int16_t *chrFilter, const int16_t **chrUSrc,
2461 const int16_t **chrVSrc, int chrFilterSize,
2462 const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
2464 int hasAlpha = !!alpSrc;
2467 for (i = 0; i < dstW; i++) {
2469 int Y = 1 << 18, A = 1 << 18;
2471 for (j = 0; j < lumFilterSize; j++)
2472 Y += lumSrc[j][i] * lumFilter[j];
2476 Y = av_clip_uint8(Y);
2479 for (j = 0; j < lumFilterSize; j++)
2480 A += alpSrc[j][i] * lumFilter[j];
2485 A = av_clip_uint8(A);
2489 dest[2 * i + 1] = hasAlpha ? A : 255;
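/*
 * The yuv2ya8_* trio above writes gray + alpha (AV_PIX_FMT_YA8): luma goes
 * to the even bytes and alpha (or 255 when no alpha plane is supplied) to
 * the odd bytes, with _1/_2/_X mirroring the usual unscaled / two-line /
 * full-filter vertical cases.
 */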
2494 yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter,
2495 const int16_t **_lumSrc, int lumFilterSize,
2496 const int16_t *chrFilter, const int16_t **_chrUSrc,
2497 const int16_t **_chrVSrc, int chrFilterSize,
2498 const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
2500 const int32_t **lumSrc = (const int32_t **) _lumSrc,
2501 **chrUSrc = (const int32_t **) _chrUSrc,
2502 **chrVSrc = (const int32_t **) _chrVSrc,
2503 **alpSrc = (const int32_t **) _alpSrc;
2504 int hasAlpha = !!alpSrc;
2507 for (i = 0; i < dstW; i++) {
2508 int Y = 1 << 14, U = 1 << 14;
2509 int V = 1 << 14, A = 1 << 14;
2517 for (j = 0; j < lumFilterSize; j++)
2518 Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2520 for (j = 0; j < chrFilterSize; j++)
2521 U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2523 for (j = 0; j < chrFilterSize; j++)
2524 V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2527 for (j = 0; j < lumFilterSize; j++)
2528 A += alpSrc[j][i] * (unsigned)lumFilter[j];
2530 Y = 0x8000 + av_clip_int16(Y >> 15);
2531 U = 0x8000 + av_clip_int16(U >> 15);
2532 V = 0x8000 + av_clip_int16(V >> 15);
2533 A = 0x8000 + av_clip_int16(A >> 15);
2535 AV_WL16(dest + 8 * i, hasAlpha ? A : 65535);
2536 AV_WL16(dest + 8 * i + 2, Y);
2537 AV_WL16(dest + 8 * i + 4, U);
2538 AV_WL16(dest + 8 * i + 6, V);
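/*
 * yuv2ayuv64le_X_c() emits packed AYUV64LE: each pixel is four little-endian
 * 16-bit words at dest + 8*i, in A, Y, U, V order.  The accumulated values
 * are clipped as signed 16-bit and re-biased with 0x8000, and a missing
 * alpha input degrades to fully opaque (65535).
 */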
2542 av_cold void ff_sws_init_output_funcs(SwsContext *c,
2543 yuv2planar1_fn *yuv2plane1,
2544 yuv2planarX_fn *yuv2planeX,
2545 yuv2interleavedX_fn *yuv2nv12cX,
2546 yuv2packed1_fn *yuv2packed1,
2547 yuv2packed2_fn *yuv2packed2,
2548 yuv2packedX_fn *yuv2packedX,
2549 yuv2anyX_fn *yuv2anyX)
2551 enum AVPixelFormat dstFormat = c->dstFormat;
2552 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
2554 if (dstFormat == AV_PIX_FMT_P010LE || dstFormat == AV_PIX_FMT_P010BE) {
2555 *yuv2plane1 = isBE(dstFormat) ? yuv2p010l1_BE_c : yuv2p010l1_LE_c;
2556 *yuv2planeX = isBE(dstFormat) ? yuv2p010lX_BE_c : yuv2p010lX_LE_c;
2557 *yuv2nv12cX = yuv2p010cX_c;
2558 } else if (is16BPS(dstFormat)) {
2559 *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
2560 *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;
2561 if (dstFormat == AV_PIX_FMT_P016LE || dstFormat == AV_PIX_FMT_P016BE) {
2562 *yuv2nv12cX = yuv2p016cX_c;
2564 } else if (isNBPS(dstFormat)) {
2565 if (desc->comp[0].depth == 9) {
2566 *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
2567 *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;
2568 } else if (desc->comp[0].depth == 10) {
2569 *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
2570 *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;
2571 } else if (desc->comp[0].depth == 12) {
2572 *yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_c : yuv2planeX_12LE_c;
2573 *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_c : yuv2plane1_12LE_c;
2574 } else if (desc->comp[0].depth == 14) {
2575 *yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_c : yuv2planeX_14LE_c;
2576 *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_c : yuv2plane1_14LE_c;
2579 } else if (dstFormat == AV_PIX_FMT_GRAYF32BE) {
2580 *yuv2planeX = yuv2planeX_floatBE_c;
2581 *yuv2plane1 = yuv2plane1_floatBE_c;
2582 } else if (dstFormat == AV_PIX_FMT_GRAYF32LE) {
2583 *yuv2planeX = yuv2planeX_floatLE_c;
2584 *yuv2plane1 = yuv2plane1_floatLE_c;
2586 *yuv2plane1 = yuv2plane1_8_c;
2587 *yuv2planeX = yuv2planeX_8_c;
2588 if (dstFormat == AV_PIX_FMT_NV12 || dstFormat == AV_PIX_FMT_NV21 ||
2589 dstFormat == AV_PIX_FMT_NV24 || dstFormat == AV_PIX_FMT_NV42)
2590 *yuv2nv12cX = yuv2nv12cX_c;
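/*
 * Up to this point the init routine has only chosen the planar writers:
 * yuv2plane1/yuv2planeX (and yuv2nv12cX for semi-planar targets) are picked
 * purely from the destination bit depth, endianness and float-ness.  What
 * follows selects the packed/any writers: with SWS_FULL_CHR_H_INT set the
 * full-chroma RGB functions defined above are used, otherwise the ordinary
 * packed yuv2*_1/_2/_X functions are chosen per destination format.
 */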
2593 if(c->flags & SWS_FULL_CHR_H_INT) {
2594 switch (dstFormat) {
2595 case AV_PIX_FMT_RGBA:
2597 *yuv2packedX = yuv2rgba32_full_X_c;
2598 *yuv2packed2 = yuv2rgba32_full_2_c;
2599 *yuv2packed1 = yuv2rgba32_full_1_c;
2601 #if CONFIG_SWSCALE_ALPHA
2603 *yuv2packedX = yuv2rgba32_full_X_c;
2604 *yuv2packed2 = yuv2rgba32_full_2_c;
2605 *yuv2packed1 = yuv2rgba32_full_1_c;
2607 #endif /* CONFIG_SWSCALE_ALPHA */
2609 *yuv2packedX = yuv2rgbx32_full_X_c;
2610 *yuv2packed2 = yuv2rgbx32_full_2_c;
2611 *yuv2packed1 = yuv2rgbx32_full_1_c;
2613 #endif /* !CONFIG_SMALL */
2615 case AV_PIX_FMT_ARGB:
2617 *yuv2packedX = yuv2argb32_full_X_c;
2618 *yuv2packed2 = yuv2argb32_full_2_c;
2619 *yuv2packed1 = yuv2argb32_full_1_c;
2621 #if CONFIG_SWSCALE_ALPHA
2623 *yuv2packedX = yuv2argb32_full_X_c;
2624 *yuv2packed2 = yuv2argb32_full_2_c;
2625 *yuv2packed1 = yuv2argb32_full_1_c;
2627 #endif /* CONFIG_SWSCALE_ALPHA */
2629 *yuv2packedX = yuv2xrgb32_full_X_c;
2630 *yuv2packed2 = yuv2xrgb32_full_2_c;
2631 *yuv2packed1 = yuv2xrgb32_full_1_c;
2633 #endif /* !CONFIG_SMALL */
2635 case AV_PIX_FMT_BGRA:
2637 *yuv2packedX = yuv2bgra32_full_X_c;
2638 *yuv2packed2 = yuv2bgra32_full_2_c;
2639 *yuv2packed1 = yuv2bgra32_full_1_c;
2641 #if CONFIG_SWSCALE_ALPHA
2643 *yuv2packedX = yuv2bgra32_full_X_c;
2644 *yuv2packed2 = yuv2bgra32_full_2_c;
2645 *yuv2packed1 = yuv2bgra32_full_1_c;
2647 #endif /* CONFIG_SWSCALE_ALPHA */
2649 *yuv2packedX = yuv2bgrx32_full_X_c;
2650 *yuv2packed2 = yuv2bgrx32_full_2_c;
2651 *yuv2packed1 = yuv2bgrx32_full_1_c;
2653 #endif /* !CONFIG_SMALL */
2655 case AV_PIX_FMT_ABGR:
2657 *yuv2packedX = yuv2abgr32_full_X_c;
2658 *yuv2packed2 = yuv2abgr32_full_2_c;
2659 *yuv2packed1 = yuv2abgr32_full_1_c;
2661 #if CONFIG_SWSCALE_ALPHA
2663 *yuv2packedX = yuv2abgr32_full_X_c;
2664 *yuv2packed2 = yuv2abgr32_full_2_c;
2665 *yuv2packed1 = yuv2abgr32_full_1_c;
2667 #endif /* CONFIG_SWSCALE_ALPHA */
2669 *yuv2packedX = yuv2xbgr32_full_X_c;
2670 *yuv2packed2 = yuv2xbgr32_full_2_c;
2671 *yuv2packed1 = yuv2xbgr32_full_1_c;
2673 #endif /* !CONFIG_SMALL */
2675 case AV_PIX_FMT_RGBA64LE:
2676 #if CONFIG_SWSCALE_ALPHA
2678 *yuv2packedX = yuv2rgba64le_full_X_c;
2679 *yuv2packed2 = yuv2rgba64le_full_2_c;
2680 *yuv2packed1 = yuv2rgba64le_full_1_c;
2682 #endif /* CONFIG_SWSCALE_ALPHA */
2684 *yuv2packedX = yuv2rgbx64le_full_X_c;
2685 *yuv2packed2 = yuv2rgbx64le_full_2_c;
2686 *yuv2packed1 = yuv2rgbx64le_full_1_c;
2689 case AV_PIX_FMT_RGBA64BE:
2690 #if CONFIG_SWSCALE_ALPHA
2692 *yuv2packedX = yuv2rgba64be_full_X_c;
2693 *yuv2packed2 = yuv2rgba64be_full_2_c;
2694 *yuv2packed1 = yuv2rgba64be_full_1_c;
2696 #endif /* CONFIG_SWSCALE_ALPHA */
2698 *yuv2packedX = yuv2rgbx64be_full_X_c;
2699 *yuv2packed2 = yuv2rgbx64be_full_2_c;
2700 *yuv2packed1 = yuv2rgbx64be_full_1_c;
2703 case AV_PIX_FMT_BGRA64LE:
2704 #if CONFIG_SWSCALE_ALPHA
2706 *yuv2packedX = yuv2bgra64le_full_X_c;
2707 *yuv2packed2 = yuv2bgra64le_full_2_c;
2708 *yuv2packed1 = yuv2bgra64le_full_1_c;
2710 #endif /* CONFIG_SWSCALE_ALPHA */
2712 *yuv2packedX = yuv2bgrx64le_full_X_c;
2713 *yuv2packed2 = yuv2bgrx64le_full_2_c;
2714 *yuv2packed1 = yuv2bgrx64le_full_1_c;
2717 case AV_PIX_FMT_BGRA64BE:
2718 #if CONFIG_SWSCALE_ALPHA
2720 *yuv2packedX = yuv2bgra64be_full_X_c;
2721 *yuv2packed2 = yuv2bgra64be_full_2_c;
2722 *yuv2packed1 = yuv2bgra64be_full_1_c;
2724 #endif /* CONFIG_SWSCALE_ALPHA */
2726 *yuv2packedX = yuv2bgrx64be_full_X_c;
2727 *yuv2packed2 = yuv2bgrx64be_full_2_c;
2728 *yuv2packed1 = yuv2bgrx64be_full_1_c;
2732 case AV_PIX_FMT_RGB24:
2733 *yuv2packedX = yuv2rgb24_full_X_c;
2734 *yuv2packed2 = yuv2rgb24_full_2_c;
2735 *yuv2packed1 = yuv2rgb24_full_1_c;
2737 case AV_PIX_FMT_BGR24:
2738 *yuv2packedX = yuv2bgr24_full_X_c;
2739 *yuv2packed2 = yuv2bgr24_full_2_c;
2740 *yuv2packed1 = yuv2bgr24_full_1_c;
2742 case AV_PIX_FMT_RGB48LE:
2743 *yuv2packedX = yuv2rgb48le_full_X_c;
2744 *yuv2packed2 = yuv2rgb48le_full_2_c;
2745 *yuv2packed1 = yuv2rgb48le_full_1_c;
2747 case AV_PIX_FMT_BGR48LE:
2748 *yuv2packedX = yuv2bgr48le_full_X_c;
2749 *yuv2packed2 = yuv2bgr48le_full_2_c;
2750 *yuv2packed1 = yuv2bgr48le_full_1_c;
2752 case AV_PIX_FMT_RGB48BE:
2753 *yuv2packedX = yuv2rgb48be_full_X_c;
2754 *yuv2packed2 = yuv2rgb48be_full_2_c;
2755 *yuv2packed1 = yuv2rgb48be_full_1_c;
2757 case AV_PIX_FMT_BGR48BE:
2758 *yuv2packedX = yuv2bgr48be_full_X_c;
2759 *yuv2packed2 = yuv2bgr48be_full_2_c;
2760 *yuv2packed1 = yuv2bgr48be_full_1_c;
2762 case AV_PIX_FMT_BGR4_BYTE:
2763 *yuv2packedX = yuv2bgr4_byte_full_X_c;
2764 *yuv2packed2 = yuv2bgr4_byte_full_2_c;
2765 *yuv2packed1 = yuv2bgr4_byte_full_1_c;
2767 case AV_PIX_FMT_RGB4_BYTE:
2768 *yuv2packedX = yuv2rgb4_byte_full_X_c;
2769 *yuv2packed2 = yuv2rgb4_byte_full_2_c;
2770 *yuv2packed1 = yuv2rgb4_byte_full_1_c;
2772 case AV_PIX_FMT_BGR8:
2773 *yuv2packedX = yuv2bgr8_full_X_c;
2774 *yuv2packed2 = yuv2bgr8_full_2_c;
2775 *yuv2packed1 = yuv2bgr8_full_1_c;
2777 case AV_PIX_FMT_RGB8:
2778 *yuv2packedX = yuv2rgb8_full_X_c;
2779 *yuv2packed2 = yuv2rgb8_full_2_c;
2780 *yuv2packed1 = yuv2rgb8_full_1_c;
2782 case AV_PIX_FMT_GBRP:
2783 case AV_PIX_FMT_GBRP9BE:
2784 case AV_PIX_FMT_GBRP9LE:
2785 case AV_PIX_FMT_GBRP10BE:
2786 case AV_PIX_FMT_GBRP10LE:
2787 case AV_PIX_FMT_GBRP12BE:
2788 case AV_PIX_FMT_GBRP12LE:
2789 case AV_PIX_FMT_GBRP14BE:
2790 case AV_PIX_FMT_GBRP14LE:
2791 case AV_PIX_FMT_GBRAP:
2792 case AV_PIX_FMT_GBRAP10BE:
2793 case AV_PIX_FMT_GBRAP10LE:
2794 case AV_PIX_FMT_GBRAP12BE:
2795 case AV_PIX_FMT_GBRAP12LE:
2796 *yuv2anyX = yuv2gbrp_full_X_c;
2798 case AV_PIX_FMT_GBRP16BE:
2799 case AV_PIX_FMT_GBRP16LE:
2800 case AV_PIX_FMT_GBRAP16BE:
2801 case AV_PIX_FMT_GBRAP16LE:
2802 *yuv2anyX = yuv2gbrp16_full_X_c;
2804 case AV_PIX_FMT_GBRPF32BE:
2805 case AV_PIX_FMT_GBRPF32LE:
2806 case AV_PIX_FMT_GBRAPF32BE:
2807 case AV_PIX_FMT_GBRAPF32LE:
2808 *yuv2anyX = yuv2gbrpf32_full_X_c;
2811 if (!*yuv2packedX && !*yuv2anyX)
2815 switch (dstFormat) {
2816 case AV_PIX_FMT_RGBA64LE:
2817 #if CONFIG_SWSCALE_ALPHA
2819 *yuv2packed1 = yuv2rgba64le_1_c;
2820 *yuv2packed2 = yuv2rgba64le_2_c;
2821 *yuv2packedX = yuv2rgba64le_X_c;
2823 #endif /* CONFIG_SWSCALE_ALPHA */
2825 *yuv2packed1 = yuv2rgbx64le_1_c;
2826 *yuv2packed2 = yuv2rgbx64le_2_c;
2827 *yuv2packedX = yuv2rgbx64le_X_c;
2830 case AV_PIX_FMT_RGBA64BE:
2831 #if CONFIG_SWSCALE_ALPHA
2833 *yuv2packed1 = yuv2rgba64be_1_c;
2834 *yuv2packed2 = yuv2rgba64be_2_c;
2835 *yuv2packedX = yuv2rgba64be_X_c;
2837 #endif /* CONFIG_SWSCALE_ALPHA */
2839 *yuv2packed1 = yuv2rgbx64be_1_c;
2840 *yuv2packed2 = yuv2rgbx64be_2_c;
2841 *yuv2packedX = yuv2rgbx64be_X_c;
2844 case AV_PIX_FMT_BGRA64LE:
2845 #if CONFIG_SWSCALE_ALPHA
2847 *yuv2packed1 = yuv2bgra64le_1_c;
2848 *yuv2packed2 = yuv2bgra64le_2_c;
2849 *yuv2packedX = yuv2bgra64le_X_c;
2851 #endif /* CONFIG_SWSCALE_ALPHA */
2853 *yuv2packed1 = yuv2bgrx64le_1_c;
2854 *yuv2packed2 = yuv2bgrx64le_2_c;
2855 *yuv2packedX = yuv2bgrx64le_X_c;
2858 case AV_PIX_FMT_BGRA64BE:
2859 #if CONFIG_SWSCALE_ALPHA
2861 *yuv2packed1 = yuv2bgra64be_1_c;
2862 *yuv2packed2 = yuv2bgra64be_2_c;
2863 *yuv2packedX = yuv2bgra64be_X_c;
2865 #endif /* CONFIG_SWSCALE_ALPHA */
2867 *yuv2packed1 = yuv2bgrx64be_1_c;
2868 *yuv2packed2 = yuv2bgrx64be_2_c;
2869 *yuv2packedX = yuv2bgrx64be_X_c;
2872 case AV_PIX_FMT_RGB48LE:
2873 *yuv2packed1 = yuv2rgb48le_1_c;
2874 *yuv2packed2 = yuv2rgb48le_2_c;
2875 *yuv2packedX = yuv2rgb48le_X_c;
2877 case AV_PIX_FMT_RGB48BE:
2878 *yuv2packed1 = yuv2rgb48be_1_c;
2879 *yuv2packed2 = yuv2rgb48be_2_c;
2880 *yuv2packedX = yuv2rgb48be_X_c;
2882 case AV_PIX_FMT_BGR48LE:
2883 *yuv2packed1 = yuv2bgr48le_1_c;
2884 *yuv2packed2 = yuv2bgr48le_2_c;
2885 *yuv2packedX = yuv2bgr48le_X_c;
2887 case AV_PIX_FMT_BGR48BE:
2888 *yuv2packed1 = yuv2bgr48be_1_c;
2889 *yuv2packed2 = yuv2bgr48be_2_c;
2890 *yuv2packedX = yuv2bgr48be_X_c;
2892 case AV_PIX_FMT_RGB32:
2893 case AV_PIX_FMT_BGR32:
2895 *yuv2packed1 = yuv2rgb32_1_c;
2896 *yuv2packed2 = yuv2rgb32_2_c;
2897 *yuv2packedX = yuv2rgb32_X_c;
2899 #if CONFIG_SWSCALE_ALPHA
2901 *yuv2packed1 = yuv2rgba32_1_c;
2902 *yuv2packed2 = yuv2rgba32_2_c;
2903 *yuv2packedX = yuv2rgba32_X_c;
2905 #endif /* CONFIG_SWSCALE_ALPHA */
2907 *yuv2packed1 = yuv2rgbx32_1_c;
2908 *yuv2packed2 = yuv2rgbx32_2_c;
2909 *yuv2packedX = yuv2rgbx32_X_c;
2911 #endif /* !CONFIG_SMALL */
2913 case AV_PIX_FMT_RGB32_1:
2914 case AV_PIX_FMT_BGR32_1:
2916 *yuv2packed1 = yuv2rgb32_1_1_c;
2917 *yuv2packed2 = yuv2rgb32_1_2_c;
2918 *yuv2packedX = yuv2rgb32_1_X_c;
2920 #if CONFIG_SWSCALE_ALPHA
2922 *yuv2packed1 = yuv2rgba32_1_1_c;
2923 *yuv2packed2 = yuv2rgba32_1_2_c;
2924 *yuv2packedX = yuv2rgba32_1_X_c;
2926 #endif /* CONFIG_SWSCALE_ALPHA */
2928 *yuv2packed1 = yuv2rgbx32_1_1_c;
2929 *yuv2packed2 = yuv2rgbx32_1_2_c;
2930 *yuv2packedX = yuv2rgbx32_1_X_c;
2932 #endif /* !CONFIG_SMALL */
2934 case AV_PIX_FMT_RGB24:
2935 *yuv2packed1 = yuv2rgb24_1_c;
2936 *yuv2packed2 = yuv2rgb24_2_c;
2937 *yuv2packedX = yuv2rgb24_X_c;
2939 case AV_PIX_FMT_BGR24:
2940 *yuv2packed1 = yuv2bgr24_1_c;
2941 *yuv2packed2 = yuv2bgr24_2_c;
2942 *yuv2packedX = yuv2bgr24_X_c;
2944 case AV_PIX_FMT_RGB565LE:
2945 case AV_PIX_FMT_RGB565BE:
2946 case AV_PIX_FMT_BGR565LE:
2947 case AV_PIX_FMT_BGR565BE:
2948 *yuv2packed1 = yuv2rgb16_1_c;
2949 *yuv2packed2 = yuv2rgb16_2_c;
2950 *yuv2packedX = yuv2rgb16_X_c;
2952 case AV_PIX_FMT_RGB555LE:
2953 case AV_PIX_FMT_RGB555BE:
2954 case AV_PIX_FMT_BGR555LE:
2955 case AV_PIX_FMT_BGR555BE:
2956 *yuv2packed1 = yuv2rgb15_1_c;
2957 *yuv2packed2 = yuv2rgb15_2_c;
2958 *yuv2packedX = yuv2rgb15_X_c;
2960 case AV_PIX_FMT_RGB444LE:
2961 case AV_PIX_FMT_RGB444BE:
2962 case AV_PIX_FMT_BGR444LE:
2963 case AV_PIX_FMT_BGR444BE:
2964 *yuv2packed1 = yuv2rgb12_1_c;
2965 *yuv2packed2 = yuv2rgb12_2_c;
2966 *yuv2packedX = yuv2rgb12_X_c;
2968 case AV_PIX_FMT_RGB8:
2969 case AV_PIX_FMT_BGR8:
2970 *yuv2packed1 = yuv2rgb8_1_c;
2971 *yuv2packed2 = yuv2rgb8_2_c;
2972 *yuv2packedX = yuv2rgb8_X_c;
2974 case AV_PIX_FMT_RGB4:
2975 case AV_PIX_FMT_BGR4:
2976 *yuv2packed1 = yuv2rgb4_1_c;
2977 *yuv2packed2 = yuv2rgb4_2_c;
2978 *yuv2packedX = yuv2rgb4_X_c;
2980 case AV_PIX_FMT_RGB4_BYTE:
2981 case AV_PIX_FMT_BGR4_BYTE:
2982 *yuv2packed1 = yuv2rgb4b_1_c;
2983 *yuv2packed2 = yuv2rgb4b_2_c;
2984 *yuv2packedX = yuv2rgb4b_X_c;
2986 case AV_PIX_FMT_X2RGB10LE:
2987 case AV_PIX_FMT_X2RGB10BE:
2988 *yuv2packed1 = yuv2x2rgb10_1_c;
2989 *yuv2packed2 = yuv2x2rgb10_2_c;
2990 *yuv2packedX = yuv2x2rgb10_X_c;
2994 switch (dstFormat) {
2995 case AV_PIX_FMT_MONOWHITE:
2996 *yuv2packed1 = yuv2monowhite_1_c;
2997 *yuv2packed2 = yuv2monowhite_2_c;
2998 *yuv2packedX = yuv2monowhite_X_c;
3000 case AV_PIX_FMT_MONOBLACK:
3001 *yuv2packed1 = yuv2monoblack_1_c;
3002 *yuv2packed2 = yuv2monoblack_2_c;
3003 *yuv2packedX = yuv2monoblack_X_c;
3005 case AV_PIX_FMT_YUYV422:
3006 *yuv2packed1 = yuv2yuyv422_1_c;
3007 *yuv2packed2 = yuv2yuyv422_2_c;
3008 *yuv2packedX = yuv2yuyv422_X_c;
3010 case AV_PIX_FMT_YVYU422:
3011 *yuv2packed1 = yuv2yvyu422_1_c;
3012 *yuv2packed2 = yuv2yvyu422_2_c;
3013 *yuv2packedX = yuv2yvyu422_X_c;
3015 case AV_PIX_FMT_UYVY422:
3016 *yuv2packed1 = yuv2uyvy422_1_c;
3017 *yuv2packed2 = yuv2uyvy422_2_c;
3018 *yuv2packedX = yuv2uyvy422_X_c;
3020 case AV_PIX_FMT_YA8:
3021 *yuv2packed1 = yuv2ya8_1_c;
3022 *yuv2packed2 = yuv2ya8_2_c;
3023 *yuv2packedX = yuv2ya8_X_c;
3025 case AV_PIX_FMT_YA16LE:
3026 *yuv2packed1 = yuv2ya16le_1_c;
3027 *yuv2packed2 = yuv2ya16le_2_c;
3028 *yuv2packedX = yuv2ya16le_X_c;
3030 case AV_PIX_FMT_YA16BE:
3031 *yuv2packed1 = yuv2ya16be_1_c;
3032 *yuv2packed2 = yuv2ya16be_2_c;
3033 *yuv2packedX = yuv2ya16be_X_c;
3035 case AV_PIX_FMT_AYUV64LE:
3036 *yuv2packedX = yuv2ayuv64le_X_c;