2 * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 #include "libavutil/avutil.h"
27 #include "libavutil/bswap.h"
28 #include "libavutil/cpu.h"
29 #include "libavutil/intreadwrite.h"
30 #include "libavutil/mathematics.h"
31 #include "libavutil/pixdesc.h"
32 #include "libavutil/avassert.h"
36 #include "swscale_internal.h"
/* Read one 16-bit component, honouring the endianness of the source format. */
#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))

/* For BGR-ordered 48/64-bit formats the first and third components are
 * swapped; r/b pick the correct channel from the r_b/b_r locals declared
 * in the templates below. */
#define r ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE || origin == AV_PIX_FMT_BGRA64BE || origin == AV_PIX_FMT_BGRA64LE) ? b_r : r_b)
#define b ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE || origin == AV_PIX_FMT_BGRA64BE || origin == AV_PIX_FMT_BGRA64LE) ? r_b : b_r)
43 static av_always_inline void
44 rgb64ToY_c_template(uint16_t *dst, const uint16_t *src, int width,
45 enum AVPixelFormat origin, int32_t *rgb2yuv)
47 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
49 for (i = 0; i < width; i++) {
50 unsigned int r_b = input_pixel(&src[i*4+0]);
51 unsigned int g = input_pixel(&src[i*4+1]);
52 unsigned int b_r = input_pixel(&src[i*4+2]);
54 dst[i] = (ry*r + gy*g + by*b + (0x2001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
58 static av_always_inline void
59 rgb64ToUV_c_template(uint16_t *dstU, uint16_t *dstV,
60 const uint16_t *src1, const uint16_t *src2,
61 int width, enum AVPixelFormat origin, int32_t *rgb2yuv)
64 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
65 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
66 av_assert1(src1==src2);
67 for (i = 0; i < width; i++) {
68 int r_b = input_pixel(&src1[i*4+0]);
69 int g = input_pixel(&src1[i*4+1]);
70 int b_r = input_pixel(&src1[i*4+2]);
72 dstU[i] = (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
73 dstV[i] = (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
77 static av_always_inline void
78 rgb64ToUV_half_c_template(uint16_t *dstU, uint16_t *dstV,
79 const uint16_t *src1, const uint16_t *src2,
80 int width, enum AVPixelFormat origin, int32_t *rgb2yuv)
83 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
84 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
85 av_assert1(src1==src2);
86 for (i = 0; i < width; i++) {
87 int r_b = (input_pixel(&src1[8 * i + 0]) + input_pixel(&src1[8 * i + 4]) + 1) >> 1;
88 int g = (input_pixel(&src1[8 * i + 1]) + input_pixel(&src1[8 * i + 5]) + 1) >> 1;
89 int b_r = (input_pixel(&src1[8 * i + 2]) + input_pixel(&src1[8 * i + 6]) + 1) >> 1;
91 dstU[i]= (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
92 dstV[i]= (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
/* Instantiate non-inline Y / UV / half-UV entry points for one packed
 * 64-bit RGB format; they just cast the byte pointers and call the
 * templates above. */
#define rgb64funcs(pattern, BE_LE, origin) \
static void pattern ## 64 ## BE_LE ## ToY_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused0, const uint8_t *unused1,\
                                    int width, uint32_t *rgb2yuv) \
{ \
    const uint16_t *src = (const uint16_t *) _src; \
    uint16_t *dst = (uint16_t *) _dst; \
    rgb64ToY_c_template(dst, src, width, origin, rgb2yuv); \
} \
 \
static void pattern ## 64 ## BE_LE ## ToUV_c(uint8_t *_dstU, uint8_t *_dstV, \
                                    const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
                                    int width, uint32_t *rgb2yuv) \
{ \
    const uint16_t *src1 = (const uint16_t *) _src1, \
                   *src2 = (const uint16_t *) _src2; \
    uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
    rgb64ToUV_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
} \
 \
static void pattern ## 64 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, \
                                    const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
                                    int width, uint32_t *rgb2yuv) \
{ \
    const uint16_t *src1 = (const uint16_t *) _src1, \
                   *src2 = (const uint16_t *) _src2; \
    uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
    rgb64ToUV_half_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
}
/* Instantiate the 64-bit RGBA/BGRA readers for both endiannesses. */
rgb64funcs(rgb, LE, AV_PIX_FMT_RGBA64LE)
rgb64funcs(rgb, BE, AV_PIX_FMT_RGBA64BE)
rgb64funcs(bgr, LE, AV_PIX_FMT_BGRA64LE)
rgb64funcs(bgr, BE, AV_PIX_FMT_BGRA64BE)
130 static av_always_inline void rgb48ToY_c_template(uint16_t *dst,
131 const uint16_t *src, int width,
132 enum AVPixelFormat origin,
135 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
137 for (i = 0; i < width; i++) {
138 unsigned int r_b = input_pixel(&src[i * 3 + 0]);
139 unsigned int g = input_pixel(&src[i * 3 + 1]);
140 unsigned int b_r = input_pixel(&src[i * 3 + 2]);
142 dst[i] = (ry*r + gy*g + by*b + (0x2001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
146 static av_always_inline void rgb48ToUV_c_template(uint16_t *dstU,
148 const uint16_t *src1,
149 const uint16_t *src2,
151 enum AVPixelFormat origin,
155 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
156 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
157 av_assert1(src1 == src2);
158 for (i = 0; i < width; i++) {
159 int r_b = input_pixel(&src1[i * 3 + 0]);
160 int g = input_pixel(&src1[i * 3 + 1]);
161 int b_r = input_pixel(&src1[i * 3 + 2]);
163 dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
164 dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
168 static av_always_inline void rgb48ToUV_half_c_template(uint16_t *dstU,
170 const uint16_t *src1,
171 const uint16_t *src2,
173 enum AVPixelFormat origin,
177 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
178 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
179 av_assert1(src1 == src2);
180 for (i = 0; i < width; i++) {
181 int r_b = (input_pixel(&src1[6 * i + 0]) +
182 input_pixel(&src1[6 * i + 3]) + 1) >> 1;
183 int g = (input_pixel(&src1[6 * i + 1]) +
184 input_pixel(&src1[6 * i + 4]) + 1) >> 1;
185 int b_r = (input_pixel(&src1[6 * i + 2]) +
186 input_pixel(&src1[6 * i + 5]) + 1) >> 1;
188 dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
189 dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
/* Instantiate non-inline Y / UV / half-UV entry points for one packed
 * 48-bit RGB format. */
#define rgb48funcs(pattern, BE_LE, origin) \
static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst, \
                                            const uint8_t *_src, \
                                            const uint8_t *unused0, const uint8_t *unused1,\
                                            int width, \
                                            uint32_t *rgb2yuv) \
{ \
    const uint16_t *src = (const uint16_t *)_src; \
    uint16_t *dst = (uint16_t *)_dst; \
    rgb48ToY_c_template(dst, src, width, origin, rgb2yuv); \
} \
 \
static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU, \
                                             uint8_t *_dstV, \
                                             const uint8_t *unused0, \
                                             const uint8_t *_src1, \
                                             const uint8_t *_src2, \
                                             int width, \
                                             uint32_t *rgb2yuv) \
{ \
    const uint16_t *src1 = (const uint16_t *)_src1, \
                   *src2 = (const uint16_t *)_src2; \
    uint16_t *dstU = (uint16_t *)_dstU, \
             *dstV = (uint16_t *)_dstV; \
    rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
} \
 \
static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, \
                                                  uint8_t *_dstV, \
                                                  const uint8_t *unused0, \
                                                  const uint8_t *_src1, \
                                                  const uint8_t *_src2, \
                                                  int width, \
                                                  uint32_t *rgb2yuv) \
{ \
    const uint16_t *src1 = (const uint16_t *)_src1, \
                   *src2 = (const uint16_t *)_src2; \
    uint16_t *dstU = (uint16_t *)_dstU, \
             *dstV = (uint16_t *)_dstV; \
    rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
}
/* Instantiate the 48-bit RGB/BGR readers for both endiannesses. */
rgb48funcs(rgb, LE, AV_PIX_FMT_RGB48LE)
rgb48funcs(rgb, BE, AV_PIX_FMT_RGB48BE)
rgb48funcs(bgr, LE, AV_PIX_FMT_BGR48LE)
rgb48funcs(bgr, BE, AV_PIX_FMT_BGR48BE)
/* The r/b channel-swap helpers and the 16-bit input_pixel are only used by
 * the 48/64-bit templates above; they must be undefined here, otherwise the
 * plain r/g/b locals below would be macro-expanded and redefining
 * input_pixel would be invalid. */
#undef r
#undef b
#undef input_pixel

/* Packed 16/32-bit input: 32-bit formats are read as one aligned native
 * 32-bit word, everything else as a 16-bit LE/BE word. */
#define input_pixel(i) ((origin == AV_PIX_FMT_RGBA || \
                         origin == AV_PIX_FMT_BGRA || \
                         origin == AV_PIX_FMT_ARGB || \
                         origin == AV_PIX_FMT_ABGR || \
                         origin == AV_PIX_FMT_X2RGB10) \
                        ? AV_RN32A(&src[(i) * 4]) \
                        : (isBE(origin) ? AV_RB16(&src[(i) * 2]) \
                                        : AV_RL16(&src[(i) * 2])))
253 static av_always_inline void rgb16_32ToY_c_template(int16_t *dst,
256 enum AVPixelFormat origin,
259 int maskr, int maskg,
261 int gsh, int bsh, int S,
264 const int ry = rgb2yuv[RY_IDX]<<rsh, gy = rgb2yuv[GY_IDX]<<gsh, by = rgb2yuv[BY_IDX]<<bsh;
265 const unsigned rnd = (32<<((S)-1)) + (1<<(S-7));
268 for (i = 0; i < width; i++) {
269 int px = input_pixel(i) >> shp;
270 int b = (px & maskb) >> shb;
271 int g = (px & maskg) >> shg;
272 int r = (px & maskr) >> shr;
274 dst[i] = (ry * r + gy * g + by * b + rnd) >> ((S)-6);
278 static av_always_inline void rgb16_32ToUV_c_template(int16_t *dstU,
282 enum AVPixelFormat origin,
285 int maskr, int maskg,
287 int gsh, int bsh, int S,
290 const int ru = rgb2yuv[RU_IDX] * (1 << rsh), gu = rgb2yuv[GU_IDX] * (1 << gsh), bu = rgb2yuv[BU_IDX] * (1 << bsh),
291 rv = rgb2yuv[RV_IDX] * (1 << rsh), gv = rgb2yuv[GV_IDX] * (1 << gsh), bv = rgb2yuv[BV_IDX] * (1 << bsh);
292 const unsigned rnd = (256u<<((S)-1)) + (1<<(S-7));
295 for (i = 0; i < width; i++) {
296 int px = input_pixel(i) >> shp;
297 int b = (px & maskb) >> shb;
298 int g = (px & maskg) >> shg;
299 int r = (px & maskr) >> shr;
301 dstU[i] = (ru * r + gu * g + bu * b + rnd) >> ((S)-6);
302 dstV[i] = (rv * r + gv * g + bv * b + rnd) >> ((S)-6);
306 static av_always_inline void rgb16_32ToUV_half_c_template(int16_t *dstU,
310 enum AVPixelFormat origin,
313 int maskr, int maskg,
315 int gsh, int bsh, int S,
318 const int ru = rgb2yuv[RU_IDX] * (1 << rsh), gu = rgb2yuv[GU_IDX] * (1 << gsh), bu = rgb2yuv[BU_IDX] * (1 << bsh),
319 rv = rgb2yuv[RV_IDX] * (1 << rsh), gv = rgb2yuv[GV_IDX] * (1 << gsh), bv = rgb2yuv[BV_IDX] * (1 << bsh),
320 maskgx = ~(maskr | maskb);
321 const unsigned rnd = (256U<<(S)) + (1<<(S-6));
327 for (i = 0; i < width; i++) {
328 unsigned px0 = input_pixel(2 * i + 0) >> shp;
329 unsigned px1 = input_pixel(2 * i + 1) >> shp;
330 int b, r, g = (px0 & maskgx) + (px1 & maskgx);
331 int rb = px0 + px1 - g;
333 b = (rb & maskb) >> shb;
335 origin == AV_PIX_FMT_BGR565LE || origin == AV_PIX_FMT_BGR565BE ||
336 origin == AV_PIX_FMT_RGB565LE || origin == AV_PIX_FMT_RGB565BE) {
339 g = (g & maskg) >> shg;
341 r = (rb & maskr) >> shr;
343 dstU[i] = (ru * r + gu * g + bu * b + (unsigned)rnd) >> ((S)-6+1);
344 dstV[i] = (rv * r + gv * g + bv * b + (unsigned)rnd) >> ((S)-6+1);
/* Instantiate non-inline Y / UV / half-UV wrappers for one packed
 * 16/32-bit RGB layout described by the shift/mask parameters. */
#define rgb16_32_wrapper(fmt, name, shr, shg, shb, shp, maskr, \
                         maskg, maskb, rsh, gsh, bsh, S) \
static void name ## ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, \
                          int width, uint32_t *tab) \
{ \
    rgb16_32ToY_c_template((int16_t*)dst, src, width, fmt, shr, shg, shb, shp, \
                           maskr, maskg, maskb, rsh, gsh, bsh, S, tab); \
} \
 \
static void name ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \
                           const uint8_t *unused0, const uint8_t *src, const uint8_t *dummy, \
                           int width, uint32_t *tab) \
{ \
    rgb16_32ToUV_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \
                            shr, shg, shb, shp, \
                            maskr, maskg, maskb, rsh, gsh, bsh, S, tab);\
} \
 \
static void name ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV, \
                                const uint8_t *unused0, const uint8_t *src, \
                                const uint8_t *dummy, \
                                int width, uint32_t *tab) \
{ \
    rgb16_32ToUV_half_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \
                                 shr, shg, shb, shp, \
                                 maskr, maskg, maskb, \
                                 rsh, gsh, bsh, S, tab); \
}
/* Instantiate readers for every supported packed 16/32-bit RGB layout:
 * format, name, per-channel extraction shifts/masks, coefficient shifts
 * and the fixed-point precision S. */
rgb16_32_wrapper(AV_PIX_FMT_BGR32, bgr32, 16, 0, 0, 0, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR32_1, bgr321, 16, 0, 0, 8, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB32, rgb32, 0, 0, 16, 0, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB32_1, rgb321, 0, 0, 16, 8, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR565LE, bgr16le, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR555LE, bgr15le, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_BGR444LE, bgr12le, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_RGB565LE, rgb16le, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB555LE, rgb15le, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_RGB444LE, rgb12le, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_BGR565BE, bgr16be, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR555BE, bgr15be, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_BGR444BE, bgr12be, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_RGB565BE, rgb16be, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB555BE, rgb15be, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_RGB444BE, rgb12be, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_X2RGB10LE, rgb30le, 16, 6, 0, 0, 0x3FF00000, 0xFFC00, 0x3FF, 0, 0, 4, RGB2YUV_SHIFT + 6)
397 static void gbr24pToUV_half_c(uint8_t *_dstU, uint8_t *_dstV,
398 const uint8_t *gsrc, const uint8_t *bsrc, const uint8_t *rsrc,
399 int width, uint32_t *rgb2yuv)
401 uint16_t *dstU = (uint16_t *)_dstU;
402 uint16_t *dstV = (uint16_t *)_dstV;
403 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
404 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
407 for (i = 0; i < width; i++) {
408 unsigned int g = gsrc[2*i] + gsrc[2*i+1];
409 unsigned int b = bsrc[2*i] + bsrc[2*i+1];
410 unsigned int r = rsrc[2*i] + rsrc[2*i+1];
412 dstU[i] = (ru*r + gu*g + bu*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1);
413 dstV[i] = (rv*r + gv*g + bv*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1);
/* Extract the 16-bit alpha channel from little-endian RGBA64. */
static void rgba64leToA_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
                          const uint8_t *unused2, int width, uint32_t *unused)
{
    int16_t *dst = (int16_t *)_dst;
    const uint16_t *src = (const uint16_t *)_src;
    int i;
    for (i = 0; i < width; i++)
        dst[i] = AV_RL16(src + 4 * i + 3);
}
/* Extract the 16-bit alpha channel from big-endian RGBA64. */
static void rgba64beToA_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
                          const uint8_t *unused2, int width, uint32_t *unused)
{
    int16_t *dst = (int16_t *)_dst;
    const uint16_t *src = (const uint16_t *)_src;
    int i;
    for (i = 0; i < width; i++)
        dst[i] = AV_RB16(src + 4 * i + 3);
}
/* Extract alpha (first byte of each ABGR pixel), expanded 8 -> 14 bits by
 * shifting and replicating the top bits. */
static void abgrToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int16_t *dst = (int16_t *)_dst;
    int i;
    for (i=0; i<width; i++) {
        dst[i]= src[4*i]<<6 | src[4*i]>>2;
    }
}
/* Extract alpha (last byte of each RGBA pixel), expanded 8 -> 14 bits. */
static void rgbaToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int16_t *dst = (int16_t *)_dst;
    int i;
    for (i=0; i<width; i++) {
        dst[i]= src[4*i+3]<<6 | src[4*i+3]>>2;
    }
}
/* Extract alpha from a palettized image: the palette entry's top byte,
 * expanded 8 -> 14 bits. */
static void palToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *pal)
{
    int16_t *dst = (int16_t *)_dst;
    int i;
    for (i=0; i<width; i++) {
        int d = src[i];

        dst[i]= (pal[d] >> 24)<<6 | pal[d]>>26;
    }
}
/* Extract luma from a palettized image: low byte of the palette entry,
 * scaled to 14 bits. */
static void palToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *pal)
{
    int16_t *dst = (int16_t *)_dst;
    int i;
    for (i = 0; i < width; i++) {
        int d = src[i];

        dst[i] = (pal[d] & 0xFF)<<6;
    }
}
/* Extract chroma from a palettized image: bytes 1 (U) and 2 (V) of the
 * palette entry, scaled to 14 bits. */
static void palToUV_c(uint8_t *_dstU, uint8_t *_dstV,
                      const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                      int width, uint32_t *pal)
{
    uint16_t *dstU = (uint16_t *)_dstU;
    int16_t *dstV = (int16_t *)_dstV;
    int i;
    av_assert1(src1 == src2);
    for (i = 0; i < width; i++) {
        int p = pal[src1[i]];

        dstU[i] = (uint8_t)(p>> 8)<<6;
        dstV[i] = (uint8_t)(p>>16)<<6;
    }
}
/* Expand 1 bpp mono (white = 0) to 14-bit luma. Full bytes are handled in
 * the main loop; the remaining width&7 bits come from one extra byte so no
 * bits past `width` are read. (Using ceil(width/8) for the main loop would
 * read src one byte out of bounds on the tail path and break the tail
 * test, which is why the byte count is floored here.) */
static void monowhite2Y_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int16_t *dst = (int16_t *)_dst;
    int i, j;
    for (i = 0; i < width / 8; i++) {
        int d = ~src[i];    /* monowhite: invert so 0 -> full intensity */
        for (j = 0; j < 8; j++)
            dst[8*i+j]= ((d>>(7-j))&1) * 16383;
    }
    if (width & 7) {
        int d = ~src[i];
        for (j = 0; j < (width&7); j++)
            dst[8*i+j]= ((d>>(7-j))&1) * 16383;
    }
}
/* Expand 1 bpp mono (black = 0) to 14-bit luma; same tail handling as
 * monowhite2Y_c but without the bit inversion. */
static void monoblack2Y_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int16_t *dst = (int16_t *)_dst;
    int i, j;
    for (i = 0; i < width / 8; i++) {
        int d = src[i];
        for (j = 0; j < 8; j++)
            dst[8*i+j]= ((d>>(7-j))&1) * 16383;
    }
    if (width & 7) {
        int d = src[i];
        for (j = 0; j < (width&7); j++)
            dst[8*i+j] = ((d>>(7-j))&1) * 16383;
    }
}
/* YUYV422: luma is every even byte. */
static void yuy2ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[2 * i];
}
/* YUYV422: U at byte 1, V at byte 3 of each 4-byte pair of pixels. */
static void yuy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = src1[4 * i + 1];
        dstV[i] = src1[4 * i + 3];
    }
    av_assert1(src1 == src2);
}
/* YVYU422: like yuy2ToUV_c but with U and V swapped in the source. */
static void yvy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstV[i] = src1[4 * i + 1];
        dstU[i] = src1[4 * i + 3];
    }
    av_assert1(src1 == src2);
}
/* Y210LE: 10-bit samples stored in the high bits of 16-bit words;
 * U/V sit at word offsets 1 and 3 of each 8-byte group. */
static void y210le_UV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src,
                        const uint8_t *unused1, int width, uint32_t *unused2)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RL16(src + i * 8 + 2) >> 6);
        AV_WN16(dstV + i * 2, AV_RL16(src + i * 8 + 6) >> 6);
    }
}
/* Y210LE: extract the 10-bit luma from the top of each 16-bit word. */
static void y210le_Y_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0,
                       const uint8_t *unused1, int width, uint32_t *unused2)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 4) >> 6);
}
/* Byte-swap one 16-bit luma plane. */
static void bswap16Y_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1, const uint8_t *unused2, int width,
                       uint32_t *unused)
{
    int i;
    const uint16_t *src = (const uint16_t *)_src;
    uint16_t *dst = (uint16_t *)_dst;
    for (i = 0; i < width; i++)
        dst[i] = av_bswap16(src[i]);
}
/* Byte-swap a pair of 16-bit chroma planes. */
static void bswap16UV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *_src1,
                        const uint8_t *_src2, int width, uint32_t *unused)
{
    int i;
    const uint16_t *src1 = (const uint16_t *)_src1,
                   *src2 = (const uint16_t *)_src2;
    uint16_t *dstU = (uint16_t *)_dstU, *dstV = (uint16_t *)_dstV;
    for (i = 0; i < width; i++) {
        dstU[i] = av_bswap16(src1[i]);
        dstV[i] = av_bswap16(src2[i]);
    }
}
/* YA16LE: copy the 16-bit gray component of each gray+alpha pair. */
static void read_ya16le_gray_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                               uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 4));
}
/* YA16LE: copy the 16-bit alpha component of each gray+alpha pair. */
static void read_ya16le_alpha_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                                uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 4 + 2));
}
/* YA16BE: copy the 16-bit gray component of each gray+alpha pair. */
static void read_ya16be_gray_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                               uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RB16(src + i * 4));
}
/* YA16BE: copy the 16-bit alpha component of each gray+alpha pair. */
static void read_ya16be_alpha_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                                uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RB16(src + i * 4 + 2));
}
/* AYUV64LE: luma is the second 16-bit word of each A-Y-U-V group. */
static void read_ayuv64le_Y_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
                              uint32_t *unused2)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 8 + 2));
}
/* AYUV64LE: U and V are words 2 and 3 of each A-Y-U-V group. */
static void read_ayuv64le_UV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src,
                               const uint8_t *unused1, int width, uint32_t *unused2)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RL16(src + i * 8 + 4));
        AV_WN16(dstV + i * 2, AV_RL16(src + i * 8 + 6));
    }
}
/* AYUV64LE: alpha is the first 16-bit word of each A-Y-U-V group. */
static void read_ayuv64le_A_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
                              uint32_t *unused2)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 8));
}
/* This is almost identical to the previous, and exists only because
 * yuy2ToY/UV(dst, src + 1, ...) would have 100% unaligned accesses. */
/* UYVY422: luma is every odd byte. */
static void uyvyToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[2 * i + 1];
}
/* UYVY422: U at byte 0, V at byte 2 of each 4-byte pair of pixels. */
static void uyvyToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = src1[4 * i + 0];
        dstV[i] = src1[4 * i + 2];
    }
    av_assert1(src1 == src2);
}
678 static av_always_inline void nvXXtoUV_c(uint8_t *dst1, uint8_t *dst2,
679 const uint8_t *src, int width)
682 for (i = 0; i < width; i++) {
683 dst1[i] = src[2 * i + 0];
684 dst2[i] = src[2 * i + 1];
/* NV12: interleaved chroma is U,V. */
static void nv12ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstU, dstV, src1, width);
}
/* NV21: interleaved chroma is V,U — note the swapped destinations. */
static void nv21ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstV, dstU, src1, width);
}
/* P010LE: 10-bit luma stored in the high bits of 16-bit LE words. */
static void p010LEToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1,
                        const uint8_t *unused2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dst + i * 2, AV_RL16(src + i * 2) >> 6);
    }
}
/* P010BE: 10-bit luma stored in the high bits of 16-bit BE words. */
static void p010BEToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1,
                        const uint8_t *unused2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dst + i * 2, AV_RB16(src + i * 2) >> 6);
    }
}
/* P010LE: de-interleave the 10-in-16-bit U/V pairs. */
static void p010LEToUV_c(uint8_t *dstU, uint8_t *dstV,
                         const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                         int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RL16(src1 + i * 4 + 0) >> 6);
        AV_WN16(dstV + i * 2, AV_RL16(src1 + i * 4 + 2) >> 6);
    }
}
/* P010BE: de-interleave the 10-in-16-bit U/V pairs. */
static void p010BEToUV_c(uint8_t *dstU, uint8_t *dstV,
                         const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                         int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RB16(src1 + i * 4 + 0) >> 6);
        AV_WN16(dstV + i * 2, AV_RB16(src1 + i * 4 + 2) >> 6);
    }
}
/* P016LE: de-interleave full 16-bit U/V pairs (no shift needed). */
static void p016LEToUV_c(uint8_t *dstU, uint8_t *dstV,
                         const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                         int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RL16(src1 + i * 4 + 0));
        AV_WN16(dstV + i * 2, AV_RL16(src1 + i * 4 + 2));
    }
}
/* P016BE: de-interleave full 16-bit U/V pairs (no shift needed). */
static void p016BEToUV_c(uint8_t *dstU, uint8_t *dstV,
                         const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                         int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RB16(src1 + i * 4 + 0));
        AV_WN16(dstV + i * 2, AV_RB16(src1 + i * 4 + 2));
    }
}
/* Back to a plain endian-aware 16-bit read; must undefine the 16/32-bit
 * variant first — redefining a macro with a different body is invalid C. */
#undef input_pixel
#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))
766 static void bgr24ToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2,
767 int width, uint32_t *rgb2yuv)
769 int16_t *dst = (int16_t *)_dst;
770 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
772 for (i = 0; i < width; i++) {
773 int b = src[i * 3 + 0];
774 int g = src[i * 3 + 1];
775 int r = src[i * 3 + 2];
777 dst[i] = ((ry*r + gy*g + by*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
781 static void bgr24ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
782 const uint8_t *src2, int width, uint32_t *rgb2yuv)
784 int16_t *dstU = (int16_t *)_dstU;
785 int16_t *dstV = (int16_t *)_dstV;
786 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
787 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
789 for (i = 0; i < width; i++) {
790 int b = src1[3 * i + 0];
791 int g = src1[3 * i + 1];
792 int r = src1[3 * i + 2];
794 dstU[i] = (ru*r + gu*g + bu*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
795 dstV[i] = (rv*r + gv*g + bv*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
797 av_assert1(src1 == src2);
800 static void bgr24ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
801 const uint8_t *src2, int width, uint32_t *rgb2yuv)
803 int16_t *dstU = (int16_t *)_dstU;
804 int16_t *dstV = (int16_t *)_dstV;
806 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
807 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
808 for (i = 0; i < width; i++) {
809 int b = src1[6 * i + 0] + src1[6 * i + 3];
810 int g = src1[6 * i + 1] + src1[6 * i + 4];
811 int r = src1[6 * i + 2] + src1[6 * i + 5];
813 dstU[i] = (ru*r + gu*g + bu*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
814 dstV[i] = (rv*r + gv*g + bv*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
816 av_assert1(src1 == src2);
819 static void rgb24ToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
822 int16_t *dst = (int16_t *)_dst;
823 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
825 for (i = 0; i < width; i++) {
826 int r = src[i * 3 + 0];
827 int g = src[i * 3 + 1];
828 int b = src[i * 3 + 2];
830 dst[i] = ((ry*r + gy*g + by*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
834 static void rgb24ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
835 const uint8_t *src2, int width, uint32_t *rgb2yuv)
837 int16_t *dstU = (int16_t *)_dstU;
838 int16_t *dstV = (int16_t *)_dstV;
840 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
841 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
842 av_assert1(src1 == src2);
843 for (i = 0; i < width; i++) {
844 int r = src1[3 * i + 0];
845 int g = src1[3 * i + 1];
846 int b = src1[3 * i + 2];
848 dstU[i] = (ru*r + gu*g + bu*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
849 dstV[i] = (rv*r + gv*g + bv*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
853 static void rgb24ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
854 const uint8_t *src2, int width, uint32_t *rgb2yuv)
856 int16_t *dstU = (int16_t *)_dstU;
857 int16_t *dstV = (int16_t *)_dstV;
859 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
860 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
861 av_assert1(src1 == src2);
862 for (i = 0; i < width; i++) {
863 int r = src1[6 * i + 0] + src1[6 * i + 3];
864 int g = src1[6 * i + 1] + src1[6 * i + 4];
865 int b = src1[6 * i + 2] + src1[6 * i + 5];
867 dstU[i] = (ru*r + gu*g + bu*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
868 dstV[i] = (rv*r + gv*g + bv*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
872 static void planar_rgb_to_y(uint8_t *_dst, const uint8_t *src[4], int width, int32_t *rgb2yuv)
874 uint16_t *dst = (uint16_t *)_dst;
875 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
877 for (i = 0; i < width; i++) {
882 dst[i] = (ry*r + gy*g + by*b + (0x801<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
/* Planar 8-bit GBRA: expand the alpha plane 8 -> 14 bits. */
static void planar_rgb_to_a(uint8_t *_dst, const uint8_t *src[4], int width, int32_t *unused)
{
    uint16_t *dst = (uint16_t *)_dst;
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[3][i] << 6;
}
894 static void planar_rgb_to_uv(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *src[4], int width, int32_t *rgb2yuv)
896 uint16_t *dstU = (uint16_t *)_dstU;
897 uint16_t *dstV = (uint16_t *)_dstV;
898 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
899 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
901 for (i = 0; i < width; i++) {
906 dstU[i] = (ru*r + gu*g + bu*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
907 dstV[i] = (rv*r + gv*g + bv*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
/* Endian-aware 16-bit sample read for the planar >8-bit templates below. */
#define rdpx(src) \
    (is_be ? AV_RB16(src) : AV_RL16(src))
913 static av_always_inline void planar_rgb16_to_y(uint8_t *_dst, const uint8_t *_src[4],
914 int width, int bpc, int is_be, int32_t *rgb2yuv)
917 const uint16_t **src = (const uint16_t **)_src;
918 uint16_t *dst = (uint16_t *)_dst;
919 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
920 int shift = bpc < 16 ? bpc : 14;
921 for (i = 0; i < width; i++) {
922 int g = rdpx(src[0] + i);
923 int b = rdpx(src[1] + i);
924 int r = rdpx(src[2] + i);
926 dst[i] = ((ry*r + gy*g + by*b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14));
930 static av_always_inline void planar_rgb16_to_a(uint8_t *_dst, const uint8_t *_src[4],
931 int width, int bpc, int is_be, int32_t *rgb2yuv)
934 const uint16_t **src = (const uint16_t **)_src;
935 uint16_t *dst = (uint16_t *)_dst;
936 int shift = bpc < 16 ? bpc : 14;
938 for (i = 0; i < width; i++) {
939 dst[i] = rdpx(src[3] + i) << (14 - shift);
943 static av_always_inline void planar_rgb16_to_uv(uint8_t *_dstU, uint8_t *_dstV,
944 const uint8_t *_src[4], int width,
945 int bpc, int is_be, int32_t *rgb2yuv)
948 const uint16_t **src = (const uint16_t **)_src;
949 uint16_t *dstU = (uint16_t *)_dstU;
950 uint16_t *dstV = (uint16_t *)_dstV;
951 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
952 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
953 int shift = bpc < 16 ? bpc : 14;
954 for (i = 0; i < width; i++) {
955 int g = rdpx(src[0] + i);
956 int b = rdpx(src[1] + i);
957 int r = rdpx(src[2] + i);
959 dstU[i] = (ru*r + gu*g + bu*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
960 dstV[i] = (rv*r + gv*g + bv*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
/* Switch rdpx to an endian-aware 32-bit float read for the planar float
 * templates; the 16-bit variant must be undefined first. */
#undef rdpx
#define rdpx(src) (is_be ? av_int2float(AV_RB32(src)): av_int2float(AV_RL32(src)))
967 static av_always_inline void planar_rgbf32_to_a(uint8_t *_dst, const uint8_t *_src[4], int width, int is_be, int32_t *rgb2yuv)
970 const float **src = (const float **)_src;
971 uint16_t *dst = (uint16_t *)_dst;
973 for (i = 0; i < width; i++) {
974 dst[i] = av_clip_uint16(lrintf(65535.0f * rdpx(src[3] + i)));
978 static av_always_inline void planar_rgbf32_to_uv(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *_src[4], int width, int is_be, int32_t *rgb2yuv)
981 const float **src = (const float **)_src;
982 uint16_t *dstU = (uint16_t *)_dstU;
983 uint16_t *dstV = (uint16_t *)_dstV;
984 int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
985 int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
988 for (i = 0; i < width; i++) {
989 int g = av_clip_uint16(lrintf(65535.0f * rdpx(src[0] + i)));
990 int b = av_clip_uint16(lrintf(65535.0f * rdpx(src[1] + i)));
991 int r = av_clip_uint16(lrintf(65535.0f * rdpx(src[2] + i)));
993 dstU[i] = (ru*r + gu*g + bu*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
994 dstV[i] = (rv*r + gv*g + bv*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
998 static av_always_inline void planar_rgbf32_to_y(uint8_t *_dst, const uint8_t *_src[4], int width, int is_be, int32_t *rgb2yuv)
1001 const float **src = (const float **)_src;
1002 uint16_t *dst = (uint16_t *)_dst;
1004 int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
1007 for (i = 0; i < width; i++) {
1008 int g = av_clip_uint16(lrintf(65535.0f * rdpx(src[0] + i)));
1009 int b = av_clip_uint16(lrintf(65535.0f * rdpx(src[1] + i)));
1010 int r = av_clip_uint16(lrintf(65535.0f * rdpx(src[2] + i)));
1012 dst[i] = ((ry*r + gy*g + by*b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14));
1018 static av_always_inline void grayf32ToY16_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
1019 const uint8_t *unused2, int width, uint32_t *unused)
1022 const float *src = (const float *)_src;
1023 uint16_t *dst = (uint16_t *)_dst;
1025 for (i = 0; i < width; ++i){
1026 dst[i] = av_clip_uint16(lrintf(65535.0f * src[i]));
1030 static av_always_inline void grayf32ToY16_bswap_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
1031 const uint8_t *unused2, int width, uint32_t *unused)
1034 const uint32_t *src = (const uint32_t *)_src;
1035 uint16_t *dst = (uint16_t *)_dst;
1037 for (i = 0; i < width; ++i){
1038 dst[i] = av_clip_uint16(lrintf(65535.0f * av_int2float(av_bswap32(src[i]))));
/*
 * Instantiate per-depth, per-endianness planar RGB -> luma/chroma wrappers
 * around the generic 16-bit-container readers.
 */
#define rgb9plus_planar_funcs_endian(nbits, endian_name, endian)                        \
static void planar_rgb##nbits##endian_name##_to_y(uint8_t *dst, const uint8_t *src[4],  \
                                                  int w, int32_t *rgb2yuv)              \
{                                                                                       \
    planar_rgb16_to_y(dst, src, w, nbits, endian, rgb2yuv);                             \
}                                                                                       \
                                                                                        \
static void planar_rgb##nbits##endian_name##_to_uv(uint8_t *dstU, uint8_t *dstV,        \
                                                   const uint8_t *src[4],               \
                                                   int w, int32_t *rgb2yuv)             \
{                                                                                       \
    planar_rgb16_to_uv(dstU, dstV, src, w, nbits, endian, rgb2yuv);                     \
}
/*
 * Instantiate LE and BE alpha-plane readers for depths that have an
 * alpha-carrying (GBRAP*) pixel format.
 */
#define rgb9plus_planar_transparency_funcs(nbits)                            \
static void planar_rgb##nbits##le_to_a(uint8_t *dst, const uint8_t *src[4],  \
                                       int w, int32_t *rgb2yuv)              \
{                                                                            \
    planar_rgb16_to_a(dst, src, w, nbits, 0, rgb2yuv);                       \
}                                                                            \
                                                                             \
static void planar_rgb##nbits##be_to_a(uint8_t *dst, const uint8_t *src[4],  \
                                       int w, int32_t *rgb2yuv)              \
{                                                                            \
    planar_rgb16_to_a(dst, src, w, nbits, 1, rgb2yuv);                       \
}
1066 #define rgb9plus_planar_funcs(nbits) \
1067 rgb9plus_planar_funcs_endian(nbits, le, 0) \
1068 rgb9plus_planar_funcs_endian(nbits, be, 1)
1070 rgb9plus_planar_funcs(9)
1071 rgb9plus_planar_funcs(10)
1072 rgb9plus_planar_funcs(12)
1073 rgb9plus_planar_funcs(14)
1074 rgb9plus_planar_funcs(16)
1076 rgb9plus_planar_transparency_funcs(10)
1077 rgb9plus_planar_transparency_funcs(12)
1078 rgb9plus_planar_transparency_funcs(16)
/*
 * Instantiate float planar RGB (GBR[A]PF32) luma/chroma/alpha wrappers for
 * one endianness.
 */
#define rgbf32_planar_funcs_endian(endian_name, endian)                             \
static void planar_rgbf32##endian_name##_to_y(uint8_t *dst, const uint8_t *src[4],  \
                                              int w, int32_t *rgb2yuv)              \
{                                                                                   \
    planar_rgbf32_to_y(dst, src, w, endian, rgb2yuv);                               \
}                                                                                   \
                                                                                    \
static void planar_rgbf32##endian_name##_to_uv(uint8_t *dstU, uint8_t *dstV,        \
                                               const uint8_t *src[4], int w,        \
                                               int32_t *rgb2yuv)                    \
{                                                                                   \
    planar_rgbf32_to_uv(dstU, dstV, src, w, endian, rgb2yuv);                       \
}                                                                                   \
                                                                                    \
static void planar_rgbf32##endian_name##_to_a(uint8_t *dst, const uint8_t *src[4],  \
                                              int w, int32_t *rgb2yuv)              \
{                                                                                   \
    planar_rgbf32_to_a(dst, src, w, endian, rgb2yuv);                               \
}

rgbf32_planar_funcs_endian(le, 0)
rgbf32_planar_funcs_endian(be, 1)
1100 av_cold void ff_sws_init_input_funcs(SwsContext *c)
1102 enum AVPixelFormat srcFormat = c->srcFormat;
1104 c->chrToYV12 = NULL;
1105 switch (srcFormat) {
1106 case AV_PIX_FMT_YUYV422:
1107 c->chrToYV12 = yuy2ToUV_c;
1109 case AV_PIX_FMT_YVYU422:
1110 c->chrToYV12 = yvy2ToUV_c;
1112 case AV_PIX_FMT_UYVY422:
1113 c->chrToYV12 = uyvyToUV_c;
1115 case AV_PIX_FMT_NV12:
1116 case AV_PIX_FMT_NV24:
1117 c->chrToYV12 = nv12ToUV_c;
1119 case AV_PIX_FMT_NV21:
1120 case AV_PIX_FMT_NV42:
1121 c->chrToYV12 = nv21ToUV_c;
1123 case AV_PIX_FMT_RGB8:
1124 case AV_PIX_FMT_BGR8:
1125 case AV_PIX_FMT_PAL8:
1126 case AV_PIX_FMT_BGR4_BYTE:
1127 case AV_PIX_FMT_RGB4_BYTE:
1128 c->chrToYV12 = palToUV_c;
1130 case AV_PIX_FMT_GBRP9LE:
1131 c->readChrPlanar = planar_rgb9le_to_uv;
1133 case AV_PIX_FMT_GBRAP10LE:
1134 case AV_PIX_FMT_GBRP10LE:
1135 c->readChrPlanar = planar_rgb10le_to_uv;
1137 case AV_PIX_FMT_GBRAP12LE:
1138 case AV_PIX_FMT_GBRP12LE:
1139 c->readChrPlanar = planar_rgb12le_to_uv;
1141 case AV_PIX_FMT_GBRP14LE:
1142 c->readChrPlanar = planar_rgb14le_to_uv;
1144 case AV_PIX_FMT_GBRAP16LE:
1145 case AV_PIX_FMT_GBRP16LE:
1146 c->readChrPlanar = planar_rgb16le_to_uv;
1148 case AV_PIX_FMT_GBRAPF32LE:
1149 case AV_PIX_FMT_GBRPF32LE:
1150 c->readChrPlanar = planar_rgbf32le_to_uv;
1152 case AV_PIX_FMT_GBRP9BE:
1153 c->readChrPlanar = planar_rgb9be_to_uv;
1155 case AV_PIX_FMT_GBRAP10BE:
1156 case AV_PIX_FMT_GBRP10BE:
1157 c->readChrPlanar = planar_rgb10be_to_uv;
1159 case AV_PIX_FMT_GBRAP12BE:
1160 case AV_PIX_FMT_GBRP12BE:
1161 c->readChrPlanar = planar_rgb12be_to_uv;
1163 case AV_PIX_FMT_GBRP14BE:
1164 c->readChrPlanar = planar_rgb14be_to_uv;
1166 case AV_PIX_FMT_GBRAP16BE:
1167 case AV_PIX_FMT_GBRP16BE:
1168 c->readChrPlanar = planar_rgb16be_to_uv;
1170 case AV_PIX_FMT_GBRAPF32BE:
1171 case AV_PIX_FMT_GBRPF32BE:
1172 c->readChrPlanar = planar_rgbf32be_to_uv;
1174 case AV_PIX_FMT_GBRAP:
1175 case AV_PIX_FMT_GBRP:
1176 c->readChrPlanar = planar_rgb_to_uv;
1179 case AV_PIX_FMT_YUV420P9LE:
1180 case AV_PIX_FMT_YUV422P9LE:
1181 case AV_PIX_FMT_YUV444P9LE:
1182 case AV_PIX_FMT_YUV420P10LE:
1183 case AV_PIX_FMT_YUV422P10LE:
1184 case AV_PIX_FMT_YUV440P10LE:
1185 case AV_PIX_FMT_YUV444P10LE:
1186 case AV_PIX_FMT_YUV420P12LE:
1187 case AV_PIX_FMT_YUV422P12LE:
1188 case AV_PIX_FMT_YUV440P12LE:
1189 case AV_PIX_FMT_YUV444P12LE:
1190 case AV_PIX_FMT_YUV420P14LE:
1191 case AV_PIX_FMT_YUV422P14LE:
1192 case AV_PIX_FMT_YUV444P14LE:
1193 case AV_PIX_FMT_YUV420P16LE:
1194 case AV_PIX_FMT_YUV422P16LE:
1195 case AV_PIX_FMT_YUV444P16LE:
1197 case AV_PIX_FMT_YUVA420P9LE:
1198 case AV_PIX_FMT_YUVA422P9LE:
1199 case AV_PIX_FMT_YUVA444P9LE:
1200 case AV_PIX_FMT_YUVA420P10LE:
1201 case AV_PIX_FMT_YUVA422P10LE:
1202 case AV_PIX_FMT_YUVA444P10LE:
1203 case AV_PIX_FMT_YUVA422P12LE:
1204 case AV_PIX_FMT_YUVA444P12LE:
1205 case AV_PIX_FMT_YUVA420P16LE:
1206 case AV_PIX_FMT_YUVA422P16LE:
1207 case AV_PIX_FMT_YUVA444P16LE:
1208 c->chrToYV12 = bswap16UV_c;
1211 case AV_PIX_FMT_YUV420P9BE:
1212 case AV_PIX_FMT_YUV422P9BE:
1213 case AV_PIX_FMT_YUV444P9BE:
1214 case AV_PIX_FMT_YUV420P10BE:
1215 case AV_PIX_FMT_YUV422P10BE:
1216 case AV_PIX_FMT_YUV440P10BE:
1217 case AV_PIX_FMT_YUV444P10BE:
1218 case AV_PIX_FMT_YUV420P12BE:
1219 case AV_PIX_FMT_YUV422P12BE:
1220 case AV_PIX_FMT_YUV440P12BE:
1221 case AV_PIX_FMT_YUV444P12BE:
1222 case AV_PIX_FMT_YUV420P14BE:
1223 case AV_PIX_FMT_YUV422P14BE:
1224 case AV_PIX_FMT_YUV444P14BE:
1225 case AV_PIX_FMT_YUV420P16BE:
1226 case AV_PIX_FMT_YUV422P16BE:
1227 case AV_PIX_FMT_YUV444P16BE:
1229 case AV_PIX_FMT_YUVA420P9BE:
1230 case AV_PIX_FMT_YUVA422P9BE:
1231 case AV_PIX_FMT_YUVA444P9BE:
1232 case AV_PIX_FMT_YUVA420P10BE:
1233 case AV_PIX_FMT_YUVA422P10BE:
1234 case AV_PIX_FMT_YUVA444P10BE:
1235 case AV_PIX_FMT_YUVA422P12BE:
1236 case AV_PIX_FMT_YUVA444P12BE:
1237 case AV_PIX_FMT_YUVA420P16BE:
1238 case AV_PIX_FMT_YUVA422P16BE:
1239 case AV_PIX_FMT_YUVA444P16BE:
1240 c->chrToYV12 = bswap16UV_c;
1243 case AV_PIX_FMT_AYUV64LE:
1244 c->chrToYV12 = read_ayuv64le_UV_c;
1246 case AV_PIX_FMT_P010LE:
1247 c->chrToYV12 = p010LEToUV_c;
1249 case AV_PIX_FMT_P010BE:
1250 c->chrToYV12 = p010BEToUV_c;
1252 case AV_PIX_FMT_P016LE:
1253 c->chrToYV12 = p016LEToUV_c;
1255 case AV_PIX_FMT_P016BE:
1256 c->chrToYV12 = p016BEToUV_c;
1258 case AV_PIX_FMT_Y210LE:
1259 c->chrToYV12 = y210le_UV_c;
1262 if (c->chrSrcHSubSample) {
1263 switch (srcFormat) {
1264 case AV_PIX_FMT_RGBA64BE:
1265 c->chrToYV12 = rgb64BEToUV_half_c;
1267 case AV_PIX_FMT_RGBA64LE:
1268 c->chrToYV12 = rgb64LEToUV_half_c;
1270 case AV_PIX_FMT_BGRA64BE:
1271 c->chrToYV12 = bgr64BEToUV_half_c;
1273 case AV_PIX_FMT_BGRA64LE:
1274 c->chrToYV12 = bgr64LEToUV_half_c;
1276 case AV_PIX_FMT_RGB48BE:
1277 c->chrToYV12 = rgb48BEToUV_half_c;
1279 case AV_PIX_FMT_RGB48LE:
1280 c->chrToYV12 = rgb48LEToUV_half_c;
1282 case AV_PIX_FMT_BGR48BE:
1283 c->chrToYV12 = bgr48BEToUV_half_c;
1285 case AV_PIX_FMT_BGR48LE:
1286 c->chrToYV12 = bgr48LEToUV_half_c;
1288 case AV_PIX_FMT_RGB32:
1289 c->chrToYV12 = bgr32ToUV_half_c;
1291 case AV_PIX_FMT_RGB32_1:
1292 c->chrToYV12 = bgr321ToUV_half_c;
1294 case AV_PIX_FMT_BGR24:
1295 c->chrToYV12 = bgr24ToUV_half_c;
1297 case AV_PIX_FMT_BGR565LE:
1298 c->chrToYV12 = bgr16leToUV_half_c;
1300 case AV_PIX_FMT_BGR565BE:
1301 c->chrToYV12 = bgr16beToUV_half_c;
1303 case AV_PIX_FMT_BGR555LE:
1304 c->chrToYV12 = bgr15leToUV_half_c;
1306 case AV_PIX_FMT_BGR555BE:
1307 c->chrToYV12 = bgr15beToUV_half_c;
1309 case AV_PIX_FMT_GBRAP:
1310 case AV_PIX_FMT_GBRP:
1311 c->chrToYV12 = gbr24pToUV_half_c;
1313 case AV_PIX_FMT_BGR444LE:
1314 c->chrToYV12 = bgr12leToUV_half_c;
1316 case AV_PIX_FMT_BGR444BE:
1317 c->chrToYV12 = bgr12beToUV_half_c;
1319 case AV_PIX_FMT_BGR32:
1320 c->chrToYV12 = rgb32ToUV_half_c;
1322 case AV_PIX_FMT_BGR32_1:
1323 c->chrToYV12 = rgb321ToUV_half_c;
1325 case AV_PIX_FMT_RGB24:
1326 c->chrToYV12 = rgb24ToUV_half_c;
1328 case AV_PIX_FMT_RGB565LE:
1329 c->chrToYV12 = rgb16leToUV_half_c;
1331 case AV_PIX_FMT_RGB565BE:
1332 c->chrToYV12 = rgb16beToUV_half_c;
1334 case AV_PIX_FMT_RGB555LE:
1335 c->chrToYV12 = rgb15leToUV_half_c;
1337 case AV_PIX_FMT_RGB555BE:
1338 c->chrToYV12 = rgb15beToUV_half_c;
1340 case AV_PIX_FMT_RGB444LE:
1341 c->chrToYV12 = rgb12leToUV_half_c;
1343 case AV_PIX_FMT_RGB444BE:
1344 c->chrToYV12 = rgb12beToUV_half_c;
1346 case AV_PIX_FMT_X2RGB10LE:
1347 c->chrToYV12 = rgb30leToUV_half_c;
1351 switch (srcFormat) {
1352 case AV_PIX_FMT_RGBA64BE:
1353 c->chrToYV12 = rgb64BEToUV_c;
1355 case AV_PIX_FMT_RGBA64LE:
1356 c->chrToYV12 = rgb64LEToUV_c;
1358 case AV_PIX_FMT_BGRA64BE:
1359 c->chrToYV12 = bgr64BEToUV_c;
1361 case AV_PIX_FMT_BGRA64LE:
1362 c->chrToYV12 = bgr64LEToUV_c;
1364 case AV_PIX_FMT_RGB48BE:
1365 c->chrToYV12 = rgb48BEToUV_c;
1367 case AV_PIX_FMT_RGB48LE:
1368 c->chrToYV12 = rgb48LEToUV_c;
1370 case AV_PIX_FMT_BGR48BE:
1371 c->chrToYV12 = bgr48BEToUV_c;
1373 case AV_PIX_FMT_BGR48LE:
1374 c->chrToYV12 = bgr48LEToUV_c;
1376 case AV_PIX_FMT_RGB32:
1377 c->chrToYV12 = bgr32ToUV_c;
1379 case AV_PIX_FMT_RGB32_1:
1380 c->chrToYV12 = bgr321ToUV_c;
1382 case AV_PIX_FMT_BGR24:
1383 c->chrToYV12 = bgr24ToUV_c;
1385 case AV_PIX_FMT_BGR565LE:
1386 c->chrToYV12 = bgr16leToUV_c;
1388 case AV_PIX_FMT_BGR565BE:
1389 c->chrToYV12 = bgr16beToUV_c;
1391 case AV_PIX_FMT_BGR555LE:
1392 c->chrToYV12 = bgr15leToUV_c;
1394 case AV_PIX_FMT_BGR555BE:
1395 c->chrToYV12 = bgr15beToUV_c;
1397 case AV_PIX_FMT_BGR444LE:
1398 c->chrToYV12 = bgr12leToUV_c;
1400 case AV_PIX_FMT_BGR444BE:
1401 c->chrToYV12 = bgr12beToUV_c;
1403 case AV_PIX_FMT_BGR32:
1404 c->chrToYV12 = rgb32ToUV_c;
1406 case AV_PIX_FMT_BGR32_1:
1407 c->chrToYV12 = rgb321ToUV_c;
1409 case AV_PIX_FMT_RGB24:
1410 c->chrToYV12 = rgb24ToUV_c;
1412 case AV_PIX_FMT_RGB565LE:
1413 c->chrToYV12 = rgb16leToUV_c;
1415 case AV_PIX_FMT_RGB565BE:
1416 c->chrToYV12 = rgb16beToUV_c;
1418 case AV_PIX_FMT_RGB555LE:
1419 c->chrToYV12 = rgb15leToUV_c;
1421 case AV_PIX_FMT_RGB555BE:
1422 c->chrToYV12 = rgb15beToUV_c;
1424 case AV_PIX_FMT_RGB444LE:
1425 c->chrToYV12 = rgb12leToUV_c;
1427 case AV_PIX_FMT_RGB444BE:
1428 c->chrToYV12 = rgb12beToUV_c;
1430 case AV_PIX_FMT_X2RGB10LE:
1431 c->chrToYV12 = rgb30leToUV_c;
1436 c->lumToYV12 = NULL;
1437 c->alpToYV12 = NULL;
1438 switch (srcFormat) {
1439 case AV_PIX_FMT_GBRP9LE:
1440 c->readLumPlanar = planar_rgb9le_to_y;
1442 case AV_PIX_FMT_GBRAP10LE:
1443 c->readAlpPlanar = planar_rgb10le_to_a;
1444 case AV_PIX_FMT_GBRP10LE:
1445 c->readLumPlanar = planar_rgb10le_to_y;
1447 case AV_PIX_FMT_GBRAP12LE:
1448 c->readAlpPlanar = planar_rgb12le_to_a;
1449 case AV_PIX_FMT_GBRP12LE:
1450 c->readLumPlanar = planar_rgb12le_to_y;
1452 case AV_PIX_FMT_GBRP14LE:
1453 c->readLumPlanar = planar_rgb14le_to_y;
1455 case AV_PIX_FMT_GBRAP16LE:
1456 c->readAlpPlanar = planar_rgb16le_to_a;
1457 case AV_PIX_FMT_GBRP16LE:
1458 c->readLumPlanar = planar_rgb16le_to_y;
1460 case AV_PIX_FMT_GBRAPF32LE:
1461 c->readAlpPlanar = planar_rgbf32le_to_a;
1462 case AV_PIX_FMT_GBRPF32LE:
1463 c->readLumPlanar = planar_rgbf32le_to_y;
1465 case AV_PIX_FMT_GBRP9BE:
1466 c->readLumPlanar = planar_rgb9be_to_y;
1468 case AV_PIX_FMT_GBRAP10BE:
1469 c->readAlpPlanar = planar_rgb10be_to_a;
1470 case AV_PIX_FMT_GBRP10BE:
1471 c->readLumPlanar = planar_rgb10be_to_y;
1473 case AV_PIX_FMT_GBRAP12BE:
1474 c->readAlpPlanar = planar_rgb12be_to_a;
1475 case AV_PIX_FMT_GBRP12BE:
1476 c->readLumPlanar = planar_rgb12be_to_y;
1478 case AV_PIX_FMT_GBRP14BE:
1479 c->readLumPlanar = planar_rgb14be_to_y;
1481 case AV_PIX_FMT_GBRAP16BE:
1482 c->readAlpPlanar = planar_rgb16be_to_a;
1483 case AV_PIX_FMT_GBRP16BE:
1484 c->readLumPlanar = planar_rgb16be_to_y;
1486 case AV_PIX_FMT_GBRAPF32BE:
1487 c->readAlpPlanar = planar_rgbf32be_to_a;
1488 case AV_PIX_FMT_GBRPF32BE:
1489 c->readLumPlanar = planar_rgbf32be_to_y;
1491 case AV_PIX_FMT_GBRAP:
1492 c->readAlpPlanar = planar_rgb_to_a;
1493 case AV_PIX_FMT_GBRP:
1494 c->readLumPlanar = planar_rgb_to_y;
1497 case AV_PIX_FMT_YUV420P9LE:
1498 case AV_PIX_FMT_YUV422P9LE:
1499 case AV_PIX_FMT_YUV444P9LE:
1500 case AV_PIX_FMT_YUV420P10LE:
1501 case AV_PIX_FMT_YUV422P10LE:
1502 case AV_PIX_FMT_YUV440P10LE:
1503 case AV_PIX_FMT_YUV444P10LE:
1504 case AV_PIX_FMT_YUV420P12LE:
1505 case AV_PIX_FMT_YUV422P12LE:
1506 case AV_PIX_FMT_YUV440P12LE:
1507 case AV_PIX_FMT_YUV444P12LE:
1508 case AV_PIX_FMT_YUV420P14LE:
1509 case AV_PIX_FMT_YUV422P14LE:
1510 case AV_PIX_FMT_YUV444P14LE:
1511 case AV_PIX_FMT_YUV420P16LE:
1512 case AV_PIX_FMT_YUV422P16LE:
1513 case AV_PIX_FMT_YUV444P16LE:
1515 case AV_PIX_FMT_GRAY9LE:
1516 case AV_PIX_FMT_GRAY10LE:
1517 case AV_PIX_FMT_GRAY12LE:
1518 case AV_PIX_FMT_GRAY14LE:
1519 case AV_PIX_FMT_GRAY16LE:
1521 case AV_PIX_FMT_P016LE:
1522 c->lumToYV12 = bswap16Y_c;
1524 case AV_PIX_FMT_YUVA420P9LE:
1525 case AV_PIX_FMT_YUVA422P9LE:
1526 case AV_PIX_FMT_YUVA444P9LE:
1527 case AV_PIX_FMT_YUVA420P10LE:
1528 case AV_PIX_FMT_YUVA422P10LE:
1529 case AV_PIX_FMT_YUVA444P10LE:
1530 case AV_PIX_FMT_YUVA422P12LE:
1531 case AV_PIX_FMT_YUVA444P12LE:
1532 case AV_PIX_FMT_YUVA420P16LE:
1533 case AV_PIX_FMT_YUVA422P16LE:
1534 case AV_PIX_FMT_YUVA444P16LE:
1535 c->lumToYV12 = bswap16Y_c;
1536 c->alpToYV12 = bswap16Y_c;
1539 case AV_PIX_FMT_YUV420P9BE:
1540 case AV_PIX_FMT_YUV422P9BE:
1541 case AV_PIX_FMT_YUV444P9BE:
1542 case AV_PIX_FMT_YUV420P10BE:
1543 case AV_PIX_FMT_YUV422P10BE:
1544 case AV_PIX_FMT_YUV440P10BE:
1545 case AV_PIX_FMT_YUV444P10BE:
1546 case AV_PIX_FMT_YUV420P12BE:
1547 case AV_PIX_FMT_YUV422P12BE:
1548 case AV_PIX_FMT_YUV440P12BE:
1549 case AV_PIX_FMT_YUV444P12BE:
1550 case AV_PIX_FMT_YUV420P14BE:
1551 case AV_PIX_FMT_YUV422P14BE:
1552 case AV_PIX_FMT_YUV444P14BE:
1553 case AV_PIX_FMT_YUV420P16BE:
1554 case AV_PIX_FMT_YUV422P16BE:
1555 case AV_PIX_FMT_YUV444P16BE:
1557 case AV_PIX_FMT_GRAY9BE:
1558 case AV_PIX_FMT_GRAY10BE:
1559 case AV_PIX_FMT_GRAY12BE:
1560 case AV_PIX_FMT_GRAY14BE:
1561 case AV_PIX_FMT_GRAY16BE:
1563 case AV_PIX_FMT_P016BE:
1564 c->lumToYV12 = bswap16Y_c;
1566 case AV_PIX_FMT_YUVA420P9BE:
1567 case AV_PIX_FMT_YUVA422P9BE:
1568 case AV_PIX_FMT_YUVA444P9BE:
1569 case AV_PIX_FMT_YUVA420P10BE:
1570 case AV_PIX_FMT_YUVA422P10BE:
1571 case AV_PIX_FMT_YUVA444P10BE:
1572 case AV_PIX_FMT_YUVA422P12BE:
1573 case AV_PIX_FMT_YUVA444P12BE:
1574 case AV_PIX_FMT_YUVA420P16BE:
1575 case AV_PIX_FMT_YUVA422P16BE:
1576 case AV_PIX_FMT_YUVA444P16BE:
1577 c->lumToYV12 = bswap16Y_c;
1578 c->alpToYV12 = bswap16Y_c;
1581 case AV_PIX_FMT_YA16LE:
1582 c->lumToYV12 = read_ya16le_gray_c;
1584 case AV_PIX_FMT_YA16BE:
1585 c->lumToYV12 = read_ya16be_gray_c;
1587 case AV_PIX_FMT_AYUV64LE:
1588 c->lumToYV12 = read_ayuv64le_Y_c;
1590 case AV_PIX_FMT_YUYV422:
1591 case AV_PIX_FMT_YVYU422:
1592 case AV_PIX_FMT_YA8:
1593 c->lumToYV12 = yuy2ToY_c;
1595 case AV_PIX_FMT_UYVY422:
1596 c->lumToYV12 = uyvyToY_c;
1598 case AV_PIX_FMT_BGR24:
1599 c->lumToYV12 = bgr24ToY_c;
1601 case AV_PIX_FMT_BGR565LE:
1602 c->lumToYV12 = bgr16leToY_c;
1604 case AV_PIX_FMT_BGR565BE:
1605 c->lumToYV12 = bgr16beToY_c;
1607 case AV_PIX_FMT_BGR555LE:
1608 c->lumToYV12 = bgr15leToY_c;
1610 case AV_PIX_FMT_BGR555BE:
1611 c->lumToYV12 = bgr15beToY_c;
1613 case AV_PIX_FMT_BGR444LE:
1614 c->lumToYV12 = bgr12leToY_c;
1616 case AV_PIX_FMT_BGR444BE:
1617 c->lumToYV12 = bgr12beToY_c;
1619 case AV_PIX_FMT_RGB24:
1620 c->lumToYV12 = rgb24ToY_c;
1622 case AV_PIX_FMT_RGB565LE:
1623 c->lumToYV12 = rgb16leToY_c;
1625 case AV_PIX_FMT_RGB565BE:
1626 c->lumToYV12 = rgb16beToY_c;
1628 case AV_PIX_FMT_RGB555LE:
1629 c->lumToYV12 = rgb15leToY_c;
1631 case AV_PIX_FMT_RGB555BE:
1632 c->lumToYV12 = rgb15beToY_c;
1634 case AV_PIX_FMT_RGB444LE:
1635 c->lumToYV12 = rgb12leToY_c;
1637 case AV_PIX_FMT_RGB444BE:
1638 c->lumToYV12 = rgb12beToY_c;
1640 case AV_PIX_FMT_RGB8:
1641 case AV_PIX_FMT_BGR8:
1642 case AV_PIX_FMT_PAL8:
1643 case AV_PIX_FMT_BGR4_BYTE:
1644 case AV_PIX_FMT_RGB4_BYTE:
1645 c->lumToYV12 = palToY_c;
1647 case AV_PIX_FMT_MONOBLACK:
1648 c->lumToYV12 = monoblack2Y_c;
1650 case AV_PIX_FMT_MONOWHITE:
1651 c->lumToYV12 = monowhite2Y_c;
1653 case AV_PIX_FMT_RGB32:
1654 c->lumToYV12 = bgr32ToY_c;
1656 case AV_PIX_FMT_RGB32_1:
1657 c->lumToYV12 = bgr321ToY_c;
1659 case AV_PIX_FMT_BGR32:
1660 c->lumToYV12 = rgb32ToY_c;
1662 case AV_PIX_FMT_BGR32_1:
1663 c->lumToYV12 = rgb321ToY_c;
1665 case AV_PIX_FMT_RGB48BE:
1666 c->lumToYV12 = rgb48BEToY_c;
1668 case AV_PIX_FMT_RGB48LE:
1669 c->lumToYV12 = rgb48LEToY_c;
1671 case AV_PIX_FMT_BGR48BE:
1672 c->lumToYV12 = bgr48BEToY_c;
1674 case AV_PIX_FMT_BGR48LE:
1675 c->lumToYV12 = bgr48LEToY_c;
1677 case AV_PIX_FMT_RGBA64BE:
1678 c->lumToYV12 = rgb64BEToY_c;
1680 case AV_PIX_FMT_RGBA64LE:
1681 c->lumToYV12 = rgb64LEToY_c;
1683 case AV_PIX_FMT_BGRA64BE:
1684 c->lumToYV12 = bgr64BEToY_c;
1686 case AV_PIX_FMT_BGRA64LE:
1687 c->lumToYV12 = bgr64LEToY_c;
1689 case AV_PIX_FMT_P010LE:
1690 c->lumToYV12 = p010LEToY_c;
1692 case AV_PIX_FMT_P010BE:
1693 c->lumToYV12 = p010BEToY_c;
1695 case AV_PIX_FMT_GRAYF32LE:
1697 c->lumToYV12 = grayf32ToY16_bswap_c;
1699 c->lumToYV12 = grayf32ToY16_c;
1702 case AV_PIX_FMT_GRAYF32BE:
1704 c->lumToYV12 = grayf32ToY16_c;
1706 c->lumToYV12 = grayf32ToY16_bswap_c;
1709 case AV_PIX_FMT_Y210LE:
1710 c->lumToYV12 = y210le_Y_c;
1712 case AV_PIX_FMT_X2RGB10LE:
1713 c->lumToYV12 =rgb30leToY_c;
1717 if (is16BPS(srcFormat) || isNBPS(srcFormat)) {
1718 if (HAVE_BIGENDIAN == !isBE(srcFormat) && !c->readAlpPlanar)
1719 c->alpToYV12 = bswap16Y_c;
1721 switch (srcFormat) {
1722 case AV_PIX_FMT_BGRA64LE:
1723 case AV_PIX_FMT_RGBA64LE: c->alpToYV12 = rgba64leToA_c; break;
1724 case AV_PIX_FMT_BGRA64BE:
1725 case AV_PIX_FMT_RGBA64BE: c->alpToYV12 = rgba64beToA_c; break;
1726 case AV_PIX_FMT_BGRA:
1727 case AV_PIX_FMT_RGBA:
1728 c->alpToYV12 = rgbaToA_c;
1730 case AV_PIX_FMT_ABGR:
1731 case AV_PIX_FMT_ARGB:
1732 c->alpToYV12 = abgrToA_c;
1734 case AV_PIX_FMT_YA8:
1735 c->alpToYV12 = uyvyToY_c;
1737 case AV_PIX_FMT_YA16LE:
1738 c->alpToYV12 = read_ya16le_alpha_c;
1740 case AV_PIX_FMT_YA16BE:
1741 c->alpToYV12 = read_ya16be_alpha_c;
1743 case AV_PIX_FMT_AYUV64LE:
1744 c->alpToYV12 = read_ayuv64le_A_c;
1746 case AV_PIX_FMT_PAL8 :
1747 c->alpToYV12 = palToA_c;