2 * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * Convert between colorspaces.
26 #include "libavutil/avassert.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/pixfmt.h"
32 #include "colorspacedsp.h"
36 #include "colorspace.h"
/**
 * Whitepoint adaptation method used when converting between primary sets
 * with different whitepoints.
 *
 * The non-identity methods come first so NB_WP_ADAPT_NON_IDENTITY can size
 * tables that hold one adaptation matrix per real method (ma_tbl in
 * fill_whitepoint_conv_table() is indexed by WP_ADAPT_BRADFORD and
 * WP_ADAPT_VON_KRIES, so both must compare below NB_WP_ADAPT_NON_IDENTITY).
 * WP_ADAPT_IDENTITY aliases the counter so "identity" needs no table entry.
 */
enum WhitepointAdaptation {
    WP_ADAPT_BRADFORD,
    WP_ADAPT_VON_KRIES,
    NB_WP_ADAPT_NON_IDENTITY,
    WP_ADAPT_IDENTITY = NB_WP_ADAPT_NON_IDENTITY,
    NB_WP_ADAPT,
};
73 static const enum AVColorTransferCharacteristic default_trc[CS_NB + 1] = {
74 [CS_UNSPECIFIED] = AVCOL_TRC_UNSPECIFIED,
75 [CS_BT470M] = AVCOL_TRC_GAMMA22,
76 [CS_BT470BG] = AVCOL_TRC_GAMMA28,
77 [CS_BT601_6_525] = AVCOL_TRC_SMPTE170M,
78 [CS_BT601_6_625] = AVCOL_TRC_SMPTE170M,
79 [CS_BT709] = AVCOL_TRC_BT709,
80 [CS_SMPTE170M] = AVCOL_TRC_SMPTE170M,
81 [CS_SMPTE240M] = AVCOL_TRC_SMPTE240M,
82 [CS_BT2020] = AVCOL_TRC_BT2020_10,
83 [CS_NB] = AVCOL_TRC_UNSPECIFIED,
86 static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
87 [CS_UNSPECIFIED] = AVCOL_PRI_UNSPECIFIED,
88 [CS_BT470M] = AVCOL_PRI_BT470M,
89 [CS_BT470BG] = AVCOL_PRI_BT470BG,
90 [CS_BT601_6_525] = AVCOL_PRI_SMPTE170M,
91 [CS_BT601_6_625] = AVCOL_PRI_BT470BG,
92 [CS_BT709] = AVCOL_PRI_BT709,
93 [CS_SMPTE170M] = AVCOL_PRI_SMPTE170M,
94 [CS_SMPTE240M] = AVCOL_PRI_SMPTE240M,
95 [CS_BT2020] = AVCOL_PRI_BT2020,
96 [CS_NB] = AVCOL_PRI_UNSPECIFIED,
99 static const enum AVColorSpace default_csp[CS_NB + 1] = {
100 [CS_UNSPECIFIED] = AVCOL_SPC_UNSPECIFIED,
101 [CS_BT470M] = AVCOL_SPC_SMPTE170M,
102 [CS_BT470BG] = AVCOL_SPC_BT470BG,
103 [CS_BT601_6_525] = AVCOL_SPC_SMPTE170M,
104 [CS_BT601_6_625] = AVCOL_SPC_BT470BG,
105 [CS_BT709] = AVCOL_SPC_BT709,
106 [CS_SMPTE170M] = AVCOL_SPC_SMPTE170M,
107 [CS_SMPTE240M] = AVCOL_SPC_SMPTE240M,
108 [CS_BT2020] = AVCOL_SPC_BT2020_NCL,
109 [CS_NB] = AVCOL_SPC_UNSPECIFIED,
112 struct ColorPrimaries {
114 struct PrimaryCoefficients coeff;
/*
 * Parameters of a piecewise transfer (gamma) function as used by
 * fill_gamma_table():
 *   V = alpha * L^gamma - (alpha - 1)   for L >= beta
 *   V = delta * L                       for L <  beta
 * Fix: the struct was unterminated (missing "};").
 */
struct TransferCharacteristics {
    double alpha, beta, gamma, delta;
};
/*
 * Filter private context. The user_* members hold option values exactly as
 * given; the in_*/out_* members cache the frame properties the current
 * coefficient tables were built for (see create_filtergraph(), which
 * invalidates and rebuilds them when properties change).
 * NOTE(review): several members referenced elsewhere in this file (rgb[]
 * buffers, rgb_sz, fast_mode, did_warn_range, yuv2rgb/rgb2yuv/yuv2yuv
 * function pointers) and the typedef's closing line fall outside this
 * chunk of the file.
 */
typedef struct ColorSpaceContext {
    const AVClass *class;
    ColorSpaceDSPContext dsp;
    /* user options: "all"/"iall" presets plus per-property overrides
     * (i-prefixed variants override the *input* frame's metadata) */
    enum Colorspace user_all, user_iall;
    enum AVColorSpace in_csp, out_csp, user_csp, user_icsp;
    enum AVColorRange in_rng, out_rng, user_rng, user_irng;
    enum AVColorTransferCharacteristic in_trc, out_trc, user_trc, user_itrc;
    enum AVColorPrimaries in_prm, out_prm, user_prm, user_iprm;
    enum AVPixelFormat in_format, user_format;
    enum DitherMode dither;
    enum WhitepointAdaptation wp_adapt;
    /* stride of the intermediate RGB planes, in int16_t elements (set from
     * a byte stride divided by sizeof(int16_t) in filter_frame()) */
    ptrdiff_t rgb_stride;
    /* Floyd-Steinberg dither scratch lines; *_base are the allocations,
     * dither_scratch points one element past the base for edge padding */
    int *dither_scratch[3][2], *dither_scratch_base[3][2];
    /* primaries conversion (linear RGB -> linear RGB); coefficients are
     * 2.14 fixed point (lrint(16384 * x)), each replicated 8x for SIMD */
    const struct ColorPrimaries *in_primaries, *out_primaries;
    int lrgb2lrgb_passthrough;
    DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];
    /* transfer characteristics; lin_lut/delin_lut share one allocation
     * (see fill_gamma_table()) */
    const struct TransferCharacteristics *in_txchr, *out_txchr;
    int rgb2rgb_passthrough;
    int16_t *lin_lut, *delin_lut;
    /* colorspace/range conversion tables */
    const struct LumaCoefficients *in_lumacoef, *out_lumacoef;
    int yuv2yuv_passthrough, yuv2yuv_fastmode;
    DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
    rgb2yuv_fsb_fn rgb2yuv_fsb;
    /* double-precision matrices kept so yuv2yuv can be composed from them
     * in fast mode (see create_filtergraph()) */
    double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
    int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;
165 // FIXME deal with odd width/heights
166 // FIXME faster linearize/delinearize implementation (integer pow)
167 // FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
168 // FIXME test that the values in (de)lin_lut don't exceed their container storage
169 // type size (only useful if we keep the LUT and don't move to fast integer pow)
170 // FIXME dithering if bitdepth goes down?
171 // FIXME bitexact for fate integration?
/*
 * RGB -> YCgCo transform (rows: Y, Cg, Co) as defined for the YCgCo
 * colorspace (ITU-T H.273). Copied verbatim by fill_rgb2yuv_table() when
 * the luma coefficients are { 0.25, 0.5, 0.25 }.
 * Fix: the initializer was truncated to the Cg row only; the Y and Co rows
 * are restored from the standard YCgCo definition (the visible Cg row
 * matches it exactly).
 */
static const double ycgco_matrix[3][3] =
{
    {  0.25, 0.5,  0.25 },
    { -0.25, 0.5, -0.25 },
    {  0.5,  0,   -0.5  },
};
180 static const double gbr_matrix[3][3] =
188 * All constants explained in e.g. https://linuxtv.org/downloads/v4l-dvb-apis/ch02s06.html
189 * The older ones (bt470bg/m) are also explained in their respective ITU docs
190 * (e.g. https://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.470-5-199802-S!!PDF-E.pdf)
191 * whereas the newer ones can typically be copied directly from wikipedia :)
193 static const struct LumaCoefficients luma_coefficients[AVCOL_SPC_NB] = {
194 [AVCOL_SPC_FCC] = { 0.30, 0.59, 0.11 },
195 [AVCOL_SPC_BT470BG] = { 0.299, 0.587, 0.114 },
196 [AVCOL_SPC_SMPTE170M] = { 0.299, 0.587, 0.114 },
197 [AVCOL_SPC_BT709] = { 0.2126, 0.7152, 0.0722 },
198 [AVCOL_SPC_SMPTE240M] = { 0.212, 0.701, 0.087 },
199 [AVCOL_SPC_YCOCG] = { 0.25, 0.5, 0.25 },
200 [AVCOL_SPC_RGB] = { 1, 1, 1 },
201 [AVCOL_SPC_BT2020_NCL] = { 0.2627, 0.6780, 0.0593 },
202 [AVCOL_SPC_BT2020_CL] = { 0.2627, 0.6780, 0.0593 },
/*
 * Look up the luma coefficients for a colorspace.
 * Visible logic: reject csp values beyond the table, then take the entry.
 * NOTE(review): the function's braces and the return statements (including
 * the out-of-range path, presumably returning NULL) fall outside this
 * chunk of the file.
 */
static const struct LumaCoefficients *get_luma_coefficients(enum AVColorSpace csp)
    const struct LumaCoefficients *coeffs;
    if (csp >= AVCOL_SPC_NB)
    coeffs = &luma_coefficients[csp];
/*
 * Build a 3x3 RGB->YUV matrix from luma coefficients.
 * YCgCo and GBR (identity-luma) are special-cased with fixed matrices
 * since they don't follow the generic Y/Cb/Cr construction; otherwise the
 * Cb/Cr rows are the luma row offset and scaled so each chroma axis spans
 * [-0.5, 0.5] (Cb = (B-Y) scaled, Cr = (R-Y) scaled).
 * NOTE(review): some interior lines (returns after the memcpys, two of the
 * chroma-row assignments, closers) fall outside this chunk.
 */
static void fill_rgb2yuv_table(const struct LumaCoefficients *coeffs,
                               double rgb2yuv[3][3])
    double bscale, rscale;
    // special ycgco matrix
    if (coeffs->cr == 0.25 && coeffs->cg == 0.5 && coeffs->cb == 0.25) {
        memcpy(rgb2yuv, ycgco_matrix, sizeof(double) * 9);
    } else if (coeffs->cr == 1 && coeffs->cg == 1 && coeffs->cb == 1) {
        memcpy(rgb2yuv, gbr_matrix, sizeof(double) * 9);
    /* generic case: Y row is the luma coefficients themselves */
    rgb2yuv[0][0] = coeffs->cr;
    rgb2yuv[0][1] = coeffs->cg;
    rgb2yuv[0][2] = coeffs->cb;
    /* scale factors normalizing (B-Y) and (R-Y) to +/-0.5 */
    bscale = 0.5 / (coeffs->cb - 1.0);
    rscale = 0.5 / (coeffs->cr - 1.0);
    rgb2yuv[1][0] = bscale * coeffs->cr;
    rgb2yuv[1][1] = bscale * coeffs->cg;
    rgb2yuv[2][1] = rscale * coeffs->cg;
    rgb2yuv[2][2] = rscale * coeffs->cb;
245 // FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
246 // find any actual tables that document their real values...
247 // See http://www.13thmonkey.org/~boris/gammacorrection/ first graph why it matters
248 static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB] = {
249 [AVCOL_TRC_BT709] = { 1.099, 0.018, 0.45, 4.5 },
250 [AVCOL_TRC_GAMMA22] = { 1.0, 0.0, 1.0 / 2.2, 0.0 },
251 [AVCOL_TRC_GAMMA28] = { 1.0, 0.0, 1.0 / 2.8, 0.0 },
252 [AVCOL_TRC_SMPTE170M] = { 1.099, 0.018, 0.45, 4.5 },
253 [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
254 [AVCOL_TRC_IEC61966_2_1] = { 1.055, 0.0031308, 1.0 / 2.4, 12.92 },
255 [AVCOL_TRC_IEC61966_2_4] = { 1.099, 0.018, 0.45, 4.5 },
256 [AVCOL_TRC_BT2020_10] = { 1.099, 0.018, 0.45, 4.5 },
257 [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
/*
 * Look up the transfer-characteristics parameters for a TRC value.
 * Visible logic: reject trc values beyond the table, then take the entry.
 * NOTE(review): braces and return statements (including the out-of-range
 * path, presumably returning NULL) fall outside this chunk of the file.
 */
static const struct TransferCharacteristics *
get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
    const struct TransferCharacteristics *coeffs;
    if (trc >= AVCOL_TRC_NB)
    coeffs = &transfer_characteristics[trc];
274 static const struct WhitepointCoefficients whitepoint_coefficients[WP_NB] = {
275 [WP_D65] = { 0.3127, 0.3290 },
276 [WP_C] = { 0.3100, 0.3160 },
277 [WP_DCI] = { 0.3140, 0.3510 },
278 [WP_E] = { 1/3.0f, 1/3.0f },
281 static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB] = {
282 [AVCOL_PRI_BT709] = { WP_D65, { 0.640, 0.330, 0.300, 0.600, 0.150, 0.060 } },
283 [AVCOL_PRI_BT470M] = { WP_C, { 0.670, 0.330, 0.210, 0.710, 0.140, 0.080 } },
284 [AVCOL_PRI_BT470BG] = { WP_D65, { 0.640, 0.330, 0.290, 0.600, 0.150, 0.060 } },
285 [AVCOL_PRI_SMPTE170M] = { WP_D65, { 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 } },
286 [AVCOL_PRI_SMPTE240M] = { WP_D65, { 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 } },
287 [AVCOL_PRI_SMPTE428] = { WP_E, { 0.735, 0.265, 0.274, 0.718, 0.167, 0.009 } },
288 [AVCOL_PRI_SMPTE431] = { WP_DCI, { 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 } },
289 [AVCOL_PRI_SMPTE432] = { WP_D65, { 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 } },
290 [AVCOL_PRI_FILM] = { WP_C, { 0.681, 0.319, 0.243, 0.692, 0.145, 0.049 } },
291 [AVCOL_PRI_BT2020] = { WP_D65, { 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 } },
292 [AVCOL_PRI_JEDEC_P22] = { WP_D65, { 0.630, 0.340, 0.295, 0.605, 0.155, 0.077 } },
/*
 * Look up the primaries description for an AVColorPrimaries value.
 * Visible logic: reject values beyond the table, then take the entry.
 * NOTE(review): braces and return statements (including the out-of-range
 * path, presumably returning NULL) fall outside this chunk of the file.
 */
static const struct ColorPrimaries *get_color_primaries(enum AVColorPrimaries prm)
    const struct ColorPrimaries *p;
    if (prm >= AVCOL_PRI_NB)
    p = &color_primaries[prm];
/*
 * Build the linearize (lin_lut) and delinearize (delin_lut) LUTs for the
 * current input/output transfer characteristics. Both LUTs live in one
 * allocation (lin first, delin second). The 15-bit LUT index maps to the
 * value range via v = (n - 2048) / 28672, i.e. the nominal 0.0..1.0 signal
 * occupies 0..28672 with headroom for out-of-range excursions; results are
 * clipped to int16.
 * Returns 0 on success or AVERROR(ENOMEM).
 * NOTE(review): the allocation-failure check, the linear-toe (delta)
 * branches and the closing braces fall outside this chunk of the file.
 */
static int fill_gamma_table(ColorSpaceContext *s)
    double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
    double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
    double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
    double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
    double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
    s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
        return AVERROR(ENOMEM);
    s->delin_lut = &s->lin_lut[32768];
    for (n = 0; n < 32768; n++) {
        double v = (n - 2048.0) / 28672.0, d, l;
        /* delinearize: power segment, mirrored for negative values */
        if (v <= -out_beta) {
            d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
        } else if (v < out_beta) {
            d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
        s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));
        /* linearize: inverse of the delinearize curve */
            l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
        } else if (v < in_beta) {
            l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
        s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
349 * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
350 * This function uses the Bradford mechanism.
/*
 * Build the 3x3 XYZ-domain chromatic adaptation matrix converting from the
 * src whitepoint to the dst whitepoint:
 *   out = MA * diag(rd/rs, gd/gs, bd/bs) * MA^-1
 * where MA is the chosen cone-response matrix (Bradford or Von Kries) and
 * (rs,gs,bs)/(rd,gd,bd) are the src/dst whitepoints in cone space.
 * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
 * NOTE(review): the lines filling the diagonal of fac[] fall outside this
 * chunk of the file; only the zeroing of its off-diagonals is visible.
 */
static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
                                       enum Whitepoint src, enum Whitepoint dst)
    /* cone-response matrices, one per non-identity adaptation method */
    static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
        [WP_ADAPT_BRADFORD] = {
            {  0.8951,  0.2664, -0.1614 },
            { -0.7502,  1.7135,  0.0367 },
            {  0.0389, -0.0685,  1.0296 },
        }, [WP_ADAPT_VON_KRIES] = {
            {  0.40024,  0.70760, -0.08081 },
            { -0.22630,  1.16532,  0.04570 },
            {  0.00000,  0.00000,  0.91822 },
    const double (*ma)[3] = ma_tbl[wp_adapt];
    const struct WhitepointCoefficients *wp_src = &whitepoint_coefficients[src];
    /* z = 1 - x - y for xyY -> XYZ with Y = 1 */
    double zw_src = 1.0 - wp_src->xw - wp_src->yw;
    const struct WhitepointCoefficients *wp_dst = &whitepoint_coefficients[dst];
    double zw_dst = 1.0 - wp_dst->xw - wp_dst->yw;
    double mai[3][3], fac[3][3], tmp[3][3];
    double rs, gs, bs, rd, gd, bd;
    ff_matrix_invert_3x3(ma, mai);
    /* src and dst whitepoints transformed into cone space */
    rs = ma[0][0] * wp_src->xw + ma[0][1] * wp_src->yw + ma[0][2] * zw_src;
    gs = ma[1][0] * wp_src->xw + ma[1][1] * wp_src->yw + ma[1][2] * zw_src;
    bs = ma[2][0] * wp_src->xw + ma[2][1] * wp_src->yw + ma[2][2] * zw_src;
    rd = ma[0][0] * wp_dst->xw + ma[0][1] * wp_dst->yw + ma[0][2] * zw_dst;
    gd = ma[1][0] * wp_dst->xw + ma[1][1] * wp_dst->yw + ma[1][2] * zw_dst;
    bd = ma[2][0] * wp_dst->xw + ma[2][1] * wp_dst->yw + ma[2][2] * zw_dst;
    fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
    ff_matrix_mul_3x3(tmp, ma, fac);
    ff_matrix_mul_3x3(out, tmp, mai);
/*
 * Apply a (de)linearization LUT in place to all three RGB planes.
 * Each sample is biased by 2048 and clipped to 15 bits to form the LUT
 * index, matching the LUT domain built in fill_gamma_table().
 * NOTE(review): the loop-variable declarations, the per-row stride advance
 * and the closing braces fall outside this chunk of the file.
 */
static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
                      int w, int h, const int16_t *lut)
    for (n = 0; n < 3; n++) {
        int16_t *data = buf[n];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++)
                data[x] = lut[av_clip_uintp2(2048 + data[x], 15)];
    /* (fields of struct ThreadData; the struct header and the AVFrame
     * in/out members used by convert() fall outside this chunk) */
    /* per-plane byte strides of the input/output frames */
    ptrdiff_t in_linesize[3], out_linesize[3];
    /* log2 vertical chroma subsampling of the input/output pixel formats */
    int in_ss_h, out_ss_h;
/*
 * Slice worker (run via ctx->internal->execute): converts a horizontal
 * band of the frame. Band boundaries h1/h2 are computed in line *pairs*
 * and scaled back by 2 so each job starts on an even line, keeping 4:2:0
 * chroma rows inside a single job.
 * Returns 0.
 * NOTE(review): the rgb[] local declaration, the else between the fast and
 * full paths, and various closers fall outside this chunk of the file.
 */
static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
    struct ThreadData *td = data;
    ColorSpaceContext *s = ctx->priv;
    uint8_t *in_data[3], *out_data[3];
    int h_in = (td->in->height + 1) >> 1;
    int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
    int w = td->in->width, h = h2 - h1;
    /* plane pointers offset to this job's band; chroma rows are shifted by
     * the vertical subsampling factor */
    in_data[0] = td->in->data[0] + td->in_linesize[0] * h1;
    in_data[1] = td->in->data[1] + td->in_linesize[1] * (h1 >> td->in_ss_h);
    in_data[2] = td->in->data[2] + td->in_linesize[2] * (h1 >> td->in_ss_h);
    out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
    out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
    out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
    rgb[0] = s->rgb[0] + s->rgb_stride * h1;
    rgb[1] = s->rgb[1] + s->rgb_stride * h1;
    rgb[2] = s->rgb[2] + s->rgb_stride * h1;
    // FIXME for simd, also make sure we do pictures with negative stride
    // top-down so we don't overwrite lines with padding of data before it
    // in the same buffer (same as swscale)
    if (s->yuv2yuv_fastmode) {
        // FIXME possibly use a fast mode in case only the y range changes?
        // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
        s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
                   s->yuv2yuv_coeffs, s->yuv_offset);
    // FIXME maybe (for caching efficiency) do pipeline per-line instead of
    // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
    // 2 lines, for yuv420.)
    /*
     * - yuv2rgb converts from whatever range the input was ([16-235/240] or
     *   [0,255] or the 10/12bpp equivalents thereof) to an integer version
     *   of RGB in pseudo-restricted 15+sign bits. That means that the float
     *   range [0.0,1.0] is in [0,28762], and the remainder of the int16_t
     *   range is used for overflow/underflow outside the representable
     *   range of this RGB type. rgb2yuv is the exact opposite.
     * - gamma correction is done using a LUT since that appears to work
     * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
     *   (or rgb2yuv conversion) uses nearest-neighbour sampling to read
     *   chroma pixels at luma resolution. If you want some more fancy
     *   filter, you can use swscale to convert to yuv444p.
     * - all coefficients are 14bit (so in the [-2.0,2.0] range).
     */
    s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
               s->yuv2rgb_coeffs, s->yuv_offset[0]);
    if (!s->rgb2rgb_passthrough) {
        /* linearize, convert primaries in linear light, delinearize */
        apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
        if (!s->lrgb2lrgb_passthrough)
            s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
        apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
    if (s->dither == DITHER_FSB) {
        s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
                       s->rgb2yuv_coeffs, s->yuv_offset[1], s->dither_scratch);
        s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
                   s->rgb2yuv_coeffs, s->yuv_offset[1]);
/*
 * Compute the quantization offset (*off) and the Y/UV value spans for a
 * given color range at a given bit depth. Unspecified range warns once
 * (per context) and appears to fall through to tv/mpeg handling; other
 * unsupported values yield AVERROR(EINVAL). Returns 0 on success.
 * NOTE(review): the switch header, break statements, the JPEG-case offset
 * assignment and closing braces fall outside this chunk of the file.
 */
static int get_range_off(AVFilterContext *ctx, int *off,
                         int *y_rng, int *uv_rng,
                         enum AVColorRange rng, int depth)
    case AVCOL_RANGE_UNSPECIFIED: {
        ColorSpaceContext *s = ctx->priv;
        if (!s->did_warn_range) {
            av_log(ctx, AV_LOG_WARNING, "Input range not set, assuming tv/mpeg\n");
            s->did_warn_range = 1;
    /* limited (tv/mpeg) range, scaled up from the 8-bit 16/219/224 values */
    case AVCOL_RANGE_MPEG:
        *off = 16 << (depth - 8);
        *y_rng = 219 << (depth - 8);
        *uv_rng = 224 << (depth - 8);
    /* full (pc/jpeg) range */
    case AVCOL_RANGE_JPEG:
        *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
        return AVERROR(EINVAL);
/*
 * (Re)build all conversion state for an input/output frame pair:
 *  - primaries        -> lrgb2lrgb linear-RGB matrix (with optional
 *                        whitepoint adaptation),
 *  - transfer curves  -> gamma LUTs (fill_gamma_table()),
 *  - colorspace/range -> yuv2rgb / rgb2yuv fixed-point tables, plus a
 *                        combined yuv2yuv table in fast mode.
 * Cached state is invalidated only for properties that changed since the
 * previous call. Returns 0 on success, negative AVERROR on unsupported
 * input/output properties.
 * NOTE(review): many structural lines (braces, else paths, redo_* flag
 * assignments, some error-path returns) fall outside this chunk of the
 * file; the visible statements are annotated as-is.
 */
static int create_filtergraph(AVFilterContext *ctx,
                              const AVFrame *in, const AVFrame *out)
    ColorSpaceContext *s = ctx->priv;
    const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
    const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
    int emms = 0, m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
/* supported: 3-component non-RGB (planar YUV) formats, 8/10/12 bit,
 * with 444, 422 or 420 chroma subsampling */
#define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
#define supported_subsampling(lcw, lch) \
    (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
#define supported_format(d) \
    ((d) != NULL && (d)->nb_components == 3 && \
     !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
     supported_depth((d)->comp[0].depth) && \
     supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))
    if (!supported_format(in_desc)) {
        av_log(ctx, AV_LOG_ERROR,
               "Unsupported input format %d (%s) or bitdepth (%d)\n",
               in->format, av_get_pix_fmt_name(in->format),
               in_desc ? in_desc->comp[0].depth : -1);
        return AVERROR(EINVAL);
    if (!supported_format(out_desc)) {
        av_log(ctx, AV_LOG_ERROR,
               "Unsupported output format %d (%s) or bitdepth (%d)\n",
               out->format, av_get_pix_fmt_name(out->format),
               out_desc ? out_desc->comp[0].depth : -1);
        return AVERROR(EINVAL);
    /* invalidate cached state whose source frame properties changed */
    if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
    if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
    if (in->color_trc != s->in_trc) s->in_txchr = NULL;
    if (out->color_trc != s->out_trc) s->out_txchr = NULL;
    if (in->colorspace != s->in_csp ||
        in->color_range != s->in_rng) s->in_lumacoef = NULL;
    if (out->colorspace != s->out_csp ||
        out->color_range != s->out_rng) s->out_lumacoef = NULL;
    if (!s->out_primaries || !s->in_primaries) {
        /* input primaries: frame value, overridden by the "iall" preset,
         * then by the explicit input-primaries option */
        s->in_prm = in->color_primaries;
        if (s->user_iall != CS_UNSPECIFIED)
            s->in_prm = default_prm[FFMIN(s->user_iall, CS_NB)];
        if (s->user_iprm != AVCOL_PRI_UNSPECIFIED)
            s->in_prm = s->user_iprm;
        s->in_primaries = get_color_primaries(s->in_prm);
        if (!s->in_primaries) {
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported input primaries %d (%s)\n",
                   s->in_prm, av_color_primaries_name(s->in_prm));
            return AVERROR(EINVAL);
        s->out_prm = out->color_primaries;
        s->out_primaries = get_color_primaries(s->out_prm);
        if (!s->out_primaries) {
            if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
                if (s->user_all == CS_UNSPECIFIED) {
                    av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
                    av_log(ctx, AV_LOG_ERROR,
                           "Unsupported output color property %d\n", s->user_all);
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output primaries %d (%s)\n",
                       s->out_prm, av_color_primaries_name(s->out_prm));
            return AVERROR(EINVAL);
        /* identical in/out primaries -> skip the linear-RGB matrix */
        s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
                                           sizeof(*s->in_primaries));
        if (!s->lrgb2lrgb_passthrough) {
            double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
            const struct WhitepointCoefficients *wp_out, *wp_in;
            wp_out = &whitepoint_coefficients[s->out_primaries->wp];
            wp_in = &whitepoint_coefficients[s->in_primaries->wp];
            /* xyz2rgb from the *output* primaries, rgb2xyz from the input */
            ff_fill_rgb2xyz_table(&s->out_primaries->coeff, wp_out, rgb2xyz);
            ff_matrix_invert_3x3(rgb2xyz, xyz2rgb);
            ff_fill_rgb2xyz_table(&s->in_primaries->coeff, wp_in, rgb2xyz);
            if (s->out_primaries->wp != s->in_primaries->wp &&
                s->wp_adapt != WP_ADAPT_IDENTITY) {
                double wpconv[3][3], tmp[3][3];
                fill_whitepoint_conv_table(wpconv, s->wp_adapt, s->in_primaries->wp,
                                           s->out_primaries->wp);
                ff_matrix_mul_3x3(tmp, rgb2xyz, wpconv);
                ff_matrix_mul_3x3(rgb2rgb, tmp, xyz2rgb);
                ff_matrix_mul_3x3(rgb2rgb, rgb2xyz, xyz2rgb);
            for (m = 0; m < 3; m++)
                for (n = 0; n < 3; n++) {
                    /* 2.14 fixed point, replicated 8x for SIMD */
                    s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
                    for (o = 1; o < 8; o++)
                        s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
    /* input transfer characteristics: drop LUTs and re-resolve */
    av_freep(&s->lin_lut);
    s->in_trc = in->color_trc;
    if (s->user_iall != CS_UNSPECIFIED)
        s->in_trc = default_trc[FFMIN(s->user_iall, CS_NB)];
    if (s->user_itrc != AVCOL_TRC_UNSPECIFIED)
        s->in_trc = s->user_itrc;
    s->in_txchr = get_transfer_characteristics(s->in_trc);
        av_log(ctx, AV_LOG_ERROR,
               "Unsupported input transfer characteristics %d (%s)\n",
               s->in_trc, av_color_transfer_name(s->in_trc));
        return AVERROR(EINVAL);
    /* output transfer characteristics */
    av_freep(&s->lin_lut);
    s->out_trc = out->color_trc;
    s->out_txchr = get_transfer_characteristics(s->out_trc);
    if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
        if (s->user_all == CS_UNSPECIFIED) {
            av_log(ctx, AV_LOG_ERROR,
                   "Please specify output transfer characteristics\n");
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported output color property %d\n", s->user_all);
        av_log(ctx, AV_LOG_ERROR,
               "Unsupported output transfer characteristics %d (%s)\n",
               s->out_trc, av_color_transfer_name(s->out_trc));
        return AVERROR(EINVAL);
    /* identical primaries AND identical transfer curves (or fast mode)
     * allow skipping the whole RGB stage */
    s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
                             !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
    if (!s->rgb2rgb_passthrough && !s->lin_lut) {
        res = fill_gamma_table(s);
    if (!s->in_lumacoef) {
        /* input colorspace/range: frame value, preset, explicit override */
        s->in_csp = in->colorspace;
        if (s->user_iall != CS_UNSPECIFIED)
            s->in_csp = default_csp[FFMIN(s->user_iall, CS_NB)];
        if (s->user_icsp != AVCOL_SPC_UNSPECIFIED)
            s->in_csp = s->user_icsp;
        s->in_rng = in->color_range;
        if (s->user_irng != AVCOL_RANGE_UNSPECIFIED)
            s->in_rng = s->user_irng;
        s->in_lumacoef = get_luma_coefficients(s->in_csp);
        if (!s->in_lumacoef) {
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported input colorspace %d (%s)\n",
                   s->in_csp, av_color_space_name(s->in_csp));
            return AVERROR(EINVAL);
    if (!s->out_lumacoef) {
        s->out_csp = out->colorspace;
        s->out_rng = out->color_range;
        s->out_lumacoef = get_luma_coefficients(s->out_csp);
        if (!s->out_lumacoef) {
            if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
                if (s->user_all == CS_UNSPECIFIED) {
                    /* NOTE(review): message says "transfer characteristics"
                     * but this branch concerns the output *colorspace* —
                     * looks like a copy/paste slip from the TRC block above
                     * (string left unchanged in this doc-only edit). */
                    av_log(ctx, AV_LOG_ERROR,
                           "Please specify output transfer characteristics\n");
                    av_log(ctx, AV_LOG_ERROR,
                           "Unsupported output color property %d\n", s->user_all);
                /* NOTE(review): same copy/paste slip — "transfer
                 * characteristics" printed with colorspace values. */
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output transfer characteristics %d (%s)\n",
                       s->out_csp, av_color_space_name(s->out_csp));
            return AVERROR(EINVAL);
    /* fast mode: same subsampling and no RGB stage -> a single combined
     * yuv2yuv matrix; passthrough additionally requires identical range,
     * luma coefficients and bit depth */
    fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
                    in_desc->log2_chroma_w == out_desc->log2_chroma_w;
    s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
    s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
                             !memcmp(s->in_lumacoef, s->out_lumacoef,
                                     sizeof(*s->in_lumacoef)) &&
                             in_desc->comp[0].depth == out_desc->comp[0].depth;
    if (!s->yuv2yuv_passthrough) {
        double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
        int off, bits, in_rng;
        res = get_range_off(ctx, &off, &s->in_y_rng, &s->in_uv_rng,
                            s->in_rng, in_desc->comp[0].depth);
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported input color range %d (%s)\n",
                   s->in_rng, av_color_range_name(s->in_rng));
        for (n = 0; n < 8; n++)
            s->yuv_offset[0][n] = off;
        /* yuv2rgb = inverse of the input rgb2yuv matrix, scaled to 14-bit
         * coefficients normalized by the per-component value range */
        fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
        ff_matrix_invert_3x3(rgb2yuv, yuv2rgb);
        bits = 1 << (in_desc->comp[0].depth - 1);
        for (n = 0; n < 3; n++) {
            for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
                s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
                for (o = 1; o < 8; o++)
                    s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
        /* sanity: structural zeros/equalities any yuv2rgb matrix must have */
        av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
        av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
        av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
        av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
        /* dsp table indexed by [depth: 8/10/12 -> 0/1/2][subsampling] */
        s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
                                   [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
        double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
        int off, out_rng, bits;
        res = get_range_off(ctx, &off, &s->out_y_rng, &s->out_uv_rng,
                            s->out_rng, out_desc->comp[0].depth);
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported output color range %d (%s)\n",
                   s->out_rng, av_color_range_name(s->out_rng));
        for (n = 0; n < 8; n++)
            s->yuv_offset[1][n] = off;
        fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
        bits = 1 << (29 - out_desc->comp[0].depth);
        for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
            for (m = 0; m < 3; m++) {
                s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
                for (o = 1; o < 8; o++)
                    s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
        av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
        s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
                                   [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
        s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
                                   [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
    if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
        int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
        double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
        double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
        double yuv2yuv[3][3];
        /* compose the two double-precision matrices, then rescale for the
         * in/out ranges and bit depths */
        ff_matrix_mul_3x3(yuv2yuv, yuv2rgb, rgb2yuv);
        for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
            for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
                s->yuv2yuv_coeffs[m][n][0] =
                    lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
                          (in_rng * (1 << odepth)));
                for (o = 1; o < 8; o++)
                    s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
        /* Y output never depends on input chroma */
        av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
        av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
        s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
                                   [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
/*
 * Filter init: set up the DSP function table. All conversion state is
 * built lazily per-frame in create_filtergraph().
 * NOTE(review): braces and the return statement fall outside this chunk.
 */
static int init(AVFilterContext *ctx)
    ColorSpaceContext *s = ctx->priv;
    ff_colorspacedsp_init(&s->dsp);
/*
 * Free all buffers owned by the context: intermediate RGB planes, the
 * dither scratch allocations (the *_base pointers own the memory; the
 * offset dither_scratch aliases must not be freed) and the gamma LUT
 * (lin_lut owns the delin_lut half too).
 */
static void uninit(AVFilterContext *ctx)
    ColorSpaceContext *s = ctx->priv;
    av_freep(&s->rgb[0]);
    av_freep(&s->rgb[1]);
    av_freep(&s->rgb[2]);
    av_freep(&s->dither_scratch_base[0][0]);
    av_freep(&s->dither_scratch_base[0][1]);
    av_freep(&s->dither_scratch_base[1][0]);
    av_freep(&s->dither_scratch_base[1][1]);
    av_freep(&s->dither_scratch_base[2][0]);
    av_freep(&s->dither_scratch_base[2][1]);
    av_freep(&s->lin_lut);
/*
 * Per-frame entry point: allocate the output frame, stamp it with the
 * user-selected output color properties, (re)allocate the intermediate
 * RGB/dither buffers when the frame size changed, rebuild conversion
 * state, then run convert() across slice threads (or just copy in
 * passthrough mode) and pass the frame on.
 * NOTE(review): several lines (allocation-failure checks, error-path
 * frees, td.in/td.out assignment, closers) fall outside this chunk.
 */
static int filter_frame(AVFilterLink *link, AVFrame *in)
    AVFilterContext *ctx = link->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ColorSpaceContext *s = ctx->priv;
    // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
    // input one if it is writable *OR* the actual literal values of in_*
    // and out_* are identical (not just their respective properties)
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    /* RGB plane stride in bytes, 32-byte aligned for SIMD */
    ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
    unsigned rgb_sz = rgb_stride * in->height;
    struct ThreadData td;
        return AVERROR(ENOMEM);
    res = av_frame_copy_props(out, in);
    /* output properties: explicit option wins, else derived from "all" */
    out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
                           default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
    if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);
        out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
        /* upgrade bt2020-10 to bt2020-12 for deep output formats */
        if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
            out->color_trc = AVCOL_TRC_BT2020_12;
        out->color_trc = s->user_trc;
    out->colorspace = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
                      default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
    out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
                       in->color_range : s->user_rng;
    /* (re)allocate intermediate buffers when the frame geometry changed */
    if (rgb_sz != s->rgb_sz) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);
        int uvw = in->width >> desc->log2_chroma_w;
        av_freep(&s->rgb[0]);
        av_freep(&s->rgb[1]);
        av_freep(&s->rgb[2]);
        av_freep(&s->dither_scratch_base[0][0]);
        av_freep(&s->dither_scratch_base[0][1]);
        av_freep(&s->dither_scratch_base[1][0]);
        av_freep(&s->dither_scratch_base[1][1]);
        av_freep(&s->dither_scratch_base[2][0]);
        av_freep(&s->dither_scratch_base[2][1]);
        s->rgb[0] = av_malloc(rgb_sz);
        s->rgb[1] = av_malloc(rgb_sz);
        s->rgb[2] = av_malloc(rgb_sz);
        /* dither scratch: one extra element each side; luma plane at full
         * width, chroma planes at subsampled width */
        s->dither_scratch_base[0][0] =
            av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
        s->dither_scratch_base[0][1] =
            av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
        s->dither_scratch_base[1][0] =
            av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
        s->dither_scratch_base[1][1] =
            av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
        s->dither_scratch_base[2][0] =
            av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
        s->dither_scratch_base[2][1] =
            av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
        /* working pointers start one element in for left-edge padding */
        s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
        s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
        s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
        s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
        s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
        s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
        if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
            !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
            !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
            !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
            return AVERROR(ENOMEM);
    res = create_filtergraph(ctx, in, out);
    /* convert byte stride to int16_t elements for the RGB planes */
    s->rgb_stride = rgb_stride / sizeof(int16_t);
    td.in_linesize[0] = in->linesize[0];
    td.in_linesize[1] = in->linesize[1];
    td.in_linesize[2] = in->linesize[2];
    td.out_linesize[0] = out->linesize[0];
    td.out_linesize[1] = out->linesize[1];
    td.out_linesize[2] = out->linesize[2];
    td.in_ss_h = av_pix_fmt_desc_get(in->format)->log2_chroma_h;
    td.out_ss_h = av_pix_fmt_desc_get(out->format)->log2_chroma_h;
    if (s->yuv2yuv_passthrough) {
        res = av_frame_copy(out, in);
    /* at most one job per pair of lines (see convert()) */
    ctx->internal->execute(ctx, convert, &td, NULL,
                           FFMIN((in->height + 1) >> 1, ff_filter_get_nb_threads(ctx)));
    return ff_filter_frame(outlink, out);
/*
 * Negotiate pixel formats: planar YUV 8/10/12-bit (plus the full-range J
 * variants). If the "format" option is set, the output side is restricted
 * to that single format; otherwise input and output share the full list.
 * NOTE(review): the pix_fmts terminator, error checks on res, and closers
 * fall outside this chunk of the file.
 */
static int query_formats(AVFilterContext *ctx)
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
    ColorSpaceContext *s = ctx->priv;
    AVFilterFormats *formats = ff_make_format_list(pix_fmts);
        return AVERROR(ENOMEM);
    if (s->user_format == AV_PIX_FMT_NONE)
        return ff_set_common_formats(ctx, formats);
    res = ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
    /* output side: only the user-requested format */
    res = ff_add_format(&formats, s->user_format);
    return ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
/*
 * Output link configuration: reject odd dimensions (the slice/subsampling
 * logic assumes even width/height; see the FIXME near the top of the
 * file), then copy geometry and timing from the input link.
 * NOTE(review): braces and the final return fall outside this chunk.
 */
static int config_props(AVFilterLink *outlink)
    AVFilterContext *ctx = outlink->dst;
    AVFilterLink *inlink = outlink->src->inputs[0];
    if (inlink->w % 2 || inlink->h % 2) {
        av_log(ctx, AV_LOG_ERROR, "Invalid odd size (%dx%d)\n",
               inlink->w, inlink->h);
        return AVERROR_PATCHWELCOME;
    outlink->w = inlink->w;
    outlink->h = inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->time_base = inlink->time_base;
/* Shorthands for the AVOption table below: member offset into the private
 * context, common option flags, and a named-constant entry belonging to
 * the option "unit" z. */
#define OFFSET(x) offsetof(ColorSpaceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
#define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, z }
996 static const AVOption colorspace_options[] = {
997 { "all", "Set all color properties together",
998 OFFSET(user_all), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
999 CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
1000 ENUM("bt470m", CS_BT470M, "all"),
1001 ENUM("bt470bg", CS_BT470BG, "all"),
1002 ENUM("bt601-6-525", CS_BT601_6_525, "all"),
1003 ENUM("bt601-6-625", CS_BT601_6_625, "all"),
1004 ENUM("bt709", CS_BT709, "all"),
1005 ENUM("smpte170m", CS_SMPTE170M, "all"),
1006 ENUM("smpte240m", CS_SMPTE240M, "all"),
1007 ENUM("bt2020", CS_BT2020, "all"),
1009 { "space", "Output colorspace",
1010 OFFSET(user_csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
1011 AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, "csp"},
1012 ENUM("bt709", AVCOL_SPC_BT709, "csp"),
1013 ENUM("fcc", AVCOL_SPC_FCC, "csp"),
1014 ENUM("bt470bg", AVCOL_SPC_BT470BG, "csp"),
1015 ENUM("smpte170m", AVCOL_SPC_SMPTE170M, "csp"),
1016 ENUM("smpte240m", AVCOL_SPC_SMPTE240M, "csp"),
1017 ENUM("ycgco", AVCOL_SPC_YCGCO, "csp"),
1018 ENUM("gbr", AVCOL_SPC_RGB, "csp"),
1019 ENUM("bt2020nc", AVCOL_SPC_BT2020_NCL, "csp"),
1020 ENUM("bt2020ncl", AVCOL_SPC_BT2020_NCL, "csp"),
1022 { "range", "Output color range",
1023 OFFSET(user_rng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
1024 AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
1025 ENUM("tv", AVCOL_RANGE_MPEG, "rng"),
1026 ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
1027 ENUM("pc", AVCOL_RANGE_JPEG, "rng"),
1028 ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),
1030 { "primaries", "Output color primaries",
1031 OFFSET(user_prm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
1032 AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
1033 ENUM("bt709", AVCOL_PRI_BT709, "prm"),
1034 ENUM("bt470m", AVCOL_PRI_BT470M, "prm"),
1035 ENUM("bt470bg", AVCOL_PRI_BT470BG, "prm"),
1036 ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
1037 ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
1038 ENUM("smpte428", AVCOL_PRI_SMPTE428, "prm"),
1039 ENUM("film", AVCOL_PRI_FILM, "prm"),
1040 ENUM("smpte431", AVCOL_PRI_SMPTE431, "prm"),
1041 ENUM("smpte432", AVCOL_PRI_SMPTE432, "prm"),
1042 ENUM("bt2020", AVCOL_PRI_BT2020, "prm"),
1043 ENUM("jedec-p22", AVCOL_PRI_JEDEC_P22, "prm"),
1045 { "trc", "Output transfer characteristics",
1046 OFFSET(user_trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
1047 AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
1048 ENUM("bt709", AVCOL_TRC_BT709, "trc"),
1049 ENUM("bt470m", AVCOL_TRC_GAMMA22, "trc"),
1050 ENUM("gamma22", AVCOL_TRC_GAMMA22, "trc"),
1051 ENUM("bt470bg", AVCOL_TRC_GAMMA28, "trc"),
1052 ENUM("gamma28", AVCOL_TRC_GAMMA28, "trc"),
1053 ENUM("smpte170m", AVCOL_TRC_SMPTE170M, "trc"),
1054 ENUM("smpte240m", AVCOL_TRC_SMPTE240M, "trc"),
1055 ENUM("srgb", AVCOL_TRC_IEC61966_2_1, "trc"),
1056 ENUM("iec61966-2-1", AVCOL_TRC_IEC61966_2_1, "trc"),
1057 ENUM("xvycc", AVCOL_TRC_IEC61966_2_4, "trc"),
1058 ENUM("iec61966-2-4", AVCOL_TRC_IEC61966_2_4, "trc"),
1059 ENUM("bt2020-10", AVCOL_TRC_BT2020_10, "trc"),
1060 ENUM("bt2020-12", AVCOL_TRC_BT2020_12, "trc"),
1062 { "format", "Output pixel format",
1063 OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
1064 AV_PIX_FMT_NONE, AV_PIX_FMT_GBRAP12LE, FLAGS, "fmt" },
1065 ENUM("yuv420p", AV_PIX_FMT_YUV420P, "fmt"),
1066 ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
1067 ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
1068 ENUM("yuv422p", AV_PIX_FMT_YUV422P, "fmt"),
1069 ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
1070 ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
1071 ENUM("yuv444p", AV_PIX_FMT_YUV444P, "fmt"),
1072 ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
1073 ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),
1075 { "fast", "Ignore primary chromaticity and gamma correction",
1076 OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
1079 { "dither", "Dithering mode",
1080 OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
1081 DITHER_NONE, DITHER_NB - 1, FLAGS, "dither" },
1082 ENUM("none", DITHER_NONE, "dither"),
1083 ENUM("fsb", DITHER_FSB, "dither"),
1085 { "wpadapt", "Whitepoint adaptation method",
1086 OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
1087 WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, "wpadapt" },
1088 ENUM("bradford", WP_ADAPT_BRADFORD, "wpadapt"),
1089 ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
1090 ENUM("identity", WP_ADAPT_IDENTITY, "wpadapt"),
1092 { "iall", "Set all input color properties together",
1093 OFFSET(user_iall), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
1094 CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
1095 { "ispace", "Input colorspace",
1096 OFFSET(user_icsp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
1097 AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "csp" },
1098 { "irange", "Input color range",
1099 OFFSET(user_irng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
1100 AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
1101 { "iprimaries", "Input color primaries",
1102 OFFSET(user_iprm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
1103 AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
1104 { "itrc", "Input transfer characteristics",
1105 OFFSET(user_itrc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
1106 AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
/* Expands to the colorspace_class AVClass referencing colorspace_options. */
AVFILTER_DEFINE_CLASS(colorspace);
1113 static const AVFilterPad inputs[] = {
1116 .type = AVMEDIA_TYPE_VIDEO,
1117 .filter_frame = filter_frame,
1122 static const AVFilterPad outputs[] = {
1125 .type = AVMEDIA_TYPE_VIDEO,
1126 .config_props = config_props,
1131 AVFilter ff_vf_colorspace = {
1132 .name = "colorspace",
1133 .description = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
1136 .query_formats = query_formats,
1137 .priv_size = sizeof(ColorSpaceContext),
1138 .priv_class = &colorspace_class,
1141 .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,