2 * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * Convert between colorspaces.
26 #include "libavutil/avassert.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/pixfmt.h"
32 #include "colorspacedsp.h"
// Default output transfer characteristic chosen when the user sets only the
// "all" preset; indexed by enum Colorspace, with a CS_NB fallback entry so a
// clamped FFMIN(user_all, CS_NB) lookup is always in bounds.
56 static const enum AVColorTransferCharacteristic default_trc[CS_NB + 1] = {
57 [CS_UNSPECIFIED] = AVCOL_TRC_UNSPECIFIED,
58 [CS_BT470M] = AVCOL_TRC_GAMMA22,
59 [CS_BT470BG] = AVCOL_TRC_GAMMA28,
60 [CS_BT601_6_525] = AVCOL_TRC_SMPTE170M,
61 [CS_BT601_6_625] = AVCOL_TRC_SMPTE170M,
62 [CS_BT709] = AVCOL_TRC_BT709,
63 [CS_SMPTE170M] = AVCOL_TRC_SMPTE170M,
64 [CS_SMPTE240M] = AVCOL_TRC_SMPTE240M,
65 [CS_BT2020] = AVCOL_TRC_BT2020_10,
66 [CS_NB] = AVCOL_TRC_UNSPECIFIED,
// Default output color primaries for each "all" preset; same indexing and
// CS_NB fallback convention as default_trc[] above.
69 static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
70 [CS_UNSPECIFIED] = AVCOL_PRI_UNSPECIFIED,
71 [CS_BT470M] = AVCOL_PRI_BT470M,
72 [CS_BT470BG] = AVCOL_PRI_BT470BG,
73 [CS_BT601_6_525] = AVCOL_PRI_SMPTE170M,
74 [CS_BT601_6_625] = AVCOL_PRI_BT470BG,
75 [CS_BT709] = AVCOL_PRI_BT709,
76 [CS_SMPTE170M] = AVCOL_PRI_SMPTE170M,
77 [CS_SMPTE240M] = AVCOL_PRI_SMPTE240M,
78 [CS_BT2020] = AVCOL_PRI_BT2020,
79 [CS_NB] = AVCOL_PRI_UNSPECIFIED,
// Default output YUV matrix (colorspace) for each "all" preset; same indexing
// and CS_NB fallback convention as default_trc[] above.
82 static const enum AVColorSpace default_csp[CS_NB + 1] = {
83 [CS_UNSPECIFIED] = AVCOL_SPC_UNSPECIFIED,
84 [CS_BT470M] = AVCOL_SPC_SMPTE170M,
85 [CS_BT470BG] = AVCOL_SPC_BT470BG,
86 [CS_BT601_6_525] = AVCOL_SPC_SMPTE170M,
87 [CS_BT601_6_625] = AVCOL_SPC_BT470BG,
88 [CS_BT709] = AVCOL_SPC_BT709,
89 [CS_SMPTE170M] = AVCOL_SPC_SMPTE170M,
90 [CS_SMPTE240M] = AVCOL_SPC_SMPTE240M,
91 [CS_BT2020] = AVCOL_SPC_BT2020_NCL,
92 [CS_NB] = AVCOL_SPC_UNSPECIFIED,
// Chromaticity coordinates (CIE xy) of the R/G/B primaries for one set of
// color primaries; used to build the RGB<->XYZ matrices.
95 struct ColorPrimaries {
97 double xr, yr, xg, yg, xb, yb;
// Parameters of the piecewise power-law transfer function:
// alpha/beta are the gain and linear-segment threshold, gamma the exponent,
// delta the linear-segment slope (see fill_gamma_table()).
100 struct TransferCharacteristics {
101 double alpha, beta, gamma, delta;
// Y'=cr*R'+cg*G'+cb*B' luma weights for one YUV matrix.
104 struct LumaCoefficients {
// White point as CIE xy chromaticity.
108 struct WhitepointCoefficients {
// Per-filter-instance state: user options (user_*), the currently configured
// input/output properties (in_*/out_*), the derived integer conversion
// coefficients used by the DSP functions, and scratch RGB plane buffers.
112 typedef struct ColorSpaceContext {
113 const AVClass *class;
115 ColorSpaceDSPContext dsp;
// user_* fields mirror the AVOptions; in_*/out_* track what the current
// coefficient tables were built for so create_filtergraph() can detect change.
117 enum Colorspace user_all;
118 enum AVColorSpace in_csp, out_csp, user_csp;
119 enum AVColorRange in_rng, out_rng, user_rng;
120 enum AVColorTransferCharacteristic in_trc, out_trc, user_trc;
121 enum AVColorPrimaries in_prm, out_prm, user_prm;
122 enum AVPixelFormat in_format, user_format;
// stride of the intermediate rgb[] planes, in int16_t units (see filter_frame).
126 ptrdiff_t rgb_stride;
129 const struct ColorPrimaries *in_primaries, *out_primaries;
130 int lrgb2lrgb_passthrough;
// All fixed-point coefficient tables replicate each value 8x for SIMD lanes.
131 DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];
133 const struct TransferCharacteristics *in_txchr, *out_txchr;
134 int rgb2rgb_passthrough;
// 15-bit linearize/delinearize lookup tables (one allocation, see fill_gamma_table).
135 int16_t *lin_lut, *delin_lut;
137 const struct LumaCoefficients *in_lumacoef, *out_lumacoef;
138 int yuv2yuv_passthrough, yuv2yuv_fastmode;
139 DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
140 DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
141 DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
142 DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
// Double-precision copies kept so the combined yuv2yuv matrix can be rebuilt
// without re-deriving from the luma coefficients.
146 double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
147 int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;
150 // FIXME deal with odd width/heights (or just forbid it)
151 // FIXME faster linearize/delinearize implementation (integer pow)
152 // FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
153 // FIXME test that the values in (de)lin_lut don't exceed their container storage
154 // type size (only useful if we keep the LUT and don't move to fast integer pow)
155 // FIXME dithering if bitdepth goes down?
156 // FIXME bitexact for fate integration?
159 * All constants explained in e.g. https://linuxtv.org/downloads/v4l-dvb-apis/ch02s06.html
160 * The older ones (bt470bg/m) are also explained in their respective ITU docs
161 * (e.g. https://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.470-5-199802-S!!PDF-E.pdf)
162 * whereas the newer ones can typically be copied directly from wikipedia :)
// Luma weights per AVColorSpace; entries not listed are zero-initialized and
// rejected by get_luma_coefficients() below.
164 static const struct LumaCoefficients luma_coefficients[AVCOL_SPC_NB] = {
165 [AVCOL_SPC_FCC] = { 0.30, 0.59, 0.11 },
166 [AVCOL_SPC_BT470BG] = { 0.299, 0.587, 0.114 },
167 [AVCOL_SPC_SMPTE170M] = { 0.299, 0.587, 0.114 },
168 [AVCOL_SPC_BT709] = { 0.2126, 0.7152, 0.0722 },
169 [AVCOL_SPC_SMPTE240M] = { 0.212, 0.701, 0.087 },
170 [AVCOL_SPC_BT2020_NCL] = { 0.2627, 0.6780, 0.0593 },
171 [AVCOL_SPC_BT2020_CL] = { 0.2627, 0.6780, 0.0593 },
// Look up the luma weights for a colorspace; out-of-range or unsupported
// (all-zero) entries yield NULL so callers can report the error.
174 static const struct LumaCoefficients *get_luma_coefficients(enum AVColorSpace csp)
176 const struct LumaCoefficients *coeffs;
178 if (csp >= AVCOL_SPC_NB)
180 coeffs = &luma_coefficients[csp];
// Build the 3x3 RGB->YUV matrix from luma weights: row 0 is Y' directly from
// the coefficients, rows 1/2 (Cb/Cr) are scaled differences normalized so the
// chroma channels span [-0.5, 0.5].
187 static void fill_rgb2yuv_table(const struct LumaCoefficients *coeffs,
188 double rgb2yuv[3][3])
190 double bscale, rscale;
192 rgb2yuv[0][0] = coeffs->cr;
193 rgb2yuv[0][1] = coeffs->cg;
194 rgb2yuv[0][2] = coeffs->cb;
195 bscale = 0.5 / (coeffs->cb - 1.0);
196 rscale = 0.5 / (coeffs->cr - 1.0);
197 rgb2yuv[1][0] = bscale * coeffs->cr;
198 rgb2yuv[1][1] = bscale * coeffs->cg;
201 rgb2yuv[2][1] = rscale * coeffs->cg;
202 rgb2yuv[2][2] = rscale * coeffs->cb;
205 // FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
206 // find any actual tables that document their real values...
207 // See http://www.13thmonkey.org/~boris/gammacorrection/ first graph why it matters
// { alpha, beta, gamma, delta } per AVColorTransferCharacteristic; pure
// power-law curves (gamma22/28) use alpha=1, beta=0, delta=0.
208 static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB] = {
209 [AVCOL_TRC_BT709] = { 1.099, 0.018, 0.45, 4.5 },
210 [AVCOL_TRC_GAMMA22] = { 1.0, 0.0, 1.0 / 2.2, 0.0 },
211 [AVCOL_TRC_GAMMA28] = { 1.0, 0.0, 1.0 / 2.8, 0.0 },
212 [AVCOL_TRC_SMPTE170M] = { 1.099, 0.018, 0.45, 4.5 },
213 [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
214 [AVCOL_TRC_BT2020_10] = { 1.099, 0.018, 0.45, 4.5 },
215 [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
// Look up the transfer-function parameters for a TRC; out-of-range or
// unsupported entries yield NULL so callers can report the error.
218 static const struct TransferCharacteristics *
219 get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
221 const struct TransferCharacteristics *coeffs;
223 if (trc >= AVCOL_TRC_NB)
225 coeffs = &transfer_characteristics[trc];
// CIE xy coordinates of the supported reference white points.
232 static const struct WhitepointCoefficients whitepoint_coefficients[WP_NB] = {
233 [WP_D65] = { 0.3127, 0.3290 },
234 [WP_C] = { 0.3100, 0.3160 },
// { whitepoint, xr, yr, xg, yg, xb, yb } per AVColorPrimaries.
237 static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB] = {
238 [AVCOL_PRI_BT709] = { WP_D65, 0.640, 0.330, 0.300, 0.600, 0.150, 0.060 },
239 [AVCOL_PRI_BT470M] = { WP_C, 0.670, 0.330, 0.210, 0.710, 0.140, 0.080 },
240 [AVCOL_PRI_BT470BG] = { WP_D65, 0.640, 0.330, 0.290, 0.600, 0.150, 0.060,},
241 [AVCOL_PRI_SMPTE170M] = { WP_D65, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
242 [AVCOL_PRI_SMPTE240M] = { WP_D65, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
243 [AVCOL_PRI_BT2020] = { WP_D65, 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 },
// Look up the primaries/whitepoint for an AVColorPrimaries value; out-of-range
// or unsupported entries yield NULL so callers can report the error.
246 static const struct ColorPrimaries *get_color_primaries(enum AVColorPrimaries prm)
248 const struct ColorPrimaries *coeffs;
250 if (prm >= AVCOL_PRI_NB)
252 coeffs = &color_primaries[prm];
// Invert a 3x3 matrix: out[] is first filled with the cofactors (adjugate
// transpose), the determinant is computed from them, and the final loop
// normalizes the entries by the determinant.
259 static void invert_matrix3x3(const double in[3][3], double out[3][3])
261 double m00 = in[0][0], m01 = in[0][1], m02 = in[0][2],
262 m10 = in[1][0], m11 = in[1][1], m12 = in[1][2],
263 m20 = in[2][0], m21 = in[2][1], m22 = in[2][2];
267 out[0][0] = (m11 * m22 - m21 * m12);
268 out[0][1] = -(m01 * m22 - m21 * m02);
269 out[0][2] = (m01 * m12 - m11 * m02);
270 out[1][0] = -(m10 * m22 - m20 * m12);
271 out[1][1] = (m00 * m22 - m20 * m02);
272 out[1][2] = -(m00 * m12 - m10 * m02);
273 out[2][0] = (m10 * m21 - m20 * m11);
274 out[2][1] = -(m00 * m21 - m20 * m01);
275 out[2][2] = (m00 * m11 - m10 * m01);
277 det = m00 * out[0][0] + m10 * out[0][1] + m20 * out[0][2];
280 for (i = 0; i < 3; i++) {
281 for (j = 0; j < 3; j++)
// Allocate and fill the 15-bit linearize (lin_lut, from in_txchr) and
// delinearize (delin_lut, from out_txchr) tables. Index n maps to the value
// (n - 2048) / 28672, i.e. [0.0,1.0] occupies [2048,30720] with headroom for
// out-of-range excursions. Returns 0 or AVERROR(ENOMEM).
286 static int fill_gamma_table(ColorSpaceContext *s)
289 double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
290 double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
291 double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
292 double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
293 double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
// One allocation holds both tables; delin_lut is the second half.
295 s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
297 return AVERROR(ENOMEM);
298 s->delin_lut = &s->lin_lut[32768];
299 for (n = 0; n < 32768; n++) {
300 double v = (n - 2048.0) / 28672.0, d, l;
// Delinearize: power segment mirrored for negative inputs, linear toe
// (slope delta) between -beta and beta.
303 if (v <= -out_beta) {
304 d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
305 } else if (v < out_beta) {
308 d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
310 s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));
// Linearize: inverse of the above using the input curve's parameters.
314 l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
315 } else if (v < in_beta) {
318 l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
320 s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
327 * see e.g. http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
// Build the RGB->XYZ matrix from primaries + whitepoint (Lindbloom's method):
// seed the matrix from the xy chromaticities, invert it, and solve for the
// per-channel scale factors sr/sg/sb that map the whitepoint to Y=1.
329 static void fill_rgb2xyz_table(const struct ColorPrimaries *coeffs,
330 double rgb2xyz[3][3])
332 const struct WhitepointCoefficients *wp = &whitepoint_coefficients[coeffs->wp];
333 double i[3][3], sr, sg, sb, zw;
335 rgb2xyz[0][0] = coeffs->xr / coeffs->yr;
336 rgb2xyz[0][1] = coeffs->xg / coeffs->yg;
337 rgb2xyz[0][2] = coeffs->xb / coeffs->yb;
338 rgb2xyz[1][0] = rgb2xyz[1][1] = rgb2xyz[1][2] = 1.0;
339 rgb2xyz[2][0] = (1.0 - coeffs->xr - coeffs->yr) / coeffs->yr;
340 rgb2xyz[2][1] = (1.0 - coeffs->xg - coeffs->yg) / coeffs->yg;
341 rgb2xyz[2][2] = (1.0 - coeffs->xb - coeffs->yb) / coeffs->yb;
342 invert_matrix3x3(rgb2xyz, i);
343 zw = 1.0 - wp->xw - wp->yw;
344 sr = i[0][0] * wp->xw + i[0][1] * wp->yw + i[0][2] * zw;
345 sg = i[1][0] * wp->xw + i[1][1] * wp->yw + i[1][2] * zw;
346 sb = i[2][0] * wp->xw + i[2][1] * wp->yw + i[2][2] * zw;
// dst = src1 * src2 (3x3 matrix product); dst must not alias either input.
358 static void mul3x3(double dst[3][3], const double src1[3][3], const double src2[3][3])
362 for (m = 0; m < 3; m++)
363 for (n = 0; n < 3; n++)
364 dst[m][n] = src2[m][0] * src1[0][n] +
365 src2[m][1] * src1[1][n] +
366 src2[m][2] * src1[2][n];
370 * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
371 * This function uses the Bradford mechanism.
// Build the XYZ-domain chromatic adaptation matrix from whitepoint src to dst
// using the Bradford transform: out = ma * diag(rd/rs, gd/gs, bd/bs) * ma^-1.
373 static void fill_whitepoint_conv_table(double out[3][3],
374 enum Whitepoint src, enum Whitepoint dst)
// Bradford cone-response matrix.
376 static const double ma[3][3] = {
377 { 0.8951, 0.2664, -0.1614 },
378 { -0.7502, 1.7135, 0.0367 },
379 { 0.0389, -0.0685, 1.0296 },
381 const struct WhitepointCoefficients *wp_src = &whitepoint_coefficients[src];
382 double zw_src = 1.0 - wp_src->xw - wp_src->yw;
383 const struct WhitepointCoefficients *wp_dst = &whitepoint_coefficients[dst];
384 double zw_dst = 1.0 - wp_dst->xw - wp_dst->yw;
385 double mai[3][3], fac[3][3], tmp[3][3];
386 double rs, gs, bs, rd, gd, bd;
388 invert_matrix3x3(ma, mai);
// Cone responses of the source and destination whitepoints.
389 rs = ma[0][0] * wp_src->xw + ma[0][1] * wp_src->yw + ma[0][2] * zw_src;
390 gs = ma[1][0] * wp_src->xw + ma[1][1] * wp_src->yw + ma[1][2] * zw_src;
391 bs = ma[2][0] * wp_src->xw + ma[2][1] * wp_src->yw + ma[2][2] * zw_src;
392 rd = ma[0][0] * wp_dst->xw + ma[0][1] * wp_dst->yw + ma[0][2] * zw_dst;
393 gd = ma[1][0] * wp_dst->xw + ma[1][1] * wp_dst->yw + ma[1][2] * zw_dst;
394 bd = ma[2][0] * wp_dst->xw + ma[2][1] * wp_dst->yw + ma[2][2] * zw_dst;
// fac is diagonal; zero the off-diagonal entries explicitly.
398 fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
399 mul3x3(tmp, ma, fac);
400 mul3x3(out, tmp, mai);
// Apply a 15-bit lookup table in place to all three planes of buf (w x h
// int16_t samples each, rows `stride` elements apart). The +2048 bias and
// 15-bit clip match the LUT index layout set up in fill_gamma_table().
403 static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
404 int w, int h, const int16_t *lut)
408 for (n = 0; n < 3; n++) {
409 int16_t *data = buf[n];
411 for (y = 0; y < h; y++) {
412 for (x = 0; x < w; x++)
413 data[x] = lut[av_clip_uintp2(2048 + data[x], 15)];
// ThreadData members: per-plane line sizes and vertical chroma subsampling
// shifts for the input/output frames, consumed by convert() per slice.
422 ptrdiff_t in_linesize[3], out_linesize[3];
423 int in_ss_h, out_ss_h;
// Slice worker (runs under ff_filter_execute): converts rows [h1,h2) of the
// frame, where the split is aligned to even luma rows so 4:2:0 chroma is not
// cut mid-pair. Either takes the combined yuv2yuv fast path or the full
// yuv2rgb -> (linearize, lrgb2lrgb, delinearize) -> rgb2yuv pipeline.
426 static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
428 struct ThreadData *td = data;
429 ColorSpaceContext *s = ctx->priv;
430 uint8_t *in_data[3], *out_data[3];
432 int h_in = (td->in->height + 1) >> 1;
433 int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
434 int w = td->in->width, h = h2 - h1;
436 in_data[0] = td->in->data[0] + td->in_linesize[0] * h1;
437 in_data[1] = td->in->data[1] + td->in_linesize[1] * (h1 >> td->in_ss_h);
438 in_data[2] = td->in->data[2] + td->in_linesize[2] * (h1 >> td->in_ss_h);
439 out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
440 out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
441 out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
442 rgb[0] = s->rgb[0] + s->rgb_stride * h1;
443 rgb[1] = s->rgb[1] + s->rgb_stride * h1;
444 rgb[2] = s->rgb[2] + s->rgb_stride * h1;
446 // FIXME for simd, also make sure we do pictures with negative stride
447 // top-down so we don't overwrite lines with padding of data before it
448 // in the same buffer (same as swscale)
450 if (s->yuv2yuv_fastmode) {
451 // FIXME possibly use a fast mode in case only the y range changes?
452 // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
454 s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
455 s->yuv2yuv_coeffs, s->yuv_offset);
457 // FIXME maybe (for caching efficiency) do pipeline per-line instead of
458 // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
459 // 2 lines, for yuv420.)
462 * - yuv2rgb converts from whatever range the input was ([16-235/240] or
463 * [0,255] or the 10/12bpp equivalents thereof) to an integer version
464 * of RGB in pseudo-restricted 15+sign bits. That means that the float
465 * range [0.0,1.0] is in [0,28672], and the remainder of the int16_t
466 * range is used for overflow/underflow outside the representable
467 * range of this RGB type. rgb2yuv is the exact opposite.
468 * - gamma correction is done using a LUT since that appears to work
470 * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
471 * (or rgb2yuv conversion) uses nearest-neighbour sampling to read
472 * chroma pixels at luma resolution. If you want some more fancy
473 * filter, you can use swscale to convert to yuv444p.
474 * - all coefficients are 14bit (so in the [-2.0,2.0] range).
476 s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
477 s->yuv2rgb_coeffs, s->yuv_offset[0]);
478 if (!s->rgb2rgb_passthrough) {
479 apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
480 if (!s->lrgb2lrgb_passthrough)
481 s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
482 apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
484 s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
485 s->rgb2yuv_coeffs, s->yuv_offset[1]);
// Translate an AVColorRange + bit depth into the luma offset and Y/UV value
// ranges used for coefficient scaling. Returns 0 on success or
// AVERROR(EINVAL) for unsupported ranges.
491 static int get_range_off(int *off, int *y_rng, int *uv_rng,
492 enum AVColorRange rng, int depth)
495 case AVCOL_RANGE_MPEG:
496 *off = 16 << (depth - 8);
497 *y_rng = 219 << (depth - 8);
498 *uv_rng = 224 << (depth - 8);
500 case AVCOL_RANGE_JPEG:
502 *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
505 return AVERROR(EINVAL);
// (Re)build all conversion state for the given input/output frame properties:
// validates formats, derives the lrgb2lrgb / gamma / yuv2rgb / rgb2yuv /
// yuv2yuv coefficient tables, and selects the DSP function pointers. Cached
// results are invalidated lazily when a frame's color properties change.
// Returns 0 on success or AVERROR(EINVAL)/AVERROR(ENOMEM).
511 static int create_filtergraph(AVFilterContext *ctx,
512 const AVFrame *in, const AVFrame *out)
514 ColorSpaceContext *s = ctx->priv;
515 const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
516 const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
517 int emms = 0, m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
// Only 3-component non-RGB planar formats at 8/10/12 bits with 444/422/420
// subsampling are handled.
519 #define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
520 #define supported_subsampling(lcw, lch) \
521 (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
522 #define supported_format(d) \
523 ((d) != NULL && (d)->nb_components == 3 && \
524 !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
525 supported_depth((d)->comp[0].depth) && \
526 supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))
528 if (!supported_format(in_desc)) {
529 av_log(ctx, AV_LOG_ERROR,
530 "Unsupported input format %d (%s) or bitdepth (%d)\n",
531 in->format, av_get_pix_fmt_name(in->format),
532 in_desc ? in_desc->comp[0].depth : -1);
533 return AVERROR(EINVAL);
535 if (!supported_format(out_desc)) {
536 av_log(ctx, AV_LOG_ERROR,
537 "Unsupported output format %d (%s) or bitdepth (%d)\n",
538 out->format, av_get_pix_fmt_name(out->format),
539 out_desc ? out_desc->comp[0].depth : -1);
540 return AVERROR(EINVAL);
// Invalidate cached lookups whose source properties changed since last frame.
543 if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
544 if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
545 if (in->color_trc != s->in_trc) s->in_txchr = NULL;
546 if (out->color_trc != s->out_trc) s->out_txchr = NULL;
547 if (in->colorspace != s->in_csp ||
548 in->color_range != s->in_rng) s->in_lumacoef = NULL;
549 if (out->colorspace != s->out_csp ||
550 out->color_range != s->out_rng) s->out_lumacoef = NULL;
// Primaries: resolve in/out primaries and build the linear-RGB -> linear-RGB
// matrix (via XYZ, with Bradford whitepoint adaptation when needed).
552 if (!s->out_primaries || !s->in_primaries) {
553 s->in_prm = in->color_primaries;
554 s->in_primaries = get_color_primaries(s->in_prm);
555 if (!s->in_primaries) {
556 av_log(ctx, AV_LOG_ERROR,
557 "Unsupported input primaries %d (%s)\n",
558 s->in_prm, av_color_primaries_name(s->in_prm));
559 return AVERROR(EINVAL);
561 s->out_prm = out->color_primaries;
562 s->out_primaries = get_color_primaries(s->out_prm);
563 if (!s->out_primaries) {
564 if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
565 if (s->user_all == CS_UNSPECIFIED) {
566 av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
568 av_log(ctx, AV_LOG_ERROR,
569 "Unsupported output color property %d\n", s->user_all);
572 av_log(ctx, AV_LOG_ERROR,
573 "Unsupported output primaries %d (%s)\n",
574 s->out_prm, av_color_primaries_name(s->out_prm));
576 return AVERROR(EINVAL);
578 s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
579 sizeof(*s->in_primaries));
580 if (!s->lrgb2lrgb_passthrough) {
581 double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
583 fill_rgb2xyz_table(s->out_primaries, rgb2xyz);
584 invert_matrix3x3(rgb2xyz, xyz2rgb);
585 fill_rgb2xyz_table(s->in_primaries, rgb2xyz);
586 if (s->out_primaries->wp != s->in_primaries->wp) {
587 double wpconv[3][3], tmp[3][3];
589 fill_whitepoint_conv_table(wpconv, s->in_primaries->wp,
590 s->out_primaries->wp);
591 mul3x3(tmp, rgb2xyz, wpconv);
592 mul3x3(rgb2rgb, tmp, xyz2rgb);
594 mul3x3(rgb2rgb, rgb2xyz, xyz2rgb);
// Quantize to 14-bit fixed point, replicated 8x per SIMD lane.
596 for (m = 0; m < 3; m++)
597 for (n = 0; n < 3; n++) {
598 s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
599 for (o = 1; o < 8; o++)
600 s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
// Transfer characteristics: resolve in/out curves; the gamma LUT is dropped
// and rebuilt below if either side changed.
608 av_freep(&s->lin_lut);
609 s->in_trc = in->color_trc;
610 s->in_txchr = get_transfer_characteristics(s->in_trc);
612 av_log(ctx, AV_LOG_ERROR,
613 "Unsupported input transfer characteristics %d (%s)\n",
614 s->in_trc, av_color_transfer_name(s->in_trc));
615 return AVERROR(EINVAL);
620 av_freep(&s->lin_lut);
621 s->out_trc = out->color_trc;
622 s->out_txchr = get_transfer_characteristics(s->out_trc);
624 if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
625 if (s->user_all == CS_UNSPECIFIED) {
626 av_log(ctx, AV_LOG_ERROR,
627 "Please specify output transfer characteristics\n");
629 av_log(ctx, AV_LOG_ERROR,
630 "Unsupported output color property %d\n", s->user_all);
633 av_log(ctx, AV_LOG_ERROR,
634 "Unsupported output transfer characteristics %d (%s)\n",
635 s->out_trc, av_color_transfer_name(s->out_trc));
637 return AVERROR(EINVAL);
// Whole RGB stage can be skipped in fast mode or when primaries + curves match.
641 s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
642 !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
643 if (!s->rgb2rgb_passthrough && !s->lin_lut) {
644 res = fill_gamma_table(s);
// YUV matrices: resolve in/out luma coefficients and ranges.
650 if (!s->in_lumacoef) {
651 s->in_csp = in->colorspace;
652 s->in_rng = in->color_range;
653 s->in_lumacoef = get_luma_coefficients(s->in_csp);
654 if (!s->in_lumacoef) {
655 av_log(ctx, AV_LOG_ERROR,
656 "Unsupported input colorspace %d (%s)\n",
657 s->in_csp, av_color_space_name(s->in_csp));
658 return AVERROR(EINVAL);
663 if (!s->out_lumacoef) {
664 s->out_csp = out->colorspace;
665 s->out_rng = out->color_range;
666 s->out_lumacoef = get_luma_coefficients(s->out_csp);
667 if (!s->out_lumacoef) {
668 if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
669 if (s->user_all == CS_UNSPECIFIED) {
// NOTE(review): these messages mention "transfer characteristics" but this
// branch is about the output colorspace (s->out_csp) — looks copy-pasted
// from the TRC branch above; the wording should say "colorspace".
670 av_log(ctx, AV_LOG_ERROR,
671 "Please specify output transfer characteristics\n");
673 av_log(ctx, AV_LOG_ERROR,
674 "Unsupported output color property %d\n", s->user_all);
677 av_log(ctx, AV_LOG_ERROR,
678 "Unsupported output transfer characteristics %d (%s)\n",
679 s->out_csp, av_color_space_name(s->out_csp));
681 return AVERROR(EINVAL);
// Fast path requires identical subsampling; full passthrough additionally
// requires identical range and luma coefficients.
686 fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
687 in_desc->log2_chroma_w == out_desc->log2_chroma_w;
688 s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
689 s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
690 !memcmp(s->in_lumacoef, s->out_lumacoef,
691 sizeof(*s->in_lumacoef));
692 if (!s->yuv2yuv_passthrough) {
// Build yuv2rgb fixed-point coefficients from the input matrix/range.
694 double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
695 int off, bits, in_rng;
697 res = get_range_off(&off, &s->in_y_rng, &s->in_uv_rng,
698 s->in_rng, in_desc->comp[0].depth);
700 av_log(ctx, AV_LOG_ERROR,
701 "Unsupported input color range %d (%s)\n",
702 s->in_rng, av_color_range_name(s->in_rng));
705 for (n = 0; n < 8; n++)
706 s->yuv_offset[0][n] = off;
707 fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
708 invert_matrix3x3(rgb2yuv, yuv2rgb);
709 bits = 1 << (in_desc->comp[0].depth - 1);
710 for (n = 0; n < 3; n++) {
// Row 0 (Y) uses the luma range, rows 1/2 (U/V) the chroma range.
711 for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
712 s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
713 for (o = 1; o < 8; o++)
714 s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
// Sanity checks on structural zeros/equalities the SIMD code relies on.
717 av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
718 av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
719 av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
720 av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
721 s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
722 [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
// Build rgb2yuv fixed-point coefficients from the output matrix/range.
727 double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
728 int off, out_rng, bits;
730 res = get_range_off(&off, &s->out_y_rng, &s->out_uv_rng,
731 s->out_rng, out_desc->comp[0].depth);
733 av_log(ctx, AV_LOG_ERROR,
734 "Unsupported output color range %d (%s)\n",
735 s->out_rng, av_color_range_name(s->out_rng));
738 for (n = 0; n < 8; n++)
739 s->yuv_offset[1][n] = off;
740 fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
741 bits = 1 << (29 - out_desc->comp[0].depth);
742 for (n = 0; n < 3; n++) {
743 for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
744 s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
745 for (o = 1; o < 8; o++)
746 s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
749 av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
750 s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
751 [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
// Fast mode: collapse yuv2rgb and rgb2yuv into one yuv2yuv matrix, rescaled
// for the in/out bit depths and ranges.
755 if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
756 int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
757 double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
758 double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
759 double yuv2yuv[3][3];
762 mul3x3(yuv2yuv, yuv2rgb, rgb2yuv);
763 for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
764 for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
765 s->yuv2yuv_coeffs[m][n][0] =
766 lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
767 (in_rng * (1 << odepth)));
768 for (o = 1; o < 8; o++)
769 s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
772 av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
773 av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
774 s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
775 [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
// Filter init callback: set up the DSP function table.
785 static int init(AVFilterContext *ctx)
787 ColorSpaceContext *s = ctx->priv;
789 ff_colorspacedsp_init(&s->dsp);
// Filter uninit callback: release the intermediate RGB planes and the gamma
// LUT (delin_lut aliases into the lin_lut allocation, so one free suffices).
794 static void uninit(AVFilterContext *ctx)
796 ColorSpaceContext *s = ctx->priv;
798 av_freep(&s->rgb[0]);
799 av_freep(&s->rgb[1]);
800 av_freep(&s->rgb[2]);
803 av_freep(&s->lin_lut);
// Per-frame entry point: allocate the output frame, stamp it with the
// user-requested (or preset-derived) color properties, (re)build conversion
// state, then either copy (passthrough) or run convert() across slices.
806 static int filter_frame(AVFilterLink *link, AVFrame *in)
808 AVFilterContext *ctx = link->dst;
809 AVFilterLink *outlink = ctx->outputs[0];
810 ColorSpaceContext *s = ctx->priv;
811 // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
812 // input one if it is writable *OR* the actual literal values of in_*
813 // and out_* are identical (not just their respective properties)
814 AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
// Scratch RGB planes: one int16_t per sample, rows 32-byte aligned.
816 ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
817 unsigned rgb_sz = rgb_stride * in->height;
818 struct ThreadData td;
822 return AVERROR(ENOMEM);
824 av_frame_copy_props(out, in);
// Output properties: explicit user option wins, otherwise the "all" preset
// default (clamped lookup into the default_* tables).
826 out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
827 default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
828 if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
829 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);
831 out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
// Promote bt2020-10 to bt2020-12 when the output is >= 12 bit.
832 if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
833 out->color_trc = AVCOL_TRC_BT2020_12;
835 out->color_trc = s->user_trc;
837 out->colorspace = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
838 default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
839 out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
840 in->color_range : s->user_rng;
// Reallocate the scratch planes when the frame geometry changed.
841 if (rgb_sz != s->rgb_sz) {
842 av_freep(&s->rgb[0]);
843 av_freep(&s->rgb[1]);
844 av_freep(&s->rgb[2]);
847 s->rgb[0] = av_malloc(rgb_sz);
848 s->rgb[1] = av_malloc(rgb_sz);
849 s->rgb[2] = av_malloc(rgb_sz);
850 if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2]) {
852 return AVERROR(ENOMEM);
856 res = create_filtergraph(ctx, in, out);
859 s->rgb_stride = rgb_stride / sizeof(int16_t);
862 td.in_linesize[0] = in->linesize[0];
863 td.in_linesize[1] = in->linesize[1];
864 td.in_linesize[2] = in->linesize[2];
865 td.out_linesize[0] = out->linesize[0];
866 td.out_linesize[1] = out->linesize[1];
867 td.out_linesize[2] = out->linesize[2];
868 td.in_ss_h = av_pix_fmt_desc_get(in->format)->log2_chroma_h;
869 td.out_ss_h = av_pix_fmt_desc_get(out->format)->log2_chroma_h;
870 if (s->yuv2yuv_passthrough) {
871 av_frame_copy(out, in);
// Job count is capped at half the frame height since slices are 2 luma
// rows tall at minimum (4:2:0 chroma alignment).
873 ctx->internal->execute(ctx, convert, &td, NULL,
874 FFMIN((in->height + 1) >> 1, ctx->graph->nb_threads));
878 return ff_filter_frame(outlink, out);
// Negotiate pixel formats: both sides accept the planar YUV list; when the
// user forced an output format, the output side is restricted to that one.
881 static int query_formats(AVFilterContext *ctx)
883 static const enum AVPixelFormat pix_fmts[] = {
884 AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
885 AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
886 AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
890 ColorSpaceContext *s = ctx->priv;
891 AVFilterFormats *formats = ff_make_format_list(pix_fmts);
894 return AVERROR(ENOMEM);
895 if (s->user_format == AV_PIX_FMT_NONE)
896 return ff_set_common_formats(ctx, formats);
897 res = ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
// Fresh single-entry list for the output side holding only the forced format.
901 res = ff_add_format(&formats, s->user_format);
905 return ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
// Output link config: geometry and timing are copied 1:1 from the input.
908 static int config_props(AVFilterLink *outlink)
910 AVFilterLink *inlink = outlink->src->inputs[0];
912 outlink->w = inlink->w;
913 outlink->h = inlink->h;
914 outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
915 outlink->time_base = inlink->time_base;
920 #define OFFSET(x) offsetof(ColorSpaceContext, x)
921 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
922 #define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, z }
// AVOption table: "all" sets a whole preset at once; space/range/primaries/
// trc override individual properties; "format" forces the output pixel format.
924 static const AVOption colorspace_options[] = {
925 { "all", "Set all color properties together",
926 OFFSET(user_all), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
927 CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
928 ENUM("bt470m", CS_BT470M, "all"),
929 ENUM("bt470bg", CS_BT470BG, "all"),
930 ENUM("bt601-6-525", CS_BT601_6_525, "all"),
931 ENUM("bt601-6-625", CS_BT601_6_625, "all"),
932 ENUM("bt709", CS_BT709, "all"),
933 ENUM("smpte170m", CS_SMPTE170M, "all"),
934 ENUM("smpte240m", CS_SMPTE240M, "all"),
935 ENUM("bt2020", CS_BT2020, "all"),
937 { "space", "Output colorspace",
938 OFFSET(user_csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
// NOTE(review): this option stores an AVColorSpace but its min/max use
// AVCOL_PRI_* constants — the families differ; should be AVCOL_SPC_*.
939 AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "csp" },
940 ENUM("bt709", AVCOL_SPC_BT709, "csp"),
941 ENUM("fcc", AVCOL_SPC_FCC, "csp"),
942 ENUM("bt470bg", AVCOL_SPC_BT470BG, "csp"),
943 ENUM("smpte170m", AVCOL_SPC_SMPTE170M, "csp"),
944 ENUM("smpte240m", AVCOL_SPC_SMPTE240M, "csp"),
945 ENUM("bt2020ncl", AVCOL_SPC_BT2020_NCL, "csp"),
947 { "range", "Output color range",
948 OFFSET(user_rng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
949 AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
950 ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
951 ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),
953 { "primaries", "Output color primaries",
954 OFFSET(user_prm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
955 AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
956 ENUM("bt709", AVCOL_PRI_BT709, "prm"),
957 ENUM("bt470m", AVCOL_PRI_BT470M, "prm"),
958 ENUM("bt470bg", AVCOL_PRI_BT470BG, "prm"),
959 ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
960 ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
961 ENUM("bt2020", AVCOL_PRI_BT2020, "prm"),
963 { "trc", "Output transfer characteristics",
964 OFFSET(user_trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
965 AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
966 ENUM("bt709", AVCOL_TRC_BT709, "trc"),
967 ENUM("gamma22", AVCOL_TRC_GAMMA22, "trc"),
968 ENUM("gamma28", AVCOL_TRC_GAMMA28, "trc"),
969 ENUM("smpte170m", AVCOL_TRC_SMPTE170M, "trc"),
970 ENUM("smpte240m", AVCOL_TRC_SMPTE240M, "trc"),
971 ENUM("bt2020-10", AVCOL_TRC_BT2020_10, "trc"),
972 ENUM("bt2020-12", AVCOL_TRC_BT2020_12, "trc"),
974 { "format", "Output pixel format",
975 OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
976 AV_PIX_FMT_NONE, AV_PIX_FMT_GBRAP12LE, FLAGS, "fmt" },
977 ENUM("yuv420p", AV_PIX_FMT_YUV420P, "fmt"),
978 ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
979 ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
980 ENUM("yuv422p", AV_PIX_FMT_YUV422P, "fmt"),
981 ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
982 ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
983 ENUM("yuv444p", AV_PIX_FMT_YUV444P, "fmt"),
984 ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
985 ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),
987 { "fast", "Ignore primary chromaticity and gamma correction",
988 OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
993 AVFILTER_DEFINE_CLASS(colorspace);
// Filter pad descriptions and registration.
995 static const AVFilterPad inputs[] = {
998 .type = AVMEDIA_TYPE_VIDEO,
999 .filter_frame = filter_frame,
1004 static const AVFilterPad outputs[] = {
1007 .type = AVMEDIA_TYPE_VIDEO,
1008 .config_props = config_props,
1013 AVFilter ff_vf_colorspace = {
1014 .name = "colorspace",
1015 .description = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
1018 .query_formats = query_formats,
1019 .priv_size = sizeof(ColorSpaceContext),
1020 .priv_class = &colorspace_class,
// Slice-threaded via convert(); timeline-enable supported generically.
1023 .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,