2 * ColorMatrix v2.2 for Avisynth 2.5.x
4 * Copyright (C) 2006-2007 Kevin Stone
6 * ColorMatrix 1.x is Copyright (C) Wilbert Dijkhof
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
13 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 * License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * ColorMatrix 2.0 is based on the original ColorMatrix filter by Wilbert
26 * Dijkhof. It adds the ability to convert between any of: Rec.709, FCC,
27 * Rec.601, and SMPTE 240M. It also makes pre and post clipping optional,
28 * adds an option to use scaled or non-scaled coefficients, and more...
#include <float.h>

#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
/* Convert a double to signed 16.16 fixed point, rounding half away from zero;
 * DBL_EPSILON compensates for representation error on negative inputs. */
#define NS(n) ((n) < 0 ? (int)((n)*65536.0-0.5+DBL_EPSILON) : (int)((n)*65536.0+0.5))
/* Clamp an int to the 8-bit sample range [0, 255]. */
#define CB(n) av_clip_uint8(n)
/* RGB -> YUV conversion matrices for the four supported colorimetry
 * standards.  Row order is Y, U, V; column order is G, B, R (so e.g.
 * Y = 0.7152 G + 0.0722 B + 0.2126 R for Rec.709). */
static const double yuv_coeff[4][3][3] = {
    { { +0.7152, +0.0722, +0.2126 },   // Rec.709 (0)
      { -0.3850, +0.5000, -0.1150 },
      { -0.4540, -0.0460, +0.5000 } },
    { { +0.5900, +0.1100, +0.3000 },   // FCC (1)
      { -0.3310, +0.5000, -0.1690 },
      { -0.4210, -0.0790, +0.5000 } },
    { { +0.5870, +0.1140, +0.2990 },   // Rec.601 (ITU-R BT.470-2/SMPTE 170M) (2)
      { -0.3313, +0.5000, -0.1687 },
      { -0.4187, -0.0813, +0.5000 } },
    { { +0.7010, +0.0870, +0.2120 },   // SMPTE 240M (3)
      { -0.3840, +0.5000, -0.1160 },
      { -0.4450, -0.0550, +0.5000 } },
};
69 int yuv_convert[16][3][3];
71 int source, dest; ///< ColorMode
76 typedef struct ThreadData {
/* AVOption helpers: field offset into the private context, and the
 * option flags shared by every entry in colormatrix_options. */
#define OFFSET(x) offsetof(ColorMatrixContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
90 static const AVOption colormatrix_options[] = {
91 { "src", "set source color matrix", OFFSET(source), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
92 { "dst", "set destination color matrix", OFFSET(dest), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
93 { "bt709", "set BT.709 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT709}, .flags=FLAGS, .unit="color_mode" },
94 { "fcc", "set FCC colorspace ", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_FCC}, .flags=FLAGS, .unit="color_mode" },
95 { "bt601", "set BT.601 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
96 { "bt470", "set BT.470 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
97 { "smpte170m", "set SMTPE-170M colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
98 { "smpte240m", "set SMPTE-240M colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_SMPTE240M}, .flags=FLAGS, .unit="color_mode" },
/* Generates colormatrix_class, wired to colormatrix_options. */
AVFILTER_DEFINE_CLASS(colormatrix);
/**
 * Invert a 3x3 matrix: im = m^-1, computed as adjugate(m) / det(m).
 * Assumes m is invertible (the fixed coefficient tables always are).
 */
static void inverse3x3(double im[3][3], const double m[3][3])
{
    double det = m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1]) -
                 m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0]) +
                 m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0]);

    det = 1.0 / det; /* multiply by the reciprocal instead of dividing 9 times */
    im[0][0] = det * (m[1][1] * m[2][2] - m[1][2] * m[2][1]);
    im[0][1] = det * (m[0][2] * m[2][1] - m[0][1] * m[2][2]);
    im[0][2] = det * (m[0][1] * m[1][2] - m[0][2] * m[1][1]);
    im[1][0] = det * (m[1][2] * m[2][0] - m[1][0] * m[2][2]);
    im[1][1] = det * (m[0][0] * m[2][2] - m[0][2] * m[2][0]);
    im[1][2] = det * (m[0][2] * m[1][0] - m[0][0] * m[1][2]);
    im[2][0] = det * (m[1][0] * m[2][1] - m[1][1] * m[2][0]);
    im[2][1] = det * (m[0][1] * m[2][0] - m[0][0] * m[2][1]);
    im[2][2] = det * (m[0][0] * m[1][1] - m[0][1] * m[1][0]);
}
/**
 * Matrix product cm = yuv * rgb, used to chain "source YUV -> RGB"
 * (inverted matrix) with "RGB -> destination YUV".
 */
static void solve_coefficients(double cm[3][3], double rgb[3][3], const double yuv[3][3])
{
    int i, j;

    for (i = 0; i < 3; i++)
        for (j = 0; j < 3; j++)
            cm[i][j] = yuv[i][0] * rgb[0][j] + yuv[i][1] * rgb[1][j] + yuv[i][2] * rgb[2][j];
}
147 static void calc_coefficients(AVFilterContext *ctx)
149 ColorMatrixContext *color = ctx->priv;
150 double rgb_coeffd[4][3][3];
151 double yuv_convertd[16][3][3];
155 for (i = 0; i < 4; i++)
156 inverse3x3(rgb_coeffd[i], yuv_coeff[i]);
157 for (i = 0; i < 4; i++) {
158 for (j = 0; j < 4; j++) {
159 solve_coefficients(yuv_convertd[v], rgb_coeffd[i], yuv_coeff[j]);
160 for (k = 0; k < 3; k++) {
161 color->yuv_convert[v][k][0] = NS(yuv_convertd[v][k][0]);
162 color->yuv_convert[v][k][1] = NS(yuv_convertd[v][k][1]);
163 color->yuv_convert[v][k][2] = NS(yuv_convertd[v][k][2]);
165 if (color->yuv_convert[v][0][0] != 65536 || color->yuv_convert[v][1][0] != 0 ||
166 color->yuv_convert[v][2][0] != 0) {
167 av_log(ctx, AV_LOG_ERROR, "error calculating conversion coefficients\n");
/* Human-readable names indexed by enum ColorMode (>= 0); used for logging. */
static const char * const color_modes[] = {"bt709", "fcc", "bt601", "smpte240m"};
176 static av_cold int init(AVFilterContext *ctx)
178 ColorMatrixContext *color = ctx->priv;
180 if (color->dest == COLOR_MODE_NONE) {
181 av_log(ctx, AV_LOG_ERROR, "Unspecified destination color space\n");
182 return AVERROR(EINVAL);
185 if (color->source == color->dest) {
186 av_log(ctx, AV_LOG_ERROR, "Source and destination color space must not be identical\n");
187 return AVERROR(EINVAL);
190 calc_coefficients(ctx);
195 static int process_slice_uyvy422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
197 const ThreadData *td = arg;
198 const AVFrame *src = td->src;
199 AVFrame *dst = td->dst;
200 const int height = src->height;
201 const int width = src->width*2;
202 const int src_pitch = src->linesize[0];
203 const int dst_pitch = dst->linesize[0];
204 const int slice_start = (height * jobnr ) / nb_jobs;
205 const int slice_end = (height * (jobnr+1)) / nb_jobs;
206 const unsigned char *srcp = src->data[0] + slice_start * src_pitch;
207 unsigned char *dstp = dst->data[0] + slice_start * dst_pitch;
208 const int c2 = td->c2;
209 const int c3 = td->c3;
210 const int c4 = td->c4;
211 const int c5 = td->c5;
212 const int c6 = td->c6;
213 const int c7 = td->c7;
216 for (y = slice_start; y < slice_end; y++) {
217 for (x = 0; x < width; x += 4) {
218 const int u = srcp[x + 0] - 128;
219 const int v = srcp[x + 2] - 128;
220 const int uvval = c2 * u + c3 * v + 1081344;
221 dstp[x + 0] = CB((c4 * u + c5 * v + 8421376) >> 16);
222 dstp[x + 1] = CB((65536 * (srcp[x + 1] - 16) + uvval) >> 16);
223 dstp[x + 2] = CB((c6 * u + c7 * v + 8421376) >> 16);
224 dstp[x + 3] = CB((65536 * (srcp[x + 3] - 16) + uvval) >> 16);
233 static int process_slice_yuv422p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
235 const ThreadData *td = arg;
236 const AVFrame *src = td->src;
237 AVFrame *dst = td->dst;
238 const int height = src->height;
239 const int width = src->width;
240 const int slice_start = (height * jobnr ) / nb_jobs;
241 const int slice_end = (height * (jobnr+1)) / nb_jobs;
242 const int src_pitchY = src->linesize[0];
243 const int src_pitchUV = src->linesize[1];
244 const unsigned char *srcpU = src->data[1] + slice_start * src_pitchUV;
245 const unsigned char *srcpV = src->data[2] + slice_start * src_pitchUV;
246 const unsigned char *srcpY = src->data[0] + slice_start * src_pitchY;
247 const int dst_pitchY = dst->linesize[0];
248 const int dst_pitchUV = dst->linesize[1];
249 unsigned char *dstpU = dst->data[1] + slice_start * dst_pitchUV;
250 unsigned char *dstpV = dst->data[2] + slice_start * dst_pitchUV;
251 unsigned char *dstpY = dst->data[0] + slice_start * dst_pitchY;
252 const int c2 = td->c2;
253 const int c3 = td->c3;
254 const int c4 = td->c4;
255 const int c5 = td->c5;
256 const int c6 = td->c6;
257 const int c7 = td->c7;
260 for (y = slice_start; y < slice_end; y++) {
261 for (x = 0; x < width; x += 2) {
262 const int u = srcpU[x >> 1] - 128;
263 const int v = srcpV[x >> 1] - 128;
264 const int uvval = c2 * u + c3 * v + 1081344;
265 dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
266 dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
267 dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
268 dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
272 srcpU += src_pitchUV;
273 srcpV += src_pitchUV;
274 dstpU += dst_pitchUV;
275 dstpV += dst_pitchUV;
281 static int process_slice_yuv420p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
283 const ThreadData *td = arg;
284 const AVFrame *src = td->src;
285 AVFrame *dst = td->dst;
286 const int height = FFALIGN(src->height, 2) >> 1;
287 const int width = src->width;
288 const int slice_start = ((height * jobnr ) / nb_jobs) << 1;
289 const int slice_end = ((height * (jobnr+1)) / nb_jobs) << 1;
290 const int src_pitchY = src->linesize[0];
291 const int src_pitchUV = src->linesize[1];
292 const int dst_pitchY = dst->linesize[0];
293 const int dst_pitchUV = dst->linesize[1];
294 const unsigned char *srcpY = src->data[0] + src_pitchY * slice_start;
295 const unsigned char *srcpU = src->data[1] + src_pitchUV * (slice_start >> 1);
296 const unsigned char *srcpV = src->data[2] + src_pitchUV * (slice_start >> 1);
297 const unsigned char *srcpN = src->data[0] + src_pitchY * (slice_start + 1);
298 unsigned char *dstpU = dst->data[1] + dst_pitchUV * (slice_start >> 1);
299 unsigned char *dstpV = dst->data[2] + dst_pitchUV * (slice_start >> 1);
300 unsigned char *dstpY = dst->data[0] + dst_pitchY * slice_start;
301 unsigned char *dstpN = dst->data[0] + dst_pitchY * (slice_start + 1);
302 const int c2 = td->c2;
303 const int c3 = td->c3;
304 const int c4 = td->c4;
305 const int c5 = td->c5;
306 const int c6 = td->c6;
307 const int c7 = td->c7;
310 for (y = slice_start; y < slice_end; y += 2) {
311 for (x = 0; x < width; x += 2) {
312 const int u = srcpU[x >> 1] - 128;
313 const int v = srcpV[x >> 1] - 128;
314 const int uvval = c2 * u + c3 * v + 1081344;
315 dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
316 dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
317 dstpN[x + 0] = CB((65536 * (srcpN[x + 0] - 16) + uvval) >> 16);
318 dstpN[x + 1] = CB((65536 * (srcpN[x + 1] - 16) + uvval) >> 16);
319 dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
320 dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
322 srcpY += src_pitchY << 1;
323 dstpY += dst_pitchY << 1;
324 srcpN += src_pitchY << 1;
325 dstpN += dst_pitchY << 1;
326 srcpU += src_pitchUV;
327 srcpV += src_pitchUV;
328 dstpU += dst_pitchUV;
329 dstpV += dst_pitchUV;
335 static int config_input(AVFilterLink *inlink)
337 AVFilterContext *ctx = inlink->dst;
338 ColorMatrixContext *color = ctx->priv;
339 const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
341 color->hsub = pix_desc->log2_chroma_w;
342 color->vsub = pix_desc->log2_chroma_h;
344 av_log(ctx, AV_LOG_VERBOSE, "%s -> %s\n",
345 color_modes[color->source], color_modes[color->dest]);
350 static int query_formats(AVFilterContext *ctx)
352 static const enum AVPixelFormat pix_fmts[] = {
358 AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
360 return AVERROR(ENOMEM);
361 return ff_set_common_formats(ctx, fmts_list);
364 static int filter_frame(AVFilterLink *link, AVFrame *in)
366 AVFilterContext *ctx = link->dst;
367 ColorMatrixContext *color = ctx->priv;
368 AVFilterLink *outlink = ctx->outputs[0];
372 out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
375 return AVERROR(ENOMEM);
377 av_frame_copy_props(out, in);
379 if (color->source == COLOR_MODE_NONE) {
380 enum AVColorSpace cs = av_frame_get_colorspace(in);
381 enum ColorMode source;
384 case AVCOL_SPC_BT709 : source = COLOR_MODE_BT709 ; break;
385 case AVCOL_SPC_FCC : source = COLOR_MODE_FCC ; break;
386 case AVCOL_SPC_SMPTE240M : source = COLOR_MODE_SMPTE240M ; break;
387 case AVCOL_SPC_BT470BG : source = COLOR_MODE_BT601 ; break;
388 case AVCOL_SPC_SMPTE170M : source = COLOR_MODE_BT601 ; break;
390 av_log(ctx, AV_LOG_ERROR, "Input frame does not specify a supported colorspace, and none has been specified as source either\n");
392 return AVERROR(EINVAL);
394 color->mode = source * 4 + color->dest;
396 color->mode = color->source * 4 + color->dest;
398 switch(color->dest) {
399 case COLOR_MODE_BT709 : av_frame_set_colorspace(out, AVCOL_SPC_BT709) ; break;
400 case COLOR_MODE_FCC : av_frame_set_colorspace(out, AVCOL_SPC_FCC) ; break;
401 case COLOR_MODE_SMPTE240M: av_frame_set_colorspace(out, AVCOL_SPC_SMPTE240M); break;
402 case COLOR_MODE_BT601 : av_frame_set_colorspace(out, AVCOL_SPC_BT470BG) ; break;
407 td.c2 = color->yuv_convert[color->mode][0][1];
408 td.c3 = color->yuv_convert[color->mode][0][2];
409 td.c4 = color->yuv_convert[color->mode][1][1];
410 td.c5 = color->yuv_convert[color->mode][1][2];
411 td.c6 = color->yuv_convert[color->mode][2][1];
412 td.c7 = color->yuv_convert[color->mode][2][2];
414 if (in->format == AV_PIX_FMT_YUV422P)
415 ctx->internal->execute(ctx, process_slice_yuv422p, &td, NULL,
416 FFMIN(in->height, ctx->graph->nb_threads));
417 else if (in->format == AV_PIX_FMT_YUV420P)
418 ctx->internal->execute(ctx, process_slice_yuv420p, &td, NULL,
419 FFMIN(in->height / 2, ctx->graph->nb_threads));
421 ctx->internal->execute(ctx, process_slice_uyvy422, &td, NULL,
422 FFMIN(in->height, ctx->graph->nb_threads));
425 return ff_filter_frame(outlink, out);
428 static const AVFilterPad colormatrix_inputs[] = {
431 .type = AVMEDIA_TYPE_VIDEO,
432 .config_props = config_input,
433 .filter_frame = filter_frame,
438 static const AVFilterPad colormatrix_outputs[] = {
441 .type = AVMEDIA_TYPE_VIDEO,
446 AVFilter ff_vf_colormatrix = {
447 .name = "colormatrix",
448 .description = NULL_IF_CONFIG_SMALL("Convert color matrix."),
449 .priv_size = sizeof(ColorMatrixContext),
451 .query_formats = query_formats,
452 .inputs = colormatrix_inputs,
453 .outputs = colormatrix_outputs,
454 .priv_class = &colormatrix_class,
455 .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,