/*
 * Copyright (c) 2017 Ronald S. Bultje <rsbultje@gmail.com>
 * Copyright (c) 2017 Ashish Pratap Singh <ashk43712@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Calculate the VMAF between two input videos.
 */
#include <inttypes.h>
#include <pthread.h>
#include <libvmaf.h>

#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "dualinput.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
40 typedef struct LIBVMAFContext {
42 FFDualInputContext dinput;
43 const AVPixFmtDescriptor *desc;
48 pthread_t vmaf_thread;
68 #define OFFSET(x) offsetof(LIBVMAFContext, x)
69 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
71 static const AVOption libvmaf_options[] = {
72 {"model_path", "Set the model to be used for computing vmaf.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str="/usr/local/share/model/vmaf_v0.6.1.pkl"}, 0, 1, FLAGS},
73 {"log_path", "Set the file path to be used to store logs.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
74 {"log_fmt", "Set the format of the log (xml or json).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
75 {"enable_transform", "Enables transform for computing vmaf.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
76 {"phone_model", "Invokes the phone model that will generate higher VMAF scores.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
77 {"psnr", "Enables computing psnr along with vmaf.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
78 {"ssim", "Enables computing ssim along with vmaf.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
79 {"ms_ssim", "Enables computing ms-ssim along with vmaf.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
80 {"pool", "Set the pool method to be used for computing vmaf.", OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
84 AVFILTER_DEFINE_CLASS(libvmaf);
86 #define read_frame_fn(type, bits) \
87 static int read_frame_##bits##bit(float *ref_data, float *main_data, \
88 float *temp_data, int stride, \
89 double *score, void *ctx) \
91 LIBVMAFContext *s = (LIBVMAFContext *) ctx; \
94 pthread_mutex_lock(&s->lock); \
96 while (!s->frame_set && !s->eof) { \
97 pthread_cond_wait(&s->cond, &s->lock); \
100 if (s->frame_set) { \
101 int ref_stride = s->gref->linesize[0]; \
102 int main_stride = s->gmain->linesize[0]; \
104 const type *ref_ptr = (const type *) s->gref->data[0]; \
105 const type *main_ptr = (const type *) s->gmain->data[0]; \
107 float *ptr = ref_data; \
114 for (i = 0; i < h; i++) { \
115 for ( j = 0; j < w; j++) { \
116 ptr[j] = (float)ref_ptr[j]; \
118 ref_ptr += ref_stride / sizeof(*ref_ptr); \
119 ptr += stride / sizeof(*ptr); \
124 for (i = 0; i < h; i++) { \
125 for (j = 0; j < w; j++) { \
126 ptr[j] = (float)main_ptr[j]; \
128 main_ptr += main_stride / sizeof(*main_ptr); \
129 ptr += stride / sizeof(*ptr); \
133 ret = !s->frame_set; \
137 pthread_cond_signal(&s->cond); \
138 pthread_mutex_unlock(&s->lock); \
147 read_frame_fn(uint8_t, 8);
148 read_frame_fn(uint16_t, 10);
150 static void compute_vmaf_score(LIBVMAFContext *s)
152 int (*read_frame)(float *ref_data, float *main_data, float *temp_data,
153 int stride, double *score, void *ctx);
155 if (s->desc->comp[0].depth <= 8) {
156 read_frame = read_frame_8bit;
158 read_frame = read_frame_10bit;
161 s->vmaf_score = compute_vmaf(s->format, s->width, s->height, read_frame, s,
162 s->model_path, s->log_path, s->log_fmt, 0, 0,
163 s->enable_transform, s->phone_model, s->psnr,
164 s->ssim, s->ms_ssim, s->pool);
167 static void *call_vmaf(void *ctx)
169 LIBVMAFContext *s = (LIBVMAFContext *) ctx;
170 compute_vmaf_score(s);
171 av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n",s->vmaf_score);
175 static AVFrame *do_vmaf(AVFilterContext *ctx, AVFrame *main, const AVFrame *ref)
177 LIBVMAFContext *s = ctx->priv;
179 pthread_mutex_lock(&s->lock);
181 while (s->frame_set != 0) {
182 pthread_cond_wait(&s->cond, &s->lock);
185 av_frame_ref(s->gref, ref);
186 av_frame_ref(s->gmain, main);
190 pthread_cond_signal(&s->cond);
191 pthread_mutex_unlock(&s->lock);
196 static av_cold int init(AVFilterContext *ctx)
198 LIBVMAFContext *s = ctx->priv;
200 s->gref = av_frame_alloc();
201 s->gmain = av_frame_alloc();
203 pthread_mutex_init(&s->lock, NULL);
204 pthread_cond_init (&s->cond, NULL);
206 s->dinput.process = do_vmaf;
210 static int query_formats(AVFilterContext *ctx)
212 static const enum AVPixelFormat pix_fmts[] = {
213 AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
214 AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE,
218 AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
220 return AVERROR(ENOMEM);
221 return ff_set_common_formats(ctx, fmts_list);
225 static int config_input_ref(AVFilterLink *inlink)
227 AVFilterContext *ctx = inlink->dst;
228 LIBVMAFContext *s = ctx->priv;
231 if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
232 ctx->inputs[0]->h != ctx->inputs[1]->h) {
233 av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
234 return AVERROR(EINVAL);
236 if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
237 av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
238 return AVERROR(EINVAL);
241 s->desc = av_pix_fmt_desc_get(inlink->format);
242 s->width = ctx->inputs[0]->w;
243 s->height = ctx->inputs[0]->h;
245 th = pthread_create(&s->vmaf_thread, NULL, call_vmaf, (void *) s);
247 av_log(ctx, AV_LOG_ERROR, "Thread creation failed.\n");
248 return AVERROR(EINVAL);
255 static int config_output(AVFilterLink *outlink)
257 AVFilterContext *ctx = outlink->src;
258 LIBVMAFContext *s = ctx->priv;
259 AVFilterLink *mainlink = ctx->inputs[0];
262 outlink->w = mainlink->w;
263 outlink->h = mainlink->h;
264 outlink->time_base = mainlink->time_base;
265 outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
266 outlink->frame_rate = mainlink->frame_rate;
267 if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
273 static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
275 LIBVMAFContext *s = inlink->dst->priv;
276 return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
279 static int request_frame(AVFilterLink *outlink)
281 LIBVMAFContext *s = outlink->src->priv;
282 return ff_dualinput_request_frame(&s->dinput, outlink);
285 static av_cold void uninit(AVFilterContext *ctx)
287 LIBVMAFContext *s = ctx->priv;
289 ff_dualinput_uninit(&s->dinput);
291 pthread_mutex_lock(&s->lock);
293 pthread_cond_signal(&s->cond);
294 pthread_mutex_unlock(&s->lock);
296 pthread_join(s->vmaf_thread, NULL);
298 av_frame_free(&s->gref);
299 av_frame_free(&s->gmain);
301 pthread_mutex_destroy(&s->lock);
302 pthread_cond_destroy(&s->cond);
305 static const AVFilterPad libvmaf_inputs[] = {
308 .type = AVMEDIA_TYPE_VIDEO,
309 .filter_frame = filter_frame,
312 .type = AVMEDIA_TYPE_VIDEO,
313 .filter_frame = filter_frame,
314 .config_props = config_input_ref,
319 static const AVFilterPad libvmaf_outputs[] = {
322 .type = AVMEDIA_TYPE_VIDEO,
323 .config_props = config_output,
324 .request_frame = request_frame,
329 AVFilter ff_vf_libvmaf = {
331 .description = NULL_IF_CONFIG_SMALL("Calculate the VMAF between two video streams."),
334 .query_formats = query_formats,
335 .priv_size = sizeof(LIBVMAFContext),
336 .priv_class = &libvmaf_class,
337 .inputs = libvmaf_inputs,
338 .outputs = libvmaf_outputs,