/*
 * Copyright (c) 2017 Ronald S. Bultje <rsbultje@gmail.com>
 * Copyright (c) 2017 Ashish Pratap Singh <ashk43712@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Calculate the VMAF between two input videos.
 */
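
/*
 * Example invocation (a sketch; the file names are illustrative and the model
 * path defaults to the value of the "model_path" option below):
 *
 *   ffmpeg -i distorted.mp4 -i reference.mp4 \
 *          -lavfi libvmaf="log_fmt=json:log_path=vmaf.json" -f null -
 *
 * The pooled VMAF score is printed through the logging system when
 * processing finishes.
 */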

#include <pthread.h>
#include <libvmaf.h>
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "framesync.h"
#include "internal.h"
#include "video.h"

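/* Filter private context.  The framesync callback (do_vmaf) publishes each
 * frame pair into gref/gmain; a dedicated worker thread running libvmaf's
 * compute_vmaf() consumes them.  lock/cond coordinate the hand-off and eof
 * tells the worker that no more frames will arrive. */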
typedef struct LIBVMAFContext {
    const AVClass *class;
    FFFrameSync fs;
    const AVPixFmtDescriptor *desc;
    int width;
    int height;
    double vmaf_score;
    int vmaf_thread_created;
    pthread_t vmaf_thread;
    pthread_mutex_t lock;
    pthread_cond_t cond;
    int eof;
    AVFrame *gmain;
    AVFrame *gref;
    int frame_set;
    char *model_path;
    char *log_path;
    char *log_fmt;
    int disable_clip;
    int disable_avx;
    int enable_transform;
    int phone_model;
    int psnr;
    int ssim;
    int ms_ssim;
    char *pool;
    int n_threads;
    int n_subsample;
    int enable_conf_interval;
    int error;
} LIBVMAFContext;

#define OFFSET(x) offsetof(LIBVMAFContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption libvmaf_options[] = {
    {"model_path",  "Set the model to be used for computing vmaf.",                     OFFSET(model_path), AV_OPT_TYPE_STRING, {.str="/usr/local/share/model/vmaf_v0.6.1.pkl"}, 0, 1, FLAGS},
    {"log_path",  "Set the file path to be used to store logs.",                        OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"log_fmt",  "Set the format of the log (xml or json).",                            OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"enable_transform",  "Enables transform for computing vmaf.",                      OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"phone_model",  "Invokes the phone model that will generate higher VMAF scores.",  OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"psnr",  "Enables computing psnr along with vmaf.",                                OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"ssim",  "Enables computing ssim along with vmaf.",                                OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"ms_ssim",  "Enables computing ms-ssim along with vmaf.",                          OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"pool",  "Set the pool method to be used for computing vmaf.",                     OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"n_threads", "Set number of threads to be used when computing vmaf.",              OFFSET(n_threads), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT_MAX, FLAGS},
    {"n_subsample", "Set interval for frame subsampling used when computing vmaf.",     OFFSET(n_subsample), AV_OPT_TYPE_INT, {.i64=1}, 1, UINT_MAX, FLAGS},
    {"enable_conf_interval",  "Enables confidence interval.",                           OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    { NULL }
};

FRAMESYNC_DEFINE_CLASS(libvmaf, LIBVMAFContext, fs);

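/* Frame-fetch callback handed to compute_vmaf().  It blocks until do_vmaf()
 * has published a frame pair (or EOF was signalled), converts the luma plane
 * of the reference and distorted frames to float into the buffers provided
 * by libvmaf, and returns 0 on success or 2 to signal end of stream.  One
 * variant is generated for 8-bit and one for 10-bit input. */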
#define read_frame_fn(type, bits) \
    static int read_frame_##bits##bit(float *ref_data, float *main_data, \
                                      float *temp_data, int stride, void *ctx) \
{ \
    LIBVMAFContext *s = (LIBVMAFContext *) ctx; \
    int ret; \
    \
    pthread_mutex_lock(&s->lock); \
    \
    while (!s->frame_set && !s->eof) { \
        pthread_cond_wait(&s->cond, &s->lock); \
    } \
    \
    if (s->frame_set) { \
        int ref_stride = s->gref->linesize[0]; \
        int main_stride = s->gmain->linesize[0]; \
        \
        const type *ref_ptr = (const type *) s->gref->data[0]; \
        const type *main_ptr = (const type *) s->gmain->data[0]; \
        \
        float *ptr = ref_data; \
        \
        int h = s->height; \
        int w = s->width; \
        \
        int i, j; \
        \
        for (i = 0; i < h; i++) { \
            for (j = 0; j < w; j++) { \
                ptr[j] = (float)ref_ptr[j]; \
            } \
            ref_ptr += ref_stride / sizeof(*ref_ptr); \
            ptr += stride / sizeof(*ptr); \
        } \
        \
        ptr = main_data; \
        \
        for (i = 0; i < h; i++) { \
            for (j = 0; j < w; j++) { \
                ptr[j] = (float)main_ptr[j]; \
            } \
            main_ptr += main_stride / sizeof(*main_ptr); \
            ptr += stride / sizeof(*ptr); \
        } \
    } \
    \
    ret = !s->frame_set; \
    \
    av_frame_unref(s->gref); \
    av_frame_unref(s->gmain); \
    s->frame_set = 0; \
    \
    pthread_cond_signal(&s->cond); \
    pthread_mutex_unlock(&s->lock); \
    \
    if (ret) { \
        return 2; \
    } \
    \
    return 0; \
}

read_frame_fn(uint8_t, 8);
read_frame_fn(uint16_t, 10);

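/* Runs on the worker thread: select the read callback matching the input
 * bit depth and call libvmaf's compute_vmaf(), which keeps pulling frames
 * through that callback until it reports end of stream. */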
static void compute_vmaf_score(LIBVMAFContext *s)
{
    int (*read_frame)(float *ref_data, float *main_data, float *temp_data,
                      int stride, void *ctx);
    char *format;

    if (s->desc->comp[0].depth <= 8) {
        read_frame = read_frame_8bit;
    } else {
        read_frame = read_frame_10bit;
    }

    format = (char *) s->desc->name;

    s->error = compute_vmaf(&s->vmaf_score, format, s->width, s->height,
                            read_frame, s, s->model_path, s->log_path,
                            s->log_fmt, 0, 0, s->enable_transform,
                            s->phone_model, s->psnr, s->ssim,
                            s->ms_ssim, s->pool,
                            s->n_threads, s->n_subsample, s->enable_conf_interval);
}

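/* Worker thread entry point.  On success the pooled VMAF score is logged;
 * on failure the condition variable is signalled so that do_vmaf() does not
 * wait forever for the worker to consume a frame. */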
static void *call_vmaf(void *ctx)
{
    LIBVMAFContext *s = (LIBVMAFContext *) ctx;
    compute_vmaf_score(s);
    if (!s->error) {
        av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n", s->vmaf_score);
    } else {
        pthread_mutex_lock(&s->lock);
        pthread_cond_signal(&s->cond);
        pthread_mutex_unlock(&s->lock);
    }
    pthread_exit(NULL);
    return NULL;
}

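/* Framesync callback on the filter thread: wait until the worker has
 * consumed the previous pair, then publish references to the current
 * distorted (main) and reference frames and pass the main frame on. */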
static int do_vmaf(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    LIBVMAFContext *s = ctx->priv;
    AVFrame *master, *ref;
    int ret;

    ret = ff_framesync_dualinput_get(fs, &master, &ref);
    if (ret < 0)
        return ret;
    if (!ref)
        return ff_filter_frame(ctx->outputs[0], master);

    pthread_mutex_lock(&s->lock);

    while (s->frame_set && !s->error) {
        pthread_cond_wait(&s->cond, &s->lock);
    }

    if (s->error) {
        av_log(ctx, AV_LOG_ERROR,
               "libvmaf encountered an error, check log for details\n");
        pthread_mutex_unlock(&s->lock);
        return AVERROR(EINVAL);
    }

    av_frame_ref(s->gref, ref);
    av_frame_ref(s->gmain, master);

    s->frame_set = 1;

    pthread_cond_signal(&s->cond);
    pthread_mutex_unlock(&s->lock);

    return ff_filter_frame(ctx->outputs[0], master);
}

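/* Allocate the frames used for the hand-off and set up the synchronization
 * primitives; the worker thread itself is started in config_input_ref(). */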
static av_cold int init(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;

    s->gref = av_frame_alloc();
    s->gmain = av_frame_alloc();
    s->error = 0;

    s->vmaf_thread_created = 0;
    pthread_mutex_init(&s->lock, NULL);
    pthread_cond_init(&s->cond, NULL);

    s->fs.on_event = do_vmaf;
    return 0;
}

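/* Planar YUV at 8- and 10-bit depth, matching what the read_frame callbacks
 * above can convert. */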
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}


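/* Called for the "reference" input: verify that both inputs have matching
 * dimensions and pixel format, then start the libvmaf worker thread. */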
static int config_input_ref(AVFilterLink *inlink)
{
    AVFilterContext *ctx  = inlink->dst;
    LIBVMAFContext *s = ctx->priv;
    int th;

    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be the same.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "Inputs must be of the same pixel format.\n");
        return AVERROR(EINVAL);
    }

    s->desc = av_pix_fmt_desc_get(inlink->format);
    s->width = ctx->inputs[0]->w;
    s->height = ctx->inputs[0]->h;

    th = pthread_create(&s->vmaf_thread, NULL, call_vmaf, (void *) s);
    if (th) {
        av_log(ctx, AV_LOG_ERROR, "Thread creation failed.\n");
        return AVERROR(EINVAL);
    }
    s->vmaf_thread_created = 1;

    return 0;
}

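/* The output inherits its properties from the "main" input; this is also
 * where the dual-input framesync is configured. */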
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LIBVMAFContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    int ret;

    ret = ff_framesync_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    outlink->frame_rate = mainlink->frame_rate;
    if ((ret = ff_framesync_configure(&s->fs)) < 0)
        return ret;

    return 0;
}

static int activate(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

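/* Signal EOF so the read callback returns end-of-stream, join the worker
 * thread, then release the frames and synchronization primitives. */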
static av_cold void uninit(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;

    ff_framesync_uninit(&s->fs);

    pthread_mutex_lock(&s->lock);
    s->eof = 1;
    pthread_cond_signal(&s->cond);
    pthread_mutex_unlock(&s->lock);

    if (s->vmaf_thread_created) {
        pthread_join(s->vmaf_thread, NULL);
        s->vmaf_thread_created = 0;
    }

    av_frame_free(&s->gref);
    av_frame_free(&s->gmain);

    pthread_mutex_destroy(&s->lock);
    pthread_cond_destroy(&s->cond);
}

static const AVFilterPad libvmaf_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_VIDEO,
    },{
        .name         = "reference",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_ref,
    },
    { NULL }
};

static const AVFilterPad libvmaf_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_vf_libvmaf = {
    .name          = "libvmaf",
    .description   = NULL_IF_CONFIG_SMALL("Calculate the VMAF between two video streams."),
    .preinit       = libvmaf_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(LIBVMAFContext),
    .priv_class    = &libvmaf_class,
    .inputs        = libvmaf_inputs,
    .outputs       = libvmaf_outputs,
};