/*
 * Copyright (c) 2019 Guo Yejun
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * implementing a generic image processing filter using deep learning networks.
 */

#include "libavformat/avio.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "filters.h"
#include "dnn_interface.h"
#include "formats.h"
#include "internal.h"
#include "libswscale/swscale.h"
#include "libavutil/time.h"

typedef struct DnnProcessingContext {
    const AVClass *class;

    char *model_filename;
    DNNBackendType backend_type;
    char *model_inputname;
    char *model_outputname;
    char *backend_options;
    int async;

    DNNModule *dnn_module;
    DNNModel *model;

    struct SwsContext *sws_uv_scale;
    int sws_uv_height;
} DnnProcessingContext;

#define OFFSET(x) offsetof(DnnProcessingContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption dnn_processing_options[] = {
    { "dnn_backend", "DNN backend",              OFFSET(backend_type),     AV_OPT_TYPE_INT,    { .i64 = 0 },    INT_MIN, INT_MAX, FLAGS, "backend" },
    { "native",      "native backend flag",      0,                        AV_OPT_TYPE_CONST,  { .i64 = 0 },    0, 0, FLAGS, "backend" },
#if (CONFIG_LIBTENSORFLOW == 1)
    { "tensorflow",  "tensorflow backend flag",  0,                        AV_OPT_TYPE_CONST,  { .i64 = 1 },    0, 0, FLAGS, "backend" },
#endif
#if (CONFIG_LIBOPENVINO == 1)
    { "openvino",    "openvino backend flag",    0,                        AV_OPT_TYPE_CONST,  { .i64 = 2 },    0, 0, FLAGS, "backend" },
#endif
    { "model",       "path to model file",       OFFSET(model_filename),   AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "input",       "input name of the model",  OFFSET(model_inputname),  AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "output",      "output name of the model", OFFSET(model_outputname), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "options",     "backend options",          OFFSET(backend_options),  AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "async",       "use DNN async inference",  OFFSET(async),            AV_OPT_TYPE_BOOL,   { .i64 = 1 },    0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(dnn_processing);

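/*
 * Example invocation (a sketch only: the model file and the tensor names
 * "dnn_in"/"dnn_out" are illustrative and depend on the model actually loaded):
 *
 *   ffmpeg -i input.jpg -vf dnn_processing=dnn_backend=native:model=example.model:input=dnn_in:output=dnn_out output.jpg
 */
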
static av_cold int init(AVFilterContext *context)
{
    DnnProcessingContext *ctx = context->priv;

    if (!ctx->model_filename) {
        av_log(ctx, AV_LOG_ERROR, "model file for network is not specified\n");
        return AVERROR(EINVAL);
    }
    if (!ctx->model_inputname) {
        av_log(ctx, AV_LOG_ERROR, "input name of the model network is not specified\n");
        return AVERROR(EINVAL);
    }
    if (!ctx->model_outputname) {
        av_log(ctx, AV_LOG_ERROR, "output name of the model network is not specified\n");
        return AVERROR(EINVAL);
    }

    ctx->dnn_module = ff_get_dnn_module(ctx->backend_type);
    if (!ctx->dnn_module) {
        av_log(ctx, AV_LOG_ERROR, "could not create DNN module for requested backend\n");
        return AVERROR(ENOMEM);
    }
    if (!ctx->dnn_module->load_model) {
        av_log(ctx, AV_LOG_ERROR, "load_model for network is not specified\n");
        return AVERROR(EINVAL);
    }

    ctx->model = (ctx->dnn_module->load_model)(ctx->model_filename, ctx->backend_options, context);
    if (!ctx->model) {
        av_log(ctx, AV_LOG_ERROR, "could not load DNN model\n");
        return AVERROR(EINVAL);
    }

    if (!ctx->dnn_module->execute_model_async && ctx->async) {
        ctx->async = 0;
        av_log(ctx, AV_LOG_WARNING, "this backend does not support async execution, falling back to sync.\n");
    }

#if !HAVE_PTHREAD_CANCEL
    if (ctx->async) {
        ctx->async = 0;
        av_log(ctx, AV_LOG_WARNING, "pthread is not supported, falling back to sync.\n");
    }
#endif

    return 0;
}

static int query_formats(AVFilterContext *context)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAYF32,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_NV12,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    return ff_set_common_formats(context, fmts_list);
}

#define LOG_FORMAT_CHANNEL_MISMATCH()                       \
    av_log(ctx, AV_LOG_ERROR,                               \
           "the frame's format %s does not match "          \
           "the model input channel %d\n",                  \
           av_get_pix_fmt_name(fmt),                        \
           model_input->channels);

static int check_modelinput_inlink(const DNNData *model_input, const AVFilterLink *inlink)
{
    AVFilterContext *ctx   = inlink->dst;
    enum AVPixelFormat fmt = inlink->format;

    // the design is to add an explicit scale filter before this filter
    if (model_input->height != -1 && model_input->height != inlink->h) {
        av_log(ctx, AV_LOG_ERROR, "the model requires frame height %d but got %d\n",
                                   model_input->height, inlink->h);
        return AVERROR(EIO);
    }
    if (model_input->width != -1 && model_input->width != inlink->w) {
        av_log(ctx, AV_LOG_ERROR, "the model requires frame width %d but got %d\n",
                                   model_input->width, inlink->w);
        return AVERROR(EIO);
    }
    if (model_input->dt != DNN_FLOAT) {
        avpriv_report_missing_feature(ctx, "data type rather than DNN_FLOAT");
        return AVERROR(EIO);
    }

    switch (fmt) {
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_BGR24:
        if (model_input->channels != 3) {
            LOG_FORMAT_CHANNEL_MISMATCH();
            return AVERROR(EIO);
        }
        return 0;
    case AV_PIX_FMT_GRAYF32:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_NV12:
        if (model_input->channels != 1) {
            LOG_FORMAT_CHANNEL_MISMATCH();
            return AVERROR(EIO);
        }
        return 0;
    default:
        avpriv_report_missing_feature(ctx, "%s", av_get_pix_fmt_name(fmt));
        return AVERROR(EIO);
    }
}

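/* Query the model for the shape and data type it expects on its named input
 * tensor, then validate the incoming link against it. */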
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *context = inlink->dst;
    DnnProcessingContext *ctx = context->priv;
    DNNReturnType result;
    DNNData model_input;
    int check;

    result = ctx->model->get_input(ctx->model->model, &model_input, ctx->model_inputname);
    if (result != DNN_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "could not get input from the model\n");
        return AVERROR(EIO);
    }
    check = check_modelinput_inlink(&model_input, inlink);
    return check;
}

static av_always_inline int isPlanarYUV(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components == 3;
}

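/* Only the Y plane is sent through the network for (semi-)planar YUV formats;
 * the chroma planes are copied as-is, or rescaled with swscale when the model
 * changes the frame size. prepare_uv_scale() sets up that scaler: the NV12
 * interleaved UV plane is scaled as a two-channel YA8 surface, other formats
 * scale U and V separately as GRAY8 planes. */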
static int prepare_uv_scale(AVFilterLink *outlink)
{
    AVFilterContext *context = outlink->src;
    DnnProcessingContext *ctx = context->priv;
    AVFilterLink *inlink = context->inputs[0];
    enum AVPixelFormat fmt = inlink->format;

    if (isPlanarYUV(fmt)) {
        if (inlink->w != outlink->w || inlink->h != outlink->h) {
            if (fmt == AV_PIX_FMT_NV12) {
                ctx->sws_uv_scale = sws_getContext(inlink->w >> 1, inlink->h >> 1, AV_PIX_FMT_YA8,
                                                   outlink->w >> 1, outlink->h >> 1, AV_PIX_FMT_YA8,
                                                   SWS_BICUBIC, NULL, NULL, NULL);
                ctx->sws_uv_height = inlink->h >> 1;
            } else {
                const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
                int sws_src_h = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
                int sws_src_w = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
                int sws_dst_h = AV_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h);
                int sws_dst_w = AV_CEIL_RSHIFT(outlink->w, desc->log2_chroma_w);
                ctx->sws_uv_scale = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
                                                   sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
                                                   SWS_BICUBIC, NULL, NULL, NULL);
                ctx->sws_uv_height = sws_src_h;
            }
        }
    }

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *context = outlink->src;
    DnnProcessingContext *ctx = context->priv;
    DNNReturnType result;
    AVFilterLink *inlink = context->inputs[0];

    // do a trial run in case the DNN model resizes the frame
    result = ctx->model->get_output(ctx->model->model, ctx->model_inputname, inlink->w, inlink->h,
                                    ctx->model_outputname, &outlink->w, &outlink->h);
    if (result != DNN_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "could not get output from the model\n");
        return AVERROR(EIO);
    }

    prepare_uv_scale(outlink);

    return 0;
}

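/* Fill the chroma planes of the output frame: a plain per-plane copy when the
 * frame size is unchanged, otherwise through the swscale context prepared in
 * prepare_uv_scale(). */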
static int copy_uv_planes(DnnProcessingContext *ctx, AVFrame *out, const AVFrame *in)
{
    const AVPixFmtDescriptor *desc;
    int uv_height;

    if (!ctx->sws_uv_scale) {
        av_assert0(in->height == out->height && in->width == out->width);
        desc = av_pix_fmt_desc_get(in->format);
        uv_height = AV_CEIL_RSHIFT(in->height, desc->log2_chroma_h);
        for (int i = 1; i < 3; ++i) {
            int bytewidth = av_image_get_linesize(in->format, in->width, i);
            av_image_copy_plane(out->data[i], out->linesize[i],
                                in->data[i], in->linesize[i],
                                bytewidth, uv_height);
        }
    } else if (in->format == AV_PIX_FMT_NV12) {
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 1), in->linesize + 1,
                  0, ctx->sws_uv_height, out->data + 1, out->linesize + 1);
    } else {
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 1), in->linesize + 1,
                  0, ctx->sws_uv_height, out->data + 1, out->linesize + 1);
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 2), in->linesize + 2,
                  0, ctx->sws_uv_height, out->data + 2, out->linesize + 2);
    }

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *context = inlink->dst;
    AVFilterLink *outlink = context->outputs[0];
    DnnProcessingContext *ctx = context->priv;
    DNNReturnType dnn_result;
    AVFrame *out;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    dnn_result = (ctx->dnn_module->execute_model)(ctx->model, ctx->model_inputname, in,
                                                  (const char **)&ctx->model_outputname, 1, out);
    if (dnn_result != DNN_SUCCESS){
        av_log(ctx, AV_LOG_ERROR, "failed to execute model\n");
        av_frame_free(&in);
        av_frame_free(&out);
        return AVERROR(EIO);
    }

    if (isPlanarYUV(in->format))
        copy_uv_planes(ctx, out, in);

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

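/* Synchronous activation: drain every input frame available on the link through
 * filter_frame(), then forward status (EOF) and frame-wanted requests. */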
static int activate_sync(AVFilterContext *filter_ctx)
{
    AVFilterLink *inlink = filter_ctx->inputs[0];
    AVFilterLink *outlink = filter_ctx->outputs[0];
    AVFrame *in = NULL;
    int64_t pts;
    int ret, status;
    int got_frame = 0;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    do {
        // drain all input frames
        ret = ff_inlink_consume_frame(inlink, &in);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            ret = filter_frame(inlink, in);
            if (ret < 0)
                return ret;
            got_frame = 1;
        }
    } while (ret > 0);

    // if frame got, schedule to next filter
    if (got_frame)
        return 0;

    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            ff_outlink_set_status(outlink, status, pts);
            return 0;
        }
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

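/* At EOF on the async path, flush the backend and push any still-pending
 * inference results downstream, reporting the pts of the last output frame. */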
static int flush_frame(AVFilterLink *outlink, int64_t pts, int64_t *out_pts)
{
    DnnProcessingContext *ctx = outlink->src->priv;
    int ret;
    DNNAsyncStatusType async_state;

    ret = (ctx->dnn_module->flush)(ctx->model);
    if (ret != DNN_SUCCESS) {
        return AVERROR(EIO);
    }

    do {
        AVFrame *in_frame = NULL;
        AVFrame *out_frame = NULL;
        async_state = (ctx->dnn_module->get_async_result)(ctx->model, &in_frame, &out_frame);
        if (out_frame) {
            if (isPlanarYUV(in_frame->format))
                copy_uv_planes(ctx, out_frame, in_frame);
            av_frame_free(&in_frame);
            ret = ff_filter_frame(outlink, out_frame);
            if (ret < 0)
                return ret;
            if (out_pts)
                *out_pts = out_frame->pts + pts;
        }
        av_usleep(5000);
    } while (async_state >= DAST_NOT_READY);

    return 0;
}

static int activate_async(AVFilterContext *filter_ctx)
{
    AVFilterLink *inlink = filter_ctx->inputs[0];
    AVFilterLink *outlink = filter_ctx->outputs[0];
    DnnProcessingContext *ctx = (DnnProcessingContext *)filter_ctx->priv;
    AVFrame *in = NULL, *out = NULL;
    int64_t pts;
    int ret, status;
    int got_frame = 0;
    DNNAsyncStatusType async_state;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    do {
        // drain all input frames
        ret = ff_inlink_consume_frame(inlink, &in);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!out) {
                av_frame_free(&in);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(out, in);
            if ((ctx->dnn_module->execute_model_async)(ctx->model, ctx->model_inputname, in,
                                                       (const char **)&ctx->model_outputname, 1, out) != DNN_SUCCESS) {
                return AVERROR(EIO);
            }
        }
    } while (ret > 0);

    // drain all processed frames
    do {
        AVFrame *in_frame = NULL;
        AVFrame *out_frame = NULL;
        async_state = (ctx->dnn_module->get_async_result)(ctx->model, &in_frame, &out_frame);
        if (out_frame) {
            if (isPlanarYUV(in_frame->format))
                copy_uv_planes(ctx, out_frame, in_frame);
            av_frame_free(&in_frame);
            ret = ff_filter_frame(outlink, out_frame);
            if (ret < 0)
                return ret;
            got_frame = 1;
        }
    } while (async_state == DAST_SUCCESS);

    // if frame got, schedule to next filter
    if (got_frame)
        return 0;

    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            int64_t out_pts = pts;
            ret = flush_frame(outlink, pts, &out_pts);
            ff_outlink_set_status(outlink, status, out_pts);
            return ret;
        }
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);
    return 0;
}

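/* Dispatch to the async or sync activation path; init() has already cleared the
 * async flag when the selected backend or the build cannot support it. */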
static int activate(AVFilterContext *filter_ctx)
{
    DnnProcessingContext *ctx = filter_ctx->priv;

    if (ctx->async)
        return activate_async(filter_ctx);
    else
        return activate_sync(filter_ctx);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    DnnProcessingContext *context = ctx->priv;

    sws_freeContext(context->sws_uv_scale);

    if (context->dnn_module)
        (context->dnn_module->free_model)(&context->model);

    av_freep(&context->dnn_module);
}

static const AVFilterPad dnn_processing_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad dnn_processing_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_dnn_processing = {
    .name          = "dnn_processing",
    .description   = NULL_IF_CONFIG_SMALL("Apply DNN processing filter to the input."),
    .priv_size     = sizeof(DnnProcessingContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = dnn_processing_inputs,
    .outputs       = dnn_processing_outputs,
    .priv_class    = &dnn_processing_class,
    .activate      = activate,
};