/*
 * Copyright (c) 2019 Guo Yejun
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * implementing a generic image processing filter using deep learning networks.
 */

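/*
 * Example invocation (a sketch; the model file and tensor names below are
 * illustrative placeholders, only the filter and option names come from the
 * AVOption table in this file):
 *
 *   ffmpeg -i input.png -vf \
 *     format=grayf32,dnn_processing=dnn_backend=native:model=example.model:input=x:output=y \
 *     output.png
 */
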
#include "libavformat/avio.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "filters.h"
#include "dnn_interface.h"
#include "formats.h"
#include "internal.h"
#include "libswscale/swscale.h"

typedef struct DnnProcessingContext {
    const AVClass *class;

    char *model_filename;
    DNNBackendType backend_type;
    char *model_inputname;
    char *model_outputname;
    char *backend_options;
    int async;

    DNNModule *dnn_module;
    DNNModel *model;

    struct SwsContext *sws_uv_scale;
    int sws_uv_height;
} DnnProcessingContext;

#define OFFSET(x) offsetof(DnnProcessingContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption dnn_processing_options[] = {
    { "dnn_backend", "DNN backend",              OFFSET(backend_type),     AV_OPT_TYPE_INT,    { .i64 = 0 },    INT_MIN, INT_MAX, FLAGS, "backend" },
    { "native",      "native backend flag",      0,                        AV_OPT_TYPE_CONST,  { .i64 = 0 },    0, 0, FLAGS, "backend" },
#if (CONFIG_LIBTENSORFLOW == 1)
    { "tensorflow",  "tensorflow backend flag",  0,                        AV_OPT_TYPE_CONST,  { .i64 = 1 },    0, 0, FLAGS, "backend" },
#endif
#if (CONFIG_LIBOPENVINO == 1)
    { "openvino",    "openvino backend flag",    0,                        AV_OPT_TYPE_CONST,  { .i64 = 2 },    0, 0, FLAGS, "backend" },
#endif
    { "model",       "path to model file",       OFFSET(model_filename),   AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "input",       "input name of the model",  OFFSET(model_inputname),  AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "output",      "output name of the model", OFFSET(model_outputname), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "options",     "backend options",          OFFSET(backend_options),  AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "async",       "use DNN async inference",  OFFSET(async),            AV_OPT_TYPE_BOOL,   { .i64 = 1 },    0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(dnn_processing);

static av_cold int init(AVFilterContext *context)
{
    DnnProcessingContext *ctx = context->priv;

    if (!ctx->model_filename) {
        av_log(ctx, AV_LOG_ERROR, "model file for network is not specified\n");
        return AVERROR(EINVAL);
    }
    if (!ctx->model_inputname) {
        av_log(ctx, AV_LOG_ERROR, "input name of the model network is not specified\n");
        return AVERROR(EINVAL);
    }
    if (!ctx->model_outputname) {
        av_log(ctx, AV_LOG_ERROR, "output name of the model network is not specified\n");
        return AVERROR(EINVAL);
    }

    ctx->dnn_module = ff_get_dnn_module(ctx->backend_type);
    if (!ctx->dnn_module) {
        av_log(ctx, AV_LOG_ERROR, "could not create DNN module for requested backend\n");
        return AVERROR(ENOMEM);
    }
    if (!ctx->dnn_module->load_model) {
        av_log(ctx, AV_LOG_ERROR, "load_model for network is not specified\n");
        return AVERROR(EINVAL);
    }

    ctx->model = (ctx->dnn_module->load_model)(ctx->model_filename, ctx->backend_options, context);
    if (!ctx->model) {
        av_log(ctx, AV_LOG_ERROR, "could not load DNN model\n");
        return AVERROR(EINVAL);
    }

    if (!ctx->dnn_module->execute_model_async && ctx->async) {
        ctx->async = 0;
        av_log(ctx, AV_LOG_WARNING, "this backend does not support async execution, roll back to sync.\n");
    }

    return 0;
}

static int query_formats(AVFilterContext *context)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAYF32,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_NV12,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    return ff_set_common_formats(context, fmts_list);
}

#define LOG_FORMAT_CHANNEL_MISMATCH()                       \
    av_log(ctx, AV_LOG_ERROR,                               \
           "the frame's format %s does not match "          \
           "the model input channel %d\n",                  \
           av_get_pix_fmt_name(fmt),                        \
           model_input->channels);

static int check_modelinput_inlink(const DNNData *model_input, const AVFilterLink *inlink)
{
    AVFilterContext *ctx   = inlink->dst;
    enum AVPixelFormat fmt = inlink->format;

    // the design is to add an explicit scale filter before this filter
    if (model_input->height != -1 && model_input->height != inlink->h) {
        av_log(ctx, AV_LOG_ERROR, "the model requires frame height %d but got %d\n",
                                   model_input->height, inlink->h);
        return AVERROR(EIO);
    }
    if (model_input->width != -1 && model_input->width != inlink->w) {
        av_log(ctx, AV_LOG_ERROR, "the model requires frame width %d but got %d\n",
                                   model_input->width, inlink->w);
        return AVERROR(EIO);
    }
    if (model_input->dt != DNN_FLOAT) {
        av_log(ctx, AV_LOG_ERROR, "only support dnn models with input data type as float32.\n");
        return AVERROR(EIO);
    }

    switch (fmt) {
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_BGR24:
        if (model_input->channels != 3) {
            LOG_FORMAT_CHANNEL_MISMATCH();
            return AVERROR(EIO);
        }
        return 0;
    case AV_PIX_FMT_GRAYF32:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_NV12:
        if (model_input->channels != 1) {
            LOG_FORMAT_CHANNEL_MISMATCH();
            return AVERROR(EIO);
        }
        return 0;
    default:
        av_log(ctx, AV_LOG_ERROR, "%s not supported.\n", av_get_pix_fmt_name(fmt));
        return AVERROR(EIO);
    }

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *context  = inlink->dst;
    DnnProcessingContext *ctx = context->priv;
    DNNReturnType result;
    DNNData model_input;

    result = ctx->model->get_input(ctx->model->model, &model_input, ctx->model_inputname);
    if (result != DNN_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "could not get input from the model\n");
        return AVERROR(EIO);
    }

    return check_modelinput_inlink(&model_input, inlink);
}

static av_always_inline int isPlanarYUV(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    return !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components == 3;
}

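/*
 * Note: the DNN model processes only the Y plane of planar YUV input; the U/V
 * planes bypass the network and are either copied unchanged or rescaled with
 * swscale when the model changes the frame size (see prepare_uv_scale() and
 * copy_uv_planes() below).
 */
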
static int prepare_uv_scale(AVFilterLink *outlink)
{
    AVFilterContext *context  = outlink->src;
    DnnProcessingContext *ctx = context->priv;
    AVFilterLink *inlink      = context->inputs[0];
    enum AVPixelFormat fmt    = inlink->format;

    if (isPlanarYUV(fmt)) {
        if (inlink->w != outlink->w || inlink->h != outlink->h) {
            if (fmt == AV_PIX_FMT_NV12) {
                // NV12 stores U and V interleaved in a single plane, so treat it
                // as a two-channel YA8 surface and scale both channels in one pass
                ctx->sws_uv_scale = sws_getContext(inlink->w >> 1, inlink->h >> 1, AV_PIX_FMT_YA8,
                                                   outlink->w >> 1, outlink->h >> 1, AV_PIX_FMT_YA8,
                                                   SWS_BICUBIC, NULL, NULL, NULL);
                ctx->sws_uv_height = inlink->h >> 1;
            } else {
                const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
                int sws_src_h = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
                int sws_src_w = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
                int sws_dst_h = AV_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h);
                int sws_dst_w = AV_CEIL_RSHIFT(outlink->w, desc->log2_chroma_w);
                ctx->sws_uv_scale = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
                                                   sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
                                                   SWS_BICUBIC, NULL, NULL, NULL);
                ctx->sws_uv_height = sws_src_h;
            }
        }
    }

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *context  = outlink->src;
    DnnProcessingContext *ctx = context->priv;
    DNNReturnType result;
    AVFilterLink *inlink = context->inputs[0];

    // do a trial run in case the dnn model resizes the frame
    result = ctx->model->get_output(ctx->model->model, ctx->model_inputname, inlink->w, inlink->h,
                                    ctx->model_outputname, &outlink->w, &outlink->h);
    if (result != DNN_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "could not get output from the model\n");
        return AVERROR(EIO);
    }

    prepare_uv_scale(outlink);

    return 0;
}

static int copy_uv_planes(DnnProcessingContext *ctx, AVFrame *out, const AVFrame *in)
{
    const AVPixFmtDescriptor *desc;
    int uv_height;

    if (!ctx->sws_uv_scale) {
        av_assert0(in->height == out->height && in->width == out->width);
        desc = av_pix_fmt_desc_get(in->format);
        uv_height = AV_CEIL_RSHIFT(in->height, desc->log2_chroma_h);
        for (int i = 1; i < 3; ++i) {
            int bytewidth = av_image_get_linesize(in->format, in->width, i);
            av_image_copy_plane(out->data[i], out->linesize[i],
                                in->data[i], in->linesize[i],
                                bytewidth, uv_height);
        }
    } else if (in->format == AV_PIX_FMT_NV12) {
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 1), in->linesize + 1,
                  0, ctx->sws_uv_height, out->data + 1, out->linesize + 1);
    } else {
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 1), in->linesize + 1,
                  0, ctx->sws_uv_height, out->data + 1, out->linesize + 1);
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 2), in->linesize + 2,
                  0, ctx->sws_uv_height, out->data + 2, out->linesize + 2);
    }

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *context  = inlink->dst;
    AVFilterLink *outlink     = context->outputs[0];
    DnnProcessingContext *ctx = context->priv;
    DNNReturnType dnn_result;
    AVFrame *out;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    dnn_result = (ctx->dnn_module->execute_model)(ctx->model, ctx->model_inputname, in,
                                                  (const char **)&ctx->model_outputname, 1, out);
    if (dnn_result != DNN_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "failed to execute model\n");
        av_frame_free(&in);
        av_frame_free(&out);
        return AVERROR(EIO);
    }

    if (isPlanarYUV(in->format))
        copy_uv_planes(ctx, out, in);

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

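/*
 * Two activate() paths follow: activate_sync() runs the model synchronously
 * per frame through filter_frame(), while activate_async() submits frames via
 * execute_model_async() and drains completed results with get_async_result().
 * init() falls back to the sync path when the backend lacks async support.
 */
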
static int activate_sync(AVFilterContext *filter_ctx)
{
    AVFilterLink *inlink  = filter_ctx->inputs[0];
    AVFilterLink *outlink = filter_ctx->outputs[0];
    AVFrame *in = NULL;
    int64_t pts;
    int ret, status;
    int got_frame = 0;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    do {
        // drain all input frames
        ret = ff_inlink_consume_frame(inlink, &in);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            ret = filter_frame(inlink, in);
            if (ret < 0)
                return ret;
            got_frame = 1;
        }
    } while (ret > 0);

    // if we got a frame, schedule the next filter
    if (got_frame)
        return 0;

    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            ff_outlink_set_status(outlink, status, pts);
            return ret;
        }
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

static int activate_async(AVFilterContext *filter_ctx)
{
    AVFilterLink *inlink  = filter_ctx->inputs[0];
    AVFilterLink *outlink = filter_ctx->outputs[0];
    DnnProcessingContext *ctx = (DnnProcessingContext *)filter_ctx->priv;
    AVFrame *in = NULL, *out = NULL;
    int64_t pts;
    int ret, status;
    int got_frame = 0;
    int async_state;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    do {
        // drain all input frames
        ret = ff_inlink_consume_frame(inlink, &in);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!out) {
                av_frame_free(&in);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(out, in);
            if ((ctx->dnn_module->execute_model_async)(ctx->model, ctx->model_inputname, in,
                                                       (const char **)&ctx->model_outputname, 1, out) != DNN_SUCCESS) {
                return FFERROR_NOT_READY;
            }
        }
    } while (ret > 0);

    // drain all processed frames
    do {
        AVFrame *in_frame = NULL;
        AVFrame *out_frame = NULL;
        async_state = (ctx->dnn_module->get_async_result)(ctx->model, &in_frame, &out_frame);
        if (out_frame) {
            if (isPlanarYUV(in_frame->format))
                copy_uv_planes(ctx, out_frame, in_frame);
            av_frame_free(&in_frame);
            ret = ff_filter_frame(outlink, out_frame);
            if (ret < 0)
                return ret;
            got_frame = 1;
        }
    } while (async_state == DAST_SUCCESS);

    // if we got a frame, schedule the next filter
    if (got_frame)
        return 0;

    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            ff_outlink_set_status(outlink, status, pts);
            return ret;
        }
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

static int activate(AVFilterContext *filter_ctx)
{
    DnnProcessingContext *ctx = filter_ctx->priv;

    if (ctx->async)
        return activate_async(filter_ctx);
    else
        return activate_sync(filter_ctx);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    DnnProcessingContext *context = ctx->priv;

    sws_freeContext(context->sws_uv_scale);

    if (context->dnn_module)
        (context->dnn_module->free_model)(&context->model);

    av_freep(&context->dnn_module);
}

static const AVFilterPad dnn_processing_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad dnn_processing_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_dnn_processing = {
    .name          = "dnn_processing",
    .description   = NULL_IF_CONFIG_SMALL("Apply DNN processing filter to the input."),
    .priv_size     = sizeof(DnnProcessingContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = dnn_processing_inputs,
    .outputs       = dnn_processing_outputs,
    .priv_class    = &dnn_processing_class,
    .activate      = activate,
};