/*
 * Copyright (c) 2018 Sergey Lavrushkin
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Filter implementing image super-resolution using deep convolutional networks.
 * https://arxiv.org/abs/1501.00092
 * https://arxiv.org/abs/1609.05158
 */
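
/*
 * Illustrative usage (a sketch, not taken from the FFmpeg documentation; the
 * option names are the ones declared in sr_options below, and "espcn.model"
 * is a hypothetical model file path):
 *
 *     ffmpeg -i input.png -vf "sr=model=espcn:dnn_backend=native:model_filename=espcn.model" output.png
 *
 * If model_filename is omitted, init() falls back to a built-in network for
 * x2 upsampling.
 */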
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "libavutil/opt.h"
#include "libavformat/avio.h"
#include "libswscale/swscale.h"
#include "dnn_interface.h"
typedef enum {SRCNN, ESPCN} SRModel;

typedef struct SRContext {
    const AVClass *class;
    SRModel model_type;
    char *model_filename;
    DNNBackendType backend_type;
    DNNModule *dnn_module;
    DNNModel *model;
    DNNData input, output;
    int scale_factor;
    struct SwsContext *sws_contexts[3];
    int sws_slice_h, sws_input_linesize, sws_output_linesize;
} SRContext;
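
/*
 * Roles of the three SwsContexts (see config_props() and filter_frame()):
 * sws_contexts[0] performs the bicubic resize (the whole frame for SRCNN,
 * the chroma planes for ESPCN), sws_contexts[1] converts the 8-bit luma
 * plane to the float (GRAYF32) model input, and sws_contexts[2] converts
 * the float model output back to 8-bit.
 */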
#define OFFSET(x) offsetof(SRContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption sr_options[] = {
    { "model", "specifies what DNN model to use", OFFSET(model_type), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, 0, 1, FLAGS, "model_type" },
    { "srcnn", "Super-Resolution Convolutional Neural Network model (scale factor should be specified for custom SRCNN model)", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "model_type" },
    { "espcn", "Efficient Sub-Pixel Convolutional Neural Network model", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "model_type" },
    { "dnn_backend", "DNN backend used for model execution", OFFSET(backend_type), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, 0, 1, FLAGS, "backend" },
    { "native", "native backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "backend" },
#if (CONFIG_LIBTENSORFLOW == 1)
    { "tensorflow", "tensorflow backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "backend" },
#endif
    { "scale_factor", "scale factor for SRCNN model", OFFSET(scale_factor), AV_OPT_TYPE_INT, { .i64 = 2 }, 2, 4, FLAGS },
    { "model_filename", "path to model file specifying network architecture and its parameters", OFFSET(model_filename), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(sr);
static av_cold int init(AVFilterContext *context)
{
    SRContext *sr_context = context->priv;

    sr_context->dnn_module = ff_get_dnn_module(sr_context->backend_type);
    if (!sr_context->dnn_module){
        av_log(context, AV_LOG_ERROR, "could not create DNN module for requested backend\n");
        return AVERROR(ENOMEM);
    }
    if (!sr_context->model_filename){
        av_log(context, AV_LOG_VERBOSE, "model file for network was not specified, using default network for x2 upsampling\n");
        sr_context->scale_factor = 2;
        switch (sr_context->model_type){
        case SRCNN:
            sr_context->model = (sr_context->dnn_module->load_default_model)(DNN_SRCNN);
            break;
        case ESPCN:
            sr_context->model = (sr_context->dnn_module->load_default_model)(DNN_ESPCN);
        }
    } else {
        sr_context->model = (sr_context->dnn_module->load_model)(sr_context->model_filename);
    }
    if (!sr_context->model){
        av_log(context, AV_LOG_ERROR, "could not load DNN model\n");
        return AVERROR(EIO);
    }

    sr_context->sws_contexts[0] = NULL;
    sr_context->sws_contexts[1] = NULL;
    sr_context->sws_contexts[2] = NULL;

    return 0;
}
static int query_formats(AVFilterContext *context)
{
    const enum AVPixelFormat pixel_formats[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
                                                AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_GRAY8,
                                                AV_PIX_FMT_NONE};
    AVFilterFormats *formats_list;

    formats_list = ff_make_format_list(pixel_formats);
    if (!formats_list){
        av_log(context, AV_LOG_ERROR, "could not create formats list\n");
        return AVERROR(ENOMEM);
    }

    return ff_set_common_formats(context, formats_list);
}
static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *context = inlink->dst;
    SRContext *sr_context = context->priv;
    AVFilterLink *outlink = context->outputs[0];
    DNNReturnType result;
    int sws_src_h, sws_src_w, sws_dst_h, sws_dst_w;

    switch (sr_context->model_type){
    case SRCNN:
        /* SRCNN expects an already upscaled frame as network input */
        sr_context->input.width = inlink->w * sr_context->scale_factor;
        sr_context->input.height = inlink->h * sr_context->scale_factor;
        break;
    case ESPCN:
        /* ESPCN upscales inside the network, so the input keeps the source size */
        sr_context->input.width = inlink->w;
        sr_context->input.height = inlink->h;
    }
    sr_context->input.channels = 1;

    result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, &sr_context->output);
    if (result != DNN_SUCCESS){
        av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
        return AVERROR(EIO);
    }

    outlink->h = sr_context->output.height;
    outlink->w = sr_context->output.width;
    sr_context->sws_contexts[1] = sws_getContext(sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAY8,
                                                 sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAYF32,
                                                 0, NULL, NULL, NULL);
    sr_context->sws_input_linesize = sr_context->input.width << 2;
    sr_context->sws_contexts[2] = sws_getContext(sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAYF32,
                                                 sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAY8,
                                                 0, NULL, NULL, NULL);
    sr_context->sws_output_linesize = sr_context->output.width << 2;
    if (!sr_context->sws_contexts[1] || !sr_context->sws_contexts[2]){
        av_log(context, AV_LOG_ERROR, "could not create SwsContext for conversions\n");
        return AVERROR(ENOMEM);
    }
    switch (sr_context->model_type){
    case SRCNN:
        sr_context->sws_contexts[0] = sws_getContext(inlink->w, inlink->h, inlink->format,
                                                     outlink->w, outlink->h, outlink->format,
                                                     SWS_BICUBIC, NULL, NULL, NULL);
        if (!sr_context->sws_contexts[0]){
            av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling\n");
            return AVERROR(ENOMEM);
        }
        sr_context->sws_slice_h = inlink->h;
        break;
    case ESPCN:
        if (inlink->format != AV_PIX_FMT_GRAY8){
            sws_src_h = sr_context->input.height;
            sws_src_w = sr_context->input.width;
            sws_dst_h = sr_context->output.height;
            sws_dst_w = sr_context->output.width;

            /* reduce dimensions to the chroma plane size of the input pixel format */
            switch (inlink->format){
            case AV_PIX_FMT_YUV420P:
                sws_src_h = AV_CEIL_RSHIFT(sws_src_h, 1);
                sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 1);
                sws_dst_h = AV_CEIL_RSHIFT(sws_dst_h, 1);
                sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 1);
                break;
            case AV_PIX_FMT_YUV422P:
                sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 1);
                sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 1);
                break;
            case AV_PIX_FMT_YUV444P:
                break;
            case AV_PIX_FMT_YUV410P:
                sws_src_h = AV_CEIL_RSHIFT(sws_src_h, 2);
                sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 2);
                sws_dst_h = AV_CEIL_RSHIFT(sws_dst_h, 2);
                sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
                break;
            case AV_PIX_FMT_YUV411P:
                sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 2);
                sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
                break;
            default:
                av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling for given input pixel format\n");
                return AVERROR(EIO);
            }
            sr_context->sws_contexts[0] = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
                                                         sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
                                                         SWS_BICUBIC, NULL, NULL, NULL);
            if (!sr_context->sws_contexts[0]){
                av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling\n");
                return AVERROR(ENOMEM);
            }
            sr_context->sws_slice_h = sws_src_h;
        }
    }

    return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *context = inlink->dst;
    SRContext *sr_context = context->priv;
    AVFilterLink *outlink = context->outputs[0];
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    DNNReturnType dnn_result;

    if (!out){
        av_log(context, AV_LOG_ERROR, "could not allocate memory for output frame\n");
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);
    out->height = sr_context->output.height;
    out->width = sr_context->output.width;
    switch (sr_context->model_type){
    case SRCNN:
        /* bicubically upscale the whole frame, then convert the luma plane to the float model input */
        sws_scale(sr_context->sws_contexts[0], (const uint8_t **)in->data, in->linesize,
                  0, sr_context->sws_slice_h, out->data, out->linesize);

        sws_scale(sr_context->sws_contexts[1], (const uint8_t **)out->data, out->linesize,
                  0, out->height, (uint8_t * const*)(&sr_context->input.data), &sr_context->sws_input_linesize);
        break;
    case ESPCN:
        /* upscale the chroma planes with bicubic scaling; the luma plane goes through the network */
        if (sr_context->sws_contexts[0]){
            sws_scale(sr_context->sws_contexts[0], (const uint8_t **)(in->data + 1), in->linesize + 1,
                      0, sr_context->sws_slice_h, out->data + 1, out->linesize + 1);
            sws_scale(sr_context->sws_contexts[0], (const uint8_t **)(in->data + 2), in->linesize + 2,
                      0, sr_context->sws_slice_h, out->data + 2, out->linesize + 2);
        }

        sws_scale(sr_context->sws_contexts[1], (const uint8_t **)in->data, in->linesize,
                  0, in->height, (uint8_t * const*)(&sr_context->input.data), &sr_context->sws_input_linesize);
    }
    av_frame_free(&in);

    dnn_result = (sr_context->dnn_module->execute_model)(sr_context->model);
    if (dnn_result != DNN_SUCCESS){
        av_log(context, AV_LOG_ERROR, "failed to execute loaded model\n");
        av_frame_free(&out);
        return AVERROR(EIO);
    }

    /* convert the float model output back to the 8-bit luma plane of the output frame */
    sws_scale(sr_context->sws_contexts[2], (const uint8_t **)(&sr_context->output.data), &sr_context->sws_output_linesize,
              0, out->height, (uint8_t * const*)out->data, out->linesize);

    return ff_filter_frame(outlink, out);
}
static av_cold void uninit(AVFilterContext *context)
{
    int i;
    SRContext *sr_context = context->priv;

    if (sr_context->dnn_module){
        (sr_context->dnn_module->free_model)(&sr_context->model);
        av_freep(&sr_context->dnn_module);
    }

    for (i = 0; i < 3; ++i){
        if (sr_context->sws_contexts[i]){
            sws_freeContext(sr_context->sws_contexts[i]);
        }
    }
}
static const AVFilterPad sr_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad sr_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_sr = {
    .name          = "sr",
    .description   = NULL_IF_CONFIG_SMALL("Apply DNN-based image super resolution to the input."),
    .priv_size     = sizeof(SRContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = sr_inputs,
    .outputs       = sr_outputs,
    .priv_class    = &sr_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};