/*
 * Copyright (c) 2018 Sergey Lavrushkin
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DNN native backend implementation.
 */
26 #include "dnn_backend_native.h"
27 #include "dnn_srcnn.h"
28 #include "dnn_espcn.h"
29 #include "libavformat/avio.h"
31 typedef enum {INPUT, CONV, DEPTH_TO_SPACE} LayerType;
33 typedef enum {RELU, TANH, SIGMOID} ActivationFunc;
41 typedef struct ConvolutionalParams{
42 int32_t input_num, output_num, kernel_size;
43 ActivationFunc activation;
46 } ConvolutionalParams;
48 typedef struct InputParams{
49 int height, width, channels;
52 typedef struct DepthToSpaceParams{
56 // Represents simple feed-forward convolutional network.
57 typedef struct ConvolutionalNetwork{
60 } ConvolutionalNetwork;
62 static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNData* output)
64 ConvolutionalNetwork* network = (ConvolutionalNetwork*)model;
65 InputParams* input_params;
66 ConvolutionalParams* conv_params;
67 DepthToSpaceParams* depth_to_space_params;
68 int cur_width, cur_height, cur_channels;
71 if (network->layers_num <= 0 || network->layers[0].type != INPUT){
75 input_params = (InputParams*)network->layers[0].params;
76 input_params->width = cur_width = input->width;
77 input_params->height = cur_height = input->height;
78 input_params->channels = cur_channels = input->channels;
80 av_freep(&input->data);
82 network->layers[0].output = input->data = av_malloc(cur_height * cur_width * cur_channels * sizeof(float));
83 if (!network->layers[0].output){
88 for (layer = 1; layer < network->layers_num; ++layer){
89 switch (network->layers[layer].type){
91 conv_params = (ConvolutionalParams*)network->layers[layer].params;
92 if (conv_params->input_num != cur_channels){
95 cur_channels = conv_params->output_num;
98 depth_to_space_params = (DepthToSpaceParams*)network->layers[layer].params;
99 if (cur_channels % (depth_to_space_params->block_size * depth_to_space_params->block_size) != 0){
102 cur_channels = cur_channels / (depth_to_space_params->block_size * depth_to_space_params->block_size);
103 cur_height *= depth_to_space_params->block_size;
104 cur_width *= depth_to_space_params->block_size;
109 if (network->layers[layer].output){
110 av_freep(&network->layers[layer].output);
112 network->layers[layer].output = av_malloc(cur_height * cur_width * cur_channels * sizeof(float));
113 if (!network->layers[layer].output){
118 output->data = network->layers[network->layers_num - 1].output;
119 output->height = cur_height;
120 output->width = cur_width;
121 output->channels = cur_channels;
126 // Loads model and its parameters that are stored in a binary file with following structure:
// layers_num,layer_type,layer_parameters,layer_type,layer_parameters...
128 // For CONV layer: activation_function, input_num, output_num, kernel_size, kernel, biases
129 // For DEPTH_TO_SPACE layer: block_size
130 DNNModel* ff_dnn_load_model_native(const char* model_filename)
132 DNNModel* model = NULL;
133 ConvolutionalNetwork* network = NULL;
134 AVIOContext* model_file_context;
135 int file_size, dnn_size, kernel_size, i;
137 LayerType layer_type;
138 ConvolutionalParams* conv_params;
139 DepthToSpaceParams* depth_to_space_params;
141 model = av_malloc(sizeof(DNNModel));
146 if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
150 file_size = avio_size(model_file_context);
152 network = av_malloc(sizeof(ConvolutionalNetwork));
154 avio_closep(&model_file_context);
158 model->model = (void*)network;
160 network->layers_num = 1 + (int32_t)avio_rl32(model_file_context);
163 network->layers = av_malloc(network->layers_num * sizeof(Layer));
164 if (!network->layers){
166 avio_closep(&model_file_context);
171 for (layer = 0; layer < network->layers_num; ++layer){
172 network->layers[layer].output = NULL;
173 network->layers[layer].params = NULL;
175 network->layers[0].type = INPUT;
176 network->layers[0].params = av_malloc(sizeof(InputParams));
177 if (!network->layers[0].params){
178 avio_closep(&model_file_context);
179 ff_dnn_free_model_native(&model);
183 for (layer = 1; layer < network->layers_num; ++layer){
184 layer_type = (int32_t)avio_rl32(model_file_context);
188 conv_params = av_malloc(sizeof(ConvolutionalParams));
190 avio_closep(&model_file_context);
191 ff_dnn_free_model_native(&model);
194 conv_params->activation = (int32_t)avio_rl32(model_file_context);
195 conv_params->input_num = (int32_t)avio_rl32(model_file_context);
196 conv_params->output_num = (int32_t)avio_rl32(model_file_context);
197 conv_params->kernel_size = (int32_t)avio_rl32(model_file_context);
198 kernel_size = conv_params->input_num * conv_params->output_num *
199 conv_params->kernel_size * conv_params->kernel_size;
200 dnn_size += 16 + (kernel_size + conv_params->output_num << 2);
201 if (dnn_size > file_size || conv_params->input_num <= 0 ||
202 conv_params->output_num <= 0 || conv_params->kernel_size <= 0){
203 avio_closep(&model_file_context);
204 ff_dnn_free_model_native(&model);
207 conv_params->kernel = av_malloc(kernel_size * sizeof(float));
208 conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
209 if (!conv_params->kernel || !conv_params->biases){
210 avio_closep(&model_file_context);
211 ff_dnn_free_model_native(&model);
214 for (i = 0; i < kernel_size; ++i){
215 conv_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
217 for (i = 0; i < conv_params->output_num; ++i){
218 conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
220 network->layers[layer].type = CONV;
221 network->layers[layer].params = conv_params;
224 depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
225 if (!depth_to_space_params){
226 avio_closep(&model_file_context);
227 ff_dnn_free_model_native(&model);
230 depth_to_space_params->block_size = (int32_t)avio_rl32(model_file_context);
232 network->layers[layer].type = DEPTH_TO_SPACE;
233 network->layers[layer].params = depth_to_space_params;
236 avio_closep(&model_file_context);
237 ff_dnn_free_model_native(&model);
242 avio_closep(&model_file_context);
244 if (dnn_size != file_size){
245 ff_dnn_free_model_native(&model);
249 model->set_input_output = &set_input_output_native;
254 static int set_up_conv_layer(Layer* layer, const float* kernel, const float* biases, ActivationFunc activation,
255 int32_t input_num, int32_t output_num, int32_t size)
257 ConvolutionalParams* conv_params;
260 conv_params = av_malloc(sizeof(ConvolutionalParams));
264 conv_params->activation = activation;
265 conv_params->input_num = input_num;
266 conv_params->output_num = output_num;
267 conv_params->kernel_size = size;
268 kernel_size = input_num * output_num * size * size;
269 conv_params->kernel = av_malloc(kernel_size * sizeof(float));
270 conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
271 if (!conv_params->kernel || !conv_params->biases){
272 av_freep(&conv_params->kernel);
273 av_freep(&conv_params->biases);
274 av_freep(&conv_params);
277 memcpy(conv_params->kernel, kernel, kernel_size * sizeof(float));
278 memcpy(conv_params->biases, biases, output_num * sizeof(float));
280 layer->params = conv_params;
285 DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel model_type)
287 DNNModel* model = NULL;
288 ConvolutionalNetwork* network = NULL;
289 DepthToSpaceParams* depth_to_space_params;
292 model = av_malloc(sizeof(DNNModel));
297 network = av_malloc(sizeof(ConvolutionalNetwork));
302 model->model = (void*)network;
306 network->layers_num = 4;
309 network->layers_num = 5;
317 network->layers = av_malloc(network->layers_num * sizeof(Layer));
318 if (!network->layers){
324 for (layer = 0; layer < network->layers_num; ++layer){
325 network->layers[layer].output = NULL;
326 network->layers[layer].params = NULL;
328 network->layers[0].type = INPUT;
329 network->layers[0].params = av_malloc(sizeof(InputParams));
330 if (!network->layers[0].params){
331 ff_dnn_free_model_native(&model);
337 if (set_up_conv_layer(network->layers + 1, srcnn_conv1_kernel, srcnn_conv1_biases, RELU, 1, 64, 9) != DNN_SUCCESS ||
338 set_up_conv_layer(network->layers + 2, srcnn_conv2_kernel, srcnn_conv2_biases, RELU, 64, 32, 1) != DNN_SUCCESS ||
339 set_up_conv_layer(network->layers + 3, srcnn_conv3_kernel, srcnn_conv3_biases, RELU, 32, 1, 5) != DNN_SUCCESS){
340 ff_dnn_free_model_native(&model);
345 if (set_up_conv_layer(network->layers + 1, espcn_conv1_kernel, espcn_conv1_biases, TANH, 1, 64, 5) != DNN_SUCCESS ||
346 set_up_conv_layer(network->layers + 2, espcn_conv2_kernel, espcn_conv2_biases, TANH, 64, 32, 3) != DNN_SUCCESS ||
347 set_up_conv_layer(network->layers + 3, espcn_conv3_kernel, espcn_conv3_biases, SIGMOID, 32, 4, 3) != DNN_SUCCESS){
348 ff_dnn_free_model_native(&model);
351 network->layers[4].type = DEPTH_TO_SPACE;
352 depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
353 if (!depth_to_space_params){
354 ff_dnn_free_model_native(&model);
357 depth_to_space_params->block_size = 2;
358 network->layers[4].params = depth_to_space_params;
361 model->set_input_output = &set_input_output_native;
366 #define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
368 static void convolve(const float* input, float* output, const ConvolutionalParams* conv_params, int width, int height)
370 int y, x, n_filter, ch, kernel_y, kernel_x;
371 int radius = conv_params->kernel_size >> 1;
372 int src_linesize = width * conv_params->input_num;
373 int filter_linesize = conv_params->kernel_size * conv_params->input_num;
374 int filter_size = conv_params->kernel_size * filter_linesize;
376 for (y = 0; y < height; ++y){
377 for (x = 0; x < width; ++x){
378 for (n_filter = 0; n_filter < conv_params->output_num; ++n_filter){
379 output[n_filter] = conv_params->biases[n_filter];
380 for (ch = 0; ch < conv_params->input_num; ++ch){
381 for (kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y){
382 for (kernel_x = 0; kernel_x < conv_params->kernel_size; ++kernel_x){
383 output[n_filter] += input[CLAMP_TO_EDGE(y + kernel_y - radius, height) * src_linesize +
384 CLAMP_TO_EDGE(x + kernel_x - radius, width) * conv_params->input_num + ch] *
385 conv_params->kernel[n_filter * filter_size + kernel_y * filter_linesize +
386 kernel_x * conv_params->input_num + ch];
390 switch (conv_params->activation){
392 output[n_filter] = FFMAX(output[n_filter], 0.0);
395 output[n_filter] = 2.0f / (1.0f + exp(-2.0f * output[n_filter])) - 1.0f;
398 output[n_filter] = 1.0f / (1.0f + exp(-output[n_filter]));
401 output += conv_params->output_num;
406 static void depth_to_space(const float* input, float* output, int block_size, int width, int height, int channels)
408 int y, x, by, bx, ch;
409 int new_channels = channels / (block_size * block_size);
410 int output_linesize = width * channels;
411 int by_linesize = output_linesize / block_size;
412 int x_linesize = new_channels * block_size;
414 for (y = 0; y < height; ++y){
415 for (x = 0; x < width; ++x){
416 for (by = 0; by < block_size; ++by){
417 for (bx = 0; bx < block_size; ++bx){
418 for (ch = 0; ch < new_channels; ++ch){
419 output[by * by_linesize + x * x_linesize + bx * new_channels + ch] = input[ch];
421 input += new_channels;
425 output += output_linesize;
429 DNNReturnType ff_dnn_execute_model_native(const DNNModel* model)
431 ConvolutionalNetwork* network = (ConvolutionalNetwork*)model->model;
432 int cur_width, cur_height, cur_channels;
434 InputParams* input_params;
435 ConvolutionalParams* conv_params;
436 DepthToSpaceParams* depth_to_space_params;
438 if (network->layers_num <= 0 || network->layers[0].type != INPUT || !network->layers[0].output){
442 input_params = (InputParams*)network->layers[0].params;
443 cur_width = input_params->width;
444 cur_height = input_params->height;
445 cur_channels = input_params->channels;
448 for (layer = 1; layer < network->layers_num; ++layer){
449 if (!network->layers[layer].output){
452 switch (network->layers[layer].type){
454 conv_params = (ConvolutionalParams*)network->layers[layer].params;
455 convolve(network->layers[layer - 1].output, network->layers[layer].output, conv_params, cur_width, cur_height);
456 cur_channels = conv_params->output_num;
459 depth_to_space_params = (DepthToSpaceParams*)network->layers[layer].params;
460 depth_to_space(network->layers[layer - 1].output, network->layers[layer].output,
461 depth_to_space_params->block_size, cur_width, cur_height, cur_channels);
462 cur_height *= depth_to_space_params->block_size;
463 cur_width *= depth_to_space_params->block_size;
464 cur_channels /= depth_to_space_params->block_size * depth_to_space_params->block_size;
474 void ff_dnn_free_model_native(DNNModel** model)
476 ConvolutionalNetwork* network;
477 ConvolutionalParams* conv_params;
482 network = (ConvolutionalNetwork*)(*model)->model;
483 for (layer = 0; layer < network->layers_num; ++layer){
484 av_freep(&network->layers[layer].output);
485 if (network->layers[layer].type == CONV){
486 conv_params = (ConvolutionalParams*)network->layers[layer].params;
487 av_freep(&conv_params->kernel);
488 av_freep(&conv_params->biases);
490 av_freep(&network->layers[layer].params);