]> git.sesse.net Git - ffmpeg/blob - libavfilter/dnn_backend_native.c
libavfilter/dnn: add more data type support for dnn model input
[ffmpeg] / libavfilter / dnn_backend_native.c
1 /*
2  * Copyright (c) 2018 Sergey Lavrushkin
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * DNN native backend implementation.
24  */
25
#include <limits.h>
#include <stdint.h>

#include "dnn_backend_native.h"
#include "libavutil/avassert.h"
28
29 static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
30 {
31     ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
32     InputParams *input_params;
33     ConvolutionalParams *conv_params;
34     DepthToSpaceParams *depth_to_space_params;
35     int cur_width, cur_height, cur_channels;
36     int32_t layer;
37
38     if (network->layers_num <= 0 || network->layers[0].type != INPUT){
39         return DNN_ERROR;
40     }
41     else{
42         input_params = (InputParams *)network->layers[0].params;
43         input_params->width = cur_width = input->width;
44         input_params->height = cur_height = input->height;
45         input_params->channels = cur_channels = input->channels;
46         if (input->data){
47             av_freep(&input->data);
48         }
49         av_assert0(input->dt == DNN_FLOAT);
50         network->layers[0].output = input->data = av_malloc(cur_height * cur_width * cur_channels * sizeof(float));
51         if (!network->layers[0].output){
52             return DNN_ERROR;
53         }
54     }
55
56     for (layer = 1; layer < network->layers_num; ++layer){
57         switch (network->layers[layer].type){
58         case CONV:
59             conv_params = (ConvolutionalParams *)network->layers[layer].params;
60             if (conv_params->input_num != cur_channels){
61                 return DNN_ERROR;
62             }
63             cur_channels = conv_params->output_num;
64             break;
65         case DEPTH_TO_SPACE:
66             depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
67             if (cur_channels % (depth_to_space_params->block_size * depth_to_space_params->block_size) != 0){
68                 return DNN_ERROR;
69             }
70             cur_channels = cur_channels / (depth_to_space_params->block_size * depth_to_space_params->block_size);
71             cur_height *= depth_to_space_params->block_size;
72             cur_width *= depth_to_space_params->block_size;
73             break;
74         default:
75             return DNN_ERROR;
76         }
77         if (network->layers[layer].output){
78             av_freep(&network->layers[layer].output);
79         }
80         network->layers[layer].output = av_malloc(cur_height * cur_width * cur_channels * sizeof(float));
81         if (!network->layers[layer].output){
82             return DNN_ERROR;
83         }
84     }
85
86     return DNN_SUCCESS;
87 }
88
89 // Loads model and its parameters that are stored in a binary file with following structure:
90 // layers_num,layer_type,layer_parameterss,layer_type,layer_parameters...
91 // For CONV layer: activation_function, input_num, output_num, kernel_size, kernel, biases
92 // For DEPTH_TO_SPACE layer: block_size
93 DNNModel *ff_dnn_load_model_native(const char *model_filename)
94 {
95     DNNModel *model = NULL;
96     ConvolutionalNetwork *network = NULL;
97     AVIOContext *model_file_context;
98     int file_size, dnn_size, kernel_size, i;
99     int32_t layer;
100     DNNLayerType layer_type;
101     ConvolutionalParams *conv_params;
102     DepthToSpaceParams *depth_to_space_params;
103
104     model = av_malloc(sizeof(DNNModel));
105     if (!model){
106         return NULL;
107     }
108
109     if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
110         av_freep(&model);
111         return NULL;
112     }
113     file_size = avio_size(model_file_context);
114
115     network = av_malloc(sizeof(ConvolutionalNetwork));
116     if (!network){
117         avio_closep(&model_file_context);
118         av_freep(&model);
119         return NULL;
120     }
121     model->model = (void *)network;
122
123     network->layers_num = 1 + (int32_t)avio_rl32(model_file_context);
124     dnn_size = 4;
125
126     network->layers = av_malloc(network->layers_num * sizeof(Layer));
127     if (!network->layers){
128         av_freep(&network);
129         avio_closep(&model_file_context);
130         av_freep(&model);
131         return NULL;
132     }
133
134     for (layer = 0; layer < network->layers_num; ++layer){
135         network->layers[layer].output = NULL;
136         network->layers[layer].params = NULL;
137     }
138     network->layers[0].type = INPUT;
139     network->layers[0].params = av_malloc(sizeof(InputParams));
140     if (!network->layers[0].params){
141         avio_closep(&model_file_context);
142         ff_dnn_free_model_native(&model);
143         return NULL;
144     }
145
146     for (layer = 1; layer < network->layers_num; ++layer){
147         layer_type = (int32_t)avio_rl32(model_file_context);
148         dnn_size += 4;
149         switch (layer_type){
150         case CONV:
151             conv_params = av_malloc(sizeof(ConvolutionalParams));
152             if (!conv_params){
153                 avio_closep(&model_file_context);
154                 ff_dnn_free_model_native(&model);
155                 return NULL;
156             }
157             conv_params->activation = (int32_t)avio_rl32(model_file_context);
158             conv_params->input_num = (int32_t)avio_rl32(model_file_context);
159             conv_params->output_num = (int32_t)avio_rl32(model_file_context);
160             conv_params->kernel_size = (int32_t)avio_rl32(model_file_context);
161             kernel_size = conv_params->input_num * conv_params->output_num *
162                           conv_params->kernel_size * conv_params->kernel_size;
163             dnn_size += 16 + (kernel_size + conv_params->output_num << 2);
164             if (dnn_size > file_size || conv_params->input_num <= 0 ||
165                 conv_params->output_num <= 0 || conv_params->kernel_size <= 0){
166                 avio_closep(&model_file_context);
167                 ff_dnn_free_model_native(&model);
168                 return NULL;
169             }
170             conv_params->kernel = av_malloc(kernel_size * sizeof(float));
171             conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
172             if (!conv_params->kernel || !conv_params->biases){
173                 avio_closep(&model_file_context);
174                 ff_dnn_free_model_native(&model);
175                 return NULL;
176             }
177             for (i = 0; i < kernel_size; ++i){
178                 conv_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
179             }
180             for (i = 0; i < conv_params->output_num; ++i){
181                 conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
182             }
183             network->layers[layer].type = CONV;
184             network->layers[layer].params = conv_params;
185             break;
186         case DEPTH_TO_SPACE:
187             depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
188             if (!depth_to_space_params){
189                 avio_closep(&model_file_context);
190                 ff_dnn_free_model_native(&model);
191                 return NULL;
192             }
193             depth_to_space_params->block_size = (int32_t)avio_rl32(model_file_context);
194             dnn_size += 4;
195             network->layers[layer].type = DEPTH_TO_SPACE;
196             network->layers[layer].params = depth_to_space_params;
197             break;
198         default:
199             avio_closep(&model_file_context);
200             ff_dnn_free_model_native(&model);
201             return NULL;
202         }
203     }
204
205     avio_closep(&model_file_context);
206
207     if (dnn_size != file_size){
208         ff_dnn_free_model_native(&model);
209         return NULL;
210     }
211
212     model->set_input_output = &set_input_output_native;
213
214     return model;
215 }
216
217 #define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
218
219 static void convolve(const float *input, float *output, const ConvolutionalParams *conv_params, int width, int height)
220 {
221     int y, x, n_filter, ch, kernel_y, kernel_x;
222     int radius = conv_params->kernel_size >> 1;
223     int src_linesize = width * conv_params->input_num;
224     int filter_linesize = conv_params->kernel_size * conv_params->input_num;
225     int filter_size = conv_params->kernel_size * filter_linesize;
226
227     for (y = 0; y < height; ++y){
228         for (x = 0; x < width; ++x){
229             for (n_filter = 0; n_filter < conv_params->output_num; ++n_filter){
230                 output[n_filter] = conv_params->biases[n_filter];
231                 for (ch = 0; ch < conv_params->input_num; ++ch){
232                     for (kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y){
233                         for (kernel_x = 0; kernel_x < conv_params->kernel_size; ++kernel_x){
234                             output[n_filter] += input[CLAMP_TO_EDGE(y + kernel_y - radius, height) * src_linesize +
235                                                       CLAMP_TO_EDGE(x + kernel_x - radius, width) * conv_params->input_num + ch] *
236                                                 conv_params->kernel[n_filter * filter_size + kernel_y * filter_linesize +
237                                                                     kernel_x * conv_params->input_num + ch];
238                         }
239                     }
240                 }
241                 switch (conv_params->activation){
242                 case RELU:
243                     output[n_filter] = FFMAX(output[n_filter], 0.0);
244                     break;
245                 case TANH:
246                     output[n_filter] = 2.0f  / (1.0f + exp(-2.0f * output[n_filter])) - 1.0f;
247                     break;
248                 case SIGMOID:
249                     output[n_filter] = 1.0f / (1.0f + exp(-output[n_filter]));
250                 }
251             }
252             output += conv_params->output_num;
253         }
254     }
255 }
256
257 static void depth_to_space(const float *input, float *output, int block_size, int width, int height, int channels)
258 {
259     int y, x, by, bx, ch;
260     int new_channels = channels / (block_size * block_size);
261     int output_linesize = width * channels;
262     int by_linesize = output_linesize / block_size;
263     int x_linesize = new_channels * block_size;
264
265     for (y = 0; y < height; ++y){
266         for (x = 0; x < width; ++x){
267             for (by = 0; by < block_size; ++by){
268                 for (bx = 0; bx < block_size; ++bx){
269                     for (ch = 0; ch < new_channels; ++ch){
270                         output[by * by_linesize + x * x_linesize + bx * new_channels + ch] = input[ch];
271                     }
272                     input += new_channels;
273                 }
274             }
275         }
276         output += output_linesize;
277     }
278 }
279
280 DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
281 {
282     ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
283     int cur_width, cur_height, cur_channels;
284     int32_t layer;
285     InputParams *input_params;
286     ConvolutionalParams *conv_params;
287     DepthToSpaceParams *depth_to_space_params;
288
289     if (network->layers_num <= 0 || network->layers[0].type != INPUT || !network->layers[0].output){
290         return DNN_ERROR;
291     }
292     else{
293         input_params = (InputParams *)network->layers[0].params;
294         cur_width = input_params->width;
295         cur_height = input_params->height;
296         cur_channels = input_params->channels;
297     }
298
299     for (layer = 1; layer < network->layers_num; ++layer){
300         if (!network->layers[layer].output){
301             return DNN_ERROR;
302         }
303         switch (network->layers[layer].type){
304         case CONV:
305             conv_params = (ConvolutionalParams *)network->layers[layer].params;
306             convolve(network->layers[layer - 1].output, network->layers[layer].output, conv_params, cur_width, cur_height);
307             cur_channels = conv_params->output_num;
308             break;
309         case DEPTH_TO_SPACE:
310             depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
311             depth_to_space(network->layers[layer - 1].output, network->layers[layer].output,
312                            depth_to_space_params->block_size, cur_width, cur_height, cur_channels);
313             cur_height *= depth_to_space_params->block_size;
314             cur_width *= depth_to_space_params->block_size;
315             cur_channels /= depth_to_space_params->block_size * depth_to_space_params->block_size;
316             break;
317         case INPUT:
318             return DNN_ERROR;
319         }
320     }
321
322     // native mode does not support multiple outputs yet
323     if (nb_output > 1)
324         return DNN_ERROR;
325     outputs[0].data = network->layers[network->layers_num - 1].output;
326     outputs[0].height = cur_height;
327     outputs[0].width = cur_width;
328     outputs[0].channels = cur_channels;
329
330     return DNN_SUCCESS;
331 }
332
333 void ff_dnn_free_model_native(DNNModel **model)
334 {
335     ConvolutionalNetwork *network;
336     ConvolutionalParams *conv_params;
337     int32_t layer;
338
339     if (*model)
340     {
341         network = (ConvolutionalNetwork *)(*model)->model;
342         for (layer = 0; layer < network->layers_num; ++layer){
343             av_freep(&network->layers[layer].output);
344             if (network->layers[layer].type == CONV){
345                 conv_params = (ConvolutionalParams *)network->layers[layer].params;
346                 av_freep(&conv_params->kernel);
347                 av_freep(&conv_params->biases);
348             }
349             av_freep(&network->layers[layer].params);
350         }
351         av_freep(&network->layers);
352         av_freep(&network);
353         av_freep(model);
354     }
355 }