/*
 * Copyright (c) 2018 Sergey Lavrushkin
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DNN native backend implementation.
 */

#include "dnn_backend_native.h"
#include "libavutil/avassert.h"
#include "dnn_backend_native_layer_pad.h"

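/*
 * Rough usage sketch for this backend (illustrative only; real filters go through
 * the generic DNN module interface rather than calling these functions directly,
 * and the model path and tensor names below are hypothetical examples):
 *
 *     DNNModel *model = ff_dnn_load_model_native("example.model");
 *     DNNInputData input = { .width = w, .height = h, .channels = c, .dt = DNN_FLOAT };
 *     DNNData output;
 *     const char *output_name = "y";
 *     model->set_input_output(model->model, &input, "x", &output_name, 1);
 *     // fill input.data with w * h * c floats, then:
 *     ff_dnn_execute_model_native(model, &output, 1);
 *     ff_dnn_free_model_native(&model);
 */
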
static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
{
    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;

    if (network->layers_num <= 0 || network->operands_num <= 0)
        return DNN_ERROR;

    av_assert0(input->dt == DNN_FLOAT);

    /**
     * As a first step, assume network->operands[0] is the input operand.
     */
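    /* dims[] is laid out as NHWC: { batch, height, width, channels }. */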
    network->operands[0].dims[0] = 1;
    network->operands[0].dims[1] = input->height;
    network->operands[0].dims[2] = input->width;
    network->operands[0].dims[3] = input->channels;
    network->operands[0].type = DOT_INPUT;
    network->operands[0].data_type = DNN_FLOAT;
    network->operands[0].isNHWC = 1;

    av_freep(&network->operands[0].data);
    network->operands[0].length = calculate_operand_data_length(&network->operands[0]);
    network->operands[0].data = av_malloc(network->operands[0].length);
    if (!network->operands[0].data)
        return DNN_ERROR;

    input->data = network->operands[0].data;
    return DNN_SUCCESS;
}

// Loads a model and its parameters from a binary file with the following structure:
// a list of layers (layer_type, layer_parameters, input/output operand indexes),
// followed by a list of operand descriptions, with layers_num and operands_num
// stored in the last 8 bytes of the file.
// For CONV layer: dilation, padding_method, activation_function, input_num, output_num, kernel_size, kernel, biases
// For DEPTH_TO_SPACE layer: block_size
// For MIRROR_PAD layer: mode, paddings
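//
// Illustrative layout for a hypothetical single-CONV-layer model (an example for
// exposition only, not a file shipped with FFmpeg); integers are little-endian
// int32, floats are 32-bit:
//   layer 0:  CONV, dilation, padding_method, activation, input_num, output_num, kernel_size,
//             input_num * output_num * kernel_size^2 kernel floats, output_num bias floats,
//             input_operand_index, output_operand_index
//   operands: per operand: index, name_len, name, type, data_type, dims[0..3]
//   footer:   layers_num, operands_num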
DNNModel *ff_dnn_load_model_native(const char *model_filename)
{
    DNNModel *model = NULL;
    ConvolutionalNetwork *network = NULL;
    AVIOContext *model_file_context;
    int file_size, dnn_size, kernel_size, i;
    int32_t layer;
    DNNLayerType layer_type;
    ConvolutionalParams *conv_params;
    DepthToSpaceParams *depth_to_space_params;
    LayerPadParams *pad_params;

    model = av_malloc(sizeof(DNNModel));
    if (!model){
        return NULL;
    }

    if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
        av_freep(&model);
        return NULL;
    }
    file_size = avio_size(model_file_context);

    network = av_mallocz(sizeof(ConvolutionalNetwork));
    if (!network){
        avio_closep(&model_file_context);
        av_freep(&model);
        return NULL;
    }
    model->model = (void *)network;

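    /* layers_num and operands_num are stored as two little-endian int32 values in
     * the last 8 bytes of the file; read them first, then rewind to parse the body. */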
    avio_seek(model_file_context, file_size - 8, SEEK_SET);
    network->layers_num = (int32_t)avio_rl32(model_file_context);
    network->operands_num = (int32_t)avio_rl32(model_file_context);
    dnn_size = 8;
    avio_seek(model_file_context, 0, SEEK_SET);

    network->layers = av_mallocz(network->layers_num * sizeof(Layer));
    if (!network->layers){
        avio_closep(&model_file_context);
        ff_dnn_free_model_native(&model);
        return NULL;
    }

    network->operands = av_mallocz(network->operands_num * sizeof(DnnOperand));
    if (!network->operands){
        avio_closep(&model_file_context);
        ff_dnn_free_model_native(&model);
        return NULL;
    }

    for (layer = 0; layer < network->layers_num; ++layer){
        layer_type = (int32_t)avio_rl32(model_file_context);
        dnn_size += 4;
        switch (layer_type){
        case CONV:
            conv_params = av_malloc(sizeof(ConvolutionalParams));
            if (!conv_params){
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            conv_params->dilation = (int32_t)avio_rl32(model_file_context);
            conv_params->padding_method = (int32_t)avio_rl32(model_file_context);
            conv_params->activation = (int32_t)avio_rl32(model_file_context);
            conv_params->input_num = (int32_t)avio_rl32(model_file_context);
            conv_params->output_num = (int32_t)avio_rl32(model_file_context);
            conv_params->kernel_size = (int32_t)avio_rl32(model_file_context);
            kernel_size = conv_params->input_num * conv_params->output_num *
                          conv_params->kernel_size * conv_params->kernel_size;
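            /* Account for the six int32 parameters above (24 bytes) plus 4 bytes per
             * kernel weight and per bias before validating against the file size. */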
            dnn_size += 24 + ((kernel_size + conv_params->output_num) << 2);
            if (dnn_size > file_size || conv_params->input_num <= 0 ||
                conv_params->output_num <= 0 || conv_params->kernel_size <= 0){
                avio_closep(&model_file_context);
                av_freep(&conv_params);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            conv_params->kernel = av_malloc(kernel_size * sizeof(float));
            conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
            if (!conv_params->kernel || !conv_params->biases){
                avio_closep(&model_file_context);
                av_freep(&conv_params->kernel);
                av_freep(&conv_params->biases);
                av_freep(&conv_params);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            for (i = 0; i < kernel_size; ++i){
                conv_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
            }
            for (i = 0; i < conv_params->output_num; ++i){
                conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
            }
            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
            dnn_size += 8;
            network->layers[layer].type = CONV;
            network->layers[layer].params = conv_params;
            break;
        case DEPTH_TO_SPACE:
            depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
            if (!depth_to_space_params){
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            depth_to_space_params->block_size = (int32_t)avio_rl32(model_file_context);
            dnn_size += 4;
            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
            dnn_size += 8;
            network->layers[layer].type = DEPTH_TO_SPACE;
            network->layers[layer].params = depth_to_space_params;
            break;
        case MIRROR_PAD:
            pad_params = av_malloc(sizeof(LayerPadParams));
            if (!pad_params){
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            pad_params->mode = (int32_t)avio_rl32(model_file_context);
            dnn_size += 4;
            for (i = 0; i < 4; ++i) {
                pad_params->paddings[i][0] = avio_rl32(model_file_context);
                pad_params->paddings[i][1] = avio_rl32(model_file_context);
                dnn_size += 8;
            }
            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
            dnn_size += 8;
            network->layers[layer].type = MIRROR_PAD;
            network->layers[layer].params = pad_params;
            break;
        default:
            avio_closep(&model_file_context);
            ff_dnn_free_model_native(&model);
            return NULL;
        }
    }

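    /* After the layers comes one record per operand: index, name length, name,
     * operand type, data type and the four NHWC dimensions. */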
    for (int32_t i = 0; i < network->operands_num; ++i){
        DnnOperand *oprd;
        int32_t name_len;
        int32_t operand_index = (int32_t)avio_rl32(model_file_context);
        dnn_size += 4;

        // reject out-of-range operand indexes from a malformed model file
        if (operand_index < 0 || operand_index >= network->operands_num){
            avio_closep(&model_file_context);
            ff_dnn_free_model_native(&model);
            return NULL;
        }

        oprd = &network->operands[operand_index];
        name_len = (int32_t)avio_rl32(model_file_context);
        dnn_size += 4;

        avio_get_str(model_file_context, name_len, oprd->name, sizeof(oprd->name));
        dnn_size += name_len;

        oprd->type = (int32_t)avio_rl32(model_file_context);
        dnn_size += 4;

        oprd->data_type = (int32_t)avio_rl32(model_file_context);
        dnn_size += 4;

        for (int32_t dim = 0; dim < 4; ++dim) {
            oprd->dims[dim] = (int32_t)avio_rl32(model_file_context);
            dnn_size += 4;
        }

        oprd->isNHWC = 1;
    }

    avio_closep(&model_file_context);

    if (dnn_size != file_size){
        ff_dnn_free_model_native(&model);
        return NULL;
    }

    model->set_input_output = &set_input_output_native;

    return model;
}

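/* Clamp a sample coordinate into [0, w - 1]; used to implement SAME_CLAMP_TO_EDGE padding. */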
#define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))

static int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params)
{
    float *output;
    int32_t input_operand_index = input_operand_indexes[0];
    int number = operands[input_operand_index].dims[0];
    int height = operands[input_operand_index].dims[1];
    int width = operands[input_operand_index].dims[2];
    int channel = operands[input_operand_index].dims[3];
    const float *input = operands[input_operand_index].data;

    int radius = conv_params->kernel_size >> 1;
    int src_linesize = width * conv_params->input_num;
    int filter_linesize = conv_params->kernel_size * conv_params->input_num;
    int filter_size = conv_params->kernel_size * filter_linesize;
    int pad_size = (conv_params->padding_method == VALID) ? (conv_params->kernel_size - 1) / 2 * conv_params->dilation : 0;
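    /* With VALID padding nothing is sampled outside the input, so the output shrinks
     * by pad_size on each border; the SAME variants keep the spatial size unchanged. */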

    DnnOperand *output_operand = &operands[output_operand_index];
    output_operand->dims[0] = number;
    output_operand->dims[1] = height - pad_size * 2;
    output_operand->dims[2] = width - pad_size * 2;
    output_operand->dims[3] = conv_params->output_num;
    output_operand->length = calculate_operand_data_length(output_operand);
    output_operand->data = av_realloc(output_operand->data, output_operand->length);
    if (!output_operand->data)
        return -1;
    output = output_operand->data;

    av_assert0(channel == conv_params->input_num);

    for (int y = pad_size; y < height - pad_size; ++y) {
        for (int x = pad_size; x < width - pad_size; ++x) {
            for (int n_filter = 0; n_filter < conv_params->output_num; ++n_filter) {
                output[n_filter] = conv_params->biases[n_filter];

                for (int ch = 0; ch < conv_params->input_num; ++ch) {
                    for (int kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y) {
                        for (int kernel_x = 0; kernel_x < conv_params->kernel_size; ++kernel_x) {
                            float input_pel;
                            if (conv_params->padding_method == SAME_CLAMP_TO_EDGE) {
                                int y_pos = CLAMP_TO_EDGE(y + (kernel_y - radius) * conv_params->dilation, height);
                                int x_pos = CLAMP_TO_EDGE(x + (kernel_x - radius) * conv_params->dilation, width);
                                input_pel = input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
                            } else {
                                int y_pos = y + (kernel_y - radius) * conv_params->dilation;
                                int x_pos = x + (kernel_x - radius) * conv_params->dilation;
                                input_pel = (x_pos < 0 || x_pos >= width || y_pos < 0 || y_pos >= height) ? 0.0 :
                                                   input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
                            }

                            output[n_filter] += input_pel * conv_params->kernel[n_filter * filter_size + kernel_y * filter_linesize +
                                                                                kernel_x * conv_params->input_num + ch];
                        }
                    }
                }
                switch (conv_params->activation){
                case RELU:
                    output[n_filter] = FFMAX(output[n_filter], 0.0);
                    break;
                case TANH:
                    output[n_filter] = 2.0f / (1.0f + exp(-2.0f * output[n_filter])) - 1.0f;
                    break;
                case SIGMOID:
                    output[n_filter] = 1.0f / (1.0f + exp(-output[n_filter]));
                    break;
                case NONE:
                    break;
                case LEAKY_RELU:
                    output[n_filter] = FFMAX(output[n_filter], 0.0) + 0.2 * FFMIN(output[n_filter], 0.0);
                    break;
                }
            }
            output += conv_params->output_num;
        }
    }
    return 0;
}

static int depth_to_space(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, int block_size)
{
    float *output;
    int32_t input_operand_index = input_operand_indexes[0];
    int number = operands[input_operand_index].dims[0];
    int height = operands[input_operand_index].dims[1];
    int width = operands[input_operand_index].dims[2];
    int channels = operands[input_operand_index].dims[3];
    const float *input = operands[input_operand_index].data;

    int y, x, by, bx, ch;
    int new_channels = channels / (block_size * block_size);
    int output_linesize = width * channels;
    int by_linesize = output_linesize / block_size;
    int x_linesize = new_channels * block_size;

    DnnOperand *output_operand = &operands[output_operand_index];
    output_operand->dims[0] = number;
    output_operand->dims[1] = height * block_size;
    output_operand->dims[2] = width * block_size;
    output_operand->dims[3] = new_channels;
    output_operand->length = calculate_operand_data_length(output_operand);
    output_operand->data = av_realloc(output_operand->data, output_operand->length);
    if (!output_operand->data)
        return -1;
    output = output_operand->data;

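    /* Rearrange depth into spatial blocks: input element (y, x, (by * block_size + bx) * new_channels + ch)
     * ends up at output element (y * block_size + by, x * block_size + bx, ch). */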
    for (y = 0; y < height; ++y){
        for (x = 0; x < width; ++x){
            for (by = 0; by < block_size; ++by){
                for (bx = 0; bx < block_size; ++bx){
                    for (ch = 0; ch < new_channels; ++ch){
                        output[by * by_linesize + x * x_linesize + bx * new_channels + ch] = input[ch];
                    }
                    input += new_channels;
                }
            }
        }
        output += output_linesize;
    }
    return 0;
}

DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
{
    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
    int32_t layer;
    ConvolutionalParams *conv_params;
    DepthToSpaceParams *depth_to_space_params;
    LayerPadParams *pad_params;

    if (network->layers_num <= 0 || network->operands_num <= 0)
        return DNN_ERROR;
    if (!network->operands[0].data)
        return DNN_ERROR;

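    /* Single forward pass: layers are executed in the order they appear in the model
     * file, each reading its input operands and (re)allocating its output operand. */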
    for (layer = 0; layer < network->layers_num; ++layer){
        switch (network->layers[layer].type){
        case CONV:
            conv_params = (ConvolutionalParams *)network->layers[layer].params;
            convolve(network->operands, network->layers[layer].input_operand_indexes,
                     network->layers[layer].output_operand_index, conv_params);
            break;
        case DEPTH_TO_SPACE:
            depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
            depth_to_space(network->operands, network->layers[layer].input_operand_indexes,
                           network->layers[layer].output_operand_index, depth_to_space_params->block_size);
            break;
        case MIRROR_PAD:
            pad_params = (LayerPadParams *)network->layers[layer].params;
            dnn_execute_layer_pad(network->operands, network->layers[layer].input_operand_indexes,
                                  network->layers[layer].output_operand_index, pad_params);
            break;
        case INPUT:
            return DNN_ERROR;
        }
    }

    // native mode does not support multiple outputs yet
    if (nb_output > 1)
        return DNN_ERROR;

    /**
     * As a first step, assume network->operands[network->operands_num - 1] is the output operand.
     */
    outputs[0].data = network->operands[network->operands_num - 1].data;
    outputs[0].height = network->operands[network->operands_num - 1].dims[1];
    outputs[0].width = network->operands[network->operands_num - 1].dims[2];
    outputs[0].channels = network->operands[network->operands_num - 1].dims[3];

    return DNN_SUCCESS;
}

int32_t calculate_operand_data_length(DnnOperand* operand)
{
    // currently, we just support DNN_FLOAT
    return operand->dims[0] * operand->dims[1] * operand->dims[2] * operand->dims[3] * sizeof(float);
}

void ff_dnn_free_model_native(DNNModel **model)
{
    ConvolutionalNetwork *network;
    ConvolutionalParams *conv_params;
    int32_t layer;

    if (*model)
    {
        network = (ConvolutionalNetwork *)(*model)->model;
        for (layer = 0; layer < network->layers_num; ++layer){
            if (network->layers[layer].type == CONV){
                conv_params = (ConvolutionalParams *)network->layers[layer].params;
                av_freep(&conv_params->kernel);
                av_freep(&conv_params->biases);
            }
            av_freep(&network->layers[layer].params);
        }
        av_freep(&network->layers);

        for (uint32_t operand = 0; operand < network->operands_num; ++operand)
            av_freep(&network->operands[operand].data);
        av_freep(&network->operands);

        av_freep(&network);
        av_freep(model);
    }
}