/*
 * Copyright (c) 2018 Sergey Lavrushkin
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DNN native backend implementation.
 */

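/*
 * A rough usage sketch of this backend from a filter. The model file name,
 * tensor names and dimensions below are illustrative only, and error
 * handling is omitted:
 *
 *     const char *outputs[] = { "y" };
 *     DNNData result;
 *     DNNModel *model = ff_dnn_load_model_native("srcnn.model");
 *     DNNInputData input = { .dt = DNN_FLOAT, .width = in_w, .height = in_h, .channels = 1 };
 *     model->set_input_output(model->model, &input, "x", outputs, 1);
 *     // ... fill input.data with the frame, then:
 *     ff_dnn_execute_model_native(model, &result, 1);
 *     ff_dnn_free_model_native(&model);
 */
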
#include "dnn_backend_native.h"
#include "libavutil/avassert.h"
#include "dnn_backend_native_layer_pad.h"
#include "dnn_backend_native_layer_conv2d.h"
#include "dnn_backend_native_layer_depth2space.h"

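/*
 * Implements DNNModel.set_input_output for the native backend: operand 0 is
 * treated as the network input, its buffer is (re)allocated for the given
 * width/height/channels, and input->data is pointed at that buffer so the
 * caller can fill it before execution.
 */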
static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
{
    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;

    if (network->layers_num <= 0 || network->operands_num <= 0)
        return DNN_ERROR;

    av_assert0(input->dt == DNN_FLOAT);

    /**
     * as the first step, suppose network->operands[0] is the input operand.
     */
    network->operands[0].dims[0] = 1;
    network->operands[0].dims[1] = input->height;
    network->operands[0].dims[2] = input->width;
    network->operands[0].dims[3] = input->channels;
    network->operands[0].type = DOT_INPUT;
    network->operands[0].data_type = DNN_FLOAT;
    network->operands[0].isNHWC = 1;

    av_freep(&network->operands[0].data);
    network->operands[0].length = calculate_operand_data_length(&network->operands[0]);
    network->operands[0].data = av_malloc(network->operands[0].length);
    if (!network->operands[0].data)
        return DNN_ERROR;

    input->data = network->operands[0].data;
    return DNN_SUCCESS;
}

// Loads the model and its parameters that are stored in a binary file with the following structure:
// layers_num,layer_type,layer_parameters,layer_type,layer_parameters...
// For CONV layer: activation_function, input_num, output_num, kernel_size, kernel, biases
// For DEPTH_TO_SPACE layer: block_size
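// Overall file layout as parsed below: the header string "FFMPEGDNNNATIVE"
// (without a trailing '\0'), major and minor version as two little-endian
// int32 values, the layer list, the operand list, and finally layers_num and
// operands_num stored as two int32 values in the last 8 bytes of the file.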
DNNModel *ff_dnn_load_model_native(const char *model_filename)
{
    DNNModel *model = NULL;
    char header_expected[] = "FFMPEGDNNNATIVE";
    char *buf;
    size_t size;
    int version, header_size, major_version_expected = 0;
    ConvolutionalNetwork *network = NULL;
    AVIOContext *model_file_context;
    int file_size, dnn_size, kernel_size, i;
    int32_t layer;
    DNNLayerType layer_type;
    ConvolutionalParams *conv_params;
    DepthToSpaceParams *depth_to_space_params;
    LayerPadParams *pad_params;

    model = av_malloc(sizeof(DNNModel));
    if (!model){
        return NULL;
    }

    if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
        av_freep(&model);
        return NULL;
    }
    file_size = avio_size(model_file_context);

    /**
     * check file header with string and version
     */
    size = sizeof(header_expected);
    buf = av_malloc(size);
    if (!buf) {
        avio_closep(&model_file_context);
        av_freep(&model);
        return NULL;
    }

    // size - 1 to skip the trailing '\0', which is not stored in the file
    avio_get_str(model_file_context, size - 1, buf, size);
    dnn_size = size - 1;
    if (strncmp(buf, header_expected, size) != 0) {
        av_freep(&buf);
        avio_closep(&model_file_context);
        av_freep(&model);
        return NULL;
    }
    av_freep(&buf);

    version = (int32_t)avio_rl32(model_file_context);
    dnn_size += 4;
    if (version != major_version_expected) {
        avio_closep(&model_file_context);
        av_freep(&model);
        return NULL;
    }

    // currently no need to check minor version
    version = (int32_t)avio_rl32(model_file_context);
    dnn_size += 4;
    header_size = dnn_size;

    network = av_mallocz(sizeof(ConvolutionalNetwork));
    if (!network){
        avio_closep(&model_file_context);
        av_freep(&model);
        return NULL;
    }
    model->model = (void *)network;

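    // layers_num and operands_num live in the last 8 bytes of the file, so
    // read them from there first, then seek back to just after the header.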
    avio_seek(model_file_context, file_size - 8, SEEK_SET);
    network->layers_num = (int32_t)avio_rl32(model_file_context);
    network->operands_num = (int32_t)avio_rl32(model_file_context);
    dnn_size += 8;
    avio_seek(model_file_context, header_size, SEEK_SET);

    network->layers = av_mallocz(network->layers_num * sizeof(Layer));
    if (!network->layers){
        avio_closep(&model_file_context);
        ff_dnn_free_model_native(&model);
        return NULL;
    }

    network->operands = av_mallocz(network->operands_num * sizeof(DnnOperand));
    if (!network->operands){
        avio_closep(&model_file_context);
        ff_dnn_free_model_native(&model);
        return NULL;
    }

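    // Parse the layer list: each entry is a layer type tag followed by the
    // type-specific parameters and the input/output operand indexes.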
    for (layer = 0; layer < network->layers_num; ++layer){
        layer_type = (int32_t)avio_rl32(model_file_context);
        dnn_size += 4;
        switch (layer_type){
        case CONV:
            conv_params = av_malloc(sizeof(ConvolutionalParams));
            if (!conv_params){
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            conv_params->dilation = (int32_t)avio_rl32(model_file_context);
            conv_params->padding_method = (int32_t)avio_rl32(model_file_context);
            conv_params->activation = (int32_t)avio_rl32(model_file_context);
            conv_params->input_num = (int32_t)avio_rl32(model_file_context);
            conv_params->output_num = (int32_t)avio_rl32(model_file_context);
            conv_params->kernel_size = (int32_t)avio_rl32(model_file_context);
            kernel_size = conv_params->input_num * conv_params->output_num *
                          conv_params->kernel_size * conv_params->kernel_size;
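            // 6 int32 parameters were just read (24 bytes); the kernel and
            // bias arrays that follow hold kernel_size and output_num floats
            // respectively, 4 bytes each.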
            dnn_size += 24 + ((kernel_size + conv_params->output_num) << 2);
            if (dnn_size > file_size || conv_params->input_num <= 0 ||
                conv_params->output_num <= 0 || conv_params->kernel_size <= 0){
                avio_closep(&model_file_context);
                av_freep(&conv_params);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            conv_params->kernel = av_malloc(kernel_size * sizeof(float));
            conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
            if (!conv_params->kernel || !conv_params->biases){
                avio_closep(&model_file_context);
                av_freep(&conv_params->kernel);
                av_freep(&conv_params->biases);
                av_freep(&conv_params);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            for (i = 0; i < kernel_size; ++i){
                conv_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
            }
            for (i = 0; i < conv_params->output_num; ++i){
                conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
            }
            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
            dnn_size += 8;
            network->layers[layer].type = CONV;
            network->layers[layer].params = conv_params;
            break;
        case DEPTH_TO_SPACE:
            depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
            if (!depth_to_space_params){
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            depth_to_space_params->block_size = (int32_t)avio_rl32(model_file_context);
            dnn_size += 4;
            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
            dnn_size += 8;
            network->layers[layer].type = DEPTH_TO_SPACE;
            network->layers[layer].params = depth_to_space_params;
            break;
        case MIRROR_PAD:
            pad_params = av_malloc(sizeof(LayerPadParams));
            if (!pad_params){
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            pad_params->mode = (int32_t)avio_rl32(model_file_context);
            dnn_size += 4;
            for (i = 0; i < 4; ++i) {
                pad_params->paddings[i][0] = avio_rl32(model_file_context);
                pad_params->paddings[i][1] = avio_rl32(model_file_context);
                dnn_size += 8;
            }
            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
            dnn_size += 8;
            network->layers[layer].type = MIRROR_PAD;
            network->layers[layer].params = pad_params;
            break;
        default:
            avio_closep(&model_file_context);
            ff_dnn_free_model_native(&model);
            return NULL;
        }
    }

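    // Parse the operand list: for each operand, its index, name, operand
    // type, data type and 4-D (NHWC) dimensions.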
    for (int32_t i = 0; i < network->operands_num; ++i){
        DnnOperand *oprd;
        int32_t name_len;
        int32_t operand_index = (int32_t)avio_rl32(model_file_context);
        dnn_size += 4;

        oprd = &network->operands[operand_index];
        name_len = (int32_t)avio_rl32(model_file_context);
        dnn_size += 4;

        avio_get_str(model_file_context, name_len, oprd->name, sizeof(oprd->name));
        dnn_size += name_len;

        oprd->type = (int32_t)avio_rl32(model_file_context);
        dnn_size += 4;

        oprd->data_type = (int32_t)avio_rl32(model_file_context);
        dnn_size += 4;

        for (int32_t dim = 0; dim < 4; ++dim) {
            oprd->dims[dim] = (int32_t)avio_rl32(model_file_context);
            dnn_size += 4;
        }

        oprd->isNHWC = 1;
    }

    avio_closep(&model_file_context);

    if (dnn_size != file_size){
        ff_dnn_free_model_native(&model);
        return NULL;
    }

    model->set_input_output = &set_input_output_native;

    return model;
}

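/*
 * Runs the loaded network: executes every layer in file order on the operand
 * table and exposes the last operand as the single output. The input operand
 * must already have been set up via set_input_output_native.
 */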
DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
{
    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
    int32_t layer;
    ConvolutionalParams *conv_params;
    DepthToSpaceParams *depth_to_space_params;
    LayerPadParams *pad_params;

    if (network->layers_num <= 0 || network->operands_num <= 0)
        return DNN_ERROR;
    if (!network->operands[0].data)
        return DNN_ERROR;

    for (layer = 0; layer < network->layers_num; ++layer){
        switch (network->layers[layer].type){
        case CONV:
            conv_params = (ConvolutionalParams *)network->layers[layer].params;
            convolve(network->operands, network->layers[layer].input_operand_indexes,
                     network->layers[layer].output_operand_index, conv_params);
            break;
        case DEPTH_TO_SPACE:
            depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
            depth_to_space(network->operands, network->layers[layer].input_operand_indexes,
                           network->layers[layer].output_operand_index, depth_to_space_params->block_size);
            break;
        case MIRROR_PAD:
            pad_params = (LayerPadParams *)network->layers[layer].params;
            dnn_execute_layer_pad(network->operands, network->layers[layer].input_operand_indexes,
                                  network->layers[layer].output_operand_index, pad_params);
            break;
        case INPUT:
            return DNN_ERROR;
        }
    }

    // native mode does not support multiple outputs yet
    if (nb_output > 1)
        return DNN_ERROR;

    /**
     * as the first step, suppose network->operands[network->operands_num - 1] is the output operand.
     */
    outputs[0].data = network->operands[network->operands_num - 1].data;
    outputs[0].height = network->operands[network->operands_num - 1].dims[1];
    outputs[0].width = network->operands[network->operands_num - 1].dims[2];
    outputs[0].channels = network->operands[network->operands_num - 1].dims[3];

    return DNN_SUCCESS;
}

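// Returns the number of bytes needed to hold an operand's NHWC data buffer.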
int32_t calculate_operand_data_length(DnnOperand* operand)
{
    // currently, we just support DNN_FLOAT
    return operand->dims[0] * operand->dims[1] * operand->dims[2] * operand->dims[3] * sizeof(float);
}

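/*
 * Frees the network: per-layer parameter blocks (including conv kernels and
 * biases), the operand data buffers, and finally the model itself.
 */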
void ff_dnn_free_model_native(DNNModel **model)
{
    ConvolutionalNetwork *network;
    ConvolutionalParams *conv_params;
    int32_t layer;

    if (*model)
    {
        network = (ConvolutionalNetwork *)(*model)->model;
        for (layer = 0; layer < network->layers_num; ++layer){
            if (network->layers[layer].type == CONV){
                conv_params = (ConvolutionalParams *)network->layers[layer].params;
                av_freep(&conv_params->kernel);
                av_freep(&conv_params->biases);
            }
            av_freep(&network->layers[layer].params);
        }
        av_freep(&network->layers);

        for (uint32_t operand = 0; operand < network->operands_num; ++operand)
            av_freep(&network->operands[operand].data);
        av_freep(&network->operands);

        av_freep(&network);
        av_freep(model);
    }
}