/*
 * Copyright (c) 2018 Sergey Lavrushkin
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DNN native backend implementation.
 */
26 #include "dnn_backend_native.h"
27 #include "libavutil/avassert.h"
28 #include "dnn_backend_native_layer_pad.h"
29 #include "dnn_backend_native_layer_conv2d.h"
30 #include "dnn_backend_native_layer_depth2space.h"
31 #include "dnn_backend_native_layer_maximum.h"
32 #include "dnn_backend_native_layers.h"
34 static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
36 ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
37 DnnOperand *oprd = NULL;
39 if (network->layers_num <= 0 || network->operands_num <= 0)
43 av_assert0(input->dt == DNN_FLOAT);
44 for (int i = 0; i < network->operands_num; ++i) {
45 oprd = &network->operands[i];
46 if (strcmp(oprd->name, input_name) == 0) {
47 if (oprd->type != DOT_INPUT)
58 oprd->dims[1] = input->height;
59 oprd->dims[2] = input->width;
60 oprd->dims[3] = input->channels;
62 av_freep(&oprd->data);
63 oprd->length = calculate_operand_data_length(oprd);
64 oprd->data = av_malloc(oprd->length);
68 input->data = oprd->data;
71 network->nb_output = 0;
72 av_freep(&network->output_indexes);
73 network->output_indexes = av_mallocz_array(nb_output, sizeof(*network->output_indexes));
74 if (!network->output_indexes)
77 for (uint32_t i = 0; i < nb_output; ++i) {
78 const char *output_name = output_names[i];
79 for (int j = 0; j < network->operands_num; ++j) {
80 oprd = &network->operands[j];
81 if (strcmp(oprd->name, output_name) == 0) {
82 network->output_indexes[network->nb_output++] = j;
88 if (network->nb_output != nb_output)
// Loads model and its parameters that are stored in a binary file with following structure:
// layers_num,layer_type,layer_parameters,layer_type,layer_parameters...
// For CONV layer: activation_function, input_num, output_num, kernel_size, kernel, biases
// For DEPTH_TO_SPACE layer: block_size
//
// NOTE(review): several error-handling lines (returns/braces) appear to be
// missing from this copy of the file — confirm against the upstream source.
DNNModel *ff_dnn_load_model_native(const char *model_filename)
    DNNModel *model = NULL;
    // magic string expected at the very start of a native model file
    char header_expected[] = "FFMPEGDNNNATIVE";
    // only files whose major version matches major_version_expected are accepted
    int version, header_size, major_version_expected = 0;
    ConvolutionalNetwork *network = NULL;
    AVIOContext *model_file_context;
    int file_size, dnn_size, kernel_size, i;
    DNNLayerType layer_type;
    ConvolutionalParams *conv_params;
    DepthToSpaceParams *depth_to_space_params;
    LayerPadParams *pad_params;
    DnnLayerMaximumParams *maximum_params;

    model = av_malloc(sizeof(DNNModel));

    if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
    file_size = avio_size(model_file_context);

    /*
     * check file header with string and version
     */
    size = sizeof(header_expected);
    buf = av_malloc(size);
        avio_closep(&model_file_context);

    // size - 1 to skip the ending '\0' which is not saved in file
    avio_get_str(model_file_context, size - 1, buf, size);
    if (strncmp(buf, header_expected, size) != 0) {
        avio_closep(&model_file_context);

    // major version must match exactly
    version = (int32_t)avio_rl32(model_file_context);
    if (version != major_version_expected) {
        avio_closep(&model_file_context);

    // currently no need to check minor version
    version = (int32_t)avio_rl32(model_file_context);
    // dnn_size has accumulated the header bytes read so far
    header_size = dnn_size;

    network = av_mallocz(sizeof(ConvolutionalNetwork));
        avio_closep(&model_file_context);
    model->model = (void *)network;

    // layers_num and operands_num live in the last 8 bytes of the file
    avio_seek(model_file_context, file_size - 8, SEEK_SET);
    network->layers_num = (int32_t)avio_rl32(model_file_context);
    network->operands_num = (int32_t)avio_rl32(model_file_context);
    // rewind to just past the header to start reading layer records
    avio_seek(model_file_context, header_size, SEEK_SET);

    network->layers = av_mallocz(network->layers_num * sizeof(Layer));
    if (!network->layers){
        avio_closep(&model_file_context);
        ff_dnn_free_model_native(&model);

    network->operands = av_mallocz(network->operands_num * sizeof(DnnOperand));
    if (!network->operands){
        avio_closep(&model_file_context);
        ff_dnn_free_model_native(&model);

    // read each layer: a 32-bit type tag followed by type-specific parameters
    for (layer = 0; layer < network->layers_num; ++layer){
        layer_type = (int32_t)avio_rl32(model_file_context);
        network->layers[layer].type = layer_type;
            // CONV2D: six scalar params, then kernel weights and biases as
            // IEEE-754 floats stored as little-endian 32-bit words
            conv_params = av_malloc(sizeof(ConvolutionalParams));
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
            conv_params->dilation = (int32_t)avio_rl32(model_file_context);
            conv_params->padding_method = (int32_t)avio_rl32(model_file_context);
            conv_params->activation = (int32_t)avio_rl32(model_file_context);
            conv_params->input_num = (int32_t)avio_rl32(model_file_context);
            conv_params->output_num = (int32_t)avio_rl32(model_file_context);
            conv_params->kernel_size = (int32_t)avio_rl32(model_file_context);
            kernel_size = conv_params->input_num * conv_params->output_num *
                          conv_params->kernel_size * conv_params->kernel_size;
            // 24 bytes of scalar params + 4 bytes per kernel/bias element;
            // note '<<' binds looser than '+', so this is (kernel+biases)*4
            dnn_size += 24 + (kernel_size + conv_params->output_num << 2);
            // bail out if the record claims more data than the file holds
            if (dnn_size > file_size || conv_params->input_num <= 0 ||
                conv_params->output_num <= 0 || conv_params->kernel_size <= 0){
                avio_closep(&model_file_context);
                av_freep(&conv_params);
                ff_dnn_free_model_native(&model);
            conv_params->kernel = av_malloc(kernel_size * sizeof(float));
            conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
            if (!conv_params->kernel || !conv_params->biases){
                avio_closep(&model_file_context);
                av_freep(&conv_params->kernel);
                av_freep(&conv_params->biases);
                av_freep(&conv_params);
                ff_dnn_free_model_native(&model);
            for (i = 0; i < kernel_size; ++i){
                conv_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
            for (i = 0; i < conv_params->output_num; ++i){
                conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
            // every layer record ends with its input/output operand indexes
            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].params = conv_params;
        case DLT_DEPTH_TO_SPACE:
            // DEPTH_TO_SPACE: a single block_size parameter
            depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
            if (!depth_to_space_params){
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
            depth_to_space_params->block_size = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].params = depth_to_space_params;
            // PAD: mode plus before/after padding amounts for the 4 NHWC dims
            pad_params = av_malloc(sizeof(LayerPadParams));
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
            pad_params->mode = (int32_t)avio_rl32(model_file_context);
            for (i = 0; i < 4; ++i) {
                pad_params->paddings[i][0] = avio_rl32(model_file_context);
                pad_params->paddings[i][1] = avio_rl32(model_file_context);
            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].params = pad_params;
            // MAXIMUM: the comparison constant, stored as raw 32-bit pattern
            maximum_params = av_malloc(sizeof(*maximum_params));
            if (!maximum_params){
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
            maximum_params->val.u32 = avio_rl32(model_file_context);
            network->layers[layer].params = maximum_params;
            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
            // unknown layer type: abort the load
            avio_closep(&model_file_context);
            ff_dnn_free_model_native(&model);

    // read operand descriptions: index, name, operand type, data type, 4 dims
    for (int32_t i = 0; i < network->operands_num; ++i){
        int32_t operand_index = (int32_t)avio_rl32(model_file_context);
        oprd = &network->operands[operand_index];
        name_len = (int32_t)avio_rl32(model_file_context);
        avio_get_str(model_file_context, name_len, oprd->name, sizeof(oprd->name));
        dnn_size += name_len;
        oprd->type = (int32_t)avio_rl32(model_file_context);
        oprd->data_type = (int32_t)avio_rl32(model_file_context);
        for (int32_t dim = 0; dim < 4; ++dim) {
            oprd->dims[dim] = (int32_t)avio_rl32(model_file_context);

    avio_closep(&model_file_context);

    // accumulated byte count must match the file size exactly, otherwise
    // the file is truncated or corrupted
    if (dnn_size != file_size){
        ff_dnn_free_model_native(&model);

    model->set_input_output = &set_input_output_native;
331 DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
333 ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
335 uint32_t nb = FFMIN(nb_output, network->nb_output);
337 if (network->layers_num <= 0 || network->operands_num <= 0)
339 if (!network->operands[0].data)
342 for (layer = 0; layer < network->layers_num; ++layer){
343 DNNLayerType layer_type = network->layers[layer].type;
344 layer_funcs[layer_type](network->operands,
345 network->layers[layer].input_operand_indexes,
346 network->layers[layer].output_operand_index,
347 network->layers[layer].params);
350 for (uint32_t i = 0; i < nb; ++i) {
351 DnnOperand *oprd = &network->operands[network->output_indexes[i]];
352 outputs[i].data = oprd->data;
353 outputs[i].height = oprd->dims[1];
354 outputs[i].width = oprd->dims[2];
355 outputs[i].channels = oprd->dims[3];
361 int32_t calculate_operand_dims_count(const DnnOperand *oprd)
364 for (int i = 0; i < 4; ++i)
365 result *= oprd->dims[i];
370 int32_t calculate_operand_data_length(const DnnOperand* oprd)
372 // currently, we just support DNN_FLOAT
373 return oprd->dims[0] * oprd->dims[1] * oprd->dims[2] * oprd->dims[3] * sizeof(float);
376 void ff_dnn_free_model_native(DNNModel **model)
378 ConvolutionalNetwork *network;
379 ConvolutionalParams *conv_params;
384 network = (ConvolutionalNetwork *)(*model)->model;
385 for (layer = 0; layer < network->layers_num; ++layer){
386 if (network->layers[layer].type == DLT_CONV2D){
387 conv_params = (ConvolutionalParams *)network->layers[layer].params;
388 av_freep(&conv_params->kernel);
389 av_freep(&conv_params->biases);
391 av_freep(&network->layers[layer].params);
393 av_freep(&network->layers);
395 for (uint32_t operand = 0; operand < network->operands_num; ++operand)
396 av_freep(&network->operands[operand].data);
397 av_freep(&network->operands);
399 av_freep(&network->output_indexes);