dnn: add userdata for load model parameter
index 8b05bec29375690bfba3ab3fc4847886e34a3be8..830ec19c80d430906e2226584c235e0c1006ea2a 100644
 
 #include "dnn_backend_native.h"
 #include "libavutil/avassert.h"
-#include "dnn_backend_native_layer_pad.h"
+#include "dnn_backend_native_layer_conv2d.h"
+#include "dnn_backend_native_layers.h"
+
+#define OFFSET(x) offsetof(NativeContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption dnn_native_options[] = {
+    { "conv2d_threads", "threads num for conv2d layer", OFFSET(options.conv2d_threads), AV_OPT_TYPE_INT,  { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS },
+    { NULL },
+};
+
+const AVClass dnn_native_class = {
+    .class_name = "dnn_native",
+    .item_name  = av_default_item_name,
+    .option     = dnn_native_options,
+    .version    = LIBAVUTIL_VERSION_INT,
+    .category   = AV_CLASS_CATEGORY_FILTER,
+};
+
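With this class in place, backend options travel as a single string: ff_dnn_load_model_native() hands it to av_opt_set_from_string() further down, which splits it on "&" into "key=value" pairs, so a caller would pass e.g. "conv2d_threads=2".
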
+static DNNReturnType get_input_native(void *model, DNNData *input, const char *input_name)
+{
+    NativeModel *native_model = (NativeModel *)model;
+    NativeContext *ctx = &native_model->ctx;
+
+    for (int i = 0; i < native_model->operands_num; ++i) {
+        DnnOperand *oprd = &native_model->operands[i];
+        if (strcmp(oprd->name, input_name) == 0) {
+            if (oprd->type != DOT_INPUT) {
+                av_log(ctx, AV_LOG_ERROR, "Found \"%s\" in model, but it is not an input node\n", input_name);
+                return DNN_ERROR;
+            }
+            input->dt = oprd->data_type;
+            av_assert0(oprd->dims[0] == 1);
+            input->height = oprd->dims[1];
+            input->width = oprd->dims[2];
+            input->channels = oprd->dims[3];
+            return DNN_SUCCESS;
+        }
+    }
 
-static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
+    // the input operand was not found
+    av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
+    return DNN_ERROR;
+}
+
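Note the dimension layout these helpers rely on: operands are NHWC, so dims[0] is the batch size (asserted above to be 1), dims[1] the height, dims[2] the width and dims[3] the channel count; set_input_native() below fills the input operand's dims in the same order.
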
+static DNNReturnType set_input_native(void *model, DNNData *input, const char *input_name)
 {
-    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
+    NativeModel *native_model = (NativeModel *)model;
+    NativeContext *ctx = &native_model->ctx;
+    DnnOperand *oprd = NULL;
 
-    if (network->layers_num <= 0 || network->operands_num <= 0)
+    if (native_model->layers_num <= 0 || native_model->operands_num <= 0) {
+        av_log(ctx, AV_LOG_ERROR, "No operands or layers in model\n");
         return DNN_ERROR;
+    }
 
-    av_assert0(input->dt == DNN_FLOAT);
+    /* inputs */
+    for (int i = 0; i < native_model->operands_num; ++i) {
+        oprd = &native_model->operands[i];
+        if (strcmp(oprd->name, input_name) == 0) {
+            if (oprd->type != DOT_INPUT) {
+                av_log(ctx, AV_LOG_ERROR, "Found \"%s\" in model, but it is not an input node\n", input_name);
+                return DNN_ERROR;
+            }
+            break;
+        }
+        oprd = NULL;
+    }
+    if (!oprd) {
+        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
+        return DNN_ERROR;
+    }
 
-    /**
-     * as the first step, suppose network->operands[0] is the input operand.
-     */
-    network->operands[0].dims[0] = 1;
-    network->operands[0].dims[1] = input->height;
-    network->operands[0].dims[2] = input->width;
-    network->operands[0].dims[3] = input->channels;
-    network->operands[0].type = DOT_INPUT;
-    network->operands[0].data_type = DNN_FLOAT;
-    network->operands[0].isNHWC = 1;
-
-    av_freep(&network->operands[0].data);
-    network->operands[0].length = calculate_operand_data_length(&network->operands[0]);
-    network->operands[0].data = av_malloc(network->operands[0].length);
-    if (!network->operands[0].data)
+    oprd->dims[0] = 1;
+    oprd->dims[1] = input->height;
+    oprd->dims[2] = input->width;
+    oprd->dims[3] = input->channels;
+
+    av_freep(&oprd->data);
+    oprd->length = calculate_operand_data_length(oprd);
+    if (oprd->length <= 0) {
+        av_log(ctx, AV_LOG_ERROR, "The input data length overflow\n");
+        return DNN_ERROR;
+    }
+    oprd->data = av_malloc(oprd->length);
+    if (!oprd->data) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to malloc memory for input data\n");
         return DNN_ERROR;
+    }
+
+    input->data = oprd->data;
 
-    input->data = network->operands[0].data;
     return DNN_SUCCESS;
 }
 
@@ -61,155 +123,119 @@ static DNNReturnType set_input_output_native(void *model, DNNInputData *input, c
 // layers_num,layer_type,layer_parameters,layer_type,layer_parameters...
 // For CONV layer: activation_function, input_num, output_num, kernel_size, kernel, biases
 // For DEPTH_TO_SPACE layer: block_size
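Pulling the reads below together, the on-disk layout this loader expects is roughly the following (a reconstruction from the parsing code, not an authoritative spec; the per-operand fields after the name are elided by the hunk and left as "..." here):

    "FFMPEGDNNNATIVE"  header magic, 15 bytes, stored without a trailing '\0'
    major_version      int32 LE, must equal 1
    minor_version      int32 LE, read but currently unchecked
    layers             per layer: layer_type (int32 LE) followed by the
                       layer-specific parameters consumed by that layer's pf_load
    operands           per operand: operand_index (int32 LE), name_len (int32 LE),
                       name, ...
    trailer            last 8 bytes of the file: layers_num and operands_num
                       (int32 LE each), located via avio_seek(file_size - 8)
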
-DNNModel *ff_dnn_load_model_native(const char *model_filename)
+DNNModel *ff_dnn_load_model_native(const char *model_filename, const char *options, void *userdata)
 {
     DNNModel *model = NULL;
-    ConvolutionalNetwork *network = NULL;
+    char header_expected[] = "FFMPEGDNNNATIVE";
+    char *buf;
+    size_t size;
+    int version, header_size, major_version_expected = 1;
+    NativeModel *native_model = NULL;
     AVIOContext *model_file_context;
-    int file_size, dnn_size, kernel_size, i;
+    int file_size, dnn_size, parsed_size;
     int32_t layer;
     DNNLayerType layer_type;
-    ConvolutionalParams *conv_params;
-    DepthToSpaceParams *depth_to_space_params;
-    LayerPadParams *pad_params;
-
-    model = av_malloc(sizeof(DNNModel));
-    if (!model){
-        return NULL;
-    }
 
     if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
-        av_freep(&model);
         return NULL;
     }
     file_size = avio_size(model_file_context);
 
-    network = av_mallocz(sizeof(ConvolutionalNetwork));
-    if (!network){
-        avio_closep(&model_file_context);
-        av_freep(&model);
-        return NULL;
+    model = av_mallocz(sizeof(DNNModel));
+    if (!model){
+        goto fail;
     }
-    model->model = (void *)network;
+
+    /**
+     * check the file header: magic string and version
+     */
+    size = sizeof(header_expected);
+    buf = av_malloc(size);
+    if (!buf) {
+        goto fail;
+    }
+
+    // size - 1 to skip the trailing '\0', which is not stored in the file
+    avio_get_str(model_file_context, size - 1, buf, size);
+    dnn_size = size - 1;
+    if (strncmp(buf, header_expected, size) != 0) {
+        av_freep(&buf);
+        goto fail;
+    }
+    av_freep(&buf);
+
+    version = (int32_t)avio_rl32(model_file_context);
+    dnn_size += 4;
+    if (version != major_version_expected) {
+        goto fail;
+    }
+
+    // currently no need to check minor version
+    version = (int32_t)avio_rl32(model_file_context);
+    dnn_size += 4;
+    header_size = dnn_size;
+
+    native_model = av_mallocz(sizeof(NativeModel));
+    if (!native_model){
+        goto fail;
+    }
+
+    native_model->ctx.class = &dnn_native_class;
+    model->options = options;
+    if (av_opt_set_from_string(&native_model->ctx, model->options, NULL, "=", "&") < 0)
+        goto fail;
+    model->model = (void *)native_model;
+
+#if !HAVE_PTHREAD_CANCEL
+    if (native_model->ctx.options.conv2d_threads > 1){
+        av_log(&native_model->ctx, AV_LOG_WARNING, "'conv2d_threads' option was set but it is not supported "
+                       "on this build (pthread support is required)\n");
+    }
+#endif
 
     avio_seek(model_file_context, file_size - 8, SEEK_SET);
-    network->layers_num = (int32_t)avio_rl32(model_file_context);
-    network->operands_num = (int32_t)avio_rl32(model_file_context);
-    dnn_size = 8;
-    avio_seek(model_file_context, 0, SEEK_SET);
-
-    network->layers = av_mallocz(network->layers_num * sizeof(Layer));
-    if (!network->layers){
-        avio_closep(&model_file_context);
-        ff_dnn_free_model_native(&model);
-        return NULL;
+    native_model->layers_num = (int32_t)avio_rl32(model_file_context);
+    native_model->operands_num = (int32_t)avio_rl32(model_file_context);
+    dnn_size += 8;
+    avio_seek(model_file_context, header_size, SEEK_SET);
+
+    native_model->layers = av_mallocz(native_model->layers_num * sizeof(Layer));
+    if (!native_model->layers){
+        goto fail;
     }
 
-    network->operands = av_mallocz(network->operands_num * sizeof(DnnOperand));
-    if (!network->operands){
-        avio_closep(&model_file_context);
-        ff_dnn_free_model_native(&model);
-        return NULL;
+    native_model->operands = av_mallocz(native_model->operands_num * sizeof(DnnOperand));
+    if (!native_model->operands){
+        goto fail;
     }
 
-    for (layer = 0; layer < network->layers_num; ++layer){
+    for (layer = 0; layer < native_model->layers_num; ++layer){
         layer_type = (int32_t)avio_rl32(model_file_context);
         dnn_size += 4;
-        switch (layer_type){
-        case CONV:
-            conv_params = av_malloc(sizeof(ConvolutionalParams));
-            if (!conv_params){
-                avio_closep(&model_file_context);
-                ff_dnn_free_model_native(&model);
-                return NULL;
-            }
-            conv_params->dilation = (int32_t)avio_rl32(model_file_context);
-            conv_params->padding_method = (int32_t)avio_rl32(model_file_context);
-            conv_params->activation = (int32_t)avio_rl32(model_file_context);
-            conv_params->input_num = (int32_t)avio_rl32(model_file_context);
-            conv_params->output_num = (int32_t)avio_rl32(model_file_context);
-            conv_params->kernel_size = (int32_t)avio_rl32(model_file_context);
-            kernel_size = conv_params->input_num * conv_params->output_num *
-                          conv_params->kernel_size * conv_params->kernel_size;
-            dnn_size += 24 + (kernel_size + conv_params->output_num << 2);
-            if (dnn_size > file_size || conv_params->input_num <= 0 ||
-                conv_params->output_num <= 0 || conv_params->kernel_size <= 0){
-                avio_closep(&model_file_context);
-                av_freep(&conv_params);
-                ff_dnn_free_model_native(&model);
-                return NULL;
-            }
-            conv_params->kernel = av_malloc(kernel_size * sizeof(float));
-            conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
-            if (!conv_params->kernel || !conv_params->biases){
-                avio_closep(&model_file_context);
-                av_freep(&conv_params->kernel);
-                av_freep(&conv_params->biases);
-                av_freep(&conv_params);
-                ff_dnn_free_model_native(&model);
-                return NULL;
-            }
-            for (i = 0; i < kernel_size; ++i){
-                conv_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
-            }
-            for (i = 0; i < conv_params->output_num; ++i){
-                conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
-            }
-            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
-            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
-            dnn_size += 8;
-            network->layers[layer].type = CONV;
-            network->layers[layer].params = conv_params;
-            break;
-        case DEPTH_TO_SPACE:
-            depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
-            if (!depth_to_space_params){
-                avio_closep(&model_file_context);
-                ff_dnn_free_model_native(&model);
-                return NULL;
-            }
-            depth_to_space_params->block_size = (int32_t)avio_rl32(model_file_context);
-            dnn_size += 4;
-            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
-            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
-            dnn_size += 8;
-            network->layers[layer].type = DEPTH_TO_SPACE;
-            network->layers[layer].params = depth_to_space_params;
-            break;
-        case MIRROR_PAD:
-            pad_params = av_malloc(sizeof(LayerPadParams));
-            if (!pad_params){
-                avio_closep(&model_file_context);
-                ff_dnn_free_model_native(&model);
-                return NULL;
-            }
-            pad_params->mode = (int32_t)avio_rl32(model_file_context);
-            dnn_size += 4;
-            for (i = 0; i < 4; ++i) {
-                pad_params->paddings[i][0] = avio_rl32(model_file_context);
-                pad_params->paddings[i][1] = avio_rl32(model_file_context);
-                dnn_size += 8;
-            }
-            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
-            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
-            dnn_size += 8;
-            network->layers[layer].type = MIRROR_PAD;
-            network->layers[layer].params = pad_params;
-            break;
-        default:
-            avio_closep(&model_file_context);
-            ff_dnn_free_model_native(&model);
-            return NULL;
+
+        if (layer_type >= DLT_COUNT) {
+            goto fail;
+        }
+
+        native_model->layers[layer].type = layer_type;
+        parsed_size = layer_funcs[layer_type].pf_load(&native_model->layers[layer], model_file_context, file_size, native_model->operands_num);
+        if (!parsed_size) {
+            goto fail;
         }
+        dnn_size += parsed_size;
     }
 
-    for (int32_t i = 0; i < network->operands_num; ++i){
+    for (int32_t i = 0; i < native_model->operands_num; ++i){
         DnnOperand *oprd;
         int32_t name_len;
         int32_t operand_index = (int32_t)avio_rl32(model_file_context);
         dnn_size += 4;
 
-        oprd = &network->operands[operand_index];
+        if (operand_index >= native_model->operands_num) {
+            goto fail;
+        }
+
+        oprd = &native_model->operands[operand_index];
         name_len = (int32_t)avio_rl32(model_file_context);
         dnn_size += 4;
 
@@ -237,213 +263,121 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
         return NULL;
     }
 
-    model->set_input_output = &set_input_output_native;
+    model->set_input = &set_input_native;
+    model->get_input = &get_input_native;
+    model->userdata = userdata;
 
     return model;
-}
 
-#define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
-
-static int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params)
-{
-    float *output;
-    int32_t input_operand_index = input_operand_indexes[0];
-    int number = operands[input_operand_index].dims[0];
-    int height = operands[input_operand_index].dims[1];
-    int width = operands[input_operand_index].dims[2];
-    int channel = operands[input_operand_index].dims[3];
-    const float *input = operands[input_operand_index].data;
-
-    int radius = conv_params->kernel_size >> 1;
-    int src_linesize = width * conv_params->input_num;
-    int filter_linesize = conv_params->kernel_size * conv_params->input_num;
-    int filter_size = conv_params->kernel_size * filter_linesize;
-    int pad_size = (conv_params->padding_method == VALID) ? (conv_params->kernel_size - 1) / 2 * conv_params->dilation : 0;
-
-    DnnOperand *output_operand = &operands[output_operand_index];
-    output_operand->dims[0] = number;
-    output_operand->dims[1] = height - pad_size * 2;
-    output_operand->dims[2] = width - pad_size * 2;
-    output_operand->dims[3] = conv_params->output_num;
-    output_operand->length = calculate_operand_data_length(output_operand);
-    output_operand->data = av_realloc(output_operand->data, output_operand->length);
-    if (!output_operand->data)
-        return -1;
-    output = output_operand->data;
-
-    av_assert0(channel == conv_params->input_num);
-
-    for (int y = pad_size; y < height - pad_size; ++y) {
-        for (int x = pad_size; x < width - pad_size; ++x) {
-            for (int n_filter = 0; n_filter < conv_params->output_num; ++n_filter) {
-                output[n_filter] = conv_params->biases[n_filter];
-
-                for (int ch = 0; ch < conv_params->input_num; ++ch) {
-                    for (int kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y) {
-                        for (int kernel_x = 0; kernel_x < conv_params->kernel_size; ++kernel_x) {
-                            float input_pel;
-                            if (conv_params->padding_method == SAME_CLAMP_TO_EDGE) {
-                                int y_pos = CLAMP_TO_EDGE(y + (kernel_y - radius) * conv_params->dilation, height);
-                                int x_pos = CLAMP_TO_EDGE(x + (kernel_x - radius) * conv_params->dilation, width);
-                                input_pel = input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
-                            } else {
-                                int y_pos = y + (kernel_y - radius) * conv_params->dilation;
-                                int x_pos = x + (kernel_x - radius) * conv_params->dilation;
-                                input_pel = (x_pos < 0 || x_pos >= width || y_pos < 0 || y_pos >= height) ? 0.0 :
-                                                   input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
-                            }
-
-
-                            output[n_filter] += input_pel * conv_params->kernel[n_filter * filter_size + kernel_y * filter_linesize +
-                                                                                kernel_x * conv_params->input_num + ch];
-                        }
-                    }
-                }
-                switch (conv_params->activation){
-                case RELU:
-                    output[n_filter] = FFMAX(output[n_filter], 0.0);
-                    break;
-                case TANH:
-                    output[n_filter] = 2.0f  / (1.0f + exp(-2.0f * output[n_filter])) - 1.0f;
-                    break;
-                case SIGMOID:
-                    output[n_filter] = 1.0f / (1.0f + exp(-output[n_filter]));
-                    break;
-                case NONE:
-                    break;
-                case LEAKY_RELU:
-                    output[n_filter] = FFMAX(output[n_filter], 0.0) + 0.2 * FFMIN(output[n_filter], 0.0);
-                }
-            }
-            output += conv_params->output_num;
-        }
-    }
-    return 0;
-}
-
-static int depth_to_space(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, int block_size)
-{
-    float *output;
-    int32_t input_operand_index = input_operand_indexes[0];
-    int number = operands[input_operand_index].dims[0];
-    int height = operands[input_operand_index].dims[1];
-    int width = operands[input_operand_index].dims[2];
-    int channels = operands[input_operand_index].dims[3];
-    const float *input = operands[input_operand_index].data;
-
-    int y, x, by, bx, ch;
-    int new_channels = channels / (block_size * block_size);
-    int output_linesize = width * channels;
-    int by_linesize = output_linesize / block_size;
-    int x_linesize = new_channels * block_size;
-
-    DnnOperand *output_operand = &operands[output_operand_index];
-    output_operand->dims[0] = number;
-    output_operand->dims[1] = height * block_size;
-    output_operand->dims[2] = width * block_size;
-    output_operand->dims[3] = new_channels;
-    output_operand->length = calculate_operand_data_length(output_operand);
-    output_operand->data = av_realloc(output_operand->data, output_operand->length);
-    if (!output_operand->data)
-        return -1;
-    output = output_operand->data;
-
-    for (y = 0; y < height; ++y){
-        for (x = 0; x < width; ++x){
-            for (by = 0; by < block_size; ++by){
-                for (bx = 0; bx < block_size; ++bx){
-                    for (ch = 0; ch < new_channels; ++ch){
-                        output[by * by_linesize + x * x_linesize + bx * new_channels + ch] = input[ch];
-                    }
-                    input += new_channels;
-                }
-            }
-        }
-        output += output_linesize;
-    }
-    return 0;
+fail:
+    ff_dnn_free_model_native(&model);
+    avio_closep(&model_file_context);
+    return NULL;
 }
 
-DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
+DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, const char **output_names, uint32_t nb_output)
 {
-    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
+    NativeModel *native_model = (NativeModel *)model->model;
+    NativeContext *ctx = &native_model->ctx;
     int32_t layer;
-    ConvolutionalParams *conv_params;
-    DepthToSpaceParams *depth_to_space_params;
-    LayerPadParams *pad_params;
 
-    if (network->layers_num <= 0 || network->operands_num <= 0)
+    if (native_model->layers_num <= 0 || native_model->operands_num <= 0) {
+        av_log(ctx, AV_LOG_ERROR, "No operands or layers in model\n");
         return DNN_ERROR;
-    if (!network->operands[0].data)
+    }
+    if (!native_model->operands[0].data) {
+        av_log(ctx, AV_LOG_ERROR, "Empty model input data\n");
         return DNN_ERROR;
+    }
 
-    for (layer = 0; layer < network->layers_num; ++layer){
-        switch (network->layers[layer].type){
-        case CONV:
-            conv_params = (ConvolutionalParams *)network->layers[layer].params;
-            convolve(network->operands, network->layers[layer].input_operand_indexes,
-                     network->layers[layer].output_operand_index, conv_params);
-            break;
-        case DEPTH_TO_SPACE:
-            depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
-            depth_to_space(network->operands, network->layers[layer].input_operand_indexes,
-                           network->layers[layer].output_operand_index, depth_to_space_params->block_size);
-            break;
-        case MIRROR_PAD:
-            pad_params = (LayerPadParams *)network->layers[layer].params;
-            dnn_execute_layer_pad(network->operands, network->layers[layer].input_operand_indexes,
-                                  network->layers[layer].output_operand_index, pad_params);
-            break;
-        case INPUT:
+    for (layer = 0; layer < native_model->layers_num; ++layer){
+        DNNLayerType layer_type = native_model->layers[layer].type;
+        if (layer_funcs[layer_type].pf_exec(native_model->operands,
+                                            native_model->layers[layer].input_operand_indexes,
+                                            native_model->layers[layer].output_operand_index,
+                                            native_model->layers[layer].params,
+                                            &native_model->ctx) == DNN_ERROR) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to execute model\n");
             return DNN_ERROR;
         }
     }
 
-    // native mode does not support multiple outputs yet
-    if (nb_output > 1)
-        return DNN_ERROR;
+    for (uint32_t i = 0; i < nb_output; ++i) {
+        DnnOperand *oprd = NULL;
+        const char *output_name = output_names[i];
+        for (int j = 0; j < native_model->operands_num; ++j) {
+            if (strcmp(native_model->operands[j].name, output_name) == 0) {
+                oprd = &native_model->operands[j];
+                break;
+            }
+        }
 
-    /**
-     * as the first step, suppose network->operands[network->operands_num - 1] is the output operand.
-     */
-    outputs[0].data = network->operands[network->operands_num - 1].data;
-    outputs[0].height = network->operands[network->operands_num - 1].dims[1];
-    outputs[0].width = network->operands[network->operands_num - 1].dims[2];
-    outputs[0].channels = network->operands[network->operands_num - 1].dims[3];
+        if (oprd == NULL) {
+            av_log(ctx, AV_LOG_ERROR, "Could not find output in model\n");
+            return DNN_ERROR;
+        }
+
+        outputs[i].data = oprd->data;
+        outputs[i].height = oprd->dims[1];
+        outputs[i].width = oprd->dims[2];
+        outputs[i].channels = oprd->dims[3];
+        outputs[i].dt = oprd->data_type;
+    }
 
     return DNN_SUCCESS;
 }
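A minimal driver sketch for the API above; the model path, the option string and the operand names "x" and "y" are hypothetical:

    DNNData input = { 0 }, output = { 0 };
    const char *output_names[] = { "y" };
    DNNModel *model = ff_dnn_load_model_native("model.bin", "conv2d_threads=2",
                                               NULL /* userdata, stashed on the model */);

    if (!model)
        return;
    if (model->get_input(model->model, &input, "x") != DNN_SUCCESS)
        goto end;                    // learns the input's data type and any fixed dims
    input.height = 240;              // hypothetical frame geometry
    input.width = 320;
    input.channels = 3;
    if (model->set_input(model->model, &input, "x") != DNN_SUCCESS)
        goto end;                    // allocates the buffer, handed back in input.data
    /* ... fill input.data with 240*320*3 floats ... */
    if (ff_dnn_execute_model_native(model, &output, output_names, 1) == DNN_SUCCESS) {
        /* output.data/height/width/channels/dt now describe operand "y" */
    }
end:
    ff_dnn_free_model_native(&model);
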
 
-int32_t calculate_operand_data_length(DnnOperand* operand)
+int32_t calculate_operand_dims_count(const DnnOperand *oprd)
+{
+    int32_t result = 1;
+    for (int i = 0; i < 4; ++i)
+        result *= oprd->dims[i];
+
+    return result;
+}
+
+int32_t calculate_operand_data_length(const DnnOperand* oprd)
 {
     // currently, only DNN_FLOAT is supported
-    return operand->dims[0] * operand->dims[1] * operand->dims[2] * operand->dims[3] * sizeof(float);
+    uint64_t len = sizeof(float);
+    for (int i = 0; i < 4; i++) {
+        len *= oprd->dims[i];
+        if (len > INT32_MAX)
+            return 0;
+    }
+    return len;
 }
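As a concrete check of the overflow guard above: a 1x1080x1920x3 float input gives 1 * 1080 * 1920 * 3 * 4 = 24,883,200 bytes, comfortably below INT32_MAX (2,147,483,647); a dims product whose byte length crosses that bound makes the function return 0, which set_input_native() then rejects through its length <= 0 check.
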
 
 void ff_dnn_free_model_native(DNNModel **model)
 {
-    ConvolutionalNetwork *network;
+    NativeModel *native_model;
     ConvolutionalParams *conv_params;
     int32_t layer;
 
     if (*model)
     {
-        network = (ConvolutionalNetwork *)(*model)->model;
-        for (layer = 0; layer < network->layers_num; ++layer){
-            if (network->layers[layer].type == CONV){
-                conv_params = (ConvolutionalParams *)network->layers[layer].params;
-                av_freep(&conv_params->kernel);
-                av_freep(&conv_params->biases);
+        if ((*model)->model) {
+            native_model = (NativeModel *)(*model)->model;
+            if (native_model->layers) {
+                for (layer = 0; layer < native_model->layers_num; ++layer){
+                    if (native_model->layers[layer].type == DLT_CONV2D){
+                        conv_params = (ConvolutionalParams *)native_model->layers[layer].params;
+                        av_freep(&conv_params->kernel);
+                        av_freep(&conv_params->biases);
+                    }
+                    av_freep(&native_model->layers[layer].params);
+                }
+                av_freep(&native_model->layers);
             }
-            av_freep(&network->layers[layer].params);
-        }
-        av_freep(&network->layers);
 
-        for (uint32_t operand = 0; operand < network->operands_num; ++operand)
-            av_freep(&network->operands[operand].data);
-        av_freep(&network->operands);
+            if (native_model->operands) {
+                for (uint32_t operand = 0; operand < native_model->operands_num; ++operand)
+                    av_freep(&native_model->operands[operand].data);
+                av_freep(&native_model->operands);
+            }
 
-        av_freep(&network);
+            av_freep(&native_model);
+        }
         av_freep(model);
     }
 }