avfilter/dnn: add DLT prefix for enum DNNLayerType to avoid potential conflicts
author    Guo, Yejun <yejun.guo@intel.com>
          Wed, 9 Oct 2019 14:08:04 +0000 (22:08 +0800)
committer Pedro Arthur <bygrandao@gmail.com>
          Tue, 15 Oct 2019 19:35:39 +0000 (16:35 -0300)
and also change CONV to DLT_CONV2D for a better description

Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
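
Why the prefix helps (a minimal hypothetical sketch, not code from this commit): in C, enumerators share the ordinary identifier namespace, so bare names such as INPUT, CONV or MAXIMUM can collide with enumerators or macros from any other header included in the same translation unit. The file and type names below (hypothetical_collision.c, IOKind) are made up for illustration.

    /* hypothetical_collision.c -- illustrative sketch only, not part of FFmpeg */

    /* A hypothetical second header in the same translation unit that also wants
     * an enumerator called INPUT.  With the old bare names this clashes:
     *
     *     typedef enum { INPUT = 0, CONV = 1 } DNNLayerType;   // old dnn_backend_native.h
     *     typedef enum { INPUT = 0, OUTPUT = 1 } IOKind;       // error: redeclaration of 'INPUT'
     *
     * With the DLT_ prefix of this commit, both enums can coexist: */
    typedef enum { DLT_INPUT = 0, DLT_CONV2D = 1 } DNNLayerType;
    typedef enum { INPUT = 0, OUTPUT = 1 } IOKind;

    int main(void) { return DLT_INPUT + INPUT; }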
libavfilter/dnn/dnn_backend_native.c
libavfilter/dnn/dnn_backend_native.h
libavfilter/dnn/dnn_backend_tf.c

libavfilter/dnn/dnn_backend_native.c
index 68fca50e763c407a81bb851fc2064c9c09c10b64..97549d3077c910af437b3550caefd1dcb2bfc9b7 100644
@@ -188,8 +188,9 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
     for (layer = 0; layer < network->layers_num; ++layer){
         layer_type = (int32_t)avio_rl32(model_file_context);
         dnn_size += 4;
+        network->layers[layer].type = layer_type;
         switch (layer_type){
-        case CONV:
+        case DLT_CONV2D:
             conv_params = av_malloc(sizeof(ConvolutionalParams));
             if (!conv_params){
                 avio_closep(&model_file_context);
@@ -231,10 +232,9 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
             network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
             network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
             dnn_size += 8;
-            network->layers[layer].type = CONV;
             network->layers[layer].params = conv_params;
             break;
-        case DEPTH_TO_SPACE:
+        case DLT_DEPTH_TO_SPACE:
             depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
             if (!depth_to_space_params){
                 avio_closep(&model_file_context);
@@ -246,10 +246,9 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
             network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
             network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
             dnn_size += 8;
-            network->layers[layer].type = DEPTH_TO_SPACE;
             network->layers[layer].params = depth_to_space_params;
             break;
-        case MIRROR_PAD:
+        case DLT_MIRROR_PAD:
             pad_params = av_malloc(sizeof(LayerPadParams));
             if (!pad_params){
                 avio_closep(&model_file_context);
@@ -266,10 +265,9 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
             network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
             network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
             dnn_size += 8;
-            network->layers[layer].type = MIRROR_PAD;
             network->layers[layer].params = pad_params;
             break;
-        case MAXIMUM:
+        case DLT_MAXIMUM:
             maximum_params = av_malloc(sizeof(*maximum_params));
             if (!maximum_params){
                 avio_closep(&model_file_context);
@@ -278,7 +276,6 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
             }
             maximum_params->val.u32 = avio_rl32(model_file_context);
             dnn_size += 4;
-            network->layers[layer].type = MAXIMUM;
             network->layers[layer].params = maximum_params;
             network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
             network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
@@ -347,27 +344,27 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
 
     for (layer = 0; layer < network->layers_num; ++layer){
         switch (network->layers[layer].type){
-        case CONV:
+        case DLT_CONV2D:
             conv_params = (ConvolutionalParams *)network->layers[layer].params;
             convolve(network->operands, network->layers[layer].input_operand_indexes,
                      network->layers[layer].output_operand_index, conv_params);
             break;
-        case DEPTH_TO_SPACE:
+        case DLT_DEPTH_TO_SPACE:
             depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
             depth_to_space(network->operands, network->layers[layer].input_operand_indexes,
                            network->layers[layer].output_operand_index, depth_to_space_params->block_size);
             break;
-        case MIRROR_PAD:
+        case DLT_MIRROR_PAD:
             pad_params = (LayerPadParams *)network->layers[layer].params;
             dnn_execute_layer_pad(network->operands, network->layers[layer].input_operand_indexes,
                                   network->layers[layer].output_operand_index, pad_params);
             break;
-        case MAXIMUM:
+        case DLT_MAXIMUM:
             maximum_params = (DnnLayerMaximumParams *)network->layers[layer].params;
             dnn_execute_layer_maximum(network->operands, network->layers[layer].input_operand_indexes,
                                   network->layers[layer].output_operand_index, maximum_params);
             break;
-        case INPUT:
+        case DLT_INPUT:
             return DNN_ERROR;
         }
     }
@@ -408,7 +405,7 @@ void ff_dnn_free_model_native(DNNModel **model)
     {
         network = (ConvolutionalNetwork *)(*model)->model;
         for (layer = 0; layer < network->layers_num; ++layer){
-            if (network->layers[layer].type == CONV){
+            if (network->layers[layer].type == DLT_CONV2D){
                 conv_params = (ConvolutionalParams *)network->layers[layer].params;
                 av_freep(&conv_params->kernel);
                 av_freep(&conv_params->biases);
libavfilter/dnn/dnn_backend_native.h
index 3f2840c8fde62bbed1a5d59432be5c8ea21f8ae2..761e5ed02ca1d3c5e7fb552130b6050391a3ae4b 100644
 #include "../dnn_interface.h"
 #include "libavformat/avio.h"
 
-typedef enum {INPUT = 0, CONV = 1, DEPTH_TO_SPACE = 2, MIRROR_PAD = 3, MAXIMUM = 4} DNNLayerType;
+/**
+ * the enum value of DNNLayerType should not be changed,
+ * the same values are used in convert_from_tensorflow.py
+ */
+typedef enum {
+    DLT_INPUT = 0,
+    DLT_CONV2D = 1,
+    DLT_DEPTH_TO_SPACE = 2,
+    DLT_MIRROR_PAD = 3,
+    DLT_MAXIMUM = 4
+} DNNLayerType;
 
 typedef enum {DOT_INPUT = 1, DOT_OUTPUT = 2, DOT_INTERMEDIATE = DOT_INPUT | DOT_INPUT} DNNOperandType;
 
libavfilter/dnn/dnn_backend_tf.c
index 612d2e0982889c44d40b5fb59109b8056743f614..c8dff51744836a9c2fba30d63cf33a4e872a2330 100644
@@ -499,22 +499,22 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
 
     for (layer = 0; layer < conv_network->layers_num; ++layer){
         switch (conv_network->layers[layer].type){
-        case INPUT:
+        case DLT_INPUT:
             layer_add_res = DNN_SUCCESS;
             break;
-        case CONV:
+        case DLT_CONV2D:
             layer_add_res = add_conv_layer(tf_model, transpose_op, &op,
                                            (ConvolutionalParams *)conv_network->layers[layer].params, layer);
             break;
-        case DEPTH_TO_SPACE:
+        case DLT_DEPTH_TO_SPACE:
             layer_add_res = add_depth_to_space_layer(tf_model, &op,
                                                      (DepthToSpaceParams *)conv_network->layers[layer].params, layer);
             break;
-        case MIRROR_PAD:
+        case DLT_MIRROR_PAD:
             layer_add_res = add_pad_layer(tf_model, &op,
                                           (LayerPadParams *)conv_network->layers[layer].params, layer);
             break;
-        case MAXIMUM:
+        case DLT_MAXIMUM:
             layer_add_res = add_maximum_layer(tf_model, &op,
                                           (DnnLayerMaximumParams *)conv_network->layers[layer].params, layer);
             break;