tests/dnn: fix build issue after function name changed
author     Guo, Yejun <yejun.guo@intel.com>
           Fri, 22 Jan 2021 11:28:29 +0000 (19:28 +0800)
committer  Guo, Yejun <yejun.guo@intel.com>
           Fri, 22 Jan 2021 11:28:29 +0000 (19:28 +0800)
tests/dnn/dnn-layer-avgpool-test.c
tests/dnn/dnn-layer-conv2d-test.c
tests/dnn/dnn-layer-dense-test.c
tests/dnn/dnn-layer-depth2space-test.c
tests/dnn/dnn-layer-mathbinary-test.c
tests/dnn/dnn-layer-mathunary-test.c
tests/dnn/dnn-layer-maximum-test.c
tests/dnn/dnn-layer-pad-test.c

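The change only adds FFmpeg's ff_ prefix, which the project uses for non-static symbols shared between translation units inside a library, so the tests simply have to call the prefixed names. Below is a minimal, self-contained sketch of the call shape these tests use after the rename. All Demo*/ff_demo_* names are hypothetical stand-ins for the real DnnOperand, per-layer params structs and ff_dnn_execute_layer_*() functions; the parameter list is inferred from the call sites in this diff, not copied from the FFmpeg headers.

/*
 * Sketch only: hypothetical stand-ins, not FFmpeg's actual types or API.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct DemoOperand {
    float *data;                 /* buffer of one input or output operand */
} DemoOperand;

typedef struct DemoAvgPoolParams {
    int kernel_size;             /* placeholder layer parameter */
} DemoAvgPoolParams;

/* ff_-prefixed, non-static entry point mirroring the renamed layer functions:
 * (operands, input indexes, number of inputs, layer params, context). */
int ff_demo_execute_layer_avg_pool(DemoOperand *operands,
                                   const int32_t *input_indexes,
                                   int32_t input_num,
                                   const DemoAvgPoolParams *params,
                                   void *ctx)
{
    (void)input_num; (void)params; (void)ctx;
    /* Trivial stand-in "layer": copy the first input value into operand 1. */
    operands[1].data = malloc(sizeof(float));
    if (!operands[1].data)
        return -1;
    operands[1].data[0] = operands[input_indexes[0]].data[0];
    return 0;
}

int main(void)
{
    float input[1] = { 1.5f };
    DemoOperand operands[2] = { { input }, { NULL } };
    int32_t input_indexes[1] = { 0 };
    DemoAvgPoolParams params = { 2 };

    /* Same shape as the test-suite calls, e.g.
     * ff_dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL); */
    ff_demo_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL);
    printf("output: %f\n", operands[1].data[0]);
    free(operands[1].data);
    return 0;
}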
diff --git a/tests/dnn/dnn-layer-avgpool-test.c b/tests/dnn/dnn-layer-avgpool-test.c
index 0e6be8ba579e8efab8a7492b76ff5b9f191ef9a3..4a925ea22af1b3557d3cfb7e3840f0da85709f2f 100644
--- a/tests/dnn/dnn-layer-avgpool-test.c
+++ b/tests/dnn/dnn-layer-avgpool-test.c
@@ -91,7 +91,7 @@ static int test_with_same(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); ++i) {
@@ -171,7 +171,7 @@ static int test_with_valid(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); ++i) {
diff --git a/tests/dnn/dnn-layer-conv2d-test.c b/tests/dnn/dnn-layer-conv2d-test.c
index b623ddac0d3bf23d865704f0691103a72089e01f..5ee60eeaf0d91a711e4d993c3111a7a0db6e03e3 100644
--- a/tests/dnn/dnn-layer-conv2d-test.c
+++ b/tests/dnn/dnn-layer-conv2d-test.c
@@ -118,7 +118,7 @@ static int test_with_same_dilate(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, &ctx);
+    ff_dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, &ctx);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
@@ -222,7 +222,7 @@ static int test_with_valid(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, &ctx);
+    ff_dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, &ctx);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
diff --git a/tests/dnn/dnn-layer-dense-test.c b/tests/dnn/dnn-layer-dense-test.c
index 2c11ec52182dde118ae0787d67a3f38a29adaad4..755d3ebb319bd6c5dc1d85d2c429b4355a41d6c1 100644
--- a/tests/dnn/dnn-layer-dense-test.c
+++ b/tests/dnn/dnn-layer-dense-test.c
@@ -107,7 +107,7 @@ static int test(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_dense(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_dense(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
diff --git a/tests/dnn/dnn-layer-depth2space-test.c b/tests/dnn/dnn-layer-depth2space-test.c
index 2c641884c1bb5a0df030ad09893795495ffd8860..958247e6753b75fdf4cc44b135757d7792bbbd73 100644
--- a/tests/dnn/dnn-layer-depth2space-test.c
+++ b/tests/dnn/dnn-layer-depth2space-test.c
@@ -81,7 +81,7 @@ static int test(void)
 
     input_indexes[0] = 0;
     params.block_size = 2;
-    dnn_execute_layer_depth2space(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_depth2space(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
diff --git a/tests/dnn/dnn-layer-mathbinary-test.c b/tests/dnn/dnn-layer-mathbinary-test.c
index c4da3f6a86dfeeb51520e64f2a1acf7f104c81c9..2e41dc1ae75f2bb2cc7625353880683080edee74 100644
--- a/tests/dnn/dnn-layer-mathbinary-test.c
+++ b/tests/dnn/dnn-layer-mathbinary-test.c
@@ -71,7 +71,7 @@ static int test_broadcast_input0(DNNMathBinaryOperation op)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_math_binary(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_math_binary(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(input) / sizeof(float); i++) {
@@ -111,7 +111,7 @@ static int test_broadcast_input1(DNNMathBinaryOperation op)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_math_binary(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_math_binary(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(input) / sizeof(float); i++) {
@@ -159,7 +159,7 @@ static int test_no_broadcast(DNNMathBinaryOperation op)
 
     input_indexes[0] = 0;
     input_indexes[1] = 1;
-    dnn_execute_layer_math_binary(operands, input_indexes, 2, &params, NULL);
+    ff_dnn_execute_layer_math_binary(operands, input_indexes, 2, &params, NULL);
 
     output = operands[2].data;
     for (int i = 0; i < sizeof(input0) / sizeof(float); i++) {
diff --git a/tests/dnn/dnn-layer-mathunary-test.c b/tests/dnn/dnn-layer-mathunary-test.c
index ce14c41311be039a13c4cfb463a1efc2d13f03ab..a8c5ab0224ca08df4ce31e389641087bc9e0121d 100644
--- a/tests/dnn/dnn-layer-mathunary-test.c
+++ b/tests/dnn/dnn-layer-mathunary-test.c
@@ -87,7 +87,7 @@ static int test(DNNMathUnaryOperation op)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_math_unary(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_math_unary(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(input) / sizeof(float); ++i) {
diff --git a/tests/dnn/dnn-layer-maximum-test.c b/tests/dnn/dnn-layer-maximum-test.c
index c9826705916d6b811b3620fb7602e180900a6110..bf22f3719fd163c8b80dbfc199dfd0edcb99d828 100644
--- a/tests/dnn/dnn-layer-maximum-test.c
+++ b/tests/dnn/dnn-layer-maximum-test.c
@@ -45,7 +45,7 @@ static int test(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_maximum(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_maximum(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(input) / sizeof(float); i++) {
diff --git a/tests/dnn/dnn-layer-pad-test.c b/tests/dnn/dnn-layer-pad-test.c
index 6a72adb3aef20200d3839011d26b6c98dad8e3b9..a8443ce3be05125386ca7f183b8b641d366421c5 100644
--- a/tests/dnn/dnn-layer-pad-test.c
+++ b/tests/dnn/dnn-layer-pad-test.c
@@ -79,7 +79,7 @@ static int test_with_mode_symmetric(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
@@ -144,7 +144,7 @@ static int test_with_mode_reflect(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
@@ -210,7 +210,7 @@ static int test_with_mode_constant(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {