Browse Source

tests/dnn: fix build issue after function name changed

tags/n4.4
Guo, Yejun 4 years ago
parent
commit
07a18ff477
8 changed files with 14 additions and 14 deletions
  1. +2
    -2
      tests/dnn/dnn-layer-avgpool-test.c
  2. +2
    -2
      tests/dnn/dnn-layer-conv2d-test.c
  3. +1
    -1
      tests/dnn/dnn-layer-dense-test.c
  4. +1
    -1
      tests/dnn/dnn-layer-depth2space-test.c
  5. +3
    -3
      tests/dnn/dnn-layer-mathbinary-test.c
  6. +1
    -1
      tests/dnn/dnn-layer-mathunary-test.c
  7. +1
    -1
      tests/dnn/dnn-layer-maximum-test.c
  8. +3
    -3
      tests/dnn/dnn-layer-pad-test.c

+ 2
- 2
tests/dnn/dnn-layer-avgpool-test.c View File

@@ -91,7 +91,7 @@ static int test_with_same(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); ++i) {
@@ -171,7 +171,7 @@ static int test_with_valid(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); ++i) {


+ 2
- 2
tests/dnn/dnn-layer-conv2d-test.c View File

@@ -118,7 +118,7 @@ static int test_with_same_dilate(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, &ctx);
+    ff_dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, &ctx);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
@@ -222,7 +222,7 @@ static int test_with_valid(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, &ctx);
+    ff_dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, &ctx);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {


+ 1
- 1
tests/dnn/dnn-layer-dense-test.c View File

@@ -107,7 +107,7 @@ static int test(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_dense(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_dense(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {


+ 1
- 1
tests/dnn/dnn-layer-depth2space-test.c View File

@@ -81,7 +81,7 @@ static int test(void)
 
     input_indexes[0] = 0;
     params.block_size = 2;
-    dnn_execute_layer_depth2space(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_depth2space(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {


+ 3
- 3
tests/dnn/dnn-layer-mathbinary-test.c View File

@@ -71,7 +71,7 @@ static int test_broadcast_input0(DNNMathBinaryOperation op)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_math_binary(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_math_binary(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(input) / sizeof(float); i++) {
@@ -111,7 +111,7 @@ static int test_broadcast_input1(DNNMathBinaryOperation op)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_math_binary(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_math_binary(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(input) / sizeof(float); i++) {
@@ -159,7 +159,7 @@ static int test_no_broadcast(DNNMathBinaryOperation op)
 
     input_indexes[0] = 0;
     input_indexes[1] = 1;
-    dnn_execute_layer_math_binary(operands, input_indexes, 2, &params, NULL);
+    ff_dnn_execute_layer_math_binary(operands, input_indexes, 2, &params, NULL);
 
     output = operands[2].data;
     for (int i = 0; i < sizeof(input0) / sizeof(float); i++) {


+ 1
- 1
tests/dnn/dnn-layer-mathunary-test.c View File

@@ -87,7 +87,7 @@ static int test(DNNMathUnaryOperation op)
    operands[1].data = NULL;
 
    input_indexes[0] = 0;
-    dnn_execute_layer_math_unary(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_math_unary(operands, input_indexes, 1, &params, NULL);
 
    output = operands[1].data;
    for (int i = 0; i < sizeof(input) / sizeof(float); ++i) {


+ 1
- 1
tests/dnn/dnn-layer-maximum-test.c View File

@@ -45,7 +45,7 @@ static int test(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_maximum(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_maximum(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(input) / sizeof(float); i++) {


+ 3
- 3
tests/dnn/dnn-layer-pad-test.c View File

@@ -79,7 +79,7 @@ static int test_with_mode_symmetric(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
@@ -144,7 +144,7 @@ static int test_with_mode_reflect(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
@@ -210,7 +210,7 @@ static int test_with_mode_constant(void)
     operands[1].data = NULL;
 
     input_indexes[0] = 0;
-    dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
+    ff_dnn_execute_layer_pad(operands, input_indexes, 1, &params, NULL);
 
     output = operands[1].data;
     for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {


Loading…
Cancel
Save