diff options
author | Michael Butler | 2018-03-22 18:37:57 -0500 |
---|---|---|
committer | Michael Butler | 2018-04-18 14:07:52 -0500 |
commit | 7ed6135471fa8b08f7d2026d0279e02be8d66d61 (patch) | |
tree | 13f6cdd88fe2286d4f7e17fc7cf110cf7edfdb62 | |
parent | fa89f009f97e51c357a0d05bfa1ee1cdbf650676 (diff) | |
download | platform-hardware-interfaces-7ed6135471fa8b08f7d2026d0279e02be8d66d61.tar.gz platform-hardware-interfaces-7ed6135471fa8b08f7d2026d0279e02be8d66d61.tar.xz platform-hardware-interfaces-7ed6135471fa8b08f7d2026d0279e02be8d66d61.zip |
NN validation tests
This CL adds validation tests for all of the existing generated models.
The strategy of this CL is this: given a valid model or request, make a
single change to invalidate the model or request, then verify that the
vendor service driver catches the inconsistency and returns
INVALID_ARGUMENT.
Bug: 67828197
Test: mma
Test: VtsHalNeuralnetworksV1_0TargetTest
Test: VtsHalNeuralnetworksV1_1TargetTest
Merged-In: I8efcdbdccc77aaf78992e52c1eac5c940fc81a03
Change-Id: I8efcdbdccc77aaf78992e52c1eac5c940fc81a03
(cherry picked from commit f76acd0312f7d47bd2e371f027a54bca581d8f8f)
23 files changed, 2392 insertions, 1126 deletions
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp index 54dd14ab..e28113bc 100644 --- a/neuralnetworks/1.0/vts/functional/Android.bp +++ b/neuralnetworks/1.0/vts/functional/Android.bp | |||
@@ -18,7 +18,6 @@ cc_library_static { | |||
18 | name: "VtsHalNeuralnetworksTest_utils", | 18 | name: "VtsHalNeuralnetworksTest_utils", |
19 | srcs: [ | 19 | srcs: [ |
20 | "Callbacks.cpp", | 20 | "Callbacks.cpp", |
21 | "Models.cpp", | ||
22 | "GeneratedTestHarness.cpp", | 21 | "GeneratedTestHarness.cpp", |
23 | ], | 22 | ], |
24 | defaults: ["VtsHalTargetTestDefaults"], | 23 | defaults: ["VtsHalTargetTestDefaults"], |
@@ -41,14 +40,17 @@ cc_library_static { | |||
41 | cc_test { | 40 | cc_test { |
42 | name: "VtsHalNeuralnetworksV1_0TargetTest", | 41 | name: "VtsHalNeuralnetworksV1_0TargetTest", |
43 | srcs: [ | 42 | srcs: [ |
44 | "VtsHalNeuralnetworksV1_0.cpp", | 43 | "BasicTests.cpp", |
45 | "VtsHalNeuralnetworksV1_0BasicTest.cpp", | 44 | "GeneratedTests.cpp", |
46 | "VtsHalNeuralnetworksV1_0GeneratedTest.cpp", | 45 | "ValidateModel.cpp", |
46 | "ValidateRequest.cpp", | ||
47 | "ValidationTests.cpp", | ||
48 | "VtsHalNeuralnetworks.cpp", | ||
47 | ], | 49 | ], |
48 | defaults: ["VtsHalTargetTestDefaults"], | 50 | defaults: ["VtsHalTargetTestDefaults"], |
49 | static_libs: [ | 51 | static_libs: [ |
50 | "android.hardware.neuralnetworks@1.0", | ||
51 | "android.hardware.neuralnetworks@1.1", | 52 | "android.hardware.neuralnetworks@1.1", |
53 | "android.hardware.neuralnetworks@1.0", | ||
52 | "android.hidl.allocator@1.0", | 54 | "android.hidl.allocator@1.0", |
53 | "android.hidl.memory@1.0", | 55 | "android.hidl.memory@1.0", |
54 | "libhidlmemory", | 56 | "libhidlmemory", |
diff --git a/neuralnetworks/1.0/vts/functional/BasicTests.cpp b/neuralnetworks/1.0/vts/functional/BasicTests.cpp new file mode 100644 index 00000000..945c4065 --- /dev/null +++ b/neuralnetworks/1.0/vts/functional/BasicTests.cpp | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2018 The Android Open Source Project | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | ||
18 | |||
19 | #include "VtsHalNeuralnetworks.h" | ||
20 | |||
21 | namespace android { | ||
22 | namespace hardware { | ||
23 | namespace neuralnetworks { | ||
24 | namespace V1_0 { | ||
25 | namespace vts { | ||
26 | namespace functional { | ||
27 | |||
28 | // create device test | ||
29 | TEST_F(NeuralnetworksHidlTest, CreateDevice) {} | ||
30 | |||
31 | // status test | ||
32 | TEST_F(NeuralnetworksHidlTest, StatusTest) { | ||
33 | Return<DeviceStatus> status = device->getStatus(); | ||
34 | ASSERT_TRUE(status.isOk()); | ||
35 | EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status)); | ||
36 | } | ||
37 | |||
38 | // initialization | ||
39 | TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) { | ||
40 | Return<void> ret = | ||
41 | device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) { | ||
42 | EXPECT_EQ(ErrorStatus::NONE, status); | ||
43 | EXPECT_LT(0.0f, capabilities.float32Performance.execTime); | ||
44 | EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage); | ||
45 | EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime); | ||
46 | EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage); | ||
47 | }); | ||
48 | EXPECT_TRUE(ret.isOk()); | ||
49 | } | ||
50 | |||
51 | } // namespace functional | ||
52 | } // namespace vts | ||
53 | } // namespace V1_0 | ||
54 | } // namespace neuralnetworks | ||
55 | } // namespace hardware | ||
56 | } // namespace android | ||
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.h b/neuralnetworks/1.0/vts/functional/Callbacks.h index 18c31670..570a4fb7 100644 --- a/neuralnetworks/1.0/vts/functional/Callbacks.h +++ b/neuralnetworks/1.0/vts/functional/Callbacks.h | |||
@@ -17,14 +17,6 @@ namespace neuralnetworks { | |||
17 | namespace V1_0 { | 17 | namespace V1_0 { |
18 | namespace implementation { | 18 | namespace implementation { |
19 | 19 | ||
20 | using ::android::hardware::hidl_array; | ||
21 | using ::android::hardware::hidl_memory; | ||
22 | using ::android::hardware::hidl_string; | ||
23 | using ::android::hardware::hidl_vec; | ||
24 | using ::android::hardware::Return; | ||
25 | using ::android::hardware::Void; | ||
26 | using ::android::sp; | ||
27 | |||
28 | /** | 20 | /** |
29 | * The CallbackBase class is used internally by the NeuralNetworks runtime to | 21 | * The CallbackBase class is used internally by the NeuralNetworks runtime to |
30 | * synchronize between different threads. An asynchronous task is launched | 22 | * synchronize between different threads. An asynchronous task is launched |
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp index 8646a4cb..4f9d5283 100644 --- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp +++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp | |||
@@ -179,7 +179,7 @@ void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool | |||
179 | } | 179 | } |
180 | } | 180 | } |
181 | 181 | ||
182 | void Execute(sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model, | 182 | void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model, |
183 | std::function<bool(int)> is_ignored, | 183 | std::function<bool(int)> is_ignored, |
184 | const std::vector<MixedTypedExampleType>& examples) { | 184 | const std::vector<MixedTypedExampleType>& examples) { |
185 | V1_0::Model model = create_model(); | 185 | V1_0::Model model = create_model(); |
@@ -223,7 +223,7 @@ void Execute(sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_ | |||
223 | EvaluatePreparedModel(preparedModel, is_ignored, examples); | 223 | EvaluatePreparedModel(preparedModel, is_ignored, examples); |
224 | } | 224 | } |
225 | 225 | ||
226 | void Execute(sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model, | 226 | void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model, |
227 | std::function<bool(int)> is_ignored, | 227 | std::function<bool(int)> is_ignored, |
228 | const std::vector<MixedTypedExampleType>& examples) { | 228 | const std::vector<MixedTypedExampleType>& examples) { |
229 | V1_1::Model model = create_model(); | 229 | V1_1::Model model = create_model(); |
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp index b99aef7f..2107333e 100644 --- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp +++ b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp | |||
@@ -16,47 +16,33 @@ | |||
16 | 16 | ||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | 17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" |
18 | 18 | ||
19 | #include "VtsHalNeuralnetworksV1_0.h" | 19 | #include "VtsHalNeuralnetworks.h" |
20 | 20 | ||
21 | #include "Callbacks.h" | 21 | #include "Callbacks.h" |
22 | #include "TestHarness.h" | 22 | #include "TestHarness.h" |
23 | #include "Utils.h" | ||
23 | 24 | ||
24 | #include <android-base/logging.h> | 25 | #include <android-base/logging.h> |
25 | #include <android/hidl/memory/1.0/IMemory.h> | 26 | #include <android/hidl/memory/1.0/IMemory.h> |
26 | #include <hidlmemory/mapping.h> | 27 | #include <hidlmemory/mapping.h> |
27 | 28 | ||
28 | using ::android::hardware::neuralnetworks::V1_0::IDevice; | ||
29 | using ::android::hardware::neuralnetworks::V1_0::IPreparedModel; | ||
30 | using ::android::hardware::neuralnetworks::V1_0::Capabilities; | ||
31 | using ::android::hardware::neuralnetworks::V1_0::DeviceStatus; | ||
32 | using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc; | ||
33 | using ::android::hardware::neuralnetworks::V1_0::Model; | ||
34 | using ::android::hardware::neuralnetworks::V1_0::OperationType; | ||
35 | using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo; | ||
36 | using ::android::hardware::Return; | ||
37 | using ::android::hardware::Void; | ||
38 | using ::android::hardware::hidl_memory; | ||
39 | using ::android::hardware::hidl_string; | ||
40 | using ::android::hardware::hidl_vec; | ||
41 | using ::android::hidl::allocator::V1_0::IAllocator; | ||
42 | using ::android::hidl::memory::V1_0::IMemory; | ||
43 | using ::android::sp; | ||
44 | |||
45 | namespace android { | 29 | namespace android { |
46 | namespace hardware { | 30 | namespace hardware { |
47 | namespace neuralnetworks { | 31 | namespace neuralnetworks { |
48 | 32 | ||
49 | namespace generated_tests { | 33 | namespace generated_tests { |
50 | using ::generated_tests::MixedTypedExampleType; | 34 | using ::generated_tests::MixedTypedExampleType; |
51 | extern void Execute(sp<IDevice>&, std::function<Model(void)>, std::function<bool(int)>, | 35 | extern void Execute(const sp<V1_0::IDevice>&, std::function<V1_0::Model(void)>, |
52 | const std::vector<MixedTypedExampleType>&); | 36 | std::function<bool(int)>, const std::vector<MixedTypedExampleType>&); |
53 | } // namespace generated_tests | 37 | } // namespace generated_tests |
54 | 38 | ||
55 | namespace V1_0 { | 39 | namespace V1_0 { |
56 | namespace vts { | 40 | namespace vts { |
57 | namespace functional { | 41 | namespace functional { |
42 | |||
58 | using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; | 43 | using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; |
59 | using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; | 44 | using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; |
45 | using ::android::nn::allocateSharedMemory; | ||
60 | 46 | ||
61 | // Mixed-typed examples | 47 | // Mixed-typed examples |
62 | typedef generated_tests::MixedTypedExampleType MixedTypedExample; | 48 | typedef generated_tests::MixedTypedExampleType MixedTypedExample; |
diff --git a/neuralnetworks/1.0/vts/functional/Models.cpp b/neuralnetworks/1.0/vts/functional/Models.cpp deleted file mode 100644 index 180286a5..00000000 --- a/neuralnetworks/1.0/vts/functional/Models.cpp +++ /dev/null | |||
@@ -1,202 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2017 The Android Open Source Project | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | ||
18 | |||
19 | #include "Models.h" | ||
20 | #include "Utils.h" | ||
21 | |||
22 | #include <android-base/logging.h> | ||
23 | #include <android/hidl/allocator/1.0/IAllocator.h> | ||
24 | #include <android/hidl/memory/1.0/IMemory.h> | ||
25 | #include <hidlmemory/mapping.h> | ||
26 | #include <vector> | ||
27 | |||
28 | using ::android::sp; | ||
29 | |||
30 | namespace android { | ||
31 | namespace hardware { | ||
32 | namespace neuralnetworks { | ||
33 | |||
34 | // create a valid model | ||
35 | V1_1::Model createValidTestModel_1_1() { | ||
36 | const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f}; | ||
37 | const uint32_t size = operand2Data.size() * sizeof(float); | ||
38 | |||
39 | const uint32_t operand1 = 0; | ||
40 | const uint32_t operand2 = 1; | ||
41 | const uint32_t operand3 = 2; | ||
42 | const uint32_t operand4 = 3; | ||
43 | |||
44 | const std::vector<Operand> operands = { | ||
45 | { | ||
46 | .type = OperandType::TENSOR_FLOAT32, | ||
47 | .dimensions = {1, 2, 2, 1}, | ||
48 | .numberOfConsumers = 1, | ||
49 | .scale = 0.0f, | ||
50 | .zeroPoint = 0, | ||
51 | .lifetime = OperandLifeTime::MODEL_INPUT, | ||
52 | .location = {.poolIndex = 0, .offset = 0, .length = 0}, | ||
53 | }, | ||
54 | { | ||
55 | .type = OperandType::TENSOR_FLOAT32, | ||
56 | .dimensions = {1, 2, 2, 1}, | ||
57 | .numberOfConsumers = 1, | ||
58 | .scale = 0.0f, | ||
59 | .zeroPoint = 0, | ||
60 | .lifetime = OperandLifeTime::CONSTANT_COPY, | ||
61 | .location = {.poolIndex = 0, .offset = 0, .length = size}, | ||
62 | }, | ||
63 | { | ||
64 | .type = OperandType::INT32, | ||
65 | .dimensions = {}, | ||
66 | .numberOfConsumers = 1, | ||
67 | .scale = 0.0f, | ||
68 | .zeroPoint = 0, | ||
69 | .lifetime = OperandLifeTime::CONSTANT_COPY, | ||
70 | .location = {.poolIndex = 0, .offset = size, .length = sizeof(int32_t)}, | ||
71 | }, | ||
72 | { | ||
73 | .type = OperandType::TENSOR_FLOAT32, | ||
74 | .dimensions = {1, 2, 2, 1}, | ||
75 | .numberOfConsumers = 0, | ||
76 | .scale = 0.0f, | ||
77 | .zeroPoint = 0, | ||
78 | .lifetime = OperandLifeTime::MODEL_OUTPUT, | ||
79 | .location = {.poolIndex = 0, .offset = 0, .length = 0}, | ||
80 | }, | ||
81 | }; | ||
82 | |||
83 | const std::vector<Operation> operations = {{ | ||
84 | .type = OperationType::ADD, .inputs = {operand1, operand2, operand3}, .outputs = {operand4}, | ||
85 | }}; | ||
86 | |||
87 | const std::vector<uint32_t> inputIndexes = {operand1}; | ||
88 | const std::vector<uint32_t> outputIndexes = {operand4}; | ||
89 | std::vector<uint8_t> operandValues( | ||
90 | reinterpret_cast<const uint8_t*>(operand2Data.data()), | ||
91 | reinterpret_cast<const uint8_t*>(operand2Data.data()) + size); | ||
92 | int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)}; | ||
93 | operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]), | ||
94 | reinterpret_cast<const uint8_t*>(&activation[1])); | ||
95 | |||
96 | const std::vector<hidl_memory> pools = {}; | ||
97 | |||
98 | return { | ||
99 | .operands = operands, | ||
100 | .operations = operations, | ||
101 | .inputIndexes = inputIndexes, | ||
102 | .outputIndexes = outputIndexes, | ||
103 | .operandValues = operandValues, | ||
104 | .pools = pools, | ||
105 | }; | ||
106 | } | ||
107 | |||
108 | // create first invalid model | ||
109 | V1_1::Model createInvalidTestModel1_1_1() { | ||
110 | Model model = createValidTestModel_1_1(); | ||
111 | model.operations[0].type = static_cast<OperationType>(0xDEADBEEF); /* INVALID */ | ||
112 | return model; | ||
113 | } | ||
114 | |||
115 | // create second invalid model | ||
116 | V1_1::Model createInvalidTestModel2_1_1() { | ||
117 | Model model = createValidTestModel_1_1(); | ||
118 | const uint32_t operand1 = 0; | ||
119 | const uint32_t operand5 = 4; // INVALID OPERAND | ||
120 | model.inputIndexes = std::vector<uint32_t>({operand1, operand5 /* INVALID OPERAND */}); | ||
121 | return model; | ||
122 | } | ||
123 | |||
124 | V1_0::Model createValidTestModel_1_0() { | ||
125 | V1_1::Model model = createValidTestModel_1_1(); | ||
126 | return nn::convertToV1_0(model); | ||
127 | } | ||
128 | |||
129 | V1_0::Model createInvalidTestModel1_1_0() { | ||
130 | V1_1::Model model = createInvalidTestModel1_1_1(); | ||
131 | return nn::convertToV1_0(model); | ||
132 | } | ||
133 | |||
134 | V1_0::Model createInvalidTestModel2_1_0() { | ||
135 | V1_1::Model model = createInvalidTestModel2_1_1(); | ||
136 | return nn::convertToV1_0(model); | ||
137 | } | ||
138 | |||
139 | // create a valid request | ||
140 | Request createValidTestRequest() { | ||
141 | std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f}; | ||
142 | std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f}; | ||
143 | const uint32_t INPUT = 0; | ||
144 | const uint32_t OUTPUT = 1; | ||
145 | |||
146 | // prepare inputs | ||
147 | uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float)); | ||
148 | uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float)); | ||
149 | std::vector<RequestArgument> inputs = {{ | ||
150 | .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {}, | ||
151 | }}; | ||
152 | std::vector<RequestArgument> outputs = {{ | ||
153 | .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {}, | ||
154 | }}; | ||
155 | std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize), | ||
156 | nn::allocateSharedMemory(outputSize)}; | ||
157 | if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) { | ||
158 | return {}; | ||
159 | } | ||
160 | |||
161 | // load data | ||
162 | sp<IMemory> inputMemory = mapMemory(pools[INPUT]); | ||
163 | sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]); | ||
164 | if (inputMemory.get() == nullptr || outputMemory.get() == nullptr) { | ||
165 | return {}; | ||
166 | } | ||
167 | float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer())); | ||
168 | float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer())); | ||
169 | if (inputPtr == nullptr || outputPtr == nullptr) { | ||
170 | return {}; | ||
171 | } | ||
172 | inputMemory->update(); | ||
173 | outputMemory->update(); | ||
174 | std::copy(inputData.begin(), inputData.end(), inputPtr); | ||
175 | std::copy(outputData.begin(), outputData.end(), outputPtr); | ||
176 | inputMemory->commit(); | ||
177 | outputMemory->commit(); | ||
178 | |||
179 | return {.inputs = inputs, .outputs = outputs, .pools = pools}; | ||
180 | } | ||
181 | |||
182 | // create first invalid request | ||
183 | Request createInvalidTestRequest1() { | ||
184 | Request request = createValidTestRequest(); | ||
185 | const uint32_t INVALID = 2; | ||
186 | std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f}; | ||
187 | uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float)); | ||
188 | request.inputs[0].location = { | ||
189 | .poolIndex = INVALID /* INVALID */, .offset = 0, .length = inputSize}; | ||
190 | return request; | ||
191 | } | ||
192 | |||
193 | // create second invalid request | ||
194 | Request createInvalidTestRequest2() { | ||
195 | Request request = createValidTestRequest(); | ||
196 | request.inputs[0].dimensions = std::vector<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8} /* INVALID */); | ||
197 | return request; | ||
198 | } | ||
199 | |||
200 | } // namespace neuralnetworks | ||
201 | } // namespace hardware | ||
202 | } // namespace android | ||
diff --git a/neuralnetworks/1.0/vts/functional/Models.h b/neuralnetworks/1.0/vts/functional/Models.h index 93982351..a1fbe927 100644 --- a/neuralnetworks/1.0/vts/functional/Models.h +++ b/neuralnetworks/1.0/vts/functional/Models.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2017 The Android Open Source Project | 2 | * Copyright (C) 2018 The Android Open Source Project |
3 | * | 3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. | 5 | * you may not use this file except in compliance with the License. |
@@ -14,29 +14,187 @@ | |||
14 | * limitations under the License. | 14 | * limitations under the License. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #ifndef VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H | ||
18 | #define VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H | ||
19 | |||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | 20 | #define LOG_TAG "neuralnetworks_hidl_hal_test" |
18 | 21 | ||
19 | #include <android/hardware/neuralnetworks/1.1/types.h> | 22 | #include "TestHarness.h" |
23 | |||
24 | #include <android/hardware/neuralnetworks/1.0/types.h> | ||
20 | 25 | ||
21 | namespace android { | 26 | namespace android { |
22 | namespace hardware { | 27 | namespace hardware { |
23 | namespace neuralnetworks { | 28 | namespace neuralnetworks { |
29 | namespace V1_0 { | ||
30 | namespace vts { | ||
31 | namespace functional { | ||
32 | |||
33 | using MixedTypedExample = generated_tests::MixedTypedExampleType; | ||
24 | 34 | ||
25 | // create V1_1 model | 35 | #define FOR_EACH_TEST_MODEL(FN) \ |
26 | V1_1::Model createValidTestModel_1_1(); | 36 | FN(add_broadcast_quant8) \ |
27 | V1_1::Model createInvalidTestModel1_1_1(); | 37 | FN(add) \ |
28 | V1_1::Model createInvalidTestModel2_1_1(); | 38 | FN(add_quant8) \ |
39 | FN(avg_pool_float_1) \ | ||
40 | FN(avg_pool_float_2) \ | ||
41 | FN(avg_pool_float_3) \ | ||
42 | FN(avg_pool_float_4) \ | ||
43 | FN(avg_pool_float_5) \ | ||
44 | FN(avg_pool_quant8_1) \ | ||
45 | FN(avg_pool_quant8_2) \ | ||
46 | FN(avg_pool_quant8_3) \ | ||
47 | FN(avg_pool_quant8_4) \ | ||
48 | FN(avg_pool_quant8_5) \ | ||
49 | FN(concat_float_1) \ | ||
50 | FN(concat_float_2) \ | ||
51 | FN(concat_float_3) \ | ||
52 | FN(concat_quant8_1) \ | ||
53 | FN(concat_quant8_2) \ | ||
54 | FN(concat_quant8_3) \ | ||
55 | FN(conv_1_h3_w2_SAME) \ | ||
56 | FN(conv_1_h3_w2_VALID) \ | ||
57 | FN(conv_3_h3_w2_SAME) \ | ||
58 | FN(conv_3_h3_w2_VALID) \ | ||
59 | FN(conv_float_2) \ | ||
60 | FN(conv_float_channels) \ | ||
61 | FN(conv_float_channels_weights_as_inputs) \ | ||
62 | FN(conv_float_large) \ | ||
63 | FN(conv_float_large_weights_as_inputs) \ | ||
64 | FN(conv_float) \ | ||
65 | FN(conv_float_weights_as_inputs) \ | ||
66 | FN(conv_quant8_2) \ | ||
67 | FN(conv_quant8_channels) \ | ||
68 | FN(conv_quant8_channels_weights_as_inputs) \ | ||
69 | FN(conv_quant8_large) \ | ||
70 | FN(conv_quant8_large_weights_as_inputs) \ | ||
71 | FN(conv_quant8) \ | ||
72 | FN(conv_quant8_overflow) \ | ||
73 | FN(conv_quant8_overflow_weights_as_inputs) \ | ||
74 | FN(conv_quant8_weights_as_inputs) \ | ||
75 | FN(depth_to_space_float_1) \ | ||
76 | FN(depth_to_space_float_2) \ | ||
77 | FN(depth_to_space_float_3) \ | ||
78 | FN(depth_to_space_quant8_1) \ | ||
79 | FN(depth_to_space_quant8_2) \ | ||
80 | FN(depthwise_conv2d_float_2) \ | ||
81 | FN(depthwise_conv2d_float_large_2) \ | ||
82 | FN(depthwise_conv2d_float_large_2_weights_as_inputs) \ | ||
83 | FN(depthwise_conv2d_float_large) \ | ||
84 | FN(depthwise_conv2d_float_large_weights_as_inputs) \ | ||
85 | FN(depthwise_conv2d_float) \ | ||
86 | FN(depthwise_conv2d_float_weights_as_inputs) \ | ||
87 | FN(depthwise_conv2d_quant8_2) \ | ||
88 | FN(depthwise_conv2d_quant8_large) \ | ||
89 | FN(depthwise_conv2d_quant8_large_weights_as_inputs) \ | ||
90 | FN(depthwise_conv2d_quant8) \ | ||
91 | FN(depthwise_conv2d_quant8_weights_as_inputs) \ | ||
92 | FN(depthwise_conv) \ | ||
93 | FN(dequantize) \ | ||
94 | FN(embedding_lookup) \ | ||
95 | FN(floor) \ | ||
96 | FN(fully_connected_float_2) \ | ||
97 | FN(fully_connected_float_large) \ | ||
98 | FN(fully_connected_float_large_weights_as_inputs) \ | ||
99 | FN(fully_connected_float) \ | ||
100 | FN(fully_connected_float_weights_as_inputs) \ | ||
101 | FN(fully_connected_quant8_2) \ | ||
102 | FN(fully_connected_quant8_large) \ | ||
103 | FN(fully_connected_quant8_large_weights_as_inputs) \ | ||
104 | FN(fully_connected_quant8) \ | ||
105 | FN(fully_connected_quant8_weights_as_inputs) \ | ||
106 | FN(hashtable_lookup_float) \ | ||
107 | FN(hashtable_lookup_quant8) \ | ||
108 | FN(l2_normalization_2) \ | ||
109 | FN(l2_normalization_large) \ | ||
110 | FN(l2_normalization) \ | ||
111 | FN(l2_pool_float_2) \ | ||
112 | FN(l2_pool_float_large) \ | ||
113 | FN(l2_pool_float) \ | ||
114 | FN(local_response_norm_float_1) \ | ||
115 | FN(local_response_norm_float_2) \ | ||
116 | FN(local_response_norm_float_3) \ | ||
117 | FN(local_response_norm_float_4) \ | ||
118 | FN(logistic_float_1) \ | ||
119 | FN(logistic_float_2) \ | ||
120 | FN(logistic_quant8_1) \ | ||
121 | FN(logistic_quant8_2) \ | ||
122 | FN(lsh_projection_2) \ | ||
123 | FN(lsh_projection) \ | ||
124 | FN(lsh_projection_weights_as_inputs) \ | ||
125 | FN(lstm2) \ | ||
126 | FN(lstm2_state2) \ | ||
127 | FN(lstm2_state) \ | ||
128 | FN(lstm3) \ | ||
129 | FN(lstm3_state2) \ | ||
130 | FN(lstm3_state3) \ | ||
131 | FN(lstm3_state) \ | ||
132 | FN(lstm) \ | ||
133 | FN(lstm_state2) \ | ||
134 | FN(lstm_state) \ | ||
135 | FN(max_pool_float_1) \ | ||
136 | FN(max_pool_float_2) \ | ||
137 | FN(max_pool_float_3) \ | ||
138 | FN(max_pool_float_4) \ | ||
139 | FN(max_pool_quant8_1) \ | ||
140 | FN(max_pool_quant8_2) \ | ||
141 | FN(max_pool_quant8_3) \ | ||
142 | FN(max_pool_quant8_4) \ | ||
143 | FN(mobilenet_224_gender_basic_fixed) \ | ||
144 | FN(mobilenet_quantized) \ | ||
145 | FN(mul_broadcast_quant8) \ | ||
146 | FN(mul) \ | ||
147 | FN(mul_quant8) \ | ||
148 | FN(mul_relu) \ | ||
149 | FN(relu1_float_1) \ | ||
150 | FN(relu1_float_2) \ | ||
151 | FN(relu1_quant8_1) \ | ||
152 | FN(relu1_quant8_2) \ | ||
153 | FN(relu6_float_1) \ | ||
154 | FN(relu6_float_2) \ | ||
155 | FN(relu6_quant8_1) \ | ||
156 | FN(relu6_quant8_2) \ | ||
157 | FN(relu_float_1) \ | ||
158 | FN(relu_float_2) \ | ||
159 | FN(relu_quant8_1) \ | ||
160 | FN(relu_quant8_2) \ | ||
161 | FN(reshape) \ | ||
162 | FN(reshape_quant8) \ | ||
163 | FN(reshape_quant8_weights_as_inputs) \ | ||
164 | FN(reshape_weights_as_inputs) \ | ||
165 | FN(resize_bilinear_2) \ | ||
166 | FN(resize_bilinear) \ | ||
167 | FN(rnn) \ | ||
168 | FN(rnn_state) \ | ||
169 | FN(softmax_float_1) \ | ||
170 | FN(softmax_float_2) \ | ||
171 | FN(softmax_quant8_1) \ | ||
172 | FN(softmax_quant8_2) \ | ||
173 | FN(space_to_depth_float_1) \ | ||
174 | FN(space_to_depth_float_2) \ | ||
175 | FN(space_to_depth_float_3) \ | ||
176 | FN(space_to_depth_quant8_1) \ | ||
177 | FN(space_to_depth_quant8_2) \ | ||
178 | FN(svdf2) \ | ||
179 | FN(svdf) \ | ||
180 | FN(svdf_state) \ | ||
181 | FN(tanh) | ||
29 | 182 | ||
30 | // create V1_0 model | 183 | #define FORWARD_DECLARE_GENERATED_OBJECTS(function) \ |
31 | V1_0::Model createValidTestModel_1_0(); | 184 | namespace function { \ |
32 | V1_0::Model createInvalidTestModel1_1_0(); | 185 | extern std::vector<MixedTypedExample> examples; \ |
33 | V1_0::Model createInvalidTestModel2_1_0(); | 186 | Model createTestModel(); \ |
187 | } | ||
34 | 188 | ||
35 | // create the request | 189 | FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS) |
36 | V1_0::Request createValidTestRequest(); | ||
37 | V1_0::Request createInvalidTestRequest1(); | ||
38 | V1_0::Request createInvalidTestRequest2(); | ||
39 | 190 | ||
191 | #undef FORWARD_DECLARE_GENERATED_OBJECTS | ||
192 | |||
193 | } // namespace functional | ||
194 | } // namespace vts | ||
195 | } // namespace V1_0 | ||
40 | } // namespace neuralnetworks | 196 | } // namespace neuralnetworks |
41 | } // namespace hardware | 197 | } // namespace hardware |
42 | } // namespace android | 198 | } // namespace android |
199 | |||
200 | #endif // VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H | ||
diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp new file mode 100644 index 00000000..4f0697e9 --- /dev/null +++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp | |||
@@ -0,0 +1,506 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2018 The Android Open Source Project | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | ||
18 | |||
19 | #include "VtsHalNeuralnetworks.h" | ||
20 | |||
21 | #include "Callbacks.h" | ||
22 | |||
23 | namespace android { | ||
24 | namespace hardware { | ||
25 | namespace neuralnetworks { | ||
26 | namespace V1_0 { | ||
27 | namespace vts { | ||
28 | namespace functional { | ||
29 | |||
30 | using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; | ||
31 | using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; | ||
32 | |||
33 | ///////////////////////// UTILITY FUNCTIONS ///////////////////////// | ||
34 | |||
35 | static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message, | ||
36 | const V1_0::Model& model) { | ||
37 | SCOPED_TRACE(message + " [getSupportedOperations]"); | ||
38 | |||
39 | Return<void> ret = | ||
40 | device->getSupportedOperations(model, [&](ErrorStatus status, const hidl_vec<bool>&) { | ||
41 | EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); | ||
42 | }); | ||
43 | EXPECT_TRUE(ret.isOk()); | ||
44 | } | ||
45 | |||
46 | static void validatePrepareModel(const sp<IDevice>& device, const std::string& message, | ||
47 | const V1_0::Model& model) { | ||
48 | SCOPED_TRACE(message + " [prepareModel]"); | ||
49 | |||
50 | sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); | ||
51 | ASSERT_NE(nullptr, preparedModelCallback.get()); | ||
52 | Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback); | ||
53 | ASSERT_TRUE(prepareLaunchStatus.isOk()); | ||
54 | ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus)); | ||
55 | |||
56 | preparedModelCallback->wait(); | ||
57 | ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); | ||
58 | ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus); | ||
59 | sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel(); | ||
60 | ASSERT_EQ(nullptr, preparedModel.get()); | ||
61 | } | ||
62 | |||
63 | // Primary validation function. This function will take a valid model, apply a | ||
64 | // mutation to it to invalidate the model, then pass it to interface calls that | ||
65 | // use the model. Note that the model here is passed by value, and any mutation | ||
66 | // to the model does not leave this function. | ||
67 | static void validate(const sp<IDevice>& device, const std::string& message, V1_0::Model model, | ||
68 | const std::function<void(Model*)>& mutation) { | ||
69 | mutation(&model); | ||
70 | validateGetSupportedOperations(device, message, model); | ||
71 | validatePrepareModel(device, message, model); | ||
72 | } | ||
73 | |||
74 | // Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation, | ||
75 | // so this is efficiently accomplished by moving the element to the end and | ||
76 | // resizing the hidl_vec to one less. | ||
77 | template <typename Type> | ||
78 | static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) { | ||
79 | if (vec) { | ||
80 | std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end()); | ||
81 | vec->resize(vec->size() - 1); | ||
82 | } | ||
83 | } | ||
84 | |||
85 | template <typename Type> | ||
86 | static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) { | ||
87 | // assume vec is valid | ||
88 | const uint32_t index = vec->size(); | ||
89 | vec->resize(index + 1); | ||
90 | (*vec)[index] = value; | ||
91 | return index; | ||
92 | } | ||
93 | |||
94 | static uint32_t addOperand(Model* model) { | ||
95 | return hidl_vec_push_back(&model->operands, | ||
96 | { | ||
97 | .type = OperandType::INT32, | ||
98 | .dimensions = {}, | ||
99 | .numberOfConsumers = 0, | ||
100 | .scale = 0.0f, | ||
101 | .zeroPoint = 0, | ||
102 | .lifetime = OperandLifeTime::MODEL_INPUT, | ||
103 | .location = {.poolIndex = 0, .offset = 0, .length = 0}, | ||
104 | }); | ||
105 | } | ||
106 | |||
107 | static uint32_t addOperand(Model* model, OperandLifeTime lifetime) { | ||
108 | uint32_t index = addOperand(model); | ||
109 | model->operands[index].numberOfConsumers = 1; | ||
110 | model->operands[index].lifetime = lifetime; | ||
111 | return index; | ||
112 | } | ||
113 | |||
114 | ///////////////////////// VALIDATE MODEL OPERAND TYPE ///////////////////////// | ||
115 | |||
116 | static const int32_t invalidOperandTypes[] = { | ||
117 | static_cast<int32_t>(OperandType::FLOAT32) - 1, // lower bound fundamental | ||
118 | static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1, // upper bound fundamental | ||
119 | static_cast<int32_t>(OperandType::OEM) - 1, // lower bound OEM | ||
120 | static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1, // upper bound OEM | ||
121 | }; | ||
122 | |||
123 | static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) { | ||
124 | for (size_t operand = 0; operand < model.operands.size(); ++operand) { | ||
125 | for (int32_t invalidOperandType : invalidOperandTypes) { | ||
126 | const std::string message = "mutateOperandTypeTest: operand " + | ||
127 | std::to_string(operand) + " set to value " + | ||
128 | std::to_string(invalidOperandType); | ||
129 | validate(device, message, model, [operand, invalidOperandType](Model* model) { | ||
130 | model->operands[operand].type = static_cast<OperandType>(invalidOperandType); | ||
131 | }); | ||
132 | } | ||
133 | } | ||
134 | } | ||
135 | |||
136 | ///////////////////////// VALIDATE OPERAND RANK ///////////////////////// | ||
137 | |||
138 | static uint32_t getInvalidRank(OperandType type) { | ||
139 | switch (type) { | ||
140 | case OperandType::FLOAT32: | ||
141 | case OperandType::INT32: | ||
142 | case OperandType::UINT32: | ||
143 | return 1; | ||
144 | case OperandType::TENSOR_FLOAT32: | ||
145 | case OperandType::TENSOR_INT32: | ||
146 | case OperandType::TENSOR_QUANT8_ASYMM: | ||
147 | return 0; | ||
148 | default: | ||
149 | return 0; | ||
150 | } | ||
151 | } | ||
152 | |||
153 | static void mutateOperandRankTest(const sp<IDevice>& device, const V1_0::Model& model) { | ||
154 | for (size_t operand = 0; operand < model.operands.size(); ++operand) { | ||
155 | const uint32_t invalidRank = getInvalidRank(model.operands[operand].type); | ||
156 | const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) + | ||
157 | " has rank of " + std::to_string(invalidRank); | ||
158 | validate(device, message, model, [operand, invalidRank](Model* model) { | ||
159 | model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0); | ||
160 | }); | ||
161 | } | ||
162 | } | ||
163 | |||
164 | ///////////////////////// VALIDATE OPERAND SCALE ///////////////////////// | ||
165 | |||
166 | static float getInvalidScale(OperandType type) { | ||
167 | switch (type) { | ||
168 | case OperandType::FLOAT32: | ||
169 | case OperandType::INT32: | ||
170 | case OperandType::UINT32: | ||
171 | case OperandType::TENSOR_FLOAT32: | ||
172 | return 1.0f; | ||
173 | case OperandType::TENSOR_INT32: | ||
174 | return -1.0f; | ||
175 | case OperandType::TENSOR_QUANT8_ASYMM: | ||
176 | return 0.0f; | ||
177 | default: | ||
178 | return 0.0f; | ||
179 | } | ||
180 | } | ||
181 | |||
182 | static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_0::Model& model) { | ||
183 | for (size_t operand = 0; operand < model.operands.size(); ++operand) { | ||
184 | const float invalidScale = getInvalidScale(model.operands[operand].type); | ||
185 | const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) + | ||
186 | " has scale of " + std::to_string(invalidScale); | ||
187 | validate(device, message, model, [operand, invalidScale](Model* model) { | ||
188 | model->operands[operand].scale = invalidScale; | ||
189 | }); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | ///////////////////////// VALIDATE OPERAND ZERO POINT ///////////////////////// | ||
194 | |||
195 | static std::vector<int32_t> getInvalidZeroPoints(OperandType type) { | ||
196 | switch (type) { | ||
197 | case OperandType::FLOAT32: | ||
198 | case OperandType::INT32: | ||
199 | case OperandType::UINT32: | ||
200 | case OperandType::TENSOR_FLOAT32: | ||
201 | case OperandType::TENSOR_INT32: | ||
202 | return {1}; | ||
203 | case OperandType::TENSOR_QUANT8_ASYMM: | ||
204 | return {-1, 256}; | ||
205 | default: | ||
206 | return {}; | ||
207 | } | ||
208 | } | ||
209 | |||
210 | static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_0::Model& model) { | ||
211 | for (size_t operand = 0; operand < model.operands.size(); ++operand) { | ||
212 | const std::vector<int32_t> invalidZeroPoints = | ||
213 | getInvalidZeroPoints(model.operands[operand].type); | ||
214 | for (int32_t invalidZeroPoint : invalidZeroPoints) { | ||
215 | const std::string message = "mutateOperandZeroPointTest: operand " + | ||
216 | std::to_string(operand) + " has zero point of " + | ||
217 | std::to_string(invalidZeroPoint); | ||
218 | validate(device, message, model, [operand, invalidZeroPoint](Model* model) { | ||
219 | model->operands[operand].zeroPoint = invalidZeroPoint; | ||
220 | }); | ||
221 | } | ||
222 | } | ||
223 | } | ||
224 | |||
225 | ///////////////////////// VALIDATE EXTRA ??? ///////////////////////// | ||
226 | |||
227 | // TODO: Operand::lifetime | ||
228 | // TODO: Operand::location | ||
229 | |||
230 | ///////////////////////// VALIDATE OPERATION OPERAND TYPE ///////////////////////// | ||
231 | |||
232 | static void mutateOperand(Operand* operand, OperandType type) { | ||
233 | Operand newOperand = *operand; | ||
234 | newOperand.type = type; | ||
235 | switch (type) { | ||
236 | case OperandType::FLOAT32: | ||
237 | case OperandType::INT32: | ||
238 | case OperandType::UINT32: | ||
239 | newOperand.dimensions = hidl_vec<uint32_t>(); | ||
240 | newOperand.scale = 0.0f; | ||
241 | newOperand.zeroPoint = 0; | ||
242 | break; | ||
243 | case OperandType::TENSOR_FLOAT32: | ||
244 | newOperand.dimensions = | ||
245 | operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1}); | ||
246 | newOperand.scale = 0.0f; | ||
247 | newOperand.zeroPoint = 0; | ||
248 | break; | ||
249 | case OperandType::TENSOR_INT32: | ||
250 | newOperand.dimensions = | ||
251 | operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1}); | ||
252 | newOperand.zeroPoint = 0; | ||
253 | break; | ||
254 | case OperandType::TENSOR_QUANT8_ASYMM: | ||
255 | newOperand.dimensions = | ||
256 | operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1}); | ||
257 | newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f; | ||
258 | break; | ||
259 | case OperandType::OEM: | ||
260 | case OperandType::TENSOR_OEM_BYTE: | ||
261 | default: | ||
262 | break; | ||
263 | } | ||
264 | *operand = newOperand; | ||
265 | } | ||
266 | |||
267 | static bool mutateOperationOperandTypeSkip(size_t operand, const V1_0::Model& model) { | ||
268 | // LSH_PROJECTION's second argument is allowed to have any type. This is the | ||
269 | // only operation that currently has a type that can be anything independent | ||
270 | // from any other type. Changing the operand type to any other type will | ||
271 | // result in a valid model for LSH_PROJECTION. If this is the case, skip the | ||
272 | // test. | ||
273 | for (const Operation& operation : model.operations) { | ||
274 | if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) { | ||
275 | return true; | ||
276 | } | ||
277 | } | ||
278 | return false; | ||
279 | } | ||
280 | |||
281 | static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) { | ||
282 | for (size_t operand = 0; operand < model.operands.size(); ++operand) { | ||
283 | if (mutateOperationOperandTypeSkip(operand, model)) { | ||
284 | continue; | ||
285 | } | ||
286 | for (OperandType invalidOperandType : hidl_enum_iterator<OperandType>{}) { | ||
287 | // Do not test OEM types | ||
288 | if (invalidOperandType == model.operands[operand].type || | ||
289 | invalidOperandType == OperandType::OEM || | ||
290 | invalidOperandType == OperandType::TENSOR_OEM_BYTE) { | ||
291 | continue; | ||
292 | } | ||
293 | const std::string message = "mutateOperationOperandTypeTest: operand " + | ||
294 | std::to_string(operand) + " set to type " + | ||
295 | toString(invalidOperandType); | ||
296 | validate(device, message, model, [operand, invalidOperandType](Model* model) { | ||
297 | mutateOperand(&model->operands[operand], invalidOperandType); | ||
298 | }); | ||
299 | } | ||
300 | } | ||
301 | } | ||
302 | |||
303 | ///////////////////////// VALIDATE MODEL OPERATION TYPE ///////////////////////// | ||
304 | |||
305 | static const int32_t invalidOperationTypes[] = { | ||
306 | static_cast<int32_t>(OperationType::ADD) - 1, // lower bound fundamental | ||
307 | static_cast<int32_t>(OperationType::TANH) + 1, // upper bound fundamental | ||
308 | static_cast<int32_t>(OperationType::OEM_OPERATION) - 1, // lower bound OEM | ||
309 | static_cast<int32_t>(OperationType::OEM_OPERATION) + 1, // upper bound OEM | ||
310 | }; | ||
311 | |||
312 | static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model& model) { | ||
313 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
314 | for (int32_t invalidOperationType : invalidOperationTypes) { | ||
315 | const std::string message = "mutateOperationTypeTest: operation " + | ||
316 | std::to_string(operation) + " set to value " + | ||
317 | std::to_string(invalidOperationType); | ||
318 | validate(device, message, model, [operation, invalidOperationType](Model* model) { | ||
319 | model->operations[operation].type = | ||
320 | static_cast<OperationType>(invalidOperationType); | ||
321 | }); | ||
322 | } | ||
323 | } | ||
324 | } | ||
325 | |||
326 | ///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX ///////////////////////// | ||
327 | |||
328 | static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, | ||
329 | const V1_0::Model& model) { | ||
330 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
331 | const uint32_t invalidOperand = model.operands.size(); | ||
332 | for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) { | ||
333 | const std::string message = "mutateOperationInputOperandIndexTest: operation " + | ||
334 | std::to_string(operation) + " input " + | ||
335 | std::to_string(input); | ||
336 | validate(device, message, model, [operation, input, invalidOperand](Model* model) { | ||
337 | model->operations[operation].inputs[input] = invalidOperand; | ||
338 | }); | ||
339 | } | ||
340 | } | ||
341 | } | ||
342 | |||
343 | ///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX ///////////////////////// | ||
344 | |||
345 | static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, | ||
346 | const V1_0::Model& model) { | ||
347 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
348 | const uint32_t invalidOperand = model.operands.size(); | ||
349 | for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) { | ||
350 | const std::string message = "mutateOperationOutputOperandIndexTest: operation " + | ||
351 | std::to_string(operation) + " output " + | ||
352 | std::to_string(output); | ||
353 | validate(device, message, model, [operation, output, invalidOperand](Model* model) { | ||
354 | model->operations[operation].outputs[output] = invalidOperand; | ||
355 | }); | ||
356 | } | ||
357 | } | ||
358 | } | ||
359 | |||
360 | ///////////////////////// REMOVE OPERAND FROM EVERYTHING ///////////////////////// | ||
361 | |||
362 | static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) { | ||
363 | if (vec) { | ||
364 | // remove elements matching "value" | ||
365 | auto last = std::remove(vec->begin(), vec->end(), value); | ||
366 | vec->resize(std::distance(vec->begin(), last)); | ||
367 | |||
368 | // decrement elements exceeding "value" | ||
369 | std::transform(vec->begin(), vec->end(), vec->begin(), | ||
370 | [value](uint32_t v) { return v > value ? v-- : v; }); | ||
371 | } | ||
372 | } | ||
373 | |||
374 | static void removeOperand(Model* model, uint32_t index) { | ||
375 | hidl_vec_removeAt(&model->operands, index); | ||
376 | for (Operation& operation : model->operations) { | ||
377 | removeValueAndDecrementGreaterValues(&operation.inputs, index); | ||
378 | removeValueAndDecrementGreaterValues(&operation.outputs, index); | ||
379 | } | ||
380 | removeValueAndDecrementGreaterValues(&model->inputIndexes, index); | ||
381 | removeValueAndDecrementGreaterValues(&model->outputIndexes, index); | ||
382 | } | ||
383 | |||
384 | static void removeOperandTest(const sp<IDevice>& device, const V1_0::Model& model) { | ||
385 | for (size_t operand = 0; operand < model.operands.size(); ++operand) { | ||
386 | const std::string message = "removeOperandTest: operand " + std::to_string(operand); | ||
387 | validate(device, message, model, | ||
388 | [operand](Model* model) { removeOperand(model, operand); }); | ||
389 | } | ||
390 | } | ||
391 | |||
392 | ///////////////////////// REMOVE OPERATION ///////////////////////// | ||
393 | |||
394 | static void removeOperation(Model* model, uint32_t index) { | ||
395 | for (uint32_t operand : model->operations[index].inputs) { | ||
396 | model->operands[operand].numberOfConsumers--; | ||
397 | } | ||
398 | hidl_vec_removeAt(&model->operations, index); | ||
399 | } | ||
400 | |||
401 | static void removeOperationTest(const sp<IDevice>& device, const V1_0::Model& model) { | ||
402 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
403 | const std::string message = "removeOperationTest: operation " + std::to_string(operation); | ||
404 | validate(device, message, model, | ||
405 | [operation](Model* model) { removeOperation(model, operation); }); | ||
406 | } | ||
407 | } | ||
408 | |||
409 | ///////////////////////// REMOVE OPERATION INPUT ///////////////////////// | ||
410 | |||
411 | static void removeOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) { | ||
412 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
413 | for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) { | ||
414 | const V1_0::Operation& op = model.operations[operation]; | ||
415 | // CONCATENATION has at least 2 inputs, with the last element being | ||
416 | // INT32. Skip this test if removing one of CONCATENATION's | ||
417 | // inputs still produces a valid model. | ||
418 | if (op.type == V1_0::OperationType::CONCATENATION && op.inputs.size() > 2 && | ||
419 | input != op.inputs.size() - 1) { | ||
420 | continue; | ||
421 | } | ||
422 | const std::string message = "removeOperationInputTest: operation " + | ||
423 | std::to_string(operation) + ", input " + | ||
424 | std::to_string(input); | ||
425 | validate(device, message, model, [operation, input](Model* model) { | ||
426 | uint32_t operand = model->operations[operation].inputs[input]; | ||
427 | model->operands[operand].numberOfConsumers--; | ||
428 | hidl_vec_removeAt(&model->operations[operation].inputs, input); | ||
429 | }); | ||
430 | } | ||
431 | } | ||
432 | } | ||
433 | |||
434 | ///////////////////////// REMOVE OPERATION OUTPUT ///////////////////////// | ||
435 | |||
436 | static void removeOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) { | ||
437 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
438 | for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) { | ||
439 | const std::string message = "removeOperationOutputTest: operation " + | ||
440 | std::to_string(operation) + ", output " + | ||
441 | std::to_string(output); | ||
442 | validate(device, message, model, [operation, output](Model* model) { | ||
443 | hidl_vec_removeAt(&model->operations[operation].outputs, output); | ||
444 | }); | ||
445 | } | ||
446 | } | ||
447 | } | ||
448 | |||
449 | ///////////////////////// MODEL VALIDATION ///////////////////////// | ||
450 | |||
451 | // TODO: remove model input | ||
452 | // TODO: remove model output | ||
453 | // TODO: add unused operation | ||
454 | |||
455 | ///////////////////////// ADD OPERATION INPUT ///////////////////////// | ||
456 | |||
457 | static void addOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) { | ||
458 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
459 | const std::string message = "addOperationInputTest: operation " + std::to_string(operation); | ||
460 | validate(device, message, model, [operation](Model* model) { | ||
461 | uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT); | ||
462 | hidl_vec_push_back(&model->operations[operation].inputs, index); | ||
463 | hidl_vec_push_back(&model->inputIndexes, index); | ||
464 | }); | ||
465 | } | ||
466 | } | ||
467 | |||
468 | ///////////////////////// ADD OPERATION OUTPUT ///////////////////////// | ||
469 | |||
470 | static void addOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) { | ||
471 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
472 | const std::string message = | ||
473 | "addOperationOutputTest: operation " + std::to_string(operation); | ||
474 | validate(device, message, model, [operation](Model* model) { | ||
475 | uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT); | ||
476 | hidl_vec_push_back(&model->operations[operation].outputs, index); | ||
477 | hidl_vec_push_back(&model->outputIndexes, index); | ||
478 | }); | ||
479 | } | ||
480 | } | ||
481 | |||
482 | ////////////////////////// ENTRY POINT ////////////////////////////// | ||
483 | |||
484 | void ValidationTest::validateModel(const V1_0::Model& model) { | ||
485 | mutateOperandTypeTest(device, model); | ||
486 | mutateOperandRankTest(device, model); | ||
487 | mutateOperandScaleTest(device, model); | ||
488 | mutateOperandZeroPointTest(device, model); | ||
489 | mutateOperationOperandTypeTest(device, model); | ||
490 | mutateOperationTypeTest(device, model); | ||
491 | mutateOperationInputOperandIndexTest(device, model); | ||
492 | mutateOperationOutputOperandIndexTest(device, model); | ||
493 | removeOperandTest(device, model); | ||
494 | removeOperationTest(device, model); | ||
495 | removeOperationInputTest(device, model); | ||
496 | removeOperationOutputTest(device, model); | ||
497 | addOperationInputTest(device, model); | ||
498 | addOperationOutputTest(device, model); | ||
499 | } | ||
500 | |||
501 | } // namespace functional | ||
502 | } // namespace vts | ||
503 | } // namespace V1_0 | ||
504 | } // namespace neuralnetworks | ||
505 | } // namespace hardware | ||
506 | } // namespace android | ||
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp new file mode 100644 index 00000000..08f2613c --- /dev/null +++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp | |||
@@ -0,0 +1,261 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2018 The Android Open Source Project | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | ||
18 | |||
19 | #include "VtsHalNeuralnetworks.h" | ||
20 | |||
21 | #include "Callbacks.h" | ||
22 | #include "TestHarness.h" | ||
23 | #include "Utils.h" | ||
24 | |||
25 | #include <android-base/logging.h> | ||
26 | #include <android/hidl/memory/1.0/IMemory.h> | ||
27 | #include <hidlmemory/mapping.h> | ||
28 | |||
29 | namespace android { | ||
30 | namespace hardware { | ||
31 | namespace neuralnetworks { | ||
32 | namespace V1_0 { | ||
33 | namespace vts { | ||
34 | namespace functional { | ||
35 | |||
36 | using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; | ||
37 | using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; | ||
38 | using ::android::hidl::memory::V1_0::IMemory; | ||
39 | using generated_tests::MixedTyped; | ||
40 | using generated_tests::MixedTypedExampleType; | ||
41 | using generated_tests::for_all; | ||
42 | |||
43 | ///////////////////////// UTILITY FUNCTIONS ///////////////////////// | ||
44 | |||
45 | static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model, | ||
46 | sp<IPreparedModel>* preparedModel) { | ||
47 | ASSERT_NE(nullptr, preparedModel); | ||
48 | |||
49 | // see if service can handle model | ||
50 | bool fullySupportsModel = false; | ||
51 | Return<void> supportedOpsLaunchStatus = device->getSupportedOperations( | ||
52 | model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) { | ||
53 | ASSERT_EQ(ErrorStatus::NONE, status); | ||
54 | ASSERT_NE(0ul, supported.size()); | ||
55 | fullySupportsModel = | ||
56 | std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; }); | ||
57 | }); | ||
58 | ASSERT_TRUE(supportedOpsLaunchStatus.isOk()); | ||
59 | |||
60 | // launch prepare model | ||
61 | sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); | ||
62 | ASSERT_NE(nullptr, preparedModelCallback.get()); | ||
63 | Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback); | ||
64 | ASSERT_TRUE(prepareLaunchStatus.isOk()); | ||
65 | ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus)); | ||
66 | |||
67 | // retrieve prepared model | ||
68 | preparedModelCallback->wait(); | ||
69 | ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); | ||
70 | *preparedModel = preparedModelCallback->getPreparedModel(); | ||
71 | |||
72 | // The getSupportedOperations call returns a list of operations that are | ||
73 | // guaranteed not to fail if prepareModel is called, and | ||
74 | // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed. | ||
75 | // If a driver has any doubt that it can prepare an operation, it must | ||
76 | // return false. So here, if a driver isn't sure if it can support an | ||
77 | // operation, but reports that it successfully prepared the model, the test | ||
78 | // can continue. | ||
79 | if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) { | ||
80 | ASSERT_EQ(nullptr, preparedModel->get()); | ||
81 | LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot " | ||
82 | "prepare model that it does not support."; | ||
83 | std::cout << "[ ] Unable to test Request validation because vendor service " | ||
84 | "cannot prepare model that it does not support." | ||
85 | << std::endl; | ||
86 | return; | ||
87 | } | ||
88 | ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus); | ||
89 | ASSERT_NE(nullptr, preparedModel->get()); | ||
90 | } | ||
91 | |||
92 | // Primary validation function. This function will take a valid request, apply a | ||
93 | // mutation to it to invalidate the request, then pass it to interface calls | ||
94 | // that use the request. Note that the request here is passed by value, and any | ||
95 | // mutation to the request does not leave this function. | ||
96 | static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message, | ||
97 | Request request, const std::function<void(Request*)>& mutation) { | ||
98 | mutation(&request); | ||
99 | SCOPED_TRACE(message + " [execute]"); | ||
100 | |||
101 | sp<ExecutionCallback> executionCallback = new ExecutionCallback(); | ||
102 | ASSERT_NE(nullptr, executionCallback.get()); | ||
103 | Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback); | ||
104 | ASSERT_TRUE(executeLaunchStatus.isOk()); | ||
105 | ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus)); | ||
106 | |||
107 | executionCallback->wait(); | ||
108 | ErrorStatus executionReturnStatus = executionCallback->getStatus(); | ||
109 | ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus); | ||
110 | } | ||
111 | |||
112 | // Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation, | ||
113 | // so this is efficiently accomplished by moving the element to the end and | ||
114 | // resizing the hidl_vec to one less. | ||
115 | template <typename Type> | ||
116 | static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) { | ||
117 | if (vec) { | ||
118 | std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end()); | ||
119 | vec->resize(vec->size() - 1); | ||
120 | } | ||
121 | } | ||
122 | |||
123 | template <typename Type> | ||
124 | static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) { | ||
125 | // assume vec is valid | ||
126 | const uint32_t index = vec->size(); | ||
127 | vec->resize(index + 1); | ||
128 | (*vec)[index] = value; | ||
129 | return index; | ||
130 | } | ||
131 | |||
132 | ///////////////////////// REMOVE INPUT //////////////////////////////////// | ||
133 | |||
134 | static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) { | ||
135 | for (size_t input = 0; input < request.inputs.size(); ++input) { | ||
136 | const std::string message = "removeInput: removed input " + std::to_string(input); | ||
137 | validate(preparedModel, message, request, | ||
138 | [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); }); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | ///////////////////////// REMOVE OUTPUT //////////////////////////////////// | ||
143 | |||
144 | static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) { | ||
145 | for (size_t output = 0; output < request.outputs.size(); ++output) { | ||
146 | const std::string message = "removeOutput: removed Output " + std::to_string(output); | ||
147 | validate(preparedModel, message, request, | ||
148 | [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); }); | ||
149 | } | ||
150 | } | ||
151 | |||
152 | ///////////////////////////// ENTRY POINT ////////////////////////////////// | ||
153 | |||
// Builds one Request per example: lays the example's input values out in a
// single shared-memory pool (pool INPUT), reserves a second pool for outputs
// (pool OUTPUT), and fills in the RequestArgument metadata (pool index,
// offset, length) for every operand. Returns {} if shared memory cannot be
// allocated or mapped.
std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples) {
    // Indices of the two memory pools inside each Request's `pools` vector.
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    std::vector<Request> requests;

    for (auto& example : examples) {
        // An example is a pair of (input values, expected output values);
        // only the inputs' sizes/contents and the outputs' sizes are used here.
        const MixedTyped& inputs = example.first;
        const MixedTyped& outputs = example.second;

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;

        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            // A zero-sized operand is encoded as "has no value".
            RequestArgument arg_empty = {
                .hasNoValue = true,
            };
            inputs_info[index] = s ? arg : arg_empty;
            inputSize += s;
        });
        // Compute offset for inputs 1 and so on: inputs are packed
        // back-to-back in the INPUT pool in index order.
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                if (!i.hasNoValue) i.location.offset = offset;
                offset += i.location.length;
            }
        }

        // Go through all outputs, initialize RequestArgument descriptors
        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute offset for outputs 1 and so on: outputs are packed
        // back-to-back in the OUTPUT pool in index order.
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        // Bail out of the whole function on allocation failure.
        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
            return {};
        }

        // map pool
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        if (inputMemory == nullptr) {
            return {};
        }
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        if (inputPtr == nullptr) {
            return {};
        }

        // initialize pool: copy each input's bytes to its computed offset,
        // bracketed by update()/commit() as the IMemory protocol requires.
        inputMemory->update();
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // TODO: handle more than one input
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });
        inputMemory->commit();

        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
    }

    return requests;
}
239 | |||
// Prepares `model` once, then runs every request-mutation test (input and
// output removal) against each request. Silently returns if the driver
// cannot prepare the model (createPreparedModel reports its own failures).
void ValidationTest::validateRequests(const V1_0::Model& model,
                                      const std::vector<Request>& requests) {
    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }

    // validate each request
    for (const Request& request : requests) {
        removeInputTest(preparedModel, request);
        removeOutputTest(preparedModel, request);
    }
}
255 | |||
256 | } // namespace functional | ||
257 | } // namespace vts | ||
258 | } // namespace V1_0 | ||
259 | } // namespace neuralnetworks | ||
260 | } // namespace hardware | ||
261 | } // namespace android | ||
diff --git a/neuralnetworks/1.0/vts/functional/ValidationTests.cpp b/neuralnetworks/1.0/vts/functional/ValidationTests.cpp new file mode 100644 index 00000000..98fc1c59 --- /dev/null +++ b/neuralnetworks/1.0/vts/functional/ValidationTests.cpp | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2018 The Android Open Source Project | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | ||
18 | |||
19 | #include "Models.h" | ||
20 | #include "VtsHalNeuralnetworks.h" | ||
21 | |||
22 | namespace android { | ||
23 | namespace hardware { | ||
24 | namespace neuralnetworks { | ||
25 | namespace V1_0 { | ||
26 | namespace vts { | ||
27 | namespace functional { | ||
28 | |||
29 | // forward declarations | ||
30 | std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples); | ||
31 | |||
32 | // generate validation tests | ||
33 | #define VTS_CURRENT_TEST_CASE(TestName) \ | ||
34 | TEST_F(ValidationTest, TestName) { \ | ||
35 | const Model model = TestName::createTestModel(); \ | ||
36 | const std::vector<Request> requests = createRequests(TestName::examples); \ | ||
37 | validateModel(model); \ | ||
38 | validateRequests(model, requests); \ | ||
39 | } | ||
40 | |||
41 | FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE) | ||
42 | |||
43 | #undef VTS_CURRENT_TEST_CASE | ||
44 | |||
45 | } // namespace functional | ||
46 | } // namespace vts | ||
47 | } // namespace V1_0 | ||
48 | } // namespace neuralnetworks | ||
49 | } // namespace hardware | ||
50 | } // namespace android | ||
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp index b14fb2c4..1ff3b668 100644 --- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp +++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp | |||
@@ -16,15 +16,7 @@ | |||
16 | 16 | ||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | 17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" |
18 | 18 | ||
19 | #include "VtsHalNeuralnetworksV1_0.h" | 19 | #include "VtsHalNeuralnetworks.h" |
20 | #include "Utils.h" | ||
21 | |||
22 | #include <android-base/logging.h> | ||
23 | |||
24 | using ::android::hardware::hidl_memory; | ||
25 | using ::android::hidl::allocator::V1_0::IAllocator; | ||
26 | using ::android::hidl::memory::V1_0::IMemory; | ||
27 | using ::android::sp; | ||
28 | 20 | ||
29 | namespace android { | 21 | namespace android { |
30 | namespace hardware { | 22 | namespace hardware { |
@@ -33,11 +25,6 @@ namespace V1_0 { | |||
33 | namespace vts { | 25 | namespace vts { |
34 | namespace functional { | 26 | namespace functional { |
35 | 27 | ||
36 | // allocator helper | ||
37 | hidl_memory allocateSharedMemory(int64_t size) { | ||
38 | return nn::allocateSharedMemory(size); | ||
39 | } | ||
40 | |||
41 | // A class for test environment setup | 28 | // A class for test environment setup |
42 | NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {} | 29 | NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {} |
43 | 30 | ||
@@ -51,23 +38,49 @@ NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() { | |||
51 | } | 38 | } |
52 | 39 | ||
53 | void NeuralnetworksHidlEnvironment::registerTestServices() { | 40 | void NeuralnetworksHidlEnvironment::registerTestServices() { |
54 | registerTestService<V1_0::IDevice>(); | 41 | registerTestService<IDevice>(); |
55 | } | 42 | } |
56 | 43 | ||
57 | // The main test class for NEURALNETWORK HIDL HAL. | 44 | // The main test class for NEURALNETWORK HIDL HAL. |
45 | NeuralnetworksHidlTest::NeuralnetworksHidlTest() {} | ||
46 | |||
58 | NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {} | 47 | NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {} |
59 | 48 | ||
60 | void NeuralnetworksHidlTest::SetUp() { | 49 | void NeuralnetworksHidlTest::SetUp() { |
61 | device = ::testing::VtsHalHidlTargetTestBase::getService<V1_0::IDevice>( | 50 | ::testing::VtsHalHidlTargetTestBase::SetUp(); |
51 | device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>( | ||
62 | NeuralnetworksHidlEnvironment::getInstance()); | 52 | NeuralnetworksHidlEnvironment::getInstance()); |
63 | ASSERT_NE(nullptr, device.get()); | 53 | ASSERT_NE(nullptr, device.get()); |
64 | } | 54 | } |
65 | 55 | ||
66 | void NeuralnetworksHidlTest::TearDown() {} | 56 | void NeuralnetworksHidlTest::TearDown() { |
57 | device = nullptr; | ||
58 | ::testing::VtsHalHidlTargetTestBase::TearDown(); | ||
59 | } | ||
67 | 60 | ||
68 | } // namespace functional | 61 | } // namespace functional |
69 | } // namespace vts | 62 | } // namespace vts |
63 | |||
64 | ::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) { | ||
65 | return os << toString(errorStatus); | ||
66 | } | ||
67 | |||
68 | ::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) { | ||
69 | return os << toString(deviceStatus); | ||
70 | } | ||
71 | |||
70 | } // namespace V1_0 | 72 | } // namespace V1_0 |
71 | } // namespace neuralnetworks | 73 | } // namespace neuralnetworks |
72 | } // namespace hardware | 74 | } // namespace hardware |
73 | } // namespace android | 75 | } // namespace android |
76 | |||
77 | using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment; | ||
78 | |||
79 | int main(int argc, char** argv) { | ||
80 | ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance()); | ||
81 | ::testing::InitGoogleTest(&argc, argv); | ||
82 | NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv); | ||
83 | |||
84 | int status = RUN_ALL_TESTS(); | ||
85 | return status; | ||
86 | } | ||
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h index fbb16074..e79129b0 100644 --- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h +++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h | |||
@@ -18,16 +18,15 @@ | |||
18 | #define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H | 18 | #define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H |
19 | 19 | ||
20 | #include <android/hardware/neuralnetworks/1.0/IDevice.h> | 20 | #include <android/hardware/neuralnetworks/1.0/IDevice.h> |
21 | #include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h> | ||
22 | #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h> | ||
23 | #include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h> | ||
24 | #include <android/hardware/neuralnetworks/1.0/types.h> | 21 | #include <android/hardware/neuralnetworks/1.0/types.h> |
25 | #include <android/hidl/allocator/1.0/IAllocator.h> | ||
26 | 22 | ||
27 | #include <VtsHalHidlTargetTestBase.h> | 23 | #include <VtsHalHidlTargetTestBase.h> |
28 | #include <VtsHalHidlTargetTestEnvBase.h> | 24 | #include <VtsHalHidlTargetTestEnvBase.h> |
25 | |||
26 | #include <android-base/macros.h> | ||
29 | #include <gtest/gtest.h> | 27 | #include <gtest/gtest.h> |
30 | #include <string> | 28 | #include <iostream> |
29 | #include <vector> | ||
31 | 30 | ||
32 | namespace android { | 31 | namespace android { |
33 | namespace hardware { | 32 | namespace hardware { |
@@ -36,47 +35,47 @@ namespace V1_0 { | |||
36 | namespace vts { | 35 | namespace vts { |
37 | namespace functional { | 36 | namespace functional { |
38 | 37 | ||
39 | hidl_memory allocateSharedMemory(int64_t size); | ||
40 | |||
41 | // A class for test environment setup | 38 | // A class for test environment setup |
42 | class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase { | 39 | class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase { |
40 | DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment); | ||
43 | NeuralnetworksHidlEnvironment(); | 41 | NeuralnetworksHidlEnvironment(); |
44 | NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete; | 42 | ~NeuralnetworksHidlEnvironment() override; |
45 | NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete; | ||
46 | NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete; | ||
47 | NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete; | ||
48 | 43 | ||
49 | public: | 44 | public: |
50 | ~NeuralnetworksHidlEnvironment() override; | ||
51 | static NeuralnetworksHidlEnvironment* getInstance(); | 45 | static NeuralnetworksHidlEnvironment* getInstance(); |
52 | void registerTestServices() override; | 46 | void registerTestServices() override; |
53 | }; | 47 | }; |
54 | 48 | ||
55 | // The main test class for NEURALNETWORKS HIDL HAL. | 49 | // The main test class for NEURALNETWORKS HIDL HAL. |
56 | class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase { | 50 | class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase { |
51 | DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest); | ||
52 | |||
57 | public: | 53 | public: |
54 | NeuralnetworksHidlTest(); | ||
58 | ~NeuralnetworksHidlTest() override; | 55 | ~NeuralnetworksHidlTest() override; |
59 | void SetUp() override; | 56 | void SetUp() override; |
60 | void TearDown() override; | 57 | void TearDown() override; |
61 | 58 | ||
62 | sp<V1_0::IDevice> device; | 59 | protected: |
60 | sp<IDevice> device; | ||
63 | }; | 61 | }; |
62 | |||
63 | // Tag for the validation tests | ||
64 | class ValidationTest : public NeuralnetworksHidlTest { | ||
65 | protected: | ||
66 | void validateModel(const Model& model); | ||
67 | void validateRequests(const Model& model, const std::vector<Request>& request); | ||
68 | }; | ||
69 | |||
70 | // Tag for the generated tests | ||
71 | class GeneratedTest : public NeuralnetworksHidlTest {}; | ||
72 | |||
64 | } // namespace functional | 73 | } // namespace functional |
65 | } // namespace vts | 74 | } // namespace vts |
66 | 75 | ||
67 | // pretty-print values for error messages | 76 | // pretty-print values for error messages |
68 | 77 | ::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus); | |
69 | template <typename CharT, typename Traits> | 78 | ::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus); |
70 | ::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os, | ||
71 | V1_0::ErrorStatus errorStatus) { | ||
72 | return os << toString(errorStatus); | ||
73 | } | ||
74 | |||
75 | template <typename CharT, typename Traits> | ||
76 | ::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os, | ||
77 | V1_0::DeviceStatus deviceStatus) { | ||
78 | return os << toString(deviceStatus); | ||
79 | } | ||
80 | 79 | ||
81 | } // namespace V1_0 | 80 | } // namespace V1_0 |
82 | } // namespace neuralnetworks | 81 | } // namespace neuralnetworks |
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp deleted file mode 100644 index 59e5b806..00000000 --- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp +++ /dev/null | |||
@@ -1,293 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2018 The Android Open Source Project | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | ||
18 | |||
19 | #include "VtsHalNeuralnetworksV1_0.h" | ||
20 | |||
21 | #include "Callbacks.h" | ||
22 | #include "Models.h" | ||
23 | #include "TestHarness.h" | ||
24 | |||
25 | #include <android-base/logging.h> | ||
26 | #include <android/hidl/memory/1.0/IMemory.h> | ||
27 | #include <hidlmemory/mapping.h> | ||
28 | |||
29 | using ::android::hardware::neuralnetworks::V1_0::IDevice; | ||
30 | using ::android::hardware::neuralnetworks::V1_0::IPreparedModel; | ||
31 | using ::android::hardware::neuralnetworks::V1_0::Capabilities; | ||
32 | using ::android::hardware::neuralnetworks::V1_0::DeviceStatus; | ||
33 | using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc; | ||
34 | using ::android::hardware::neuralnetworks::V1_0::Model; | ||
35 | using ::android::hardware::neuralnetworks::V1_0::OperationType; | ||
36 | using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo; | ||
37 | using ::android::hardware::Return; | ||
38 | using ::android::hardware::Void; | ||
39 | using ::android::hardware::hidl_memory; | ||
40 | using ::android::hardware::hidl_string; | ||
41 | using ::android::hardware::hidl_vec; | ||
42 | using ::android::hidl::allocator::V1_0::IAllocator; | ||
43 | using ::android::hidl::memory::V1_0::IMemory; | ||
44 | using ::android::sp; | ||
45 | |||
46 | namespace android { | ||
47 | namespace hardware { | ||
48 | namespace neuralnetworks { | ||
49 | namespace V1_0 { | ||
50 | namespace vts { | ||
51 | namespace functional { | ||
52 | using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; | ||
53 | using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; | ||
54 | |||
// Prepares the canonical valid V1_0 test model on `device` and returns the
// resulting IPreparedModel through `preparedModel`. On success *preparedModel
// is non-null; if the driver legitimately cannot support the model, the test
// is terminated early with *preparedModel left null (callers check for this).
static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);
    Model model = createValidTestModel_1_0();

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model (asynchronous: completion arrives via callback)
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model (blocks until the driver invokes the callback)
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations call returns a list of operations that are
    // guaranteed not to fail if prepareModel is called, and
    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
    // If a driver has any doubt that it can prepare an operation, it must
    // return false. So here, if a driver isn't sure if it can support an
    // operation, but reports that it successfully prepared the model, the test
    // can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ] Early termination of test because vendor service cannot "
                     "prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}
101 | |||
// create device test: an empty body suffices — the fixture's SetUp already
// asserts that the IDevice service was obtained.
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}

// status test: a freshly connected device must report AVAILABLE.
TEST_F(NeuralnetworksHidlTest, StatusTest) {
    Return<DeviceStatus> status = device->getStatus();
    ASSERT_TRUE(status.isOk());
    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}

// initialization: every advertised performance figure must be positive.
TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
    Return<void> ret =
        device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
            EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
            EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
            EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
        });
    EXPECT_TRUE(ret.isOk());
}

// supported operations positive test: a valid model must yield one
// supported/unsupported flag per operation.
TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {
    Model model = createValidTestModel_1_0();
    Return<void> ret = device->getSupportedOperations(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            EXPECT_EQ(ErrorStatus::NONE, status);
            EXPECT_EQ(model.operations.size(), supported.size());
        });
    EXPECT_TRUE(ret.isOk());
}

// supported operations negative test 1: an invalid model must be rejected
// with INVALID_ARGUMENT.
TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {
    Model model = createInvalidTestModel1_1_0();
    Return<void> ret = device->getSupportedOperations(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
            (void)supported;
        });
    EXPECT_TRUE(ret.isOk());
}

// supported operations negative test 2: same check with a second kind of
// invalid model.
TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
    Model model = createInvalidTestModel2_1_0();
    Return<void> ret = device->getSupportedOperations(
        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
            (void)supported;
        });
    EXPECT_TRUE(ret.isOk());
}
157 | |||
// prepare simple model positive test: a valid model must prepare cleanly
// (or terminate early if the driver does not support it — see shortcut).
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
    sp<IPreparedModel> preparedModel;
    doPrepareModelShortcut(device, &preparedModel);
}

// prepare simple model negative test 1: an invalid model must be rejected
// both at launch and in the asynchronous callback, with no prepared model.
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
    Model model = createInvalidTestModel1_1_0();
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    EXPECT_EQ(nullptr, preparedModel.get());
}

// prepare simple model negative test 2: same checks with a second kind of
// invalid model.
TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
    Model model = createInvalidTestModel2_1_0();
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    EXPECT_EQ(nullptr, preparedModel.get());
}
195 | |||
// execute simple graph positive test: run the valid test model on a valid
// request and compare the output pool's contents with the expected values.
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
    std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
    // Index of the output pool inside request.pools.
    const uint32_t OUTPUT = 1;

    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }
    Request request = createValidTestRequest();

    // Runs on the callback thread after execute() completes: maps the output
    // pool and copies the results into outputData for comparison below.
    auto postWork = [&] {
        sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]);
        if (outputMemory == nullptr) {
            return false;
        }
        float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
        if (outputPtr == nullptr) {
            return false;
        }
        outputMemory->read();
        std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
        outputMemory->commit();
        return true;
    };

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    executionCallback->on_finish(postWork);
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeLaunchStatus));

    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
    EXPECT_EQ(expectedData, outputData);
}

// execute simple graph negative test 1: an invalid request must be rejected
// with INVALID_ARGUMENT at launch and in the callback.
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }
    Request request = createInvalidTestRequest1();

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}

// execute simple graph negative test 2: same checks with a second kind of
// invalid request.
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
    if (preparedModel == nullptr) {
        return;
    }
    Request request = createInvalidTestRequest2();

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    ASSERT_NE(nullptr, executionCallback.get());
    Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
    ASSERT_TRUE(executeLaunchStatus.isOk());
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));

    executionCallback->wait();
    ErrorStatus executionReturnStatus = executionCallback->getStatus();
    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
}
276 | |||
277 | } // namespace functional | ||
278 | } // namespace vts | ||
279 | } // namespace V1_0 | ||
280 | } // namespace neuralnetworks | ||
281 | } // namespace hardware | ||
282 | } // namespace android | ||
283 | |||
using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;

// Test entry point: registers the HIDL test environment (which selects the
// vendor IDevice instance), initializes gtest, passes command-line flags to
// the VTS environment, then runs every registered test.
int main(int argc, char** argv) {
    ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
    ::testing::InitGoogleTest(&argc, argv);
    NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);

    int status = RUN_ALL_TESTS();
    return status;
}
diff --git a/neuralnetworks/1.1/vts/functional/Android.bp b/neuralnetworks/1.1/vts/functional/Android.bp index 947ca2ca..f755c20b 100644 --- a/neuralnetworks/1.1/vts/functional/Android.bp +++ b/neuralnetworks/1.1/vts/functional/Android.bp | |||
@@ -17,9 +17,12 @@ | |||
17 | cc_test { | 17 | cc_test { |
18 | name: "VtsHalNeuralnetworksV1_1TargetTest", | 18 | name: "VtsHalNeuralnetworksV1_1TargetTest", |
19 | srcs: [ | 19 | srcs: [ |
20 | "VtsHalNeuralnetworksV1_1.cpp", | 20 | "BasicTests.cpp", |
21 | "VtsHalNeuralnetworksV1_1BasicTest.cpp", | 21 | "GeneratedTests.cpp", |
22 | "VtsHalNeuralnetworksV1_1GeneratedTest.cpp", | 22 | "ValidateModel.cpp", |
23 | "ValidateRequest.cpp", | ||
24 | "ValidationTests.cpp", | ||
25 | "VtsHalNeuralnetworks.cpp", | ||
23 | ], | 26 | ], |
24 | defaults: ["VtsHalTargetTestDefaults"], | 27 | defaults: ["VtsHalTargetTestDefaults"], |
25 | static_libs: [ | 28 | static_libs: [ |
diff --git a/neuralnetworks/1.1/vts/functional/BasicTests.cpp b/neuralnetworks/1.1/vts/functional/BasicTests.cpp new file mode 100644 index 00000000..ed59a2dd --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/BasicTests.cpp | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2018 The Android Open Source Project | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | ||
18 | |||
19 | #include "VtsHalNeuralnetworks.h" | ||
20 | |||
21 | namespace android { | ||
22 | namespace hardware { | ||
23 | namespace neuralnetworks { | ||
24 | namespace V1_1 { | ||
25 | namespace vts { | ||
26 | namespace functional { | ||
27 | |||
28 | // create device test | ||
29 | TEST_F(NeuralnetworksHidlTest, CreateDevice) {} | ||
30 | |||
31 | // status test | ||
32 | TEST_F(NeuralnetworksHidlTest, StatusTest) { | ||
33 | Return<DeviceStatus> status = device->getStatus(); | ||
34 | ASSERT_TRUE(status.isOk()); | ||
35 | EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status)); | ||
36 | } | ||
37 | |||
38 | // initialization | ||
39 | TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) { | ||
40 | Return<void> ret = | ||
41 | device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) { | ||
42 | EXPECT_EQ(ErrorStatus::NONE, status); | ||
43 | EXPECT_LT(0.0f, capabilities.float32Performance.execTime); | ||
44 | EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage); | ||
45 | EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime); | ||
46 | EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage); | ||
47 | EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime); | ||
48 | EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage); | ||
49 | }); | ||
50 | EXPECT_TRUE(ret.isOk()); | ||
51 | } | ||
52 | |||
53 | } // namespace functional | ||
54 | } // namespace vts | ||
55 | } // namespace V1_1 | ||
56 | } // namespace neuralnetworks | ||
57 | } // namespace hardware | ||
58 | } // namespace android | ||
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp index 025d9fed..1f1cc7af 100644 --- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp +++ b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp | |||
@@ -16,54 +16,33 @@ | |||
16 | 16 | ||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | 17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" |
18 | 18 | ||
19 | #include "VtsHalNeuralnetworksV1_1.h" | 19 | #include "VtsHalNeuralnetworks.h" |
20 | 20 | ||
21 | #include "Callbacks.h" | 21 | #include "Callbacks.h" |
22 | #include "TestHarness.h" | 22 | #include "TestHarness.h" |
23 | #include "Utils.h" | ||
23 | 24 | ||
24 | #include <android-base/logging.h> | 25 | #include <android-base/logging.h> |
25 | #include <android/hardware/neuralnetworks/1.1/IDevice.h> | ||
26 | #include <android/hardware/neuralnetworks/1.1/types.h> | ||
27 | #include <android/hidl/memory/1.0/IMemory.h> | 26 | #include <android/hidl/memory/1.0/IMemory.h> |
28 | #include <hidlmemory/mapping.h> | 27 | #include <hidlmemory/mapping.h> |
29 | 28 | ||
30 | using ::android::hardware::neuralnetworks::V1_0::IPreparedModel; | ||
31 | using ::android::hardware::neuralnetworks::V1_0::Capabilities; | ||
32 | using ::android::hardware::neuralnetworks::V1_0::DeviceStatus; | ||
33 | using ::android::hardware::neuralnetworks::V1_0::ErrorStatus; | ||
34 | using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc; | ||
35 | using ::android::hardware::neuralnetworks::V1_0::Operand; | ||
36 | using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime; | ||
37 | using ::android::hardware::neuralnetworks::V1_0::OperandType; | ||
38 | using ::android::hardware::neuralnetworks::V1_0::Request; | ||
39 | using ::android::hardware::neuralnetworks::V1_1::IDevice; | ||
40 | using ::android::hardware::neuralnetworks::V1_1::Model; | ||
41 | using ::android::hardware::neuralnetworks::V1_1::Operation; | ||
42 | using ::android::hardware::neuralnetworks::V1_1::OperationType; | ||
43 | using ::android::hardware::Return; | ||
44 | using ::android::hardware::Void; | ||
45 | using ::android::hardware::hidl_memory; | ||
46 | using ::android::hardware::hidl_string; | ||
47 | using ::android::hardware::hidl_vec; | ||
48 | using ::android::hidl::allocator::V1_0::IAllocator; | ||
49 | using ::android::hidl::memory::V1_0::IMemory; | ||
50 | using ::android::sp; | ||
51 | |||
52 | namespace android { | 29 | namespace android { |
53 | namespace hardware { | 30 | namespace hardware { |
54 | namespace neuralnetworks { | 31 | namespace neuralnetworks { |
55 | 32 | ||
56 | namespace generated_tests { | 33 | namespace generated_tests { |
57 | using ::generated_tests::MixedTypedExampleType; | 34 | using ::generated_tests::MixedTypedExampleType; |
58 | extern void Execute(sp<V1_1::IDevice>&, std::function<Model(void)>, std::function<bool(int)>, | 35 | extern void Execute(const sp<V1_1::IDevice>&, std::function<V1_1::Model(void)>, |
59 | const std::vector<MixedTypedExampleType>&); | 36 | std::function<bool(int)>, const std::vector<MixedTypedExampleType>&); |
60 | } // namespace generated_tests | 37 | } // namespace generated_tests |
61 | 38 | ||
62 | namespace V1_1 { | 39 | namespace V1_1 { |
63 | namespace vts { | 40 | namespace vts { |
64 | namespace functional { | 41 | namespace functional { |
42 | |||
65 | using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; | 43 | using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; |
66 | using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; | 44 | using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; |
45 | using ::android::nn::allocateSharedMemory; | ||
67 | 46 | ||
68 | // Mixed-typed examples | 47 | // Mixed-typed examples |
69 | typedef generated_tests::MixedTypedExampleType MixedTypedExample; | 48 | typedef generated_tests::MixedTypedExampleType MixedTypedExample; |
diff --git a/neuralnetworks/1.1/vts/functional/Models.h b/neuralnetworks/1.1/vts/functional/Models.h new file mode 100644 index 00000000..c3cadb5f --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/Models.h | |||
@@ -0,0 +1,323 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2018 The Android Open Source Project | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | #ifndef VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H | ||
18 | #define VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H | ||
19 | |||
20 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | ||
21 | |||
22 | #include "TestHarness.h" | ||
23 | |||
24 | #include <android/hardware/neuralnetworks/1.0/types.h> | ||
25 | #include <android/hardware/neuralnetworks/1.1/types.h> | ||
26 | |||
27 | namespace android { | ||
28 | namespace hardware { | ||
29 | namespace neuralnetworks { | ||
30 | namespace V1_1 { | ||
31 | namespace vts { | ||
32 | namespace functional { | ||
33 | |||
34 | using MixedTypedExample = generated_tests::MixedTypedExampleType; | ||
35 | |||
36 | #define FOR_EACH_TEST_MODEL(FN) \ | ||
37 | FN(add) \ | ||
38 | FN(add_broadcast_quant8) \ | ||
39 | FN(add_quant8) \ | ||
40 | FN(add_relaxed) \ | ||
41 | FN(avg_pool_float_1) \ | ||
42 | FN(avg_pool_float_1_relaxed) \ | ||
43 | FN(avg_pool_float_2) \ | ||
44 | FN(avg_pool_float_2_relaxed) \ | ||
45 | FN(avg_pool_float_3) \ | ||
46 | FN(avg_pool_float_3_relaxed) \ | ||
47 | FN(avg_pool_float_4) \ | ||
48 | FN(avg_pool_float_4_relaxed) \ | ||
49 | FN(avg_pool_float_5) \ | ||
50 | FN(avg_pool_quant8_1) \ | ||
51 | FN(avg_pool_quant8_2) \ | ||
52 | FN(avg_pool_quant8_3) \ | ||
53 | FN(avg_pool_quant8_4) \ | ||
54 | FN(avg_pool_quant8_5) \ | ||
55 | FN(batch_to_space) \ | ||
56 | FN(batch_to_space_float_1) \ | ||
57 | FN(batch_to_space_quant8_1) \ | ||
58 | FN(concat_float_1) \ | ||
59 | FN(concat_float_1_relaxed) \ | ||
60 | FN(concat_float_2) \ | ||
61 | FN(concat_float_2_relaxed) \ | ||
62 | FN(concat_float_3) \ | ||
63 | FN(concat_float_3_relaxed) \ | ||
64 | FN(concat_quant8_1) \ | ||
65 | FN(concat_quant8_2) \ | ||
66 | FN(concat_quant8_3) \ | ||
67 | FN(conv_1_h3_w2_SAME) \ | ||
68 | FN(conv_1_h3_w2_SAME_relaxed) \ | ||
69 | FN(conv_1_h3_w2_VALID) \ | ||
70 | FN(conv_1_h3_w2_VALID_relaxed) \ | ||
71 | FN(conv_3_h3_w2_SAME) \ | ||
72 | FN(conv_3_h3_w2_SAME_relaxed) \ | ||
73 | FN(conv_3_h3_w2_VALID) \ | ||
74 | FN(conv_3_h3_w2_VALID_relaxed) \ | ||
75 | FN(conv_float) \ | ||
76 | FN(conv_float_2) \ | ||
77 | FN(conv_float_channels) \ | ||
78 | FN(conv_float_channels_relaxed) \ | ||
79 | FN(conv_float_channels_weights_as_inputs) \ | ||
80 | FN(conv_float_channels_weights_as_inputs_relaxed) \ | ||
81 | FN(conv_float_large) \ | ||
82 | FN(conv_float_large_relaxed) \ | ||
83 | FN(conv_float_large_weights_as_inputs) \ | ||
84 | FN(conv_float_large_weights_as_inputs_relaxed) \ | ||
85 | FN(conv_float_relaxed) \ | ||
86 | FN(conv_float_weights_as_inputs) \ | ||
87 | FN(conv_float_weights_as_inputs_relaxed) \ | ||
88 | FN(conv_quant8) \ | ||
89 | FN(conv_quant8_2) \ | ||
90 | FN(conv_quant8_channels) \ | ||
91 | FN(conv_quant8_channels_weights_as_inputs) \ | ||
92 | FN(conv_quant8_large) \ | ||
93 | FN(conv_quant8_large_weights_as_inputs) \ | ||
94 | FN(conv_quant8_overflow) \ | ||
95 | FN(conv_quant8_overflow_weights_as_inputs) \ | ||
96 | FN(conv_quant8_weights_as_inputs) \ | ||
97 | FN(depth_to_space_float_1) \ | ||
98 | FN(depth_to_space_float_1_relaxed) \ | ||
99 | FN(depth_to_space_float_2) \ | ||
100 | FN(depth_to_space_float_2_relaxed) \ | ||
101 | FN(depth_to_space_float_3) \ | ||
102 | FN(depth_to_space_float_3_relaxed) \ | ||
103 | FN(depth_to_space_quant8_1) \ | ||
104 | FN(depth_to_space_quant8_2) \ | ||
105 | FN(depthwise_conv) \ | ||
106 | FN(depthwise_conv2d_float) \ | ||
107 | FN(depthwise_conv2d_float_2) \ | ||
108 | FN(depthwise_conv2d_float_large) \ | ||
109 | FN(depthwise_conv2d_float_large_2) \ | ||
110 | FN(depthwise_conv2d_float_large_2_weights_as_inputs) \ | ||
111 | FN(depthwise_conv2d_float_large_relaxed) \ | ||
112 | FN(depthwise_conv2d_float_large_weights_as_inputs) \ | ||
113 | FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed) \ | ||
114 | FN(depthwise_conv2d_float_weights_as_inputs) \ | ||
115 | FN(depthwise_conv2d_quant8) \ | ||
116 | FN(depthwise_conv2d_quant8_2) \ | ||
117 | FN(depthwise_conv2d_quant8_large) \ | ||
118 | FN(depthwise_conv2d_quant8_large_weights_as_inputs) \ | ||
119 | FN(depthwise_conv2d_quant8_weights_as_inputs) \ | ||
120 | FN(depthwise_conv_relaxed) \ | ||
121 | FN(dequantize) \ | ||
122 | FN(div) \ | ||
123 | FN(embedding_lookup) \ | ||
124 | FN(embedding_lookup_relaxed) \ | ||
125 | FN(floor) \ | ||
126 | FN(floor_relaxed) \ | ||
127 | FN(fully_connected_float) \ | ||
128 | FN(fully_connected_float_2) \ | ||
129 | FN(fully_connected_float_large) \ | ||
130 | FN(fully_connected_float_large_weights_as_inputs) \ | ||
131 | FN(fully_connected_float_relaxed) \ | ||
132 | FN(fully_connected_float_weights_as_inputs) \ | ||
133 | FN(fully_connected_float_weights_as_inputs_relaxed) \ | ||
134 | FN(fully_connected_quant8) \ | ||
135 | FN(fully_connected_quant8_2) \ | ||
136 | FN(fully_connected_quant8_large) \ | ||
137 | FN(fully_connected_quant8_large_weights_as_inputs) \ | ||
138 | FN(fully_connected_quant8_weights_as_inputs) \ | ||
139 | FN(hashtable_lookup_float) \ | ||
140 | FN(hashtable_lookup_float_relaxed) \ | ||
141 | FN(hashtable_lookup_quant8) \ | ||
142 | FN(l2_normalization) \ | ||
143 | FN(l2_normalization_2) \ | ||
144 | FN(l2_normalization_large) \ | ||
145 | FN(l2_normalization_large_relaxed) \ | ||
146 | FN(l2_normalization_relaxed) \ | ||
147 | FN(l2_pool_float) \ | ||
148 | FN(l2_pool_float_2) \ | ||
149 | FN(l2_pool_float_large) \ | ||
150 | FN(l2_pool_float_relaxed) \ | ||
151 | FN(local_response_norm_float_1) \ | ||
152 | FN(local_response_norm_float_1_relaxed) \ | ||
153 | FN(local_response_norm_float_2) \ | ||
154 | FN(local_response_norm_float_2_relaxed) \ | ||
155 | FN(local_response_norm_float_3) \ | ||
156 | FN(local_response_norm_float_3_relaxed) \ | ||
157 | FN(local_response_norm_float_4) \ | ||
158 | FN(local_response_norm_float_4_relaxed) \ | ||
159 | FN(logistic_float_1) \ | ||
160 | FN(logistic_float_1_relaxed) \ | ||
161 | FN(logistic_float_2) \ | ||
162 | FN(logistic_float_2_relaxed) \ | ||
163 | FN(logistic_quant8_1) \ | ||
164 | FN(logistic_quant8_2) \ | ||
165 | FN(lsh_projection) \ | ||
166 | FN(lsh_projection_2) \ | ||
167 | FN(lsh_projection_2_relaxed) \ | ||
168 | FN(lsh_projection_relaxed) \ | ||
169 | FN(lsh_projection_weights_as_inputs) \ | ||
170 | FN(lsh_projection_weights_as_inputs_relaxed) \ | ||
171 | FN(lstm) \ | ||
172 | FN(lstm2) \ | ||
173 | FN(lstm2_relaxed) \ | ||
174 | FN(lstm2_state) \ | ||
175 | FN(lstm2_state2) \ | ||
176 | FN(lstm2_state2_relaxed) \ | ||
177 | FN(lstm2_state_relaxed) \ | ||
178 | FN(lstm3) \ | ||
179 | FN(lstm3_relaxed) \ | ||
180 | FN(lstm3_state) \ | ||
181 | FN(lstm3_state2) \ | ||
182 | FN(lstm3_state2_relaxed) \ | ||
183 | FN(lstm3_state3) \ | ||
184 | FN(lstm3_state3_relaxed) \ | ||
185 | FN(lstm3_state_relaxed) \ | ||
186 | FN(lstm_relaxed) \ | ||
187 | FN(lstm_state) \ | ||
188 | FN(lstm_state2) \ | ||
189 | FN(lstm_state2_relaxed) \ | ||
190 | FN(lstm_state_relaxed) \ | ||
191 | FN(max_pool_float_1) \ | ||
192 | FN(max_pool_float_1_relaxed) \ | ||
193 | FN(max_pool_float_2) \ | ||
194 | FN(max_pool_float_2_relaxed) \ | ||
195 | FN(max_pool_float_3) \ | ||
196 | FN(max_pool_float_3_relaxed) \ | ||
197 | FN(max_pool_float_4) \ | ||
198 | FN(max_pool_quant8_1) \ | ||
199 | FN(max_pool_quant8_2) \ | ||
200 | FN(max_pool_quant8_3) \ | ||
201 | FN(max_pool_quant8_4) \ | ||
202 | FN(mean) \ | ||
203 | FN(mean_float_1) \ | ||
204 | FN(mean_float_2) \ | ||
205 | FN(mean_quant8_1) \ | ||
206 | FN(mean_quant8_2) \ | ||
207 | FN(mobilenet_224_gender_basic_fixed) \ | ||
208 | FN(mobilenet_224_gender_basic_fixed_relaxed) \ | ||
209 | FN(mobilenet_quantized) \ | ||
210 | FN(mul) \ | ||
211 | FN(mul_broadcast_quant8) \ | ||
212 | FN(mul_quant8) \ | ||
213 | FN(mul_relaxed) \ | ||
214 | FN(mul_relu) \ | ||
215 | FN(mul_relu_relaxed) \ | ||
216 | FN(pad) \ | ||
217 | FN(pad_float_1) \ | ||
218 | FN(relu1_float_1) \ | ||
219 | FN(relu1_float_1_relaxed) \ | ||
220 | FN(relu1_float_2) \ | ||
221 | FN(relu1_float_2_relaxed) \ | ||
222 | FN(relu1_quant8_1) \ | ||
223 | FN(relu1_quant8_2) \ | ||
224 | FN(relu6_float_1) \ | ||
225 | FN(relu6_float_1_relaxed) \ | ||
226 | FN(relu6_float_2) \ | ||
227 | FN(relu6_float_2_relaxed) \ | ||
228 | FN(relu6_quant8_1) \ | ||
229 | FN(relu6_quant8_2) \ | ||
230 | FN(relu_float_1) \ | ||
231 | FN(relu_float_1_relaxed) \ | ||
232 | FN(relu_float_2) \ | ||
233 | FN(relu_quant8_1) \ | ||
234 | FN(relu_quant8_2) \ | ||
235 | FN(reshape) \ | ||
236 | FN(reshape_quant8) \ | ||
237 | FN(reshape_quant8_weights_as_inputs) \ | ||
238 | FN(reshape_relaxed) \ | ||
239 | FN(reshape_weights_as_inputs) \ | ||
240 | FN(reshape_weights_as_inputs_relaxed) \ | ||
241 | FN(resize_bilinear) \ | ||
242 | FN(resize_bilinear_2) \ | ||
243 | FN(resize_bilinear_relaxed) \ | ||
244 | FN(rnn) \ | ||
245 | FN(rnn_relaxed) \ | ||
246 | FN(rnn_state) \ | ||
247 | FN(rnn_state_relaxed) \ | ||
248 | FN(softmax_float_1) \ | ||
249 | FN(softmax_float_1_relaxed) \ | ||
250 | FN(softmax_float_2) \ | ||
251 | FN(softmax_float_2_relaxed) \ | ||
252 | FN(softmax_quant8_1) \ | ||
253 | FN(softmax_quant8_2) \ | ||
254 | FN(space_to_batch) \ | ||
255 | FN(space_to_batch_float_1) \ | ||
256 | FN(space_to_batch_float_2) \ | ||
257 | FN(space_to_batch_float_3) \ | ||
258 | FN(space_to_batch_quant8_1) \ | ||
259 | FN(space_to_batch_quant8_2) \ | ||
260 | FN(space_to_batch_quant8_3) \ | ||
261 | FN(space_to_depth_float_1) \ | ||
262 | FN(space_to_depth_float_1_relaxed) \ | ||
263 | FN(space_to_depth_float_2) \ | ||
264 | FN(space_to_depth_float_2_relaxed) \ | ||
265 | FN(space_to_depth_float_3) \ | ||
266 | FN(space_to_depth_float_3_relaxed) \ | ||
267 | FN(space_to_depth_quant8_1) \ | ||
268 | FN(space_to_depth_quant8_2) \ | ||
269 | FN(squeeze) \ | ||
270 | FN(squeeze_float_1) \ | ||
271 | FN(squeeze_quant8_1) \ | ||
272 | FN(strided_slice) \ | ||
273 | FN(strided_slice_float_1) \ | ||
274 | FN(strided_slice_float_10) \ | ||
275 | FN(strided_slice_float_2) \ | ||
276 | FN(strided_slice_float_3) \ | ||
277 | FN(strided_slice_float_4) \ | ||
278 | FN(strided_slice_float_5) \ | ||
279 | FN(strided_slice_float_6) \ | ||
280 | FN(strided_slice_float_7) \ | ||
281 | FN(strided_slice_float_8) \ | ||
282 | FN(strided_slice_float_9) \ | ||
283 | FN(strided_slice_qaunt8_10) \ | ||
284 | FN(strided_slice_quant8_1) \ | ||
285 | FN(strided_slice_quant8_2) \ | ||
286 | FN(strided_slice_quant8_3) \ | ||
287 | FN(strided_slice_quant8_4) \ | ||
288 | FN(strided_slice_quant8_5) \ | ||
289 | FN(strided_slice_quant8_6) \ | ||
290 | FN(strided_slice_quant8_7) \ | ||
291 | FN(strided_slice_quant8_8) \ | ||
292 | FN(strided_slice_quant8_9) \ | ||
293 | FN(sub) \ | ||
294 | FN(svdf) \ | ||
295 | FN(svdf2) \ | ||
296 | FN(svdf2_relaxed) \ | ||
297 | FN(svdf_relaxed) \ | ||
298 | FN(svdf_state) \ | ||
299 | FN(svdf_state_relaxed) \ | ||
300 | FN(tanh) \ | ||
301 | FN(tanh_relaxed) \ | ||
302 | FN(transpose) \ | ||
303 | FN(transpose_float_1) \ | ||
304 | FN(transpose_quant8_1) | ||
305 | |||
306 | #define FORWARD_DECLARE_GENERATED_OBJECTS(function) \ | ||
307 | namespace function { \ | ||
308 | extern std::vector<MixedTypedExample> examples; \ | ||
309 | Model createTestModel(); \ | ||
310 | } | ||
311 | |||
312 | FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS) | ||
313 | |||
314 | #undef FORWARD_DECLARE_GENERATED_OBJECTS | ||
315 | |||
316 | } // namespace functional | ||
317 | } // namespace vts | ||
318 | } // namespace V1_1 | ||
319 | } // namespace neuralnetworks | ||
320 | } // namespace hardware | ||
321 | } // namespace android | ||
322 | |||
323 | #endif // VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H | ||
diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp new file mode 100644 index 00000000..7a20e26f --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp | |||
@@ -0,0 +1,513 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2018 The Android Open Source Project | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | ||
18 | |||
19 | #include "VtsHalNeuralnetworks.h" | ||
20 | |||
21 | #include "Callbacks.h" | ||
22 | |||
23 | namespace android { | ||
24 | namespace hardware { | ||
25 | namespace neuralnetworks { | ||
26 | namespace V1_1 { | ||
27 | |||
28 | using V1_0::IPreparedModel; | ||
29 | using V1_0::Operand; | ||
30 | using V1_0::OperandLifeTime; | ||
31 | using V1_0::OperandType; | ||
32 | |||
33 | namespace vts { | ||
34 | namespace functional { | ||
35 | |||
36 | using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; | ||
37 | using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; | ||
38 | |||
39 | ///////////////////////// UTILITY FUNCTIONS ///////////////////////// | ||
40 | |||
41 | static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message, | ||
42 | const V1_1::Model& model) { | ||
43 | SCOPED_TRACE(message + " [getSupportedOperations_1_1]"); | ||
44 | |||
45 | Return<void> ret = | ||
46 | device->getSupportedOperations_1_1(model, [&](ErrorStatus status, const hidl_vec<bool>&) { | ||
47 | EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); | ||
48 | }); | ||
49 | EXPECT_TRUE(ret.isOk()); | ||
50 | } | ||
51 | |||
52 | static void validatePrepareModel(const sp<IDevice>& device, const std::string& message, | ||
53 | const V1_1::Model& model) { | ||
54 | SCOPED_TRACE(message + " [prepareModel_1_1]"); | ||
55 | |||
56 | sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); | ||
57 | ASSERT_NE(nullptr, preparedModelCallback.get()); | ||
58 | Return<ErrorStatus> prepareLaunchStatus = | ||
59 | device->prepareModel_1_1(model, preparedModelCallback); | ||
60 | ASSERT_TRUE(prepareLaunchStatus.isOk()); | ||
61 | ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus)); | ||
62 | |||
63 | preparedModelCallback->wait(); | ||
64 | ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); | ||
65 | ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus); | ||
66 | sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel(); | ||
67 | ASSERT_EQ(nullptr, preparedModel.get()); | ||
68 | } | ||
69 | |||
70 | // Primary validation function. This function will take a valid model, apply a | ||
71 | // mutation to it to invalidate the model, then pass it to interface calls that | ||
72 | // use the model. Note that the model here is passed by value, and any mutation | ||
73 | // to the model does not leave this function. | ||
74 | static void validate(const sp<IDevice>& device, const std::string& message, V1_1::Model model, | ||
75 | const std::function<void(Model*)>& mutation) { | ||
76 | mutation(&model); | ||
77 | validateGetSupportedOperations(device, message, model); | ||
78 | validatePrepareModel(device, message, model); | ||
79 | } | ||
80 | |||
81 | // Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation, | ||
82 | // so this is efficiently accomplished by moving the element to the end and | ||
83 | // resizing the hidl_vec to one less. | ||
84 | template <typename Type> | ||
85 | static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) { | ||
86 | if (vec) { | ||
87 | std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end()); | ||
88 | vec->resize(vec->size() - 1); | ||
89 | } | ||
90 | } | ||
91 | |||
92 | template <typename Type> | ||
93 | static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) { | ||
94 | // assume vec is valid | ||
95 | const uint32_t index = vec->size(); | ||
96 | vec->resize(index + 1); | ||
97 | (*vec)[index] = value; | ||
98 | return index; | ||
99 | } | ||
100 | |||
101 | static uint32_t addOperand(Model* model) { | ||
102 | return hidl_vec_push_back(&model->operands, | ||
103 | { | ||
104 | .type = OperandType::INT32, | ||
105 | .dimensions = {}, | ||
106 | .numberOfConsumers = 0, | ||
107 | .scale = 0.0f, | ||
108 | .zeroPoint = 0, | ||
109 | .lifetime = OperandLifeTime::MODEL_INPUT, | ||
110 | .location = {.poolIndex = 0, .offset = 0, .length = 0}, | ||
111 | }); | ||
112 | } | ||
113 | |||
114 | static uint32_t addOperand(Model* model, OperandLifeTime lifetime) { | ||
115 | uint32_t index = addOperand(model); | ||
116 | model->operands[index].numberOfConsumers = 1; | ||
117 | model->operands[index].lifetime = lifetime; | ||
118 | return index; | ||
119 | } | ||
120 | |||
121 | ///////////////////////// VALIDATE MODEL OPERAND TYPE ///////////////////////// | ||
122 | |||
123 | static const int32_t invalidOperandTypes[] = { | ||
124 | static_cast<int32_t>(OperandType::FLOAT32) - 1, // lower bound fundamental | ||
125 | static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1, // upper bound fundamental | ||
126 | static_cast<int32_t>(OperandType::OEM) - 1, // lower bound OEM | ||
127 | static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1, // upper bound OEM | ||
128 | }; | ||
129 | |||
130 | static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) { | ||
131 | for (size_t operand = 0; operand < model.operands.size(); ++operand) { | ||
132 | for (int32_t invalidOperandType : invalidOperandTypes) { | ||
133 | const std::string message = "mutateOperandTypeTest: operand " + | ||
134 | std::to_string(operand) + " set to value " + | ||
135 | std::to_string(invalidOperandType); | ||
136 | validate(device, message, model, [operand, invalidOperandType](Model* model) { | ||
137 | model->operands[operand].type = static_cast<OperandType>(invalidOperandType); | ||
138 | }); | ||
139 | } | ||
140 | } | ||
141 | } | ||
142 | |||
143 | ///////////////////////// VALIDATE OPERAND RANK ///////////////////////// | ||
144 | |||
145 | static uint32_t getInvalidRank(OperandType type) { | ||
146 | switch (type) { | ||
147 | case OperandType::FLOAT32: | ||
148 | case OperandType::INT32: | ||
149 | case OperandType::UINT32: | ||
150 | return 1; | ||
151 | case OperandType::TENSOR_FLOAT32: | ||
152 | case OperandType::TENSOR_INT32: | ||
153 | case OperandType::TENSOR_QUANT8_ASYMM: | ||
154 | return 0; | ||
155 | default: | ||
156 | return 0; | ||
157 | } | ||
158 | } | ||
159 | |||
160 | static void mutateOperandRankTest(const sp<IDevice>& device, const V1_1::Model& model) { | ||
161 | for (size_t operand = 0; operand < model.operands.size(); ++operand) { | ||
162 | const uint32_t invalidRank = getInvalidRank(model.operands[operand].type); | ||
163 | const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) + | ||
164 | " has rank of " + std::to_string(invalidRank); | ||
165 | validate(device, message, model, [operand, invalidRank](Model* model) { | ||
166 | model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0); | ||
167 | }); | ||
168 | } | ||
169 | } | ||
170 | |||
171 | ///////////////////////// VALIDATE OPERAND SCALE ///////////////////////// | ||
172 | |||
173 | static float getInvalidScale(OperandType type) { | ||
174 | switch (type) { | ||
175 | case OperandType::FLOAT32: | ||
176 | case OperandType::INT32: | ||
177 | case OperandType::UINT32: | ||
178 | case OperandType::TENSOR_FLOAT32: | ||
179 | return 1.0f; | ||
180 | case OperandType::TENSOR_INT32: | ||
181 | return -1.0f; | ||
182 | case OperandType::TENSOR_QUANT8_ASYMM: | ||
183 | return 0.0f; | ||
184 | default: | ||
185 | return 0.0f; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_1::Model& model) { | ||
190 | for (size_t operand = 0; operand < model.operands.size(); ++operand) { | ||
191 | const float invalidScale = getInvalidScale(model.operands[operand].type); | ||
192 | const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) + | ||
193 | " has scale of " + std::to_string(invalidScale); | ||
194 | validate(device, message, model, [operand, invalidScale](Model* model) { | ||
195 | model->operands[operand].scale = invalidScale; | ||
196 | }); | ||
197 | } | ||
198 | } | ||
199 | |||
200 | ///////////////////////// VALIDATE OPERAND ZERO POINT ///////////////////////// | ||
201 | |||
202 | static std::vector<int32_t> getInvalidZeroPoints(OperandType type) { | ||
203 | switch (type) { | ||
204 | case OperandType::FLOAT32: | ||
205 | case OperandType::INT32: | ||
206 | case OperandType::UINT32: | ||
207 | case OperandType::TENSOR_FLOAT32: | ||
208 | case OperandType::TENSOR_INT32: | ||
209 | return {1}; | ||
210 | case OperandType::TENSOR_QUANT8_ASYMM: | ||
211 | return {-1, 256}; | ||
212 | default: | ||
213 | return {}; | ||
214 | } | ||
215 | } | ||
216 | |||
217 | static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_1::Model& model) { | ||
218 | for (size_t operand = 0; operand < model.operands.size(); ++operand) { | ||
219 | const std::vector<int32_t> invalidZeroPoints = | ||
220 | getInvalidZeroPoints(model.operands[operand].type); | ||
221 | for (int32_t invalidZeroPoint : invalidZeroPoints) { | ||
222 | const std::string message = "mutateOperandZeroPointTest: operand " + | ||
223 | std::to_string(operand) + " has zero point of " + | ||
224 | std::to_string(invalidZeroPoint); | ||
225 | validate(device, message, model, [operand, invalidZeroPoint](Model* model) { | ||
226 | model->operands[operand].zeroPoint = invalidZeroPoint; | ||
227 | }); | ||
228 | } | ||
229 | } | ||
230 | } | ||
231 | |||
232 | ///////////////////////// VALIDATE EXTRA ??? ///////////////////////// | ||
233 | |||
234 | // TODO: Operand::lifetime | ||
235 | // TODO: Operand::location | ||
236 | |||
237 | ///////////////////////// VALIDATE OPERATION OPERAND TYPE ///////////////////////// | ||
238 | |||
239 | static void mutateOperand(Operand* operand, OperandType type) { | ||
240 | Operand newOperand = *operand; | ||
241 | newOperand.type = type; | ||
242 | switch (type) { | ||
243 | case OperandType::FLOAT32: | ||
244 | case OperandType::INT32: | ||
245 | case OperandType::UINT32: | ||
246 | newOperand.dimensions = hidl_vec<uint32_t>(); | ||
247 | newOperand.scale = 0.0f; | ||
248 | newOperand.zeroPoint = 0; | ||
249 | break; | ||
250 | case OperandType::TENSOR_FLOAT32: | ||
251 | newOperand.dimensions = | ||
252 | operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1}); | ||
253 | newOperand.scale = 0.0f; | ||
254 | newOperand.zeroPoint = 0; | ||
255 | break; | ||
256 | case OperandType::TENSOR_INT32: | ||
257 | newOperand.dimensions = | ||
258 | operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1}); | ||
259 | newOperand.zeroPoint = 0; | ||
260 | break; | ||
261 | case OperandType::TENSOR_QUANT8_ASYMM: | ||
262 | newOperand.dimensions = | ||
263 | operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1}); | ||
264 | newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f; | ||
265 | break; | ||
266 | case OperandType::OEM: | ||
267 | case OperandType::TENSOR_OEM_BYTE: | ||
268 | default: | ||
269 | break; | ||
270 | } | ||
271 | *operand = newOperand; | ||
272 | } | ||
273 | |||
274 | static bool mutateOperationOperandTypeSkip(size_t operand, const V1_1::Model& model) { | ||
275 | // LSH_PROJECTION's second argument is allowed to have any type. This is the | ||
276 | // only operation that currently has a type that can be anything independent | ||
277 | // from any other type. Changing the operand type to any other type will | ||
278 | // result in a valid model for LSH_PROJECTION. If this is the case, skip the | ||
279 | // test. | ||
280 | for (const Operation& operation : model.operations) { | ||
281 | if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) { | ||
282 | return true; | ||
283 | } | ||
284 | } | ||
285 | return false; | ||
286 | } | ||
287 | |||
288 | static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) { | ||
289 | for (size_t operand = 0; operand < model.operands.size(); ++operand) { | ||
290 | if (mutateOperationOperandTypeSkip(operand, model)) { | ||
291 | continue; | ||
292 | } | ||
293 | for (OperandType invalidOperandType : hidl_enum_iterator<OperandType>{}) { | ||
294 | // Do not test OEM types | ||
295 | if (invalidOperandType == model.operands[operand].type || | ||
296 | invalidOperandType == OperandType::OEM || | ||
297 | invalidOperandType == OperandType::TENSOR_OEM_BYTE) { | ||
298 | continue; | ||
299 | } | ||
300 | const std::string message = "mutateOperationOperandTypeTest: operand " + | ||
301 | std::to_string(operand) + " set to type " + | ||
302 | toString(invalidOperandType); | ||
303 | validate(device, message, model, [operand, invalidOperandType](Model* model) { | ||
304 | mutateOperand(&model->operands[operand], invalidOperandType); | ||
305 | }); | ||
306 | } | ||
307 | } | ||
308 | } | ||
309 | |||
310 | ///////////////////////// VALIDATE MODEL OPERATION TYPE ///////////////////////// | ||
311 | |||
// Out-of-range OperationType values used to probe that drivers reject
// unknown operation codes: one value just outside each end of the
// fundamental operation range and of the OEM operation range.
static const int32_t invalidOperationTypes[] = {
    static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
    static_cast<int32_t>(OperationType::TRANSPOSE) + 1,      // upper bound fundamental
    static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
    static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
};
318 | |||
319 | static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_1::Model& model) { | ||
320 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
321 | for (int32_t invalidOperationType : invalidOperationTypes) { | ||
322 | const std::string message = "mutateOperationTypeTest: operation " + | ||
323 | std::to_string(operation) + " set to value " + | ||
324 | std::to_string(invalidOperationType); | ||
325 | validate(device, message, model, [operation, invalidOperationType](Model* model) { | ||
326 | model->operations[operation].type = | ||
327 | static_cast<OperationType>(invalidOperationType); | ||
328 | }); | ||
329 | } | ||
330 | } | ||
331 | } | ||
332 | |||
333 | ///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX ///////////////////////// | ||
334 | |||
335 | static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, | ||
336 | const V1_1::Model& model) { | ||
337 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
338 | const uint32_t invalidOperand = model.operands.size(); | ||
339 | for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) { | ||
340 | const std::string message = "mutateOperationInputOperandIndexTest: operation " + | ||
341 | std::to_string(operation) + " input " + | ||
342 | std::to_string(input); | ||
343 | validate(device, message, model, [operation, input, invalidOperand](Model* model) { | ||
344 | model->operations[operation].inputs[input] = invalidOperand; | ||
345 | }); | ||
346 | } | ||
347 | } | ||
348 | } | ||
349 | |||
350 | ///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX ///////////////////////// | ||
351 | |||
352 | static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, | ||
353 | const V1_1::Model& model) { | ||
354 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
355 | const uint32_t invalidOperand = model.operands.size(); | ||
356 | for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) { | ||
357 | const std::string message = "mutateOperationOutputOperandIndexTest: operation " + | ||
358 | std::to_string(operation) + " output " + | ||
359 | std::to_string(output); | ||
360 | validate(device, message, model, [operation, output, invalidOperand](Model* model) { | ||
361 | model->operations[operation].outputs[output] = invalidOperand; | ||
362 | }); | ||
363 | } | ||
364 | } | ||
365 | } | ||
366 | |||
367 | ///////////////////////// REMOVE OPERAND FROM EVERYTHING ///////////////////////// | ||
368 | |||
369 | static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) { | ||
370 | if (vec) { | ||
371 | // remove elements matching "value" | ||
372 | auto last = std::remove(vec->begin(), vec->end(), value); | ||
373 | vec->resize(std::distance(vec->begin(), last)); | ||
374 | |||
375 | // decrement elements exceeding "value" | ||
376 | std::transform(vec->begin(), vec->end(), vec->begin(), | ||
377 | [value](uint32_t v) { return v > value ? v-- : v; }); | ||
378 | } | ||
379 | } | ||
380 | |||
381 | static void removeOperand(Model* model, uint32_t index) { | ||
382 | hidl_vec_removeAt(&model->operands, index); | ||
383 | for (Operation& operation : model->operations) { | ||
384 | removeValueAndDecrementGreaterValues(&operation.inputs, index); | ||
385 | removeValueAndDecrementGreaterValues(&operation.outputs, index); | ||
386 | } | ||
387 | removeValueAndDecrementGreaterValues(&model->inputIndexes, index); | ||
388 | removeValueAndDecrementGreaterValues(&model->outputIndexes, index); | ||
389 | } | ||
390 | |||
391 | static void removeOperandTest(const sp<IDevice>& device, const V1_1::Model& model) { | ||
392 | for (size_t operand = 0; operand < model.operands.size(); ++operand) { | ||
393 | const std::string message = "removeOperandTest: operand " + std::to_string(operand); | ||
394 | validate(device, message, model, | ||
395 | [operand](Model* model) { removeOperand(model, operand); }); | ||
396 | } | ||
397 | } | ||
398 | |||
399 | ///////////////////////// REMOVE OPERATION ///////////////////////// | ||
400 | |||
401 | static void removeOperation(Model* model, uint32_t index) { | ||
402 | for (uint32_t operand : model->operations[index].inputs) { | ||
403 | model->operands[operand].numberOfConsumers--; | ||
404 | } | ||
405 | hidl_vec_removeAt(&model->operations, index); | ||
406 | } | ||
407 | |||
408 | static void removeOperationTest(const sp<IDevice>& device, const V1_1::Model& model) { | ||
409 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
410 | const std::string message = "removeOperationTest: operation " + std::to_string(operation); | ||
411 | validate(device, message, model, | ||
412 | [operation](Model* model) { removeOperation(model, operation); }); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | ///////////////////////// REMOVE OPERATION INPUT ///////////////////////// | ||
417 | |||
// Drops one input at a time from each operation and verifies the service
// rejects the resulting model.
static void removeOperationInputTest(const sp<IDevice>& device, const V1_1::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
            const V1_1::Operation& op = model.operations[operation];
            // CONCATENATION has at least 2 inputs, with the last element being
            // INT32 (the axis). Removing one of the variadic tensor inputs
            // (any input other than the last) still produces a valid model,
            // so skip that case.
            if (op.type == V1_1::OperationType::CONCATENATION && op.inputs.size() > 2 &&
                input != op.inputs.size() - 1) {
                continue;
            }
            const std::string message = "removeOperationInputTest: operation " +
                                        std::to_string(operation) + ", input " +
                                        std::to_string(input);
            validate(device, message, model, [operation, input](Model* model) {
                uint32_t operand = model->operations[operation].inputs[input];
                // Keep the consumer count consistent with the remaining uses.
                model->operands[operand].numberOfConsumers--;
                hidl_vec_removeAt(&model->operations[operation].inputs, input);
            });
        }
    }
}
440 | |||
441 | ///////////////////////// REMOVE OPERATION OUTPUT ///////////////////////// | ||
442 | |||
443 | static void removeOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) { | ||
444 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
445 | for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) { | ||
446 | const std::string message = "removeOperationOutputTest: operation " + | ||
447 | std::to_string(operation) + ", output " + | ||
448 | std::to_string(output); | ||
449 | validate(device, message, model, [operation, output](Model* model) { | ||
450 | hidl_vec_removeAt(&model->operations[operation].outputs, output); | ||
451 | }); | ||
452 | } | ||
453 | } | ||
454 | } | ||
455 | |||
456 | ///////////////////////// MODEL VALIDATION ///////////////////////// | ||
457 | |||
458 | // TODO: remove model input | ||
459 | // TODO: remove model output | ||
460 | // TODO: add unused operation | ||
461 | |||
462 | ///////////////////////// ADD OPERATION INPUT ///////////////////////// | ||
463 | |||
464 | static void addOperationInputTest(const sp<IDevice>& device, const V1_1::Model& model) { | ||
465 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
466 | const std::string message = "addOperationInputTest: operation " + std::to_string(operation); | ||
467 | validate(device, message, model, [operation](Model* model) { | ||
468 | uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT); | ||
469 | hidl_vec_push_back(&model->operations[operation].inputs, index); | ||
470 | hidl_vec_push_back(&model->inputIndexes, index); | ||
471 | }); | ||
472 | } | ||
473 | } | ||
474 | |||
475 | ///////////////////////// ADD OPERATION OUTPUT ///////////////////////// | ||
476 | |||
477 | static void addOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) { | ||
478 | for (size_t operation = 0; operation < model.operations.size(); ++operation) { | ||
479 | const std::string message = | ||
480 | "addOperationOutputTest: operation " + std::to_string(operation); | ||
481 | validate(device, message, model, [operation](Model* model) { | ||
482 | uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT); | ||
483 | hidl_vec_push_back(&model->operations[operation].outputs, index); | ||
484 | hidl_vec_push_back(&model->outputIndexes, index); | ||
485 | }); | ||
486 | } | ||
487 | } | ||
488 | |||
489 | ////////////////////////// ENTRY POINT ////////////////////////////// | ||
490 | |||
// Entry point for model validation. Each helper takes the valid model,
// applies a single invalidating mutation, and expects the vendor service to
// reject the mutated model with INVALID_ARGUMENT.
void ValidationTest::validateModel(const V1_1::Model& model) {
    mutateOperandTypeTest(device, model);
    mutateOperandRankTest(device, model);
    mutateOperandScaleTest(device, model);
    mutateOperandZeroPointTest(device, model);
    mutateOperationOperandTypeTest(device, model);
    mutateOperationTypeTest(device, model);
    mutateOperationInputOperandIndexTest(device, model);
    mutateOperationOutputOperandIndexTest(device, model);
    removeOperandTest(device, model);
    removeOperationTest(device, model);
    removeOperationInputTest(device, model);
    removeOperationOutputTest(device, model);
    addOperationInputTest(device, model);
    addOperationOutputTest(device, model);
}
507 | |||
508 | } // namespace functional | ||
509 | } // namespace vts | ||
510 | } // namespace V1_1 | ||
511 | } // namespace neuralnetworks | ||
512 | } // namespace hardware | ||
513 | } // namespace android | ||
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp new file mode 100644 index 00000000..bd966144 --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp | |||
@@ -0,0 +1,262 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2018 The Android Open Source Project | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | ||
18 | |||
19 | #include "VtsHalNeuralnetworks.h" | ||
20 | |||
21 | #include "Callbacks.h" | ||
22 | #include "TestHarness.h" | ||
23 | #include "Utils.h" | ||
24 | |||
25 | #include <android-base/logging.h> | ||
26 | #include <android/hidl/memory/1.0/IMemory.h> | ||
27 | #include <hidlmemory/mapping.h> | ||
28 | |||
29 | namespace android { | ||
30 | namespace hardware { | ||
31 | namespace neuralnetworks { | ||
32 | namespace V1_1 { | ||
33 | namespace vts { | ||
34 | namespace functional { | ||
35 | |||
36 | using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; | ||
37 | using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; | ||
38 | using ::android::hidl::memory::V1_0::IMemory; | ||
39 | using generated_tests::MixedTyped; | ||
40 | using generated_tests::MixedTypedExampleType; | ||
41 | using generated_tests::for_all; | ||
42 | |||
43 | ///////////////////////// UTILITY FUNCTIONS ///////////////////////// | ||
44 | |||
// Prepares "model" on the service and returns the result through the
// "preparedModel" out-parameter. On success *preparedModel is non-null. If
// the service reports it does not fully support the model AND preparation
// fails, *preparedModel is left null and a skip message is logged — a driver
// may legitimately be unable to prepare a model it does not support.
static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);

    // see if service can handle model: it is "fully supported" only if the
    // driver claims support for every operation
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model (asynchronous; completion is reported through the
    // callback object)
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus =
        device->prepareModel_1_1(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model (blocks until the callback fires)
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations_1_1 call returns a list of operations that
    // are guaranteed not to fail if prepareModel_1_1 is called, and
    // 'fullySupportsModel' is true iff the entire model is guaranteed.
    // If a driver has any doubt that it can prepare an operation, it must
    // return false. So here, if a driver isn't sure if it can support an
    // operation, but reports that it successfully prepared the model, the
    // test can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[ ] Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}
92 | |||
93 | // Primary validation function. This function will take a valid request, apply a | ||
94 | // mutation to it to invalidate the request, then pass it to interface calls | ||
95 | // that use the request. Note that the request here is passed by value, and any | ||
96 | // mutation to the request does not leave this function. | ||
97 | static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message, | ||
98 | Request request, const std::function<void(Request*)>& mutation) { | ||
99 | mutation(&request); | ||
100 | SCOPED_TRACE(message + " [execute]"); | ||
101 | |||
102 | sp<ExecutionCallback> executionCallback = new ExecutionCallback(); | ||
103 | ASSERT_NE(nullptr, executionCallback.get()); | ||
104 | Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback); | ||
105 | ASSERT_TRUE(executeLaunchStatus.isOk()); | ||
106 | ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus)); | ||
107 | |||
108 | executionCallback->wait(); | ||
109 | ErrorStatus executionReturnStatus = executionCallback->getStatus(); | ||
110 | ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus); | ||
111 | } | ||
112 | |||
113 | // Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation, | ||
114 | // so this is efficiently accomplished by moving the element to the end and | ||
115 | // resizing the hidl_vec to one less. | ||
116 | template <typename Type> | ||
117 | static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) { | ||
118 | if (vec) { | ||
119 | std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end()); | ||
120 | vec->resize(vec->size() - 1); | ||
121 | } | ||
122 | } | ||
123 | |||
124 | template <typename Type> | ||
125 | static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) { | ||
126 | // assume vec is valid | ||
127 | const uint32_t index = vec->size(); | ||
128 | vec->resize(index + 1); | ||
129 | (*vec)[index] = value; | ||
130 | return index; | ||
131 | } | ||
132 | |||
133 | ///////////////////////// REMOVE INPUT //////////////////////////////////// | ||
134 | |||
135 | static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) { | ||
136 | for (size_t input = 0; input < request.inputs.size(); ++input) { | ||
137 | const std::string message = "removeInput: removed input " + std::to_string(input); | ||
138 | validate(preparedModel, message, request, | ||
139 | [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); }); | ||
140 | } | ||
141 | } | ||
142 | |||
143 | ///////////////////////// REMOVE OUTPUT //////////////////////////////////// | ||
144 | |||
145 | static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) { | ||
146 | for (size_t output = 0; output < request.outputs.size(); ++output) { | ||
147 | const std::string message = "removeOutput: removed Output " + std::to_string(output); | ||
148 | validate(preparedModel, message, request, | ||
149 | [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); }); | ||
150 | } | ||
151 | } | ||
152 | |||
153 | ///////////////////////////// ENTRY POINT ////////////////////////////////// | ||
154 | |||
// Builds one Request per generated example. All example inputs are laid out
// contiguously in shared-memory pool INPUT and the outputs are sized into
// pool OUTPUT; each operand gets a RequestArgument describing its pool
// index, byte offset, and length. Returns an empty vector if shared memory
// cannot be allocated or mapped.
std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples) {
    const uint32_t INPUT = 0;   // pool index holding every input
    const uint32_t OUTPUT = 1;  // pool index reserved for every output

    std::vector<Request> requests;

    for (auto& example : examples) {
        const MixedTyped& inputs = example.first;
        const MixedTyped& outputs = example.second;

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;

        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            // A zero-sized input is represented as "no value".
            RequestArgument arg_empty = {
                .hasNoValue = true,
            };
            inputs_info[index] = s ? arg : arg_empty;
            inputSize += s;
        });
        // Compute offset for inputs 1 and so on (inputs are packed
        // back-to-back in the INPUT pool)
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                if (!i.hasNoValue) i.location.offset = offset;
                offset += i.location.length;
            }
        }

        // Go through all outputs, initialize RequestArgument descriptors
        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute offset for outputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
            // Allocation failed; abandon request creation entirely.
            return {};
        }

        // map pool
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        if (inputMemory == nullptr) {
            return {};
        }
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        if (inputPtr == nullptr) {
            return {};
        }

        // initialize pool: copy each example input to its computed offset,
        // bracketed by update()/commit() as required for shared memory writes
        inputMemory->update();
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // TODO: handle more than one input
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });
        inputMemory->commit();

        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
    }

    return requests;
}
240 | |||
// Entry point for request validation: prepare the model once, then run each
// request-mutation test against every example request.
void ValidationTest::validateRequests(const V1_1::Model& model,
                                      const std::vector<Request>& requests) {
    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    if (preparedModel == nullptr) {
        // Service legitimately could not prepare this model; nothing to test.
        return;
    }

    // validate each request
    for (const Request& request : requests) {
        removeInputTest(preparedModel, request);
        removeOutputTest(preparedModel, request);
    }
}
256 | |||
257 | } // namespace functional | ||
258 | } // namespace vts | ||
259 | } // namespace V1_1 | ||
260 | } // namespace neuralnetworks | ||
261 | } // namespace hardware | ||
262 | } // namespace android | ||
diff --git a/neuralnetworks/1.1/vts/functional/ValidationTests.cpp b/neuralnetworks/1.1/vts/functional/ValidationTests.cpp new file mode 100644 index 00000000..1c35ba84 --- /dev/null +++ b/neuralnetworks/1.1/vts/functional/ValidationTests.cpp | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2018 The Android Open Source Project | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | ||
18 | |||
19 | #include "Models.h" | ||
20 | #include "VtsHalNeuralnetworks.h" | ||
21 | |||
22 | namespace android { | ||
23 | namespace hardware { | ||
24 | namespace neuralnetworks { | ||
25 | namespace V1_1 { | ||
26 | namespace vts { | ||
27 | namespace functional { | ||
28 | |||
// forward declarations
// NOTE(review): ValidateRequest.cpp defines createRequests taking
// std::vector<MixedTypedExampleType>. Confirm that MixedTypedExample is an
// alias for that type (e.g. declared in Models.h); otherwise this
// declaration will not match the definition and the tests will fail to link.
std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);

// generate validation tests
// For each generated test model: build the model and its example requests,
// then run both the model-mutation and request-mutation validation suites.
#define VTS_CURRENT_TEST_CASE(TestName)                                           \
    TEST_F(ValidationTest, TestName) {                                            \
        const Model model = TestName::createTestModel();                          \
        const std::vector<Request> requests = createRequests(TestName::examples); \
        validateModel(model);                                                     \
        validateRequests(model, requests);                                        \
    }

FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)

#undef VTS_CURRENT_TEST_CASE
44 | |||
45 | } // namespace functional | ||
46 | } // namespace vts | ||
47 | } // namespace V1_1 | ||
48 | } // namespace neuralnetworks | ||
49 | } // namespace hardware | ||
50 | } // namespace android | ||
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp index b1d3be78..62381e67 100644 --- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp +++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp | |||
@@ -16,16 +16,7 @@ | |||
16 | 16 | ||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | 17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" |
18 | 18 | ||
19 | #include "VtsHalNeuralnetworksV1_1.h" | 19 | #include "VtsHalNeuralnetworks.h" |
20 | #include "Utils.h" | ||
21 | |||
22 | #include <android-base/logging.h> | ||
23 | #include <hidlmemory/mapping.h> | ||
24 | |||
25 | using ::android::hardware::hidl_memory; | ||
26 | using ::android::hidl::allocator::V1_0::IAllocator; | ||
27 | using ::android::hidl::memory::V1_0::IMemory; | ||
28 | using ::android::sp; | ||
29 | 20 | ||
30 | namespace android { | 21 | namespace android { |
31 | namespace hardware { | 22 | namespace hardware { |
@@ -34,11 +25,6 @@ namespace V1_1 { | |||
34 | namespace vts { | 25 | namespace vts { |
35 | namespace functional { | 26 | namespace functional { |
36 | 27 | ||
37 | // allocator helper | ||
38 | hidl_memory allocateSharedMemory(int64_t size) { | ||
39 | return nn::allocateSharedMemory(size); | ||
40 | } | ||
41 | |||
42 | // A class for test environment setup | 28 | // A class for test environment setup |
43 | NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {} | 29 | NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {} |
44 | 30 | ||
@@ -52,23 +38,49 @@ NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() { | |||
52 | } | 38 | } |
53 | 39 | ||
54 | void NeuralnetworksHidlEnvironment::registerTestServices() { | 40 | void NeuralnetworksHidlEnvironment::registerTestServices() { |
55 | registerTestService<V1_1::IDevice>(); | 41 | registerTestService<IDevice>(); |
56 | } | 42 | } |
57 | 43 | ||
58 | // The main test class for NEURALNETWORK HIDL HAL. | 44 | // The main test class for NEURALNETWORK HIDL HAL. |
45 | NeuralnetworksHidlTest::NeuralnetworksHidlTest() {} | ||
46 | |||
59 | NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {} | 47 | NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {} |
60 | 48 | ||
61 | void NeuralnetworksHidlTest::SetUp() { | 49 | void NeuralnetworksHidlTest::SetUp() { |
62 | device = ::testing::VtsHalHidlTargetTestBase::getService<V1_1::IDevice>( | 50 | ::testing::VtsHalHidlTargetTestBase::SetUp(); |
51 | device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>( | ||
63 | NeuralnetworksHidlEnvironment::getInstance()); | 52 | NeuralnetworksHidlEnvironment::getInstance()); |
64 | ASSERT_NE(nullptr, device.get()); | 53 | ASSERT_NE(nullptr, device.get()); |
65 | } | 54 | } |
66 | 55 | ||
67 | void NeuralnetworksHidlTest::TearDown() {} | 56 | void NeuralnetworksHidlTest::TearDown() { |
57 | device = nullptr; | ||
58 | ::testing::VtsHalHidlTargetTestBase::TearDown(); | ||
59 | } | ||
68 | 60 | ||
69 | } // namespace functional | 61 | } // namespace functional |
70 | } // namespace vts | 62 | } // namespace vts |
63 | |||
64 | ::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) { | ||
65 | return os << toString(errorStatus); | ||
66 | } | ||
67 | |||
68 | ::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) { | ||
69 | return os << toString(deviceStatus); | ||
70 | } | ||
71 | |||
71 | } // namespace V1_1 | 72 | } // namespace V1_1 |
72 | } // namespace neuralnetworks | 73 | } // namespace neuralnetworks |
73 | } // namespace hardware | 74 | } // namespace hardware |
74 | } // namespace android | 75 | } // namespace android |
76 | |||
77 | using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment; | ||
78 | |||
79 | int main(int argc, char** argv) { | ||
80 | ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance()); | ||
81 | ::testing::InitGoogleTest(&argc, argv); | ||
82 | NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv); | ||
83 | |||
84 | int status = RUN_ALL_TESTS(); | ||
85 | return status; | ||
86 | } | ||
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h index 426246ce..0050e52d 100644 --- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h +++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h | |||
@@ -17,65 +17,71 @@ | |||
17 | #ifndef VTS_HAL_NEURALNETWORKS_V1_1_H | 17 | #ifndef VTS_HAL_NEURALNETWORKS_V1_1_H |
18 | #define VTS_HAL_NEURALNETWORKS_V1_1_H | 18 | #define VTS_HAL_NEURALNETWORKS_V1_1_H |
19 | 19 | ||
20 | #include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h> | 20 | #include <android/hardware/neuralnetworks/1.0/types.h> |
21 | #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h> | ||
22 | #include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h> | ||
23 | #include <android/hardware/neuralnetworks/1.1/IDevice.h> | 21 | #include <android/hardware/neuralnetworks/1.1/IDevice.h> |
24 | #include <android/hardware/neuralnetworks/1.1/types.h> | 22 | #include <android/hardware/neuralnetworks/1.1/types.h> |
25 | #include <android/hidl/allocator/1.0/IAllocator.h> | ||
26 | 23 | ||
27 | #include <VtsHalHidlTargetTestBase.h> | 24 | #include <VtsHalHidlTargetTestBase.h> |
28 | #include <VtsHalHidlTargetTestEnvBase.h> | 25 | #include <VtsHalHidlTargetTestEnvBase.h> |
26 | |||
27 | #include <android-base/macros.h> | ||
29 | #include <gtest/gtest.h> | 28 | #include <gtest/gtest.h> |
30 | #include <string> | 29 | #include <iostream> |
30 | #include <vector> | ||
31 | 31 | ||
32 | namespace android { | 32 | namespace android { |
33 | namespace hardware { | 33 | namespace hardware { |
34 | namespace neuralnetworks { | 34 | namespace neuralnetworks { |
35 | namespace V1_1 { | 35 | namespace V1_1 { |
36 | |||
37 | using V1_0::Request; | ||
38 | using V1_0::DeviceStatus; | ||
39 | using V1_0::ErrorStatus; | ||
40 | |||
36 | namespace vts { | 41 | namespace vts { |
37 | namespace functional { | 42 | namespace functional { |
38 | hidl_memory allocateSharedMemory(int64_t size); | ||
39 | 43 | ||
40 | // A class for test environment setup | 44 | // A class for test environment setup |
41 | class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase { | 45 | class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase { |
46 | DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment); | ||
42 | NeuralnetworksHidlEnvironment(); | 47 | NeuralnetworksHidlEnvironment(); |
43 | NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete; | 48 | ~NeuralnetworksHidlEnvironment() override; |
44 | NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete; | ||
45 | NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete; | ||
46 | NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete; | ||
47 | 49 | ||
48 | public: | 50 | public: |
49 | ~NeuralnetworksHidlEnvironment() override; | ||
50 | static NeuralnetworksHidlEnvironment* getInstance(); | 51 | static NeuralnetworksHidlEnvironment* getInstance(); |
51 | void registerTestServices() override; | 52 | void registerTestServices() override; |
52 | }; | 53 | }; |
53 | 54 | ||
54 | // The main test class for NEURALNETWORKS HIDL HAL. | 55 | // The main test class for NEURALNETWORKS HIDL HAL. |
55 | class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase { | 56 | class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase { |
57 | DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest); | ||
58 | |||
56 | public: | 59 | public: |
60 | NeuralnetworksHidlTest(); | ||
57 | ~NeuralnetworksHidlTest() override; | 61 | ~NeuralnetworksHidlTest() override; |
58 | void SetUp() override; | 62 | void SetUp() override; |
59 | void TearDown() override; | 63 | void TearDown() override; |
60 | 64 | ||
61 | sp<V1_1::IDevice> device; | 65 | protected: |
66 | sp<IDevice> device; | ||
62 | }; | 67 | }; |
68 | |||
69 | // Tag for the validation tests | ||
70 | class ValidationTest : public NeuralnetworksHidlTest { | ||
71 | protected: | ||
72 | void validateModel(const Model& model); | ||
73 | void validateRequests(const Model& model, const std::vector<Request>& request); | ||
74 | }; | ||
75 | |||
76 | // Tag for the generated tests | ||
77 | class GeneratedTest : public NeuralnetworksHidlTest {}; | ||
78 | |||
63 | } // namespace functional | 79 | } // namespace functional |
64 | } // namespace vts | 80 | } // namespace vts |
65 | 81 | ||
66 | // pretty-print values for error messages | 82 | // pretty-print values for error messages |
67 | 83 | ::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus); | |
68 | template <typename CharT, typename Traits> | 84 | ::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus); |
69 | ::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os, | ||
70 | V1_0::ErrorStatus errorStatus) { | ||
71 | return os << toString(errorStatus); | ||
72 | } | ||
73 | |||
74 | template <typename CharT, typename Traits> | ||
75 | ::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os, | ||
76 | V1_0::DeviceStatus deviceStatus) { | ||
77 | return os << toString(deviceStatus); | ||
78 | } | ||
79 | 85 | ||
80 | } // namespace V1_1 | 86 | } // namespace V1_1 |
81 | } // namespace neuralnetworks | 87 | } // namespace neuralnetworks |
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp deleted file mode 100644 index 10591dcb..00000000 --- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp +++ /dev/null | |||
@@ -1,468 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2018 The Android Open Source Project | ||
3 | * | ||
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | * you may not use this file except in compliance with the License. | ||
6 | * You may obtain a copy of the License at | ||
7 | * | ||
8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | * | ||
10 | * Unless required by applicable law or agreed to in writing, software | ||
11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | * See the License for the specific language governing permissions and | ||
14 | * limitations under the License. | ||
15 | */ | ||
16 | |||
17 | #define LOG_TAG "neuralnetworks_hidl_hal_test" | ||
18 | |||
19 | #include "VtsHalNeuralnetworksV1_1.h" | ||
20 | |||
21 | #include "Callbacks.h" | ||
22 | #include "Models.h" | ||
23 | #include "TestHarness.h" | ||
24 | |||
25 | #include <android-base/logging.h> | ||
26 | #include <android/hardware/neuralnetworks/1.1/IDevice.h> | ||
27 | #include <android/hardware/neuralnetworks/1.1/types.h> | ||
28 | #include <android/hidl/memory/1.0/IMemory.h> | ||
29 | #include <hidlmemory/mapping.h> | ||
30 | |||
31 | using ::android::hardware::neuralnetworks::V1_0::IPreparedModel; | ||
32 | using ::android::hardware::neuralnetworks::V1_0::DeviceStatus; | ||
33 | using ::android::hardware::neuralnetworks::V1_0::ErrorStatus; | ||
34 | using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc; | ||
35 | using ::android::hardware::neuralnetworks::V1_0::Operand; | ||
36 | using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime; | ||
37 | using ::android::hardware::neuralnetworks::V1_0::OperandType; | ||
38 | using ::android::hardware::neuralnetworks::V1_0::Request; | ||
39 | using ::android::hardware::neuralnetworks::V1_1::Capabilities; | ||
40 | using ::android::hardware::neuralnetworks::V1_1::IDevice; | ||
41 | using ::android::hardware::neuralnetworks::V1_1::Model; | ||
42 | using ::android::hardware::neuralnetworks::V1_1::Operation; | ||
43 | using ::android::hardware::neuralnetworks::V1_1::OperationType; | ||
44 | using ::android::hardware::Return; | ||
45 | using ::android::hardware::Void; | ||
46 | using ::android::hardware::hidl_memory; | ||
47 | using ::android::hardware::hidl_string; | ||
48 | using ::android::hardware::hidl_vec; | ||
49 | using ::android::hidl::allocator::V1_0::IAllocator; | ||
50 | using ::android::hidl::memory::V1_0::IMemory; | ||
51 | using ::android::sp; | ||
52 | |||
53 | namespace android { | ||
54 | namespace hardware { | ||
55 | namespace neuralnetworks { | ||
56 | namespace V1_1 { | ||
57 | namespace vts { | ||
58 | namespace functional { | ||
59 | using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; | ||
60 | using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; | ||
61 | |||
62 | static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) { | ||
63 | ASSERT_NE(nullptr, preparedModel); | ||
64 | Model model = createValidTestModel_1_1(); | ||
65 | |||
66 | // see if service can handle model | ||
67 | bool fullySupportsModel = false; | ||
68 | Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1( | ||
69 | model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) { | ||
70 | ASSERT_EQ(ErrorStatus::NONE, status); | ||
71 | ASSERT_NE(0ul, supported.size()); | ||
72 | fullySupportsModel = | ||
73 | std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; }); | ||
74 | }); | ||
75 | ASSERT_TRUE(supportedOpsLaunchStatus.isOk()); | ||
76 | |||
77 | // launch prepare model | ||
78 | sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); | ||
79 | ASSERT_NE(nullptr, preparedModelCallback.get()); | ||
80 | Return<ErrorStatus> prepareLaunchStatus = | ||
81 | device->prepareModel_1_1(model, preparedModelCallback); | ||
82 | ASSERT_TRUE(prepareLaunchStatus.isOk()); | ||
83 | ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus)); | ||
84 | |||
85 | // retrieve prepared model | ||
86 | preparedModelCallback->wait(); | ||
87 | ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); | ||
88 | *preparedModel = preparedModelCallback->getPreparedModel(); | ||
89 | |||
90 | // The getSupportedOperations call returns a list of operations that are | ||
91 | // guaranteed not to fail if prepareModel is called, and | ||
92 | // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed. | ||
93 | // If a driver has any doubt that it can prepare an operation, it must | ||
94 | // return false. So here, if a driver isn't sure if it can support an | ||
95 | // operation, but reports that it successfully prepared the model, the test | ||
96 | // can continue. | ||
97 | if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) { | ||
98 | ASSERT_EQ(nullptr, preparedModel->get()); | ||
99 | LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot " | ||
100 | "prepare model that it does not support."; | ||
101 | std::cout << "[ ] Early termination of test because vendor service cannot " | ||
102 | "prepare model that it does not support." | ||
103 | << std::endl; | ||
104 | return; | ||
105 | } | ||
106 | ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus); | ||
107 | ASSERT_NE(nullptr, preparedModel->get()); | ||
108 | } | ||
109 | |||
110 | // create device test | ||
111 | TEST_F(NeuralnetworksHidlTest, CreateDevice) {} | ||
112 | |||
113 | // status test | ||
114 | TEST_F(NeuralnetworksHidlTest, StatusTest) { | ||
115 | Return<DeviceStatus> status = device->getStatus(); | ||
116 | ASSERT_TRUE(status.isOk()); | ||
117 | EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status)); | ||
118 | } | ||
119 | |||
120 | // initialization | ||
121 | TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) { | ||
122 | Return<void> ret = | ||
123 | device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) { | ||
124 | EXPECT_EQ(ErrorStatus::NONE, status); | ||
125 | EXPECT_LT(0.0f, capabilities.float32Performance.execTime); | ||
126 | EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage); | ||
127 | EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime); | ||
128 | EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage); | ||
129 | EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime); | ||
130 | EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage); | ||
131 | }); | ||
132 | EXPECT_TRUE(ret.isOk()); | ||
133 | } | ||
134 | |||
135 | // supported operations positive test | ||
136 | TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) { | ||
137 | Model model = createValidTestModel_1_1(); | ||
138 | Return<void> ret = device->getSupportedOperations_1_1( | ||
139 | model, [&](ErrorStatus status, const hidl_vec<bool>& supported) { | ||
140 | EXPECT_EQ(ErrorStatus::NONE, status); | ||
141 | EXPECT_EQ(model.operations.size(), supported.size()); | ||
142 | }); | ||
143 | EXPECT_TRUE(ret.isOk()); | ||
144 | } | ||
145 | |||
146 | // supported operations negative test 1 | ||
147 | TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) { | ||
148 | Model model = createInvalidTestModel1_1_1(); | ||
149 | Return<void> ret = device->getSupportedOperations_1_1( | ||
150 | model, [&](ErrorStatus status, const hidl_vec<bool>& supported) { | ||
151 | EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); | ||
152 | (void)supported; | ||
153 | }); | ||
154 | EXPECT_TRUE(ret.isOk()); | ||
155 | } | ||
156 | |||
157 | // supported operations negative test 2 | ||
158 | TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) { | ||
159 | Model model = createInvalidTestModel2_1_1(); | ||
160 | Return<void> ret = device->getSupportedOperations_1_1( | ||
161 | model, [&](ErrorStatus status, const hidl_vec<bool>& supported) { | ||
162 | EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); | ||
163 | (void)supported; | ||
164 | }); | ||
165 | EXPECT_TRUE(ret.isOk()); | ||
166 | } | ||
167 | |||
168 | // prepare simple model positive test | ||
169 | TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) { | ||
170 | sp<IPreparedModel> preparedModel; | ||
171 | doPrepareModelShortcut(device, &preparedModel); | ||
172 | } | ||
173 | |||
174 | // prepare simple model negative test 1 | ||
175 | TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) { | ||
176 | Model model = createInvalidTestModel1_1_1(); | ||
177 | sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); | ||
178 | ASSERT_NE(nullptr, preparedModelCallback.get()); | ||
179 | Return<ErrorStatus> prepareLaunchStatus = | ||
180 | device->prepareModel_1_1(model, preparedModelCallback); | ||
181 | ASSERT_TRUE(prepareLaunchStatus.isOk()); | ||
182 | EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus)); | ||
183 | |||
184 | preparedModelCallback->wait(); | ||
185 | ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); | ||
186 | EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus); | ||
187 | sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel(); | ||
188 | EXPECT_EQ(nullptr, preparedModel.get()); | ||
189 | } | ||
190 | |||
191 | // prepare simple model negative test 2 | ||
192 | TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) { | ||
193 | Model model = createInvalidTestModel2_1_1(); | ||
194 | sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); | ||
195 | ASSERT_NE(nullptr, preparedModelCallback.get()); | ||
196 | Return<ErrorStatus> prepareLaunchStatus = | ||
197 | device->prepareModel_1_1(model, preparedModelCallback); | ||
198 | ASSERT_TRUE(prepareLaunchStatus.isOk()); | ||
199 | EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus)); | ||
200 | |||
201 | preparedModelCallback->wait(); | ||
202 | ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); | ||
203 | EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus); | ||
204 | sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel(); | ||
205 | EXPECT_EQ(nullptr, preparedModel.get()); | ||
206 | } | ||
207 | |||
208 | // execute simple graph positive test | ||
209 | TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) { | ||
210 | std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f}; | ||
211 | std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f}; | ||
212 | const uint32_t OUTPUT = 1; | ||
213 | |||
214 | sp<IPreparedModel> preparedModel; | ||
215 | ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel)); | ||
216 | if (preparedModel == nullptr) { | ||
217 | return; | ||
218 | } | ||
219 | Request request = createValidTestRequest(); | ||
220 | |||
221 | auto postWork = [&] { | ||
222 | sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]); | ||
223 | if (outputMemory == nullptr) { | ||
224 | return false; | ||
225 | } | ||
226 | float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer())); | ||
227 | if (outputPtr == nullptr) { | ||
228 | return false; | ||
229 | } | ||
230 | outputMemory->read(); | ||
231 | std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin()); | ||
232 | outputMemory->commit(); | ||
233 | return true; | ||
234 | }; | ||
235 | |||
236 | sp<ExecutionCallback> executionCallback = new ExecutionCallback(); | ||
237 | ASSERT_NE(nullptr, executionCallback.get()); | ||
238 | executionCallback->on_finish(postWork); | ||
239 | Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback); | ||
240 | ASSERT_TRUE(executeLaunchStatus.isOk()); | ||
241 | EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeLaunchStatus)); | ||
242 | |||
243 | executionCallback->wait(); | ||
244 | ErrorStatus executionReturnStatus = executionCallback->getStatus(); | ||
245 | EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus); | ||
246 | EXPECT_EQ(expectedData, outputData); | ||
247 | } | ||
248 | |||
249 | // execute simple graph negative test 1 | ||
250 | TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) { | ||
251 | sp<IPreparedModel> preparedModel; | ||
252 | ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel)); | ||
253 | if (preparedModel == nullptr) { | ||
254 | return; | ||
255 | } | ||
256 | Request request = createInvalidTestRequest1(); | ||
257 | |||
258 | sp<ExecutionCallback> executionCallback = new ExecutionCallback(); | ||
259 | ASSERT_NE(nullptr, executionCallback.get()); | ||
260 | Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback); | ||
261 | ASSERT_TRUE(executeLaunchStatus.isOk()); | ||
262 | EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus)); | ||
263 | |||
264 | executionCallback->wait(); | ||
265 | ErrorStatus executionReturnStatus = executionCallback->getStatus(); | ||
266 | EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus); | ||
267 | } | ||
268 | |||
269 | // execute simple graph negative test 2 | ||
270 | TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) { | ||
271 | sp<IPreparedModel> preparedModel; | ||
272 | ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel)); | ||
273 | if (preparedModel == nullptr) { | ||
274 | return; | ||
275 | } | ||
276 | Request request = createInvalidTestRequest2(); | ||
277 | |||
278 | sp<ExecutionCallback> executionCallback = new ExecutionCallback(); | ||
279 | ASSERT_NE(nullptr, executionCallback.get()); | ||
280 | Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback); | ||
281 | ASSERT_TRUE(executeLaunchStatus.isOk()); | ||
282 | EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus)); | ||
283 | |||
284 | executionCallback->wait(); | ||
285 | ErrorStatus executionReturnStatus = executionCallback->getStatus(); | ||
286 | EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus); | ||
287 | } | ||
288 | |||
289 | class NeuralnetworksInputsOutputsTest | ||
290 | : public NeuralnetworksHidlTest, | ||
291 | public ::testing::WithParamInterface<std::tuple<bool, bool>> { | ||
292 | protected: | ||
293 | virtual void SetUp() { NeuralnetworksHidlTest::SetUp(); } | ||
294 | virtual void TearDown() { NeuralnetworksHidlTest::TearDown(); } | ||
295 | V1_1::Model createModel(const std::vector<uint32_t>& inputs, | ||
296 | const std::vector<uint32_t>& outputs) { | ||
297 | // We set up the operands as floating-point with no designated | ||
298 | // model inputs and outputs, and then patch type and lifetime | ||
299 | // later on in this function. | ||
300 | |||
301 | std::vector<Operand> operands = { | ||
302 | { | ||
303 | .type = OperandType::TENSOR_FLOAT32, | ||
304 | .dimensions = {1}, | ||
305 | .numberOfConsumers = 1, | ||
306 | .scale = 0.0f, | ||
307 | .zeroPoint = 0, | ||
308 | .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, | ||
309 | .location = {.poolIndex = 0, .offset = 0, .length = 0}, | ||
310 | }, | ||
311 | { | ||
312 | .type = OperandType::TENSOR_FLOAT32, | ||
313 | .dimensions = {1}, | ||
314 | .numberOfConsumers = 1, | ||
315 | .scale = 0.0f, | ||
316 | .zeroPoint = 0, | ||
317 | .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, | ||
318 | .location = {.poolIndex = 0, .offset = 0, .length = 0}, | ||
319 | }, | ||
320 | { | ||
321 | .type = OperandType::INT32, | ||
322 | .dimensions = {}, | ||
323 | .numberOfConsumers = 1, | ||
324 | .scale = 0.0f, | ||
325 | .zeroPoint = 0, | ||
326 | .lifetime = OperandLifeTime::CONSTANT_COPY, | ||
327 | .location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)}, | ||
328 | }, | ||
329 | { | ||
330 | .type = OperandType::TENSOR_FLOAT32, | ||
331 | .dimensions = {1}, | ||
332 | .numberOfConsumers = 0, | ||
333 | .scale = 0.0f, | ||
334 | .zeroPoint = 0, | ||
335 | .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, | ||
336 | .location = {.poolIndex = 0, .offset = 0, .length = 0}, | ||
337 | }, | ||
338 | }; | ||
339 | |||
340 | const std::vector<Operation> operations = {{ | ||
341 | .type = OperationType::ADD, .inputs = {0, 1, 2}, .outputs = {3}, | ||
342 | }}; | ||
343 | |||
344 | std::vector<uint8_t> operandValues; | ||
345 | int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)}; | ||
346 | operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]), | ||
347 | reinterpret_cast<const uint8_t*>(&activation[1])); | ||
348 | |||
349 | if (kQuantized) { | ||
350 | for (auto& operand : operands) { | ||
351 | if (operand.type == OperandType::TENSOR_FLOAT32) { | ||
352 | operand.type = OperandType::TENSOR_QUANT8_ASYMM; | ||
353 | operand.scale = 1.0f; | ||
354 | operand.zeroPoint = 0; | ||
355 | } | ||
356 | } | ||
357 | } | ||
358 | |||
359 | auto patchLifetime = [&operands](const std::vector<uint32_t>& operandIndexes, | ||
360 | OperandLifeTime lifetime) { | ||
361 | for (uint32_t index : operandIndexes) { | ||
362 | operands[index].lifetime = lifetime; | ||
363 | } | ||
364 | }; | ||
365 | if (kInputHasPrecedence) { | ||
366 | patchLifetime(outputs, OperandLifeTime::MODEL_OUTPUT); | ||
367 | patchLifetime(inputs, OperandLifeTime::MODEL_INPUT); | ||
368 | } else { | ||
369 | patchLifetime(inputs, OperandLifeTime::MODEL_INPUT); | ||
370 | patchLifetime(outputs, OperandLifeTime::MODEL_OUTPUT); | ||
371 | } | ||
372 | |||
373 | return { | ||
374 | .operands = operands, | ||
375 | .operations = operations, | ||
376 | .inputIndexes = inputs, | ||
377 | .outputIndexes = outputs, | ||
378 | .operandValues = operandValues, | ||
379 | .pools = {}, | ||
380 | }; | ||
381 | } | ||
382 | void check(const std::string& name, | ||
383 | bool expectation, // true = success | ||
384 | const std::vector<uint32_t>& inputs, const std::vector<uint32_t>& outputs) { | ||
385 | SCOPED_TRACE(name + " (HAL calls should " + (expectation ? "succeed" : "fail") + ", " + | ||
386 | (kInputHasPrecedence ? "input" : "output") + " precedence, " + | ||
387 | (kQuantized ? "quantized" : "float")); | ||
388 | |||
389 | V1_1::Model model = createModel(inputs, outputs); | ||
390 | |||
391 | // ensure that getSupportedOperations_1_1() checks model validity | ||
392 | ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE; | ||
393 | Return<void> supportedOpsReturn = device->getSupportedOperations_1_1( | ||
394 | model, [&model, &supportedOpsErrorStatus](ErrorStatus status, | ||
395 | const hidl_vec<bool>& supported) { | ||
396 | supportedOpsErrorStatus = status; | ||
397 | if (status == ErrorStatus::NONE) { | ||
398 | ASSERT_EQ(supported.size(), model.operations.size()); | ||
399 | } | ||
400 | }); | ||
401 | ASSERT_TRUE(supportedOpsReturn.isOk()); | ||
402 | ASSERT_EQ(supportedOpsErrorStatus, | ||
403 | (expectation ? ErrorStatus::NONE : ErrorStatus::INVALID_ARGUMENT)); | ||
404 | |||
405 | // ensure that prepareModel_1_1() checks model validity | ||
406 | sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback; | ||
407 | ASSERT_NE(preparedModelCallback.get(), nullptr); | ||
408 | Return<ErrorStatus> prepareLaunchReturn = | ||
409 | device->prepareModel_1_1(model, preparedModelCallback); | ||
410 | ASSERT_TRUE(prepareLaunchReturn.isOk()); | ||
411 | ASSERT_TRUE(prepareLaunchReturn == ErrorStatus::NONE || | ||
412 | prepareLaunchReturn == ErrorStatus::INVALID_ARGUMENT); | ||
413 | bool preparationOk = (prepareLaunchReturn == ErrorStatus::NONE); | ||
414 | if (preparationOk) { | ||
415 | preparedModelCallback->wait(); | ||
416 | preparationOk = (preparedModelCallback->getStatus() == ErrorStatus::NONE); | ||
417 | } | ||
418 | |||
419 | if (preparationOk) { | ||
420 | ASSERT_TRUE(expectation); | ||
421 | } else { | ||
422 | // Preparation can fail for reasons other than an invalid model -- | ||
423 | // for example, perhaps not all operations are supported, or perhaps | ||
424 | // the device hit some kind of capacity limit. | ||
425 | bool invalid = prepareLaunchReturn == ErrorStatus::INVALID_ARGUMENT || | ||
426 | preparedModelCallback->getStatus() == ErrorStatus::INVALID_ARGUMENT; | ||
427 | ASSERT_NE(expectation, invalid); | ||
428 | } | ||
429 | } | ||
430 | |||
431 | // Indicates whether an operand that appears in both the inputs | ||
432 | // and outputs vector should have lifetime appropriate for input | ||
433 | // rather than for output. | ||
434 | const bool kInputHasPrecedence = std::get<0>(GetParam()); | ||
435 | |||
436 | // Indicates whether we should test TENSOR_QUANT8_ASYMM rather | ||
437 | // than TENSOR_FLOAT32. | ||
438 | const bool kQuantized = std::get<1>(GetParam()); | ||
439 | }; | ||
440 | |||
441 | TEST_P(NeuralnetworksInputsOutputsTest, Validate) { | ||
442 | check("Ok", true, {0, 1}, {3}); | ||
443 | check("InputIsOutput", false, {0, 1}, {3, 0}); | ||
444 | check("OutputIsInput", false, {0, 1, 3}, {3}); | ||
445 | check("DuplicateInputs", false, {0, 1, 0}, {3}); | ||
446 | check("DuplicateOutputs", false, {0, 1}, {3, 3}); | ||
447 | } | ||
448 | |||
449 | INSTANTIATE_TEST_CASE_P(Flavor, NeuralnetworksInputsOutputsTest, | ||
450 | ::testing::Combine(::testing::Bool(), ::testing::Bool())); | ||
451 | |||
452 | } // namespace functional | ||
453 | } // namespace vts | ||
454 | } // namespace V1_1 | ||
455 | } // namespace neuralnetworks | ||
456 | } // namespace hardware | ||
457 | } // namespace android | ||
458 | |||
459 | using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment; | ||
460 | |||
461 | int main(int argc, char** argv) { | ||
462 | ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance()); | ||
463 | ::testing::InitGoogleTest(&argc, argv); | ||
464 | NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv); | ||
465 | |||
466 | int status = RUN_ALL_TESTS(); | ||
467 | return status; | ||
468 | } | ||