author     Michael Butler    2018-02-26 17:24:46 -0600
committer  Miao Wang         2018-03-05 15:01:00 -0600
commit     1ae02d63df3d69c82774e4a4ca945ab5cc50fe3a (patch)
tree       39805f04e07217d26aae1d4a8cdbc8f296b8e2e4 /neuralnetworks/1.0/vts
parent     a8cd639dc0c6e84c27f9b32590564b5eeae6a490 (diff)
Relax NeuralNetwork's VTS positive and negative base tests
There are some NN VTS tests that assume a service is able to generate a
model consisting only of a floating point add operation. However, some
drivers do not support floating point operations. This CL relaxes the
test requirements to allow a test to be skipped if the service does not
support floating point add.

Bug: 72764145
Test: mma
Test: VtsHalNeuralnetworksV1_0TargetTest
Merged-In: I6b0644432680fc2f8098b5187795dc2953df03f9
Change-Id: I6b0644432680fc2f8098b5187795dc2953df03f9
(cherry picked from commit 4d5bb1097a34495212c09473b477dc97acb99264)
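The relaxation follows the same pattern in both files: query getSupportedOperations() first, and only treat a failed prepareModel() as a test failure when the driver claimed full support of the model. The following is a condensed sketch of that flow, assuming `device` and `model` are supplied by the test harness as in the diff below; the variable names are illustrative and are not themselves part of the CL.

    // Ask the driver which operations in the model it supports.
    bool fullySupportsModel = false;
    Return<void> supportedCall = device->getSupportedOperations(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool ok) { return ok; });
        });
    ASSERT_TRUE(supportedCall.isOk());

    // Launch model preparation and wait for the asynchronous callback.
    sp<PreparedModelCallback> callback = new PreparedModelCallback();
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, callback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
    callback->wait();
    ErrorStatus prepareReturnStatus = callback->getStatus();
    sp<IPreparedModel> preparedModel = callback->getPreparedModel();

    // A driver that did not claim full support (e.g. no float32 ADD) may
    // legitimately fail to prepare the model; skip the rest of the test
    // instead of reporting a failure.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel.get());
        return;
    }
    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel.get());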
Diffstat (limited to 'neuralnetworks/1.0/vts')
-rw-r--r--  neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp                46
-rw-r--r--  neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp   84
2 files changed, 72 insertions(+), 58 deletions(-)
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index f0ce9382..8646a4cb 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -186,35 +186,29 @@ void Execute(sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_
 
     // see if service can handle model
     bool fullySupportsModel = false;
-    ErrorStatus supportedStatus;
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-
     Return<void> supportedCall = device->getSupportedOperations(
-        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
-            supportedStatus = status;
+        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+            ASSERT_EQ(ErrorStatus::NONE, status);
             ASSERT_NE(0ul, supported.size());
             fullySupportsModel =
                 std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
         });
     ASSERT_TRUE(supportedCall.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, supportedStatus);
+
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
 
     // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
     sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-    if (fullySupportsModel) {
-        EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    } else {
-        EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
-                    prepareReturnStatus == ErrorStatus::GENERAL_FAILURE);
-    }
 
     // early termination if vendor service cannot fully prepare model
-    if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) {
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
         ASSERT_EQ(nullptr, preparedModel.get());
         LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                      "prepare model that it does not support.";
@@ -223,6 +217,7 @@ void Execute(sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_
                   << std::endl;
         return;
     }
+    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel.get());
 
     EvaluatePreparedModel(preparedModel, is_ignored, examples);
@@ -235,36 +230,30 @@ void Execute(sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_
 
     // see if service can handle model
     bool fullySupportsModel = false;
-    ErrorStatus supportedStatus;
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-
     Return<void> supportedCall = device->getSupportedOperations_1_1(
-        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
-            supportedStatus = status;
+        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+            ASSERT_EQ(ErrorStatus::NONE, status);
             ASSERT_NE(0ul, supported.size());
             fullySupportsModel =
                 std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
         });
     ASSERT_TRUE(supportedCall.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, supportedStatus);
+
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus =
         device->prepareModel_1_1(model, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
 
     // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
     sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-    if (fullySupportsModel) {
-        EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    } else {
-        EXPECT_TRUE(prepareReturnStatus == ErrorStatus::NONE ||
-                    prepareReturnStatus == ErrorStatus::GENERAL_FAILURE);
-    }
 
     // early termination if vendor service cannot fully prepare model
-    if (!fullySupportsModel && prepareReturnStatus == ErrorStatus::GENERAL_FAILURE) {
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
         ASSERT_EQ(nullptr, preparedModel.get());
         LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                      "prepare model that it does not support.";
@@ -273,6 +262,7 @@ void Execute(sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_
                   << std::endl;
         return;
     }
+    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel.get());
 
     // If in relaxed mode, set the error range to be 5ULP of FP16.
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp
index e838997a..59e5b806 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp
@@ -52,26 +52,51 @@ namespace functional {
 using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
 using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
 
-inline sp<IPreparedModel> doPrepareModelShortcut(sp<IDevice>& device) {
+static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
+    ASSERT_NE(nullptr, preparedModel);
     Model model = createValidTestModel_1_0();
 
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
+        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+            ASSERT_EQ(ErrorStatus::NONE, status);
+            ASSERT_NE(0ul, supported.size());
+            fullySupportsModel =
+                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
+        });
+    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+    // launch prepare model
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    if (preparedModelCallback == nullptr) {
-        return nullptr;
-    }
+    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
-    if (!prepareLaunchStatus.isOk() || prepareLaunchStatus != ErrorStatus::NONE) {
-        return nullptr;
-    }
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
 
+    // retrieve prepared model
     preparedModelCallback->wait();
     ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-    if (prepareReturnStatus != ErrorStatus::NONE || preparedModel == nullptr) {
-        return nullptr;
+    *preparedModel = preparedModelCallback->getPreparedModel();
+
+    // The getSupportedOperations call returns a list of operations that are
+    // guaranteed not to fail if prepareModel is called, and
+    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
+    // If a driver has any doubt that it can prepare an operation, it must
+    // return false. So here, if a driver isn't sure if it can support an
+    // operation, but reports that it successfully prepared the model, the test
+    // can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+                     "prepare model that it does not support.";
+        std::cout << "[          ] Early termination of test because vendor service cannot "
+                     "prepare model that it does not support."
+                  << std::endl;
+        return;
     }
-
-    return preparedModel;
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
 }
 
 // create device test
@@ -132,18 +157,8 @@ TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
 
 // prepare simple model positive test
 TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
-    Model model = createValidTestModel_1_0();
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-    EXPECT_NE(nullptr, preparedModel.get());
+    sp<IPreparedModel> preparedModel;
+    doPrepareModelShortcut(device, &preparedModel);
 }
 
 // prepare simple model negative test 1
@@ -184,8 +199,11 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
     std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
     const uint32_t OUTPUT = 1;
 
-    sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
-    ASSERT_NE(nullptr, preparedModel.get());
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
     Request request = createValidTestRequest();
 
     auto postWork = [&] {
@@ -218,8 +236,11 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
 
 // execute simple graph negative test 1
 TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
-    sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
-    ASSERT_NE(nullptr, preparedModel.get());
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
     Request request = createInvalidTestRequest1();
 
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();
@@ -235,8 +256,11 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
 
 // execute simple graph negative test 2
 TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
-    sp<IPreparedModel> preparedModel = doPrepareModelShortcut(device);
-    ASSERT_NE(nullptr, preparedModel.get());
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
     Request request = createInvalidTestRequest2();
 
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();