author     David Gross     2018-03-23 11:51:02 -0500
committer  Michael Butler   2018-04-18 14:07:45 -0500
commit     fa89f009f97e51c357a0d05bfa1ee1cdbf650676 (patch)
tree       a60796e893082564d2eed2abbb8949cad7faff73
parent     0d8dbeecd69b252341f09d8e0cb536a8d3f95614 (diff)
Add validation tests for consistency of model inputs and outputs.
Test: VtsHalNeuralnetworksV1_1TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.1::IDevice/sample-all --gtest_filter=Flavor/NeuralnetworksInputsOutputsTest.*
Test: VtsHalNeuralnetworksV1_1TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.1::IDevice/sample-float-fast --gtest_filter=Flavor/NeuralnetworksInputsOutputsTest.*
Test: VtsHalNeuralnetworksV1_1TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.1::IDevice/sample-quant --gtest_filter=Flavor/NeuralnetworksInputsOutputsTest.*
Bug: 67828197
Merged-In: I245227dce095b9cbbb9b527ad99aa71d11f77c4f
Change-Id: I245227dce095b9cbbb9b527ad99aa71d11f77c4f
(cherry picked from commit 7a76d8a7a84d7980ecb37011d109a5a0f7287a70)
-rw-r--r--  neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp | 163
1 file changed, 163 insertions, 0 deletions
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
index 17f6744c..10591dcb 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
@@ -286,6 +286,169 @@ TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
     EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
 }
 
+class NeuralnetworksInputsOutputsTest
+    : public NeuralnetworksHidlTest,
+      public ::testing::WithParamInterface<std::tuple<bool, bool>> {
+   protected:
+    virtual void SetUp() { NeuralnetworksHidlTest::SetUp(); }
+    virtual void TearDown() { NeuralnetworksHidlTest::TearDown(); }
+    V1_1::Model createModel(const std::vector<uint32_t>& inputs,
+                            const std::vector<uint32_t>& outputs) {
+        // We set up the operands as floating-point with no designated
+        // model inputs and outputs, and then patch type and lifetime
+        // later on in this function.
+
+        std::vector<Operand> operands = {
+            {
+                .type = OperandType::TENSOR_FLOAT32,
+                .dimensions = {1},
+                .numberOfConsumers = 1,
+                .scale = 0.0f,
+                .zeroPoint = 0,
+                .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+                .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            },
+            {
+                .type = OperandType::TENSOR_FLOAT32,
+                .dimensions = {1},
+                .numberOfConsumers = 1,
+                .scale = 0.0f,
+                .zeroPoint = 0,
+                .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+                .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            },
+            {
+                .type = OperandType::INT32,
+                .dimensions = {},
+                .numberOfConsumers = 1,
+                .scale = 0.0f,
+                .zeroPoint = 0,
+                .lifetime = OperandLifeTime::CONSTANT_COPY,
+                .location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)},
+            },
+            {
+                .type = OperandType::TENSOR_FLOAT32,
+                .dimensions = {1},
+                .numberOfConsumers = 0,
+                .scale = 0.0f,
+                .zeroPoint = 0,
+                .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+                .location = {.poolIndex = 0, .offset = 0, .length = 0},
+            },
+        };
+
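+        // The model holds a single ADD operation: operands 0 and 1 are the
+        // addends, operand 2 is the fused activation code (NONE, copied into
+        // operandValues below), and operand 3 receives the result.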
+        const std::vector<Operation> operations = {{
+            .type = OperationType::ADD, .inputs = {0, 1, 2}, .outputs = {3},
+        }};
+
+        std::vector<uint8_t> operandValues;
+        int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
+        operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
+                             reinterpret_cast<const uint8_t*>(&activation[1]));
+
+        if (kQuantized) {
+            for (auto& operand : operands) {
+                if (operand.type == OperandType::TENSOR_FLOAT32) {
+                    operand.type = OperandType::TENSOR_QUANT8_ASYMM;
+                    operand.scale = 1.0f;
+                    operand.zeroPoint = 0;
+                }
+            }
+        }
+
+        auto patchLifetime = [&operands](const std::vector<uint32_t>& operandIndexes,
+                                         OperandLifeTime lifetime) {
+            for (uint32_t index : operandIndexes) {
+                operands[index].lifetime = lifetime;
+            }
+        };
+        if (kInputHasPrecedence) {
+            patchLifetime(outputs, OperandLifeTime::MODEL_OUTPUT);
+            patchLifetime(inputs, OperandLifeTime::MODEL_INPUT);
+        } else {
+            patchLifetime(inputs, OperandLifeTime::MODEL_INPUT);
+            patchLifetime(outputs, OperandLifeTime::MODEL_OUTPUT);
+        }
+
+        return {
+            .operands = operands,
+            .operations = operations,
+            .inputIndexes = inputs,
+            .outputIndexes = outputs,
+            .operandValues = operandValues,
+            .pools = {},
+        };
+    }
+    void check(const std::string& name,
+               bool expectation,  // true = success
+               const std::vector<uint32_t>& inputs, const std::vector<uint32_t>& outputs) {
+        SCOPED_TRACE(name + " (HAL calls should " + (expectation ? "succeed" : "fail") + ", " +
+                     (kInputHasPrecedence ? "input" : "output") + " precedence, " +
+                     (kQuantized ? "quantized" : "float") + ")");
+
+        V1_1::Model model = createModel(inputs, outputs);
+
+        // ensure that getSupportedOperations_1_1() checks model validity
+        ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
+        Return<void> supportedOpsReturn = device->getSupportedOperations_1_1(
+            model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
+                                                      const hidl_vec<bool>& supported) {
+                supportedOpsErrorStatus = status;
+                if (status == ErrorStatus::NONE) {
+                    ASSERT_EQ(supported.size(), model.operations.size());
+                }
+            });
+        ASSERT_TRUE(supportedOpsReturn.isOk());
+        ASSERT_EQ(supportedOpsErrorStatus,
+                  (expectation ? ErrorStatus::NONE : ErrorStatus::INVALID_ARGUMENT));
+
+        // ensure that prepareModel_1_1() checks model validity
+        sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
+        ASSERT_NE(preparedModelCallback.get(), nullptr);
+        Return<ErrorStatus> prepareLaunchReturn =
+            device->prepareModel_1_1(model, preparedModelCallback);
+        ASSERT_TRUE(prepareLaunchReturn.isOk());
+        ASSERT_TRUE(prepareLaunchReturn == ErrorStatus::NONE ||
+                    prepareLaunchReturn == ErrorStatus::INVALID_ARGUMENT);
+        bool preparationOk = (prepareLaunchReturn == ErrorStatus::NONE);
+        if (preparationOk) {
+            preparedModelCallback->wait();
+            preparationOk = (preparedModelCallback->getStatus() == ErrorStatus::NONE);
+        }
+
+        if (preparationOk) {
+            ASSERT_TRUE(expectation);
+        } else {
+            // Preparation can fail for reasons other than an invalid model --
+            // for example, perhaps not all operations are supported, or perhaps
+            // the device hit some kind of capacity limit.
+            bool invalid = prepareLaunchReturn == ErrorStatus::INVALID_ARGUMENT ||
+                           preparedModelCallback->getStatus() == ErrorStatus::INVALID_ARGUMENT;
+            ASSERT_NE(expectation, invalid);
+        }
+    }
+
+    // Indicates whether an operand that appears in both the inputs
+    // and outputs vector should have lifetime appropriate for input
+    // rather than for output.
+    const bool kInputHasPrecedence = std::get<0>(GetParam());
+
+    // Indicates whether we should test TENSOR_QUANT8_ASYMM rather
+    // than TENSOR_FLOAT32.
+    const bool kQuantized = std::get<1>(GetParam());
+};
+
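+// The driver is expected to reject a model in which an operand is listed both
+// as a model input and a model output, or appears more than once in either
+// list; only the "Ok" case below is a consistent assignment.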
+TEST_P(NeuralnetworksInputsOutputsTest, Validate) {
+    check("Ok", true, {0, 1}, {3});
+    check("InputIsOutput", false, {0, 1}, {3, 0});
+    check("OutputIsInput", false, {0, 1, 3}, {3});
+    check("DuplicateInputs", false, {0, 1, 0}, {3});
+    check("DuplicateOutputs", false, {0, 1}, {3, 3});
+}
+
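+// Run Validate under all four combinations of (kInputHasPrecedence, kQuantized).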
+INSTANTIATE_TEST_CASE_P(Flavor, NeuralnetworksInputsOutputsTest,
+                        ::testing::Combine(::testing::Bool(), ::testing::Bool()));
+
 } // namespace functional
 } // namespace vts
 } // namespace V1_1