summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorandroid-build-team Robot2018-03-29 02:26:43 -0500
committerandroid-build-team Robot2018-03-29 02:26:43 -0500
commita87236462c2b5798b2e2f75b9ec7cc7cefd74b0f (patch)
treec9687758640090e1fb90818c3a235acbff433b43
parent3855d757ecfd0bcecf19a7b0b6a4d1a21ce0c3a8 (diff)
parent55b92198fb650795183ffd25c07442437fec94ec (diff)
downloadplatform-hardware-interfaces-a87236462c2b5798b2e2f75b9ec7cc7cefd74b0f.tar.gz
platform-hardware-interfaces-a87236462c2b5798b2e2f75b9ec7cc7cefd74b0f.tar.xz
platform-hardware-interfaces-a87236462c2b5798b2e2f75b9ec7cc7cefd74b0f.zip
Snap for 4686875 from 55b92198fb650795183ffd25c07442437fec94ec to pi-release
Change-Id: I26a28e0e4a40925eb5f400559b3f9acf8cbf10d2
l---------audio/effect/4.0/xml/audio_effects_conf_V4_0.xsd1
-rw-r--r--broadcastradio/1.1/default/Tuner.cpp1
-rw-r--r--camera/metadata/3.3/types.hal1
-rw-r--r--compatibility_matrices/Android.mk39
-rw-r--r--compatibility_matrices/compatibility_matrix.current.xml27
-rw-r--r--compatibility_matrices/compatibility_matrix.mk14
-rw-r--r--compatibility_matrices/manifest.empty.xml1
-rw-r--r--current.txt2
-rw-r--r--neuralnetworks/1.0/vts/functional/Android.bp12
-rw-r--r--neuralnetworks/1.0/vts/functional/BasicTests.cpp56
-rw-r--r--neuralnetworks/1.0/vts/functional/Callbacks.h8
-rw-r--r--neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp4
-rw-r--r--neuralnetworks/1.0/vts/functional/GeneratedTests.cpp (renamed from neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp)26
-rw-r--r--neuralnetworks/1.0/vts/functional/Models.cpp202
-rw-r--r--neuralnetworks/1.0/vts/functional/Models.h186
-rw-r--r--neuralnetworks/1.0/vts/functional/ValidateModel.cpp506
-rw-r--r--neuralnetworks/1.0/vts/functional/ValidateRequest.cpp261
-rw-r--r--neuralnetworks/1.0/vts/functional/ValidationTests.cpp50
-rw-r--r--neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp (renamed from neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp)47
-rw-r--r--neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h (renamed from neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h)49
-rw-r--r--neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp293
-rw-r--r--neuralnetworks/1.1/vts/functional/Android.bp9
-rw-r--r--neuralnetworks/1.1/vts/functional/BasicTests.cpp58
-rw-r--r--neuralnetworks/1.1/vts/functional/GeneratedTests.cpp (renamed from neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp)33
-rw-r--r--neuralnetworks/1.1/vts/functional/Models.h323
-rw-r--r--neuralnetworks/1.1/vts/functional/ValidateModel.cpp513
-rw-r--r--neuralnetworks/1.1/vts/functional/ValidateRequest.cpp262
-rw-r--r--neuralnetworks/1.1/vts/functional/ValidationTests.cpp50
-rw-r--r--neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp (renamed from neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp)48
-rw-r--r--neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h (renamed from neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h)54
-rw-r--r--neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp468
31 files changed, 2465 insertions, 1139 deletions
diff --git a/audio/effect/4.0/xml/audio_effects_conf_V4_0.xsd b/audio/effect/4.0/xml/audio_effects_conf_V4_0.xsd
new file mode 120000
index 00000000..82d569a7
--- /dev/null
+++ b/audio/effect/4.0/xml/audio_effects_conf_V4_0.xsd
@@ -0,0 +1 @@
../../2.0/xml/audio_effects_conf_V2_0.xsd \ No newline at end of file
diff --git a/broadcastradio/1.1/default/Tuner.cpp b/broadcastradio/1.1/default/Tuner.cpp
index ae018798..4b49b59f 100644
--- a/broadcastradio/1.1/default/Tuner.cpp
+++ b/broadcastradio/1.1/default/Tuner.cpp
@@ -86,6 +86,7 @@ void Tuner::setConfigurationInternalLocked(const BandConfig& config) {
86 86
87 mIsAmfmConfigSet = true; 87 mIsAmfmConfigSet = true;
88 mCallback->configChange(Result::OK, mAmfmConfig); 88 mCallback->configChange(Result::OK, mAmfmConfig);
89 if (mCallback1_1 != nullptr) mCallback1_1->programListChanged();
89} 90}
90 91
91bool Tuner::autoConfigureLocked(uint64_t frequency) { 92bool Tuner::autoConfigureLocked(uint64_t frequency) {
diff --git a/camera/metadata/3.3/types.hal b/camera/metadata/3.3/types.hal
index 0535be16..04edfe9f 100644
--- a/camera/metadata/3.3/types.hal
+++ b/camera/metadata/3.3/types.hal
@@ -230,6 +230,7 @@ enum CameraMetadataEnumAndroidRequestAvailableCapabilities :
230 @3.2::CameraMetadataEnumAndroidRequestAvailableCapabilities { 230 @3.2::CameraMetadataEnumAndroidRequestAvailableCapabilities {
231 ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MOTION_TRACKING, 231 ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MOTION_TRACKING,
232 ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA, 232 ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA,
233 ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME,
233}; 234};
234 235
235/** android.statistics.oisDataMode enumeration values 236/** android.statistics.oisDataMode enumeration values
diff --git a/compatibility_matrices/Android.mk b/compatibility_matrices/Android.mk
index 71253dac..a10d808d 100644
--- a/compatibility_matrices/Android.mk
+++ b/compatibility_matrices/Android.mk
@@ -22,6 +22,7 @@ BUILD_FRAMEWORK_COMPATIBILITY_MATRIX := $(LOCAL_PATH)/compatibility_matrix.mk
22LOCAL_ADD_VBMETA_VERSION := 22LOCAL_ADD_VBMETA_VERSION :=
23LOCAL_ASSEMBLE_VINTF_ENV_VARS := 23LOCAL_ASSEMBLE_VINTF_ENV_VARS :=
24LOCAL_ASSEMBLE_VINTF_FLAGS := 24LOCAL_ASSEMBLE_VINTF_FLAGS :=
25LOCAL_WARN_REQUIRED_HALS :=
25LOCAL_KERNEL_VERSIONS := 26LOCAL_KERNEL_VERSIONS :=
26LOCAL_GEN_FILE_DEPENDENCIES := 27LOCAL_GEN_FILE_DEPENDENCIES :=
27 28
@@ -57,14 +58,46 @@ include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
57# Framework Compatibility Matrix (common to all FCM versions) 58# Framework Compatibility Matrix (common to all FCM versions)
58 59
59include $(CLEAR_VARS) 60include $(CLEAR_VARS)
60LOCAL_MODULE_STEM := compatibility_matrix.empty.xml 61LOCAL_MODULE_STEM := compatibility_matrix.device.xml
61LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM) 62# define LOCAL_MODULE and LOCAL_MODULE_CLASS for local-generated-sources-dir.
63LOCAL_MODULE := framework_compatibility_matrix.device.xml
64LOCAL_MODULE_CLASS := ETC
65
66ifndef DEVICE_FRAMEWORK_COMPATIBILITY_MATRIX_FILE
67LOCAL_SRC_FILES := compatibility_matrix.empty.xml
68else
69
70# DEVICE_FRAMEWORK_COMPATIBILITY_MATRIX_FILE specify an absolute path
71LOCAL_GENERATED_SOURCES := $(DEVICE_FRAMEWORK_COMPATIBILITY_MATRIX_FILE)
72
73# Enforce that DEVICE_FRAMEWORK_COMPATIBILITY_MATRIX_FILE does not specify required HALs
74# by checking it against an empty manifest. But the empty manifest needs to contain
75# BOARD_SEPOLICY_VERS to be compatible with DEVICE_FRAMEWORK_COMPATIBILITY_MATRIX_FILE.
76my_manifest_src_file := $(LOCAL_PATH)/manifest.empty.xml
77my_gen_check_manifest := $(local-generated-sources-dir)/manifest.check.xml
78$(my_gen_check_manifest): PRIVATE_SRC_FILE := $(my_manifest_src_file)
79$(my_gen_check_manifest): $(my_manifest_src_file) $(HOST_OUT_EXECUTABLES)/assemble_vintf
80 BOARD_SEPOLICY_VERS=$(BOARD_SEPOLICY_VERS) \
81 IGNORE_TARGET_FCM_VERSION=true \
82 $(HOST_OUT_EXECUTABLES)/assemble_vintf -i $(PRIVATE_SRC_FILE) -o $@
83
84LOCAL_GEN_FILE_DEPENDENCIES += $(my_gen_check_manifest)
85LOCAL_ASSEMBLE_VINTF_FLAGS += -c "$(my_gen_check_manifest)"
86
87my_gen_check_manifest :=
88my_manifest_src_file :=
89
90endif # DEVICE_FRAMEWORK_COMPATIBILITY_MATRIX_FILE
91
62LOCAL_ADD_VBMETA_VERSION := true 92LOCAL_ADD_VBMETA_VERSION := true
63LOCAL_ASSEMBLE_VINTF_ENV_VARS := \ 93LOCAL_ASSEMBLE_VINTF_ENV_VARS := \
64 POLICYVERS \ 94 POLICYVERS \
65 PLATFORM_SEPOLICY_VERSION \ 95 PLATFORM_SEPOLICY_VERSION \
66 PLATFORM_SEPOLICY_COMPAT_VERSIONS 96 PLATFORM_SEPOLICY_COMPAT_VERSIONS
67 97
98LOCAL_WARN_REQUIRED_HALS := \
99 "Error: DEVICE_FRAMEWORK_COMPATIBILITY_MATRIX cannot contain required HALs."
100
68include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX) 101include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
69 102
70# Framework Compatibility Matrix 103# Framework Compatibility Matrix
@@ -78,7 +111,7 @@ LOCAL_REQUIRED_MODULES := \
78 framework_compatibility_matrix.1.xml \ 111 framework_compatibility_matrix.1.xml \
79 framework_compatibility_matrix.2.xml \ 112 framework_compatibility_matrix.2.xml \
80 framework_compatibility_matrix.current.xml \ 113 framework_compatibility_matrix.current.xml \
81 framework_compatibility_matrix.empty.xml 114 framework_compatibility_matrix.device.xml
82LOCAL_GENERATED_SOURCES := $(call module-installed-files,$(LOCAL_REQUIRED_MODULES)) 115LOCAL_GENERATED_SOURCES := $(call module-installed-files,$(LOCAL_REQUIRED_MODULES))
83 116
84ifdef BUILT_VENDOR_MANIFEST 117ifdef BUILT_VENDOR_MANIFEST
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index 370ffdd6..486c548e 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -76,7 +76,7 @@
76 <version>2.4</version> 76 <version>2.4</version>
77 <interface> 77 <interface>
78 <name>ICameraProvider</name> 78 <name>ICameraProvider</name>
79 <instance>legacy/0</instance> 79 <regex-instance>[^/]+/[0-9]+</regex-instance>
80 </interface> 80 </interface>
81 </hal> 81 </hal>
82 <hal format="hidl" optional="true"> 82 <hal format="hidl" optional="true">
@@ -103,16 +103,28 @@
103 <instance>default</instance> 103 <instance>default</instance>
104 </interface> 104 </interface>
105 </hal> 105 </hal>
106 <hal format="hidl" optional="false"> 106 <hal format="hidl" optional="true">
107 <name>android.hardware.drm</name> 107 <name>android.hardware.drm</name>
108 <version>1.0</version> 108 <version>1.0</version>
109 <interface> 109 <interface>
110 <name>ICryptoFactory</name> 110 <name>ICryptoFactory</name>
111 <instance>default</instance> 111 <regex-instance>.*</regex-instance>
112 </interface> 112 </interface>
113 <interface> 113 <interface>
114 <name>IDrmFactory</name> 114 <name>IDrmFactory</name>
115 <instance>default</instance> 115 <regex-instance>.*</regex-instance>
116 </interface>
117 </hal>
118 <hal format="hidl" optional="false">
119 <name>android.hardware.drm</name>
120 <version>1.1</version>
121 <interface>
122 <name>ICryptoFactory</name>
123 <regex-instance>.*</regex-instance>
124 </interface>
125 <interface>
126 <name>IDrmFactory</name>
127 <regex-instance>.*</regex-instance>
116 </interface> 128 </interface>
117 </hal> 129 </hal>
118 <hal format="hidl" optional="true"> 130 <hal format="hidl" optional="true">
@@ -225,8 +237,7 @@
225 <version>1.0</version> 237 <version>1.0</version>
226 <interface> 238 <interface>
227 <name>IDevice</name> 239 <name>IDevice</name>
228 <!-- TODO(b/73738616): This should be * (match any) --> 240 <regex-instance>.*</regex-instance>
229 <instance>hvx</instance>
230 </interface> 241 </interface>
231 </hal> 242 </hal>
232 <hal format="hidl" optional="true"> 243 <hal format="hidl" optional="true">
@@ -258,11 +269,11 @@
258 <version>1.0-1</version> 269 <version>1.0-1</version>
259 <interface> 270 <interface>
260 <name>IRadio</name> 271 <name>IRadio</name>
261 <instance>slot1</instance> 272 <regex-instance>slot[0-9]+</regex-instance>
262 </interface> 273 </interface>
263 <interface> 274 <interface>
264 <name>ISap</name> 275 <name>ISap</name>
265 <instance>slot1</instance> 276 <regex-instance>slot[0-9]+</regex-instance>
266 </interface> 277 </interface>
267 </hal> 278 </hal>
268 <hal format="hidl" optional="true"> 279 <hal format="hidl" optional="true">
diff --git a/compatibility_matrices/compatibility_matrix.mk b/compatibility_matrices/compatibility_matrix.mk
index 96815b83..abc67965 100644
--- a/compatibility_matrices/compatibility_matrix.mk
+++ b/compatibility_matrices/compatibility_matrix.mk
@@ -29,8 +29,13 @@ ifndef LOCAL_MODULE_STEM
29$(error LOCAL_MODULE_STEM must be defined.) 29$(error LOCAL_MODULE_STEM must be defined.)
30endif 30endif
31 31
32ifndef LOCAL_MODULE
32LOCAL_MODULE := framework_$(LOCAL_MODULE_STEM) 33LOCAL_MODULE := framework_$(LOCAL_MODULE_STEM)
34endif
35
36ifndef LOCAL_MODULE_CLASS
33LOCAL_MODULE_CLASS := ETC 37LOCAL_MODULE_CLASS := ETC
38endif
34 39
35ifndef LOCAL_MODULE_PATH 40ifndef LOCAL_MODULE_PATH
36LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/vintf 41LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/vintf
@@ -76,13 +81,19 @@ my_matrix_src_files := \
76 $(addprefix $(LOCAL_PATH)/,$(LOCAL_SRC_FILES)) \ 81 $(addprefix $(LOCAL_PATH)/,$(LOCAL_SRC_FILES)) \
77 $(LOCAL_GENERATED_SOURCES) 82 $(LOCAL_GENERATED_SOURCES)
78 83
84ifneq (,$(strip $(LOCAL_WARN_REQUIRED_HALS)))
85$(GEN): PRIVATE_ADDITIONAL_ENV_VARS += PRODUCT_ENFORCE_VINTF_MANIFEST=true
86$(GEN): PRIVATE_COMMAND_TAIL := || (echo $(strip $(LOCAL_WARN_REQUIRED_HALS)) && false)
87endif
88
79$(GEN): PRIVATE_SRC_FILES := $(my_matrix_src_files) 89$(GEN): PRIVATE_SRC_FILES := $(my_matrix_src_files)
80$(GEN): $(my_matrix_src_files) $(HOST_OUT_EXECUTABLES)/assemble_vintf 90$(GEN): $(my_matrix_src_files) $(HOST_OUT_EXECUTABLES)/assemble_vintf
81 $(foreach varname,$(PRIVATE_ENV_VARS),$(varname)="$($(varname))") \ 91 $(foreach varname,$(PRIVATE_ENV_VARS),$(varname)="$($(varname))") \
92 $(PRIVATE_ADDITIONAL_ENV_VARS) \
82 $(HOST_OUT_EXECUTABLES)/assemble_vintf \ 93 $(HOST_OUT_EXECUTABLES)/assemble_vintf \
83 -i $(call normalize-path-list,$(PRIVATE_SRC_FILES)) \ 94 -i $(call normalize-path-list,$(PRIVATE_SRC_FILES)) \
84 -o $@ \ 95 -o $@ \
85 $(PRIVATE_FLAGS) 96 $(PRIVATE_FLAGS) $(PRIVATE_COMMAND_TAIL)
86 97
87LOCAL_PREBUILT_MODULE_FILE := $(GEN) 98LOCAL_PREBUILT_MODULE_FILE := $(GEN)
88LOCAL_SRC_FILES := 99LOCAL_SRC_FILES :=
@@ -91,6 +102,7 @@ LOCAL_GENERATED_SOURCES :=
91LOCAL_ADD_VBMETA_VERSION := 102LOCAL_ADD_VBMETA_VERSION :=
92LOCAL_ASSEMBLE_VINTF_ENV_VARS := 103LOCAL_ASSEMBLE_VINTF_ENV_VARS :=
93LOCAL_ASSEMBLE_VINTF_FLAGS := 104LOCAL_ASSEMBLE_VINTF_FLAGS :=
105LOCAL_WARN_REQUIRED_HALS :=
94LOCAL_KERNEL_VERSIONS := 106LOCAL_KERNEL_VERSIONS :=
95LOCAL_GEN_FILE_DEPENDENCIES := 107LOCAL_GEN_FILE_DEPENDENCIES :=
96my_matrix_src_files := 108my_matrix_src_files :=
diff --git a/compatibility_matrices/manifest.empty.xml b/compatibility_matrices/manifest.empty.xml
new file mode 100644
index 00000000..e50e0e53
--- /dev/null
+++ b/compatibility_matrices/manifest.empty.xml
@@ -0,0 +1 @@
<manifest version="1.0" type="device" />
diff --git a/current.txt b/current.txt
index 71e2d912..dbe462fd 100644
--- a/current.txt
+++ b/current.txt
@@ -307,7 +307,7 @@ eacf4e7491fc52c4db90898faddf25ec7bc72501b07ae8737434c47cb845128c android.hardwar
3074fb0725c36ed4f77a42b42e3f18d8b5f7919cb62b90098b23143a555aa7dd96d android.hardware.camera.device@3.4::ICameraDeviceCallback 3074fb0725c36ed4f77a42b42e3f18d8b5f7919cb62b90098b23143a555aa7dd96d android.hardware.camera.device@3.4::ICameraDeviceCallback
308812fa66aa10ba0cba27cfddc2fd7f0ee27a8ab65a1f15aa79fdad97d403e6a14 android.hardware.camera.device@3.4::ICameraDeviceSession 308812fa66aa10ba0cba27cfddc2fd7f0ee27a8ab65a1f15aa79fdad97d403e6a14 android.hardware.camera.device@3.4::ICameraDeviceSession
309cc288f1f78d1e643eb3d3dbc16e1401d44033d8e6856761f5156814a29986ec7 android.hardware.camera.device@3.4::types 309cc288f1f78d1e643eb3d3dbc16e1401d44033d8e6856761f5156814a29986ec7 android.hardware.camera.device@3.4::types
31071ee1f46dac4df417d2950e4de760e4145038ae363fc11aeea487350bf603897 android.hardware.camera.metadata@3.3::types 310f9278c8beb9d42d96e26d73ecabe1dff1d7e2fb301ab7f737d93e5ffae8d3312 android.hardware.camera.metadata@3.3::types
3111a46aeae45b7a0e47f79b7207300532986f9d9cd7060779afc7a529f54d712ab android.hardware.confirmationui@1.0::IConfirmationResultCallback 3111a46aeae45b7a0e47f79b7207300532986f9d9cd7060779afc7a529f54d712ab android.hardware.confirmationui@1.0::IConfirmationResultCallback
3126d8347ff3cd7de471065ac3e8e68385073630cdeebe9f8fa58cb91cf44436c95 android.hardware.confirmationui@1.0::IConfirmationUI 3126d8347ff3cd7de471065ac3e8e68385073630cdeebe9f8fa58cb91cf44436c95 android.hardware.confirmationui@1.0::IConfirmationUI
313a3ff916784dce87a56c757ab5c86433f0cdf562280999a5f978a6e8a0f3f19e7 android.hardware.confirmationui@1.0::types 313a3ff916784dce87a56c757ab5c86433f0cdf562280999a5f978a6e8a0f3f19e7 android.hardware.confirmationui@1.0::types
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index 54dd14ab..e28113bc 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -18,7 +18,6 @@ cc_library_static {
18 name: "VtsHalNeuralnetworksTest_utils", 18 name: "VtsHalNeuralnetworksTest_utils",
19 srcs: [ 19 srcs: [
20 "Callbacks.cpp", 20 "Callbacks.cpp",
21 "Models.cpp",
22 "GeneratedTestHarness.cpp", 21 "GeneratedTestHarness.cpp",
23 ], 22 ],
24 defaults: ["VtsHalTargetTestDefaults"], 23 defaults: ["VtsHalTargetTestDefaults"],
@@ -41,14 +40,17 @@ cc_library_static {
41cc_test { 40cc_test {
42 name: "VtsHalNeuralnetworksV1_0TargetTest", 41 name: "VtsHalNeuralnetworksV1_0TargetTest",
43 srcs: [ 42 srcs: [
44 "VtsHalNeuralnetworksV1_0.cpp", 43 "BasicTests.cpp",
45 "VtsHalNeuralnetworksV1_0BasicTest.cpp", 44 "GeneratedTests.cpp",
46 "VtsHalNeuralnetworksV1_0GeneratedTest.cpp", 45 "ValidateModel.cpp",
46 "ValidateRequest.cpp",
47 "ValidationTests.cpp",
48 "VtsHalNeuralnetworks.cpp",
47 ], 49 ],
48 defaults: ["VtsHalTargetTestDefaults"], 50 defaults: ["VtsHalTargetTestDefaults"],
49 static_libs: [ 51 static_libs: [
50 "android.hardware.neuralnetworks@1.0",
51 "android.hardware.neuralnetworks@1.1", 52 "android.hardware.neuralnetworks@1.1",
53 "android.hardware.neuralnetworks@1.0",
52 "android.hidl.allocator@1.0", 54 "android.hidl.allocator@1.0",
53 "android.hidl.memory@1.0", 55 "android.hidl.memory@1.0",
54 "libhidlmemory", 56 "libhidlmemory",
diff --git a/neuralnetworks/1.0/vts/functional/BasicTests.cpp b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
new file mode 100644
index 00000000..945c4065
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "VtsHalNeuralnetworks.h"
20
21namespace android {
22namespace hardware {
23namespace neuralnetworks {
24namespace V1_0 {
25namespace vts {
26namespace functional {
27
28// create device test
29TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
30
31// status test
32TEST_F(NeuralnetworksHidlTest, StatusTest) {
33 Return<DeviceStatus> status = device->getStatus();
34 ASSERT_TRUE(status.isOk());
35 EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
36}
37
38// initialization
39TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
40 Return<void> ret =
41 device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
42 EXPECT_EQ(ErrorStatus::NONE, status);
43 EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
44 EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
45 EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
46 EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
47 });
48 EXPECT_TRUE(ret.isOk());
49}
50
51} // namespace functional
52} // namespace vts
53} // namespace V1_0
54} // namespace neuralnetworks
55} // namespace hardware
56} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.h b/neuralnetworks/1.0/vts/functional/Callbacks.h
index 0e2ffb32..2ac6130d 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.h
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.h
@@ -17,14 +17,6 @@ namespace neuralnetworks {
17namespace V1_0 { 17namespace V1_0 {
18namespace implementation { 18namespace implementation {
19 19
20using ::android::hardware::hidl_array;
21using ::android::hardware::hidl_memory;
22using ::android::hardware::hidl_string;
23using ::android::hardware::hidl_vec;
24using ::android::hardware::Return;
25using ::android::hardware::Void;
26using ::android::sp;
27
28/** 20/**
29 * The CallbackBase class is used internally by the NeuralNetworks runtime to 21 * The CallbackBase class is used internally by the NeuralNetworks runtime to
30 * synchronize between different threads. An asynchronous task is launched 22 * synchronize between different threads. An asynchronous task is launched
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 8646a4cb..4f9d5283 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -179,7 +179,7 @@ void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool
179 } 179 }
180} 180}
181 181
182void Execute(sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model, 182void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
183 std::function<bool(int)> is_ignored, 183 std::function<bool(int)> is_ignored,
184 const std::vector<MixedTypedExampleType>& examples) { 184 const std::vector<MixedTypedExampleType>& examples) {
185 V1_0::Model model = create_model(); 185 V1_0::Model model = create_model();
@@ -223,7 +223,7 @@ void Execute(sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_
223 EvaluatePreparedModel(preparedModel, is_ignored, examples); 223 EvaluatePreparedModel(preparedModel, is_ignored, examples);
224} 224}
225 225
226void Execute(sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model, 226void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
227 std::function<bool(int)> is_ignored, 227 std::function<bool(int)> is_ignored,
228 const std::vector<MixedTypedExampleType>& examples) { 228 const std::vector<MixedTypedExampleType>& examples) {
229 V1_1::Model model = create_model(); 229 V1_1::Model model = create_model();
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
index b99aef7f..2107333e 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0GeneratedTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
@@ -16,47 +16,33 @@
16 16
17#define LOG_TAG "neuralnetworks_hidl_hal_test" 17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18 18
19#include "VtsHalNeuralnetworksV1_0.h" 19#include "VtsHalNeuralnetworks.h"
20 20
21#include "Callbacks.h" 21#include "Callbacks.h"
22#include "TestHarness.h" 22#include "TestHarness.h"
23#include "Utils.h"
23 24
24#include <android-base/logging.h> 25#include <android-base/logging.h>
25#include <android/hidl/memory/1.0/IMemory.h> 26#include <android/hidl/memory/1.0/IMemory.h>
26#include <hidlmemory/mapping.h> 27#include <hidlmemory/mapping.h>
27 28
28using ::android::hardware::neuralnetworks::V1_0::IDevice;
29using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
30using ::android::hardware::neuralnetworks::V1_0::Capabilities;
31using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
32using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
33using ::android::hardware::neuralnetworks::V1_0::Model;
34using ::android::hardware::neuralnetworks::V1_0::OperationType;
35using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
36using ::android::hardware::Return;
37using ::android::hardware::Void;
38using ::android::hardware::hidl_memory;
39using ::android::hardware::hidl_string;
40using ::android::hardware::hidl_vec;
41using ::android::hidl::allocator::V1_0::IAllocator;
42using ::android::hidl::memory::V1_0::IMemory;
43using ::android::sp;
44
45namespace android { 29namespace android {
46namespace hardware { 30namespace hardware {
47namespace neuralnetworks { 31namespace neuralnetworks {
48 32
49namespace generated_tests { 33namespace generated_tests {
50using ::generated_tests::MixedTypedExampleType; 34using ::generated_tests::MixedTypedExampleType;
51extern void Execute(sp<IDevice>&, std::function<Model(void)>, std::function<bool(int)>, 35extern void Execute(const sp<V1_0::IDevice>&, std::function<V1_0::Model(void)>,
52 const std::vector<MixedTypedExampleType>&); 36 std::function<bool(int)>, const std::vector<MixedTypedExampleType>&);
53} // namespace generated_tests 37} // namespace generated_tests
54 38
55namespace V1_0 { 39namespace V1_0 {
56namespace vts { 40namespace vts {
57namespace functional { 41namespace functional {
42
58using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; 43using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
59using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; 44using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
45using ::android::nn::allocateSharedMemory;
60 46
61// Mixed-typed examples 47// Mixed-typed examples
62typedef generated_tests::MixedTypedExampleType MixedTypedExample; 48typedef generated_tests::MixedTypedExampleType MixedTypedExample;
diff --git a/neuralnetworks/1.0/vts/functional/Models.cpp b/neuralnetworks/1.0/vts/functional/Models.cpp
deleted file mode 100644
index 180286a5..00000000
--- a/neuralnetworks/1.0/vts/functional/Models.cpp
+++ /dev/null
@@ -1,202 +0,0 @@
1/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "Models.h"
20#include "Utils.h"
21
22#include <android-base/logging.h>
23#include <android/hidl/allocator/1.0/IAllocator.h>
24#include <android/hidl/memory/1.0/IMemory.h>
25#include <hidlmemory/mapping.h>
26#include <vector>
27
28using ::android::sp;
29
30namespace android {
31namespace hardware {
32namespace neuralnetworks {
33
34// create a valid model
35V1_1::Model createValidTestModel_1_1() {
36 const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
37 const uint32_t size = operand2Data.size() * sizeof(float);
38
39 const uint32_t operand1 = 0;
40 const uint32_t operand2 = 1;
41 const uint32_t operand3 = 2;
42 const uint32_t operand4 = 3;
43
44 const std::vector<Operand> operands = {
45 {
46 .type = OperandType::TENSOR_FLOAT32,
47 .dimensions = {1, 2, 2, 1},
48 .numberOfConsumers = 1,
49 .scale = 0.0f,
50 .zeroPoint = 0,
51 .lifetime = OperandLifeTime::MODEL_INPUT,
52 .location = {.poolIndex = 0, .offset = 0, .length = 0},
53 },
54 {
55 .type = OperandType::TENSOR_FLOAT32,
56 .dimensions = {1, 2, 2, 1},
57 .numberOfConsumers = 1,
58 .scale = 0.0f,
59 .zeroPoint = 0,
60 .lifetime = OperandLifeTime::CONSTANT_COPY,
61 .location = {.poolIndex = 0, .offset = 0, .length = size},
62 },
63 {
64 .type = OperandType::INT32,
65 .dimensions = {},
66 .numberOfConsumers = 1,
67 .scale = 0.0f,
68 .zeroPoint = 0,
69 .lifetime = OperandLifeTime::CONSTANT_COPY,
70 .location = {.poolIndex = 0, .offset = size, .length = sizeof(int32_t)},
71 },
72 {
73 .type = OperandType::TENSOR_FLOAT32,
74 .dimensions = {1, 2, 2, 1},
75 .numberOfConsumers = 0,
76 .scale = 0.0f,
77 .zeroPoint = 0,
78 .lifetime = OperandLifeTime::MODEL_OUTPUT,
79 .location = {.poolIndex = 0, .offset = 0, .length = 0},
80 },
81 };
82
83 const std::vector<Operation> operations = {{
84 .type = OperationType::ADD, .inputs = {operand1, operand2, operand3}, .outputs = {operand4},
85 }};
86
87 const std::vector<uint32_t> inputIndexes = {operand1};
88 const std::vector<uint32_t> outputIndexes = {operand4};
89 std::vector<uint8_t> operandValues(
90 reinterpret_cast<const uint8_t*>(operand2Data.data()),
91 reinterpret_cast<const uint8_t*>(operand2Data.data()) + size);
92 int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
93 operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
94 reinterpret_cast<const uint8_t*>(&activation[1]));
95
96 const std::vector<hidl_memory> pools = {};
97
98 return {
99 .operands = operands,
100 .operations = operations,
101 .inputIndexes = inputIndexes,
102 .outputIndexes = outputIndexes,
103 .operandValues = operandValues,
104 .pools = pools,
105 };
106}
107
108// create first invalid model
109V1_1::Model createInvalidTestModel1_1_1() {
110 Model model = createValidTestModel_1_1();
111 model.operations[0].type = static_cast<OperationType>(0xDEADBEEF); /* INVALID */
112 return model;
113}
114
115// create second invalid model
116V1_1::Model createInvalidTestModel2_1_1() {
117 Model model = createValidTestModel_1_1();
118 const uint32_t operand1 = 0;
119 const uint32_t operand5 = 4; // INVALID OPERAND
120 model.inputIndexes = std::vector<uint32_t>({operand1, operand5 /* INVALID OPERAND */});
121 return model;
122}
123
124V1_0::Model createValidTestModel_1_0() {
125 V1_1::Model model = createValidTestModel_1_1();
126 return nn::convertToV1_0(model);
127}
128
129V1_0::Model createInvalidTestModel1_1_0() {
130 V1_1::Model model = createInvalidTestModel1_1_1();
131 return nn::convertToV1_0(model);
132}
133
134V1_0::Model createInvalidTestModel2_1_0() {
135 V1_1::Model model = createInvalidTestModel2_1_1();
136 return nn::convertToV1_0(model);
137}
138
139// create a valid request
140Request createValidTestRequest() {
141 std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
142 std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
143 const uint32_t INPUT = 0;
144 const uint32_t OUTPUT = 1;
145
146 // prepare inputs
147 uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
148 uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
149 std::vector<RequestArgument> inputs = {{
150 .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
151 }};
152 std::vector<RequestArgument> outputs = {{
153 .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
154 }};
155 std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
156 nn::allocateSharedMemory(outputSize)};
157 if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
158 return {};
159 }
160
161 // load data
162 sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
163 sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
164 if (inputMemory.get() == nullptr || outputMemory.get() == nullptr) {
165 return {};
166 }
167 float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
168 float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
169 if (inputPtr == nullptr || outputPtr == nullptr) {
170 return {};
171 }
172 inputMemory->update();
173 outputMemory->update();
174 std::copy(inputData.begin(), inputData.end(), inputPtr);
175 std::copy(outputData.begin(), outputData.end(), outputPtr);
176 inputMemory->commit();
177 outputMemory->commit();
178
179 return {.inputs = inputs, .outputs = outputs, .pools = pools};
180}
181
182// create first invalid request
183Request createInvalidTestRequest1() {
184 Request request = createValidTestRequest();
185 const uint32_t INVALID = 2;
186 std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
187 uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
188 request.inputs[0].location = {
189 .poolIndex = INVALID /* INVALID */, .offset = 0, .length = inputSize};
190 return request;
191}
192
193// create second invalid request
194Request createInvalidTestRequest2() {
195 Request request = createValidTestRequest();
196 request.inputs[0].dimensions = std::vector<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8} /* INVALID */);
197 return request;
198}
199
200} // namespace neuralnetworks
201} // namespace hardware
202} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/Models.h b/neuralnetworks/1.0/vts/functional/Models.h
index 93982351..a1fbe927 100644
--- a/neuralnetworks/1.0/vts/functional/Models.h
+++ b/neuralnetworks/1.0/vts/functional/Models.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2017 The Android Open Source Project 2 * Copyright (C) 2018 The Android Open Source Project
3 * 3 *
4 * Licensed under the Apache License, Version 2.0 (the "License"); 4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License. 5 * you may not use this file except in compliance with the License.
@@ -14,29 +14,187 @@
14 * limitations under the License. 14 * limitations under the License.
15 */ 15 */
16 16
17#ifndef VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
18#define VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
19
17#define LOG_TAG "neuralnetworks_hidl_hal_test" 20#define LOG_TAG "neuralnetworks_hidl_hal_test"
18 21
19#include <android/hardware/neuralnetworks/1.1/types.h> 22#include "TestHarness.h"
23
24#include <android/hardware/neuralnetworks/1.0/types.h>
20 25
21namespace android { 26namespace android {
22namespace hardware { 27namespace hardware {
23namespace neuralnetworks { 28namespace neuralnetworks {
29namespace V1_0 {
30namespace vts {
31namespace functional {
32
33using MixedTypedExample = generated_tests::MixedTypedExampleType;
24 34
25// create V1_1 model 35#define FOR_EACH_TEST_MODEL(FN) \
26V1_1::Model createValidTestModel_1_1(); 36 FN(add_broadcast_quant8) \
27V1_1::Model createInvalidTestModel1_1_1(); 37 FN(add) \
28V1_1::Model createInvalidTestModel2_1_1(); 38 FN(add_quant8) \
39 FN(avg_pool_float_1) \
40 FN(avg_pool_float_2) \
41 FN(avg_pool_float_3) \
42 FN(avg_pool_float_4) \
43 FN(avg_pool_float_5) \
44 FN(avg_pool_quant8_1) \
45 FN(avg_pool_quant8_2) \
46 FN(avg_pool_quant8_3) \
47 FN(avg_pool_quant8_4) \
48 FN(avg_pool_quant8_5) \
49 FN(concat_float_1) \
50 FN(concat_float_2) \
51 FN(concat_float_3) \
52 FN(concat_quant8_1) \
53 FN(concat_quant8_2) \
54 FN(concat_quant8_3) \
55 FN(conv_1_h3_w2_SAME) \
56 FN(conv_1_h3_w2_VALID) \
57 FN(conv_3_h3_w2_SAME) \
58 FN(conv_3_h3_w2_VALID) \
59 FN(conv_float_2) \
60 FN(conv_float_channels) \
61 FN(conv_float_channels_weights_as_inputs) \
62 FN(conv_float_large) \
63 FN(conv_float_large_weights_as_inputs) \
64 FN(conv_float) \
65 FN(conv_float_weights_as_inputs) \
66 FN(conv_quant8_2) \
67 FN(conv_quant8_channels) \
68 FN(conv_quant8_channels_weights_as_inputs) \
69 FN(conv_quant8_large) \
70 FN(conv_quant8_large_weights_as_inputs) \
71 FN(conv_quant8) \
72 FN(conv_quant8_overflow) \
73 FN(conv_quant8_overflow_weights_as_inputs) \
74 FN(conv_quant8_weights_as_inputs) \
75 FN(depth_to_space_float_1) \
76 FN(depth_to_space_float_2) \
77 FN(depth_to_space_float_3) \
78 FN(depth_to_space_quant8_1) \
79 FN(depth_to_space_quant8_2) \
80 FN(depthwise_conv2d_float_2) \
81 FN(depthwise_conv2d_float_large_2) \
82 FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
83 FN(depthwise_conv2d_float_large) \
84 FN(depthwise_conv2d_float_large_weights_as_inputs) \
85 FN(depthwise_conv2d_float) \
86 FN(depthwise_conv2d_float_weights_as_inputs) \
87 FN(depthwise_conv2d_quant8_2) \
88 FN(depthwise_conv2d_quant8_large) \
89 FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
90 FN(depthwise_conv2d_quant8) \
91 FN(depthwise_conv2d_quant8_weights_as_inputs) \
92 FN(depthwise_conv) \
93 FN(dequantize) \
94 FN(embedding_lookup) \
95 FN(floor) \
96 FN(fully_connected_float_2) \
97 FN(fully_connected_float_large) \
98 FN(fully_connected_float_large_weights_as_inputs) \
99 FN(fully_connected_float) \
100 FN(fully_connected_float_weights_as_inputs) \
101 FN(fully_connected_quant8_2) \
102 FN(fully_connected_quant8_large) \
103 FN(fully_connected_quant8_large_weights_as_inputs) \
104 FN(fully_connected_quant8) \
105 FN(fully_connected_quant8_weights_as_inputs) \
106 FN(hashtable_lookup_float) \
107 FN(hashtable_lookup_quant8) \
108 FN(l2_normalization_2) \
109 FN(l2_normalization_large) \
110 FN(l2_normalization) \
111 FN(l2_pool_float_2) \
112 FN(l2_pool_float_large) \
113 FN(l2_pool_float) \
114 FN(local_response_norm_float_1) \
115 FN(local_response_norm_float_2) \
116 FN(local_response_norm_float_3) \
117 FN(local_response_norm_float_4) \
118 FN(logistic_float_1) \
119 FN(logistic_float_2) \
120 FN(logistic_quant8_1) \
121 FN(logistic_quant8_2) \
122 FN(lsh_projection_2) \
123 FN(lsh_projection) \
124 FN(lsh_projection_weights_as_inputs) \
125 FN(lstm2) \
126 FN(lstm2_state2) \
127 FN(lstm2_state) \
128 FN(lstm3) \
129 FN(lstm3_state2) \
130 FN(lstm3_state3) \
131 FN(lstm3_state) \
132 FN(lstm) \
133 FN(lstm_state2) \
134 FN(lstm_state) \
135 FN(max_pool_float_1) \
136 FN(max_pool_float_2) \
137 FN(max_pool_float_3) \
138 FN(max_pool_float_4) \
139 FN(max_pool_quant8_1) \
140 FN(max_pool_quant8_2) \
141 FN(max_pool_quant8_3) \
142 FN(max_pool_quant8_4) \
143 FN(mobilenet_224_gender_basic_fixed) \
144 FN(mobilenet_quantized) \
145 FN(mul_broadcast_quant8) \
146 FN(mul) \
147 FN(mul_quant8) \
148 FN(mul_relu) \
149 FN(relu1_float_1) \
150 FN(relu1_float_2) \
151 FN(relu1_quant8_1) \
152 FN(relu1_quant8_2) \
153 FN(relu6_float_1) \
154 FN(relu6_float_2) \
155 FN(relu6_quant8_1) \
156 FN(relu6_quant8_2) \
157 FN(relu_float_1) \
158 FN(relu_float_2) \
159 FN(relu_quant8_1) \
160 FN(relu_quant8_2) \
161 FN(reshape) \
162 FN(reshape_quant8) \
163 FN(reshape_quant8_weights_as_inputs) \
164 FN(reshape_weights_as_inputs) \
165 FN(resize_bilinear_2) \
166 FN(resize_bilinear) \
167 FN(rnn) \
168 FN(rnn_state) \
169 FN(softmax_float_1) \
170 FN(softmax_float_2) \
171 FN(softmax_quant8_1) \
172 FN(softmax_quant8_2) \
173 FN(space_to_depth_float_1) \
174 FN(space_to_depth_float_2) \
175 FN(space_to_depth_float_3) \
176 FN(space_to_depth_quant8_1) \
177 FN(space_to_depth_quant8_2) \
178 FN(svdf2) \
179 FN(svdf) \
180 FN(svdf_state) \
181 FN(tanh)
29 182
30// create V1_0 model 183#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
31V1_0::Model createValidTestModel_1_0(); 184 namespace function { \
32V1_0::Model createInvalidTestModel1_1_0(); 185 extern std::vector<MixedTypedExample> examples; \
33V1_0::Model createInvalidTestModel2_1_0(); 186 Model createTestModel(); \
187 }
34 188
35// create the request 189FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)
36V1_0::Request createValidTestRequest();
37V1_0::Request createInvalidTestRequest1();
38V1_0::Request createInvalidTestRequest2();
39 190
191#undef FORWARD_DECLARE_GENERATED_OBJECTS
192
193} // namespace functional
194} // namespace vts
195} // namespace V1_0
40} // namespace neuralnetworks 196} // namespace neuralnetworks
41} // namespace hardware 197} // namespace hardware
42} // namespace android 198} // namespace android
199
200#endif // VTS_HAL_NEURALNETWORKS_V1_0_VTS_FUNCTIONAL_MODELS_H
diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
new file mode 100644
index 00000000..4f0697e9
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
@@ -0,0 +1,506 @@
1/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "VtsHalNeuralnetworks.h"
20
21#include "Callbacks.h"
22
23namespace android {
24namespace hardware {
25namespace neuralnetworks {
26namespace V1_0 {
27namespace vts {
28namespace functional {
29
30using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
31using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
32
33///////////////////////// UTILITY FUNCTIONS /////////////////////////
34
35static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
36 const V1_0::Model& model) {
37 SCOPED_TRACE(message + " [getSupportedOperations]");
38
39 Return<void> ret =
40 device->getSupportedOperations(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
41 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
42 });
43 EXPECT_TRUE(ret.isOk());
44}
45
// Checks that prepareModel rejects the (invalid) model: the synchronous
// launch status must be INVALID_ARGUMENT, the asynchronous callback must also
// report INVALID_ARGUMENT, and no prepared model may be returned.
static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
                                 const V1_0::Model& model) {
    SCOPED_TRACE(message + " [prepareModel]");

    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    // Launch status covers argument validation done before async preparation.
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));

    // The same failure must be delivered through the callback as well.
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
    ASSERT_EQ(nullptr, preparedModel.get());
}
62
// Primary validation function. This function will take a valid model, apply a
// mutation to it to invalidate the model, then pass it to interface calls that
// use the model. Note that the model here is passed by value, and any mutation
// to the model does not leave this function.
//
// "message" is included in SCOPED_TRACE output so a failure identifies which
// mutation produced it.
static void validate(const sp<IDevice>& device, const std::string& message, V1_0::Model model,
                     const std::function<void(Model*)>& mutation) {
    mutation(&model);
    validateGetSupportedOperations(device, message, model);
    validatePrepareModel(device, message, model);
}
73
74// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
75// so this is efficiently accomplished by moving the element to the end and
76// resizing the hidl_vec to one less.
77template <typename Type>
78static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
79 if (vec) {
80 std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
81 vec->resize(vec->size() - 1);
82 }
83}
84
85template <typename Type>
86static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
87 // assume vec is valid
88 const uint32_t index = vec->size();
89 vec->resize(index + 1);
90 (*vec)[index] = value;
91 return index;
92}
93
// Appends a fresh scalar INT32 model-input operand (no consumers, zeroed data
// location) to "model" and returns its index.
static uint32_t addOperand(Model* model) {
    return hidl_vec_push_back(&model->operands,
                              {
                                  .type = OperandType::INT32,
                                  .dimensions = {},
                                  .numberOfConsumers = 0,
                                  .scale = 0.0f,
                                  .zeroPoint = 0,
                                  .lifetime = OperandLifeTime::MODEL_INPUT,
                                  .location = {.poolIndex = 0, .offset = 0, .length = 0},
                              });
}
106
107static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
108 uint32_t index = addOperand(model);
109 model->operands[index].numberOfConsumers = 1;
110 model->operands[index].lifetime = lifetime;
111 return index;
112}
113
114///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
115
// Values just outside the two valid OperandType ranges (the fundamental range
// and the OEM range), used to probe out-of-range type rejection.
static const int32_t invalidOperandTypes[] = {
    static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
    static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1,  // upper bound fundamental
    static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
};
122
123static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
124 for (size_t operand = 0; operand < model.operands.size(); ++operand) {
125 for (int32_t invalidOperandType : invalidOperandTypes) {
126 const std::string message = "mutateOperandTypeTest: operand " +
127 std::to_string(operand) + " set to value " +
128 std::to_string(invalidOperandType);
129 validate(device, message, model, [operand, invalidOperandType](Model* model) {
130 model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
131 });
132 }
133 }
134}
135
136///////////////////////// VALIDATE OPERAND RANK /////////////////////////
137
138static uint32_t getInvalidRank(OperandType type) {
139 switch (type) {
140 case OperandType::FLOAT32:
141 case OperandType::INT32:
142 case OperandType::UINT32:
143 return 1;
144 case OperandType::TENSOR_FLOAT32:
145 case OperandType::TENSOR_INT32:
146 case OperandType::TENSOR_QUANT8_ASYMM:
147 return 0;
148 default:
149 return 0;
150 }
151}
152
153static void mutateOperandRankTest(const sp<IDevice>& device, const V1_0::Model& model) {
154 for (size_t operand = 0; operand < model.operands.size(); ++operand) {
155 const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
156 const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
157 " has rank of " + std::to_string(invalidRank);
158 validate(device, message, model, [operand, invalidRank](Model* model) {
159 model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
160 });
161 }
162}
163
164///////////////////////// VALIDATE OPERAND SCALE /////////////////////////
165
166static float getInvalidScale(OperandType type) {
167 switch (type) {
168 case OperandType::FLOAT32:
169 case OperandType::INT32:
170 case OperandType::UINT32:
171 case OperandType::TENSOR_FLOAT32:
172 return 1.0f;
173 case OperandType::TENSOR_INT32:
174 return -1.0f;
175 case OperandType::TENSOR_QUANT8_ASYMM:
176 return 0.0f;
177 default:
178 return 0.0f;
179 }
180}
181
182static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_0::Model& model) {
183 for (size_t operand = 0; operand < model.operands.size(); ++operand) {
184 const float invalidScale = getInvalidScale(model.operands[operand].type);
185 const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
186 " has scale of " + std::to_string(invalidScale);
187 validate(device, message, model, [operand, invalidScale](Model* model) {
188 model->operands[operand].scale = invalidScale;
189 });
190 }
191}
192
193///////////////////////// VALIDATE OPERAND ZERO POINT /////////////////////////
194
195static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
196 switch (type) {
197 case OperandType::FLOAT32:
198 case OperandType::INT32:
199 case OperandType::UINT32:
200 case OperandType::TENSOR_FLOAT32:
201 case OperandType::TENSOR_INT32:
202 return {1};
203 case OperandType::TENSOR_QUANT8_ASYMM:
204 return {-1, 256};
205 default:
206 return {};
207 }
208}
209
210static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_0::Model& model) {
211 for (size_t operand = 0; operand < model.operands.size(); ++operand) {
212 const std::vector<int32_t> invalidZeroPoints =
213 getInvalidZeroPoints(model.operands[operand].type);
214 for (int32_t invalidZeroPoint : invalidZeroPoints) {
215 const std::string message = "mutateOperandZeroPointTest: operand " +
216 std::to_string(operand) + " has zero point of " +
217 std::to_string(invalidZeroPoint);
218 validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
219 model->operands[operand].zeroPoint = invalidZeroPoint;
220 });
221 }
222 }
223}
224
///////////////////////// VALIDATE OPERAND LIFETIME / LOCATION (not yet covered) /////////////////////////
226
227// TODO: Operand::lifetime
228// TODO: Operand::location
229
230///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
231
// Rewrites "operand" to have the given type, adjusting the other fields
// (dimensions, scale, zeroPoint) so that they are valid for the NEW type.
// This ensures the only inconsistency left in the model is the type mismatch
// itself, which is what the mutateOperationOperandType tests probe.
static void mutateOperand(Operand* operand, OperandType type) {
    Operand newOperand = *operand;
    newOperand.type = type;
    switch (type) {
        // Scalars: no dimensions, no quantization parameters.
        case OperandType::FLOAT32:
        case OperandType::INT32:
        case OperandType::UINT32:
            newOperand.dimensions = hidl_vec<uint32_t>();
            newOperand.scale = 0.0f;
            newOperand.zeroPoint = 0;
            break;
        // Float tensor: needs at least one dimension, no quantization.
        case OperandType::TENSOR_FLOAT32:
            newOperand.dimensions =
                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.scale = 0.0f;
            newOperand.zeroPoint = 0;
            break;
        // Int32 tensor: needs at least one dimension; scale may be kept but
        // zeroPoint must be 0.
        case OperandType::TENSOR_INT32:
            newOperand.dimensions =
                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.zeroPoint = 0;
            break;
        // Quantized tensor: needs at least one dimension and a nonzero scale.
        case OperandType::TENSOR_QUANT8_ASYMM:
            newOperand.dimensions =
                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
            newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
            break;
        // OEM types carry no constraints we can normalize here.
        case OperandType::OEM:
        case OperandType::TENSOR_OEM_BYTE:
        default:
            break;
    }
    *operand = newOperand;
}
266
// Returns true if changing the type of operand "operand" cannot invalidate
// the model and the type-mutation test must therefore be skipped.
//
// LSH_PROJECTION's second argument is allowed to have any type. This is the
// only operation that currently has a type that can be anything independent
// from any other type. Changing the operand type to any other type will
// result in a valid model for LSH_PROJECTION. If this is the case, skip the
// test.
static bool mutateOperationOperandTypeSkip(size_t operand, const V1_0::Model& model) {
    for (const Operation& operation : model.operations) {
        if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) {
            return true;
        }
    }
    return false;
}
280
// For every operand, rewrite it (consistently, via mutateOperand) to every
// OTHER operand type and verify the model is rejected because the new type no
// longer matches what the consuming/producing operations expect.
static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operand = 0; operand < model.operands.size(); ++operand) {
        // Skip operands whose type is unconstrained (see the skip helper).
        if (mutateOperationOperandTypeSkip(operand, model)) {
            continue;
        }
        for (OperandType invalidOperandType : hidl_enum_iterator<OperandType>{}) {
            // Do not test OEM types
            if (invalidOperandType == model.operands[operand].type ||
                invalidOperandType == OperandType::OEM ||
                invalidOperandType == OperandType::TENSOR_OEM_BYTE) {
                continue;
            }
            const std::string message = "mutateOperationOperandTypeTest: operand " +
                                        std::to_string(operand) + " set to type " +
                                        toString(invalidOperandType);
            validate(device, message, model, [operand, invalidOperandType](Model* model) {
                mutateOperand(&model->operands[operand], invalidOperandType);
            });
        }
    }
}
302
303///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
304
// Values just outside the two valid OperationType ranges (the fundamental
// range and the single-value OEM range), used to probe out-of-range
// operation-type rejection.
static const int32_t invalidOperationTypes[] = {
    static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
    static_cast<int32_t>(OperationType::TANH) + 1,           // upper bound fundamental
    static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
    static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
};
311
312static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
313 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
314 for (int32_t invalidOperationType : invalidOperationTypes) {
315 const std::string message = "mutateOperationTypeTest: operation " +
316 std::to_string(operation) + " set to value " +
317 std::to_string(invalidOperationType);
318 validate(device, message, model, [operation, invalidOperationType](Model* model) {
319 model->operations[operation].type =
320 static_cast<OperationType>(invalidOperationType);
321 });
322 }
323 }
324}
325
326///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
327
328static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device,
329 const V1_0::Model& model) {
330 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
331 const uint32_t invalidOperand = model.operands.size();
332 for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
333 const std::string message = "mutateOperationInputOperandIndexTest: operation " +
334 std::to_string(operation) + " input " +
335 std::to_string(input);
336 validate(device, message, model, [operation, input, invalidOperand](Model* model) {
337 model->operations[operation].inputs[input] = invalidOperand;
338 });
339 }
340 }
341}
342
343///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
344
345static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device,
346 const V1_0::Model& model) {
347 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
348 const uint32_t invalidOperand = model.operands.size();
349 for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
350 const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
351 std::to_string(operation) + " output " +
352 std::to_string(output);
353 validate(device, message, model, [operation, output, invalidOperand](Model* model) {
354 model->operations[operation].outputs[output] = invalidOperand;
355 });
356 }
357 }
358}
359
360///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
361
362static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
363 if (vec) {
364 // remove elements matching "value"
365 auto last = std::remove(vec->begin(), vec->end(), value);
366 vec->resize(std::distance(vec->begin(), last));
367
368 // decrement elements exceeding "value"
369 std::transform(vec->begin(), vec->end(), vec->begin(),
370 [value](uint32_t v) { return v > value ? v-- : v; });
371 }
372}
373
// Erases the operand at "index" from the model and rewrites every operand
// reference (operation inputs/outputs, model inputs/outputs) so remaining
// indexes stay consistent.
static void removeOperand(Model* model, uint32_t index) {
    hidl_vec_removeAt(&model->operands, index);
    for (Operation& operation : model->operations) {
        removeValueAndDecrementGreaterValues(&operation.inputs, index);
        removeValueAndDecrementGreaterValues(&operation.outputs, index);
    }
    removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
    removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
}
383
384static void removeOperandTest(const sp<IDevice>& device, const V1_0::Model& model) {
385 for (size_t operand = 0; operand < model.operands.size(); ++operand) {
386 const std::string message = "removeOperandTest: operand " + std::to_string(operand);
387 validate(device, message, model,
388 [operand](Model* model) { removeOperand(model, operand); });
389 }
390}
391
392///////////////////////// REMOVE OPERATION /////////////////////////
393
// Erases the operation at "index" from the model, first decrementing the
// consumer count of each of its input operands so the operand bookkeeping
// stays consistent.
static void removeOperation(Model* model, uint32_t index) {
    for (uint32_t operand : model->operations[index].inputs) {
        model->operands[operand].numberOfConsumers--;
    }
    hidl_vec_removeAt(&model->operations, index);
}
400
401static void removeOperationTest(const sp<IDevice>& device, const V1_0::Model& model) {
402 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
403 const std::string message = "removeOperationTest: operation " + std::to_string(operation);
404 validate(device, message, model,
405 [operation](Model* model) { removeOperation(model, operation); });
406 }
407}
408
409///////////////////////// REMOVE OPERATION INPUT /////////////////////////
410
// For every operation input, remove it (and decrement the operand's consumer
// count) and verify the model is rejected. Inputs whose removal would still
// yield a valid model are skipped.
static void removeOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
            const V1_0::Operation& op = model.operations[operation];
            // CONCATENATION has at least 2 inputs, with the last element being
            // INT32. Skip this test if removing one of CONCATENATION's
            // inputs still produces a valid model.
            if (op.type == V1_0::OperationType::CONCATENATION && op.inputs.size() > 2 &&
                input != op.inputs.size() - 1) {
                continue;
            }
            const std::string message = "removeOperationInputTest: operation " +
                                        std::to_string(operation) + ", input " +
                                        std::to_string(input);
            validate(device, message, model, [operation, input](Model* model) {
                // Keep the consumer count consistent with the removal.
                uint32_t operand = model->operations[operation].inputs[input];
                model->operands[operand].numberOfConsumers--;
                hidl_vec_removeAt(&model->operations[operation].inputs, input);
            });
        }
    }
}
433
434///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
435
436static void removeOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
437 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
438 for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
439 const std::string message = "removeOperationOutputTest: operation " +
440 std::to_string(operation) + ", output " +
441 std::to_string(output);
442 validate(device, message, model, [operation, output](Model* model) {
443 hidl_vec_removeAt(&model->operations[operation].outputs, output);
444 });
445 }
446 }
447}
448
449///////////////////////// MODEL VALIDATION /////////////////////////
450
451// TODO: remove model input
452// TODO: remove model output
453// TODO: add unused operation
454
455///////////////////////// ADD OPERATION INPUT /////////////////////////
456
// For every operation, append an extra (unexpected) input operand — also
// registered as a model input — and verify the model is rejected because the
// operation now has too many inputs.
static void addOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
        validate(device, message, model, [operation](Model* model) {
            uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
            hidl_vec_push_back(&model->operations[operation].inputs, index);
            hidl_vec_push_back(&model->inputIndexes, index);
        });
    }
}
467
468///////////////////////// ADD OPERATION OUTPUT /////////////////////////
469
// For every operation, append an extra (unexpected) output operand — also
// registered as a model output — and verify the model is rejected because the
// operation now has too many outputs.
static void addOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
    for (size_t operation = 0; operation < model.operations.size(); ++operation) {
        const std::string message =
            "addOperationOutputTest: operation " + std::to_string(operation);
        validate(device, message, model, [operation](Model* model) {
            uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
            hidl_vec_push_back(&model->operations[operation].outputs, index);
            hidl_vec_push_back(&model->outputIndexes, index);
        });
    }
}
481
482////////////////////////// ENTRY POINT //////////////////////////////
483
// Entry point: runs every mutation-based validation test against "model".
// Each helper clones the model, corrupts exactly one aspect of it, and checks
// that both getSupportedOperations and prepareModel reject the result.
void ValidationTest::validateModel(const V1_0::Model& model) {
    mutateOperandTypeTest(device, model);
    mutateOperandRankTest(device, model);
    mutateOperandScaleTest(device, model);
    mutateOperandZeroPointTest(device, model);
    mutateOperationOperandTypeTest(device, model);
    mutateOperationTypeTest(device, model);
    mutateOperationInputOperandIndexTest(device, model);
    mutateOperationOutputOperandIndexTest(device, model);
    removeOperandTest(device, model);
    removeOperationTest(device, model);
    removeOperationInputTest(device, model);
    removeOperationOutputTest(device, model);
    addOperationInputTest(device, model);
    addOperationOutputTest(device, model);
}
500
501} // namespace functional
502} // namespace vts
503} // namespace V1_0
504} // namespace neuralnetworks
505} // namespace hardware
506} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
new file mode 100644
index 00000000..08f2613c
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -0,0 +1,261 @@
1/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "VtsHalNeuralnetworks.h"
20
21#include "Callbacks.h"
22#include "TestHarness.h"
23#include "Utils.h"
24
25#include <android-base/logging.h>
26#include <android/hidl/memory/1.0/IMemory.h>
27#include <hidlmemory/mapping.h>
28
29namespace android {
30namespace hardware {
31namespace neuralnetworks {
32namespace V1_0 {
33namespace vts {
34namespace functional {
35
36using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
37using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
38using ::android::hidl::memory::V1_0::IMemory;
39using generated_tests::MixedTyped;
40using generated_tests::MixedTypedExampleType;
41using generated_tests::for_all;
42
43///////////////////////// UTILITY FUNCTIONS /////////////////////////
44
// Prepares |model| on |device|, storing the result through |preparedModel|.
// On return, *preparedModel is non-null on success.  It is left null (without
// a test failure) when the vendor service legitimately cannot prepare a model
// it reported as not fully supported; callers should treat that as "skip".
static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model (asynchronous: result is delivered via the callback)
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations call returns a list of operations that are
    // guaranteed not to fail if prepareModel is called, and
    // 'fullySupportsModel' is true if and only if the entire model is
    // guaranteed.  If a driver has any doubt that it can prepare an
    // operation, it must return false.  So here, if a driver isn't sure if it
    // can support an operation, but reports that it successfully prepared the
    // model, the test can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ]   Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}
91
92// Primary validation function. This function will take a valid request, apply a
93// mutation to it to invalidate the request, then pass it to interface calls
94// that use the request. Note that the request here is passed by value, and any
95// mutation to the request does not leave this function.
96static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
97 Request request, const std::function<void(Request*)>& mutation) {
98 mutation(&request);
99 SCOPED_TRACE(message + " [execute]");
100
101 sp<ExecutionCallback> executionCallback = new ExecutionCallback();
102 ASSERT_NE(nullptr, executionCallback.get());
103 Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
104 ASSERT_TRUE(executeLaunchStatus.isOk());
105 ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
106
107 executionCallback->wait();
108 ErrorStatus executionReturnStatus = executionCallback->getStatus();
109 ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
110}
111
112// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
113// so this is efficiently accomplished by moving the element to the end and
114// resizing the hidl_vec to one less.
115template <typename Type>
116static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
117 if (vec) {
118 std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
119 vec->resize(vec->size() - 1);
120 }
121}
122
123template <typename Type>
124static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
125 // assume vec is valid
126 const uint32_t index = vec->size();
127 vec->resize(index + 1);
128 (*vec)[index] = value;
129 return index;
130}
131
132///////////////////////// REMOVE INPUT ////////////////////////////////////
133
134static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
135 for (size_t input = 0; input < request.inputs.size(); ++input) {
136 const std::string message = "removeInput: removed input " + std::to_string(input);
137 validate(preparedModel, message, request,
138 [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
139 }
140}
141
142///////////////////////// REMOVE OUTPUT ////////////////////////////////////
143
144static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
145 for (size_t output = 0; output < request.outputs.size(); ++output) {
146 const std::string message = "removeOutput: removed Output " + std::to_string(output);
147 validate(preparedModel, message, request,
148 [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
149 }
150}
151
152///////////////////////////// ENTRY POINT //////////////////////////////////
153
// Builds one executable Request per example.  Each Request uses two shared
// memory pools: pool 0 (INPUT) holds all input buffers back-to-back, pool 1
// (OUTPUT) reserves space for all outputs.  for_all visits each typed buffer
// of a MixedTyped with (index, data pointer, byte length) -- as used below.
// Returns an empty vector if any allocation or mapping fails.
std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples) {
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    std::vector<Request> requests;

    for (auto& example : examples) {
        const MixedTyped& inputs = example.first;
        const MixedTyped& outputs = example.second;

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;

        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            // A zero-length buffer denotes an omitted (hasNoValue) input.
            RequestArgument arg_empty = {
                .hasNoValue = true,
            };
            inputs_info[index] = s ? arg : arg_empty;
            inputSize += s;
        });
        // Compute offset for inputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                if (!i.hasNoValue) i.location.offset = offset;
                offset += i.location.length;
            }
        }

        // Go through all outputs, initialize RequestArgument descriptors
        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute offset for outputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
        // Allocate the two pools; bail out on allocation failure.
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
            return {};
        }

        // map pool
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        if (inputMemory == nullptr) {
            return {};
        }
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        if (inputPtr == nullptr) {
            return {};
        }

        // initialize pool: copy each example input buffer to its computed
        // offset within the INPUT pool (wrapped in update()/commit()).
        inputMemory->update();
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // TODO: handle more than one input
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });
        inputMemory->commit();

        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
    }

    return requests;
}
239
// Prepares |model| once, then runs every request-mutation sweep against each
// of the supplied (valid) |requests|.
void ValidationTest::validateRequests(const V1_0::Model& model,
                                      const std::vector<Request>& requests) {
    // create IPreparedModel
    sp<IPreparedModel> preparedModel;
    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
    // A null preparedModel (with no fatal failure) means the service cannot
    // prepare a model it does not support -- skip, per createPreparedModel.
    if (preparedModel == nullptr) {
        return;
    }

    // validate each request
    for (const Request& request : requests) {
        removeInputTest(preparedModel, request);
        removeOutputTest(preparedModel, request);
    }
}
255
256} // namespace functional
257} // namespace vts
258} // namespace V1_0
259} // namespace neuralnetworks
260} // namespace hardware
261} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/ValidationTests.cpp b/neuralnetworks/1.0/vts/functional/ValidationTests.cpp
new file mode 100644
index 00000000..98fc1c59
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/ValidationTests.cpp
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "Models.h"
20#include "VtsHalNeuralnetworks.h"
21
22namespace android {
23namespace hardware {
24namespace neuralnetworks {
25namespace V1_0 {
26namespace vts {
27namespace functional {
28
// forward declarations
// NOTE(review): ValidateRequest.cpp defines createRequests taking
// std::vector<MixedTypedExampleType>; presumably MixedTypedExample is an
// alias for MixedTypedExampleType (e.g. via Models.h) -- confirm, otherwise
// this declaration will not match the definition.
std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);

// generate validation tests
// For every generated test model supplied by FOR_EACH_TEST_MODEL, instantiate
// one ValidationTest case that builds the model and its example requests and
// runs both the model-validation and request-validation sweeps.
#define VTS_CURRENT_TEST_CASE(TestName)                                           \
    TEST_F(ValidationTest, TestName) {                                            \
        const Model model = TestName::createTestModel();                          \
        const std::vector<Request> requests = createRequests(TestName::examples); \
        validateModel(model);                                                     \
        validateRequests(model, requests);                                        \
    }

FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)

#undef VTS_CURRENT_TEST_CASE
44
45} // namespace functional
46} // namespace vts
47} // namespace V1_0
48} // namespace neuralnetworks
49} // namespace hardware
50} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index b14fb2c4..1ff3b668 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -16,15 +16,7 @@
16 16
17#define LOG_TAG "neuralnetworks_hidl_hal_test" 17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18 18
19#include "VtsHalNeuralnetworksV1_0.h" 19#include "VtsHalNeuralnetworks.h"
20#include "Utils.h"
21
22#include <android-base/logging.h>
23
24using ::android::hardware::hidl_memory;
25using ::android::hidl::allocator::V1_0::IAllocator;
26using ::android::hidl::memory::V1_0::IMemory;
27using ::android::sp;
28 20
29namespace android { 21namespace android {
30namespace hardware { 22namespace hardware {
@@ -33,11 +25,6 @@ namespace V1_0 {
33namespace vts { 25namespace vts {
34namespace functional { 26namespace functional {
35 27
36// allocator helper
37hidl_memory allocateSharedMemory(int64_t size) {
38 return nn::allocateSharedMemory(size);
39}
40
41// A class for test environment setup 28// A class for test environment setup
42NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {} 29NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
43 30
@@ -51,23 +38,49 @@ NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
51} 38}
52 39
53void NeuralnetworksHidlEnvironment::registerTestServices() { 40void NeuralnetworksHidlEnvironment::registerTestServices() {
54 registerTestService<V1_0::IDevice>(); 41 registerTestService<IDevice>();
55} 42}
56 43
57// The main test class for NEURALNETWORK HIDL HAL. 44// The main test class for NEURALNETWORK HIDL HAL.
45NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}
46
58NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {} 47NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
59 48
60void NeuralnetworksHidlTest::SetUp() { 49void NeuralnetworksHidlTest::SetUp() {
61 device = ::testing::VtsHalHidlTargetTestBase::getService<V1_0::IDevice>( 50 ::testing::VtsHalHidlTargetTestBase::SetUp();
51 device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
62 NeuralnetworksHidlEnvironment::getInstance()); 52 NeuralnetworksHidlEnvironment::getInstance());
63 ASSERT_NE(nullptr, device.get()); 53 ASSERT_NE(nullptr, device.get());
64} 54}
65 55
66void NeuralnetworksHidlTest::TearDown() {} 56void NeuralnetworksHidlTest::TearDown() {
57 device = nullptr;
58 ::testing::VtsHalHidlTargetTestBase::TearDown();
59}
67 60
68} // namespace functional 61} // namespace functional
69} // namespace vts 62} // namespace vts
63
64::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
65 return os << toString(errorStatus);
66}
67
68::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
69 return os << toString(deviceStatus);
70}
71
70} // namespace V1_0 72} // namespace V1_0
71} // namespace neuralnetworks 73} // namespace neuralnetworks
72} // namespace hardware 74} // namespace hardware
73} // namespace android 75} // namespace android
76
77using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;
78
79int main(int argc, char** argv) {
80 ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
81 ::testing::InitGoogleTest(&argc, argv);
82 NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
83
84 int status = RUN_ALL_TESTS();
85 return status;
86}
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
index fbb16074..e79129b0 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
@@ -18,16 +18,15 @@
18#define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H 18#define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
19 19
20#include <android/hardware/neuralnetworks/1.0/IDevice.h> 20#include <android/hardware/neuralnetworks/1.0/IDevice.h>
21#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
22#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
23#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
24#include <android/hardware/neuralnetworks/1.0/types.h> 21#include <android/hardware/neuralnetworks/1.0/types.h>
25#include <android/hidl/allocator/1.0/IAllocator.h>
26 22
27#include <VtsHalHidlTargetTestBase.h> 23#include <VtsHalHidlTargetTestBase.h>
28#include <VtsHalHidlTargetTestEnvBase.h> 24#include <VtsHalHidlTargetTestEnvBase.h>
25
26#include <android-base/macros.h>
29#include <gtest/gtest.h> 27#include <gtest/gtest.h>
30#include <string> 28#include <iostream>
29#include <vector>
31 30
32namespace android { 31namespace android {
33namespace hardware { 32namespace hardware {
@@ -36,47 +35,47 @@ namespace V1_0 {
36namespace vts { 35namespace vts {
37namespace functional { 36namespace functional {
38 37
39hidl_memory allocateSharedMemory(int64_t size);
40
41// A class for test environment setup 38// A class for test environment setup
42class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase { 39class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
40 DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
43 NeuralnetworksHidlEnvironment(); 41 NeuralnetworksHidlEnvironment();
44 NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete; 42 ~NeuralnetworksHidlEnvironment() override;
45 NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete;
46 NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete;
47 NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
48 43
49 public: 44 public:
50 ~NeuralnetworksHidlEnvironment() override;
51 static NeuralnetworksHidlEnvironment* getInstance(); 45 static NeuralnetworksHidlEnvironment* getInstance();
52 void registerTestServices() override; 46 void registerTestServices() override;
53}; 47};
54 48
55// The main test class for NEURALNETWORKS HIDL HAL. 49// The main test class for NEURALNETWORKS HIDL HAL.
56class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase { 50class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
51 DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
52
57 public: 53 public:
54 NeuralnetworksHidlTest();
58 ~NeuralnetworksHidlTest() override; 55 ~NeuralnetworksHidlTest() override;
59 void SetUp() override; 56 void SetUp() override;
60 void TearDown() override; 57 void TearDown() override;
61 58
62 sp<V1_0::IDevice> device; 59 protected:
60 sp<IDevice> device;
63}; 61};
62
63// Tag for the validation tests
64class ValidationTest : public NeuralnetworksHidlTest {
65 protected:
66 void validateModel(const Model& model);
67 void validateRequests(const Model& model, const std::vector<Request>& request);
68};
69
70// Tag for the generated tests
71class GeneratedTest : public NeuralnetworksHidlTest {};
72
64} // namespace functional 73} // namespace functional
65} // namespace vts 74} // namespace vts
66 75
67// pretty-print values for error messages 76// pretty-print values for error messages
68 77::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
69template <typename CharT, typename Traits> 78::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus);
70::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
71 V1_0::ErrorStatus errorStatus) {
72 return os << toString(errorStatus);
73}
74
75template <typename CharT, typename Traits>
76::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
77 V1_0::DeviceStatus deviceStatus) {
78 return os << toString(deviceStatus);
79}
80 79
81} // namespace V1_0 80} // namespace V1_0
82} // namespace neuralnetworks 81} // namespace neuralnetworks
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp
deleted file mode 100644
index 59e5b806..00000000
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0BasicTest.cpp
+++ /dev/null
@@ -1,293 +0,0 @@
1/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "VtsHalNeuralnetworksV1_0.h"
20
21#include "Callbacks.h"
22#include "Models.h"
23#include "TestHarness.h"
24
25#include <android-base/logging.h>
26#include <android/hidl/memory/1.0/IMemory.h>
27#include <hidlmemory/mapping.h>
28
29using ::android::hardware::neuralnetworks::V1_0::IDevice;
30using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
31using ::android::hardware::neuralnetworks::V1_0::Capabilities;
32using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
33using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
34using ::android::hardware::neuralnetworks::V1_0::Model;
35using ::android::hardware::neuralnetworks::V1_0::OperationType;
36using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
37using ::android::hardware::Return;
38using ::android::hardware::Void;
39using ::android::hardware::hidl_memory;
40using ::android::hardware::hidl_string;
41using ::android::hardware::hidl_vec;
42using ::android::hidl::allocator::V1_0::IAllocator;
43using ::android::hidl::memory::V1_0::IMemory;
44using ::android::sp;
45
46namespace android {
47namespace hardware {
48namespace neuralnetworks {
49namespace V1_0 {
50namespace vts {
51namespace functional {
52using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
53using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
54
55static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
56 ASSERT_NE(nullptr, preparedModel);
57 Model model = createValidTestModel_1_0();
58
59 // see if service can handle model
60 bool fullySupportsModel = false;
61 Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
62 model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
63 ASSERT_EQ(ErrorStatus::NONE, status);
64 ASSERT_NE(0ul, supported.size());
65 fullySupportsModel =
66 std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
67 });
68 ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
69
70 // launch prepare model
71 sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
72 ASSERT_NE(nullptr, preparedModelCallback.get());
73 Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
74 ASSERT_TRUE(prepareLaunchStatus.isOk());
75 ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
76
77 // retrieve prepared model
78 preparedModelCallback->wait();
79 ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
80 *preparedModel = preparedModelCallback->getPreparedModel();
81
82 // The getSupportedOperations call returns a list of operations that are
83 // guaranteed not to fail if prepareModel is called, and
84 // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
85 // If a driver has any doubt that it can prepare an operation, it must
86 // return false. So here, if a driver isn't sure if it can support an
87 // operation, but reports that it successfully prepared the model, the test
88 // can continue.
89 if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
90 ASSERT_EQ(nullptr, preparedModel->get());
91 LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
92 "prepare model that it does not support.";
93 std::cout << "[ ] Early termination of test because vendor service cannot "
94 "prepare model that it does not support."
95 << std::endl;
96 return;
97 }
98 ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
99 ASSERT_NE(nullptr, preparedModel->get());
100}
101
102// create device test
103TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
104
105// status test
106TEST_F(NeuralnetworksHidlTest, StatusTest) {
107 Return<DeviceStatus> status = device->getStatus();
108 ASSERT_TRUE(status.isOk());
109 EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
110}
111
112// initialization
113TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
114 Return<void> ret =
115 device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
116 EXPECT_EQ(ErrorStatus::NONE, status);
117 EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
118 EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
119 EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
120 EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
121 });
122 EXPECT_TRUE(ret.isOk());
123}
124
125// supported operations positive test
126TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {
127 Model model = createValidTestModel_1_0();
128 Return<void> ret = device->getSupportedOperations(
129 model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
130 EXPECT_EQ(ErrorStatus::NONE, status);
131 EXPECT_EQ(model.operations.size(), supported.size());
132 });
133 EXPECT_TRUE(ret.isOk());
134}
135
136// supported operations negative test 1
137TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {
138 Model model = createInvalidTestModel1_1_0();
139 Return<void> ret = device->getSupportedOperations(
140 model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
141 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
142 (void)supported;
143 });
144 EXPECT_TRUE(ret.isOk());
145}
146
147// supported operations negative test 2
148TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
149 Model model = createInvalidTestModel2_1_0();
150 Return<void> ret = device->getSupportedOperations(
151 model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
152 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
153 (void)supported;
154 });
155 EXPECT_TRUE(ret.isOk());
156}
157
158// prepare simple model positive test
159TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
160 sp<IPreparedModel> preparedModel;
161 doPrepareModelShortcut(device, &preparedModel);
162}
163
164// prepare simple model negative test 1
165TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
166 Model model = createInvalidTestModel1_1_0();
167 sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
168 ASSERT_NE(nullptr, preparedModelCallback.get());
169 Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
170 ASSERT_TRUE(prepareLaunchStatus.isOk());
171 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
172
173 preparedModelCallback->wait();
174 ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
175 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
176 sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
177 EXPECT_EQ(nullptr, preparedModel.get());
178}
179
180// prepare simple model negative test 2
181TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
182 Model model = createInvalidTestModel2_1_0();
183 sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
184 ASSERT_NE(nullptr, preparedModelCallback.get());
185 Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
186 ASSERT_TRUE(prepareLaunchStatus.isOk());
187 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
188
189 preparedModelCallback->wait();
190 ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
191 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
192 sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
193 EXPECT_EQ(nullptr, preparedModel.get());
194}
195
196// execute simple graph positive test
197TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
198 std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
199 std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
200 const uint32_t OUTPUT = 1;
201
202 sp<IPreparedModel> preparedModel;
203 ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
204 if (preparedModel == nullptr) {
205 return;
206 }
207 Request request = createValidTestRequest();
208
209 auto postWork = [&] {
210 sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]);
211 if (outputMemory == nullptr) {
212 return false;
213 }
214 float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
215 if (outputPtr == nullptr) {
216 return false;
217 }
218 outputMemory->read();
219 std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
220 outputMemory->commit();
221 return true;
222 };
223
224 sp<ExecutionCallback> executionCallback = new ExecutionCallback();
225 ASSERT_NE(nullptr, executionCallback.get());
226 executionCallback->on_finish(postWork);
227 Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
228 ASSERT_TRUE(executeLaunchStatus.isOk());
229 EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeLaunchStatus));
230
231 executionCallback->wait();
232 ErrorStatus executionReturnStatus = executionCallback->getStatus();
233 EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
234 EXPECT_EQ(expectedData, outputData);
235}
236
237// execute simple graph negative test 1
238TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
239 sp<IPreparedModel> preparedModel;
240 ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
241 if (preparedModel == nullptr) {
242 return;
243 }
244 Request request = createInvalidTestRequest1();
245
246 sp<ExecutionCallback> executionCallback = new ExecutionCallback();
247 ASSERT_NE(nullptr, executionCallback.get());
248 Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
249 ASSERT_TRUE(executeLaunchStatus.isOk());
250 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
251
252 executionCallback->wait();
253 ErrorStatus executionReturnStatus = executionCallback->getStatus();
254 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
255}
256
257// execute simple graph negative test 2
258TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
259 sp<IPreparedModel> preparedModel;
260 ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
261 if (preparedModel == nullptr) {
262 return;
263 }
264 Request request = createInvalidTestRequest2();
265
266 sp<ExecutionCallback> executionCallback = new ExecutionCallback();
267 ASSERT_NE(nullptr, executionCallback.get());
268 Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
269 ASSERT_TRUE(executeLaunchStatus.isOk());
270 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
271
272 executionCallback->wait();
273 ErrorStatus executionReturnStatus = executionCallback->getStatus();
274 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
275}
276
277} // namespace functional
278} // namespace vts
279} // namespace V1_0
280} // namespace neuralnetworks
281} // namespace hardware
282} // namespace android
283
284using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;
285
286int main(int argc, char** argv) {
287 ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
288 ::testing::InitGoogleTest(&argc, argv);
289 NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
290
291 int status = RUN_ALL_TESTS();
292 return status;
293}
diff --git a/neuralnetworks/1.1/vts/functional/Android.bp b/neuralnetworks/1.1/vts/functional/Android.bp
index 947ca2ca..f755c20b 100644
--- a/neuralnetworks/1.1/vts/functional/Android.bp
+++ b/neuralnetworks/1.1/vts/functional/Android.bp
@@ -17,9 +17,12 @@
17cc_test { 17cc_test {
18 name: "VtsHalNeuralnetworksV1_1TargetTest", 18 name: "VtsHalNeuralnetworksV1_1TargetTest",
19 srcs: [ 19 srcs: [
20 "VtsHalNeuralnetworksV1_1.cpp", 20 "BasicTests.cpp",
21 "VtsHalNeuralnetworksV1_1BasicTest.cpp", 21 "GeneratedTests.cpp",
22 "VtsHalNeuralnetworksV1_1GeneratedTest.cpp", 22 "ValidateModel.cpp",
23 "ValidateRequest.cpp",
24 "ValidationTests.cpp",
25 "VtsHalNeuralnetworks.cpp",
23 ], 26 ],
24 defaults: ["VtsHalTargetTestDefaults"], 27 defaults: ["VtsHalTargetTestDefaults"],
25 static_libs: [ 28 static_libs: [
diff --git a/neuralnetworks/1.1/vts/functional/BasicTests.cpp b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
new file mode 100644
index 00000000..ed59a2dd
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
@@ -0,0 +1,58 @@
1/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "VtsHalNeuralnetworks.h"
20
21namespace android {
22namespace hardware {
23namespace neuralnetworks {
24namespace V1_1 {
25namespace vts {
26namespace functional {
27
// CreateDevice: the fixture's SetUp already obtains the IDevice service; an
// intentionally empty body asserts that this setup alone succeeds.
 29TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
30
31// status test
32TEST_F(NeuralnetworksHidlTest, StatusTest) {
33 Return<DeviceStatus> status = device->getStatus();
34 ASSERT_TRUE(status.isOk());
35 EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
36}
37
38// initialization
39TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
40 Return<void> ret =
41 device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
42 EXPECT_EQ(ErrorStatus::NONE, status);
43 EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
44 EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
45 EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
46 EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
47 EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
48 EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
49 });
50 EXPECT_TRUE(ret.isOk());
51}
52
53} // namespace functional
54} // namespace vts
55} // namespace V1_1
56} // namespace neuralnetworks
57} // namespace hardware
58} // namespace android
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
index 025d9fed..1f1cc7af 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1GeneratedTest.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
@@ -16,54 +16,33 @@
16 16
17#define LOG_TAG "neuralnetworks_hidl_hal_test" 17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18 18
19#include "VtsHalNeuralnetworksV1_1.h" 19#include "VtsHalNeuralnetworks.h"
20 20
21#include "Callbacks.h" 21#include "Callbacks.h"
22#include "TestHarness.h" 22#include "TestHarness.h"
23#include "Utils.h"
23 24
24#include <android-base/logging.h> 25#include <android-base/logging.h>
25#include <android/hardware/neuralnetworks/1.1/IDevice.h>
26#include <android/hardware/neuralnetworks/1.1/types.h>
27#include <android/hidl/memory/1.0/IMemory.h> 26#include <android/hidl/memory/1.0/IMemory.h>
28#include <hidlmemory/mapping.h> 27#include <hidlmemory/mapping.h>
29 28
30using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
31using ::android::hardware::neuralnetworks::V1_0::Capabilities;
32using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
33using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
34using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
35using ::android::hardware::neuralnetworks::V1_0::Operand;
36using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
37using ::android::hardware::neuralnetworks::V1_0::OperandType;
38using ::android::hardware::neuralnetworks::V1_0::Request;
39using ::android::hardware::neuralnetworks::V1_1::IDevice;
40using ::android::hardware::neuralnetworks::V1_1::Model;
41using ::android::hardware::neuralnetworks::V1_1::Operation;
42using ::android::hardware::neuralnetworks::V1_1::OperationType;
43using ::android::hardware::Return;
44using ::android::hardware::Void;
45using ::android::hardware::hidl_memory;
46using ::android::hardware::hidl_string;
47using ::android::hardware::hidl_vec;
48using ::android::hidl::allocator::V1_0::IAllocator;
49using ::android::hidl::memory::V1_0::IMemory;
50using ::android::sp;
51
52namespace android { 29namespace android {
53namespace hardware { 30namespace hardware {
54namespace neuralnetworks { 31namespace neuralnetworks {
55 32
56namespace generated_tests { 33namespace generated_tests {
57using ::generated_tests::MixedTypedExampleType; 34using ::generated_tests::MixedTypedExampleType;
58extern void Execute(sp<V1_1::IDevice>&, std::function<Model(void)>, std::function<bool(int)>, 35extern void Execute(const sp<V1_1::IDevice>&, std::function<V1_1::Model(void)>,
59 const std::vector<MixedTypedExampleType>&); 36 std::function<bool(int)>, const std::vector<MixedTypedExampleType>&);
60} // namespace generated_tests 37} // namespace generated_tests
61 38
62namespace V1_1 { 39namespace V1_1 {
63namespace vts { 40namespace vts {
64namespace functional { 41namespace functional {
42
65using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; 43using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
66using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; 44using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
45using ::android::nn::allocateSharedMemory;
67 46
68// Mixed-typed examples 47// Mixed-typed examples
69typedef generated_tests::MixedTypedExampleType MixedTypedExample; 48typedef generated_tests::MixedTypedExampleType MixedTypedExample;
diff --git a/neuralnetworks/1.1/vts/functional/Models.h b/neuralnetworks/1.1/vts/functional/Models.h
new file mode 100644
index 00000000..c3cadb5f
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/Models.h
@@ -0,0 +1,323 @@
1/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H
18#define VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H
19
20#define LOG_TAG "neuralnetworks_hidl_hal_test"
21
22#include "TestHarness.h"
23
24#include <android/hardware/neuralnetworks/1.0/types.h>
25#include <android/hardware/neuralnetworks/1.1/types.h>
26
27namespace android {
28namespace hardware {
29namespace neuralnetworks {
30namespace V1_1 {
31namespace vts {
32namespace functional {
33
34using MixedTypedExample = generated_tests::MixedTypedExampleType;
35
36#define FOR_EACH_TEST_MODEL(FN) \
37 FN(add) \
38 FN(add_broadcast_quant8) \
39 FN(add_quant8) \
40 FN(add_relaxed) \
41 FN(avg_pool_float_1) \
42 FN(avg_pool_float_1_relaxed) \
43 FN(avg_pool_float_2) \
44 FN(avg_pool_float_2_relaxed) \
45 FN(avg_pool_float_3) \
46 FN(avg_pool_float_3_relaxed) \
47 FN(avg_pool_float_4) \
48 FN(avg_pool_float_4_relaxed) \
49 FN(avg_pool_float_5) \
50 FN(avg_pool_quant8_1) \
51 FN(avg_pool_quant8_2) \
52 FN(avg_pool_quant8_3) \
53 FN(avg_pool_quant8_4) \
54 FN(avg_pool_quant8_5) \
55 FN(batch_to_space) \
56 FN(batch_to_space_float_1) \
57 FN(batch_to_space_quant8_1) \
58 FN(concat_float_1) \
59 FN(concat_float_1_relaxed) \
60 FN(concat_float_2) \
61 FN(concat_float_2_relaxed) \
62 FN(concat_float_3) \
63 FN(concat_float_3_relaxed) \
64 FN(concat_quant8_1) \
65 FN(concat_quant8_2) \
66 FN(concat_quant8_3) \
67 FN(conv_1_h3_w2_SAME) \
68 FN(conv_1_h3_w2_SAME_relaxed) \
69 FN(conv_1_h3_w2_VALID) \
70 FN(conv_1_h3_w2_VALID_relaxed) \
71 FN(conv_3_h3_w2_SAME) \
72 FN(conv_3_h3_w2_SAME_relaxed) \
73 FN(conv_3_h3_w2_VALID) \
74 FN(conv_3_h3_w2_VALID_relaxed) \
75 FN(conv_float) \
76 FN(conv_float_2) \
77 FN(conv_float_channels) \
78 FN(conv_float_channels_relaxed) \
79 FN(conv_float_channels_weights_as_inputs) \
80 FN(conv_float_channels_weights_as_inputs_relaxed) \
81 FN(conv_float_large) \
82 FN(conv_float_large_relaxed) \
83 FN(conv_float_large_weights_as_inputs) \
84 FN(conv_float_large_weights_as_inputs_relaxed) \
85 FN(conv_float_relaxed) \
86 FN(conv_float_weights_as_inputs) \
87 FN(conv_float_weights_as_inputs_relaxed) \
88 FN(conv_quant8) \
89 FN(conv_quant8_2) \
90 FN(conv_quant8_channels) \
91 FN(conv_quant8_channels_weights_as_inputs) \
92 FN(conv_quant8_large) \
93 FN(conv_quant8_large_weights_as_inputs) \
94 FN(conv_quant8_overflow) \
95 FN(conv_quant8_overflow_weights_as_inputs) \
96 FN(conv_quant8_weights_as_inputs) \
97 FN(depth_to_space_float_1) \
98 FN(depth_to_space_float_1_relaxed) \
99 FN(depth_to_space_float_2) \
100 FN(depth_to_space_float_2_relaxed) \
101 FN(depth_to_space_float_3) \
102 FN(depth_to_space_float_3_relaxed) \
103 FN(depth_to_space_quant8_1) \
104 FN(depth_to_space_quant8_2) \
105 FN(depthwise_conv) \
106 FN(depthwise_conv2d_float) \
107 FN(depthwise_conv2d_float_2) \
108 FN(depthwise_conv2d_float_large) \
109 FN(depthwise_conv2d_float_large_2) \
110 FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
111 FN(depthwise_conv2d_float_large_relaxed) \
112 FN(depthwise_conv2d_float_large_weights_as_inputs) \
113 FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed) \
114 FN(depthwise_conv2d_float_weights_as_inputs) \
115 FN(depthwise_conv2d_quant8) \
116 FN(depthwise_conv2d_quant8_2) \
117 FN(depthwise_conv2d_quant8_large) \
118 FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
119 FN(depthwise_conv2d_quant8_weights_as_inputs) \
120 FN(depthwise_conv_relaxed) \
121 FN(dequantize) \
122 FN(div) \
123 FN(embedding_lookup) \
124 FN(embedding_lookup_relaxed) \
125 FN(floor) \
126 FN(floor_relaxed) \
127 FN(fully_connected_float) \
128 FN(fully_connected_float_2) \
129 FN(fully_connected_float_large) \
130 FN(fully_connected_float_large_weights_as_inputs) \
131 FN(fully_connected_float_relaxed) \
132 FN(fully_connected_float_weights_as_inputs) \
133 FN(fully_connected_float_weights_as_inputs_relaxed) \
134 FN(fully_connected_quant8) \
135 FN(fully_connected_quant8_2) \
136 FN(fully_connected_quant8_large) \
137 FN(fully_connected_quant8_large_weights_as_inputs) \
138 FN(fully_connected_quant8_weights_as_inputs) \
139 FN(hashtable_lookup_float) \
140 FN(hashtable_lookup_float_relaxed) \
141 FN(hashtable_lookup_quant8) \
142 FN(l2_normalization) \
143 FN(l2_normalization_2) \
144 FN(l2_normalization_large) \
145 FN(l2_normalization_large_relaxed) \
146 FN(l2_normalization_relaxed) \
147 FN(l2_pool_float) \
148 FN(l2_pool_float_2) \
149 FN(l2_pool_float_large) \
150 FN(l2_pool_float_relaxed) \
151 FN(local_response_norm_float_1) \
152 FN(local_response_norm_float_1_relaxed) \
153 FN(local_response_norm_float_2) \
154 FN(local_response_norm_float_2_relaxed) \
155 FN(local_response_norm_float_3) \
156 FN(local_response_norm_float_3_relaxed) \
157 FN(local_response_norm_float_4) \
158 FN(local_response_norm_float_4_relaxed) \
159 FN(logistic_float_1) \
160 FN(logistic_float_1_relaxed) \
161 FN(logistic_float_2) \
162 FN(logistic_float_2_relaxed) \
163 FN(logistic_quant8_1) \
164 FN(logistic_quant8_2) \
165 FN(lsh_projection) \
166 FN(lsh_projection_2) \
167 FN(lsh_projection_2_relaxed) \
168 FN(lsh_projection_relaxed) \
169 FN(lsh_projection_weights_as_inputs) \
170 FN(lsh_projection_weights_as_inputs_relaxed) \
171 FN(lstm) \
172 FN(lstm2) \
173 FN(lstm2_relaxed) \
174 FN(lstm2_state) \
175 FN(lstm2_state2) \
176 FN(lstm2_state2_relaxed) \
177 FN(lstm2_state_relaxed) \
178 FN(lstm3) \
179 FN(lstm3_relaxed) \
180 FN(lstm3_state) \
181 FN(lstm3_state2) \
182 FN(lstm3_state2_relaxed) \
183 FN(lstm3_state3) \
184 FN(lstm3_state3_relaxed) \
185 FN(lstm3_state_relaxed) \
186 FN(lstm_relaxed) \
187 FN(lstm_state) \
188 FN(lstm_state2) \
189 FN(lstm_state2_relaxed) \
190 FN(lstm_state_relaxed) \
191 FN(max_pool_float_1) \
192 FN(max_pool_float_1_relaxed) \
193 FN(max_pool_float_2) \
194 FN(max_pool_float_2_relaxed) \
195 FN(max_pool_float_3) \
196 FN(max_pool_float_3_relaxed) \
197 FN(max_pool_float_4) \
198 FN(max_pool_quant8_1) \
199 FN(max_pool_quant8_2) \
200 FN(max_pool_quant8_3) \
201 FN(max_pool_quant8_4) \
202 FN(mean) \
203 FN(mean_float_1) \
204 FN(mean_float_2) \
205 FN(mean_quant8_1) \
206 FN(mean_quant8_2) \
207 FN(mobilenet_224_gender_basic_fixed) \
208 FN(mobilenet_224_gender_basic_fixed_relaxed) \
209 FN(mobilenet_quantized) \
210 FN(mul) \
211 FN(mul_broadcast_quant8) \
212 FN(mul_quant8) \
213 FN(mul_relaxed) \
214 FN(mul_relu) \
215 FN(mul_relu_relaxed) \
216 FN(pad) \
217 FN(pad_float_1) \
218 FN(relu1_float_1) \
219 FN(relu1_float_1_relaxed) \
220 FN(relu1_float_2) \
221 FN(relu1_float_2_relaxed) \
222 FN(relu1_quant8_1) \
223 FN(relu1_quant8_2) \
224 FN(relu6_float_1) \
225 FN(relu6_float_1_relaxed) \
226 FN(relu6_float_2) \
227 FN(relu6_float_2_relaxed) \
228 FN(relu6_quant8_1) \
229 FN(relu6_quant8_2) \
230 FN(relu_float_1) \
231 FN(relu_float_1_relaxed) \
232 FN(relu_float_2) \
233 FN(relu_quant8_1) \
234 FN(relu_quant8_2) \
235 FN(reshape) \
236 FN(reshape_quant8) \
237 FN(reshape_quant8_weights_as_inputs) \
238 FN(reshape_relaxed) \
239 FN(reshape_weights_as_inputs) \
240 FN(reshape_weights_as_inputs_relaxed) \
241 FN(resize_bilinear) \
242 FN(resize_bilinear_2) \
243 FN(resize_bilinear_relaxed) \
244 FN(rnn) \
245 FN(rnn_relaxed) \
246 FN(rnn_state) \
247 FN(rnn_state_relaxed) \
248 FN(softmax_float_1) \
249 FN(softmax_float_1_relaxed) \
250 FN(softmax_float_2) \
251 FN(softmax_float_2_relaxed) \
252 FN(softmax_quant8_1) \
253 FN(softmax_quant8_2) \
254 FN(space_to_batch) \
255 FN(space_to_batch_float_1) \
256 FN(space_to_batch_float_2) \
257 FN(space_to_batch_float_3) \
258 FN(space_to_batch_quant8_1) \
259 FN(space_to_batch_quant8_2) \
260 FN(space_to_batch_quant8_3) \
261 FN(space_to_depth_float_1) \
262 FN(space_to_depth_float_1_relaxed) \
263 FN(space_to_depth_float_2) \
264 FN(space_to_depth_float_2_relaxed) \
265 FN(space_to_depth_float_3) \
266 FN(space_to_depth_float_3_relaxed) \
267 FN(space_to_depth_quant8_1) \
268 FN(space_to_depth_quant8_2) \
269 FN(squeeze) \
270 FN(squeeze_float_1) \
271 FN(squeeze_quant8_1) \
272 FN(strided_slice) \
273 FN(strided_slice_float_1) \
274 FN(strided_slice_float_10) \
275 FN(strided_slice_float_2) \
276 FN(strided_slice_float_3) \
277 FN(strided_slice_float_4) \
278 FN(strided_slice_float_5) \
279 FN(strided_slice_float_6) \
280 FN(strided_slice_float_7) \
281 FN(strided_slice_float_8) \
282 FN(strided_slice_float_9) \
283 FN(strided_slice_qaunt8_10) \
284 FN(strided_slice_quant8_1) \
285 FN(strided_slice_quant8_2) \
286 FN(strided_slice_quant8_3) \
287 FN(strided_slice_quant8_4) \
288 FN(strided_slice_quant8_5) \
289 FN(strided_slice_quant8_6) \
290 FN(strided_slice_quant8_7) \
291 FN(strided_slice_quant8_8) \
292 FN(strided_slice_quant8_9) \
293 FN(sub) \
294 FN(svdf) \
295 FN(svdf2) \
296 FN(svdf2_relaxed) \
297 FN(svdf_relaxed) \
298 FN(svdf_state) \
299 FN(svdf_state_relaxed) \
300 FN(tanh) \
301 FN(tanh_relaxed) \
302 FN(transpose) \
303 FN(transpose_float_1) \
304 FN(transpose_quant8_1)
305
// For each generated model <function>, declare the two objects the test
// generator emits into its per-model .cpp file: the example data set and the
// model factory. The macro is applied to every entry of FOR_EACH_TEST_MODEL
// and immediately undefined so it cannot leak out of this header.
306#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
307 namespace function { \
308 extern std::vector<MixedTypedExample> examples; \
309 Model createTestModel(); \
310 }
311
312FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)
313
314#undef FORWARD_DECLARE_GENERATED_OBJECTS
315
316} // namespace functional
317} // namespace vts
318} // namespace V1_1
319} // namespace neuralnetworks
320} // namespace hardware
321} // namespace android
322
323#endif // VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_H
diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
new file mode 100644
index 00000000..7a20e26f
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
@@ -0,0 +1,513 @@
1/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "VtsHalNeuralnetworks.h"
20
21#include "Callbacks.h"
22
23namespace android {
24namespace hardware {
25namespace neuralnetworks {
26namespace V1_1 {
27
28using V1_0::IPreparedModel;
29using V1_0::Operand;
30using V1_0::OperandLifeTime;
31using V1_0::OperandType;
32
33namespace vts {
34namespace functional {
35
36using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
37using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
38
39///////////////////////// UTILITY FUNCTIONS /////////////////////////
40
41static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
42 const V1_1::Model& model) {
43 SCOPED_TRACE(message + " [getSupportedOperations_1_1]");
44
45 Return<void> ret =
46 device->getSupportedOperations_1_1(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
47 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
48 });
49 EXPECT_TRUE(ret.isOk());
50}
51
52static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
53 const V1_1::Model& model) {
54 SCOPED_TRACE(message + " [prepareModel_1_1]");
55
56 sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
57 ASSERT_NE(nullptr, preparedModelCallback.get());
58 Return<ErrorStatus> prepareLaunchStatus =
59 device->prepareModel_1_1(model, preparedModelCallback);
60 ASSERT_TRUE(prepareLaunchStatus.isOk());
61 ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
62
63 preparedModelCallback->wait();
64 ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
65 ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
66 sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
67 ASSERT_EQ(nullptr, preparedModel.get());
68}
69
// Primary validation entry point. Takes a valid model BY VALUE, invalidates
// the local copy via "mutation", then checks that both model-consuming
// interface calls (getSupportedOperations_1_1 and prepareModel_1_1) reject
// the corrupted copy. The caller's model is never modified.
74static void validate(const sp<IDevice>& device, const std::string& message, V1_1::Model model,
75 const std::function<void(Model*)>& mutation) {
76 mutation(&model);
77 validateGetSupportedOperations(device, message, model);
78 validatePrepareModel(device, message, model);
79}
80
81// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
82// so this is efficiently accomplished by moving the element to the end and
83// resizing the hidl_vec to one less.
84template <typename Type>
85static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
86 if (vec) {
87 std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
88 vec->resize(vec->size() - 1);
89 }
90}
91
92template <typename Type>
93static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
94 // assume vec is valid
95 const uint32_t index = vec->size();
96 vec->resize(index + 1);
97 (*vec)[index] = value;
98 return index;
99}
100
101static uint32_t addOperand(Model* model) {
102 return hidl_vec_push_back(&model->operands,
103 {
104 .type = OperandType::INT32,
105 .dimensions = {},
106 .numberOfConsumers = 0,
107 .scale = 0.0f,
108 .zeroPoint = 0,
109 .lifetime = OperandLifeTime::MODEL_INPUT,
110 .location = {.poolIndex = 0, .offset = 0, .length = 0},
111 });
112}
113
114static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
115 uint32_t index = addOperand(model);
116 model->operands[index].numberOfConsumers = 1;
117 model->operands[index].lifetime = lifetime;
118 return index;
119}
120
121///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
122
123static const int32_t invalidOperandTypes[] = {
124 static_cast<int32_t>(OperandType::FLOAT32) - 1, // lower bound fundamental
125 static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1, // upper bound fundamental
126 static_cast<int32_t>(OperandType::OEM) - 1, // lower bound OEM
127 static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1, // upper bound OEM
128};
129
130static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
131 for (size_t operand = 0; operand < model.operands.size(); ++operand) {
132 for (int32_t invalidOperandType : invalidOperandTypes) {
133 const std::string message = "mutateOperandTypeTest: operand " +
134 std::to_string(operand) + " set to value " +
135 std::to_string(invalidOperandType);
136 validate(device, message, model, [operand, invalidOperandType](Model* model) {
137 model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
138 });
139 }
140 }
141}
142
143///////////////////////// VALIDATE OPERAND RANK /////////////////////////
144
145static uint32_t getInvalidRank(OperandType type) {
146 switch (type) {
147 case OperandType::FLOAT32:
148 case OperandType::INT32:
149 case OperandType::UINT32:
150 return 1;
151 case OperandType::TENSOR_FLOAT32:
152 case OperandType::TENSOR_INT32:
153 case OperandType::TENSOR_QUANT8_ASYMM:
154 return 0;
155 default:
156 return 0;
157 }
158}
159
160static void mutateOperandRankTest(const sp<IDevice>& device, const V1_1::Model& model) {
161 for (size_t operand = 0; operand < model.operands.size(); ++operand) {
162 const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
163 const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
164 " has rank of " + std::to_string(invalidRank);
165 validate(device, message, model, [operand, invalidRank](Model* model) {
166 model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
167 });
168 }
169}
170
171///////////////////////// VALIDATE OPERAND SCALE /////////////////////////
172
173static float getInvalidScale(OperandType type) {
174 switch (type) {
175 case OperandType::FLOAT32:
176 case OperandType::INT32:
177 case OperandType::UINT32:
178 case OperandType::TENSOR_FLOAT32:
179 return 1.0f;
180 case OperandType::TENSOR_INT32:
181 return -1.0f;
182 case OperandType::TENSOR_QUANT8_ASYMM:
183 return 0.0f;
184 default:
185 return 0.0f;
186 }
187}
188
189static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_1::Model& model) {
190 for (size_t operand = 0; operand < model.operands.size(); ++operand) {
191 const float invalidScale = getInvalidScale(model.operands[operand].type);
192 const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
193 " has scale of " + std::to_string(invalidScale);
194 validate(device, message, model, [operand, invalidScale](Model* model) {
195 model->operands[operand].scale = invalidScale;
196 });
197 }
198}
199
200///////////////////////// VALIDATE OPERAND ZERO POINT /////////////////////////
201
202static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
203 switch (type) {
204 case OperandType::FLOAT32:
205 case OperandType::INT32:
206 case OperandType::UINT32:
207 case OperandType::TENSOR_FLOAT32:
208 case OperandType::TENSOR_INT32:
209 return {1};
210 case OperandType::TENSOR_QUANT8_ASYMM:
211 return {-1, 256};
212 default:
213 return {};
214 }
215}
216
217static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_1::Model& model) {
218 for (size_t operand = 0; operand < model.operands.size(); ++operand) {
219 const std::vector<int32_t> invalidZeroPoints =
220 getInvalidZeroPoints(model.operands[operand].type);
221 for (int32_t invalidZeroPoint : invalidZeroPoints) {
222 const std::string message = "mutateOperandZeroPointTest: operand " +
223 std::to_string(operand) + " has zero point of " +
224 std::to_string(invalidZeroPoint);
225 validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
226 model->operands[operand].zeroPoint = invalidZeroPoint;
227 });
228 }
229 }
230}
231
232///////////////////////// VALIDATE EXTRA ??? /////////////////////////
233
234// TODO: Operand::lifetime
235// TODO: Operand::location
236
237///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
238
239static void mutateOperand(Operand* operand, OperandType type) {
240 Operand newOperand = *operand;
241 newOperand.type = type;
242 switch (type) {
243 case OperandType::FLOAT32:
244 case OperandType::INT32:
245 case OperandType::UINT32:
246 newOperand.dimensions = hidl_vec<uint32_t>();
247 newOperand.scale = 0.0f;
248 newOperand.zeroPoint = 0;
249 break;
250 case OperandType::TENSOR_FLOAT32:
251 newOperand.dimensions =
252 operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
253 newOperand.scale = 0.0f;
254 newOperand.zeroPoint = 0;
255 break;
256 case OperandType::TENSOR_INT32:
257 newOperand.dimensions =
258 operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
259 newOperand.zeroPoint = 0;
260 break;
261 case OperandType::TENSOR_QUANT8_ASYMM:
262 newOperand.dimensions =
263 operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
264 newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
265 break;
266 case OperandType::OEM:
267 case OperandType::TENSOR_OEM_BYTE:
268 default:
269 break;
270 }
271 *operand = newOperand;
272}
273
274static bool mutateOperationOperandTypeSkip(size_t operand, const V1_1::Model& model) {
275 // LSH_PROJECTION's second argument is allowed to have any type. This is the
276 // only operation that currently has a type that can be anything independent
277 // from any other type. Changing the operand type to any other type will
278 // result in a valid model for LSH_PROJECTION. If this is the case, skip the
279 // test.
280 for (const Operation& operation : model.operations) {
281 if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) {
282 return true;
283 }
284 }
285 return false;
286}
287
288static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
289 for (size_t operand = 0; operand < model.operands.size(); ++operand) {
290 if (mutateOperationOperandTypeSkip(operand, model)) {
291 continue;
292 }
293 for (OperandType invalidOperandType : hidl_enum_iterator<OperandType>{}) {
294 // Do not test OEM types
295 if (invalidOperandType == model.operands[operand].type ||
296 invalidOperandType == OperandType::OEM ||
297 invalidOperandType == OperandType::TENSOR_OEM_BYTE) {
298 continue;
299 }
300 const std::string message = "mutateOperationOperandTypeTest: operand " +
301 std::to_string(operand) + " set to type " +
302 toString(invalidOperandType);
303 validate(device, message, model, [operand, invalidOperandType](Model* model) {
304 mutateOperand(&model->operands[operand], invalidOperandType);
305 });
306 }
307 }
308}
309
310///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
311
312static const int32_t invalidOperationTypes[] = {
313 static_cast<int32_t>(OperationType::ADD) - 1, // lower bound fundamental
314 static_cast<int32_t>(OperationType::TRANSPOSE) + 1, // upper bound fundamental
315 static_cast<int32_t>(OperationType::OEM_OPERATION) - 1, // lower bound OEM
316 static_cast<int32_t>(OperationType::OEM_OPERATION) + 1, // upper bound OEM
317};
318
319static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
320 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
321 for (int32_t invalidOperationType : invalidOperationTypes) {
322 const std::string message = "mutateOperationTypeTest: operation " +
323 std::to_string(operation) + " set to value " +
324 std::to_string(invalidOperationType);
325 validate(device, message, model, [operation, invalidOperationType](Model* model) {
326 model->operations[operation].type =
327 static_cast<OperationType>(invalidOperationType);
328 });
329 }
330 }
331}
332
333///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
334
335static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device,
336 const V1_1::Model& model) {
337 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
338 const uint32_t invalidOperand = model.operands.size();
339 for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
340 const std::string message = "mutateOperationInputOperandIndexTest: operation " +
341 std::to_string(operation) + " input " +
342 std::to_string(input);
343 validate(device, message, model, [operation, input, invalidOperand](Model* model) {
344 model->operations[operation].inputs[input] = invalidOperand;
345 });
346 }
347 }
348}
349
350///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
351
352static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device,
353 const V1_1::Model& model) {
354 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
355 const uint32_t invalidOperand = model.operands.size();
356 for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
357 const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
358 std::to_string(operation) + " output " +
359 std::to_string(output);
360 validate(device, message, model, [operation, output, invalidOperand](Model* model) {
361 model->operations[operation].outputs[output] = invalidOperand;
362 });
363 }
364 }
365}
366
367///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
368
369static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
370 if (vec) {
371 // remove elements matching "value"
372 auto last = std::remove(vec->begin(), vec->end(), value);
373 vec->resize(std::distance(vec->begin(), last));
374
375 // decrement elements exceeding "value"
376 std::transform(vec->begin(), vec->end(), vec->begin(),
377 [value](uint32_t v) { return v > value ? v-- : v; });
378 }
379}
380
381static void removeOperand(Model* model, uint32_t index) {
382 hidl_vec_removeAt(&model->operands, index);
383 for (Operation& operation : model->operations) {
384 removeValueAndDecrementGreaterValues(&operation.inputs, index);
385 removeValueAndDecrementGreaterValues(&operation.outputs, index);
386 }
387 removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
388 removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
389}
390
391static void removeOperandTest(const sp<IDevice>& device, const V1_1::Model& model) {
392 for (size_t operand = 0; operand < model.operands.size(); ++operand) {
393 const std::string message = "removeOperandTest: operand " + std::to_string(operand);
394 validate(device, message, model,
395 [operand](Model* model) { removeOperand(model, operand); });
396 }
397}
398
399///////////////////////// REMOVE OPERATION /////////////////////////
400
401static void removeOperation(Model* model, uint32_t index) {
402 for (uint32_t operand : model->operations[index].inputs) {
403 model->operands[operand].numberOfConsumers--;
404 }
405 hidl_vec_removeAt(&model->operations, index);
406}
407
408static void removeOperationTest(const sp<IDevice>& device, const V1_1::Model& model) {
409 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
410 const std::string message = "removeOperationTest: operation " + std::to_string(operation);
411 validate(device, message, model,
412 [operation](Model* model) { removeOperation(model, operation); });
413 }
414}
415
416///////////////////////// REMOVE OPERATION INPUT /////////////////////////
417
418static void removeOperationInputTest(const sp<IDevice>& device, const V1_1::Model& model) {
419 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
420 for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
421 const V1_1::Operation& op = model.operations[operation];
422 // CONCATENATION has at least 2 inputs, with the last element being
423 // INT32. Skip this test if removing one of CONCATENATION's
424 // inputs still produces a valid model.
425 if (op.type == V1_1::OperationType::CONCATENATION && op.inputs.size() > 2 &&
426 input != op.inputs.size() - 1) {
427 continue;
428 }
429 const std::string message = "removeOperationInputTest: operation " +
430 std::to_string(operation) + ", input " +
431 std::to_string(input);
432 validate(device, message, model, [operation, input](Model* model) {
433 uint32_t operand = model->operations[operation].inputs[input];
434 model->operands[operand].numberOfConsumers--;
435 hidl_vec_removeAt(&model->operations[operation].inputs, input);
436 });
437 }
438 }
439}
440
441///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
442
443static void removeOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
444 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
445 for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
446 const std::string message = "removeOperationOutputTest: operation " +
447 std::to_string(operation) + ", output " +
448 std::to_string(output);
449 validate(device, message, model, [operation, output](Model* model) {
450 hidl_vec_removeAt(&model->operations[operation].outputs, output);
451 });
452 }
453 }
454}
455
456///////////////////////// MODEL VALIDATION /////////////////////////
457
458// TODO: remove model input
459// TODO: remove model output
460// TODO: add unused operation
461
462///////////////////////// ADD OPERATION INPUT /////////////////////////
463
464static void addOperationInputTest(const sp<IDevice>& device, const V1_1::Model& model) {
465 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
466 const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
467 validate(device, message, model, [operation](Model* model) {
468 uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
469 hidl_vec_push_back(&model->operations[operation].inputs, index);
470 hidl_vec_push_back(&model->inputIndexes, index);
471 });
472 }
473}
474
475///////////////////////// ADD OPERATION OUTPUT /////////////////////////
476
477static void addOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
478 for (size_t operation = 0; operation < model.operations.size(); ++operation) {
479 const std::string message =
480 "addOperationOutputTest: operation " + std::to_string(operation);
481 validate(device, message, model, [operation](Model* model) {
482 uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
483 hidl_vec_push_back(&model->operations[operation].outputs, index);
484 hidl_vec_push_back(&model->outputIndexes, index);
485 });
486 }
487}
488
489////////////////////////// ENTRY POINT //////////////////////////////
490
// Entry point for model validation. Runs every model-mutation test against
// "model"; each test applies one invalidating mutation to a copy of the model
// and expects the service to reject it.
void ValidationTest::validateModel(const V1_1::Model& model) {
    mutateOperandTypeTest(device, model);
    mutateOperandRankTest(device, model);
    mutateOperandScaleTest(device, model);
    mutateOperandZeroPointTest(device, model);
    mutateOperationOperandTypeTest(device, model);
    mutateOperationTypeTest(device, model);
    mutateOperationInputOperandIndexTest(device, model);
    mutateOperationOutputOperandIndexTest(device, model);
    removeOperandTest(device, model);
    removeOperationTest(device, model);
    removeOperationInputTest(device, model);
    removeOperationOutputTest(device, model);
    addOperationInputTest(device, model);
    addOperationOutputTest(device, model);
}
507
508} // namespace functional
509} // namespace vts
510} // namespace V1_1
511} // namespace neuralnetworks
512} // namespace hardware
513} // namespace android
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
new file mode 100644
index 00000000..bd966144
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -0,0 +1,262 @@
1/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "VtsHalNeuralnetworks.h"
20
21#include "Callbacks.h"
22#include "TestHarness.h"
23#include "Utils.h"
24
25#include <android-base/logging.h>
26#include <android/hidl/memory/1.0/IMemory.h>
27#include <hidlmemory/mapping.h>
28
29namespace android {
30namespace hardware {
31namespace neuralnetworks {
32namespace V1_1 {
33namespace vts {
34namespace functional {
35
36using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
37using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
38using ::android::hidl::memory::V1_0::IMemory;
39using generated_tests::MixedTyped;
40using generated_tests::MixedTypedExampleType;
41using generated_tests::for_all;
42
43///////////////////////// UTILITY FUNCTIONS /////////////////////////
44
// Prepares "model" via prepareModel_1_1 and stores the result in
// *preparedModel. First queries getSupportedOperations_1_1 to learn whether
// the service claims full support for the model. If the service does NOT
// fully support the model and preparation fails, *preparedModel is left null
// and a message is logged so the caller can skip validation; any other
// failure is a fatal test assertion.
static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
                                sp<IPreparedModel>* preparedModel) {
    ASSERT_NE(nullptr, preparedModel);

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());

    // launch prepare model (asynchronous; completion is signaled through the callback)
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus =
        device->prepareModel_1_1(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model (blocks until the service invokes the callback)
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    *preparedModel = preparedModelCallback->getPreparedModel();

    // The getSupportedOperations_1_1 call returns a list of operations that are
    // guaranteed not to fail if prepareModel_1_1 is called, and
    // 'fullySupportsModel' is true if and only if the entire model is
    // guaranteed. If a driver has any doubt that it can prepare an operation,
    // it must return false. So here, if a driver isn't sure if it can support
    // an operation, but reports that it successfully prepared the model, the
    // test can continue.
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel->get());
        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[ ] Unable to test Request validation because vendor service "
                     "cannot prepare model that it does not support."
                  << std::endl;
        return;
    }
    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel->get());
}
92
93// Primary validation function. This function will take a valid request, apply a
94// mutation to it to invalidate the request, then pass it to interface calls
95// that use the request. Note that the request here is passed by value, and any
96// mutation to the request does not leave this function.
97static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
98 Request request, const std::function<void(Request*)>& mutation) {
99 mutation(&request);
100 SCOPED_TRACE(message + " [execute]");
101
102 sp<ExecutionCallback> executionCallback = new ExecutionCallback();
103 ASSERT_NE(nullptr, executionCallback.get());
104 Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
105 ASSERT_TRUE(executeLaunchStatus.isOk());
106 ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
107
108 executionCallback->wait();
109 ErrorStatus executionReturnStatus = executionCallback->getStatus();
110 ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
111}
112
113// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
114// so this is efficiently accomplished by moving the element to the end and
115// resizing the hidl_vec to one less.
116template <typename Type>
117static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
118 if (vec) {
119 std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
120 vec->resize(vec->size() - 1);
121 }
122}
123
124template <typename Type>
125static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
126 // assume vec is valid
127 const uint32_t index = vec->size();
128 vec->resize(index + 1);
129 (*vec)[index] = value;
130 return index;
131}
132
133///////////////////////// REMOVE INPUT ////////////////////////////////////
134
135static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
136 for (size_t input = 0; input < request.inputs.size(); ++input) {
137 const std::string message = "removeInput: removed input " + std::to_string(input);
138 validate(preparedModel, message, request,
139 [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
140 }
141}
142
143///////////////////////// REMOVE OUTPUT ////////////////////////////////////
144
145static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
146 for (size_t output = 0; output < request.outputs.size(); ++output) {
147 const std::string message = "removeOutput: removed Output " + std::to_string(output);
148 validate(preparedModel, message, request,
149 [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
150 }
151}
152
153///////////////////////////// ENTRY POINT //////////////////////////////////
154
// Builds one Request per example. Each Request uses two shared-memory pools:
// pool INPUT (0) holds all input data packed back-to-back, and pool OUTPUT
// (1) reserves space for all outputs. Input data from the example is copied
// into the input pool. Returns an empty vector if shared-memory allocation or
// mapping fails for any example.
std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples) {
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    std::vector<Request> requests;

    for (auto& example : examples) {
        const MixedTyped& inputs = example.first;
        const MixedTyped& outputs = example.second;

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;

        // This function only partially specifies the metadata (vector of RequestArguments).
        // The contents are copied over below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            // A zero-sized input is represented as an omitted ("no value") argument.
            RequestArgument arg_empty = {
                .hasNoValue = true,
            };
            inputs_info[index] = s ? arg : arg_empty;
            inputSize += s;
        });
        // Compute offset for inputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                if (!i.hasNoValue) i.location.offset = offset;
                offset += i.location.length;
            }
        }

        // Go through all outputs, initialize RequestArgument descriptors
        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute offset for outputs 1 and so on
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
        // Allocate the two backing pools; bail out (returning no requests) if
        // either allocation fails.
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
            return {};
        }

        // map pool
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        if (inputMemory == nullptr) {
            return {};
        }
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        if (inputPtr == nullptr) {
            return {};
        }

        // initialize pool (update/commit bracket the writes per the HIDL
        // memory protocol)
        inputMemory->update();
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // TODO: handle more than one input
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });
        inputMemory->commit();

        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
    }

    return requests;
}
240
241void ValidationTest::validateRequests(const V1_1::Model& model,
242 const std::vector<Request>& requests) {
243 // create IPreparedModel
244 sp<IPreparedModel> preparedModel;
245 ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
246 if (preparedModel == nullptr) {
247 return;
248 }
249
250 // validate each request
251 for (const Request& request : requests) {
252 removeInputTest(preparedModel, request);
253 removeOutputTest(preparedModel, request);
254 }
255}
256
257} // namespace functional
258} // namespace vts
259} // namespace V1_1
260} // namespace neuralnetworks
261} // namespace hardware
262} // namespace android
diff --git a/neuralnetworks/1.1/vts/functional/ValidationTests.cpp b/neuralnetworks/1.1/vts/functional/ValidationTests.cpp
new file mode 100644
index 00000000..1c35ba84
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/ValidationTests.cpp
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "Models.h"
20#include "VtsHalNeuralnetworks.h"
21
22namespace android {
23namespace hardware {
24namespace neuralnetworks {
25namespace V1_1 {
26namespace vts {
27namespace functional {
28
// forward declarations
// Defined in ValidateRequest.cpp. NOTE(review): that definition spells the
// parameter type MixedTypedExampleType — presumably MixedTypedExample is an
// alias for it provided by Models.h; verify, otherwise this declaration will
// not link against the definition.
std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);

// generate validation tests
// For each generated test model, instantiate one gtest case that feeds the
// model and its example requests through both validation entry points
// (validateModel / validateRequests).
#define VTS_CURRENT_TEST_CASE(TestName)                                           \
    TEST_F(ValidationTest, TestName) {                                            \
        const Model model = TestName::createTestModel();                          \
        const std::vector<Request> requests = createRequests(TestName::examples); \
        validateModel(model);                                                     \
        validateRequests(model, requests);                                        \
    }

FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)

#undef VTS_CURRENT_TEST_CASE
44
45} // namespace functional
46} // namespace vts
47} // namespace V1_1
48} // namespace neuralnetworks
49} // namespace hardware
50} // namespace android
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index b1d3be78..62381e67 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -16,16 +16,7 @@
16 16
17#define LOG_TAG "neuralnetworks_hidl_hal_test" 17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18 18
19#include "VtsHalNeuralnetworksV1_1.h" 19#include "VtsHalNeuralnetworks.h"
20#include "Utils.h"
21
22#include <android-base/logging.h>
23#include <hidlmemory/mapping.h>
24
25using ::android::hardware::hidl_memory;
26using ::android::hidl::allocator::V1_0::IAllocator;
27using ::android::hidl::memory::V1_0::IMemory;
28using ::android::sp;
29 20
30namespace android { 21namespace android {
31namespace hardware { 22namespace hardware {
@@ -34,11 +25,6 @@ namespace V1_1 {
34namespace vts { 25namespace vts {
35namespace functional { 26namespace functional {
36 27
37// allocator helper
38hidl_memory allocateSharedMemory(int64_t size) {
39 return nn::allocateSharedMemory(size);
40}
41
42// A class for test environment setup 28// A class for test environment setup
43NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {} 29NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
44 30
@@ -52,23 +38,49 @@ NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
52} 38}
53 39
54void NeuralnetworksHidlEnvironment::registerTestServices() { 40void NeuralnetworksHidlEnvironment::registerTestServices() {
55 registerTestService<V1_1::IDevice>(); 41 registerTestService<IDevice>();
56} 42}
57 43
58// The main test class for NEURALNETWORK HIDL HAL. 44// The main test class for NEURALNETWORK HIDL HAL.
45NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}
46
59NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {} 47NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
60 48
61void NeuralnetworksHidlTest::SetUp() { 49void NeuralnetworksHidlTest::SetUp() {
62 device = ::testing::VtsHalHidlTargetTestBase::getService<V1_1::IDevice>( 50 ::testing::VtsHalHidlTargetTestBase::SetUp();
51 device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
63 NeuralnetworksHidlEnvironment::getInstance()); 52 NeuralnetworksHidlEnvironment::getInstance());
64 ASSERT_NE(nullptr, device.get()); 53 ASSERT_NE(nullptr, device.get());
65} 54}
66 55
67void NeuralnetworksHidlTest::TearDown() {} 56void NeuralnetworksHidlTest::TearDown() {
57 device = nullptr;
58 ::testing::VtsHalHidlTargetTestBase::TearDown();
59}
68 60
69} // namespace functional 61} // namespace functional
70} // namespace vts 62} // namespace vts
63
64::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
65 return os << toString(errorStatus);
66}
67
68::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
69 return os << toString(deviceStatus);
70}
71
71} // namespace V1_1 72} // namespace V1_1
72} // namespace neuralnetworks 73} // namespace neuralnetworks
73} // namespace hardware 74} // namespace hardware
74} // namespace android 75} // namespace android
76
77using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment;
78
79int main(int argc, char** argv) {
80 ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
81 ::testing::InitGoogleTest(&argc, argv);
82 NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
83
84 int status = RUN_ALL_TESTS();
85 return status;
86}
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
index 426246ce..0050e52d 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -17,65 +17,71 @@
17#ifndef VTS_HAL_NEURALNETWORKS_V1_1_H 17#ifndef VTS_HAL_NEURALNETWORKS_V1_1_H
18#define VTS_HAL_NEURALNETWORKS_V1_1_H 18#define VTS_HAL_NEURALNETWORKS_V1_1_H
19 19
20#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h> 20#include <android/hardware/neuralnetworks/1.0/types.h>
21#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
22#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
23#include <android/hardware/neuralnetworks/1.1/IDevice.h> 21#include <android/hardware/neuralnetworks/1.1/IDevice.h>
24#include <android/hardware/neuralnetworks/1.1/types.h> 22#include <android/hardware/neuralnetworks/1.1/types.h>
25#include <android/hidl/allocator/1.0/IAllocator.h>
26 23
27#include <VtsHalHidlTargetTestBase.h> 24#include <VtsHalHidlTargetTestBase.h>
28#include <VtsHalHidlTargetTestEnvBase.h> 25#include <VtsHalHidlTargetTestEnvBase.h>
26
27#include <android-base/macros.h>
29#include <gtest/gtest.h> 28#include <gtest/gtest.h>
30#include <string> 29#include <iostream>
30#include <vector>
31 31
32namespace android { 32namespace android {
33namespace hardware { 33namespace hardware {
34namespace neuralnetworks { 34namespace neuralnetworks {
35namespace V1_1 { 35namespace V1_1 {
36
37using V1_0::Request;
38using V1_0::DeviceStatus;
39using V1_0::ErrorStatus;
40
36namespace vts { 41namespace vts {
37namespace functional { 42namespace functional {
38hidl_memory allocateSharedMemory(int64_t size);
39 43
40// A class for test environment setup 44// A class for test environment setup
41class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase { 45class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
46 DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
42 NeuralnetworksHidlEnvironment(); 47 NeuralnetworksHidlEnvironment();
43 NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete; 48 ~NeuralnetworksHidlEnvironment() override;
44 NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete;
45 NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete;
46 NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
47 49
48 public: 50 public:
49 ~NeuralnetworksHidlEnvironment() override;
50 static NeuralnetworksHidlEnvironment* getInstance(); 51 static NeuralnetworksHidlEnvironment* getInstance();
51 void registerTestServices() override; 52 void registerTestServices() override;
52}; 53};
53 54
54// The main test class for NEURALNETWORKS HIDL HAL. 55// The main test class for NEURALNETWORKS HIDL HAL.
55class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase { 56class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
57 DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
58
56 public: 59 public:
60 NeuralnetworksHidlTest();
57 ~NeuralnetworksHidlTest() override; 61 ~NeuralnetworksHidlTest() override;
58 void SetUp() override; 62 void SetUp() override;
59 void TearDown() override; 63 void TearDown() override;
60 64
61 sp<V1_1::IDevice> device; 65 protected:
66 sp<IDevice> device;
62}; 67};
68
69// Tag for the validation tests
70class ValidationTest : public NeuralnetworksHidlTest {
71 protected:
72 void validateModel(const Model& model);
73 void validateRequests(const Model& model, const std::vector<Request>& request);
74};
75
76// Tag for the generated tests
77class GeneratedTest : public NeuralnetworksHidlTest {};
78
63} // namespace functional 79} // namespace functional
64} // namespace vts 80} // namespace vts
65 81
66// pretty-print values for error messages 82// pretty-print values for error messages
67 83::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
68template <typename CharT, typename Traits> 84::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus);
69::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
70 V1_0::ErrorStatus errorStatus) {
71 return os << toString(errorStatus);
72}
73
74template <typename CharT, typename Traits>
75::std::basic_ostream<CharT, Traits>& operator<<(::std::basic_ostream<CharT, Traits>& os,
76 V1_0::DeviceStatus deviceStatus) {
77 return os << toString(deviceStatus);
78}
79 85
80} // namespace V1_1 86} // namespace V1_1
81} // namespace neuralnetworks 87} // namespace neuralnetworks
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
deleted file mode 100644
index 10591dcb..00000000
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworksV1_1BasicTest.cpp
+++ /dev/null
@@ -1,468 +0,0 @@
1/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "VtsHalNeuralnetworksV1_1.h"
20
21#include "Callbacks.h"
22#include "Models.h"
23#include "TestHarness.h"
24
25#include <android-base/logging.h>
26#include <android/hardware/neuralnetworks/1.1/IDevice.h>
27#include <android/hardware/neuralnetworks/1.1/types.h>
28#include <android/hidl/memory/1.0/IMemory.h>
29#include <hidlmemory/mapping.h>
30
31using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
32using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
33using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
34using ::android::hardware::neuralnetworks::V1_0::FusedActivationFunc;
35using ::android::hardware::neuralnetworks::V1_0::Operand;
36using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
37using ::android::hardware::neuralnetworks::V1_0::OperandType;
38using ::android::hardware::neuralnetworks::V1_0::Request;
39using ::android::hardware::neuralnetworks::V1_1::Capabilities;
40using ::android::hardware::neuralnetworks::V1_1::IDevice;
41using ::android::hardware::neuralnetworks::V1_1::Model;
42using ::android::hardware::neuralnetworks::V1_1::Operation;
43using ::android::hardware::neuralnetworks::V1_1::OperationType;
44using ::android::hardware::Return;
45using ::android::hardware::Void;
46using ::android::hardware::hidl_memory;
47using ::android::hardware::hidl_string;
48using ::android::hardware::hidl_vec;
49using ::android::hidl::allocator::V1_0::IAllocator;
50using ::android::hidl::memory::V1_0::IMemory;
51using ::android::sp;
52
53namespace android {
54namespace hardware {
55namespace neuralnetworks {
56namespace V1_1 {
57namespace vts {
58namespace functional {
59using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
60using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
61
62static void doPrepareModelShortcut(const sp<IDevice>& device, sp<IPreparedModel>* preparedModel) {
63 ASSERT_NE(nullptr, preparedModel);
64 Model model = createValidTestModel_1_1();
65
66 // see if service can handle model
67 bool fullySupportsModel = false;
68 Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
69 model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
70 ASSERT_EQ(ErrorStatus::NONE, status);
71 ASSERT_NE(0ul, supported.size());
72 fullySupportsModel =
73 std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
74 });
75 ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
76
77 // launch prepare model
78 sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
79 ASSERT_NE(nullptr, preparedModelCallback.get());
80 Return<ErrorStatus> prepareLaunchStatus =
81 device->prepareModel_1_1(model, preparedModelCallback);
82 ASSERT_TRUE(prepareLaunchStatus.isOk());
83 ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
84
85 // retrieve prepared model
86 preparedModelCallback->wait();
87 ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
88 *preparedModel = preparedModelCallback->getPreparedModel();
89
90 // The getSupportedOperations call returns a list of operations that are
91 // guaranteed not to fail if prepareModel is called, and
92 // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
93 // If a driver has any doubt that it can prepare an operation, it must
94 // return false. So here, if a driver isn't sure if it can support an
95 // operation, but reports that it successfully prepared the model, the test
96 // can continue.
97 if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
98 ASSERT_EQ(nullptr, preparedModel->get());
99 LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
100 "prepare model that it does not support.";
101 std::cout << "[ ] Early termination of test because vendor service cannot "
102 "prepare model that it does not support."
103 << std::endl;
104 return;
105 }
106 ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
107 ASSERT_NE(nullptr, preparedModel->get());
108}
109
110// create device test
111TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
112
113// status test
114TEST_F(NeuralnetworksHidlTest, StatusTest) {
115 Return<DeviceStatus> status = device->getStatus();
116 ASSERT_TRUE(status.isOk());
117 EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
118}
119
120// initialization
121TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
122 Return<void> ret =
123 device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
124 EXPECT_EQ(ErrorStatus::NONE, status);
125 EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
126 EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
127 EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
128 EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
129 EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
130 EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
131 });
132 EXPECT_TRUE(ret.isOk());
133}
134
135// supported operations positive test
136TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {
137 Model model = createValidTestModel_1_1();
138 Return<void> ret = device->getSupportedOperations_1_1(
139 model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
140 EXPECT_EQ(ErrorStatus::NONE, status);
141 EXPECT_EQ(model.operations.size(), supported.size());
142 });
143 EXPECT_TRUE(ret.isOk());
144}
145
146// supported operations negative test 1
147TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {
148 Model model = createInvalidTestModel1_1_1();
149 Return<void> ret = device->getSupportedOperations_1_1(
150 model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
151 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
152 (void)supported;
153 });
154 EXPECT_TRUE(ret.isOk());
155}
156
157// supported operations negative test 2
158TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
159 Model model = createInvalidTestModel2_1_1();
160 Return<void> ret = device->getSupportedOperations_1_1(
161 model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
162 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
163 (void)supported;
164 });
165 EXPECT_TRUE(ret.isOk());
166}
167
168// prepare simple model positive test
169TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
170 sp<IPreparedModel> preparedModel;
171 doPrepareModelShortcut(device, &preparedModel);
172}
173
174// prepare simple model negative test 1
175TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
176 Model model = createInvalidTestModel1_1_1();
177 sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
178 ASSERT_NE(nullptr, preparedModelCallback.get());
179 Return<ErrorStatus> prepareLaunchStatus =
180 device->prepareModel_1_1(model, preparedModelCallback);
181 ASSERT_TRUE(prepareLaunchStatus.isOk());
182 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
183
184 preparedModelCallback->wait();
185 ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
186 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
187 sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
188 EXPECT_EQ(nullptr, preparedModel.get());
189}
190
191// prepare simple model negative test 2
192TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
193 Model model = createInvalidTestModel2_1_1();
194 sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
195 ASSERT_NE(nullptr, preparedModelCallback.get());
196 Return<ErrorStatus> prepareLaunchStatus =
197 device->prepareModel_1_1(model, preparedModelCallback);
198 ASSERT_TRUE(prepareLaunchStatus.isOk());
199 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
200
201 preparedModelCallback->wait();
202 ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
203 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
204 sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
205 EXPECT_EQ(nullptr, preparedModel.get());
206}
207
208// execute simple graph positive test
209TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
210 std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
211 std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
212 const uint32_t OUTPUT = 1;
213
214 sp<IPreparedModel> preparedModel;
215 ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
216 if (preparedModel == nullptr) {
217 return;
218 }
219 Request request = createValidTestRequest();
220
221 auto postWork = [&] {
222 sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]);
223 if (outputMemory == nullptr) {
224 return false;
225 }
226 float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
227 if (outputPtr == nullptr) {
228 return false;
229 }
230 outputMemory->read();
231 std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
232 outputMemory->commit();
233 return true;
234 };
235
236 sp<ExecutionCallback> executionCallback = new ExecutionCallback();
237 ASSERT_NE(nullptr, executionCallback.get());
238 executionCallback->on_finish(postWork);
239 Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
240 ASSERT_TRUE(executeLaunchStatus.isOk());
241 EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeLaunchStatus));
242
243 executionCallback->wait();
244 ErrorStatus executionReturnStatus = executionCallback->getStatus();
245 EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);
246 EXPECT_EQ(expectedData, outputData);
247}
248
249// execute simple graph negative test 1
250TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
251 sp<IPreparedModel> preparedModel;
252 ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
253 if (preparedModel == nullptr) {
254 return;
255 }
256 Request request = createInvalidTestRequest1();
257
258 sp<ExecutionCallback> executionCallback = new ExecutionCallback();
259 ASSERT_NE(nullptr, executionCallback.get());
260 Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
261 ASSERT_TRUE(executeLaunchStatus.isOk());
262 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
263
264 executionCallback->wait();
265 ErrorStatus executionReturnStatus = executionCallback->getStatus();
266 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
267}
268
269// execute simple graph negative test 2
270TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
271 sp<IPreparedModel> preparedModel;
272 ASSERT_NO_FATAL_FAILURE(doPrepareModelShortcut(device, &preparedModel));
273 if (preparedModel == nullptr) {
274 return;
275 }
276 Request request = createInvalidTestRequest2();
277
278 sp<ExecutionCallback> executionCallback = new ExecutionCallback();
279 ASSERT_NE(nullptr, executionCallback.get());
280 Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
281 ASSERT_TRUE(executeLaunchStatus.isOk());
282 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
283
284 executionCallback->wait();
285 ErrorStatus executionReturnStatus = executionCallback->getStatus();
286 EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
287}
288
289class NeuralnetworksInputsOutputsTest
290 : public NeuralnetworksHidlTest,
291 public ::testing::WithParamInterface<std::tuple<bool, bool>> {
292 protected:
293 virtual void SetUp() { NeuralnetworksHidlTest::SetUp(); }
294 virtual void TearDown() { NeuralnetworksHidlTest::TearDown(); }
295 V1_1::Model createModel(const std::vector<uint32_t>& inputs,
296 const std::vector<uint32_t>& outputs) {
297 // We set up the operands as floating-point with no designated
298 // model inputs and outputs, and then patch type and lifetime
299 // later on in this function.
300
301 std::vector<Operand> operands = {
302 {
303 .type = OperandType::TENSOR_FLOAT32,
304 .dimensions = {1},
305 .numberOfConsumers = 1,
306 .scale = 0.0f,
307 .zeroPoint = 0,
308 .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
309 .location = {.poolIndex = 0, .offset = 0, .length = 0},
310 },
311 {
312 .type = OperandType::TENSOR_FLOAT32,
313 .dimensions = {1},
314 .numberOfConsumers = 1,
315 .scale = 0.0f,
316 .zeroPoint = 0,
317 .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
318 .location = {.poolIndex = 0, .offset = 0, .length = 0},
319 },
320 {
321 .type = OperandType::INT32,
322 .dimensions = {},
323 .numberOfConsumers = 1,
324 .scale = 0.0f,
325 .zeroPoint = 0,
326 .lifetime = OperandLifeTime::CONSTANT_COPY,
327 .location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)},
328 },
329 {
330 .type = OperandType::TENSOR_FLOAT32,
331 .dimensions = {1},
332 .numberOfConsumers = 0,
333 .scale = 0.0f,
334 .zeroPoint = 0,
335 .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
336 .location = {.poolIndex = 0, .offset = 0, .length = 0},
337 },
338 };
339
340 const std::vector<Operation> operations = {{
341 .type = OperationType::ADD, .inputs = {0, 1, 2}, .outputs = {3},
342 }};
343
344 std::vector<uint8_t> operandValues;
345 int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
346 operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
347 reinterpret_cast<const uint8_t*>(&activation[1]));
348
349 if (kQuantized) {
350 for (auto& operand : operands) {
351 if (operand.type == OperandType::TENSOR_FLOAT32) {
352 operand.type = OperandType::TENSOR_QUANT8_ASYMM;
353 operand.scale = 1.0f;
354 operand.zeroPoint = 0;
355 }
356 }
357 }
358
359 auto patchLifetime = [&operands](const std::vector<uint32_t>& operandIndexes,
360 OperandLifeTime lifetime) {
361 for (uint32_t index : operandIndexes) {
362 operands[index].lifetime = lifetime;
363 }
364 };
365 if (kInputHasPrecedence) {
366 patchLifetime(outputs, OperandLifeTime::MODEL_OUTPUT);
367 patchLifetime(inputs, OperandLifeTime::MODEL_INPUT);
368 } else {
369 patchLifetime(inputs, OperandLifeTime::MODEL_INPUT);
370 patchLifetime(outputs, OperandLifeTime::MODEL_OUTPUT);
371 }
372
373 return {
374 .operands = operands,
375 .operations = operations,
376 .inputIndexes = inputs,
377 .outputIndexes = outputs,
378 .operandValues = operandValues,
379 .pools = {},
380 };
381 }
382 void check(const std::string& name,
383 bool expectation, // true = success
384 const std::vector<uint32_t>& inputs, const std::vector<uint32_t>& outputs) {
385 SCOPED_TRACE(name + " (HAL calls should " + (expectation ? "succeed" : "fail") + ", " +
386 (kInputHasPrecedence ? "input" : "output") + " precedence, " +
387 (kQuantized ? "quantized" : "float"));
388
389 V1_1::Model model = createModel(inputs, outputs);
390
391 // ensure that getSupportedOperations_1_1() checks model validity
392 ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
393 Return<void> supportedOpsReturn = device->getSupportedOperations_1_1(
394 model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
395 const hidl_vec<bool>& supported) {
396 supportedOpsErrorStatus = status;
397 if (status == ErrorStatus::NONE) {
398 ASSERT_EQ(supported.size(), model.operations.size());
399 }
400 });
401 ASSERT_TRUE(supportedOpsReturn.isOk());
402 ASSERT_EQ(supportedOpsErrorStatus,
403 (expectation ? ErrorStatus::NONE : ErrorStatus::INVALID_ARGUMENT));
404
405 // ensure that prepareModel_1_1() checks model validity
406 sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
407 ASSERT_NE(preparedModelCallback.get(), nullptr);
408 Return<ErrorStatus> prepareLaunchReturn =
409 device->prepareModel_1_1(model, preparedModelCallback);
410 ASSERT_TRUE(prepareLaunchReturn.isOk());
411 ASSERT_TRUE(prepareLaunchReturn == ErrorStatus::NONE ||
412 prepareLaunchReturn == ErrorStatus::INVALID_ARGUMENT);
413 bool preparationOk = (prepareLaunchReturn == ErrorStatus::NONE);
414 if (preparationOk) {
415 preparedModelCallback->wait();
416 preparationOk = (preparedModelCallback->getStatus() == ErrorStatus::NONE);
417 }
418
419 if (preparationOk) {
420 ASSERT_TRUE(expectation);
421 } else {
422 // Preparation can fail for reasons other than an invalid model --
423 // for example, perhaps not all operations are supported, or perhaps
424 // the device hit some kind of capacity limit.
425 bool invalid = prepareLaunchReturn == ErrorStatus::INVALID_ARGUMENT ||
426 preparedModelCallback->getStatus() == ErrorStatus::INVALID_ARGUMENT;
427 ASSERT_NE(expectation, invalid);
428 }
429 }
430
431 // Indicates whether an operand that appears in both the inputs
432 // and outputs vector should have lifetime appropriate for input
433 // rather than for output.
434 const bool kInputHasPrecedence = std::get<0>(GetParam());
435
436 // Indicates whether we should test TENSOR_QUANT8_ASYMM rather
437 // than TENSOR_FLOAT32.
438 const bool kQuantized = std::get<1>(GetParam());
439};
440
441TEST_P(NeuralnetworksInputsOutputsTest, Validate) {
442 check("Ok", true, {0, 1}, {3});
443 check("InputIsOutput", false, {0, 1}, {3, 0});
444 check("OutputIsInput", false, {0, 1, 3}, {3});
445 check("DuplicateInputs", false, {0, 1, 0}, {3});
446 check("DuplicateOutputs", false, {0, 1}, {3, 3});
447}
448
449INSTANTIATE_TEST_CASE_P(Flavor, NeuralnetworksInputsOutputsTest,
450 ::testing::Combine(::testing::Bool(), ::testing::Bool()));
451
452} // namespace functional
453} // namespace vts
454} // namespace V1_1
455} // namespace neuralnetworks
456} // namespace hardware
457} // namespace android
458
459using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment;
460
461int main(int argc, char** argv) {
462 ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
463 ::testing::InitGoogleTest(&argc, argv);
464 NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
465
466 int status = RUN_ALL_TESTS();
467 return status;
468}