Diffstat (limited to 'neuralnetworks/1.0')
-rw-r--r--  neuralnetworks/1.0/Android.bp                                              70
-rw-r--r--  neuralnetworks/1.0/IDevice.hal                                             31
-rw-r--r--  neuralnetworks/1.0/IPreparedModel.hal                                      25
-rw-r--r--  neuralnetworks/1.0/types.hal                                              174
-rw-r--r--  neuralnetworks/1.0/vts/functional/Android.bp                               37
-rw-r--r--  neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp  245
-rw-r--r--  neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h     82
7 files changed, 664 insertions, 0 deletions
diff --git a/neuralnetworks/1.0/Android.bp b/neuralnetworks/1.0/Android.bp
new file mode 100644
index 00000000..1356d33d
--- /dev/null
+++ b/neuralnetworks/1.0/Android.bp
@@ -0,0 +1,70 @@
1// This file is autogenerated by hidl-gen. Do not edit manually.
2
3filegroup {
4 name: "android.hardware.neuralnetworks@1.0_hal",
5 srcs: [
6 "types.hal",
7 "IDevice.hal",
8 "IPreparedModel.hal",
9 ],
10}
11
12genrule {
13 name: "android.hardware.neuralnetworks@1.0_genc++",
14 tools: ["hidl-gen"],
15 cmd: "$(location hidl-gen) -o $(genDir) -Lc++-sources -randroid.hardware:hardware/interfaces -randroid.hidl:system/libhidl/transport android.hardware.neuralnetworks@1.0",
16 srcs: [
17 ":android.hardware.neuralnetworks@1.0_hal",
18 ],
19 out: [
20 "android/hardware/neuralnetworks/1.0/types.cpp",
21 "android/hardware/neuralnetworks/1.0/DeviceAll.cpp",
22 "android/hardware/neuralnetworks/1.0/PreparedModelAll.cpp",
23 ],
24}
25
26genrule {
27 name: "android.hardware.neuralnetworks@1.0_genc++_headers",
28 tools: ["hidl-gen"],
29 cmd: "$(location hidl-gen) -o $(genDir) -Lc++-headers -randroid.hardware:hardware/interfaces -randroid.hidl:system/libhidl/transport android.hardware.neuralnetworks@1.0",
30 srcs: [
31 ":android.hardware.neuralnetworks@1.0_hal",
32 ],
33 out: [
34 "android/hardware/neuralnetworks/1.0/types.h",
35 "android/hardware/neuralnetworks/1.0/hwtypes.h",
36 "android/hardware/neuralnetworks/1.0/IDevice.h",
37 "android/hardware/neuralnetworks/1.0/IHwDevice.h",
38 "android/hardware/neuralnetworks/1.0/BnHwDevice.h",
39 "android/hardware/neuralnetworks/1.0/BpHwDevice.h",
40 "android/hardware/neuralnetworks/1.0/BsDevice.h",
41 "android/hardware/neuralnetworks/1.0/IPreparedModel.h",
42 "android/hardware/neuralnetworks/1.0/IHwPreparedModel.h",
43 "android/hardware/neuralnetworks/1.0/BnHwPreparedModel.h",
44 "android/hardware/neuralnetworks/1.0/BpHwPreparedModel.h",
45 "android/hardware/neuralnetworks/1.0/BsPreparedModel.h",
46 ],
47}
48
49cc_library_shared {
50 name: "android.hardware.neuralnetworks@1.0",
51 defaults: ["hidl-module-defaults"],
52 generated_sources: ["android.hardware.neuralnetworks@1.0_genc++"],
53 generated_headers: ["android.hardware.neuralnetworks@1.0_genc++_headers"],
54 export_generated_headers: ["android.hardware.neuralnetworks@1.0_genc++_headers"],
55 vendor_available: true,
56 shared_libs: [
57 "libhidlbase",
58 "libhidltransport",
59 "libhwbinder",
60 "liblog",
61 "libutils",
62 "libcutils",
63 ],
64 export_shared_lib_headers: [
65 "libhidlbase",
66 "libhidltransport",
67 "libhwbinder",
68 "libutils",
69 ],
70}
diff --git a/neuralnetworks/1.0/IDevice.hal b/neuralnetworks/1.0/IDevice.hal
new file mode 100644
index 00000000..b826b237
--- /dev/null
+++ b/neuralnetworks/1.0/IDevice.hal
@@ -0,0 +1,31 @@
1/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17/* This HAL is a work in progress */
18
19package android.hardware.neuralnetworks@1.0;
20
21import IPreparedModel;
22
23interface IDevice {
24 initialize() generates(Capabilities capabilities);
25
26 getSupportedSubgraph(Model model) generates(vec<bool> supported);
27
28 prepareModel(Model model) generates(IPreparedModel preparedModel);
29
30 getStatus() generates(DeviceStatus status);
31};
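
For orientation, a minimal client-side sketch of this interface, assuming the hidl-gen C++ bindings produced by the Android.bp above; the default-instance lookup and the callback/direct-return split mirror the VTS test further down, and queryDevice is an illustrative name, not part of this change.

#include <android/hardware/neuralnetworks/1.0/IDevice.h>

using ::android::sp;
using ::android::hardware::Return;
using ::android::hardware::neuralnetworks::V1_0::Capabilities;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::IDevice;

// Query the registered driver, mirroring the InitializeTest/StatusTest cases below.
bool queryDevice() {
    sp<IDevice> device = IDevice::getService();  // default instance assumed
    if (device == nullptr) {
        return false;
    }

    // Struct results (Capabilities) come back through a callback...
    Return<void> ret = device->initialize([](const Capabilities& capabilities) {
        (void)capabilities;  // a client would inspect e.g. float32Performance here
    });
    if (!ret.isOk()) {
        return false;
    }

    // ...while a single scalar/enum result is returned directly.
    DeviceStatus status = device->getStatus();
    return status == DeviceStatus::AVAILABLE;
}
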
diff --git a/neuralnetworks/1.0/IPreparedModel.hal b/neuralnetworks/1.0/IPreparedModel.hal
new file mode 100644
index 00000000..566d6ace
--- /dev/null
+++ b/neuralnetworks/1.0/IPreparedModel.hal
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17/* This HAL is a work in progress */
18
19package android.hardware.neuralnetworks@1.0;
20
21interface IPreparedModel {
22 // TODO: The execution is synchronous. Change that to have a callback on completion.
23 // Multiple threads can call this execute function concurrently.
24 execute(Request request) generates(bool success);
25};
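
A sketch of the prepare-then-execute flow as the VTS test below uses it: prepareModel() hands back the IPreparedModel proxy directly and execute() returns a plain success flag. How the Model and Request are built is shown in types.hal and the test; prepareAndRun is an illustrative helper name.

#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>

using ::android::sp;
using ::android::hardware::neuralnetworks::V1_0::IDevice;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::Model;
using ::android::hardware::neuralnetworks::V1_0::Request;

// Compile once, then execute; multiple threads may call execute() concurrently.
bool prepareAndRun(const sp<IDevice>& device, const Model& model, const Request& request) {
    // prepareModel() returns the driver's compiled-model object as a HIDL proxy.
    sp<IPreparedModel> preparedModel = device->prepareModel(model);
    if (preparedModel == nullptr) {
        return false;
    }

    // Execution is synchronous in this revision (see the TODO in the interface).
    bool success = preparedModel->execute(request);
    return success;
}
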
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
new file mode 100644
index 00000000..ccc17f14
--- /dev/null
+++ b/neuralnetworks/1.0/types.hal
@@ -0,0 +1,174 @@
1/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17/* This HAL is a work in progress */
18
19package android.hardware.neuralnetworks@1.0;
20
21// The types an operand can have.
22// These values are the same as found in the NeuralNetworks.h file.
23// When modifying, be sure to update HAL_NUM_OPERAND_TYPES in HalInterfaces.h.
24enum OperandType : uint32_t {
25 FLOAT16 = 0,
26 FLOAT32 = 1,
27 INT8 = 2,
28 UINT8 = 3,
29 INT16 = 4,
30 UINT16 = 5,
31 INT32 = 6,
32 UINT32 = 7,
33 TENSOR_FLOAT16 = 8,
34 TENSOR_FLOAT32 = 9,
35 TENSOR_SYMMETRICAL_QUANT8 = 10,
36};
37
38// The types an operation can have. Unlike the operation types found in the
39// NeuralNetworks.h file, these specify the data type they operate on.
40// This is done to simplify the work of drivers.
41// TODO: Currently they are the same. Add a conversion when finalizing the model.
42// When modifying, be sure to update HAL_NUM_OPERATION_TYPES in HalInterfaces.h.
43enum OperationType : uint32_t {
44 AVERAGE_POOL_FLOAT32 = 0,
45 CONCATENATION_FLOAT32 = 1,
46 CONV_FLOAT32 = 2,
47 DEPTHWISE_CONV_FLOAT32 = 3,
48 MAX_POOL_FLOAT32 = 4,
49 L2_POOL_FLOAT32 = 5,
50 DEPTH_TO_SPACE_FLOAT32 = 6,
51 SPACE_TO_DEPTH_FLOAT32 = 7,
52 LOCAL_RESPONSE_NORMALIZATION_FLOAT32 = 8,
53 SOFTMAX_FLOAT32 = 9,
54 RESHAPE_FLOAT32 = 10,
55 SPLIT_FLOAT32 = 11,
56 FAKE_QUANT_FLOAT32 = 12,
57 ADD_FLOAT32 = 13,
58 FULLY_CONNECTED_FLOAT32 = 14,
59 CAST_FLOAT32 = 15,
60 MUL_FLOAT32 = 16,
61 L2_NORMALIZATION_FLOAT32 = 17,
62 LOGISTIC_FLOAT32 = 18,
63 RELU_FLOAT32 = 19,
64 RELU6_FLOAT32 = 20,
65 RELU1_FLOAT32 = 21,
66 TANH_FLOAT32 = 22,
67 DEQUANTIZE_FLOAT32 = 23,
68 FLOOR_FLOAT32 = 24,
69 GATHER_FLOAT32 = 25,
70 RESIZE_BILINEAR_FLOAT32 = 26,
71 LSH_PROJECTION_FLOAT32 = 27,
72 LSTM_FLOAT32 = 28,
73 SVDF_FLOAT32 = 29,
74 RNN_FLOAT32 = 30,
75 N_GRAM_FLOAT32 = 31,
76 LOOKUP_FLOAT32 = 32,
77};
78
79// Two special values that can be used instead of a regular poolIndex.
80enum LocationValues : uint32_t {
81 // The location will be specified at runtime. It's either a temporary
82 // variable, an input, or an output.
83 LOCATION_AT_RUN_TIME = 0xFFFFFFFF,
84 // The operand's value is stored in the operandValues field of the model.
85 // TODO: Only for old
86 LOCATION_SAME_BLOCK = 0xFFFFFFFE
87};
88
89// Status of a device.
90enum DeviceStatus : uint32_t {
91 AVAILABLE,
92 BUSY,
93 OFFLINE,
94 UNKNOWN // Do we need this?
95};
96
97// Performance information for the reference workload.
98// Used by a driver to report its performance characteristics.
99// TODO revisit the data types and scales.
100struct PerformanceInfo {
101 float execTime; // in nanoseconds
102 float powerUsage; // in picoJoules
103};
104
105// The capabilities of a driver.
106struct Capabilities {
107 vec<OperationType> supportedOperationTypes;
108 // TODO Do the same for baseline model IDs
109 bool cachesCompilation;
110 // TODO revisit the data types and scales.
111 float bootupTime; // in nanoseconds
112 PerformanceInfo float16Performance;
113 PerformanceInfo float32Performance;
114 PerformanceInfo quantized8Performance;
115};
116
117// Describes the location of a data object.
118struct DataLocation {
119 // The index of the memory pool where this location is found.
120 // Two special values can also be used. See the LOCATION_* constants above.
121 uint32_t poolIndex;
122 // Offset in bytes from the start of the pool.
123 uint32_t offset;
124 // The length of the data, in bytes.
125 uint32_t length;
126};
127
128struct Operand {
129 OperandType type;
130 vec<uint32_t> dimensions;
131
132 // The number of operations that use this operand as input.
133 // TODO It would be nice to track the actual consumers, e.g. vec<uint32_t> consumers;
134 uint32_t numberOfConsumers;
135
136 float scale;
137 int32_t zeroPoint;
138
139 // Where to find the data for this operand.
140 DataLocation location;
141};
142
143// Describes one operation of the graph.
144struct Operation {
145 // The type of operation.
146 OperationType type;
147 // Indexes of the operands used as inputs by this operation. Each value
148 // is an index into the model's operands vector.
149 vec<uint32_t> inputs;
150 // Indexes of the operands produced as outputs by this operation. Each
151 // value is an index into the model's operands vector.
152 vec<uint32_t> outputs;
153};
154
155struct InputOutputInfo {
156 DataLocation location;
157 // If dimensions.size() > 0, we have updated dimensions.
158 vec<uint32_t> dimensions;
159};
160
161struct Model {
162 vec<Operand> operands;
163 vec<Operation> operations;
164 vec<uint32_t> inputIndexes;
165 vec<uint32_t> outputIndexes;
166 vec<uint8_t> operandValues;
167 vec<memory> pools;
168};
169
170struct Request {
171 vec<InputOutputInfo> inputs;
172 vec<InputOutputInfo> outputs;
173 vec<memory> pools;
174};
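
To make the DataLocation/LocationValues relationship concrete: in the VTS test below, the constant operand's bytes live in Model.operandValues and its location uses LOCATION_SAME_BLOCK. A small sketch of that packing follows; the helper name appendConstant is illustrative, not part of this change.

#include <cstdint>
#include <vector>

#include <android/hardware/neuralnetworks/1.0/types.h>

using ::android::hardware::neuralnetworks::V1_0::DataLocation;
using ::android::hardware::neuralnetworks::V1_0::LocationValues;

// Hypothetical helper: append a constant operand's bytes to the staging vector
// that will become Model.operandValues, and return the matching DataLocation.
// LOCATION_SAME_BLOCK marks data stored inline in operandValues rather than in
// one of the memory pools referenced by poolIndex.
DataLocation appendConstant(std::vector<uint8_t>* operandValues,
                            const void* data, uint32_t length) {
    const uint32_t offset = static_cast<uint32_t>(operandValues->size());
    const uint8_t* bytes = static_cast<const uint8_t*>(data);
    operandValues->insert(operandValues->end(), bytes, bytes + length);
    return {
        .poolIndex = static_cast<uint32_t>(LocationValues::LOCATION_SAME_BLOCK),
        .offset = offset,
        .length = length,
    };
}
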
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
new file mode 100644
index 00000000..96eb4cb5
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -0,0 +1,37 @@
1//
2// Copyright (C) 2017 The Android Open Source Project
3//
4// Licensed under the Apache License, Version 2.0 (the "License");
5// you may not use this file except in compliance with the License.
6// You may obtain a copy of the License at
7//
8// http://www.apache.org/licenses/LICENSE-2.0
9//
10// Unless required by applicable law or agreed to in writing, software
11// distributed under the License is distributed on an "AS IS" BASIS,
12// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13// See the License for the specific language governing permissions and
14// limitations under the License.
15//
16
17cc_test {
18 name: "VtsHalNeuralnetworksV1_0TargetTest",
19 srcs: ["VtsHalNeuralnetworksV1_0TargetTest.cpp"],
20 defaults: ["hidl_defaults"],
21 shared_libs: [
22 "libbase",
23 "libhidlbase",
24 "libhidlmemory",
25 "libhidltransport",
26 "liblog",
27 "libutils",
28 "android.hardware.neuralnetworks@1.0",
29 "android.hidl.allocator@1.0",
30 "android.hidl.memory@1.0",
31 ],
32 static_libs: ["VtsHalHidlTargetTestBase"],
33 cflags: [
34 "-O0",
35 "-g",
36 ],
37}
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
new file mode 100644
index 00000000..9fa694df
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
@@ -0,0 +1,245 @@
1/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "VtsHalNeuralnetworksV1_0TargetTest.h"
20#include <android-base/logging.h>
21#include <android/hidl/memory/1.0/IMemory.h>
22#include <hidlmemory/mapping.h>
23#include <string>
24
25namespace android {
26namespace hardware {
27namespace neuralnetworks {
28namespace V1_0 {
29namespace vts {
30namespace functional {
31
32// A class for test environment setup
33NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
34
35NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
36 // This has to return a "new" object because it is freed inside
37 // ::testing::AddGlobalTestEnvironment when the gtest is being torn down
38 static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
39 return instance;
40}
41
42void NeuralnetworksHidlEnvironment::registerTestServices() {
43 registerTestService("android.hardware.neuralnetworks", "1.0", "IDevice");
44}
45
46// The main test class for the NEURALNETWORKS HIDL HAL.
47void NeuralnetworksHidlTest::SetUp() {
48 std::string instance =
49 NeuralnetworksHidlEnvironment::getInstance()->getServiceName(IDevice::descriptor);
50 LOG(INFO) << "running vts test with instance: " << instance;
51 device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(instance);
52 ASSERT_NE(nullptr, device.get());
53}
54
55void NeuralnetworksHidlTest::TearDown() {}
56
57// create device test
58TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
59
60// status test
61TEST_F(NeuralnetworksHidlTest, StatusTest) {
62 DeviceStatus status = device->getStatus();
63 EXPECT_EQ(DeviceStatus::AVAILABLE, status);
64}
65
66// initialization
67TEST_F(NeuralnetworksHidlTest, InitializeTest) {
68 Return<void> ret = device->initialize([](const Capabilities& capabilities) {
69 EXPECT_NE(nullptr, capabilities.supportedOperationTypes.data());
70 EXPECT_NE(0ull, capabilities.supportedOperationTypes.size());
71 EXPECT_EQ(0u, static_cast<uint32_t>(capabilities.cachesCompilation) & ~0x1);
72 EXPECT_LT(0.0f, capabilities.bootupTime);
73 EXPECT_LT(0.0f, capabilities.float16Performance.execTime);
74 EXPECT_LT(0.0f, capabilities.float16Performance.powerUsage);
75 EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
76 EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
77 EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
78 EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
79 });
80 EXPECT_TRUE(ret.isOk());
81}
82
83namespace {
84// create the model
85Model createTestModel() {
86 const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
87 const uint32_t size = operand2Data.size() * sizeof(float);
88
89 const uint32_t operand1 = 0;
90 const uint32_t operand2 = 1;
91 const uint32_t operand3 = 2;
92
93 const std::vector<Operand> operands = {
94 {
95 .type = OperandType::FLOAT32,
96 .dimensions = {1, 2, 2, 1},
97 .numberOfConsumers = 1,
98 .scale = 0.0f,
99 .zeroPoint = 0,
100 .location = {.poolIndex = static_cast<uint32_t>(LocationValues::LOCATION_AT_RUN_TIME),
101 .offset = 0,
102 .length = 0},
103 },
104 {
105 .type = OperandType::FLOAT32,
106 .dimensions = {1, 2, 2, 1},
107 .numberOfConsumers = 1,
108 .scale = 0.0f,
109 .zeroPoint = 0,
110 .location = {.poolIndex = static_cast<uint32_t>(LocationValues::LOCATION_SAME_BLOCK),
111 .offset = 0,
112 .length = size},
113 },
114 {
115 .type = OperandType::FLOAT32,
116 .dimensions = {1, 2, 2, 1},
117 .numberOfConsumers = 0,
118 .scale = 0.0f,
119 .zeroPoint = 0,
120 .location = {.poolIndex = static_cast<uint32_t>(LocationValues::LOCATION_AT_RUN_TIME),
121 .offset = 0,
122 .length = 0},
123 },
124 };
125
126 const std::vector<Operation> operations = {{
127 .type = OperationType::ADD_FLOAT32, .inputs = {operand1, operand2}, .outputs = {operand3},
128 }};
129
130 const std::vector<uint32_t> inputIndexes = {operand1};
131 const std::vector<uint32_t> outputIndexes = {operand3};
132 const std::vector<uint8_t> operandValues(reinterpret_cast<const uint8_t*>(operand2Data.data()),
133 reinterpret_cast<const uint8_t*>(operand2Data.data()) +
134 operand2Data.size() * sizeof(float));
135 const std::vector<hidl_memory> pools = {};
136
137 return {
138 .operands = operands,
139 .operations = operations,
140 .inputIndexes = inputIndexes,
141 .outputIndexes = outputIndexes,
142 .operandValues = operandValues,
143 .pools = pools,
144 };
145}
146
147// allocator helper
148hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
149 hidl_memory memory;
150
151 sp<IAllocator> allocator = IAllocator::getService(type);
152 if (!allocator.get()) {
153 return {};
154 }
155
156 Return<void> ret = allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
157 ASSERT_TRUE(success);
158 memory = mem;
159 });
160 if (!ret.isOk()) {
161 return {};
162 }
163
164 return memory;
165}
166} // anonymous namespace
167
168// supported subgraph test
169TEST_F(NeuralnetworksHidlTest, SupportedSubgraphTest) {
170 Model model = createTestModel();
171 std::vector<bool> supported;
172 Return<void> ret = device->getSupportedSubgraph(
173 model, [&](const hidl_vec<bool>& hidl_supported) { supported = hidl_supported; });
174 ASSERT_TRUE(ret.isOk());
175 EXPECT_EQ(/*model.operations.size()*/ 0ull, supported.size());
176}
177
178// execute simple graph
179TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphTest) {
180 std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
181 std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
182 std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
183 const uint32_t INPUT = 0;
184 const uint32_t OUTPUT = 1;
185
186 // prepare model
187 Model model = createTestModel();
188 sp<IPreparedModel> preparedModel = device->prepareModel(model);
189 ASSERT_NE(nullptr, preparedModel.get());
190
191 // prepare inputs
192 uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
193 uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
194 std::vector<InputOutputInfo> inputs = {{
195 .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
196 }};
197 std::vector<InputOutputInfo> outputs = {{
198 .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
199 }};
200 std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
201 allocateSharedMemory(outputSize)};
202 ASSERT_NE(0ull, pools[INPUT].size());
203 ASSERT_NE(0ull, pools[OUTPUT].size());
204
205 // load data
206 sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
207 sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
208 ASSERT_NE(nullptr, inputMemory.get());
209 ASSERT_NE(nullptr, outputMemory.get());
210 float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
211 float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
212 ASSERT_NE(nullptr, inputPtr);
213 ASSERT_NE(nullptr, outputPtr);
214 std::copy(inputData.begin(), inputData.end(), inputPtr);
215 std::copy(outputData.begin(), outputData.end(), outputPtr);
216 inputMemory->commit();
217 outputMemory->commit();
218
219 // execute request
220 bool success = preparedModel->execute({.inputs = inputs, .outputs = outputs, .pools = pools});
221 EXPECT_TRUE(success);
222
223 // validate results { 1+5, 2+6, 3+7, 4+8 }
224 outputMemory->update();
225 std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
226 EXPECT_EQ(expectedData, outputData);
227}
228
229} // namespace functional
230} // namespace vts
231} // namespace V1_0
232} // namespace neuralnetworks
233} // namespace hardware
234} // namespace android
235
236using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;
237
238int main(int argc, char** argv) {
239 ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
240 ::testing::InitGoogleTest(&argc, argv);
241 NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
242
243 int status = RUN_ALL_TESTS();
244 return status;
245}
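
The test above covers only the client/VTS side. For orientation, a rough sketch of the matching service-side registration, assuming the standard HIDL server pattern; the SampleDevice class, its no-op bodies, and the thread-pool setup are illustrative and not part of this change.

#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <hidl/HidlTransportSupport.h>
#include <utils/Errors.h>

using ::android::OK;
using ::android::sp;
using ::android::hardware::configureRpcThreadpool;
using ::android::hardware::hidl_vec;
using ::android::hardware::joinRpcThreadpool;
using ::android::hardware::Return;
using ::android::hardware::Void;
using namespace ::android::hardware::neuralnetworks::V1_0;

// Hypothetical no-op driver skeleton; a real driver fills in compilation and execution.
struct SampleDevice : public IDevice {
    Return<void> initialize(initialize_cb _hidl_cb) override {
        _hidl_cb(Capabilities{});  // a real driver reports its performance numbers here
        return Void();
    }
    Return<void> getSupportedSubgraph(const Model& model,
                                      getSupportedSubgraph_cb _hidl_cb) override {
        hidl_vec<bool> supported;
        supported.resize(model.operations.size());  // one flag per operation
        for (size_t i = 0; i < supported.size(); ++i) supported[i] = false;
        _hidl_cb(supported);
        return Void();
    }
    Return<sp<IPreparedModel>> prepareModel(const Model& /*model*/) override {
        return nullptr;  // a real driver returns its compiled-model object
    }
    Return<DeviceStatus> getStatus() override { return DeviceStatus::AVAILABLE; }
};

int main() {
    configureRpcThreadpool(1, true /*callerWillJoin*/);
    sp<SampleDevice> device = new SampleDevice();
    if (device->registerAsService() != OK) {  // "default" instance assumed
        return 1;
    }
    joinRpcThreadpool();
    return 0;
}
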
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
new file mode 100644
index 00000000..bb0cdaa4
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
@@ -0,0 +1,82 @@
1/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
18#define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
19
20#include <android/hardware/neuralnetworks/1.0/IDevice.h>
21#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
22#include <android/hardware/neuralnetworks/1.0/types.h>
23#include <android/hidl/allocator/1.0/IAllocator.h>
24
25#include <VtsHalHidlTargetTestBase.h>
26#include <VtsHalHidlTargetTestEnvBase.h>
27#include <gtest/gtest.h>
28#include <string>
29
30using ::android::hardware::neuralnetworks::V1_0::IDevice;
31using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
32using ::android::hardware::neuralnetworks::V1_0::Capabilities;
33using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
34using ::android::hardware::neuralnetworks::V1_0::Model;
35using ::android::hardware::neuralnetworks::V1_0::OperationType;
36using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
37using ::android::hardware::Return;
38using ::android::hardware::Void;
39using ::android::hardware::hidl_memory;
40using ::android::hardware::hidl_string;
41using ::android::hardware::hidl_vec;
42using ::android::hidl::allocator::V1_0::IAllocator;
43using ::android::hidl::memory::V1_0::IMemory;
44using ::android::sp;
45
46namespace android {
47namespace hardware {
48namespace neuralnetworks {
49namespace V1_0 {
50namespace vts {
51namespace functional {
52
53// A class for test environment setup
54class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
55 NeuralnetworksHidlEnvironment();
56 NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete;
57 NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete;
58 NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete;
59 NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
60
61 public:
62 static NeuralnetworksHidlEnvironment* getInstance();
63 virtual void registerTestServices() override;
64};
65
66// The main test class for NEURALNETWORKS HIDL HAL.
67class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
68 public:
69 virtual void SetUp() override;
70 virtual void TearDown() override;
71
72 sp<IDevice> device;
73};
74
75} // namespace functional
76} // namespace vts
77} // namespace V1_0
78} // namespace neuralnetworks
79} // namespace hardware
80} // namespace android
81
82#endif // VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H