Diffstat (limited to 'neuralnetworks/1.0/vts')
-rw-r--r--  neuralnetworks/1.0/vts/functional/Android.bp                              |  37
-rw-r--r--  neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp  | 245
-rw-r--r--  neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h    |  82
3 files changed, 364 insertions, 0 deletions
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
new file mode 100644
index 00000000..96eb4cb5
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -0,0 +1,37 @@
1//
2// Copyright (C) 2017 The Android Open Source Project
3//
4// Licensed under the Apache License, Version 2.0 (the "License");
5// you may not use this file except in compliance with the License.
6// You may obtain a copy of the License at
7//
8// http://www.apache.org/licenses/LICENSE-2.0
9//
10// Unless required by applicable law or agreed to in writing, software
11// distributed under the License is distributed on an "AS IS" BASIS,
12// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13// See the License for the specific language governing permissions and
14// limitations under the License.
15//
16
17cc_test {
18 name: "VtsHalNeuralnetworksV1_0TargetTest",
19 srcs: ["VtsHalNeuralnetworksV1_0TargetTest.cpp"],
20 defaults: ["hidl_defaults"],
21 shared_libs: [
22 "libbase",
23 "libhidlbase",
24 "libhidlmemory",
25 "libhidltransport",
26 "liblog",
27 "libutils",
28 "android.hardware.neuralnetworks@1.0",
29 "android.hidl.allocator@1.0",
30 "android.hidl.memory@1.0",
31 ],
32 static_libs: ["VtsHalHidlTargetTestBase"],
33 cflags: [
34 "-O0",
35 "-g",
36 ],
37}
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
new file mode 100644
index 00000000..9fa694df
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
@@ -0,0 +1,245 @@
1/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19#include "VtsHalNeuralnetworksV1_0TargetTest.h"
20#include <android-base/logging.h>
21#include <android/hidl/memory/1.0/IMemory.h>
22#include <hidlmemory/mapping.h>
23#include <string>
24
25namespace android {
26namespace hardware {
27namespace neuralnetworks {
28namespace V1_0 {
29namespace vts {
30namespace functional {
31
32// A class for test environment setup
33NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
34
35NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
36 // This has to return a "new" object because it is freed inside
37 // ::testing::AddGlobalTestEnvironment when the gtest framework is torn down
38 static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
39 return instance;
40}
41
42void NeuralnetworksHidlEnvironment::registerTestServices() {
43 registerTestService("android.hardware.neuralnetworks", "1.0", "IDevice");
44}
45
46// The main test class for NEURALNETWORKS HIDL HAL.
47void NeuralnetworksHidlTest::SetUp() {
48 std::string instance =
49 NeuralnetworksHidlEnvironment::getInstance()->getServiceName(IDevice::descriptor);
50 LOG(INFO) << "running vts test with instance: " << instance;
51 device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(instance);
52 ASSERT_NE(nullptr, device.get());
53}
54
55void NeuralnetworksHidlTest::TearDown() {}
56
57// create device test
58TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
59
60// status test
61TEST_F(NeuralnetworksHidlTest, StatusTest) {
62 DeviceStatus status = device->getStatus();
63 EXPECT_EQ(DeviceStatus::AVAILABLE, status);
64}
65
66// initialization
67TEST_F(NeuralnetworksHidlTest, InitializeTest) {
68 Return<void> ret = device->initialize([](const Capabilities& capabilities) {
69 EXPECT_NE(nullptr, capabilities.supportedOperationTypes.data());
70 EXPECT_NE(0ull, capabilities.supportedOperationTypes.size());
71 EXPECT_EQ(0u, static_cast<uint32_t>(capabilities.cachesCompilation) & ~0x1);
72 EXPECT_LT(0.0f, capabilities.bootupTime);
73 EXPECT_LT(0.0f, capabilities.float16Performance.execTime);
74 EXPECT_LT(0.0f, capabilities.float16Performance.powerUsage);
75 EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
76 EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
77 EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
78 EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
79 });
80 EXPECT_TRUE(ret.isOk());
81}
82
83namespace {
84// create the model
85Model createTestModel() {
86 const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
87 const uint32_t size = operand2Data.size() * sizeof(float);
88
89 const uint32_t operand1 = 0;
90 const uint32_t operand2 = 1;
91 const uint32_t operand3 = 2;
92
93 const std::vector<Operand> operands = {
94 {
95 .type = OperandType::FLOAT32,
96 .dimensions = {1, 2, 2, 1},
97 .numberOfConsumers = 1,
98 .scale = 0.0f,
99 .zeroPoint = 0,
100 .location = {.poolIndex = static_cast<uint32_t>(LocationValues::LOCATION_AT_RUN_TIME),
101 .offset = 0,
102 .length = 0},
103 },
104 {
105 .type = OperandType::FLOAT32,
106 .dimensions = {1, 2, 2, 1},
107 .numberOfConsumers = 1,
108 .scale = 0.0f,
109 .zeroPoint = 0,
110 .location = {.poolIndex = static_cast<uint32_t>(LocationValues::LOCATION_SAME_BLOCK),
111 .offset = 0,
112 .length = size},
113 },
114 {
115 .type = OperandType::FLOAT32,
116 .dimensions = {1, 2, 2, 1},
117 .numberOfConsumers = 0,
118 .scale = 0.0f,
119 .zeroPoint = 0,
120 .location = {.poolIndex = static_cast<uint32_t>(LocationValues::LOCATION_AT_RUN_TIME),
121 .offset = 0,
122 .length = 0},
123 },
124 };
125
126 const std::vector<Operation> operations = {{
127 .type = OperationType::ADD_FLOAT32, .inputs = {operand1, operand2}, .outputs = {operand3},
128 }};
129
130 const std::vector<uint32_t> inputIndexes = {operand1};
131 const std::vector<uint32_t> outputIndexes = {operand3};
132 const std::vector<uint8_t> operandValues(reinterpret_cast<const uint8_t*>(operand2Data.data()),
133 reinterpret_cast<const uint8_t*>(operand2Data.data()) +
134 operand2Data.size() * sizeof(float));
135 const std::vector<hidl_memory> pools = {};
136
137 return {
138 .operands = operands,
139 .operations = operations,
140 .inputIndexes = inputIndexes,
141 .outputIndexes = outputIndexes,
142 .operandValues = operandValues,
143 .pools = pools,
144 };
145}
146
147// allocator helper
148hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
149 hidl_memory memory;
150
151 sp<IAllocator> allocator = IAllocator::getService(type);
152 if (!allocator.get()) {
153 return {};
154 }
155
156 Return<void> ret = allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
157 ASSERT_TRUE(success);
158 memory = mem;
159 });
160 if (!ret.isOk()) {
161 return {};
162 }
163
164 return memory;
165}
166} // anonymous namespace
167
168// supported subgraph test
169TEST_F(NeuralnetworksHidlTest, SupportedSubgraphTest) {
170 Model model = createTestModel();
171 std::vector<bool> supported;
172 Return<void> ret = device->getSupportedSubgraph(
173 model, [&](const hidl_vec<bool>& hidl_supported) { supported = hidl_supported; });
174 ASSERT_TRUE(ret.isOk());
175 EXPECT_EQ(/*model.operations.size()*/ 0ull, supported.size());
176}
177
178// execute simple graph
179TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphTest) {
180 std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
181 std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
182 std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
183 const uint32_t INPUT = 0;
184 const uint32_t OUTPUT = 1;
185
186 // prepare request
187 Model model = createTestModel();
188 sp<IPreparedModel> preparedModel = device->prepareModel(model);
189 ASSERT_NE(nullptr, preparedModel.get());
190
191 // prepare inputs
192 uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
193 uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
194 std::vector<InputOutputInfo> inputs = {{
195 .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
196 }};
197 std::vector<InputOutputInfo> outputs = {{
198 .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
199 }};
200 std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
201 allocateSharedMemory(outputSize)};
202 ASSERT_NE(0ull, pools[INPUT].size());
203 ASSERT_NE(0ull, pools[OUTPUT].size());
204
205 // load data
206 sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
207 sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
208 ASSERT_NE(nullptr, inputMemory.get());
209 ASSERT_NE(nullptr, outputMemory.get());
210 float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
211 float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
212 ASSERT_NE(nullptr, inputPtr);
213 ASSERT_NE(nullptr, outputPtr);
214 std::copy(inputData.begin(), inputData.end(), inputPtr);
215 std::copy(outputData.begin(), outputData.end(), outputPtr);
216 inputMemory->commit();
217 outputMemory->commit();
218
219 // execute request
220 bool success = preparedModel->execute({.inputs = inputs, .outputs = outputs, .pools = pools});
221 EXPECT_TRUE(success);
222
223 // validate results { 1+5, 2+6, 3+7, 4+8 }
224 outputMemory->update();
225 std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
226 EXPECT_EQ(expectedData, outputData);
227}
228
229} // namespace functional
230} // namespace vts
231} // namespace V1_0
232} // namespace neuralnetworks
233} // namespace hardware
234} // namespace android
235
236using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;
237
238int main(int argc, char** argv) {
239 ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
240 ::testing::InitGoogleTest(&argc, argv);
241 NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
242
243 int status = RUN_ALL_TESTS();
244 return status;
245}
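The test above reaches the driver through the generated HIDL client proxy. As a point of reference, a minimal standalone client performing the same service lookup and status query as SetUp() and StatusTest might look like the sketch below; it assumes the "default" IDevice instance is registered on the device and reduces error handling to logging, so it is illustrative only and not part of this change.

// Illustrative sketch only -- not part of this change. A minimal standalone
// client that performs the same calls as SetUp() and StatusTest above,
// assuming the "default" IDevice instance is registered on the device.
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IDevice.h>

using ::android::sp;
using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
using ::android::hardware::neuralnetworks::V1_0::IDevice;

int main() {
    sp<IDevice> device = IDevice::getService();  // "default" instance
    if (device == nullptr) {
        LOG(ERROR) << "neuralnetworks IDevice service not found";
        return 1;
    }
    DeviceStatus status = device->getStatus();   // same query as StatusTest
    LOG(INFO) << "device available: " << (status == DeviceStatus::AVAILABLE);
    return 0;
}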
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
new file mode 100644
index 00000000..bb0cdaa4
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
@@ -0,0 +1,82 @@
1/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
18#define VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
19
20#include <android/hardware/neuralnetworks/1.0/IDevice.h>
21#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
22#include <android/hardware/neuralnetworks/1.0/types.h>
23#include <android/hidl/allocator/1.0/IAllocator.h>
24
25#include <VtsHalHidlTargetTestBase.h>
26#include <VtsHalHidlTargetTestEnvBase.h>
27#include <gtest/gtest.h>
28#include <string>
29
30using ::android::hardware::neuralnetworks::V1_0::IDevice;
31using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
32using ::android::hardware::neuralnetworks::V1_0::Capabilities;
33using ::android::hardware::neuralnetworks::V1_0::DeviceStatus;
34using ::android::hardware::neuralnetworks::V1_0::Model;
35using ::android::hardware::neuralnetworks::V1_0::OperationType;
36using ::android::hardware::neuralnetworks::V1_0::PerformanceInfo;
37using ::android::hardware::Return;
38using ::android::hardware::Void;
39using ::android::hardware::hidl_memory;
40using ::android::hardware::hidl_string;
41using ::android::hardware::hidl_vec;
42using ::android::hidl::allocator::V1_0::IAllocator;
43using ::android::hidl::memory::V1_0::IMemory;
44using ::android::sp;
45
46namespace android {
47namespace hardware {
48namespace neuralnetworks {
49namespace V1_0 {
50namespace vts {
51namespace functional {
52
53// A class for test environment setup
54class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
55 NeuralnetworksHidlEnvironment();
56 NeuralnetworksHidlEnvironment(const NeuralnetworksHidlEnvironment&) = delete;
57 NeuralnetworksHidlEnvironment(NeuralnetworksHidlEnvironment&&) = delete;
58 NeuralnetworksHidlEnvironment& operator=(const NeuralnetworksHidlEnvironment&) = delete;
59 NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
60
61 public:
62 static NeuralnetworksHidlEnvironment* getInstance();
63 virtual void registerTestServices() override;
64};
65
66// The main test class for NEURALNETWORKS HIDL HAL.
67class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
68 public:
69 virtual void SetUp() override;
70 virtual void TearDown() override;
71
72 sp<IDevice> device;
73};
74
75} // namespace functional
76} // namespace vts
77} // namespace V1_0
78} // namespace neuralnetworks
79} // namespace hardware
80} // namespace android
81
82#endif // VTS_HAL_NEURALNETWORKS_V1_0_TARGET_TESTS_H
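For completeness, the service instance that getServiceName()/getService() resolves in SetUp() has to be registered by a driver process. A hedged sketch of that registration is shown below; SampleDevice is a hypothetical class implementing this V1_0 IDevice interface and is not defined anywhere in this change.

// Illustrative sketch only -- not part of this change. How a driver process
// might register the instance that the test's SetUp() later looks up.
// SampleDevice is a hypothetical implementation of the V1_0 IDevice interface.
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <hidl/HidlTransportSupport.h>

using ::android::sp;
using ::android::hardware::configureRpcThreadpool;
using ::android::hardware::joinRpcThreadpool;
using ::android::hardware::neuralnetworks::V1_0::IDevice;

int main() {
    configureRpcThreadpool(1, true /*callerWillJoin*/);
    sp<IDevice> device = new SampleDevice();             // hypothetical implementation
    if (device->registerAsService() != ::android::OK) {  // registers as "default"
        return 1;
    }
    joinRpcThreadpool();  // serve requests until the process exits
    return 0;
}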