author      Lev Proleev             2018-10-01 05:18:31 -0500
committer   Przemyslaw Szczepaniak  2018-10-19 03:29:09 -0500
commit      68c8c174678f0881e4e031711397b6451479e36d (patch)
tree        8b544598e23f86f266253cf29dbaea79c42619b7 /neuralnetworks/1.2
parent      08662c6f66fa724a46b0ce1ae13a3c9358a658b4 (diff)
Add TENSOR_QUANT16_ASYMM to operand types
Add new OperandType::TENSOR_QUANT16_ASYMM. Add VTS validation for the new type.

Bug: 113561892
Test: NeuralNetworksTest_static
Test: VtsHalNeuralnetworksV1_0TargetTest
Test: VtsHalNeuralnetworksV1_1TargetTest
Test: VtsHalNeuralnetworksV1_2TargetTest
Change-Id: I4f9ed6a33d5d3ec227e9f335df71954c73edf344
Merged-In: I4f9ed6a33d5d3ec227e9f335df71954c73edf344
(cherry picked from commit 5d7c99527e7bad07d6ab5413bcfd14cec5df5f31)
Diffstat (limited to 'neuralnetworks/1.2')
-rw-r--r--   neuralnetworks/1.2/types.hal                          12
-rw-r--r--   neuralnetworks/1.2/vts/functional/ValidateModel.cpp   12
2 files changed, 20 insertions, 4 deletions
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 95e97c4b..0aa7cc20 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -30,6 +30,18 @@ enum OperandType : @1.0::OperandType {
      * represents false; any other value represents true.
      */
     BOOL = 6,
+    /**
+     * A tensor of 16 bit signed integers that represent real numbers.
+     *
+     * Attached to this tensor are two numbers that are used to convert the 16
+     * bit integer to the real value and vice versa. These two numbers are:
+     * - scale: a 32 bit floating point value greater than zero.
+     * - zeroPoint: a 32 bit integer, in range [-32768, 32767].
+     *
+     * The formula is:
+     * realValue = (integerValue - zeroPoint) * scale.
+     */
+    TENSOR_QUANT16_ASYMM = 7,
 };
 
 /**
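The doc comment above spells out the affine quantization scheme for the new type: scale > 0, zeroPoint in [-32768, 32767], and realValue = (integerValue - zeroPoint) * scale. Below is a minimal C++ sketch of that conversion, assuming a plain signed 16-bit storage type; the helper names are illustrative and are not part of the HAL or the NN runtime.

// Illustrative helpers for the formula documented on TENSOR_QUANT16_ASYMM above.
// quantizeToQuant16Asymm / dequantizeFromQuant16Asymm are hypothetical names.
#include <algorithm>
#include <cmath>
#include <cstdint>

static int16_t quantizeToQuant16Asymm(float realValue, float scale, int32_t zeroPoint) {
    // realValue / scale is rounded to the nearest integer, shifted by zeroPoint,
    // then clamped to the signed 16-bit storage range.
    int32_t q = static_cast<int32_t>(std::round(realValue / scale)) + zeroPoint;
    q = std::min<int32_t>(std::max<int32_t>(q, INT16_MIN), INT16_MAX);
    return static_cast<int16_t>(q);
}

static float dequantizeFromQuant16Asymm(int16_t integerValue, float scale, int32_t zeroPoint) {
    // Direct application of: realValue = (integerValue - zeroPoint) * scale
    return (integerValue - zeroPoint) * scale;
}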
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index 5a8b8c59..9af62589 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -129,10 +129,10 @@ static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
 ///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
 
 static const int32_t invalidOperandTypes[] = {
     static_cast<int32_t>(OperandType::FLOAT32) - 1,               // lower bound fundamental
-    static_cast<int32_t>(OperandType::BOOL) + 1,                   // upper bound fundamental
+    static_cast<int32_t>(OperandType::TENSOR_QUANT16_ASYMM) + 1,  // upper bound fundamental
     static_cast<int32_t>(OperandType::OEM) - 1,                    // lower bound OEM
     static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,        // upper bound OEM
 };
 
 static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
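The four probes above sit one value outside the two valid ranges, and this change moves the fundamental upper bound from BOOL to TENSOR_QUANT16_ASYMM. A rough sketch of the range check the mutation test expects a driver to apply to an operand's type field follows; isValidOperandTypeValue is a hypothetical helper (assuming the V1_2 OperandType enum is in scope) and is not part of ValidateModel.cpp.

// Hypothetical check mirroring the bounds probed by invalidOperandTypes[]:
// fundamental types span FLOAT32..TENSOR_QUANT16_ASYMM after this change,
// OEM types span OEM..TENSOR_OEM_BYTE.
static bool isValidOperandTypeValue(int32_t value) {
    const bool fundamental = value >= static_cast<int32_t>(OperandType::FLOAT32) &&
                             value <= static_cast<int32_t>(OperandType::TENSOR_QUANT16_ASYMM);
    const bool oem = value >= static_cast<int32_t>(OperandType::OEM) &&
                     value <= static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE);
    return fundamental || oem;
}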
@@ -160,6 +160,7 @@ static uint32_t getInvalidRank(OperandType type) {
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_INT32:
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             return 0;
         default:
             return 0;
@@ -190,6 +191,7 @@ static float getInvalidScale(OperandType type) {
         case OperandType::TENSOR_INT32:
             return -1.0f;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             return 0.0f;
         default:
             return 0.0f;
@@ -219,6 +221,7 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
         case OperandType::TENSOR_INT32:
             return {1};
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             return {-1, 256};
         default:
             return {};
@@ -271,6 +274,7 @@ static void mutateOperand(Operand* operand, OperandType type) {
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             newOperand.dimensions =
                 operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
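Together, the mutators above feed each operand type an out-of-range rank, scale, and zeroPoint and expect the driver to reject the model. A condensed sketch of the accept-side check for the new type, assuming only the ranges documented in the types.hal comment (scale > 0, zeroPoint in [-32768, 32767]); validateQuant16AsymmParams is an illustrative name, not part of the VTS suite.

// Illustrative parameter check for a TENSOR_QUANT16_ASYMM operand, based on the
// ranges documented in types.hal; not part of ValidateModel.cpp.
static bool validateQuant16AsymmParams(float scale, int32_t zeroPoint) {
    if (scale <= 0.0f) return false;                            // scale must be greater than zero
    if (zeroPoint < -32768 || zeroPoint > 32767) return false;  // zeroPoint must fit in 16 bits
    return true;
}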