author     android-build-team Robot  2018-04-29 02:23:53 -0500
committer  android-build-team Robot  2018-04-29 02:23:53 -0500
commit     5eb24b785a1db274c4a6d328c1e97dca94e59c84 (patch)
tree       272b9c75a367dc58982c89b3d7dc4887b65d05b1
parent     70563fb8dc9a8a6eb92a616e4ab2a43ca5735b92 (diff)
parent     89802f742382863ea26e7761bf773b0d4824608b (diff)
Snap for 4751833 from 89802f742382863ea26e7761bf773b0d4824608b to pi-release
Change-Id: I1c6cf7e24d34671c8ec16471e9d3319fbd7dde6d
Diffstat:
-rw-r--r--  audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp                                       |  20
-rw-r--r--  camera/device/3.2/default/CameraDeviceSession.cpp                                               |  18
-rw-r--r--  compatibility_matrices/Android.mk                                                               |  53
-rw-r--r--  compatibility_matrices/clear_vars.mk                                                            |  24
-rw-r--r--  compatibility_matrices/compatibility_matrix.mk                                                  |  40
-rw-r--r--  confirmationui/1.0/default/PlatformSpecifics.cpp                                                |   8
-rw-r--r--  confirmationui/1.0/default/PlatformSpecifics.h                                                  |   5
-rw-r--r--  confirmationui/1.0/vts/functional/VtsHalConfirmationUIV1_0TargetTest.cpp                        |  23
-rw-r--r--  confirmationui/support/include/android/hardware/confirmationui/1.0/generic/GenericOperation.h  |  17
-rw-r--r--  confirmationui/support/include/android/hardware/confirmationui/support/confirmationui_utils.h  |  18
-rw-r--r--  current.txt                                                                                     |   4
-rw-r--r--  neuralnetworks/1.0/types.hal                                                                    | 967
-rw-r--r--  neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp                                      |  20
-rw-r--r--  neuralnetworks/1.0/vts/functional/GeneratedTests.cpp                                            |   4
-rw-r--r--  neuralnetworks/1.0/vts/functional/Models.h                                                      |   2
-rw-r--r--  neuralnetworks/1.0/vts/functional/ValidateRequest.cpp                                           |   6
-rw-r--r--  neuralnetworks/1.1/types.hal                                                                    | 230
-rw-r--r--  neuralnetworks/1.1/vts/functional/GeneratedTests.cpp                                            |   2
-rw-r--r--  neuralnetworks/1.1/vts/functional/Models.h                                                      |   2
-rw-r--r--  neuralnetworks/1.1/vts/functional/ValidateRequest.cpp                                           |   6
20 files changed, 856 insertions, 613 deletions
diff --git a/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp b/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp
index de0df400..0bf32b57 100644
--- a/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp
+++ b/audio/core/4.0/vts/functional/AudioPrimaryHidlHalTest.cpp
@@ -74,6 +74,7 @@ using ReadParameters = ::android::hardware::audio::V4_0::IStreamIn::ReadParamete
74using ReadStatus = ::android::hardware::audio::V4_0::IStreamIn::ReadStatus; 74using ReadStatus = ::android::hardware::audio::V4_0::IStreamIn::ReadStatus;
75using ::android::hardware::audio::V4_0::IStreamOut; 75using ::android::hardware::audio::V4_0::IStreamOut;
76using ::android::hardware::audio::V4_0::IStreamOutCallback; 76using ::android::hardware::audio::V4_0::IStreamOutCallback;
77using ::android::hardware::audio::V4_0::MicrophoneInfo;
77using ::android::hardware::audio::V4_0::MmapBufferInfo; 78using ::android::hardware::audio::V4_0::MmapBufferInfo;
78using ::android::hardware::audio::V4_0::MmapPosition; 79using ::android::hardware::audio::V4_0::MmapPosition;
79using ::android::hardware::audio::V4_0::ParameterValue; 80using ::android::hardware::audio::V4_0::ParameterValue;
@@ -478,6 +479,17 @@ TEST_F(AudioPrimaryHidlTest, getParameters) {
478} 479}
479 480
480////////////////////////////////////////////////////////////////////////////// 481//////////////////////////////////////////////////////////////////////////////
482/////////////////////////////// getMicrophones ///////////////////////////////
483//////////////////////////////////////////////////////////////////////////////
484
485TEST_F(AudioPrimaryHidlTest, GetMicrophonesTest) {
486 doc::test("Make sure getMicrophones always succeeds");
487 hidl_vec<MicrophoneInfo> microphones;
488 ASSERT_OK(device->getMicrophones(returnIn(res, microphones)));
489 ASSERT_OK(res);
490}
491
492//////////////////////////////////////////////////////////////////////////////
481//////////////////////////////// debugDebug ////////////////////////////////// 493//////////////////////////////// debugDebug //////////////////////////////////
482////////////////////////////////////////////////////////////////////////////// 494//////////////////////////////////////////////////////////////////////////////
483 495
@@ -1081,6 +1093,14 @@ TEST_P(InputStreamTest, updateSinkMetadata) {
1081 ASSERT_OK(stream->updateSinkMetadata(initialMetadata)); 1093 ASSERT_OK(stream->updateSinkMetadata(initialMetadata));
1082} 1094}
1083 1095
1096TEST_P(InputStreamTest, getActiveMicrophones) {
1097 doc::test("Getting active microphones should always succeed");
1098 hidl_vec<MicrophoneInfo> microphones;
1099 ASSERT_OK(device->getMicrophones(returnIn(res, microphones)));
1100 ASSERT_OK(res);
1101 ASSERT_TRUE(microphones.size() > 0);
1102}
1103
1084////////////////////////////////////////////////////////////////////////////// 1104//////////////////////////////////////////////////////////////////////////////
1085///////////////////////////////// StreamOut ////////////////////////////////// 1105///////////////////////////////// StreamOut //////////////////////////////////
1086////////////////////////////////////////////////////////////////////////////// 1106//////////////////////////////////////////////////////////////////////////////
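The new GetMicrophonesTest and getActiveMicrophones cases above rely on the VTS helper returnIn() to unpack the generates-clause of IDevice::getMicrophones(). A minimal caller-side sketch of the same synchronous-callback pattern against the V4.0 interface (hasMicrophones and the Result::NOT_INITIALIZED sentinel are assumptions of this sketch, not code from the patch):

```cpp
// Illustrative only: unpacking getMicrophones() without the VTS returnIn() helper.
#include <android/hardware/audio/4.0/IDevice.h>
#include <android/hardware/audio/4.0/types.h>

using ::android::sp;
using ::android::hardware::hidl_vec;
using ::android::hardware::audio::V4_0::IDevice;
using ::android::hardware::audio::V4_0::MicrophoneInfo;
using ::android::hardware::audio::V4_0::Result;

bool hasMicrophones(const sp<IDevice>& device) {
    Result res = Result::NOT_INITIALIZED;  // assumed sentinel value
    hidl_vec<MicrophoneInfo> microphones;
    auto ret = device->getMicrophones(
        [&](Result r, const hidl_vec<MicrophoneInfo>& mics) {
            res = r;             // HAL-level status
            microphones = mics;  // microphone descriptors
        });
    // ret covers the binder transport; res covers the HAL-level result.
    return ret.isOk() && res == Result::OK && microphones.size() > 0;
}
```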
diff --git a/camera/device/3.2/default/CameraDeviceSession.cpp b/camera/device/3.2/default/CameraDeviceSession.cpp
index 60a57cde..1cef882f 100644
--- a/camera/device/3.2/default/CameraDeviceSession.cpp
+++ b/camera/device/3.2/default/CameraDeviceSession.cpp
@@ -393,7 +393,11 @@ void CameraDeviceSession::ResultBatcher::sendBatchShutterCbsLocked(
393 return; 393 return;
394 } 394 }
395 395
396 mCallback->notify(batch->mShutterMsgs); 396 auto ret = mCallback->notify(batch->mShutterMsgs);
397 if (!ret.isOk()) {
398 ALOGE("%s: notify shutter transaction failed: %s",
399 __FUNCTION__, ret.description().c_str());
400 }
397 batch->mShutterDelivered = true; 401 batch->mShutterDelivered = true;
398 batch->mShutterMsgs.clear(); 402 batch->mShutterMsgs.clear();
399} 403}
@@ -563,7 +567,11 @@ void CameraDeviceSession::ResultBatcher::sendBatchMetadataLocked(
563} 567}
564 568
565void CameraDeviceSession::ResultBatcher::notifySingleMsg(NotifyMsg& msg) { 569void CameraDeviceSession::ResultBatcher::notifySingleMsg(NotifyMsg& msg) {
566 mCallback->notify({msg}); 570 auto ret = mCallback->notify({msg});
571 if (!ret.isOk()) {
572 ALOGE("%s: notify transaction failed: %s",
573 __FUNCTION__, ret.description().c_str());
574 }
567 return; 575 return;
568} 576}
569 577
@@ -654,7 +662,11 @@ void CameraDeviceSession::ResultBatcher::invokeProcessCaptureResultCallback(
654 } 662 }
655 } 663 }
656 } 664 }
657 mCallback->processCaptureResult(results); 665 auto ret = mCallback->processCaptureResult(results);
666 if (!ret.isOk()) {
667 ALOGE("%s: processCaptureResult transaction failed: %s",
668 __FUNCTION__, ret.description().c_str());
669 }
658 mProcessCaptureResultLock.unlock(); 670 mProcessCaptureResultLock.unlock();
659} 671}
660 672
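The three camera hunks apply one pattern: capture the Return<void> from a callback transaction and log the transport failure instead of dropping it. A generic sketch of that pattern under stated assumptions (notifyChecked and the template parameters are placeholders, not code from this change):

```cpp
// Sketch of the Return<T> check used above. Every HIDL proxy call returns a
// Return<T>; isOk() is false when the binder transaction itself failed
// (e.g. the remote process died), independent of the HAL-level status.
#define LOG_TAG "NotifyCheckedExample"
#include <log/log.h>
#include <hidl/Status.h>

using ::android::hardware::Return;

template <typename Callback, typename Msg>
void notifyChecked(const Callback& callback, const Msg& msg) {
    Return<void> ret = callback->notify({msg});
    if (!ret.isOk()) {
        // Transport-level failure; the message is lost, so at least record it.
        ALOGE("%s: notify transaction failed: %s", __FUNCTION__,
              ret.description().c_str());
    }
}
```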
diff --git a/compatibility_matrices/Android.mk b/compatibility_matrices/Android.mk
index ee97433f..9e48588e 100644
--- a/compatibility_matrices/Android.mk
+++ b/compatibility_matrices/Android.mk
@@ -18,68 +18,64 @@ LOCAL_PATH := $(call my-dir)
18 18
19BUILD_FRAMEWORK_COMPATIBILITY_MATRIX := $(LOCAL_PATH)/compatibility_matrix.mk 19BUILD_FRAMEWORK_COMPATIBILITY_MATRIX := $(LOCAL_PATH)/compatibility_matrix.mk
20 20
21# Clear potential input variables to BUILD_FRAMEWORK_COMPATIBILITY_MATRIX 21my_kernel_config_data := kernel/configs
22LOCAL_ADD_VBMETA_VERSION :=
23LOCAL_ASSEMBLE_VINTF_ENV_VARS :=
24LOCAL_ASSEMBLE_VINTF_ENV_VARS_OVERRIDE :=
25LOCAL_ASSEMBLE_VINTF_ERROR_MESSAGE :=
26LOCAL_ASSEMBLE_VINTF_FLAGS :=
27LOCAL_KERNEL_VERSIONS :=
28LOCAL_GEN_FILE_DEPENDENCIES :=
29 22
30# Install all compatibility_matrix.*.xml to /system/etc/vintf 23# Install all compatibility_matrix.*.xml to /system/etc/vintf
31 24
32
33include $(CLEAR_VARS) 25include $(CLEAR_VARS)
26include $(LOCAL_PATH)/clear_vars.mk
34LOCAL_MODULE := framework_compatibility_matrix.legacy.xml 27LOCAL_MODULE := framework_compatibility_matrix.legacy.xml
35LOCAL_MODULE_STEM := compatibility_matrix.legacy.xml 28LOCAL_MODULE_STEM := compatibility_matrix.legacy.xml
36LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM) 29LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM)
37LOCAL_KERNEL_VERSIONS := \ 30LOCAL_KERNEL_CONFIG_DATA_PATHS := \
38 3.18.0 \ 31 3.18.0:$(my_kernel_config_data)/o/android-3.18 \
39 4.4.0 \ 32 4.4.0:$(my_kernel_config_data)/o/android-4.4 \
40 4.9.0 \ 33 4.9.0:$(my_kernel_config_data)/o/android-4.9 \
41 4.14.0 \
42 34
43include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX) 35include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
44 36
45include $(CLEAR_VARS) 37include $(CLEAR_VARS)
38include $(LOCAL_PATH)/clear_vars.mk
46LOCAL_MODULE := framework_compatibility_matrix.1.xml 39LOCAL_MODULE := framework_compatibility_matrix.1.xml
47LOCAL_MODULE_STEM := compatibility_matrix.1.xml 40LOCAL_MODULE_STEM := compatibility_matrix.1.xml
48LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM) 41LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM)
49LOCAL_KERNEL_VERSIONS := \ 42LOCAL_KERNEL_CONFIG_DATA_PATHS := \
50 3.18.0 \ 43 3.18.0:$(my_kernel_config_data)/o/android-3.18 \
51 4.4.0 \ 44 4.4.0:$(my_kernel_config_data)/o/android-4.4 \
52 4.9.0 \ 45 4.9.0:$(my_kernel_config_data)/o/android-4.9 \
53 4.14.0 \
54 46
55include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX) 47include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
56 48
57include $(CLEAR_VARS) 49include $(CLEAR_VARS)
50include $(LOCAL_PATH)/clear_vars.mk
58LOCAL_MODULE := framework_compatibility_matrix.2.xml 51LOCAL_MODULE := framework_compatibility_matrix.2.xml
59LOCAL_MODULE_STEM := compatibility_matrix.2.xml 52LOCAL_MODULE_STEM := compatibility_matrix.2.xml
60LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM) 53LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM)
61LOCAL_KERNEL_VERSIONS := \ 54LOCAL_KERNEL_CONFIG_DATA_PATHS := \
62 3.18.0 \ 55 3.18.0:$(my_kernel_config_data)/o-mr1/android-3.18 \
63 4.4.0 \ 56 4.4.0:$(my_kernel_config_data)/o-mr1/android-4.4 \
64 4.9.0 \ 57 4.9.0:$(my_kernel_config_data)/o-mr1/android-4.9 \
65 4.14.0 \
66 58
67include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX) 59include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
68 60
69include $(CLEAR_VARS) 61include $(CLEAR_VARS)
62include $(LOCAL_PATH)/clear_vars.mk
70LOCAL_MODULE := framework_compatibility_matrix.3.xml 63LOCAL_MODULE := framework_compatibility_matrix.3.xml
71LOCAL_MODULE_STEM := compatibility_matrix.3.xml 64LOCAL_MODULE_STEM := compatibility_matrix.3.xml
72LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM) 65LOCAL_SRC_FILES := $(LOCAL_MODULE_STEM)
73LOCAL_KERNEL_VERSIONS := \ 66LOCAL_KERNEL_CONFIG_DATA_PATHS := \
74 4.4.0 \ 67 4.4.0:$(my_kernel_config_data)/android-4.4 \
75 4.9.0 \ 68 4.9.0:$(my_kernel_config_data)/android-4.9 \
76 4.14.0 \ 69 4.14.0:$(my_kernel_config_data)/android-4.14 \
77 70
78include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX) 71include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
79 72
73my_kernel_config_data :=
74
80# Framework Compatibility Matrix (common to all FCM versions) 75# Framework Compatibility Matrix (common to all FCM versions)
81 76
82include $(CLEAR_VARS) 77include $(CLEAR_VARS)
78include $(LOCAL_PATH)/clear_vars.mk
83LOCAL_MODULE := framework_compatibility_matrix.device.xml 79LOCAL_MODULE := framework_compatibility_matrix.device.xml
84LOCAL_MODULE_STEM := compatibility_matrix.device.xml 80LOCAL_MODULE_STEM := compatibility_matrix.device.xml
85# define LOCAL_MODULE_CLASS for local-generated-sources-dir. 81# define LOCAL_MODULE_CLASS for local-generated-sources-dir.
@@ -126,6 +122,7 @@ include $(BUILD_FRAMEWORK_COMPATIBILITY_MATRIX)
126# Framework Compatibility Matrix 122# Framework Compatibility Matrix
127 123
128include $(CLEAR_VARS) 124include $(CLEAR_VARS)
125include $(LOCAL_PATH)/clear_vars.mk
129LOCAL_MODULE := framework_compatibility_matrix.xml 126LOCAL_MODULE := framework_compatibility_matrix.xml
130LOCAL_MODULE_STEM := compatibility_matrix.xml 127LOCAL_MODULE_STEM := compatibility_matrix.xml
131LOCAL_MODULE_PATH := $(TARGET_OUT) 128LOCAL_MODULE_PATH := $(TARGET_OUT)
diff --git a/compatibility_matrices/clear_vars.mk b/compatibility_matrices/clear_vars.mk
new file mode 100644
index 00000000..8fde301f
--- /dev/null
+++ b/compatibility_matrices/clear_vars.mk
@@ -0,0 +1,24 @@
1#
2# Copyright (C) 2017 The Android Open Source Project
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15#
16
17# Clear input variables to BUILD_FRAMEWORK_COMPATIBILITY_MATRIX
18LOCAL_ADD_VBMETA_VERSION :=
19LOCAL_ASSEMBLE_VINTF_ENV_VARS :=
20LOCAL_ASSEMBLE_VINTF_ENV_VARS_OVERRIDE :=
21LOCAL_ASSEMBLE_VINTF_ERROR_MESSAGE :=
22LOCAL_ASSEMBLE_VINTF_FLAGS :=
23LOCAL_KERNEL_CONFIG_DATA_PATHS :=
24LOCAL_GEN_FILE_DEPENDENCIES :=
diff --git a/compatibility_matrices/compatibility_matrix.mk b/compatibility_matrices/compatibility_matrix.mk
index 6dc2b4fb..1b6fd3b4 100644
--- a/compatibility_matrices/compatibility_matrix.mk
+++ b/compatibility_matrices/compatibility_matrix.mk
@@ -14,17 +14,6 @@
14# limitations under the License. 14# limitations under the License.
15# 15#
16 16
17###########################################################
18## Remove minor revision from a kernel version. For example,
19## 3.18.0 becomes 3.18.
20## $(1): kernel version
21###########################################################
22define remove-minor-revision
23$(strip $(subst $(space),.,$(wordlist 1,2,$(subst .,$(space),$(strip $(1))))))
24endef
25
26# $(warning $(call remove-minor-revision,3.18.0))
27
28##### Input Variables: 17##### Input Variables:
29# LOCAL_MODULE: required. Module name for the build system. 18# LOCAL_MODULE: required. Module name for the build system.
30# LOCAL_MODULE_CLASS: optional. Default is ETC. 19# LOCAL_MODULE_CLASS: optional. Default is ETC.
@@ -42,8 +31,8 @@ endef
42# LOCAL_ASSEMBLE_VINTF_ENV_VARS_OVERRIDE: Add a list of environment variables that is local to 31# LOCAL_ASSEMBLE_VINTF_ENV_VARS_OVERRIDE: Add a list of environment variables that is local to
43# assemble_vintf invocation. Format is "VINTF_ENFORCE_NO_UNUSED_HALS=true". 32# assemble_vintf invocation. Format is "VINTF_ENFORCE_NO_UNUSED_HALS=true".
44# LOCAL_ASSEMBLE_VINTF_FLAGS: Add additional command line arguments to assemble_vintf invocation. 33# LOCAL_ASSEMBLE_VINTF_FLAGS: Add additional command line arguments to assemble_vintf invocation.
45# LOCAL_KERNEL_VERSIONS: Parse kernel configurations and add to the output matrix 34# LOCAL_KERNEL_CONFIG_DATA_PATHS: Paths to search for kernel config requirements. Format for each is
46# (corresponds to <kernel> tags.) 35# <kernel version x.y.z>:<path that contains android-base*.cfg>.
47# LOCAL_GEN_FILE_DEPENDENCIES: A list of additional dependencies for the generated file. 36# LOCAL_GEN_FILE_DEPENDENCIES: A list of additional dependencies for the generated file.
48 37
49ifndef LOCAL_MODULE 38ifndef LOCAL_MODULE
@@ -88,14 +77,13 @@ endif # BOARD_AVB_ENABLE
88$(GEN): PRIVATE_ENV_VARS += FRAMEWORK_VBMETA_VERSION 77$(GEN): PRIVATE_ENV_VARS += FRAMEWORK_VBMETA_VERSION
89endif # LOCAL_ADD_VBMETA_VERSION 78endif # LOCAL_ADD_VBMETA_VERSION
90 79
91ifneq (,$(strip $(LOCAL_KERNEL_VERSIONS))) 80ifneq (,$(strip $(LOCAL_KERNEL_CONFIG_DATA_PATHS)))
92$(GEN): PRIVATE_KERNEL_CONFIG_DATA := kernel/configs 81$(GEN): PRIVATE_KERNEL_CONFIG_DATA_PATHS := $(LOCAL_KERNEL_CONFIG_DATA_PATHS)
93$(GEN): PRIVATE_KERNEL_VERSIONS := $(LOCAL_KERNEL_VERSIONS) 82$(GEN): $(foreach pair,$(PRIVATE_KERNEL_CONFIG_DATA_PATHS),\
94$(GEN): $(foreach version,$(PRIVATE_KERNEL_VERSIONS),\ 83 $(wildcard $(call word-colon,2,$(pair))/android-base*.cfg))
95 $(wildcard $(PRIVATE_KERNEL_CONFIG_DATA)/android-$(call remove-minor-revision,$(version))/android-base*.cfg)) 84$(GEN): PRIVATE_FLAGS += $(foreach pair,$(PRIVATE_KERNEL_CONFIG_DATA_PATHS),\
96$(GEN): PRIVATE_FLAGS += $(foreach version,$(PRIVATE_KERNEL_VERSIONS),\ 85 --kernel=$(call word-colon,1,$(pair)):$(call normalize-path-list,\
97 --kernel=$(version):$(call normalize-path-list,\ 86 $(wildcard $(call word-colon,2,$(pair))/android-base*.cfg)))
98 $(wildcard $(PRIVATE_KERNEL_CONFIG_DATA)/android-$(call remove-minor-revision,$(version))/android-base*.cfg)))
99endif 87endif
100 88
101my_matrix_src_files := \ 89my_matrix_src_files := \
@@ -124,15 +112,7 @@ LOCAL_PREBUILT_MODULE_FILE := $(GEN)
124LOCAL_SRC_FILES := 112LOCAL_SRC_FILES :=
125LOCAL_GENERATED_SOURCES := 113LOCAL_GENERATED_SOURCES :=
126 114
127LOCAL_ADD_VBMETA_VERSION := 115include $(LOCAL_PATH)/clear_vars.mk
128LOCAL_ASSEMBLE_VINTF_ENV_VARS :=
129LOCAL_ASSEMBLE_VINTF_ENV_VARS_OVERRIDE :=
130LOCAL_ASSEMBLE_VINTF_ERROR_MESSAGE :=
131LOCAL_ASSEMBLE_VINTF_FLAGS :=
132LOCAL_KERNEL_VERSIONS :=
133LOCAL_GEN_FILE_DEPENDENCIES :=
134my_matrix_src_files := 116my_matrix_src_files :=
135 117
136include $(BUILD_PREBUILT) 118include $(BUILD_PREBUILT)
137
138remove-minor-revision :=
diff --git a/confirmationui/1.0/default/PlatformSpecifics.cpp b/confirmationui/1.0/default/PlatformSpecifics.cpp
index dd039e22..03d61654 100644
--- a/confirmationui/1.0/default/PlatformSpecifics.cpp
+++ b/confirmationui/1.0/default/PlatformSpecifics.cpp
@@ -36,11 +36,11 @@ MonotonicClockTimeStamper::TimeStamp MonotonicClockTimeStamper::now() {
36 } 36 }
37} 37}
38 38
39support::NullOr<support::array<uint8_t, 32>> HMacImplementation::hmac256( 39support::NullOr<support::hmac_t> HMacImplementation::hmac256(
40 const uint8_t key[32], std::initializer_list<support::ByteBufferProxy> buffers) { 40 const support::auth_token_key_t& key, std::initializer_list<support::ByteBufferProxy> buffers) {
41 HMAC_CTX hmacCtx; 41 HMAC_CTX hmacCtx;
42 HMAC_CTX_init(&hmacCtx); 42 HMAC_CTX_init(&hmacCtx);
43 if (!HMAC_Init_ex(&hmacCtx, key, 32, EVP_sha256(), nullptr)) { 43 if (!HMAC_Init_ex(&hmacCtx, key.data(), key.size(), EVP_sha256(), nullptr)) {
44 return {}; 44 return {};
45 } 45 }
46 for (auto& buffer : buffers) { 46 for (auto& buffer : buffers) {
@@ -48,7 +48,7 @@ support::NullOr<support::array<uint8_t, 32>> HMacImplementation::hmac256(
48 return {}; 48 return {};
49 } 49 }
50 } 50 }
51 support::array<uint8_t, 32> result; 51 support::hmac_t result;
52 if (!HMAC_Final(&hmacCtx, result.data(), nullptr)) { 52 if (!HMAC_Final(&hmacCtx, result.data(), nullptr)) {
53 return {}; 53 return {};
54 } 54 }
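hmac256() now takes the fixed-size support::auth_token_key_t and returns NullOr<support::hmac_t> instead of a raw uint8_t[32]. A standalone sketch of the same shape using BoringSSL/OpenSSL's one-shot HMAC() with std::array and std::optional as stand-ins (the real implementation keeps the incremental HMAC_CTX API shown above):

```cpp
#include <array>
#include <cstdint>
#include <optional>
#include <openssl/evp.h>
#include <openssl/hmac.h>

// Stand-ins for support::auth_token_key_t / support::hmac_t.
using auth_token_key_t = std::array<uint8_t, 32>;
using hmac_t = std::array<uint8_t, 32>;

std::optional<hmac_t> hmac256(const auth_token_key_t& key,
                              const uint8_t* data, size_t size) {
    hmac_t result{};
    unsigned int out_len = 0;
    // One-shot HMAC: returns nullptr on failure; SHA-256 always yields
    // 32 bytes, matching hmac_t by construction.
    if (HMAC(EVP_sha256(), key.data(), key.size(), data, size,
             result.data(), &out_len) == nullptr ||
        out_len != result.size()) {
        return std::nullopt;
    }
    return result;
}
```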
diff --git a/confirmationui/1.0/default/PlatformSpecifics.h b/confirmationui/1.0/default/PlatformSpecifics.h
index 488da6d1..29f299c7 100644
--- a/confirmationui/1.0/default/PlatformSpecifics.h
+++ b/confirmationui/1.0/default/PlatformSpecifics.h
@@ -48,8 +48,9 @@ struct MonotonicClockTimeStamper {
48 48
49class HMacImplementation { 49class HMacImplementation {
50 public: 50 public:
51 static support::NullOr<support::array<uint8_t, 32>> hmac256( 51 static support::NullOr<support::hmac_t> hmac256(
52 const uint8_t key[32], std::initializer_list<support::ByteBufferProxy> buffers); 52 const support::auth_token_key_t& key,
53 std::initializer_list<support::ByteBufferProxy> buffers);
53}; 54};
54 55
55class MyOperation : public generic::Operation<sp<IConfirmationResultCallback>, 56class MyOperation : public generic::Operation<sp<IConfirmationResultCallback>,
diff --git a/confirmationui/1.0/vts/functional/VtsHalConfirmationUIV1_0TargetTest.cpp b/confirmationui/1.0/vts/functional/VtsHalConfirmationUIV1_0TargetTest.cpp
index 463bb40a..278d1f44 100644
--- a/confirmationui/1.0/vts/functional/VtsHalConfirmationUIV1_0TargetTest.cpp
+++ b/confirmationui/1.0/vts/functional/VtsHalConfirmationUIV1_0TargetTest.cpp
@@ -46,13 +46,16 @@ namespace V1_0 {
46 46
47namespace test { 47namespace test {
48namespace { 48namespace {
49const support::auth_token_key_t testKey(static_cast<uint8_t>(TestKeyBits::BYTE));
50
49class HMacImplementation { 51class HMacImplementation {
50 public: 52 public:
51 static support::NullOr<support::array<uint8_t, 32>> hmac256( 53 static support::NullOr<support::hmac_t> hmac256(
52 const uint8_t key[32], std::initializer_list<support::ByteBufferProxy> buffers) { 54 const support::auth_token_key_t& key,
55 std::initializer_list<support::ByteBufferProxy> buffers) {
53 HMAC_CTX hmacCtx; 56 HMAC_CTX hmacCtx;
54 HMAC_CTX_init(&hmacCtx); 57 HMAC_CTX_init(&hmacCtx);
55 if (!HMAC_Init_ex(&hmacCtx, key, 32, EVP_sha256(), nullptr)) { 58 if (!HMAC_Init_ex(&hmacCtx, key.data(), key.size(), EVP_sha256(), nullptr)) {
56 return {}; 59 return {};
57 } 60 }
58 for (auto& buffer : buffers) { 61 for (auto& buffer : buffers) {
@@ -60,7 +63,7 @@ class HMacImplementation {
60 return {}; 63 return {};
61 } 64 }
62 } 65 }
63 support::array<uint8_t, 32> result; 66 support::hmac_t result;
64 if (!HMAC_Final(&hmacCtx, result.data(), nullptr)) { 67 if (!HMAC_Final(&hmacCtx, result.data(), nullptr)) {
65 return {}; 68 return {};
66 } 69 }
@@ -70,23 +73,15 @@ class HMacImplementation {
70 73
71using HMacer = support::HMac<HMacImplementation>; 74using HMacer = support::HMac<HMacImplementation>;
72 75
73constexpr uint8_t testKeyByte = static_cast<uint8_t>(TestKeyBits::BYTE);
74
75template <typename... Data> 76template <typename... Data>
76hidl_vec<uint8_t> testHMAC(const Data&... data) { 77hidl_vec<uint8_t> testHMAC(const Data&... data) {
77 constexpr uint8_t testKey[32] = {testKeyByte, testKeyByte, testKeyByte, testKeyByte,
78 testKeyByte, testKeyByte, testKeyByte, testKeyByte,
79 testKeyByte, testKeyByte, testKeyByte, testKeyByte,
80 testKeyByte, testKeyByte, testKeyByte, testKeyByte};
81 constexpr uint8_t hmac_size_bytes = sizeof testKey;
82
83 auto hmac = HMacer::hmac256(testKey, data...); 78 auto hmac = HMacer::hmac256(testKey, data...);
84 if (!hmac.isOk()) { 79 if (!hmac.isOk()) {
85 EXPECT_TRUE(false) << "Failed to compute test hmac. This is a self-test error."; 80 EXPECT_TRUE(false) << "Failed to compute test hmac. This is a self-test error.";
86 return {}; 81 return {};
87 } 82 }
88 hidl_vec<uint8_t> result(hmac_size_bytes); 83 hidl_vec<uint8_t> result(hmac.value().size());
89 copy(hmac.value().data(), hmac.value().data() + hmac_size_bytes, result.data()); 84 copy(hmac.value().data(), hmac.value().data() + hmac.value().size(), result.data());
90 return result; 85 return result;
91} 86}
92 87
diff --git a/confirmationui/support/include/android/hardware/confirmationui/1.0/generic/GenericOperation.h b/confirmationui/support/include/android/hardware/confirmationui/1.0/generic/GenericOperation.h
index b4809429..b1c322ce 100644
--- a/confirmationui/support/include/android/hardware/confirmationui/1.0/generic/GenericOperation.h
+++ b/confirmationui/support/include/android/hardware/confirmationui/1.0/generic/GenericOperation.h
@@ -99,7 +99,8 @@ class Operation {
99 99
100 void setPending() { error_ = ResponseCode::OK; } 100 void setPending() { error_ = ResponseCode::OK; }
101 101
102 void setHmacKey(const uint8_t (&key)[32]) { hmacKey_ = {key}; } 102 void setHmacKey(const auth_token_key_t& key) { hmacKey_ = key; }
103 NullOr<auth_token_key_t> hmacKey() const { return hmacKey_; }
103 104
104 void abort() { 105 void abort() {
105 if (isPending()) { 106 if (isPending()) {
@@ -112,7 +113,7 @@ class Operation {
112 if (isPending()) error_ = ResponseCode::Canceled; 113 if (isPending()) error_ = ResponseCode::Canceled;
113 } 114 }
114 115
115 void finalize(const uint8_t key[32]) { 116 void finalize(const auth_token_key_t& key) {
116 if (error_ == ResponseCode::Ignored) return; 117 if (error_ == ResponseCode::Ignored) return;
117 resultCB_->result(error_, getMessage(), userConfirm(key)); 118 resultCB_->result(error_, getMessage(), userConfirm(key));
118 error_ = ResponseCode::Ignored; 119 error_ = ResponseCode::Ignored;
@@ -127,11 +128,7 @@ class Operation {
127 } 128 }
128 129
129 ResponseCode deliverSecureInputEvent(const HardwareAuthToken& secureInputToken) { 130 ResponseCode deliverSecureInputEvent(const HardwareAuthToken& secureInputToken) {
130 constexpr uint8_t testKeyByte = static_cast<uint8_t>(TestKeyBits::BYTE); 131 const auth_token_key_t testKey(static_cast<uint8_t>(TestKeyBits::BYTE));
131 constexpr uint8_t testKey[32] = {testKeyByte, testKeyByte, testKeyByte, testKeyByte,
132 testKeyByte, testKeyByte, testKeyByte, testKeyByte,
133 testKeyByte, testKeyByte, testKeyByte, testKeyByte,
134 testKeyByte, testKeyByte, testKeyByte, testKeyByte};
135 132
136 auto hmac = HMacer::hmac256(testKey, "\0", bytes_cast(secureInputToken.challenge), 133 auto hmac = HMacer::hmac256(testKey, "\0", bytes_cast(secureInputToken.challenge),
137 bytes_cast(secureInputToken.userId), 134 bytes_cast(secureInputToken.userId),
@@ -171,7 +168,7 @@ class Operation {
171 result.setToExternal(formattedMessageBuffer_, formattedMessageLength_); 168 result.setToExternal(formattedMessageBuffer_, formattedMessageLength_);
172 return result; 169 return result;
173 } 170 }
174 hidl_vec<uint8_t> userConfirm(const uint8_t key[32]) { 171 hidl_vec<uint8_t> userConfirm(const auth_token_key_t& key) {
175 if (error_ != ResponseCode::OK) return {}; 172 if (error_ != ResponseCode::OK) return {};
176 confirmationTokenScratchpad_ = HMacer::hmac256(key, "confirmation token", getMessage()); 173 confirmationTokenScratchpad_ = HMacer::hmac256(key, "confirmation token", getMessage());
177 if (!confirmationTokenScratchpad_.isOk()) { 174 if (!confirmationTokenScratchpad_.isOk()) {
@@ -188,10 +185,10 @@ class Operation {
188 uint8_t formattedMessageBuffer_[uint32_t(MessageSize::MAX)]; 185 uint8_t formattedMessageBuffer_[uint32_t(MessageSize::MAX)];
189 char promptStringBuffer_[uint32_t(MessageSize::MAX)]; 186 char promptStringBuffer_[uint32_t(MessageSize::MAX)];
190 size_t formattedMessageLength_ = 0; 187 size_t formattedMessageLength_ = 0;
191 NullOr<array<uint8_t, 32>> confirmationTokenScratchpad_; 188 NullOr<hmac_t> confirmationTokenScratchpad_;
192 Callback resultCB_; 189 Callback resultCB_;
193 typename TimeStamper::TimeStamp startTime_; 190 typename TimeStamper::TimeStamp startTime_;
194 NullOr<array<uint8_t, 32>> hmacKey_; 191 NullOr<auth_token_key_t> hmacKey_;
195}; 192};
196 193
197} // namespace 194} // namespace
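The GenericOperation.h hunks replace `const uint8_t key[32]` parameters with `const auth_token_key_t&`. The point of the change is that an array parameter decays to a plain pointer, so the 32-byte length is never enforced at the call site, whereas a fixed-size array type carries the size in the type. A small illustration (names and bodies are hypothetical, not from this change):

```cpp
#include <array>
#include <cstdint>

using auth_token_key_t = std::array<uint8_t, 32>;

// Decays to const uint8_t*: a buffer of any length compiles.
void weak(const uint8_t key[32]) { (void)key; }

// Size is part of the type: only 32-byte keys compile.
void strong(const auth_token_key_t& key) { (void)key; }

void demo() {
    uint8_t short_buf[16] = {};
    auth_token_key_t key{};
    weak(short_buf);       // compiles, silently the wrong size
    strong(key);           // ok
    // strong(short_buf);  // would not compile: size mismatch is caught
}
```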
diff --git a/confirmationui/support/include/android/hardware/confirmationui/support/confirmationui_utils.h b/confirmationui/support/include/android/hardware/confirmationui/support/confirmationui_utils.h
index d5514336..a01b5e3c 100644
--- a/confirmationui/support/include/android/hardware/confirmationui/support/confirmationui_utils.h
+++ b/confirmationui/support/include/android/hardware/confirmationui/support/confirmationui_utils.h
@@ -58,7 +58,8 @@ class NullOr {
58 58
59 public: 59 public:
60 NullOr() : value_(initializer_t<ValueT>::init()), null_(true) {} 60 NullOr() : value_(initializer_t<ValueT>::init()), null_(true) {}
61 NullOr(ValueT&& value) : value_(std::forward<ValueT>(value)), null_(false) {} 61 template <typename T>
62 NullOr(T&& value) : value_(std::forward<T>(value)), null_(false) {}
62 63
63 bool isOk() const { return !null_; } 64 bool isOk() const { return !null_; }
64 65
@@ -81,17 +82,23 @@ class array {
81 public: 82 public:
82 array() : data_{} {} 83 array() : data_{} {}
83 array(const T (&data)[elements]) { std::copy(data, data + elements, data_); } 84 array(const T (&data)[elements]) { std::copy(data, data + elements, data_); }
85 explicit array(const T& v) { fill(v); }
84 86
85 T* data() { return data_; } 87 T* data() { return data_; }
86 const T* data() const { return data_; } 88 const T* data() const { return data_; }
87 constexpr size_t size() const { return elements; } 89 constexpr size_t size() const { return elements; }
88 operator const array_type&() const { return data_; }
89 90
90 T* begin() { return data_; } 91 T* begin() { return data_; }
91 T* end() { return data_ + elements; } 92 T* end() { return data_ + elements; }
92 const T* begin() const { return data_; } 93 const T* begin() const { return data_; }
93 const T* end() const { return data_ + elements; } 94 const T* end() const { return data_ + elements; }
94 95
96 void fill(const T& v) {
97 for (size_t i = 0; i < elements; ++i) {
98 data_[i] = v;
99 }
100 }
101
95 private: 102 private:
96 array_type data_; 103 array_type data_;
97}; 104};
@@ -157,6 +164,11 @@ class ByteBufferProxy {
157 size_t size_; 164 size_t size_;
158}; 165};
159 166
167constexpr uint8_t auth_token_key_size = 32;
168constexpr uint8_t hmac_size_bytes = support::auth_token_key_size;
169using auth_token_key_t = array<uint8_t, auth_token_key_size>;
170using hmac_t = auth_token_key_t;
171
160/** 172/**
161 * Implementer are expected to provide an implementation with the following prototype: 173 * Implementer are expected to provide an implementation with the following prototype:
162 * static NullOr<array<uint8_t, 32>> hmac256(const uint8_t key[32], 174 * static NullOr<array<uint8_t, 32>> hmac256(const uint8_t key[32],
@@ -166,7 +178,7 @@ template <typename Impl>
166class HMac { 178class HMac {
167 public: 179 public:
168 template <typename... Data> 180 template <typename... Data>
169 static NullOr<array<uint8_t, 32>> hmac256(const uint8_t key[32], const Data&... data) { 181 static NullOr<hmac_t> hmac256(const auth_token_key_t& key, const Data&... data) {
170 return Impl::hmac256(key, {data...}); 182 return Impl::hmac256(key, {data...});
171 } 183 }
172}; 184};
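confirmationui_utils.h gains a templated NullOr constructor, an array fill constructor, and the auth_token_key_t/hmac_t aliases. A condensed, simplified sketch of the two pieces that carry the behavior (not the full support library; initializer_t and the conversion operator are omitted):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <utility>

// Minimal optional-like wrapper; the perfect-forwarding constructor is what
// lets NullOr<hmac_t> accept both lvalues and rvalues of the value type.
template <typename ValueT>
class NullOr {
  public:
    NullOr() : value_{}, null_(true) {}
    template <typename T>
    NullOr(T&& value) : value_(std::forward<T>(value)), null_(false) {}
    bool isOk() const { return !null_; }
    const ValueT& value() const { return value_; }

  private:
    ValueT value_;
    bool null_;
};

// Fixed-size array with the new fill constructor, mirroring
// auth_token_key_t(testKeyByte) as used by the tests above.
template <typename T, size_t elements>
class array {
  public:
    array() : data_{} {}
    explicit array(const T& v) { fill(v); }
    void fill(const T& v) { std::fill(data_, data_ + elements, v); }
    T* data() { return data_; }
    const T* data() const { return data_; }
    constexpr size_t size() const { return elements; }

  private:
    T data_[elements];
};

using auth_token_key_t = array<uint8_t, 32>;
using hmac_t = auth_token_key_t;
```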
diff --git a/current.txt b/current.txt
index 203f022f..36f19b7a 100644
--- a/current.txt
+++ b/current.txt
@@ -260,7 +260,7 @@ fb92e2b40f8e9d494e8fd3b4ac18499a3216342e7cff160714c3bbf3660b6e79 android.hardwar
2604e7169919d24fbe5573e5bcd683d0bd7abf553a4e6c34c41f9dfc1e12050db07 android.hardware.gnss@1.0::IGnssNavigationMessageCallback 2604e7169919d24fbe5573e5bcd683d0bd7abf553a4e6c34c41f9dfc1e12050db07 android.hardware.gnss@1.0::IGnssNavigationMessageCallback
2615804ca86611d72e5481f022b3a0c1b334217f2e4988dad25730c42af2d1f4d1c android.hardware.neuralnetworks@1.0::IDevice 2615804ca86611d72e5481f022b3a0c1b334217f2e4988dad25730c42af2d1f4d1c android.hardware.neuralnetworks@1.0::IDevice
26212e8dca4ab7d8aadd0ef8f1b438021938e2396139e85db2ed65783b08800aa52 android.hardware.neuralnetworks@1.0::IExecutionCallback 26212e8dca4ab7d8aadd0ef8f1b438021938e2396139e85db2ed65783b08800aa52 android.hardware.neuralnetworks@1.0::IExecutionCallback
263702f9a4cd3b7486a4b04f7155b737757ac2ca4b3548976d5782ad3cae9ff9780 android.hardware.neuralnetworks@1.0::types 26318e6885e184fe48401c2c53f1d1b8bfb07240f40c81ae6b9d2e336fca6efdbb7 android.hardware.neuralnetworks@1.0::types
264d4840db8efabdf1e4b344fc981cd36e5fe81a39aff6e199f6d06c1c8da413efd android.hardware.radio@1.0::types 264d4840db8efabdf1e4b344fc981cd36e5fe81a39aff6e199f6d06c1c8da413efd android.hardware.radio@1.0::types
265b280c4704dfcc548a9bf127b59b7c3578f460c50cce70a06b66fe0df8b27cff0 android.hardware.wifi@1.0::types 265b280c4704dfcc548a9bf127b59b7c3578f460c50cce70a06b66fe0df8b27cff0 android.hardware.wifi@1.0::types
266 266
@@ -339,7 +339,7 @@ b8c7ed58aa8740361e63d0ce9e7c94227572a629f356958840b34809d2393a7c android.hardwar
3394a2c0dc82780e6c90731725a103feab8ab6ecf85a64e049b9cbd2b2c61620fe1 android.hardware.media.bufferpool@1.0::IConnection 3394a2c0dc82780e6c90731725a103feab8ab6ecf85a64e049b9cbd2b2c61620fe1 android.hardware.media.bufferpool@1.0::IConnection
3406aef1218e5949f867b0104752ac536c1b707222a403341720de90141df129e3e android.hardware.media.bufferpool@1.0::types 3406aef1218e5949f867b0104752ac536c1b707222a403341720de90141df129e3e android.hardware.media.bufferpool@1.0::types
3417698dc2382a2eeb43541840e3ee624f34108efdfb976b2bfa7c13ef15fb8c4c4 android.hardware.neuralnetworks@1.1::IDevice 3417698dc2382a2eeb43541840e3ee624f34108efdfb976b2bfa7c13ef15fb8c4c4 android.hardware.neuralnetworks@1.1::IDevice
3425604001029a255648a9e955de0a822a48d9ba7cc259b106fb8be0cd43dc8eece android.hardware.neuralnetworks@1.1::types 34272cc6126632456e8fbb8776fe50150c3c4dd5d09145653193affb70785211dfa android.hardware.neuralnetworks@1.1::types
3438d3d86da0bfa4bf070970d8303c659f67f35d670c287d45a3f542e4fedadd578 android.hardware.nfc@1.1::INfc 3438d3d86da0bfa4bf070970d8303c659f67f35d670c287d45a3f542e4fedadd578 android.hardware.nfc@1.1::INfc
344e85f566698d2a2c28100e264fcf2c691a066756ddf8dd341d009ff50cfe10614 android.hardware.nfc@1.1::INfcClientCallback 344e85f566698d2a2c28100e264fcf2c691a066756ddf8dd341d009ff50cfe10614 android.hardware.nfc@1.1::INfcClientCallback
3455e278fcaa3287d397d8eebe1c22aaa28150f5caae1cf9381cd6dc32cb37899c5 android.hardware.nfc@1.1::types 3455e278fcaa3287d397d8eebe1c22aaa28150f5caae1cf9381cd6dc32cb37899c5 android.hardware.nfc@1.1::types
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index 8c07fcc3..4efa13ad 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -42,7 +42,8 @@ enum OperandType : int32_t {
42 TENSOR_FLOAT32 = 3, 42 TENSOR_FLOAT32 = 3,
43 /** A tensor of 32 bit integer values. */ 43 /** A tensor of 32 bit integer values. */
44 TENSOR_INT32 = 4, 44 TENSOR_INT32 = 4,
45 /** A tensor of 8 bit integers that represent real numbers. 45 /**
46 * A tensor of 8 bit integers that represent real numbers.
46 * 47 *
47 * Attached to this tensor are two numbers that can be used to convert the 48 * Attached to this tensor are two numbers that can be used to convert the
48 * 8 bit integer to the real value and vice versa. These two numbers are: 49 * 8 bit integer to the real value and vice versa. These two numbers are:
@@ -70,15 +71,17 @@ enum OperationType : int32_t {
70 /** 71 /**
71 * Adds two tensors, element-wise. 72 * Adds two tensors, element-wise.
72 * 73 *
73 * Takes two input tensors of identical type and compatible dimensions. The output 74 * Takes two input tensors of identical {@link OperandType} and compatible
74 * is the sum of both input tensors, optionally modified by an activation function. 75 * dimensions. The output is the sum of both input tensors, optionally
76 * modified by an activation function.
75 * 77 *
76 * Two dimensions are compatible when: 78 * Two dimensions are compatible when:
77 * 1. they are equal, or 79 * 1. they are equal, or
78 * 2. one of them is 1 80 * 2. one of them is 1
79 * 81 *
80 * The size of the output is the maximum size along each dimension of the input operands. 82 * The size of the output is the maximum size along each dimension of the
81 * It starts with the trailing dimensions, and works its way forward. 83 * input operands. It starts with the trailing dimensions, and works its
84 * way forward.
82 * 85 *
83 * Example: 86 * Example:
84 * 87 *
@@ -86,7 +89,7 @@ enum OperationType : int32_t {
86 * input2.dimension = {5, 4, 3, 1} 89 * input2.dimension = {5, 4, 3, 1}
87 * output.dimension = {5, 4, 3, 2} 90 * output.dimension = {5, 4, 3, 2}
88 * 91 *
89 * Supported tensor types: 92 * Supported tensor {@link OperandType}:
90 * * {@link OperandType::TENSOR_FLOAT32} 93 * * {@link OperandType::TENSOR_FLOAT32}
91 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 94 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
92 * 95 *
@@ -94,98 +97,119 @@ enum OperationType : int32_t {
94 * 97 *
95 * Inputs: 98 * Inputs:
96 * * 0: A tensor. 99 * * 0: A tensor.
97 * * 1: A tensor of the same type, and compatible dimensions as input0. 100 * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
98 * * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 101 * as input0.
99 * Specifies the activation to invoke on the result of each addition. 102 * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
103 * {@link FusedActivationFunc} values. Specifies the activation to
104 * invoke on the result.
100 * 105 *
101 * Outputs: 106 * Outputs:
102 * * 0: The sum, a tensor of the same type as input0. 107 * * 0: The sum, a tensor of the same {@link OperandType} as input0.
103 */ 108 */
104 ADD = 0, 109 ADD = 0,
105 110
106 /** 111 /**
107 * Performs a 2-D average pooling operation. 112 * Performs a 2-D average pooling operation.
108 * 113 *
109 * The output dimensions are functions of the filter dimensions, stride, and padding. 114 * The output dimensions are functions of the filter dimensions, stride, and
115 * padding.
110 * 116 *
111 * The values in the output tensor are computed as: 117 * The values in the output tensor are computed as:
112 * 118 *
113 * output[batch, row, col, channel] = 119 * output[batch, row, col, channel] =
114 * sum_{i, j}(input[batch, row + i, col + j, channel]) / sum(1) 120 * sum_{i, j}(input[batch, row + i, col + j, channel]) / sum(1)
115 * 121 *
116 * Supported tensor types: 122 * Supported tensor {@link OperandType}:
117 * * {@link OperandType::TENSOR_FLOAT32} 123 * * {@link OperandType::TENSOR_FLOAT32}
118 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 124 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
119 * 125 *
120 * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width, and Channels) 126 * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
121 * data layout. 127 * and Channels) data layout.
122 * 128 *
123 * Both explicit padding and implicit padding are supported. 129 * Both explicit padding and implicit padding are supported.
124 * 130 *
125 * Inputs (explicit padding): 131 * Inputs (explicit padding):
126 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 132 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
127 * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. 133 * the input.
128 * * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. 134 * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
129 * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. 135 * the left, in the ‘width’ dimension.
130 * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. 136 * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
131 * * 5: An INT32 value, specifying the stride when walking through input 137 * the right, in the ‘width’ dimension.
132 * in the ‘width’ dimension. 138 * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
133 * * 6: An INT32 value, specifying the stride when walking through input 139 * the top, in the ‘height’ dimension.
134 * in the ‘height’ dimension. 140 * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
135 * * 7: An INT32 value, specifying the filter width. 141 * the bottom, in the ‘height’ dimension.
136 * * 8: An INT32 value, specifying the filter height. 142 * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
137 * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 143 * walking through input in the ‘width’ dimension.
138 * Specifies the activation to invoke on the result of each addition. 144 * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
145 * walking through input in the ‘height’ dimension.
146 * * 7: An {@link OperandType::INT32} scalar, specifying the filter
147 * width.
148 * * 8: An {@link OperandType::INT32} scalar, specifying the filter
149 * height.
150 * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
151 * {@link FusedActivationFunc} values. Specifies the activation to
152 * invoke on the result.
139 * 153 *
140 * Inputs (implicit padding): 154 * Inputs (implicit padding):
141 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 155 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
142 * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the 156 * the input.
157 * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
158 * padding scheme, has to be one of the
143 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. 159 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
144 * * 2: An INT32 value, specifying the stride when walking through input 160 * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
145 * in the ‘width’ dimension. 161 * walking through input in the ‘width’ dimension.
146 * * 3: An INT32 value, specifying the stride when walking through input 162 * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
147 * in the ‘height’ dimension. 163 * walking through input in the ‘height’ dimension.
148 * * 4: An INT32 value, specifying the filter width. 164 * * 4: An {@link OperandType::INT32} scalar, specifying the filter
149 * * 5: An INT32 value, specifying the filter height. 165 * width.
150 * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 166 * * 5: An {@link OperandType::INT32} scalar, specifying the filter
151 * Specifies the activation to invoke on the result of each addition. 167 * height.
168 * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
169 * {@link FusedActivationFunc} values. Specifies the activation to
170 * invoke on the result.
152 * 171 *
153 * Outputs: 172 * Outputs:
154 * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth]. 173 * * 0: The output 4-D tensor, of shape
174 [batches, out_height, out_width, depth].
155 */ 175 */
156 AVERAGE_POOL_2D = 1, 176 AVERAGE_POOL_2D = 1,
157 177
158 /** 178 /**
159 * Concatenates the input tensors along the given dimension. 179 * Concatenates the input tensors along the given dimension.
160 * 180 *
161 * The input tensors must have identical type and the same dimensions except the 181 * The input tensors must have identical {@link OperandType} and the same
162 * dimension along the concatenation axis. 182 * dimensions except the dimension along the concatenation axis.
163 * 183 *
164 * Supported tensor types: 184 * Supported tensor {@link OperandType}:
165 * * {@link OperandType::TENSOR_FLOAT32} 185 * * {@link OperandType::TENSOR_FLOAT32}
166 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 186 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
167 * 187 *
168 * Supported tensor rank: up to 4 188 * Supported tensor rank: up to 4
169 * 189 *
170 * Inputs: 190 * Inputs:
171 * * 0 ~ n-1: The list of n input tensors, of shape [D0, D1, ..., Daxis(i), ..., Dm]. 191 * * 0 ~ n-1: The list of n input tensors, of shape
172 * For inputs of {@link OperandType::TENSOR_QUANT8_ASYMM} type, all 192 * [D0, D1, ..., Daxis(i), ..., Dm]. For inputs of
173 * input tensors must have the same scale and zeroPoint. 193 * {@link OperandType::TENSOR_QUANT8_ASYMM}, all input tensors
174 * * n: An INT32 value, specifying the concatenation axis. 194 * must have the same scale and zeroPoint.
195 * * n: An {@link OperandType::INT32} scalar, specifying the
196 * concatenation axis.
175 * 197 *
176 * Outputs: 198 * Outputs:
177 * * 0: The output, a tensor of the same type as the input tensors. 199 * * 0: The output, a tensor of the same {@link OperandType} as the input
178 * The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm]. 200 * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
179 */ 201 */
180 CONCATENATION = 2, 202 CONCATENATION = 2,
181 203
182 /** 204 /**
183 * Performs an 2-D convolution operation. 205 * Performs an 2-D convolution operation.
184 * 206 *
185 * The CONV_2D op sweeps a 2-D filter that can mix channels together over a batch of 207 * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
186 * images, applying the filter to each window of each image of the appropriate size. 208 * batch of images, applying the filter to each window of each image of the
209 * appropriate size.
187 * 210 *
188 * The output dimensions are functions of the filter dimensions, stride, and padding. 211 * The output dimensions are functions of the filter dimensions, stride, and
212 * padding.
189 * 213 *
190 * The values in the output tensor are computed as: 214 * The values in the output tensor are computed as:
191 * 215 *
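The reworded ADD documentation in the hunk above defines dimension compatibility (equal, or one of them is 1) and says the output takes the maximum size along each dimension, aligned from the trailing dimensions. A small sketch of that broadcast rule (broadcastShape is an illustrative helper, not part of the HAL):

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Broadcast output shape per the ADD rule: align shapes at the trailing
// dimension; each aligned pair must be equal or contain a 1.
std::vector<uint32_t> broadcastShape(std::vector<uint32_t> a,
                                     std::vector<uint32_t> b) {
    if (a.size() < b.size()) a.insert(a.begin(), b.size() - a.size(), 1);
    if (b.size() < a.size()) b.insert(b.begin(), a.size() - b.size(), 1);
    std::vector<uint32_t> out(a.size());
    for (size_t i = 0; i < a.size(); ++i) {
        assert(a[i] == b[i] || a[i] == 1 || b[i] == 1);  // compatibility check
        out[i] = std::max(a[i], b[i]);
    }
    return out;
}

// e.g. broadcastShape({4, 1, 2}, {5, 4, 3, 1}) yields {5, 4, 3, 2},
// matching the documented output dimension above.
```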
@@ -196,7 +220,7 @@ enum OperationType : int32_t {
196 * bias[channel] 220 * bias[channel]
197 * ) 221 * )
198 * 222 *
199 * Supported tensor types: 223 * Supported tensor {@link OperandType}:
200 * * {@link OperandType::TENSOR_FLOAT32} 224 * * {@link OperandType::TENSOR_FLOAT32}
201 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 225 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
202 * 226 *
@@ -205,63 +229,77 @@ enum OperationType : int32_t {
205 * Both explicit padding and implicit padding are supported. 229 * Both explicit padding and implicit padding are supported.
206 * 230 *
207 * Inputs (explicit padding): 231 * Inputs (explicit padding):
208 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. 232 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
209 * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in], 233 * specifying the input.
210 * specifying the filter. 234 * * 1: A 4-D tensor, of shape
235 * [depth_out, filter_height, filter_width, depth_in], specifying the
236 * filter.
211 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. 237 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
212 * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should 238 * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the bias
213 * also be of {@link OperandType::TENSOR_FLOAT32}. 239 * should also be of {@link OperandType::TENSOR_FLOAT32}. For input
214 * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias 240 * tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias
215 * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and 241 * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
216 * bias_scale == input_scale * filter_scale. 242 * 0 and bias_scale == input_scale * filter_scale.
217 * * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. 243 * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
218 * * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. 244 * the left, in the ‘width’ dimension.
219 * * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. 245 * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
220 * * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. 246 * the right, in the ‘width’ dimension.
221 * * 7: An INT32 value, specifying the stride when walking through input 247 * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
222 * in the ‘width’ dimension. 248 * the top, in the ‘height’ dimension.
223 * * 8: An INT32 value, specifying the stride when walking through input 249 * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
224 * in the ‘height’ dimension. 250 * the bottom, in the ‘height’ dimension.
225 * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 251 * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
226 * Specifies the activation to invoke on the result of each addition. 252 * walking through input in the ‘width’ dimension.
253 * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
254 * walking through input in the ‘height’ dimension.
255 * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
256 * {@link FusedActivationFunc} values. Specifies the activation to
257 * invoke on the result.
227 * 258 *
228 * Inputs (implicit padding): 259 * Inputs (implicit padding):
229 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. 260 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
230 * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in], 261 * specifying the input.
231 * specifying the filter. 262 * * 1: A 4-D tensor, of shape
232 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. 263 * [depth_out, filter_height, filter_width, depth_in], specifying the
233 * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should 264 * filter.
234 * also be of {@link OperandType::TENSOR_FLOAT32}. 265 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
235 * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias 266 * tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
236 * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and 267 * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
268 * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
269 * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
237 * bias_scale == input_scale * filter_scale. 270 * bias_scale == input_scale * filter_scale.
238 * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the 271 * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
272 * padding scheme, has to be one of the
239 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. 273 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
240 * * 4: An INT32 value, specifying the stride when walking through input 274 * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
241 * in the ‘width’ dimension. 275 * walking through input in the ‘width’ dimension.
242 * * 5: An INT32 value, specifying the stride when walking through input 276 * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
243 * in the ‘height’ dimension. 277 * walking through input in the ‘height’ dimension.
244 * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 278 * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
245 * Specifies the activation to invoke on the result of each addition. 279 * {@link FusedActivationFunc} values. Specifies the activation to
280 * invoke on the result.
246 * 281 *
247 * Outputs: 282 * Outputs:
248 * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out]. 283 * * 0: The output 4-D tensor, of shape
249 * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following 284 * [batches, out_height, out_width, depth_out]. For output tensor of
250 * condition must be satisfied: output_scale > input_scale * filter_scale. 285 * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
286 * must be satisfied: output_scale > input_scale * filter_scale.
251 */ 287 */
252 CONV_2D = 3, 288 CONV_2D = 3,
253 289
254 /** 290 /**
255 * Performs a depthwise 2-D convolution operation. 291 * Performs a depthwise 2-D convolution operation.
256 * 292 *
257 * Given an input tensor of shape [batches, height, width, depth_in] and a filter 293 * Given an input tensor of shape [batches, height, width, depth_in] and a
258 * tensor of shape [1, filter_height, filter_width, depth_out] containing 294 * filter tensor of shape [1, filter_height, filter_width, depth_out]
259 * depth_out convolutional filters of depth 1, DEPTHWISE_CONV applies a different 295 * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
260 * filter to each input channel (expanding from 1 channel to channel_multiplier channels 296 * applies a different filter to each input channel (expanding from 1
261 * for each), then concatenates the results together. 297 * channel to channel_multiplier channels for each), then concatenates the
298 * results together.
262 * 299 *
263 * The output has depth_out = depth_in * depth_multiplier channels. 300 * The output has depth_out = depth_in * depth_multiplier channels.
264 * The output dimensions are functions of the filter dimensions, stride, and padding. 301 * The output dimensions are functions of the filter dimensions, stride, and
302 * padding.
265 * 303 *
266 * The values in the output tensor are computed as: 304 * The values in the output tensor are computed as:
267 * 305 *
@@ -271,7 +309,7 @@ enum OperationType : int32_t {
271 * filter[1, di, dj, k * channel_multiplier + q] 309 * filter[1, di, dj, k * channel_multiplier + q]
272 * ) 310 * )
273 * 311 *
274 * Supported tensor types: 312 * Supported tensor {@link OperandType}:
275 * * {@link OperandType::TENSOR_FLOAT32} 313 * * {@link OperandType::TENSOR_FLOAT32}
276 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 314 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
277 * 315 *
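The pooling and convolution entries above repeatedly state that the output dimensions are functions of the filter dimensions, stride, and padding without spelling out the arithmetic. A sketch of the usual formula for the explicit-padding case (assumption: flooring integer division, which the implicit SAME/VALID schemes reduce to; this helper is illustrative, not HAL code):

```cpp
#include <cstdint>

// Output extent along one spatial axis for explicit padding:
// out = (in + pad_begin + pad_end - filter) / stride + 1, floored.
uint32_t outputExtent(uint32_t in, uint32_t filter, uint32_t stride,
                      uint32_t pad_begin, uint32_t pad_end) {
    return (in + pad_begin + pad_end - filter) / stride + 1;
}

// Example: a 224-wide input, 3-wide filter, stride 2, padding 0/1 gives
// (224 + 0 + 1 - 3) / 2 + 1 = 112 output columns.
```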
@@ -280,82 +318,97 @@ enum OperationType : int32_t {
280 * Both explicit padding and implicit padding are supported. 318 * Both explicit padding and implicit padding are supported.
281 * 319 *
282 * Inputs (explicit padding): 320 * Inputs (explicit padding):
283 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. 321 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
322 * specifying the input.
284 * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], 323 * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
285 * specifying the filter. 324 * specifying the filter.
286 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. 325 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
287 * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should 326 * tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
288 * also be of {@link OperandType::TENSOR_FLOAT32}. 327 * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
289 * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias 328 * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
290 * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and 329 * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
291 * bias_scale == input_scale * filter_scale. 330 * bias_scale == input_scale * filter_scale.
292 * * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. 331 * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
293 * * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. 332 * the left, in the ‘width’ dimension.
294 * * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. 333 * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
295 * * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. 334 * the right, in the ‘width’ dimension.
296 * * 7: An INT32 value, specifying the stride when walking through input 335 * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
297 * in the ‘width’ dimension. 336 * the top, in the ‘height’ dimension.
298 * * 8: An INT32 value, specifying the stride when walking through input 337 * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
299 * in the ‘height’ dimension. 338 * the bottom, in the ‘height’ dimension.
300 * * 9: An INT32 value, specifying the depthwise multiplier. 339 * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
301 * * 10: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 340 * walking through input in the ‘width’ dimension.
302 * Specifies the activation to invoke on the result of each addition. 341 * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
342 * walking through input in the ‘height’ dimension.
343 * * 9: An {@link OperandType::INT32} scalar, specifying the depthwise
344 * multiplier.
345 * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
346 * {@link FusedActivationFunc} values. Specifies the activation to
347 * invoke on the result.
303 * 348 *
304 * Inputs (implicit padding): 349 * Inputs (implicit padding):
305 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. 350 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
351 * specifying the input.
306 * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], 352 * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
307 * specifying the filter. 353 * specifying the filter.
308 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. 354 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
309 * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should 355 * tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
310 * also be of {@link OperandType::TENSOR_FLOAT32}. 356 * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
311 * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias 357 * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
312 * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and 358 * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
313 * bias_scale == input_scale * filter_scale. 359 * bias_scale == input_scale * filter_scale.
314 * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the 360 * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
361 * padding scheme, has to be one of the
315 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. 362 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
316 * * 4: An INT32 value, specifying the stride when walking through input 363 * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
317 * in the ‘width’ dimension. 364 * walking through input in the ‘width’ dimension.
318 * * 5: An INT32 value, specifying the stride when walking through input 365 * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
319 * in the ‘height’ dimension. 366 * walking through input in the ‘height’ dimension.
320 * * 6: An INT32 value, specifying the depthwise multiplier. 367 * * 6: An {@link OperandType::INT32} scalar, specifying the depthwise
321 * * 7: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 368 * multiplier.
322 * Specifies the activation to invoke on the result of each addition. 369 * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
370 * {@link FusedActivationFunc} values. Specifies the activation to
371 * invoke on the result.
323 * 372 *
324 * Outputs: 373 * Outputs:
325 * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out]. 374 * * 0: The output 4-D tensor, of shape
326 * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following 375 * [batches, out_height, out_width, depth_out]. For output tensor of
327 * condition must be satisfied: output_scale > input_scale * filter_scale. 376 * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
377 * must be satisfied: output_scale > input_scale * filter_scale.
328 */ 378 */
329 DEPTHWISE_CONV_2D = 4, 379 DEPTHWISE_CONV_2D = 4,
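As a reading aid for the quantization constraints repeated above (bias_scale == input_scale * filter_scale, output_scale > input_scale * filter_scale), a minimal C++ sketch follows; it is illustrative only, not part of this HAL, and the helper name is hypothetical:

    #include <cmath>

    // Checks the TENSOR_QUANT8_ASYMM scale constraints stated for
    // DEPTHWISE_CONV_2D: the TENSOR_INT32 bias must use
    // bias_scale == input_scale * filter_scale, and the output must use
    // output_scale > input_scale * filter_scale.
    bool quantScalesValid(float inputScale, float filterScale,
                          float biasScale, float outputScale) {
        const float product = inputScale * filterScale;
        const bool biasOk = std::fabs(biasScale - product) <= 1e-6f * product;
        const bool outputOk = outputScale > product;
        return biasOk && outputOk;
    }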
330 380
331 /** 381 /**
332 * Rearranges data from depth into blocks of spatial data. 382 * Rearranges data from depth into blocks of spatial data.
333 * 383 *
334 * More specifically, this op outputs a copy of the input tensor where values from 384 * More specifically, this op outputs a copy of the input tensor where
335 * the depth dimension are moved in spatial blocks to the height and width dimensions. 385 * values from the depth dimension are moved in spatial blocks to the height
336 * The value block_size indicates the input block size and how the data is moved. 386 * and width dimensions. The value block_size indicates the input block size
387 * and how the data is moved.
337 * 388 *
338 * Chunks of data of size block_size * block_size from depth are rearranged into 389 * Chunks of data of size block_size * block_size from depth are rearranged
339 * non-overlapping blocks of size block_size x block_size. 390 * into non-overlapping blocks of size block_size x block_size.
340 * 391 *
341 * The width of the output tensor is input_depth * block_size, whereas the height is 392 * The width of the output tensor is input_depth * block_size, whereas the
342 * input_height * block_size. 393 * height is input_height * block_size. The depth of the input tensor must
343 * The depth of the input tensor must be divisible by block_size * block_size 394 * be divisible by block_size * block_size
344 * 395 *
345 * Supported tensor types: 396 * Supported tensor {@link OperandType}:
346 * * {@link OperandType::TENSOR_FLOAT32} 397 * * {@link OperandType::TENSOR_FLOAT32}
347 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 398 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
348 * 399 *
349 * Supported tensor rank: 4, with "NHWC" data layout. 400 * Supported tensor rank: 4, with "NHWC" data layout.
350 * 401 *
351 * Inputs: 402 * Inputs:
352 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. 403 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
353 * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and 404 * specifying the input.
354 * block_size * block_size must be a divisor of the input depth. 405 * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
406 * block_size must be >=1 and block_size * block_size must be a divisor
407 * of the input depth.
355 * 408 *
356 * Outputs: 409 * Outputs:
357 * * 0: The output 4-D tensor, of shape [batch, height*block_size, width*block_size, 410 * * 0: The output 4-D tensor, of shape [batch, height*block_size,
358 * depth/(block_size*block_size)]. 411 * width*block_size, depth/(block_size*block_size)].
359 */ 412 */
360 DEPTH_TO_SPACE = 5, 413 DEPTH_TO_SPACE = 5,
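The index arithmetic behind DEPTH_TO_SPACE is easier to follow in code. A minimal NHWC C++ sketch, assuming float data and a depth divisible by block_size * block_size (illustrative only, not part of this HAL):

    #include <cstddef>
    #include <vector>

    // NHWC DEPTH_TO_SPACE: each block_size x block_size spatial block of the
    // output is filled from consecutive groups of depthOut input channels.
    std::vector<float> depthToSpace(const std::vector<float>& input, int batches,
                                    int heightIn, int widthIn, int depthIn,
                                    int blockSize) {
        const int depthOut = depthIn / (blockSize * blockSize);
        const int heightOut = heightIn * blockSize;
        const int widthOut = widthIn * blockSize;
        std::vector<float> output(
            static_cast<size_t>(batches) * heightOut * widthOut * depthOut);
        for (int b = 0; b < batches; ++b)
            for (int h = 0; h < heightOut; ++h)
                for (int w = 0; w < widthOut; ++w)
                    for (int c = 0; c < depthOut; ++c) {
                        const int inH = h / blockSize;
                        const int inW = w / blockSize;
                        const int inC =
                            c + ((h % blockSize) * blockSize + (w % blockSize)) * depthOut;
                        const size_t inIdx =
                            ((static_cast<size_t>(b) * heightIn + inH) * widthIn + inW) * depthIn + inC;
                        const size_t outIdx =
                            ((static_cast<size_t>(b) * heightOut + h) * widthOut + w) * depthOut + c;
                        output[outIdx] = input[inIdx];
                    }
        return output;
    }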
361 414
@@ -366,16 +419,16 @@ enum OperationType : int32_t {
366 * 419 *
367 * output = (input - zeroPoint) * scale. 420 * output = (input - zeroPoint) * scale.
368 * 421 *
369 * Supported tensor types: 422 * Supported tensor {@link OperandType}:
370 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 423 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
371 * 424 *
372 * Supported tensor rank: up to 4 425 * Supported tensor rank: up to 4
373 * 426 *
374 * Inputs: 427 * Inputs:
375 * * 0: A tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}. 428 * * 0: A tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}.
376 * 429 *
377 * Outputs: 430 * Outputs:
378 * * 0: The output tensor of same shape as input0, but with type 431 * * 0: The output tensor of same shape as input0, but with
379 * {@link OperandType::TENSOR_FLOAT32}. 432 * {@link OperandType::TENSOR_FLOAT32}.
380 */ 433 */
381 DEQUANTIZE = 6, 434 DEQUANTIZE = 6,
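A minimal C++ sketch of the DEQUANTIZE formula output = (input - zeroPoint) * scale (illustrative only; the function name is hypothetical):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // DEQUANTIZE: output = (input - zeroPoint) * scale, applied element-wise
    // to a TENSOR_QUANT8_ASYMM buffer.
    std::vector<float> dequantize(const std::vector<uint8_t>& input, float scale,
                                  int32_t zeroPoint) {
        std::vector<float> output(input.size());
        for (size_t i = 0; i < input.size(); ++i) {
            output[i] = (static_cast<int32_t>(input[i]) - zeroPoint) * scale;
        }
        return output;
    }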
@@ -401,7 +454,7 @@ enum OperationType : int32_t {
401 * and an error must be reported. 454 * and an error must be reported.
402 * 455 *
403 * Inputs: 456 * Inputs:
404 * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32} type. 457 * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32}.
405 * The values are indices into the first dimension of Values. 458 * The values are indices into the first dimension of Values.
406 * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are 459 * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
407 * extracted. 460 * extracted.
@@ -416,7 +469,7 @@ enum OperationType : int32_t {
416 /** 469 /**
417 * Computes element-wise floor() on the input tensor. 470 * Computes element-wise floor() on the input tensor.
418 * 471 *
419 * Supported tensor types: 472 * Supported tensor {@link OperandType}:
420 * * {@link OperandType::TENSOR_FLOAT32} 473 * * {@link OperandType::TENSOR_FLOAT32}
421 * 474 *
422 * Supported tensor rank: up to 4 475 * Supported tensor rank: up to 4
@@ -425,44 +478,51 @@ enum OperationType : int32_t {
425 * * 0: A tensor. 478 * * 0: A tensor.
426 * 479 *
427 * Outputs: 480 * Outputs:
428 * * 0: The output tensor, of the same type and dimensions as the input tensor. 481 * * 0: The output tensor, of the same {@link OperandType} and dimensions as
482 * the input tensor.
429 */ 483 */
430 FLOOR = 8, 484 FLOOR = 8,
431 485
432 /** 486 /**
433 * Denotes a fully (densely) connected layer, which connects all elements in the input 487 * Denotes a fully (densely) connected layer, which connects all elements
434 * tensor with each element in the output tensor. 488 * in the input tensor with each element in the output tensor.
435 * 489 *
436 * This layer implements the operation: 490 * This layer implements the operation:
437 * 491 *
438 * outputs = activation(inputs * weights’ + bias) 492 * outputs = activation(inputs * weights’ + bias)
439 * 493 *
440 * Supported tensor types: 494 * Supported tensor {@link OperandType}:
441 * * {@link OperandType::TENSOR_FLOAT32} 495 * * {@link OperandType::TENSOR_FLOAT32}
442 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 496 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
443 * 497 *
444 * Supported tensor rank: up to 4. 498 * Supported tensor rank: up to 4.
445 * 499 *
446 * Inputs: 500 * Inputs:
447 * * 0: A tensor, specifying the input. If rank is greater than 2, then it gets flattened to 501 * * 0: A tensor of at least rank 2, specifying the input. If rank is
448 * a 2-D Tensor. The 2-D Tensor is handled as if dimensions corresponded to shape 502 * greater than 2, then it gets flattened to a 2-D Tensor. The
449 * [batch_size, input_size], where “batch_size” corresponds to the batching dimension, 503 * (flattened) 2-D Tensor is reshaped (if necessary) to
450 * and “input_size” is the size of the input. 504 * [batch_size, input_size], where "input_size" corresponds to the
451 * * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where 505 * number of inputs to the layer, matching the second dimension of
452 * "num_units" corresponds to the number of output nodes. 506 * weights, and "batch_size" is calculated by dividing the number of
453 * * 2: A 1-D tensor, of shape [num_units], specifying the bias. 507 * elements by "input_size".
454 * For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should 508 * * 1: A 2-D tensor, specifying the weights, of shape
455 * also be of {@link OperandType::TENSOR_FLOAT32}. 509 * [num_units, input_size], where "num_units" corresponds to the number
456 * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias 510 * of output nodes.
457 * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and 511 * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
512 * tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
513 * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
514 * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
515 * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
458 * bias_scale == input_scale * filter_scale. 516 * bias_scale == input_scale * filter_scale.
459 * * 3: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 517 * * 3: An {@link OperandType::INT32} scalar, and has to be one of the
460 * Specifies the activation to invoke on the result of each addition. 518 * {@link FusedActivationFunc} values. Specifies the activation to
519 * invoke on the result.
461 * 520 *
462 * Outputs: 521 * Outputs:
463 * * 0: The output tensor, of shape [batch_size, num_units]. 522 * * 0: The output tensor, of shape [batch_size, num_units]. For output
464 * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following 523 * tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
465 * condition must be satisfied: output_scale > input_scale * filter_scale. 524 * condition must be satisfied:
525 * output_scale > input_scale * filter_scale.
466 */ 526 */
467 FULLY_CONNECTED = 9, 527 FULLY_CONNECTED = 9,
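A minimal float-only C++ sketch of FULLY_CONNECTED as described above, with the input flattened to [batch_size, input_size] and ReLU standing in for the fused activation (illustrative only, not part of this HAL):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // FULLY_CONNECTED (float): the input is flattened to
    // [batch_size, input_size]; weights are [num_units, input_size] and
    // bias is [num_units]. applyRelu stands in for the fused activation input.
    std::vector<float> fullyConnected(const std::vector<float>& input,
                                      const std::vector<float>& weights,
                                      const std::vector<float>& bias,
                                      size_t inputSize, bool applyRelu) {
        const size_t numUnits = bias.size();
        const size_t batchSize = input.size() / inputSize;  // as defined above
        std::vector<float> output(batchSize * numUnits);
        for (size_t b = 0; b < batchSize; ++b) {
            for (size_t u = 0; u < numUnits; ++u) {
                float acc = bias[u];
                for (size_t i = 0; i < inputSize; ++i) {
                    acc += input[b * inputSize + i] * weights[u * inputSize + i];
                }
                output[b * numUnits + u] = applyRelu ? std::max(0.0f, acc) : acc;
            }
        }
        return output;
    }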
468 528
@@ -494,19 +554,22 @@ enum OperationType : int32_t {
494 * must be concatenated. 554 * must be concatenated.
495 * 555 *
496 * Inputs: 556 * Inputs:
497 * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [ k ]. 557 * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with
498 * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [ n ]; 558 * shape [ k ].
499 * Keys and Values pair represent a map, i.e., the ith element 559 * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape
500 * in Keys (Keys[i]) is the key to select the ith sub-tensor 560 * [ n ]; Keys and Values pair represent a map, i.e., the ith element
501 * in Values (Values[i]), where 0 <= i <= n-1. 561 * in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
502 * Keys tensor *MUST* be sorted in ascending order. 562 * (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
503 * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension must be n. 563 * ascending order.
564 * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
565 * must be n.
504 * 566 *
505 * Outputs: 567 * Outputs:
506 * * 0: Output. A tensor with shape [ k …]. 568 * * 0: Output. A tensor with shape [ k …].
507 * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup 569 * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
508 * hits (True) or not (False). 570 * hits (True) or not (False).
509 * Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0 and scale 1.0f. 571 * Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0
572 * and scale 1.0f.
510 * A non-zero byte represents True, a hit. A zero indicates otherwise. 573 * A non-zero byte represents True, a hit. A zero indicates otherwise.
511 */ 574 */
512 HASHTABLE_LOOKUP = 10, 575 HASHTABLE_LOOKUP = 10,
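A minimal C++ sketch of HASHTABLE_LOOKUP for float Values (illustrative only, not part of this HAL). Because Keys must be sorted ascending, each lookup can be a binary search; rows for missed keys are simply left zeroed in this sketch and reported through Hits:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // HASHTABLE_LOOKUP (float Values): rowSize is the element count of one
    // Values sub-tensor (the product of Values' dimensions after the first).
    void hashtableLookup(const std::vector<int32_t>& lookups,  // [k]
                         const std::vector<int32_t>& keys,     // [n], ascending
                         const std::vector<float>& values,     // [n, rowSize]
                         size_t rowSize,
                         std::vector<float>* output,           // [k, rowSize]
                         std::vector<uint8_t>* hits) {         // [k]: 1 hit, 0 miss
        output->assign(lookups.size() * rowSize, 0.0f);  // missed rows stay zero here
        hits->assign(lookups.size(), 0);
        for (size_t i = 0; i < lookups.size(); ++i) {
            auto it = std::lower_bound(keys.begin(), keys.end(), lookups[i]);
            if (it != keys.end() && *it == lookups[i]) {
                const size_t row = static_cast<size_t>(it - keys.begin());
                std::memcpy(output->data() + i * rowSize,
                            values.data() + row * rowSize, rowSize * sizeof(float));
                (*hits)[i] = 1;
            }
        }
    }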
@@ -520,32 +583,37 @@ enum OperationType : int32_t {
520 * input[batch, row, col, channel] / 583 * input[batch, row, col, channel] /
521 * sqrt(sum_{c} pow(input[batch, row, col, c], 2)) 584 * sqrt(sum_{c} pow(input[batch, row, col, c], 2))
522 * 585 *
523 * For input tensor with more dimensions, independently normalizes each 1-D slice along dimension dim. 586 * For input tensor with more dimensions, independently normalizes each 1-D
587 * slice along dimension dim.
524 * 588 *
525 * Supported tensor types: 589 * Supported tensor {@link OperandType}:
526 * * {@link OperandType::TENSOR_FLOAT32} 590 * * {@link OperandType::TENSOR_FLOAT32}
527 * 591 *
528 * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples, Height, Width, and Channels). 592 * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples,
593 * Height, Width, and Channels).
529 * 594 *
530 * Inputs: 595 * Inputs:
531 * * 0: A 4-D tensor, of shape [batches, height, width, depth]. 596 * * 0: A 4-D tensor, of shape [batches, height, width, depth].
532 * 597 *
533 * Outputs: 598 * Outputs:
534 * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth]. 599 * * 0: The output 4-D tensor, of shape
600 * [batches, out_height, out_width, depth].
535 */ 601 */
536 L2_NORMALIZATION = 11, 602 L2_NORMALIZATION = 11,
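A minimal NHWC C++ sketch of L2_NORMALIZATION, dividing each 1-D depth slice by its own L2 norm exactly as in the formula above (illustrative only; no epsilon guard for an all-zero slice):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // NHWC L2_NORMALIZATION: every 1-D slice along the depth dimension is
    // divided by its own L2 norm.
    void l2Normalize(const std::vector<float>& input, int batches, int height,
                     int width, int depth, std::vector<float>* output) {
        output->resize(input.size());
        const size_t positions = static_cast<size_t>(batches) * height * width;
        for (size_t p = 0; p < positions; ++p) {
            float sumSq = 0.0f;
            for (int c = 0; c < depth; ++c) {
                const float v = input[p * depth + c];
                sumSq += v * v;
            }
            const float norm = std::sqrt(sumSq);
            for (int c = 0; c < depth; ++c) {
                (*output)[p * depth + c] = input[p * depth + c] / norm;
            }
        }
    }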
537 603
538 /** 604 /**
539 * Performs a 2-D L2 pooling operation. 605 * Performs a 2-D L2 pooling operation.
540 * 606 *
541 * The output dimensions are functions of the filter dimensions, stride, and padding. 607 * The output dimensions are functions of the filter dimensions, stride, and
608 * padding.
542 * 609 *
543 * The values in the output tensor are computed as: 610 * The values in the output tensor are computed as:
544 * 611 *
545 * output[batch, row, col, channel] = 612 * output[batch, row, col, channel] =
546 * sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) / sum(1)) 613 * sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) /
614 * sum(1))
547 * 615 *
548 * Supported tensor types: 616 * Supported tensor {@link OperandType}:
549 * * {@link OperandType::TENSOR_FLOAT32} 617 * * {@link OperandType::TENSOR_FLOAT32}
550 * 618 *
551 * Supported tensor rank: 4, with "NHWC" data layout. 619 * Supported tensor rank: 4, with "NHWC" data layout.
@@ -553,62 +621,82 @@ enum OperationType : int32_t {
553 * Both explicit padding and implicit padding are supported. 621 * Both explicit padding and implicit padding are supported.
554 * 622 *
555 * Inputs (explicit padding): 623 * Inputs (explicit padding):
556 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 624 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
557 * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. 625 * the input.
558 * * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. 626 * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
559 * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. 627 * the left, in the ‘width’ dimension.
560 * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. 628 * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
561 * * 5: An INT32 value, specifying the stride when walking through input 629 * the right, in the ‘width’ dimension.
562 * in the ‘width’ dimension. 630 * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
563 * * 6: An INT32 value, specifying the stride when walking through input 631 * the top, in the ‘height’ dimension.
564 * in the ‘height’ dimension. 632 * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
565 * * 7: An INT32 value, specifying the filter width. 633 * the bottom, in the ‘height’ dimension.
566 * * 8: An INT32 value, specifying the filter height. 634 * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
567 * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 635 * walking through input in the ‘width’ dimension.
568 * Specifies the activation to invoke on the result of each addition. 636 * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
637 * walking through input in the ‘height’ dimension.
638 * * 7: An {@link OperandType::INT32} scalar, specifying the filter
639 * width.
640 * * 8: An {@link OperandType::INT32} scalar, specifying the filter
641 * height.
642 * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
643 * {@link FusedActivationFunc} values. Specifies the activation to
644 * invoke on the result.
569 * 645 *
570 * Inputs (implicit padding): 646 * Inputs (implicit padding):
571 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 647 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
572 * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the 648 * the input.
649 * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
650 * padding scheme, has to be one of the
573 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. 651 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
574 * * 2: An INT32 value, specifying the stride when walking through input 652 * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
575 * in the ‘width’ dimension. 653 * walking through input in the ‘width’ dimension.
576 * * 3: An INT32 value, specifying the stride when walking through input 654 * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
577 * in the ‘height’ dimension. 655 * walking through input in the ‘height’ dimension.
578 * * 4: An INT32 value, specifying the filter width. 656 * * 4: An {@link OperandType::INT32} scalar, specifying the filter
579 * * 5: An INT32 value, specifying the filter height. 657 * width.
580 * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 658 * * 5: An {@link OperandType::INT32} scalar, specifying the filter
581 * Specifies the activation to invoke on the result of each addition. 659 * height.
660 * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
661 * {@link FusedActivationFunc} values. Specifies the activation to
662 * invoke on the result.
582 * 663 *
583 * Outputs: 664 * Outputs:
584 * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth]. 665 * * 0: The output 4-D tensor, of shape
666 * [batches, out_height, out_width, depth].
585 */ 667 */
586 L2_POOL_2D = 12, 668 L2_POOL_2D = 12,
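A minimal NHWC C++ sketch of L2_POOL_2D in its explicit-padding form (illustrative only, not part of this HAL). Window positions that fall into the padding are skipped, and each output value is the square root of the mean of squares over the remaining positions:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // NHWC L2_POOL_2D, explicit padding form.
    void l2Pool(const std::vector<float>& input, int batches, int height,
                int width, int depth, int padLeft, int padTop, int strideW,
                int strideH, int filterW, int filterH, int outHeight,
                int outWidth, std::vector<float>* output) {
        output->assign(static_cast<size_t>(batches) * outHeight * outWidth * depth, 0.0f);
        auto in = [&](int b, int h, int w, int c) {
            return input[((static_cast<size_t>(b) * height + h) * width + w) * depth + c];
        };
        for (int b = 0; b < batches; ++b)
            for (int oh = 0; oh < outHeight; ++oh)
                for (int ow = 0; ow < outWidth; ++ow)
                    for (int c = 0; c < depth; ++c) {
                        float sumSq = 0.0f;
                        int count = 0;
                        for (int fh = 0; fh < filterH; ++fh)
                            for (int fw = 0; fw < filterW; ++fw) {
                                const int h = oh * strideH - padTop + fh;
                                const int w = ow * strideW - padLeft + fw;
                                if (h < 0 || h >= height || w < 0 || w >= width) continue;
                                const float v = in(b, h, w, c);
                                sumSq += v * v;
                                ++count;
                            }
                        (*output)[((static_cast<size_t>(b) * outHeight + oh) * outWidth + ow) * depth + c] =
                            std::sqrt(sumSq / count);
                    }
    }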
587 669
588 /** 670 /**
589 * Applies Local Response Normalization along the depth dimension. 671 * Applies Local Response Normalization along the depth dimension.
590 * 672 *
591 * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last 673 * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
592 * dimension), and each vector is normalized independently. Within a given vector, 674 * last dimension), and each vector is normalized independently. Within a
593 * each component is divided by the weighted, squared sum of inputs within depth_radius. 675 * given vector, each component is divided by the weighted, squared sum of
676 * inputs within depth_radius.
594 * 677 *
595 * The output is calculated using this formula: 678 * The output is calculated using this formula:
596 * 679 *
597 * sqr_sum[a, b, c, d] = 680 * sqr_sum[a, b, c, d] = sum(
598 * sum(pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2) 681 * pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
599 * output = input / pow((bias + alpha * sqr_sum), beta) 682 * output = input / pow((bias + alpha * sqr_sum), beta)
600 * 683 *
601 * Supported tensor types: 684 * Supported tensor {@link OperandType}:
602 * * {@link OperandType::TENSOR_FLOAT32} 685 * * {@link OperandType::TENSOR_FLOAT32}
603 * 686 *
604 * Supported tensor rank: 4, with "NHWC" data layout. 687 * Supported tensor rank: 4, with "NHWC" data layout.
605 * 688 *
606 * Inputs: 689 * Inputs:
607 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 690 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
608 * * 1: An INT32 value, specifying the radius of the normalization window. 691 * the input.
609 * * 2: A FLOAT32 value, specifying the bias, must not be zero. 692 * * 1: An {@link OperandType::INT32} scalar, specifying the radius of
610 * * 3: A FLOAT32 value, specifying the scale factor, alpha. 693 * the normalization window.
611 * * 4: A FLOAT32 value, specifying the exponent, beta. 694 * * 2: An {@link OperandType::FLOAT32} scalar, specifying the bias, must
695 * not be zero.
696 * * 3: An {@link OperandType::FLOAT32} scalar, specifying the scale
697 * factor, alpha.
698 * * 4: An {@link OperandType::FLOAT32} scalar, specifying the exponent,
699 * beta.
612 * 700 *
613 * Outputs: 701 * Outputs:
614 * * 0: The output tensor of same shape as input0. 702 * * 0: The output tensor of same shape as input0.
@@ -622,7 +710,7 @@ enum OperationType : int32_t {
622 * 710 *
623 * output = 1 / (1 + exp(-input)) 711 * output = 1 / (1 + exp(-input))
624 * 712 *
625 * Supported tensor types: 713 * Supported tensor {@link OperandType}:
626 * * {@link OperandType::TENSOR_FLOAT32} 714 * * {@link OperandType::TENSOR_FLOAT32}
627 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 715 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
628 * 716 *
@@ -633,7 +721,7 @@ enum OperationType : int32_t {
633 * 721 *
634 * Outputs: 722 * Outputs:
635 * * 0: The output tensor of same shape as input0. 723 * * 0: The output tensor of same shape as input0.
636 * For {@link OperandType::TENSOR_QUANT8_ASYMM} type, 724 * For {@link OperandType::TENSOR_QUANT8_ASYMM},
637 * the scale must be 1.f / 256 and the zeroPoint must be 0. 725 * the scale must be 1.f / 256 and the zeroPoint must be 0.
638 */ 726 */
639 LOGISTIC = 14, 727 LOGISTIC = 14,
@@ -649,18 +737,19 @@ enum OperationType : int32_t {
649 * 737 *
650 * * 1: Input. Dim.size >= 1, no restriction on DataType. 738 * * 1: Input. Dim.size >= 1, no restriction on DataType.
651 * * 2: Weight. Optional. Dim.size == 1, DataType: Float. 739 * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
652 * If not set, each input element is considered to have the same weight of 740 * If not set, each input element is considered to have the same weight
653 * 1.0. 741 * of 1.0.
654 * Tensor[1].Dim[0] == Tensor[2].Dim[0] 742 * Tensor[1].Dim[0] == Tensor[2].Dim[0]
655 * * 3: Type: 743 * * 3: Type:
656 * Sparse: Value LSHProjectionType_SPARSE(=1). 744 * Sparse: Value LSHProjectionType_SPARSE(=1).
657 * Computed bit vector is considered to be sparse. 745 * Computed bit vector is considered to be sparse.
658 * Each output element is an int32 made up of multiple bits computed from 746 * Each output element is an int32 made up of multiple bits
659 * hash functions. 747 * computed from hash functions.
660 * 748 *
661 * Dense: Value LSHProjectionType_DENSE(=2). 749 * Dense: Value LSHProjectionType_DENSE(=2).
662 * Computed bit vector is considered to be dense. Each output element 750 * Computed bit vector is considered to be dense. Each output
663 * represents a bit and can take the value of either 0 or 1. 751 * element represents a bit and can take the value of either
752 * 0 or 1.
664 * 753 *
665 * Outputs: 754 * Outputs:
666 * * 0: If the projection type is sparse: 755 * * 0: If the projection type is sparse:
@@ -680,9 +769,12 @@ enum OperationType : int32_t {
680 * \f{eqnarray*}{ 769 * \f{eqnarray*}{
681 * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\ 770 * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
682 * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\ 771 * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
683 * C_t =& clip(f_t \odot C_{t-1} + i_t \odot g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell})& \\ 772 * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
684 * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o)& \\ 773 * g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
685 * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj}) & if\ there\ is\ a\ projection; \\ 774 * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
775 * & & \\
776 * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
777 * & if\ there\ is\ a\ projection; \\
686 * h_t =& & \\ 778 * h_t =& & \\
687 * & o_t \odot g(C_t) & otherwise. \\ 779 * & o_t \odot g(C_t) & otherwise. \\
688 * \f} 780 * \f}
@@ -694,7 +786,8 @@ enum OperationType : int32_t {
694 * * \f$o_t\f$ is the output, 786 * * \f$o_t\f$ is the output,
695 * * \f$h_t\f$ is the output state, 787 * * \f$h_t\f$ is the output state,
696 * * \f$\sigma\f$ is the logistic sigmoid function, 788 * * \f$\sigma\f$ is the logistic sigmoid function,
697 * * \f$g\f$ is the cell input and cell output activation function, usually \f$tanh\f$, 789 * * \f$g\f$ is the cell input and cell output activation function, usually
790 * \f$tanh\f$,
698 * * \f$W_{xi}\f$ is the input-to-input weight matrix, 791 * * \f$W_{xi}\f$ is the input-to-input weight matrix,
699 * * \f$W_{hi}\f$ is the recurrent to input weight matrix, 792 * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
700 * * \f$W_{ci}\f$ is the cell-to-input weight matrix, 793 * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
@@ -714,27 +807,32 @@ enum OperationType : int32_t {
714 * * \f$b_{proj}\f$ is the projection bias, 807 * * \f$b_{proj}\f$ is the projection bias,
715 * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and 808 * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
716 * * \f$t_{proj}\f$ is the threshold for clipping the projected output. 809 * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
717 * * \f$\odot\f$ is the <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)"> 810 * * \f$\odot\f$ is the
811 * <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
718 * Hadamard product</a> that takes two matrices and produces another 812 * Hadamard product</a> that takes two matrices and produces another
719 * matrix, each element of which is the product of the corresponding 813 * matrix, each element of which is the product of the corresponding
720 * elements of the input matrices. 814 * elements of the input matrices.
721 * 815 *
722 * The operation has the following independently optional inputs: 816 * The operation has the following independently optional inputs:
723 * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights (\f$W_{hi}\f$), 817 * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
724 * cell-to-input (\f$W_{ci}\f$) weights, and input gate bias (\f$b_i\f$) either all have values, 818 * (\f$W_{hi}\f$), cell-to-input (\f$W_{ci}\f$) weights, and input gate
725 * or none of them have values (i.e., all set to null). If they have no 819 * bias (\f$b_i\f$) either all have values, or none of them have values
726 * values, coupling of input and forget gates (CIFG) is used, in which case 820 * (i.e., all set to null). If they have no values, coupling of input and
727 * the input gate (\f$i_t\f$) is calculated using the following equation instead. 821 * forget gates (CIFG) is used, in which case the input gate (\f$i_t\f$)
822 * is calculated using the following equation instead.
728 * \f{eqnarray*}{ 823 * \f{eqnarray*}{
729 * i_t = 1 - f_t 824 * i_t = 1 - f_t
730 * \f} 825 * \f}
731 * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights (\f$W_{cf}\f$), and cell-to-output 826 * * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output weights
732 * weights (\f$W_{co}\f$) either all have values or none of them have values. 827 * (\f$W_{co}\f$) either both have values or neither of them have values.
733 * If they have values, the peephole optimization is used. 828 * If they have values, the peephole optimization is used. Additionally,
734 * * The projection weights (\f$W_{proj}\f$) is required only for the recurrent projection 829 * if CIFG is not used, cell-to-input weights (\f$W_{ci}\f$) is also
735 * layer, and should otherwise have no value. 830 * required to have values for peephole optimization.
736 * The projection bias (\f$b_{proj}\f$) may (but is not required to) have a value if the 832 * recurrent projection layer, and should otherwise have no value.
737 * recurrent projection layer exists, and should otherwise have no value. 833 * * The projection bias (\f$b_{proj}\f$) may (but is not required to) have a
833 * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
834 * value if the recurrent projection layer exists, and should otherwise
835 * have no value.
738 * 836 *
739 * References: 837 * References:
740 * 838 *
@@ -746,8 +844,8 @@ enum OperationType : int32_t {
746 * The peephole implementation and projection layer is based on: 844 * The peephole implementation and projection layer is based on:
747 * https://research.google.com/pubs/archive/43905.pdf 845 * https://research.google.com/pubs/archive/43905.pdf
748 * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory 846 * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
749 * recurrent neural network architectures for large scale acoustic modeling." 847 * recurrent neural network architectures for large scale acoustic
750 * INTERSPEECH, 2014. 848 * modeling." INTERSPEECH, 2014.
751 * (However, the concept of peephole optimization was introduced in work 849 * (However, the concept of peephole optimization was introduced in work
752 * prior to this paper.) 850 * prior to this paper.)
753 * 851 *
@@ -755,56 +853,74 @@ enum OperationType : int32_t {
755 * http://arxiv.org/pdf/1503.04069.pdf 853 * http://arxiv.org/pdf/1503.04069.pdf
756 * Greff et al. "LSTM: A Search Space Odyssey" 854 * Greff et al. "LSTM: A Search Space Odyssey"
757 * 855 *
758 * Supported tensor types (type T): 856 * Supported tensor {@link OperandType}:
759 * * {@link OperandType::TENSOR_FLOAT32} 857 * * {@link OperandType::TENSOR_FLOAT32}
760 * 858 *
761 * Inputs: 859 * Inputs:
762 * * 0: The input (\f$x_t\f$). 860 * * 0: The input (\f$x_t\f$).
763 * A 2-D tensor of type T, of shape [batch_size, input_size], where 861 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
764 * “batch_size” corresponds to the batching dimension, and “input_size” 862 * [batch_size, input_size], where “batch_size” corresponds to the
765 * is the size of the input. 863 * batching dimension, and “input_size” is the size of the input.
766 * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. 864 * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
767 * A 2-D tensor of type T, of shape [num_units, input_size], where 865 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
768 * “num_units” corresponds to the number of cell units. 866 * [num_units, input_size], where “num_units” corresponds to the
867 * number of cell units.
769 * * 2: The input-to-forget weights (\f$W_{xf}\f$). 868 * * 2: The input-to-forget weights (\f$W_{xf}\f$).
770 * A 2-D tensor of type T, of shape [num_units, input_size]. 869 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
870 * [num_units, input_size].
771 * * 3: The input-to-cell weights (\f$W_{xc}\f$). 871 * * 3: The input-to-cell weights (\f$W_{xc}\f$).
772 * A 2-D tensor of type T, of shape [num_units, input_size]. 872 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
873 * [num_units, input_size].
773 * * 4: The input-to-output weights (\f$W_{xo}\f$). 874 * * 4: The input-to-output weights (\f$W_{xo}\f$).
774 * A 2-D tensor of type T, of shape [num_units, input_size]. 875 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
876 * [num_units, input_size].
775 * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional. 877 * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
776 * A 2-D tensor of type T, of shape [num_units, output_size], where 878 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
777 * “output_size” corresponds to either the number of cell units (i.e., 879 * [num_units, output_size], where “output_size” corresponds to either
778 * “num_units”), or the second dimension of the “projection_weights”, if 880 * the number of cell units (i.e., “num_units”), or the second
779 * defined. 881 * dimension of the “projection_weights”, if defined.
780 * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$). 882 * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
781 * A 2-D tensor of type T, of shape [num_units, output_size]. 883 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
884 * [num_units, output_size].
782 * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$). 885 * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
783 * A 2-D tensor of type T, of shape [num_units, output_size]. 886 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
887 * [num_units, output_size].
784 * * 8: The recurrent-to-output weights (\f$W_{ho}\f$). 888 * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
785 * A 2-D tensor of type T, of shape [num_units, output_size]. 889 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
890 * [num_units, output_size].
786 * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional. 891 * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
787 * A 1-D tensor of type T, of shape [num_units]. 892 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
893 * [num_units].
788 * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional. 894 * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
789 * A 1-D tensor of type T, of shape [num_units]. 895 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
896 * [num_units].
790 * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional. 897 * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
791 * A 1-D tensor of type T, of shape [num_units]. 898 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
899 * [num_units].
792 * * 12:The input gate bias (\f$b_i\f$). Optional. 900 * * 12:The input gate bias (\f$b_i\f$). Optional.
793 * A 1-D tensor of type T, of shape [num_units]. 901 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
902 * [num_units].
794 * * 13:The forget gate bias (\f$b_f\f$). 903 * * 13:The forget gate bias (\f$b_f\f$).
795 * A 1-D tensor of type T, of shape [num_units]. 904 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
905 * [num_units].
796 * * 14:The cell bias (\f$b_c\f$). 906 * * 14:The cell bias (\f$b_c\f$).
797 * A 1-D tensor of type T, of shape [num_units]. 907 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
908 * [num_units].
798 * * 15:The output gate bias (\f$b_o\f$). 909 * * 15:The output gate bias (\f$b_o\f$).
799 * A 1-D tensor of type T, of shape [num_units]. 910 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
911 * [num_units].
800 * * 16:The projection weights (\f$W_{proj}\f$). Optional. 912 * * 16:The projection weights (\f$W_{proj}\f$). Optional.
801 * A 2-D tensor of type T, of shape [output_size, num_units]. 913 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
914 * [output_size, num_units].
802 * * 17:The projection bias (\f$b_{proj}\f$). Optional. 915 * * 17:The projection bias (\f$b_{proj}\f$). Optional.
803 * A 1-D tensor of type T, of shape [output_size]. 916 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
917 * [output_size].
804 * * 18:The output state (in) (\f$h_{t-1}\f$). 918 * * 18:The output state (in) (\f$h_{t-1}\f$).
805 * A 2-D tensor of type T, of shape [batch_size, output_size]. 919 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
920 * [batch_size, output_size].
806 * * 19:The cell state (in) (\f$C_{t-1}\f$). 921 * * 19:The cell state (in) (\f$C_{t-1}\f$).
807 * A 2-D tensor of type T, of shape [batch_size, num_units]. 922 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
923 * [batch_size, num_units].
808 * * 20:The activation function (\f$g\f$). 924 * * 20:The activation function (\f$g\f$).
809 * A value indicating the activation function: 925 * A value indicating the activation function:
810 * <ul> 926 * <ul>
@@ -814,38 +930,43 @@ enum OperationType : int32_t {
814 * <li>4: Tanh; 930 * <li>4: Tanh;
815 * <li>6: Sigmoid. 931 * <li>6: Sigmoid.
816 * </ul> 932 * </ul>
817 * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such that values are bound 933 * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
818 * within [-cell_clip, cell_clip]. If set to 0.0 then clipping is 934 * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
819 * disabled.
820 * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the projection layer, such
821 * that values are bound within [-proj_clip, proj_clip]. If set to 0.0
822 * then clipping is disabled. 935 * then clipping is disabled.
936 * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
937 * projection layer, such that values are bound within
938 * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
823 * 939 *
824 * Outputs: 940 * Outputs:
825 * * 0: The scratch buffer. 941 * * 0: The scratch buffer.
826 * A 2-D tensor of type T, of shape [batch_size, num_units * 4] with 942 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
827 * CIFG, or [batch_size, num_units * 3] without CIFG. 943 * [batch_size, num_units * 4] with CIFG, or
944 * [batch_size, num_units * 3] without CIFG.
828 * * 1: The output state (out) (\f$h_t\f$). 945 * * 1: The output state (out) (\f$h_t\f$).
829 * A 2-D tensor of type T, of shape [batch_size, output_size]. 946 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
947 * [batch_size, output_size].
830 * * 2: The cell state (out) (\f$C_t\f$). 948 * * 2: The cell state (out) (\f$C_t\f$).
831 * A 2-D tensor of type T, of shape [batch_size, num_units]. 949 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
950 * [batch_size, num_units].
832 * * 3: The output (\f$o_t\f$). 951 * * 3: The output (\f$o_t\f$).
833 * A 2-D tensor of type T, of shape [batch_size, output_size]. This is 952 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
834 * effectively the same as the current “output state (out)” value. 953 * [batch_size, output_size]. This is effectively the same as the
954 * current “output state (out)” value.
835 */ 955 */
836 LSTM = 16, 956 LSTM = 16,
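A minimal single-step, single-batch C++ sketch of the LSTM equations above, omitting the optional CIFG, peephole, and projection paths (illustrative only, not part of this HAL; the struct and function names are hypothetical):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // One LSTM step for a single batch element. Weight matrices are row-major,
    // [num_units, input_size] for wx* and [num_units, num_units] for wh*
    // (here output_size == num_units).
    struct LstmWeights {
        std::vector<float> wxi, wxf, wxc, wxo;  // input-to-{input,forget,cell,output}
        std::vector<float> whi, whf, whc, who;  // recurrent-to-{input,forget,cell,output}
        std::vector<float> bi, bf, bc, bo;      // gate biases
    };

    static float dot(const std::vector<float>& m, size_t row, const std::vector<float>& v) {
        float acc = 0.0f;
        for (size_t i = 0; i < v.size(); ++i) acc += m[row * v.size() + i] * v[i];
        return acc;
    }
    static float sigmoid(float x) { return 1.0f / (1.0f + std::exp(-x)); }

    // h and c hold the previous output/cell state on entry and the new ones on return.
    void lstmStep(const LstmWeights& w, const std::vector<float>& x,
                  std::vector<float>* h, std::vector<float>* c, float cellClip) {
        const size_t numUnits = c->size();
        const std::vector<float> hPrev = *h;
        for (size_t u = 0; u < numUnits; ++u) {
            const float i = sigmoid(dot(w.wxi, u, x) + dot(w.whi, u, hPrev) + w.bi[u]);
            const float f = sigmoid(dot(w.wxf, u, x) + dot(w.whf, u, hPrev) + w.bf[u]);
            const float g = std::tanh(dot(w.wxc, u, x) + dot(w.whc, u, hPrev) + w.bc[u]);
            const float o = sigmoid(dot(w.wxo, u, x) + dot(w.who, u, hPrev) + w.bo[u]);
            float cell = f * (*c)[u] + i * g;
            if (cellClip > 0.0f) cell = std::fmin(cellClip, std::fmax(-cellClip, cell));
            (*c)[u] = cell;
            (*h)[u] = o * std::tanh(cell);
        }
    }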
837 957
838 /** 958 /**
839 * Performs a 2-D max pooling operation. 959 * Performs a 2-D max pooling operation.
840 * 960 *
841 * The output dimensions are functions of the filter dimensions, stride, and padding. 961 * The output dimensions are functions of the filter dimensions, stride, and
962 * padding.
842 * 963 *
843 * The values in the output tensor are computed as: 964 * The values in the output tensor are computed as:
844 * 965 *
845 * output[batch, row, col, channel] = 966 * output[batch, row, col, channel] =
846 * max_{i, j} (input[batch, row + i, col + j, channel]) 967 * max_{i, j} (input[batch, row + i, col + j, channel])
847 * 968 *
848 * Supported tensor types: 969 * Supported tensor {@link OperandType}:
849 * * {@link OperandType::TENSOR_FLOAT32} 970 * * {@link OperandType::TENSOR_FLOAT32}
850 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 971 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
851 * 972 *
@@ -854,52 +975,68 @@ enum OperationType : int32_t {
854 * Both explicit padding and implicit padding are supported. 975 * Both explicit padding and implicit padding are supported.
855 * 976 *
856 * Inputs (explicit padding): 977 * Inputs (explicit padding):
857 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 978 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
858 * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. 979 * the input.
859 * * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. 980 * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
860 * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. 981 * the left, in the ‘width’ dimension.
861 * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. 982 * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
862 * * 5: An INT32 value, specifying the stride when walking through input 983 * the right, in the ‘width’ dimension.
863 * in the ‘width’ dimension. 984 * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
864 * * 6: An INT32 value, specifying the stride when walking through input 985 * the top, in the ‘height’ dimension.
865 * in the ‘height’ dimension. 986 * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
866 * * 7: An INT32 value, specifying the filter width. 987 * the bottom, in the ‘height’ dimension.
867 * * 8: An INT32 value, specifying the filter height. 988 * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
868 * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 989 * walking through input in the ‘width’ dimension.
869 * Specifies the activation to invoke on the result of each addition. 990 * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
991 * walking through input in the ‘height’ dimension.
992 * * 7: An {@link OperandType::INT32} scalar, specifying the filter
993 * width.
994 * * 8: An {@link OperandType::INT32} scalar, specifying the filter
995 * height.
996 * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
997 * {@link FusedActivationFunc} values. Specifies the activation to
998 * invoke on the result.
870 * 999 *
871 * Inputs (implicit padding): 1000 * Inputs (implicit padding):
872 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 1001 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
873 * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the 1002 * the input.
1003 * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
1004 * padding scheme, has to be one of the
874 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. 1005 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
875 * * 2: An INT32 value, specifying the stride when walking through input 1006 * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
876 * in the ‘width’ dimension. 1007 * walking through input in the ‘width’ dimension.
877 * * 3: An INT32 value, specifying the stride when walking through input 1008 * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
878 * in the ‘height’ dimension. 1009 * walking through input in the ‘height’ dimension.
879 * * 4: An INT32 value, specifying the filter width. 1010 * * 4: An {@link OperandType::INT32} scalar, specifying the filter
880 * * 5: An INT32 value, specifying the filter height. 1011 * width.
881 * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 1012 * * 5: An {@link OperandType::INT32} scalar, specifying the filter
882 * Specifies the activation to invoke on the result of each addition. 1013 * height.
1014 * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
1015 * {@link FusedActivationFunc} values. Specifies the activation to
1016 * invoke on the result.
883 * 1017 *
884 * Outputs: 1018 * Outputs:
885 * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth]. 1019 * * 0: The output 4-D tensor, of shape
1020 * [batches, out_height, out_width, depth].
886 */ 1021 */
887 MAX_POOL_2D = 17, 1022 MAX_POOL_2D = 17,
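The implicit padding schemes referenced by the pooling and convolution operations follow the usual SAME/VALID convention; a minimal C++ sketch of one spatial dimension is below. It is illustrative only, and the exact rounding shown is an assumption, not a statement of this HAL's required behavior:

    #include <algorithm>

    // One spatial dimension of the implicit padding schemes:
    // 1 (SAME) pads just enough that the output covers every input position,
    // 2 (VALID) uses no padding and keeps every window fully inside the input.
    struct Padding1D { int head; int tail; int outSize; };

    Padding1D implicitPadding(int inSize, int filterSize, int stride, int scheme) {
        Padding1D p{0, 0, 0};
        if (scheme == 1) {  // SAME
            p.outSize = (inSize + stride - 1) / stride;  // ceil(inSize / stride)
            const int needed =
                std::max(0, (p.outSize - 1) * stride + filterSize - inSize);
            p.head = needed / 2;
            p.tail = needed - p.head;
        } else {            // VALID
            p.outSize = (inSize - filterSize) / stride + 1;
        }
        return p;
    }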
888 1023
889 /** 1024 /**
890 * Multiplies two tensors, element-wise. 1025 * Multiplies two tensors, element-wise.
891 * 1026 *
892 * Takes two input tensors of identical type and compatible dimensions. The output 1027 * Takes two input tensors of identical {@link OperandType} and compatible
893 * is the product of both input tensors, optionally modified by an activation function. 1028 * dimensions. The output is the product of both input tensors, optionally
1029 * modified by an activation function.
894 * 1030 *
895 * Two dimensions are compatible when: 1031 * Two dimensions are compatible when:
896 * 1. they are equal, or 1032 * 1. they are equal, or
897 * 2. one of them is 1 1033 * 2. one of them is 1
898 * 1034 *
899 * The size of the resulting output is the maximum size along each dimension of the 1035 * The size of the resulting output is the maximum size along each dimension
900 * input operands. It starts with the trailing dimensions, and works its way forward. 1036 * of the input operands. It starts with the trailing dimensions, and works
1037 * its way forward.
901 * 1038 *
902 * Supported tensor types: 1039 * Supported tensor {@link OperandType}:
903 * * {@link OperandType::TENSOR_FLOAT32} 1040 * * {@link OperandType::TENSOR_FLOAT32}
904 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1041 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
905 * 1042 *
@@ -907,14 +1044,17 @@ enum OperationType : int32_t {
907 * 1044 *
908 * Inputs: 1045 * Inputs:
909 * * 0: A tensor. 1046 * * 0: A tensor.
910 * * 1: A tensor of the same type, and compatible dimensions as input0. 1047 * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
911 * * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 1048 * as input0.
912 * Specifies the activation to invoke on the result of each addition. 1049 * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
1050 * {@link FusedActivationFunc} values. Specifies the activation to
1051 * invoke on the result.
913 * 1052 *
914 * Outputs: 1053 * Outputs:
915 * * 0: The product, a tensor of the same type as input0. 1054 * * 0: The product, a tensor of the same {@link OperandType} as input0.
916 * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following 1055 * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
917 * condition must be satisfied: output_scale > input1_scale * input2_scale. 1056 * the following condition must be satisfied:
1057 * output_scale > input1_scale * input2_scale.
918 */ 1058 */
919 MUL = 18, 1059 MUL = 18,
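A minimal C++ sketch of the broadcasting rule quoted above for MUL (and ADD): dimensions are matched from the trailing end, each pair must be equal or contain a 1, and the output takes the larger extent (illustrative only, not part of this HAL):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Returns the broadcast output shape, or an empty vector for
    // incompatible shapes.
    std::vector<uint32_t> broadcastShape(const std::vector<uint32_t>& a,
                                         const std::vector<uint32_t>& b) {
        const size_t rank = std::max(a.size(), b.size());
        std::vector<uint32_t> out(rank, 1);
        for (size_t i = 0; i < rank; ++i) {  // i counts from the trailing dimension
            const uint32_t da = i < a.size() ? a[a.size() - 1 - i] : 1;
            const uint32_t db = i < b.size() ? b[b.size() - 1 - i] : 1;
            if (da != db && da != 1 && db != 1) return {};
            out[rank - 1 - i] = std::max(da, db);
        }
        return out;
    }

For example, shapes [2, 1, 5] and [3, 5] broadcast to [2, 3, 5].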
920 1060
@@ -925,7 +1065,7 @@ enum OperationType : int32_t {
925 * 1065 *
926 * output = max(0, input) 1066 * output = max(0, input)
927 * 1067 *
928 * Supported tensor types: 1068 * Supported tensor {@link OperandType}:
929 * * {@link OperandType::TENSOR_FLOAT32} 1069 * * {@link OperandType::TENSOR_FLOAT32}
930 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1070 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
931 * 1071 *
@@ -946,7 +1086,7 @@ enum OperationType : int32_t {
946 * 1086 *
947 * output = min(1.f, max(-1.f, input)) 1087 * output = min(1.f, max(-1.f, input))
948 * 1088 *
949 * Supported tensor types: 1089 * Supported tensor {@link OperandType}:
950 * * {@link OperandType::TENSOR_FLOAT32} 1090 * * {@link OperandType::TENSOR_FLOAT32}
951 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1091 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
952 * 1092 *
@@ -967,7 +1107,7 @@ enum OperationType : int32_t {
967 * 1107 *
968 * output = min(6, max(0, input)) 1108 * output = min(6, max(0, input))
969 * 1109 *
970 * Supported tensor types: 1110 * Supported tensor {@link OperandType}:
971 * * {@link OperandType::TENSOR_FLOAT32} 1111 * * {@link OperandType::TENSOR_FLOAT32}
972 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1112 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
973 * 1113 *
@@ -984,10 +1124,10 @@ enum OperationType : int32_t {
984 /** 1124 /**
985 * Reshapes a tensor. 1125 * Reshapes a tensor.
986 * 1126 *
987 * Given tensor, this operation returns a tensor that has the same values as tensor, 1127 * Given tensor, this operation returns a tensor that has the same values as
988 * but with a newly specified shape. 1128 * tensor, but with a newly specified shape.
989 * 1129 *
990 * Supported tensor types: 1130 * Supported tensor {@link OperandType}:
991 * * {@link OperandType::TENSOR_FLOAT32} 1131 * * {@link OperandType::TENSOR_FLOAT32}
992 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1132 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
993 * 1133 *
@@ -995,9 +1135,9 @@ enum OperationType : int32_t {
995 * 1135 *
996 * Inputs: 1136 * Inputs:
997 * * 0: A tensor, specifying the tensor to be reshaped. 1137 * * 0: A tensor, specifying the tensor to be reshaped.
998 * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32}, defining the shape 1138 * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}, defining the
999 * of the output tensor. The number of elements implied by shape must be the same 1139 * shape of the output tensor. The number of elements implied by shape
1000 * as the number of elements in the input tensor. 1140 * must be the same as the number of elements in the input tensor.
1001 * 1141 *
1002 * Outputs: 1142 * Outputs:
1003 * * 0: The output tensor, of shape specified by the input shape. 1143 * * 0: The output tensor, of shape specified by the input shape.
@@ -1007,21 +1147,26 @@ enum OperationType : int32_t {
1007 /** 1147 /**
1008 * Resizes images to a given size using bilinear interpolation. 1148 * Resizes images to a given size using bilinear interpolation.
1009 * 1149 *
1010 * Resized images must be distorted if their output aspect ratio is not the same as 1150 * Resized images must be distorted if their output aspect ratio is not the
1011 * input aspect ratio. 1151 * same as input aspect ratio. The corner pixels of output may not be the
1152 * same as corner pixels of input.
1012 * 1153 *
1013 * Supported tensor types: 1154 * Supported tensor {@link OperandType}:
1014 * * {@link OperandType::TENSOR_FLOAT32} 1155 * * {@link OperandType::TENSOR_FLOAT32}
1015 * 1156 *
1016 * Supported tensor rank: 4, with "NHWC" data layout. 1157 * Supported tensor rank: 4, with "NHWC" data layout.
1017 * 1158 *
1018 * Inputs: 1159 * Inputs:
1019 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 1160 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1020 * * 1: An INT32 value, specifying the output height of the output tensor. 1161 * the input.
1021 * * 2: An INT32 value, specifying the output width of the output tensor. 1162 * * 1: An {@link OperandType::INT32} scalar, specifying the output
1163 * height of the output tensor.
1164 * * 2: An {@link OperandType::INT32} scalar, specifying the output
1165 * width of the output tensor.
1022 * 1166 *
1023 * Outputs: 1167 * Outputs:
1024 * * 0: The output 4-D tensor, of shape [batches, new_height, new_width, depth]. 1168 * * 0: The output 4-D tensor, of shape
1169 * [batches, new_height, new_width, depth].
1025 */ 1170 */
1026 RESIZE_BILINEAR = 23, 1171 RESIZE_BILINEAR = 23,
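A minimal NHWC C++ sketch of RESIZE_BILINEAR (illustrative only, not part of this HAL). It assumes the scale = input_size / output_size convention without corner alignment, which is consistent with the note above that corner pixels need not be preserved:

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // NHWC bilinear resize: each output pixel interpolates between its four
    // nearest input pixels.
    void resizeBilinear(const std::vector<float>& input, int batches, int inH,
                        int inW, int depth, int outH, int outW,
                        std::vector<float>* output) {
        output->resize(static_cast<size_t>(batches) * outH * outW * depth);
        const float scaleH = static_cast<float>(inH) / outH;
        const float scaleW = static_cast<float>(inW) / outW;
        auto at = [&](int b, int h, int w, int c) {
            return input[((static_cast<size_t>(b) * inH + h) * inW + w) * depth + c];
        };
        for (int b = 0; b < batches; ++b)
            for (int y = 0; y < outH; ++y)
                for (int x = 0; x < outW; ++x) {
                    const float inY = y * scaleH;
                    const float inX = x * scaleW;
                    const int y0 = static_cast<int>(std::floor(inY));
                    const int x0 = static_cast<int>(std::floor(inX));
                    const int y1 = std::min(y0 + 1, inH - 1);
                    const int x1 = std::min(x0 + 1, inW - 1);
                    const float dy = inY - y0;
                    const float dx = inX - x0;
                    for (int c = 0; c < depth; ++c) {
                        const float top = at(b, y0, x0, c) * (1 - dx) + at(b, y0, x1, c) * dx;
                        const float bot = at(b, y1, x0, c) * (1 - dx) + at(b, y1, x1, c) * dx;
                        (*output)[((static_cast<size_t>(b) * outH + y) * outW + x) * depth + c] =
                            top * (1 - dy) + bot * dy;
                    }
                }
    }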
1027 1172
@@ -1029,7 +1174,8 @@ enum OperationType : int32_t {
1029 * A basic recurrent neural network layer. 1174 * A basic recurrent neural network layer.
1030 * 1175 *
1031 * This layer implements the operation: 1176 * This layer implements the operation:
1032 * outputs = state = activation(inputs * input_weights + state * recurrent_weights + bias) 1177 * outputs = state = activation(inputs * input_weights +
1178 * state * recurrent_weights + bias)
1033 * 1179 *
1034 * Where: 1180 * Where:
1035 * * “input_weights” is a weight matrix that multiplies the inputs; 1181 * * “input_weights” is a weight matrix that multiplies the inputs;
@@ -1040,42 +1186,49 @@ enum OperationType : int32_t {
1040 * * “activation” is the function passed as the “fused_activation_function” 1186 * * “activation” is the function passed as the “fused_activation_function”
1041 * argument (if not “NONE”). 1187 * argument (if not “NONE”).
1042 * 1188 *
1043 * Supported tensor types (Type T): 1189 * Supported tensor {@link OperandType}:
1044 * * {@link OperandType::TENSOR_FLOAT32} 1190 * * {@link OperandType::TENSOR_FLOAT32}
1045 * 1191 *
1046 * Inputs: 1192 * Inputs:
1047 * * 0: input. 1193 * * 0: input.
1048 * A 2-D tensor of type T, of shape [batch_size, input_size], where 1194 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32} of shape
1049 * “batch_size” corresponds to the batching dimension, and “input_size” is 1195 * [batch_size, input_size], where “batch_size” corresponds to the
1050 * the size of the input. 1196 * batching dimension, and “input_size is the size of the input.
1051 * * 1: weights. 1197 * * 1: weights.
1052 * A 2-D tensor of type T, of shape [num_units, input_size], where 1198 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1053 * “num_units” corresponds to the number of units. 1199 * [num_units, input_size], where “num_units” corresponds to the
1200 * number of units.
1054 * * 2: recurrent_weights. 1201 * * 2: recurrent_weights.
1055 * A 2-D tensor of type T, of shape [num_units, num_units], with columns 1202 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1056 * corresponding to the weights from each unit. 1203 * [num_units, num_units], with columns corresponding to the weights
1204 * from each unit.
1057 * * 3: bias. 1205 * * 3: bias.
1058 * A 1-D tensor of type T, of shape [num_units]. 1206 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1207 * [num_units].
1059 * * 4: hidden state (in). 1208 * * 4: hidden state (in).
1060 * A 2-D tensor of type T, of shape [batch_size, num_units]. 1209 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1210 * [batch_size, num_units].
1061 * * 5: fused_activation_function. 1211 * * 5: fused_activation_function.
1062 * An optional {@link FusedActivationFunc} value indicating the activation 1212 * An optional {@link FusedActivationFunc} value indicating the
1063 * function. If “NONE” is specified then it results in a linear 1213 * activation function. If “NONE” is specified then it results in a
1064 * activation. 1214 * linear activation.
1065 * 1215 *
1066 * Outputs: 1216 * Outputs:
1067 * * 0: hidden state (out). 1217 * * 0: hidden state (out).
1068 * A 2-D tensor of type T, of shape [batch_size, num_units]. 1218 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1219 * [batch_size, num_units].
1069 * 1220 *
1070 * * 1: output. 1221 * * 1: output.
1071 * A 2-D tensor of type T, of shape [batch_size, num_units]. This is 1222 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1072 * effectively the same as the current state value. 1223 * [batch_size, num_units]. This is effectively the same as the
1224 * current state value.
1073 */ 1225 */
1074 RNN = 24, 1226 RNN = 24,
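A minimal single-batch C++ sketch of one basic RNN step as defined above, with ReLU standing in for the fused activation (illustrative only, not part of this HAL):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // One basic RNN step for a single batch element:
    // new_state[u] = activation(x . weights[u] + state . recurrent[u] + bias[u]).
    // The result serves as both the "hidden state (out)" and the "output" operand.
    std::vector<float> rnnStep(const std::vector<float>& x,          // [input_size]
                               const std::vector<float>& weights,    // [num_units, input_size]
                               const std::vector<float>& recurrent,  // [num_units, num_units]
                               const std::vector<float>& bias,       // [num_units]
                               const std::vector<float>& state) {    // [num_units]
        const size_t numUnits = bias.size();
        std::vector<float> newState(numUnits);
        for (size_t u = 0; u < numUnits; ++u) {
            float acc = bias[u];
            for (size_t i = 0; i < x.size(); ++i) acc += x[i] * weights[u * x.size() + i];
            for (size_t j = 0; j < numUnits; ++j) acc += state[j] * recurrent[u * numUnits + j];
            newState[u] = std::max(0.0f, acc);  // fused activation = RELU in this sketch
        }
        return newState;
    }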
1075 1227
1076 /** 1228 /**
1077 * Computes the softmax activation on the input tensor element-wise, per batch, by 1229 * Computes the softmax activation on the input tensor element-wise, per
1078 * normalizing the input vector so the maximum coefficient is zero. 1230 * batch, by normalizing the input vector so the maximum coefficient is
1231 * zero.
1079 * 1232 *
1080 * The output is calculated using this formula: 1233 * The output is calculated using this formula:
1081 * 1234 *
@@ -1083,7 +1236,7 @@ enum OperationType : int32_t {
1083 * exp((input[batch, i] - max(input[batch, :])) * beta) / 1236 * exp((input[batch, i] - max(input[batch, :])) * beta) /
1084 * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)} 1237 * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
1085 * 1238 *
1086 * Supported tensor types: 1239 * Supported tensor {@link OperandType}:
1087 * * {@link OperandType::TENSOR_FLOAT32} 1240 * * {@link OperandType::TENSOR_FLOAT32}
1088 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1241 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1089 * 1242 *
@@ -1091,11 +1244,12 @@ enum OperationType : int32_t {
1091 * 1244 *
1092 * Inputs: 1245 * Inputs:
1093 * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. 1246 * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
1094 * * 1: A FLOAT32 value, specifying the positive scaling factor for the exponent, beta. 1247 * * 1: An {@link OperandType::FLOAT32} scalar, specifying the positive
1248 * scaling factor for the exponent, beta.
1095 * 1249 *
1096 * Outputs: 1250 * Outputs:
1097 * * 0: The output tensor of same shape as input0. 1251 * * 0: The output tensor of same shape as input0.
1098 * For {@link OperandType::TENSOR_QUANT8_ASYMM} type, 1252 * For {@link OperandType::TENSOR_QUANT8_ASYMM},
1099 * the scale must be 1.f / 256 and the zeroPoint must be 0. 1253 * the scale must be 1.f / 256 and the zeroPoint must be 0.
1100 */ 1254 */
1101 SOFTMAX = 25, 1255 SOFTMAX = 25,
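A small C++ sketch of the softmax formula above applied to one batch row, including the beta scaling factor; it is a reference illustration, not the runtime implementation.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// output[i] = exp((x[i] - max(x)) * beta) / sum_k exp((x[k] - max(x)) * beta)
std::vector<float> softmax(const std::vector<float>& x, float beta) {
    float m = *std::max_element(x.begin(), x.end());
    std::vector<float> out(x.size());
    float sum = 0.0f;
    for (size_t i = 0; i < x.size(); ++i) {
        out[i] = std::exp((x[i] - m) * beta);
        sum += out[i];
    }
    for (float& v : out) v /= sum;
    return out;
}

int main() {
    std::vector<float> probs = softmax({1.0f, 2.0f, 3.0f}, 1.0f);
    std::printf("%.4f %.4f %.4f\n", probs[0], probs[1], probs[2]);  // ~0.0900 0.2447 0.6652
    return 0;
}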
@@ -1103,30 +1257,33 @@ enum OperationType : int32_t {
1103 /** 1257 /**
1104 * Rearranges blocks of spatial data into depth. 1258 * Rearranges blocks of spatial data into depth.
1105 * 1259 *
1106 * More specifically, this op outputs a copy of the input tensor where values from 1260 * More specifically, this op outputs a copy of the input tensor where
1107 * the height and width dimensions are moved to the depth dimension. 1261 * values from the height and width dimensions are moved to the depth
1108 * The value block_size indicates the input block size and how the data is moved. 1262 * dimension. The value block_size indicates the input block size and how
1263 * the data is moved.
1109 * 1264 *
1110 * Chunks of data of size block_size * block_size from depth are rearranged into 1265 * Chunks of data of size block_size * block_size from depth are rearranged
1111 * non-overlapping blocks of size block_size x block_size. 1266 * into non-overlapping blocks of size block_size x block_size.
1112 * 1267 *
1113 * The depth of the output tensor is input_depth * block_size * block_size. 1268 * The depth of the output tensor is input_depth * block_size * block_size.
1114 * The input tensor's height and width must be divisible by block_size. 1269 * The input tensor's height and width must be divisible by block_size.
1115 * 1270 *
1116 * Supported tensor types: 1271 * Supported tensor {@link OperandType}:
1117 * * {@link OperandType::TENSOR_FLOAT32} 1272 * * {@link OperandType::TENSOR_FLOAT32}
1118 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1273 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1119 * 1274 *
1120 * Supported tensor rank: 4, with "NHWC" data layout. 1275 * Supported tensor rank: 4, with "NHWC" data layout.
1121 * 1276 *
1122 * Inputs: 1277 * Inputs:
1123 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. 1278 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
1124 * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and 1279 * specifying the input.
1125 * block_size must be a divisor of both the input height and width. 1280 * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
1281 * block_size must be >=1 and block_size must be a divisor of both the
1282 * input height and width.
1126 * 1283 *
1127 * Outputs: 1284 * Outputs:
1128 * * 0: The output 4-D tensor, of shape [batch, height/block_size, width/block_size, 1285 * * 0: The output 4-D tensor, of shape [batch, height/block_size,
1129 * depth*block_size*block_size]. 1286 * width/block_size, depth*block_size*block_size].
1130 */ 1287 */
1131 SPACE_TO_DEPTH = 26, 1288 SPACE_TO_DEPTH = 26,
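A reference C++ sketch of SPACE_TO_DEPTH for a single-batch NHWC tensor stored flat; the block offset within each block_size x block_size window becomes the high-order part of the output channel index. Names are illustrative.

#include <cstdio>
#include <vector>

// Output shape: [1, H / bs, W / bs, C * bs * bs].
std::vector<float> space_to_depth(const std::vector<float>& in,
                                  int H, int W, int C, int bs) {
    std::vector<float> out(in.size());
    int outW = W / bs, outC = C * bs * bs;
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            for (int c = 0; c < C; ++c) {
                int oh = h / bs, ow = w / bs;
                int oc = ((h % bs) * bs + (w % bs)) * C + c;  // block offset becomes depth
                out[(oh * outW + ow) * outC + oc] = in[(h * W + w) * C + c];
            }
        }
    }
    return out;
}

int main() {
    // 1x2x2x1 input [[1,2],[3,4]] with block_size 2 -> 1x1x1x4 output [1,2,3,4].
    std::vector<float> out = space_to_depth({1.f, 2.f, 3.f, 4.f}, 2, 2, 1, 2);
    for (float v : out) std::printf("%.0f ", v);
    std::printf("\n");
    return 0;
}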
1132 1289
@@ -1143,21 +1300,22 @@ enum OperationType : int32_t {
1143 * INTERSPEECH, 2015. 1300 * INTERSPEECH, 2015.
1144 * 1301 *
1145 * It processes the incoming input using a 2-stage filtering mechanism: 1302 * It processes the incoming input using a 2-stage filtering mechanism:
1146 * * stage 1 performs filtering on the "features" dimension, whose outputs get 1303 * * stage 1 performs filtering on the "features" dimension, whose outputs
1147 * pushed into a memory of fixed-size memory_size. 1304 * get pushed into a memory of fixed-size memory_size.
1148 * * stage 2 performs filtering on the "time" dimension of the memory_size 1305 * * stage 2 performs filtering on the "time" dimension of the memory_size
1149 * memoized outputs of stage 1. 1306 * memoized outputs of stage 1.
1150 * 1307 *
1151 * Specifically, for rank 1, this layer implements the operation: 1308 * Specifically, for rank 1, this layer implements the operation:
1152 * 1309 *
1153 * memory = push(conv1d(inputs, weights_feature, feature_dim, "PADDING_VALID")); 1310 * memory = push(conv1d(inputs, weights_feature, feature_dim,
1311 * "PADDING_VALID"));
1154 * outputs = activation(memory * weights_time + bias); 1312 * outputs = activation(memory * weights_time + bias);
1155 * 1313 *
1156 * Where: 1314 * Where:
1157 * * “weights_feature” is a weights matrix that processes the inputs (by 1315 * * “weights_feature” is a weights matrix that processes the inputs (by
1158 * convolving the input with every “feature filter”), and whose outputs get 1316 * convolving the input with every “feature filter”), and whose outputs
1159 * pushed, stacked in order, into the fixed-size “memory” (the oldest entry 1317 * get pushed, stacked in order, into the fixed-size “memory” (the oldest
1160 * gets dropped); 1318 * entry gets dropped);
1161 * * “weights_time” is a weights matrix that processes the “memory” (by a 1319 * * “weights_time” is a weights matrix that processes the “memory” (by a
1162 * batched matrix multiplication on the num_units); 1320 * batched matrix multiplication on the num_units);
1163 * * “bias” is an optional bias vector (added to each output vector in the 1321 * * “bias” is an optional bias vector (added to each output vector in the
@@ -1168,35 +1326,42 @@ enum OperationType : int32_t {
1168 * Each rank adds a dimension to the weights matrices by means of stacking 1326 * Each rank adds a dimension to the weights matrices by means of stacking
1169 * the filters. 1327 * the filters.
1170 * 1328 *
1171 * Supported tensor types (type T): 1329 * Supported tensor {@link OperandType}:
1172 * * {@link OperandType::TENSOR_FLOAT32} 1330 * * {@link OperandType::TENSOR_FLOAT32}
1173 * 1331 *
1174 * Inputs: 1332 * Inputs:
1175 * * 0: input. 1333 * * 0: input.
1176 * A 2-D tensor of type T, of shape [batch_size, input_size], where 1334 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1177 * “batch_size” corresponds to the batching dimension, and “input_size” is 1335 * [batch_size, input_size], where “batch_size” corresponds to the
1178 * the size of the input. 1336 * batching dimension, and “input_size” is the size of the input.
1179 * * 1: weights_feature. 1337 * * 1: weights_feature.
1180 * A 2-D tensor of type T, of shape [num_units, input_size], where 1338 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1181 * “num_units” corresponds to the number of units. 1339 * [num_units, input_size], where “num_units” corresponds to the
1340 * number of units.
1182 * * 2: weights_time. 1341 * * 2: weights_time.
1183 * A 2-D tensor of type T, of shape [num_units, memory_size], where 1342 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1184 * “memory_size” corresponds to the fixed size of the memory. 1343 * [num_units, memory_size], where “memory_size” corresponds to the
1344 * fixed size of the memory.
1185 * * 3: bias. 1345 * * 3: bias.
1186 * An optional 1-D tensor of type T, of shape [num_units]. 1346 * An optional 1-D tensor of {@link OperandType::TENSOR_FLOAT32},
1347 * of shape [num_units].
1187 * * 4: state (in). 1348 * * 4: state (in).
1188 * A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank]. 1349 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1350 * [batch_size, (memory_size - 1) * num_units * rank].
1189 * * 5: rank. 1351 * * 5: rank.
1190 * The rank of the SVD approximation. 1352 * The rank of the SVD approximation.
1191 * * 6: fused_activation_function. 1353 * * 6: fused_activation_function.
1192 * An optional {@link FusedActivationFunc} value indicating the activation function. 1354 * An optional {@link FusedActivationFunc} value indicating the
1193 * If “NONE” is specified then it results in a linear activation. 1355 * activation function. If “NONE” is specified then it results in a
1356 * linear activation.
1194 * 1357 *
1195 * Outputs: 1358 * Outputs:
1196 * * 0: state (out). 1359 * * 0: state (out).
1197 * A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank]. 1360 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1361 * [batch_size, (memory_size - 1) * num_units * rank].
1198 * * 1: output. 1362 * * 1: output.
1199 * A 2-D tensor of type T, of shape [batch_size, num_units]. 1363 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1364 * [batch_size, num_units].
1200 */ 1365 */
1201 SVDF = 27, 1366 SVDF = 27,
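A deliberately simplified, rank-1, single-batch C++ sketch of the two SVDF stages described above; it keeps an explicit per-unit memory buffer rather than the flattened state tensor the operation defines, and all names are illustrative.

#include <cstdio>
#include <deque>
#include <vector>

float dot(const std::vector<float>& a, const std::vector<float>& b) {
    float s = 0.0f;
    for (size_t i = 0; i < a.size(); ++i) s += a[i] * b[i];
    return s;
}

// Stage 1 projects the input onto each feature filter and pushes the result
// into a fixed-size per-unit memory (the oldest entry is dropped); stage 2
// filters that memory over time with weights_time and adds the bias.
std::vector<float> svdf_step(const std::vector<float>& input,                         // [input_size]
                             const std::vector<std::vector<float>>& weights_feature,  // [num_units][input_size]
                             const std::vector<std::vector<float>>& weights_time,     // [num_units][memory_size]
                             const std::vector<float>& bias,                          // [num_units]
                             std::vector<std::deque<float>>& memory) {                // [num_units][memory_size]
    std::vector<float> out(bias.size());
    for (size_t u = 0; u < bias.size(); ++u) {
        memory[u].pop_front();                                // drop the oldest entry
        memory[u].push_back(dot(input, weights_feature[u]));  // stage 1: feature filtering
        float acc = bias[u];                                  // stage 2: time filtering
        for (size_t t = 0; t < memory[u].size(); ++t) acc += memory[u][t] * weights_time[u][t];
        out[u] = acc;                                         // fused activation: NONE
    }
    return out;
}

int main() {
    // One unit, input_size 2, memory_size 3, zero-initialized memory.
    std::vector<std::deque<float>> memory = {std::deque<float>(3, 0.0f)};
    std::vector<float> out =
        svdf_step({1.0f, 1.0f}, {{0.5f, 0.5f}}, {{0.1f, 0.2f, 0.3f}}, {0.0f}, memory);
    std::printf("output: %.2f\n", out[0]);  // 1.0 * 0.3 = 0.30
    return 0;
}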
1202 1367
@@ -1207,7 +1372,7 @@ enum OperationType : int32_t {
1207 * 1372 *
1208 * output = tanh(input) 1373 * output = tanh(input)
1209 * 1374 *
1210 * Supported tensor types: 1375 * Supported tensor {@link OperandType}:
1211 * * {@link OperandType::TENSOR_FLOAT32} 1376 * * {@link OperandType::TENSOR_FLOAT32}
1212 * 1377 *
1213 * Supported tensor rank: up to 4. 1378 * Supported tensor rank: up to 4.
@@ -1223,7 +1388,8 @@ enum OperationType : int32_t {
1223 /** 1388 /**
1224 * OEM specific operation. 1389 * OEM specific operation.
1225 * 1390 *
1226 * This operation is OEM specific. It should only be used for OEM applications. 1391 * This operation is OEM specific. It should only be used for OEM
1392 * applications.
1227 */ 1393 */
1228 OEM_OPERATION = 10000, 1394 OEM_OPERATION = 10000,
1229}; 1395};
@@ -1270,8 +1436,8 @@ enum OperandLifeTime : int32_t {
1270 CONSTANT_REFERENCE, 1436 CONSTANT_REFERENCE,
1271 1437
1272 /** 1438 /**
1273 * The operand does not have a value. This is valid only for optional arguments 1439 * The operand does not have a value. This is valid only for optional
1274 * of operations. 1440 * arguments of operations.
1275 */ 1441 */
1276 NO_VALUE, 1442 NO_VALUE,
1277}; 1443};
@@ -1387,7 +1553,8 @@ struct Operand {
1387 1553
1388 /** 1554 /**
1389 * Where to find the data for this operand. 1555 * Where to find the data for this operand.
1390 * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or NO_VALUE: 1556 * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or
1557 * NO_VALUE:
1391 * - All the fields must be 0. 1558 * - All the fields must be 0.
1392 * If the lifetime is CONSTANT_COPY: 1559 * If the lifetime is CONSTANT_COPY:
1393 * - location.poolIndex is 0. 1560 * - location.poolIndex is 0.
@@ -1481,9 +1648,9 @@ struct Model {
1481 */ 1648 */
1482struct RequestArgument { 1649struct RequestArgument {
1483 /** 1650 /**
1484 * If true, the argument does not have a value. This can be used for operations 1651 * If true, the argument does not have a value. This can be used for
1485 * that take optional arguments. If true, the fields of location are set to 0 and 1652 * operations that take optional arguments. If true, the fields of location
1486 * the dimensions vector is left empty. 1653 * are set to 0 and the dimensions vector is left empty.
1487 */ 1654 */
1488 bool hasNoValue; 1655 bool hasNoValue;
1489 1656
@@ -1495,10 +1662,10 @@ struct RequestArgument {
1495 /** 1662 /**
1496 * Updated dimension information. 1663 * Updated dimension information.
1497 * 1664 *
1498 * If dimensions.size() > 0, dimension information was provided along with the 1665 * If dimensions.size() > 0, dimension information was provided along with
1499 * argument. This can be the case for models that accept inputs of varying size. 1666 * the argument. This can be the case for models that accept inputs of
1500 * This can't change the rank, just the value of the dimensions that were 1667 * varying size. This can't change the rank, just the value of the
1501 * unspecified in the model. 1668 * dimensions that were unspecified in the model.
1502 */ 1669 */
1503 vec<uint32_t> dimensions; 1670 vec<uint32_t> dimensions;
1504}; 1671};
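A tiny standalone C++ sketch of how the RequestArgument fields documented above fit together, using local stand-in structs that mirror the documented members (hasNoValue, location, dimensions); the real types come from the generated android.hardware.neuralnetworks@1.0 headers.

#include <cstdint>
#include <cstdio>
#include <vector>

// Local stand-ins mirroring the documented fields, for illustration only.
struct DataLocation { uint32_t poolIndex; uint32_t offset; uint32_t length; };
struct RequestArgument {
    bool hasNoValue;
    DataLocation location;
    std::vector<uint32_t> dimensions;
};

int main() {
    // Input 0: 4 floats living at byte offset 0 of memory pool 0, with a
    // previously unspecified dimension now fixed to 4.
    RequestArgument input0{false, {0, 0, 4 * sizeof(float)}, {4}};

    // Input 1 is an optional operand that is omitted: hasNoValue is true, the
    // location fields are all 0 and the dimensions vector is left empty.
    RequestArgument input1{true, {0, 0, 0}, {}};

    std::printf("input0: pool %u, %u bytes; input1 omitted: %s\n",
                input0.location.poolIndex, input0.location.length,
                input1.hasNoValue ? "yes" : "no");
    return 0;
}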
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index ed1fb944..0682ab95 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -36,16 +36,16 @@ namespace neuralnetworks {
36namespace generated_tests { 36namespace generated_tests {
37using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; 37using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
38using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; 38using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
39using ::generated_tests::filter; 39using ::test_helper::filter;
40using ::generated_tests::for_all; 40using ::test_helper::for_all;
41using ::generated_tests::for_each; 41using ::test_helper::for_each;
42using ::generated_tests::resize_accordingly; 42using ::test_helper::resize_accordingly;
43using ::generated_tests::MixedTyped; 43using ::test_helper::MixedTyped;
44using ::generated_tests::MixedTypedExampleType; 44using ::test_helper::MixedTypedExampleType;
45using ::generated_tests::Float32Operands; 45using ::test_helper::Float32Operands;
46using ::generated_tests::Int32Operands; 46using ::test_helper::Int32Operands;
47using ::generated_tests::Quant8Operands; 47using ::test_helper::Quant8Operands;
48using ::generated_tests::compare; 48using ::test_helper::compare;
49 49
50template <typename T> 50template <typename T>
51void copy_back_(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) { 51void copy_back_(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
index 2107333e..d84479c9 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
@@ -31,7 +31,7 @@ namespace hardware {
31namespace neuralnetworks { 31namespace neuralnetworks {
32 32
33namespace generated_tests { 33namespace generated_tests {
34using ::generated_tests::MixedTypedExampleType; 34using ::test_helper::MixedTypedExampleType;
35extern void Execute(const sp<V1_0::IDevice>&, std::function<V1_0::Model(void)>, 35extern void Execute(const sp<V1_0::IDevice>&, std::function<V1_0::Model(void)>,
36 std::function<bool(int)>, const std::vector<MixedTypedExampleType>&); 36 std::function<bool(int)>, const std::vector<MixedTypedExampleType>&);
37} // namespace generated_tests 37} // namespace generated_tests
@@ -45,7 +45,7 @@ using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCa
45using ::android::nn::allocateSharedMemory; 45using ::android::nn::allocateSharedMemory;
46 46
47// Mixed-typed examples 47// Mixed-typed examples
48typedef generated_tests::MixedTypedExampleType MixedTypedExample; 48typedef test_helper::MixedTypedExampleType MixedTypedExample;
49 49
50// in frameworks/ml/nn/runtime/tests/generated/ 50// in frameworks/ml/nn/runtime/tests/generated/
51#include "all_generated_V1_0_vts_tests.cpp" 51#include "all_generated_V1_0_vts_tests.cpp"
diff --git a/neuralnetworks/1.0/vts/functional/Models.h b/neuralnetworks/1.0/vts/functional/Models.h
index a1fbe927..751ab32a 100644
--- a/neuralnetworks/1.0/vts/functional/Models.h
+++ b/neuralnetworks/1.0/vts/functional/Models.h
@@ -30,7 +30,7 @@ namespace V1_0 {
30namespace vts { 30namespace vts {
31namespace functional { 31namespace functional {
32 32
33using MixedTypedExample = generated_tests::MixedTypedExampleType; 33using MixedTypedExample = test_helper::MixedTypedExampleType;
34 34
35#define FOR_EACH_TEST_MODEL(FN) \ 35#define FOR_EACH_TEST_MODEL(FN) \
36 FN(add_broadcast_quant8) \ 36 FN(add_broadcast_quant8) \
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
index 08f2613c..09c18785 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -36,9 +36,9 @@ namespace functional {
36using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; 36using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
37using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; 37using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
38using ::android::hidl::memory::V1_0::IMemory; 38using ::android::hidl::memory::V1_0::IMemory;
39using generated_tests::MixedTyped; 39using test_helper::MixedTyped;
40using generated_tests::MixedTypedExampleType; 40using test_helper::MixedTypedExampleType;
41using generated_tests::for_all; 41using test_helper::for_all;
42 42
43///////////////////////// UTILITY FUNCTIONS ///////////////////////// 43///////////////////////// UTILITY FUNCTIONS /////////////////////////
44 44
diff --git a/neuralnetworks/1.1/types.hal b/neuralnetworks/1.1/types.hal
index 8290fbbb..e4c656db 100644
--- a/neuralnetworks/1.1/types.hal
+++ b/neuralnetworks/1.1/types.hal
@@ -29,87 +29,95 @@ enum OperationType : @1.0::OperationType {
29 /** 29 /**
30 * BatchToSpace for N-dimensional tensors. 30 * BatchToSpace for N-dimensional tensors.
31 * 31 *
32 * This operation reshapes the batch dimension (dimension 0) into M + 1 dimensions of shape 32 * This operation reshapes the batch dimension (dimension 0) into M + 1
33 * block_shape + [batch], interleaves these blocks back into the grid defined by the 33 * dimensions of shape block_shape + [batch], interleaves these blocks back
34 * spatial dimensions [1, ..., M], to obtain a result with the same rank as the input. 34 * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
35 * result with the same rank as the input.
35 * 36 *
36 * This is the reverse of SpaceToBatch. 37 * This is the reverse of SpaceToBatch.
37 * 38 *
38 * Supported tensor types: 39 * Supported tensor {@link OperandType}:
39 * * {@link OperandType::TENSOR_FLOAT32} 40 * * {@link OperandType::TENSOR_FLOAT32}
40 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 41 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
41 * 42 *
42 * Supported tensor rank: 4 43 * Supported tensor rank: 4
43 * 44 *
44 * Inputs: 45 * Inputs:
45 * 0: An n-D tensor, specifying the tensor to be reshaped 46 * * 0: An n-D tensor, specifying the tensor to be reshaped
46 * 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the 47 * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
47 * input tensor. All values must be >= 1. 48 * sizes for each spatial dimension of the input tensor. All values
49 * must be >= 1.
48 * 50 *
49 * Outputs: 51 * Outputs:
50 * 0: A tensor of the same type as input0. 52 * * 0: A tensor of the same {@link OperandType} as input0.
51 */ 53 */
52 BATCH_TO_SPACE_ND = 29, 54 BATCH_TO_SPACE_ND = 29,
53 55
54 /** 56 /**
55 * Element-wise division of two tensors. 57 * Element-wise division of two tensors.
56 * 58 *
57 * Takes two input tensors of identical type and compatible dimensions. The output 59 * Takes two input tensors of identical {@link OperandType} and compatible
58 * is the result of dividing the first input tensor by the second, optionally 60 * dimensions. The output is the result of dividing the first input tensor
59 * modified by an activation function. 61 * by the second, optionally modified by an activation function.
60 * 62 *
61 * Two dimensions are compatible when: 63 * Two dimensions are compatible when:
62 * 1. they are equal, or 64 * 1. they are equal, or
63 * 2. one of them is 1 65 * 2. one of them is 1
64 * 66 *
65 * The size of the output is the maximum size along each dimension of the input operands. 67 * The size of the output is the maximum size along each dimension of the
66 * It starts with the trailing dimensions, and works its way forward. 68 * input operands. It starts with the trailing dimensions, and works its way
69 * forward.
67 * 70 *
68 * Example: 71 * Example:
69 * input1.dimension = {4, 1, 2} 72 * input1.dimension = {4, 1, 2}
70 * input2.dimension = {5, 4, 3, 1} 73 * input2.dimension = {5, 4, 3, 1}
71 * output.dimension = {5, 4, 3, 2} 74 * output.dimension = {5, 4, 3, 2}
72 * 75 *
73 * Supported tensor types: 76 * Supported tensor {@link OperandType}:
74 * * {@link OperandType::TENSOR_FLOAT32} 77 * * {@link OperandType::TENSOR_FLOAT32}
75 * 78 *
76 * Supported tensor rank: up to 4 79 * Supported tensor rank: up to 4
77 * 80 *
78 * Inputs: 81 * Inputs:
79 * 0: An n-D tensor, specifying the first input. 82 * * 0: An n-D tensor, specifying the first input.
80 * 1: A tensor of the same type, and compatible dimensions as input0. 83 * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
81 * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 84 * as input0.
82 * Specifies the activation to invoke on the result of each addition. 85 * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
86 * {@link FusedActivationFunc} values. Specifies the activation to
87 * invoke on the result.
83 * 88 *
84 * Outputs: 89 * Outputs:
85 * 0: A tensor of the same type as input0. 90 * * 0: A tensor of the same {@link OperandType} as input0.
86 */ 91 */
87 DIV = 30, 92 DIV = 30,
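A short C++ sketch of the broadcast rule described above for DIV (the later SUB follows the same rule): shapes are aligned on their trailing dimensions and the output takes the maximum size along each one. It assumes the inputs are already compatible and does no validation.

#include <algorithm>
#include <cstdio>
#include <vector>

std::vector<int> broadcast_shape(std::vector<int> a, std::vector<int> b) {
    if (a.size() < b.size()) std::swap(a, b);  // make 'a' the higher-rank shape
    std::vector<int> out = a;
    for (size_t i = 0; i < b.size(); ++i) {
        int& d = out[out.size() - 1 - i];
        d = std::max(d, b[b.size() - 1 - i]);  // a dimension of 1 stretches to match
    }
    return out;
}

int main() {
    // The example above: {4,1,2} and {5,4,3,1} -> {5,4,3,2}.
    std::vector<int> out = broadcast_shape({4, 1, 2}, {5, 4, 3, 1});
    for (int d : out) std::printf("%d ", d);
    std::printf("\n");
    return 0;
}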
88 93
89 /** 94 /**
90 * Computes the mean of elements across dimensions of a tensor. 95 * Computes the mean of elements across dimensions of a tensor.
91 * 96 *
92 * Reduces the input tensor along the given dimensions. Unless keep_dims 97 * Reduces the input tensor along the given dimensions. Unless
93 * is true, the rank of the tensor is reduced by 1 for each entry in axis. 98 * keep_dims is true, the rank of the tensor is reduced by 1 for each entry
94 * If keep_dims is true, the reduced dimensions are retained with length 1. 99 * in axis. If keep_dims is true, the reduced dimensions are retained with
100 * length 1.
95 * 101 *
96 * If dimensions to reduce have no entries, all dimensions are reduced, and a tensor with 102 * If dimensions to reduce have no entries, all dimensions are reduced, and
97 * a single element is returned. 103 * a tensor with a single element is returned.
98 * 104 *
99 * Supported tensor types: 105 * Supported tensor {@link OperandType}:
100 * * {@link OperandType::TENSOR_FLOAT32} 106 * * {@link OperandType::TENSOR_FLOAT32}
101 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 107 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
102 * 108 *
103 * Supported tensor rank: up to 4 109 * Supported tensor rank: up to 4
104 * 110 *
105 * Inputs: 111 * Inputs:
106 * 0: A tensor, specifying the input. 112 * * 0: A tensor, specifying the input.
107 * 1: A 1-D Tensor of type TENSOR_INT32. The dimensions to reduce. If None (the default), 113 * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}. The dimensions
108 * reduces all dimensions. Must be in the range [-rank(input_tensor), rank(input_tensor)). 114 * to reduce. If None (the default), reduces all dimensions. Must be in
109 * 2: An INT32 value, keep_dims. If positive, retains reduced dimensions with length 1. 115 * the range [-rank(input_tensor), rank(input_tensor)).
116 * * 2: An {@link OperandType::INT32} scalar, keep_dims. If positive,
117 * retains reduced dimensions with length 1.
110 * 118 *
111 * Outputs: 119 * Outputs:
112 * 0: A tensor of the same type as input0. 120 * * 0: A tensor of the same {@link OperandType} as input0.
113 */ 121 */
114 MEAN = 31, 122 MEAN = 31,
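A small C++ sketch of MEAN over axis 1 of a row-major [rows, cols] tensor; keep_dims only changes the reported shape ([rows, 1] versus [rows]), not the values. Names are illustrative.

#include <cstdio>
#include <vector>

std::vector<float> mean_axis1(const std::vector<float>& in, int rows, int cols) {
    std::vector<float> out(rows, 0.0f);
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c) out[r] += in[r * cols + c];
        out[r] /= cols;
    }
    return out;
}

int main() {
    // [[1,2,3],[4,5,6]] reduced over axis 1 -> [2, 5].
    std::vector<float> out = mean_axis1({1.f, 2.f, 3.f, 4.f, 5.f, 6.f}, 2, 3);
    std::printf("%.1f %.1f\n", out[0], out[1]);
    return 0;
}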
115 123
@@ -118,163 +126,193 @@ enum OperationType : @1.0::OperationType {
118 * 126 *
119 * This operation pads a tensor according to the specified paddings. 127 * This operation pads a tensor according to the specified paddings.
120 * 128 *
121 * Supported tensor types: 129 * Supported tensor {@link OperandType}:
122 * * {@link OperandType::TENSOR_FLOAT32} 130 * * {@link OperandType::TENSOR_FLOAT32}
123 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 131 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
124 * 132 *
125 * Supported tensor rank: up to 4 133 * Supported tensor rank: up to 4
126 * 134 *
127 * Inputs: 135 * Inputs:
128 * 0: An n-D tensor, specifying the tensor to be padded. 136 * * 0: An n-D tensor, specifying the tensor to be padded.
129 * 1: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the 137 * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
130 * input tensor. The shape of the tensor must be {rank(input0), 2}. 138 * for each spatial dimension of the input tensor. The shape of the
131 * padding[i, 0] specifies the number of elements to be padded in the front of dimension i. 139 * tensor must be {rank(input0), 2}.
132 * padding[i, 1] specifies the number of elements to be padded after the end of dimension i. 140 * padding[i, 0] specifies the number of elements to be padded in the
141 * front of dimension i.
142 * padding[i, 1] specifies the number of elements to be padded after the
143 * end of dimension i.
133 * 144 *
134 * Outputs: 145 * Outputs:
135 * 0: A tensor of the same type as input0. 146 * * 0: A tensor of the same {@link OperandType} as input0.
136 */ 147 */
137 PAD = 32, 148 PAD = 32,
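A minimal C++ sketch of PAD for the 2-D case: paddings[i][0] elements are inserted before and paddings[i][1] after dimension i, with the new elements set to zero. Names are illustrative.

#include <cstdio>
#include <vector>

std::vector<float> pad2d(const std::vector<float>& in, int rows, int cols,
                         const int paddings[2][2]) {
    int outRows = paddings[0][0] + rows + paddings[0][1];
    int outCols = paddings[1][0] + cols + paddings[1][1];
    std::vector<float> out(outRows * outCols, 0.0f);  // padded elements are zero
    for (int r = 0; r < rows; ++r)
        for (int c = 0; c < cols; ++c)
            out[(r + paddings[0][0]) * outCols + (c + paddings[1][0])] = in[r * cols + c];
    return out;
}

int main() {
    // Pad a 1x2 tensor [[1,2]] with one row above and one column on each side.
    const int paddings[2][2] = {{1, 0}, {1, 1}};
    std::vector<float> out = pad2d({1.f, 2.f}, 1, 2, paddings);
    // Result is 2x4: [[0,0,0,0],[0,1,2,0]]
    for (int r = 0; r < 2; ++r) {
        for (int c = 0; c < 4; ++c) std::printf("%.0f ", out[r * 4 + c]);
        std::printf("\n");
    }
    return 0;
}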
138 149
139 /** 150 /**
140 * SpaceToBatch for N-Dimensional tensors. 151 * SpaceToBatch for N-Dimensional tensors.
141 * 152 *
142 * This operation divides "spatial" dimensions [1, ..., M] of the input into a grid of blocks 153 * This operation divides "spatial" dimensions [1, ..., M] of the input into
143 * of shape block_shape, and interleaves these blocks with the "batch" dimension (0) such that 154 * a grid of blocks of shape block_shape, and interleaves these blocks with
144 * in the output, the spatial dimensions [1, ..., M] correspond to the position within the grid, 155 * the "batch" dimension (0) such that in the output, the spatial dimensions
145 * and the batch dimension combines both the position within a spatial block and the original 156 * [1, ..., M] correspond to the position within the grid, and the batch
146 * batch position. Prior to division into blocks, the spatial dimensions of the input are 157 * dimension combines both the position within a spatial block and the
147 * optionally zero padded according to paddings. 158 * original batch position. Prior to division into blocks, the spatial
159 * dimensions of the input are optionally zero padded according to paddings.
148 * 160 *
149 * Supported tensor types: 161 * Supported tensor {@link OperandType}:
150 * * {@link OperandType::TENSOR_FLOAT32} 162 * * {@link OperandType::TENSOR_FLOAT32}
151 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 163 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
152 * 164 *
153 * Supported tensor rank: 4 165 * Supported tensor rank: 4
154 * 166 *
155 * Inputs: 167 * Inputs:
156 * 0: An n-D tensor, specifying the input. 168 * * 0: An n-D tensor, specifying the input.
157 * 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the 169 * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
158 * input tensor. All values must be >= 1. 170 * sizes for each spatial dimension of the input tensor. All values
159 * 2: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the 171 * must be >= 1.
160 * input tensor. All values must be >= 0. The shape of the tensor must be {rank(input0), 2}. 172 * * 2: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
161 * padding[i, 0] specifies the number of elements to be padded in the front of dimension i. 173 * for each spatial dimension of the input tensor. All values must be
162 * padding[i, 1] specifies the number of elements to be padded after the end of dimension i. 174 * >= 0. The shape of the tensor must be {rank(input0), 2}.
175 * padding[i, 0] specifies the number of elements to be padded in the
176 * front of dimension i.
177 * padding[i, 1] specifies the number of elements to be padded after the
178 * end of dimension i.
163 * 179 *
164 * Outputs: 180 * Outputs:
165 * 0: A tensor of the same type as input0. 181 * * 0: A tensor of the same {@link OperandType} as input0.
166 */ 182 */
167 SPACE_TO_BATCH_ND = 33, 183 SPACE_TO_BATCH_ND = 33,
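A C++ sketch of the SPACE_TO_BATCH_ND shape arithmetic only (the element shuffling is omitted): each spatial dimension is zero padded and divided by its block size, and the batch dimension is multiplied by the product of the block sizes. The concrete numbers are illustrative.

#include <cstdio>

int main() {
    int n = 1, h = 4, w = 2, c = 1;      // input shape [N, H, W, C]
    int block[2] = {2, 2};               // block sizes for H and W
    int pad[2][2] = {{0, 0}, {1, 1}};    // paddings for H and W

    int paddedH = pad[0][0] + h + pad[0][1];
    int paddedW = pad[1][0] + w + pad[1][1];
    int outN = n * block[0] * block[1];
    int outH = paddedH / block[0];       // padded sizes must be divisible by the block sizes
    int outW = paddedW / block[1];

    std::printf("output shape: [%d, %d, %d, %d]\n", outN, outH, outW, c);  // [4, 2, 2, 1]
    return 0;
}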
168 184
169 /** 185 /**
170 * Removes dimensions of size 1 from the shape of a tensor. 186 * Removes dimensions of size 1 from the shape of a tensor.
171 * 187 *
172 * Given a tensor input, this operation returns a tensor of the same type with all 188 * Given a tensor input, this operation returns a tensor of the same
173 * dimensions of size 1 removed. If you don't want to remove all size 1 dimensions, 189 * {@link OperandType} with all dimensions of size 1 removed. If you don't
174 * you can remove specific size 1 dimensions by specifying the axes (input1). 190 * want to remove all size 1 dimensions, you can remove specific size 1
191 * dimensions by specifying the axes (input1).
175 * 192 *
176 * Supported tensor types: 193 * Supported tensor {@link OperandType}:
177 * * {@link OperandType::TENSOR_FLOAT32} 194 * * {@link OperandType::TENSOR_FLOAT32}
178 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 195 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
179 * 196 *
180 * Supported tensor rank: up to 4 197 * Supported tensor rank: up to 4
181 * 198 *
182 * Inputs: 199 * Inputs:
183 * 0: An n-D tensor, the tensor to be squeezed. 200 * * 0: An n-D tensor, the tensor to be squeezed.
184 * 1: An optional 1-D tensor of type TENSOR_INT32. The dimensions to squeeze. If specified 201 * * 1: An optional 1-D tensor of {@link OperandType::TENSOR_INT32}. The
185 * only squeezes the dimensions listed. Otherwise, squeezes all dimensions. 202 * dimensions to squeeze. If specified only squeezes the dimensions
186 * The dimension index starts at 0. An error must be reported if squeezing a dimension that 203 * listed. Otherwise, squeezes all dimensions. The dimension index
187 * is not 1. 204 * starts at 0. An error must be reported if squeezing a dimension that
205 * is not 1.
188 * 206 *
189 * Outputs: 207 * Outputs:
190 * 0: A tensor of the same type as input0. Contains the same data as input, but has one or more 208 * * 0: A tensor of the same {@link OperandType} as input0. Contains the
191 * dimensions of size 1 removed. 209 * same data as input, but has one or more dimensions of size 1
210 * removed.
192 */ 211 */
193 SQUEEZE = 34, 212 SQUEEZE = 34,
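A short C++ sketch of SQUEEZE as a shape-only transformation: size-1 dimensions (optionally restricted to the listed axes) are dropped and the element data is untouched. The check that a listed axis actually has size 1 is omitted here.

#include <cstdio>
#include <vector>

std::vector<int> squeeze(const std::vector<int>& dims, const std::vector<int>& axes) {
    std::vector<int> out;
    for (int i = 0; i < static_cast<int>(dims.size()); ++i) {
        bool listed = axes.empty();  // no axes given: squeeze every size-1 dimension
        for (int a : axes) listed = listed || (a == i);
        // Per the spec a listed axis that is not 1 must be reported as an error;
        // that check is left out of this sketch.
        if (listed && dims[i] == 1) continue;
        out.push_back(dims[i]);
    }
    return out;
}

int main() {
    std::vector<int> out = squeeze({1, 2, 1, 3}, {});  // -> {2, 3}
    for (int d : out) std::printf("%d ", d);
    std::printf("\n");
    out = squeeze({1, 2, 1, 3}, {2});                  // -> {1, 2, 3}
    for (int d : out) std::printf("%d ", d);
    std::printf("\n");
    return 0;
}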
194 213
195 /** 214 /**
196 * Extracts a strided slice of a tensor. 215 * Extracts a strided slice of a tensor.
197 * 216 *
198 * Roughly speaking, this op extracts a slice of size (end - begin) / stride from the given 217 * Roughly speaking, this op extracts a slice of size (end - begin) / stride
199 * input tensor. Starting at the location specified by begin the slice continues by adding 218 * from the given input tensor. Starting at the location specified by begin
200 * stride to the index until all dimensions are not less than end. Note that a stride can 219 * the slice continues by adding stride to the index until all dimensions
201 * be negative, which causes a reverse slice. 220 * are not less than end. Note that a stride can be negative, which causes a
221 * reverse slice.
202 * 222 *
203 * Supported tensor types: 223 * Supported tensor {@link OperandType}:
204 * * {@link OperandType::TENSOR_FLOAT32} 224 * * {@link OperandType::TENSOR_FLOAT32}
205 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 225 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
206 * 226 *
207 * Supported tensor rank: up to 4 227 * Supported tensor rank: up to 4
208 * 228 *
209 * Inputs: 229 * Inputs:
210 * 0: An n-D tensor, specifying the tensor to be sliced. 230 * * 0: An n-D tensor, specifying the tensor to be sliced.
211 * 1: A 1-D Tensor of type TENSOR_INT32, the starts of the dimensions of the input 231 * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the starts of
212 * tensor to be sliced. The length must be of rank(input0). 232 * the dimensions of the input tensor to be sliced. The length must be
213 * 2: A 1-D Tensor of type TENSOR_INT32, the ends of the dimensions of the input 233 * of rank(input0).
214 * tensor to be sliced. The length must be of rank(input0). 234 * * 2: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the ends of
215 * 3: A 1-D Tensor of type TENSOR_INT32, the strides of the dimensions of the input 235 * the dimensions of the input tensor to be sliced. The length must be
216 * tensor to be sliced. The length must be of rank(input0). 236 * of rank(input0).
237 * * 3: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the strides of
238 * the dimensions of the input tensor to be sliced. The length must be
239 * of rank(input0).
240 * * 4: An {@link OperandType::INT32} scalar, begin_mask. If the ith bit
241 * of begin_mask is set, begin[i] is ignored and the fullest possible
242 * range in that dimension is used instead.
243 * * 5: An {@link OperandType::INT32} scalar, end_mask. If the ith bit of
244 * end_mask is set, end[i] is ignored and the fullest possible range in
245 * that dimension is used instead.
246 * * 6: An {@link OperandType::INT32} scalar, shrink_axis_mask. An int32
247 * mask. If the ith bit of shrink_axis_mask is set, it implies that the
248 * ith specification shrinks the dimensionality by 1. A slice of size 1
249 * starting from begin[i] in the dimension must be preserved.
217 * 250 *
218 * Outputs: 251 * Outputs:
219 * 0: A tensor of the same type as input0. 252 * * 0: A tensor of the same {@link OperandType} as input0.
220 */ 253 */
221 STRIDED_SLICE = 35, 254 STRIDED_SLICE = 35,
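A minimal C++ sketch of STRIDED_SLICE for the 1-D case: starting at begin, the index advances by stride until it reaches end, and a negative stride walks backwards. begin_mask, end_mask and shrink_axis_mask are left out of this sketch.

#include <cstdio>
#include <vector>

std::vector<float> strided_slice(const std::vector<float>& in, int begin, int end, int stride) {
    std::vector<float> out;
    if (stride > 0)
        for (int i = begin; i < end; i += stride) out.push_back(in[i]);
    else
        for (int i = begin; i > end; i += stride) out.push_back(in[i]);  // reverse slice
    return out;
}

int main() {
    std::vector<float> in = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f};
    for (float v : strided_slice(in, 1, 5, 2)) std::printf("%.0f ", v);   // 1 3
    std::printf("\n");
    for (float v : strided_slice(in, 4, 0, -2)) std::printf("%.0f ", v);  // 4 2
    std::printf("\n");
    return 0;
}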
222 255
223 /** 256 /**
224 * Element-wise subtraction of two tensors. 257 * Element-wise subtraction of two tensors.
225 * 258 *
226 * Takes two input tensors of identical type and compatible dimensions. The output 259 * Takes two input tensors of identical {@link OperandType} and compatible
227 * is the result of subtracting the second input tensor from the first one, optionally 260 * dimensions. The output is the result of subtracting the second input
228 * modified by an activation function. 261 * tensor from the first one, optionally modified by an activation function.
229 * 262 *
230 * Two dimensions are compatible when: 263 * Two dimensions are compatible when:
231 * 1. they are equal, or 264 * 1. they are equal, or
232 * 2. one of them is 1 265 * 2. one of them is 1
233 * 266 *
234 * The size of the output is the maximum size along each dimension of the input operands. 267 * The size of the output is the maximum size along each dimension of the
235 * It starts with the trailing dimensions, and works its way forward. 268 * input operands. It starts with the trailing dimensions, and works its way
269 * forward.
236 * 270 *
237 * Example: 271 * Example:
238 * input1.dimension = {4, 1, 2} 272 * input1.dimension = {4, 1, 2}
239 * input2.dimension = {5, 4, 3, 1} 273 * input2.dimension = {5, 4, 3, 1}
240 * output.dimension = {5, 4, 3, 2} 274 * output.dimension = {5, 4, 3, 2}
241 * 275 *
242 * Supported tensor types: 276 * Supported tensor {@link OperandType}:
243 * * {@link OperandType::TENSOR_FLOAT32} 277 * * {@link OperandType::TENSOR_FLOAT32}
244 * 278 *
245 * Supported tensor rank: up to 4 279 * Supported tensor rank: up to 4
246 * 280 *
247 * Inputs: 281 * Inputs:
248 * 0: An n-D tensor, specifying the first input. 282 * * 0: An n-D tensor, specifying the first input.
249 * 1: A tensor of the same type, and compatible dimensions as input0. 283 * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
250 * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 284 * as input0.
251 * Specifies the activation to invoke on the result of each addition. 285 * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
286 * {@link FusedActivationFunc} values. Specifies the activation to
287 * invoke on the result.
252 * 288 *
253 * Outputs: 289 * Outputs:
254 * 0: A tensor of the same type as input0. 290 * * 0: A tensor of the same {@link OperandType} as input0.
255 */ 291 */
256 SUB = 36, 292 SUB = 36,
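A small C++ sketch of SUB on same-shape inputs followed by the fused activation selected by input 2, here RELU, which clamps negative results to zero; broadcasting follows the same rule as in the DIV sketch earlier. Names are illustrative.

#include <algorithm>
#include <cstdio>
#include <vector>

std::vector<float> sub_relu(const std::vector<float>& a, const std::vector<float>& b) {
    std::vector<float> out(a.size());
    for (size_t i = 0; i < a.size(); ++i)
        out[i] = std::max(0.0f, a[i] - b[i]);  // subtract, then apply the fused RELU
    return out;
}

int main() {
    std::vector<float> out = sub_relu({1.f, 2.f, 3.f}, {2.f, 2.f, 2.f});
    std::printf("%.0f %.0f %.0f\n", out[0], out[1], out[2]);  // 0 0 1
    return 0;
}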
257 293
258 /** 294 /**
259 * Transposes the input tensor, permuting the dimensions according to the perm tensor. 295 * Transposes the input tensor, permuting the dimensions according to the
296 * perm tensor.
260 * 297 *
261 * The returned tensor's dimension i corresponds to the input dimension perm[i]. 298 * The returned tensor's dimension i corresponds to the input dimension
262 * If perm is not given, it is set to (n-1...0), where n is the rank of the input tensor. 299 * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
263 * Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors. 300 * rank of the input tensor. Hence by default, this operation performs a
301 * regular matrix transpose on 2-D input Tensors.
264 * 302 *
265 * Supported tensor types: 303 * Supported tensor {@link OperandType}:
266 * * {@link OperandType::TENSOR_FLOAT32} 304 * * {@link OperandType::TENSOR_FLOAT32}
267 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 305 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
268 * 306 *
269 * Supported tensor rank: up to 4 307 * Supported tensor rank: up to 4
270 * 308 *
271 * Inputs: 309 * Inputs:
272 * 0: An n-D tensor, specifying the tensor to be transposed. 310 * * 0: An n-D tensor, specifying the tensor to be transposed.
273 * 1: An optional 1-D Tensor of type TENSOR_INT32, the permutation of the dimensions of the 311 * * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32},
274 * input tensor. 312 * the permutation of the dimensions of the input tensor.
275 * 313 *
276 * Outputs: 314 * Outputs:
277 * 0: A tensor of the same type as input0. 315 * * 0: A tensor of the same {@link OperandType} as input0.
278 */ 316 */
279 TRANSPOSE = 37, 317 TRANSPOSE = 37,
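A reference C++ sketch of TRANSPOSE with the default perm (n-1...0) on a 2-D tensor, i.e. a regular matrix transpose where output dimension i takes input dimension perm[i]. Names are illustrative.

#include <cstdio>
#include <vector>

std::vector<float> transpose2d(const std::vector<float>& in, int rows, int cols) {
    std::vector<float> out(in.size());
    for (int r = 0; r < rows; ++r)
        for (int c = 0; c < cols; ++c)
            out[c * rows + r] = in[r * cols + c];  // swap the row and column indices
    return out;
}

int main() {
    // [[1,2,3],[4,5,6]] (2x3) -> [[1,4],[2,5],[3,6]] (3x2)
    std::vector<float> out = transpose2d({1.f, 2.f, 3.f, 4.f, 5.f, 6.f}, 2, 3);
    for (int r = 0; r < 3; ++r) std::printf("%.0f %.0f\n", out[r * 2], out[r * 2 + 1]);
    return 0;
}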
280}; 318};
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
index 1f1cc7af..95c2b1ac 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
@@ -31,7 +31,7 @@ namespace hardware {
31namespace neuralnetworks { 31namespace neuralnetworks {
32 32
33namespace generated_tests { 33namespace generated_tests {
34using ::generated_tests::MixedTypedExampleType; 34using ::test_helper::MixedTypedExampleType;
35extern void Execute(const sp<V1_1::IDevice>&, std::function<V1_1::Model(void)>, 35extern void Execute(const sp<V1_1::IDevice>&, std::function<V1_1::Model(void)>,
36 std::function<bool(int)>, const std::vector<MixedTypedExampleType>&); 36 std::function<bool(int)>, const std::vector<MixedTypedExampleType>&);
37} // namespace generated_tests 37} // namespace generated_tests
diff --git a/neuralnetworks/1.1/vts/functional/Models.h b/neuralnetworks/1.1/vts/functional/Models.h
index 7fb26968..cc0fac12 100644
--- a/neuralnetworks/1.1/vts/functional/Models.h
+++ b/neuralnetworks/1.1/vts/functional/Models.h
@@ -31,7 +31,7 @@ namespace V1_1 {
31namespace vts { 31namespace vts {
32namespace functional { 32namespace functional {
33 33
34using MixedTypedExample = generated_tests::MixedTypedExampleType; 34using MixedTypedExample = test_helper::MixedTypedExampleType;
35 35
36#define FOR_EACH_TEST_MODEL(FN) \ 36#define FOR_EACH_TEST_MODEL(FN) \
37 FN(add) \ 37 FN(add) \
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
index b42f561c..687b760e 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -36,9 +36,9 @@ namespace functional {
36using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; 36using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
37using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; 37using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
38using ::android::hidl::memory::V1_0::IMemory; 38using ::android::hidl::memory::V1_0::IMemory;
39using generated_tests::MixedTyped; 39using test_helper::MixedTyped;
40using generated_tests::MixedTypedExampleType; 40using test_helper::MixedTypedExampleType;
41using generated_tests::for_all; 41using test_helper::for_all;
42 42
43///////////////////////// UTILITY FUNCTIONS ///////////////////////// 43///////////////////////// UTILITY FUNCTIONS /////////////////////////
44 44