author    android-build-team Robot    2018-04-12 02:21:14 -0500
committer android-build-team Robot    2018-04-12 02:21:14 -0500
commitba70b6434d3680a8765bb056dd4039c08cd9fb77 (patch)
treea93e7d4c8c99ba15f168e67733efe39ad1ac18d3
parent22448b385f7e3c06fba57a7228080e9a84c27818 (diff)
parent1f88e2092fa424b28a82fa59bce523b59776ac7c (diff)
download  platform-hardware-interfaces-ba70b6434d3680a8765bb056dd4039c08cd9fb77.tar.gz
platform-hardware-interfaces-ba70b6434d3680a8765bb056dd4039c08cd9fb77.tar.xz
platform-hardware-interfaces-ba70b6434d3680a8765bb056dd4039c08cd9fb77.zip
Snap for 4716599 from 1f88e2092fa424b28a82fa59bce523b59776ac7c to pi-release
Change-Id: Ia07f982ad69ac9f66d88cfeab52c14a1ce75db55
-rw-r--r--  bluetooth/a2dp/1.0/vts/functional/Android.bp                              26
-rw-r--r--  bluetooth/a2dp/1.0/vts/functional/VtsHalBluetoothA2dpV1_0TargetTest.cpp  126
-rw-r--r--  current.txt                                                               10
-rw-r--r--  keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp                  22
-rw-r--r--  neuralnetworks/1.0/IDevice.hal                                             2
-rw-r--r--  neuralnetworks/1.0/IExecutionCallback.hal                                  2
-rw-r--r--  neuralnetworks/1.0/types.hal                                             996
-rw-r--r--  neuralnetworks/1.0/vts/functional/Callbacks.h                              4
-rw-r--r--  neuralnetworks/1.1/IDevice.hal                                             2
-rw-r--r--  neuralnetworks/1.1/types.hal                                             126
-rw-r--r--  radio/1.2/vts/functional/radio_hidl_hal_api.cpp                          120
-rw-r--r--  radio/1.2/vts/functional/radio_hidl_hal_test.cpp                          18
-rw-r--r--  radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h                     111
-rw-r--r--  radio/1.2/vts/functional/radio_indication.cpp                             29
-rw-r--r--  radio/1.2/vts/functional/radio_response.cpp                               45
15 files changed, 1059 insertions(+), 580 deletions(-)
diff --git a/bluetooth/a2dp/1.0/vts/functional/Android.bp b/bluetooth/a2dp/1.0/vts/functional/Android.bp
new file mode 100644
index 00000000..f1ffc459
--- /dev/null
+++ b/bluetooth/a2dp/1.0/vts/functional/Android.bp
@@ -0,0 +1,26 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_test {
+    name: "VtsHalBluetoothA2dpV1_0TargetTest",
+    defaults: ["VtsHalTargetTestDefaults"],
+    srcs: ["VtsHalBluetoothA2dpV1_0TargetTest.cpp"],
+    static_libs: [
+        "android.hardware.bluetooth@1.0",
+        "android.hardware.bluetooth.a2dp@1.0",
+        "libbluetooth-types",
+    ],
+}
diff --git a/bluetooth/a2dp/1.0/vts/functional/VtsHalBluetoothA2dpV1_0TargetTest.cpp b/bluetooth/a2dp/1.0/vts/functional/VtsHalBluetoothA2dpV1_0TargetTest.cpp
new file mode 100644
index 00000000..1a0342f3
--- /dev/null
+++ b/bluetooth/a2dp/1.0/vts/functional/VtsHalBluetoothA2dpV1_0TargetTest.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "bluetooth_a2dp_hidl_hal_test"
+
+#include <android-base/logging.h>
+#include <android/hardware/bluetooth/a2dp/1.0/IBluetoothAudioHost.h>
+#include <android/hardware/bluetooth/a2dp/1.0/IBluetoothAudioOffload.h>
+#include <hardware/bluetooth.h>
+#include <utils/Log.h>
+
+#include <VtsHalHidlTargetCallbackBase.h>
+#include <VtsHalHidlTargetTestBase.h>
+#include <VtsHalHidlTargetTestEnvBase.h>
+
+using ::android::hardware::bluetooth::a2dp::V1_0::IBluetoothAudioHost;
+using ::android::hardware::bluetooth::a2dp::V1_0::IBluetoothAudioOffload;
+using ::android::hardware::bluetooth::a2dp::V1_0::Status;
+using ::android::hardware::bluetooth::a2dp::V1_0::CodecType;
+using ::android::hardware::bluetooth::a2dp::V1_0::SampleRate;
+using ::android::hardware::bluetooth::a2dp::V1_0::BitsPerSample;
+using ::android::hardware::bluetooth::a2dp::V1_0::ChannelMode;
+using ::android::hardware::bluetooth::a2dp::V1_0::CodecConfiguration;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+// Test environment for Bluetooth HIDL A2DP HAL.
+class BluetoothA2dpHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
+  public:
+    // get the test environment singleton
+    static BluetoothA2dpHidlEnvironment* Instance() {
+        static BluetoothA2dpHidlEnvironment* instance = new BluetoothA2dpHidlEnvironment;
+        return instance;
+    }
+
+    virtual void registerTestServices() override { registerTestService<IBluetoothAudioOffload>(); }
+
+  private:
+    BluetoothA2dpHidlEnvironment() {}
+};
+
+// The main test class for Bluetooth A2DP HIDL HAL.
+class BluetoothA2dpHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+  public:
+    virtual void SetUp() override {
+        // currently test passthrough mode only
+        audio_offload = ::testing::VtsHalHidlTargetTestBase::getService<IBluetoothAudioOffload>(
+            BluetoothA2dpHidlEnvironment::Instance()->getServiceName<IBluetoothAudioOffload>());
+        ASSERT_NE(audio_offload, nullptr);
+
+        audio_host = new BluetoothAudioHost(*this);
+        ASSERT_NE(audio_host, nullptr);
+
+        codec.codecType = CodecType::AAC;
+        codec.sampleRate = SampleRate::RATE_44100;
+        codec.bitsPerSample = BitsPerSample::BITS_16;
+        codec.channelMode = ChannelMode::STEREO;
+        codec.encodedAudioBitrate = 320000;
+        codec.peerMtu = 1000;
+    }
+
+    virtual void TearDown() override {}
+
+    // A simple test implementation of IBluetoothAudioHost.
+    class BluetoothAudioHost
+        : public ::testing::VtsHalHidlTargetCallbackBase<BluetoothA2dpHidlTest>,
+          public IBluetoothAudioHost {
+        BluetoothA2dpHidlTest& parent_;
+
+       public:
+        BluetoothAudioHost(BluetoothA2dpHidlTest& parent) : parent_(parent){};
+        virtual ~BluetoothAudioHost() = default;
+
+        Return<void> startStream() override {
+            parent_.audio_offload->streamStarted(Status::SUCCESS);
+            return Void();
+        };
+
+        Return<void> suspendStream() override {
+            parent_.audio_offload->streamSuspended(Status::SUCCESS);
+            return Void();
+        };
+
+        Return<void> stopStream() override { return Void(); };
+    };
+
+    // audio_host is for the Audio HAL to send stream start/suspend/stop commands to Bluetooth
+    sp<IBluetoothAudioHost> audio_host;
+    // audio_offload is for the Bluetooth HAL to report session started/ended and handled audio
+    // stream started/suspended
+    sp<IBluetoothAudioOffload> audio_offload;
+    // codec is the currently used codec
+    CodecConfiguration codec;
+};
+
+// Empty test: Initialize()/Close() are called in SetUp()/TearDown().
+TEST_F(BluetoothA2dpHidlTest, InitializeAndClose) {}
+
+// Test start and end session
+TEST_F(BluetoothA2dpHidlTest, StartAndEndSession) {
+    EXPECT_EQ(Status::SUCCESS, audio_offload->startSession(audio_host, codec));
+    audio_offload->endSession();
+}
+
+int main(int argc, char** argv) {
+    ::testing::AddGlobalTestEnvironment(BluetoothA2dpHidlEnvironment::Instance());
+    ::testing::InitGoogleTest(&argc, argv);
+    BluetoothA2dpHidlEnvironment::Instance()->init(&argc, argv);
+    int status = RUN_ALL_TESTS();
+    LOG(INFO) << "Test result = " << status;
+    return status;
+}
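
The session round trip this test drives can be sketched from the Bluetooth stack's side. A minimal illustrative stub, not part of this change: only the IBluetoothAudioOffload/IBluetoothAudioHost method names come from the HAL above; the class and member names are hypothetical.

// Hypothetical vendor-side sketch of the interface this test exercises.
#include <android/hardware/bluetooth/a2dp/1.0/IBluetoothAudioHost.h>
#include <android/hardware/bluetooth/a2dp/1.0/IBluetoothAudioOffload.h>

using ::android::sp;
using ::android::hardware::Return;
using ::android::hardware::Void;
using namespace ::android::hardware::bluetooth::a2dp::V1_0;

struct OffloadSketch : public IBluetoothAudioOffload {
    sp<IBluetoothAudioHost> host_;

    // startSession(): remember the host callback and accept the codec; the
    // StartAndEndSession test expects Status::SUCCESS here.
    Return<Status> startSession(const sp<IBluetoothAudioHost>& hostIf,
                                const CodecConfiguration& /*codecConfig*/) override {
        host_ = hostIf;
        return Status::SUCCESS;
    }

    // streamStarted()/streamSuspended(): acknowledgements for the commands the
    // test's BluetoothAudioHost issues from startStream()/suspendStream().
    Return<void> streamStarted(Status /*status*/) override { return Void(); }
    Return<void> streamSuspended(Status /*status*/) override { return Void(); }

    // endSession(): drop the host reference; the session is over.
    Return<void> endSession() override {
        host_ = nullptr;
        return Void();
    }
};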
diff --git a/current.txt b/current.txt
index f7447970..e79e2d6f 100644
--- a/current.txt
+++ b/current.txt
@@ -241,11 +241,11 @@ a432d6d9200248dc2126827bcd6cdea31dd65eff39b939f64585d27d915a5857 android.hardwar
 86ba9c03978b79a742e990420bc5ced0673d25a939f82572996bef92621e2014 android.hardware.cas@1.0::IMediaCasService
 503da837d1a67cbdb7c08a033e927e5430ae1b159d98bf72c6336b4dcc5e76f5 android.hardware.cas.native@1.0::types
 619600109232ed64b827c8a11beed8070b1827ae464547d7aa146cf0473b4bca android.hardware.cas.native@1.0::IDescrambler
-246a56d37d57a47224562c9d077b4a2886ce6242b9311bd98a17325944c280d7 android.hardware.neuralnetworks@1.0::types
 93eb3757ceaf21590fa4cd1d4a7dfe3b3794af5396100a6d25630879352abce9 android.hardware.neuralnetworks@1.0::IDevice
 f66f9a38541bf92001d3adcce678cd7e3da2262124befb460b1c9aea9492813b android.hardware.neuralnetworks@1.0::IExecutionCallback
 953607822954435874f4b81686440a604e2a88cdd2d9164c6293f3d5772510d7 android.hardware.neuralnetworks@1.0::IPreparedModel
 73e03573494ba96f0e711ab7f1956c5b2d54c3da690cd7ecf4d6d0f287447730 android.hardware.neuralnetworks@1.0::IPreparedModelCallback
+246a56d37d57a47224562c9d077b4a2886ce6242b9311bd98a17325944c280d7 android.hardware.neuralnetworks@1.0::types
 f4945e397b5dea41bb64518dfde59be71245d8a125fd1e0acffeb57ac7b08fed android.hardware.thermal@1.1::IThermal
 c8bc853546dd55584611def2a9fa1d99f657e3366c976d2f60fe6b8aa6d2cb87 android.hardware.thermal@1.1::IThermalCallback
 
@@ -258,7 +258,9 @@ cf72ff5a52bfa4d08e9e1000cf3ab5952a2d280c7f13cdad5ab7905c08050766 android.hardwar
 fb92e2b40f8e9d494e8fd3b4ac18499a3216342e7cff160714c3bbf3660b6e79 android.hardware.gnss@1.0::IGnssConfiguration
 251594ea9b27447bfa005ebd806e58fb0ae4aad84a69938129c9800ec0c64eda android.hardware.gnss@1.0::IGnssMeasurementCallback
 4e7169919d24fbe5573e5bcd683d0bd7abf553a4e6c34c41f9dfc1e12050db07 android.hardware.gnss@1.0::IGnssNavigationMessageCallback
-1488db5ffb8a7979488d1084761aab8bca2f59bc9a02d75cdefc296afeaf591b android.hardware.neuralnetworks@1.0::types
+5804ca86611d72e5481f022b3a0c1b334217f2e4988dad25730c42af2d1f4d1c android.hardware.neuralnetworks@1.0::IDevice
+12e8dca4ab7d8aadd0ef8f1b438021938e2396139e85db2ed65783b08800aa52 android.hardware.neuralnetworks@1.0::IExecutionCallback
+702f9a4cd3b7486a4b04f7155b737757ac2ca4b3548976d5782ad3cae9ff9780 android.hardware.neuralnetworks@1.0::types
 d4840db8efabdf1e4b344fc981cd36e5fe81a39aff6e199f6d06c1c8da413efd android.hardware.radio@1.0::types
 b280c4704dfcc548a9bf127b59b7c3578f460c50cce70a06b66fe0df8b27cff0 android.hardware.wifi@1.0::types
 
@@ -336,8 +338,8 @@ e15ebdf1e0a326ff5b8a59668d4d8cd3852bd88388eae91de13f5f7e1af50ed1 android.hardwar
 b8c7ed58aa8740361e63d0ce9e7c94227572a629f356958840b34809d2393a7c android.hardware.media.bufferpool@1.0::IClientManager
 4a2c0dc82780e6c90731725a103feab8ab6ecf85a64e049b9cbd2b2c61620fe1 android.hardware.media.bufferpool@1.0::IConnection
 6aef1218e5949f867b0104752ac536c1b707222a403341720de90141df129e3e android.hardware.media.bufferpool@1.0::types
-1529409ed76ae87facab152b770495e9e62544fcc5215daabf146c28d588bab9 android.hardware.neuralnetworks@1.1::IDevice
-e808a6f61cd7b47887c599d8843e67a2dcbf4ec5aadd5d22fdce93020070ef1b android.hardware.neuralnetworks@1.1::types
+3e4d8e0085ebe8549efb8ad4b8b400a141a3fa3f47ae23696b3e05a1612eb003 android.hardware.neuralnetworks@1.1::IDevice
+50db076b03a6760557fc60ef433ba9dd2ff983cf3305eeb504b0fff3eaa604ff android.hardware.neuralnetworks@1.1::types
 8d3d86da0bfa4bf070970d8303c659f67f35d670c287d45a3f542e4fedadd578 android.hardware.nfc@1.1::INfc
 e85f566698d2a2c28100e264fcf2c691a066756ddf8dd341d009ff50cfe10614 android.hardware.nfc@1.1::INfcClientCallback
 5e278fcaa3287d397d8eebe1c22aaa28150f5caae1cf9381cd6dc32cb37899c5 android.hardware.nfc@1.1::types
diff --git a/keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp b/keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp
index fbe5237a..3a181a96 100644
--- a/keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp
+++ b/keymaster/3.0/vts/functional/keymaster_hidl_hal_test.cpp
@@ -2918,28 +2918,6 @@ TEST_F(EncryptionOperationsTest, AesEcbRoundTripSuccess) {
 }
 
 /*
- * EncryptionOperationsTest.AesEcbWithUserId
- *
- * Verifies that AES ECB mode works when Tag::USER_ID is specified.
- */
-TEST_F(EncryptionOperationsTest, AesEcbWithUserId) {
-    string key = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-    ASSERT_EQ(ErrorCode::OK, ImportKey(AuthorizationSetBuilder()
-                                           .Authorization(TAG_NO_AUTH_REQUIRED)
-                                           .Authorization(TAG_USER_ID, 0)
-                                           .AesEncryptionKey(key.size() * 8)
-                                           .EcbMode()
-                                           .Padding(PaddingMode::PKCS7),
-                                       KeyFormat::RAW, key));
-
-    string message = "Hello World!";
-    auto params = AuthorizationSetBuilder().BlockMode(BlockMode::ECB).Padding(PaddingMode::PKCS7);
-    string ciphertext = EncryptMessage(message, params);
-    string plaintext = DecryptMessage(ciphertext, params);
-    EXPECT_EQ(message, plaintext);
-}
-
-/*
  * EncryptionOperationsTest.AesEcbRoundTripSuccess
  *
  * Verifies that AES encryption fails in the correct way when an unauthorized mode is specified.
diff --git a/neuralnetworks/1.0/IDevice.hal b/neuralnetworks/1.0/IDevice.hal
index 49c29674..62fb2bae 100644
--- a/neuralnetworks/1.0/IDevice.hal
+++ b/neuralnetworks/1.0/IDevice.hal
@@ -36,7 +36,7 @@ interface IDevice {
     /**
      * Gets the supported operations in a model.
      *
-     * getSupportedSubgraph indicates which operations of a model are fully
+     * getSupportedOperations indicates which operations of a model are fully
      * supported by the vendor driver. If an operation may not be supported for
      * any reason, getSupportedOperations must return false for that operation.
      *
diff --git a/neuralnetworks/1.0/IExecutionCallback.hal b/neuralnetworks/1.0/IExecutionCallback.hal
index ef0f4549..9c061669 100644
--- a/neuralnetworks/1.0/IExecutionCallback.hal
+++ b/neuralnetworks/1.0/IExecutionCallback.hal
@@ -28,7 +28,7 @@ interface IExecutionCallback {
      * ErrorStatus resulting from the execution. If the asynchronous task
      * is not launched, notify must be invoked with the appropriate error.
      *
-     * @return param Error status returned from launching the asynchronous task
+     * @param status Error status returned from launching the asynchronous task
      *               (if the launch fails) or from the asynchronous task itself
      *               (if the launch succeeds). Must be:
      *               - NONE if the asynchronous execution was successful
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index a9c91cd4..8c07fcc3 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -24,38 +24,40 @@ package android.hardware.neuralnetworks@1.0;
  * Types prefaced with TENSOR_* must be used for tensor data (i.e., tensors
  * with at least one dimension). Types not prefaced by TENSOR_* represent
  * scalar values and must have no dimensions.
+ *
+ * Although many types are defined, most operators accept just a few
+ * types. Most used are {@link OperandType::TENSOR_FLOAT32},
+ * {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * and {@link OperandType::INT32}.
  */
 enum OperandType : int32_t {
-    /**
-     * The following entries are used to declare scalars.
-     */
+    /** A 32 bit floating point scalar value. */
     FLOAT32 = 0,
+    /** A signed 32 bit integer scalar value. */
     INT32 = 1,
+    /** An unsigned 32 bit integer scalar value. */
     UINT32 = 2,
 
-    /**
-     * The following entries are used to declare tensors.
-     */
+    /** A tensor of 32 bit floating point values. */
     TENSOR_FLOAT32 = 3,
+    /** A tensor of 32 bit integer values. */
     TENSOR_INT32 = 4,
-
-    /**
-     * A tensor of 8 bit integers that represent real numbers.
+    /** A tensor of 8 bit integers that represent real numbers.
      *
      * Attached to this tensor are two numbers that can be used to convert the
      * 8 bit integer to the real value and vice versa. These two numbers are:
-     * - scale: a 32 bit floating point value greater than zero
-     * - zero_value: a 32 bit integer
+     * - scale: a 32 bit floating point value greater than zero.
+     * - zeroPoint: a 32 bit integer, in range [0, 255].
      *
      * The formula is:
-     * real_value = (integer_value - zero_value) * scale.
+     * real_value = (integer_value - zeroPoint) * scale.
      */
     TENSOR_QUANT8_ASYMM = 5,
 
-    /**
-     * The following entries are OEM specific operand types.
-     */
+    /** OEM specific scalar value. */
     OEM = 10000,
+
+    /** A tensor of OEM specific values. */
     TENSOR_OEM_BYTE = 10001,
 };
 
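
As a worked instance of the TENSOR_QUANT8_ASYMM formula above, a small C++ sketch (the helper names are illustrative, not HAL API):

// Asymmetric 8-bit quantization as documented above:
// real_value = (integer_value - zeroPoint) * scale.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

float dequantize(uint8_t q, float scale, int32_t zeroPoint) {
    return (static_cast<int32_t>(q) - zeroPoint) * scale;
}

uint8_t quantize(float real, float scale, int32_t zeroPoint) {
    int32_t q = static_cast<int32_t>(std::round(real / scale)) + zeroPoint;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));  // zeroPoint is in [0, 255]
}

int main() {
    const float scale = 0.5f;
    const int32_t zeroPoint = 128;
    uint8_t q = quantize(-3.5f, scale, zeroPoint);             // -> 121
    std::printf("%u -> %f\n", q, dequantize(q, scale, zeroPoint));  // 121 -> -3.5
}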
@@ -66,9 +68,9 @@ enum OperandType : int32_t {
  */
 enum OperationType : int32_t {
     /**
-     * Adds two tensors, elment-wise.
+     * Adds two tensors, element-wise.
      *
      * Takes two input tensors of identical type and compatible dimensions. The output
      * is the sum of both input tensors, optionally modified by an activation function.
      *
      * Two dimensions are compatible when:
@@ -79,22 +81,25 @@ enum OperationType : int32_t {
      * It starts with the trailing dimensions, and works its way forward.
      *
      * Example:
+     *
      * input1.dimension = {4, 1, 2}
      * input2.dimension = {5, 4, 3, 1}
      * output.dimension = {5, 4, 3, 2}
      *
-     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
      * Supported tensor rank: up to 4
      *
      * Inputs:
-     * 0: A tensor.
-     * 1: A tensor of the same type, and compatible dimensions as input0.
-     * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     * * 0: A tensor.
+     * * 1: A tensor of the same type, and compatible dimensions as input0.
+     * * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
      *      Specifies the activation to invoke on the result of each addition.
      *
-     * Ouputs:
-     * 0: The sum, a tensor of the same type as input0.
+     * Outputs:
+     * * 0: The sum, a tensor of the same type as input0.
      */
     ADD = 0,
 
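
The dimension-compatibility rule for ADD can be made concrete with a small shape helper (an illustrative sketch, not driver code):

// ADD's broadcast rule: walk both shapes from the trailing dimension;
// dimensions are compatible when equal or when one of them is 1.
#include <algorithm>
#include <cstdio>
#include <vector>

bool broadcastShape(const std::vector<int>& a, const std::vector<int>& b,
                    std::vector<int>* out) {
    size_t rank = std::max(a.size(), b.size());
    out->assign(rank, 1);
    for (size_t i = 0; i < rank; ++i) {  // i counts from the trailing dimension
        int da = i < a.size() ? a[a.size() - 1 - i] : 1;
        int db = i < b.size() ? b[b.size() - 1 - i] : 1;
        if (da != db && da != 1 && db != 1) return false;
        (*out)[rank - 1 - i] = std::max(da, db);
    }
    return true;
}

int main() {
    std::vector<int> out;
    // The example from the comment: {4, 1, 2} + {5, 4, 3, 1} -> {5, 4, 3, 2}.
    if (broadcastShape({4, 1, 2}, {5, 4, 3, 1}, &out))
        for (int d : out) std::printf("%d ", d);  // prints: 5 4 3 2
}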
@@ -103,29 +108,50 @@ enum OperationType : int32_t {
      *
      * The output dimensions are functions of the filter dimensions, stride, and padding.
      *
-     * The values in output Tensor is computed as:
+     * The values in the output tensor are computed as:
+     *
      * output[batch, row, col, channel] =
      *     sum_{i, j}(input[batch, row + i, col + j, channel]) / sum(1)
      *
-     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
-     * Supported tensor rank: 4, with "NHWC" data layout.
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width, and Channels)
+     * data layout.
+     *
+     * Both explicit padding and implicit padding are supported.
+     *
+     * Inputs (explicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+     * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+     * * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
+     * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+     * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+     * * 5: An INT32 value, specifying the stride when walking through input
+     *      in the ‘width’ dimension.
+     * * 6: An INT32 value, specifying the stride when walking through input
+     *      in the ‘height’ dimension.
+     * * 7: An INT32 value, specifying the filter width.
+     * * 8: An INT32 value, specifying the filter height.
+     * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *      Specifies the activation to invoke on the result of each addition.
+     *
+     * Inputs (implicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+     * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
+     *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+     * * 2: An INT32 value, specifying the stride when walking through input
+     *      in the ‘width’ dimension.
+     * * 3: An INT32 value, specifying the stride when walking through input
+     *      in the ‘height’ dimension.
+     * * 4: An INT32 value, specifying the filter width.
+     * * 5: An INT32 value, specifying the filter height.
+     * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *      Specifies the activation to invoke on the result of each addition.
      *
-     * Inputs:
-     * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
-     * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
-     * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
-     * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
-     * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
-     * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
-     * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
-     * 7: An INT32 value, specifying the filter width.
-     * 8: An INT32 value, specifying the filter height.
-     * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *    Specifies the activation to invoke on the result of each addition.
-     *
-     * Ouputs:
-     * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
      */
     AVERAGE_POOL_2D = 1,
 
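
How out_height/out_width follow from filter, stride, and padding is not spelled out in the diff; a sketch using the conventional formulas for these padding schemes:

// Output-dimension arithmetic for AVERAGE_POOL_2D-style operations.
#include <cstdio>

// Explicit padding: the walk covers (in + padBefore + padAfter - filter) steps.
int outDimExplicit(int in, int filter, int stride, int padBefore, int padAfter) {
    return (in - filter + padBefore + padAfter) / stride + 1;
}

// Implicit padding scheme 2 (VALID): no padding at all.
int outDimValid(int in, int filter, int stride) {
    return (in - filter + stride) / stride;  // == ceil((in - filter + 1) / stride)
}

// Implicit padding scheme 1 (SAME): pad so that out == ceil(in / stride).
int outDimSame(int in, int stride) {
    return (in + stride - 1) / stride;
}

int main() {
    // A 224-wide input, 2-wide filter, stride 2:
    std::printf("explicit: %d\n", outDimExplicit(224, 2, 2, 0, 0));  // 112
    std::printf("valid:    %d\n", outDimValid(224, 2, 2));           // 112
    std::printf("same:     %d\n", outDimSame(224, 2));               // 112
}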
@@ -135,19 +161,21 @@ enum OperationType : int32_t {
      * The input tensors must have identical type and the same dimensions except the
      * dimension along the concatenation axis.
      *
-     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
      * Supported tensor rank: up to 4
      *
      * Inputs:
-     * 0 ~ n: The list on n input tensors, of shape [D0, D1, ..., Daxis(i), ..., Dm]
-     * n+1: An INT32 value, specifying the concatenation axis.
-     * n+2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *      Specifies the activation to invoke on the result of each addition.
+     * * 0 ~ n-1: The list of n input tensors, of shape [D0, D1, ..., Daxis(i), ..., Dm].
+     *            For inputs of {@link OperandType::TENSOR_QUANT8_ASYMM} type, all
+     *            input tensors must have the same scale and zeroPoint.
+     * * n: An INT32 value, specifying the concatenation axis.
      *
-     * Ouputs:
-     * 0: The output, a tensor of the same type as the input tensors.
-     *    The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
+     * Outputs:
+     * * 0: The output, a tensor of the same type as the input tensors.
+     *      The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
      */
     CONCATENATION = 2,
 
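
The shape rule above (all inputs agree on every dimension except the concatenation axis, which is summed) as a sketch:

#include <cstdio>
#include <vector>

// Computes [D0, D1, ..., sum(Daxis(i)), ..., Dm] from the input shapes.
bool concatShape(const std::vector<std::vector<int>>& ins, int axis,
                 std::vector<int>* out) {
    *out = ins[0];
    for (size_t i = 1; i < ins.size(); ++i) {
        if (ins[i].size() != out->size()) return false;
        for (size_t d = 0; d < out->size(); ++d) {
            if (static_cast<int>(d) == axis) (*out)[d] += ins[i][d];
            else if (ins[i][d] != (*out)[d]) return false;  // off-axis dims must match
        }
    }
    return true;
}

int main() {
    std::vector<int> out;
    if (concatShape({{2, 3, 4}, {2, 5, 4}}, 1, &out))
        std::printf("%d %d %d\n", out[0], out[1], out[2]);  // prints: 2 8 4
}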
@@ -159,7 +187,8 @@ enum OperationType : int32_t {
      *
      * The output dimensions are functions of the filter dimensions, stride, and padding.
      *
-     * The values in output Tensor is computed as:
+     * The values in the output tensor are computed as:
+     *
      * output[batch, row, col, channel] =
      *     sum_{i, j} (
      *         input[batch, row + i, col + j, k] *
@@ -167,77 +196,135 @@ enum OperationType : int32_t {
      *         bias[channel]
      *     )
      *
-     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
      * Supported tensor rank: 4, with "NHWC" data layout.
      *
-     * Inputs:
-     * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
-     * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
-     *    specifying the filter.
-     * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
-     *    For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
-     *    also be of {@link OperandType::TENSOR_FLOAT32}.
-     *    For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
-     *    should be of {@link OperandType::TENSOR_INT32}.
-     * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
-     * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
-     * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
-     * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
-     * 7: An INT32 value, specifying the output stride in the ‘width’ dimension.
-     * 8: An INT32 value, specifying the output stride in the ‘height’ dimension.
-     * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *    Specifies the activation to invoke on the result of each addition.
-     *
-     * Ouputs:
-     * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+     * Both explicit padding and implicit padding are supported.
+     *
+     * Inputs (explicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+     * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
+     *      specifying the filter.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+     *      also be of {@link OperandType::TENSOR_FLOAT32}.
+     *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+     *      bias_scale == input_scale * filter_scale.
+     * * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+     * * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
+     * * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+     * * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+     * * 7: An INT32 value, specifying the stride when walking through input
+     *      in the ‘width’ dimension.
+     * * 8: An INT32 value, specifying the stride when walking through input
+     *      in the ‘height’ dimension.
+     * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *      Specifies the activation to invoke on the result of each addition.
+     *
+     * Inputs (implicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+     * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
+     *      specifying the filter.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+     *      also be of {@link OperandType::TENSOR_FLOAT32}.
+     *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+     *      bias_scale == input_scale * filter_scale.
+     * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the
+     *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+     * * 4: An INT32 value, specifying the stride when walking through input
+     *      in the ‘width’ dimension.
+     * * 5: An INT32 value, specifying the stride when walking through input
+     *      in the ‘height’ dimension.
+     * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *      Specifies the activation to invoke on the result of each addition.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+     *      For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
+     *      condition must be satisfied: output_scale > input_scale * filter_scale.
      */
     CONV_2D = 3,
 
     /**
-     * Performs an depthwise 2-D convolution operation.
+     * Performs a depthwise 2-D convolution operation.
      *
      * Given an input tensor of shape [batches, height, width, depth_in] and a filter
-     * tensor of shape [depth_out, filter_height, filter_width, depth_in] containing
-     * in_channels convolutional filters of depth 1, DEPTHWISE_CONV applies a different
+     * tensor of shape [1, filter_height, filter_width, depth_out] containing
+     * depth_out convolutional filters of depth 1, DEPTHWISE_CONV applies a different
      * filter to each input channel (expanding from 1 channel to channel_multiplier channels
      * for each), then concatenates the results together.
      *
     * The output has depth_out = depth_in * depth_multiplier channels.
      * The output dimensions are functions of the filter dimensions, stride, and padding.
      *
-     * The values in output Tensor is computed as:
+     * The values in the output tensor are computed as:
+     *
      * output[b, i, j, k * channel_multiplier + q] =
      *     sum_{di, dj} (
      *         input[b, strides[1] * i + di, strides[2] * j + dj, k] *
-     *         filter[di, dj, k, q]
+     *         filter[1, di, dj, k * channel_multiplier + q]
      *     )
      *
-     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
      * Supported tensor rank: 4, with "NHWC" data layout.
      *
-     * Inputs:
-     * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
-     * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
-     *    specifying the filter.
-     * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
-     *    For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
-     *    also be of {@link OperandType::TENSOR_FLOAT32}.
-     *    For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
-     *    should be of {@link OperandType::TENSOR_INT32}.
-     * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
-     * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
-     * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
-     * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
-     * 7: An INT32 value, specifying the output stride in the ‘width’ dimension.
-     * 8: An INT32 value, specifying the output stride in the ‘height’ dimension.
-     * 9: An INT32 value, specifying the depthwise multiplier.
-     * 10: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *     Specifies the activation to invoke on the result of each addition.
-     *
-     * Ouputs:
-     * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+     * Both explicit padding and implicit padding are supported.
+     *
+     * Inputs (explicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+     * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+     *      specifying the filter.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+     *      also be of {@link OperandType::TENSOR_FLOAT32}.
+     *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+     *      bias_scale == input_scale * filter_scale.
+     * * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+     * * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
+     * * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+     * * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+     * * 7: An INT32 value, specifying the stride when walking through input
+     *      in the ‘width’ dimension.
+     * * 8: An INT32 value, specifying the stride when walking through input
+     *      in the ‘height’ dimension.
+     * * 9: An INT32 value, specifying the depthwise multiplier.
+     * * 10: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *       Specifies the activation to invoke on the result of each addition.
+     *
+     * Inputs (implicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+     * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+     *      specifying the filter.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
+     *      also be of {@link OperandType::TENSOR_FLOAT32}.
+     *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
+     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+     *      bias_scale == input_scale * filter_scale.
+     * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the
+     *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+     * * 4: An INT32 value, specifying the stride when walking through input
+     *      in the ‘width’ dimension.
+     * * 5: An INT32 value, specifying the stride when walking through input
+     *      in the ‘height’ dimension.
+     * * 6: An INT32 value, specifying the depthwise multiplier.
+     * * 7: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *      Specifies the activation to invoke on the result of each addition.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
+     *      For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
+     *      condition must be satisfied: output_scale > input_scale * filter_scale.
      */
     DEPTHWISE_CONV_2D = 4,
 
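
The quantized-operand constraints that CONV_2D and DEPTHWISE_CONV_2D now state can be checked mechanically; a sketch (the rationale comment reflects the usual requantization argument, not text from the diff):

#include <cstdio>

// For TENSOR_QUANT8_ASYMM: bias_scale == input_scale * filter_scale, and
// output_scale > input_scale * filter_scale.
bool convScalesValid(float inputScale, float filterScale,
                     float biasScale, float outputScale) {
    const float product = inputScale * filterScale;
    const bool biasOk = (biasScale == product);     // exact by construction
    const bool outputOk = (outputScale > product);  // keeps the requantize multiplier below 1
    return biasOk && outputOk;
}

int main() {
    // input 0.5, filter 0.25 -> bias scale must be 0.125, output scale > 0.125.
    std::printf("%d\n", convScalesValid(0.5f, 0.25f, 0.125f, 0.2f));  // 1
    std::printf("%d\n", convScalesValid(0.5f, 0.25f, 0.125f, 0.1f));  // 0
}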
@@ -255,18 +342,20 @@ enum OperationType : int32_t {
      * input_height * block_size.
      * The depth of the input tensor must be divisible by block_size * block_size
      *
-     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
      * Supported tensor rank: 4, with "NHWC" data layout.
      *
      * Inputs:
-     * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
-     * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+     * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
      *      block_size * block_size must be a divisor of the input depth.
      *
-     * Ouputs:
-     * 0: The output 4-D tensor, of shape [batch, height*block_size, width*block_size,
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape [batch, height*block_size, width*block_size,
      *      depth/(block_size*block_size)].
      */
     DEPTH_TO_SPACE = 5,
 
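
The shape arithmetic stated above, as a sketch:

#include <cstdio>

// DEPTH_TO_SPACE: depth shrinks by block_size * block_size while height and
// width grow by block_size; fails when depth is not divisible, per the spec.
bool depthToSpaceShape(int batch, int h, int w, int depth, int block, int out[4]) {
    if (block < 1 || depth % (block * block) != 0) return false;
    out[0] = batch;
    out[1] = h * block;
    out[2] = w * block;
    out[3] = depth / (block * block);
    return true;
}

int main() {
    int out[4];
    if (depthToSpaceShape(1, 2, 2, 8, 2, out))
        std::printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 1 4 4 2
}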
@@ -274,53 +363,69 @@ enum OperationType : int32_t {
      * Dequantizes the input tensor.
      *
      * The formula is:
-     * output = (input - zero_value) * scale.
      *
-     * Supported tensor types: {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * output = (input - zeroPoint) * scale.
+     *
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
      * Supported tensor rank: up to 4
      *
      * Inputs:
-     * 0: A tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}.
+     * * 0: A tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}.
      *
-     * Ouputs:
-     * 0: The output tensor of same shape as input0, but with type
-          {@link OperandType::TENSOR_FLOAT32}.
+     * Outputs:
+     * * 0: The output tensor of same shape as input0, but with type
+     *      {@link OperandType::TENSOR_FLOAT32}.
      */
     DEQUANTIZE = 6,
 
     /**
-     * Looks up items from a given tensor.
+     * Looks up sub-tensors in the input tensor.
+     *
+     * This operator takes for input a tensor of values (Values) and
+     * a one-dimensional tensor of selection indices (Lookups).
+     * The output tensor is the concatenation of sub-tensors of Values as
+     * selected by Lookups.
+     *
+     * Think of Values as being sliced along its first dimension:
+     * The entries in Lookups select which slices are concatenated together
+     * to create the output tensor.
+     *
+     * For example, if Values has shape of [40, 200, 300] and
+     * Lookups has shape of [3], all three values found in Lookups are
+     * expected to be between 0 and 39. The resulting tensor must
+     * have shape of [3, 200, 300].
      *
-     * Each item in the output is a raw copy of the corresponding item in
-     * the input “values”. If the the given “lookup” indices are out of bounds,
-     * the op will fail and an error will be reported.
+     * If a value in Lookups is out of bounds, the operation must fail
+     * and an error must be reported.
      *
      * Inputs:
-     * * 0: Values. An n-D tensor of any type X (where n >= 2). E.g., if n is 2,
-     *      then the shape would be [lookup_dimension, values_dimension], where
-     *      “lookup_dimension” corresponds to the indexing dimension in the lookup
-     *      table, and “values_dimension” to the contents.
-     * * 1: Lookups. An 1-D tensor of type T, of shape [lookup_size], where
-     *      “lookup_size” is the number of elements to look for, and each entry
-     *      corresponds to the first dimension of the “values” tensor.
+     * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32} type.
+     *      The values are indices into the first dimension of Values.
+     * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
+     *      extracted.
      *
      * Output:
-     * * 0: A n-D tensor of type X and the same rank and shape as the “values”
-     *      tensor, except for the first dimension which has size “lookup_size”.
+     * * 0: A n-D tensor with the same rank and shape as the Values
+     *      tensor, except for the first dimension which has the same size
+     *      as Lookups' only dimension.
      */
     EMBEDDING_LOOKUP = 7,
 
     /**
      * Computes element-wise floor() on the input tensor.
      *
-     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
      * Supported tensor rank: up to 4
      *
      * Inputs:
-     * 0: A tensor.
+     * * 0: A tensor.
      *
-     * Ouputs:
-     * 0: The output, a tensor of the same type and dimensions as input0.
+     * Outputs:
+     * * 0: The output tensor, of the same type and dimensions as the input tensor.
      */
     FLOOR = 8,
 
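
The EMBEDDING_LOOKUP semantics described above, as a sketch over flattened data:

#include <cstdio>
#include <vector>

// Values is [rows, rowSize] flattened; output is [lookups.size(), rowSize].
// An out-of-range index is the error case the spec requires drivers to report.
bool embeddingLookup(const std::vector<float>& values, int rows, int rowSize,
                     const std::vector<int>& lookups, std::vector<float>* out) {
    out->clear();
    for (int idx : lookups) {
        if (idx < 0 || idx >= rows) return false;  // must fail, per the spec text
        out->insert(out->end(), values.begin() + idx * rowSize,
                    values.begin() + (idx + 1) * rowSize);
    }
    return true;
}

int main() {
    std::vector<float> values = {0, 0, 1, 1, 2, 2, 3, 3};  // 4 rows of size 2
    std::vector<float> out;
    if (embeddingLookup(values, 4, 2, {3, 0}, &out))
        for (float v : out) std::printf("%g ", v);  // prints: 3 3 0 0
}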
@@ -329,66 +434,104 @@ enum OperationType : int32_t {
      * tensor with each element in the output tensor.
      *
      * This layer implements the operation:
+     *
      *     outputs = activation(inputs * weights’ + bias)
      *
-     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-     *                         {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *
      * Supported tensor rank: up to 4.
      *
      * Inputs:
-     * 0: A tensor, specifying the input. If rank is greater than 2, then it gets flattened to
+     * * 0: A tensor, specifying the input. If rank is greater than 2, then it gets flattened to
      *      a 2-D Tensor. The 2-D Tensor is handled as if dimensions corresponded to shape
      *      [batch_size, input_size], where “batch_size” corresponds to the batching dimension,
      *      and “input_size” is the size of the input.
-     * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where “num_units”
-     *    corresponds to the number of output nodes.
-     * 2: A 1-D tensor, of shape [num_units], specifying the bias.
+     * * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where
+     *      "num_units" corresponds to the number of output nodes.
+     * * 2: A 1-D tensor, of shape [num_units], specifying the bias.
      *      For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
      *      also be of {@link OperandType::TENSOR_FLOAT32}.
      *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
-     *      should be of {@link OperandType::TENSOR_INT32}.
-     * 3: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *    Specifies the activation to invoke on the result of each addition.
-     *
-     * Ouputs:
-     * 0: The output tensor, of shape [batch_size, num_units].
+     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+     *      bias_scale == input_scale * filter_scale.
+     * * 3: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *      Specifies the activation to invoke on the result of each addition.
+     *
+     * Outputs:
+     * * 0: The output tensor, of shape [batch_size, num_units].
+     *      For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
+     *      condition must be satisfied: output_scale > input_scale * filter_scale.
      */
     FULLY_CONNECTED = 9,
 
     /**
-     * Looks up values of a hash table with given keys.
+     * Looks up sub-tensors in the input tensor using a key-value map.
+     *
+     * This operator takes for input a tensor of values (Values),
+     * a one-dimensional tensor of selection values (Lookups) and
+     * a one-dimensional tensor that maps these values to Values
+     * indexes. The output tensor is the concatenation of sub-tensors of
+     * Values as selected by Lookups via Keys.
+     *
+     * Think of Values as being sliced along its outer-most dimension.
+     * The output is a concatenation of selected slices, with one slice
+     * for each entry of Lookups. The slice selected is the one at the
+     * same index as the Maps entry that matches the value in Lookups.
+     *
+     * For a hit, the corresponding sub-tensor of Values is included
+     * in the Output tensor. For a miss, the corresponding sub-tensor in
+     * Output must have zero values.
+     *
+     * For example, if Values has shape of [40, 200, 300],
+     * Keys should have a shape of [40]. If Lookups tensor has shape
+     * of [3], three slices are being concatenated, so the resulting tensor
+     * must have the shape of [3, 200, 300]. If the first entry in Lookups
+     * has the value 123456, that value must be located in Keys tensor.
+     * If the sixth entry of Keys contains 123456, the sixth slice of Values
+     * must be selected. If no entry in Keys has 123456, a slice of zeroes
+     * must be concatenated.
      *
      * Inputs:
-     * * 0: Lookups. A 1-D int32 tensor with shape [ k ].
-     * * 1: Keys. A 1-D int32 tensor with shape [ n ], *MUST* be sorted in
-     *      ascending order.
-     * * 2: Values. A tensor with shape [ n … ].
+     * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [ k ].
+     * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [ n ];
+     *      Keys and Values pair represent a map, i.e., the ith element
+     *      in Keys (Keys[i]) is the key to select the ith sub-tensor
+     *      in Values (Values[i]), where 0 <= i <= n-1.
+     *      Keys tensor *MUST* be sorted in ascending order.
+     * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension must be n.
      *
      * Outputs:
      * * 0: Output. A tensor with shape [ k …].
-     * * 1: Hits. A uint8 tensor with shape [ k ] indicates whether the lookup
-     *      hits or not.
+     * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
+     *      hits (True) or not (False).
+     *      Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0 and scale 1.0f.
+     *      A non-zero byte represents True, a hit. A zero indicates otherwise.
      */
     HASHTABLE_LOOKUP = 10,
 
     /**
-     * Applies L2 normalization along a the depth dimension.
+     * Applies L2 normalization along the depth dimension.
+     *
+     * The values in the output tensor are computed as:
      *
-     * The values in output Tensor is computed as:
      * output[batch, row, col, channel] =
      *     input[batch, row, col, channel] /
      *     sqrt(sum_{c} pow(input[batch, row, col, c], 2))
      *
-     * For x with more dimensions, independently normalizes each 1-D slice along dimension dim.
+     * For input tensor with more dimensions, independently normalizes each 1-D slice along dimension dim.
      *
-     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
-     * Supported tensor rank: 4, with "NHWC" data layout.
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
+     * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples, Height, Width, and Channels).
      *
      * Inputs:
-     * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth].
      *
-     * Ouputs:
-     * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
      */
     L2_NORMALIZATION = 11,
 
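
The HASHTABLE_LOOKUP semantics (sorted Keys map lookup values to rows of Values; misses produce zero rows and a 0 in Hits) can be sketched as:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

void hashtableLookup(const std::vector<int>& lookups,
                     const std::vector<int>& keys,      // sorted ascending
                     const std::vector<float>& values,  // [keys.size(), rowSize]
                     int rowSize,
                     std::vector<float>* out, std::vector<uint8_t>* hits) {
    out->assign(lookups.size() * rowSize, 0.0f);  // misses stay zero
    hits->assign(lookups.size(), 0);
    for (size_t i = 0; i < lookups.size(); ++i) {
        auto it = std::lower_bound(keys.begin(), keys.end(), lookups[i]);
        if (it != keys.end() && *it == lookups[i]) {
            size_t row = it - keys.begin();
            std::copy_n(values.begin() + row * rowSize, rowSize,
                        out->begin() + i * rowSize);
            (*hits)[i] = 1;  // stored as quant8 with offset 0, scale 1.0f
        }
    }
}

int main() {
    std::vector<float> out;
    std::vector<uint8_t> hits;
    hashtableLookup({123456, 7}, {5, 123456}, {10, 10, 20, 20}, 2, &out, &hits);
    std::printf("%g %g %g %g | %u %u\n", out[0], out[1], out[2], out[3],
                hits[0], hits[1]);  // prints: 20 20 0 0 | 1 0
}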
@@ -397,28 +540,48 @@ enum OperationType : int32_t {
      *
      * The output dimensions are functions of the filter dimensions, stride, and padding.
      *
-     * The values in output Tensor is computed as:
+     * The values in the output tensor are computed as:
+     *
      * output[batch, row, col, channel] =
      *     sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) / sum(1))
      *
-     * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
+     * Supported tensor types:
+     * * {@link OperandType::TENSOR_FLOAT32}
+     *
      * Supported tensor rank: 4, with "NHWC" data layout.
      *
-     * Inputs:
-     * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
-     * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
-     * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
-     * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
-     * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
-     * 5: An INT32 value, specifying the output stride in the ‘width’ dimension.
-     * 6: An INT32 value, specifying the output stride in the ‘height’ dimension.
-     * 7: An INT32 value, specifying the filter width.
-     * 8: An INT32 value, specifying the filter height.
-     * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *    Specifies the activation to invoke on the result of each addition.
-     *
-     * Ouputs:
-     * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+     * Both explicit padding and implicit padding are supported.
+     *
+     * Inputs (explicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+     * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
+     * * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
+     * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
+     * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
+     * * 5: An INT32 value, specifying the stride when walking through input
+     *      in the ‘width’ dimension.
+     * * 6: An INT32 value, specifying the stride when walking through input
+     *      in the ‘height’ dimension.
+     * * 7: An INT32 value, specifying the filter width.
+     * * 8: An INT32 value, specifying the filter height.
+     * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *      Specifies the activation to invoke on the result of each addition.
+     *
+     * Inputs (implicit padding):
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
+     * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
+     *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+     * * 2: An INT32 value, specifying the stride when walking through input
+     *      in the ‘width’ dimension.
+     * * 3: An INT32 value, specifying the stride when walking through input
+     *      in the ‘height’ dimension.
+     * * 4: An INT32 value, specifying the filter width.
+     * * 5: An INT32 value, specifying the filter height.
+     * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
+     *      Specifies the activation to invoke on the result of each addition.
+     *
+     * Outputs:
+     * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
      */
     L2_POOL_2D = 12,
 
@@ -429,41 +592,49 @@ enum OperationType : int32_t {
429 * dimension), and each vector is normalized independently. Within a given vector, 592 * dimension), and each vector is normalized independently. Within a given vector,
430 * each component is divided by the weighted, squared sum of inputs within depth_radius. 593 * each component is divided by the weighted, squared sum of inputs within depth_radius.
431 * 594 *
432 * In details: 595 * The output is calculated using this formula:
596 *
433 * sqr_sum[a, b, c, d] = 597 * sqr_sum[a, b, c, d] =
434 * sum(pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2) 598 * sum(pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
435 * output = input / pow((bias + alpha * sqr_sum), beta) 599 * output = input / pow((bias + alpha * sqr_sum), beta)
436 * 600 *
437 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 601 * Supported tensor types:
602 * * {@link OperandType::TENSOR_FLOAT32}
603 *
438 * Supported tensor rank: 4, with "NHWC" data layout. 604 * Supported tensor rank: 4, with "NHWC" data layout.
439 * 605 *
440 * Inputs: 606 * Inputs:
441 * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 607 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
442 * 1: An INT32 value, specifying the radius of the normalization window. 608 * * 1: An INT32 value, specifying the radius of the normalization window.
443 * 2: A FLOAT32 value, specifying the bias, must not be zero. 609 * * 2: A FLOAT32 value, specifying the bias, must not be zero.
444 * 3: A FLOAT32 value, specifying the scale factor, alpha. 610 * * 3: A FLOAT32 value, specifying the scale factor, alpha.
445 * 4: A FLOAT32 value, specifying the exponent, beta. 611 * * 4: A FLOAT32 value, specifying the exponent, beta.
446 * 612 *
447 * Ouputs: 613 * Outputs:
448 * 0: The output tensor of same shape as input0. 614 * * 0: The output tensor of same shape as input0.
449 */ 615 */
450 LOCAL_RESPONSE_NORMALIZATION = 13, 616 LOCAL_RESPONSE_NORMALIZATION = 13,
451 617
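The sqr_sum/output formulas above, sketched in C++ for a single depth vector (illustrative only; positions near the edges simply use a truncated window):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Normalizes one depth vector: out[d] = in[d] / (bias + alpha * sqr_sum)^beta.
    std::vector<float> lrn(const std::vector<float>& in, int radius,
                           float bias, float alpha, float beta) {
        const int depth = static_cast<int>(in.size());
        std::vector<float> out(depth);
        for (int d = 0; d < depth; ++d) {
            float sqrSum = 0.0f;
            for (int k = std::max(0, d - radius);
                 k <= std::min(depth - 1, d + radius); ++k) {
                sqrSum += in[k] * in[k];  // squared sum within the radius
            }
            out[d] = in[d] / std::pow(bias + alpha * sqrSum, beta);
        }
        return out;
    }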
452 /** 618 /**
453 * Computes sigmoid activation on the input tensor element-wise. 619 * Computes sigmoid activation on the input tensor element-wise.
454 * 620 *
455 * In details: 621 * The output is calculated using this formula:
622 *
456 * output = 1 / (1 + exp(-input)) 623 * output = 1 / (1 + exp(-input))
457 * 624 *
458 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 625 * Supported tensor types:
459 * {@link OperandType::TENSOR_QUANT8_ASYMM} 626 * * {@link OperandType::TENSOR_FLOAT32}
627 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
628 *
460 * Supported tensor rank: up to 4. 629 * Supported tensor rank: up to 4.
461 * 630 *
462 * Inputs: 631 * Inputs:
463 * 0: A tensor, specifying the input. 632 * * 0: A tensor, specifying the input.
464 * 633 *
465 * Ouputs: 634 * Outputs:
466 * 0: The output tensor of same shape as input0. 635 * * 0: The output tensor of same shape as input0.
636 * For {@link OperandType::TENSOR_QUANT8_ASYMM} type,
637 * the scale must be 1.f / 256 and the zeroPoint must be 0.
467 */ 638 */
468 LOGISTIC = 14, 639 LOGISTIC = 14,
469 640
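A brief C++ sketch of the sigmoid formula, together with the quantized-output mapping implied by the required scale of 1.f / 256 and zeroPoint of 0 (illustrative only):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    float logistic(float x) { return 1.0f / (1.0f + std::exp(-x)); }

    // quantized = real / scale + zeroPoint, with scale = 1/256, zeroPoint = 0.
    uint8_t logisticQuant8(float x) {
        float q = std::round(logistic(x) * 256.0f);
        return static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, q)));
    }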
@@ -502,102 +673,165 @@ enum OperationType : int32_t {
502 LSH_PROJECTION = 15, 673 LSH_PROJECTION = 15,
503 674
504 /** 675 /**
505 * Long short-term memory unit (LSTM) recurrent network layer. 676 * Performs a single time step in a Long Short-Term Memory (LSTM) layer.
506 * 677 *
507 * The default non-peephole implementation is based on: 678 * The LSTM operation is described by the following equations.
508 * http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf 679 *
680 * \f{eqnarray*}{
681 * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
682 * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
683 * C_t =& clip(f_t \odot C_{t-1} + i_t \odot g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell})& \\
684 * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o)& \\
685 * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj}) & if\ there\ is\ a\ projection; \\
686 * h_t =& & \\
687 * & o_t \odot g(C_t) & otherwise. \\
688 * \f}
689 * Where:
690 * * \f$x_t\f$ is the input,
691 * * \f$i_t\f$ is the input gate,
692 * * \f$f_t\f$ is the forget gate,
693 * * \f$C_t\f$ is the cell state,
694 * * \f$o_t\f$ is the output,
695 * * \f$h_t\f$ is the output state,
696 * * \f$\sigma\f$ is the logistic sigmoid function,
697 * * \f$g\f$ is the cell input and cell output activation function, usually \f$tanh\f$,
698 * * \f$W_{xi}\f$ is the input-to-input weight matrix,
699 * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
700 * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
701 * * \f$b_i\f$ is the input gate bias,
702 * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
703 * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
704 * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
705 * * \f$b_f\f$ is the forget gate bias,
706 * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
707 * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
708 * * \f$b_c\f$ is the cell bias,
709 * * \f$W_{xo}\f$ is the input-to-output weight matrix,
710 * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
711 * * \f$W_{co}\f$ is the cell-to-output weight matrix,
712 * * \f$b_o\f$ is the output gate bias,
713 * * \f$W_{proj}\f$ is the projection weight matrix,
714 * * \f$b_{proj}\f$ is the projection bias,
715 * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
716 * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
717 * * \f$\odot\f$ is the <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
718 * Hadamard product</a> that takes two matrices and produces another
719 * matrix, each element of which is the product of the corresponding
720 * elements of the input matrices.
721 *
722 * The operation has the following independently optional inputs:
723 * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights (\f$W_{hi}\f$),
724 * cell-to-input (\f$W_{ci}\f$) weights, and input gate bias (\f$b_i\f$) either all have values,
725 * or none of them have values (i.e., all set to null). If they have no
726 * values, coupling of input and forget gates (CIFG) is used, in which case
727 * the input gate (\f$i_t\f$) is calculated using the following equation instead.
728 * \f{eqnarray*}{
729 * i_t = 1 - f_t
730 * \f}
731 * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights (\f$W_{cf}\f$), and cell-to-output
732 * weights (\f$W_{co}\f$) either all have values or none of them have values.
733 * If they have values, the peephole optimization is used.
734 * * The projection weights (\f$W_{proj}\f$) are required only for the recurrent projection
735 * layer, and should otherwise have no value.
736 * * The projection bias (\f$b_{proj}\f$) may (but is not required to) have a value if the
737 * recurrent projection layer exists, and should otherwise have no value.
738 *
739 * References:
740 *
741 * The default non-peephole non-CIFG implementation is based on:
742 * http://www.bioinf.jku.at/publications/older/2604.pdf
509 * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural 743 * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
510 * Computation, 9(8):1735-1780, 1997. 744 * Computation, 9(8):1735-1780, 1997.
511 * 745 *
512 * The peephole implementation is based on: 746 * The peephole implementation and projection layer is based on:
513 * https://research.google.com/pubs/archive/43905.pdf 747 * https://research.google.com/pubs/archive/43905.pdf
514 * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory 748 * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
515 * recurrent neural network architectures for large scale acoustic modeling." 749 * recurrent neural network architectures for large scale acoustic modeling."
516 * INTERSPEECH, 2014. 750 * INTERSPEECH, 2014.
751 * (However, the concept of peephole optimization was introduced in work
752 * prior to this paper.)
517 * 753 *
518 * The coupling of input and forget gate (CIFG) is based on: 754 * The coupling of input and forget gate (CIFG) is based on:
519 * http://arxiv.org/pdf/1503.04069.pdf 755 * http://arxiv.org/pdf/1503.04069.pdf
520 * Greff et al. "LSTM: A Search Space Odyssey" 756 * Greff et al. "LSTM: A Search Space Odyssey"
521 * 757 *
522 * The class has the following independently optional inputs: 758 * Supported tensor types (type T):
523 * * If input gate (if CIFG): “input_to_forget_weights”,
524 * “recurrent_to_input_weights”, “cell_to_input_weights”, “input_gate_bias”.
525 * * If no peephole connections: “cell_to_input_weights”,
526 * “cell_to_forget_weights”, “cell_to_output_weights”.
527 * * If no projection layer: “projection_weights” and “projection_bias”.
528 * * If no projection bias: “projection_bias”.
529 *
530 * Supported tensor types:
531 * * {@link OperandType::TENSOR_FLOAT32} 759 * * {@link OperandType::TENSOR_FLOAT32}
532 * 760 *
533 * Inputs: 761 * Inputs:
534 * * 0: Input. 762 * * 0: The input (\f$x_t\f$).
535 * A 2-D tensor of type T, of shape [batch_size, input_size], where 763 * A 2-D tensor of type T, of shape [batch_size, input_size], where
536 * “batch_size” corresponds to the batching dimension, and “input_size” 764 * “batch_size” corresponds to the batching dimension, and “input_size”
537 * is the size of the input. 765 * is the size of the input.
538 * * 1: input_to_input_weights. 766 * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
539 * A 2-D tensor of type T, of shape [num_units, input_size], where 767 * A 2-D tensor of type T, of shape [num_units, input_size], where
540 * “num_units” corresponds to the number of cell units. 768 * “num_units” corresponds to the number of cell units.
541 * * 2: input_to_forget_weights. 769 * * 2: The input-to-forget weights (\f$W_{xf}\f$).
542 * A 2-D tensor of type T, of shape [num_units, input_size]. 770 * A 2-D tensor of type T, of shape [num_units, input_size].
543 * * 3: input_to_cell_weights. 771 * * 3: The input-to-cell weights (\f$W_{xc}\f$).
544 * A 2-D tensor of type T, of shape [num_units, input_size]. 772 * A 2-D tensor of type T, of shape [num_units, input_size].
545 * * 4: input_to_output_weights. 773 * * 4: The input-to-output weights (\f$W_{xo}\f$).
546 * A 2-D tensor of type T, of shape [num_units, input_size]. 774 * A 2-D tensor of type T, of shape [num_units, input_size].
547 * * 5: recurrent_to_input_weights. 775 * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
548 * A 2-D tensor of type T, of shape [num_units, output_size], where 776 * A 2-D tensor of type T, of shape [num_units, output_size], where
549 * “output_size” corresponds to either the number of cell units (i.e., 777 * “output_size” corresponds to either the number of cell units (i.e.,
550 * “num_units”), or the second dimension of the “projection_weights”, if 778 * “num_units”), or the second dimension of the “projection_weights”, if
551 * defined. 779 * defined.
552 * * 6: recurrent_to_forget_weights. 780 * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
553 * A 2-D tensor of type T, of shape [num_units, output_size]. 781 * A 2-D tensor of type T, of shape [num_units, output_size].
554 * * 7: recurrent_to_cell_weights. 782 * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
555 * A 2-D tensor of type T, of shape [num_units, output_size]. 783 * A 2-D tensor of type T, of shape [num_units, output_size].
556 * * 8: recurrent_to_output_weights. 784 * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
557 * A 2-D tensor of type T, of shape [num_units, output_size]. 785 * A 2-D tensor of type T, of shape [num_units, output_size].
558 * * 9: cell_to_input_weights. 786 * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
559 * A 1-D tensor of type T, of shape [num_units]. 787 * A 1-D tensor of type T, of shape [num_units].
560 * * 10:cell_to_forget_weights. 788 * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
561 * A 1-D tensor of type T, of shape [num_units]. 789 * A 1-D tensor of type T, of shape [num_units].
562 * * 11:cell_to_output_weights. 790 * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
563 * A 1-D tensor of type T, of shape [num_units]. 791 * A 1-D tensor of type T, of shape [num_units].
564 * * 12:input_gate_bias. 792 * * 12:The input gate bias (\f$b_i\f$). Optional.
565 * A 1-D tensor of type T, of shape [num_units]. 793 * A 1-D tensor of type T, of shape [num_units].
566 * * 13:forget_gate_bias. 794 * * 13:The forget gate bias (\f$b_f\f$).
567 * A 1-D tensor of type T, of shape [num_units]. 795 * A 1-D tensor of type T, of shape [num_units].
568 * * 14:cell_bias. 796 * * 14:The cell bias (\f$b_c\f$).
569 * A 1-D tensor of type T, of shape [num_units]. 797 * A 1-D tensor of type T, of shape [num_units].
570 * * 15:output_gate_bias. 798 * * 15:The output gate bias (\f$b_o\f$).
571 * A 1-D tensor of type T, of shape [num_units]. 799 * A 1-D tensor of type T, of shape [num_units].
572 * * 16:projection_weights. 800 * * 16:The projection weights (\f$W_{proj}\f$). Optional.
573 * A 2-D tensor of type T, of shape [output_size, num_units]. 801 * A 2-D tensor of type T, of shape [output_size, num_units].
574 * * 17:projection_bias. 802 * * 17:The projection bias (\f$b_{proj}\f$). Optional.
575 * A 1-D tensor of type T, of shape [output_size]. 803 * A 1-D tensor of type T, of shape [output_size].
576 * 804 * * 18:The output state (in) (\f$h_{t-1}\f$).
577 * Parameters: 805 * A 2-D tensor of type T, of shape [batch_size, output_size].
578 * * 18:fused_activation_function. 806 * * 19:The cell state (in) (\f$C_{t-1}\f$).
579 * An (optional) ActivationFunctionType indicating the activation 807 * A 2-D tensor of type T, of shape [batch_size, num_units].
580 * function. 808 * * 20:The activation function (\f$g\f$).
581 * If “NONE” is specified then it results in a linear activation. 809 * A value indicating the activation function:
582 * * 19:cell_clip. 810 * <ul>
583 * A clipping threshold for the cell state, such that values are bound 811 * <li>0: None;
812 * <li>1: Relu;
813 * <li>3: Relu6;
814 * <li>4: Tanh;
815 * <li>6: Sigmoid.
816 * </ul>
817 * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such that values are bound
584 * within [-cell_clip, cell_clip]. If set to 0.0 then clipping is 818 * within [-cell_clip, cell_clip]. If set to 0.0 then clipping is
585 * disabled. 819 * disabled.
586 * * 20:proj_clip. 820 * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the projection layer, such
587 * A clipping threshold for the output from the projection layer, such
588 * that values are bound within [-proj_clip, proj_clip]. If set to 0.0 821 * that values are bound within [-proj_clip, proj_clip]. If set to 0.0
589 * then clipping is disabled. 822 * then clipping is disabled.
590 * 823 *
591 * Outputs: 824 * Outputs:
592 * * 0: scratch_buffer. 825 * * 0: The scratch buffer.
593 * A 3-D tensor of type T, of shape [batch_size, num_cell, 4]. 826 * A 2-D tensor of type T, of shape [batch_size, num_units * 4] with
594 * * 1: output_state. 827 * CIFG, or [batch_size, num_units * 3] without CIFG.
828 * * 1: The output state (out) (\f$h_t\f$).
595 * A 2-D tensor of type T, of shape [batch_size, output_size]. 829 * A 2-D tensor of type T, of shape [batch_size, output_size].
596 * * 2: cell_state. 830 * * 2: The cell state (out) (\f$C_t\f$).
597 * A 2-D tensor of type T, of shape [batch_size, num_units]. 831 * A 2-D tensor of type T, of shape [batch_size, num_units].
598 * * 3: output. 832 * * 3: The output (\f$o_t\f$).
599 * A 2-D tensor of type T, of shape [batch_size, output_size]. This is 833 * A 2-D tensor of type T, of shape [batch_size, output_size]. This is
600 * effectively the same as the current “output_state” value. 834 * effectively the same as the current “output state (out)” value.
601 */ 835 */
602 LSTM = 16, 836 LSTM = 16,
603 837
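As a concrete reference for the equations above, a single LSTM time step in C++ for batch_size = 1, assuming no CIFG, no peepholes, no projection, and g = tanh (names and layouts are illustrative, not the HAL interface):

    #include <cmath>
    #include <vector>

    using Vec = std::vector<float>;
    using Mat = std::vector<Vec>;  // [num_units][input_size or output_size]

    static Vec matvec(const Mat& w, const Vec& v) {
        Vec y(w.size(), 0.0f);
        for (size_t u = 0; u < w.size(); ++u)
            for (size_t k = 0; k < v.size(); ++k) y[u] += w[u][k] * v[k];
        return y;
    }
    static float sigmoid(float v) { return 1.0f / (1.0f + std::exp(-v)); }

    // One time step: updates the cell state c and the output state h in place.
    void lstmStep(const Mat& wxi, const Mat& whi, const Vec& bi,
                  const Mat& wxf, const Mat& whf, const Vec& bf,
                  const Mat& wxc, const Mat& whc, const Vec& bc,
                  const Mat& wxo, const Mat& who, const Vec& bo,
                  const Vec& x, Vec& h, Vec& c) {
        Vec xi = matvec(wxi, x), xf = matvec(wxf, x), xc = matvec(wxc, x),
            xo = matvec(wxo, x);
        Vec hi = matvec(whi, h), hf = matvec(whf, h), hc = matvec(whc, h),
            ho = matvec(who, h);
        for (size_t u = 0; u < c.size(); ++u) {
            float it = sigmoid(xi[u] + hi[u] + bi[u]);  // input gate
            float ft = sigmoid(xf[u] + hf[u] + bf[u]);  // forget gate
            c[u] = ft * c[u] + it * std::tanh(xc[u] + hc[u] + bc[u]);
            float ot = sigmoid(xo[u] + ho[u] + bo[u]);  // output gate
            h[u] = ot * std::tanh(c[u]);                // output state
        }
    }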
@@ -606,36 +840,56 @@ enum OperationType : int32_t {
606 * 840 *
607 * The output dimensions are functions of the filter dimensions, stride, and padding. 841 * The output dimensions are functions of the filter dimensions, stride, and padding.
608 * 842 *
609 * The values in output Tensor is computed as: 843 * The values in the output tensor are computed as:
844 *
610 * output[batch, row, col, channel] = 845 * output[batch, row, col, channel] =
611 * max_{i, j} (input[batch, row + i, col + j, channel]) 846 * max_{i, j} (input[batch, row + i, col + j, channel])
612 * 847 *
613 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 848 * Supported tensor types:
614 * {@link OperandType::TENSOR_QUANT8_ASYMM} 849 * * {@link OperandType::TENSOR_FLOAT32}
850 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
851 *
615 * Supported tensor rank: 4, with "NHWC" data layout. 852 * Supported tensor rank: 4, with "NHWC" data layout.
616 * 853 *
617 * Inputs: 854 * Both explicit padding and implicit padding are supported.
618 * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 855 *
619 * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. 856 * Inputs (explicit padding):
620 * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. 857 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
621 * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. 858 * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
622 * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. 859 * * 2: An INT32 value, specifying the padding on the right, in the ‘width’ dimension.
623 * 5: An INT32 value, specifying the output stride in the ‘width’ dimension. 860 * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
624 * 6: An INT32 value, specifying the output stride in the ‘height’ dimension. 861 * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
625 * 7: An INT32 value, specifying the filter width. 862 * * 5: An INT32 value, specifying the stride when walking through input
626 * 8: An INT32 value, specifying the filter height. 863 * in the ‘width’ dimension.
627 * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 864 * * 6: An INT32 value, specifying the stride when walking through input
628 * Specifies the activation to invoke on the result of each addition. 865 * in the ‘height’ dimension.
629 * 866 * * 7: An INT32 value, specifying the filter width.
630 * Ouputs: 867 * * 8: An INT32 value, specifying the filter height.
631 * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth]. 868 * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
869 * Specifies the activation to invoke on the result.
870 *
871 * Inputs (implicit padding):
872 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
873 * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
874 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
875 * * 2: An INT32 value, specifying the stride when walking through input
876 * in the ‘width’ dimension.
877 * * 3: An INT32 value, specifying the stride when walking through input
878 * in the ‘height’ dimension.
879 * * 4: An INT32 value, specifying the filter width.
880 * * 5: An INT32 value, specifying the filter height.
881 * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
882 * Specifies the activation to invoke on the result.
883 *
884 * Outputs:
885 * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
632 */ 886 */
633 MAX_POOL_2D = 17, 887 MAX_POOL_2D = 17,
634 888
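For the implicit padding variants of the pooling operations above, the output spatial extent follows from the padding scheme; a per-dimension C++ sketch, assuming the conventional SAME/VALID semantics for schemes 1 and 2:

    // Output size along one spatial dimension under implicit padding.
    int impliedOutputSize(int inSize, int filterSize, int stride, int scheme) {
        if (scheme == 1) {                 // SAME: pad so out = ceil(in / stride)
            return (inSize + stride - 1) / stride;
        }
        // VALID: no padding, out = ceil((in - filter + 1) / stride)
        return (inSize - filterSize + stride) / stride;
    }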
635 /** 889 /**
636 * Multiplies two tensors, elment-wise. 890 * Multiplies two tensors, element-wise.
637 * 891 *
638 * Takes two input tensors of identical type and compatible dimensions. The output 892 * Takes two input tensors of identical type and compatible dimensions. The output
639 * is the product of both input tensors, optionally modified by an activation function. 893 * is the product of both input tensors, optionally modified by an activation function.
640 * 894 *
641 * Two dimensions are compatible when: 895 * Two dimensions are compatible when:
@@ -645,72 +899,85 @@ enum OperationType : int32_t {
645 * The size of the resulting output is the maximum size along each dimension of the 899 * The size of the resulting output is the maximum size along each dimension of the
646 * input operands. It starts with the trailing dimensions, and works its way forward. 900 * input operands. It starts with the trailing dimensions, and works its way forward.
647 * 901 *
648 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 902 * Supported tensor types:
649 * {@link OperandType::TENSOR_QUANT8_ASYMM} 903 * * {@link OperandType::TENSOR_FLOAT32}
904 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
905 *
650 * Supported tensor rank: up to 4 906 * Supported tensor rank: up to 4
651 * 907 *
652 * Inputs: 908 * Inputs:
653 * 0: A tensor. 909 * * 0: A tensor.
654 * 1: A tensor of the same type, and compatible dimensions as input0. 910 * * 1: A tensor of the same type, and compatible dimensions as input0.
655 * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 911 * * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
656 * Specifies the activation to invoke on the result of each addition. 912 * Specifies the activation to invoke on the result of each multiplication.
657 * 913 *
658 * Ouputs: 914 * Outputs:
659 * 0: The product, a tensor of the same type as input0. 915 * * 0: The product, a tensor of the same type as input0.
916 * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
917 * condition must be satisfied: output_scale > input1_scale * input2_scale.
660 */ 918 */
661 MUL = 18, 919 MUL = 18,
662 920
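The dimension-compatibility rule above can be sketched in C++: walking from the trailing dimensions, each pair of sizes must be equal or one of them must be 1 (illustrative only):

    #include <algorithm>
    #include <vector>

    // Returns the broadcast output shape of two compatible shapes.
    std::vector<int> broadcastShape(std::vector<int> a, std::vector<int> b) {
        if (a.size() < b.size()) std::swap(a, b);  // make `a` the longer shape
        std::vector<int> out = a;
        for (size_t i = 0; i < b.size(); ++i) {
            size_t ia = a.size() - 1 - i, ib = b.size() - 1 - i;
            out[ia] = std::max(a[ia], b[ib]);      // equal, or one of them is 1
        }
        return out;
    }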
663 /** 921 /**
664 * Computes rectified linear activation on the input tensor element-wise. 922 * Computes rectified linear activation on the input tensor element-wise.
665 * 923 *
666 * In details: 924 * The output is calculated using this formula:
925 *
667 * output = max(0, input) 926 * output = max(0, input)
668 * 927 *
669 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 928 * Supported tensor types:
670 * {@link OperandType::TENSOR_QUANT8_ASYMM} 929 * * {@link OperandType::TENSOR_FLOAT32}
930 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
931 *
671 * Supported tensor rank: up to 4. 932 * Supported tensor rank: up to 4.
672 * 933 *
673 * Inputs: 934 * Inputs:
674 * 0: A tensor, specifying the input. 935 * * 0: A tensor, specifying the input.
675 * 936 *
676 * Ouputs: 937 * Outputs:
677 * 0: The output tensor of same shape as input0. 938 * * 0: The output tensor of same shape as input0.
678 */ 939 */
679 RELU = 19, 940 RELU = 19,
680 941
681 /** 942 /**
682 * Computes rectified linear 1 activation on the input tensor element-wise. 943 * Computes rectified linear 1 activation on the input tensor element-wise.
683 * 944 *
684 * In details: 945 * The output is calculated using this formula:
946 *
685 * output = min(1.f, max(-1.f, input)) 947 * output = min(1.f, max(-1.f, input))
686 * 948 *
687 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 949 * Supported tensor types:
688 * {@link OperandType::TENSOR_QUANT8_ASYMM} 950 * * {@link OperandType::TENSOR_FLOAT32}
951 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
952 *
689 * Supported tensor rank: up to 4. 953 * Supported tensor rank: up to 4.
690 * 954 *
691 * Inputs: 955 * Inputs:
692 * 0: A tensor, specifying the input. 956 * * 0: A tensor, specifying the input.
693 * 957 *
694 * Ouputs: 958 * Outputs:
695 * 0: The output tensor of same shape as input0. 959 * * 0: The output tensor of same shape as input0.
696 */ 960 */
697 RELU1 = 20, 961 RELU1 = 20,
698 962
699 /** 963 /**
700 * Computes rectified linear 6 activation on the input tensor element-wise. 964 * Computes rectified linear 6 activation on the input tensor element-wise.
701 * 965 *
702 * In details: 966 * The output is calculated using this formula:
967 *
703 * output = min(6, max(0, input)) 968 * output = min(6, max(0, input))
704 * 969 *
705 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 970 * Supported tensor types:
706 * {@link OperandType::TENSOR_QUANT8_ASYMM} 971 * * {@link OperandType::TENSOR_FLOAT32}
972 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
973 *
707 * Supported tensor rank: up to 4. 974 * Supported tensor rank: up to 4.
708 * 975 *
709 * Inputs: 976 * Inputs:
710 * 0: A tensor, specifying the input. 977 * * 0: A tensor, specifying the input.
711 * 978 *
712 * Ouputs: 979 * Outputs:
713 * 0: The output tensor of same shape as input0. 980 * * 0: The output tensor of same shape as input0.
714 */ 981 */
715 RELU6 = 21, 982 RELU6 = 21,
716 983
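The three rectified-linear variants above differ only in their clamping bounds; one-line C++ sketches of the formulas:

    #include <algorithm>

    float relu(float x)  { return std::max(0.0f, x); }                   // RELU
    float relu1(float x) { return std::min(1.0f, std::max(-1.0f, x)); }  // RELU1
    float relu6(float x) { return std::min(6.0f, std::max(0.0f, x)); }   // RELU6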
@@ -720,36 +987,41 @@ enum OperationType : int32_t {
720 * Given tensor, this operation returns a tensor that has the same values as tensor, 987 * Given tensor, this operation returns a tensor that has the same values as tensor,
721 * but with a newly specified shape. 988 * but with a newly specified shape.
722 * 989 *
723 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 990 * Supported tensor types:
724 * {@link OperandType::TENSOR_QUANT8_ASYMM} 991 * * {@link OperandType::TENSOR_FLOAT32}
992 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
993 *
725 * Supported tensor rank: up to 4. 994 * Supported tensor rank: up to 4.
726 * 995 *
727 * Inputs: 996 * Inputs:
728 * 0: A tensor, specifying the tensor to be reshaped. 997 * * 0: A tensor, specifying the tensor to be reshaped.
729 * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32}, defining the shape 998 * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32}, defining the shape
730 * of the output tensor. The number of elements implied by shape must be the same 999 * of the output tensor. The number of elements implied by shape must be the same
731 * as the number of elements in the input tensor. 1000 * as the number of elements in the input tensor.
732 * 1001 *
733 * Ouputs: 1002 * Outputs:
734 * 0: The output tensor, of shape specified by the input shape. 1003 * * 0: The output tensor, of shape specified by the input shape.
735 */ 1004 */
736 RESHAPE = 22, 1005 RESHAPE = 22,
737 1006
738 /** 1007 /**
739 * Resizes images to given size using the bilinear interpretation. 1008 * Resizes images to a given size using bilinear interpolation.
740 * 1009 *
741 * Resized images will be distorted if their original aspect ratio is not the same as input. 1010 * Resized images must be distorted if their output aspect ratio is not the same as
1011 * input aspect ratio.
1012 *
1013 * Supported tensor types:
1014 * * {@link OperandType::TENSOR_FLOAT32}
742 * 1015 *
743 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
744 * Supported tensor rank: 4, with "NHWC" data layout. 1016 * Supported tensor rank: 4, with "NHWC" data layout.
745 * 1017 *
746 * Inputs: 1018 * Inputs:
747 * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 1019 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
748 * 1: An INT32 value, specifying the output width of the output tensor. 1020 * * 1: An INT32 value, specifying the height of the output tensor.
749 * 2: An INT32 value, specifying the output height of the output tensor. 1021 * * 2: An INT32 value, specifying the width of the output tensor.
750 * 1022 *
751 * Ouputs: 1023 * Outputs:
752 * 0: The output 4-D tensor, of shape [batches, new_height, new_width, depth]. 1024 * * 0: The output 4-D tensor, of shape [batches, new_height, new_width, depth].
753 */ 1025 */
754 RESIZE_BILINEAR = 23, 1026 RESIZE_BILINEAR = 23,
755 1027
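A C++ sketch of sampling one output pixel by bilinear interpolation (single channel; the source-coordinate mapping shown is one common choice, and drivers may differ in edge handling):

    #include <algorithm>
    #include <vector>

    float bilinearAt(const std::vector<std::vector<float>>& in,
                     int outY, int outX, int outH, int outW) {
        const int inH = static_cast<int>(in.size());
        const int inW = static_cast<int>(in[0].size());
        float y = outY * static_cast<float>(inH) / outH;  // source coordinates
        float x = outX * static_cast<float>(inW) / outW;
        int y0 = static_cast<int>(y), x0 = static_cast<int>(x);
        int y1 = std::min(y0 + 1, inH - 1), x1 = std::min(x0 + 1, inW - 1);
        float dy = y - y0, dx = x - x0;
        return in[y0][x0] * (1 - dy) * (1 - dx) + in[y0][x1] * (1 - dy) * dx +
               in[y1][x0] * dy * (1 - dx) + in[y1][x1] * dy * dx;
    }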
@@ -768,7 +1040,7 @@ enum OperationType : int32_t {
768 * * “activation” is the function passed as the “fused_activation_function” 1040 * * “activation” is the function passed as the “fused_activation_function”
769 * argument (if not “NONE”). 1041 * argument (if not “NONE”).
770 * 1042 *
771 * Supported tensor types: 1043 * Supported tensor types (Type T):
772 * * {@link OperandType::TENSOR_FLOAT32} 1044 * * {@link OperandType::TENSOR_FLOAT32}
773 * 1045 *
774 * Inputs: 1046 * Inputs:
@@ -784,21 +1056,18 @@ enum OperationType : int32_t {
784 * corresponding to the weights from each unit. 1056 * corresponding to the weights from each unit.
785 * * 3: bias. 1057 * * 3: bias.
786 * A 1-D tensor of type T, of shape [num_units]. 1058 * A 1-D tensor of type T, of shape [num_units].
787 * 1059 * * 4: hidden state (in).
788 * For FLOAT32 input tensor, bias must also be FLOAT32. 1060 * A 2-D tensor of type T, of shape [batch_size, num_units].
789 * For UINT8 input tensor, bias must be INT32. 1061 * * 5: fused_activation_function.
790 * 1062 * An optional {@link FusedActivationFunc} value indicating the activation
791 * Parameters
792 * * 4: fused_activation_function.
793 * An (optional) ActivationFunctionType indicating the activation
794 * function. If “NONE” is specified then it results in a linear 1063 * function. If “NONE” is specified then it results in a linear
795 * activation. 1064 * activation.
796 * 1065 *
797 * * 5: Hidden state. 1066 * Outputs:
1067 * * 0: hidden state (out).
798 * A 2-D tensor of type T, of shape [batch_size, num_units]. 1068 * A 2-D tensor of type T, of shape [batch_size, num_units].
799 * 1069 *
800 * Outputs: 1070 * * 1: output.
801 * * 0: output.
802 * A 2-D tensor of type T, of shape [batch_size, num_units]. This is 1071 * A 2-D tensor of type T, of shape [batch_size, num_units]. This is
803 * effectively the same as the current state value. 1072 * effectively the same as the current state value.
804 */ 1073 */
@@ -808,21 +1077,26 @@ enum OperationType : int32_t {
808 * Computes the softmax activation on the input tensor element-wise, per batch, by 1077 * Computes the softmax activation on the input tensor element-wise, per batch, by
809 * normalizing the input vector so the maximum coefficient is zero. 1078 * normalizing the input vector so the maximum coefficient is zero.
810 * 1079 *
811 * In details: 1080 * The output is calculated using this formula:
1081 *
812 * output[batch, i] = 1082 * output[batch, i] =
813 * exp((input[batch, i] - max(input[batch, :])) * beta) / 1083 * exp((input[batch, i] - max(input[batch, :])) * beta) /
814 * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)} 1084 * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
815 * 1085 *
816 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 1086 * Supported tensor types:
817 * {@link OperandType::TENSOR_QUANT8_ASYMM} 1087 * * {@link OperandType::TENSOR_FLOAT32}
1088 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1089 *
818 * Supported tensor rank: 2 or 4. 1090 * Supported tensor rank: 2 or 4.
819 * 1091 *
820 * Inputs: 1092 * Inputs:
821 * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. 1093 * * 0: A 2-D or 4-D tensor, specifying the input.
822 * 1: A FLOAT32 value, specifying the scaling factor for the exponent, beta. 1094 * * 1: A FLOAT32 value, specifying the positive scaling factor for the exponent, beta.
823 * 1095 *
824 * Ouputs: 1096 * Outputs:
825 * 0: The output tensor of same shape as input0. 1097 * * 0: The output tensor of same shape as input0.
1098 * For {@link OperandType::TENSOR_QUANT8_ASYMM} type,
1099 * the scale must be 1.f / 256 and the zeroPoint must be 0.
826 */ 1100 */
827 SOFTMAX = 25, 1101 SOFTMAX = 25,
828 1102
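The softmax formula above, sketched in C++ for one batch row (illustrative only); subtracting the row maximum is exactly the max(input[batch, :]) term and keeps the exponentials numerically stable:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    std::vector<float> softmax(const std::vector<float>& in, float beta) {
        float maxV = *std::max_element(in.begin(), in.end());
        std::vector<float> out(in.size());
        float sum = 0.0f;
        for (size_t i = 0; i < in.size(); ++i) {
            out[i] = std::exp((in[i] - maxV) * beta);
            sum += out[i];
        }
        for (float& v : out) v /= sum;  // normalize so the row sums to 1
        return out;
    }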
@@ -839,18 +1113,20 @@ enum OperationType : int32_t {
839 * The depth of the output tensor is input_depth * block_size * block_size. 1113 * The depth of the output tensor is input_depth * block_size * block_size.
840 * The input tensor's height and width must be divisible by block_size. 1114 * The input tensor's height and width must be divisible by block_size.
841 * 1115 *
842 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 1116 * Supported tensor types:
843 * {@link OperandType::TENSOR_QUANT8_ASYMM} 1117 * * {@link OperandType::TENSOR_FLOAT32}
1118 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1119 *
844 * Supported tensor rank: 4, with "NHWC" data layout. 1120 * Supported tensor rank: 4, with "NHWC" data layout.
845 * 1121 *
846 * Inputs: 1122 * Inputs:
847 * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. 1123 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
848 * 1: An INT32 value, specifying the block_size. block_size must be >=1 and 1124 * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
849 * block_size must be a divisor of both the input height and width. 1125 * block_size must be a divisor of both the input height and width.
850 * 1126 *
851 * Ouputs: 1127 * Outputs:
852 * 0: The output 4-D tensor, of shape [batch, height/block_size, width/block_size, 1128 * * 0: The output 4-D tensor, of shape [batch, height/block_size, width/block_size,
853 * depth*block_size*block_size]. 1129 * depth*block_size*block_size].
854 */ 1130 */
855 SPACE_TO_DEPTH = 26, 1131 SPACE_TO_DEPTH = 26,
856 1132
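The rearrangement above can also be stated as an index mapping; a C++ sketch for NHWC, assuming the usual TensorFlow-style ordering of block offsets within the output channel (illustrative only):

    // Reads the input element that lands at output position [b, h, w, c].
    // bs = block_size, din = input depth, inH/inW = input height/width.
    float spaceToDepthAt(const float* in, int b, int h, int w, int c,
                         int inH, int inW, int din, int bs) {
        int ci = c % din;            // original channel
        int off = c / din;           // position within the block
        int ih = h * bs + off / bs;  // row offset inside the block
        int iw = w * bs + off % bs;  // column offset inside the block
        return in[((b * inH + ih) * inW + iw) * din + ci];
    }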
@@ -874,8 +1150,8 @@ enum OperationType : int32_t {
874 * 1150 *
875 * Specifically, for rank 1, this layer implements the operation: 1151 * Specifically, for rank 1, this layer implements the operation:
876 * 1152 *
877 * memory = push(conv1d(inputs, weights_feature, feature_dim, "VALID")); 1153 * memory = push(conv1d(inputs, weights_feature, feature_dim, "PADDING_VALID"));
878 * outputs = activation(memory * weights_time + bias); 1154 * outputs = activation(memory * weights_time + bias);
879 * 1155 *
880 * Where: 1156 * Where:
881 * * “weights_feature” is a weights matrix that processes the inputs (by 1157 * * “weights_feature” is a weights matrix that processes the inputs (by
@@ -892,7 +1168,7 @@ enum OperationType : int32_t {
892 * Each rank adds a dimension to the weights matrices by means of stacking 1168 * Each rank adds a dimension to the weights matrices by means of stacking
893 * the filters. 1169 * the filters.
894 * 1170 *
895 * Supported tensor types: 1171 * Supported tensor types (type T):
896 * * {@link OperandType::TENSOR_FLOAT32} 1172 * * {@link OperandType::TENSOR_FLOAT32}
897 * 1173 *
898 * Inputs: 1174 * Inputs:
@@ -907,20 +1183,17 @@ enum OperationType : int32_t {
907 * A 2-D tensor of type T, of shape [num_units, memory_size], where 1183 * A 2-D tensor of type T, of shape [num_units, memory_size], where
908 * “memory_size” corresponds to the fixed-size of the memory. 1184 * “memory_size” corresponds to the fixed-size of the memory.
909 * * 3: bias. 1185 * * 3: bias.
910 * A optional 1-D tensor of type T, of shape [num_units]. 1186 * An optional 1-D tensor of type T, of shape [num_units].
911 * 1187 * * 4: state (in).
912 * For FLOAT32 input tensor, bias must also be FLOAT32. 1188 * A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank].
913 * For UINT8 input tensor, bias must be INT32. 1189 * * 5: rank.
914 *
915 * Parameters:
916 * * 4: rank.
917 * The rank of the SVD approximation. 1190 * The rank of the SVD approximation.
918 * * 5: fused_activation_function. 1191 * * 6: fused_activation_function.
919 * An (optional) ActivationFunctionType indicating the activation function. 1192 * An optional {@link FusedActivationFunc} value indicating the activation function.
920 * If “NONE” is specified then it results in a linear activation. 1193 * If “NONE” is specified then it results in a linear activation.
921 * 1194 *
922 * Outputs: 1195 * Outputs:
923 * * 0: state. 1196 * * 0: state (out).
924 * A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank]. 1197 * A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank].
925 * * 1: output. 1198 * * 1: output.
926 * A 2-D tensor of type T, of shape [batch_size, num_units]. 1199 * A 2-D tensor of type T, of shape [batch_size, num_units].
@@ -930,17 +1203,20 @@ enum OperationType : int32_t {
930 /** 1203 /**
931 * Computes hyperbolic tangent of input tensor element-wise. 1204 * Computes hyperbolic tangent of input tensor element-wise.
932 * 1205 *
933 * In details: 1206 * The output is calculated using this formula:
1207 *
934 * output = tanh(input) 1208 * output = tanh(input)
935 * 1209 *
936 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 1210 * Supported tensor types:
1211 * * {@link OperandType::TENSOR_FLOAT32}
1212 *
937 * Supported tensor rank: up to 4. 1213 * Supported tensor rank: up to 4.
938 * 1214 *
939 * Inputs: 1215 * Inputs:
940 * 0: A tensor, specifying the input. 1216 * * 0: A tensor, specifying the input.
941 * 1217 *
942 * Ouputs: 1218 * Outputs:
943 * 0: The output tensor of same shape as input0. 1219 * * 0: The output tensor of same shape as input0.
944 */ 1220 */
945 TANH = 28, 1221 TANH = 28,
946 1222
@@ -967,8 +1243,8 @@ enum FusedActivationFunc : int32_t {
967 */ 1243 */
968enum OperandLifeTime : int32_t { 1244enum OperandLifeTime : int32_t {
969 /** 1245 /**
970 * The operand is internal to the model. It's created by an operation 1246 * The operand is internal to the model. It's created by an operation and
971 * and consumed by other operations. 1247 * consumed by other operations.
972 */ 1248 */
973 TEMPORARY_VARIABLE, 1249 TEMPORARY_VARIABLE,
974 1250
@@ -1081,7 +1357,11 @@ struct Operand {
1081 vec<uint32_t> dimensions; 1357 vec<uint32_t> dimensions;
1082 1358
1083 /** 1359 /**
1084 * The number of operations that use this operand as input. 1360 * The number of times this operand appears as an operation input.
1361 *
1362 * (For example, if this operand appears once in one operation's
1363 * input list, and three times in another operation's input list,
1364 * then numberOfConsumers = 4.)
1085 */ 1365 */
1086 uint32_t numberOfConsumers; 1366 uint32_t numberOfConsumers;
1087 1367
@@ -1108,7 +1388,7 @@ struct Operand {
1108 /** 1388 /**
1109 * Where to find the data for this operand. 1389 * Where to find the data for this operand.
1110 * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or NO_VALUE: 1390 * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or NO_VALUE:
1111 * - All the fields will be 0. 1391 * - All the fields must be 0.
1112 * If the lifetime is CONSTANT_COPY: 1392 * If the lifetime is CONSTANT_COPY:
1113 * - location.poolIndex is 0. 1393 * - location.poolIndex is 0.
1114 * - location.offset is the offset in bytes into Model.operandValues. 1394 * - location.offset is the offset in bytes into Model.operandValues.
@@ -1216,7 +1496,7 @@ struct RequestArgument {
1216 * Updated dimension information. 1496 * Updated dimension information.
1217 * 1497 *
1218 * If dimensions.size() > 0, dimension information was provided along with the 1498 * If dimensions.size() > 0, dimension information was provided along with the
1219 * argument. This can be the case for models that accept inputs of varying size. 1499 * argument. This can be the case for models that accept inputs of varying size.
1220 * This can't change the rank, just the value of the dimensions that were 1500 * This can't change the rank, just the value of the dimensions that were
1221 * unspecified in the model. 1501 * unspecified in the model.
1222 */ 1502 */
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.h b/neuralnetworks/1.0/vts/functional/Callbacks.h
index 2ac6130d..570a4fb7 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.h
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.h
@@ -30,10 +30,6 @@ namespace implementation {
30 * "notify". This "notify" call awakens any client threads waiting on the 30 * "notify". This "notify" call awakens any client threads waiting on the
31 * callback object. 31 * callback object.
32 * 32 *
33 * callback object. When the asynchronous task has finished its workload or has
34 * failed to launch, it must immediately call "notify", awakening any client
35 * threads waiting on the callback object.
36 *
37 * The CallbackBase class implements some of the base synchronization common to 33 * The CallbackBase class implements some of the base synchronization common to
38 * both PrepareModelCallback and ExecutionCallback. For consistency, any HIDL 34 * both PrepareModelCallback and ExecutionCallback. For consistency, any HIDL
39 * callback class must inherit from CallbackBase as well as the HIDL callback 35 * callback class must inherit from CallbackBase as well as the HIDL callback
diff --git a/neuralnetworks/1.1/IDevice.hal b/neuralnetworks/1.1/IDevice.hal
index ca225554..d2c48433 100644
--- a/neuralnetworks/1.1/IDevice.hal
+++ b/neuralnetworks/1.1/IDevice.hal
@@ -41,7 +41,7 @@ interface IDevice extends @1.0::IDevice {
41 /** 41 /**
42 * Gets the supported operations in a model. 42 * Gets the supported operations in a model.
43 * 43 *
44 * getSupportedSubgraph indicates which operations of a model are fully 44 * getSupportedOperations indicates which operations of a model are fully
45 * supported by the vendor driver. If an operation may not be supported for 45 * supported by the vendor driver. If an operation may not be supported for
46 * any reason, getSupportedOperations must return false for that operation. 46 * any reason, getSupportedOperations must return false for that operation.
47 * 47 *
diff --git a/neuralnetworks/1.1/types.hal b/neuralnetworks/1.1/types.hal
index 1d470d63..b4fccaee 100644
--- a/neuralnetworks/1.1/types.hal
+++ b/neuralnetworks/1.1/types.hal
@@ -27,25 +27,24 @@ import @1.0::PerformanceInfo;
27 */ 27 */
28enum OperationType : @1.0::OperationType { 28enum OperationType : @1.0::OperationType {
29 /** 29 /**
30 * BatchToSpace for N-D tensors. 30 * BatchToSpace for N-dimensional tensors.
31 * 31 *
32 * This operation reshapes the "batch" dimension 0 into M + 1 dimensions of shape 32 * This operation reshapes the batch dimension (dimension 0) into M + 1 dimensions of shape
33 * block_shape + [batch], interleaves these blocks back into the grid defined by the 33 * block_shape + [batch], interleaves these blocks back into the grid defined by the
34 * spatial dimensions [1, ..., M], to obtain a result with the same rank as the input. 34 * spatial dimensions [1, ..., M], to obtain a result with the same rank as the input.
35 * The spatial dimensions of this intermediate result are then optionally cropped 35 *
36 * according to the amount to crop to produce the output.
37 * This is the reverse of SpaceToBatch. 36 * This is the reverse of SpaceToBatch.
38 * 37 *
39 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 38 * Supported tensor types:
40 * {@link OperandType::TENSOR_QUANT8_ASYMM} 39 * * {@link OperandType::TENSOR_FLOAT32}
41 * Supported tensor rank: up to 4 40 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
41 *
42 * Supported tensor rank: 4
42 * 43 *
43 * Inputs: 44 * Inputs:
44 * 0: An n-D tensor, specifying the input. 45 * 0: An n-D tensor, specifying the tensor to be reshaped.
45 * 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the 46 * 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the
46 * input tensor. All values must be >= 1. 47 * input tensor. All values must be >= 1.
47 * 2: A 1-D Tensor of type TENSOR_INT32, the amount to crop for each spatial diemension of the
48 * input tensor. All values must be >= 0.
49 * 48 *
50 * Outputs: 49 * Outputs:
51 * 0: A tensor of the same type as input0. 50 * 0: A tensor of the same type as input0.
@@ -53,9 +52,9 @@ enum OperationType : @1.0::OperationType {
53 BATCH_TO_SPACE_ND = 29, 52 BATCH_TO_SPACE_ND = 29,
54 53
55 /** 54 /**
56 * Divides the second tensor from the first tensor, element-wise. 55 * Element-wise division of two tensors.
57 * 56 *
58 * Takes two input tensors of identical OperandType and compatible dimensions. The output 57 * Takes two input tensors of identical type and compatible dimensions. The output
59 * is the result of dividing the first input tensor by the second, optionally 58 * is the result of dividing the first input tensor by the second, optionally
60 * modified by an activation function. 59 * modified by an activation function.
61 * 60 *
@@ -71,7 +70,9 @@ enum OperationType : @1.0::OperationType {
71 * input2.dimension = {5, 4, 3, 1} 70 * input2.dimension = {5, 4, 3, 1}
72 * output.dimension = {5, 4, 3, 2} 71 * output.dimension = {5, 4, 3, 2}
73 * 72 *
74 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 73 * Supported tensor types:
74 * * {@link OperandType::TENSOR_FLOAT32}
75 *
75 * Supported tensor rank: up to 4 76 * Supported tensor rank: up to 4
76 * 77 *
77 * Inputs: 78 * Inputs:
@@ -88,15 +89,17 @@ enum OperationType : @1.0::OperationType {
88 /** 89 /**
89 * Computes the mean of elements across dimensions of a tensor. 90 * Computes the mean of elements across dimensions of a tensor.
90 * 91 *
91 * Reduces input tensor along the dimensions given in axis. Unless keep_dims is true, 92 * Reduces the input tensor along the given dimensions. Unless keep_dims
92 * the rank of the tensor is reduced by 1 for each entry in axis. If keep_dims is 93 * is true, the rank of the tensor is reduced by 1 for each entry in axis.
93 * true, the reduced dimensions are retained with length 1. 94 * If keep_dims is true, the reduced dimensions are retained with length 1.
95 *
96 * If the list of dimensions to reduce is empty, all dimensions are reduced, and a tensor with
97 * a single element is returned.
94 * 98 *
95 * If axis has no entries, all dimensions are reduced, and a tensor with a single 99 * Supported tensor types:
96 * element is returned. 100 * * {@link OperandType::TENSOR_FLOAT32}
101 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
97 * 102 *
98 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
99 * {@link OperandType::TENSOR_QUANT8_ASYMM}
100 * Supported tensor rank: up to 4 103 * Supported tensor rank: up to 4
101 * 104 *
102 * Inputs: 105 * Inputs:
@@ -115,14 +118,18 @@ enum OperationType : @1.0::OperationType {
115 * 118 *
116 * This operation pads a tensor according to the specified paddings. 119 * This operation pads a tensor according to the specified paddings.
117 * 120 *
118 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 121 * Supported tensor types:
119 * {@link OperandType::TENSOR_QUANT8_ASYMM} 122 * * {@link OperandType::TENSOR_FLOAT32}
123 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
124 *
120 * Supported tensor rank: up to 4 125 * Supported tensor rank: up to 4
121 * 126 *
122 * Inputs: 127 * Inputs:
123 * 0: An n-D tensor, specifying the input. 128 * 0: An n-D tensor, specifying the tensor to be padded.
124 * 1: A 2-D Tensor of type TENSOR_INT32. The paddings, before and after for each spatial dimension 129 * 1: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the
125 * of the input tensor. 130 * input tensor. The shape of the tensor must be {rank(input0), 2}.
131 * padding[i, 0] specifies the number of elements to be padded at the front of dimension i.
132 * padding[i, 1] specifies the number of elements to be padded after the end of dimension i.
126 * 133 *
127 * Outputs: 134 * Outputs:
128 * 0: A tensor of the same type as input0. 135 * 0: A tensor of the same type as input0.
@@ -130,7 +137,7 @@ enum OperationType : @1.0::OperationType {
130 PAD = 32, 137 PAD = 32,
131 138
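A 1-D C++ illustration of the padding semantics above: padding[i, 0] zeros are placed in front of the data and padding[i, 1] zeros after it (sketch only):

    #include <vector>

    // Pads one dimension: padBefore zeros in front, padAfter zeros at the end.
    std::vector<float> pad1d(const std::vector<float>& in,
                             int padBefore, int padAfter) {
        std::vector<float> out(padBefore + in.size() + padAfter, 0.0f);
        for (size_t i = 0; i < in.size(); ++i) out[padBefore + i] = in[i];
        return out;
    }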
132 /** 139 /**
133 * SpaceToBatch for N-D tensors. 140 * SpaceToBatch for N-dimensional tensors.
134 * 141 *
135 * This operation divides "spatial" dimensions [1, ..., M] of the input into a grid of blocks 142 * This operation divides "spatial" dimensions [1, ..., M] of the input into a grid of blocks
136 * of shape block_shape, and interleaves these blocks with the "batch" dimension (0) such that 143 * of shape block_shape, and interleaves these blocks with the "batch" dimension (0) such that
@@ -139,16 +146,20 @@ enum OperationType : @1.0::OperationType {
139 * batch position. Prior to division into blocks, the spatial dimensions of the input are 146 * batch position. Prior to division into blocks, the spatial dimensions of the input are
140 * optionally zero padded according to paddings. 147 * optionally zero padded according to paddings.
141 * 148 *
142 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 149 * Supported tensor types:
143 * {@link OperandType::TENSOR_QUANT8_ASYMM} 150 * * {@link OperandType::TENSOR_FLOAT32}
144 * Supported tensor rank: up to 4 151 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
152 *
153 * Supported tensor rank: 4
145 * 154 *
146 * Inputs: 155 * Inputs:
147 * 0: An n-D tensor, specifying the input. 156 * 0: An n-D tensor, specifying the input.
148 * 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the 157 * 1: A 1-D Tensor of type TENSOR_INT32, the block sizes for each spatial dimension of the
149 * input tensor. All values must be >= 1. 158 * input tensor. All values must be >= 1.
150 * 2: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial diemension of the 159 * 2: A 2-D Tensor of type TENSOR_INT32, the paddings for each spatial dimension of the
151 * input tensor. All values must be >= 0. 160 * input tensor. All values must be >= 0. The shape of the tensor must be {rank(input0), 2}.
161 * padding[i, 0] specifies the number of element to be padded in the front of dimension i.
162 * padding[i, 1] specifies the number of element to be padded after the end of dimension i.
152 * 163 *
153 * Outputs: 164 * Outputs:
154 * 0: A tensor of the same type as input0. 165 * 0: A tensor of the same type as input0.
@@ -160,17 +171,20 @@ enum OperationType : @1.0::OperationType {
160 * 171 *
161 * Given a tensor input, this operation returns a tensor of the same type with all 172 * Given a tensor input, this operation returns a tensor of the same type with all
162 * dimensions of size 1 removed. If you don't want to remove all size 1 dimensions, 173 * dimensions of size 1 removed. If you don't want to remove all size 1 dimensions,
163 * you can remove specific size 1 dimensions by specifying axis. 174 * you can remove specific size 1 dimensions by specifying the axes (input1).
175 *
176 * Supported tensor types:
177 * * {@link OperandType::TENSOR_FLOAT32}
178 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
164 * 179 *
165 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32}
166 * {@link OperandType::TENSOR_QUANT8_ASYMM}
167 * Supported tensor rank: up to 4 180 * Supported tensor rank: up to 4
168 * 181 *
169 * Inputs: 182 * Inputs:
170 * 0: An n-D tensor, specifying the input. 183 * 0: An n-D tensor, the tensor to be squeezed.
171 * 1: An 1-D Tensor of type TENSOR_INT32. The dimensions to squeeze. If None (the default), 184 * 1: An optional 1-D tensor of type TENSOR_INT32. The dimensions to squeeze. If specified,
172 * squeezes all dimensions. If specified, only squeezes the dimensions listed. The dimension 185 * only squeezes the dimensions listed. Otherwise, squeezes all dimensions.
173 * index starts at 0. It is an error to squeeze a dimension that is not 1. 186 * The dimension index starts at 0. An error must be reported if squeezing a dimension that
187 * is not 1.
174 * 188 *
175 * Outputs: 189 * Outputs:
176 * 0: A tensor of the same type as input0. Contains the same data as input, but has one or more 190 * 0: A tensor of the same type as input0. Contains the same data as input, but has one or more
@@ -181,23 +195,25 @@ enum OperationType : @1.0::OperationType {
181 /** 195 /**
182 * Extracts a strided slice of a tensor. 196 * Extracts a strided slice of a tensor.
183 * 197 *
184 * This op extracts a slice of size (end-begin)/stride from the given input tensor. 198 * Roughly speaking, this op extracts a slice of size (end - begin) / stride from the given
185 * Starting at the location specified by begin the slice continues by adding 199 * input tensor. Starting at the location specified by begin the slice continues by adding
186 * stride to the index until all dimensions are not less than end. Note that a stride can 200 * stride to the index until all dimensions are not less than end. Note that a stride can
187 * be negative, which causes a reverse slice. 201 * be negative, which causes a reverse slice.
188 * 202 *
189 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 203 * Supported tensor types:
190 * {@link OperandType::TENSOR_QUANT8_ASYMM} 204 * * {@link OperandType::TENSOR_FLOAT32}
205 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
206 *
191 * Supported tensor rank: up to 4 207 * Supported tensor rank: up to 4
192 * 208 *
193 * Inputs: 209 * Inputs:
194 * 0: An n-D tensor, specifying the input. 210 * 0: An n-D tensor, specifying the tensor to be sliced.
195 * 1: A 1-D Tensor of type TENSOR_INT32, the starts of the dimensions of the input 211 * 1: A 1-D Tensor of type TENSOR_INT32, the starts of the dimensions of the input
196 * tensor to be sliced. 212 * tensor to be sliced. The length must be equal to rank(input0).
197 * 2: A 1-D Tensor of type TENSOR_INT32, the ends of the dimensions of the input 213 * 2: A 1-D Tensor of type TENSOR_INT32, the ends of the dimensions of the input
198 * tensor to be sliced. 214 * tensor to be sliced. The length must be equal to rank(input0).
199 * 3: A 1-D Tensor of type TENSOR_INT32, the strides of the dimensions of the input 215 * 3: A 1-D Tensor of type TENSOR_INT32, the strides of the dimensions of the input
200 * tensor to be sliced. 216 * tensor to be sliced. The length must be equal to rank(input0).
201 * 217 *
202 * Outputs: 218 * Outputs:
203 * 0: A tensor of the same type as input0. 219 * 0: A tensor of the same type as input0.
@@ -205,7 +221,7 @@ enum OperationType : @1.0::OperationType {
205 STRIDED_SLICE = 35, 221 STRIDED_SLICE = 35,
206 222
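A 1-D C++ sketch of the slice rule above; the full operation applies this independently along each dimension (illustrative only):

    #include <vector>

    std::vector<float> stridedSlice1d(const std::vector<float>& in,
                                      int begin, int end, int stride) {
        std::vector<float> out;
        if (stride > 0) {
            for (int i = begin; i < end; i += stride) out.push_back(in[i]);
        } else {  // negative stride: reverse slice
            for (int i = begin; i > end; i += stride) out.push_back(in[i]);
        }
        return out;
    }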
207 /** 223 /**
208 * Subtracts the second tensor from the first tensor, element-wise. 224 * Element-wise subtraction of two tensors.
209 * 225 *
210 * Takes two input tensors of identical type and compatible dimensions. The output 226 * Takes two input tensors of identical type and compatible dimensions. The output
211 * is the result of subtracting the second input tensor from the first one, optionally 227 * is the result of subtracting the second input tensor from the first one, optionally
@@ -223,7 +239,9 @@ enum OperationType : @1.0::OperationType {
223 * input2.dimension = {5, 4, 3, 1} 239 * input2.dimension = {5, 4, 3, 1}
224 * output.dimension = {5, 4, 3, 2} 240 * output.dimension = {5, 4, 3, 2}
225 * 241 *
226 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 242 * Supported tensor types:
243 * * {@link OperandType::TENSOR_FLOAT32}
244 *
227 * Supported tensor rank: up to 4 245 * Supported tensor rank: up to 4
228 * 246 *
229 * Inputs: 247 * Inputs:
@@ -240,18 +258,20 @@ enum OperationType : @1.0::OperationType {
240 /** 258 /**
241 * Transposes the input tensor, permuting the dimensions according to the perm tensor. 259 * Transposes the input tensor, permuting the dimensions according to the perm tensor.
242 * 260 *
243 * The returned tensor's dimension i must correspond to the input dimension perm[i]. 261 * The returned tensor's dimension i corresponds to the input dimension perm[i].
244 * If perm is not given, it is set to (n-1...0), where n is the rank of the input tensor. 262 * If perm is not given, it is set to (n-1...0), where n is the rank of the input tensor.
245 * Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors. 263 * Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors.
246 * 264 *
247 * Supported tensor types: {@link OperandType::TENSOR_FLOAT32} 265 * Supported tensor types:
248 * {@link OperandType::TENSOR_QUANT8_ASYMM} 266 * * {@link OperandType::TENSOR_FLOAT32}
267 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
268 *
249 * Supported tensor rank: up to 4 269 * Supported tensor rank: up to 4
250 * 270 *
251 * Inputs: 271 * Inputs:
252 * 0: An n-D tensor, specifying the input. 272 * 0: An n-D tensor, specifying the tensor to be transposed.
253 * 1: A 1-D Tensor of type TENSOR_INT32, the permutation of the dimensions of the input 273 * 1: An optional 1-D Tensor of type TENSOR_INT32, the permutation of the dimensions of the
254 * tensor. 274 * input tensor.
255 * 275 *
256 * Outputs: 276 * Outputs:
257 * 0: A tensor of the same type as input0. 277 * 0: A tensor of the same type as input0.
diff --git a/radio/1.2/vts/functional/radio_hidl_hal_api.cpp b/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
index ee130f85..c2524e49 100644
--- a/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
@@ -30,10 +30,8 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan) {
         .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
         .channels = {1,2}};

-    V1_2::NetworkScanRequest request = {
-        .type = ScanType::ONE_SHOT,
-        .interval = 60,
-        .specifiers = {specifier}};
+    ::android::hardware::radio::V1_2::NetworkScanRequest request = {
+        .type = ScanType::ONE_SHOT, .interval = 60, .specifiers = {specifier}};

     Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
     ASSERT_OK(res);
@@ -42,9 +40,9 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan) {
     EXPECT_EQ(serial, radioRsp_v1_2->rspInfo.serial);

     ALOGI("startNetworkScan, rspInfo.error = %s\n", toString(radioRsp_v1_2->rspInfo.error).c_str());
-    if (cardStatus.cardState == CardState::ABSENT) {
+    if (cardStatus.base.cardState == CardState::ABSENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::SIM_ABSENT}));
-    } else if (cardStatus.cardState == CardState::PRESENT) {
+    } else if (cardStatus.base.cardState == CardState::PRESENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::NONE}));
     }
 }
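The recurring `cardStatus.cardState` to `cardStatus.base.cardState` edit in this file follows from the global `cardStatus` changing type from the 1.0 struct to the 1.2 one (see radio_response.cpp below). A minimal sketch of the nesting, assuming the usual HIDL minor-version pattern; everything other than the `base` member is illustrative:

// Not the real headers: a toy model of why the 1.0 fields moved one level down.
enum class CardState { ABSENT, PRESENT, ERROR };

namespace V1_0 {
struct CardStatus {
    CardState cardState;
    // ... remaining 1.0 fields elided ...
};
}  // namespace V1_0

namespace V1_2 {
struct CardStatus {
    V1_0::CardStatus base;  // the 1.0 struct is embedded, not inherited
    // ... fields new in 1.2 elided ...
};
}  // namespace V1_2

// Hence the tests now read cardStatus.base.cardState rather than cardStatus.cardState.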
@@ -55,9 +53,8 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan) {
 TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidArgument) {
     const int serial = GetRandomSerialNumber();

-    V1_2::NetworkScanRequest request = {
-        .type = ScanType::ONE_SHOT,
-        .interval = 60};
+    ::android::hardware::radio::V1_2::NetworkScanRequest request = {.type = ScanType::ONE_SHOT,
+                                                                    .interval = 60};

     Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
     ASSERT_OK(res);
@@ -67,10 +64,10 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidArgument) {

     ALOGI("startNetworkScan_InvalidArgument, rspInfo.error = %s\n",
           toString(radioRsp_v1_2->rspInfo.error).c_str());
-    if (cardStatus.cardState == CardState::ABSENT) {
+    if (cardStatus.base.cardState == CardState::ABSENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
                                      {RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
-    } else if (cardStatus.cardState == CardState::PRESENT) {
+    } else if (cardStatus.base.cardState == CardState::PRESENT) {
         ASSERT_TRUE(
             CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
     }
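These assertions all funnel through CheckAnyOfErrors() from vts_test_util.h. Its exact signature is not part of this diff; a hypothetical equivalent conveying the intent (the returned error must be one of the values acceptable for the current SIM state):

#include <algorithm>
#include <vector>

// Hypothetical stand-in for the helper declared in vts_test_util.h.
template <typename E>
bool CheckAnyOfErrors(E actual, const std::vector<E>& accepted) {
    return std::find(accepted.begin(), accepted.end(), actual) != accepted.end();
}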
@@ -87,7 +84,7 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidInterval1) {
         .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
         .channels = {1,2}};

-    V1_2::NetworkScanRequest request = {
+    ::android::hardware::radio::V1_2::NetworkScanRequest request = {
         .type = ScanType::ONE_SHOT,
         .interval = 4,
         .specifiers = {specifier},
@@ -103,10 +100,10 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidInterval1) {

     ALOGI("startNetworkScan_InvalidInterval1, rspInfo.error = %s\n",
           toString(radioRsp_v1_2->rspInfo.error).c_str());
-    if (cardStatus.cardState == CardState::ABSENT) {
+    if (cardStatus.base.cardState == CardState::ABSENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
                                      {RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
-    } else if (cardStatus.cardState == CardState::PRESENT) {
+    } else if (cardStatus.base.cardState == CardState::PRESENT) {
         ASSERT_TRUE(
             CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
     }
@@ -123,7 +120,7 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidInterval2) {
         .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
         .channels = {1,2}};

-    V1_2::NetworkScanRequest request = {
+    ::android::hardware::radio::V1_2::NetworkScanRequest request = {
         .type = ScanType::ONE_SHOT,
         .interval = 301,
         .specifiers = {specifier},
@@ -139,10 +136,10 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidInterval2) {

     ALOGI("startNetworkScan_InvalidInterval2, rspInfo.error = %s\n",
           toString(radioRsp_v1_2->rspInfo.error).c_str());
-    if (cardStatus.cardState == CardState::ABSENT) {
+    if (cardStatus.base.cardState == CardState::ABSENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
                                      {RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
-    } else if (cardStatus.cardState == CardState::PRESENT) {
+    } else if (cardStatus.base.cardState == CardState::PRESENT) {
         ASSERT_TRUE(
             CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
     }
@@ -159,7 +156,7 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidMaxSearchTime1) {
         .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
         .channels = {1,2}};

-    V1_2::NetworkScanRequest request = {
+    ::android::hardware::radio::V1_2::NetworkScanRequest request = {
         .type = ScanType::ONE_SHOT,
         .interval = 60,
         .specifiers = {specifier},
@@ -175,10 +172,10 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidMaxSearchTime1) {

     ALOGI("startNetworkScan_InvalidMaxSearchTime1, rspInfo.error = %s\n",
           toString(radioRsp_v1_2->rspInfo.error).c_str());
-    if (cardStatus.cardState == CardState::ABSENT) {
+    if (cardStatus.base.cardState == CardState::ABSENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
                                      {RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
-    } else if (cardStatus.cardState == CardState::PRESENT) {
+    } else if (cardStatus.base.cardState == CardState::PRESENT) {
         ASSERT_TRUE(
             CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
     }
@@ -195,7 +192,7 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidMaxSearchTime2) {
         .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
         .channels = {1,2}};

-    V1_2::NetworkScanRequest request = {
+    ::android::hardware::radio::V1_2::NetworkScanRequest request = {
         .type = ScanType::ONE_SHOT,
         .interval = 60,
         .specifiers = {specifier},
@@ -211,10 +208,10 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidMaxSearchTime2) {

     ALOGI("startNetworkScan_InvalidMaxSearchTime2, rspInfo.error = %s\n",
           toString(radioRsp_v1_2->rspInfo.error).c_str());
-    if (cardStatus.cardState == CardState::ABSENT) {
+    if (cardStatus.base.cardState == CardState::ABSENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
                                      {RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
-    } else if (cardStatus.cardState == CardState::PRESENT) {
+    } else if (cardStatus.base.cardState == CardState::PRESENT) {
         ASSERT_TRUE(
             CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
     }
@@ -231,7 +228,7 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidPeriodicity1) {
         .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
         .channels = {1,2}};

-    V1_2::NetworkScanRequest request = {
+    ::android::hardware::radio::V1_2::NetworkScanRequest request = {
         .type = ScanType::ONE_SHOT,
         .interval = 60,
         .specifiers = {specifier},
@@ -247,10 +244,10 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidPeriodicity1) {

     ALOGI("startNetworkScan_InvalidPeriodicity1, rspInfo.error = %s\n",
           toString(radioRsp_v1_2->rspInfo.error).c_str());
-    if (cardStatus.cardState == CardState::ABSENT) {
+    if (cardStatus.base.cardState == CardState::ABSENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
                                      {RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
-    } else if (cardStatus.cardState == CardState::PRESENT) {
+    } else if (cardStatus.base.cardState == CardState::PRESENT) {
         ASSERT_TRUE(
             CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
     }
@@ -267,7 +264,7 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidPeriodicity2) {
         .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
         .channels = {1,2}};

-    V1_2::NetworkScanRequest request = {
+    ::android::hardware::radio::V1_2::NetworkScanRequest request = {
         .type = ScanType::ONE_SHOT,
         .interval = 60,
         .specifiers = {specifier},
@@ -283,10 +280,10 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidPeriodicity2) {

     ALOGI("startNetworkScan_InvalidPeriodicity2, rspInfo.error = %s\n",
           toString(radioRsp_v1_2->rspInfo.error).c_str());
-    if (cardStatus.cardState == CardState::ABSENT) {
+    if (cardStatus.base.cardState == CardState::ABSENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
                                      {RadioError::SIM_ABSENT, RadioError::INVALID_ARGUMENTS}));
-    } else if (cardStatus.cardState == CardState::PRESENT) {
+    } else if (cardStatus.base.cardState == CardState::PRESENT) {
         ASSERT_TRUE(
             CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::INVALID_ARGUMENTS}));
     }
@@ -303,7 +300,7 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_GoodRequest1) {
         .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
         .channels = {1,2}};

-    V1_2::NetworkScanRequest request = {
+    ::android::hardware::radio::V1_2::NetworkScanRequest request = {
         .type = ScanType::ONE_SHOT,
         .interval = 60,
         .specifiers = {specifier},
@@ -319,10 +316,10 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_GoodRequest1) {

     ALOGI("startNetworkScan_InvalidArgument, rspInfo.error = %s\n",
           toString(radioRsp_v1_2->rspInfo.error).c_str());
-    if (cardStatus.cardState == CardState::ABSENT) {
+    if (cardStatus.base.cardState == CardState::ABSENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
                                      {RadioError::NONE, RadioError::SIM_ABSENT}));
-    } else if (cardStatus.cardState == CardState::PRESENT) {
+    } else if (cardStatus.base.cardState == CardState::PRESENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::NONE}));
     }
 }
@@ -338,7 +335,7 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_GoodRequest2) {
         .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
         .channels = {1,2}};

-    V1_2::NetworkScanRequest request = {
+    ::android::hardware::radio::V1_2::NetworkScanRequest request = {
         .type = ScanType::ONE_SHOT,
         .interval = 60,
         .specifiers = {specifier},
@@ -355,10 +352,10 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_GoodRequest2) {

     ALOGI("startNetworkScan_InvalidArgument, rspInfo.error = %s\n",
           toString(radioRsp_v1_2->rspInfo.error).c_str());
-    if (cardStatus.cardState == CardState::ABSENT) {
+    if (cardStatus.base.cardState == CardState::ABSENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error,
                                      {RadioError::NONE, RadioError::SIM_ABSENT}));
-    } else if (cardStatus.cardState == CardState::PRESENT) {
+    } else if (cardStatus.base.cardState == CardState::PRESENT) {
         ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_2->rspInfo.error, {RadioError::NONE}));
     }
 }
@@ -369,8 +366,8 @@ TEST_F(RadioHidlTest_v1_2, startNetworkScan_GoodRequest2) {
 TEST_F(RadioHidlTest_v1_2, setIndicationFilter_1_2) {
     const int serial = GetRandomSerialNumber();

-    Return<void> res =
-        radio_v1_2->setIndicationFilter_1_2(serial, static_cast<int>(IndicationFilter::ALL));
+    Return<void> res = radio_v1_2->setIndicationFilter_1_2(
+        serial, static_cast<int>(::android::hardware::radio::V1_2::IndicationFilter::ALL));
     ASSERT_OK(res);
     EXPECT_EQ(std::cv_status::no_timeout, wait());
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -390,7 +387,7 @@ TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_invalidHysteresisD
     Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
         serial, 5000,
         10,  // hysteresisDb too large given threshold list deltas
-        {-109, -103, -97, -89}, V1_2::AccessNetwork::GERAN);
+        {-109, -103, -97, -89}, ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
     ASSERT_OK(res);
     EXPECT_EQ(std::cv_status::no_timeout, wait());
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
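The "hysteresisDb too large" comment encodes the contract documented for setSignalStrengthReportingCriteria: the hysteresis must be smaller than the smallest delta between consecutive thresholds, or a signal level could oscillate across a boundary without ever settling into a report. A sketch of that validity check (the function itself is illustrative; only the rule comes from the HAL docs):

#include <cstdlib>
#include <vector>

// Illustrative check: hysteresis must be smaller than every threshold gap.
bool hysteresisFitsThresholds(int hysteresis, const std::vector<int>& thresholds) {
    for (size_t i = 1; i < thresholds.size(); ++i) {
        if (std::abs(thresholds[i] - thresholds[i - 1]) <= hysteresis) {
            return false;  // gap too small: crossings could never settle
        }
    }
    return true;
}
// hysteresisFitsThresholds(10, {-109, -103, -97, -89}) == false (min gap 6 dB),
// which is why the test above expects INVALID_ARGUMENTS.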
@@ -407,8 +404,8 @@ TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_invalidHysteresisD
 TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_EmptyParams) {
     const int serial = GetRandomSerialNumber();

-    Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(serial, 0, 0, {},
-                                                                      V1_2::AccessNetwork::GERAN);
+    Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
+        serial, 0, 0, {}, ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
     ASSERT_OK(res);
     EXPECT_EQ(std::cv_status::no_timeout, wait());
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -426,7 +423,8 @@ TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_Geran) {
     const int serial = GetRandomSerialNumber();

     Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
-        serial, 5000, 2, {-109, -103, -97, -89}, V1_2::AccessNetwork::GERAN);
+        serial, 5000, 2, {-109, -103, -97, -89},
+        ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
     ASSERT_OK(res);
     EXPECT_EQ(std::cv_status::no_timeout, wait());
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -444,7 +442,8 @@ TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_Utran) {
     const int serial = GetRandomSerialNumber();

     Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
-        serial, 5000, 2, {-110, -97, -73, -49, -25}, V1_2::AccessNetwork::UTRAN);
+        serial, 5000, 2, {-110, -97, -73, -49, -25},
+        ::android::hardware::radio::V1_2::AccessNetwork::UTRAN);
     ASSERT_OK(res);
     EXPECT_EQ(std::cv_status::no_timeout, wait());
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -462,7 +461,8 @@ TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_Eutran) {
     const int serial = GetRandomSerialNumber();

     Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
-        serial, 5000, 2, {-140, -128, -118, -108, -98, -44}, V1_2::AccessNetwork::EUTRAN);
+        serial, 5000, 2, {-140, -128, -118, -108, -98, -44},
+        ::android::hardware::radio::V1_2::AccessNetwork::EUTRAN);
     ASSERT_OK(res);
     EXPECT_EQ(std::cv_status::no_timeout, wait());
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -480,7 +480,8 @@ TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_Cdma2000) {
     const int serial = GetRandomSerialNumber();

     Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
-        serial, 5000, 2, {-105, -90, -75, -65}, V1_2::AccessNetwork::CDMA2000);
+        serial, 5000, 2, {-105, -90, -75, -65},
+        ::android::hardware::radio::V1_2::AccessNetwork::CDMA2000);
     ASSERT_OK(res);
     EXPECT_EQ(std::cv_status::no_timeout, wait());
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -500,7 +501,8 @@ TEST_F(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_invalidHysteresisDlK
     Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(
         serial, 5000,
         5000,  // hysteresisDlKbps too big for thresholds delta
-        100, {1000, 5000, 10000, 20000}, {500, 1000, 5000, 10000}, V1_2::AccessNetwork::GERAN);
+        100, {1000, 5000, 10000, 20000}, {500, 1000, 5000, 10000},
+        ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
     ASSERT_OK(res);
     EXPECT_EQ(std::cv_status::no_timeout, wait());
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
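The link-capacity variant applies the same rule per direction: the downlink gaps here are {4000, 5000, 10000} kbps, so a 5000 kbps hysteresis cannot fit. Reusing the illustrative helper sketched earlier:

// Downlink: min gap 4000 kbps <= hysteresisDlKbps 5000 -> INVALID_ARGUMENTS expected.
bool ok = hysteresisFitsThresholds(5000, {1000, 5000, 10000, 20000});  // false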
@@ -520,7 +522,8 @@ TEST_F(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_invalidHysteresisUlK
     Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(
         serial, 5000, 500,
         1000,  // hysteresisUlKbps too big for thresholds delta
-        {1000, 5000, 10000, 20000}, {500, 1000, 5000, 10000}, V1_2::AccessNetwork::GERAN);
+        {1000, 5000, 10000, 20000}, {500, 1000, 5000, 10000},
+        ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
     ASSERT_OK(res);
     EXPECT_EQ(std::cv_status::no_timeout, wait());
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -537,8 +540,8 @@ TEST_F(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_invalidHysteresisUlK
 TEST_F(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_emptyParams) {
     const int serial = GetRandomSerialNumber();

-    Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(serial, 0, 0, 0, {}, {},
-                                                                    V1_2::AccessNetwork::GERAN);
+    Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(
+        serial, 0, 0, 0, {}, {}, ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
     ASSERT_OK(res);
     EXPECT_EQ(std::cv_status::no_timeout, wait());
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -557,7 +560,7 @@ TEST_F(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_Geran) {

     Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(
         serial, 5000, 500, 100, {1000, 5000, 10000, 20000}, {500, 1000, 5000, 10000},
-        V1_2::AccessNetwork::GERAN);
+        ::android::hardware::radio::V1_2::AccessNetwork::GERAN);
     ASSERT_OK(res);
     EXPECT_EQ(std::cv_status::no_timeout, wait());
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
@@ -574,7 +577,8 @@ TEST_F(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_Geran) {
 TEST_F(RadioHidlTest_v1_2, setupDataCall_1_2) {
     const int serial = GetRandomSerialNumber();

-    V1_2::AccessNetwork accessNetwork = V1_2::AccessNetwork::EUTRAN;
+    ::android::hardware::radio::V1_2::AccessNetwork accessNetwork =
+        ::android::hardware::radio::V1_2::AccessNetwork::EUTRAN;

     DataProfileInfo dataProfileInfo;
     memset(&dataProfileInfo, 0, sizeof(dataProfileInfo));
@@ -600,7 +604,8 @@ TEST_F(RadioHidlTest_v1_2, setupDataCall_1_2) {
     bool roamingAllowed = false;
     bool isRoaming = false;

-    V1_2::DataRequestReason reason = V1_2::DataRequestReason::NORMAL;
+    ::android::hardware::radio::V1_2::DataRequestReason reason =
+        ::android::hardware::radio::V1_2::DataRequestReason::NORMAL;
     std::vector<hidl_string> addresses = {""};
     std::vector<hidl_string> dnses = {""};

@@ -613,12 +618,12 @@ TEST_F(RadioHidlTest_v1_2, setupDataCall_1_2) {
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
     EXPECT_EQ(serial, radioRsp_v1_2->rspInfo.serial);

-    if (cardStatus.cardState == CardState::ABSENT) {
+    if (cardStatus.base.cardState == CardState::ABSENT) {
         ASSERT_TRUE(CheckAnyOfErrors(
             radioRsp_v1_2->rspInfo.error,
             {RadioError::SIM_ABSENT, RadioError::RADIO_NOT_AVAILABLE, RadioError::INVALID_ARGUMENTS,
              RadioError::OP_NOT_ALLOWED_BEFORE_REG_TO_NW, RadioError::REQUEST_NOT_SUPPORTED}));
-    } else if (cardStatus.cardState == CardState::PRESENT) {
+    } else if (cardStatus.base.cardState == CardState::PRESENT) {
         ASSERT_TRUE(CheckAnyOfErrors(
             radioRsp_v1_2->rspInfo.error,
             {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::INVALID_ARGUMENTS,
@@ -632,7 +637,8 @@ TEST_F(RadioHidlTest_v1_2, setupDataCall_1_2) {
 TEST_F(RadioHidlTest_v1_2, deactivateDataCall_1_2) {
     const int serial = GetRandomSerialNumber();
     int cid = 1;
-    V1_2::DataRequestReason reason = V1_2::DataRequestReason::NORMAL;
+    ::android::hardware::radio::V1_2::DataRequestReason reason =
+        ::android::hardware::radio::V1_2::DataRequestReason::NORMAL;

     Return<void> res = radio_v1_2->deactivateDataCall_1_2(serial, cid, reason);
     ASSERT_OK(res);
@@ -641,13 +647,13 @@ TEST_F(RadioHidlTest_v1_2, deactivateDataCall_1_2) {
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_2->rspInfo.type);
     EXPECT_EQ(serial, radioRsp_v1_2->rspInfo.serial);

-    if (cardStatus.cardState == CardState::ABSENT) {
+    if (cardStatus.base.cardState == CardState::ABSENT) {
         ASSERT_TRUE(CheckAnyOfErrors(
             radioRsp_v1_2->rspInfo.error,
             {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::INVALID_CALL_ID,
              RadioError::INVALID_STATE, RadioError::INVALID_ARGUMENTS,
              RadioError::REQUEST_NOT_SUPPORTED, RadioError::CANCELLED, RadioError::SIM_ABSENT}));
-    } else if (cardStatus.cardState == CardState::PRESENT) {
+    } else if (cardStatus.base.cardState == CardState::PRESENT) {
         ASSERT_TRUE(CheckAnyOfErrors(
             radioRsp_v1_2->rspInfo.error,
             {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::INVALID_CALL_ID,
@@ -709,4 +715,4 @@ TEST_F(RadioHidlTest_v1_2, getDataRegistrationState) {
     ASSERT_TRUE(CheckAnyOfErrors(
         radioRsp_v1_2->rspInfo.error,
         {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::NOT_PROVISIONED}));
-}
+}
\ No newline at end of file
diff --git a/radio/1.2/vts/functional/radio_hidl_hal_test.cpp b/radio/1.2/vts/functional/radio_hidl_hal_test.cpp
index d74d077d..edac1aac 100644
--- a/radio/1.2/vts/functional/radio_hidl_hal_test.cpp
+++ b/radio/1.2/vts/functional/radio_hidl_hal_test.cpp
@@ -17,14 +17,18 @@
 #include <radio_hidl_hal_utils_v1_2.h>

 void RadioHidlTest_v1_2::SetUp() {
-    radio_v1_2 = ::testing::VtsHalHidlTargetTestBase::getService<V1_2::IRadio>(
-        RadioHidlEnvironment::Instance()->getServiceName<V1_2::IRadio>(
-            hidl_string(RADIO_SERVICE_NAME)));
+    radio_v1_2 =
+        ::testing::VtsHalHidlTargetTestBase::getService<::android::hardware::radio::V1_2::IRadio>(
+            RadioHidlEnvironment::Instance()
+                ->getServiceName<::android::hardware::radio::V1_2::IRadio>(
+                    hidl_string(RADIO_SERVICE_NAME)));
     if (radio_v1_2 == NULL) {
         sleep(60);
-        radio_v1_2 = ::testing::VtsHalHidlTargetTestBase::getService<V1_2::IRadio>(
-            RadioHidlEnvironment::Instance()->getServiceName<V1_2::IRadio>(
-                hidl_string(RADIO_SERVICE_NAME)));
+        radio_v1_2 = ::testing::VtsHalHidlTargetTestBase::getService<
+            ::android::hardware::radio::V1_2::IRadio>(
+            RadioHidlEnvironment::Instance()
+                ->getServiceName<::android::hardware::radio::V1_2::IRadio>(
+                    hidl_string(RADIO_SERVICE_NAME)));
     }
     ASSERT_NE(nullptr, radio_v1_2.get());

@@ -71,4 +75,4 @@ std::cv_status RadioHidlTest_v1_2::wait() {
     }
     count_--;
     return status;
-}
+}
\ No newline at end of file
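SetUp() above calls getService() once, sleeps, and retries before asserting, since the radio service may still be registering when the test run starts. The same pattern as a generic helper (a sketch only; the project code inlines it instead of factoring it out):

#include <unistd.h>

// Illustrative retry-once wrapper around any service getter.
template <typename ServiceT, typename GetterT>
ServiceT getServiceWithRetry(GetterT getService, unsigned retryDelaySec = 60) {
    ServiceT service = getService();
    if (service == nullptr) {
        sleep(retryDelaySec);  // give the service a chance to register
        service = getService();
    }
    return service;
}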
diff --git a/radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h b/radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h
index c61913c7..2703ca51 100644
--- a/radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h
+++ b/radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h
@@ -22,14 +22,14 @@
 #include <condition_variable>
 #include <mutex>

-#include <android/hardware/radio/1.1/IRadioIndication.h>
-#include <android/hardware/radio/1.1/IRadioResponse.h>
 #include <android/hardware/radio/1.2/IRadio.h>
+#include <android/hardware/radio/1.2/IRadioIndication.h>
+#include <android/hardware/radio/1.2/IRadioResponse.h>
 #include <android/hardware/radio/1.2/types.h>

 #include "vts_test_util.h"

-using namespace ::android::hardware::radio;
+using namespace ::android::hardware::radio::V1_2;
 using namespace ::android::hardware::radio::V1_1;
 using namespace ::android::hardware::radio::V1_0;

@@ -44,10 +44,10 @@ using ::android::sp;
 #define RADIO_SERVICE_NAME "slot1"

 class RadioHidlTest_v1_2;
-extern CardStatus cardStatus;
+extern ::android::hardware::radio::V1_2::CardStatus cardStatus;

 /* Callback class for radio response v1_2*/
-class RadioResponse_v1_2 : public V1_1::IRadioResponse {
+class RadioResponse_v1_2 : public ::android::hardware::radio::V1_2::IRadioResponse {
   protected:
     RadioHidlTest_v1_2& parent_v1_2;

@@ -57,8 +57,9 @@ class RadioResponse_v1_2 : public V1_1::IRadioResponse {
     RadioResponse_v1_2(RadioHidlTest_v1_2& parent_v1_2);
     virtual ~RadioResponse_v1_2() = default;

-    Return<void> getIccCardStatusResponse(const RadioResponseInfo& info,
-                                          const CardStatus& cardStatus);
+    Return<void> getIccCardStatusResponse(
+        const RadioResponseInfo& info,
+        const ::android::hardware::radio::V1_0::CardStatus& cardStatus);

     Return<void> supplyIccPinForAppResponse(const RadioResponseInfo& info,
                                             int32_t remainingRetries);
@@ -81,8 +82,9 @@ class RadioResponse_v1_2 : public V1_1::IRadioResponse {
     Return<void> supplyNetworkDepersonalizationResponse(const RadioResponseInfo& info,
                                                         int32_t remainingRetries);

-    Return<void> getCurrentCallsResponse(const RadioResponseInfo& info,
-                                         const ::android::hardware::hidl_vec<Call>& calls);
+    Return<void> getCurrentCallsResponse(
+        const RadioResponseInfo& info,
+        const ::android::hardware::hidl_vec<::android::hardware::radio::V1_0::Call>& calls);

     Return<void> dialResponse(const RadioResponseInfo& info);

@@ -104,14 +106,17 @@ class RadioResponse_v1_2 : public V1_1::IRadioResponse {
     Return<void> getLastCallFailCauseResponse(const RadioResponseInfo& info,
                                               const LastCallFailCauseInfo& failCauseInfo);

-    Return<void> getSignalStrengthResponse(const RadioResponseInfo& info,
-                                           const SignalStrength& sigStrength);
+    Return<void> getSignalStrengthResponse(
+        const RadioResponseInfo& info,
+        const ::android::hardware::radio::V1_0::SignalStrength& sigStrength);

-    Return<void> getVoiceRegistrationStateResponse(const RadioResponseInfo& info,
-                                                   const VoiceRegStateResult& voiceRegResponse);
+    Return<void> getVoiceRegistrationStateResponse(
+        const RadioResponseInfo& info,
+        const ::android::hardware::radio::V1_0::VoiceRegStateResult& voiceRegResponse);

-    Return<void> getDataRegistrationStateResponse(const RadioResponseInfo& info,
-                                                  const DataRegStateResult& dataRegResponse);
+    Return<void> getDataRegistrationStateResponse(
+        const RadioResponseInfo& info,
+        const ::android::hardware::radio::V1_0::DataRegStateResult& dataRegResponse);

     Return<void> getOperatorResponse(const RadioResponseInfo& info,
                                      const ::android::hardware::hidl_string& longName,
@@ -310,8 +315,9 @@ class RadioResponse_v1_2 : public V1_1::IRadioResponse {
     Return<void> getVoiceRadioTechnologyResponse(const RadioResponseInfo& info,
                                                  RadioTechnology rat);

-    Return<void> getCellInfoListResponse(const RadioResponseInfo& info,
-                                         const ::android::hardware::hidl_vec<CellInfo>& cellInfo);
+    Return<void> getCellInfoListResponse(
+        const RadioResponseInfo& info,
+        const ::android::hardware::hidl_vec<::android::hardware::radio::V1_0::CellInfo>& cellInfo);

     Return<void> setCellInfoListRateResponse(const RadioResponseInfo& info);

@@ -406,27 +412,33 @@ class RadioResponse_v1_2 : public V1_1::IRadioResponse {

     Return<void> setLinkCapacityReportingCriteriaResponse(const RadioResponseInfo& info);

-    Return<void> getIccCardStatusResponse_1_2(const RadioResponseInfo& info,
-                                              const CardStatus& card_status);
+    Return<void> getIccCardStatusResponse_1_2(
+        const RadioResponseInfo& info,
+        const ::android::hardware::radio::V1_2::CardStatus& card_status);

-    Return<void> getCurrentCallsResponse_1_2(const RadioResponseInfo& info,
-                                             const ::android::hardware::hidl_vec<Call>& calls);
+    Return<void> getCurrentCallsResponse_1_2(
+        const RadioResponseInfo& info,
+        const ::android::hardware::hidl_vec<::android::hardware::radio::V1_2::Call>& calls);

-    Return<void> getSignalStrengthResponse_1_2(const RadioResponseInfo& info,
-                                               const SignalStrength& sig_strength);
+    Return<void> getSignalStrengthResponse_1_2(
+        const RadioResponseInfo& info,
+        const ::android::hardware::radio::V1_2::SignalStrength& sig_strength);

     Return<void> getCellInfoListResponse_1_2(
-        const RadioResponseInfo& info, const ::android::hardware::hidl_vec<CellInfo>& cellInfo);
+        const RadioResponseInfo& info,
+        const ::android::hardware::hidl_vec<::android::hardware::radio::V1_2::CellInfo>& cellInfo);

     Return<void> getVoiceRegistrationStateResponse_1_2(
-        const RadioResponseInfo& info, const V1_2::VoiceRegStateResult& voiceRegResponse);
+        const RadioResponseInfo& info,
+        const ::android::hardware::radio::V1_2::VoiceRegStateResult& voiceRegResponse);

     Return<void> getDataRegistrationStateResponse_1_2(
-        const RadioResponseInfo& info, const V1_2::DataRegStateResult& dataRegResponse);
+        const RadioResponseInfo& info,
+        const ::android::hardware::radio::V1_2::DataRegStateResult& dataRegResponse);
 };

 /* Callback class for radio indication */
-class RadioIndication_v1_2 : public V1_1::IRadioIndication {
+class RadioIndication_v1_2 : public ::android::hardware::radio::V1_2::IRadioIndication {
   protected:
     RadioHidlTest_v1_2& parent_v1_2;

@@ -435,26 +447,33 @@ class RadioIndication_v1_2 : public V1_1::IRadioIndication {
     virtual ~RadioIndication_v1_2() = default;

     /* 1.2 Api */
-    Return<void> networkScanResult_1_2(RadioIndicationType type,
-                                       const V1_2::NetworkScanResult& result);
+    Return<void> networkScanResult_1_2(
+        RadioIndicationType type,
+        const ::android::hardware::radio::V1_2::NetworkScanResult& result);

-    Return<void> cellInfoList_1_2(RadioIndicationType type,
-                                  const ::android::hardware::hidl_vec<V1_2::CellInfo>& records);
+    Return<void> cellInfoList_1_2(
+        RadioIndicationType type,
+        const ::android::hardware::hidl_vec<::android::hardware::radio::V1_2::CellInfo>& records);

-    Return<void> currentLinkCapacityEstimate(RadioIndicationType type,
-                                             const V1_2::LinkCapacityEstimate& lce);
+    Return<void> currentLinkCapacityEstimate(
+        RadioIndicationType type,
+        const ::android::hardware::radio::V1_2::LinkCapacityEstimate& lce);

     Return<void> currentPhysicalChannelConfigs(
         RadioIndicationType type,
-        const ::android::hardware::hidl_vec<V1_2::PhysicalChannelConfig>& configs);
+        const ::android::hardware::hidl_vec<
+            ::android::hardware::radio::V1_2::PhysicalChannelConfig>& configs);

-    Return<void> currentSignalStrength_1_2(RadioIndicationType type,
-                                           const V1_2::SignalStrength& signalStrength);
+    Return<void> currentSignalStrength_1_2(
+        RadioIndicationType type,
+        const ::android::hardware::radio::V1_2::SignalStrength& signalStrength);

     /* 1.1 Api */
     Return<void> carrierInfoForImsiEncryption(RadioIndicationType info);

-    Return<void> networkScanResult(RadioIndicationType type, const NetworkScanResult& result);
+    Return<void> networkScanResult(
+        RadioIndicationType type,
+        const ::android::hardware::radio::V1_1::NetworkScanResult& result);

     Return<void> keepaliveStatus(RadioIndicationType type, const KeepaliveStatus& status);

@@ -480,8 +499,9 @@ class RadioIndication_v1_2 : public V1_1::IRadioIndication {
                                   const ::android::hardware::hidl_string& nitzTime,
                                   uint64_t receivedTime);

-    Return<void> currentSignalStrength(RadioIndicationType type,
-                                       const SignalStrength& signalStrength);
+    Return<void> currentSignalStrength(
+        RadioIndicationType type,
+        const ::android::hardware::radio::V1_0::SignalStrength& signalStrength);

     Return<void> dataCallListChanged(
         RadioIndicationType type, const ::android::hardware::hidl_vec<SetupDataCallResult>& dcList);
@@ -539,8 +559,9 @@ class RadioIndication_v1_2 : public V1_1::IRadioIndication {

     Return<void> voiceRadioTechChanged(RadioIndicationType type, RadioTechnology rat);

-    Return<void> cellInfoList(RadioIndicationType type,
-                              const ::android::hardware::hidl_vec<CellInfo>& records);
+    Return<void> cellInfoList(
+        RadioIndicationType type,
+        const ::android::hardware::hidl_vec<::android::hardware::radio::V1_0::CellInfo>& records);

     Return<void> imsNetworkStateChanged(RadioIndicationType type);

@@ -575,7 +596,9 @@ class RadioHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
         static RadioHidlEnvironment* instance = new RadioHidlEnvironment;
         return instance;
     }
-    virtual void registerTestServices() override { registerTestService<V1_2::IRadio>(); }
+    virtual void registerTestServices() override {
+        registerTestService<::android::hardware::radio::V1_2::IRadio>();
+    }

   private:
     RadioHidlEnvironment() {}
@@ -598,11 +621,11 @@ class RadioHidlTest_v1_2 : public ::testing::VtsHalHidlTargetTestBase {
     std::cv_status wait();

     /* radio service handle */
-    sp<V1_2::IRadio> radio_v1_2;
+    sp<::android::hardware::radio::V1_2::IRadio> radio_v1_2;

     /* radio response handle */
     sp<RadioResponse_v1_2> radioRsp_v1_2;

     /* radio indication handle */
     sp<RadioIndication_v1_2> radioInd_v1_2;
-};
+};
\ No newline at end of file
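With all three `using namespace ... V1_0/V1_1/V1_2` directives active in this header, any type name present in more than one HAL version no longer resolves, which is what drives the fully qualified names throughout this patch. A toy reproduction of the ambiguity:

// Self-contained demo: two versions of the same type name, both pulled in.
namespace V1_0 { struct CellInfo { int version = 0; }; }
namespace V1_2 { struct CellInfo { int version = 2; }; }

using namespace V1_0;
using namespace V1_2;

int main() {
    // CellInfo c;          // error: reference to 'CellInfo' is ambiguous
    ::V1_2::CellInfo c;     // OK once the version is spelled out
    return c.version;
}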
diff --git a/radio/1.2/vts/functional/radio_indication.cpp b/radio/1.2/vts/functional/radio_indication.cpp
index 57f5cb06..eba9dc07 100644
--- a/radio/1.2/vts/functional/radio_indication.cpp
+++ b/radio/1.2/vts/functional/radio_indication.cpp
@@ -20,29 +20,33 @@ RadioIndication_v1_2::RadioIndication_v1_2(RadioHidlTest_v1_2& parent) : parent_

 /* 1.2 Apis */
 Return<void> RadioIndication_v1_2::networkScanResult_1_2(
-    RadioIndicationType /*type*/, const V1_2::NetworkScanResult& /*result*/) {
+    RadioIndicationType /*type*/,
+    const ::android::hardware::radio::V1_2::NetworkScanResult& /*result*/) {
     return Void();
 }

 Return<void> RadioIndication_v1_2::cellInfoList_1_2(
     RadioIndicationType /*type*/,
-    const ::android::hardware::hidl_vec<V1_2::CellInfo>& /*records*/) {
+    const ::android::hardware::hidl_vec<::android::hardware::radio::V1_2::CellInfo>& /*records*/) {
     return Void();
 }

 Return<void> RadioIndication_v1_2::currentLinkCapacityEstimate(
-    RadioIndicationType /*type*/, const V1_2::LinkCapacityEstimate& /*lce*/) {
+    RadioIndicationType /*type*/,
+    const ::android::hardware::radio::V1_2::LinkCapacityEstimate& /*lce*/) {
     return Void();
 }

 Return<void> RadioIndication_v1_2::currentPhysicalChannelConfigs(
     RadioIndicationType /*type*/,
-    const ::android::hardware::hidl_vec<V1_2::PhysicalChannelConfig>& /*configs*/) {
+    const ::android::hardware::hidl_vec<
+        ::android::hardware::radio::V1_2::PhysicalChannelConfig>& /*configs*/) {
     return Void();
 }

 Return<void> RadioIndication_v1_2::currentSignalStrength_1_2(
-    RadioIndicationType /*type*/, const V1_2::SignalStrength& /*signalStrength*/) {
+    RadioIndicationType /*type*/,
+    const ::android::hardware::radio::V1_2::SignalStrength& /*signalStrength*/) {
     return Void();
 }

@@ -51,8 +55,9 @@ Return<void> RadioIndication_v1_2::carrierInfoForImsiEncryption(RadioIndicationT
     return Void();
 }

-Return<void> RadioIndication_v1_2::networkScanResult(RadioIndicationType /*type*/,
-                                                     const NetworkScanResult& /*result*/) {
+Return<void> RadioIndication_v1_2::networkScanResult(
+    RadioIndicationType /*type*/,
+    const ::android::hardware::radio::V1_1::NetworkScanResult& /*result*/) {
     return Void();
 }

@@ -101,8 +106,9 @@ Return<void> RadioIndication_v1_2::nitzTimeReceived(
     return Void();
 }

-Return<void> RadioIndication_v1_2::currentSignalStrength(RadioIndicationType /*type*/,
-                                                         const SignalStrength& /*signalStrength*/) {
+Return<void> RadioIndication_v1_2::currentSignalStrength(
+    RadioIndicationType /*type*/,
+    const ::android::hardware::radio::V1_0::SignalStrength& /*signalStrength*/) {
     return Void();
 }

@@ -224,7 +230,8 @@ Return<void> RadioIndication_v1_2::voiceRadioTechChanged(RadioIndicationType /*t
 }

 Return<void> RadioIndication_v1_2::cellInfoList(
-    RadioIndicationType /*type*/, const ::android::hardware::hidl_vec<CellInfo>& /*records*/) {
+    RadioIndicationType /*type*/,
+    const ::android::hardware::hidl_vec<::android::hardware::radio::V1_0::CellInfo>& /*records*/) {
     return Void();
 }

@@ -276,4 +283,4 @@ Return<void> RadioIndication_v1_2::pcoData(RadioIndicationType /*type*/,
 Return<void> RadioIndication_v1_2::modemReset(RadioIndicationType /*type*/,
                                               const ::android::hardware::hidl_string& /*reason*/) {
     return Void();
-}
+}
\ No newline at end of file
diff --git a/radio/1.2/vts/functional/radio_response.cpp b/radio/1.2/vts/functional/radio_response.cpp
index 9195689c..2977ed68 100644
--- a/radio/1.2/vts/functional/radio_response.cpp
+++ b/radio/1.2/vts/functional/radio_response.cpp
@@ -16,13 +16,14 @@

 #include <radio_hidl_hal_utils_v1_2.h>

-CardStatus cardStatus;
+::android::hardware::radio::V1_2::CardStatus cardStatus;

 RadioResponse_v1_2::RadioResponse_v1_2(RadioHidlTest_v1_2& parent) : parent_v1_2(parent) {}

 /* 1.0 Apis */
-Return<void> RadioResponse_v1_2::getIccCardStatusResponse(const RadioResponseInfo& /*info*/,
-                                                          const CardStatus& /*card_status*/) {
+Return<void> RadioResponse_v1_2::getIccCardStatusResponse(
+    const RadioResponseInfo& /*info*/,
+    const ::android::hardware::radio::V1_0::CardStatus& /*card_status*/) {
     return Void();
 }

@@ -62,7 +63,8 @@ Return<void> RadioResponse_v1_2::supplyNetworkDepersonalizationResponse(
 }

 Return<void> RadioResponse_v1_2::getCurrentCallsResponse(
-    const RadioResponseInfo& /*info*/, const ::android::hardware::hidl_vec<Call>& /*calls*/) {
+    const RadioResponseInfo& /*info*/,
+    const ::android::hardware::hidl_vec<::android::hardware::radio::V1_0::Call>& /*calls*/) {
     return Void();
 }

@@ -107,18 +109,21 @@ Return<void> RadioResponse_v1_2::getLastCallFailCauseResponse(
     return Void();
 }

-Return<void> RadioResponse_v1_2::getSignalStrengthResponse(const RadioResponseInfo& /*info*/,
-                                                           const SignalStrength& /*sig_strength*/) {
+Return<void> RadioResponse_v1_2::getSignalStrengthResponse(
+    const RadioResponseInfo& /*info*/,
+    const ::android::hardware::radio::V1_0::SignalStrength& /*sig_strength*/) {
     return Void();
 }

 Return<void> RadioResponse_v1_2::getVoiceRegistrationStateResponse(
-    const RadioResponseInfo& /*info*/, const VoiceRegStateResult& /*voiceRegResponse*/) {
+    const RadioResponseInfo& /*info*/,
+    const ::android::hardware::radio::V1_0::VoiceRegStateResult& /*voiceRegResponse*/) {
     return Void();
 }

 Return<void> RadioResponse_v1_2::getDataRegistrationStateResponse(
-    const RadioResponseInfo& /*info*/, const DataRegStateResult& /*dataRegResponse*/) {
+    const RadioResponseInfo& /*info*/,
+    const ::android::hardware::radio::V1_0::DataRegStateResult& /*dataRegResponse*/) {
     return Void();
 }

@@ -515,7 +520,7 @@ Return<void> RadioResponse_v1_2::getVoiceRadioTechnologyResponse(const RadioResp

 Return<void> RadioResponse_v1_2::getCellInfoListResponse(
     const RadioResponseInfo& /*info*/,
-    const ::android::hardware::hidl_vec<CellInfo>& /*cellInfo*/) {
+    const ::android::hardware::hidl_vec<::android::hardware::radio::V1_0::CellInfo>& /*cellInfo*/) {
     return Void();
 }

@@ -704,8 +709,9 @@ Return<void> RadioResponse_v1_2::setLinkCapacityReportingCriteriaResponse(
     return Void();
 }

-Return<void> RadioResponse_v1_2::getIccCardStatusResponse_1_2(const RadioResponseInfo& info,
-                                                              const CardStatus& card_status) {
+Return<void> RadioResponse_v1_2::getIccCardStatusResponse_1_2(
+    const RadioResponseInfo& info,
+    const ::android::hardware::radio::V1_2::CardStatus& card_status) {
     rspInfo = info;
     cardStatus = card_status;
     parent_v1_2.notify();
@@ -713,32 +719,37 @@ Return<void> RadioResponse_v1_2::getIccCardStatusResponse_1_2(const RadioRespons
 }

 Return<void> RadioResponse_v1_2::getCurrentCallsResponse_1_2(
-    const RadioResponseInfo& info, const ::android::hardware::hidl_vec<Call>& /*calls*/) {
+    const RadioResponseInfo& info,
+    const ::android::hardware::hidl_vec<::android::hardware::radio::V1_2::Call>& /*calls*/) {
     rspInfo = info;
     parent_v1_2.notify();
     return Void();
 }

 Return<void> RadioResponse_v1_2::getSignalStrengthResponse_1_2(
-    const RadioResponseInfo& info, const SignalStrength& /*sig_strength*/) {
+    const RadioResponseInfo& info,
+    const ::android::hardware::radio::V1_2::SignalStrength& /*sig_strength*/) {
     rspInfo = info;
     parent_v1_2.notify();
     return Void();
 }

 Return<void> RadioResponse_v1_2::getCellInfoListResponse_1_2(
-    const RadioResponseInfo& info, const ::android::hardware::hidl_vec<CellInfo>& /*cellInfo*/) {
+    const RadioResponseInfo& info,
+    const ::android::hardware::hidl_vec<::android::hardware::radio::V1_2::CellInfo>& /*cellInfo*/) {
     rspInfo = info;
     parent_v1_2.notify();
     return Void();
 }

 Return<void> RadioResponse_v1_2::getVoiceRegistrationStateResponse_1_2(
-    const RadioResponseInfo& /*info*/, const V1_2::VoiceRegStateResult& /*voiceRegResponse*/) {
+    const RadioResponseInfo& /*info*/,
+    const ::android::hardware::radio::V1_2::VoiceRegStateResult& /*voiceRegResponse*/) {
     return Void();
 }

 Return<void> RadioResponse_v1_2::getDataRegistrationStateResponse_1_2(
-    const RadioResponseInfo& /*info*/, const V1_2::DataRegStateResult& /*dataRegResponse*/) {
+    const RadioResponseInfo& /*info*/,
+    const ::android::hardware::radio::V1_2::DataRegStateResult& /*dataRegResponse*/) {
     return Void();
-}
+}
\ No newline at end of file
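getIccCardStatusResponse_1_2() is one of the few callbacks here that does real work: it records rspInfo and the global cardStatus, then wakes the test thread via parent_v1_2.notify(), which each test's wait() blocks on. A minimal sketch of that notify()/wait() handshake, assuming the condition-variable members implied by the tail of wait() shown in radio_hidl_hal_test.cpp (only count_ is visible in this diff; the rest is assumed):

#include <chrono>
#include <condition_variable>
#include <mutex>

// Illustrative model of the test-side synchronization, not the real class.
class ResponseWaiter {
    std::mutex mtx_;
    std::condition_variable cv_;
    int count_ = 0;

  public:
    void notify() {  // called from the HIDL response callback thread
        std::lock_guard<std::mutex> lock(mtx_);
        count_++;
        cv_.notify_one();
    }

    std::cv_status wait() {  // called from the test body
        std::unique_lock<std::mutex> lock(mtx_);
        std::cv_status status = std::cv_status::no_timeout;
        while (count_ == 0) {
            status = cv_.wait_for(lock, std::chrono::minutes(3));
            if (status == std::cv_status::timeout) {
                return status;  // the test then fails its no_timeout expectation
            }
        }
        count_--;  // matches the tail of wait() shown above
        return status;
    }
};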