author    Michael Butler  2018-04-27 18:19:53 -0500
committer android-build-merger  2018-04-27 18:19:53 -0500
commit    9351f1bc67b99560889454b1a4a3070e456ff23a (patch)
tree      99da9cbf512138b82bd95020a255fbd87c4948a3 /neuralnetworks/1.0
parent    e2b73c75d5539df0fe63c4ffb044026eb17517e1 (diff)
parent    89802f742382863ea26e7761bf773b0d4824608b (diff)
Merge "NNAPI: sync NDK and HAL documentation" into pi-dev
am: 89802f7423 Change-Id: I3c4734c05f627d727ff8de46b1b76656438acdd1
Diffstat (limited to 'neuralnetworks/1.0')
-rw-r--r--  neuralnetworks/1.0/types.hal  965
1 file changed, 564 insertions, 401 deletions
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index 802f6cb3..4efa13ad 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -42,7 +42,8 @@ enum OperandType : int32_t {
     TENSOR_FLOAT32 = 3,
     /** A tensor of 32 bit integer values. */
     TENSOR_INT32 = 4,
-    /** A tensor of 8 bit integers that represent real numbers.
+    /**
+     * A tensor of 8 bit integers that represent real numbers.
      *
      * Attached to this tensor are two numbers that can be used to convert the
      * 8 bit integer to the real value and vice versa. These two numbers are:
@@ -70,15 +71,17 @@ enum OperationType : int32_t {
     /**
      * Adds two tensors, element-wise.
      *
-     * Takes two input tensors of identical type and compatible dimensions. The output
-     * is the sum of both input tensors, optionally modified by an activation function.
+     * Takes two input tensors of identical {@link OperandType} and compatible
+     * dimensions. The output is the sum of both input tensors, optionally
+     * modified by an activation function.
      *
      * Two dimensions are compatible when:
      *     1. they are equal, or
      *     2. one of them is 1
      *
-     * The size of the output is the maximum size along each dimension of the input operands.
-     * It starts with the trailing dimensions, and works its way forward.
+     * The size of the output is the maximum size along each dimension of the
+     * input operands. It starts with the trailing dimensions, and works its
+     * way forward.
      *
      * Example:
      *
@@ -86,7 +89,7 @@ enum OperationType : int32_t {
      *     input2.dimension = {5, 4, 3, 1}
      *     output.dimension = {5, 4, 3, 2}
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      *
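The broadcast rule documented for ADD can be made concrete with a short sketch. This is illustrative C++ only, not part of the HAL or the NNAPI implementation; the helper name broadcastShape is hypothetical:

    // Sketch of the compatibility rule above: walk both shapes from their
    // trailing dimensions forward; two sizes are compatible when they are
    // equal or when one of them is 1, and the output takes the larger size.
    #include <algorithm>
    #include <cstdint>
    #include <optional>
    #include <vector>

    std::optional<std::vector<uint32_t>> broadcastShape(
            const std::vector<uint32_t>& a, const std::vector<uint32_t>& b) {
        std::vector<uint32_t> out(std::max(a.size(), b.size()));
        for (size_t i = 0; i < out.size(); ++i) {
            // Read each input from its trailing end; missing dims act as 1.
            uint32_t da = i < a.size() ? a[a.size() - 1 - i] : 1;
            uint32_t db = i < b.size() ? b[b.size() - 1 - i] : 1;
            if (da != db && da != 1 && db != 1) return std::nullopt;  // incompatible
            out[out.size() - 1 - i] = std::max(da, db);
        }
        return out;
    }
    // broadcastShape({4, 1, 2}, {5, 4, 3, 1}) yields {5, 4, 3, 2}, matching
    // the input2/output example in the hunk above.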
@@ -94,98 +97,119 @@ enum OperationType : int32_t {
      *
      * Inputs:
      * * 0: A tensor.
-     * * 1: A tensor of the same type, and compatible dimensions as input0.
-     * * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *      Specifies the activation to invoke on the result of each addition.
+     * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+     *      as input0.
+     * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
      *
      * Outputs:
-     * * 0: The sum, a tensor of the same type as input0.
+     * * 0: The sum, a tensor of the same {@link OperandType} as input0.
      */
     ADD = 0,

     /**
      * Performs a 2-D average pooling operation.
      *
-     * The output dimensions are functions of the filter dimensions, stride, and padding.
+     * The output dimensions are functions of the filter dimensions, stride, and
+     * padding.
      *
      * The values in the output tensor are computed as:
      *
      *     output[batch, row, col, channel] =
      *         sum_{i, j}(input[batch, row + i, col + j, channel]) / sum(1)
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      *
-     * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width, and Channels)
-     * data layout.
+     * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
+     * and Channels) data layout.
      *
      * Both explicit padding and implicit padding are supported.
      *
      * Inputs (explicit padding):
-     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
-     * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
-     * * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
-     * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
-     * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
-     * * 5: An INT32 value, specifying the stride when walking through input
-     *      in the ‘width’ dimension.
-     * * 6: An INT32 value, specifying the stride when walking through input
-     *      in the ‘height’ dimension.
-     * * 7: An INT32 value, specifying the filter width.
-     * * 8: An INT32 value, specifying the filter height.
-     * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *      Specifies the activation to invoke on the result of each addition.
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the left, in the ‘width’ dimension.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the right, in the ‘width’ dimension.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the top, in the ‘height’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the bottom, in the ‘height’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+     *      width.
+     * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+     *      height.
+     * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
      *
      * Inputs (implicit padding):
-     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
-     * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+     *      padding scheme, has to be one of the
      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
-     * * 2: An INT32 value, specifying the stride when walking through input
-     *      in the ‘width’ dimension.
-     * * 3: An INT32 value, specifying the stride when walking through input
-     *      in the ‘height’ dimension.
-     * * 4: An INT32 value, specifying the filter width.
-     * * 5: An INT32 value, specifying the filter height.
-     * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *      Specifies the activation to invoke on the result of each addition.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+     *      width.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+     *      height.
+     * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
      *
      * Outputs:
-     * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, out_height, out_width, depth].
      */
     AVERAGE_POOL_2D = 1,

     /**
      * Concatenates the input tensors along the given dimension.
      *
-     * The input tensors must have identical type and the same dimensions except the
-     * dimension along the concatenation axis.
+     * The input tensors must have identical {@link OperandType} and the same
+     * dimensions except the dimension along the concatenation axis.
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      *
      * Supported tensor rank: up to 4
      *
      * Inputs:
-     * * 0 ~ n-1: The list of n input tensors, of shape [D0, D1, ..., Daxis(i), ..., Dm].
-     *            For inputs of {@link OperandType::TENSOR_QUANT8_ASYMM} type, all
-     *            input tensors must have the same scale and zeroPoint.
-     * * n: An INT32 value, specifying the concatenation axis.
+     * * 0 ~ n-1: The list of n input tensors, of shape
+     *            [D0, D1, ..., Daxis(i), ..., Dm]. For inputs of
+     *            {@link OperandType::TENSOR_QUANT8_ASYMM}, all input tensors
+     *            must have the same scale and zeroPoint.
+     * * n: An {@link OperandType::INT32} scalar, specifying the
+     *      concatenation axis.
      *
      * Outputs:
-     * * 0: The output, a tensor of the same type as the input tensors.
-     *      The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
+     * * 0: The output, a tensor of the same {@link OperandType} as the input
+     *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
      */
     CONCATENATION = 2,

     /**
      * Performs an 2-D convolution operation.
      *
-     * The CONV_2D op sweeps a 2-D filter that can mix channels together over a batch of
-     * images, applying the filter to each window of each image of the appropriate size.
+     * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
+     * batch of images, applying the filter to each window of each image of the
+     * appropriate size.
      *
-     * The output dimensions are functions of the filter dimensions, stride, and padding.
+     * The output dimensions are functions of the filter dimensions, stride, and
+     * padding.
      *
      * The values in the output tensor are computed as:
      *
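The recurring statement that "the output dimensions are functions of the filter dimensions, stride, and padding" resolves to the conventional pooling/convolution arithmetic. A sketch for one dimension under the explicit-padding form; the helper name is hypothetical, not HAL API:

    // Per-dimension output size for the explicit-padding inputs above.
    #include <cstdint>

    uint32_t computeOutSize(uint32_t inSize, uint32_t filterSize,
                            uint32_t stride, uint32_t paddingHead,
                            uint32_t paddingTail) {
        return (inSize - filterSize + stride + paddingHead + paddingTail) / stride;
    }
    // e.g. a 224-wide input, 3-wide filter, stride 1, and padding 1 on each
    // side keeps the width at (224 - 3 + 1 + 1 + 1) / 1 = 224.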
@@ -196,7 +220,7 @@ enum OperationType : int32_t {
      *         bias[channel]
      *     )
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      *
@@ -205,63 +229,77 @@ enum OperationType : int32_t {
      * Both explicit padding and implicit padding are supported.
      *
      * Inputs (explicit padding):
-     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
-     * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
-     *      specifying the filter.
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
+     * * 1: A 4-D tensor, of shape
+     *      [depth_out, filter_height, filter_width, depth_in], specifying the
+     *      filter.
      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
-     *      For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
-     *      also be of {@link OperandType::TENSOR_FLOAT32}.
-     *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
-     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
-     *      bias_scale == input_scale * filter_scale.
-     * * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
-     * * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
-     * * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
-     * * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
-     * * 7: An INT32 value, specifying the stride when walking through input
-     *      in the ‘width’ dimension.
-     * * 8: An INT32 value, specifying the stride when walking through input
-     *      in the ‘height’ dimension.
-     * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *      Specifies the activation to invoke on the result of each addition.
+     *      For input tensor of {@link OperandType::TENSOR_FLOAT32}, the bias
+     *      should also be of {@link OperandType::TENSOR_FLOAT32}. For input
+     *      tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias
+     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+     *      0 and bias_scale == input_scale * filter_scale.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the left, in the ‘width’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the right, in the ‘width’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the top, in the ‘height’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the bottom, in the ‘height’ dimension.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
      *
      * Inputs (implicit padding):
-     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
-     * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width, depth_in],
-     *      specifying the filter.
-     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
-     *      For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
-     *      also be of {@link OperandType::TENSOR_FLOAT32}.
-     *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
-     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
+     * * 1: A 4-D tensor, of shape
+     *      [depth_out, filter_height, filter_width, depth_in], specifying the
+     *      filter.
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+     *      tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
+     *      also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
+     *      of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+     *      of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
      *      bias_scale == input_scale * filter_scale.
-     * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the
+     * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+     *      padding scheme, has to be one of the
      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
-     * * 4: An INT32 value, specifying the stride when walking through input
-     *      in the ‘width’ dimension.
-     * * 5: An INT32 value, specifying the stride when walking through input
-     *      in the ‘height’ dimension.
-     * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *      Specifies the activation to invoke on the result of each addition.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
      *
      * Outputs:
-     * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
-     *      For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
-     *      condition must be satisfied: output_scale > input_scale * filter_scale.
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, out_height, out_width, depth_out]. For output tensor of
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
+     *      must be satisfied: output_scale > input_scale * filter_scale.
      */
     CONV_2D = 3,

     /**
      * Performs a depthwise 2-D convolution operation.
      *
-     * Given an input tensor of shape [batches, height, width, depth_in] and a filter
-     * tensor of shape [1, filter_height, filter_width, depth_out] containing
-     * depth_out convolutional filters of depth 1, DEPTHWISE_CONV applies a different
-     * filter to each input channel (expanding from 1 channel to channel_multiplier channels
-     * for each), then concatenates the results together.
+     * Given an input tensor of shape [batches, height, width, depth_in] and a
+     * filter tensor of shape [1, filter_height, filter_width, depth_out]
+     * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
+     * applies a different filter to each input channel (expanding from 1
+     * channel to channel_multiplier channels for each), then concatenates the
+     * results together.
      *
      * The output has depth_out = depth_in * depth_multiplier channels.
-     * The output dimensions are functions of the filter dimensions, stride, and padding.
+     * The output dimensions are functions of the filter dimensions, stride, and
+     * padding.
      *
      * The values in the output tensor are computed as:
      *
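The two quantization constraints the CONV_2D hunk states for TENSOR_QUANT8_ASYMM can be written as a small validation sketch. Hypothetical helper, not a HAL or NNAPI entry point:

    // Checks the scale relations documented above for quantized convolution.
    #include <cassert>

    void checkConvQuantParams(float inputScale, float filterScale,
                              float biasScale, float outputScale) {
        // The bias must be TENSOR_INT32 with zeroPoint 0 and exactly this scale:
        assert(biasScale == inputScale * filterScale);
        // Requantizing the accumulator only works when the output scale is larger:
        assert(outputScale > inputScale * filterScale);
    }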
@@ -271,7 +309,7 @@ enum OperationType : int32_t {
      *             filter[1, di, dj, k * channel_multiplier + q]
      *         )
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      *
@@ -280,82 +318,97 @@ enum OperationType : int32_t {
      * Both explicit padding and implicit padding are supported.
      *
      * Inputs (explicit padding):
-     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
      *      specifying the filter.
-     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
-     *      For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
-     *      also be of {@link OperandType::TENSOR_FLOAT32}.
-     *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
-     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+     *      tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
+     *      also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
+     *      of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+     *      of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
      *      bias_scale == input_scale * filter_scale.
-     * * 3: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
-     * * 4: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
-     * * 5: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
-     * * 6: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
-     * * 7: An INT32 value, specifying the stride when walking through input
-     *      in the ‘width’ dimension.
-     * * 8: An INT32 value, specifying the stride when walking through input
-     *      in the ‘height’ dimension.
-     * * 9: An INT32 value, specifying the depthwise multiplier.
-     * * 10: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *       Specifies the activation to invoke on the result of each addition.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the left, in the ‘width’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the right, in the ‘width’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the top, in the ‘height’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the bottom, in the ‘height’ dimension.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 9: An {@link OperandType::INT32} scalar, specifying the depthwise
+     *      multiplier.
+     * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
+     *       {@link FusedActivationFunc} values. Specifies the activation to
+     *       invoke on the result.
      *
      * Inputs (implicit padding):
-     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
      *      specifying the filter.
-     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
-     *      For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
-     *      also be of {@link OperandType::TENSOR_FLOAT32}.
-     *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
-     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+     * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+     *      tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
+     *      also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
+     *      of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+     *      of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
      *      bias_scale == input_scale * filter_scale.
-     * * 3: An INT32 value, specifying the implicit padding scheme, has to be one of the
+     * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+     *      padding scheme, has to be one of the
      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
-     * * 4: An INT32 value, specifying the stride when walking through input
-     *      in the ‘width’ dimension.
-     * * 5: An INT32 value, specifying the stride when walking through input
-     *      in the ‘height’ dimension.
-     * * 6: An INT32 value, specifying the depthwise multiplier.
-     * * 7: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *      Specifies the activation to invoke on the result of each addition.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the depthwise
+     *      multiplier.
+     * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
      *
      * Outputs:
-     * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth_out].
-     *      For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
-     *      condition must be satisfied: output_scale > input_scale * filter_scale.
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, out_height, out_width, depth_out]. For output tensor of
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
+     *      must be satisfied: output_scale > input_scale * filter_scale.
      */
     DEPTHWISE_CONV_2D = 4,

     /**
      * Rearranges data from depth into blocks of spatial data.
      *
-     * More specifically, this op outputs a copy of the input tensor where values from
-     * the depth dimension are moved in spatial blocks to the height and width dimensions.
-     * The value block_size indicates the input block size and how the data is moved.
+     * More specifically, this op outputs a copy of the input tensor where
+     * values from the depth dimension are moved in spatial blocks to the height
+     * and width dimensions. The value block_size indicates the input block size
+     * and how the data is moved.
      *
-     * Chunks of data of size block_size * block_size from depth are rearranged into
-     * non-overlapping blocks of size block_size x block_size.
+     * Chunks of data of size block_size * block_size from depth are rearranged
+     * into non-overlapping blocks of size block_size x block_size.
      *
-     * The width of the output tensor is input_depth * block_size, whereas the height is
-     * input_height * block_size.
-     * The depth of the input tensor must be divisible by block_size * block_size
+     * The width of the output tensor is input_depth * block_size, whereas the
+     * height is input_height * block_size. The depth of the input tensor must
+     * be divisible by block_size * block_size
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      *
      * Supported tensor rank: 4, with "NHWC" data layout.
      *
      * Inputs:
-     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input.
-     * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
-     *      block_size * block_size must be a divisor of the input depth.
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+     *      specifying the input.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
+     *      block_size must be >=1 and block_size * block_size must be a divisor
+     *      of the input depth.
      *
      * Outputs:
-     * * 0: The output 4-D tensor, of shape [batch, height*block_size, width*block_size,
-     *      depth/(block_size*block_size)].
+     * * 0: The output 4-D tensor, of shape [batch, height*block_size,
+     *      width*block_size, depth/(block_size*block_size)].
      */
     DEPTH_TO_SPACE = 5,

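The DEPTH_TO_SPACE shape rule above (depth shrinks by block_size * block_size while height and width grow by block_size) as a small sketch; the helper name is hypothetical:

    // Output shape for DEPTH_TO_SPACE on an NHWC tensor, per the comment above.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> depthToSpaceShape(const std::vector<uint32_t>& nhwc,
                                            uint32_t blockSize) {
        assert(blockSize >= 1);
        assert(nhwc[3] % (blockSize * blockSize) == 0);  // depth must divide evenly
        return {nhwc[0], nhwc[1] * blockSize, nhwc[2] * blockSize,
                nhwc[3] / (blockSize * blockSize)};
    }
    // e.g. {1, 2, 2, 4} with block_size 2 becomes {1, 4, 4, 1}.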
@@ -366,16 +419,16 @@ enum OperationType : int32_t {
      *
      *     output = (input - zeroPoint) * scale.
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      *
      * Supported tensor rank: up to 4
      *
      * Inputs:
-     * * 0: A tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}.
+     * * 0: A tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}.
      *
      * Outputs:
-     * * 0: The output tensor of same shape as input0, but with type
+     * * 0: The output tensor of same shape as input0, but with
      *      {@link OperandType::TENSOR_FLOAT32}.
      */
     DEQUANTIZE = 6,
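The DEQUANTIZE formula above, output = (input - zeroPoint) * scale, applied to one element; a sketch with illustrative values, not NNAPI code:

    // Scalar step of DEQUANTIZE for a TENSOR_QUANT8_ASYMM element.
    #include <cstdint>

    float dequantize(uint8_t q, int32_t zeroPoint, float scale) {
        return (static_cast<int32_t>(q) - zeroPoint) * scale;
    }
    // With scale = 0.5f and zeroPoint = 128, the quantized value 130 maps to
    // (130 - 128) * 0.5f = 1.0f.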
@@ -401,7 +454,7 @@ enum OperationType : int32_t {
      * and an error must be reported.
      *
      * Inputs:
-     * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32} type.
+     * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32}.
      *      The values are indices into the first dimension of Values.
      * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
      *      extracted.
@@ -416,7 +469,7 @@ enum OperationType : int32_t {
     /**
      * Computes element-wise floor() on the input tensor.
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      *
      * Supported tensor rank: up to 4
@@ -425,45 +478,51 @@ enum OperationType : int32_t {
      * * 0: A tensor.
      *
      * Outputs:
-     * * 0: The output tensor, of the same type and dimensions as the input tensor.
+     * * 0: The output tensor, of the same {@link OperandType} and dimensions as
+     *      the input tensor.
      */
     FLOOR = 8,

     /**
-     * Denotes a fully (densely) connected layer, which connects all elements in the input
-     * tensor with each element in the output tensor.
+     * Denotes a fully (densely) connected layer, which connects all elements
+     * in the input tensor with each element in the output tensor.
      *
      * This layer implements the operation:
      *
      *     outputs = activation(inputs * weights’ + bias)
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      *
      * Supported tensor rank: up to 4.
      *
      * Inputs:
-     * * 0: A tensor of at least rank 2, specifying the input. If rank is greater than 2,
-     *      then it gets flattened to a 2-D Tensor. The (flattened) 2-D Tensor is reshaped
-     *      (if necessary) to [batch_size, input_size], where "input_size" corresponds to
-     *      the number of inputs to the layer, matching the second dimension of weights, and
-     *      "batch_size" is calculated by dividing the number of elements by "input_size".
-     * * 1: A 2-D tensor, specifying the weights, of shape [num_units, input_size], where
-     *      "num_units" corresponds to the number of output nodes.
-     * * 2: A 1-D tensor, of shape [num_units], specifying the bias.
-     *      For input tensor of {@link OperandType::TENSOR_FLOAT32} type, the bias should
-     *      also be of {@link OperandType::TENSOR_FLOAT32}.
-     *      For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the bias
-     *      should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+     * * 0: A tensor of at least rank 2, specifying the input. If rank is
+     *      greater than 2, then it gets flattened to a 2-D Tensor. The
+     *      (flattened) 2-D Tensor is reshaped (if necessary) to
+     *      [batch_size, input_size], where "input_size" corresponds to the
+     *      number of inputs to the layer, matching the second dimension of
+     *      weights, and "batch_size" is calculated by dividing the number of
+     *      elements by "input_size".
+     * * 1: A 2-D tensor, specifying the weights, of shape
+     *      [num_units, input_size], where "num_units" corresponds to the number
+     *      of output nodes.
+     * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
+     *      tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
+     *      also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
+     *      of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+     *      of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
      *      bias_scale == input_scale * filter_scale.
-     * * 3: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *      Specifies the activation to invoke on the result of each addition.
+     * * 3: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
      *
      * Outputs:
-     * * 0: The output tensor, of shape [batch_size, num_units].
-     *      For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following
-     *      condition must be satisfied: output_scale > input_scale * filter_scale.
+     * * 0: The output tensor, of shape [batch_size, num_units]. For output
+     *      tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
+     *      condition must be satisfied:
+     *      output_scale > input_scale * filter_scale.
      */
     FULLY_CONNECTED = 9,

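The flattening rule for FULLY_CONNECTED input 0 described above — any rank >= 2 tensor is viewed as [batch_size, input_size], with input_size taken from the weights and batch_size derived from the element count — as a sketch (hypothetical helper):

    // Derives the flattened 2-D view used by FULLY_CONNECTED.
    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> flattenTo2D(const std::vector<uint32_t>& dims,
                                      uint32_t inputSize) {
        uint32_t numElements = 1;
        for (uint32_t d : dims) numElements *= d;
        // "batch_size" is the number of elements divided by "input_size"
        // (the second dimension of the weights).
        return {numElements / inputSize, inputSize};
    }
    // A [2, 3, 4] input with weights of shape [num_units, 12] is treated as a
    // [2, 12] matrix before the matrix multiply.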
@@ -495,19 +554,22 @@ enum OperationType : int32_t {
      * must be concatenated.
      *
      * Inputs:
-     * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [ k ].
-     * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [ n ];
-     *      Keys and Values pair represent a map, i.e., the ith element
-     *      in Keys (Keys[i]) is the key to select the ith sub-tensor
-     *      in Values (Values[i]), where 0 <= i <= n-1.
-     *      Keys tensor *MUST* be sorted in ascending order.
-     * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension must be n.
+     * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with
+     *      shape [ k ].
+     * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape
+     *      [ n ]; Keys and Values pair represent a map, i.e., the ith element
+     *      in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
+     *      (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
+     *      ascending order.
+     * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
+     *      must be n.
      *
      * Outputs:
      * * 0: Output. A tensor with shape [ k …].
      * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
      *      hits (True) or not (False).
-     *      Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0 and scale 1.0f.
+     *      Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0
+     *      and scale 1.0f.
      *      A non-zero byte represents True, a hit. A zero indicates otherwise.
      */
     HASHTABLE_LOOKUP = 10,
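Because Keys is required to be sorted ascending, each lookup can be resolved with a binary search; a shape-free sketch over scalar values rather than sub-tensors (illustrative only, not the NNAPI implementation):

    // Per-lookup semantics of HASHTABLE_LOOKUP as documented above.
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    void hashtableLookup(const std::vector<int32_t>& lookups,
                         const std::vector<int32_t>& keys,     // sorted ascending
                         const std::vector<float>& values,     // values[i] pairs with keys[i]
                         std::vector<float>* output, std::vector<uint8_t>* hits) {
        for (int32_t lookup : lookups) {
            auto it = std::lower_bound(keys.begin(), keys.end(), lookup);
            bool hit = it != keys.end() && *it == lookup;
            hits->push_back(hit ? 1 : 0);  // stored as QUANT8_ASYMM, offset 0, scale 1.0f
            output->push_back(hit ? values[it - keys.begin()] : 0.0f);
        }
    }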
@@ -521,32 +583,37 @@ enum OperationType : int32_t {
      *         input[batch, row, col, channel] /
      *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
      *
-     * For input tensor with more dimensions, independently normalizes each 1-D slice along dimension dim.
+     * For input tensor with more dimensions, independently normalizes each 1-D
+     * slice along dimension dim.
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      *
-     * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples, Height, Width, and Channels).
+     * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples,
+     * Height, Width, and Channels).
      *
      * Inputs:
      * * 0: A 4-D tensor, of shape [batches, height, width, depth].
      *
      * Outputs:
-     * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, out_height, out_width, depth].
      */
     L2_NORMALIZATION = 11,

     /**
      * Performs an 2-D L2 pooling operation.
      *
-     * The output dimensions are functions of the filter dimensions, stride, and padding.
+     * The output dimensions are functions of the filter dimensions, stride, and
+     * padding.
      *
      * The values in the output tensor are computed as:
      *
      *     output[batch, row, col, channel] =
-     *         sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) / sum(1))
+     *         sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) /
+     *              sum(1))
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      *
      * Supported tensor rank: 4, with "NHWC" data layout.
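The L2_NORMALIZATION formula above, sketched for one 1-D depth slice at a fixed (batch, row, col): each element is divided by the L2 norm of its slice. Hypothetical helper, assuming a slice with non-zero norm:

    // input[..., c] / sqrt(sum_c pow(input[..., c], 2)), in place.
    #include <cmath>
    #include <vector>

    void l2NormalizeSlice(std::vector<float>* slice) {
        float sumSq = 0.0f;
        for (float v : *slice) sumSq += v * v;
        float norm = std::sqrt(sumSq);  // assumed non-zero for this sketch
        for (float& v : *slice) v /= norm;
    }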
@@ -554,62 +621,82 @@ enum OperationType : int32_t {
      * Both explicit padding and implicit padding are supported.
      *
      * Inputs (explicit padding):
-     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
-     * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension.
-     * * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension.
-     * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension.
-     * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension.
-     * * 5: An INT32 value, specifying the stride when walking through input
-     *      in the ‘width’ dimension.
-     * * 6: An INT32 value, specifying the stride when walking through input
-     *      in the ‘height’ dimension.
-     * * 7: An INT32 value, specifying the filter width.
-     * * 8: An INT32 value, specifying the filter height.
-     * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *      Specifies the activation to invoke on the result of each addition.
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the left, in the ‘width’ dimension.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the right, in the ‘width’ dimension.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the top, in the ‘height’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+     *      the bottom, in the ‘height’ dimension.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+     *      width.
+     * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+     *      height.
+     * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
      *
      * Inputs (implicit padding):
-     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
-     * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+     *      padding scheme, has to be one of the
      *      following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
-     * * 2: An INT32 value, specifying the stride when walking through input
-     *      in the ‘width’ dimension.
-     * * 3: An INT32 value, specifying the stride when walking through input
-     *      in the ‘height’ dimension.
-     * * 4: An INT32 value, specifying the filter width.
-     * * 5: An INT32 value, specifying the filter height.
-     * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values.
-     *      Specifies the activation to invoke on the result of each addition.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘width’ dimension.
+     * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+     *      walking through input in the ‘height’ dimension.
+     * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+     *      width.
+     * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+     *      height.
+     * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+     *      {@link FusedActivationFunc} values. Specifies the activation to
+     *      invoke on the result.
      *
      * Outputs:
-     * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth].
+     * * 0: The output 4-D tensor, of shape
+     *      [batches, out_height, out_width, depth].
      */
     L2_POOL_2D = 12,

     /**
      * Applies Local Response Normalization along the depth dimension.
      *
-     * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last
-     * dimension), and each vector is normalized independently. Within a given vector,
-     * each component is divided by the weighted, squared sum of inputs within depth_radius.
+     * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
+     * last dimension), and each vector is normalized independently. Within a
+     * given vector, each component is divided by the weighted, squared sum of
+     * inputs within depth_radius.
      *
      * The output is calculated using this formula:
      *
-     *     sqr_sum[a, b, c, d] =
-     *         sum(pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2)
+     *     sqr_sum[a, b, c, d] = sum(
+     *         pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
      *     output = input / pow((bias + alpha * sqr_sum), beta)
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      *
      * Supported tensor rank: 4, with "NHWC" data layout.
      *
      * Inputs:
-     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input.
-     * * 1: An INT32 value, specifying the radius of the normalization window.
-     * * 2: A FLOAT32 value, specifying the bias, must not be zero.
-     * * 3: A FLOAT32 value, specifying the scale factor, alpha.
-     * * 4: A FLOAT32 value, specifying the exponent, beta.
+     * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+     *      the input.
+     * * 1: An {@link OperandType::INT32} scalar, specifying the radius of
+     *      the normalization window.
+     * * 2: An {@link OperandType::FLOAT32} scalar, specifying the bias, must
+     *      not be zero.
+     * * 3: An {@link OperandType::FLOAT32} scalar, specifying the scale
+     *      factor, alpha.
+     * * 4: An {@link OperandType::FLOAT32} scalar, specifying the exponent,
+     *      beta.
      *
      * Outputs:
      * * 0: The output tensor of same shape as input0.
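The Local Response Normalization formula above, sketched for one 1-D depth vector; radius, bias, alpha, and beta correspond to inputs 1 through 4. Illustrative only:

    // output[d] = input[d] / pow(bias + alpha * sqr_sum[d], beta), where
    // sqr_sum[d] sums squares over the window [d - radius, d + radius],
    // clamped to the vector bounds.
    #include <algorithm>
    #include <cmath>
    #include <vector>

    std::vector<float> lrn(const std::vector<float>& in, int radius, float bias,
                           float alpha, float beta) {
        std::vector<float> out(in.size());
        for (int d = 0; d < static_cast<int>(in.size()); ++d) {
            float sqrSum = 0.0f;
            int lo = std::max(0, d - radius);
            int hi = std::min(static_cast<int>(in.size()) - 1, d + radius);
            for (int c = lo; c <= hi; ++c) sqrSum += in[c] * in[c];
            out[d] = in[d] / std::pow(bias + alpha * sqrSum, beta);
        }
        return out;
    }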
@@ -623,7 +710,7 @@ enum OperationType : int32_t {
      *
      *     output = 1 / (1 + exp(-input))
      *
-     * Supported tensor types:
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      *
@@ -634,7 +721,7 @@ enum OperationType : int32_t {
      *
      * Outputs:
      * * 0: The output tensor of same shape as input0.
-     *      For {@link OperandType::TENSOR_QUANT8_ASYMM} type,
+     *      For {@link OperandType::TENSOR_QUANT8_ASYMM},
      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
      */
     LOGISTIC = 14,
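The pinned quantization parameters for the LOGISTIC output (scale 1.f / 256, zeroPoint 0) map the uint8 range onto sigmoid's (0, 1) codomain. A sketch of quantizing one sigmoid result under those parameters, not NNAPI code:

    // Quantize sigmoid(x) with scale 1/256 and zeroPoint 0.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    uint8_t quantizedLogistic(float x) {
        float y = 1.0f / (1.0f + std::exp(-x));           // output in (0, 1)
        int q = static_cast<int>(std::round(y * 256.0f)); // scale 1/256, zeroPoint 0
        return static_cast<uint8_t>(std::min(q, 255));    // clamp to uint8
    }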
@@ -650,18 +737,19 @@ enum OperationType : int32_t {
      *
      * * 1: Input. Dim.size >= 1, no restriction on DataType.
      * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
-     *      If not set, each input element is considered to have the same weight of
-     *      1.0.
+     *      If not set, each input element is considered to have the same weight
+     *      of 1.0.
      *      Tensor[1].Dim[0] == Tensor[2].Dim[0]
      * * 3: Type:
      *        Sparse: Value LSHProjectionType_SPARSE(=1).
      *          Computed bit vector is considered to be sparse.
-     *          Each output element is an int32 made up of multiple bits computed from
-     *          hash functions.
+     *          Each output element is an int32 made up of multiple bits
+     *          computed from hash functions.
      *
      *        Dense: Value LSHProjectionType_DENSE(=2).
-     *          Computed bit vector is considered to be dense. Each output element
-     *          represents a bit and can take the value of either 0 or 1.
+     *          Computed bit vector is considered to be dense. Each output
+     *          element represents a bit and can take the value of either
+     *          0 or 1.
      *
      * Outputs:
      * * 0: If the projection type is sparse:
@@ -681,9 +769,12 @@ enum OperationType : int32_t {
      * \f{eqnarray*}{
      * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
      * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
-     * C_t =& clip(f_t \odot C_{t-1} + i_t \odot g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell})& \\
-     * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o)& \\
-     *      & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj}) & if\ there\ is\ a\ projection; \\
+     * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
+     *        g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
+     * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
+     *      & & \\
+     *      & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
+     *      & if\ there\ is\ a\ projection; \\
      * h_t =& & \\
      *      & o_t \odot g(C_t) & otherwise. \\
      * \f}
@@ -695,7 +786,8 @@ enum OperationType : int32_t {
      * * \f$o_t\f$ is the output,
      * * \f$h_t\f$ is the output state,
      * * \f$\sigma\f$ is the logistic sigmoid function,
-     * * \f$g\f$ is the cell input and cell output activation function, usually \f$tahn\f$,
+     * * \f$g\f$ is the cell input and cell output activation function, usually
+     *   \f$tahn\f$,
      * * \f$W_{xi}\f$ is the input-to-input weight matrix,
      * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
      * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
@@ -715,29 +807,32 @@ enum OperationType : int32_t {
      * * \f$b_{proj}\f$ is the projection bias,
      * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
      * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
-     * * \f$\odot\f$ is the <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
+     * * \f$\odot\f$ is the
+     *   <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
      *   Hadamard product</a> that takes two matrices and produces another
      *   matrix, each element of which is the product of the corresponding
      *   elements of the input matrices.
      *
      * The operation has the following independently optional inputs:
-     * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights (\f$W_{hi}\f$),
-     *   cell-to-input (\f$W_{ci}\f$) weights, and input gate bias (\f$b_i\f$) either all have values,
-     *   or none of them have values (i.e., all set to null). If they have no
-     *   values, coupling of input and forget gates (CIFG) is used, in which case
-     *   the input gate (\f$i_t\f$) is calculated using the following equation instead.
+     * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
+     *   (\f$W_{hi}\f$), cell-to-input (\f$W_{ci}\f$) weights, and input gate
+     *   bias (\f$b_i\f$) either all have values, or none of them have values
+     *   (i.e., all set to null). If they have no values, coupling of input and
+     *   forget gates (CIFG) is used, in which case the input gate (\f$i_t\f$)
+     *   is calculated using the following equation instead.
      *   \f{eqnarray*}{
      *   i_t = 1 - f_t
      *   \f}
-     * * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output
-     *   weights (\f$W_{co}\f$) either both have values or neither of them have values.
+     * * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output weights
+     *   (\f$W_{co}\f$) either both have values or neither of them have values.
      *   If they have values, the peephole optimization is used. Additionally,
      *   if CIFG is not used, cell-to-input weights (\f$W_{ci}\f$) is also
      *   required to have values for peephole optimization.
-     * * The projection weights (\f$W_{proj}\f$) is required only for the recurrent projection
-     *   layer, and should otherwise have no value.
-     * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a value if the
-     *   recurrent projection layer exists, and should otherwise have no value.
+     * * The projection weights (\f$W_{proj}\f$) is required only for the
+     *   recurrent projection layer, and should otherwise have no value.
+     * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
+     *   value if the recurrent projection layer exists, and should otherwise
+     *   have no value.
      *
      * References:
      *
@@ -749,8 +844,8 @@ enum OperationType : int32_t {
      * The peephole implementation and projection layer is based on:
      * https://research.google.com/pubs/archive/43905.pdf
      * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
-     * recurrent neural network architectures for large scale acoustic modeling."
-     * INTERSPEECH, 2014.
+     * recurrent neural network architectures for large scale acoustic
+     * modeling." INTERSPEECH, 2014.
      * (However, the concept of peephole optimization was introduced in work
      * prior to this paper.)
      *
@@ -758,56 +853,74 @@ enum OperationType : int32_t {
      * http://arxiv.org/pdf/1503.04069.pdf
      * Greff et al. "LSTM: A Search Space Odyssey"
      *
-     * Supported tensor types (type T):
+     * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
      *
      * Inputs:
      * * 0: The input (\f$x_t\f$).
-     *      A 2-D tensor of type T, of shape [batch_size, input_size], where
-     *      “batch_size” corresponds to the batching dimension, and “input_size”
-     *      is the size of the input.
+     *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+     *      [batch_size, input_size], where “batch_size” corresponds to the
+     *      batching dimension, and “input_size” is the size of the input.
      * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
-     *      A 2-D tensor of type T, of shape [num_units, input_size], where
-     *      “num_units” corresponds to the number of cell units.
+     *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+     *      [num_units, input_size], where “num_units” corresponds to the
+     *      number of cell units.
      * * 2: The input-to-forget weights (\f$W_{xf}\f$).
-     *      A 2-D tensor of type T, of shape [num_units, input_size].
+     *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+     *      [num_units, input_size].
      * * 3: The input-to-cell weights (\f$W_{xc}\f$).
-     *      A 2-D tensor of type T, of shape [num_units, input_size].
+     *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+     *      [num_units, input_size].
      * * 4: The input-to-output weights (\f$W_{xo}\f$).
-     *      A 2-D tensor of type T, of shape [num_units, input_size].
+     *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+     *      [num_units, input_size].
      * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
-     *      A 2-D tensor of type T, of shape [num_units, output_size], where
-     *      “output_size” corresponds to either the number of cell units (i.e.,
-     *      “num_units”), or the second dimension of the “projection_weights”, if
-     *      defined.
+     *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+     *      [num_units, output_size], where “output_size” corresponds to either
+     *      the number of cell units (i.e., “num_units”), or the second
+     *      dimension of the “projection_weights”, if defined.
      * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
-     *      A 2-D tensor of type T, of shape [num_units, output_size].
+     *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+     *      [num_units, output_size].
      * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
-     *      A 2-D tensor of type T, of shape [num_units, output_size].
+     *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+     *      [num_units, output_size].
      * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
-     *      A 2-D tensor of type T, of shape [num_units, output_size].
+     *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+     *      [num_units, output_size].
      * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
-     *      A 1-D tensor of type T, of shape [num_units].
+     *      A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+     *      [num_units].
      * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
-     *      A 1-D tensor of type T, of shape [num_units].
+     *      A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+     *      [num_units].
      * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
-     *      A 1-D tensor of type T, of shape [num_units].
+     *      A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+     *      [num_units].
      * * 12:The input gate bias (\f$b_i\f$). Optional.
-     *      A 1-D tensor of type T, of shape [num_units].
+     *      A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
902 * [num_units].
797 * * 13:The forget gate bias (\f$b_f\f$). 903 * * 13:The forget gate bias (\f$b_f\f$).
798 * A 1-D tensor of type T, of shape [num_units]. 904 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
905 * [num_units].
799 * * 14:The cell bias (\f$b_c\f$). 906 * * 14:The cell bias (\f$b_c\f$).
800 * A 1-D tensor of type T, of shape [num_units]. 907 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
908 * [num_units].
801 * * 15:The output gate bias (\f$b_o\f$). 909 * * 15:The output gate bias (\f$b_o\f$).
802 * A 1-D tensor of type T, of shape [num_units]. 910 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
911 * [num_units].
803 * * 16:The projection weights (\f$W_{proj}\f$). Optional. 912 * * 16:The projection weights (\f$W_{proj}\f$). Optional.
804 * A 2-D tensor of type T, of shape [output_size, num_units]. 913 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
914 * [output_size, num_units].
805 * * 17:The projection bias (\f$b_{proj}\f$). Optional. 915 * * 17:The projection bias (\f$b_{proj}\f$). Optional.
806 * A 1-D tensor of type T, of shape [output_size]. 916 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
917 * [output_size].
807 * * 18:The output state (in) (\f$h_{t-1}\f$). 918 * * 18:The output state (in) (\f$h_{t-1}\f$).
808 * A 2-D tensor of type T, of shape [batch_size, output_size]. 919 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
920 * [batch_size, output_size].
809 * * 19:The cell state (in) (\f$C_{t-1}\f$). 921 * * 19:The cell state (in) (\f$C_{t-1}\f$).
810 * A 2-D tensor of type T, of shape [batch_size, num_units]. 922 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
923 * [batch_size, num_units].
811 * * 20:The activation function (\f$g\f$). 924 * * 20:The activation function (\f$g\f$).
812 * A value indicating the activation function: 925 * A value indicating the activation function:
813 * <ul> 926 * <ul>
@@ -817,38 +930,43 @@ enum OperationType : int32_t {
817 * <li>4: Tanh; 930 * <li>4: Tanh;
818 * <li>6: Sigmoid. 931 * <li>6: Sigmoid.
819 * </ul> 932 * </ul>
820 * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such that values are bound 933 * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
821 * within [-cell_clip, cell_clip]. If set to 0.0 then clipping is 934 * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
822 * disabled.
823 * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the projection layer, such
824 * that values are bound within [-proj_clip, proj_clip]. If set to 0.0
825 * then clipping is disabled. 935 * then clipping is disabled.
936 * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
937 * projection layer, such that values are bound within
938 * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
826 * 939 *
827 * Outputs: 940 * Outputs:
828 * * 0: The scratch buffer. 941 * * 0: The scratch buffer.
829 * A 2-D tensor of type T, of shape [batch_size, num_units * 4] with 942 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
830 * CIFG, or [batch_size, num_units * 3] without CIFG. 943 * [batch_size, num_units * 3] with CIFG, or
944 * [batch_size, num_units * 4] without CIFG.
831 * * 1: The output state (out) (\f$h_t\f$). 945 * * 1: The output state (out) (\f$h_t\f$).
832 * A 2-D tensor of type T, of shape [batch_size, output_size]. 946 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
947 * [batch_size, output_size].
833 * * 2: The cell state (out) (\f$C_t\f$). 948 * * 2: The cell state (out) (\f$C_t\f$).
834 * A 2-D tensor of type T, of shape [batch_size, num_units]. 949 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
950 * [batch_size, num_units].
835 * * 3: The output (\f$o_t\f$). 951 * * 3: The output (\f$o_t\f$).
836 * A 2-D tensor of type T, of shape [batch_size, output_size]. This is 952 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
837 * effectively the same as the current “output state (out)” value. 953 * [batch_size, output_size]. This is effectively the same as the
954 * current “output state (out)” value.
838 */ 955 */
839 LSTM = 16, 956 LSTM = 16,
840 957
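For quick reference, a minimal C++ sketch (hypothetical, not part of the HAL) of two details from the LSTM description above: the CIFG coupling of the input gate to the forget gate, and the cell clipping controlled by input 21. All identifiers are illustrative.

    #include <algorithm>
    #include <vector>

    // With CIFG, the input gate is not computed from weights; it is
    // derived from the forget gate as i_t = 1 - f_t.
    float CifgInputGate(float forgetGate) { return 1.0f - forgetGate; }

    // Input 21 (cell_clip): if nonzero, cell state values are bound
    // within [-cell_clip, cell_clip]; 0.0 disables clipping.
    void ClipCellState(std::vector<float>& cellState, float cellClip) {
        if (cellClip == 0.0f) return;
        for (float& c : cellState) {
            c = std::min(std::max(c, -cellClip), cellClip);
        }
    }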
841 /** 958 /**
842 * Performs a 2-D max pooling operation. 959 * Performs a 2-D max pooling operation.
843 * 960 *
844 * The output dimensions are functions of the filter dimensions, stride, and padding. 961 * The output dimensions are functions of the filter dimensions, stride, and
962 * padding.
845 * 963 *
846 * The values in the output tensor are computed as: 964 * The values in the output tensor are computed as:
847 * 965 *
848 * output[batch, row, col, channel] = 966 * output[batch, row, col, channel] =
849 * max_{i, j} (input[batch, row + i, col + j, channel]) 967 * max_{i, j} (input[batch, row + i, col + j, channel])
850 * 968 *
851 * Supported tensor types: 969 * Supported tensor {@link OperandType}:
852 * * {@link OperandType::TENSOR_FLOAT32} 970 * * {@link OperandType::TENSOR_FLOAT32}
853 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 971 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
854 * 972 *
@@ -857,52 +975,68 @@ enum OperationType : int32_t {
857 * Both explicit padding and implicit padding are supported. 975 * Both explicit padding and implicit padding are supported.
858 * 976 *
859 * Inputs (explicit padding): 977 * Inputs (explicit padding):
860 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 978 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
861 * * 1: An INT32 value, specifying the padding on the left, in the ‘width’ dimension. 979 * the input.
862 * * 2: An INT32 value, specifying the padding on the right,in the ‘width’ dimension. 980 * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
863 * * 3: An INT32 value, specifying the padding on the top, in the ‘height’ dimension. 981 * the left, in the ‘width’ dimension.
864 * * 4: An INT32 value, specifying the padding on the bottom, in the ‘height’ dimension. 982 * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
865 * * 5: An INT32 value, specifying the stride when walking through input 983 * the right, in the ‘width’ dimension.
866 * in the ‘width’ dimension. 984 * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
867 * * 6: An INT32 value, specifying the stride when walking through input 985 * the top, in the ‘height’ dimension.
868 * in the ‘height’ dimension. 986 * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
869 * * 7: An INT32 value, specifying the filter width. 987 * the bottom, in the ‘height’ dimension.
870 * * 8: An INT32 value, specifying the filter height. 988 * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
871 * * 9: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 989 * walking through input in the ‘width’ dimension.
872 * Specifies the activation to invoke on the result of each addition. 990 * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
991 * walking through input in the ‘height’ dimension.
992 * * 7: An {@link OperandType::INT32} scalar, specifying the filter
993 * width.
994 * * 8: An {@link OperandType::INT32} scalar, specifying the filter
995 * height.
996 * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
997 * {@link FusedActivationFunc} values. Specifies the activation to
998 * invoke on the result.
873 * 999 *
874 * Inputs (implicit padding): 1000 * Inputs (implicit padding):
875 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 1001 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
876 * * 1: An INT32 value, specifying the implicit padding scheme, has to be one of the 1002 * the input.
1003 * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
1004 * padding scheme, has to be one of the
877 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. 1005 * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
878 * * 2: An INT32 value, specifying the stride when walking through input 1006 * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
879 * in the ‘width’ dimension. 1007 * walking through input in the ‘width’ dimension.
880 * * 3: An INT32 value, specifying the stride when walking through input 1008 * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
881 * in the ‘height’ dimension. 1009 * walking through input in the ‘height’ dimension.
882 * * 4: An INT32 value, specifying the filter width. 1010 * * 4: An {@link OperandType::INT32} scalar, specifying the filter
883 * * 5: An INT32 value, specifying the filter height. 1011 * width.
884 * * 6: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 1012 * * 5: An {@link OperandType::INT32} scalar, specifying the filter
885 * Specifies the activation to invoke on the result of each addition. 1013 * height.
1014 * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
1015 * {@link FusedActivationFunc} values. Specifies the activation to
1016 * invoke on the result.
886 * 1017 *
887 * Outputs: 1018 * Outputs:
888 * * 0: The output 4-D tensor, of shape [batches, out_height, out_width, depth]. 1019 * * 0: The output 4-D tensor, of shape
1020 * [batches, out_height, out_width, depth].
889 */ 1021 */
890 MAX_POOL_2D = 17, 1022 MAX_POOL_2D = 17,
891 1023
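A direct transcription of the max-pooling formula above, as a hedged C++ reference sketch. It assumes an NHWC float tensor and ignores padding for brevity (every filter tap is in range); all identifiers are hypothetical, not HAL API.

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    std::vector<float> MaxPool2d(const std::vector<float>& in, uint32_t batches,
                                 uint32_t height, uint32_t width, uint32_t depth,
                                 uint32_t filterH, uint32_t filterW,
                                 uint32_t strideH, uint32_t strideW) {
        uint32_t outH = (height - filterH) / strideH + 1;
        uint32_t outW = (width - filterW) / strideW + 1;
        auto at = [&](uint32_t b, uint32_t h, uint32_t w, uint32_t c) {
            return in[((b * height + h) * width + w) * depth + c];
        };
        std::vector<float> out;
        out.reserve(size_t(batches) * outH * outW * depth);
        for (uint32_t b = 0; b < batches; ++b)
          for (uint32_t row = 0; row < outH; ++row)
            for (uint32_t col = 0; col < outW; ++col)
              for (uint32_t c = 0; c < depth; ++c) {
                // output[b, row, col, c] = max_{i, j} input[b, row + i, col + j, c]
                float m = at(b, row * strideH, col * strideW, c);
                for (uint32_t i = 0; i < filterH; ++i)
                  for (uint32_t j = 0; j < filterW; ++j)
                    m = std::max(m, at(b, row * strideH + i, col * strideW + j, c));
                out.push_back(m);
              }
        return out;
    }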
892 /** 1024 /**
893 * Multiplies two tensors, element-wise. 1025 * Multiplies two tensors, element-wise.
894 * 1026 *
895 * Takes two input tensors of identical type and compatible dimensions. The output 1027 * Takes two input tensors of identical {@link OperandType} and compatible
896 * is the product of both input tensors, optionally modified by an activation function. 1028 * dimensions. The output is the product of both input tensors, optionally
1029 * modified by an activation function.
897 * 1030 *
898 * Two dimensions are compatible when: 1031 * Two dimensions are compatible when:
899 * 1. they are equal, or 1032 * 1. they are equal, or
900 * 2. one of them is 1 1033 * 2. one of them is 1
901 * 1034 *
902 * The size of the resulting output is the maximum size along each dimension of the 1035 * The size of the resulting output is the maximum size along each dimension
903 * input operands. It starts with the trailing dimensions, and works its way forward. 1036 * of the input operands. It starts with the trailing dimensions, and works
1037 * its way forward.
904 * 1038 *
905 * Supported tensor types: 1039 * Supported tensor {@link OperandType}:
906 * * {@link OperandType::TENSOR_FLOAT32} 1040 * * {@link OperandType::TENSOR_FLOAT32}
907 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1041 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
908 * 1042 *
@@ -910,14 +1044,17 @@ enum OperationType : int32_t {
910 * 1044 *
911 * Inputs: 1045 * Inputs:
912 * * 0: A tensor. 1046 * * 0: A tensor.
913 * * 1: A tensor of the same type, and compatible dimensions as input0. 1047 * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
914 * * 2: An INT32 value, and has to be one of the {@link FusedActivationFunc} values. 1048 * as input0.
915 * Specifies the activation to invoke on the result of each addition. 1049 * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
1050 * {@link FusedActivationFunc} values. Specifies the activation to
1051 * invoke on the result.
916 * 1052 *
917 * Outputs: 1053 * Outputs:
918 * * 0: The product, a tensor of the same type as input0. 1054 * * 0: The product, a tensor of the same {@link OperandType} as input0.
919 * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} type, the following 1055 * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
920 * condition must be satisfied: output_scale > input1_scale * input2_scale. 1056 * the following condition must be satisfied:
1057 * output_scale > input1_scale * input2_scale.
921 */ 1058 */
922 MUL = 18, 1059 MUL = 18,
923 1060
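The broadcasting rule above can be made concrete with a small shape helper; this is an illustrative sketch, not HAL API.

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Align shapes at the trailing dimensions, pad the shorter one with
    // 1s, and take the maximum along each axis.
    std::vector<uint32_t> BroadcastShape(std::vector<uint32_t> a,
                                         std::vector<uint32_t> b) {
        while (a.size() < b.size()) a.insert(a.begin(), 1);
        while (b.size() < a.size()) b.insert(b.begin(), 1);
        std::vector<uint32_t> out(a.size());
        for (size_t i = 0; i < a.size(); ++i) {
            if (a[i] != b[i] && a[i] != 1 && b[i] != 1)
                throw std::invalid_argument("incompatible dimensions");
            out[i] = std::max(a[i], b[i]);
        }
        return out;  // e.g. {5,4,3,2} x {5,4,3,1} -> {5,4,3,2}
    }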
@@ -928,7 +1065,7 @@ enum OperationType : int32_t {
928 * 1065 *
929 * output = max(0, input) 1066 * output = max(0, input)
930 * 1067 *
931 * Supported tensor types: 1068 * Supported tensor {@link OperandType}:
932 * * {@link OperandType::TENSOR_FLOAT32} 1069 * * {@link OperandType::TENSOR_FLOAT32}
933 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1070 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
934 * 1071 *
@@ -949,7 +1086,7 @@ enum OperationType : int32_t {
949 * 1086 *
950 * output = min(1.f, max(-1.f, input)) 1087 * output = min(1.f, max(-1.f, input))
951 * 1088 *
952 * Supported tensor types: 1089 * Supported tensor {@link OperandType}:
953 * * {@link OperandType::TENSOR_FLOAT32} 1090 * * {@link OperandType::TENSOR_FLOAT32}
954 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1091 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
955 * 1092 *
@@ -970,7 +1107,7 @@ enum OperationType : int32_t {
970 * 1107 *
971 * output = min(6, max(0, input)) 1108 * output = min(6, max(0, input))
972 * 1109 *
973 * Supported tensor types: 1110 * Supported tensor {@link OperandType}:
974 * * {@link OperandType::TENSOR_FLOAT32} 1111 * * {@link OperandType::TENSOR_FLOAT32}
975 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1112 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
976 * 1113 *
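The three rectified-linear variants above are one-line element-wise functions; a hypothetical C++ transcription:

    #include <algorithm>

    float Relu(float x)  { return std::max(0.0f, x); }                  // RELU
    float Relu1(float x) { return std::min(1.0f, std::max(-1.0f, x)); } // RELU1
    float Relu6(float x) { return std::min(6.0f, std::max(0.0f, x)); }  // RELU6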
@@ -987,10 +1124,10 @@ enum OperationType : int32_t {
987 /** 1124 /**
988 * Reshapes a tensor. 1125 * Reshapes a tensor.
989 * 1126 *
990 * Given tensor, this operation returns a tensor that has the same values as tensor, 1127 * Given tensor, this operation returns a tensor that has the same values as
991 * but with a newly specified shape. 1128 * tensor, but with a newly specified shape.
992 * 1129 *
993 * Supported tensor types: 1130 * Supported tensor {@link OperandType}:
994 * * {@link OperandType::TENSOR_FLOAT32} 1131 * * {@link OperandType::TENSOR_FLOAT32}
995 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1132 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
996 * 1133 *
@@ -998,9 +1135,9 @@ enum OperationType : int32_t {
998 * 1135 *
999 * Inputs: 1136 * Inputs:
1000 * * 0: A tensor, specifying the tensor to be reshaped. 1137 * * 0: A tensor, specifying the tensor to be reshaped.
1001 * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32}, defining the shape 1138 * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}, defining the
1002 * of the output tensor. The number of elements implied by shape must be the same 1139 * shape of the output tensor. The number of elements implied by shape
1003 * as the number of elements in the input tensor. 1140 * must be the same as the number of elements in the input tensor.
1004 * 1141 *
1005 * Outputs: 1142 * Outputs:
1006 * * 0: The output tensor, of shape specified by the input shape. 1143 * * 0: The output tensor, of shape specified by the input shape.
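A sketch of the element-count invariant stated above (hypothetical helper, not HAL API):

    #include <cstdint>
    #include <vector>

    // The shape operand must imply exactly as many elements as the
    // input tensor has.
    bool ReshapeIsValid(const std::vector<uint32_t>& inputDims,
                        const std::vector<int32_t>& newShape) {
        uint64_t inCount = 1, outCount = 1;
        for (uint32_t d : inputDims) inCount *= d;
        for (int32_t d : newShape) outCount *= static_cast<uint64_t>(d);
        return inCount == outCount;  // e.g. {2,3,4} -> {6,4}: 24 == 24, valid
    }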
@@ -1010,22 +1147,26 @@ enum OperationType : int32_t {
1010 /** 1147 /**
1011 * Resizes images to a given size using bilinear interpolation. 1148 * Resizes images to a given size using bilinear interpolation.
1012 * 1149 *
1013 * Resized images must be distorted if their output aspect ratio is not the same as 1150 * Resized images must be distorted if their output aspect ratio is not the
1014 * input aspect ratio. The corner pixels of output may not be the same as 1151 * same as input aspect ratio. The corner pixels of output may not be the
1015 * corner pixels of input. 1152 * same as corner pixels of input.
1016 * 1153 *
1017 * Supported tensor types: 1154 * Supported tensor {@link OperandType}:
1018 * * {@link OperandType::TENSOR_FLOAT32} 1155 * * {@link OperandType::TENSOR_FLOAT32}
1019 * 1156 *
1020 * Supported tensor rank: 4, with "NHWC" data layout. 1157 * Supported tensor rank: 4, with "NHWC" data layout.
1021 * 1158 *
1022 * Inputs: 1159 * Inputs:
1023 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the input. 1160 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1024 * * 1: An INT32 value, specifying the output height of the output tensor. 1161 * the input.
1025 * * 2: An INT32 value, specifying the output width of the output tensor. 1162 * * 1: An {@link OperandType::INT32} scalar, specifying the output
1163 * height of the output tensor.
1164 * * 2: An {@link OperandType::INT32} scalar, specifying the output
1165 * width of the output tensor.
1026 * 1166 *
1027 * Outputs: 1167 * Outputs:
1028 * * 0: The output 4-D tensor, of shape [batches, new_height, new_width, depth]. 1168 * * 0: The output 4-D tensor, of shape
1169 * [batches, new_height, new_width, depth].
1029 */ 1170 */
1030 RESIZE_BILINEAR = 23, 1171 RESIZE_BILINEAR = 23,
1031 1172
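The interpolation core of the operation above, as a hedged sketch. The mapping from output to input coordinates (corner handling) is not specified in the text above, so only the per-sample blend is shown; names are hypothetical.

    // fx, fy are the fractional offsets in [0, 1) between the four
    // nearest input pixels.
    float Lerp(float a, float b, float t) { return a + t * (b - a); }

    float BilinearSample(float topLeft, float topRight,
                         float bottomLeft, float bottomRight,
                         float fx, float fy) {
        return Lerp(Lerp(topLeft, topRight, fx),
                    Lerp(bottomLeft, bottomRight, fx), fy);
    }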
@@ -1033,7 +1174,8 @@ enum OperationType : int32_t {
1033 * A basic recurrent neural network layer. 1174 * A basic recurrent neural network layer.
1034 * 1175 *
1035 * This layer implements the operation: 1176 * This layer implements the operation:
1036 * outputs = state = activation(inputs * input_weights + state * recurrent_weights + bias) 1177 * outputs = state = activation(inputs * input_weights +
1178 * state * recurrent_weights + bias)
1037 * 1179 *
1038 * Where: 1180 * Where:
1039 * * “input_weights” is a weight matrix that multiplies the inputs; 1181 * * “input_weights” is a weight matrix that multiplies the inputs;
@@ -1044,42 +1186,49 @@ enum OperationType : int32_t {
1044 * * “activation” is the function passed as the “fused_activation_function” 1186 * * “activation” is the function passed as the “fused_activation_function”
1045 * argument (if not “NONE”). 1187 * argument (if not “NONE”).
1046 * 1188 *
1047 * Supported tensor types (Type T): 1189 * Supported tensor {@link OperandType}:
1048 * * {@link OperandType::TENSOR_FLOAT32} 1190 * * {@link OperandType::TENSOR_FLOAT32}
1049 * 1191 *
1050 * Inputs: 1192 * Inputs:
1051 * * 0: input. 1193 * * 0: input.
1052 * A 2-D tensor of type T, of shape [batch_size, input_size], where 1194 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1053 * “batch_size” corresponds to the batching dimension, and “input_size” is 1195 * [batch_size, input_size], where “batch_size” corresponds to the
1054 * the size of the input. 1196 * batching dimension, and “input_size” is the size of the input.
1055 * * 1: weights. 1197 * * 1: weights.
1056 * A 2-D tensor of type T, of shape [num_units, input_size], where 1198 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1057 * “num_units” corresponds to the number of units. 1199 * [num_units, input_size], where “num_units” corresponds to the
1200 * number of units.
1058 * * 2: recurrent_weights. 1201 * * 2: recurrent_weights.
1059 * A 2-D tensor of type T, of shape [num_units, num_units], with columns 1202 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1060 * corresponding to the weights from each unit. 1203 * [num_units, num_units], with columns corresponding to the weights
1204 * from each unit.
1061 * * 3: bias. 1205 * * 3: bias.
1062 * A 1-D tensor of type T, of shape [num_units]. 1206 * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1207 * [num_units].
1063 * * 4: hidden state (in). 1208 * * 4: hidden state (in).
1064 * A 2-D tensor of type T, of shape [batch_size, num_units]. 1209 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1210 * [batch_size, num_units].
1065 * * 5: fused_activation_function. 1211 * * 5: fused_activation_function.
1066 * An optional {@link FusedActivationFunc} value indicating the activation 1212 * An optional {@link FusedActivationFunc} value indicating the
1067 * function. If “NONE” is specified then it results in a linear 1213 * activation function. If “NONE” is specified then it results in a
1068 * activation. 1214 * linear activation.
1069 * 1215 *
1070 * Outputs: 1216 * Outputs:
1071 * * 0: hidden state (out). 1217 * * 0: hidden state (out).
1072 * A 2-D tensor of type T, of shape [batch_size, num_units]. 1218 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1219 * [batch_size, num_units].
1073 * 1220 *
1074 * * 1: output. 1221 * * 1: output.
1075 * A 2-D tensor of type T, of shape [batch_size, num_units]. This is 1222 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1076 * effectively the same as the current state value. 1223 * [batch_size, num_units]. This is effectively the same as the
1224 * current state value.
1077 */ 1225 */
1078 RNN = 24, 1226 RNN = 24,
1079 1227
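A hypothetical single-batch transcription of the recurrence above, with tanh standing in for the fused activation. Weight layouts follow the documented shapes: w is [num_units][input_size], r is [num_units][num_units].

    #include <cmath>
    #include <cstddef>
    #include <vector>

    std::vector<float> RnnStep(const std::vector<float>& input,
                               const std::vector<std::vector<float>>& w,
                               const std::vector<std::vector<float>>& r,
                               const std::vector<float>& bias,
                               std::vector<float>& state) {
        std::vector<float> out(w.size());
        for (size_t u = 0; u < w.size(); ++u) {
            float acc = bias[u];
            for (size_t i = 0; i < input.size(); ++i) acc += w[u][i] * input[i];
            for (size_t j = 0; j < state.size(); ++j) acc += r[u][j] * state[j];
            out[u] = std::tanh(acc);
        }
        state = out;  // outputs = state, per the equation above
        return out;
    }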
1080 /** 1228 /**
1081 * Computes the softmax activation on the input tensor element-wise, per batch, by 1229 * Computes the softmax activation on the input tensor element-wise, per
1082 * normalizing the input vector so the maximum coefficient is zero. 1230 * batch, by normalizing the input vector so the maximum coefficient is
1231 * zero.
1083 * 1232 *
1084 * The output is calculated using this formula: 1233 * The output is calculated using this formula:
1085 * 1234 *
@@ -1087,7 +1236,7 @@ enum OperationType : int32_t {
1087 * exp((input[batch, i] - max(input[batch, :])) * beta) / 1236 * exp((input[batch, i] - max(input[batch, :])) * beta) /
1088 * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)} 1237 * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
1089 * 1238 *
1090 * Supported tensor types: 1239 * Supported tensor {@link OperandType}:
1091 * * {@link OperandType::TENSOR_FLOAT32} 1240 * * {@link OperandType::TENSOR_FLOAT32}
1092 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1241 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1093 * 1242 *
@@ -1095,11 +1244,12 @@ enum OperationType : int32_t {
1095 * 1244 *
1096 * Inputs: 1245 * Inputs:
1097 * * 0: A 2-D or 4-D tensor, specifying the input. 1246 * * 0: A 2-D or 4-D tensor, specifying the input.
1098 * * 1: A FLOAT32 value, specifying the positive scaling factor for the exponent, beta. 1247 * * 1: An {@link OperandType::FLOAT32} scalar, specifying the positive
1248 * scaling factor for the exponent, beta.
1099 * 1249 *
1100 * Outputs: 1250 * Outputs:
1101 * * 0: The output tensor of same shape as input0. 1251 * * 0: The output tensor of same shape as input0.
1102 * For {@link OperandType::TENSOR_QUANT8_ASYMM} type, 1252 * For {@link OperandType::TENSOR_QUANT8_ASYMM},
1103 * the scale must be 1.f / 256 and the zeroPoint must be 0. 1253 * the scale must be 1.f / 256 and the zeroPoint must be 0.
1104 */ 1254 */
1105 SOFTMAX = 25, 1255 SOFTMAX = 25,
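A direct transcription of the per-batch formula above (hypothetical helper, not HAL API):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Softmax over one batch row, including the max subtraction from the
    // formula (which also gives numerical stability).
    std::vector<float> Softmax(const std::vector<float>& row, float beta) {
        float maxVal = *std::max_element(row.begin(), row.end());
        std::vector<float> out(row.size());
        float sum = 0.0f;
        for (size_t i = 0; i < row.size(); ++i) {
            out[i] = std::exp((row[i] - maxVal) * beta);
            sum += out[i];
        }
        for (float& v : out) v /= sum;
        return out;
    }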
@@ -1107,30 +1257,33 @@ enum OperationType : int32_t {
1107 /** 1257 /**
1108 * Rearranges blocks of spatial data into depth. 1258 * Rearranges blocks of spatial data into depth.
1109 * 1259 *
1110 * More specifically, this op outputs a copy of the input tensor where values from 1260 * More specifically, this op outputs a copy of the input tensor where
1111 * the height and width dimensions are moved to the depth dimension. 1261 * values from the height and width dimensions are moved to the depth
1112 * The value block_size indicates the input block size and how the data is moved. 1262 * dimension. The value block_size indicates the input block size and how
1263 * the data is moved.
1113 * 1264 *
1114 * Chunks of data of size block_size * block_size from depth are rearranged into 1265 * Non-overlapping blocks of spatial data of size block_size x block_size
1115 * non-overlapping blocks of size block_size x block_size. 1266 * are rearranged into depth at each output location.
1116 * 1267 *
1117 * The depth of the output tensor is input_depth * block_size * block_size. 1268 * The depth of the output tensor is input_depth * block_size * block_size.
1118 * The input tensor's height and width must be divisible by block_size. 1269 * The input tensor's height and width must be divisible by block_size.
1119 * 1270 *
1120 * Supported tensor types: 1271 * Supported tensor {@link OperandType}:
1121 * * {@link OperandType::TENSOR_FLOAT32} 1272 * * {@link OperandType::TENSOR_FLOAT32}
1122 * * {@link OperandType::TENSOR_QUANT8_ASYMM} 1273 * * {@link OperandType::TENSOR_QUANT8_ASYMM}
1123 * 1274 *
1124 * Supported tensor rank: 4, with "NHWC" data layout. 1275 * Supported tensor rank: 4, with "NHWC" data layout.
1125 * 1276 *
1126 * Inputs: 1277 * Inputs:
1127 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying the input. 1278 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
1128 * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and 1279 * specifying the input.
1129 * block_size must be a divisor of both the input height and width. 1280 * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
1281 * block_size must be >=1 and block_size must be a divisor of both the
1282 * input height and width.
1130 * 1283 *
1131 * Outputs: 1284 * Outputs:
1132 * * 0: The output 4-D tensor, of shape [batch, height/block_size, width/block_size, 1285 * * 0: The output 4-D tensor, of shape [batch, height/block_size,
1133 * depth*block_size*block_size]. 1286 * width/block_size, depth*block_size*block_size].
1134 */ 1287 */
1135 SPACE_TO_DEPTH = 26, 1288 SPACE_TO_DEPTH = 26,
1136 1289
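A hedged sketch of one index mapping consistent with the description above. The exact ordering of the block elements within the output depth is an assumption here (the TensorFlow convention); the text above does not pin it down.

    #include <cstdint>
    #include <vector>

    // in is NHWC with height and width divisible by block; returns an
    // NHWC tensor of shape [batches, height/block, width/block,
    // depth * block * block].
    std::vector<float> SpaceToDepth(const std::vector<float>& in,
                                    uint32_t batches, uint32_t height,
                                    uint32_t width, uint32_t depth,
                                    uint32_t block) {
        std::vector<float> out(in.size());
        uint32_t oh = height / block, ow = width / block;
        uint32_t od = depth * block * block;
        for (uint32_t b = 0; b < batches; ++b)
          for (uint32_t h = 0; h < height; ++h)
            for (uint32_t w = 0; w < width; ++w)
              for (uint32_t c = 0; c < depth; ++c) {
                // Position within the block picks the output channel group.
                uint32_t dOut = ((h % block) * block + (w % block)) * depth + c;
                uint32_t src = ((b * height + h) * width + w) * depth + c;
                uint32_t dst = ((b * oh + h / block) * ow + w / block) * od + dOut;
                out[dst] = in[src];
              }
        return out;
    }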
@@ -1147,21 +1300,22 @@ enum OperationType : int32_t {
1147 * INTERSPEECH, 2015. 1300 * INTERSPEECH, 2015.
1148 * 1301 *
1149 * It processes the incoming input using a 2-stage filtering mechanism: 1302 * It processes the incoming input using a 2-stage filtering mechanism:
1150 * * stage 1 performs filtering on the "features" dimension, whose outputs get 1303 * * stage 1 performs filtering on the "features" dimension, whose outputs
1151 * pushed into a memory of fixed-size memory_size. 1304 * get pushed into a memory of fixed-size memory_size.
1152 * * stage 2 performs filtering on the "time" dimension of the memory_size 1305 * * stage 2 performs filtering on the "time" dimension of the memory_size
1153 * memoized outputs of stage 1. 1306 * memoized outputs of stage 1.
1154 * 1307 *
1155 * Specifically, for rank 1, this layer implements the operation: 1308 * Specifically, for rank 1, this layer implements the operation:
1156 * 1309 *
1157 * memory = push(conv1d(inputs, weights_feature, feature_dim, "PADDING_VALID")); 1310 * memory = push(conv1d(inputs, weights_feature, feature_dim,
1311 * "PADDING_VALID"));
1158 * outputs = activation(memory * weights_time + bias); 1312 * outputs = activation(memory * weights_time + bias);
1159 * 1313 *
1160 * Where: 1314 * Where:
1161 * * “weights_feature” is a weights matrix that processes the inputs (by 1315 * * “weights_feature” is a weights matrix that processes the inputs (by
1162 * convolving the input with every “feature filter”), and whose outputs get 1316 * convolving the input with every “feature filter”), and whose outputs
1163 * pushed, stacked in order, into the fixed-size “memory” (the oldest entry 1317 * get pushed, stacked in order, into the fixed-size “memory” (the oldest
1164 * gets dropped); 1318 * entry gets dropped);
1165 * * “weights_time” is a weights matrix that processes the “memory” (by a 1319 * * “weights_time” is a weights matrix that processes the “memory” (by a
1166 * batched matrix multiplication on the num_units); 1320 * batched matrix multiplication on the num_units);
1167 * * “bias” is an optional bias vector (added to each output vector in the 1321 * * “bias” is an optional bias vector (added to each output vector in the
@@ -1172,35 +1326,42 @@ enum OperationType : int32_t {
1172 * Each rank adds a dimension to the weights matrices by means of stacking 1326 * Each rank adds a dimension to the weights matrices by means of stacking
1173 * the filters. 1327 * the filters.
1174 * 1328 *
1175 * Supported tensor types (type T): 1329 * Supported tensor {@link OperandType}:
1176 * * {@link OperandType::TENSOR_FLOAT32} 1330 * * {@link OperandType::TENSOR_FLOAT32}
1177 * 1331 *
1178 * Inputs: 1332 * Inputs:
1179 * * 0: input. 1333 * * 0: input.
1180 * A 2-D tensor of type T, of shape [batch_size, input_size], where 1334 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1181 * “batch_size” corresponds to the batching dimension, and “input_size” is 1335 * [batch_size, input_size], where “batch_size” corresponds to the
1182 * the size of the input. 1336 * batching dimension, and “input_size” is the size of the input.
1183 * * 1: weights_feature. 1337 * * 1: weights_feature.
1184 * A 2-D tensor of type T, of shape [num_units, input_size], where 1338 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1185 * “num_units” corresponds to the number of units. 1339 * [num_units, input_size], where “num_units” corresponds to the
1340 * number of units.
1186 * * 2: weights_time. 1341 * * 2: weights_time.
1187 * A 2-D tensor of type T, of shape [num_units, memory_size], where 1342 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1188 * “memory_size” corresponds to the fixed-size of the memory. 1343 * [num_units, memory_size], where “memory_size” corresponds to the
1344 * fixed-size of the memory.
1189 * * 3: bias. 1345 * * 3: bias.
1190 * An optional 1-D tensor of type T, of shape [num_units]. 1346 * An optional 1-D tensor of {@link OperandType::TENSOR_FLOAT32},
1347 * of shape [num_units].
1191 * * 4: state (in). 1348 * * 4: state (in).
1192 * A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank]. 1349 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1350 * [batch_size, (memory_size - 1) * num_units * rank].
1193 * * 5: rank. 1351 * * 5: rank.
1194 * The rank of the SVD approximation. 1352 * The rank of the SVD approximation.
1195 * * 6: fused_activation_function. 1353 * * 6: fused_activation_function.
1196 * An optional {@link FusedActivationFunc} value indicating the activation function. 1354 * An optional {@link FusedActivationFunc} value indicating the
1197 * If “NONE” is specified then it results in a linear activation. 1355 * activation function. If “NONE” is specified then it results in a
1356 * linear activation.
1198 * 1357 *
1199 * Outputs: 1358 * Outputs:
1200 * * 0: state (out). 1359 * * 0: state (out).
1201 * A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) * num_units * rank]. 1360 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1361 * [batch_size, (memory_size - 1) * num_units * rank].
1202 * * 1: output. 1362 * * 1: output.
1203 * A 2-D tensor of type T, of shape [batch_size, num_units]. 1363 * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
1364 * [batch_size, num_units].
1204 */ 1365 */
1205 SVDF = 27, 1366 SVDF = 27,
1206 1367
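A hypothetical sketch of stage 2 of the filtering mechanism above, for rank 1 and batch_size 1; stage 1 (the feature convolution and memory push) and the fused activation are omitted for brevity.

    #include <cstddef>
    #include <vector>

    float Dot(const std::vector<float>& a, const std::vector<float>& b) {
        float s = 0.0f;
        for (size_t i = 0; i < a.size(); ++i) s += a[i] * b[i];
        return s;
    }

    // Each unit dot-products its memoized stage-1 outputs with its time
    // filter, then adds the bias: outputs = memory * weights_time + bias.
    std::vector<float> SvdfStage2(
            const std::vector<std::vector<float>>& memory,      // [num_units][memory_size]
            const std::vector<std::vector<float>>& weightsTime, // [num_units][memory_size]
            const std::vector<float>& bias) {                   // [num_units]
        std::vector<float> out(memory.size());
        for (size_t u = 0; u < memory.size(); ++u)
            out[u] = Dot(memory[u], weightsTime[u]) + bias[u];
        return out;
    }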
@@ -1211,7 +1372,7 @@ enum OperationType : int32_t {
1211 * 1372 *
1212 * output = tanh(input) 1373 * output = tanh(input)
1213 * 1374 *
1214 * Supported tensor types: 1375 * Supported tensor {@link OperandType}:
1215 * * {@link OperandType::TENSOR_FLOAT32} 1376 * * {@link OperandType::TENSOR_FLOAT32}
1216 * 1377 *
1217 * Supported tensor rank: up to 4. 1378 * Supported tensor rank: up to 4.
@@ -1227,7 +1388,8 @@ enum OperationType : int32_t {
1227 /** 1388 /**
1228 * OEM specific operation. 1389 * OEM specific operation.
1229 * 1390 *
1230 * This operation is OEM specific. It should only be used for OEM applications. 1391 * This operation is OEM specific. It should only be used for OEM
1392 * applications.
1231 */ 1393 */
1232 OEM_OPERATION = 10000, 1394 OEM_OPERATION = 10000,
1233}; 1395};
@@ -1274,8 +1436,8 @@ enum OperandLifeTime : int32_t {
1274 CONSTANT_REFERENCE, 1436 CONSTANT_REFERENCE,
1275 1437
1276 /** 1438 /**
1277 * The operand does not have a value. This is valid only for optional arguments 1439 * The operand does not have a value. This is valid only for optional
1278 * of operations. 1440 * arguments of operations.
1279 */ 1441 */
1280 NO_VALUE, 1442 NO_VALUE,
1281}; 1443};
@@ -1391,7 +1553,8 @@ struct Operand {
1391 1553
1392 /** 1554 /**
1393 * Where to find the data for this operand. 1555 * Where to find the data for this operand.
1394 * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or NO_VALUE: 1556 * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or
1557 * NO_VALUE:
1395 * - All the fields must be 0. 1558 * - All the fields must be 0.
1396 * If the lifetime is CONSTANT_COPY: 1559 * If the lifetime is CONSTANT_COPY:
1397 * - location.poolIndex is 0. 1560 * - location.poolIndex is 0.
@@ -1485,9 +1648,9 @@ struct Model {
1485 */ 1648 */
1486struct RequestArgument { 1649struct RequestArgument {
1487 /** 1650 /**
1488 * If true, the argument does not have a value. This can be used for operations 1651 * If true, the argument does not have a value. This can be used for
1489 * that take optional arguments. If true, the fields of location are set to 0 and 1652 * operations that take optional arguments. If true, the fields of location
1490 * the dimensions vector is left empty. 1653 * are set to 0 and the dimensions vector is left empty.
1491 */ 1654 */
1492 bool hasNoValue; 1655 bool hasNoValue;
1493 1656
@@ -1499,10 +1662,10 @@ struct RequestArgument {
1499 /** 1662 /**
1500 * Updated dimension information. 1663 * Updated dimension information.
1501 * 1664 *
1502 * If dimensions.size() > 0, dimension information was provided along with the 1665 * If dimensions.size() > 0, dimension information was provided along with
1503 * argument. This can be the case for models that accept inputs of varying size. 1666 * the argument. This can be the case for models that accept inputs of
1504 * This can't change the rank, just the value of the dimensions that were 1667 * varying size. This can't change the rank, just the value of the
1505 * unspecified in the model. 1668 * dimensions that were unspecified in the model.
1506 */ 1669 */
1507 vec<uint32_t> dimensions; 1670 vec<uint32_t> dimensions;
1508}; 1671};
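A hedged sketch of how a driver might validate the dimensions field above against the model operand. It assumes the usual NNAPI convention that a 0 in the model's dimensions means "unspecified"; that convention is an assumption here, not stated in this struct.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    bool DimensionsCompatible(const std::vector<uint32_t>& model,
                              const std::vector<uint32_t>& request) {
        if (request.empty()) return true;                  // no update provided
        if (request.size() != model.size()) return false;  // rank may not change
        for (size_t i = 0; i < model.size(); ++i) {
            // Only dimensions left unspecified (0, by assumption) in the
            // model may be filled in by the request.
            if (model[i] != 0 && model[i] != request[i]) return false;
        }
        return true;
    }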