author     Slava Shklyaev            2018-09-07 09:27:24 -0500
committer  Przemyslaw Szczepaniak    2018-09-21 08:41:55 -0500
commit     8e139a7a1197f5626097af11e104c08d37082308 (patch)
tree       946d63391b35aa5cc6f7c570b7c49fd5e3014be3 /neuralnetworks/1.2
parent     c0263aa0e4dd6ce6f39521314fa638b0a7f10d66 (diff)
Create NeuralNetworks HAL v1.2 for new OperationTypes
Bug: 114365802
Test: mm
Change-Id: I86b9261729a64d02ed30dc822a0226de11473ac8
Merged-In: I86b9261729a64d02ed30dc822a0226de11473ac8
(cherry-picked from 060a9acb3b982a51b4ae79f9456b3589229ba805)
Diffstat (limited to 'neuralnetworks/1.2')
-rw-r--r--  neuralnetworks/1.2/Android.bp    24
-rw-r--r--  neuralnetworks/1.2/IDevice.hal  106
-rw-r--r--  neuralnetworks/1.2/types.hal    112
3 files changed, 242 insertions, 0 deletions
diff --git a/neuralnetworks/1.2/Android.bp b/neuralnetworks/1.2/Android.bp
new file mode 100644
index 00000000..e183a263
--- /dev/null
+++ b/neuralnetworks/1.2/Android.bp
@@ -0,0 +1,24 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+    name: "android.hardware.neuralnetworks@1.2",
+    root: "android.hardware",
+    vndk: {
+        enabled: true,
+    },
+    srcs: [
+        "types.hal",
+        "IDevice.hal",
+    ],
+    interfaces: [
+        "android.hardware.neuralnetworks@1.0",
+        "android.hardware.neuralnetworks@1.1",
+        "android.hidl.base@1.0",
+    ],
+    types: [
+        "Model",
+        "Operation",
+        "OperationType",
+    ],
+    gen_java: false,
+}
diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal
new file mode 100644
index 00000000..9cc23a26
--- /dev/null
+++ b/neuralnetworks/1.2/IDevice.hal
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::ErrorStatus;
+import @1.0::IPreparedModelCallback;
+import @1.1::ExecutionPreference;
+import @1.1::IDevice;
+
+/**
+ * This interface represents a device driver.
+ */
+interface IDevice extends @1.1::IDevice {
+    /**
+     * Gets the supported operations in a model.
+     *
+     * getSupportedOperations indicates which operations of a model are fully
+     * supported by the vendor driver. If an operation may not be supported for
+     * any reason, getSupportedOperations must return false for that operation.
+     *
+     * @param model A model whose operations--and their corresponding operands--
+     *        are to be verified by the driver.
+     * @return status Error status of the call, must be:
+     *         - NONE if successful
+     *         - DEVICE_UNAVAILABLE if driver is offline or busy
+     *         - GENERAL_FAILURE if there is an unspecified error
+     *         - INVALID_ARGUMENT if provided model is invalid
+     * @return supportedOperations A list of supported operations, where true
+     *         indicates the operation is supported and false indicates the
+     *         operation is not supported. The index of "supported" corresponds with
+     *         the index of the operation it is describing.
+     */
+    getSupportedOperations_1_2(Model model)
+        generates (ErrorStatus status, vec<bool> supportedOperations);
+
+    /**
+     * Creates a prepared model for execution.
+     *
+     * prepareModel is used to make any necessary transformations or alternative
+     * representations to a model for execution, possibly including
+     * transformations on the constant data, optimization on the model's graph,
+     * or compilation into the device's native binary format. The model itself
+     * is not changed.
+     *
+     * The model is prepared asynchronously with respect to the caller. The
+     * prepareModel function must verify the inputs to the prepareModel function
+     * are correct. If there is an error, prepareModel must immediately invoke
+     * the callback with the appropriate ErrorStatus value and nullptr for the
+     * IPreparedModel, then return with the same ErrorStatus. If the inputs to
+     * the prepareModel function are valid and there is no error, prepareModel
+     * must launch an asynchronous task to prepare the model in the background,
+     * and immediately return from prepareModel with ErrorStatus::NONE. If the
+     * asynchronous task fails to launch, prepareModel must immediately invoke
+     * the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the
+     * IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE.
+     *
+     * When the asynchronous task has finished preparing the model, it must
+     * immediately invoke the callback function provided as an input to
+     * prepareModel. If the model was prepared successfully, the callback object
+     * must be invoked with an error status of ErrorStatus::NONE and the
+     * produced IPreparedModel object. If an error occurred preparing the model,
+     * the callback object must be invoked with the appropriate ErrorStatus
+     * value and nullptr for the IPreparedModel.
+     *
+     * The only information that may be unknown to the model at this stage is
+     * the shape of the tensors, which may only be known at execution time. As
+     * such, some driver services may return partially prepared models, where
+     * the prepared model may only be finished when it is paired with a set of
+     * inputs to the model. Note that the same prepared model object may be
+     * used with different shapes of inputs on different (possibly concurrent)
+     * executions.
+     *
+     * Multiple threads may call prepareModel on the same model concurrently.
+     *
+     * @param model The model to be prepared for execution.
+     * @param preference Indicates the intended execution behavior of a prepared
+     *        model.
+     * @param callback A callback object used to return the error status of
+     *        preparing the model for execution and the prepared model if
+     *        successful, nullptr otherwise. The callback object's notify function
+     *        must be called exactly once, even if the model could not be prepared.
+     * @return status Error status of launching a task which prepares the model
+     *         in the background; must be:
+     *         - NONE if preparation task is successfully launched
+     *         - DEVICE_UNAVAILABLE if driver is offline or busy
+     *         - GENERAL_FAILURE if there is an unspecified error
+     *         - INVALID_ARGUMENT if one of the input arguments is invalid
+     */
+    prepareModel_1_2(Model model, ExecutionPreference preference,
+                     IPreparedModelCallback callback)
+        generates (ErrorStatus status);
+};
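
Note: the contract documented above (validate the inputs synchronously, launch preparation in the background, notify the callback exactly once) maps onto the HIDL-generated C++ bindings roughly as in the following driver-side sketch. SampleDevice, validateModel, and compileModel are hypothetical placeholders, not part of this change, and the @1.0/@1.1 methods a real driver must also implement are omitted.

// Hypothetical driver-side sketch; not taken from this change.
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include <android-base/logging.h>

#include <thread>
#include <vector>

using ::android::sp;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModelCallback;
using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
using ::android::hardware::neuralnetworks::V1_2::IDevice;
using ::android::hardware::neuralnetworks::V1_2::Model;

// Vendor-specific helpers, stubbed out for the sketch.
static bool validateModel(const Model& model) { return !model.operations.empty(); }
static sp<IPreparedModel> compileModel(const Model& /*model*/) { return nullptr; }

// The inherited @1.0/@1.1 methods are omitted, so this struct stays abstract.
struct SampleDevice : public IDevice {
    // Report, per operation, whether the driver can execute it.
    Return<void> getSupportedOperations_1_2(const Model& model,
                                            getSupportedOperations_1_2_cb cb) override {
        if (!validateModel(model)) {
            cb(ErrorStatus::INVALID_ARGUMENT, {});
            return Void();
        }
        std::vector<bool> supported(model.operations.size(), true);
        cb(ErrorStatus::NONE, supported);
        return Void();
    }

    // Validate synchronously, prepare asynchronously, and notify the callback
    // exactly once when preparation finishes.
    Return<ErrorStatus> prepareModel_1_2(const Model& model, ExecutionPreference /*preference*/,
                                         const sp<IPreparedModelCallback>& callback) override {
        if (callback == nullptr) {
            return ErrorStatus::INVALID_ARGUMENT;
        }
        if (!validateModel(model)) {
            const auto ret = callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
            if (!ret.isOk()) {
                LOG(ERROR) << "notify failed: " << ret.description();
            }
            return ErrorStatus::INVALID_ARGUMENT;
        }
        std::thread([model, callback] {
            sp<IPreparedModel> prepared = compileModel(model);  // always fails in this stub
            const auto ret = callback->notify(
                    prepared != nullptr ? ErrorStatus::NONE : ErrorStatus::GENERAL_FAILURE,
                    prepared);
            if (!ret.isOk()) {
                LOG(ERROR) << "notify failed: " << ret.description();
            }
        }).detach();
        return ErrorStatus::NONE;
    }
};

A real driver would route compilation through its own compiler or runtime; the sketch only illustrates the ordering of callback notification and return status required by the comments above.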
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
new file mode 100644
index 00000000..06606cc3
--- /dev/null
+++ b/neuralnetworks/1.2/types.hal
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::Operand;
+import @1.0::PerformanceInfo;
+import @1.1::OperationType;
+
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : @1.1::OperationType {
+};
+
+/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+    /**
+     * The operation type.
+     */
+    OperationType type;
+
+    /**
+     * Describes the table that contains the indexes of the inputs of the
+     * operation. The offset is the index in the operandIndexes table.
+     */
+    vec<uint32_t> inputs;
+
+    /**
+     * Describes the table that contains the indexes of the outputs of the
+     * operation. The offset is the index in the operandIndexes table.
+     */
+    vec<uint32_t> outputs;
+};
+
+/**
+ * A Neural Network Model.
+ *
+ * This includes not only the execution graph, but also constant data such as
+ * weights or scalars added at construction time. The only information that
+ * may not be known is the shape of the input tensors.
+ */
+struct Model {
+    /**
+     * All operands included in the model.
+     */
+    vec<Operand> operands;
+
+    /**
+     * All operations included in the model.
+     *
+     * The operations are sorted into execution order. Every operand
+     * with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be
+     * written before it is read.
+     */
+    vec<Operation> operations;
+
+    /**
+     * Input indexes of the model. There must be at least one.
+     *
+     * Each value corresponds to the index of the operand in "operands".
+     */
+    vec<uint32_t> inputIndexes;
+
+    /**
+     * Output indexes of the model. There must be at least one.
+     *
+     * Each value corresponds to the index of the operand in "operands".
+     */
+    vec<uint32_t> outputIndexes;
+
+    /**
+     * A byte buffer containing operand data that were copied into the model.
+     *
+     * An operand's value must be located here if and only if Operand::lifetime
+     * equals OperandLifeTime::CONSTANT_COPY.
+     */
+    vec<uint8_t> operandValues;
+
+    /**
+     * A collection of shared memory pools containing operand values.
+     *
+     * An operand's value must be located here if and only if Operand::lifetime
+     * equals OperandLifeTime::CONSTANT_REFERENCE.
+     */
+    vec<memory> pools;
+
+    /**
+     * 'true' indicates TENSOR_FLOAT32 may be calculated with range and/or
+     * precision as low as that of the IEEE 754 16-bit floating-point format.
+     * 'false' indicates TENSOR_FLOAT32 must be calculated using at least the
+     * range and precision of the IEEE 754 32-bit floating-point format.
+     */
+    bool relaxComputationFloat32toFloat16;
+};
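
For orientation, the following client-side sketch (assuming the HIDL-generated C++ bindings for these types and C++20-style designated initializers) wires up a minimal Model containing a single ADD operation, showing how operands, operations, inputIndexes/outputIndexes, and operandValues relate. makeAddModel and all concrete dimensions and values are illustrative only, not part of this change.

// Hypothetical client-side sketch; not taken from this change.
#include <android/hardware/neuralnetworks/1.2/types.h>

#include <cstdint>
#include <cstring>

using ::android::hardware::hidl_vec;
using ::android::hardware::neuralnetworks::V1_0::Operand;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::OperandType;
using ::android::hardware::neuralnetworks::V1_2::Model;
using ::android::hardware::neuralnetworks::V1_2::Operation;
using ::android::hardware::neuralnetworks::V1_2::OperationType;

Model makeAddModel() {
    // Operands 0 and 1 are the model inputs, operand 2 is the fused-activation
    // scalar (a CONSTANT_COPY stored in operandValues), operand 3 is the output.
    hidl_vec<Operand> operands = {
        {.type = OperandType::TENSOR_FLOAT32, .dimensions = {1, 4},
         .numberOfConsumers = 1, .lifetime = OperandLifeTime::MODEL_INPUT},
        {.type = OperandType::TENSOR_FLOAT32, .dimensions = {1, 4},
         .numberOfConsumers = 1, .lifetime = OperandLifeTime::MODEL_INPUT},
        {.type = OperandType::INT32, .numberOfConsumers = 1,
         .lifetime = OperandLifeTime::CONSTANT_COPY,
         .location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)}},
        {.type = OperandType::TENSOR_FLOAT32, .dimensions = {1, 4},
         .numberOfConsumers = 0, .lifetime = OperandLifeTime::MODEL_OUTPUT},
    };

    // One operation: operand 3 = ADD(operand 0, operand 1, activation operand 2).
    hidl_vec<Operation> operations = {
        {.type = OperationType::ADD, .inputs = {0, 1, 2}, .outputs = {3}},
    };

    // The activation scalar (0 == no fused activation) lives in operandValues
    // because its lifetime is CONSTANT_COPY; its DataLocation points at it.
    int32_t noActivation = 0;
    hidl_vec<uint8_t> operandValues;
    operandValues.resize(sizeof(noActivation));
    std::memcpy(operandValues.data(), &noActivation, sizeof(noActivation));

    return {.operands = operands,
            .operations = operations,
            .inputIndexes = {0, 1},
            .outputIndexes = {3},
            .operandValues = operandValues,
            .pools = {},
            .relaxComputationFloat32toFloat16 = false};
}

A driver receives a structure like this through getSupportedOperations_1_2 or prepareModel_1_2 above; the CONSTANT_COPY scalar is read out of operandValues at the offset given by its DataLocation.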