summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: e054448)
raw | patch | inline | side by side (parent: e054448)
author | Hongmei Gou <a0271529@ti.com> | |
Mon, 17 Feb 2020 19:37:54 +0000 (14:37 -0500) | ||
committer | Hongmei Gou <a0271529@ti.com> | |
Tue, 18 Feb 2020 16:25:54 +0000 (11:25 -0500) |
* Currently supporting only a single tidl subgraph,
with single input tensor and single output tensor

Signed-off-by: Hongmei Gou <a0271529@ti.com>
Makefile | patch | blob | history | |
model_utils.cc | patch | blob | history | |
tidl_op.cc | [new file with mode: 0644] | patch | blob |
tidl_op.h | [new file with mode: 0644] | patch | blob |
diff --git a/Makefile b/Makefile
index b70b76fe7452dee2008ebf69fde6f3a6ff5a5d36..f918f68f09357c720bae37f69f846440a6a51b3a 100644 (file)
--- a/Makefile
+++ b/Makefile
CXX := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}g++
-classification: classification.cc model_utils.cc utils.cc
- $(CXX) classification.cc model_utils.cc utils.cc -o tflite_classification $(LDFLAGS) $(LIBS) $(CXXFLAGS) $(INCLUDES)
+COMMON_SRC = model_utils.cc utils.cc
-segmentation: segmentation.cc model_utils.cc utils.cc
- $(CXX) segmentation.cc model_utils.cc utils.cc -o tflite_segmentation $(LDFLAGS) $(LIBS) $(CXXFLAGS) $(INCLUDES)
+ifeq ($(TIDL_ACC), yes)
+ COMMON_SRC += tidl_op.cc
+endif
+classification: classification.cc $(COMMON_SRC)
+ $(CXX) classification.cc $(COMMON_SRC) -o tflite_classification $(LDFLAGS) $(LIBS) $(CXXFLAGS) $(INCLUDES)
+
+segmentation: segmentation.cc $(COMMON_SRC)
+ $(CXX) segmentation.cc $(COMMON_SRC) -o tflite_segmentation $(LDFLAGS) $(LIBS) $(CXXFLAGS) $(INCLUDES)
clean:
rm -rf classification segmentation
diff --git a/model_utils.cc b/model_utils.cc
index 8e6073569d21fda9fd9a3b77a8bee64c1080025b..134f9582e8f7d79ea03266726866f52bed9f71e2 100644 (file)
--- a/model_utils.cc
+++ b/model_utils.cc
#include "model_utils.h"
+#ifdef TIDL_OFFLOAD
+#include "tidl_op.h"
+#endif
+
#include <memory>
#include "tensorflow/lite/builtin_op_data.h"
std::unique_ptr<tflite::Interpreter> BuildTfliteInterpreter(
const tflite::FlatBufferModel& model, int num_threads) {
tflite::ops::builtin::BuiltinOpResolver resolver;
+#ifdef TIDL_OFFLOAD
+ resolver.AddCustom(tidl::kTidlSubgraphOp, tidl::RegisterTidlSubgraphOp());
+#endif
std::unique_ptr<tflite::Interpreter> interpreter;
if (tflite::InterpreterBuilder(model, resolver)(&interpreter) != kTfLiteOk) {
diff --git a/tidl_op.cc b/tidl_op.cc
--- /dev/null
+++ b/tidl_op.cc
@@ -0,0 +1,127 @@
+#include "tensorflow/lite/kernels/internal/tensor.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+
+#include "tidl_op.h"
+
+#include "subgraph_runtime.h"
+
+#include <iostream>
+using namespace std;
+
+namespace tidl {
+namespace tidl_subgraph_op {
+
+using tflite::GetInput;
+using tflite::GetOutput;
+using tflite::GetTensorData;
+using tflite::NumDimensions;
+using tflite::NumInputs;
+using tflite::NumOutputs;
+
+// Per-op-instance state allocated in Init() and released in Free().
+// Currently a placeholder: no field is read by Prepare()/Eval() yet.
+struct OpData {
+ int dummy; // Use OpData to pass data from Init() to Prepare()/Eval()
+};
+
+// Called once when the interpreter instantiates the custom op.
+// Initializes the (single) TIDL subgraph and allocates the per-op state.
+// `buffer`/`length` carry the op's custom options from the flatbuffer;
+// they are currently unused (the dead cast to uint8_t* was removed).
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+  auto* op_data = new OpData;
+
+  // A single subgraph with subgraph index of 0
+  TidlInitSubgraph(1, 0);
+
+  return op_data;
+}
+
+// Releases the TIDL subgraph and the OpData allocated in Init().
+void Free(TfLiteContext* context, void* buffer) {
+  // Single subgraph, index 0 — mirrors the TidlInitSubgraph() call.
+  TidlFreeSubgraph(1, 0);
+  // static_cast is the correct named cast for void* -> T*.
+  delete static_cast<OpData*>(buffer);
+}
+
+// Allocates the output tensor. The output shape is copied verbatim from
+// the shape already recorded in the model, so dynamically-shaped inputs
+// are not supported (consistent with the single-subgraph limitation).
+// Unused locals (`data`, `input`) from the original were removed.
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+  // Single output tensor (index 0).
+  TfLiteTensor* output = GetOutput(context, node, 0);
+
+  // Copy the static shape into a fresh TfLiteIntArray;
+  // ResizeTensor takes ownership of `output_size`.
+  int num_dims = NumDimensions(output);
+  TfLiteIntArray* output_size = TfLiteIntArrayCreate(num_dims);
+  for (int i = 0; i < num_dims; ++i) {
+    output_size->data[i] = output->dims->data[i];
+  }
+
+  return context->ResizeTensor(context, output, output_size);
+}
+
+// Runs the TIDL subgraph on the single input tensor, writing the single
+// output tensor. Fixes over the original: both DEBUG dump files are now
+// fclose()d (they were leaked), fopen() results are null-checked, and
+// each DEBUG section declares its own locals instead of the second
+// section silently reusing `num_dims` declared in the first.
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+  // Single input and single output tensor (index 0).
+  TfLiteTensor* output = GetOutput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
+
+  // NOTE(review): assumes float32 tensors at the subgraph boundary —
+  // confirm the converted model never produces quantized boundaries.
+  float* input_data = input->data.f;
+  float* output_data = output->data.f;
+
+#ifdef DEBUG
+  // Dump the raw input tensor for offline comparison with TIDL tools.
+  FILE* filePtr = fopen("floatArray_tidl_input", "w");
+  if (filePtr != nullptr) {
+    std::cout << "input dimension: ";
+    int num_dims = NumDimensions(input);
+    int input_size = 1;
+    for (int i = 0; i < num_dims; i++) {
+      input_size = input_size * input->dims->data[i];
+      std::cout << input->dims->data[i] << "x";
+    }
+    std::cout << std::endl;
+
+    for (int i = 0; i < input_size; i++) {
+      fprintf(filePtr, "%.10g\t", input_data[i]);
+      if (i % 9 == 8)
+        fprintf(filePtr, "\n");
+    }
+    fclose(filePtr);  // was leaked in the original
+  }
+#endif
+
+  // A single subgraph with subgraph index of 0,
+  // batch size 1, single input tensor, and single output tensor
+  TidlRunSubgraph(1, 0, 1, 1, 1, &input_data, &output_data);
+
+#ifdef DEBUG
+  // Dump the raw output tensor as well.
+  FILE* filePtrOut = fopen("floatArray_tidl_output", "w");
+  if (filePtrOut != nullptr) {
+    std::cout << "output dimension: ";
+    int out_dims = NumDimensions(output);
+    int output_size = 1;
+    for (int i = 0; i < out_dims; i++) {
+      output_size = output_size * output->dims->data[i];
+      std::cout << output->dims->data[i] << "x";
+    }
+    std::cout << std::endl;
+
+    for (int i = 0; i < output_size; i++) {
+      fprintf(filePtrOut, "%.10g\t", output_data[i]);
+      if (i % 9 == 8)
+        fprintf(filePtrOut, "\n");
+    }
+    fclose(filePtrOut);  // was leaked in the original
+  }
+#endif
+
+  return kTfLiteOk;
+}
+
+} //namespace tidl_subgraph_op
+
+// Returns the TfLiteRegistration for the TIDL subgraph custom op.
+// Aggregate initialization fills the four lifecycle hooks; the remaining
+// TfLiteRegistration members are value-initialized (null/zero).
+TfLiteRegistration* RegisterTidlSubgraphOp() {
+  static TfLiteRegistration registration = {
+      tidl_subgraph_op::Init,
+      tidl_subgraph_op::Free,
+      tidl_subgraph_op::Prepare,
+      tidl_subgraph_op::Eval};
+  return &registration;
+}
+
+}//namespace tidl
diff --git a/tidl_op.h b/tidl_op.h
--- /dev/null
+++ b/tidl_op.h
@@ -0,0 +1,14 @@
+#ifndef TIDL_OP_H_
+#define TIDL_OP_H_
+
+#include "tensorflow/lite/c/c_api_internal.h"
+
+// Public interface of the TIDL subgraph custom op. Register it on a
+// BuiltinOpResolver via:
+//   resolver.AddCustom(tidl::kTidlSubgraphOp, tidl::RegisterTidlSubgraphOp());
+namespace tidl {
+
+  // Custom-op name; must match the op name emitted by the TIDL model
+  // converter into the .tflite flatbuffer.
+  // NOTE(review): `static` in a header gives every translation unit its
+  // own copy of this array — consider `inline constexpr` if the project
+  // builds with C++17 — TODO confirm language standard.
+  static const char kTidlSubgraphOp[] = "tidl-am5-custom-op";
+
+  // Returns the registration (Init/Free/Prepare/Eval) for the op;
+  // defined in tidl_op.cc.
+  TfLiteRegistration* RegisterTidlSubgraphOp();
+
+} // namespace tidl
+
+#endif