Skip to content

Commit

Permalink
Define preprocessor schema (#2211)
Browse files Browse the repository at this point in the history
The purpose of the preprocessor is to load a model with the TFLM interpreter, allocate tensors, and then capture the resulting data structures. To capture this data, we need to provide structure and serialization to it for ingesting into the code generator. This commit defines a preprocessor data schema with generated C++ and python bindings for it. Unfortunately, we need to check in the generated code, as the Makefiles are currently not capable of running the flatc compiler. An update script is included to help keep these files in sync.

BUG=b/295076067
  • Loading branch information
rascani authored Sep 7, 2023
1 parent 5d542d7 commit 4813acf
Show file tree
Hide file tree
Showing 6 changed files with 309 additions and 0 deletions.
13 changes: 13 additions & 0 deletions codegen/preprocessor/BUILD
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Bazel build definitions for the preprocessor's flatbuffer schema bindings.
load("@flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_py_library")

package(default_visibility = ["//visibility:public"])

# C++ bindings generated from the preprocessor output schema.
flatbuffer_cc_library(
    name = "preprocessor_schema_fbs",
    srcs = ["preprocessor_schema.fbs"],
)

# Python bindings generated from the preprocessor output schema.
flatbuffer_py_library(
    name = "preprocessor_schema_py",
    srcs = ["preprocessor_schema.fbs"],
)
23 changes: 23 additions & 0 deletions codegen/preprocessor/preprocessor_schema.fbs
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
// Copyright 2023 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// NOTE: the generated C++/Python bindings are checked in; re-run
// update_schema.sh after editing this file to keep them in sync.
namespace tflm.codegen.preprocessor;

// TODO(rjascani): This needs a more descriptive name
// Captures the preprocessor's output for ingestion by the code generator.
table Data {
  // The model path that was provided to the preprocessor
  input_model_path:string;
}

root_type Data;
158 changes: 158 additions & 0 deletions codegen/preprocessor/preprocessor_schema_generated.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
// automatically generated by the FlatBuffers compiler, do not modify


#ifndef FLATBUFFERS_GENERATED_PREPROCESSORSCHEMA_TFLM_CODEGEN_PREPROCESSOR_H_
#define FLATBUFFERS_GENERATED_PREPROCESSORSCHEMA_TFLM_CODEGEN_PREPROCESSOR_H_

#include "flatbuffers/flatbuffers.h"

// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 2 &&
FLATBUFFERS_VERSION_MINOR == 0 &&
FLATBUFFERS_VERSION_REVISION == 6,
"Non-compatible flatbuffers version included");

namespace tflm {
namespace codegen {
namespace preprocessor {

struct Data;
struct DataBuilder;
struct DataT;

// Object API (unpacked, mutable) mirror of the Data table.
struct DataT : public flatbuffers::NativeTable {
  typedef Data TableType;
  // Owned copy of the serialized input_model_path string.
  std::string input_model_path{};
};

// Read-only accessor over a serialized Data table.
struct Data FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef DataT NativeTableType;
  typedef DataBuilder Builder;
  // Vtable slot offsets for each field of the table.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INPUT_MODEL_PATH = 4
  };
  // The model path that was provided to the preprocessor.
  const flatbuffers::String *input_model_path() const {
    return GetPointer<const flatbuffers::String *>(VT_INPUT_MODEL_PATH);
  }
  // Checks that the buffer region backing this table is well-formed.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_INPUT_MODEL_PATH) &&
           verifier.VerifyString(input_model_path()) &&
           verifier.EndTable();
  }
  DataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(DataT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<Data> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DataT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incrementally writes a Data table into a FlatBufferBuilder.
struct DataBuilder {
  typedef Data Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  // Offset of this table's start within fbb_, recorded by the constructor.
  flatbuffers::uoffset_t start_;
  void add_input_model_path(flatbuffers::Offset<flatbuffers::String> input_model_path) {
    fbb_.AddOffset(Data::VT_INPUT_MODEL_PATH, input_model_path);
  }
  explicit DataBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Finalizes the table and returns its offset within the buffer.
  flatbuffers::Offset<Data> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Data>(end);
    return o;
  }
};

// Builds a Data table from an already-created string offset
// (default 0 when no string was created in the buffer).
inline flatbuffers::Offset<Data> CreateData(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> input_model_path = 0) {
  DataBuilder builder_(_fbb);
  builder_.add_input_model_path(input_model_path);
  return builder_.Finish();
}

// Convenience overload: copies a C string into the buffer (when non-null)
// and builds the Data table in one call.
inline flatbuffers::Offset<Data> CreateDataDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *input_model_path = nullptr) {
  auto input_model_path__ = input_model_path ? _fbb.CreateString(input_model_path) : 0;
  return tflm::codegen::preprocessor::CreateData(
      _fbb,
      input_model_path__);
}

flatbuffers::Offset<Data> CreateData(flatbuffers::FlatBufferBuilder &_fbb, const DataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Unpacks this table into a newly allocated DataT; the caller takes ownership
// of the returned pointer.
inline DataT *Data::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<DataT>(new DataT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies this table's fields into an existing DataT.
inline void Data::UnPackTo(DataT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  // Only copy the string when the field is present in the buffer.
  { auto _e = input_model_path(); if (_e) _o->input_model_path = _e->str(); }
}

// Serializes a DataT back into a flatbuffer via the object-API CreateData.
inline flatbuffers::Offset<Data> Data::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DataT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateData(_fbb, _o, _rehasher);
}

// Object-API serializer: writes the contents of a DataT into the builder.
inline flatbuffers::Offset<Data> CreateData(flatbuffers::FlatBufferBuilder &_fbb, const DataT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  // An empty string is passed through as offset 0 (field not written).
  auto _input_model_path = _o->input_model_path.empty() ? 0 : _fbb.CreateString(_o->input_model_path);
  return tflm::codegen::preprocessor::CreateData(
      _fbb,
      _input_model_path);
}

// Returns the root Data table of a finished flatbuffer.
inline const tflm::codegen::preprocessor::Data *GetData(const void *buf) {
  return flatbuffers::GetRoot<tflm::codegen::preprocessor::Data>(buf);
}

// Same as GetData, but for buffers written with a leading size prefix.
inline const tflm::codegen::preprocessor::Data *GetSizePrefixedData(const void *buf) {
  return flatbuffers::GetSizePrefixedRoot<tflm::codegen::preprocessor::Data>(buf);
}

// Verifies an entire buffer whose root is a Data table.
inline bool VerifyDataBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifyBuffer<tflm::codegen::preprocessor::Data>(nullptr);
}

// Size-prefixed variant of VerifyDataBuffer.
inline bool VerifySizePrefixedDataBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifySizePrefixedBuffer<tflm::codegen::preprocessor::Data>(nullptr);
}

// Marks the given Data table as the buffer root and finalizes the buffer.
inline void FinishDataBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<tflm::codegen::preprocessor::Data> root) {
  fbb.Finish(root);
}

// Same as FinishDataBuffer, but also prepends the total buffer size.
inline void FinishSizePrefixedDataBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<tflm::codegen::preprocessor::Data> root) {
  fbb.FinishSizePrefixed(root);
}

// Unpacks a finished buffer into an owned object-API DataT.
inline std::unique_ptr<tflm::codegen::preprocessor::DataT> UnPackData(
    const void *buf,
    const flatbuffers::resolver_function_t *res = nullptr) {
  return std::unique_ptr<tflm::codegen::preprocessor::DataT>(GetData(buf)->UnPack(res));
}

// Size-prefixed variant of UnPackData.
inline std::unique_ptr<tflm::codegen::preprocessor::DataT> UnPackSizePrefixedData(
    const void *buf,
    const flatbuffers::resolver_function_t *res = nullptr) {
  return std::unique_ptr<tflm::codegen::preprocessor::DataT>(GetSizePrefixedData(buf)->UnPack(res));
}

} // namespace preprocessor
} // namespace codegen
} // namespace tflm

#endif // FLATBUFFERS_GENERATED_PREPROCESSORSCHEMA_TFLM_CODEGEN_PREPROCESSOR_H_
77 changes: 77 additions & 0 deletions codegen/preprocessor/preprocessor_schema_py_generated.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
import flatbuffers

# automatically generated by the FlatBuffers compiler, do not modify

# namespace: preprocessor

from flatbuffers.compat import import_numpy
np = import_numpy()

class Data(object):
    """Generated read-only accessor over a serialized Data table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Read the root table offset and position an accessor over it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Data()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsData(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    # Data
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Data
    def InputModelPath(self):
        # The model path provided to the preprocessor; None when the field
        # is absent from the buffer (vtable offset 0).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

# Module-level builder helpers for writing a Data table. The short-named
# variants (Start/AddInputModelPath/End) delegate to the prefixed ones.
def DataStart(builder): builder.StartObject(1)
def Start(builder):
    return DataStart(builder)
def DataAddInputModelPath(builder, inputModelPath): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(inputModelPath), 0)
def AddInputModelPath(builder, inputModelPath):
    return DataAddInputModelPath(builder, inputModelPath)
def DataEnd(builder): return builder.EndObject()
def End(builder):
    return DataEnd(builder)

class DataT(object):
    """Object-API (mutable) mirror of the Data table."""

    # DataT
    def __init__(self):
        self.inputModelPath = None  # type: str

    @classmethod
    def InitFromBuf(cls, buf, pos):
        # Build a DataT directly from a serialized buffer position.
        data = Data()
        data.Init(buf, pos)
        return cls.InitFromObj(data)

    @classmethod
    def InitFromObj(cls, data):
        # Copy the fields of a Data accessor into a new DataT.
        x = DataT()
        x._UnPack(data)
        return x

    # DataT
    def _UnPack(self, data):
        if data is None:
            return
        self.inputModelPath = data.InputModelPath()

    # DataT
    def Pack(self, builder):
        # Strings must be written to the buffer before the table is started.
        if self.inputModelPath is not None:
            inputModelPath = builder.CreateString(self.inputModelPath)
        DataStart(builder)
        if self.inputModelPath is not None:
            DataAddInputModelPath(builder, inputModelPath)
        data = DataEnd(builder)
        return data
36 changes: 36 additions & 0 deletions codegen/preprocessor/update_schema.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
#!/usr/bin/env bash
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

#
# Updates the checked-in generated source for the preprocessor schema
#

# Fail fast on command errors, unset variables, and failures in pipelines.
set -euo pipefail

# Run from the repository root so bazel targets and copy paths resolve.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="${SCRIPT_DIR}/../.."
cd "${ROOT_DIR}"

# We generate and check in the C++ and Python bindings for the schema to keep it
# working with the Makefiles. The makefiles do not support running flatc.
bazel build //codegen/preprocessor:preprocessor_schema_fbs_srcs
/bin/cp ./bazel-bin/codegen/preprocessor/preprocessor_schema_generated.h \
  codegen/preprocessor/preprocessor_schema_generated.h

bazel build //codegen/preprocessor:preprocessor_schema_py
/bin/cp ./bazel-bin/codegen/preprocessor/preprocessor_schema_py_generated.py \
  codegen/preprocessor/preprocessor_schema_py_generated.py

2 changes: 2 additions & 0 deletions tensorflow/lite/micro/tools/ci_build/test_code_style.sh
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,8 @@ tensorflow/lite/micro/tools/make/downloads/pigweed/pw_presubmit/py/pw_presubmit/
-e third_party/xtensa \
-e ci \
-e c/common.c \
-e codegen/preprocessor/preprocessor_schema_generated.h \
-e codegen/preprocessor/preprocessor_schema_py_generated.py \
-e core/api/error_reporter.cc \
-e kernels/internal/reference/integer_ops/ \
-e kernels/internal/reference/reference_ops.h \
Expand Down

0 comments on commit 4813acf

Please sign in to comment.