Automated sync from github.com/tensorflow/tensorflow (#2221)
BUG=automated sync from upstream
NO_CHECK_TFLITE_FILES=automated sync from upstream
TFLM-bot authored Sep 14, 2023
1 parent 77e2cdb commit ad8d238
Showing 12 changed files with 178 additions and 31 deletions.
9 changes: 6 additions & 3 deletions codegen/examples/hello_world/hello_world_model.cc
@@ -122,7 +122,8 @@ struct Node0_0 {
.activation = kTfLiteActRelu,
.weights_format = kTfLiteFullyConnectedWeightsFormatDefault,
.keep_num_dims = false,
- .asymmetric_quantize_inputs = false};
+ .asymmetric_quantize_inputs = false,
+ .quantized_bias_type = kTfLiteNoType};
} node_0_0;

struct Node0_1 {
@@ -139,7 +140,8 @@ struct Node0_1 {
.activation = kTfLiteActRelu,
.weights_format = kTfLiteFullyConnectedWeightsFormatDefault,
.keep_num_dims = false,
- .asymmetric_quantize_inputs = false};
+ .asymmetric_quantize_inputs = false,
+ .quantized_bias_type = kTfLiteNoType};
} node_0_1;

struct Node0_2 {
@@ -156,7 +158,8 @@ struct Node0_2 {
.activation = kTfLiteActNone,
.weights_format = kTfLiteFullyConnectedWeightsFormatDefault,
.keep_num_dims = false,
- .asymmetric_quantize_inputs = false};
+ .asymmetric_quantize_inputs = false,
+ .quantized_bias_type = kTfLiteNoType};
} node_0_2;

struct Tensor0_0Dims {
22 changes: 22 additions & 0 deletions codegen/operators/constants.py
@@ -26,3 +26,25 @@
schema_fb.ActivationFunctionType.TANH: "kTfLiteActTanh",
schema_fb.ActivationFunctionType.SIGN_BIT: "kTfLiteActSignBit",
}

+ TFLITE_TYPE: Dict[int, str] = {
+     0: "kTfLiteNoType",
+     1: "kTfLiteFloat32",
+     2: "kTfLiteInt32",
+     3: "kTfLiteUInt8",
+     4: "kTfLiteInt64",
+     5: "kTfLiteString",
+     6: "kTfLiteBool",
+     7: "kTfLiteInt16",
+     8: "kTfLiteComplex64",
+     9: "kTfLiteInt8",
+     10: "kTfLiteFloat16",
+     11: "kTfLiteFloat64",
+     12: "kTfLiteComplex128",
+     13: "kTfLiteUInt64",
+     14: "kTfLiteResource",
+     15: "kTfLiteVariant",
+     16: "kTfLiteUInt32",
+     17: "kTfLiteUInt16",
+     18: "kTfLiteInt4",
+ }
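
For illustration only, a minimal sketch that is not part of this commit (it assumes codegen/operators/constants.py is importable as shown): the table resolves the schema's integer type code to the C identifier that the code generators emit.

from codegen.operators import constants

# Key 0, the schema default for the new quantized-bias fields, maps to the
# "no type" sentinel used in generated source.
assert constants.TFLITE_TYPE[0] == "kTfLiteNoType"
# Key 2 maps to the identifier for a 32-bit quantized bias.
assert constants.TFLITE_TYPE[2] == "kTfLiteInt32"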
7 changes: 5 additions & 2 deletions codegen/operators/fully_connected.py
@@ -43,11 +43,14 @@ def generate_c_builtin_data(self) -> str:
" .activation = ${activation},\n"
" .weights_format = ${weights_format},\n"
" .keep_num_dims = ${keep_num_dims},\n"
" .asymmetric_quantize_inputs = ${asymmetric_quantize_inputs}};")
" .asymmetric_quantize_inputs = ${asymmetric_quantize_inputs},\n"
" .quantized_bias_type = ${quantized_bias_type}};")
return builtin_template.substitute(
activation=constants.ACTIVATION_FUNCS[
self._builtin_options.fusedActivationFunction],
weights_format=_WEIGHTS_FORMATS[self._builtin_options.weightsFormat],
keep_num_dims=utils.bool_to_c_str(self._builtin_options.keepNumDims),
asymmetric_quantize_inputs=utils.bool_to_c_str(
- self._builtin_options.asymmetricQuantizeInputs))
+ self._builtin_options.asymmetricQuantizeInputs),
+ quantized_bias_type=constants.TFLITE_TYPE[
+     self._builtin_options.quantizedBiasType])
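
A hypothetical snippet, not from this commit, showing how the string.Template substitution above yields the initializer lines that appear in hello_world_model.cc at the top of this diff:

from string import Template

# Tail of the builtin template in fully_connected.py, reduced to two fields.
template = Template(" .asymmetric_quantize_inputs = ${asymmetric_quantize_inputs},\n"
                    " .quantized_bias_type = ${quantized_bias_type}};")
print(template.substitute(asymmetric_quantize_inputs="false",
                          quantized_bias_type="kTfLiteNoType"))
# Prints:
#  .asymmetric_quantize_inputs = false,
#  .quantized_bias_type = kTfLiteNoType};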
10 changes: 9 additions & 1 deletion tensorflow/lite/core/api/flatbuffer_conversions.cc
@@ -1305,6 +1305,9 @@ TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,

params->dilation_width_factor = schema_params->dilation_w_factor();
params->dilation_height_factor = schema_params->dilation_h_factor();
+ TF_LITE_ENSURE_STATUS(
+     ConvertTensorType(schema_params->quantized_bias_type(),
+                       &params->quantized_bias_type, error_reporter));
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
@@ -1519,7 +1522,9 @@ TfLiteStatus ParseFullyConnected(const Operator* op,
params->keep_num_dims = schema_params->keep_num_dims();
params->asymmetric_quantize_inputs =
schema_params->asymmetric_quantize_inputs();

+ TF_LITE_ENSURE_STATUS(
+     ConvertTensorType(schema_params->quantized_bias_type(),
+                       &params->quantized_bias_type, error_reporter));
switch (schema_params->weights_format()) {
case FullyConnectedOptionsWeightsFormat_DEFAULT:
params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
@@ -2450,6 +2455,9 @@ TfLiteStatus ParseTransposeConv(const Operator* op,

params->activation =
ConvertActivation(transpose_conv_params->fused_activation_function());
+ TF_LITE_ENSURE_STATUS(
+     ConvertTensorType(transpose_conv_params->quantized_bias_type(),
+                       &params->quantized_bias_type, error_reporter));
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
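
Backward-compatibility note, with a sketch that is not part of the commit: the quantized_bias_type() accessor returns the schema default of 0 when the field is absent, so models serialized before this change should still parse. The same default is observable through the Python bindings updated later in this diff (assuming the flatbuffers package and the generated module are importable):

import flatbuffers
from tensorflow.lite.python import schema_py_generated as schema_fb

# Build a Conv2DOptions table without setting the new field.
builder = flatbuffers.Builder(0)
schema_fb.Conv2DOptionsStart(builder)
builder.Finish(schema_fb.Conv2DOptionsEnd(builder))

opts = schema_fb.Conv2DOptions.GetRootAsConv2DOptions(builder.Output(), 0)
assert opts.QuantizedBiasType() == 0  # schema default when unset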
12 changes: 12 additions & 0 deletions tensorflow/lite/core/c/builtin_op_data.h
@@ -91,6 +91,10 @@ typedef struct {
// Note: Version 2 supports dilation values not equal to 1.
int dilation_width_factor;
int dilation_height_factor;

+ // Parameters for CONV_2D version 7 or above.
+ // Used to determine the default value for the quantized bias.
+ TfLiteType quantized_bias_type;
} TfLiteConvParams;

typedef struct {
@@ -194,6 +198,10 @@ typedef struct {
// If set to true and the weights are quantized, then non constant inputs
// are quantized at evaluation time with asymmetric quantization.
bool asymmetric_quantize_inputs;

+ // Parameters for FullyConnected version 10 or above.
+ // Used to determine the default value for the quantized bias.
+ TfLiteType quantized_bias_type;
} TfLiteFullyConnectedParams;

typedef enum {
@@ -431,6 +439,10 @@ typedef struct {

// Parameters supported by version 4:
TfLiteFusedActivation activation;

+ // Parameters for TransposeConv version 5 or above.
+ // Used to determine the default value for the quantized bias.
+ TfLiteType quantized_bias_type;
} TfLiteTransposeConvParams;

typedef struct {
13 changes: 7 additions & 6 deletions tensorflow/lite/micro/kernels/conv_test.cc
@@ -53,6 +53,7 @@ static TfLiteConvParams common_conv_params = {
kTfLiteActNone, // activation
1, // dilation_width_factor
1, // dilation_height_factor
+ kTfLiteNoType // quantized_bias_type
};

} // namespace
@@ -420,8 +421,8 @@ TF_LITE_MICRO_TEST(SimpleTestQuantized16x8PerChannelRelu632bBias) {
TF_LITE_MICRO_TEST(Kernel1x1QuantizedPerChannel) {
// conv params:
// padding, stride_<width,height>, activation, dilation_<width, height>
- TfLiteConvParams conv_params = {kTfLitePaddingValid, 1, 1,
-                                 kTfLiteActNone, 1, 1};
+ TfLiteConvParams conv_params = {
+     kTfLitePaddingValid, 1, 1, kTfLiteActNone, 1, 1, kTfLiteNoType};

int input_shape[] = {4, 1, 2, 2, 4}; // [len,N,H,W,C]
constexpr int input_elements =
@@ -473,8 +474,8 @@ TF_LITE_MICRO_TEST(Kernel1x1QuantizedPerChannel) {
TF_LITE_MICRO_TEST(Kernel1x1QuantizedPerChannelRelu6) {
// conv params:
// padding, stride_<width,height>, activation, dilation_<width, height>
- TfLiteConvParams conv_params = {kTfLitePaddingValid, 1, 1,
-                                 kTfLiteActRelu6, 1, 1};
+ TfLiteConvParams conv_params = {
+     kTfLitePaddingValid, 1, 1, kTfLiteActRelu6, 1, 1, kTfLiteNoType};

int input_shape[] = {4, 1, 2, 2, 4}; // [len,N,H,W,C]
constexpr int input_elements =
@@ -526,8 +527,8 @@ TF_LITE_MICRO_TEST(Kernel1x1QuantizedPerChannelRelu6) {
TF_LITE_MICRO_TEST(Kernel1x1Quantized16x8PerChannelRelu6) {
// conv params:
// padding, stride_<width,height>, activation, dilation_<width, height>
- TfLiteConvParams conv_params = {kTfLitePaddingValid, 1, 1,
-                                 kTfLiteActRelu6, 1, 1};
+ TfLiteConvParams conv_params = {
+     kTfLitePaddingValid, 1, 1, kTfLiteActRelu6, 1, 1, kTfLiteNoType};

int input_shape[] = {4, 1, 2, 2, 4}; // [len,N,H,W,C]
const int input_elements = 1 * 2 * 2 * 4;
3 changes: 2 additions & 1 deletion tensorflow/lite/micro/kernels/fully_connected_test.cc
@@ -247,7 +247,8 @@ TfLiteStatus ValidateFullyConnectedGoldens(
const TfLiteFusedActivation activation, const float tolerance,
const int output_len, const T* golden, T* output_data) {
TfLiteFullyConnectedParams builtin_data = {
- activation, kTfLiteFullyConnectedWeightsFormatDefault, false, false};
+ activation, kTfLiteFullyConnectedWeightsFormatDefault, false, false,
+ kTfLiteNoType};

// Avoid variable length array warning.
constexpr int inputs_array_len = 4;
12 changes: 8 additions & 4 deletions tensorflow/lite/micro/kernels/transpose_conv_test.cc
@@ -53,7 +53,8 @@ static TfLiteConvParams common_conv_params = {kTfLitePaddingSame, // padding
1, // stride_height
kTfLiteActNone,
1,
- 1};
+ 1,
+ kTfLiteNoType};

template <typename T>
TfLiteStatus InvokeTransposeConv(TfLiteTensor* tensors, int tensors_size,
@@ -253,7 +254,8 @@ TF_LITE_MICRO_TEST(fusedRELUTest) {
1, // stride_height
kTfLiteActRelu,
1,
- 1};
+ 1,
+ kTfLiteNoType};

TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, tflite::testing::TestTransposeConvFloat(
@@ -276,7 +278,8 @@ TF_LITE_MICRO_TEST(AccuracyWithFusedActivationTest) {
3, // stride_height
kTfLiteActRelu,
1,
- 1};
+ 1,
+ kTfLiteNoType};

TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, tflite::testing::TestTransposeConvFloat(
@@ -304,7 +307,8 @@ TF_LITE_MICRO_TEST(MultiChannelBiasWithFusedActivationTest) {
2, // stride_height
kTfLiteActRelu,
1,
- 1};
+ 1,
+ kTfLiteNoType};

TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,
4 changes: 2 additions & 2 deletions tensorflow/lite/micro/memory_arena_threshold_test.cc
@@ -97,7 +97,7 @@ constexpr int kTestConvModelOnlyTotalSize = 9488;
// Tail size contributed by the conv model excluding the
// RecordingMicroAllocator's overhead
// TODO(b/207157610): replace magic number that depends on OPs
- constexpr int kTestConvModelOnlyTailSize = 1744;
+ constexpr int kTestConvModelOnlyTailSize = 1816;
constexpr int kTestConvModelPersistentTfLiteTensorDataSize = 128;
constexpr int kTestConvModelPersistentBufferDataSize = 728;
#else
@@ -108,7 +108,7 @@ constexpr int kTestConvModelOnlyTotalSize = 9760;
// Tail size contributed by the conv model excluding the
// RecordingMicroAllocator's overhead
// TODO(b/207157610): replace magic number that depends on OPs
- constexpr int kTestConvModelOnlyTailSize = 2016;
+ constexpr int kTestConvModelOnlyTailSize = 2088;
constexpr int kTestConvModelPersistentTfLiteTensorDataSize = 224;
constexpr int kTestConvModelPersistentBufferDataSize = 720;
#endif
45 changes: 42 additions & 3 deletions tensorflow/lite/python/schema_py_generated.py
@@ -2640,7 +2640,14 @@ def DilationHFactor(self):
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 1

- def Conv2DOptionsStart(builder): builder.StartObject(6)
+ # Conv2DOptions
+ def QuantizedBiasType(self):
+     o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
+     if o != 0:
+         return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+     return 0
+
+ def Conv2DOptionsStart(builder): builder.StartObject(7)
def Start(builder):
return Conv2DOptionsStart(builder)
def Conv2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
@@ -2661,6 +2668,9 @@ def AddDilationWFactor(builder, dilationWFactor):
def Conv2DOptionsAddDilationHFactor(builder, dilationHFactor): builder.PrependInt32Slot(5, dilationHFactor, 1)
def AddDilationHFactor(builder, dilationHFactor):
return Conv2DOptionsAddDilationHFactor(builder, dilationHFactor)
+ def Conv2DOptionsAddQuantizedBiasType(builder, quantizedBiasType): builder.PrependInt8Slot(6, quantizedBiasType, 0)
+ def AddQuantizedBiasType(builder, quantizedBiasType):
+     return Conv2DOptionsAddQuantizedBiasType(builder, quantizedBiasType)
def Conv2DOptionsEnd(builder): return builder.EndObject()
def End(builder):
return Conv2DOptionsEnd(builder)
@@ -2675,6 +2685,7 @@ def __init__(self):
self.fusedActivationFunction = 0 # type: int
self.dilationWFactor = 1 # type: int
self.dilationHFactor = 1 # type: int
+ self.quantizedBiasType = 0  # type: int

@classmethod
def InitFromBuf(cls, buf, pos):
@@ -2698,6 +2709,7 @@ def _UnPack(self, conv2doptions):
self.fusedActivationFunction = conv2doptions.FusedActivationFunction()
self.dilationWFactor = conv2doptions.DilationWFactor()
self.dilationHFactor = conv2doptions.DilationHFactor()
+ self.quantizedBiasType = conv2doptions.QuantizedBiasType()

# Conv2DOptionsT
def Pack(self, builder):
@@ -2708,6 +2720,7 @@ def Pack(self, builder):
Conv2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
Conv2DOptionsAddDilationWFactor(builder, self.dilationWFactor)
Conv2DOptionsAddDilationHFactor(builder, self.dilationHFactor)
+ Conv2DOptionsAddQuantizedBiasType(builder, self.quantizedBiasType)
conv2doptions = Conv2DOptionsEnd(builder)
return conv2doptions
# automatically generated by the FlatBuffers compiler, do not modify
@@ -4512,7 +4525,14 @@ def AsymmetricQuantizeInputs(self):
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False

- def FullyConnectedOptionsStart(builder): builder.StartObject(4)
+ # FullyConnectedOptions
+ def QuantizedBiasType(self):
+     o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+     if o != 0:
+         return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+     return 0
+
+ def FullyConnectedOptionsStart(builder): builder.StartObject(5)
def Start(builder):
return FullyConnectedOptionsStart(builder)
def FullyConnectedOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
@@ -4527,6 +4547,9 @@ def AddKeepNumDims(builder, keepNumDims):
def FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0)
def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
return FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
+ def FullyConnectedOptionsAddQuantizedBiasType(builder, quantizedBiasType): builder.PrependInt8Slot(4, quantizedBiasType, 0)
+ def AddQuantizedBiasType(builder, quantizedBiasType):
+     return FullyConnectedOptionsAddQuantizedBiasType(builder, quantizedBiasType)
def FullyConnectedOptionsEnd(builder): return builder.EndObject()
def End(builder):
return FullyConnectedOptionsEnd(builder)
@@ -4539,6 +4562,7 @@ def __init__(self):
self.weightsFormat = 0 # type: int
self.keepNumDims = False # type: bool
self.asymmetricQuantizeInputs = False # type: bool
+ self.quantizedBiasType = 0  # type: int

@classmethod
def InitFromBuf(cls, buf, pos):
@@ -4560,6 +4584,7 @@ def _UnPack(self, fullyConnectedOptions):
self.weightsFormat = fullyConnectedOptions.WeightsFormat()
self.keepNumDims = fullyConnectedOptions.KeepNumDims()
self.asymmetricQuantizeInputs = fullyConnectedOptions.AsymmetricQuantizeInputs()
+ self.quantizedBiasType = fullyConnectedOptions.QuantizedBiasType()

# FullyConnectedOptionsT
def Pack(self, builder):
@@ -4568,6 +4593,7 @@ def Pack(self, builder):
FullyConnectedOptionsAddWeightsFormat(builder, self.weightsFormat)
FullyConnectedOptionsAddKeepNumDims(builder, self.keepNumDims)
FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
+ FullyConnectedOptionsAddQuantizedBiasType(builder, self.quantizedBiasType)
fullyConnectedOptions = FullyConnectedOptionsEnd(builder)
return fullyConnectedOptions
# automatically generated by the FlatBuffers compiler, do not modify
@@ -16436,7 +16462,14 @@ def FusedActivationFunction(self):
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0

- def TransposeConvOptionsStart(builder): builder.StartObject(4)
+ # TransposeConvOptions
+ def QuantizedBiasType(self):
+     o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+     if o != 0:
+         return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+     return 0
+
+ def TransposeConvOptionsStart(builder): builder.StartObject(5)
def Start(builder):
return TransposeConvOptionsStart(builder)
def TransposeConvOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0)
@@ -16451,6 +16484,9 @@ def AddStrideH(builder, strideH):
def TransposeConvOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(3, fusedActivationFunction, 0)
def AddFusedActivationFunction(builder, fusedActivationFunction):
return TransposeConvOptionsAddFusedActivationFunction(builder, fusedActivationFunction)
+ def TransposeConvOptionsAddQuantizedBiasType(builder, quantizedBiasType): builder.PrependInt8Slot(4, quantizedBiasType, 0)
+ def AddQuantizedBiasType(builder, quantizedBiasType):
+     return TransposeConvOptionsAddQuantizedBiasType(builder, quantizedBiasType)
def TransposeConvOptionsEnd(builder): return builder.EndObject()
def End(builder):
return TransposeConvOptionsEnd(builder)
@@ -16463,6 +16499,7 @@ def __init__(self):
self.strideW = 0 # type: int
self.strideH = 0 # type: int
self.fusedActivationFunction = 0 # type: int
+ self.quantizedBiasType = 0  # type: int

@classmethod
def InitFromBuf(cls, buf, pos):
@@ -16484,6 +16521,7 @@ def _UnPack(self, transposeConvOptions):
self.strideW = transposeConvOptions.StrideW()
self.strideH = transposeConvOptions.StrideH()
self.fusedActivationFunction = transposeConvOptions.FusedActivationFunction()
+ self.quantizedBiasType = transposeConvOptions.QuantizedBiasType()

# TransposeConvOptionsT
def Pack(self, builder):
@@ -16492,6 +16530,7 @@ def Pack(self, builder):
TransposeConvOptionsAddStrideW(builder, self.strideW)
TransposeConvOptionsAddStrideH(builder, self.strideH)
TransposeConvOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
+ TransposeConvOptionsAddQuantizedBiasType(builder, self.quantizedBiasType)
transposeConvOptions = TransposeConvOptionsEnd(builder)
return transposeConvOptions
# automatically generated by the FlatBuffers compiler, do not modify
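
A round-trip sketch of the new field through the flatbuffer object API (hypothetical usage, not part of this commit; it assumes the flatbuffers package and this generated module are importable):

import flatbuffers
from tensorflow.lite.python import schema_py_generated as schema_fb

# Populate the object-API wrapper, then serialize it.
opts = schema_fb.TransposeConvOptionsT()
opts.quantizedBiasType = 2  # integer type code; see TFLITE_TYPE in codegen/operators/constants.py

builder = flatbuffers.Builder(0)
builder.Finish(opts.Pack(builder))

# Read the value back through the accessor added in this commit.
root = schema_fb.TransposeConvOptions.GetRootAsTransposeConvOptions(builder.Output(), 0)
assert root.QuantizedBiasType() == 2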