lite: Switch to Flatbuffer 2.0.5
Update TFLite build rules to use FlatBuffers v2.0.5. The following changes are also made:
- Updated schema_generated.h for 2.0.5.
- Fixed a regression in flatbuffer_export.cc.
- Updated the tensor comparison logic in quantize_model_test.cc.

PiperOrigin-RevId: 454483837
parent 443fe702c9
commit 625a4045bc
@@ -34,6 +34,7 @@
 * tflite SelectV2 now supports 5D.
 * tf.einsum is supported with multiple unknown shapes.
 * tf.unsortedsegmentprod op is supported.
+* Upgrade Flatbuffers v2.0.5 from v1.12.0
 * `tf.keras`:

 * Added `tf.keras.models.experimental.SharpnessAwareMinimization`. This
@@ -785,17 +785,19 @@ Optional<BufferOffset<tflite::Tensor>> Translator::BuildTensorFromType(
       GetTFLiteType(tensor_type.getElementType()).ValueOrDie();
   BufferOffset<tflite::QuantizationParameters> q_params = 0;
   if (auto qtype = element_type.dyn_cast<mlir::quant::UniformQuantizedType>()) {
+    std::vector<float> scales = {static_cast<float>(qtype.getScale())};
+    std::vector<int64_t> zero_points = {qtype.getZeroPoint()};
     q_params = tflite::CreateQuantizationParameters(
-        builder_, /*min=*/0, /*max=*/0,
-        builder_.CreateVector<float>({static_cast<float>(qtype.getScale())}),
-        builder_.CreateVector<int64_t>({qtype.getZeroPoint()}));
+        builder_, /*min=*/0, /*max=*/0, builder_.CreateVector<float>(scales),
+        builder_.CreateVector<int64_t>(zero_points));
   } else if (auto qtype =
                  element_type
                      .dyn_cast<mlir::quant::CalibratedQuantizedType>()) {
+    std::vector<float> mins = {static_cast<float>(qtype.getMin())};
+    std::vector<float> maxs = {static_cast<float>(qtype.getMax())};
     q_params = tflite::CreateQuantizationParameters(
-        builder_,
-        builder_.CreateVector<float>({static_cast<float>(qtype.getMin())}),
-        builder_.CreateVector<float>({static_cast<float>(qtype.getMax())}));
+        builder_, builder_.CreateVector<float>(mins),
+        builder_.CreateVector<float>(maxs));
   }
   return tflite::CreateTensor(
       builder_, builder_.CreateVector(shape), tflite_element_type,
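The fix above follows a single pattern: build a named std::vector first and hand that to FlatBufferBuilder::CreateVector, rather than passing a braced temporary whose overload resolution and lifetime are less obvious under the FlatBuffers 2.x headers. A minimal sketch of that pattern, assuming only the flatbuffers runtime header; the helper name is hypothetical and not part of the exporter:

#include <vector>
#include "flatbuffers/flatbuffers.h"

// Serializes a single quantization scale by materializing named, explicitly
// typed storage before the CreateVector call.
flatbuffers::Offset<flatbuffers::Vector<float>> PackScale(
    flatbuffers::FlatBufferBuilder& builder, float scale) {
  std::vector<float> scales = {scale};
  return builder.CreateVector<float>(scales);
}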
@@ -869,20 +871,23 @@ Optional<BufferOffset<tflite::Tensor>> Translator::BuildTensor(

   BufferOffset<tflite::QuantizationParameters> q_params;
   if (auto qtype = element_type.dyn_cast<mlir::quant::UniformQuantizedType>()) {
+    std::vector<float> scales = {static_cast<float>(qtype.getScale())};
+    std::vector<int64_t> zero_points = {qtype.getZeroPoint()};
     q_params = tflite::CreateQuantizationParameters(
         // min and max values are not stored in the quantized type from MLIR, so
         // both are set to 0 in the flatbuffer when they are exported.
-        builder_, /*min=*/0, /*max=*/0,
-        builder_.CreateVector<float>({static_cast<float>(qtype.getScale())}),
-        builder_.CreateVector<int64_t>({qtype.getZeroPoint()}));
+        builder_, /*min=*/0, /*max=*/0, builder_.CreateVector<float>(scales),
+        builder_.CreateVector<int64_t>(zero_points));
   } else if (auto qtype =
                  element_type
                      .dyn_cast<mlir::quant::UniformQuantizedPerAxisType>()) {
     std::vector<float> scales(qtype.getScales().begin(),
                               qtype.getScales().end());
+    std::vector<int64_t> zero_points(qtype.getZeroPoints().begin(),
+                                     qtype.getZeroPoints().end());
     q_params = tflite::CreateQuantizationParameters(
         builder_, /*min=*/0, /*max=*/0, builder_.CreateVector<float>(scales),
-        builder_.CreateVector<int64_t>(qtype.getZeroPoints()),
+        builder_.CreateVector<int64_t>(zero_points),
         tflite::QuantizationDetails_NONE, /*details=*/0,
         qtype.getQuantizedDimension());
   } else if (quant_parameters.hasValue()) {
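In the per-axis branch, the quantized type exposes its scales and zero points as read-only ranges, so the fixed code copies them into owned vectors before serialization. A minimal sketch of that copy, with plain std::vector inputs standing in for the ranges returned by getScales()/getZeroPoints(); the helper name and parameter types are illustrative only:

#include <cstdint>
#include <utility>
#include <vector>
#include "flatbuffers/flatbuffers.h"

// Copies per-axis quantization data into owned vectors of the FlatBuffer
// element types, then serializes both with one builder.
std::pair<flatbuffers::Offset<flatbuffers::Vector<float>>,
          flatbuffers::Offset<flatbuffers::Vector<int64_t>>>
PackPerAxis(flatbuffers::FlatBufferBuilder& builder,
            const std::vector<double>& src_scales,
            const std::vector<int64_t>& src_zero_points) {
  std::vector<float> scales(src_scales.begin(), src_scales.end());
  std::vector<int64_t> zero_points(src_zero_points.begin(),
                                   src_zero_points.end());
  return {builder.CreateVector<float>(scales),
          builder.CreateVector<int64_t>(zero_points)};
}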
@@ -210,7 +210,7 @@ void ExpectSameModels(const ModelT& model, const ModelT& expected_model) {
       continue;
     }
     ExpectEqualTensor(tensor.get(), expected_tensor);
-    if (tensor->buffer >= 0) {
+    if (expected_tensor->buffer > 0) {
       const int buffer_idx = tensor->buffer;
       const int expected_buffer_idx = expected_tensor->buffer;
       const auto buffer = model.buffers[buffer_idx].get()->data;
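The test change drops the old tensor->buffer >= 0 guard, which is presumably always true once the generated object API stores buffer indices as unsigned integers, and instead skips the comparison when the expected tensor references buffer 0, the conventional empty buffer. A stand-alone sketch of that guard, with simplified stand-ins for the generated ModelT/TensorT/BufferT types (a simplification of what the test does with the buffer data):

#include <cstdint>
#include <vector>

struct BufferT { std::vector<uint8_t> data; };    // stand-in for the generated type
struct TensorT { uint32_t buffer = 0; };          // stand-in for the generated type
struct ModelT { std::vector<BufferT> buffers; };  // stand-in for the generated type

// Compares backing buffers only when both tensors reference a real buffer;
// index 0 is the shared empty buffer and is never dereferenced.
bool SameBufferContents(const ModelT& model, const TensorT& tensor,
                        const ModelT& expected_model,
                        const TensorT& expected_tensor) {
  if (tensor.buffer == 0 || expected_tensor.buffer == 0) return true;
  return model.buffers[tensor.buffer].data ==
         expected_model.buffers[expected_tensor.buffer].data;
}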
@@ -112,7 +112,8 @@ int SingleOpModel::AddIntermediate(TensorType type,
       CreateQuantizationParameters(builder_, /*min=*/0, /*max=*/0,
                                    builder_.CreateVector<float>(scale),
                                    builder_.CreateVector<int64_t>(zero_point));
-  tensors_.push_back(CreateTensor(builder_, builder_.CreateVector<int>({}),
+  std::vector<int> empty;
+  tensors_.push_back(CreateTensor(builder_, builder_.CreateVector<int>(empty),
                                   type,
                                   /*buffer=*/0,
                                   /*name=*/0, q_params, false));
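The same named-vector idea covers the empty case: instead of CreateVector<int>({}), whose braced argument leaves the element type and overload choice to the compiler, the updated code passes an explicitly typed empty vector. A minimal sketch, assuming only the flatbuffers runtime; the helper name is hypothetical:

#include <vector>
#include "flatbuffers/flatbuffers.h"

// Serializes an empty int vector (e.g. the shape of a scalar tensor) from
// named, explicitly typed storage rather than a braced empty list.
flatbuffers::Offset<flatbuffers::Vector<int>> EmptyShape(
    flatbuffers::FlatBufferBuilder& builder) {
  std::vector<int> empty;
  return builder.CreateVector<int>(empty);
}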
@@ -268,12 +268,12 @@ class SingleOpModel {
       if (i < t.shape.size() &&
           t.format[t.traversal_order[i]] == kTfLiteDimSparseCSR) {
         auto array_segments =
-            CreateInt32Vector(builder_,
-                              builder_.CreateVector(dim_metadata[metadata_idx]))
+            CreateInt32Vector(builder_, builder_.CreateVector<int>(
+                                            dim_metadata[metadata_idx]))
                 .Union();
         auto array_indices =
-            CreateInt32Vector(
-                builder_, builder_.CreateVector(dim_metadata[metadata_idx + 1]))
+            CreateInt32Vector(builder_, builder_.CreateVector<int>(
+                                            dim_metadata[metadata_idx + 1]))
                 .Union();
         fb_dim_metadata[i] = CreateDimensionMetadata(
             builder_, DimensionType_SPARSE_CSR, 0,
@@ -286,8 +286,8 @@ class SingleOpModel {
     }

     flatbuffers::Offset<SparsityParameters> s_param = CreateSparsityParameters(
-        builder_, builder_.CreateVector(t.traversal_order),
-        builder_.CreateVector(t.block_map),
+        builder_, builder_.CreateVector<int>(t.traversal_order),
+        builder_.CreateVector<int>(t.block_map),
         builder_.CreateVector(fb_dim_metadata));

     int buffer_id = 0;
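The sparsity-related edits are the other half of the pattern: the element type is spelled out on CreateVector, presumably so template deduction does not depend on how the 2.0 overload set treats an element of a nested container. A minimal sketch with placeholder data; the helper name is hypothetical:

#include <vector>
#include "flatbuffers/flatbuffers.h"

// Serializes one row of CSR dimension metadata with the element type written
// out explicitly, mirroring the CreateVector<int>(...) calls above.
flatbuffers::Offset<flatbuffers::Vector<int>> PackDimMetadata(
    flatbuffers::FlatBufferBuilder& builder,
    const std::vector<std::vector<int>>& dim_metadata, int metadata_idx) {
  return builder.CreateVector<int>(dim_metadata[metadata_idx]);
}

// Example: PackDimMetadata(builder, {{0, 2, 4}, {0, 1, 0, 1}}, 0) serializes
// the segments row; index 1 would serialize the indices row.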
@@ -382,9 +382,11 @@ class SingleOpModel {
       float min, max, scaling_factor;
       tensor_utils::SymmetricQuantizeFloats(
           sparse_data.data(), length, q.data(), &min, &max, &scaling_factor);
+      std::vector<float> scales{scaling_factor};
+      std::vector<int64_t> zero_points{0};
       q_params = CreateQuantizationParameters(
-          builder_, 0, 0, builder_.CreateVector<float>({scaling_factor}),
-          builder_.CreateVector<int64_t>({0}));
+          builder_, 0, 0, builder_.CreateVector<float>(scales),
+          builder_.CreateVector<int64_t>(zero_points));
       auto data_buffer = builder_.CreateVector(
           reinterpret_cast<const uint8_t*>(q.data()), q.size());
       buffers_.push_back(CreateBuffer(builder_, data_buffer));
@@ -392,9 +394,11 @@ class SingleOpModel {
       CHECK_EQ(t.type, TensorType_INT8)
           << "The INT8 quantization is only supported for sparsified tensor";
       auto q = Quantize<int8_t>(sparse_data, t.scale, t.zero_point);
+      std::vector<float> scales{t.scale};
+      std::vector<int64_t> zero_points{0};
       q_params = CreateQuantizationParameters(
-          builder_, t.min, t.max, builder_.CreateVector<float>({t.scale}),
-          builder_.CreateVector<int64_t>({0}));
+          builder_, t.min, t.max, builder_.CreateVector<float>(scales),
+          builder_.CreateVector<int64_t>(zero_points));
       auto data_buffer = builder_.CreateVector(
           reinterpret_cast<const uint8_t*>(q.data()), q.size());
       buffers_.push_back(CreateBuffer(builder_, data_buffer));
@@ -710,10 +714,11 @@ class SingleOpModel {
         t.max = 0;
       }

+      std::vector<float> scales{t.scale};
+      std::vector<int64_t> zero_points{t.zero_point};
       q_params = CreateQuantizationParameters(
-          builder_, /*min=*/0, /*max=*/0,
-          builder_.CreateVector<float>({t.scale}),
-          builder_.CreateVector<int64_t>({t.zero_point}));
+          builder_, /*min=*/0, /*max=*/0, builder_.CreateVector<float>(scales),
+          builder_.CreateVector<int64_t>(zero_points));
     }

     int buffer_id = 0;
@@ -77,6 +77,11 @@ py_test(
 #         "//tensorflow/lite/schema:schema_generated.h.oss",
 #     ],
 #     python_version = "PY3",
+#     # TODO(b/217577534): Enable this TAP with FlatBuffer 2.0 migration.
+#     tags = [
+#         "manual",
+#         "notap",
+#     ],
 #     deps = [
 #         "//testing/pybase",
 #         "@absl_py//absl/flags",
File diff suppressed because it is too large
@@ -23,7 +23,7 @@ OverridableFetchContent_Declare(
   flatbuffers
   GIT_REPOSITORY https://github.com/google/flatbuffers
   # Sync with tensorflow/third_party/flatbuffers/workspace.bzl
-  GIT_TAG v1.12.0
+  GIT_TAG v2.0.5
   GIT_SHALLOW TRUE
   GIT_PROGRESS TRUE
   SOURCE_DIR "${CMAKE_BINARY_DIR}/flatbuffers"
@@ -4,7 +4,7 @@

 absl-py ~= 0.13.0
 astunparse ~= 1.6.3
-flatbuffers < 2.0 # NOTE: We cannot support faltbuffers 2.0 until internal code updates
+flatbuffers ~= 2.0
 google_pasta ~= 0.2
 h5py ~= 3.6.0 # NOTE: Earliest version for Python 3.10
 keras_preprocessing ~= 1.1.2
@@ -75,9 +75,7 @@ def standard_or_nightly(standard, nightly):
 REQUIRED_PACKAGES = [
     'absl-py >= 1.0.0',
     'astunparse >= 1.6.0',
-    # TODO(b/187981032): remove the constraint for 2.0 once the incompatibile
-    # issue is resolved.
-    'flatbuffers >= 1.12, <2',
+    'flatbuffers >= 2.0',
     # TODO(b/213222745) gast versions above 0.4.0 break TF's tests
     'gast >= 0.2.1, <= 0.4.0',
     'google_pasta >= 0.1.1',
third_party/flatbuffers/build_defs.bzl (vendored, 8 changed lines)
@@ -279,6 +279,11 @@ def _gen_flatbuffer_srcs_impl(ctx):
     else:
         no_includes_statement = []

+    if ctx.attr.language_flag == "--python":
+        onefile_statement = ["--gen-onefile"]
+    else:
+        onefile_statement = []
+
     # Need to generate all files in a directory.
     if not outputs:
         outputs = [ctx.actions.declare_directory("{}_all".format(ctx.attr.name))]
@@ -314,6 +319,7 @@ def _gen_flatbuffer_srcs_impl(ctx):
             "-I",
             ctx.bin_dir.path,
         ] + no_includes_statement +
+        onefile_statement +
         include_paths_cmd_line + [
             "--no-union-value-namespacing",
             "--gen-object-api",
@@ -433,6 +439,8 @@ def flatbuffer_py_library(
         deps = deps,
         include_paths = include_paths,
     )
+
+    # TODO(b/235550563): Remove the concatnation rule with 2.0.6 update.
     all_srcs_no_include = "{}_srcs_no_include".format(name)
     _gen_flatbuffer_srcs(
         name = all_srcs_no_include,
third_party/flatbuffers/flatbuffers.BUILD (vendored, 40 changed lines)
@@ -10,10 +10,15 @@ exports_files(["LICENSE.txt"])
 licenses(["notice"])

 config_setting(
-    name = "freebsd",
+    name = "platform_freebsd",
     values = {"cpu": "freebsd"},
 )

+config_setting(
+    name = "platform_openbsd",
+    values = {"cpu": "openbsd"},
+)
+
 config_setting(
     name = "windows",
     values = {"cpu": "x64_windows"},
@@ -35,8 +40,16 @@ cc_library(
 filegroup(
     name = "public_headers",
     srcs = [
+        "include/flatbuffers/allocator.h",
+        "include/flatbuffers/array.h",
         "include/flatbuffers/base.h",
+        "include/flatbuffers/bfbs_generator.h",
+        "include/flatbuffers/buffer.h",
+        "include/flatbuffers/buffer_ref.h",
         "include/flatbuffers/code_generators.h",
+        "include/flatbuffers/default_allocator.h",
+        "include/flatbuffers/detached_buffer.h",
+        "include/flatbuffers/flatbuffer_builder.h",
         "include/flatbuffers/flatbuffers.h",
         "include/flatbuffers/flexbuffers.h",
         "include/flatbuffers/hash.h",
@@ -46,7 +59,13 @@ filegroup(
         "include/flatbuffers/reflection_generated.h",
         "include/flatbuffers/registry.h",
         "include/flatbuffers/stl_emulation.h",
+        "include/flatbuffers/string.h",
+        "include/flatbuffers/struct.h",
+        "include/flatbuffers/table.h",
         "include/flatbuffers/util.h",
+        "include/flatbuffers/vector.h",
+        "include/flatbuffers/vector_downward.h",
+        "include/flatbuffers/verifier.h",
     ],
     visibility = ["//:__subpackages__"],
 )
@@ -65,7 +84,7 @@ cc_library(
 cc_binary(
     name = "flatc",
     linkopts = select({
-        ":freebsd": [
+        ":platform_freebsd": [
             "-lm",
         ],
         ":windows": [],
@@ -92,11 +111,24 @@ filegroup(
 cc_library(
     name = "runtime_cc",
     hdrs = [
+        "include/flatbuffers/allocator.h",
+        "include/flatbuffers/array.h",
         "include/flatbuffers/base.h",
+        "include/flatbuffers/buffer.h",
+        "include/flatbuffers/buffer_ref.h",
+        "include/flatbuffers/default_allocator.h",
+        "include/flatbuffers/detached_buffer.h",
+        "include/flatbuffers/flatbuffer_builder.h",
         "include/flatbuffers/flatbuffers.h",
         "include/flatbuffers/flexbuffers.h",
         "include/flatbuffers/stl_emulation.h",
+        "include/flatbuffers/string.h",
+        "include/flatbuffers/struct.h",
+        "include/flatbuffers/table.h",
         "include/flatbuffers/util.h",
+        "include/flatbuffers/vector.h",
+        "include/flatbuffers/vector_downward.h",
+        "include/flatbuffers/verifier.h",
     ],
     linkstatic = 1,
     strip_include_prefix = "/include",
@@ -107,9 +139,11 @@ flatbuffer_py_strip_prefix_srcs(
     name = "flatbuffer_py_strip_prefix",
     srcs = [
         "python/flatbuffers/__init__.py",
+        "python/flatbuffers/_version.py",
         "python/flatbuffers/builder.py",
         "python/flatbuffers/compat.py",
         "python/flatbuffers/encode.py",
         "python/flatbuffers/flexbuffers.py",
         "python/flatbuffers/number_types.py",
+        "python/flatbuffers/packer.py",
         "python/flatbuffers/table.py",
@@ -122,9 +156,11 @@ filegroup(
     name = "runtime_py_srcs",
     srcs = [
         "__init__.py",
+        "_version.py",
         "builder.py",
         "compat.py",
         "encode.py",
         "flexbuffers.py",
         "number_types.py",
+        "packer.py",
         "table.py",
third_party/flatbuffers/workspace.bzl (vendored, 6 changed lines)
@@ -5,9 +5,9 @@ load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
 def repo():
     tf_http_archive(
         name = "flatbuffers",
-        strip_prefix = "flatbuffers-1.12.0",
-        sha256 = "62f2223fb9181d1d6338451375628975775f7522185266cd5296571ac152bc45",
-        urls = tf_mirror_urls("https://github.com/google/flatbuffers/archive/v1.12.0.tar.gz"),
+        strip_prefix = "flatbuffers-2.0.5",
+        sha256 = "b01e97c988c429e164c5c7df9e87c80007ca87f593c0d73733ba536ddcbc8f98",
+        urls = tf_mirror_urls("https://github.com/google/flatbuffers/archive/v2.0.5.tar.gz"),
         build_file = "//third_party/flatbuffers:flatbuffers.BUILD",
         system_build_file = "//third_party/flatbuffers:BUILD.system",
         link_files = {