[bazel] Build flatbuffers within bazel (#151364)

This is similar to how we handle protobufs, and it makes it more convenient for bazel users to manage their own version of flatbuffers. (Flatbuffers is very picky about the generated code matching the runtime.) Instead of using the checked-in generated code, we generate it on the fly.

This is relevant to https://github.com/pytorch/pytorch/issues/112903, because having the version of flatbuffers tied to pytorch will make pytorch difficult to use as an external workspace.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/151364
Approved by: https://github.com/malfet
This commit is contained in:
Jared Hance 2025-04-17 18:33:48 +00:00 committed by PyTorch MergeBot
parent 3a6b3c8e0e
commit 055e59e709

View File

@ -1,4 +1,5 @@
load("@bazel_skylib//lib:paths.bzl", "paths")
load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
load("@pybind11_bazel//:build_defs.bzl", "pybind_extension")
load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
load("@rules_python//python:defs.bzl", "py_library", "py_test")
@ -659,6 +660,15 @@ cc_library(
# torch
torch_cuda_headers = glob(["torch/csrc/cuda/*.h"])
# Generate the FlatBuffers C++ bindings for the mobile bytecode schema at
# build time instead of relying on checked-in generated code, so the
# generated header always matches the flatbuffers runtime Bazel provides
# (see commit message: flatbuffers requires generated code and runtime to
# match).
flatbuffer_cc_library(
    name = "torch_flatbuffers",
    srcs = [
        "torch/csrc/jit/serialization/mobile_bytecode.fbs",
    ],
    # flatc flags: emit C++ code with mutable accessors and C++11
    # scoped enums (enum class) — presumably chosen to match the style of
    # the previously checked-in generated header; verify against flatc docs.
    flatc_args = ["--cpp", "--gen-mutable", "--scoped-enums"],
    # Place the generated header under the same path as the schema so
    # existing #include paths keep working.
    out_prefix = "torch/csrc/jit/serialization/",
)
cc_library(
name = "torch_headers",
hdrs = if_cuda(
@ -672,6 +682,7 @@ cc_library(
],
exclude = [
"torch/csrc/*/generated/*.h",
"torch/csrc/jit/serialization/mobile_bytecode_generated.h",
] + torch_cuda_headers,
) + GENERATED_AUTOGRAD_CPP + [":version_h"],
includes = [
@ -686,6 +697,7 @@ cc_library(
deps = [
":aten_headers",
":caffe2_headers",
":torch_flatbuffers",
"//c10",
"@com_github_google_flatbuffers//:flatbuffers",
"@local_config_python//:python_headers",