Enable AArch64 CI scripts to be used for local dev (#143190)
- Allow the user to specify a custom ComputeLibrary directory, which is then built rather than checking out a clean copy.
- Remove `setup.py clean` from the build. The CI environment should already be clean, and removing this enables incremental rebuilds.
- Use all cores for building ComputeLibrary.

Mostly a port of https://github.com/pytorch/builder/pull/2028 with the conda part removed, because aarch64_ci_setup.sh has changed and can now handle being called twice.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/143190
Approved by: https://github.com/aditew01, https://github.com/fadara01, https://github.com/malfet
Co-authored-by: David Svantesson-Yeung <David.Svantesson-Yeung@arm.com>
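For readers skimming the diff below, here is a condensed sketch of the ComputeLibrary build flow this commit introduces: reuse an existing checkout when ACL_SOURCE_DIR points at one, otherwise clone a fresh copy, then build with every available core. The paths, the ACL_SOURCE_DIR override, and the scons invocation are taken from the diff; the exact clone arguments (branch, depth) are not visible in the hunks shown, so they are abbreviated here, and the flag list is truncated.

# Condensed sketch of the patched flow; not the full CI script.
import os
import shutil
from subprocess import check_call

acl_install_dir = "/acl"
acl_checkout_dir = os.getenv("ACL_SOURCE_DIR", "ComputeLibrary")

# Start from a clean install dir so repeated local runs do not trip over makedirs().
if os.path.isdir(acl_install_dir):
    shutil.rmtree(acl_install_dir)

# Clone only when no usable source tree was supplied; a pre-populated
# ACL_SOURCE_DIR (the local-dev case) is built as-is.
if not os.path.isdir(acl_checkout_dir) or not len(os.listdir(acl_checkout_dir)):
    # Branch/depth arguments are elided here; see the full script for the exact clone.
    check_call(
        ["git", "clone", "https://github.com/ARM-software/ComputeLibrary.git", acl_checkout_dir]
    )

# Build with all available cores instead of a fixed -j8.
acl_build_flags = ["build=native"]  # remaining scons flags as in the script
check_call(
    ["scons", "Werror=1", f"-j{os.cpu_count()}"] + acl_build_flags,
    cwd=acl_checkout_dir,
)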
This commit is contained in:
parent
25149cd173
commit
2225231a14
@@ -31,8 +31,10 @@ def build_ArmComputeLibrary() -> None:
         "build=native",
     ]
     acl_install_dir = "/acl"
-    acl_checkout_dir = "ComputeLibrary"
-    os.makedirs(acl_install_dir)
+    acl_checkout_dir = os.getenv("ACL_SOURCE_DIR", "ComputeLibrary")
+    if os.path.isdir(acl_install_dir):
+        shutil.rmtree(acl_install_dir)
+    if not os.path.isdir(acl_checkout_dir) or not len(os.listdir(acl_checkout_dir)):
         check_call(
             [
                 "git",
@@ -47,11 +49,10 @@ def build_ArmComputeLibrary() -> None:
         )

     check_call(
-        ["scons", "Werror=1", "-j8", f"build_dir=/{acl_install_dir}/build"]
-        + acl_build_flags,
+        ["scons", "Werror=1", f"-j{os.cpu_count()}"] + acl_build_flags,
         cwd=acl_checkout_dir,
     )
-    for d in ["arm_compute", "include", "utils", "support", "src"]:
+    for d in ["arm_compute", "include", "utils", "support", "src", "build"]:
         shutil.copytree(f"{acl_checkout_dir}/{d}", f"{acl_install_dir}/{d}")


@@ -203,8 +204,10 @@ if __name__ == "__main__":
     ).decode()

     print("Building PyTorch wheel")
-    build_vars = "MAX_JOBS=5 CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
-    os.system("cd /pytorch; python setup.py clean")
+    build_vars = "CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
+    # MAX_JOB=5 is not required for CPU backend (see commit 465d98b)
+    if enable_cuda:
+        build_vars = "MAX_JOBS=5 " + build_vars

     override_package_version = os.getenv("OVERRIDE_PACKAGE_VERSION")
     desired_cuda = os.getenv("DESIRED_CUDA")
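To make the local-dev use case concrete, a hypothetical invocation could look like the following. The script filename and the checkout path are placeholders (the name of the modified file is not shown on this page); the ACL_SOURCE_DIR override comes from the diff above.

# Hypothetical local run of the patched build script; names below are placeholders.
import os
import subprocess

env = dict(os.environ)
# Reuse an existing ComputeLibrary checkout instead of letting the script clone one.
env["ACL_SOURCE_DIR"] = os.path.expanduser("~/src/ComputeLibrary")  # assumed path

# With `setup.py clean` removed, re-running this performs an incremental
# PyTorch rebuild rather than starting from scratch.
subprocess.check_call(["python", "aarch64_wheel_ci_build.py"], env=env)  # placeholder script name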