Add torch_cpu specific flag for debug info (#57190)
Summary: Right now we use `REL_WITH_DEB_INFO=1` on Linux CI binary builds. This causes intermittent failures on CUDA builds, since the debug information increases the load on the linker. This adds a workaround: a flag that enables debug info only for the target we actually want it for (`libtorch_cpu.so`; all the other binaries are stripped of their debug info after building).

Example failures (from [the hud](https://ezyang.github.io/pytorch-ci-hud/build2/pytorch-nightly?mode=nightly)):
* https://app.circleci.com/pipelines/github/pytorch/pytorch/311785/workflows/df640957-54b0-4592-aeef-6d5baee503ae/jobs/12932229
* https://app.circleci.com/pipelines/github/pytorch/pytorch/311784/workflows/e3b487d6-fb46-4a5d-a2d5-22eec328b678/jobs/12932228
* https://app.circleci.com/pipelines/github/pytorch/pytorch/311784/workflows/e3b487d6-fb46-4a5d-a2d5-22eec328b678/jobs/12932227

Pull Request resolved: https://github.com/pytorch/pytorch/pull/57190

Pulled By: driazati

Reviewed By: janeyx99

Differential Revision: D28085550

fbshipit-source-id: 0fc5b3e769b10c0dd3811717f968d0c933667361
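A minimal sketch of how the new switch could be enabled, assuming a standalone CMake configure step (the build-directory layout, the generator, and the `setup.py` env-var passthrough are assumptions; only the `BUILD_LIBTORCH_CPU_WITH_DEBUG` option name comes from the diff below):

# Configure-time cache variable: only libtorch_cpu is compiled with -g,
# the rest of the build stays a plain Release build.
cmake -DBUILD_LIBTORCH_CPU_WITH_DEBUG=ON -DCMAKE_BUILD_TYPE=Release -GNinja ..

# When building through setup.py, the same name should also work as an
# environment variable (assumption: BUILD_* env vars are forwarded to CMake).
BUILD_LIBTORCH_CPU_WITH_DEBUG=1 python setup.py bdist_wheel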
parent d3ffe9ab6b
commit 21be40b390
@@ -64,5 +64,7 @@ popd
 retry git clone -q https://github.com/pytorch/builder.git "$BUILDER_ROOT"
 pushd "$BUILDER_ROOT"
 echo "Using builder from "
+# TODO: Remove before landing, this is just for testing
+git checkout driazati/torch_debug_flag
 git --no-pager log --max-count 1
 popd
@@ -282,6 +282,7 @@ cmake_dependent_option(
 option(USE_TBB "Use TBB" OFF)
 option(ONNX_ML "Enable traditional ONNX ML API." ON)
 option(HAVE_SOVERSION "Whether to add SOVERSION to the shared objects" OFF)
+option(BUILD_LIBTORCH_CPU_WITH_DEBUG "Enable RelWithDebInfo for libtorch_cpu target only" OFF)
 cmake_dependent_option(
   USE_DEPLOY "Build embedded torch::deploy interpreter. See torch/csrc/deploy/README.md for more info." OFF
   "BUILD_PYTHON" OFF)
@@ -770,6 +770,15 @@ if(HAVE_SOVERSION)
 endif()
 torch_compile_options(torch_cpu) # see cmake/public/utils.cmake

+
+if(CMAKE_COMPILER_IS_GNUCXX AND BUILD_LIBTORCH_CPU_WITH_DEBUG)
+  # To enable debug fission we need to build libtorch_cpu with debug info on,
+  # but this increases link time and peak memory usage if we use the
+  # REL_WITH_DEB_INFO env var since that enables it for everything, but it's
+  # only really necessary for libtorch_cpu.
+  target_compile_options(torch_cpu PRIVATE "-g")
+endif()
+
 if(USE_LLVM AND LLVM_FOUND)
   llvm_map_components_to_libnames(LLVM_LINK_LIBS
     support core analysis executionengine instcombine
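As a quick sanity check after such a build (a sketch; the output path and the `libtorch_cuda.so` name are assumptions), `readelf` can confirm that debug sections end up only in `libtorch_cpu.so` while the other shared objects stay stripped:

# Count .debug_* sections per library; expect a non-zero count only for torch_cpu.
readelf -S build/lib/libtorch_cpu.so  | grep -c '\.debug_'
readelf -S build/lib/libtorch_cuda.so | grep -c '\.debug_'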