Remove caffe2 mobile (#84338)
We're no longer building Caffe2 mobile as part of our CI, and it adds a lot of clutter to our makefiles. Any lingering internal dependencies will use the buck build and so won't be affected. Pull Request resolved: https://github.com/pytorch/pytorch/pull/84338 Approved by: https://github.com/dreiss
This commit is contained in: parent 9669e3c6ec, commit e0229d6517
@@ -1,54 +0,0 @@
#!/bin/bash

# shellcheck source=./common.sh
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

# Anywhere except $ROOT_DIR should work. This is so the python import doesn't
# get confused by any 'caffe2' directory in cwd
cd "$INSTALL_PREFIX"

if [[ $BUILD_ENVIRONMENT == *-cuda* ]]; then
  num_gpus=$(nvidia-smi -L | wc -l)
elif [[ $BUILD_ENVIRONMENT == *-rocm* ]]; then
  num_gpus=$(rocminfo | grep 'Device Type.*GPU' | wc -l)
else
  num_gpus=0
fi
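
A brief aside on the counting trick above, as a hedged sketch (the device name is illustrative, not from this repo): `nvidia-smi -L` prints exactly one line per visible device, and the ROCm branch filters `rocminfo` agent records the same way, so `wc -l` yields the device count.

# Illustrative `nvidia-smi -L` output on a one-GPU machine:
#   GPU 0: Tesla M60 (UUID: GPU-xxxxxxxx-...)
num_gpus=$(nvidia-smi -L | wc -l)   # -> 1 in this example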

caffe2_pypath="$(cd /usr && $PYTHON -c 'import os; import caffe2; print(os.path.dirname(os.path.realpath(caffe2.__file__)))')"

# Resnet50
if (( $num_gpus == 0 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 128 --epoch_size 12800 --num_epochs 2 --use_cpu
fi
if (( $num_gpus >= 1 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 128 --epoch_size 12800 --num_epochs 2 --num_gpus 1
  # Let's skip the fp16 bench runs for now, as recompiling the miopen kernels can take 10+ min.
  # We can resume when we (1) bind-mount the miopen cache folder in jenkins; (2) install the pre-compiled miopen kernel library in the docker image
  # "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 256 --epoch_size 25600 --num_epochs 2 --num_gpus 1 --float16_compute --dtype float16
fi
if (( $num_gpus >= 4 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 512 --epoch_size 51200 --num_epochs 2 --num_gpus 4
fi

# ResNext
if (( $num_gpus == 0 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --resnext_num_groups 32 --resnext_width_per_group 4 --num_layers 101 --train_data null --batch_size 32 --epoch_size 3200 --num_epochs 2 --use_cpu
fi
if (( $num_gpus >= 1 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --resnext_num_groups 32 --resnext_width_per_group 4 --num_layers 101 --train_data null --batch_size 32 --epoch_size 3200 --num_epochs 2 --num_gpus 1
  # "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --resnext_num_groups 32 --resnext_width_per_group 4 --num_layers 101 --train_data null --batch_size 64 --epoch_size 3200 --num_epochs 2 --num_gpus 1 --float16_compute --dtype float16
fi
if (( $num_gpus >= 4 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --resnext_num_groups 32 --resnext_width_per_group 4 --num_layers 101 --train_data null --batch_size 128 --epoch_size 12800 --num_epochs 2 --num_gpus 4
fi

# Shufflenet
if (( $num_gpus == 0 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 32 --epoch_size 3200 --num_epochs 2 --use_cpu --model shufflenet
fi
if (( $num_gpus >= 1 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 32 --epoch_size 3200 --num_epochs 2 --num_gpus 1 --model shufflenet
fi
if (( $num_gpus >= 4 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 128 --epoch_size 12800 --num_epochs 2 --num_gpus 4 --model shufflenet
fi
@@ -1,231 +0,0 @@
#!/bin/bash

set -ex

# shellcheck source=./common.sh
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

# CMAKE_ARGS are only passed to 'cmake' and the -Dfoo=bar does not work with
# setup.py, so we build a list of foo=bars and then either convert it to
# -Dfoo=bars or export them before running setup.py
build_args=()
build_to_cmake () {
  cmake_args=()
  for build_arg in $*; do
    cmake_args+=("-D$build_arg")
  done
  echo ${cmake_args[@]}
}
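
A minimal usage sketch of the helper (argument values assumed for illustration); this is the same expansion the cmake invocation later in this script performs:

build_args=("BUILD_TEST=ON" "USE_ZSTD=ON")
cmake "$ROOT_DIR" $(build_to_cmake ${build_args[@]})
# word-splits to: cmake "$ROOT_DIR" -DBUILD_TEST=ON -DUSE_ZSTD=ON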

SCCACHE="$(which sccache)"

# Setup ccache if configured to use it (and not sccache)
if [ -z "${SCCACHE}" ] && which ccache > /dev/null; then
  mkdir -p ./ccache
  ln -sf "$(which ccache)" ./ccache/cc
  ln -sf "$(which ccache)" ./ccache/c++
  ln -sf "$(which ccache)" ./ccache/gcc
  ln -sf "$(which ccache)" ./ccache/g++
  ln -sf "$(which ccache)" ./ccache/x86_64-linux-gnu-gcc
  if [[ "${BUILD_ENVIRONMENT}" == *-cuda* ]]; then
    mkdir -p ./ccache/cuda
    ln -sf "$(which ccache)" ./ccache/cuda/nvcc
  fi
  export CACHE_WRAPPER_DIR="$PWD/ccache"
  export PATH="$CACHE_WRAPPER_DIR:$PATH"
fi
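
Why the symlink farm works, as a hedged sketch (paths illustrative): once the wrapper directory is first on PATH, bare compiler names resolve to ccache, which masquerades as whichever compiler argv[0] names and forwards to the real one.

export PATH="$PWD/ccache:$PATH"
command -v gcc   # -> $PWD/ccache/gcc, a symlink to ccache
gcc -c foo.c     # ccache sees argv[0] == "gcc", checks its cache, then runs the real gcc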

# sccache will fail for CUDA builds if all cores are used for compiling
if [ -z "$MAX_JOBS" ]; then
  if [[ "${BUILD_ENVIRONMENT}" == *-cuda* ]] && [ -n "${SCCACHE}" ]; then
    MAX_JOBS=`expr $(nproc) - 1`
  else
    MAX_JOBS=$(nproc)
  fi
fi
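
Concretely (core count assumed for illustration):

# On an 8-core runner doing a CUDA build with sccache present:
#   MAX_JOBS = expr 8 - 1 = 7   (one core left free for the sccache server)
# On the same runner otherwise: MAX_JOBS = 8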

report_compile_cache_stats() {
  if [[ -n "${SCCACHE}" ]]; then
    "$SCCACHE" --show-stats
  elif which ccache > /dev/null; then
    ccache -s
  fi
}


###############################################################################
# Use special scripts for Android and setup builds
###############################################################################
if [[ "${BUILD_ENVIRONMENT}" == *-android* ]]; then
  export ANDROID_NDK=/opt/ndk
  build_args+=("BUILD_BINARY=ON")
  build_args+=("BUILD_TEST=ON")
  build_args+=("USE_OBSERVERS=ON")
  build_args+=("USE_ZSTD=ON")
  BUILD_CAFFE2_MOBILE=1 "${ROOT_DIR}/scripts/build_android.sh" $(build_to_cmake ${build_args[@]}) "$@"
  exit 0
fi

###############################################################################
# Set parameters
###############################################################################
if [[ "$BUILD_ENVIRONMENT" == *cmake* ]]; then
  build_args+=("BUILD_PYTHON=OFF")
else
  build_args+=("BUILD_PYTHON=ON")
  build_args+=("PYTHON_EXECUTABLE=${PYTHON}")
fi
if [[ $BUILD_ENVIRONMENT == *mkl* ]]; then
  build_args+=("BLAS=MKL")
  build_args+=("USE_MKLDNN=ON")
fi
build_args+=("BUILD_BINARY=ON")
build_args+=("BUILD_TEST=ON")
build_args+=("INSTALL_TEST=ON")
build_args+=("USE_ZSTD=ON")

if [[ $BUILD_ENVIRONMENT == *cuda* ]]; then
  build_args+=("USE_CUDA=ON")
  build_args+=("USE_NNPACK=OFF")

  # Target only our CI GPU machine's CUDA arch to speed up the build
  build_args+=("TORCH_CUDA_ARCH_LIST=Maxwell")

  # Explicitly set path to NVCC such that the symlink to ccache or sccache is used
  if [ -n "${CACHE_WRAPPER_DIR}" ]; then
    build_args+=("CUDA_NVCC_EXECUTABLE=${CACHE_WRAPPER_DIR}/cuda/nvcc")
    build_args+=("CMAKE_CUDA_COMPILER_LAUNCHER=${CACHE_WRAPPER_DIR}/ccache")
  fi

  # Ensure FindCUDA.cmake can infer the right path to the CUDA toolkit.
  # Setting PATH to resolve to the right nvcc alone isn't enough.
  # See /usr/share/cmake-3.5/Modules/FindCUDA.cmake, block at line 589.
  export CUDA_PATH="/usr/local/cuda"

  # Ensure the ccache symlink can still find the real nvcc binary.
  export PATH="/usr/local/cuda/bin:$PATH"
fi
if [[ $BUILD_ENVIRONMENT == *rocm* ]]; then
  if [[ -n "$CI" && -z "$PYTORCH_ROCM_ARCH" ]]; then
    # Set ROCM_ARCH to gfx900 and gfx906 for CI builds, if user doesn't override.
    echo "Limiting PYTORCH_ROCM_ARCH to gfx90[06] for CI builds"
    export PYTORCH_ROCM_ARCH="gfx900;gfx906"
  fi
  # This is needed to enable ImageInput operator in resnet50_trainer
  build_args+=("USE_OPENCV=ON")
  # This is needed to read datasets from https://download.caffe2.ai/databases/resnet_trainer.zip
  build_args+=("USE_LMDB=ON")
  # hcc used to run out of memory, silently exiting without stopping
  # the build process, leaving undefined symbols in the shared lib,
  # causing undefined symbol errors when later running tests.
  # We used to set MAX_JOBS to 4 to avoid this, but it is no longer an issue.
  if [ -z "$MAX_JOBS" ]; then
    export MAX_JOBS=$(($(nproc) - 1))
  fi

  ########## HIPIFY Caffe2 operators
  ${PYTHON} "${ROOT_DIR}/tools/amd_build/build_amd.py"
fi

# Try to include Redis support for Linux builds
if [ "$(uname)" == "Linux" ]; then
  build_args+=("USE_REDIS=ON")
fi

# Use a specialized onnx namespace in CI to catch hardcoded onnx namespace
build_args+=("ONNX_NAMESPACE=ONNX_NAMESPACE_FOR_C2_CI")

###############################################################################
# Configure and make
###############################################################################

if [[ "$BUILD_ENVIRONMENT" == *cmake* ]]; then
  # cmake-only non-setup.py build, to test cpp-only bits. This installs into
  # /usr/local/caffe2 and installs no Python tests
  build_args+=("CMAKE_INSTALL_PREFIX=${INSTALL_PREFIX}")

  # Run cmake from the ./build_caffe2 directory so it doesn't conflict with
  # the standard PyTorch build directory. Eventually these won't need to
  # be separate.
  rm -rf build_caffe2
  mkdir build_caffe2
  cd ./build_caffe2

  # We test for the presence of cmake3 (for platforms like CentOS and Ubuntu 14.04)
  # and use it if present.
  if [[ -x "$(command -v cmake3)" ]]; then
    CMAKE_BINARY=cmake3
  else
    CMAKE_BINARY=cmake
  fi

  # Configure
  ${CMAKE_BINARY} "${ROOT_DIR}" $(build_to_cmake ${build_args[@]}) "$@"

  # Build
  if [ "$(uname)" == "Linux" ]; then
    make "-j${MAX_JOBS}" install
  else
    echo "Don't know how to build on $(uname)"
    exit 1
  fi

  # This is to save test binaries for testing
  mv "$INSTALL_PREFIX/test/" "$INSTALL_PREFIX/cpp_test/"

  ls -lah $INSTALL_PREFIX

else
  # Python build. Uses setup.py to install into site-packages
  build_args+=("USE_LEVELDB=ON")
  build_args+=("USE_LMDB=ON")
  build_args+=("USE_OPENCV=ON")
  build_args+=("BUILD_TEST=ON")
  # These flags preserve the flags that were used before this refactor (blame
  # me)
  build_args+=("USE_GLOG=ON")
  build_args+=("USE_GFLAGS=ON")
  build_args+=("USE_FBGEMM=OFF")
  build_args+=("USE_MKLDNN=OFF")
  build_args+=("USE_DISTRIBUTED=ON")
  for build_arg in "${build_args[@]}"; do
    export $build_arg
  done

  # sccache will be stuck if all cores are used for compiling
  # see https://github.com/pytorch/pytorch/pull/7361
  if [[ -n "${SCCACHE}" && $BUILD_ENVIRONMENT != *rocm* ]]; then
    export MAX_JOBS=`expr $(nproc) - 1`
  fi

  pip install --user dataclasses typing_extensions

  $PYTHON setup.py install --user

  report_compile_cache_stats
fi

###############################################################################
# Install ONNX
###############################################################################

# Install ONNX into a local directory
pip install --user "file://${ROOT_DIR}/third_party/onnx#egg=onnx"

report_compile_cache_stats

if [[ $BUILD_ENVIRONMENT == *rocm* ]]; then
  # Remove sccache wrappers post-build; runtime compilation of MIOpen kernels does not yet fully support them
  sudo rm -f /opt/cache/bin/cc
  sudo rm -f /opt/cache/bin/c++
  sudo rm -f /opt/cache/bin/gcc
  sudo rm -f /opt/cache/bin/g++
  pushd /opt/rocm/llvm/bin
  if [[ -d original ]]; then
    sudo mv original/clang .
    sudo mv original/clang++ .
  fi
  sudo rm -rf original
  popd
fi
@@ -1,7 +0,0 @@
#!/bin/bash
set -ex
upstream="$1"
pr="$2"
git diff --name-only "$upstream" "$pr"
# For safety, unconditionally trigger for any changes.
#git diff --name-only "$upstream" "$pr" | grep -Eq '^(CMakeLists.txt|Makefile|.gitmodules|.jenkins/caffe2|binaries|caffe|caffe2|cmake|conda|docker|docs/caffe2|modules|scripts|third_party)'
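
For context, a hedged sketch (pattern shortened, wrapper hypothetical) of how the commented-out filter would gate a CI trigger: `grep -Eq` exits 0 iff any changed path matches, so the job can key off its exit status.

if git diff --name-only "$upstream" "$pr" | grep -Eq '^(caffe2|cmake|scripts)'; then
  echo "relevant paths changed; trigger the Caffe2 build"
else
  echo "no relevant changes; skip the build"
fi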

@@ -1,9 +0,0 @@
#!/bin/bash
set -ex
upstream="$1"
pr="$2"
git diff --name-only "$upstream" "$pr"
# Now that the PyTorch build depends on Caffe2, unconditionally trigger
# for any changes.
# TODO: Replace this with a NEGATIVE regex that allows us to skip builds when they are unnecessary
#git diff --name-only "$upstream" "$pr" | grep -Eq '^(aten/|caffe2/|.jenkins/pytorch|docs/(make.bat|Makefile|requirements.txt|source)|mypy|requirements.txt|setup.py|test/|third_party/|tools/|\.gitmodules|torch/)'
@@ -165,9 +165,6 @@ option(BUILD_LITE_INTERPRETER "Master flag to build Lite Interpreter" OFF)
cmake_dependent_option(
    BUILD_CAFFE2_OPS "Build Caffe2 operators" ON
    "BUILD_CAFFE2" OFF)
cmake_dependent_option(
    BUILD_CAFFE2_MOBILE "Build libcaffe2 for mobile (deprecating)" OFF
    "BUILD_CAFFE2" OFF)
option(BUILD_SHARED_LIBS "Build libcaffe2.so" ON)
cmake_dependent_option(
    CAFFE2_LINK_LOCAL_PROTOBUF "If set, build protobuf inside libcaffe2.so." ON
@@ -591,18 +588,11 @@ if(ANDROID OR IOS OR DEFINED ENV{BUILD_PYTORCH_MOBILE_WITH_HOST_TOOLCHAIN})
endif()

# INTERN_BUILD_ATEN_OPS is used to control whether to build ATen/TH operators.
# It's disabled for caffe2 mobile library.
if(INTERN_BUILD_MOBILE AND BUILD_CAFFE2_MOBILE)
  set(INTERN_BUILD_ATEN_OPS OFF)
else()
  set(INTERN_BUILD_ATEN_OPS ON)
endif()
set(INTERN_BUILD_ATEN_OPS ON)

# BUILD_CAFFE2_MOBILE is the master switch to choose between libcaffe2 v.s. libtorch mobile build.
# When it's enabled it builds original libcaffe2 mobile library without ATen/TH ops nor TorchScript support;
# When it's disabled it builds libtorch mobile library, which contains ATen/TH ops and native support for
# Build libtorch mobile library, which contains ATen/TH ops and native support for
# TorchScript model, but doesn't contain not-yet-unified caffe2 ops;
if(INTERN_BUILD_MOBILE AND NOT BUILD_CAFFE2_MOBILE)
if(INTERN_BUILD_MOBILE)
  if(NOT BUILD_SHARED_LIBS AND NOT "${SELECTED_OP_LIST}" STREQUAL "")
    string(APPEND CMAKE_CXX_FLAGS " -DNO_EXPORT")
  endif()
@@ -1246,13 +1246,6 @@ In 2018, we merged Caffe2 into the PyTorch source repository. While the
steady state aspiration is that Caffe2 and PyTorch share code freely,
in the meantime there will be some separation.

If you submit a PR to only PyTorch or only Caffe2 code, CI will only
run for the project you edited. The logic for this is implemented
in `.jenkins/pytorch/dirty.sh` and `.jenkins/caffe2/dirty.sh`; you
can look at these to see which path prefixes constitute changes.
This also means that if you ADD a new top-level path, or you start
sharing code between projects, you need to modify these files.

There are a few "unusual" directories which, for historical reasons,
are Caffe2/PyTorch specific. Here they are:
@@ -1,14 +1,9 @@
if(INTERN_BUILD_MOBILE)
  if(BUILD_CAFFE2_MOBILE)
    #caffe2_binary_target("predictor_verifier.cc")
    caffe2_binary_target("speed_benchmark.cc")
  else()
    caffe2_binary_target("speed_benchmark_torch.cc")
    caffe2_binary_target("load_benchmark_torch.cc")
    if(NOT BUILD_LITE_INTERPRETER)
      caffe2_binary_target("compare_models_torch.cc")
    endif()
  endif()
  return()
endif()
@@ -22,7 +22,7 @@ endif()
# OMP - OpenMP for intra-op, native thread pool for inter-op parallelism
# NATIVE - using native thread pool for intra- and inter-op parallelism
# TBB - using TBB for intra- and native thread pool for inter-op parallelism
if(INTERN_BUILD_MOBILE AND NOT BUILD_CAFFE2_MOBILE)
if(INTERN_BUILD_MOBILE)
  set(ATEN_THREADING "NATIVE" CACHE STRING "ATen parallel backend")
else()
  if(USE_OPENMP)
@@ -129,7 +129,7 @@ if(BUILD_CAFFE2 OR (NOT USE_FBGEMM))
endif()

# Skip modules that are not used by libtorch mobile yet.
if(BUILD_CAFFE2 AND (NOT INTERN_BUILD_MOBILE OR BUILD_CAFFE2_MOBILE))
if(BUILD_CAFFE2 AND NOT INTERN_BUILD_MOBILE)
  add_subdirectory(contrib)
  add_subdirectory(predictor)
  add_subdirectory(predictor/emulator)
@@ -166,7 +166,7 @@ if(BUILD_CAFFE2 AND (NOT INTERN_BUILD_MOBILE OR BUILD_CAFFE2_MOBILE))
  # add_subdirectory(test) # todo: use caffe2_gtest_main instead of gtest_main because we will need to call GlobalInit
  add_subdirectory(transforms)
endif()
if(NOT BUILD_CAFFE2 AND (NOT INTERN_BUILD_MOBILE OR BUILD_CAFFE2_MOBILE))
if(NOT BUILD_CAFFE2 AND NOT INTERN_BUILD_MOBILE)
  add_subdirectory(proto)
endif()
@@ -269,7 +269,7 @@ if(PRINT_CMAKE_DEBUG_INFO)

endif()

if(NOT INTERN_BUILD_MOBILE OR BUILD_CAFFE2_MOBILE)
if(NOT INTERN_BUILD_MOBILE)
  # ---[ List of libraries to link with
  add_library(caffe2_protos STATIC $<TARGET_OBJECTS:Caffe2_PROTO>)
  add_dependencies(caffe2_protos Caffe2_PROTO)
@@ -326,28 +326,25 @@ if(NOT TORCH_INSTALL_LIB_DIR)
  set(TORCH_INSTALL_LIB_DIR lib)
endif()

set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)

# Generate files
set(TOOLS_PATH "${TORCH_ROOT}/tools")

if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
configure_file("${TORCH_SRC_DIR}/_utils_internal.py"
    "${TOOLS_PATH}/shared/_utils_internal.py"
    COPYONLY)

# Generate header with version info
configure_file("${TORCH_SRC_DIR}/csrc/api/include/torch/version.h.in"
    "${TORCH_SRC_DIR}/csrc/api/include/torch/version.h"
    @ONLY)

set(GENERATED_CXX_TORCH
    "${TORCH_SRC_DIR}/csrc/autograd/generated/Functions.cpp"
)

if(NOT INTERN_DISABLE_AUTOGRAD AND NOT BUILD_LITE_INTERPRETER)
  list(APPEND GENERATED_CXX_TORCH
    "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_0.cpp"
    "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_1.cpp"
@@ -369,23 +366,23 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
    "${TORCH_SRC_DIR}/csrc/lazy/generated/RegisterLazy.cpp"
  )
endif()
endif()

set(GENERATED_H_TORCH
    "${TORCH_SRC_DIR}/csrc/autograd/generated/Functions.h"
    "${TORCH_SRC_DIR}/csrc/autograd/generated/variable_factories.h"
)

if(NOT INTERN_DISABLE_AUTOGRAD)
  list(APPEND GENERATED_H_TORCH
    "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType.h"
    "${TORCH_SRC_DIR}/csrc/lazy/generated/LazyIr.h"
    "${TORCH_SRC_DIR}/csrc/lazy/generated/LazyNonNativeIr.h"
    "${TORCH_SRC_DIR}/csrc/lazy/generated/LazyNativeFunctions.h"
  )
endif()

set(GENERATED_CXX_PYTHON
    "${TORCH_SRC_DIR}/csrc/autograd/generated/python_functions_0.cpp"
    "${TORCH_SRC_DIR}/csrc/autograd/generated/python_functions_1.cpp"
    "${TORCH_SRC_DIR}/csrc/autograd/generated/python_functions_2.cpp"
@@ -404,15 +401,15 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
    "${TORCH_SRC_DIR}/csrc/autograd/generated/python_enum_tag.cpp"
)

set(GENERATED_H_PYTHON
    "${TORCH_SRC_DIR}/csrc/autograd/generated/python_functions.h"
)

set(GENERATED_TESTING_PYTHON
    "${TORCH_SRC_DIR}/testing/_internal/generated/annotated_fn_args.py"
)

set(TORCH_GENERATED_CODE
    ${GENERATED_CXX_TORCH}
    ${GENERATED_H_TORCH}
    ${GENERATED_CXX_PYTHON}
@@ -420,15 +417,15 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
    ${GENERATED_TESTING_PYTHON}
)

set(GEN_PER_OPERATOR_FLAG)
if(USE_PER_OPERATOR_HEADERS)
  list(APPEND GEN_PER_OPERATOR_FLAG "--per_operator_headers")
endif()

file(GLOB_RECURSE autograd_python "${TOOLS_PATH}/autograd/*.py")
file(GLOB_RECURSE autograd_yaml "${TOOLS_PATH}/autograd/*.yaml")
file(GLOB_RECURSE autograd_templates "${TOOLS_PATH}/autograd/templates/*")
add_custom_command(
    OUTPUT
    ${TORCH_GENERATED_CODE}
    COMMAND
@@ -458,19 +455,19 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
    WORKING_DIRECTORY "${TORCH_ROOT}")

# Required workaround for libtorch_python.so build
# see https://samthursfield.wordpress.com/2015/11/21/cmake-dependencies-between-targets-and-files-and-custom-commands/#custom-commands-in-different-directories
add_custom_target(
    generate-torch-sources
    DEPENDS ${TORCH_GENERATED_CODE}
)

set(TORCH_SRCS ${GENERATED_CXX_TORCH})
list(APPEND TORCH_SRCS ${GENERATED_H_TORCH})
list(APPEND LIBTORCH_CMAKE_SRCS "")

list(APPEND LITE_EAGER_SYMOBLICATION_SRCS "")
if(USE_SOURCE_DEBUG_ON_MOBILE)
  append_filelist("libtorch_lite_eager_symbolication" LITE_EAGER_SYMOBLICATION_SRCS)
  # For source debug on lite interpreter, we have to add dependency on pickling
  # but references to read/writeArchiveAndTensor is not built for mobile
@@ -479,20 +476,20 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
  if(BUILD_LITE_INTERPRETER)
    set_source_files_properties(${TORCH_SRC_DIR}/csrc/jit/serialization/pickle.cpp PROPERTIES COMPILE_FLAGS "-DC10_MOBILE -DFEATURE_TORCH_MOBILE")
  endif()
endif()

list(APPEND LITE_PROFILER_SRCS "")
if(USE_LITE_INTERPRETER_PROFILER)
  append_filelist("libtorch_edge_profiler_sources " LITE_PROFILER_SRCS)
endif()

# Switch between the full jit interpreter and lite interpreter
if(BUILD_LITE_INTERPRETER)
  append_filelist("libtorch_lite_cmake_sources" LIBTORCH_CMAKE_SRCS)
  list(APPEND LIBTORCH_CMAKE_SRCS ${LITE_EAGER_SYMOBLICATION_SRCS})
  list(APPEND LIBTORCH_CMAKE_SRCS ${LITE_PROFILER_SRCS})
  set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
else()
  append_filelist("libtorch_cmake_sources" LIBTORCH_CMAKE_SRCS)
  if(BUILD_LAZY_TS_BACKEND)
    append_filelist("lazy_tensor_ts_sources" LIBTORCH_CMAKE_SRCS)
@@ -504,18 +501,18 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
    # TODO: Delete this when https://github.com/pytorch/pytorch/issues/35026 is fixed
    set_source_files_properties(../torch/csrc/autograd/record_function_ops.cpp PROPERTIES COMPILE_FLAGS -Wno-deprecated-declarations)
  endif()
endif()
list(APPEND TORCH_SRCS ${LIBTORCH_CMAKE_SRCS})
endif()

if(PRINT_CMAKE_DEBUG_INFO)
  message(STATUS "Interpreter sources: ")
  foreach(tmp ${LIBTORCH_CMAKE_SRCS})
    message(STATUS "  " ${tmp})
  endforeach()
endif()

# Mobile backend delegate srcs
if(INTERN_BUILD_MOBILE AND NOT BUILD_CAFFE2_MOBILE)
if(INTERN_BUILD_MOBILE)
  set(DELEGATE_SRCS
    ${TORCH_SRC_DIR}/csrc/jit/backends/backend_debug_info.cpp
    ${TORCH_SRC_DIR}/csrc/jit/backends/backend_interface.cpp
@@ -533,22 +530,22 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
    include_directories(${TORCH_ROOT}/third_party/nlohmann/single_include)
    list(APPEND TORCH_SRCS ${COREML_DELEGATE_SRCS})
  endif()
endif()

# Required workaround for LLVM 9 includes.
if(NOT MSVC)
  set_source_files_properties(${TORCH_SRC_DIR}/csrc/jit/tensorexpr/llvm_jit.cpp PROPERTIES COMPILE_FLAGS -Wno-noexcept-type)
  # Force -Werror on several files
  set_source_files_properties(${CMAKE_CURRENT_LIST_DIR}/../aten/src/ATen/native/mkldnn/Pooling.cpp PROPERTIES COMPILE_FLAGS "-Werror")
endif()
# Disable certain warnings for GCC-9.X
if(CMAKE_COMPILER_IS_GNUCXX AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 9.0.0))
  # See https://github.com/pytorch/pytorch/issues/38856
  set_source_files_properties(${TORCH_SRC_DIR}/csrc/jit/tensorexpr/llvm_jit.cpp PROPERTIES COMPILE_FLAGS "-Wno-redundant-move -Wno-noexcept-type")
  set_source_files_properties(${TORCH_SRC_DIR}/csrc/jit/tensorexpr/llvm_codegen.cpp PROPERTIES COMPILE_FLAGS "-Wno-init-list-lifetime")
endif()

if(NOT INTERN_DISABLE_MOBILE_INTERP)
  set(MOBILE_SRCS
    ${TORCH_SRC_DIR}/csrc/jit/mobile/function.cpp
    ${TORCH_SRC_DIR}/csrc/jit/mobile/import.cpp
@@ -569,29 +566,29 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
  )
  list(APPEND TORCH_SRCS ${MOBILE_SRCS})
  list(APPEND TORCH_SRCS ${LITE_EAGER_SYMOBLICATION_SRCS})
endif()

# This one needs to be unconditionally added as Functions.cpp is also unconditionally added
list(APPEND TORCH_SRCS
    ${TORCH_SRC_DIR}/csrc/autograd/FunctionsManual.cpp
    ${TORCH_SRC_DIR}/csrc/utils/out_types.cpp
)

if(NOT INTERN_DISABLE_AUTOGRAD AND NOT BUILD_LITE_INTERPRETER)
  list(APPEND TORCH_SRCS
    ${TORCH_SRC_DIR}/csrc/autograd/TraceTypeManual.cpp
    ${TORCH_SRC_DIR}/csrc/autograd/VariableTypeManual.cpp
  )
endif()

if(${USE_ITT})
  list(APPEND TORCH_SRCS
    ${TORCH_SRC_DIR}/csrc/itt_wrapper.cpp
    ${TORCH_SRC_DIR}/csrc/profiler/itt.cpp
  )
endif()

if(NOT INTERN_BUILD_MOBILE AND NOT BUILD_LITE_INTERPRETER)
  list(APPEND TORCH_SRCS
    ${TORCH_SRC_DIR}/csrc/api/src/jit.cpp
    ${TORCH_SRC_DIR}/csrc/jit/mobile/compatibility/backport.cpp
@@ -624,13 +621,13 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
    append_filelist("libtorch_distributed_extra_sources" TORCH_SRCS)
  endif()
endif()

if(USE_CUDA OR USE_ROCM)
  append_filelist("libtorch_cuda_core_sources" Caffe2_GPU_HIP_JIT_FUSERS_SRCS)
endif()

if(USE_CUDA)
  list(APPEND Caffe2_GPU_CU_SRCS ${Caffe2_GPU_HIP_JIT_FUSERS_SRCS})
  add_library(caffe2_nvrtc SHARED ${ATen_NVRTC_STUB_SRCS})
  if(MSVC)
@@ -657,9 +654,9 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
    PROPERTIES COMPILE_DEFINITIONS "NVRTC_SHORTHASH=${CUDA_NVRTC_SHORTHASH}"
  )
  set_source_files_properties(${TORCH_SRC_DIR}/csrc/jit/passes/frozen_conv_add_relu_fusion.cpp PROPERTIES COMPILE_FLAGS "-DUSE_CUDA=1")
endif()

if(BUILD_ONEDNN_GRAPH)
  list(APPEND Caffe2_CPU_SRCS
    ${TORCH_SRC_DIR}/csrc/jit/codegen/onednn/LlgaTensorImpl.cpp
    ${TORCH_SRC_DIR}/csrc/jit/codegen/onednn/graph_fuser.cpp
@@ -673,9 +670,9 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
    ${TORCH_SRC_DIR}/csrc/jit/codegen/onednn/prepare_binary.cpp
    ${TORCH_SRC_DIR}/csrc/jit/codegen/onednn/guard_shape.cpp
  )
endif()

if(USE_ROCM)
  list(APPEND Caffe2_HIP_SRCS ${Caffe2_GPU_HIP_JIT_FUSERS_SRCS})
  if(USE_NCCL)
    list(APPEND Caffe2_HIP_SRCS
@@ -693,9 +690,9 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
    target_link_libraries(caffe2_nvrtc ${PYTORCH_HIP_HCC_LIBRARIES} ${ROCM_HIPRTC_LIB})
    target_compile_definitions(caffe2_nvrtc PRIVATE USE_ROCM __HIP_PLATFORM_HCC__)
    install(TARGETS caffe2_nvrtc DESTINATION "${TORCH_INSTALL_LIB_DIR}")
endif()

if(NOT NO_API AND NOT BUILD_LITE_INTERPRETER)
  list(APPEND TORCH_SRCS
    ${TORCH_SRC_DIR}/csrc/api/src/cuda.cpp
    ${TORCH_SRC_DIR}/csrc/api/src/data/datasets/mnist.cpp
@@ -756,11 +753,10 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
    ${TORCH_SRC_DIR}/csrc/api/src/serialize/input-archive.cpp
    ${TORCH_SRC_DIR}/csrc/api/src/serialize/output-archive.cpp
  )
endif()

list(APPEND Caffe2_CPU_SRCS ${TORCH_SRCS})
endif()

if(USE_MPS)
  list(APPEND Caffe2_CPU_SRCS ${Caffe2_MPS_SRCS})
endif()
@@ -1079,25 +1075,24 @@ if(BUILD_LITE_INTERPRETER AND SELECTED_OP_LIST)
  add_dependencies(torch_cpu __selected_mobile_ops_header_gen)
endif()

if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
if(NOT NO_API)
  target_include_directories(torch_cpu PRIVATE
    ${TORCH_SRC_DIR}/csrc/api
    ${TORCH_SRC_DIR}/csrc/api/include)
endif()

if(BUILD_SPLIT_CUDA AND MSVC)
  # -INCLUDE is used to ensure torch_cuda_cpp/cu are linked against in a project that relies on them.
  target_link_libraries(torch_cuda_cpp INTERFACE "-INCLUDE:?warp_size@cuda@at@@YAHXZ")
  # See [Note about _torch_cuda_cu_linker_symbol_op and torch_cuda_cu] in native_functions.yaml
  target_link_libraries(torch_cuda_cu INTERFACE "-INCLUDE:?_torch_cuda_cu_linker_symbol_op_cuda@native@at@@YA?AVTensor@2@AEBV32@@Z")
elseif(USE_CUDA AND MSVC)
  # -INCLUDE is used to ensure torch_cuda is linked against in a project that relies on them.
  # Related issue: https://github.com/pytorch/pytorch/issues/31611
  target_link_libraries(torch_cuda INTERFACE "-INCLUDE:?warp_size@cuda@at@@YAHXZ")
endif()

if(NOT BUILD_LITE_INTERPRETER)
  set(TH_CPU_INCLUDE
    # dense
    aten/src/TH
@@ -1107,19 +1102,19 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
    ${CMAKE_BINARY_DIR}/aten/src)
  target_include_directories(torch_cpu PRIVATE ${TH_CPU_INCLUDE})
endif()
endif()

set(ATen_CPU_INCLUDE
    ${TORCH_ROOT}/aten/src
    ${CMAKE_CURRENT_BINARY_DIR}/../aten/src
    ${CMAKE_BINARY_DIR}/aten/src)

if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
  set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/../aten/src/ATen/native/QuantizedLinear.cpp PROPERTIES COMPILE_FLAGS -Wno-deprecated-declarations)
  set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/../aten/src/ATen/native/RNN.cpp PROPERTIES COMPILE_FLAGS -Wno-deprecated-declarations)
  set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/../aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp PROPERTIES COMPILE_FLAGS -Wno-deprecated-declarations)
  set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/../aten/src/ATen/native/quantized/qlinear_unpack.cpp PROPERTIES COMPILE_FLAGS -Wno-deprecated-declarations)
endif()

if(USE_TBB)
  list(APPEND ATen_CPU_INCLUDE ${TBB_INCLUDE_DIR})
@@ -1131,42 +1126,42 @@ if(BUILD_CAFFE2 AND BUILD_CAFFE2_OPS AND USE_FBGEMM)
  target_include_directories(torch_cpu PRIVATE ${CMAKE_CURRENT_LIST_DIR}/../third_party)
endif()

target_include_directories(torch_cpu PRIVATE ${ATen_CPU_INCLUDE})

target_include_directories(torch_cpu PRIVATE
    ${TORCH_SRC_DIR}/csrc)

target_include_directories(torch_cpu PRIVATE
    ${TORCH_ROOT}/third_party/miniz-2.1.0)

target_include_directories(torch_cpu PRIVATE
    ${TORCH_ROOT}/third_party/kineto/libkineto/include)

if(USE_KINETO)
  target_include_directories(torch_cpu PRIVATE
      ${TORCH_ROOT}/third_party/kineto/libkineto/src)
endif()

install(DIRECTORY "${TORCH_SRC_DIR}/csrc"
    DESTINATION ${TORCH_INSTALL_INCLUDE_DIR}/torch
    FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp")
install(DIRECTORY "${TORCH_SRC_DIR}/csrc/distributed/c10d"
    DESTINATION ${TORCH_INSTALL_INCLUDE_DIR}
    FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp")
install(FILES
    "${TORCH_SRC_DIR}/script.h"
    "${TORCH_SRC_DIR}/extension.h"
    "${TORCH_SRC_DIR}/custom_class.h"
    "${TORCH_SRC_DIR}/library.h"
    "${TORCH_SRC_DIR}/custom_class_detail.h"
    DESTINATION ${TORCH_INSTALL_INCLUDE_DIR}/torch)
if(USE_DEPLOY)
  install(FILES
      "${TORCH_SRC_DIR}/deploy.h"
      DESTINATION ${TORCH_INSTALL_INCLUDE_DIR}/torch)
endif()

if(BUILD_TEST)
  if(BUILD_LITE_INTERPRETER)
    add_subdirectory(
      ${TORCH_ROOT}/test/cpp/lite_interpreter_runtime
@@ -1202,10 +1197,10 @@ endif()
    add_subdirectory(${TORCH_ROOT}/test/cpp/lazy
      ${CMAKE_BINARY_DIR}/test_lazy)
  endif()
endif()

# XXX This ABI check cannot be run with arm-linux-androideabi-g++
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
  if(DEFINED GLIBCXX_USE_CXX11_ABI)
    message(STATUS "_GLIBCXX_USE_CXX11_ABI is already defined as a cmake variable")
  else()
@@ -1229,37 +1224,30 @@ endif()
    endif()
  endif()
  message(STATUS "Determined _GLIBCXX_USE_CXX11_ABI=${GLIBCXX_USE_CXX11_ABI}")
endif()

# CMake config for external projects.
configure_file(
    ${PROJECT_SOURCE_DIR}/cmake/TorchConfigVersion.cmake.in
    ${PROJECT_BINARY_DIR}/TorchConfigVersion.cmake
    @ONLY)
configure_file(
    ${TORCH_ROOT}/cmake/TorchConfig.cmake.in
    ${PROJECT_BINARY_DIR}/TorchConfig.cmake
    @ONLY)
install(FILES
    ${PROJECT_BINARY_DIR}/TorchConfigVersion.cmake
    ${PROJECT_BINARY_DIR}/TorchConfig.cmake
    DESTINATION share/cmake/Torch)

# ---[ Torch python bindings build
add_subdirectory(../torch torch)

endif()
# ==========================================================
# END formerly-libtorch flags
# ==========================================================

if(NOT NO_API)
  target_include_directories(torch_cpu PUBLIC
    $<BUILD_INTERFACE:${TORCH_SRC_DIR}/csrc/api>
@@ -1399,7 +1387,7 @@ if(USE_DISTRIBUTED)
  endif()
endif()

if(NOT INTERN_BUILD_MOBILE OR BUILD_CAFFE2_MOBILE)
if(NOT INTERN_BUILD_MOBILE)
  caffe2_interface_library(caffe2_protos caffe2_protos_whole)
  target_link_libraries(torch_cpu PRIVATE caffe2_protos_whole)
  if(${CAFFE2_LINK_LOCAL_PROTOBUF})
@@ -1,4 +1,4 @@
if((NOT BUILD_CAFFE2) OR (INTERN_BUILD_MOBILE AND NOT BUILD_CAFFE2_MOBILE))
if(NOT BUILD_CAFFE2 OR INTERN_BUILD_MOBILE)
  list(APPEND Caffe2_CPU_SRCS
    "${CMAKE_CURRENT_SOURCE_DIR}/common.cc"
  )
@@ -1,4 +1,4 @@
if(INTERN_BUILD_MOBILE AND NOT BUILD_CAFFE2_MOBILE)
if(INTERN_BUILD_MOBILE)
  list(APPEND Caffe2_CPU_SRCS
    "${CMAKE_CURRENT_SOURCE_DIR}/embedding_lookup_idx.cc"
  )
@@ -1,4 +1,4 @@
if((NOT BUILD_CAFFE2) OR (INTERN_BUILD_MOBILE AND NOT BUILD_CAFFE2_MOBILE))
if(NOT BUILD_CAFFE2 OR INTERN_BUILD_MOBILE)
  list(APPEND Caffe2_CPU_SRCS
    utils/string_utils.cc
    utils/threadpool/ThreadPool.cc
@@ -78,7 +78,7 @@ if(USE_CUDA)
endif()

# ---[ Custom Protobuf
if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO AND (NOT INTERN_BUILD_MOBILE OR BUILD_CAFFE2_MOBILE))
if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO AND NOT INTERN_BUILD_MOBILE)
  disable_ubsan()
  include(${CMAKE_CURRENT_LIST_DIR}/ProtoBuf.cmake)
  enable_ubsan()
@@ -26,7 +26,6 @@ function(caffe2_print_configuration_summary)
  message(STATUS "  CAFFE2_VERSION       : ${CAFFE2_VERSION}")
  message(STATUS "  BUILD_CAFFE2         : ${BUILD_CAFFE2}")
  message(STATUS "  BUILD_CAFFE2_OPS     : ${BUILD_CAFFE2_OPS}")
  message(STATUS "  BUILD_CAFFE2_MOBILE  : ${BUILD_CAFFE2_MOBILE}")
  message(STATUS "  BUILD_STATIC_RUNTIME_BENCHMARK: ${BUILD_STATIC_RUNTIME_BENCHMARK}")
  message(STATUS "  BUILD_TENSOREXPR_BENCHMARK: ${BUILD_TENSOREXPR_BENCHMARK}")
  message(STATUS "  BUILD_NVFUSER_BENCHMARK: ${BUILD_NVFUSER_BENCHMARK}")
@@ -415,7 +415,6 @@ function(torch_compile_options libname)
    list(APPEND private_compile_options -Werror)
  endif()

  if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
  # until they can be unified, keep these lists synced with setup.py
  if(MSVC)
@@ -481,7 +480,6 @@ function(torch_compile_options libname)
  elseif(WERROR)
    list(APPEND private_compile_options -Wno-strict-overflow)
  endif()
  endif()

  target_compile_options(${libname} PRIVATE
      $<$<COMPILE_LANGUAGE:CXX>:${private_compile_options}>)
@@ -59,13 +59,13 @@ echo "Android NDK version: $ANDROID_NDK_VERSION"

CMAKE_ARGS=()

if [ -z "${BUILD_CAFFE2_MOBILE:-}" ]; then
# Build PyTorch mobile
CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$($PYTHON -c 'import sysconfig; print(sysconfig.get_path("purelib"))')")
CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$($PYTHON -c 'import sys; print(sys.executable)')")
CMAKE_ARGS+=("-DBUILD_CUSTOM_PROTOBUF=OFF")

# custom build with selected ops
if [ -n "${SELECTED_OP_LIST}" ]; then
  SELECTED_OP_LIST="$(cd $(dirname $SELECTED_OP_LIST); pwd -P)/$(basename $SELECTED_OP_LIST)"
  echo "Choose SELECTED_OP_LIST file: $SELECTED_OP_LIST"
  if [ ! -r ${SELECTED_OP_LIST} ]; then
@@ -73,16 +73,6 @@ if [ -z "${BUILD_CAFFE2_MOBILE:-}" ]; then
    exit 1
  fi
  CMAKE_ARGS+=("-DSELECTED_OP_LIST=${SELECTED_OP_LIST}")
fi
else
  # Build Caffe2 mobile
  CMAKE_ARGS+=("-DBUILD_CAFFE2_MOBILE=ON")
  # Build protobuf from third_party so we have a host protoc binary.
  echo "Building protoc"
  $CAFFE2_ROOT/scripts/build_host_protoc.sh
  # Use locally built protoc because we'll build libprotobuf for the
  # target architecture and need an exact version match.
  CMAKE_ARGS+=("-DCAFFE2_CUSTOM_PROTOC_EXECUTABLE=$CAFFE2_ROOT/build_host_protoc/bin/protoc")
fi

# If Ninja is installed, prefer it to Make
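
An aside on the SELECTED_OP_LIST normalization used in both mobile build scripts, as a worked sketch (example path assumed for illustration):

# With SELECTED_OP_LIST=./ops.yaml, run from /home/user/pytorch:
#   dirname ./ops.yaml    -> .
#   (cd . ; pwd -P)       -> /home/user/pytorch   (-P resolves symlinks)
#   basename ./ops.yaml   -> ops.yaml
# so the variable becomes the absolute path /home/user/pytorch/ops.yaml
SELECTED_OP_LIST="$(cd $(dirname $SELECTED_OP_LIST); pwd -P)/$(basename $SELECTED_OP_LIST)"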
@@ -11,13 +11,13 @@ CAFFE2_ROOT="$( cd "$(dirname "$0")"/.. ; pwd -P)"

CMAKE_ARGS=()

if [ -z "${BUILD_CAFFE2_MOBILE:-}" ]; then
# Build PyTorch mobile
CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$(python -c 'import sysconfig; print(sysconfig.get_path("purelib"))')")
CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$(python -c 'import sys; print(sys.executable)')")
CMAKE_ARGS+=("-DBUILD_CUSTOM_PROTOBUF=OFF")

# custom build with selected ops
if [ -n "${SELECTED_OP_LIST}" ]; then
  SELECTED_OP_LIST="$(cd $(dirname $SELECTED_OP_LIST); pwd -P)/$(basename $SELECTED_OP_LIST)"
  echo "Choose SELECTED_OP_LIST file: $SELECTED_OP_LIST"
  if [ ! -r ${SELECTED_OP_LIST} ]; then
@@ -25,23 +25,10 @@ if [ -z "${BUILD_CAFFE2_MOBILE:-}" ]; then
    exit 1
  fi
  CMAKE_ARGS+=("-DSELECTED_OP_LIST=${SELECTED_OP_LIST}")
fi
  # bitcode
  if [ "${ENABLE_BITCODE:-}" == '1' ]; then
    CMAKE_ARGS+=("-DCMAKE_C_FLAGS=-fembed-bitcode")
    CMAKE_ARGS+=("-DCMAKE_CXX_FLAGS=-fembed-bitcode")
  fi
else
  # Build Caffe2 mobile
  CMAKE_ARGS+=("-DBUILD_CAFFE2_MOBILE=ON")
  # Build protobuf from third_party so we have a host protoc binary.
  echo "Building protoc"
  BITCODE_FLAGS="-DCMAKE_C_FLAGS=-fembed-bitcode -DCMAKE_CXX_FLAGS=-fembed-bitcode "
  $CAFFE2_ROOT/scripts/build_host_protoc.sh --other-flags $BITCODE_FLAGS
  # Use locally built protoc because we'll build libprotobuf for the
  # target architecture and need an exact version match.
  CMAKE_ARGS+=("-DCAFFE2_CUSTOM_PROTOC_EXECUTABLE=$CAFFE2_ROOT/build_host_protoc/bin/protoc")
  # Bitcode is enabled by default for caffe2
fi

# bitcode
if [ "${ENABLE_BITCODE:-}" == '1' ]; then
  CMAKE_ARGS+=("-DCMAKE_C_FLAGS=-fembed-bitcode")
  CMAKE_ARGS+=("-DCMAKE_CXX_FLAGS=-fembed-bitcode")
fi