Remove caffe2 mobile (#84338)

We're no longer building Caffe2 mobile as part of our CI, and it adds a lot of clutter to our make files. Any lingering internal dependencies will use the buck build and so won't be affected.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/84338
Approved by: https://github.com/dreiss
This commit is contained in:
John Detloff 2022-09-08 01:49:55 +00:00 committed by PyTorch MergeBot
parent 9669e3c6ec
commit e0229d6517
16 changed files with 676 additions and 1037 deletions

View File

@ -1,54 +0,0 @@
#!/bin/bash
# Legacy Caffe2 CI benchmark script: runs the imagenet_trainer example on
# null (synthetic) data for ResNet-50, ResNeXt-101 and ShuffleNet, on CPU
# or on 1/4 GPUs depending on how many devices the environment exposes.

# Fail fast and trace commands so a broken benchmark run fails CI,
# matching the sibling build script's `set -ex`.
set -ex

# shellcheck source=./common.sh
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

# Anywhere except $ROOT_DIR should work. This is so the python import doesn't
# get confused by any 'caffe2' directory in cwd
cd "$INSTALL_PREFIX"

# Detect visible GPU count; 0 selects the CPU-only benchmark variants.
if [[ $BUILD_ENVIRONMENT == *-cuda* ]]; then
  num_gpus=$(nvidia-smi -L | wc -l)
elif [[ $BUILD_ENVIRONMENT == *-rocm* ]]; then
  num_gpus=$(rocminfo | grep 'Device Type.*GPU' | wc -l)
else
  num_gpus=0
fi

# Resolve the installed caffe2 package location (cd to /usr so a stray
# 'caffe2' directory in cwd cannot shadow the installed package).
caffe2_pypath="$(cd /usr && $PYTHON -c 'import os; import caffe2; print(os.path.dirname(os.path.realpath(caffe2.__file__)))')"

# Resnet50
if (( num_gpus == 0 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 128 --epoch_size 12800 --num_epochs 2 --use_cpu
fi
if (( num_gpus >= 1 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 128 --epoch_size 12800 --num_epochs 2 --num_gpus 1
  # Let's skip the fp16 bench runs for now, as it recompiles the miopen kernels and can take 10+min to run.
  # We can resume when we (1) bindmount the miopen cache folder in jenkins; (2) install the pre-compiled miopen kernel library in the docker
  # "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 256 --epoch_size 25600 --num_epochs 2 --num_gpus 1 --float16_compute --dtype float16
fi
if (( num_gpus >= 4 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 512 --epoch_size 51200 --num_epochs 2 --num_gpus 4
fi

# ResNext
if (( num_gpus == 0 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --resnext_num_groups 32 --resnext_width_per_group 4 --num_layers 101 --train_data null --batch_size 32 --epoch_size 3200 --num_epochs 2 --use_cpu
fi
if (( num_gpus >= 1 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --resnext_num_groups 32 --resnext_width_per_group 4 --num_layers 101 --train_data null --batch_size 32 --epoch_size 3200 --num_epochs 2 --num_gpus 1
  # "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --resnext_num_groups 32 --resnext_width_per_group 4 --num_layers 101 --train_data null --batch_size 64 --epoch_size 3200 --num_epochs 2 --num_gpus 1 --float16_compute --dtype float16
fi
if (( num_gpus >= 4 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --resnext_num_groups 32 --resnext_width_per_group 4 --num_layers 101 --train_data null --batch_size 128 --epoch_size 12800 --num_epochs 2 --num_gpus 4
fi

# Shufflenet
if (( num_gpus == 0 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 32 --epoch_size 3200 --num_epochs 2 --use_cpu --model shufflenet
fi
if (( num_gpus >= 1 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 32 --epoch_size 3200 --num_epochs 2 --num_gpus 1 --model shufflenet
fi
if (( num_gpus >= 4 )); then
  "$PYTHON" "$caffe2_pypath/python/examples/imagenet_trainer.py" --train_data null --batch_size 128 --epoch_size 12800 --num_epochs 2 --num_gpus 4 --model shufflenet
fi

View File

@ -1,231 +0,0 @@
#!/bin/bash
# Legacy Caffe2 CI build driver.  Sources common.sh for shared CI helpers
# (presumably ROOT_DIR, INSTALL_PREFIX, PYTHON — confirm in common.sh) and
# dispatches to a cmake-only or setup.py build based on $BUILD_ENVIRONMENT.
set -ex

# shellcheck source=./common.sh
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

# CMAKE_ARGS are only passed to 'cmake' and the -Dfoo=bar does not work with
# setup.py, so we build a list of foo=bars and then either convert it to
# -Dfoo=bars or export them before running setup.py
build_args=()
# Convert "FOO=bar" build args into "-DFOO=bar" cmake flags, printed
# space-separated on stdout.  Callers word-split the output deliberately,
# so individual args must not contain whitespace.
build_to_cmake () {
  # 'local' keeps these from leaking into the script's global scope
  # (the original clobbered a global cmake_args on every call).
  local cmake_args=()
  local build_arg
  # "$@" preserves each argument as passed (unquoted $* would re-split).
  for build_arg in "$@"; do
    cmake_args+=("-D$build_arg")
  done
  echo "${cmake_args[@]}"
}
SCCACHE="$(which sccache)"   # empty string when sccache is not installed

# Setup ccache if configured to use it (and not sccache)
if [ -z "${SCCACHE}" ] && which ccache > /dev/null; then
  # Shadow cc/c++/gcc/g++ (and nvcc for CUDA builds) with ccache symlinks,
  # then prepend the wrapper dir to PATH so the build resolves to them.
  mkdir -p ./ccache
  ln -sf "$(which ccache)" ./ccache/cc
  ln -sf "$(which ccache)" ./ccache/c++
  ln -sf "$(which ccache)" ./ccache/gcc
  ln -sf "$(which ccache)" ./ccache/g++
  ln -sf "$(which ccache)" ./ccache/x86_64-linux-gnu-gcc
  if [[ "${BUILD_ENVIRONMENT}" == *-cuda* ]]; then
    mkdir -p ./ccache/cuda
    ln -sf "$(which ccache)" ./ccache/cuda/nvcc
  fi
  export CACHE_WRAPPER_DIR="$PWD/ccache"
  export PATH="$CACHE_WRAPPER_DIR:$PATH"
fi

# sccache will fail for CUDA builds if all cores are used for compiling
if [ -z "$MAX_JOBS" ]; then
  if [[ "${BUILD_ENVIRONMENT}" == *-cuda* ]] && [ -n "${SCCACHE}" ]; then
    # Leave one core free for the sccache server.
    MAX_JOBS=`expr $(nproc) - 1`
  else
    MAX_JOBS=$(nproc)
  fi
fi
# Print compiler-cache statistics: prefer sccache when $SCCACHE is set,
# fall back to ccache when installed, and do nothing when neither exists.
report_compile_cache_stats() {
  if [[ -n "${SCCACHE}" ]]; then
    "$SCCACHE" --show-stats
  # 'command -v' is the portable builtin existence check (vs external 'which').
  elif command -v ccache > /dev/null; then
    ccache -s
  fi
}
###############################################################################
# Use special scripts for Android and setup builds
###############################################################################
if [[ "${BUILD_ENVIRONMENT}" == *-android* ]]; then
  export ANDROID_NDK=/opt/ndk
  build_args+=("BUILD_BINARY=ON")
  build_args+=("BUILD_TEST=ON")
  build_args+=("USE_OBSERVERS=ON")
  build_args+=("USE_ZSTD=ON")
  # Android builds delegate entirely to build_android.sh; nothing below runs.
  BUILD_CAFFE2_MOBILE=1 "${ROOT_DIR}/scripts/build_android.sh" $(build_to_cmake ${build_args[@]}) "$@"
  exit 0
fi

###############################################################################
# Set parameters
###############################################################################
# cmake-only builds skip the Python bindings entirely.
if [[ "$BUILD_ENVIRONMENT" == *cmake* ]]; then
  build_args+=("BUILD_PYTHON=OFF")
else
  build_args+=("BUILD_PYTHON=ON")
  build_args+=("PYTHON_EXECUTABLE=${PYTHON}")
fi
if [[ $BUILD_ENVIRONMENT == *mkl* ]]; then
  build_args+=("BLAS=MKL")
  build_args+=("USE_MKLDNN=ON")
fi
build_args+=("BUILD_BINARY=ON")
build_args+=("BUILD_TEST=ON")
build_args+=("INSTALL_TEST=ON")
build_args+=("USE_ZSTD=ON")

if [[ $BUILD_ENVIRONMENT == *cuda* ]]; then
  build_args+=("USE_CUDA=ON")
  build_args+=("USE_NNPACK=OFF")

  # Target only our CI GPU machine's CUDA arch to speed up the build
  build_args+=("TORCH_CUDA_ARCH_LIST=Maxwell")

  # Explicitly set path to NVCC such that the symlink to ccache or sccache is used
  if [ -n "${CACHE_WRAPPER_DIR}" ]; then
    build_args+=("CUDA_NVCC_EXECUTABLE=${CACHE_WRAPPER_DIR}/cuda/nvcc")
    build_args+=("CMAKE_CUDA_COMPILER_LAUNCHER=${CACHE_WRAPPER_DIR}/ccache")
  fi

  # Ensure FindCUDA.cmake can infer the right path to the CUDA toolkit.
  # Setting PATH to resolve to the right nvcc alone isn't enough.
  # See /usr/share/cmake-3.5/Modules/FindCUDA.cmake, block at line 589.
  export CUDA_PATH="/usr/local/cuda"
  # Ensure the ccache symlink can still find the real nvcc binary.
  export PATH="/usr/local/cuda/bin:$PATH"
fi

if [[ $BUILD_ENVIRONMENT == *rocm* ]]; then
  if [[ -n "$CI" && -z "$PYTORCH_ROCM_ARCH" ]]; then
    # Set ROCM_ARCH to gfx900 and gfx906 for CI builds, if user doesn't override.
    echo "Limiting PYTORCH_ROCM_ARCH to gfx90[06] for CI builds"
    export PYTORCH_ROCM_ARCH="gfx900;gfx906"
  fi

  # This is needed to enable ImageInput operator in resnet50_trainer
  build_args+=("USE_OPENCV=ON")
  # This is needed to read datasets from https://download.caffe2.ai/databases/resnet_trainer.zip
  build_args+=("USE_LMDB=ON")

  # hcc used to run out of memory, silently exiting without stopping
  # the build process, leaving undefined symbols in the shared lib,
  # causing undefined symbol errors when later running tests.
  # We used to set MAX_JOBS to 4 to avoid, but this is no longer an issue.
  if [ -z "$MAX_JOBS" ]; then
    export MAX_JOBS=$(($(nproc) - 1))
  fi

  ########## HIPIFY Caffe2 operators
  ${PYTHON} "${ROOT_DIR}/tools/amd_build/build_amd.py"
fi

# Try to include Redis support for Linux builds
if [ "$(uname)" == "Linux" ]; then
  build_args+=("USE_REDIS=ON")
fi

# Use a specialized onnx namespace in CI to catch hardcoded onnx namespace
build_args+=("ONNX_NAMESPACE=ONNX_NAMESPACE_FOR_C2_CI")

###############################################################################
# Configure and make
###############################################################################
if [[ "$BUILD_ENVIRONMENT" == *cmake* ]]; then
  # cmake-only non-setup.py build, to test cpp only bits. This installs into
  # /usr/local/caffe2 and installs no Python tests
  build_args+=("CMAKE_INSTALL_PREFIX=${INSTALL_PREFIX}")

  # Run cmake from ./build_caffe2 directory so it doesn't conflict with
  # standard PyTorch build directory. Eventually these won't need to
  # be separate.
  rm -rf build_caffe2
  mkdir build_caffe2
  cd ./build_caffe2

  # We test the presence of cmake3 (for platforms like Centos and Ubuntu 14.04)
  # and use that if so.
  if [[ -x "$(command -v cmake3)" ]]; then
    CMAKE_BINARY=cmake3
  else
    CMAKE_BINARY=cmake
  fi

  # Configure
  ${CMAKE_BINARY} "${ROOT_DIR}" $(build_to_cmake ${build_args[@]}) "$@"
  # Build
  if [ "$(uname)" == "Linux" ]; then
    make "-j${MAX_JOBS}" install
  else
    echo "Don't know how to build on $(uname)"
    exit 1
  fi

  # This is to save test binaries for testing
  mv "$INSTALL_PREFIX/test/" "$INSTALL_PREFIX/cpp_test/"

  ls -lah $INSTALL_PREFIX
else
  # Python build. Uses setup.py to install into site-packages
  build_args+=("USE_LEVELDB=ON")
  build_args+=("USE_LMDB=ON")
  build_args+=("USE_OPENCV=ON")
  build_args+=("BUILD_TEST=ON")
  # These flags preserve the flags that were used before this refactor (blame
  # me)
  build_args+=("USE_GLOG=ON")
  build_args+=("USE_GFLAGS=ON")
  build_args+=("USE_FBGEMM=OFF")
  build_args+=("USE_MKLDNN=OFF")
  build_args+=("USE_DISTRIBUTED=ON")

  # setup.py reads the build options from environment variables, not -D flags.
  for build_arg in "${build_args[@]}"; do
    export $build_arg
  done

  # sccache will be stuck if all cores are used for compiling
  # see https://github.com/pytorch/pytorch/pull/7361
  if [[ -n "${SCCACHE}" && $BUILD_ENVIRONMENT != *rocm* ]]; then
    export MAX_JOBS=`expr $(nproc) - 1`
  fi

  pip install --user dataclasses typing_extensions

  $PYTHON setup.py install --user
  report_compile_cache_stats
fi

###############################################################################
# Install ONNX
###############################################################################

# Install ONNX into a local directory
pip install --user "file://${ROOT_DIR}/third_party/onnx#egg=onnx"
report_compile_cache_stats

if [[ $BUILD_ENVIRONMENT == *rocm* ]]; then
  # remove sccache wrappers post-build; runtime compilation of MIOpen kernels does not yet fully support them
  sudo rm -f /opt/cache/bin/cc
  sudo rm -f /opt/cache/bin/c++
  sudo rm -f /opt/cache/bin/gcc
  sudo rm -f /opt/cache/bin/g++
  pushd /opt/rocm/llvm/bin
  if [[ -d original ]]; then
    sudo mv original/clang .
    sudo mv original/clang++ .
  fi
  sudo rm -rf original
  popd
fi

View File

@ -1,7 +0,0 @@
#!/bin/bash
set -ex

# Print the list of files that differ between the two given commits.
base_commit="$1"
head_commit="$2"

git diff --name-only "$base_commit" "$head_commit"

# For safety, unconditionally trigger for any changes.
#git diff --name-only "$base_commit" "$head_commit" | grep -Eq '^(CMakeLists.txt|Makefile|.gitmodules|.jenkins/caffe2|binaries|caffe|caffe2|cmake|conda|docker|docs/caffe2|modules|scripts|third_party)'

View File

@ -1,9 +0,0 @@
#!/bin/bash
set -ex

# Print the list of files that differ between the two given commits.
base_commit="$1"
head_commit="$2"

git diff --name-only "$base_commit" "$head_commit"

# Now that PyTorch build depends on Caffe2, unconditionally trigger
# for any changes.
# TODO: Replace this with a NEGATIVE regex that allows us to skip builds when they are unnecessary
#git diff --name-only "$base_commit" "$head_commit" | grep -Eq '^(aten/|caffe2/|.jenkins/pytorch|docs/(make.bat|Makefile|requirements.txt|source)|mypy|requirements.txt|setup.py|test/|third_party/|tools/|\.gitmodules|torch/)'

View File

@ -165,9 +165,6 @@ option(BUILD_LITE_INTERPRETER "Master flag to build Lite Interpreter" OFF)
cmake_dependent_option(
BUILD_CAFFE2_OPS "Build Caffe2 operators" ON
"BUILD_CAFFE2" OFF)
cmake_dependent_option(
BUILD_CAFFE2_MOBILE "Build libcaffe2 for mobile (deprecating)" OFF
"BUILD_CAFFE2" OFF)
option(BUILD_SHARED_LIBS "Build libcaffe2.so" ON)
cmake_dependent_option(
CAFFE2_LINK_LOCAL_PROTOBUF "If set, build protobuf inside libcaffe2.so." ON
@ -591,18 +588,11 @@ if(ANDROID OR IOS OR DEFINED ENV{BUILD_PYTORCH_MOBILE_WITH_HOST_TOOLCHAIN})
endif()
# INTERN_BUILD_ATEN_OPS is used to control whether to build ATen/TH operators.
# It's disabled for caffe2 mobile library.
if(INTERN_BUILD_MOBILE AND BUILD_CAFFE2_MOBILE)
set(INTERN_BUILD_ATEN_OPS OFF)
else()
set(INTERN_BUILD_ATEN_OPS ON)
endif()
set(INTERN_BUILD_ATEN_OPS ON)
# BUILD_CAFFE2_MOBILE is the master switch to choose between libcaffe2 v.s. libtorch mobile build.
# When it's enabled it builds original libcaffe2 mobile library without ATen/TH ops nor TorchScript support;
# When it's disabled it builds libtorch mobile library, which contains ATen/TH ops and native support for
# Build libtorch mobile library, which contains ATen/TH ops and native support for
# TorchScript model, but doesn't contain not-yet-unified caffe2 ops;
if(INTERN_BUILD_MOBILE AND NOT BUILD_CAFFE2_MOBILE)
if(INTERN_BUILD_MOBILE)
if(NOT BUILD_SHARED_LIBS AND NOT "${SELECTED_OP_LIST}" STREQUAL "")
string(APPEND CMAKE_CXX_FLAGS " -DNO_EXPORT")
endif()

View File

@ -1246,13 +1246,6 @@ In 2018, we merged Caffe2 into the PyTorch source repository. While the
steady state aspiration is that Caffe2 and PyTorch share code freely,
in the meantime there will be some separation.
If you submit a PR to only PyTorch or only Caffe2 code, CI will only
run for the project you edited. The logic for this is implemented
in `.jenkins/pytorch/dirty.sh` and `.jenkins/caffe2/dirty.sh`; you
can look at this to see what path prefixes constitute changes.
This also means if you ADD a new top-level path, or you start
sharing code between projects, you need to modify these files.
There are a few "unusual" directories which, for historical reasons,
are Caffe2/PyTorch specific. Here they are:

View File

@ -1,13 +1,8 @@
if(INTERN_BUILD_MOBILE)
if(BUILD_CAFFE2_MOBILE)
#caffe2_binary_target("predictor_verifier.cc")
caffe2_binary_target("speed_benchmark.cc")
else()
caffe2_binary_target("speed_benchmark_torch.cc")
caffe2_binary_target("load_benchmark_torch.cc")
if(NOT BUILD_LITE_INTERPRETER)
caffe2_binary_target("compare_models_torch.cc")
endif()
caffe2_binary_target("speed_benchmark_torch.cc")
caffe2_binary_target("load_benchmark_torch.cc")
if(NOT BUILD_LITE_INTERPRETER)
caffe2_binary_target("compare_models_torch.cc")
endif()
return()
endif()

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,4 @@
if((NOT BUILD_CAFFE2) OR (INTERN_BUILD_MOBILE AND NOT BUILD_CAFFE2_MOBILE))
if(NOT BUILD_CAFFE2 OR INTERN_BUILD_MOBILE)
list(APPEND Caffe2_CPU_SRCS
"${CMAKE_CURRENT_SOURCE_DIR}/common.cc"
)

View File

@ -1,4 +1,4 @@
if(INTERN_BUILD_MOBILE AND NOT BUILD_CAFFE2_MOBILE)
if(INTERN_BUILD_MOBILE)
list(APPEND Caffe2_CPU_SRCS
"${CMAKE_CURRENT_SOURCE_DIR}/embedding_lookup_idx.cc"
)

View File

@ -1,4 +1,4 @@
if((NOT BUILD_CAFFE2) OR (INTERN_BUILD_MOBILE AND NOT BUILD_CAFFE2_MOBILE))
if(NOT BUILD_CAFFE2 OR INTERN_BUILD_MOBILE)
list(APPEND Caffe2_CPU_SRCS
utils/string_utils.cc
utils/threadpool/ThreadPool.cc

View File

@ -78,7 +78,7 @@ if(USE_CUDA)
endif()
# ---[ Custom Protobuf
if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO AND (NOT INTERN_BUILD_MOBILE OR BUILD_CAFFE2_MOBILE))
if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO AND NOT INTERN_BUILD_MOBILE)
disable_ubsan()
include(${CMAKE_CURRENT_LIST_DIR}/ProtoBuf.cmake)
enable_ubsan()

View File

@ -26,7 +26,6 @@ function(caffe2_print_configuration_summary)
message(STATUS " CAFFE2_VERSION : ${CAFFE2_VERSION}")
message(STATUS " BUILD_CAFFE2 : ${BUILD_CAFFE2}")
message(STATUS " BUILD_CAFFE2_OPS : ${BUILD_CAFFE2_OPS}")
message(STATUS " BUILD_CAFFE2_MOBILE : ${BUILD_CAFFE2_MOBILE}")
message(STATUS " BUILD_STATIC_RUNTIME_BENCHMARK: ${BUILD_STATIC_RUNTIME_BENCHMARK}")
message(STATUS " BUILD_TENSOREXPR_BENCHMARK: ${BUILD_TENSOREXPR_BENCHMARK}")
message(STATUS " BUILD_NVFUSER_BENCHMARK: ${BUILD_NVFUSER_BENCHMARK}")

View File

@ -415,72 +415,70 @@ function(torch_compile_options libname)
list(APPEND private_compile_options -Werror)
endif()
if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
# until they can be unified, keep these lists synced with setup.py
if(MSVC)
# until they can be unified, keep these lists synced with setup.py
if(MSVC)
if(MSVC_Z7_OVERRIDE)
set(MSVC_DEBINFO_OPTION "/Z7")
else()
set(MSVC_DEBINFO_OPTION "/Zi")
endif()
if(MSVC_Z7_OVERRIDE)
set(MSVC_DEBINFO_OPTION "/Z7")
else()
set(MSVC_DEBINFO_OPTION "/Zi")
endif()
target_compile_options(${libname} PUBLIC
$<$<COMPILE_LANGUAGE:CXX>:
${MSVC_RUNTIME_LIBRARY_OPTION}
$<$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>:${MSVC_DEBINFO_OPTION}>
/EHsc
/DNOMINMAX
/wd4267
/wd4251
/wd4522
/wd4522
/wd4838
/wd4305
/wd4244
/wd4190
/wd4101
/wd4996
/wd4275
/bigobj>
)
target_compile_options(${libname} PUBLIC
$<$<COMPILE_LANGUAGE:CXX>:
${MSVC_RUNTIME_LIBRARY_OPTION}
$<$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>:${MSVC_DEBINFO_OPTION}>
/EHsc
/DNOMINMAX
/wd4267
/wd4251
/wd4522
/wd4522
/wd4838
/wd4305
/wd4244
/wd4190
/wd4101
/wd4996
/wd4275
/bigobj>
)
else()
list(APPEND private_compile_options
-Wall
-Wextra
-Wno-unused-parameter
-Wno-unused-function
-Wno-unused-result
-Wno-missing-field-initializers
-Wno-write-strings
-Wno-unknown-pragmas
-Wno-type-limits
-Wno-array-bounds
-Wno-unknown-pragmas
-Wno-sign-compare
-Wno-strict-overflow
-Wno-strict-aliasing
-Wno-error=deprecated-declarations
# Clang has an unfixed bug leading to spurious missing braces
# warnings, see https://bugs.llvm.org/show_bug.cgi?id=21629
-Wno-missing-braces
)
if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
list(APPEND private_compile_options
-Wno-range-loop-analysis)
else()
list(APPEND private_compile_options
-Wall
-Wextra
-Wno-unused-parameter
-Wno-unused-function
-Wno-unused-result
-Wno-missing-field-initializers
-Wno-write-strings
-Wno-unknown-pragmas
-Wno-type-limits
-Wno-array-bounds
-Wno-unknown-pragmas
-Wno-sign-compare
-Wno-strict-overflow
-Wno-strict-aliasing
-Wno-error=deprecated-declarations
# Clang has an unfixed bug leading to spurious missing braces
# warnings, see https://bugs.llvm.org/show_bug.cgi?id=21629
-Wno-missing-braces
)
if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
list(APPEND private_compile_options
-Wno-range-loop-analysis)
else()
list(APPEND private_compile_options
# Considered to be flaky. See the discussion at
# https://github.com/pytorch/pytorch/pull/9608
-Wno-maybe-uninitialized)
endif()
# Considered to be flaky. See the discussion at
# https://github.com/pytorch/pytorch/pull/9608
-Wno-maybe-uninitialized)
endif()
if(MSVC)
elseif(WERROR)
list(APPEND private_compile_options -Wno-strict-overflow)
endif()
endif()
if(MSVC)
elseif(WERROR)
list(APPEND private_compile_options -Wno-strict-overflow)
endif()
target_compile_options(${libname} PRIVATE

View File

@ -59,30 +59,20 @@ echo "Android NDK version: $ANDROID_NDK_VERSION"
CMAKE_ARGS=()
if [ -z "${BUILD_CAFFE2_MOBILE:-}" ]; then
# Build PyTorch mobile
CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$($PYTHON -c 'import sysconfig; print(sysconfig.get_path("purelib"))')")
CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$($PYTHON -c 'import sys; print(sys.executable)')")
CMAKE_ARGS+=("-DBUILD_CUSTOM_PROTOBUF=OFF")
# custom build with selected ops
if [ -n "${SELECTED_OP_LIST}" ]; then
SELECTED_OP_LIST="$(cd $(dirname $SELECTED_OP_LIST); pwd -P)/$(basename $SELECTED_OP_LIST)"
echo "Choose SELECTED_OP_LIST file: $SELECTED_OP_LIST"
if [ ! -r ${SELECTED_OP_LIST} ]; then
echo "Error: SELECTED_OP_LIST file ${SELECTED_OP_LIST} not found."
exit 1
fi
CMAKE_ARGS+=("-DSELECTED_OP_LIST=${SELECTED_OP_LIST}")
# Build PyTorch mobile
CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$($PYTHON -c 'import sysconfig; print(sysconfig.get_path("purelib"))')")
CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$($PYTHON -c 'import sys; print(sys.executable)')")
CMAKE_ARGS+=("-DBUILD_CUSTOM_PROTOBUF=OFF")
# custom build with selected ops
if [ -n "${SELECTED_OP_LIST}" ]; then
SELECTED_OP_LIST="$(cd $(dirname $SELECTED_OP_LIST); pwd -P)/$(basename $SELECTED_OP_LIST)"
echo "Choose SELECTED_OP_LIST file: $SELECTED_OP_LIST"
if [ ! -r ${SELECTED_OP_LIST} ]; then
echo "Error: SELECTED_OP_LIST file ${SELECTED_OP_LIST} not found."
exit 1
fi
else
# Build Caffe2 mobile
CMAKE_ARGS+=("-DBUILD_CAFFE2_MOBILE=ON")
# Build protobuf from third_party so we have a host protoc binary.
echo "Building protoc"
$CAFFE2_ROOT/scripts/build_host_protoc.sh
# Use locally built protoc because we'll build libprotobuf for the
# target architecture and need an exact version match.
CMAKE_ARGS+=("-DCAFFE2_CUSTOM_PROTOC_EXECUTABLE=$CAFFE2_ROOT/build_host_protoc/bin/protoc")
CMAKE_ARGS+=("-DSELECTED_OP_LIST=${SELECTED_OP_LIST}")
fi
# If Ninja is installed, prefer it to Make

View File

@ -11,37 +11,24 @@ CAFFE2_ROOT="$( cd "$(dirname "$0")"/.. ; pwd -P)"
CMAKE_ARGS=()
if [ -z "${BUILD_CAFFE2_MOBILE:-}" ]; then
# Build PyTorch mobile
CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$(python -c 'import sysconfig; print(sysconfig.get_path("purelib"))')")
CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$(python -c 'import sys; print(sys.executable)')")
CMAKE_ARGS+=("-DBUILD_CUSTOM_PROTOBUF=OFF")
# custom build with selected ops
if [ -n "${SELECTED_OP_LIST}" ]; then
SELECTED_OP_LIST="$(cd $(dirname $SELECTED_OP_LIST); pwd -P)/$(basename $SELECTED_OP_LIST)"
echo "Choose SELECTED_OP_LIST file: $SELECTED_OP_LIST"
if [ ! -r ${SELECTED_OP_LIST} ]; then
echo "Error: SELECTED_OP_LIST file ${SELECTED_OP_LIST} not found."
exit 1
fi
CMAKE_ARGS+=("-DSELECTED_OP_LIST=${SELECTED_OP_LIST}")
# Build PyTorch mobile
CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$(python -c 'import sysconfig; print(sysconfig.get_path("purelib"))')")
CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$(python -c 'import sys; print(sys.executable)')")
CMAKE_ARGS+=("-DBUILD_CUSTOM_PROTOBUF=OFF")
# custom build with selected ops
if [ -n "${SELECTED_OP_LIST}" ]; then
SELECTED_OP_LIST="$(cd $(dirname $SELECTED_OP_LIST); pwd -P)/$(basename $SELECTED_OP_LIST)"
echo "Choose SELECTED_OP_LIST file: $SELECTED_OP_LIST"
if [ ! -r ${SELECTED_OP_LIST} ]; then
echo "Error: SELECTED_OP_LIST file ${SELECTED_OP_LIST} not found."
exit 1
fi
# bitcode
if [ "${ENABLE_BITCODE:-}" == '1' ]; then
CMAKE_ARGS+=("-DCMAKE_C_FLAGS=-fembed-bitcode")
CMAKE_ARGS+=("-DCMAKE_CXX_FLAGS=-fembed-bitcode")
fi
else
# Build Caffe2 mobile
CMAKE_ARGS+=("-DBUILD_CAFFE2_MOBILE=ON")
# Build protobuf from third_party so we have a host protoc binary.
echo "Building protoc"
BITCODE_FLAGS="-DCMAKE_C_FLAGS=-fembed-bitcode -DCMAKE_CXX_FLAGS=-fembed-bitcode "
$CAFFE2_ROOT/scripts/build_host_protoc.sh --other-flags $BITCODE_FLAGS
# Use locally built protoc because we'll build libprotobuf for the
# target architecture and need an exact version match.
CMAKE_ARGS+=("-DCAFFE2_CUSTOM_PROTOC_EXECUTABLE=$CAFFE2_ROOT/build_host_protoc/bin/protoc")
# Bitcode is enabled by default for caffe2
CMAKE_ARGS+=("-DSELECTED_OP_LIST=${SELECTED_OP_LIST}")
fi
# bitcode
if [ "${ENABLE_BITCODE:-}" == '1' ]; then
CMAKE_ARGS+=("-DCMAKE_C_FLAGS=-fembed-bitcode")
CMAKE_ARGS+=("-DCMAKE_CXX_FLAGS=-fembed-bitcode")
fi