Revert D28875276: Move RPC agents to libtorch

Test Plan: revert-hammer

Differential Revision: D28875276 (fc50f91929)

Original commit changeset: f2f6970fd74d

fbshipit-source-id: 3c52af652579733ebea8ddfb06576a0ce262bf78
This commit is contained in:
Mike Ruberry 2021-06-17 00:47:42 -07:00 committed by Facebook GitHub Bot
parent e5c99d9908
commit f233274f30
14 changed files with 128 additions and 86 deletions

View File

@ -1728,7 +1728,7 @@ cc_library(
],
[
":aten",
"@tensorpipe//:tensorpipe_cpu",
"@tensorpipe",
],
),
alwayslink = True,

View File

@ -344,6 +344,53 @@ endif()
if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
if(USE_DISTRIBUTED)
# Define this target even if we're building without TensorPipe, to make life
# easier for other targets that depend on this. However, in that case, by not
# setting the USE_TENSORPIPE compile definition, this target will just end
# up being empty. Downstream targets should also add an #ifdef guard.
if(NOT WIN32)
add_library(process_group_agent
"${TORCH_SRC_DIR}/csrc/distributed/rpc/agent_utils.cpp"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/agent_utils.h"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/process_group_agent.cpp"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/process_group_agent.h"
)
target_link_libraries(process_group_agent PRIVATE torch fmt::fmt-header-only)
add_dependencies(process_group_agent torch)
if(USE_TENSORPIPE)
add_library(tensorpipe_agent
"${TORCH_SRC_DIR}/csrc/distributed/rpc/agent_utils.cpp"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/agent_utils.h"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/macros.h"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/tensorpipe_agent.cpp"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/tensorpipe_agent.h"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/tensorpipe_cuda.cpp"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/tensorpipe_utils.cpp"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/tensorpipe_utils.h"
)
target_link_libraries(tensorpipe_agent PRIVATE torch tensorpipe fmt::fmt-header-only)
add_dependencies(tensorpipe_agent torch)
if(USE_CUDA)
target_compile_definitions(tensorpipe_agent PUBLIC USE_CUDA)
endif()
if(USE_ROCM)
target_compile_definitions(tensorpipe_agent PRIVATE
USE_ROCM
__HIP_PLATFORM_HCC__
)
endif()
target_compile_definitions(tensorpipe_agent PUBLIC USE_TENSORPIPE)
target_link_libraries(tensorpipe_agent PRIVATE tensorpipe)
add_dependencies(tensorpipe_agent tensorpipe)
endif()
endif()
endif()
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
# Generate files
@ -1189,7 +1236,7 @@ endif()
if(USE_DISTRIBUTED)
# Needed to support the inclusion of c10d/Foo.hpp headers.
target_include_directories(torch_cpu PUBLIC ${TORCH_SRC_DIR}/lib)
target_compile_definitions(torch_cpu PUBLIC USE_DISTRIBUTED)
target_compile_definitions(torch_cpu PRIVATE USE_DISTRIBUTED)
if(USE_GLOO AND USE_C10D_GLOO)
target_compile_definitions(torch_cpu PUBLIC USE_C10D_GLOO)
endif()
@ -1216,12 +1263,16 @@ if(USE_DISTRIBUTED)
# #if defined(USE_DISTRIBUTED) && !defined(_WIN32)
# need to be removed when RPC is supported
if(NOT WIN32)
target_compile_definitions(torch_cpu PUBLIC USE_RPC)
target_compile_definitions(torch_cpu PRIVATE
USE_RPC
)
endif()
# Pass USE_TENSORPIPE to torch_cpu as some parts of rpc/utils.cpp
# can only be compiled when USE_TENSORPIPE is set.
if(USE_TENSORPIPE)
target_compile_definitions(torch_cpu PUBLIC USE_TENSORPIPE)
target_compile_definitions(torch_cpu PRIVATE
USE_TENSORPIPE
)
endif()
endif()

View File

@ -1377,13 +1377,6 @@ if(USE_DISTRIBUTED AND USE_TENSORPIPE)
add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/tensorpipe)
list(APPEND Caffe2_DEPENDENCY_LIBS tensorpipe)
if(USE_CUDA)
list(APPEND Caffe2_CUDA_DEPENDENCY_LIBS tensorpipe_cuda)
elseif(USE_ROCM)
message(WARNING "TensorPipe doesn't yet support ROCm")
# Not yet...
# list(APPEND Caffe2_HIP_DEPENDENCY_LIBS tensorpipe_hip)
endif()
endif()
endif()

View File

@ -5,7 +5,7 @@ set(TORCH_RPC_TEST_SOURCES
${TORCH_RPC_TEST_DIR}/test_wire_serialization.cpp
)
set(TORCH_RPC_TEST_DEPENDENCY_LIBS
torch gtest
torch gtest process_group_agent
)
if(USE_GLOO)
@ -20,7 +20,7 @@ if(USE_TENSORPIPE)
${TORCH_RPC_TEST_DIR}/test_tensorpipe_serialization.cpp
)
list(APPEND TORCH_RPC_TEST_DEPENDENCY_LIBS
tensorpipe
tensorpipe_agent tensorpipe
)
endif()

@ -1 +1 @@
Subproject commit c0e7623adb05f36311c7cde6dac8fc4c290419d9
Subproject commit 42a67277c1882c90cec0da6e57afb20247424994

View File

@ -71,82 +71,63 @@ cc_library(
)
header_template_rule(
name = "tensorpipe_cpu_config_header",
name = "tensorpipe_config_header",
src = "tensorpipe/config.h.in",
out = "tensorpipe/config.h",
substitutions = {
"#cmakedefine01 TENSORPIPE_HAS_SHM_TRANSPORT": "#define TENSORPIPE_HAS_SHM_TRANSPORT 1",
"#cmakedefine01 TENSORPIPE_HAS_IBV_TRANSPORT": "#define TENSORPIPE_HAS_IBV_TRANSPORT 1",
"#cmakedefine01 TENSORPIPE_HAS_CMA_CHANNEL": "#define TENSORPIPE_HAS_CMA_CHANNEL 1",
"#cmakedefine01 TENSORPIPE_HAS_SHM_TRANSPORT": "",
"#cmakedefine01 TENSORPIPE_HAS_CMA_CHANNEL": "",
"#cmakedefine01 TENSORPIPE_HAS_CUDA_IPC_CHANNEL": "",
"#cmakedefine01 TENSORPIPE_HAS_CUDA_GDR_CHANNEL": "",
"#cmakedefine01 TENSORPIPE_HAS_IBV_TRANSPORT": "",
"#cmakedefine01 TENSORPIPE_SUPPORTS_CUDA": "",
},
)
header_template_rule(
name = "tensorpipe_cuda_config_header",
src = "tensorpipe/config_cuda.h.in",
out = "tensorpipe/config_cuda.h",
substitutions = {
"#cmakedefine01 TENSORPIPE_HAS_CUDA_IPC_CHANNEL": "#define TENSORPIPE_HAS_CUDA_IPC_CHANNEL 1",
"#cmakedefine01 TENSORPIPE_HAS_CUDA_GDR_CHANNEL": "#define TENSORPIPE_HAS_CUDA_GDR_CHANNEL 1",
},
)
TENSORPIPE_HEADERS = glob([
"tensorpipe/*.h",
"tensorpipe/channel/*.h",
"tensorpipe/channel/*/*.h",
"tensorpipe/common/*.h",
"tensorpipe/core/*.h",
"tensorpipe/transport/*.h",
"tensorpipe/transport/*/*.h",
"tensorpipe/util/*/*.h",
])
# We explicitly list the CUDA headers & sources, and we consider everything else
# as CPU (using a catch-all glob). This is both because there are fewer CUDA files
# (thus making it easier to list them exhaustively) and because it will make it
# more likely to catch a misclassified file: if we forget to mark a file as CUDA
# we'll try to build it on CPU and that's likely to fail.
TENSORPIPE_BASE_SRCS = glob([
"tensorpipe/*.cc",
"tensorpipe/channel/*.cc",
"tensorpipe/common/address.cc",
"tensorpipe/common/epoll_loop.cc",
"tensorpipe/common/error.cc",
"tensorpipe/common/fd.cc",
"tensorpipe/common/ibv.cc",
"tensorpipe/common/socket.cc",
"tensorpipe/common/system.cc",
"tensorpipe/core/*.cc",
"tensorpipe/transport/*.cc",
"tensorpipe/util/*/*.cc",
])
TENSORPIPE_CUDA_HEADERS = [
"tensorpipe/tensorpipe_cuda.h",
"tensorpipe/channel/cuda_basic/*.h",
"tensorpipe/channel/cuda_gdr/*.h",
"tensorpipe/channel/cuda_ipc/*.h",
"tensorpipe/channel/cuda_xth/*.h",
"tensorpipe/common/cuda.h",
"tensorpipe/common/cuda_buffer.h",
"tensorpipe/common/cuda_lib.h",
"tensorpipe/common/cuda_loop.h",
"tensorpipe/common/nvml_lib.h",
]
TENSORPIPE_SRCS = TENSORPIPE_BASE_SRCS + glob([
"tensorpipe/channel/basic/*.cc",
"tensorpipe/channel/mpt/*.cc",
"tensorpipe/channel/xth/*.cc",
"tensorpipe/transport/uv/*.cc",
])
TENSORPIPE_CUDA_SOURCES = [
TENSORPIPE_SRCS_CUDA = TENSORPIPE_SRCS + glob([
"tensorpipe/common/cuda_loop.cc",
"tensorpipe/channel/cuda_basic/*.cc",
"tensorpipe/channel/cuda_gdr/*.cc",
"tensorpipe/channel/cuda_ipc/*.cc",
"tensorpipe/channel/cuda_xth/*.cc",
"tensorpipe/common/cuda_buffer.cc",
"tensorpipe/common/cuda_loop.cc",
]
TENSORPIPE_CPU_HEADERS = glob(
[
"tensorpipe/*.h",
"tensorpipe/channel/*.h",
"tensorpipe/channel/*/*.h",
"tensorpipe/common/*.h",
"tensorpipe/core/*.h",
"tensorpipe/transport/*.h",
"tensorpipe/transport/*/*.h",
],
exclude=TENSORPIPE_CUDA_HEADERS)
TENSORPIPE_CPU_SOURCES = glob(
[
"tensorpipe/*.cc",
"tensorpipe/channel/*.cc",
"tensorpipe/channel/*/*.cc",
"tensorpipe/common/*.cc",
"tensorpipe/core/*.cc",
"tensorpipe/transport/*.cc",
"tensorpipe/transport/*/*.cc",
],
exclude=TENSORPIPE_CUDA_SOURCES)
])
cc_library(
name = "tensorpipe_cpu",
srcs = TENSORPIPE_CPU_SOURCES,
hdrs = TENSORPIPE_CPU_HEADERS + [":tensorpipe_cpu_config_header"],
name = "tensorpipe",
srcs = TENSORPIPE_SRCS + [":tensorpipe_config_header"],
hdrs = TENSORPIPE_HEADERS,
includes = [
".",
],
@ -162,8 +143,8 @@ cc_library(
cc_library(
name = "tensorpipe_cuda",
srcs = TENSORPIPE_CUDA_SOURCES,
hdrs = TENSORPIPE_CUDA_HEADERS + [":tensorpipe_cuda_config_header"],
srcs = TENSORPIPE_SRCS_CUDA + [":tensorpipe_config_header"],
hdrs = TENSORPIPE_HEADERS,
includes = [
".",
],
@ -172,7 +153,8 @@ cc_library(
],
visibility = ["//visibility:public"],
deps = [
":tensorpipe_cpu",
":libnop",
":libuv",
"@cuda",
],
)

View File

@ -356,14 +356,12 @@ libtorch_distributed_extra_sources = [
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.cpp",
"torch/csrc/distributed/rpc/agent_utils.cpp",
"torch/csrc/distributed/rpc/message.cpp",
"torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp",
"torch/csrc/distributed/rpc/profiler/server_process_global_profiler.cpp",
"torch/csrc/distributed/rpc/python_call.cpp",
"torch/csrc/distributed/rpc/python_remote_call.cpp",
"torch/csrc/distributed/rpc/python_resp.cpp",
"torch/csrc/distributed/rpc/process_group_agent.cpp",
"torch/csrc/distributed/rpc/request_callback.cpp",
"torch/csrc/distributed/rpc/request_callback_no_python.cpp",
"torch/csrc/distributed/rpc/rpc_agent.cpp",
@ -373,9 +371,6 @@ libtorch_distributed_extra_sources = [
"torch/csrc/distributed/rpc/script_call.cpp",
"torch/csrc/distributed/rpc/script_remote_call.cpp",
"torch/csrc/distributed/rpc/script_resp.cpp",
"torch/csrc/distributed/rpc/tensorpipe_agent.cpp",
"torch/csrc/distributed/rpc/tensorpipe_utils.cpp",
"torch/csrc/distributed/rpc/testing/faulty_process_group_agent.cpp",
"torch/csrc/distributed/rpc/torchscript_functions.cpp",
"torch/csrc/distributed/rpc/types.cpp",
"torch/csrc/distributed/rpc/utils.cpp",
@ -531,7 +526,6 @@ libtorch_cuda_distributed_base_sources = [
# These files are only supported on Linux (and others) but not on Windows.
libtorch_cuda_distributed_extra_sources = [
"torch/csrc/distributed/rpc/tensorpipe_cuda.cpp",
"torch/lib/c10d/NCCLUtils.cpp",
"torch/lib/c10d/ProcessGroupNCCL.cpp",
]
@ -720,11 +714,17 @@ libtorch_python_distributed_core_sources = [
libtorch_python_distributed_sources = libtorch_python_distributed_core_sources + [
"torch/csrc/distributed/autograd/init.cpp",
"torch/csrc/distributed/rpc/agent_utils.cpp",
"torch/csrc/distributed/rpc/init.cpp",
"torch/csrc/distributed/rpc/process_group_agent.cpp",
"torch/csrc/distributed/rpc/py_rref.cpp",
"torch/csrc/distributed/rpc/python_functions.cpp",
"torch/csrc/distributed/rpc/python_rpc_handler.cpp",
"torch/csrc/distributed/rpc/request_callback_impl.cpp",
"torch/csrc/distributed/rpc/tensorpipe_agent.cpp",
"torch/csrc/distributed/rpc/tensorpipe_cuda.cpp",
"torch/csrc/distributed/rpc/tensorpipe_utils.cpp",
"torch/csrc/distributed/rpc/testing/faulty_process_group_agent.cpp",
"torch/csrc/distributed/rpc/testing/init.cpp",
"torch/csrc/distributed/rpc/unpickled_python_call.cpp",
"torch/csrc/distributed/rpc/unpickled_python_remote_call.cpp",

View File

@ -261,9 +261,11 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
endif()
if(USE_DISTRIBUTED)
list(APPEND TORCH_PYTHON_COMPILE_DEFINITIONS USE_DISTRIBUTED)
if(WIN32)
append_filelist("libtorch_python_distributed_core_sources" TORCH_PYTHON_SRCS)
else()
list(APPEND TORCH_PYTHON_COMPILE_DEFINITIONS USE_RPC)
append_filelist("libtorch_python_distributed_sources" TORCH_PYTHON_SRCS)
endif()
# Disable certain warnings for GCC-9.X
@ -272,6 +274,10 @@ if(USE_DISTRIBUTED)
set_source_files_properties(${TORCH_SRC_DIR}/csrc/distributed/rpc/testing/init.cpp PROPERTIES COMPILE_FLAGS "-Wno-cast-function-type")
set_source_files_properties(${TORCH_SRC_DIR}/csrc/distributed/c10d/init.cpp PROPERTIES COMPILE_FLAGS "-Wno-cast-function-type")
endif()
if(USE_TENSORPIPE)
list(APPEND TORCH_PYTHON_LINK_LIBRARIES tensorpipe)
list(APPEND TORCH_PYTHON_PUBLIC_COMPILE_DEFINITIONS USE_TENSORPIPE)
endif()
# NCCL is a private dependency of libtorch, but libtorch_python includes
# some private headers of libtorch, which in turn include NCCL. As a hacky
# alternative to making NCCL a public dependency of libtorch, we make it

View File

@ -0,0 +1,5 @@
#pragma once
#if defined(USE_CUDA) && !defined(__HIP_PLATFORM_HCC__)
#define USE_CUDA_NOT_ROCM
#endif

View File

@ -10,6 +10,7 @@
#include <tensorpipe/tensorpipe.h>
#include <torch/csrc/distributed/rpc/agent_utils.h>
#include <torch/csrc/distributed/rpc/macros.h>
#include <torch/csrc/distributed/rpc/tensorpipe_utils.h>
#include <torch/csrc/distributed/rpc/utils.h>

View File

@ -9,6 +9,7 @@
#include <c10d/PrefixStore.hpp>
#include <c10d/ProcessGroup.hpp>
#include <c10d/Store.hpp>
#include <torch/csrc/distributed/rpc/macros.h>
#include <torch/csrc/distributed/rpc/rpc_agent.h>
// Forward-declare the TensorPipe classes we need, to avoid including its

View File

@ -1,7 +1,8 @@
#include <torch/csrc/distributed/rpc/macros.h>
#include <torch/csrc/distributed/rpc/tensorpipe_agent.h>
#include <torch/csrc/distributed/rpc/tensorpipe_utils.h>
#if defined(USE_TENSORPIPE) && !defined(__HIP_PLATFORM_HCC__)
#if defined(USE_TENSORPIPE) && defined(USE_CUDA_NOT_ROCM)
#include <c10/cuda/CUDACachingAllocator.h>
#include <c10/cuda/CUDAGuard.h>

View File

@ -1,3 +1,4 @@
#include <torch/csrc/distributed/rpc/macros.h>
#include <torch/csrc/distributed/rpc/tensorpipe_utils.h>
#ifdef USE_TENSORPIPE

View File

@ -2,6 +2,7 @@
#ifdef USE_TENSORPIPE
#include <torch/csrc/distributed/rpc/macros.h>
#include <torch/csrc/distributed/rpc/utils.h>
namespace tensorpipe {