Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/29158

My plan is to split libtorch_cuda.so out of libtorch.so. To do this, I need accurate _API annotations for the files in these directories. I determined the correct set of annotations by looking at tools/build_variables.py and making sure every file that is a member of the libtorch_cuda/ATen-cu targets has these annotations. (torch-cpp-cuda doesn't count, since that is where the code guarded by explicit USE_CUDA will live, so it will go into a separate dynamic library.)

As future work, it would be good to set up a lint rule that helps people figure out which _API annotation a file should use; it would also be good to reorganize the folder structure so that the library structure is clearer.

Signed-off-by: Edward Z. Yang <ezyang@fb.com>

Test Plan: Imported from OSS

Differential Revision: D18309593

Pulled By: ezyang

fbshipit-source-id: de710e721b6013a09dad17b35f9a358c95a91030
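For context, annotations such as TORCH_CUDA_API normally follow the standard export-macro pattern sketched below. This is a hedged illustration, not PyTorch's actual definition: the BUILDING_TORCH_CUDA guard is a placeholder name for "this translation unit is being compiled into libtorch_cuda", not necessarily the flag the real build defines.

#if defined(_WIN32)
#  if defined(BUILDING_TORCH_CUDA)   // placeholder: set while building libtorch_cuda itself
#    define TORCH_CUDA_API __declspec(dllexport)
#  else                              // consuming libtorch_cuda from another DLL
#    define TORCH_CUDA_API __declspec(dllimport)
#  endif
#else
#  define TORCH_CUDA_API __attribute__((visibility("default")))
#endif

Every symbol in the header that must remain callable from outside libtorch_cuda then carries the TORCH_CUDA_API prefix, as the declarations below show.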
87 lines
2.1 KiB
C++
#pragma once

#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <c10/util/Optional.h>

#include <nccl.h>

#include <cstddef>
#include <vector>

namespace torch {
namespace cuda {
namespace nccl {

// NOTE: this is exposed only so that python_nccl.cpp can use some of these helpers.
// Don't use them outside of these files.
namespace detail {

void throw_nccl_error(ncclResult_t status);

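// Checks the result of an NCCL call and throws if it is not ncclSuccess.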
static inline void NCCL_CHECK(ncclResult_t status) {
  if (status != ncclSuccess) {
    throw_nccl_error(status);
  }
}

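// RAII guard: holds the CUDA caching allocator's free mutex for its lifetime
// and, when built against NCCL >= 2, wraps the enclosed calls in
// ncclGroupStart()/ncclGroupEnd().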
struct AutoNcclGroup {
  AutoNcclGroup() {
    (c10::cuda::CUDACachingAllocator::getFreeMutex())->lock();
#if defined(NCCL_MAJOR) && (NCCL_MAJOR >= 2)
    NCCL_CHECK(ncclGroupStart());
#endif
  }
  ~AutoNcclGroup() {
#if defined(NCCL_MAJOR) && (NCCL_MAJOR >= 2)
    NCCL_CHECK(ncclGroupEnd());
#endif
    (c10::cuda::CUDACachingAllocator::getFreeMutex())->unlock();
  }
};

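// Helpers shared with python_nccl.cpp: look up the NCCL communicators for a
// set of per-device tensors, validate the inputs/outputs of a collective, and
// map a tensor's scalar type to the matching ncclDataType_t.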
TORCH_CUDA_API at::ArrayRef<ncclComm_t> get_communicators(at::TensorList inputs);
TORCH_CUDA_API void check_inputs(
    at::TensorList inputs,
    at::TensorList outputs,
    int input_multiplier,
    int output_multiplier);
TORCH_CUDA_API ncclDataType_t get_data_type(const at::Tensor& t);

} // namespace detail

using comm_list = std::vector<ncclComm_t>;
using stream_list = std::vector<c10::optional<at::cuda::CUDAStream>>;

TORCH_CUDA_API std::uint64_t version();

bool is_available(at::TensorList tensors);

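// Broadcasts the tensor on the root device (the first entry of `tensors`) to
// the remaining tensors in place, one tensor per participating device.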
TORCH_CUDA_API void broadcast(
    at::TensorList tensors,
    const stream_list& streams = {},
    const comm_list& user_comms = {});

size_t get_max_count();

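// Reduces the per-device `inputs` with `op` (ncclSum by default); the first
// overload writes the result to outputs[root], the second reduces into
// inputs[root] in place.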
TORCH_CUDA_API void reduce(
    const std::vector<at::Tensor>& inputs,
    std::vector<at::Tensor>& outputs,
    int32_t root = 0,
    int32_t op = ncclSum,
    const stream_list& streams = {},
    const comm_list& user_comms = {});

TORCH_CUDA_API void reduce(
    std::vector<at::Tensor>& inputs,
    int32_t root = 0,
    int32_t op = ncclSum,
    const stream_list& streams = {},
    const comm_list& user_comms = {});

} // namespace nccl
} // namespace cuda
} // namespace torch
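As a rough usage sketch (not part of the header or this commit; the 1024-element float tensors and the helper function name are made up for illustration), broadcasting data from GPU 0 to every visible device through this API could look like:

// Sketch only: assumes a CUDA build with NCCL support and at least one GPU.
#include <torch/csrc/cuda/nccl.h>

#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>

#include <vector>

void broadcast_from_gpu0() {
  std::vector<at::Tensor> tensors;
  const int64_t n = at::cuda::device_count();
  for (int64_t dev = 0; dev < n; ++dev) {
    auto options = at::TensorOptions()
        .device(at::Device(at::kCUDA, static_cast<c10::DeviceIndex>(dev)))
        .dtype(at::kFloat);
    // Only the copy on GPU 0 holds real data; the rest are destinations.
    tensors.push_back(dev == 0 ? at::arange(1024, options)
                               : at::empty({1024}, options));
  }
  // One tensor per device; the first entry is the broadcast source.
  torch::cuda::nccl::broadcast(tensors);
}

Leaving the streams and user_comms arguments at their defaults lets the implementation choose the streams and reuse the communicators it obtains through detail::get_communicators.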