pytorch/torch/csrc/cuda/comm.h
SsnL de7ac60cf4 Add out= variants for cuda.comm.broadcast/gather/scatter (#39681)
Summary:
Partially fixes https://github.com/pytorch/pytorch/issues/38911
Pull Request resolved: https://github.com/pytorch/pytorch/pull/39681

Differential Revision: D22161342

Pulled By: mrshenli

fbshipit-source-id: 60295077159b02087823e93bb6ebac9d70adea0a
2020-06-24 12:58:19 -07:00


#pragma once

#include <ATen/ATen.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Optional.h>
#include <torch/csrc/WindowsTorchApiMacro.h>

#include <cstddef>
#include <vector>

namespace torch {
namespace cuda {

using tensor_list2d = std::vector<std::vector<at::Tensor>>;

// Copies `tensor` into each element of `out_tensors`; the destinations must
// already be allocated on the target devices (the out= variant).
TORCH_CUDA_API std::vector<at::Tensor>& broadcast_out(
    const at::Tensor& tensor,
    std::vector<at::Tensor>& out_tensors);

// Broadcasts `tensor` to the given CUDA devices, allocating the copies.
TORCH_CUDA_API std::vector<at::Tensor> broadcast(
    const at::Tensor& tensor,
    at::IntArrayRef devices);

// Coalesces `tensors` into buffers of up to `buffer_size` bytes before
// broadcasting, reducing the number of per-tensor transfers.
TORCH_CUDA_API tensor_list2d broadcast_coalesced(
    at::TensorList tensors,
    at::IntArrayRef devices,
    size_t buffer_size);

// Splits `tensor` along `dim` and writes the chunks into `out_tensors`;
// `streams` optionally supplies the copy stream to use per destination.
TORCH_CUDA_API std::vector<at::Tensor>& scatter_out(
    const at::Tensor& tensor,
    std::vector<at::Tensor>& out_tensors,
    int64_t dim = 0,
    const c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>&
        streams = c10::nullopt);

// Splits `tensor` along `dim` (using `chunk_sizes` if given, else evenly)
// and copies the chunks to `devices`.
TORCH_CUDA_API std::vector<at::Tensor> scatter(
    const at::Tensor& tensor,
    at::IntArrayRef devices,
    const c10::optional<std::vector<int64_t>>& chunk_sizes = c10::nullopt,
    int64_t dim = 0,
    const c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>&
        streams = c10::nullopt);

// Concatenates `tensors` along `dim` into the preallocated `out_tensor`
// (the out= variant).
TORCH_CUDA_API at::Tensor& gather_out(
    at::TensorList tensors,
    at::Tensor& out_tensor,
    int64_t dim);

// Concatenates `tensors` along `dim` onto the device given by
// `destination_index` (c10::nullopt or -1 gathers to the CPU).
TORCH_CUDA_API at::Tensor gather(
    at::TensorList tensors,
    int64_t dim,
    c10::optional<int32_t> destination_index);

} // namespace cuda
} // namespace torch
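
For context, a minimal sketch of how these entry points might be exercised from C++ against libtorch. The shapes, device indices, and the assumption of at least two visible CUDA devices are illustrative only, not part of the header:

```cpp
#include <ATen/ATen.h>
#include <torch/csrc/cuda/comm.h>

#include <iostream>
#include <vector>

int main() {
  // Source tensor on the current CUDA device (device 0 here).
  at::Tensor src = at::randn({4, 8}, at::device(at::kCUDA).dtype(at::kFloat));

  // broadcast: allocates a copy on each listed device.
  std::vector<at::Tensor> copies = torch::cuda::broadcast(src, {0, 1});

  // broadcast_out: the out= variant reuses preallocated destinations.
  std::vector<at::Tensor> out = {
      at::empty_like(src, src.options().device(at::Device(at::kCUDA, 0))),
      at::empty_like(src, src.options().device(at::Device(at::kCUDA, 1)))};
  torch::cuda::broadcast_out(src, out);

  // scatter: split along dim 0 across both devices (even chunks, since
  // chunk_sizes is nullopt), then gather the chunks back onto device 0.
  std::vector<at::Tensor> chunks =
      torch::cuda::scatter(src, {0, 1}, c10::nullopt, /*dim=*/0);
  at::Tensor regathered =
      torch::cuda::gather(chunks, /*dim=*/0, /*destination_index=*/0);

  std::cout << "gathered shape: " << regathered.sizes() << std::endl;
  return 0;
}
```

The out= variants (`broadcast_out`, `scatter_out`, `gather_out`) added by this commit write into caller-provided tensors instead of allocating new ones, which lets callers reuse buffers across iterations.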