Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/29158

My plan is to split out libtorch_cuda.so from libtorch.so. To do this, I need accurate _API annotations for files in these directories. I determined the correct set of annotations by looking at tools/build_variables.py and making sure every file that was a member of the libtorch_cuda/ATen-cu targets had these annotations. (torch-cpp-cuda doesn't count, since that is where the code with explicit USE_CUDA lives, so it will end up in a separate dynamic library.)

As future work, it would be good to set up a lint rule to help people understand which _API annotation to use in a given file; it would also be good to reorganize the folder structure so that the library structure is clearer.

Signed-off-by: Edward Z. Yang <ezyang@fb.com>

Test Plan: Imported from OSS

Differential Revision: D18309593

Pulled By: ezyang

fbshipit-source-id: de710e721b6013a09dad17b35f9a358c95a91030
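For context on the _API annotations the commit message refers to: TORCH_CUDA_API is the export/import macro applied to symbols that must cross the libtorch_cuda.so boundary. The real definition lives in torch/csrc/WindowsTorchApiMacro.h (built on the C10 visibility macros) and differs in detail; the following is only a minimal sketch of how such a per-library macro is conventionally structured, with torch_cuda_EXPORTS standing in as an assumed CMake-style define set while compiling the library itself.

// Minimal sketch of a per-library export macro; NOT the actual PyTorch
// definition. `torch_cuda_EXPORTS` is an assumed compile-time define
// present only while building libtorch_cuda itself.
#if defined(_WIN32)
#  if defined(torch_cuda_EXPORTS)
#    define TORCH_CUDA_API __declspec(dllexport)  // building the library
#  else
#    define TORCH_CUDA_API __declspec(dllimport)  // consuming the library
#  endif
#else
#  define TORCH_CUDA_API __attribute__((visibility("default")))
#endif

Annotating a class with the wrong macro (e.g. TORCH_API on a symbol defined in libtorch_cuda) would import/export it against the wrong library, which is why the commit audits every file in the libtorch_cuda/ATen-cu targets.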
#pragma once

#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/WindowsTorchApiMacro.h>

#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/ATenCUDAGeneral.h>

#include <cstddef>
#include <vector>

namespace torch {
namespace autograd {

// Autograd node that scatters its input across `devices_`, splitting it
// along `dim_` (into `chunk_sizes_` if given, else into equal chunks),
// optionally using one CUDA stream per device for the copies.
struct TORCH_CUDA_API Scatter : public Node {
  explicit Scatter(
      std::vector<at::Device> devices,
      const c10::optional<std::vector<int64_t>>& chunk_sizes = c10::nullopt,
      int64_t dim = 0,
      const c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>& streams =
          c10::nullopt,
      bool unsqueeze_scalars = false);
  ~Scatter() override;

  variable_list apply(variable_list&& inputs) override;

  std::vector<at::Device> devices_;
  c10::optional<std::vector<int64_t>> chunk_sizes_;
  int64_t dim_;
  c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams_;
  bool unsqueeze_scalars_;
};

// Autograd node that gathers its inputs onto `destination_device_`,
// concatenating them along `dim_`.
struct TORCH_CUDA_API Gather : public Node {
  explicit Gather(const at::Device& destination_device, int64_t dim = 0);
  ~Gather() override;

  variable_list apply(variable_list&& inputs) override;

  at::Device destination_device_;
  int64_t dim_;
};

} // namespace autograd
} // namespace torch
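For illustration, a hypothetical sketch of how these two nodes fit together. In PyTorch they are normally constructed by the data-parallel communication utilities rather than by hand, so everything below (the include path, the device indices, calling apply() directly) is an assumption for demonstration purposes only.

// Hypothetical usage sketch; assumes at least two visible CUDA devices
// and that this header is torch/csrc/autograd/functions/comm.h.
#include <torch/csrc/autograd/functions/comm.h>

void scatter_gather_example(const torch::autograd::Variable& input) {
  using namespace torch::autograd;

  // Scatter `input` into equal chunks along dim 0, one chunk per device.
  std::vector<at::Device> devices = {
      at::Device(at::kCUDA, 0), at::Device(at::kCUDA, 1)};
  Scatter scatter(devices);
  variable_list chunks = scatter.apply({input});

  // Gather the chunks back onto device 0, concatenating along dim 0.
  Gather gather(at::Device(at::kCUDA, 0), /*dim=*/0);
  variable_list gathered = gather.apply(std::move(chunks));
  (void)gathered;
}

Note that invoking apply() directly, as above, bypasses the graph bookkeeping that Node's call operator performs; it is shown here only to make the input/output shapes of the two nodes concrete.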