Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-06 12:20:52 +01:00)
Renaming CAFFE2_API to TORCH_API (#49496)
Summary:
Since caffe2 and torch have been consolidated, CAFFE2_API should be merged with TORCH_API. Addresses a TODO.

Manually edited some references of the removed `CAFFE2_API`:
* `CONTRIBUTING.md`
* `caffe2/proto/CMakeLists.txt`
* `cmake/ProtoBuf.cmake`
* `c10/macros/Export.h`
* `torch/csrc/WindowsTorchApiMacro.h`

Pull Request resolved: https://github.com/pytorch/pytorch/pull/49496

Reviewed By: malfet, samestep

Differential Revision: D25600726

Pulled By: janeyx99

fbshipit-source-id: 7e068d959e397ac183c097d7e9a9afeca5ddd782
parent c9e052130a
commit 71ca600af9
@@ -754,7 +754,7 @@ than Linux, which are worth keeping in mind when fixing these problems.
 1. Symbols are NOT exported by default on Windows; instead, you have to explicitly
 mark a symbol as exported/imported in a header file with `__declspec(dllexport)` /
 `__declspec(dllimport)`. We have codified this pattern into a set of macros
-which follow the convention `*_API`, e.g., `CAFFE2_API` inside Caffe2 and ATen.
+which follow the convention `*_API`, e.g., `TORCH_API` inside Caffe2, Aten and Torch.
 (Every separate shared library needs a unique macro name, because symbol visibility
 is on a per shared library basis. See c10/macros/Macros.h for more details.)
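For readers unfamiliar with the `*_API` convention described in the CONTRIBUTING.md hunk above, the sketch below shows the general dllexport/dllimport pattern such a macro encodes. The names `MYLIB_API`, `MYLIB_EXPORT`, `MYLIB_IMPORT`, and `MYLIB_BUILD_MAIN_LIB` are placeholders for illustration only; the actual PyTorch definitions live in `c10/macros/Export.h` and `torch/csrc/WindowsTorchApiMacro.h`.

```cpp
// Illustrative sketch of how a *_API export macro is typically defined.
// Placeholder names, not the exact PyTorch macros.
#if defined(_WIN32)
#  define MYLIB_EXPORT __declspec(dllexport)
#  define MYLIB_IMPORT __declspec(dllimport)
#else
// On non-Windows platforms, "default" visibility marks the symbol as exported.
#  define MYLIB_EXPORT __attribute__((visibility("default")))
#  define MYLIB_IMPORT
#endif

// While building the library itself we export; consumers of the header import.
#if defined(MYLIB_BUILD_MAIN_LIB)
#  define MYLIB_API MYLIB_EXPORT
#else
#  define MYLIB_API MYLIB_IMPORT
#endif

// Usage: annotate every symbol that must be visible across the shared-library boundary.
class MYLIB_API Widget {
 public:
  void frob();
};
```

Because symbol visibility is per shared library, each library needs its own macro of this shape, which is why the rename below touches so many headers.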
@@ -7,7 +7,7 @@
 namespace at {

-struct CAFFE2_API CPUGeneratorImpl : public c10::GeneratorImpl {
+struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
 // Constructors
 CPUGeneratorImpl(uint64_t seed_in = default_rng_seed_val);
 ~CPUGeneratorImpl() = default;
@@ -36,8 +36,8 @@ private:
 namespace detail {

-CAFFE2_API const Generator& getDefaultCPUGenerator();
-CAFFE2_API Generator createCPUGenerator(uint64_t seed_val = default_rng_seed_val);
+TORCH_API const Generator& getDefaultCPUGenerator();
+TORCH_API Generator createCPUGenerator(uint64_t seed_val = default_rng_seed_val);

 } // namespace detail
@@ -21,7 +21,7 @@ namespace at {
 class Tensor;

-class CAFFE2_API Context {
+class TORCH_API Context {
 public:
 Context();
@@ -225,13 +225,13 @@ class CAFFE2_API Context {
 std::unique_ptr<THHState, void(*)(THHState*)> thh_state;
 };

-CAFFE2_API Context& globalContext();
+TORCH_API Context& globalContext();

 static inline void init() {
 globalContext();
 }

-CAFFE2_API Allocator* getCPUAllocator();
+TORCH_API Allocator* getCPUAllocator();

 static inline DeprecatedTypeProperties& getDeprecatedTypeProperties(Backend p, ScalarType s) {
 return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
@@ -10,10 +10,10 @@
 namespace at {

-CAFFE2_API ScalarType toScalarType(const DLDataType& dtype);
-CAFFE2_API DLManagedTensor* toDLPack(const Tensor& src);
-CAFFE2_API Tensor fromDLPack(const DLManagedTensor* src);
-CAFFE2_API DLDataType getDLDataType(const Tensor& t);
-CAFFE2_API DLContext getDLContext(const Tensor& tensor, const int64_t& device_id);
+TORCH_API ScalarType toScalarType(const DLDataType& dtype);
+TORCH_API DLManagedTensor* toDLPack(const Tensor& src);
+TORCH_API Tensor fromDLPack(const DLManagedTensor* src);
+TORCH_API DLDataType getDLDataType(const Tensor& t);
+TORCH_API DLContext getDLContext(const Tensor& tensor, const int64_t& device_id);

 } //namespace at
@@ -8,11 +8,11 @@ namespace at {
 struct DynamicLibrary {
 AT_DISALLOW_COPY_AND_ASSIGN(DynamicLibrary);

-CAFFE2_API DynamicLibrary(const char* name);
+TORCH_API DynamicLibrary(const char* name);

-CAFFE2_API void* sym(const char* name);
+TORCH_API void* sym(const char* name);

-CAFFE2_API ~DynamicLibrary();
+TORCH_API ~DynamicLibrary();

 private:
 void* handle = nullptr;
@@ -9,14 +9,14 @@
 namespace at {

-CAFFE2_API std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b);
-CAFFE2_API std::tuple<std::vector<int64_t>, std::vector<int64_t>>
+TORCH_API std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b);
+TORCH_API std::tuple<std::vector<int64_t>, std::vector<int64_t>>
 inferExpandGeometry(
 IntArrayRef tensor_sizes,
 IntArrayRef tensor_strides,
 IntArrayRef sizes);

-CAFFE2_API std::vector<int64_t> infer_dense_strides(
+TORCH_API std::vector<int64_t> infer_dense_strides(
 IntArrayRef tensor_sizes,
 IntArrayRef tensor_strides);
@@ -15,19 +15,19 @@ enum class MemOverlap { NO, YES, TOO_HARD };
 enum class MemOverlapStatus { FULL, PARTIAL, NO, TOO_HARD };

-CAFFE2_API MemOverlap has_internal_overlap(const Tensor& t);
-CAFFE2_API MemOverlap has_internal_overlap(TensorImpl* t);
+TORCH_API MemOverlap has_internal_overlap(const Tensor& t);
+TORCH_API MemOverlap has_internal_overlap(TensorImpl* t);

-CAFFE2_API void assert_no_internal_overlap(const Tensor& t);
-CAFFE2_API void assert_no_internal_overlap(TensorImpl* t);
+TORCH_API void assert_no_internal_overlap(const Tensor& t);
+TORCH_API void assert_no_internal_overlap(TensorImpl* t);

-CAFFE2_API MemOverlapStatus get_overlap_status(const Tensor& a, const Tensor& b);
-CAFFE2_API MemOverlapStatus get_overlap_status(TensorImpl* a, TensorImpl* b);
+TORCH_API MemOverlapStatus get_overlap_status(const Tensor& a, const Tensor& b);
+TORCH_API MemOverlapStatus get_overlap_status(TensorImpl* a, TensorImpl* b);

-CAFFE2_API void assert_no_partial_overlap(const Tensor& a, const Tensor& b);
+TORCH_API void assert_no_partial_overlap(const Tensor& a, const Tensor& b);
 void assert_no_partial_overlap(TensorImpl* a, TensorImpl* b);

-CAFFE2_API void assert_no_overlap(const Tensor& a, const Tensor& b);
-CAFFE2_API void assert_no_overlap(TensorImpl* a, TensorImpl* b);
+TORCH_API void assert_no_overlap(const Tensor& a, const Tensor& b);
+TORCH_API void assert_no_overlap(TensorImpl* a, TensorImpl* b);

 }
@@ -17,8 +17,8 @@ inline bool has_names(TensorList tensors) {
 // Converts dim to an positional index. Errors if `dim` cannot be used to
 // refer to any dimension of tensor.
-CAFFE2_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim);
-CAFFE2_API std::vector<int64_t> dimnames_to_positions(const Tensor& tensor, DimnameList dims);
+TORCH_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim);
+TORCH_API std::vector<int64_t> dimnames_to_positions(const Tensor& tensor, DimnameList dims);

 // Unifies two DimnameList to produce a third. This is useful for implementing
 // the named inference rule for binary broadcasting operations like add.
@@ -28,7 +28,7 @@ CAFFE2_API std::vector<int64_t> dimnames_to_positions(const Tensor& tensor, Dimn
 // 2) Check misaligned: If a name `n` is in `names`, then it must appear at
 // the same index from the right in other.
 // 3) The output names are obtained by unifying the names individually from the right.
-CAFFE2_API std::vector<Dimname>
+TORCH_API std::vector<Dimname>
 unify_from_right(DimnameList names, DimnameList other, const char* action = "broadcast");

 [[noreturn]] inline void reportNYIDimnameOverload(const char* op_name) {
@@ -75,50 +75,50 @@ namespace namedinference {
 // `names` can be empty; see [NOTE] Writing name inference rules
 // If `names` is not empty, `names.size()` should equal `result.dim()`.
 // When in doubt, use this overload instead of the others.
-CAFFE2_API Tensor& propagate_names_if_nonempty(
+TORCH_API Tensor& propagate_names_if_nonempty(
 Tensor& result,
 DimnameList maybe_names,
 bool validate_names = false);

 // Propagates `names` to `result`. Only use this if we are certain that there are
 // names to propagate (that names is not empty).
-CAFFE2_API Tensor& propagate_names(
+TORCH_API Tensor& propagate_names(
 Tensor& result,
 DimnameList names,
 bool validate_names = false);

 // Propagates all names from src to result.
-CAFFE2_API void propagate_names(Tensor& result, const Tensor& src);
+TORCH_API void propagate_names(Tensor& result, const Tensor& src);

 // Propagates all names except for those at the excluded_idxs.
-CAFFE2_API void propagate_names_except(Tensor& result, const Tensor& src, IntArrayRef excluded_idxs);
+TORCH_API void propagate_names_except(Tensor& result, const Tensor& src, IntArrayRef excluded_idxs);

 // Used for reduction ops that have a `keepdim` arg.
-CAFFE2_API void propagate_names_for_reduction(Tensor& result, const Tensor& src, IntArrayRef excluded_idxs, bool keepdim);
+TORCH_API void propagate_names_for_reduction(Tensor& result, const Tensor& src, IntArrayRef excluded_idxs, bool keepdim);

-CAFFE2_API void propagate_names_for_expand(Tensor& result, const Tensor& self);
+TORCH_API void propagate_names_for_expand(Tensor& result, const Tensor& self);

-CAFFE2_API std::vector<Dimname> compute_cat_outnames(TensorList tensors);
+TORCH_API std::vector<Dimname> compute_cat_outnames(TensorList tensors);

-CAFFE2_API std::vector<Dimname> compute_broadcast_outnames(
+TORCH_API std::vector<Dimname> compute_broadcast_outnames(
 const Tensor& self,
 const Tensor& other);

-CAFFE2_API std::vector<Dimname> broadcast_to_outnames(
+TORCH_API std::vector<Dimname> broadcast_to_outnames(
 const Tensor& tensor,
 const Tensor& reference_tensor,
 const char* op_name);

-CAFFE2_API std::vector<Dimname> compute_matmul_outnames(const Tensor& self, const Tensor& other);
+TORCH_API std::vector<Dimname> compute_matmul_outnames(const Tensor& self, const Tensor& other);

-CAFFE2_API std::vector<Dimname> compute_cdist_outnames(const Tensor& self, const Tensor& other);
+TORCH_API std::vector<Dimname> compute_cdist_outnames(const Tensor& self, const Tensor& other);

-CAFFE2_API std::vector<Dimname> compute_bmm_outnames(
+TORCH_API std::vector<Dimname> compute_bmm_outnames(
 Tensor& result,
 const Tensor& self,
 const Tensor& other);

-CAFFE2_API std::vector<Dimname> compute_squeeze_outnames(const Tensor& tensor);
+TORCH_API std::vector<Dimname> compute_squeeze_outnames(const Tensor& tensor);

 std::vector<Dimname> compute_diagonal_outnames(
 const Tensor& tensor,
@@ -127,40 +127,40 @@ std::vector<Dimname> compute_diagonal_outnames(
 // TensorImpl* overloads for Legacy TH/THC code. Use these sparingly.

-CAFFE2_API TensorImpl* propagate_names_if_nonempty(
+TORCH_API TensorImpl* propagate_names_if_nonempty(
 TensorImpl* result,
 DimnameList maybe_names,
 bool validate_names = false);

-CAFFE2_API TensorImpl* propagate_names(
+TORCH_API TensorImpl* propagate_names(
 TensorImpl* result,
 DimnameList names,
 bool validate_names = false);

-CAFFE2_API void propagate_names(TensorImpl* result, /*const */TensorImpl* src);
+TORCH_API void propagate_names(TensorImpl* result, /*const */TensorImpl* src);

 // result = m1 @ m2 + bias
-CAFFE2_API void propagate_names_for_addmm(
+TORCH_API void propagate_names_for_addmm(
 Tensor& result,
 const Tensor& m1,
 const Tensor& m2,
 const Tensor& bias);

-CAFFE2_API void propagate_names_for_addmv(
+TORCH_API void propagate_names_for_addmv(
 Tensor& result,
 const Tensor& mat,
 const Tensor& vec,
 const Tensor& bias);

-CAFFE2_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2);
+TORCH_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2);

-CAFFE2_API std::vector<Dimname> compute_baddbmm_outnames(
+TORCH_API std::vector<Dimname> compute_baddbmm_outnames(
 Tensor& result,
 const Tensor& self,
 const Tensor& other,
 const Tensor& bias);

-CAFFE2_API bool are_names_equal(TensorImpl* self, TensorImpl* other);
+TORCH_API bool are_names_equal(TensorImpl* self, TensorImpl* other);

 } // namespace namedinference
@@ -17,7 +17,7 @@ namespace at {
 // "shallow copy" in order to add support.

 template <typename OpaqueHandle>
-struct CAFFE2_API OpaqueTensorImpl : public TensorImpl {
+struct TORCH_API OpaqueTensorImpl : public TensorImpl {
 // public constructor for now...
 OpaqueTensorImpl(
 at::DispatchKeySet key_set,
@@ -5,7 +5,7 @@
 namespace at {

-class CAFFE2_API PTThreadPool : public c10::ThreadPool {
+class TORCH_API PTThreadPool : public c10::ThreadPool {
 public:
 explicit PTThreadPool(
 int pool_size,
@@ -10,25 +10,25 @@ inline int64_t divup(int64_t x, int64_t y) {
 }

 // Called during new thread initialization
-CAFFE2_API void init_num_threads();
+TORCH_API void init_num_threads();

 // Sets the number of threads to be used in parallel region
-CAFFE2_API void set_num_threads(int);
+TORCH_API void set_num_threads(int);

 // Returns the maximum number of threads that may be used in a parallel region
-CAFFE2_API int get_num_threads();
+TORCH_API int get_num_threads();

 // Returns the current thread number (starting from 0)
 // in the current parallel region, or 0 in the sequential region
-CAFFE2_API int get_thread_num();
+TORCH_API int get_thread_num();

 // Checks whether the code runs in parallel region
-CAFFE2_API bool in_parallel_region();
+TORCH_API bool in_parallel_region();

 namespace internal {

 // Initialise num_threads lazily at first parallel call
-inline CAFFE2_API void lazy_init_num_threads() {
+inline TORCH_API void lazy_init_num_threads() {
 thread_local bool init = false;
 if (C10_UNLIKELY(!init)) {
 at::init_num_threads();
@@ -110,29 +110,29 @@ inline scalar_t parallel_reduce(
 const SF& sf);

 // Returns a detailed string describing parallelization settings
-CAFFE2_API std::string get_parallel_info();
+TORCH_API std::string get_parallel_info();

 // Sets number of threads used for inter-op parallelism
-CAFFE2_API void set_num_interop_threads(int);
+TORCH_API void set_num_interop_threads(int);

 // Returns the number of threads used for inter-op parallelism
-CAFFE2_API int get_num_interop_threads();
+TORCH_API int get_num_interop_threads();

 // Launches inter-op parallel task
-CAFFE2_API void launch(std::function<void()> func);
+TORCH_API void launch(std::function<void()> func);
 namespace internal {
 void launch_no_thread_state(std::function<void()> fn);
 } // namespace internal

 // Launches intra-op parallel task
-CAFFE2_API void intraop_launch(std::function<void()> func);
+TORCH_API void intraop_launch(std::function<void()> func);

 // Launches intra-op parallel task, returns a future
-CAFFE2_API std::shared_ptr<c10::ivalue::Future> intraop_launch_future(
+TORCH_API std::shared_ptr<c10::ivalue::Future> intraop_launch_future(
 std::function<void()> func);

 // Returns number of intra-op threads used by default
-CAFFE2_API int intraop_default_num_threads();
+TORCH_API int intraop_default_num_threads();

 } // namespace at
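The two hunks above touch the public threading API declared in `ATen/Parallel.h`. The following sketch is illustrative only and simply exercises the renamed functions together with `at::parallel_for`; it is not part of this diff.

```cpp
#include <ATen/Parallel.h>

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Configure and inspect intra-op parallelism via the renamed TORCH_API functions.
  at::init_num_threads();
  at::set_num_threads(4);
  std::cout << "intra-op threads: " << at::get_num_threads() << "\n";
  std::cout << at::get_parallel_info() << "\n";

  // parallel_for splits [0, data.size()) into grain-sized chunks across the pool.
  std::vector<int64_t> data(1000, 1);
  at::parallel_for(0, static_cast<int64_t>(data.size()), /*grain_size=*/100,
                   [&](int64_t begin, int64_t end) {
                     for (int64_t i = begin; i < end; ++i) {
                       data[i] *= 2;
                     }
                   });
  return 0;
}
```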
@@ -22,7 +22,7 @@ inline std::tuple<size_t, size_t> calc_num_tasks_and_chunk_size(
 return std::make_tuple(num_tasks, chunk_size);
 }

-CAFFE2_API void _parallel_run(
+TORCH_API void _parallel_run(
 const int64_t begin,
 const int64_t end,
 const int64_t grain_size,
@@ -5,7 +5,7 @@
 #include <c10/util/Exception.h>

 namespace at {
-struct CAFFE2_API SparseTensorImpl : public TensorImpl {
+struct TORCH_API SparseTensorImpl : public TensorImpl {
 // Stored in COO format, indices + values.

 // INVARIANTS:
@@ -5,7 +5,7 @@
 namespace at {

-struct CAFFE2_API TensorGeometry {
+struct TORCH_API TensorGeometry {
 TensorGeometry() : storage_offset_(0) {}

 explicit TensorGeometry(IntArrayRef sizes)
@@ -20,10 +20,10 @@ enum class TensorIndexType { None, Ellipsis, Integer, Boolean, Slice, Tensor };
 constexpr c10::nullopt_t None = c10::nullopt;

-struct CAFFE2_API EllipsisIndexType final { EllipsisIndexType() {} };
-CAFFE2_API extern const EllipsisIndexType Ellipsis;
+struct TORCH_API EllipsisIndexType final { EllipsisIndexType() {} };
+TORCH_API extern const EllipsisIndexType Ellipsis;

-struct CAFFE2_API Slice final {
+struct TORCH_API Slice final {
 public:
 // This mirrors `__PySlice_Unpack` in torch/csrc/utils/python_compat.h
 Slice(
@@ -73,7 +73,7 @@ struct CAFFE2_API Slice final {
 int64_t step_;
 };

-CAFFE2_API std::ostream& operator<<(std::ostream& stream, const Slice& slice);
+TORCH_API std::ostream& operator<<(std::ostream& stream, const Slice& slice);

 // `at::indexing::TensorIndex` is used for converting C++ tensor indices such as
 // `{None, "...", Ellipsis, 0, true, Slice(1, None, 2), torch::tensor({1, 2})}`
@@ -100,7 +100,7 @@ CAFFE2_API std::ostream& operator<<(std::ostream& stream, const Slice& slice);
 // `:3:2` | `Slice(None, 3, 2)`
 // `1:3:2` | `Slice(1, 3, 2)`
 // `torch.tensor([1, 2])`) | `torch::tensor({1, 2})`
-struct CAFFE2_API TensorIndex final {
+struct TORCH_API TensorIndex final {
 // Case 1: `at::indexing::None`
 TensorIndex(c10::nullopt_t) : type_(TensorIndexType::None) {}
@@ -175,8 +175,8 @@ struct CAFFE2_API TensorIndex final {
 TensorIndexType type_;
 };

-CAFFE2_API std::ostream& operator<<(std::ostream& stream, const TensorIndex& tensor_index);
-CAFFE2_API std::ostream& operator<<(std::ostream& stream, const std::vector<TensorIndex>& tensor_indices);
+TORCH_API std::ostream& operator<<(std::ostream& stream, const TensorIndex& tensor_index);
+TORCH_API std::ostream& operator<<(std::ostream& stream, const std::vector<TensorIndex>& tensor_indices);

 namespace impl {
 static inline Tensor applySlice(
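The comment table in the hunks above maps Python indexing syntax to `at::indexing::TensorIndex`. The hedged sketch below shows how such indices are typically passed to `Tensor::index` from C++; it illustrates the convention documented in the header and is not part of this diff.

```cpp
#include <ATen/ATen.h>

void indexing_example() {
  using namespace at::indexing;

  at::Tensor t = at::arange(24).reshape({4, 6});

  // Python: t[1:3, None]  ->  C++: t.index({Slice(1, 3), None})
  at::Tensor a = t.index({Slice(1, 3), None});

  // Python: t[..., ::2]  ->  C++: t.index({Ellipsis, Slice(None, None, 2)})
  at::Tensor b = t.index({Ellipsis, Slice(None, None, 2)});

  (void)a;
  (void)b;
}
```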
@@ -70,7 +70,7 @@ struct DimCounter {
 int64_t offset;
 };

-struct CAFFE2_API OperandInfo {
+struct TORCH_API OperandInfo {
 using StrideVector = SmallVector<int64_t, 6>;
 OperandInfo() {}
 explicit OperandInfo(Tensor t) : tensor(std::move(t)) {
@@ -141,7 +141,7 @@ enum class FastSetupType : uint8_t {
 class TensorIteratorConfig;
 struct TensorIterator;

-struct CAFFE2_API TensorIteratorBase : public impl::MetaBase {
+struct TORCH_API TensorIteratorBase : public impl::MetaBase {
 using DimMask = std::bitset<64>;
 using PtrVector = SmallVector<char*, 4>;
 using StrideVector = SmallVector<int64_t, 6>;
@@ -408,7 +408,7 @@ protected:
 bool is_meta_ = false;
 };

-struct CAFFE2_API TensorIterator final : public TensorIteratorBase {
+struct TORCH_API TensorIterator final : public TensorIteratorBase {
 TensorIterator() : TensorIteratorBase() {}
 // Slicing is OK, TensorIterator guaranteed NOT to have any fields
 TensorIterator(const TensorIteratorBase& iter) : TensorIteratorBase(iter) {}
@@ -426,7 +426,7 @@ struct CAFFE2_API TensorIterator final : public TensorIteratorBase {
 void set_output(int64_t output_idx, IntArrayRef sizes, IntArrayRef strides, TensorOptions options, DimnameList names) override;
 };

-class CAFFE2_API TensorIteratorConfig final {
+class TORCH_API TensorIteratorConfig final {
 public:
 friend struct TensorIteratorBase;
 friend struct TensorIterator;
@@ -532,8 +532,8 @@ private:
 /// A container-like struct that acts as if it contains splits of a
 /// TensorIterator that can use 32-bit indexing. Taken together the splits cover
 /// the original TensorIterator.
-struct CAFFE2_API SplitUntil32Bit {
-  struct CAFFE2_API iterator {
+struct TORCH_API SplitUntil32Bit {
+  struct TORCH_API iterator {
 iterator() {};
 iterator(const TensorIteratorBase& iter);
 iterator(iterator&&) = default;
@@ -46,7 +46,7 @@ namespace impl {
 // (although presently it isn't).
 //
 // A notable subclass of this interface is TensorIteratorBase.
-struct CAFFE2_API MetaBase {
+struct TORCH_API MetaBase {
 virtual void set_output(int64_t output_idx, IntArrayRef sizes, IntArrayRef strides, TensorOptions options, DimnameList names) = 0;
 virtual const Tensor& maybe_get_output(int64_t output_idx) = 0;
 void set_output(IntArrayRef sizes, TensorOptions options) {
@@ -26,7 +26,7 @@ namespace at { namespace namedinference {
 // None (in tensor) cannot match A (in other) because if the None were refined
 // to A, `tensor` would have duplicate names [A, A]. Therefore we need to check
 // tensor.names [A, None] for the existence of A.
-struct CAFFE2_API TensorName {
+struct TORCH_API TensorName {
 explicit TensorName(ArrayRef<Dimname> origin, int origin_idx)
 : origin_(origin),
 name_(origin[maybe_wrap_dim(origin_idx, origin.size())]),
@@ -41,14 +41,14 @@ struct CAFFE2_API TensorName {
 Dimname name_;
 int origin_idx_; // A named tensor can have at most 64 dims.

-  CAFFE2_API friend std::ostream& operator<<(
+  TORCH_API friend std::ostream& operator<<(
 std::ostream& out,
 const TensorName& tensorname);
 };

 using TensorNameVec = SmallVector<TensorName, 10>;

-struct CAFFE2_API TensorNames {
+struct TORCH_API TensorNames {
 explicit TensorNames(ArrayRef<Dimname> names);

 // Create TensorNames from names[start:end]. Each individual TensorName stores
@@ -12,7 +12,7 @@ namespace at {
 // make sense. These are particularly useful for native functions,
 // which do NO argument checking by default.

-struct CAFFE2_API TensorArg {
+struct TORCH_API TensorArg {
 Tensor tensor;
 const char* name;
 int pos; // 1-indexed
@@ -22,7 +22,7 @@ struct CAFFE2_API TensorArg {
 const Tensor& operator*() const { return tensor; }
 };

-struct CAFFE2_API TensorGeometryArg {
+struct TORCH_API TensorGeometryArg {
 TensorGeometry tensor;
 const char* name;
 int pos; // 1-indexed
@@ -49,104 +49,104 @@ using CheckedFrom = const char*;
 // not TensorGeometryArg, because the Tensor to TensorGeometry
 // conversion will blow up if you have undefined tensors.

-CAFFE2_API std::ostream& operator<<(std::ostream& out, TensorGeometryArg t);
-CAFFE2_API void checkDim(
+TORCH_API std::ostream& operator<<(std::ostream& out, TensorGeometryArg t);
+TORCH_API void checkDim(
 CheckedFrom c,
 const TensorGeometryArg& t,
 int64_t dim);
 // NB: this is an inclusive-exclusive range
-CAFFE2_API void checkDimRange(
+TORCH_API void checkDimRange(
 CheckedFrom c,
 const TensorGeometryArg& t,
 int64_t dim_start,
 int64_t dim_end);
-CAFFE2_API void checkSameDim(
+TORCH_API void checkSameDim(
 CheckedFrom c,
 const TensorGeometryArg& t1,
 const TensorGeometryArg& t2);
-CAFFE2_API void checkContiguous(CheckedFrom c, const TensorGeometryArg& t);
-CAFFE2_API void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts);
-CAFFE2_API void checkSize(
+TORCH_API void checkContiguous(CheckedFrom c, const TensorGeometryArg& t);
+TORCH_API void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts);
+TORCH_API void checkSize(
 CheckedFrom c,
 const TensorGeometryArg& t,
 IntArrayRef sizes);
-CAFFE2_API void checkSize(
+TORCH_API void checkSize(
 CheckedFrom c,
 const TensorGeometryArg& t,
 int64_t dim,
 int64_t size);
-CAFFE2_API void checkNumel(
+TORCH_API void checkNumel(
 CheckedFrom c,
 const TensorGeometryArg& t,
 int64_t numel);
-CAFFE2_API void checkSameNumel(
+TORCH_API void checkSameNumel(
 CheckedFrom c,
 const TensorGeometryArg& t1,
 const TensorGeometryArg& t2);
-CAFFE2_API void checkAllSameNumel(CheckedFrom c, ArrayRef<TensorArg> tensors);
-CAFFE2_API void checkScalarType(
+TORCH_API void checkAllSameNumel(CheckedFrom c, ArrayRef<TensorArg> tensors);
+TORCH_API void checkScalarType(
 CheckedFrom c,
 const TensorArg& t,
 ScalarType s);
-CAFFE2_API void checkScalarTypes(
+TORCH_API void checkScalarTypes(
 CheckedFrom c,
 const TensorArg& t,
 at::ArrayRef<ScalarType> l);
-CAFFE2_API void checkSameGPU(
+TORCH_API void checkSameGPU(
 CheckedFrom c,
 const TensorArg& t1,
 const TensorArg& t2);
-CAFFE2_API void checkAllSameGPU(CheckedFrom c, ArrayRef<TensorArg> tensors);
-CAFFE2_API void checkSameType(
+TORCH_API void checkAllSameGPU(CheckedFrom c, ArrayRef<TensorArg> tensors);
+TORCH_API void checkSameType(
 CheckedFrom c,
 const TensorArg& t1,
 const TensorArg& t2);
-CAFFE2_API void checkAllSameType(CheckedFrom c, ArrayRef<TensorArg> tensors);
-CAFFE2_API void checkSameSize(
+TORCH_API void checkAllSameType(CheckedFrom c, ArrayRef<TensorArg> tensors);
+TORCH_API void checkSameSize(
 CheckedFrom c,
 const TensorArg& t1,
 const TensorArg& t2);
-CAFFE2_API void checkDefined(CheckedFrom c, const TensorArg& t);
-CAFFE2_API void checkAllDefined(CheckedFrom c, at::ArrayRef<TensorArg> t);
+TORCH_API void checkDefined(CheckedFrom c, const TensorArg& t);
+TORCH_API void checkAllDefined(CheckedFrom c, at::ArrayRef<TensorArg> t);

 // FixMe: does TensorArg slow things down?
-CAFFE2_API void checkBackend(
+TORCH_API void checkBackend(
 CheckedFrom c,
 at::ArrayRef<Tensor> t,
 at::Backend backend);

-CAFFE2_API void checkDeviceType(
+TORCH_API void checkDeviceType(
 CheckedFrom c,
 at::ArrayRef<Tensor> tensors,
 at::DeviceType device_type);

-CAFFE2_API void checkLayout(CheckedFrom c, const Tensor& t, Layout layout);
+TORCH_API void checkLayout(CheckedFrom c, const Tensor& t, Layout layout);

-CAFFE2_API void checkLayout(CheckedFrom c, at::ArrayRef<Tensor> tensors, at::Layout layout);
+TORCH_API void checkLayout(CheckedFrom c, at::ArrayRef<Tensor> tensors, at::Layout layout);

 // Methods for getting data_ptr if tensor is defined
-CAFFE2_API void* maybe_data_ptr(const Tensor& tensor);
-CAFFE2_API void* maybe_data_ptr(const TensorArg& tensor);
+TORCH_API void* maybe_data_ptr(const Tensor& tensor);
+TORCH_API void* maybe_data_ptr(const TensorArg& tensor);

 // Return if the tensor geometry represented by `sizes` and `strides` is contiguous
 // Although we cache is_contiguous in tensor now, this is till useful because it
 // allows checking if a particular geometry is contiguous without explicitly
 // constructing a tensor, e.g., when you want to choose a kernel strategy based
 // on whether a subgeometry is contiguous.
-CAFFE2_API bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides);
+TORCH_API bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides);

 // Correspond to THCUNN_check_dim_size/THNN_check_dim_size
-CAFFE2_API void check_dim_size(
+TORCH_API void check_dim_size(
 const Tensor& tensor,
 int64_t dim,
 int64_t dim_size,
 int64_t size);

 namespace detail {
-CAFFE2_API std::vector<int64_t> defaultStrides(IntArrayRef sizes);
-CAFFE2_API size_t
+TORCH_API std::vector<int64_t> defaultStrides(IntArrayRef sizes);
+TORCH_API size_t
 computeStorageNbytes(IntArrayRef sizes, IntArrayRef strides, size_t itemsize);
-CAFFE2_API c10::optional<std::vector<int64_t>> computeStride(
+TORCH_API c10::optional<std::vector<int64_t>> computeStride(
 IntArrayRef oldshape,
 IntArrayRef oldstride,
 IntArrayRef newshape);
@@ -22,7 +22,7 @@
 namespace at {

-CAFFE2_API int _crash_if_asan(int);
+TORCH_API int _crash_if_asan(int);

 // TODO: This unwrapping code is ONLY used for TH bindings; once TH goes
 // away, we can delete this function
@@ -135,24 +135,24 @@ inline void check_size_nonnegative(IntArrayRef size) {
 }

 namespace detail {
-CAFFE2_API
+TORCH_API
 Tensor empty_cpu(IntArrayRef size, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt,
 c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt, c10::optional<c10::MemoryFormat> memory_format_opt);

 template <typename T>
-CAFFE2_API
+TORCH_API
 Tensor tensor_cpu(ArrayRef<T> values, const TensorOptions& options);

 template <typename T>
-CAFFE2_API
+TORCH_API
 Tensor tensor_backend(ArrayRef<T> values, const TensorOptions& options);

 template <typename T>
-CAFFE2_API
+TORCH_API
 Tensor tensor_complex_cpu(ArrayRef<T> values, const TensorOptions& options);

 template <typename T>
-CAFFE2_API
+TORCH_API
 Tensor tensor_complex_backend(ArrayRef<T> values, const TensorOptions& options);
 } // namespace detail
@@ -3,14 +3,14 @@
 namespace at {

 /// Returns a detailed string describing the configuration PyTorch.
-CAFFE2_API std::string show_config();
+TORCH_API std::string show_config();

-CAFFE2_API std::string get_mkl_version();
+TORCH_API std::string get_mkl_version();

-CAFFE2_API std::string get_mkldnn_version();
+TORCH_API std::string get_mkldnn_version();

-CAFFE2_API std::string get_openmp_version();
+TORCH_API std::string get_openmp_version();

-CAFFE2_API std::string get_cxx_flags();
+TORCH_API std::string get_cxx_flags();

 } // namespace at
@@ -11,7 +11,7 @@ namespace impl {
 //
 // NOTE: this is NOT the c++ api for torch.vmap. That doesn't exist yet.

-struct CAFFE2_API VmapMode {
+struct TORCH_API VmapMode {
 // Returns the vmap level, aka the count of how many nested vmaps we're in.
 static int64_t current_vmap_level();
@@ -9,5 +9,5 @@ struct OperatorName;
 namespace at {

 // check if an op is a custom op (i.e. did not come from native_functions.yaml)
-CAFFE2_API bool is_custom_op(const c10::OperatorName& opName);
+TORCH_API bool is_custom_op(const c10::OperatorName& opName);
 }
@@ -17,7 +17,7 @@ class Tensor;
 // serves as a replacement return value for Tensor::type(). Previously,
 // Tensor::type() returned Type&, but we are changing Type to not be
 // dtype-specific.
-class CAFFE2_API DeprecatedTypeProperties {
+class TORCH_API DeprecatedTypeProperties {
 public:
 DeprecatedTypeProperties(Backend backend, ScalarType scalar_type)
 : backend_(backend), scalar_type_(scalar_type) {}
@@ -10,11 +10,11 @@ namespace at {
 class DeprecatedTypeProperties;

-struct CAFFE2_API DeprecatedTypePropertiesDeleter {
+struct TORCH_API DeprecatedTypePropertiesDeleter {
 void operator()(DeprecatedTypeProperties * ptr);
 };

-class CAFFE2_API DeprecatedTypePropertiesRegistry {
+class TORCH_API DeprecatedTypePropertiesRegistry {
 public:
 DeprecatedTypePropertiesRegistry();
@@ -26,6 +26,6 @@ private:
 [static_cast<int>(ScalarType::NumOptions)];
 };

-CAFFE2_API DeprecatedTypePropertiesRegistry& globalDeprecatedTypePropertiesRegistry();
+TORCH_API DeprecatedTypePropertiesRegistry& globalDeprecatedTypePropertiesRegistry();

 } // namespace at
@@ -9,7 +9,7 @@ namespace at {
 enum class NameType: uint8_t { BASIC, WILDCARD };

-struct CAFFE2_API Dimname {
+struct TORCH_API Dimname {
 static Dimname fromSymbol(Symbol name);
 static Dimname wildcard();
 static bool isValidName(const std::string& name);
@@ -35,7 +35,7 @@ struct CAFFE2_API Dimname {
 using DimnameList = c10::ArrayRef<Dimname>;

-CAFFE2_API std::ostream& operator<<(std::ostream& out, const Dimname& dimname);
+TORCH_API std::ostream& operator<<(std::ostream& out, const Dimname& dimname);

 inline bool operator==(const Dimname& lhs, const Dimname& rhs) {
 return lhs.symbol() == rhs.symbol();
@@ -6,12 +6,12 @@
 namespace c10 {
-CAFFE2_API std::ostream& operator<<(std::ostream& out, Backend b);
+TORCH_API std::ostream& operator<<(std::ostream& out, Backend b);
 }
 namespace at {

-CAFFE2_API std::ostream& operator<<(std::ostream& out, const DeprecatedTypeProperties& t);
-CAFFE2_API std::ostream& print(
+TORCH_API std::ostream& operator<<(std::ostream& out, const DeprecatedTypeProperties& t);
+TORCH_API std::ostream& print(
 std::ostream& stream,
 const Tensor& tensor,
 int64_t linesize);
@@ -56,7 +56,7 @@
 namespace at {

-struct CAFFE2_API Generator {
+struct TORCH_API Generator {
 Generator() {}

 explicit Generator(c10::intrusive_ptr<c10::GeneratorImpl> gen_impl)
@@ -43,7 +43,7 @@ namespace at {
 // trace). To unify the two, we would first have to move profiling and tracing
 // out of VariableType.

-struct CAFFE2_API AutoNonVariableTypeMode {
+struct TORCH_API AutoNonVariableTypeMode {
 // NB: The enabled parameter must ALWAYS be black, as Henry Ford used to say.
 // TODO: Eliminate this parameter entirely
 AutoNonVariableTypeMode(bool enabled = true) :
@@ -19,7 +19,7 @@ namespace at {
 //
 // This class has an important invariant: there must be at least ONE
 // non-wildcard
-struct CAFFE2_API NamedTensorMeta final : public c10::NamedTensorMetaInterface {
+struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface {
 // This enum is to remind people that the invariant on constructors is that
 // the list of dimnames must have at least one non-wildcard
 enum HAS_NON_WILDCARD {
@@ -69,7 +69,7 @@ struct CAFFE2_API NamedTensorMeta final : public c10::NamedTensorMetaInterface {
 // When NamesMode is disabled, then all operations ignore tensors' names fields.
 // Concretely speaking, all tensors are treated as having nullopt names.
-struct CAFFE2_API NamesMode {
+struct TORCH_API NamesMode {
 static bool is_enabled();
 static void set_enabled(bool enabled);
 };
@@ -77,7 +77,7 @@ struct CAFFE2_API NamesMode {
 // A RAII, thread local (!) guard that enables or disables names upon
 // construction, and sets it back to the original value upon destruction.
-struct CAFFE2_API NoNamesGuard {
+struct TORCH_API NoNamesGuard {
 NoNamesGuard() : prev_mode(NamesMode::is_enabled()), initialized(true) {
 NamesMode::set_enabled(false);
 }
@@ -99,8 +99,8 @@ void check_names_valid_for(const Tensor& tensor, DimnameList names);
 void check_names_valid_for(size_t tensor_dim, DimnameList names);

 // Sets the names of `tensor` to be `names`.
-CAFFE2_API Tensor& internal_set_names_inplace(Tensor& tensor, c10::optional<DimnameList> names);
-CAFFE2_API Tensor& internal_set_names_inplace(Tensor& tensor, std::vector<Dimname>&& names, bool validate_names);
+TORCH_API Tensor& internal_set_names_inplace(Tensor& tensor, c10::optional<DimnameList> names);
+TORCH_API Tensor& internal_set_names_inplace(Tensor& tensor, std::vector<Dimname>&& names, bool validate_names);

 constexpr size_t kMaxNamedTensorDim = 64;
@@ -110,8 +110,8 @@ namespace impl {
 // Some helper functions on TensorImpl. Useful for working with names in TH.
 // XXX: Ideally these would exist as methods on TensorImpl
-CAFFE2_API void internal_set_names_inplace(TensorImpl* impl, c10::optional<DimnameList> names, bool validate_names);
-CAFFE2_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names);
+TORCH_API void internal_set_names_inplace(TensorImpl* impl, c10::optional<DimnameList> names, bool validate_names);
+TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names);

 void check_names_valid_for(TensorImpl* impl, DimnameList names);
@@ -119,19 +119,19 @@ void check_names_valid_for(TensorImpl* impl, DimnameList names);
 // Returns false if the tensor's names don't exist (were not allocated),
 // or if all names are 'None'.
 // We treat not-allocated-names the same as allocated names that are all 'None'.
-CAFFE2_API bool has_names(const TensorImpl* impl);
+TORCH_API bool has_names(const TensorImpl* impl);

 // Returns the names of the tensor's dimensions.
 // Unnamed tensors are treated as having 'None' in all dimension; this method
 // would return a DimnameList of all 'None's for an unnamed tensor.
-CAFFE2_API DimnameList get_names(const TensorImpl* impl);
+TORCH_API DimnameList get_names(const TensorImpl* impl);

 // This is more of an implementation detail; one should use impl::get_names /
 // Tensor::names() whenever possible because it provides a cleaner API.
 // Returns the names of the tensor if they have been allocated; returns nullopt
 // instead if the haven't been. The names of a tensor are not allocated if a
 // tensor is constructed with names=None.
-CAFFE2_API c10::optional<DimnameList> get_opt_names(const TensorImpl* impl);
+TORCH_API c10::optional<DimnameList> get_opt_names(const TensorImpl* impl);

 } // namespace impl
@@ -32,7 +32,7 @@ using QuantizerPtr = c10::intrusive_ptr<Quantizer>;
 * Quantized Tensor holds an intrusive_ptr to Quantizer, and multiple Tensor can
 * share the same Quantizer. Quantizer should be immutable.
 */
-struct CAFFE2_API Quantizer : public c10::intrusive_ptr_target {
+struct TORCH_API Quantizer : public c10::intrusive_ptr_target {
 const ScalarType scalar_type_;
 explicit Quantizer(ScalarType scalar_type) : scalar_type_(scalar_type) {}
 virtual ~Quantizer();
@@ -16,7 +16,7 @@
 // merge the libraries inside Facebook". Well, the problem is that there
 // are some downstream applications which are at binary size limit, and
 // incorporating all of the extra code from libtorch would push them
-// over (admarket/adreview/service:adreviewservice, see also
+// over (admarket/adreview/service:adreviewservice, see also
 // https://github.com/pytorch/pytorch/pull/29299) So if you want to do that,
 // we have to fix all of the services like this.
 //
@@ -38,7 +38,7 @@ struct Node;
 namespace at {
 namespace impl {

-struct CAFFE2_API VariableHooksInterface {
+struct TORCH_API VariableHooksInterface {
 virtual ~VariableHooksInterface() = default;
 virtual Tensor tensor_data(const Tensor&) const = 0;
 virtual Tensor variable_data(const Tensor&) const = 0;
@@ -50,10 +50,10 @@ struct CAFFE2_API VariableHooksInterface {
 virtual const std::string& name(const Tensor&) const = 0;
 };

-CAFFE2_API void SetVariableHooks(VariableHooksInterface* hooks);
-CAFFE2_API VariableHooksInterface* GetVariableHooks();
+TORCH_API void SetVariableHooks(VariableHooksInterface* hooks);
+TORCH_API VariableHooksInterface* GetVariableHooks();

-struct CAFFE2_API VariableHooksRegisterer {
+struct TORCH_API VariableHooksRegisterer {
 explicit VariableHooksRegisterer(VariableHooksInterface* hooks) {
 SetVariableHooks(hooks);
 }
@@ -21,7 +21,7 @@ class Tensor;
 * properly when the blob is deallocated or re-allocated with a new type. A blob
 * could contain anything, although the most common case is to contain a Tensor.
 */
-class CAFFE2_API Blob final : public c10::intrusive_ptr_target {
+class TORCH_API Blob final : public c10::intrusive_ptr_target {
 public:
 /**
 * Initializes an empty Blob.
@@ -15,7 +15,7 @@ struct OperatorKernel;
 // no overhead to fallthrough to the next key. See cpp file for some more
 // implementation notes; notably, this does NOT actually go through the
 // boxing/unboxing codepath.
-CAFFE2_API void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, Stack*);
+TORCH_API void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, Stack*);

 // Note [Ambiguity in AutogradOther kernel]
 // This kernel implements reporting an error message when there're kernels registered
@@ -27,7 +27,7 @@ CAFFE2_API void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, Stack
 // See c10/core/DispatchKeySet.cpp for a list of backends mapped to AutogradOther.
 // Thus if backend extender indeed want to override Math kernel behavior, they should request
 // a dedicated Autograd key for their backend to resolve the ambiguity.
-CAFFE2_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle&, Stack*);
+TORCH_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle&, Stack*);

 // Note [named_not_supported_kernel]
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -36,7 +36,7 @@ CAFFE2_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHa
 // cased in the dispatcher to be triggered before we attempt boxing (so we can
 // give a good error message in cases when boxing is not supported). When
 // boxing is universally supported this can be removed.
-[[noreturn]] CAFFE2_API void named_not_supported_kernel(OperatorKernel*, const OperatorHandle&, Stack*);
+[[noreturn]] TORCH_API void named_not_supported_kernel(OperatorKernel*, const OperatorHandle&, Stack*);

 /**
 * KernelFunction is similar to std::function but stores a kernel function.
@@ -44,7 +44,7 @@ CAFFE2_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHa
 * and call it in a boxed or unboxed way. If the way it was created doesn't
 * match the way it was called, it will do boxing or unboxing as necessary.
 */
-class CAFFE2_API KernelFunction final {
+class TORCH_API KernelFunction final {
 public:
 // This is how boxed kernels are actually stored
 using InternalBoxedKernelFunction = void(OperatorKernel*, const OperatorHandle&, Stack*);
@@ -26,7 +26,7 @@ class OperatorHandle;
 *
 * See below for how to register this kernel with PyTorch.
 */
-struct CAFFE2_API OperatorKernel {
+struct TORCH_API OperatorKernel {
 virtual ~OperatorKernel() = default;
 };
@@ -10,7 +10,7 @@ namespace impl {
 // A CppSignature object holds RTTI information about a C++ function signature at runtime
 // and can compare them or get a debug-printable name.
-class CAFFE2_API CppSignature final {
+class TORCH_API CppSignature final {
 public:
 CppSignature(const CppSignature&) = default;
 CppSignature(CppSignature&&) noexcept = default;
@@ -102,7 +102,7 @@ namespace detail {
 * varies from operator, as some operators may have overridden the
 * fallthrough with custom behavior.
 */
-struct CAFFE2_API DispatchKeyExtractor final {
+struct TORCH_API DispatchKeyExtractor final {
 public:
 static DispatchKeyExtractor make(const FunctionSchema& schema) {
 return DispatchKeyExtractor(makeBitsetForDispatchArgs(schema));
@@ -16,7 +16,7 @@
 namespace c10 {

-class CAFFE2_API OperatorHandle;
+class TORCH_API OperatorHandle;
 template<class FuncType> class TypedOperatorHandle;

 /**
@@ -27,7 +27,7 @@ template<class FuncType> class TypedOperatorHandle;
 * NB: registration events only occur when a 'def' occurs; we don't trigger
 * on 'impl' or 'fallback' calls.
 */
-class CAFFE2_API OpRegistrationListener {
+class TORCH_API OpRegistrationListener {
 public:
 virtual ~OpRegistrationListener();
@@ -45,7 +45,7 @@ class SchemaRegistrationHandleRAII;
 * Most end users shouldn't use this directly; if you're trying to register
 * ops look in op_registration
 */
-class CAFFE2_API Dispatcher final {
+class TORCH_API Dispatcher final {
 private:
 // For direct access to backend fallback information
 friend class impl::OperatorEntry;
@@ -267,7 +267,7 @@ private:
 * This handle can be used to register kernels with the dispatcher or
 * to lookup a kernel for a certain set of arguments.
 */
-class CAFFE2_API OperatorHandle {
+class TORCH_API OperatorHandle {
 public:
 OperatorHandle(OperatorHandle&&) noexcept = default;
 OperatorHandle& operator=(OperatorHandle&&) noexcept = default;
@@ -4,7 +4,7 @@
 namespace c10 {

-struct CAFFE2_API ObservedOperators {
+struct TORCH_API ObservedOperators {
 ObservedOperators() = delete;

 static bool isObserved(const OperatorName& name);
@@ -61,7 +61,7 @@ struct AnnotatedSchema final {
 // Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher
 // lock (this is important because some methods in OperatorEntry access
 // dispatcher state)
-class CAFFE2_API OperatorEntry final {
+class TORCH_API OperatorEntry final {
 public:
 explicit OperatorEntry(OperatorName&& operator_name);
@@ -9,7 +9,7 @@ struct FunctionSchema;
 };

 namespace at {
-CAFFE2_API void launch(std::function<void()> func);
+TORCH_API void launch(std::function<void()> func);
 }

 namespace torch {
@@ -4,14 +4,14 @@
 namespace at {

-struct CAFFE2_API GradMode {
+struct TORCH_API GradMode {
 static bool is_enabled();
 static void set_enabled(bool enabled);
 };

 // A RAII, thread local (!) guard that enables or disables grad mode upon
 // construction, and sets it back to the original value upon destruction.
-struct CAFFE2_API AutoGradMode {
+struct TORCH_API AutoGradMode {
 AutoGradMode(bool enabled) : prev_mode(GradMode::is_enabled()) {
 GradMode::set_enabled(enabled);
 }
@@ -23,7 +23,7 @@ struct CAFFE2_API AutoGradMode {
 // A RAII, thread local (!) guard that stops future operations from building
 // gradients.
-struct CAFFE2_API NoGradGuard : public AutoGradMode {
+struct TORCH_API NoGradGuard : public AutoGradMode {
 NoGradGuard() : AutoGradMode(/*enabled=*/false) {}
 };
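As the comments in the two hunks above note, GradMode is toggled through RAII guards. The following sketch shows how these guards are commonly used from C++; it is an illustration based on the documented behavior, not part of this diff, and assumes the usual `ATen/core/grad_mode.h` include location.

```cpp
#include <ATen/ATen.h>
#include <ATen/core/grad_mode.h>

void guard_example(const at::Tensor& x) {
  {
    // Disable gradient tracking for the enclosing scope; the previous
    // GradMode setting is restored when the guard is destroyed.
    at::NoGradGuard no_grad;
    auto y = x * 2;
    (void)y;
  }
  // GradMode is back to its prior value here; AutoGradMode can force it on.
  at::AutoGradMode enable_grad(/*enabled=*/true);
}
```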
@@ -435,7 +435,7 @@ const std::string& domain_prefix();
 // A Symbol is like an interned string, but with a little extra
 // structure; it is namespaced via SymbolNamespace and the resulting
 // intern pointers support efficient namespace testing.
-struct CAFFE2_API Symbol {
+struct TORCH_API Symbol {
 explicit constexpr Symbol() : value(0) {};
 explicit constexpr Symbol(unique_t uniq)
 : value(uniq) {}
@@ -11,7 +11,7 @@
 namespace c10 {

-struct CAFFE2_API InternedStrings {
+struct TORCH_API InternedStrings {
 InternedStrings();
 Symbol symbol(const std::string& s);
 std::pair<const char*, const char*> string(Symbol sym);
@@ -33,7 +33,7 @@ void checkCustomClassType(const Type* expected_type, const Type* actual_type) {
 expected_type->repr_str());
 }

-CAFFE2_API c10::intrusive_ptr<ConstantString> ConstantString::create(
+TORCH_API c10::intrusive_ptr<ConstantString> ConstantString::create(
 std::string str_) {
 return c10::make_intrusive<ConstantString>(std::move(str_));
 }
@@ -887,7 +887,7 @@ getClassConverter() {
 return classConverter;
 }

-CAFFE2_API intrusive_ptr<ivalue::Future> collectAll(
+TORCH_API intrusive_ptr<ivalue::Future> collectAll(
 List<intrusive_ptr<ivalue::Future>> srcs) {
 struct Ctx {
 explicit Ctx(List<intrusive_ptr<ivalue::Future>> srcs)
@@ -919,7 +919,7 @@ CAFFE2_API intrusive_ptr<ivalue::Future> collectAll(
 return ctx->dstFuture;
 }

-CAFFE2_API intrusive_ptr<ivalue::Future> collectAny(
+TORCH_API intrusive_ptr<ivalue::Future> collectAny(
 List<intrusive_ptr<ivalue::Future>> srcs) {
 if (srcs.empty()) {
 auto res = make_intrusive<ivalue::Future>(NoneType::get());
@@ -157,7 +157,7 @@ struct Capsule {
 /// // `my_ivalue` is tagged as an int and cannot be used as another type
 /// torch::Tensor my_tensor = my_ivalue.toTensor()
 /// \endrst
-struct CAFFE2_API IValue final {
+struct TORCH_API IValue final {
 IValue(const IValue& rhs)
 : IValue(rhs.payload, rhs.tag, rhs.is_intrusive_ptr) {
 if (is_intrusive_ptr) {
@@ -744,7 +744,7 @@ struct CAFFE2_API IValue final {
 // This is different from `repr()` in that there is no expectation that we can
 // exactly reconstruct an IValue from the output; feel free to use a
 // concise/pretty form
-  CAFFE2_API friend std::ostream& operator<<(
+  TORCH_API friend std::ostream& operator<<(
 std::ostream& out,
 const IValue& v);
@@ -847,7 +847,7 @@ struct CAFFE2_API IValue final {
 friend struct WeakIValue;
 };

-struct CAFFE2_API WeakIValue final {
+struct TORCH_API WeakIValue final {
 WeakIValue() : payload{0}, tag(IValue::Tag::None), is_intrusive_ptr(false) {}

 WeakIValue(const WeakIValue& rhs)
@@ -180,14 +180,14 @@ inline at::Generator IValue::toGenerator() const& {
 namespace ivalue {

-void CAFFE2_API
+void TORCH_API
 checkCustomClassType(const Type* expected_type, const Type* actual_type);

 template <typename T>
 using Shared = c10::intrusive_ptr<T>;

 // string
-struct CAFFE2_API ConstantString final : c10::intrusive_ptr_target {
+struct TORCH_API ConstantString final : c10::intrusive_ptr_target {
 private:
 const std::string str_;
@@ -200,14 +200,14 @@ struct CAFFE2_API ConstantString final : c10::intrusive_ptr_target {
 operator const std::string&() const {
 return string();
 }
-  CAFFE2_API friend std::ostream& operator<<(
+  TORCH_API friend std::ostream& operator<<(
 std::ostream& out,
 const ConstantString& v);
 };

 struct Future;

-struct CAFFE2_API Tuple : c10::intrusive_ptr_target {
+struct TORCH_API Tuple : c10::intrusive_ptr_target {
 private:
 std::vector<IValue> elements_;
 mutable std::shared_ptr<TupleType>
@@ -254,7 +254,7 @@ struct CAFFE2_API Tuple : c10::intrusive_ptr_target {
 return c10::get_hash(t.elements());
 }

-  CAFFE2_API friend bool operator==(
+  TORCH_API friend bool operator==(
 const ivalue::Tuple& lhs,
 const ivalue::Tuple& rhs);
@@ -283,7 +283,7 @@ struct C10_EXPORT ivalue::Future : c10::intrusive_ptr_target {
 public:
 explicit Future(TypePtr type) : type_(type) {}
-  struct CAFFE2_API FutureError final : public std::exception {
+  struct TORCH_API FutureError final : public std::exception {
 explicit FutureError(std::string&& error_msg_)
 : error_msg(std::move(error_msg_)) {}
@@ -485,7 +485,7 @@ struct C10_EXPORT ivalue::Future : c10::intrusive_ptr_target {
 return eptr_;
 }

-  CAFFE2_API friend std::ostream& operator<<(
+  TORCH_API friend std::ostream& operator<<(
 std::ostream& out,
 const Future& v);
@@ -573,11 +573,11 @@ struct C10_EXPORT ivalue::Future : c10::intrusive_ptr_target {
 // Input is a list of Futures with the same target type.
 // Output is a Future to the List of completed Futures.
-CAFFE2_API intrusive_ptr<ivalue::Future> collectAll(
+TORCH_API intrusive_ptr<ivalue::Future> collectAll(
 c10::List<c10::intrusive_ptr<ivalue::Future>> srcs);
 // Input is a List of Futures with the same target type.
 // Output is a Future that will be updated with a seen value.
-CAFFE2_API intrusive_ptr<ivalue::Future> collectAny(
+TORCH_API intrusive_ptr<ivalue::Future> collectAny(
 c10::List<c10::intrusive_ptr<ivalue::Future>> srcs);

 // User-defined object.
@@ -692,11 +692,11 @@ struct ivalue::EnumHolder : c10::intrusive_ptr_target {
 const ivalue::EnumHolder& lhs,
 const ivalue::EnumHolder& rhs);

-  CAFFE2_API friend std::ostream& operator<<(
+  TORCH_API friend std::ostream& operator<<(
 std::ostream& out,
 const EnumHolder& v);

-  CAFFE2_API const std::string qualifiedClassName() const;
+  TORCH_API const std::string qualifiedClassName() const;

 const std::string unqualifiedClassName() const;
@ -67,7 +67,7 @@ enum class TypeKind {
|
|||
#undef DEFINE_TYPE
|
||||
};
|
||||
|
||||
CAFFE2_API const char* typeKindToString(TypeKind kind);
|
||||
TORCH_API const char* typeKindToString(TypeKind kind);
|
||||
|
||||
struct Type;
|
||||
using TypePtr = std::shared_ptr<Type>;
|
||||
|
|
@ -79,7 +79,7 @@ using ConstTypePtr = std::shared_ptr<const Type>;
|
|||
using TypePrinter =
|
||||
std::function<c10::optional<std::string>(const ConstTypePtr&)>;
|
||||
|
||||
struct CAFFE2_API Type : std::enable_shared_from_this<Type> {
|
||||
struct TORCH_API Type : std::enable_shared_from_this<Type> {
|
||||
private:
|
||||
TypeKind kind_;
|
||||
|
||||
|
|
@ -212,7 +212,7 @@ struct AnyType;
|
|||
using AnyTypePtr = std::shared_ptr<AnyType>;
|
||||
// Any is the top of the type hierarchy, all other types are subtypes
|
||||
// T <: Any, forall T
|
||||
struct CAFFE2_API AnyType : public Type {
|
||||
struct TORCH_API AnyType : public Type {
|
||||
static AnyTypePtr create() {
|
||||
return AnyTypePtr(
|
||||
new AnyType()); // NOLINT(modernize-make-shared)
|
||||
|
|
@ -284,7 +284,7 @@ using OptionalTypePtr = std::shared_ptr<OptionalType>;
|
|||
// 1. Optional[T] <: Optional[R] iff T <: R
|
||||
// 2. T <: Optional[R] if T <: R
|
||||
// 3. None <: Optional[T] for all T
|
||||
struct CAFFE2_API OptionalType
|
||||
struct TORCH_API OptionalType
|
||||
: public SingleElementType<TypeKind::OptionalType, OptionalType> {
|
||||
static OptionalTypePtr create(TypePtr element) {
|
||||
TORCH_INTERNAL_ASSERT(element, "OptionalType requires valid TypePtr");
|
||||
|
|
@ -356,7 +356,7 @@ inline c10::optional<T> merge_primitive(
|
|||
// `stride_indices` A contiguity marker on the smallest stride (c0) indicates
|
||||
// the stride is precisely 1, otherwise a contiguity marker means that $stride_n
|
||||
// = size_{n-1}*stride_{n-1}$
|
||||
struct CAFFE2_API Stride {
|
||||
struct TORCH_API Stride {
|
||||
Stride() {}
|
||||
Stride(
|
||||
const c10::optional<size_t>& stride_index,
|
||||
|
|
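As a minimal sketch (not part of the commit), the contiguity relation quoted in the Stride comment above can be written out directly; the helper name contiguous_strides is illustrative only:

#include <cstdint>
#include <vector>

// Computes the strides of a contiguous tensor: the innermost stride is 1, and
// each outer stride is the next size times the next stride, which is the
// relation the contiguity marker above encodes.
std::vector<int64_t> contiguous_strides(const std::vector<int64_t>& sizes) {
  std::vector<int64_t> strides(sizes.size(), 1);
  for (int64_t i = static_cast<int64_t>(sizes.size()) - 2; i >= 0; --i) {
    strides[i] = strides[i + 1] * sizes[i + 1];
  }
  return strides;  // e.g. sizes {2, 3, 4} -> strides {12, 4, 1}
}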
@@ -401,7 +401,7 @@ inline c10::optional<Stride> merge_primitive(
return r;
}
struct CAFFE2_API ShapeSymbol {
struct TORCH_API ShapeSymbol {
// needed for use in `std::map`
ShapeSymbol() : value_(-1) {}
// is this symbol a fixed/static dimension

@@ -426,7 +426,7 @@ struct CAFFE2_API ShapeSymbol {
static ShapeSymbol newSymbol() {
return fromStaticSize(-static_cast<int64_t>(++num_symbols));
};
friend CAFFE2_API std::ostream& operator<<(
friend TORCH_API std::ostream& operator<<(
std::ostream& os,
const ShapeSymbol& s);

@@ -447,7 +447,7 @@ inline ShapeSymbol merge_primitive(
// Shape of a Tensor represented with ShapeSymbol's. Unranked, ranked unknown
// dims, partially known and fully known shapes are all supported.
struct CAFFE2_API SymbolicShape {
struct TORCH_API SymbolicShape {
// Unranked shape constructor.
SymbolicShape() : dims_(c10::nullopt) {}

@@ -576,7 +576,7 @@ struct VaryingShape {
return dims_;
}
CAFFE2_API VaryingShape merge(const VaryingShape& other) const;
TORCH_API VaryingShape merge(const VaryingShape& other) const;
c10::optional<std::vector<T>> concrete_sizes() const {
if (!dims_) {

@@ -611,7 +611,7 @@ struct VaryingShape {
struct TensorType;
using TensorTypePtr = std::shared_ptr<TensorType>;
// This type represents a single Tensor with a specific size
struct CAFFE2_API TensorType : public Type {
struct TORCH_API TensorType : public Type {
static TensorTypePtr create(const at::Tensor& t);
// used by TensorType::create(size_t dim) which in turn used by

@@ -864,7 +864,7 @@ struct CAFFE2_API TensorType : public Type {
struct ListType;
using ListTypePtr = std::shared_ptr<ListType>;
struct CAFFE2_API ListType
struct TORCH_API ListType
: public SingleElementType<TypeKind::ListType, ListType> {
// It's not exactly a singleton, but there should be exactly one instance of
// List[T] for every T

@@ -906,7 +906,7 @@ struct CAFFE2_API ListType
struct DictType;
using DictTypePtr = std::shared_ptr<DictType>;
struct CAFFE2_API DictType : public Type {
struct TORCH_API DictType : public Type {
friend struct Type;
static const TypeKind Kind = TypeKind::DictType;

@@ -988,7 +988,7 @@ struct CAFFE2_API DictType : public Type {
struct FutureType;
using FutureTypePtr = std::shared_ptr<FutureType>;
struct CAFFE2_API FutureType
struct TORCH_API FutureType
: public SingleElementType<TypeKind::FutureType, FutureType> {
friend struct Type;
template <typename... T>

@@ -1030,7 +1030,7 @@ struct CAFFE2_API FutureType
struct RRefType;
using RRefTypePtr = std::shared_ptr<RRefType>;
struct CAFFE2_API RRefType
struct TORCH_API RRefType
: public SingleElementType<TypeKind::RRefType, RRefType> {
friend struct Type;
template <typename... T>

@@ -1064,7 +1064,7 @@ struct NamedType;
using NamedTypePtr = std::shared_ptr<NamedType>;
using ConstNamedTypePtr = std::shared_ptr<const NamedType>;
struct CAFFE2_API NamedType : public Type {
struct TORCH_API NamedType : public Type {
NamedType(TypeKind tk, c10::optional<QualifiedName> name)
: Type(tk), name_(std::move(name)) {
TORCH_INTERNAL_ASSERT(

@@ -1091,7 +1091,7 @@ private:
// static types in named types to reconstruct type tags of loaded
// values. Lifting this restriction requires solving the serialization
// problem first.
CAFFE2_API void checkNoAny(
TORCH_API void checkNoAny(
const Type& base,
const char* what,
const std::string& attrname,

@@ -1101,7 +1101,7 @@ struct TupleType;
using TupleTypePtr = std::shared_ptr<TupleType>;
using NameList = std::vector<std::string>;
// This type represents a Tuple
struct CAFFE2_API TupleType : public NamedType {
struct TORCH_API TupleType : public NamedType {
static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name,
const std::vector<std::string>& field_names,
const std::vector<TypePtr>& types);

@@ -1172,7 +1172,7 @@ struct CAFFE2_API TupleType : public NamedType {
struct EnumType;
using EnumTypePtr = std::shared_ptr<EnumType>;
using EnumNameValue = std::pair<std::string, IValue>;
struct CAFFE2_API EnumType : public NamedType {
struct TORCH_API EnumType : public NamedType {
friend struct Type;
static const TypeKind Kind = TypeKind::EnumType;

@@ -1258,7 +1258,7 @@ struct CAFFE2_API EnumType : public NamedType {
// EnumType <: AnyEnumType for all Enums
struct AnyEnumType;
using AnyEnumTypePtr = std::shared_ptr<AnyEnumType>;
struct CAFFE2_API AnyEnumType : public Type {
struct TORCH_API AnyEnumType : public Type {
static AnyEnumTypePtr create() {
return AnyEnumTypePtr(
new AnyEnumType()); // NOLINT(modernize-make-shared)
@@ -1284,7 +1284,7 @@ using NumberTypePtr = std::shared_ptr<NumberType>;
// Subtype hierarchy for Number Types (NumberType as the base type):
// IntType <: NumberType
// FloatType <: NumberType
struct CAFFE2_API NumberType : public Type {
struct TORCH_API NumberType : public Type {
static NumberTypePtr create() {
return NumberTypePtr(new NumberType()); // NOLINT(modernize-make-shared)
}

@@ -1311,7 +1311,7 @@ struct CAFFE2_API NumberType : public Type {
struct FloatType;
using FloatTypePtr = std::shared_ptr<FloatType>;
// This type represents a Python float number
struct CAFFE2_API FloatType : public NumberType {
struct TORCH_API FloatType : public NumberType {
static FloatTypePtr create() {
return FloatTypePtr(new FloatType()); // NOLINT(modernize-make-shared)
}

@@ -1338,7 +1338,7 @@ struct CAFFE2_API FloatType : public NumberType {
struct IntType;
using IntTypePtr = std::shared_ptr<IntType>;
// This type represents a Python int number
struct CAFFE2_API IntType : public NumberType {
struct TORCH_API IntType : public NumberType {
static IntTypePtr create() {
return IntTypePtr(new IntType()); // NOLINT(modernize-make-shared)
}

@@ -1365,7 +1365,7 @@ struct CAFFE2_API IntType : public NumberType {
struct BoolType;
using BoolTypePtr = std::shared_ptr<BoolType>;
// This node represents a Python bool value
struct CAFFE2_API BoolType : public Type {
struct TORCH_API BoolType : public Type {
static BoolTypePtr create() {
return BoolTypePtr(new BoolType());
}

@@ -1386,7 +1386,7 @@ struct CAFFE2_API BoolType : public Type {
struct StringType;
using StringTypePtr = std::shared_ptr<StringType>;
// This type represents a Python string
struct CAFFE2_API StringType : public Type {
struct TORCH_API StringType : public Type {
static StringTypePtr create() {
return StringTypePtr(new StringType()); // NOLINT(modernize-make-shared)
}

@@ -1410,7 +1410,7 @@ struct CAFFE2_API StringType : public Type {
struct StorageType;
using StorageTypePtr = std::shared_ptr<StorageType>;
struct CAFFE2_API StorageType : public Type {
struct TORCH_API StorageType : public Type {
static StorageTypePtr create() {
return StorageTypePtr(new StorageType()); // NOLINT(modernize-make-shared)
}

@@ -1433,7 +1433,7 @@ struct CAFFE2_API StorageType : public Type {
struct FunctionType;
using FunctionTypePtr = std::shared_ptr<FunctionType>;
struct CAFFE2_API FunctionType : public NamedType {
struct TORCH_API FunctionType : public NamedType {
static FunctionTypePtr create(torch::jit::Function* function) {
return FunctionTypePtr(
new FunctionType(function)); // NOLINT(modernize-make-shared)

@@ -1465,7 +1465,7 @@ struct CAFFE2_API FunctionType : public NamedType {
struct NoneType;
using NoneTypePtr = std::shared_ptr<NoneType>;
// This type represents a Python None
struct CAFFE2_API NoneType : public Type {
struct TORCH_API NoneType : public Type {
static NoneTypePtr create() {
return NoneTypePtr(new NoneType()); // NOLINT(modernize-make-shared)
}

@@ -1492,7 +1492,7 @@ struct CAFFE2_API NoneType : public Type {
struct GeneratorType;
using GeneratorTypePtr = std::shared_ptr<GeneratorType>;
// This type represents a Generator
struct CAFFE2_API GeneratorType : public Type {
struct TORCH_API GeneratorType : public Type {
static GeneratorTypePtr create() {
return GeneratorTypePtr(
new GeneratorType()); // NOLINT(modernize-make-shared)

@@ -1514,7 +1514,7 @@ struct CAFFE2_API GeneratorType : public Type {
struct QuantizerType;
using QuantizerTypePtr = std::shared_ptr<QuantizerType>;
// This type represents a Quantizer
struct CAFFE2_API QuantizerType : public Type {
struct TORCH_API QuantizerType : public Type {
static QuantizerTypePtr create() {
return QuantizerTypePtr(
new QuantizerType()); // NOLINT(modernize-make-shared)

@@ -1536,7 +1536,7 @@ struct CAFFE2_API QuantizerType : public Type {
struct QSchemeType;
using QSchemeTypePtr = std::shared_ptr<QSchemeType>;
// This type represents a QScheme
struct CAFFE2_API QSchemeType : public Type {
struct TORCH_API QSchemeType : public Type {
static QSchemeTypePtr create() {
return QSchemeTypePtr(
new QSchemeType()); // NOLINT(modernize-make-shared)

@@ -1558,7 +1558,7 @@ struct CAFFE2_API QSchemeType : public Type {
struct DeviceObjType;
using DeviceObjTypePtr = std::shared_ptr<DeviceObjType>;
// This type represents a Device
struct CAFFE2_API DeviceObjType : public Type {
struct TORCH_API DeviceObjType : public Type {
static DeviceObjTypePtr create() {
return DeviceObjTypePtr(
new DeviceObjType()); // NOLINT(modernize-make-shared)

@@ -1580,7 +1580,7 @@ struct CAFFE2_API DeviceObjType : public Type {
struct StreamObjType;
using StreamObjTypePtr = std::shared_ptr<StreamObjType>;
// This type represents a Generator
struct CAFFE2_API StreamObjType : public Type {
struct TORCH_API StreamObjType : public Type {
static StreamObjTypePtr create() {
return StreamObjTypePtr(
new StreamObjType()); // NOLINT(modernize-make-shared)

@@ -1630,7 +1630,7 @@ struct CapsuleType;
using CapsuleTypePtr = std::shared_ptr<CapsuleType>;
// This type represents a Python Capsule.
// It does not appear in the IR and is only used during runtime
struct CAFFE2_API CapsuleType : public Type {
struct TORCH_API CapsuleType : public Type {
static CapsuleTypePtr create() {
return CapsuleTypePtr(new CapsuleType()); // NOLINT(modernize-make-shared)
}

@@ -1651,7 +1651,7 @@ private:
struct PyObjectType;
using PyObjectTypePtr = std::shared_ptr<PyObjectType>;
// This type represents a PyObject Type
struct CAFFE2_API PyObjectType : public Type {
struct TORCH_API PyObjectType : public Type {
static PyObjectTypePtr create() {
return PyObjectTypePtr(new PyObjectType()); // NOLINT(modernize-make-shared)
}
@@ -1677,16 +1677,16 @@ enum class TypeVerbosity {
Default = Full,
};
CAFFE2_API TypeVerbosity type_verbosity();
TORCH_API TypeVerbosity type_verbosity();
CAFFE2_API std::ostream& operator<<(std::ostream& out, const Type& t);
TORCH_API std::ostream& operator<<(std::ostream& out, const Type& t);
template <typename T>
CAFFE2_API std::ostream& operator<<(
TORCH_API std::ostream& operator<<(
std::ostream& out,
const VaryingShape<T>& t);
CAFFE2_API std::ostream& operator<<(std::ostream& os, const SymbolicShape& s);
CAFFE2_API std::ostream& operator<<(std::ostream& os, const ShapeSymbol& s);
CAFFE2_API std::ostream& operator<<(std::ostream& os, const Stride& s);
TORCH_API std::ostream& operator<<(std::ostream& os, const SymbolicShape& s);
TORCH_API std::ostream& operator<<(std::ostream& os, const ShapeSymbol& s);
TORCH_API std::ostream& operator<<(std::ostream& os, const Stride& s);
// what is the type, ignoring extra size/shape information?
// e.g. Tensor(2x3) -> Dynamic, and Tuple(Tensor(2x3),...) -> Tuple(Dynamic,...)

@@ -1738,12 +1738,12 @@ inline at::ScalarType scalarTypeFromJitType(const c10::TypePtr& type) {
// Two different tensortypes will return dynamic.
// Currently we chose not to support returning a NumberType for a float & int
// input because of a lack of operator support for NumberType
CAFFE2_API c10::optional<TypePtr> unifyTypes(
TORCH_API c10::optional<TypePtr> unifyTypes(
const TypePtr& t1,
const TypePtr& t2,
bool default_to_any = false);
CAFFE2_API c10::optional<TypePtr> unifyTypeList(
TORCH_API c10::optional<TypePtr> unifyTypeList(
at::ArrayRef<TypePtr> elements,
std::ostream& why_not);

@@ -1963,15 +1963,15 @@ struct MatchTypeReturn {
// note: It is possible to successfully match a formal, but for type variables
// in the formal to still not be defined. In particular, None matches Optional[T]
// but does not define the value of T.
CAFFE2_API MatchTypeReturn
TORCH_API MatchTypeReturn
matchTypeVariables(TypePtr formal, TypePtr actual, TypeEnv& type_env);
// replace type variables appearing in `type` with the values in
// `type_env`. Returns nullptr if a variable used in `type`
// does not appear in `type_env`
CAFFE2_API TypePtr tryEvalTypeVariables(TypePtr type, TypeEnv& type_env);
TORCH_API TypePtr tryEvalTypeVariables(TypePtr type, TypeEnv& type_env);
CAFFE2_API bool elementTypeCanBeInferredFromMembers(const TypePtr& elem_type);
TORCH_API bool elementTypeCanBeInferredFromMembers(const TypePtr& elem_type);
// This enumerator represents the 'kind' of an attribute - a buffer, a paramter, or neither.
// This state is mutually exclusive. Buffers and Parameters can only appear on modules.
@@ -1983,7 +1983,7 @@ enum class AttributeKind {
// This structure represents all notional booking entities in a class attribute: name, kind (see: AttributeKind), and type (see: TypePtr).
// Note: This structure does not represent the value of the attribute.
struct CAFFE2_API ClassAttribute {
struct TORCH_API ClassAttribute {
public:
ClassAttribute(AttributeKind kind,
TypePtr attributeType,

@@ -2019,7 +2019,7 @@ using ClassTypePtr = std::shared_ptr<ClassType>;
using ::torch::jit::CompilationUnit;
// This represents a class in TorchScript.
struct CAFFE2_API ClassType : public NamedType {
struct TORCH_API ClassType : public NamedType {
// This represents an attribute of a class; a name associated with an attribute, and a
// getter and (optional) setter for that attribute.
struct Property {

@@ -2377,7 +2377,7 @@ using ::torch::jit::CompilationUnit;
// lhs (ClassType or InterfaceType) is a subtype of rhs if:
// 1. lhs methods are a superset of rhs methods
// 2. if rhs is module interface, the lhs must be module interface or module itself
struct CAFFE2_API InterfaceType : public NamedType {
struct TORCH_API InterfaceType : public NamedType {
static InterfaceTypePtr create(
QualifiedName qualifiedName, bool is_module=false);

@@ -2441,7 +2441,7 @@ EnumerationType() : Type(Kind) {}
struct LayoutType;
using LayoutTypePtr = std::shared_ptr<LayoutType>;
// This type represents a Generator
struct CAFFE2_API LayoutType : public EnumerationType<TypeKind::LayoutType> {
struct TORCH_API LayoutType : public EnumerationType<TypeKind::LayoutType> {
static LayoutTypePtr create() {
return LayoutTypePtr(
new LayoutType()); // NOLINT(modernize-make-shared)

@@ -2460,7 +2460,7 @@ LayoutType() : EnumerationType() {}
struct ScalarTypeType;
using ScalarTypeTypePtr = std::shared_ptr<ScalarTypeType>;
// This type represents a Generator
struct CAFFE2_API ScalarTypeType : public EnumerationType<TypeKind::ScalarTypeType> {
struct TORCH_API ScalarTypeType : public EnumerationType<TypeKind::ScalarTypeType> {
static ScalarTypeTypePtr create() {
return ScalarTypeTypePtr(
new ScalarTypeType()); // NOLINT(modernize-make-shared)

@@ -2480,7 +2480,7 @@ ScalarTypeType() : EnumerationType() {}
// List[T] <: AnyList for all T
struct AnyListType;
using AnyListTypePtr = std::shared_ptr<AnyListType>;
struct CAFFE2_API AnyListType : public Type {
struct TORCH_API AnyListType : public Type {
static AnyListTypePtr create() {
return AnyListTypePtr(
new AnyListType()); // NOLINT(modernize-make-shared)

@@ -2503,7 +2503,7 @@ private:
// Tuple[T...] <: AnyTuple for all T
struct AnyTupleType;
using AnyTupleTypePtr = std::shared_ptr<AnyTupleType>;
struct CAFFE2_API AnyTupleType : public Type {
struct TORCH_API AnyTupleType : public Type {
static AnyTupleTypePtr create() {
return AnyTupleTypePtr(
new AnyTupleType()); // NOLINT(modernize-make-shared)

@@ -2528,7 +2528,7 @@ private:
// ClassType <: AnyClassType for all classes
struct AnyClassType;
using AnyClassTypePtr = std::shared_ptr<AnyClassType>;
struct CAFFE2_API AnyClassType : public Type {
struct TORCH_API AnyClassType : public Type {
static AnyClassTypePtr create() {
return AnyClassTypePtr(
new AnyClassType()); // NOLINT(modernize-make-shared)

@@ -153,6 +153,6 @@ FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&&
return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn<guts::infer_function_traits_t<FuncType>>(std::move(name), std::move(overload_name));
}
CAFFE2_API c10::optional<std::string> findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified);
TORCH_API c10::optional<std::string> findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified);
}
@@ -43,7 +43,7 @@ std::unique_ptr<FunctionSchema> inferFunctionSchemaFromFunctor() {
* > .schema("my_op")
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
*/
class CAFFE2_API RegisterOperators final {
class TORCH_API RegisterOperators final {
public:
RegisterOperators();
~RegisterOperators();

@@ -53,7 +53,7 @@ public:
RegisterOperators(RegisterOperators&&) noexcept;
RegisterOperators& operator=(RegisterOperators&&) noexcept;
class CAFFE2_API Options final {
class TORCH_API Options final {
public:
Options(const Options&) = delete;
Options(Options&&) noexcept = delete;
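The doc comment above already sketches the intended call pattern; a fuller, hedged example might look roughly as follows (the kernel functor my_kernel_cpu and the schema string are illustrative, not part of this commit):

#include <ATen/core/op_registration/op_registration.h>

// Hypothetical CPU kernel functor, following the pattern in the comment above.
struct my_kernel_cpu final : c10::OperatorKernel {
  at::Tensor operator()(const at::Tensor& a) { return a; }
};

// Registers the operator for the CPU dispatch key; the registration stays
// alive for the lifetime of the static variable.
static auto registry = c10::RegisterOperators().op(
    c10::RegisterOperators::options()
        .schema("my_namespace::my_op(Tensor a) -> Tensor")
        .kernel<my_kernel_cpu>(c10::DispatchKey::CPU));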
@@ -72,8 +72,8 @@ inline bool operator!=(const OperatorName& lhs, const OperatorName& rhs) {
return !operator==(lhs, rhs);
}
CAFFE2_API std::string toString(const OperatorName& opName);
CAFFE2_API std::ostream& operator<<(std::ostream&, const OperatorName&);
TORCH_API std::string toString(const OperatorName& opName);
TORCH_API std::ostream& operator<<(std::ostream&, const OperatorName&);
} // namespace c10

@@ -469,7 +469,7 @@ MatchTypeReturn matchTypeVariables(
}
// change return types like List[List[t]] into List[List[int]]
CAFFE2_API TypePtr tryEvalTypeVariables(TypePtr type, std::unordered_map<std::string, TypePtr>& type_env) {
TORCH_API TypePtr tryEvalTypeVariables(TypePtr type, std::unordered_map<std::string, TypePtr>& type_env) {
if (!type->hasFreeVariables()) {
return type;
}

@@ -494,7 +494,7 @@ CAFFE2_API TypePtr tryEvalTypeVariables(TypePtr type, std::unordered_map<std::st
}
}
CAFFE2_API bool elementTypeCanBeInferredFromMembers(const TypePtr& elem_type) {
TORCH_API bool elementTypeCanBeInferredFromMembers(const TypePtr& elem_type) {
if (elem_type->kind() == OptionalType::Kind ||
elem_type->kind() == NumberType::Kind) {
// Builtin Union types
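A rough, hedged sketch of the substitution these helpers perform (the use of c10::VarType::create and the variable name "t" are assumptions for illustration; only the matchTypeVariables/tryEvalTypeVariables signatures come from the diff above):

#include <unordered_map>
#include <ATen/core/jit_type.h>

void type_variable_example() {
  // List[t], where t is a free type variable, matched against List[int].
  auto formal = c10::ListType::create(c10::VarType::create("t"));
  auto actual = c10::ListType::create(c10::IntType::get());

  std::unordered_map<std::string, c10::TypePtr> type_env;
  // On success, binds t -> int in type_env.
  auto match = c10::matchTypeVariables(formal, actual, type_env);
  // Substitutes the binding back, yielding List[int] (nullptr if t were unbound).
  auto resolved = c10::tryEvalTypeVariables(formal, type_env);
  (void)match;
  (void)resolved;
}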
@@ -67,7 +67,7 @@ constexpr const char* CUDA_HELP =
// TODO: Consider putting the stub definitions in another class, so that one
// never forgets to implement each virtual function in the real implementation
// in CUDAHooks. This probably doesn't buy us much though.
struct CAFFE2_API CUDAHooksInterface {
struct TORCH_API CUDAHooksInterface {
// This should never actually be implemented, but it is used to
// squelch -Werror=non-virtual-dtor
virtual ~CUDAHooksInterface() {}

@@ -185,13 +185,13 @@ struct CAFFE2_API CUDAHooksInterface {
// NB: dummy argument to suppress "ISO C++11 requires at least one argument
// for the "..." in a variadic macro"
struct CAFFE2_API CUDAHooksArgs {};
struct TORCH_API CUDAHooksArgs {};
C10_DECLARE_REGISTRY(CUDAHooksRegistry, CUDAHooksInterface, CUDAHooksArgs);
#define REGISTER_CUDA_HOOKS(clsname) \
C10_REGISTER_CLASS(CUDAHooksRegistry, clsname, clsname)
namespace detail {
CAFFE2_API const CUDAHooksInterface& getCUDAHooks();
TORCH_API const CUDAHooksInterface& getCUDAHooks();
} // namespace detail
} // namespace at

@@ -24,7 +24,7 @@ namespace at {
// which we may want to call into from CPU code (and thus must be dynamically
// dispatched, to allow for separate compilation of HIP code). See
// CUDAHooksInterface for more detailed motivation.
struct CAFFE2_API HIPHooksInterface {
struct TORCH_API HIPHooksInterface {
// This should never actually be implemented, but it is used to
// squelch -Werror=non-virtual-dtor
virtual ~HIPHooksInterface() {}

@@ -61,14 +61,14 @@ struct CAFFE2_API HIPHooksInterface {
// NB: dummy argument to suppress "ISO C++11 requires at least one argument
// for the "..." in a variadic macro"
struct CAFFE2_API HIPHooksArgs {};
struct TORCH_API HIPHooksArgs {};
C10_DECLARE_REGISTRY(HIPHooksRegistry, HIPHooksInterface, HIPHooksArgs);
#define REGISTER_HIP_HOOKS(clsname) \
C10_REGISTER_CLASS(HIPHooksRegistry, clsname, clsname)
namespace detail {
CAFFE2_API const HIPHooksInterface& getHIPHooks();
TORCH_API const HIPHooksInterface& getHIPHooks();
} // namespace detail
} // namespace at
@@ -59,10 +59,10 @@ enum class CPUCapability {
CPUCapability get_cpu_capability();
template <typename FnPtr, typename T>
struct CAFFE2_API DispatchStub;
struct TORCH_API DispatchStub;
template <typename rT, typename T, typename... Args>
struct CAFFE2_API DispatchStub<rT (*)(Args...), T> {
struct TORCH_API DispatchStub<rT (*)(Args...), T> {
using FnPtr = rT (*) (Args...);
DispatchStub() = default;

@@ -167,7 +167,7 @@ struct RegisterHIPDispatch {
name(const name&) = delete; \
name& operator=(const name&) = delete; \
}; \
extern CAFFE2_API struct name name
extern TORCH_API struct name name
#define DEFINE_DISPATCH(name) struct name name
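A hedged sketch of how these macros are typically paired elsewhere in ATen (the stub and function names below are illustrative, and the snippet assumes ATen/native/DispatchStub.h is included):

// In a header: declare the stub. Via the macro above this expands to a small
// struct plus an `extern TORCH_API struct my_op_stub my_op_stub` declaration.
using my_op_fn = void (*)(at::TensorIterator&);
DECLARE_DISPATCH(my_op_fn, my_op_stub);

// In exactly one .cpp: provide the definition that the extern refers to.
DEFINE_DISPATCH(my_op_stub);

// In a per-CPU-capability kernel file, a matching
// REGISTER_DISPATCH(my_op_stub, &my_op_kernel) would install the kernel.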
@@ -14,7 +14,7 @@ namespace at { namespace native {
// Issues a warning if the output tensor has one or more elements and
// needs resizing
// NOTE: In the future the warning will become an error
CAFFE2_API void resize_output(Tensor& output, IntArrayRef shape);
TORCH_API void resize_output(Tensor& output, IntArrayRef shape);
// These functions are called by native::resize_ as well as (legacy) TH resize.
// They are not in TH/THTensor.cpp because the at namespace is easier

@@ -75,6 +75,6 @@ DECLARE_DISPATCH(fft_fill_with_conjugate_symmetry_fn, fft_fill_with_conjugate_sy
// self should be the shape of the full signal and dims.back() should be the
// one-sided dimension.
// See NOTE [ Fourier Transform Conjugate Symmetry ]
CAFFE2_API void _fft_fill_with_conjugate_symmetry_(const Tensor& self, IntArrayRef dims);
TORCH_API void _fft_fill_with_conjugate_symmetry_(const Tensor& self, IntArrayRef dims);
}} // at::native

@@ -10,9 +10,9 @@ struct ResultTypeState {
c10::ScalarType zeroResult = ScalarType::Undefined;
};
CAFFE2_API ResultTypeState update_result_type_state(const Tensor& tensor, const ResultTypeState& in_state);
CAFFE2_API ScalarType result_type(const ResultTypeState& state);
TORCH_API ResultTypeState update_result_type_state(const Tensor& tensor, const ResultTypeState& in_state);
TORCH_API ScalarType result_type(const ResultTypeState& state);
CAFFE2_API ScalarType result_type(TensorList tensors);
TORCH_API ScalarType result_type(TensorList tensors);
}}

@@ -21,7 +21,7 @@ namespace at { namespace native {
* NOTE: if this is generally useful we may want to move this to its own header.
*/
template <typename T>
struct CAFFE2_API IntrusivePtrTargetWrapper : c10::intrusive_ptr_target {
struct TORCH_API IntrusivePtrTargetWrapper : c10::intrusive_ptr_target {
private:
T target_;
@@ -396,7 +396,7 @@ void quantize_vec(
}
template <typename T>
CAFFE2_API float dequantize_val(double scale, int64_t zero_point, T value) {
TORCH_API float dequantize_val(double scale, int64_t zero_point, T value) {
// We need to convert the qint8 value to float to ensure the subtraction
// subexpression returns a float
return (static_cast<float>(value.val_) - zero_point) * scale;
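A small worked example of the affine mapping implemented by dequantize_val above and by the clamped requantize path in the next hunk (the quantization parameters are illustrative):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  // Example per-tensor affine parameters (illustrative values).
  const double scale = 0.05;
  const int64_t zero_point = 10;
  const float x = 1.23f;

  // Quantize into a qint8-style range, clamping as requantize_from_int does.
  const int64_t qmin = -128, qmax = 127;
  int64_t y = static_cast<int64_t>(std::nearbyint(x / scale)) + zero_point;
  y = std::min<int64_t>(std::max<int64_t>(y, qmin), qmax);  // y == 35

  // Dequantize with the same formula as dequantize_val above.
  const float x_hat = static_cast<float>(y - zero_point) * static_cast<float>(scale);
  std::printf("q = %lld, dq = %f\n", static_cast<long long>(y), x_hat);  // dq == 1.25
  return 0;
}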
@@ -441,67 +441,67 @@ DST_T requantize_from_int(double multiplier, int64_t zero_point, int64_t src) {
std::min<int64_t>(std::max<int64_t>(quantize_down, min), max));
}
template CAFFE2_API qint8
template TORCH_API qint8
quantize_val<qint8>(double scale, int64_t zero_point, float value);
template CAFFE2_API quint8
template TORCH_API quint8
quantize_val<quint8>(double scale, int64_t zero_point, float value);
template CAFFE2_API qint32
template TORCH_API qint32
quantize_val<qint32>(double scale, int64_t zero_point, float value);
template CAFFE2_API void quantize_vec<c10::qint8>(
template TORCH_API void quantize_vec<c10::qint8>(
double scale,
int64_t zero_point,
const float* src,
c10::qint8* dst,
size_t count);
template CAFFE2_API void quantize_vec<c10::quint8>(
template TORCH_API void quantize_vec<c10::quint8>(
double scale,
int64_t zero_point,
const float* src,
c10::quint8* dst,
size_t count);
template CAFFE2_API void quantize_vec<c10::qint32, 32>(
template TORCH_API void quantize_vec<c10::qint32, 32>(
double scale,
int64_t zero_point,
const float* src,
c10::qint32* dst,
size_t count);
template CAFFE2_API float dequantize_val<qint8>(
template TORCH_API float dequantize_val<qint8>(
double scale,
int64_t zero_point,
qint8 value);
template CAFFE2_API float dequantize_val<quint8>(
template TORCH_API float dequantize_val<quint8>(
double scale,
int64_t zero_point,
quint8 value);
template CAFFE2_API float dequantize_val<qint32>(
template TORCH_API float dequantize_val<qint32>(
double scale,
int64_t zero_point,
qint32 value);
template CAFFE2_API qint8
template TORCH_API qint8
requantize_val<qint8, qint8>(double, int64_t, double, int64_t, qint8);
template CAFFE2_API quint8
template TORCH_API quint8
requantize_val<qint8, quint8>(double, int64_t, double, int64_t, qint8);
template CAFFE2_API qint32
template TORCH_API qint32
requantize_val<qint8, qint32>(double, int64_t, double, int64_t, qint8);
template CAFFE2_API qint8
template TORCH_API qint8
requantize_val<quint8, qint8>(double, int64_t, double, int64_t, quint8);
template CAFFE2_API quint8
template TORCH_API quint8
requantize_val<quint8, quint8>(double, int64_t, double, int64_t, quint8);
template CAFFE2_API qint32
template TORCH_API qint32
requantize_val<quint8, qint32>(double, int64_t, double, int64_t, quint8);
template CAFFE2_API qint8
template TORCH_API qint8
requantize_val<qint32, qint8>(double, int64_t, double, int64_t, qint32);
template CAFFE2_API quint8
template TORCH_API quint8
requantize_val<qint32, quint8>(double, int64_t, double, int64_t, qint32);
template CAFFE2_API qint32
template TORCH_API qint32
requantize_val<qint32, qint32>(double, int64_t, double, int64_t, qint32);
template CAFFE2_API qint8 requantize_from_int<qint8>(double, int64_t, int64_t);
template CAFFE2_API quint8
template TORCH_API qint8 requantize_from_int<qint8>(double, int64_t, int64_t);
template TORCH_API quint8
requantize_from_int<quint8>(double, int64_t, int64_t);
template CAFFE2_API qint32
template TORCH_API qint32
requantize_from_int<qint32>(double, int64_t, int64_t);
} // namespace native
@@ -113,7 +113,7 @@ DECLARE_DISPATCH(
// Quantize a float value into a uint value given scale and zero_point
template <typename T>
CAFFE2_API T quantize_val(double scale, int64_t zero_point, float value);
TORCH_API T quantize_val(double scale, int64_t zero_point, float value);
// TODO combine this with quantize_val once the numerics for ARM are aligned
// with it
uint8_t quantize_val_arm(

@@ -128,34 +128,34 @@ void quantize_vec(
T* dst,
size_t count = 8);
template <typename T>
CAFFE2_API Tensor quantize_tensor(
TORCH_API Tensor quantize_tensor(
Tensor rtensor,
Tensor qtensor,
double scale,
int64_t zero_point);
template <typename T>
CAFFE2_API float dequantize_val(double scale, int64_t zero_point, T value);
TORCH_API float dequantize_val(double scale, int64_t zero_point, T value);
template <typename T>
CAFFE2_API float dequantize_vec(
TORCH_API float dequantize_vec(
double scale,
int64_t zero_point,
const T* src,
float* dst,
size_t count = 8);
template <typename T>
CAFFE2_API Tensor dequantize_tensor(
TORCH_API Tensor dequantize_tensor(
Tensor qtensor,
Tensor rtensor,
double scale,
int64_t zero_point);
template <typename SRC_T, typename DST_T>
CAFFE2_API DST_T requantize_val(double, int64_t, double, int64_t, SRC_T src);
TORCH_API DST_T requantize_val(double, int64_t, double, int64_t, SRC_T src);
// Given a multiplier and a zero_point, requantize int32_t computed values back
// to quantized values. See comment above
// make_per_tensor_affine_quantizer function for the usage of int64_t
template <typename DST_T>
CAFFE2_API DST_T
TORCH_API DST_T
requantize_from_int(double multiplier, int64_t zero_point, int64_t src);
int quantize_val_float_qparams(float scale, float zero_point, float value, int qmin, int qmax);
@@ -357,7 +357,7 @@ Tensor ConvertConvWeightsToChannelLastTensor<3>(
#endif // USE_FBGEMM
template <int kSpatialDim = 2>
CAFFE2_API torch::class_<ConvPackedParamsBase<kSpatialDim>>
TORCH_API torch::class_<ConvPackedParamsBase<kSpatialDim>>
register_conv_params() {
static auto register_conv_params =
torch::class_<ConvPackedParamsBase<kSpatialDim>>(

@@ -397,9 +397,9 @@ Tensor ConvertConvWeightsToChannelLastTensor<3>(
}
template
CAFFE2_API torch::class_<ConvPackedParamsBase<2>> register_conv_params<2>();
TORCH_API torch::class_<ConvPackedParamsBase<2>> register_conv_params<2>();
template
CAFFE2_API torch::class_<ConvPackedParamsBase<3>> register_conv_params<3>();
TORCH_API torch::class_<ConvPackedParamsBase<3>> register_conv_params<3>();
torch::class_<LinearPackedParamsBase> register_linear_params() {
using SerializationType = std::tuple<at::Tensor, c10::optional<at::Tensor>>;

@@ -20,7 +20,7 @@
// of the A rows. The column offsets are needed for the asymmetric quantization
// (affine quantization) of input matrix.
// Note that in JIT mode we can think of a way to fuse col_offsets with bias.
struct CAFFE2_API PackedLinearWeight : public LinearPackedParamsBase {
struct TORCH_API PackedLinearWeight : public LinearPackedParamsBase {
PackedLinearWeight(
std::unique_ptr<fbgemm::PackBMatrix<int8_t>> w,
c10::optional<at::Tensor> bias,

@@ -74,7 +74,7 @@ struct CAFFE2_API PackedLinearWeight : public LinearPackedParamsBase {
at::Tensor apply_dynamic_impl(at::Tensor input, bool reduce_range=false);
};
struct CAFFE2_API PackedLinearWeightFp16 : public LinearPackedParamsBase {
struct TORCH_API PackedLinearWeightFp16 : public LinearPackedParamsBase {
PackedLinearWeightFp16(
std::unique_ptr<fbgemm::PackedGemmMatrixFP16> w,
c10::optional<at::Tensor> bias)

@@ -117,7 +117,7 @@ struct CAFFE2_API PackedLinearWeightFp16 : public LinearPackedParamsBase {
};
template <int kSpatialDim = 2>
struct CAFFE2_API PackedConvWeight : public ConvPackedParamsBase<kSpatialDim> {
struct TORCH_API PackedConvWeight : public ConvPackedParamsBase<kSpatialDim> {
PackedConvWeight(
std::unique_ptr<fbgemm::PackWeightsForConv<kSpatialDim>> w,
c10::optional<at::Tensor> bias,

@@ -306,7 +306,7 @@ Tensor ConvertConvWeightsToChannelLastTensor(
#endif // USE_FBGEMM
struct CAFFE2_API PackedEmbeddingBagWeight : public EmbeddingPackedParamsBase {
struct TORCH_API PackedEmbeddingBagWeight : public EmbeddingPackedParamsBase {
PackedEmbeddingBagWeight(
at::Tensor packed_w,
std::vector<float> w_scale,

@@ -13,7 +13,7 @@ namespace at {
*
* We'll use QTensor in code or documentation to refer to a Tensor with QTensorImpl.
*/
struct CAFFE2_API QTensorImpl : public c10::TensorImpl {
struct TORCH_API QTensorImpl : public c10::TensorImpl {
public:
QTensorImpl(
Storage&& storage,

@@ -24,7 +24,7 @@ namespace at {
* the quantized value. For example, affine quantizer is
* the most commonly used scheme in this category.
*/
struct CAFFE2_API UniformQuantizer : public Quantizer {
struct TORCH_API UniformQuantizer : public Quantizer {
explicit UniformQuantizer(ScalarType scalar_type) : Quantizer(scalar_type) {}
};

@@ -33,7 +33,7 @@ struct CAFFE2_API UniformQuantizer : public Quantizer {
* These quantization scheme may map float value non-uniformly to the quantized
* value. K-means quantization is a representative example in this category.
*/
struct CAFFE2_API NonUniformQuantizer : public Quantizer {
struct TORCH_API NonUniformQuantizer : public Quantizer {
explicit NonUniformQuantizer(ScalarType scalar_type) : Quantizer(scalar_type) {}
};

@@ -47,7 +47,7 @@ struct CAFFE2_API NonUniformQuantizer : public Quantizer {
* For dequantize:
* X = (Y - zero_point) * scale
*/
struct CAFFE2_API AffineQuantizer : public UniformQuantizer {
struct TORCH_API AffineQuantizer : public UniformQuantizer {
explicit AffineQuantizer(ScalarType scalar_type) : UniformQuantizer(scalar_type) {}
};
@@ -58,7 +58,7 @@ struct CAFFE2_API AffineQuantizer : public UniformQuantizer {
* PerTensorAffineQuantizer stores a scale and a zero_point, which is used for
* all the values in the Tensor.
*/
struct CAFFE2_API PerTensorAffineQuantizer : public AffineQuantizer {
struct TORCH_API PerTensorAffineQuantizer : public AffineQuantizer {
explicit PerTensorAffineQuantizer(ScalarType scalar_type, double scale, int64_t zero_point)
: AffineQuantizer(scalar_type),
scale_(scale),

@@ -107,7 +107,7 @@ struct CAFFE2_API PerTensorAffineQuantizer : public AffineQuantizer {
* processors since it requires each multiplication result within a single
* dot-product to have a different scale.
*/
struct CAFFE2_API PerChannelAffineQuantizer : public AffineQuantizer {
struct TORCH_API PerChannelAffineQuantizer : public AffineQuantizer {
explicit PerChannelAffineQuantizer(
ScalarType scalar_type,
Tensor scales,

@@ -169,7 +169,7 @@ struct CAFFE2_API PerChannelAffineQuantizer : public AffineQuantizer {
* be exactly represented in the quantized space. We can get additional precision by
* using floating point values for zero point.
*/
struct CAFFE2_API PerChannelAffineFloatQParamsQuantizer : public PerChannelAffineQuantizer {
struct TORCH_API PerChannelAffineFloatQParamsQuantizer : public PerChannelAffineQuantizer {
explicit PerChannelAffineFloatQParamsQuantizer(
ScalarType scalar_type,
Tensor scales,

@@ -205,26 +205,26 @@ struct CAFFE2_API PerChannelAffineFloatQParamsQuantizer : public PerChannelAffin
// setters/getters for QTensorImpl fields; otherwise, you should use
// the low level setters/getters that were implemented using this.
// This may be called repeatedly, so make sure it's pretty cheap.
CAFFE2_API QTensorImpl* get_qtensorimpl(const Tensor& self);
TORCH_API QTensorImpl* get_qtensorimpl(const Tensor& self);
// double and int64_t are because of the native function API, we only have these
// argument types right now in native functions
CAFFE2_API QuantizerPtr
TORCH_API QuantizerPtr
make_per_tensor_affine_quantizer(
double scale, int64_t zero_point, ScalarType scalar_type);
CAFFE2_API QuantizerPtr make_per_channel_affine_quantizer(
TORCH_API QuantizerPtr make_per_channel_affine_quantizer(
const Tensor& scales,
const Tensor& zero_points,
int64_t axis,
ScalarType scalar_type);
// Create a Quantized Tensor given arguments for normal Tensor and a quantizer
CAFFE2_API Tensor new_qtensor(
TORCH_API Tensor new_qtensor(
IntArrayRef sizes,
const TensorOptions& options,
QuantizerPtr quantizer);
CAFFE2_API void set_quantizer_(const Tensor& self, ConstQuantizerPtr quantizer);
TORCH_API void set_quantizer_(const Tensor& self, ConstQuantizerPtr quantizer);
} // namespace at
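A brief, hedged usage sketch of the factory functions declared in the hunk above (the shape, dtype, and quantization parameters are illustrative):

#include <ATen/ATen.h>
#include <ATen/quantized/Quantizer.h>

void quantizer_example() {
  // Per-tensor affine quantizer with example parameters.
  at::QuantizerPtr q = at::make_per_tensor_affine_quantizer(
      /*scale=*/0.05, /*zero_point=*/10, at::kQInt8);

  // Allocate an (uninitialized) quantized tensor that uses this quantizer.
  at::Tensor qt = at::new_qtensor(
      /*sizes=*/{2, 3},
      at::device(at::kCPU).dtype(at::kQInt8),
      q);
  (void)qt;
}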
@@ -10,7 +10,7 @@
#include <functional>
namespace c10 {
class CAFFE2_API OperatorHandle;
class TORCH_API OperatorHandle;
}
namespace at {

@@ -20,7 +20,7 @@ namespace at {
// These functions are defined in ATen/Utils.cpp.
#define TENSOR(T, S) \
CAFFE2_API Tensor tensor(ArrayRef<T> values, const TensorOptions& options); \
TORCH_API Tensor tensor(ArrayRef<T> values, const TensorOptions& options); \
inline Tensor tensor( \
std::initializer_list<T> values, const TensorOptions& options) { \
return at::tensor(ArrayRef<T>(values), options); \
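For reference, a hedged example of the overloads this TENSOR macro stamps out (the values and dtype are illustrative):

#include <ATen/ATen.h>

void tensor_factory_example() {
  // Uses the initializer_list overload generated by the macro above, which
  // forwards to the ArrayRef overload.
  at::Tensor t = at::tensor({1, 2, 3}, at::dtype(at::kInt));
  (void)t;
}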
@@ -46,10 +46,10 @@ ${function_declarations}
// Special C++ only overloads for std()-like functions (See gh-40287)
// These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
// So, for example std(0) would select the std(unbiased=False) overload
CAFFE2_API Tensor var(const Tensor& self, int dim);
CAFFE2_API std::tuple<Tensor,Tensor> var_mean(const Tensor& self, int dim);
CAFFE2_API Tensor std(const Tensor& self, int dim);
CAFFE2_API std::tuple<Tensor,Tensor> std_mean(const Tensor& self, int dim);
TORCH_API Tensor var(const Tensor& self, int dim);
TORCH_API std::tuple<Tensor,Tensor> var_mean(const Tensor& self, int dim);
TORCH_API Tensor std(const Tensor& self, int dim);
TORCH_API std::tuple<Tensor,Tensor> std_mean(const Tensor& self, int dim);
namespace {
inline std::vector<int64_t> zero_sizes(const TensorOptions& options) {
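A small illustration of the overload-resolution issue the comment above describes (tensor contents are illustrative):

#include <ATen/ATen.h>

void std_overload_example() {
  at::Tensor t = at::rand({4, 5});
  // With the extra int overload declared above, the literal 0 is taken as a
  // dimension index; without it, 0 would convert to bool and select the
  // std(unbiased=false) overload instead.
  at::Tensor per_dim = at::std(t, 0);
  (void)per_dim;
}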
@@ -83,7 +83,7 @@ inline bool variable_excluded_from_dispatch() {
//
// Note that Tensor can also be NULL, i.e. it is not associated with any underlying TensorImpl, and
// special care must be taken to handle this.
class CAFFE2_API Tensor {
class TORCH_API Tensor {
public:
Tensor(){};
// This constructor should not be used by end users and is an implementation

@@ -21,7 +21,7 @@ TH_API c10::Allocator* getTHDefaultAllocator(void);
// the non-file descriptor constructor
enum WithFd { WITH_FD };
class CAFFE2_API THMapAllocator {
class TORCH_API THMapAllocator {
public:
THMapAllocator(const char *filename, int flags, size_t size);
THMapAllocator(WithFd, const char *filename, int fd, int flags, size_t size);

@@ -71,11 +71,11 @@ protected:
};
// Base-from-member idiom
struct CAFFE2_API THRefcountedMapAllocatorArgCheck {
struct TORCH_API THRefcountedMapAllocatorArgCheck {
THRefcountedMapAllocatorArgCheck(int flags);
};
class CAFFE2_API THRefcountedMapAllocator
class TORCH_API THRefcountedMapAllocator
: private THRefcountedMapAllocatorArgCheck,
public THMapAllocator {
public:

@@ -42,7 +42,7 @@
* Please use the public mutex_ when using any methods from these classes, except for the
* read-only methods. You can learn about the usage by looking into the unittests
* (aten/src/ATen/cpu_generator_test.cpp) and other places where we have used lock_guard.
*
*
* TODO: Look into changing the threading semantics of Generators in ATen (e.g., making
* them non-thread safe and instead making the generator state splittable, to accommodate
* forks into other threads).

@@ -96,7 +96,7 @@ struct C10_API GeneratorImpl : public c10::intrusive_ptr_target {
namespace detail {
CAFFE2_API uint64_t getNonDeterministicRandom(bool is_cuda = false);
TORCH_API uint64_t getNonDeterministicRandom(bool is_cuda = false);
} // namespace detail
@@ -179,7 +179,7 @@ private:
* isHighPriority to true, or a stream for a specific device by setting device
* (defaulting to the current CUDA stream.)
*/
CAFFE2_API CUDAStream
TORCH_API CUDAStream
getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1);
/**

@@ -188,7 +188,7 @@ getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1);
* where most computation occurs when you aren't explicitly using
* streams.
*/
CAFFE2_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);
TORCH_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);
/**
* Get the current CUDA stream, for the passed CUDA device, or for the

@@ -197,7 +197,7 @@ CAFFE2_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);
* be different if someone called 'setCurrentCUDAStream' or used 'StreamGuard'
* or 'CUDAStreamGuard'.
*/
CAFFE2_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);
TORCH_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);
/**
* Set the current stream on the device of the passed in stream to be

@@ -209,7 +209,7 @@ CAFFE2_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);
* (which will switch both your current device and current stream in the way you
* expect, and reset it back to its original state afterwards).
*/
CAFFE2_API void setCurrentCUDAStream(CUDAStream stream);
TORCH_API void setCurrentCUDAStream(CUDAStream stream);
C10_API std::ostream& operator<<(std::ostream& stream, const CUDAStream& s);
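A hedged usage sketch of the stream helpers declared above (it assumes a CUDA build and that these functions live in the c10::cuda namespace):

#include <c10/cuda/CUDAStream.h>

void stream_example() {
  // Grab a stream from the pool and make it current for this device.
  c10::cuda::CUDAStream s = c10::cuda::getStreamFromPool(/*isHighPriority=*/false);
  c10::cuda::setCurrentCUDAStream(s);

  // Work launched here would go onto `s` rather than the default stream.
  c10::cuda::CUDAStream current = c10::cuda::getCurrentCUDAStream();
  (void)current;
}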
@@ -92,11 +92,10 @@
#endif
// This one is being used by libtorch.so
// TODO: rename this to TORCH_API
#ifdef CAFFE2_BUILD_MAIN_LIB
#define CAFFE2_API C10_EXPORT
#define TORCH_API C10_EXPORT
#else
#define CAFFE2_API C10_IMPORT
#define TORCH_API C10_IMPORT
#endif
// NB: For now, HIP is overloaded to use the same macro, but ideally
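As a hedged illustration of what this macro buys (the function name and header below are hypothetical): when libtorch itself is built, CAFFE2_BUILD_MAIN_LIB is defined and TORCH_API expands to the export side of C10_EXPORT/C10_IMPORT, so the symbol is exported; consumers that merely link against libtorch see the import side instead.

// my_feature.h (hypothetical header shipped with the library)
#include <c10/macros/Export.h>

namespace torch {
// Expands to an export annotation while building libtorch and to an import
// annotation in consumers (dllexport/dllimport on Windows, visibility
// attributes elsewhere).
TORCH_API int my_exported_helper(int x);
}  // namespace torch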
@@ -10,7 +10,7 @@ using DeleterFnPtr = void (*)(void*);
namespace detail {
// Does not delete anything
CAFFE2_API void deleteNothing(void*);
TORCH_API void deleteNothing(void*);
// A detail::UniqueVoidPtr is an owning smart pointer like unique_ptr, but
// with three major differences:

@@ -19,7 +19,7 @@ namespace caffe2 {
using at::Half; // for AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ...)
namespace internal {
CAFFE2_API at::Tensor index_with_uint8_handling(
TORCH_API at::Tensor index_with_uint8_handling(
const at::Tensor& self,
at::TensorList indices);
}

@@ -11,7 +11,7 @@
namespace caffe2 {
namespace gloo {
CAFFE2_API void signalFailure(Blob* status_blob, std::exception& exception);
TORCH_API void signalFailure(Blob* status_blob, std::exception& exception);
struct createDeviceAttr {
// "tcp" or "ibverbs"

@@ -22,7 +22,7 @@ struct createDeviceAttr {
std::string interface;
};
CAFFE2_API std::shared_ptr<::gloo::transport::Device> createDevice(
TORCH_API std::shared_ptr<::gloo::transport::Device> createDevice(
const createDeviceAttr attr);
// Captures the parameters passed to Gloo.

@@ -8,7 +8,7 @@
namespace caffe2 {
namespace gloo {
class CAFFE2_API StoreHandlerWrapper : public ::gloo::rendezvous::Store {
class TORCH_API StoreHandlerWrapper : public ::gloo::rendezvous::Store {
public:
explicit StoreHandlerWrapper(StoreHandler& handler) : handler_(handler) {}

@@ -14,12 +14,12 @@
namespace caffe2 {
CAFFE2_API void BuildInitializationList(
TORCH_API void BuildInitializationList(
Workspace* ws,
::ONNX_NAMESPACE::GraphProto* g,
std::unordered_set<std::string>* initialization_list);
class CAFFE2_API TensorRTTransformer {
class TORCH_API TensorRTTransformer {
public:
TensorRTTransformer(
size_t max_batch_size,
@@ -40,7 +40,7 @@ constexpr auto kChunkIdSeparator = "#%";
* approaches for specific classes. Acceptor should take care of writing data
* to the actual storage.
*/
CAFFE2_API void SerializeBlob(
TORCH_API void SerializeBlob(
const Blob& blob,
const string& name,
BlobSerializerBase::SerializationAcceptor acceptor,

@@ -56,15 +56,15 @@ CAFFE2_API void SerializeBlob(
*
* NOTE: this function doesn't do chunking and might break with big tensors.
*/
CAFFE2_API string SerializeBlob(const Blob& blob, const string& name);
TORCH_API string SerializeBlob(const Blob& blob, const string& name);
/**
* Deserializes from a string containing either BlobProto or TensorProto. If
* the deserialization fails, the content in the blob should no longer be
* trusted.
*/
CAFFE2_API void DeserializeBlob(const string& content, Blob* result);
CAFFE2_API void DeserializeBlob(const BlobProto& proto, Blob* result);
TORCH_API void DeserializeBlob(const string& content, Blob* result);
TORCH_API void DeserializeBlob(const BlobProto& proto, Blob* result);
/*
* Get an empty Tensor from the TensorProto given the meta data in proto (data
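A hedged round-trip sketch using the non-chunking SerializeBlob overload and DeserializeBlob declared above (the blob payload is illustrative):

#include <caffe2/core/blob.h>
#include <caffe2/core/blob_serialization.h>

void blob_roundtrip_example() {
  caffe2::Blob blob;
  *blob.GetMutable<std::string>() = "hello";  // any serializable payload

  // Single-shot serialization (no chunking, per the NOTE above).
  std::string wire = caffe2::SerializeBlob(blob, "my_blob");

  caffe2::Blob restored;
  caffe2::DeserializeBlob(wire, &restored);
}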
@@ -86,7 +86,7 @@ CAFFE2_API void DeserializeBlob(const BlobProto& proto, Blob* result);
* these function calls. e.g. mutable_data will allocate memory on the first
* call and it will return a pointer to the allocated memory on later calls.
*/
CAFFE2_API Tensor EmptyTensorFromProto(const TensorProto& proto);
TORCH_API Tensor EmptyTensorFromProto(const TensorProto& proto);
/**
* @brief TensorSerializer is the serializer for Tensors.

@@ -94,7 +94,7 @@ CAFFE2_API Tensor EmptyTensorFromProto(const TensorProto& proto);
* TensorSerializer takes in a blob that contains a Tensor, and serializes it
* into a TensorProto protocol buffer.
*/
class CAFFE2_API TensorSerializer : public BlobSerializerBase {
class TORCH_API TensorSerializer : public BlobSerializerBase {
public:
TensorSerializer() {}
~TensorSerializer() override {}

@@ -136,7 +136,7 @@ class CAFFE2_API TensorSerializer : public BlobSerializerBase {
* tensor, change the TensorProto's corresponding fields before calling
* Deserialize.
*/
class CAFFE2_API TensorDeserializer : public BlobDeserializerBase {
class TORCH_API TensorDeserializer : public BlobDeserializerBase {
public:
void Deserialize(const BlobProto& proto, Blob* blob) override;

@@ -240,7 +240,7 @@ inline void CopyFromProtoWithCast(
// Converts MessageLite to string while also checking that SerializeAsString
// succeeds. Pass description of class/function of the call if you'd
// like it appended to the error message.
CAFFE2_API std::string SerializeAsString_EnforceCheck(
TORCH_API std::string SerializeAsString_EnforceCheck(
const google::protobuf::MessageLite&,
const char* error_location = nullptr);

@@ -78,7 +78,7 @@ inline unique_ptr<BlobSerializerBase> CreateSerializer(TypeIdentifier id) {
* @brief BlobDeserializerBase is an abstract class that deserializes a blob
* from a BlobProto or a TensorProto.
*/
class CAFFE2_API BlobDeserializerBase {
class TORCH_API BlobDeserializerBase {
public:
virtual ~BlobDeserializerBase() {}

@@ -41,6 +41,6 @@ namespace BlobStat {
* Return size in bytes of the blob, if available for a blob of given type.
* If not available, return 0.
*/
CAFFE2_API size_t sizeBytes(const Blob& blob);
TORCH_API size_t sizeBytes(const Blob& blob);
}
}

@@ -124,18 +124,18 @@ class SkipIndices<> {
// linked. This function should not be used in static initialization functions
// as the underlying boolean variable is going to be switched on when one
// loads libtorch_gpu.so.
CAFFE2_API bool HasCudaRuntime();
CAFFE2_API bool HasHipRuntime();
TORCH_API bool HasCudaRuntime();
TORCH_API bool HasHipRuntime();
namespace internal {
// Sets the Cuda Runtime flag that is used by HasCudaRuntime(). You should
// never use this function - it is only used by the Caffe2 gpu code to notify
// Caffe2 core that cuda runtime has been loaded.
CAFFE2_API void SetCudaRuntimeFlag();
CAFFE2_API void SetHipRuntimeFlag();
TORCH_API void SetCudaRuntimeFlag();
TORCH_API void SetHipRuntimeFlag();
} // namespace internal
// Returns which setting Caffe2 was configured and built with (exported from
// CMake)
CAFFE2_API const std::map<string, string>& GetBuildOptions();
TORCH_API const std::map<string, string>& GetBuildOptions();
} // namespace caffe2

@@ -30,7 +30,7 @@ namespace caffe2 {
* A function to generate a random number seed that is unique in a best-effort
* basis, using an ever-incrementing seed and the current time.
*/
CAFFE2_API uint32_t RandomNumberSeed();
TORCH_API uint32_t RandomNumberSeed();
/**
* The CPU Context, representing the bare minimum of what a Context class in

@@ -44,7 +44,7 @@ CAFFE2_API uint32_t RandomNumberSeed();
* computation it has.
*
*/
class CAFFE2_API CPUContext final : public BaseContext {
class TORCH_API CPUContext final : public BaseContext {
public:
#if !defined(CAFFE2_IS_XPLAT_BUILD) && !defined(C10_MOBILE)
typedef at::CPUGeneratorImpl rand_gen_type;

@@ -33,7 +33,7 @@ class BaseContext;
* functions in the BaseContext class.
* TODO: add docs after this is finalized.
*/
class CAFFE2_API BaseContext {
class TORCH_API BaseContext {
public:
virtual ~BaseContext() noexcept {}
@@ -19,7 +19,7 @@ enum Mode { READ, WRITE, NEW };
/**
* An abstract class for the cursor of the database while reading.
*/
class CAFFE2_API Cursor {
class TORCH_API Cursor {
public:
Cursor() {}
virtual ~Cursor() {}

@@ -60,7 +60,7 @@ class CAFFE2_API Cursor {
/**
* An abstract class for the current database transaction while writing.
*/
class CAFFE2_API Transaction {
class TORCH_API Transaction {
public:
Transaction() {}
virtual ~Transaction() {}

@@ -79,7 +79,7 @@ class CAFFE2_API Transaction {
/**
* An abstract class for accessing a database of key-value pairs.
*/
class CAFFE2_API DB {
class TORCH_API DB {
public:
DB(const string& /*source*/, Mode mode) : mode_(mode) {}
virtual ~DB() {}
@ -143,7 +143,7 @@ inline bool DBExists(const string& db_type, const string& full_db_name) {
|
|||
/**
|
||||
* A reader wrapper for DB that also allows us to serialize it.
|
||||
*/
|
||||
class CAFFE2_API DBReader {
|
||||
class TORCH_API DBReader {
|
||||
public:
|
||||
friend class DBReaderSerializer;
|
||||
DBReader() {}
|
||||
|
|
@ -296,7 +296,7 @@ class CAFFE2_API DBReader {
|
|||
C10_DISABLE_COPY_AND_ASSIGN(DBReader);
|
||||
};
|
||||
|
||||
class CAFFE2_API DBReaderSerializer : public BlobSerializerBase {
|
||||
class TORCH_API DBReaderSerializer : public BlobSerializerBase {
|
||||
public:
|
||||
/**
|
||||
* Serializes a DBReader. Note that this blob has to contain DBReader,
|
||||
|
|
@ -309,7 +309,7 @@ class CAFFE2_API DBReaderSerializer : public BlobSerializerBase {
|
|||
BlobSerializerBase::SerializationAcceptor acceptor) override;
|
||||
};
|
||||
|
||||
class CAFFE2_API DBReaderDeserializer : public BlobDeserializerBase {
|
||||
class TORCH_API DBReaderDeserializer : public BlobDeserializerBase {
|
||||
public:
|
||||
void Deserialize(const BlobProto& proto, Blob* blob) override;
|
||||
};
|
||||
|
|
|
|||
|
|
@@ -2,19 +2,19 @@

 namespace caffe2 {

-CAFFE2_API EventCreateFunction Event::event_creator_[MaxDeviceTypes];
-CAFFE2_API EventRecordFunction Event::event_recorder_[MaxDeviceTypes];
-CAFFE2_API EventWaitFunction
+TORCH_API EventCreateFunction Event::event_creator_[MaxDeviceTypes];
+TORCH_API EventRecordFunction Event::event_recorder_[MaxDeviceTypes];
+TORCH_API EventWaitFunction
     Event::event_waiter_[MaxDeviceTypes][MaxDeviceTypes];
-CAFFE2_API EventFinishFunction Event::event_finisher_[MaxDeviceTypes];
+TORCH_API EventFinishFunction Event::event_finisher_[MaxDeviceTypes];

-CAFFE2_API EventQueryFunction Event::event_querier_[MaxDeviceTypes];
-CAFFE2_API EventErrorMessageFunction
+TORCH_API EventQueryFunction Event::event_querier_[MaxDeviceTypes];
+TORCH_API EventErrorMessageFunction
     Event::event_err_msg_getter_[MaxDeviceTypes];
-CAFFE2_API EventSetFinishedFunction
+TORCH_API EventSetFinishedFunction
     Event::event_finished_setter_[MaxDeviceTypes];
-CAFFE2_API EventResetFunction Event::event_resetter_[MaxDeviceTypes];
-CAFFE2_API EventSetCallbackFunction
+TORCH_API EventResetFunction Event::event_resetter_[MaxDeviceTypes];
+TORCH_API EventSetCallbackFunction
     Event::event_callback_setter_[MaxDeviceTypes];

 namespace {
@@ -55,7 +55,7 @@ typedef void (*EventResetFunction)(Event*);
 typedef std::function<void()> EventCallbackFunction;
 typedef void (*EventSetCallbackFunction)(Event*, EventCallbackFunction);

-class CAFFE2_API Event {
+class TORCH_API Event {
  public:
   explicit Event(const DeviceOption& option)
       : event_(), type_(option.device_type()), option_(option) {

@@ -180,7 +180,7 @@ inline FunctionSchema make_function_schema_for_c10(const char* schema_str) {
 #define C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(OperatorName) \
   namespace caffe2 { \
   namespace _c10_ops { \
-  CAFFE2_API const FunctionSchema& schema_##OperatorName(); \
+  TORCH_API const FunctionSchema& schema_##OperatorName(); \
   } \
   }
@@ -16,7 +16,7 @@ namespace transform {
 /**
  * Graph representation of an operator.
  */
-struct CAFFE2_API Node {
+struct TORCH_API Node {
  public:
   // Empty constructor for resize
   Node() {}

@@ -45,7 +45,7 @@ struct CAFFE2_API Node {
 /**
  * Graph representation of a Netdef.
  */
-struct CAFFE2_API Graph {
+struct TORCH_API Graph {
  public:
   /**
    * Given a subgraph, gets all of the parents of the subgraph, as well as

@@ -155,7 +155,7 @@ struct CAFFE2_API Graph {

 // Adds an operator def to a netdef.
 // Returns the ptr, if you want to add anything extra (such as device_option)
-CAFFE2_API OperatorDef* AddOp(
+TORCH_API OperatorDef* AddOp(
     NetDef* netdef_ptr,
     string op_type,
     std::vector<string> inputs,

@@ -168,12 +168,12 @@ CAFFE2_API OperatorDef* AddOp(
  * For example, if we wanted to match an operator to Conv or FC, we can give:
  * "Conv|FC" as the type() of that op.
  */
-CAFFE2_API bool MatchStrings(string p, string s);
+TORCH_API bool MatchStrings(string p, string s);

 /**
  * This ensures that each named arg that exists in the pattern exists in g_op,
  * is equal in value.
  */
-CAFFE2_API bool MatchArguments(const OperatorDef& p_op, const OperatorDef& g_op);
+TORCH_API bool MatchArguments(const OperatorDef& p_op, const OperatorDef& g_op);

 } // namespace caffe2
@@ -8,7 +8,7 @@
 namespace caffe2 {

 namespace internal {
-class CAFFE2_API Caffe2InitializeRegistry {
+class TORCH_API Caffe2InitializeRegistry {
  public:
   typedef bool (*InitFunction)(int*, char***);
   // Registry() is defined in .cpp file to make registration work across

@@ -96,12 +96,12 @@ class CAFFE2_API Caffe2InitializeRegistry {
 };
 } // namespace internal

-CAFFE2_API bool unsafeRunCaffe2InitFunction(
+TORCH_API bool unsafeRunCaffe2InitFunction(
     const char* name,
     int* pargc = nullptr,
     char*** pargv = nullptr);

-class CAFFE2_API InitRegisterer {
+class TORCH_API InitRegisterer {
  public:
   InitRegisterer(
       internal::Caffe2InitializeRegistry::InitFunction function,

@@ -128,9 +128,9 @@ class CAFFE2_API InitRegisterer {
 /**
  * @brief Determine whether GlobalInit has already been run
  */
-CAFFE2_API bool GlobalInitAlreadyRun();
+TORCH_API bool GlobalInitAlreadyRun();

-class CAFFE2_API GlobalInitIsCalledGuard {
+class TORCH_API GlobalInitIsCalledGuard {
  public:
   GlobalInitIsCalledGuard() {
     if (!GlobalInitAlreadyRun()) {

@@ -165,7 +165,7 @@ class CAFFE2_API GlobalInitIsCalledGuard {
  *
  * GlobalInit is also thread-safe and can be called concurrently.
  */
-CAFFE2_API bool GlobalInit(int* pargc, char*** argv);
+TORCH_API bool GlobalInit(int* pargc, char*** argv);

 /**
  * @brief Initialize the global environment without command line arguments

@@ -174,6 +174,6 @@ CAFFE2_API bool GlobalInit(int* pargc, char*** argv);
  * On mobile devices, use this global init, since we cannot pass the
  * command line options to caffe2, no arguments are passed.
  */
-CAFFE2_API bool GlobalInit();
+TORCH_API bool GlobalInit();
 } // namespace caffe2
 #endif // CAFFE2_CORE_INIT_H_
@@ -10,15 +10,15 @@
 namespace caffe2 {

 // op schema check
-CAFFE2_API void run_schema_check(const NetDef& net);
+TORCH_API void run_schema_check(const NetDef& net);

 namespace memonger {

-CAFFE2_API NetDef optimize_inference_net(
+TORCH_API NetDef optimize_inference_net(
     const NetDef& net,
     const std::set<string>& static_blobs);

-CAFFE2_API NetDef compute_blob_recycling_for_dag(
+TORCH_API NetDef compute_blob_recycling_for_dag(
     const NetDef& net,
     const std::vector<string>& heads,
     const std::vector<int>& op_indices,
@@ -23,7 +23,7 @@ namespace caffe2 {
  * different modules. Currently, we only store the name and a simple
  * description of what this module does.
  */
-class CAFFE2_API ModuleSchema {
+class TORCH_API ModuleSchema {
  public:
   ModuleSchema(const char* name, const char* description);
 };

@@ -41,12 +41,12 @@ class CAFFE2_API ModuleSchema {
  * the reason we do not include ".so" is for cross-platform compatibility
  * on platforms like mac os.
  */
-CAFFE2_API const CaffeMap<string, const ModuleSchema*>& CurrentModules();
+TORCH_API const CaffeMap<string, const ModuleSchema*>& CurrentModules();

 /**
  * @brief Checks whether a module is already present in the current binary.
  */
-CAFFE2_API bool HasModule(const string& name);
+TORCH_API bool HasModule(const string& name);

 /**
  * @brief Load a module.

@@ -56,7 +56,7 @@ CAFFE2_API bool HasModule(const string& name);
  * full path option to only experimental modules.
  * filename: (optional) a filename that serves as a hint to load the module.
  */
-CAFFE2_API void LoadModule(const string& name, const string& filename="");
+TORCH_API void LoadModule(const string& name, const string& filename="");


 #define CAFFE2_MODULE(name, description) \
@@ -34,7 +34,7 @@ class Workspace;

 // Net is a thin struct that owns all the operators together with the operator
 // contexts.
-class CAFFE2_API NetBase : public Observable<NetBase> {
+class TORCH_API NetBase : public Observable<NetBase> {
  public:
   NetBase(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);
   virtual ~NetBase() noexcept {}

@@ -135,7 +135,7 @@ class CAFFE2_API NetBase : public Observable<NetBase> {
   C10_DISABLE_COPY_AND_ASSIGN(NetBase);
 };

-class CAFFE2_API ExecutorHelper {
+class TORCH_API ExecutorHelper {
  public:
   ExecutorHelper() {}
   virtual TaskThreadPoolBase* GetPool(const DeviceOption& option) const;

@@ -161,14 +161,14 @@ C10_DECLARE_REGISTRY(
  * created net object to the workspace's net map, while this function returns
  * a standalone net object.
  */
-CAFFE2_API unique_ptr<NetBase> CreateNet(const NetDef& net_def, Workspace* ws);
-CAFFE2_API unique_ptr<NetBase> CreateNet(
+TORCH_API unique_ptr<NetBase> CreateNet(const NetDef& net_def, Workspace* ws);
+TORCH_API unique_ptr<NetBase> CreateNet(
     const std::shared_ptr<const NetDef>& net_def,
     Workspace* ws);

-CAFFE2_API void AddGlobalNetObserverCreator(NetObserverCreator creator);
+TORCH_API void AddGlobalNetObserverCreator(NetObserverCreator creator);

-CAFFE2_API void ClearGlobalNetObservers();
+TORCH_API void ClearGlobalNetObservers();

 } // namespace caffe2
@@ -57,13 +57,13 @@ struct ExecutionOptions {
   bool run_root_tasks_inline_ = false;
 };

-struct CAFFE2_API AsyncNetCancelled : public std::exception {
+struct TORCH_API AsyncNetCancelled : public std::exception {
   const char* what() const noexcept override {
     return "Cancelled";
   }
 };

-class CAFFE2_API AsyncNetBase : public NetBase {
+class TORCH_API AsyncNetBase : public NetBase {
  public:
   AsyncNetBase(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);
   ~AsyncNetBase() override;

@@ -5,7 +5,7 @@

 namespace caffe2 {

-class CAFFE2_API AsyncSchedulingNet : public AsyncNetBase {
+class TORCH_API AsyncSchedulingNet : public AsyncNetBase {
  public:
   AsyncSchedulingNet(
       const std::shared_ptr<const NetDef>& net_def,
@@ -29,7 +29,7 @@ C10_DECLARE_int(caffe2_net_async_tracing_nth);
 namespace caffe2 {
 namespace tracing {

-struct CAFFE2_API TracerEvent {
+struct TORCH_API TracerEvent {
   int op_id_ = -1;
   int task_id_ = -1;
   int stream_id_ = -1;

@@ -70,7 +70,7 @@ struct TracingConfig {
   int64_t trace_for_n_ms = 1000; // 1sec
 };

-class CAFFE2_API Tracer {
+class TORCH_API Tracer {
  public:
   Tracer(
       const NetBase* net,

@@ -111,7 +111,7 @@ class CAFFE2_API Tracer {
   friend class TracerGuard;
 };

-class CAFFE2_API TracerGuard {
+class TORCH_API TracerGuard {
  public:
   TracerGuard() {}

@@ -142,16 +142,16 @@ class CAFFE2_API TracerGuard {

 // Extract the shard id from name of the form "...shard:123..."
 // Return -1 if there is no shard found
-CAFFE2_API int extractShardId(const std::string& name);
+TORCH_API int extractShardId(const std::string& name);

 // Check if the net name is white-listed for tracing (specified via a command
 // line flag)
-CAFFE2_API bool isTraceableNetName(const std::string& net_name);
+TORCH_API bool isTraceableNetName(const std::string& net_name);

-CAFFE2_API std::shared_ptr<Tracer> create(
+TORCH_API std::shared_ptr<Tracer> create(
     const NetBase* net,
     const std::string& net_name);
-CAFFE2_API bool startIter(const std::shared_ptr<Tracer>& tracer);
+TORCH_API bool startIter(const std::shared_ptr<Tracer>& tracer);

 } // namespace tracing
@@ -10,7 +10,7 @@ namespace caffe2 {

 class ParallelNetExecutorHelper;

-class CAFFE2_API ParallelNet : public NetBase {
+class TORCH_API ParallelNet : public NetBase {
  public:
   ParallelNet(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);

@@ -16,7 +16,7 @@ namespace caffe2 {
 // This is the very basic structure you need to run a network - all it
 // does is simply to run everything in sequence. If you want more fancy control
 // such as a DAG-like execution, check out other better net implementations.
-class CAFFE2_API SimpleNet : public NetBase {
+class TORCH_API SimpleNet : public NetBase {
  public:
   SimpleNet(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);
   bool SupportsAsync() override {
@@ -8,7 +8,7 @@
 namespace nom {
 namespace repr {

-class CAFFE2_API Value {
+class TORCH_API Value {
  public:
   enum class ValueKind { Value, Instruction, Data };
   Value(ValueKind K) : kind_(K) {}

@@ -22,7 +22,7 @@ class CAFFE2_API Value {
   const ValueKind kind_;
 };

-class CAFFE2_API Data : public Value {
+class TORCH_API Data : public Value {
  public:
   Data() : Value(ValueKind::Data) {}
   static bool classof(const Value* V) {

@@ -41,7 +41,7 @@ class CAFFE2_API Data : public Value {
   size_t version_ = 0;
 };

-class CAFFE2_API Instruction : public Value {
+class TORCH_API Instruction : public Value {
  public:
   /// \brief All the different types of execution.
   enum class Opcode {

@@ -66,7 +66,7 @@ class CAFFE2_API Instruction : public Value {
   Opcode op_;
 };

-class CAFFE2_API Terminator : public Instruction {
+class TORCH_API Terminator : public Instruction {
  public:
   Terminator(Instruction::Opcode op) : Instruction(op) {}

@@ -80,17 +80,17 @@ class CAFFE2_API Terminator : public Instruction {
   }
 };

-class CAFFE2_API Branch : public Terminator {
+class TORCH_API Branch : public Terminator {
  public:
   Branch() : Terminator(Instruction::Opcode::Branch) {}
 };

-class CAFFE2_API Return : public Terminator {
+class TORCH_API Return : public Terminator {
  public:
   Return() : Terminator(Instruction::Opcode::Return) {}
 };

-class CAFFE2_API Phi : public Instruction {
+class TORCH_API Phi : public Instruction {
  public:
   Phi() : Instruction(Instruction::Opcode::Phi) {}
 };
@@ -41,7 +41,7 @@ class NeuralNetData;
 /// a saved void* pointer for external use. Derived classes
 /// add richer semantics to the annotation and it is encouraged
 /// to use them.
-class CAFFE2_API Annotation {
+class TORCH_API Annotation {
  public:
   enum class AnnotationKind { Generic, Caffe2 };

@@ -57,7 +57,7 @@ class CAFFE2_API Annotation {
   const AnnotationKind kind_;
 };

-class CAFFE2_API NeuralNetOperator : public Instruction {
+class TORCH_API NeuralNetOperator : public Instruction {
  public:
   /// Discriminator for LLVM-style RTTI (isa<>)
   enum class NNKind {

@@ -132,7 +132,7 @@ class CAFFE2_API NeuralNetOperator : public Instruction {
   std::unique_ptr<Annotation> extraAnnotation_;
 };

-class CAFFE2_API NeuralNetData : public Data {
+class TORCH_API NeuralNetData : public Data {
  public:
   /// Discriminator for LLVM-style RTTI (isa<>)
   enum class NNDataKind { Generic, Tensor };

@@ -155,7 +155,7 @@ class CAFFE2_API NeuralNetData : public Data {
   NNDataKind kind_;
 };

-class CAFFE2_API Tensor : public NeuralNetData {
+class TORCH_API Tensor : public NeuralNetData {
  public:
   enum class DataType { Generic, Float, Half, Int8 };
   enum class Layout { Generic, NCHW, NHWC };

@@ -208,21 +208,21 @@ class CAFFE2_API Tensor : public NeuralNetData {

 #include "nomnigraph/Generated/OpClasses.h"

-class CAFFE2_API While : public NeuralNetOperator {
+class TORCH_API While : public NeuralNetOperator {
  public:
   While() : NeuralNetOperator(NNKind::While, Opcode::Branch) {}
   NOMNIGRAPH_DEFINE_NN_RTTI(While);
   ~While() {}
 };

-class CAFFE2_API NNPhi : public NeuralNetOperator {
+class TORCH_API NNPhi : public NeuralNetOperator {
  public:
   NNPhi() : NeuralNetOperator(NNKind::NNPhi, Opcode::Phi) {}
   NOMNIGRAPH_DEFINE_NN_RTTI(NNPhi);
   ~NNPhi() {}
 };

-class CAFFE2_API GenericOperator : public NeuralNetOperator {
+class TORCH_API GenericOperator : public NeuralNetOperator {
  public:
   GenericOperator() : NeuralNetOperator(NNKind::GenericOperator) {}
   GenericOperator(std::string name)

@@ -244,7 +244,7 @@ using NNGraph = nom::Graph<std::unique_ptr<nom::repr::Value>>;
 using NNSubgraph = nom::Subgraph<std::unique_ptr<nom::repr::Value>>;
 using NNCFGraph = nom::repr::ControlFlowGraph<NNGraph>;

-struct CAFFE2_API NNModule {
+struct TORCH_API NNModule {
   NNGraph dataFlow;
   NNCFGraph controlFlow;
   std::unordered_set<NNGraph::NodeRef> inputs;
@@ -464,41 +464,41 @@ NNGraph::NodeRef convertNode(NNGraph& g, NNGraph::NodeRef node) {
 }

 /// NeuralNetData specific helpers.
-CAFFE2_API bool hasProducer(NNGraph::NodeRef n);
-CAFFE2_API NNGraph::NodeRef getProducer(NNGraph::NodeRef n);
-CAFFE2_API bool hasConsumer(NNGraph::NodeRef n);
-CAFFE2_API std::vector<NNGraph::NodeRef> getConsumers(NNGraph::NodeRef n);
+TORCH_API bool hasProducer(NNGraph::NodeRef n);
+TORCH_API NNGraph::NodeRef getProducer(NNGraph::NodeRef n);
+TORCH_API bool hasConsumer(NNGraph::NodeRef n);
+TORCH_API std::vector<NNGraph::NodeRef> getConsumers(NNGraph::NodeRef n);

-CAFFE2_API bool hasInputs(NNGraph::NodeRef n);
-CAFFE2_API std::vector<NNGraph::NodeRef> getInputs(NNGraph::NodeRef n);
-CAFFE2_API std::vector<NNGraph::NodeRef> getOutputs(NNGraph::NodeRef n);
+TORCH_API bool hasInputs(NNGraph::NodeRef n);
+TORCH_API std::vector<NNGraph::NodeRef> getInputs(NNGraph::NodeRef n);
+TORCH_API std::vector<NNGraph::NodeRef> getOutputs(NNGraph::NodeRef n);

-CAFFE2_API std::set<NNGraph::NodeRef> getInputs(const NNSubgraph& sg);
-CAFFE2_API std::set<NNGraph::NodeRef> getOutputs(const NNSubgraph& sg);
+TORCH_API std::set<NNGraph::NodeRef> getInputs(const NNSubgraph& sg);
+TORCH_API std::set<NNGraph::NodeRef> getOutputs(const NNSubgraph& sg);

 // Get the name of the node regardless of underlying type.
-CAFFE2_API std::string getName(NNGraph::NodeRef n);
+TORCH_API std::string getName(NNGraph::NodeRef n);

 // Replace the producer of the first argument with the second argument
-CAFFE2_API void replaceProducer(
+TORCH_API void replaceProducer(
     NNGraph::NodeRef tensorNode,
     NNGraph::NodeRef newProducer);
 // Set all consumers of first argument to consume the second argument
-CAFFE2_API void replaceAllUsesWith(
+TORCH_API void replaceAllUsesWith(
     NNGraph::NodeRef oldTensorNode,
     NNGraph::NodeRef newTensorNode);
 // Set the second argument to consume the inputs of the first argument
-CAFFE2_API void replaceAsConsumer(
+TORCH_API void replaceAsConsumer(
     NNGraph::NodeRef oldConsumer,
     NNGraph::NodeRef newConsumer);

 // Create an output tensor node
-CAFFE2_API NNGraph::NodeRef
+TORCH_API NNGraph::NodeRef
 createOutput(NNModule* nn, NNGraph::NodeRef producer, std::string name);

 // Hack for windows compiler.
 template <typename T, typename... Args>
-CAFFE2_API NNGraph::NodeRef createOperator(NNModule* nn, Args... args);
+TORCH_API NNGraph::NodeRef createOperator(NNModule* nn, Args... args);

 // Create an operator
 template <typename T, typename... Args>

@@ -506,7 +506,7 @@ NNGraph::NodeRef createOperator(NNModule* nn, Args... args) {
   return nn->dataFlow.createNode(util::make_unique<T>(args...));
 }

-CAFFE2_API void coalesceInsertedDataDependencies(repr::NNModule* m);
+TORCH_API void coalesceInsertedDataDependencies(repr::NNModule* m);

 template <NNGraph* G>
 struct C10_EXPORT NodeHelper {};

@@ -517,12 +517,12 @@ using NNMatchPredicate = nom::matcher::MatchPredicate<NNGraph>;
 // Commonly used node predicate.

 // The node has a single output and the output has a single consumer.
-CAFFE2_API bool hasSingleOutputAndConsumer(NNGraph::NodeRef nodeRef);
+TORCH_API bool hasSingleOutputAndConsumer(NNGraph::NodeRef nodeRef);
 // The node has a unique consumer (there may be multiple edges from output
 // to the single consumer).
-CAFFE2_API bool hasUniqueConsumer(NNGraph::NodeRef nodeRef);
+TORCH_API bool hasUniqueConsumer(NNGraph::NodeRef nodeRef);

-CAFFE2_API NNMatchPredicate matchExternalTensorNode();
+TORCH_API NNMatchPredicate matchExternalTensorNode();

 } // namespace nn
@@ -102,9 +102,9 @@ class TestRandom {
  * return labelMap;
  * });
  */
-CAFFE2_API nom::Graph<std::string> createGraph();
+TORCH_API nom::Graph<std::string> createGraph();

-CAFFE2_API nom::Graph<std::string> createGraphWithCycle();
+TORCH_API nom::Graph<std::string> createGraphWithCycle();

 std::map<std::string, std::string> BBPrinter(typename nom::repr::NNCFGraph::NodeRef node);

@@ -112,9 +112,9 @@ std::map<std::string, std::string> cfgEdgePrinter(typename nom::repr::NNCFGraph:

 std::map<std::string, std::string> NNPrinter(typename nom::repr::NNGraph::NodeRef node);

-CAFFE2_API nom::Graph<TestClass>::NodeRef createTestNode(
+TORCH_API nom::Graph<TestClass>::NodeRef createTestNode(
     nom::Graph<TestClass>& g);

-CAFFE2_API std::map<std::string, std::string> TestNodePrinter(
+TORCH_API std::map<std::string, std::string> TestNodePrinter(
     nom::Graph<TestClass>::NodeRef node);
 #endif // NOM_TESTS_TEST_UTIL_H
Some files were not shown because too many files have changed in this diff.
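Every hunk shown above makes the same one-token change: a `CAFFE2_API` annotation on an exported class, struct, function, or global becomes `TORCH_API`, with the declaration itself left untouched. For readers unfamiliar with the `*_API` pattern, the following is a minimal, hypothetical sketch of how such an export macro is typically defined and applied. It is illustrative only, it is not the actual macro definition used by this repository, and `MYLIB_API` / `MYLIB_BUILD_SHARED_LIB` / `AddOne` are placeholder names invented for this example.

    // mylib.h -- hypothetical header illustrating the *_API export-macro pattern.
    #pragma once

    #if defined(_WIN32)
      #if defined(MYLIB_BUILD_SHARED_LIB)  // defined only while building the DLL itself
        #define MYLIB_API __declspec(dllexport)
      #else                                // consumers of the DLL import the symbol
        #define MYLIB_API __declspec(dllimport)
      #endif
    #else
      #define MYLIB_API __attribute__((visibility("default")))
    #endif

    // Annotated the same way the hunks above annotate caffe2 declarations.
    MYLIB_API int AddOne(int x);

    // mylib.cpp -- the definition lives in exactly one translation unit of the
    // shared library; the annotation on the declaration controls export/import.
    #include "mylib.h"

    int AddOne(int x) {
      return x + 1;
    }

Renaming such a macro is a purely mechanical change as long as every declaration site is updated in the same patch, which is what the hunks in this diff do.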