Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-06 12:20:52 +01:00
Make TypeDefault, TypeDerived and VariableType anonymous namespaces (#26882)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/26882

Reduce binary size by 500kb by making TypeDerived and VariableType anonymous namespaces instead of classes. TypeDefault also becomes a namespace, but it can't be anonymous because VariableType calls into it. This also has the nice side effect that VariableType.h and ${TypeDerived.h} are much smaller, because they no longer have to list the operator declarations.

ghstack-source-id: 90865080
Test Plan: Measure libtorch.so size
Differential Revision: D17599686
fbshipit-source-id: da3c6641060b7410a7808f36a0a18ee3246ce2d2
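The shape of the change, in miniature: a class of static methods exports one symbol per method and forces the header to declare every operator, while free functions in an anonymous namespace get internal linkage, so the linker can drop the exported symbols and the header no longer needs per-operator declarations. A minimal before/after sketch (illustrative only, not the actual generated code; `add_stub` is a made-up placeholder):

    // Before: generated operators as static members of an exported class.
    // Each member needs a declaration in the header and an external symbol.
    struct TypeDerivedBefore {
      static int add_stub(int a, int b);
    };
    int TypeDerivedBefore::add_stub(int a, int b) { return a + b; }

    // After: free functions in an anonymous namespace. They get internal
    // linkage, so nothing is exported; the functions are reachable only
    // through the operator registration emitted in the same translation unit.
    namespace {
    int add_stub(int a, int b) { return a + b; }
    } // anonymous namespace

TypeDefault gets a named (non-anonymous) namespace instead, since the generated VariableType code in other translation units still calls into it directly.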
This commit is contained in:
parent 771bcce6f1
commit 092b2f7fee
@@ -89,11 +89,11 @@ case ScalarType::${ScalarName}: {
 # In this case, it will be called for all backends, but can be overwritten on a
 # per backend basis.
 NATIVE_DISPATCH_DECLARATION = CodeTemplate("""\
-static ${return_type} ${api_name}(${type_method_formals});
+${return_type} ${api_name}(${type_method_formals});
 """)
 
 NATIVE_DISPATCH_DEFINITION_DEFAULT = CodeTemplate("""\
-${return_type} TypeDefault::${api_name}(${type_method_formals}) {
+${return_type} ${api_name}(${type_method_formals}) {
 #ifdef BUILD_NAMEDTENSOR
 ${named_guard_declaration}
 #endif
@@ -103,7 +103,7 @@ ${return_type} TypeDefault::${api_name}(${type_method_formals}) {
 """)
 
 NATIVE_DISPATCH_DEFINITION_BACKEND = CodeTemplate("""\
-${return_type} ${Type}::${api_name}(${type_method_formals}) {
+${return_type} ${api_name}(${type_method_formals}) {
 #ifdef BUILD_NAMEDTENSOR
 ${named_guard_declaration}
 #endif
@@ -30,10 +30,23 @@ $extra_cuda_headers
 
 namespace at {
 
+namespace ${Type} {
+#ifndef USE_STATIC_DISPATCH
+namespace {
+#endif
+
 ${type_derived_method_definitions}
 
+#ifndef USE_STATIC_DISPATCH
+}
+#endif
+} // namespace ${Type}
+
 #ifndef USE_STATIC_DISPATCH
-static auto registerer = torch::RegisterOperators()
+namespace {
+auto registerer = torch::RegisterOperators()
   ${function_registrations};
+}
 #endif
 
 }
@@ -18,11 +18,17 @@
 #include <ATen/core/EnableNamedTensor.h>
 
 namespace at {
+namespace TypeDefault {
 
 ${type_method_definitions}
 
+} // namespace TypeDefault
+
 #ifndef USE_STATIC_DISPATCH
-static auto registerer = torch::RegisterOperators()
+namespace {
+auto registerer = torch::RegisterOperators()
   ${function_registrations};
+}
 #endif
 
 } // namespace at
@@ -30,8 +30,8 @@ struct Quantizer;
 // to frontend
 using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
 
-struct TORCH_API TypeDefault {
+namespace TypeDefault {
 ${type_method_declarations}
-};
+} // namespace TypeDefault
 
 } // namespace at
@@ -42,10 +42,23 @@ Tensor * ${Type}::add(Tensor & a, Tensor & b) {
 }
 */
 
+namespace ${Type} {
+#ifndef USE_STATIC_DISPATCH
+namespace {
+#endif
+
 ${type_derived_method_definitions}
 
 #ifndef USE_STATIC_DISPATCH
-static auto registerer = torch::RegisterOperators()
-  ${function_registrations};
-#endif
+}
+#endif
+} // namespace ${Type}
+
+#ifndef USE_STATIC_DISPATCH
+namespace {
+auto registerer = torch::RegisterOperators()
+  ${function_registrations};
+}
+#endif
 
 }
@@ -32,8 +32,10 @@ struct Quantizer;
 // to frontend
 using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
 
-struct TORCH_API ${Type} final {
+#ifdef USE_STATIC_DISPATCH
+namespace ${Type} {
 ${type_derived_method_declarations}
-};
+}
+#endif
 
 } // namespace at
@@ -144,11 +144,11 @@ DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE = {
 # END CHECKS FOR [ Invariant: TensorImpl and Storage Pointer Equality ]
 
 METHOD_DECLARATION = CodeTemplate("""\
-static ${return_type} ${api_name}(${type_method_formals}) ;
+${return_type} ${api_name}(${type_method_formals}) ;
 """)
 
 METHOD_DEFINITION = CodeTemplate("""\
-${return_type} VariableType::${api_name}(${type_method_formals}) {
+${return_type} ${api_name}(${type_method_formals}) {
 ${type_definition_body}
 }
 """)
@@ -30,11 +30,15 @@ using namespace torch::autograd::generated;
 
 namespace torch { namespace autograd {
 
+namespace VariableType {
+namespace {
 ${type_derived_method_definitions}
+}
+}
 
 namespace {
 
-static auto registerer = torch::RegisterOperators()
+auto registerer = torch::RegisterOperators()
   ${wrapper_registrations};
 
 }
@@ -45,20 +45,34 @@ using at::Quantizer;
 using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
 using c10::optional;
 
-struct TORCH_API VariableType final {
-  static std::vector<at::DeprecatedTypeProperties*> allCUDATypes();
-  static std::vector<at::DeprecatedTypeProperties*> allCPUTypes();
+namespace VariableType {
+  TORCH_API std::vector<at::DeprecatedTypeProperties*> allCUDATypes();
+  TORCH_API std::vector<at::DeprecatedTypeProperties*> allCPUTypes();
 
   ${type_derived_method_declarations}
 
- private:
   // checks that t is actually a Variable
-  static const Variable & checked_cast_variable(const Tensor & t, const char * name, int pos);
-  static Variable & checked_cast_variable(Tensor & t, const char * name, int pos);
-  static at::Tensor & unpack(Tensor & t, const char * name, int pos);
-  static const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
-  static at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
-  static std::vector<at::Tensor> unpack(at::TensorList tl, const char *name, int pos);
+  const Variable & checked_cast_variable(const Tensor & t, const char * name, int pos);
+  Variable & checked_cast_variable(Tensor & t, const char * name, int pos);
+
+  // TODO These are only needed in the header because they're defined in
+  // VariableTypeManual.cpp but registered from one of the codegened
+  // VariableType_X.cpp. Instead, we should register them from
+  // VariableTypeManual.cpp and then we can remove these declarations
+  // from the header.
+  at::Tensor & unpack(Tensor & t, const char * name, int pos);
+  const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
+  at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
+  std::vector<at::Tensor> unpack(at::TensorList tl, const char *name, int pos);
+  void backward(const Tensor& self, const Tensor& gradient, bool keep_graph, bool create_graph);
+  void set_data(const Tensor & self, const Tensor & new_data);
+  Tensor data(const Tensor & self);
+  bool is_leaf(const Tensor & self);
+  int64_t output_nr(const Tensor & self);
+  int64_t _version(const Tensor & self);
+  Tensor & copy_(Tensor & self, const Tensor & src, bool non_blocking);
+  Tensor & resize_(Tensor & self, IntArrayRef size);
+  Tensor & resize_as_(Tensor & self, const Tensor & the_template);
+  Tensor detach(const Tensor & self);
+  Tensor & detach_(Tensor & self);
 };
 
 }} // namespace torch::autograd
@@ -21,16 +21,18 @@ std::vector<at::DeprecatedTypeProperties*> allTypesForBackends(at::ArrayRef<at::
 }
 }
 
-std::vector<at::DeprecatedTypeProperties*> VariableType::allCPUTypes() {
+namespace VariableType {
+
+C10_EXPORT std::vector<at::DeprecatedTypeProperties*> allCPUTypes() {
   return allTypesForBackends({ Backend::CPU, Backend::SparseCPU });
 }
 
-std::vector<at::DeprecatedTypeProperties*> VariableType::allCUDATypes() {
+C10_EXPORT std::vector<at::DeprecatedTypeProperties*> allCUDATypes() {
   at::globalContext().lazyInitCUDA();
   return allTypesForBackends({ Backend::CUDA, Backend::SparseCUDA });
 }
 
-const Variable & VariableType::checked_cast_variable(const Tensor & t, const char * name, int pos) {
+const Variable & checked_cast_variable(const Tensor & t, const char * name, int pos) {
   if (!t.defined()) {
     AT_ERROR("Expected a Tensor of type Variable but found an undefined Tensor for argument #", pos, " '", name, "'");
   }
@@ -40,7 +42,7 @@ const Variable & VariableType::checked_cast_variable(const Tensor & t, const cha
   return as_variable_ref(t);
 }
 
-Variable & VariableType::checked_cast_variable(Tensor & t, const char * name, int pos) {
+Variable & checked_cast_variable(Tensor & t, const char * name, int pos) {
   if (!t.defined()) {
     AT_ERROR("Expected a Tensor of type Variable but found an undefined Tensor for argument #", pos, " '", name, "'");
   }
@@ -50,22 +52,22 @@ Variable & VariableType::checked_cast_variable(Tensor & t, const char * name, in
   return as_variable_ref(t);
 }
 
-const Tensor & VariableType::unpack(const Tensor & t, const char * name, int pos) {
+const Tensor & unpack(const Tensor & t, const char * name, int pos) {
   return checked_cast_variable(t, name, pos);
 }
 
-Tensor & VariableType::unpack(Tensor & t, const char * name, int pos) {
+Tensor & unpack(Tensor & t, const char * name, int pos) {
   return checked_cast_variable(t, name, pos);
 }
 
-Tensor VariableType::unpack_opt(const Tensor & t, const char * name, int pos) {
+Tensor unpack_opt(const Tensor & t, const char * name, int pos) {
   if (!t.defined()) {
     return Tensor();
   }
   return unpack(t, name, pos);
 }
 
-std::vector<at::Tensor> VariableType::unpack(at::TensorList tl, const char *name, int pos) {
+std::vector<at::Tensor> unpack(at::TensorList tl, const char *name, int pos) {
   std::vector<at::Tensor> ret(tl.size());
   for (size_t i = 0; i < tl.size(); ++i) {
     const auto &t = tl[i];
@@ -81,7 +83,7 @@ std::vector<at::Tensor> VariableType::unpack(at::TensorList tl, const char *name
   return ret;
 }
 
-void VariableType::backward(
+void backward(
     const Tensor& self,
     const Tensor& gradient,
     bool keep_graph,
@@ -89,28 +91,28 @@ void VariableType::backward(
   as_variable_ref(self).backward(gradient, keep_graph, create_graph);
 }
 
-void VariableType::set_data(const Tensor & self, const Tensor & new_data) {
+void set_data(const Tensor & self, const Tensor & new_data) {
   as_variable_ref(self).set_data(new_data);
 }
 
-Tensor VariableType::data(const Tensor & self) {
+Tensor data(const Tensor & self) {
   return as_variable_ref(self).variable_data();
 }
 
-bool VariableType::is_leaf(const Tensor & self) {
+bool is_leaf(const Tensor & self) {
   return as_variable_ref(self).is_leaf();
 }
 
-int64_t VariableType::output_nr(const Tensor & self) {
+int64_t output_nr(const Tensor & self) {
   return as_variable_ref(self).output_nr();
 }
 
-int64_t VariableType::_version(const Tensor & self) {
+int64_t _version(const Tensor & self) {
   return as_variable_ref(self).current_version();
 }
 
 // We don't have an outplace copy, so this can't be generated automatically
-Tensor & VariableType::copy_(Tensor & self, const Tensor & src, bool non_blocking) {
+Tensor & copy_(Tensor & self, const Tensor & src, bool non_blocking) {
   jit::Value* output = nullptr;
   if(torch::jit::tracer::isTracing()) {
     const jit::tracer::TracingState& state = *jit::tracer::getTracingState();
@@ -156,7 +158,7 @@ Tensor & VariableType::copy_(Tensor & self, const Tensor & src, bool non_blockin
   return self;
 }
 
-Tensor & VariableType::resize_(Tensor & self, IntArrayRef size) {
+Tensor & resize_(Tensor & self, IntArrayRef size) {
   auto& self_ = unpack(self, "self", 0);
   if (as_variable_ref(self).requires_grad()) {
     AT_ERROR("cannot resize variables that require grad");
@@ -173,7 +175,7 @@ Tensor & VariableType::resize_(Tensor & self, IntArrayRef size) {
   return self;
 }
 
-Tensor & VariableType::resize_as_(Tensor & self, const Tensor & the_template) {
+Tensor & resize_as_(Tensor & self, const Tensor & the_template) {
   auto& self_ = unpack(self, "self", 0);
   auto& the_template_ = unpack(the_template, "the_template", 1);
   if (as_variable_ref(self).requires_grad()) {
@@ -190,7 +192,7 @@ Tensor & VariableType::resize_as_(Tensor & self, const Tensor & the_template) {
   return self;
 }
 
-Tensor VariableType::detach(const Tensor & self) {
+Tensor detach(const Tensor & self) {
   RECORD_FUNCTION("detach", std::vector<c10::IValue>({self}));
 
   torch::jit::Node* node = nullptr;
@@ -211,7 +213,7 @@ Tensor VariableType::detach(const Tensor & self) {
   return std::move(result);
 }
 
-Tensor & VariableType::detach_(Tensor & self) {
+Tensor & detach_(Tensor & self) {
   RECORD_FUNCTION("detach_", std::vector<c10::IValue>({self}));
 
   torch::jit::Node* node = nullptr;
@@ -232,4 +234,6 @@ Tensor & VariableType::detach_(Tensor & self) {
   return self;
 }
 
+} // namespace VariableType
+
 }} // namespace torch::autograd