pytorch/c10/core/SymInt.cpp
Edward Z. Yang 1ff52225f1 Unify SymIntNode and SymFloatNode into SymNode (#87817)
This refactor was prompted by challenges handling mixed int/float
operations in C++.  A previous version of this patch
(https://github.com/pytorch/pytorch/pull/87722/) added overloads for
each permutation of int/float and was unwieldy.  This PR takes a
different approach.

The general outline of the patch is to combine the C++ types SymIntNode
and SymFloatNode into a single type, SymNode.  This type is erased: we
no longer know statically in C++ whether we have an int or a float, and
must test at runtime with the is_int()/is_float() virtual methods.  This
has a number of knock-on effects.
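
To make this concrete, here is an abridged, illustrative sketch of the
type-erased interface (the real class in c10/core/SymNodeImpl.h has many
more methods and uses throwing defaults rather than pure virtuals):

    #include <c10/util/intrusive_ptr.h>
    #include <cstdint>

    namespace c10 {

    class SymNodeImpl;
    using SymNode = intrusive_ptr<SymNodeImpl>;

    // Abridged sketch of the type-erased node.
    class SymNodeImpl : public intrusive_ptr_target {
     public:
      // Runtime tests replace the old static SymIntNode/SymFloatNode split.
      virtual bool is_int() = 0;
      virtual bool is_float() = 0;
      // Promote a plain integer into a node of the same class as this one.
      virtual SymNode wrap_int(int64_t num) = 0;
      virtual SymNode add(const SymNode& other) = 0;
      // ... sub/mul/eq/lt, guard_int, str, and so on.
    };

    } // namespace c10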

- We no longer have C++ classes to bind to Python.  Instead, we take an
  entirely new approach to our Python API, where we have SymInt/SymFloat
  classes defined entirely in Python, each holding a SymNode (which
  corresponds to the C++ SymNode).  However, SymNode is not pybind11-bound;
  instead, it lives as-is in Python and is wrapped into a C++ SymNode
  using PythonSymNode when it crosses into C++ (see the sketch after this
  list).  This implies a userland rename.

  In principle, it is also possible for the canonical implementation of
  SymNode to be written in C++ and then bound to Python with pybind11 (we
  have this code, although it is commented out).  However, I did not
  implement this, as we currently have no C++ implementations of SymNode.

  Because we do return SymInt/SymFloat from C++ bindings, the C++ binding
  code needs to know how to find these classes.  Currently, this is done
  just by manually importing torch and getting the attributes.

- Because SymInt/SymFloat are now thin Python wrappers, __sym_dispatch__
  takes SymInt/SymFloat rather than SymNode, bringing it in line with how
  __torch_dispatch__ works.
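
For illustration, a rough sketch of the wrapping direction (hypothetical
names, pybind11 style; a real wrapper also needs careful GIL and
ownership handling):

    #include <c10/core/SymNodeImpl.h>
    #include <pybind11/pybind11.h>
    namespace py = pybind11;

    // Rough sketch: hold the Python-side node and forward each virtual
    // call back into Python.
    class PythonSymNodeImpl : public c10::SymNodeImpl {
     public:
      explicit PythonSymNodeImpl(py::object obj) : pyobj_(std::move(obj)) {}

      bool is_int() override {
        py::gil_scoped_acquire acquire;  // calls may arrive from any thread
        return pyobj_.attr("is_int")().cast<bool>();
      }

      c10::SymNode add(const c10::SymNode& other) override {
        py::gil_scoped_acquire acquire;
        // Assumes `other` also wraps a Python node; real code must verify.
        auto* p = dynamic_cast<PythonSymNodeImpl*>(other.get());
        return c10::make_intrusive<PythonSymNodeImpl>(
            pyobj_.attr("add")(p->pyobj_));
      }

      py::object pyobj_;  // the Python-side SymNode
    };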

Some miscellaneous improvements:

- SymInt now has a constructor that takes SymNode.  Note that this
  constructor is ambiguous if you pass in a subclass of SymNode,
  so an explicit upcast is necessary.  This means toSymFloat/toSymInt
  are no more.  It is also a mild optimization, since passing by rvalue
  reference now works automatically (see the usage sketch after this
  list).

- We uniformly use the caster for c10::SymInt/SymFloat, rather than
  going the long way via the SymIntNode/SymFloatNode.

- Removed some unnecessary toSymInt/toSymFloat calls in the normalize_*
  functions; this should not change any behavior.

- guard_int is now a free function, since to guard on an int you cannot
  assume the method exists; a free function can handle both int and
  SymInt inputs (see the sketch after this list).

- We clean up the magic method definition code for SymInt/SymFloat/SymNode.
  ONLY the user classes (SymInt/SymFloat) get magic methods; SymNode gets
  plain methods; this is to help avoid confusion between the two types.
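
As a usage sketch of the new SymNode-taking constructor (wrap_node is a
hypothetical helper, assuming the node answers is_int()):

    #include <c10/core/SymInt.h>
    #include <c10/core/SymNodeImpl.h>

    // Wrap an existing SymNode into a SymInt.  Moving the intrusive_ptr
    // transfers ownership without an extra refcount bump.
    c10::SymInt wrap_node(c10::SymNode node) {
      return c10::SymInt(std::move(node));
    }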
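
And a minimal sketch of the free-function shape of guard_int (the
overload set and signatures here are illustrative, not necessarily the
exact ones that landed):

    #include <c10/core/SymInt.h>

    // A plain int64_t has no guard_int method, so overloads let call
    // sites guard concrete and symbolic integers uniformly.
    inline int64_t guard_int(int64_t i, const char* /*file*/, int64_t /*line*/) {
      return i;  // already concrete; nothing to guard
    }
    inline int64_t guard_int(const c10::SymInt& s, const char* file, int64_t line) {
      return s.guard_int(file, line);  // installs a guard, then returns the value
    }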

Signed-off-by: Edward Z. Yang <ezyang@fb.com>

cc @jansel @mlazos @soumith @voznesenskym @yanboliang @penguinwu @anijain2305
Pull Request resolved: https://github.com/pytorch/pytorch/pull/87817
Approved by: https://github.com/albanD, https://github.com/anjali411
2022-10-27 20:56:02 +00:00

#include <c10/core/SymFloat.h>
#include <c10/core/SymInt.h>
#include <c10/core/SymNodeImpl.h>
#include <array>
namespace c10 {

// Promote the two operands of a binary op to SymNodes: at least one
// input is already symbolic; a plain integer input is wrapped via
// wrap_int() on the symbolic operand's node class.
static std::array<SymNode, 2> normalize_symints(SymInt a_, SymInt b_) {
  SymNode a, b;
  if (a_.is_symbolic())
    a = a_.toSymNodeImpl();
  if (b_.is_symbolic())
    b = b_.toSymNodeImpl();

  SymNodeImpl* common = a ? a.get() : b.get();
  // TODO: technically we need to check that the classes match
  if (!a) {
    a = common->wrap_int(a_.as_int_unchecked());
  }
  if (!b) {
    b = common->wrap_int(b_.as_int_unchecked());
  }
  return {a, b};
}

SymNode SymInt::toSymNodeImpl() const {
  TORCH_CHECK(is_symbolic());
  return SymNode::reclaim_copy(toSymNodeImplUnowned());
}

SymInt::SymInt(SymNode sin_sp) {
  TORCH_CHECK(sin_sp->is_int());
  // Steal the reference from sin_sp and pack the pointer bits into data_,
  // tagged with IS_SYM so is_symbolic() can recognize it later.
  auto ptr = static_cast<uint64_t>(
      reinterpret_cast<uintptr_t>(static_cast<void*>(sin_sp.release())));
  auto rep = (ptr & ~MASK) | IS_SYM;
  data_ = static_cast<int64_t>(rep);
}

int64_t SymInt::guard_int(const char* file, int64_t line) const {
  if (!is_symbolic()) {
    return data_;
  }
  SymNode a = toSymNodeImpl();
  return a->guard_int(file, line);
}

SymInt::operator SymFloat() const {
  if (!is_symbolic()) {
    return SymFloat(double(data_));
  }
  return SymFloat(toSymNodeImpl()->sym_float());
}

// Each arithmetic operator takes the fast inline path when both operands
// are plain integers, and otherwise dispatches through normalize_symints.
SymInt SymInt::operator+(SymInt sci) const {
  if (!is_symbolic() && !sci.is_symbolic()) {
    return SymInt(data_ + sci.data_);
  }
  auto res = normalize_symints(*this, sci);
  return SymInt(res[0]->add(res[1]));
}

SymInt SymInt::operator-(SymInt sci) const {
  if (!is_symbolic() && !sci.is_symbolic()) {
    return SymInt(data_ - sci.data_);
  }
  auto res = normalize_symints(*this, sci);
  return SymInt(res[0]->sub(res[1]));
}

SymInt SymInt::operator*(SymInt sci) const {
  if (!is_symbolic() && !sci.is_symbolic()) {
    return SymInt(data_ * sci.data_);
  }
  auto res = normalize_symints(*this, sci);
  return SymInt(res[0]->mul(res[1]));
}

SymInt SymInt::operator/(SymInt sci) const {
  if (!is_symbolic() && !sci.is_symbolic()) {
    return SymInt(data_ / sci.data_);
  }
  auto res = normalize_symints(*this, sci);
  return SymInt(res[0]->floordiv(res[1]));
}

SymInt SymInt::operator%(SymInt sci) const {
  if (!is_symbolic() && !sci.is_symbolic()) {
    return SymInt(data_ % sci.data_);
  }
  auto res = normalize_symints(*this, sci);
  return SymInt(res[0]->mod(res[1]));
}

bool SymInt::operator==(SymInt sci) const {
  if (!is_symbolic() && !sci.is_symbolic()) {
    return data_ == sci.data_;
  }
  auto res = normalize_symints(*this, sci);
  return res[0]->eq(res[1])->bool_();
}

bool SymInt::operator!=(SymInt sci) const {
  return !(*this == sci);
}

bool SymInt::operator<(SymInt sci) const {
  if (!is_symbolic() && !sci.is_symbolic()) {
    return data_ < sci.data_;
  }
  auto res = normalize_symints(*this, sci);
  return res[0]->lt(res[1])->bool_();
}

bool SymInt::operator<=(SymInt sci) const {
  if (!is_symbolic() && !sci.is_symbolic()) {
    return data_ <= sci.data_;
  }
  auto res = normalize_symints(*this, sci);
  return res[0]->le(res[1])->bool_();
}

bool SymInt::operator>(SymInt sci) const {
  if (!is_symbolic() && !sci.is_symbolic()) {
    return data_ > sci.data_;
  }
  auto res = normalize_symints(*this, sci);
  return res[0]->gt(res[1])->bool_();
}

bool SymInt::operator>=(SymInt sci) const {
  if (!is_symbolic() && !sci.is_symbolic()) {
    return data_ >= sci.data_;
  }
  auto res = normalize_symints(*this, sci);
  return res[0]->ge(res[1])->bool_();
}

SymInt SymInt::min(SymInt sci) const {
  if (!is_symbolic() && !sci.is_symbolic()) {
    return std::min(data_, sci.data_);
  }
  auto res = normalize_symints(*this, sci);
  return SymInt(res[0]->min(res[1]));
}

SymInt SymInt::max(SymInt sci) const {
  if (!is_symbolic() && !sci.is_symbolic()) {
    return std::max(data_, sci.data_);
  }
  auto res = normalize_symints(*this, sci);
  return SymInt(res[0]->max(res[1]));
}

void SymInt::operator*=(SymInt sci) {
  *this = *this * sci;
}

void SymInt::operator/=(SymInt sci) {
  *this = *this / sci;
}

void SymInt::operator+=(SymInt sci) {
  *this = *this + sci;
}

// Overloads against plain int64_t promote the argument and reuse the
// SymInt/SymInt operators above.
bool SymInt::operator<(int64_t sci) const {
  return *this < c10::SymInt(sci);
}

bool SymInt::operator<=(int64_t sci) const {
  return *this <= c10::SymInt(sci);
}

bool SymInt::operator>(int64_t sci) const {
  return *this > c10::SymInt(sci);
}

bool SymInt::operator>=(int64_t sci) const {
  return *this >= c10::SymInt(sci);
}

bool SymInt::operator==(int64_t sci) const {
  return *this == c10::SymInt(sci);
}

bool SymInt::operator!=(int64_t sci) const {
  return *this != c10::SymInt(sci);
}

SymInt SymInt::operator*(int64_t sci) const {
  return *this * c10::SymInt(sci);
}

std::ostream& operator<<(std::ostream& os, SymInt s) {
  if (s.is_symbolic()) {
    os << s.toSymNodeImpl()->str();
  } else {
    os << s.as_int_unchecked();
  }
  return os;
}

SymInt operator-(SymInt s) {
  if (s.is_symbolic()) {
    return SymInt(s.toSymNodeImpl()->neg());
  } else {
    return SymInt(-s.as_int_unchecked());
  }
}

} // namespace c10