pytorch/torch/csrc/lazy/backend/backend_device.cpp
Jiewen Tan 6011c35a79 [LTC] Upstream class BackendDevice (#68027)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/68027

This commit upstreams class BackendDevice to master. It is a backend-specific
representation of the actual hardware — for instance, CPU, GPU, or TPU.

This concept is important for backends like XLA, which need to tell the
actual hardware type apart from the c10::DeviceType::Lazy virtual device
during both IR construction and lowering.

Test Plan: ./build/bin/test_lazy --gtest_filter=BackendDeviceTest.*

Reviewed By: wconstab

Differential Revision: D32261838

Pulled By: alanwaketan

fbshipit-source-id: 579c3fc5f9da7847c887a383c6047e8ecb9cc5bc
2021-11-10 07:05:43 -08:00

44 lines
1.2 KiB
C++

#include <torch/csrc/lazy/backend/backend_device.h>
#include <c10/util/Exception.h>
#include <c10/util/StringUtil.h>
namespace torch {
namespace lazy {
// TODO(alanwaketan): Use the backend API to get the default device type.
// In the future, we should also get the default device ordinal.
BackendDevice::BackendDevice()
: type_(std::make_shared<BackendDeviceType>()) {}
BackendDevice::BackendDevice(std::shared_ptr<BackendDeviceType>&& type, int ordinal)
: type_(std::move(type)), ordinal_(ordinal) {}
// Constructs a device from a textual device spec.
// NOTE(review): device_spec is currently ignored — this simply delegates to
// the default constructor, so every spec yields the default device.
// Presumably parsing is deferred until a backend API exists (see the TODO
// above); confirm callers do not rely on the spec being honored.
BackendDevice::BackendDevice(const std::string& device_spec)
: BackendDevice::BackendDevice() {}
// Returns the raw backend device-type id.
// A null type_ means the object is in an invalid (e.g. moved-from) state,
// so fail loudly rather than dereference.
int8_t BackendDevice::type() const {
  TORCH_INTERNAL_ASSERT(type_);
  const auto& device_type = *type_;
  return device_type.type;
}
// Renders the device as the type's string form followed by the ordinal.
std::string BackendDevice::toString() const {
  // The device type must be set before stringifying.
  TORCH_INTERNAL_ASSERT(type_);
  const auto type_str = type_->toString();
  return c10::str(type_str, ordinal_);
}
// Three-way comparison: orders primarily by device type, then by ordinal.
// Returns -1, 0, or +1 (strcmp-style).
int BackendDevice::compare(const BackendDevice& rhs) const {
  const auto lhs_type = type();
  const auto rhs_type = rhs.type();
  if (lhs_type != rhs_type) {
    return lhs_type < rhs_type ? -1 : +1;
  }
  if (ordinal_ == rhs.ordinal_) {
    return 0;
  }
  return ordinal_ < rhs.ordinal_ ? -1 : +1;
}
// Streams the device's canonical string form (see toString()).
std::ostream& operator<<(std::ostream& os, const BackendDevice& device) {
  return os << device.toString();
}
} // namespace lazy
} // namespace torch