Enable -Werror on s390x

Example of the original issue on s390x: https://github.com/pytorch/pytorch/actions/runs/11014606340/job/30585632704

Most of the warnings are not specific to s390x but to gcc-13 or gcc-14, so testing this requires an image with gcc-13. On s390x, every merge is checked for new regressions by the trunk workflow.

`-Wdangling-reference` produces either obviously false warnings or suspicious ones that, on closer inspection, look plausibly safe, so it is disabled. `-Wredundant-move` on newer gcc complains about `std::move(...)` disabling copy elision, but removing the `std::move(...)` makes the clang versions in use complain about copying objects that could be moved. For now, that warning is also disabled.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/136527
Approved by: https://github.com/malfet
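The `-Wredundant-move` tension in a minimal sketch (illustrative code, not from the PR): returning a derived object as a base type is one pattern where the two compilers pull in opposite directions.

    #include <utility>

    struct Base {
      Base() = default;
      Base(const Base&) = default;
      Base(Base&&) = default;
    };
    struct Derived : Base {};

    Base make_base() {
      Derived d;
      // gcc-13/14 (with C++20 implicit move): the std::move below is flagged
      // as redundant by -Wredundant-move. Dropping it makes older clang warn
      // via -Wreturn-std-move that d is copied when it could be moved.
      return std::move(d);
    }
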
#pragma once

#include <memory>
#include <ostream>
#include <string>

#include <ATen/Tensor.h>
#include <c10/macros/Export.h>
#include <c10/util/Deprecated.h>
#include <optional>

namespace c10 {
struct Device;
}

namespace torch::lazy {

// Backends should extend this and define their own supported hardware types.
struct TORCH_API BackendDeviceType {
  int8_t type{(int8_t)at::kCPU};
  // Note: the previous default value was '0', which happens to map to
  // at::kCPU. At least the default is explicit now, though we may want to
  // make the default/undefined semantics clearer.
  BackendDeviceType() : type((int8_t)at::kCPU) {}
  BackendDeviceType(int8_t type) : type(type) {}

  virtual ~BackendDeviceType() = default;
  virtual std::string toString() const {
    return "Unknown";
  }
};
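
// For illustration only: one way a backend might subclass BackendDeviceType.
// The backend name and the at::kPrivateUse1 mapping here are hypothetical.
//
//   struct MyBackendDeviceType : public BackendDeviceType {
//     MyBackendDeviceType() : BackendDeviceType((int8_t)at::kPrivateUse1) {}
//     std::string toString() const override {
//       return "MyAccelerator";
//     }
//   };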

class TORCH_API BackendDevice {
 public:
  // The default constructor will set both the device type and ordinal
  // to backend specific defaults.
  BackendDevice();
  BackendDevice(std::shared_ptr<BackendDeviceType>&& type, int64_t ordinal);

  int8_t type() const;
  int64_t ordinal() const {
    return ordinal_;
  }

  bool operator==(const BackendDevice& other) const {
    return compare(other) == 0;
  }
  bool operator!=(const BackendDevice& other) const {
    return compare(other) != 0;
  }
  bool operator<(const BackendDevice& rhs) const {
    return compare(rhs) < 0;
  }

  std::string toString() const;

 private:
  int compare(const BackendDevice& rhs) const;

  // Use shared_ptr instead of unique_ptr so that BackendDevice can be copied.
  std::shared_ptr<BackendDeviceType> type_;
  int64_t ordinal_;
};
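
// For illustration only: constructing and comparing devices (a sketch; the
// default-constructed device's type and ordinal are backend specific).
//
//   auto cpu_type = std::make_shared<BackendDeviceType>((int8_t)at::kCPU);
//   BackendDevice dev0(std::move(cpu_type), /*ordinal=*/0);
//   BackendDevice dev1;
//   bool same = (dev0 == dev1);  // ==, != and < all route through compare()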

TORCH_API std::ostream& operator<<(
    std::ostream& os,
    const BackendDevice& device);

// Helpers for converting a c10::Device to BackendDevice and vice versa.
TORCH_API BackendDevice atenDeviceToBackendDevice(const c10::Device& device);
TORCH_API c10::Device backendDeviceToAtenDevice(const BackendDevice& device);
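
// For illustration only: a round trip between the two representations (the
// concrete device types involved depend on the registered backend).
//
//   c10::Device aten_device(at::kLazy, /*index=*/0);
//   BackendDevice backend_device = atenDeviceToBackendDevice(aten_device);
//   c10::Device back = backendDeviceToAtenDevice(backend_device);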

// Tries to extract the backend device out of the lazy tensor. Returns nullopt
// if the input is not a lazy tensor.
TORCH_API std::optional<BackendDevice> GetBackendDevice(
    const at::ITensorListRef tensors);
TORCH_API std::optional<BackendDevice> GetBackendDevice(
    const at::TensorList tensors);
TORCH_API std::optional<BackendDevice> GetBackendDevice(
    const at::Tensor& tensor);
TORCH_API std::optional<BackendDevice> GetBackendDevice(
    const std::optional<c10::Device>& device);

// Base case for the variadic template below.
TORCH_API std::optional<BackendDevice> GetBackendDevice();

C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Winfinite-recursion")
template <typename T, typename... Args>
std::optional<BackendDevice> GetBackendDevice(
    const T& tensor,
    const Args&... forward_tensors) {
  auto optional_device = GetBackendDevice(tensor);
  if (optional_device) {
    return optional_device;
  }
  return GetBackendDevice(forward_tensors...);
}
C10_DIAGNOSTIC_POP()
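
// For illustration only: the variadic overload scans its arguments in order
// and returns the first backend device found, recursing until the nullary
// base case above returns nullopt.
//
//   at::Tensor a, b;                  // possibly lazy tensors
//   std::optional<c10::Device> hint;
//   auto device = GetBackendDevice(a, b, hint);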

} // namespace torch::lazy