pytorch/c10/core/Layout.h
Isalia20 a1282b1823 [MPS] Add boilerplate sparse code support (#157238)
This PR makes minimal changes to support sparse tensors on MPS. In follow-up PRs I'll start adding individual operations gradually so we can fix
https://github.com/pytorch/pytorch/issues/129842
which is highly requested (I assume because Whisper uses sparse tensors).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/157238
Approved by: https://github.com/malfet
2025-06-30 01:53:45 +00:00
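As a rough, illustrative sketch of the user-facing goal (not part of this PR's diff): once the boilerplate is in place, constructing a COO sparse tensor directly on the MPS device through the libtorch C++ API could look roughly like the snippet below. Which operations actually work depends on the follow-up PRs, and the snippet assumes a macOS build with MPS enabled.

#include <torch/torch.h>
#include <iostream>

int main() {
  // Two non-zero entries of a 2x3 matrix in COO form: (0, 2) -> 3.0, (1, 0) -> 4.0.
  auto indices = torch::tensor({{0, 1}, {2, 0}}, torch::kLong); // shape: 2 x nnz
  auto values = torch::tensor({3.0f, 4.0f});
  auto sp = torch::sparse_coo_tensor(
      indices, values, {2, 3}, torch::TensorOptions().device(torch::kMPS));
  std::cout << sp.layout() << std::endl; // prints "Sparse" (see Layout.h below)
  return 0;
}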


#pragma once
#include <c10/core/Backend.h>
#include <c10/util/Exception.h>
#include <cstdint>
#include <ostream>
namespace c10 {
// Memory layouts a tensor can have. NumOptions is a count sentinel, not a
// real layout.
enum class Layout : int8_t {
  Strided,
  Sparse,
  SparseCsr,
  Mkldnn,
  SparseCsc,
  SparseBsr,
  SparseBsc,
  Jagged,
  NumOptions
};
constexpr auto kStrided = Layout::Strided;
constexpr auto kSparse = Layout::Sparse;
constexpr auto kSparseCsr = Layout::SparseCsr;
constexpr auto kMkldnn = Layout::Mkldnn;
constexpr auto kSparseCsc = Layout::SparseCsc;
constexpr auto kSparseBsr = Layout::SparseBsr;
constexpr auto kSparseBsc = Layout::SparseBsc;
constexpr auto kJagged = Layout::Jagged;
// Map a Backend enumerator to the memory Layout its tensors use.
inline Layout layout_from_backend(Backend backend) {
  switch (backend) {
    case Backend::SparseCPU:
    case Backend::SparseCUDA:
    case Backend::SparseMPS:
    case Backend::SparseCsrMPS:
    case Backend::SparseHIP:
    case Backend::SparseVE:
    case Backend::SparseXPU:
    case Backend::SparsePrivateUse1:
      return Layout::Sparse;
    case Backend::MkldnnCPU:
      return Layout::Mkldnn;
    // A SparseCsr backend covers CSR, CSC, BSR and BSC tensors, so it cannot
    // be mapped to a single Layout.
    case Backend::SparseCsrCPU:
    case Backend::SparseCsrCUDA:
    case Backend::SparseCsrHIP:
    case Backend::SparseCsrVE:
    case Backend::SparseCsrXPU:
      TORCH_CHECK(
          false,
          "Cannot map Backend SparseCsr(CPU|CUDA|HIP|VE|XPU|MPS) to a unique layout.");
    default:
      return Layout::Strided;
  }
}
// Pretty-printer used when a Layout is streamed, e.g. in error messages.
inline std::ostream& operator<<(std::ostream& stream, at::Layout layout) {
  switch (layout) {
    case at::kStrided:
      return stream << "Strided";
    case at::kSparse:
      return stream << "Sparse";
    case at::kSparseCsr:
      return stream << "SparseCsr";
    case at::kSparseCsc:
      return stream << "SparseCsc";
    case at::kSparseBsr:
      return stream << "SparseBsr";
    case at::kSparseBsc:
      return stream << "SparseBsc";
    case at::kMkldnn:
      return stream << "Mkldnn";
    case at::kJagged:
      return stream << "Jagged";
    default:
      TORCH_CHECK(false, "Unknown layout");
  }
}
} // namespace c10
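For reference, a minimal sketch of how the helpers above behave for the newly added MPS sparse backend, assuming a translation unit built against the c10 headers:

#include <c10/core/Backend.h>
#include <c10/core/Layout.h>
#include <iostream>

int main() {
  // Backend::SparseMPS falls into the first case group and maps to the COO layout.
  c10::Layout layout = c10::layout_from_backend(c10::Backend::SparseMPS);
  std::cout << layout << std::endl; // operator<< above prints "Sparse"

  // The dense MPS backend takes the default branch and is treated as strided.
  std::cout << c10::layout_from_backend(c10::Backend::MPS) << std::endl; // "Strided"
  return 0;
}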