[BLAS] Avoid downcasts for fp16xfp16->fp32 BLAS (#161999)

Follow-up to https://github.com/pytorch/pytorch/pull/154012

Fixes the CPU part of https://github.com/pytorch/pytorch/issues/160841

Pull Request resolved: https://github.com/pytorch/pytorch/pull/161999
Approved by: https://github.com/drisspg
Author: Nikita Shulga, 2025-09-02 14:06:36 -07:00 (committed by PyTorch MergeBot)
parent aed33a8fcb
commit 02c83f1334

@@ -496,18 +496,18 @@ void gemm(
   // for the fallback path, first compute gemm with beta = 0,
   // and then add c in full precision.
   int64_t c_size = n * m;
-  std::vector<at::Half> float16_c(c_size, 0.f);
-  gemm_stub(
+  std::vector<float> float_c(c_size, 0.f);
+  gemm_no_downcast_stub(
       at::kCPU, at::kHalf,
-      transa, transb, m, n, k, alpha, a, lda, b, ldb, 0.f, float16_c.data(), m);
+      transa, transb, m, n, k, alpha, a, lda, b, ldb, 0.f, float_c.data(), m);
   for (const auto j : c10::irange(n)) {
     for (const auto i : c10::irange(m)) {
       auto offset = j * ldc + i;
       // beta == 0 won't propagate NaN from C
       if (beta == 0.f) {
-        c[offset] = c10::convert<float>(float16_c[j * m + i]);
+        c[offset] = float_c[j * m + i];
       } else {
-        c[offset] = beta * c[offset] + float_c[j * m + i];
+        c[offset] = beta * c[offset] + float_c[j * m + i];
       }
     }
   }
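For context, a minimal standalone sketch (not part of this patch; it only needs c10/util/Half.h from a PyTorch/c10 checkout to compile, and the names acc_half, acc_float and the constants are illustrative) of why the downcast matters: the old fallback accumulated the fp16 x fp16 products into an at::Half buffer, rounding every partial sum to half precision, while the gemm_no_downcast_stub path keeps the accumulator in float until the final write into c.

#include <c10/util/Half.h>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // 4096 identical small products; their exact sum needs more precision
  // than the ~10 mantissa bits of a Half accumulator.
  const int64_t k = 4096;
  std::vector<c10::Half> a(k, c10::Half(0.01f));
  std::vector<c10::Half> b(k, c10::Half(0.01f));

  c10::Half acc_half(0.f);  // mimics the old path: partial sums rounded back to Half
  float acc_float = 0.f;    // mimics the new path: partial sums kept in float

  for (int64_t i = 0; i < k; ++i) {
    const float prod = static_cast<float>(a[i]) * static_cast<float>(b[i]);
    acc_half = c10::Half(static_cast<float>(acc_half) + prod);
    acc_float += prod;
  }

  std::cout << "half-accumulated:  " << static_cast<float>(acc_half) << "\n"
            << "float-accumulated: " << acc_float << "\n";
}

On a typical run the half accumulator stops growing once each product falls below half an ulp of the running sum (around 0.25 here), while the float accumulator stays close to the exact value (about 0.41); that gap is what switching the fallback buffer to float avoids.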