# Owner(s): ["module: mps"]
import importlib
import os
import sys

import torch
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    MACOS_VERSION,
    parametrize,
)
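

# float64/complex128 never run on MPS; bfloat16 is only available on macOS 14+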
MPS_UNSUPPORTED_TYPES = [torch.double, torch.cdouble] + (
    [torch.bfloat16] if MACOS_VERSION < 14.0 else []
)
MPS_DTYPES = [t for t in get_all_dtypes() if t not in MPS_UNSUPPORTED_TYPES]
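
# Import filelock eagerly so a missing dependency fails with a clear ImportError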
importlib.import_module("filelock")
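
# Put the top-level test dir on sys.path so `inductor.test_torchinductor` resolves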
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

from inductor.test_torchinductor import (  # @manual=fbcode//caffe2/test/inductor:test_inductor-library
    check_model_gpu,
    CommonTemplate,
    TestCase,
)


# TODO: Remove this file.
# This tests basic MPS compile functionality


@instantiate_parametrized_tests
class MPSBasicTests(TestCase):
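    # Hook up the shared Inductor test harness: check_model_gpu compares
    # compiled output against eager on self.device ("mps" here)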
    is_dtype_supported = CommonTemplate.is_dtype_supported
    common = check_model_gpu
    device = "mps"

    @parametrize("dtype", MPS_DTYPES)
    def test_add(self, dtype):
        self.common(
            lambda a, b: a + b,
            (
                make_tensor(1024, dtype=dtype, device=self.device),
                make_tensor(1024, dtype=dtype, device=self.device),
            ),
            check_lowp=False,
        )

    def test_log(self):
        self.common(lambda x: x.log(), (torch.rand(1024),))

    def test_acos(self):
        self.common(lambda x: x.acos(), (torch.rand(1024),))

    def test_atanh(self):
        self.common(lambda x: x.atanh(), (torch.rand(1024),))

    def test_floor(self):
        self.common(lambda x: x.floor(), (torch.rand(1024),))

    def test_sign(self):
        self.common(lambda x: x.sign(), (torch.rand(1024),))

    def test_sliced_input(self):
        self.common(
            lambda x: x[:, ::2].sin() + x[:, 1::2].cos(), (torch.rand(32, 1024),)
        )

    def test_where(self):
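        # Boolean-mask assignment exercises the where/select lowering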
        def foo(x):
            rc = x.abs().sqrt()
            rc[x < 0] = -5
            return rc

        self.common(foo, (torch.rand(1024),))

    @parametrize("dtype", MPS_DTYPES)
    def test_cast(self, dtype):
        self.common(lambda a: a.to(dtype), (torch.rand(1024),))
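
    # Unary torch.special ops checked elementwise against eager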
    pointwise_unary_ops = [
        "i0",
        "i0e",
        "i1",
        "i1e",
        "erf",
        "digamma",
        "sinc",
        "spherical_bessel_j0",
        "bessel_j0",
        "bessel_j1",
        "bessel_y0",
        "bessel_y1",
        "modified_bessel_i0",
        "modified_bessel_i1",
        "modified_bessel_k0",
        "modified_bessel_k1",
        "scaled_modified_bessel_k0",
        "scaled_modified_bessel_k1",
        "entr",
    ]

    @parametrize("op_name", pointwise_unary_ops)
    def test_pointwise_unary_op(self, op_name):
        self.common(
            lambda x: getattr(torch.special, op_name)(x),
            (torch.rand(128, 128),),
            check_lowp=False,
        )

    def test_pointwise_polygamma(self):
        self.common(
            torch.special.polygamma,
            (
                1,
                torch.rand(128, 128),
            ),
            check_lowp=False,
        )

    @parametrize(
        "op_name",
        [
            "zeta",
            "xlog1py",
            "chebyshev_polynomial_t",
            "chebyshev_polynomial_u",
            "chebyshev_polynomial_v",
            "chebyshev_polynomial_w",
            "hermite_polynomial_he",
        ],
    )
    def test_pointwise_binary_op(self, op_name):
        self.common(
            lambda x, y: getattr(torch.special, op_name)(x, y),
            (torch.rand(128, 128), torch.rand(128, 128)),
            check_lowp=False,
        )

    def test_broadcast(self):
        self.common(torch.add, (torch.rand(32, 1024), torch.rand(1024)))

    def test_inplace(self):
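        # In-place mutation of a graph input (exercises mutation handling under compile)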
        def inc_(x):
            x += 1
            return x

        self.common(inc_, (torch.rand(1024),))

    def test_rms_norm_nograd(self):
        # Regression test for https://github.com/pytorch/pytorch/issues/150629
        def fn(x, w):
            with torch.no_grad():
                return torch.nn.functional.rms_norm(x, x.shape, w)

        self.common(fn, (torch.rand(10), torch.ones(10)))


if __name__ == "__main__":
    from torch._dynamo.test_case import run_tests
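
    # Run only when an MPS device is present; these tests are MPS-specific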
    if torch.backends.mps.is_available():
        run_tests(needs="filelock")