Remove old ROCm version check in tests (#164245)
This PR removes ROCm<6 version checks.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164245
Approved by: https://github.com/jeffdaily
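For context, the checks being deleted are skip decorators from torch.testing._internal (skipIfRocmVersionLessThan and skipCUDAIfRocmVersionLessThan, both visible in the hunks below). The following is a minimal sketch of the pattern this commit removes; the test class and body are hypothetical and only illustrate how the decorator gated a test on the installed ROCm version:

# Hypothetical example (not taken from the diff) showing the removed pattern:
# before the change, the decorator skipped the test on ROCm builds older than
# 6.0; after the change, the test runs unconditionally.
import torch
from torch.testing._internal.common_utils import (
    TestCase,
    run_tests,
    skipIfRocmVersionLessThan,
)


class ExampleTest(TestCase):
    @skipIfRocmVersionLessThan((6, 0))  # the kind of gate this PR deletes
    def test_add(self):
        a = torch.ones(4)
        self.assertEqual((a + a).sum().item(), 8.0)


if __name__ == "__main__":
    run_tests()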
This commit is contained in:
parent 3912ba3e94
commit b63bbe1661
@@ -38,7 +38,6 @@ from torch.testing._internal.common_utils import (
     gradcheck,
     parametrize,
     run_tests,
-    skipIfRocmVersionLessThan,
     skipIfTorchDynamo,
     TEST_WITH_ROCM,
     TestCase,
@@ -196,7 +195,6 @@ class TestForeach(TestCase):
             zero_size=True,
         )
 
-    @skipIfRocmVersionLessThan((6, 0))
     @ops(
         foreach_unary_op_db
         + foreach_binary_op_db
@@ -29,8 +29,7 @@ from torch.testing._internal.common_device_type import \
     (instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver,
      onlyCPU, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
      skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
-     onlyCUDA, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm, skipCUDAIfRocmVersionLessThan,
-     dtypesIfMPS, largeTensorTest)
+     onlyCUDA, skipMeta, skipCUDAIfNoCusolver, skipCUDAIfNotRocm, dtypesIfMPS, largeTensorTest)
 from torch.testing import make_tensor
 from torch.testing._internal.common_dtype import (
     all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
@@ -7303,7 +7302,6 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2:
     @unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
     @unittest.skipIf(SM90OrLater and not TEST_WITH_ROCM, "Expected failure on sm90")
     @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
-    @skipCUDAIfRocmVersionLessThan((6, 0))
     @onlyCUDA
     @parametrize("k", [16, 32])
     @parametrize("n", [16, 32])
@@ -7374,7 +7372,6 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2:
 
     @unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
     @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
-    @skipCUDAIfRocmVersionLessThan((6, 0))
     @onlyCUDA
     def test__int_mm_errors(self, device):
 
@@ -36,7 +36,6 @@ from torch.testing._internal.common_utils import (
     parametrize,
     run_tests,
     skipIfRocm,
-    skipIfRocmVersionLessThan,
     TEST_CUDA,
     TEST_WITH_ROCM,
     TestCase,
@@ -144,7 +143,6 @@ class TestMatmulCuda(InductorTestCase):
             torch.backends.cuda.matmul.allow_fp16_accumulation = orig_fp16_accumulate
 
     @onlyCUDA
-    @skipIfRocmVersionLessThan((5, 2))
     # imported 'tol' as 'xtol' to avoid aliasing in code above
     @toleranceOverride({torch.float16: xtol(atol=1e-1, rtol=1e-1),
                         torch.bfloat16: xtol(atol=1e-1, rtol=1e-1),
@@ -158,7 +156,6 @@ class TestMatmulCuda(InductorTestCase):
 
     @onlyCUDA
     @xfailIfSM100OrLaterAndCondition(lambda params: params.get('dtype') == torch.bfloat16 and params.get('size') == 10000)
-    @skipIfRocmVersionLessThan((5, 2))
     # imported 'tol' as 'xtol' to avoid aliasing in code above
     @toleranceOverride({torch.float16: xtol(atol=7e-1, rtol=2e-1),
                         torch.bfloat16: xtol(atol=1e1, rtol=2e-1)})
@@ -170,7 +167,6 @@ class TestMatmulCuda(InductorTestCase):
         self.cublas_addmm(size, dtype, True)
 
     @onlyCUDA
-    @skipIfRocmVersionLessThan((5, 2))
     @dtypes(torch.float16)
     # m == 4 chooses OUTPUT_TYPE reduction on H200
     # m == 8 chooses OUTPUT_TYPE reduction on A100
@@ -191,7 +187,6 @@ class TestMatmulCuda(InductorTestCase):
             torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig_precision
 
     @onlyCUDA
-    @skipIfRocmVersionLessThan((5, 2))
     # imported 'tol' as 'xtol' to avoid aliasing in code above
     @toleranceOverride({torch.float16: xtol(atol=7e-1, rtol=2e-1),
                         torch.bfloat16: xtol(atol=1e1, rtol=2e-1)})
@@ -16,8 +16,7 @@ from torch.testing._internal.common_utils import \
     skipIfRocmVersionLessThan, IS_FBCODE, IS_REMOTE_GPU, suppress_warnings)
 from torch.testing._internal.common_device_type import \
     (ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoSparseGeneric,
-     precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse, skipCUDAIfRocmVersionLessThan,
-     largeTensorTest)
+     precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse, largeTensorTest)
 from torch.testing._internal.common_methods_invocations import \
     (op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
 from torch.testing._internal.common_cuda import TEST_CUDA
@@ -1492,8 +1491,6 @@ class TestSparseCSR(TestCase):
             csr.matmul(bad_vec)
 
     @onlyCUDA
-    # hmm, the test passes ok on CUDA when Rocm is not available:
-    @skipCUDAIfRocmVersionLessThan((5, 2))
     @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
     def test_baddbmm(self, device, dtype):
 
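As a closing note, gates like the removed decorators reduce to a comparison against the installed ROCm/HIP version. The sketch below is illustrative only and is not PyTorch's actual implementation; it assumes torch.version.hip reports the HIP version string on ROCm builds and is None elsewhere:

# Illustrative sketch of a ROCm version gate; NOT PyTorch's implementation.
# Assumes torch.version.hip is a version string (e.g. "6.0....") on ROCm
# builds and None on CUDA/CPU builds.
import unittest

import torch


def skip_if_rocm_version_less_than(min_version):
    """Skip the decorated test on ROCm builds older than min_version (a tuple)."""
    def decorator(fn):
        hip = torch.version.hip
        if hip is not None:
            installed = tuple(int(part) for part in hip.split(".")[:2])
            if installed < tuple(min_version):
                reason = "requires ROCm >= " + ".".join(map(str, min_version))
                return unittest.skip(reason)(fn)
        return fn
    return decorator

With ROCm older than 6 presumably no longer supported, gates pinned at (5, 2) and (6, 0) are always satisfied, which is why this commit drops them rather than updating them.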