Revert "WIP / TST: allow testing torch._numpy under Dynamo (#110401)"

This reverts commit 5ed4a423de.

Reverted https://github.com/pytorch/pytorch/pull/110401 on behalf of https://github.com/huydhn due to Sorry for reverting your change, but it is failing dynamo job in trunk 5ed4a423de ([comment](https://github.com/pytorch/pytorch/pull/110401#issuecomment-1779811943))
This commit is contained in:
PyTorch MergeBot 2023-10-25 18:21:14 +00:00
parent e9804aaacc
commit 7e654c8f88
36 changed files with 649 additions and 1201 deletions

View File

@ -171,6 +171,13 @@ function install_torchrec_and_fbgemm() {
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
}
function install_numpy_pytorch_interop() {
local commit
commit=$(get_pinned_commit numpy_pytorch_interop)
# TODO: --no-use-pep517 will result in failure.
pip_install --user "git+https://github.com/Quansight-Labs/numpy_pytorch_interop.git@${commit}"
}
function clone_pytorch_xla() {
if [[ ! -d ./xla ]]; then
git clone --recursive --quiet https://github.com/pytorch/xla.git

View File

@ -1061,10 +1061,12 @@ elif [[ "${TEST_CONFIG}" == *inductor* && "${SHARD_NUMBER}" == 1 ]]; then
elif [[ "${TEST_CONFIG}" == *dynamo* && "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
test_without_numpy
install_torchvision
install_numpy_pytorch_interop
test_dynamo_shard 1
test_aten
elif [[ "${TEST_CONFIG}" == *dynamo* && "${SHARD_NUMBER}" == 2 && $NUM_TEST_SHARDS -gt 1 ]]; then
install_torchvision
install_numpy_pytorch_interop
test_dynamo_shard 2
elif [[ "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
test_without_numpy

View File

@ -0,0 +1 @@
0c4e82511d349358d2c8c492dd833334e742f27f

View File

@ -13,5 +13,3 @@ testpaths =
junit_logging_reruns = all
filterwarnings =
ignore:Module already imported so cannot be rewritten.*hypothesis:pytest.PytestAssertRewriteWarning
strict_xfail = True

View File

@ -3,41 +3,29 @@
import functools
import sys
from unittest import skipIf as skipif
import numpy
from unittest import expectedFailure as xfail, skipIf as skipif
import pytest
import torch
import torch._numpy as np
from torch._numpy.testing import assert_array_equal
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_array_equal
else:
import torch._numpy as np
from torch._numpy.testing import assert_array_equal
skip = functools.partial(skipif, True)
IS_PYPY = False
@skipif(numpy.__version__ < "1.24", reason="numpy.dlpack is new in numpy 1.23")
@instantiate_parametrized_tests
class TestDLPack(TestCase):
@xpassIfTorchDynamo # (reason="pytorch seems to handle refcounts differently")
@xfail # (reason="pytorch seems to handle refcounts differently")
@skipif(IS_PYPY, reason="PyPy can't get refcounts.")
def test_dunder_dlpack_refcount(self):
x = np.arange(5)
@ -46,7 +34,7 @@ class TestDLPack(TestCase):
del y
assert sys.getrefcount(x) == 2
@xpassIfTorchDynamo # (reason="pytorch does not raise")
@xfail # (reason="pytorch does not raise")
def test_dunder_dlpack_stream(self):
x = np.arange(5)
x.__dlpack__(stream=None)
@ -54,7 +42,7 @@ class TestDLPack(TestCase):
with pytest.raises(RuntimeError):
x.__dlpack__(stream=1)
@xpassIfTorchDynamo # (reason="pytorch seems to handle refcounts differently")
@xfail # (reason="pytorch seems to handle refcounts differently")
@skipif(IS_PYPY, reason="PyPy can't get refcounts.")
def test_from_dlpack_refcount(self):
x = np.arange(5)

View File

@ -9,31 +9,23 @@ import types
from itertools import permutations
from typing import Any
from unittest import skipIf as skipif
from unittest import expectedFailure as xfail, skipIf as skipif
import pytest
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy.testing import assert_, assert_equal
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
skip = functools.partial(skipif, True)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_, assert_equal
else:
import torch._numpy as np
from torch._numpy.testing import assert_, assert_equal
import numpy
def assert_dtype_equal(a, b):
assert_equal(a, b)
@ -110,10 +102,6 @@ class TestBuiltin(TestCase):
with pytest.raises(TypeError):
operation(np.dtype(np.int32), 7)
@skipif(
numpy.__version__ < "1.24",
reason="older numpies emit DeprecatioWarnings instead",
)
@parametrize(
"dtype",
[
@ -207,8 +195,8 @@ class TestPickling(TestCase):
@parametrize(
"DType",
[
subtest(type(np.dtype(t)), name=f"{np.dtype(t).name}_{i}")
for i, t in enumerate(np.typecodes["All"])
subtest(type(np.dtype(t)), name=f"{np.dtype(t).name}")
for t in np.typecodes["All"]
]
+ [np.dtype],
)
@ -220,7 +208,6 @@ class TestPickling(TestCase):
@skip(reason="XXX: value-based promotions, we don't have.")
@instantiate_parametrized_tests
class TestPromotion(TestCase):
"""Test cases related to more complex DType promotions. Further promotion
tests are defined in `test_numeric.py`
@ -231,12 +218,10 @@ class TestPromotion(TestCase):
[
(2**16 - 1, np.complex64, None),
(2**32 - 1, np.complex128, np.complex64),
subtest((np.float16(2), np.complex64, None), name="float16_complex64_None"),
subtest((np.float32(2), np.complex64, None), name="float32_complex64_None"),
(np.float16(2), np.complex64, None),
(np.float32(2), np.complex64, None),
# repeat for complex scalars:
subtest(
(np.complex64(2), np.complex64, None), name="complex64_complex64_None"
),
(np.complex64(2), np.complex64, None),
],
)
def test_complex_other_value_based(
@ -318,7 +303,7 @@ class TestMisc(TestCase):
assert bool(np.dtype("f8"))
assert bool(np.dtype("i8"))
@xpassIfTorchDynamo # (reason="No keyword arg for dtype ctor.")
@xfail # (reason="No keyword arg for dtype ctor.")
def test_keyword_argument(self):
# test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971
assert np.dtype(dtype=np.float64) == np.dtype(np.float64)
@ -358,7 +343,6 @@ class TestFromDTypeAttribute(TestCase):
@skip(reason="Parameteric dtypes, our stuff is simpler.")
@skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
@instantiate_parametrized_tests
class TestClassGetItem(TestCase):
def test_dtype(self) -> None:
alias = np.dtype[Any]

View File

@ -5,41 +5,23 @@ import itertools
from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest
import numpy
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_equal,
suppress_warnings,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_equal,
suppress_warnings,
)
else:
import torch._numpy as np
from torch._numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_equal,
suppress_warnings,
)
skip = functools.partial(skipif, True)
@ -763,15 +745,15 @@ class TestEinsum(TestCase):
np.einsum("ij,i->", x, y, optimize=optimize), [2.0]
) # contig_stride0_outstride0_two
@xpassIfTorchDynamo # (reason="int overflow differs in numpy and pytorch")
@xfail # (reason="int overflow differs in numpy and pytorch")
def test_einsum_sums_int8(self):
self.check_einsum_sums("i1")
@xpassIfTorchDynamo # (reason="int overflow differs in numpy and pytorch")
@xfail # (reason="int overflow differs in numpy and pytorch")
def test_einsum_sums_uint8(self):
self.check_einsum_sums("u1")
@xpassIfTorchDynamo # (reason="int overflow differs in numpy and pytorch")
@xfail # (reason="int overflow differs in numpy and pytorch")
def test_einsum_sums_int16(self):
self.check_einsum_sums("i2")
@ -782,7 +764,7 @@ class TestEinsum(TestCase):
def test_einsum_sums_int64(self):
self.check_einsum_sums("i8")
@xpassIfTorchDynamo # (reason="np.float16(4641) == 4640.0")
@xfail # (reason="np.float16(4641) == 4640.0")
def test_einsum_sums_float16(self):
self.check_einsum_sums("f2")
@ -966,7 +948,7 @@ class TestEinsum(TestCase):
y = tensor.trace(axis1=0, axis2=2).trace()
assert_allclose(x, y)
@xpassIfTorchDynamo # (reason="no base")
@xfail # (reason="no base")
def test_einsum_all_contig_non_contig_output(self):
# Issue gh-5907, tests that the all contiguous special case
# actually checks the contiguity of the output
@ -990,12 +972,7 @@ class TestEinsum(TestCase):
np.einsum("ij,jk->ik", x, x, out=out)
assert_array_equal(out.base, correct_base)
@skipif(
numpy.__version__ < "1.23",
reason="https://github.com/numpy/numpy/issues/20305 is in NumPy 1.22",
)
# @parametrize("dtype", np.typecodes["AllFloat"] + np.typecodes["AllInteger"])
@parametrize("dtype", "efdFD" + "Bbhil")
@parametrize("dtype", np.typecodes["AllFloat"] + np.typecodes["AllInteger"])
def test_different_paths(self, dtype):
# Test originally added to cover broken float16 path: gh-20305
# Likely most are covered elsewhere, at least partially.
@ -1181,7 +1158,7 @@ class TestEinsum(TestCase):
g = np.arange(64).reshape(2, 4, 8)
self.optimize_compare("obk,ijk->ioj", operands=[g, g])
@xpassIfTorchDynamo # (reason="order='F' not supported")
@xfail # (reason="order='F' not supported")
def test_output_order(self):
# Ensure output order is respected for optimize cases, the below
# conraction should yield a reshaped tensor view

View File

@ -8,27 +8,13 @@ import warnings
# from numpy.core.getlimits import _discovered_machar, _float_ma
from unittest import skipIf
import numpy
from unittest import expectedFailure as xfail, skipIf
import torch._numpy as np
from pytest import raises as assert_raises
from torch.testing._internal.common_utils import (
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import double, finfo, half, iinfo, single
from numpy.testing import assert_, assert_equal
else:
import torch._numpy as np
from torch._numpy import double, finfo, half, iinfo, single
from torch._numpy.testing import assert_, assert_equal
from torch._numpy import double, finfo, half, iinfo, single
from torch._numpy.testing import assert_, assert_equal
from torch.testing._internal.common_utils import run_tests, TestCase
skip = functools.partial(skipIf, True)
@ -68,7 +54,6 @@ class TestDouble(TestCase):
class TestFinfo(TestCase):
@skipIf(numpy.__version__ < "1.23", reason=".smallest_normal is new")
def test_basic(self):
dts = list(
zip(
@ -90,7 +75,7 @@ class TestFinfo(TestCase):
with assert_raises((TypeError, ValueError)):
finfo("i4")
@skip # (reason="Some of these attributes are not implemented vs NP versions")
@xfail # (reason="These attributes are not implemented yet.")
def test_basic_missing(self):
dt = np.float32
for attr in [
@ -140,7 +125,6 @@ class TestRepr(TestCase):
expected = "iinfo(min=-32768, max=32767, dtype=int16)"
assert_equal(repr(np.iinfo(np.int16)), expected)
@skipIf(TEST_WITH_TORCHDYNAMO, reason="repr differs")
def test_finfo_repr(self):
repr_f32 = repr(np.finfo(np.float32))
assert "finfo(resolution=1e-06, min=-3.40282e+38," in repr_f32
@ -202,7 +186,7 @@ class TestMisc(TestCase):
# This test may fail on some platforms
assert len(w) == 0
@xpassIfTorchDynamo # (reason="None of nmant, minexp, maxexp is implemented.")
@xfail # (reason="None of nmant, minexp, maxexp is implemented.")
def test_plausible_finfo(self):
# Assert that finfo returns reasonable results for all types
for ftype in np.sctypes["float"] + np.sctypes["complex"]:

View File

@ -12,37 +12,22 @@ from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest
import pytest
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy.testing import (
assert_,
assert_array_equal,
assert_equal,
assert_warns,
HAS_REFCOUNT,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
xfailIfTorchDynamo,
xpassIfTorchDynamo,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import (
assert_,
assert_array_equal,
assert_equal,
assert_warns,
HAS_REFCOUNT,
)
else:
import torch._numpy as np
from torch._numpy.testing import (
assert_,
assert_array_equal,
assert_equal,
assert_warns,
HAS_REFCOUNT,
)
skip = functools.partial(skipif, True)
@ -137,13 +122,15 @@ class TestIndexing(TestCase):
assert_equal(a[None], a[np.newaxis])
assert_equal(a[None].ndim, a.ndim + 1)
@skip
def test_empty_tuple_index(self):
# Empty tuple index creates a view
a = np.array([1, 2, 3])
assert_equal(a[()], a)
assert_(a[()].tensor._base is a.tensor)
a = np.array(0)
raise SkipTest(
"torch doesn't have scalar types with distinct instancing behaviours"
)
assert_(isinstance(a[()], np.int_))
def test_same_kind_index_casting(self):
@ -185,6 +172,7 @@ class TestIndexing(TestCase):
assert_(a[...] is not a)
assert_equal(a[...], a)
# `a[...]` was `a` in numpy <1.9.
assert_(a[...].tensor._base is a.tensor)
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
@ -201,14 +189,6 @@ class TestIndexing(TestCase):
b[(Ellipsis,)] = 2
assert_equal(b, 2)
@xfailIfTorchDynamo # numpy ndarrays do not have `.tensor` attribute
def test_ellipsis_index_2(self):
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_(a[...] is not a)
assert_equal(a[...], a)
# `a[...]` was `a` in numpy <1.9.
assert_(a[...].tensor._base is a.tensor)
def test_single_int_index(self):
# Single integer index selects one row
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
@ -253,7 +233,6 @@ class TestIndexing(TestCase):
a[b] = 1.0
assert_equal(a, [[1.0, 1.0, 1.0]])
@skip(reason="NP_VER: fails on CI")
def test_boolean_assignment_value_mismatch(self):
# A boolean assignment should fail when the shape of the values
# cannot be broadcast to the subscription. (see also gh-3458)
@ -421,7 +400,7 @@ class TestIndexing(TestCase):
# Unlike the non nd-index:
assert_(arr[index,].shape != (1,))
@xpassIfTorchDynamo # (reason="XXX: low-prio behaviour to support")
@xfail # (reason="XXX: low-prio behaviour to support")
def test_broken_sequence_not_nd_index(self):
# See https://github.com/numpy/numpy/issues/5063
# If we have an object which claims to be a sequence, but fails
@ -579,7 +558,7 @@ class TestBroadcastedAssignments(TestCase):
class TestFancyIndexingCast(TestCase):
@xpassIfTorchDynamo # (
@xfail # (
# reason="XXX: low-prio to support assigning complex values on floating arrays"
# )
def test_boolean_index_cast_assign(self):

View File

@ -20,57 +20,33 @@ from decimal import Decimal
from unittest import expectedFailure as xfail, skipIf as skipif
import numpy
import pytest
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy.testing import (
assert_,
assert_allclose, # IS_PYPY, IS_PYSTON, HAS_REFCOUNT,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_array_less,
assert_equal,
assert_raises_regex,
assert_warns,
# runstring, temppath,
suppress_warnings, # break_cycles,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
slowTest as slow,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
xfailIfTorchDynamo,
xpassIfTorchDynamo,
)
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import (
assert_,
assert_allclose, # IS_PYPY, IS_PYSTON, HAS_REFCOUNT,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_array_less,
assert_equal,
assert_raises_regex,
assert_warns,
# runstring, temppath,
suppress_warnings, # break_cycles,
)
else:
import torch._numpy as np
from torch._numpy.testing import (
assert_,
assert_allclose, # IS_PYPY, IS_PYSTON, HAS_REFCOUNT,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_array_less,
assert_equal,
assert_raises_regex,
assert_warns,
# runstring, temppath,
suppress_warnings, # break_cycles,
)
skip = functools.partial(skipif, True)
IS_PYPY = False
@ -154,7 +130,7 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None):
return data
@xpassIfTorchDynamo # (reason="TODO: flags")
@xfail # (reason="TODO: flags")
@instantiate_parametrized_tests
class TestFlag(TestCase):
def setUp(self):
@ -274,7 +250,6 @@ class TestFlag(TestCase):
assert a.__array_interface__["data"][1] is not writeable
assert np.asarray(MyArr()).flags.writeable is writeable
@xpassIfTorchDynamo
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags["C"], True)
@ -301,7 +276,7 @@ class TestFlag(TestCase):
assert_(a.flags.aligned)
@xpassIfTorchDynamo # (reason="TODO: hash")
@xfail # (reason="TODO: hash")
class TestHash(TestCase):
# see #3793
def test_int(self):
@ -339,7 +314,7 @@ class TestHash(TestCase):
)
@xpassIfTorchDynamo # (reason="TODO: hash")
@xfail # (reason="TODO: hash")
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
@ -527,7 +502,7 @@ class TestArrayConstruction(TestCase):
d[1] = 3
assert_array_equal(e, [1, 3, 3])
@xpassIfTorchDynamo # (reason="order='F'")
@xfail # (reason="order='F'")
def test_array_copy_false_2(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False, order="F")
@ -553,7 +528,6 @@ class TestArrayConstruction(TestCase):
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1, 2, 3]])
@xfailIfTorchDynamo
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
@ -891,7 +865,7 @@ class TestScalarIndexing(TestCase):
# this assersion fails because 50 > NPY_MAXDIMS = 32
# assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
@xpassIfTorchDynamo # (reason="pytorch disallows overlapping assignments")
@xfail # (reason="pytorch disallows overlapping assignments")
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
@ -1306,7 +1280,7 @@ class TestCreation(TestCase):
class TestBool(TestCase):
@xpassIfTorchDynamo # (reason="bools not interned")
@xfail # (reason="bools not interned")
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
@ -1323,7 +1297,7 @@ class TestBool(TestCase):
assert_equal(d[::2].sum(), d[::2].size)
# assert_equal(d[::-2].sum(), d[::-2].size)
@xpassIfTorchDynamo # (reason="frombuffer")
@xfail # (reason="frombuffer")
def test_sum_2(self):
d = np.frombuffer(b"\xff\xff" * 100, dtype=bool)
assert_equal(d.sum(), d.size)
@ -1403,7 +1377,7 @@ class TestBool(TestCase):
class TestMethods(TestCase):
sort_kinds = ["quicksort", "heapsort", "stable"]
@xpassIfTorchDynamo # (reason="all(..., where=...)")
@xfail # (reason="all(..., where=...)")
def test_all_where(self):
a = np.array([[True, False, True], [False, False, False], [True, True, True]])
wh_full = np.array(
@ -1423,7 +1397,7 @@ class TestMethods(TestCase):
assert_equal(a.all(where=False), True)
assert_equal(np.all(a, where=False), True)
@xpassIfTorchDynamo # (reason="any(..., where=...)")
@xfail # (reason="any(..., where=...)")
def test_any_where(self):
a = np.array([[True, False, True], [False, False, False], [True, True, True]])
wh_full = np.array(
@ -1444,7 +1418,7 @@ class TestMethods(TestCase):
assert_equal(a.any(where=False), False)
assert_equal(np.any(a, where=False), False)
@xpassIfTorchDynamo # (reason="TODO: compress")
@xfail # (reason="TODO: compress")
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
@ -1485,7 +1459,7 @@ class TestMethods(TestCase):
assert out is ret
assert_equal(out[()], 20)
@xpassIfTorchDynamo # (reason="choose(..., mode=...) not implemented")
@xfail # (reason="choose(..., mode=...) not implemented")
def test_choose_2(self):
# gh-6272 check overlap on out
x = np.arange(5)
@ -1536,7 +1510,7 @@ class TestMethods(TestCase):
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]])
@xpassIfTorchDynamo # (reason="reshape(..., order='F')")
@xfail # (reason="reshape(..., order='F')")
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
@ -1595,7 +1569,7 @@ class TestMethods(TestCase):
b = np.sort(a)
assert_equal(b, np.flip(a), msg)
@xpassIfTorchDynamo # (reason="sort complex")
@xfail # (reason="sort complex")
def test_sort_complex_nans(self):
# check complex
msg = "Test complex sort order with nans"
@ -1640,7 +1614,7 @@ class TestMethods(TestCase):
c.sort(kind=kind)
assert_equal(c, a, msg)
@xpassIfTorchDynamo # (reason="sort complex")
@xfail # (reason="sort complex")
@parametrize("dtype", [np.float32, np.float64])
@parametrize("part", ["real", "imag"])
def test_sort_complex(self, part, dtype):
@ -1707,7 +1681,7 @@ class TestMethods(TestCase):
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
@xpassIfTorchDynamo # (reason="order='F'")
@xfail # (reason="order='F'")
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
@ -1789,7 +1763,7 @@ class TestMethods(TestCase):
msg = f"byte-swapped complex argsort, dtype={dt}"
assert_equal(arr.argsort(), np.arange(len(arr), dtype=np.intp), msg)
@xpassIfTorchDynamo # (reason="argsort axis TODO")
@xfail # (reason="argsort axis TODO")
def test_argsort_axis(self):
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
@ -1826,13 +1800,13 @@ class TestMethods(TestCase):
a = np.array(["aaaaaaaaa" for i in range(100)], dtype=np.unicode_)
assert_equal(a.argsort(kind="m"), r)
@xpassIfTorchDynamo # (reason="TODO: searchsorted with nans differs in pytorch")
@xfail # (reason="TODO: searchsorted with nans differs in pytorch")
@parametrize(
"a",
[
subtest(np.array([0, 1, np.nan], dtype=np.float16), name="f16"),
subtest(np.array([0, 1, np.nan], dtype=np.float32), name="f32"),
subtest(np.array([0, 1, np.nan]), name="default_dtype"),
np.array([0, 1, np.nan], dtype=np.float16),
np.array([0, 1, np.nan], dtype=np.float32),
np.array([0, 1, np.nan]),
],
)
def test_searchsorted_floats(self, a):
@ -1851,7 +1825,7 @@ class TestMethods(TestCase):
y = np.searchsorted(x, x[-1])
assert_equal(y, 2)
@xpassIfTorchDynamo # (
@xfail # (
# reason="'searchsorted_out_cpu' not implemented for 'ComplexDouble'"
# )
def test_searchsorted_complex(self):
@ -1898,7 +1872,7 @@ class TestMethods(TestCase):
b = a.searchsorted([0, 1, 2], "right")
assert_equal(b, [0, 2, 2])
@xpassIfTorchDynamo # (
@xfail # (
# reason="RuntimeError: self.storage_offset() must be divisible by 8"
# )
def test_searchsorted_unaligned_array(self):
@ -1941,7 +1915,7 @@ class TestMethods(TestCase):
b = a.searchsorted(a, "right")
assert_equal(b, out + 1)
@xpassIfTorchDynamo # (reason="ndarray ctor")
@xfail # (reason="ndarray ctor")
def test_searchsorted_type_specific_2(self):
# Test all type specific binary search functions
types = "".join((np.typecodes["AllInteger"], np.typecodes["AllFloat"], "?"))
@ -1977,7 +1951,7 @@ class TestMethods(TestCase):
# assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
# assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
@xpassIfTorchDynamo # (reason="self.storage_offset() must be divisible by 8")
@xfail # (reason="self.storage_offset() must be divisible by 8")
def test_searchsorted_with_sorter(self):
a = np.random.rand(300)
s = a.argsort()
@ -2052,23 +2026,23 @@ class TestMethods(TestCase):
b = a.searchsorted(a, "right", s)
assert_equal(b, out + 1)
@xpassIfTorchDynamo # (reason="TODO argpartition")
@parametrize("dtype", "efdFDBbhil?")
@xfail # (reason="TODO argpartition")
@parametrize("dtype", np.typecodes["All"])
def test_argpartition_out_of_range(self, dtype):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10).astype(dtype=dtype)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
@xpassIfTorchDynamo # (reason="TODO partition")
@parametrize("dtype", "efdFDBbhil?")
@xfail # (reason="TODO partition")
@parametrize("dtype", np.typecodes["All"])
def test_partition_out_of_range(self, dtype):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10).astype(dtype=dtype)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
@xpassIfTorchDynamo # (reason="TODO argpartition")
@xfail # (reason="TODO argpartition")
def test_argpartition_integer(self):
# Test non-integer values in kth raise an error/
d = np.arange(10)
@ -2078,7 +2052,7 @@ class TestMethods(TestCase):
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.0)
@xpassIfTorchDynamo # (reason="TODO partition")
@xfail # (reason="TODO partition")
def test_partition_integer(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
@ -2088,8 +2062,8 @@ class TestMethods(TestCase):
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.0)
@xpassIfTorchDynamo # (reason="TODO partition")
@parametrize("kth_dtype", "Bbhil")
@xfail # (reason="TODO partition")
@parametrize("kth_dtype", np.typecodes["AllInteger"])
def test_partition_empty_array(self, kth_dtype):
# check axis handling for multidimensional empty arrays
kth = np.array(0, dtype=kth_dtype)[()]
@ -2101,8 +2075,8 @@ class TestMethods(TestCase):
msg = "test empty array partition with axis=None"
assert_equal(np.partition(a, kth, axis=None), a.ravel(), msg)
@xpassIfTorchDynamo # (reason="TODO argpartition")
@parametrize("kth_dtype", "Bbhil")
@xfail # (reason="TODO argpartition")
@parametrize("kth_dtype", np.typecodes["AllInteger"])
def test_argpartition_empty_array(self, kth_dtype):
# check axis handling for multidimensional empty arrays
kth = np.array(0, dtype=kth_dtype)[()]
@ -2120,7 +2094,7 @@ class TestMethods(TestCase):
msg,
)
@xpassIfTorchDynamo # (reason="TODO partition")
@xfail # (reason="TODO partition")
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
@ -2381,7 +2355,7 @@ class TestMethods(TestCase):
)
prev = k + 1
@xpassIfTorchDynamo # (reason="TODO partition")
@xfail # (reason="TODO partition")
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
@ -2448,7 +2422,7 @@ class TestMethods(TestCase):
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
@xpassIfTorchDynamo # (reason="TODO partition")
@xfail # (reason="TODO partition")
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
@ -2465,8 +2439,8 @@ class TestMethods(TestCase):
err_msg=f"data: {d!r}\n kth: {kth!r}",
)
@xpassIfTorchDynamo # (reason="TODO partition")
@parametrize("kth_dtype", "Bbhil")
@xfail # (reason="TODO partition")
@parametrize("kth_dtype", np.typecodes["AllInteger"])
def test_argpartition_gh5524(self, kth_dtype):
# A test for functionality of argpartition on lists.
kth = np.array(1, dtype=kth_dtype)[()]
@ -2474,7 +2448,7 @@ class TestMethods(TestCase):
p = np.argpartition(d, kth)
self.assert_partitioned(np.array(d)[p], [1])
@xpassIfTorchDynamo # (reason="TODO order='F'")
@xfail # (reason="TODO order='F'")
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
@ -2617,7 +2591,7 @@ class TestMethods(TestCase):
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
@xpassIfTorchDynamo # (reason="_aligned_zeros")
@xfail # (reason="_aligned_zeros")
def test_dot_out_mem_overlap(self):
np.random.seed(1)
@ -2639,7 +2613,7 @@ class TestMethods(TestCase):
assert_raises(ValueError, np.dot, a, b, out=b[::2])
assert_raises(ValueError, np.dot, a, b, out=b.T)
@xpassIfTorchDynamo # (reason="TODO: overlapping memor in matmul")
@xfail # (reason="TODO: overlapping memor in matmul")
def test_matmul_out(self):
# overlapping memory
a = np.arange(18).reshape(2, 3, 3)
@ -2675,7 +2649,7 @@ class TestMethods(TestCase):
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
@xpassIfTorchDynamo # (reason="no readonly views")
@xfail # (reason="no readonly views")
def test_diagonal_view_notwriteable(self):
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
@ -2766,7 +2740,7 @@ class TestMethods(TestCase):
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
@xpassIfTorchDynamo # (reason="TODO: implement order='F'")
@xfail # (reason="TODO: implement order='F'")
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
@ -2969,10 +2943,10 @@ class TestMethods(TestCase):
]
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises((TypeError, ValueError), complex, a)
assert_raises(ValueError, complex, a)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises((TypeError, ValueError), complex, c)
assert_raises(ValueError, complex, c)
class TestCequenceMethods(TestCase):
@ -3012,7 +2986,7 @@ class TestBinop(TestCase):
assert_equal(b, 3)
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
@ -3107,7 +3081,6 @@ class TestArgmaxArgminCommon(TestCase):
)
),
)
@skipif(numpy.__version__ < "1.23", reason="keepdims is new in numpy 1.22")
@parametrize("method", [np.argmax, np.argmin])
def test_np_argmin_argmax_keepdims(self, size, axis, method):
arr = np.random.normal(size=size)
@ -3177,7 +3150,7 @@ class TestArgmaxArgminCommon(TestCase):
with pytest.raises(ValueError):
method(arr.T, axis=axis, out=wrong_outarray, keepdims=True)
@xpassIfTorchDynamo # (reason="TODO: implement choose")
@xfail # (reason="TODO: implement choose")
@parametrize("method", ["max", "min"])
def test_all(self, method):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
@ -3460,7 +3433,7 @@ class TestArgmin(TestCase):
class TestMinMax(TestCase):
@xpassIfTorchDynamo
@xfail
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
assert_raises(np.AxisError, np.amin, 1, 1)
@ -3556,7 +3529,7 @@ class TestClip(TestCase):
assert_array_equal(result, expected)
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
class TestCompress(TestCase):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
@ -3580,7 +3553,7 @@ class TestCompress(TestCase):
assert_equal(out, 1)
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
@instantiate_parametrized_tests
class TestPutmask(TestCase):
def tst_basic(self, x, T, mask, val):
@ -3594,7 +3567,7 @@ class TestPutmask(TestCase):
mask = x < 40
for val in [-100, 0, 15]:
for types in "efdFDBbhil?":
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
if val < 0 and np.dtype(T).kind == "u":
@ -3672,7 +3645,7 @@ class TestTake(TestCase):
def test_ip_types(self):
x = np.random.random(24) * 100
x = np.reshape(x, (2, 3, 4))
for types in "efdFDBbhil?":
for types in np.sctypes.values():
for T in types:
self.tst_basic(x.copy().astype(T))
@ -3683,14 +3656,14 @@ class TestTake(TestCase):
assert_raises(IndexError, np.take, x, [-3], axis=0)
assert_array_equal(np.take(x, [-1], axis=0)[0], x[1])
@xpassIfTorchDynamo # (reason="XXX: take(..., mode='clip')")
@xfail # (reason="XXX: take(..., mode='clip')")
def test_clip(self):
x = np.random.random(24) * 100
x = np.reshape(x, (2, 3, 4))
assert_array_equal(np.take(x, [-1], axis=0, mode="clip")[0], x[0])
assert_array_equal(np.take(x, [2], axis=0, mode="clip")[0], x[1])
@xpassIfTorchDynamo # (reason="XXX: take(..., mode='wrap')")
@xfail # (reason="XXX: take(..., mode='wrap')")
def test_wrap(self):
x = np.random.random(24) * 100
x = np.reshape(x, (2, 3, 4))
@ -3698,7 +3671,7 @@ class TestTake(TestCase):
assert_array_equal(np.take(x, [2], axis=0, mode="wrap")[0], x[0])
assert_array_equal(np.take(x, [3], axis=0, mode="wrap")[0], x[1])
@xpassIfTorchDynamo # (reason="XXX: take(mode='wrap')")
@xfail # (reason="XXX: take(mode='wrap')")
def test_out_overlap(self):
# gh-6272 check overlap on out
x = np.arange(5)
@ -3715,7 +3688,7 @@ class TestTake(TestCase):
assert ret is out
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
@instantiate_parametrized_tests
class TestLexsort(TestCase):
@parametrize(
@ -4275,7 +4248,7 @@ class TestIO(TestCase):
assert_array_equal(res, expected)
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
@instantiate_parametrized_tests
class TestFromBuffer(TestCase):
@parametrize(
@ -4288,10 +4261,7 @@ class TestFromBuffer(TestCase):
buf = x.tobytes()
assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
# @xpassIfTorchDynamo
@parametrize(
"obj", [np.arange(10), subtest("12345678", decorators=[xfailIfTorchDynamo])]
)
@parametrize("obj", [np.arange(10), "12345678"])
def test_array_base(self, obj):
# Objects (including NumPy arrays), which do not use the
# `release_buffer` slot should be directly used as a base object.
@ -4305,7 +4275,6 @@ class TestFromBuffer(TestCase):
def test_empty(self):
assert_array_equal(np.frombuffer(b""), np.array([]))
@skip("fails on CI, we are unlikely to implement this")
@skipif(
IS_PYPY,
reason="PyPy's memoryview currently does not track exports. See: "
@ -4665,7 +4634,7 @@ class TestStats(TestCase):
with assert_raises(np.AxisError):
np.arange(10).mean(axis=2)
@xpassIfTorchDynamo # (reason="implement mean(..., where=...)")
@xfail # (reason="implement mean(..., where=...)")
def test_mean_where(self):
a = np.arange(16).reshape((4, 4))
wh_full = np.array(
@ -4753,7 +4722,7 @@ class TestStats(TestCase):
with assert_raises(np.AxisError):
np.arange(10).var(axis=2)
@xpassIfTorchDynamo # (reason="implement var(..., where=...)")
@xfail # (reason="implement var(..., where=...)")
def test_var_where(self):
a = np.arange(25).reshape((5, 5))
wh_full = np.array(
@ -4798,7 +4767,7 @@ class TestStats(TestCase):
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
@xpassIfTorchDynamo # (reason="implement std(..., where=...)")
@xfail # (reason="implement std(..., where=...)")
def test_std_where(self):
a = np.arange(25).reshape((5, 5))[::-1]
whf = np.array(
@ -4868,7 +4837,7 @@ class TestVdot(TestCase):
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
@xpassIfTorchDynamo # (reason="implement order='F'")
@xfail # (reason="implement order='F'")
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order="C")
b = np.array([[1, 2], [3, 4]], order="F")
@ -4894,7 +4863,7 @@ class TestVdot(TestCase):
assert_equal(np.vdot(a, b.copy()), np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b), np.vdot(a.flatten(), b.flatten()))
@xpassIfTorchDynamo # (reason="implement order='F'")
@xfail # (reason="implement order='F'")
def test_vdot_uncontiguous_2(self):
# test order='F' separately
for size in [2, 1000]:
@ -5106,7 +5075,7 @@ class TestDot(TestCase):
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
@xpassIfTorchDynamo # (reason="TODO order='F'")
@xfail # (reason="TODO order='F'")
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order="C")
b = np.array([[1, 2], [3, 4]], order="F")
@ -5253,7 +5222,7 @@ class MatmulCommon:
res = self.matmul(*arg)
assert_(res.dtype == dt)
@xpassIfTorchDynamo # (reason="no scalars")
@xfail # (reason="no scalars")
def test_result_types_2(self):
# in numpy, vector vector returns scalars
# we return a 0D array instead
@ -5476,6 +5445,7 @@ class TestMatmul(MatmulCommon, TestCase):
# test out non-contiguous
out = np.ones((5, 2, 2), dtype=float)
c = self.matmul(a, b, out=out[..., 0])
assert c.tensor._base is out.tensor
assert_array_equal(c, tgt)
c = self.matmul(a, v, out=out[:, 0, 0])
assert_array_equal(c, tgt_mv)
@ -5492,16 +5462,6 @@ class TestMatmul(MatmulCommon, TestCase):
c = self.matmul(b.T, a.T, out=out.T)
assert_array_equal(out, tgt)
@xfailIfTorchDynamo
def test_out_contiguous_2(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
# test out non-contiguous
out = np.ones((5, 2, 2), dtype=float)
c = self.matmul(a, b, out=out[..., 0])
assert c.tensor._base is out.tensor
m1 = np.arange(15.0).reshape(5, 3)
m2 = np.arange(21.0).reshape(3, 7)
m3 = np.arange(30.0).reshape(5, 6)[:, ::2] # non-contiguous
@ -5631,7 +5591,7 @@ class TestMatmulOperator(MatmulCommon, TestCase):
def test_matmul_raises(self):
assert_raises((RuntimeError, TypeError), self.matmul, np.int8(5), np.int8(5))
@xpassIfTorchDynamo # (reason="torch supports inplace matmul, and so do we")
@xfail # (reason="torch supports inplace matmul, and so do we")
def test_matmul_inplace(self):
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
@ -5644,7 +5604,7 @@ class TestMatmulOperator(MatmulCommon, TestCase):
assert_raises(TypeError, operator.imatmul, a, b)
assert_raises(TypeError, exec, "a @= b", globals(), locals())
@xpassIfTorchDynamo # (reason="matmul_axes")
@xfail # (reason="matmul_axes")
def test_matmul_axes(self):
a = np.arange(3 * 4 * 5).reshape(3, 4, 5)
c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
@ -5746,8 +5706,6 @@ class TestChoose(TestCase):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# XXX: revisit xfails when NEP 50 lands in numpy
@skip(reason="XXX: revisit xfails when NEP 50 lands in numpy")
@parametrize(
"ops",
[
@ -5821,7 +5779,7 @@ class TestRepeat(TestCase):
NEIGH_MODE = {"zero": 0, "one": 1, "constant": 2, "circular": 3, "mirror": 4}
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
class TestWarnings(TestCase):
def test_complex_warning(self):
x = np.array([1, 2])
@ -5970,8 +5928,7 @@ class TestPEP3118Dtype(TestCase):
self._check("i:f0:", [("f0", "i")])
@skipif(numpy.__version__ < "1.23", reason="CopyMode is new in NumPy 1.22")
@xpassIfTorchDynamo
@xfail # (reason="TODO")
@instantiate_parametrized_tests
class TestArrayCreationCopyArgument(TestCase):
class RaiseOnBool:
@ -6231,7 +6188,7 @@ class TestArrayAttributeDeletion(TestCase):
assert_raises(AttributeError, delattr, a, s)
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
@instantiate_parametrized_tests
class TestArrayInterface(TestCase):
class Foo:
@ -6282,7 +6239,7 @@ class TestArrayInterface(TestCase):
class TestDelMisc(TestCase):
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
def test_flat_element_deletion(self):
it = np.ones(3).flat
try:
@ -6651,6 +6608,58 @@ class TestWhere(TestCase):
np.where(a, x=a, y=a)
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
@xfail # (reason="TODO")
class TestSizeOf(TestCase):
def test_empty_array(self):
pytest.xpass()
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
pytest.xpass()
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
@_no_tracing
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
pytest.xpass()
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
@ -6662,7 +6671,7 @@ class TestHashing(TestCase):
class TestFormat(TestCase):
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
def test_0d(self):
a = np.array(np.pi)
assert_equal(f"{a:0.3g}", "3.14")
@ -6695,7 +6704,7 @@ class TestWritebackIfCopy(TestCase):
res = np.argmin(mat, 0, out=out)
assert_equal(res, range(5))
@xpassIfTorchDynamo # (reason="XXX: place()")
@xfail # (reason="XXX: place()")
def test_insert_noncontiguous(self):
a = np.arange(6).reshape(2, 3).T # force non-c-contiguous
# uses arr_insert
@ -6710,7 +6719,7 @@ class TestWritebackIfCopy(TestCase):
np.put(a, [0, 2], [44, 55])
assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
@xpassIfTorchDynamo # (reason="XXX: putmask()")
@xfail # (reason="XXX: putmask()")
def test_putmask_noncontiguous(self):
a = np.arange(6).reshape(2, 3).T # force non-c-contiguous
# uses arr_putmask
@ -6730,7 +6739,7 @@ class TestWritebackIfCopy(TestCase):
np.choose(a, choices, out=out, mode="raise")
assert_equal(out, np.array([[10, -10, 10], [-10, 10, -10], [10, -10, 10]]))
@xpassIfTorchDynamo # (reason="XXX: ndarray.flat")
@xfail # (reason="XXX: ndarray.flat")
def test_flatiter__array__(self):
a = np.arange(9).reshape(3, 3)
b = a.T.flat
@ -6774,7 +6783,7 @@ class TestArange(TestCase):
assert_raises(TypeError, np.arange, step=3)
assert_raises(TypeError, np.arange, dtype="int64")
@xpassIfTorchDynamo # (reason="weird arange signature (optionals before required args)")
@xfail # (reason="weird arange signature (optionals before required args)")
def test_require_range_2(self):
assert_raises(TypeError, np.arange, start=4)
@ -6820,13 +6829,19 @@ class TestArange(TestCase):
args[which] = np.float64(2.0)
assert np.arange(*args).dtype == np.float64
# Cover stranger error path, test only to achieve code coverage!
args[which] = [None, []]
with pytest.raises((ValueError, RuntimeError)):
# Fails discovering start dtype
np.arange(*args)
@parametrize("dt", [np.float32, np.uint8, complex])
def test_explicit_dtype(self, dt):
assert np.arange(5.0, dtype=dt).dtype == dt
class TestRichcompareScalar(TestCase):
@xpassIfTorchDynamo # (reason="comparison: builtin.bools or...?")
@xfail # (reason="comparison: builtin.bools or...?")
def test_richcompare_scalar_boolean_singleton_return(self):
# These are currently guaranteed to be the boolean singletons, but maybe
# returning NumPy booleans would also be OK:
@ -6836,7 +6851,7 @@ class TestRichcompareScalar(TestCase):
assert (np.int16(0) != "a") is True
@skip # (reason="implement views/dtypes")
@xfail # (reason="implement views/dtypes")
class TestViewDtype(TestCase):
"""
Verify that making a view of a non-contiguous array works as expected.

View File

@ -7,10 +7,19 @@ import platform
import sys
import warnings
import numpy
import pytest
import torch._numpy as np
from torch._numpy.random import rand, randint, randn
from torch._numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_warns, # assert_array_max_ulp, HAS_REFCOUNT, IS_WASM
)
IS_WASM = False
HAS_REFCOUNT = True
@ -27,38 +36,8 @@ from torch.testing._internal.common_utils import (
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
xfailIfTorchDynamo,
xpassIfTorchDynamo,
)
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.random import rand, randint, randn
from numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_warns, # assert_array_max_ulp, HAS_REFCOUNT, IS_WASM
)
else:
import torch._numpy as np
from torch._numpy.random import rand, randint, randn
from torch._numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_warns, # assert_array_max_ulp, HAS_REFCOUNT, IS_WASM
)
skip = functools.partial(skipif, True)
@ -129,7 +108,7 @@ class TestNonarrayArgs(TestCase):
tgt = [2, 5, 2, 3, 7, 2, 2]
assert_equal(out, tgt)
@xpassIfTorchDynamo # (reason="TODO implement compress(...)")
@xfail # (reason="TODO implement compress(...)")
def test_compress(self):
arr = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
@ -199,12 +178,12 @@ class TestNonarrayArgs(TestCase):
s = np.float64(1.0)
assert_equal(s.round(), 1.0)
@xpassIfTorchDynamo # (reason="scalar instances")
@xfail # (reason="scalar instances")
def test_round_2(self):
s = np.float64(1.0)
assert_(isinstance(s.round(), np.float64))
@xpassIfTorchDynamo # (reason="scalar instances")
@xfail # (reason="scalar instances")
@parametrize(
"dtype",
[
@ -227,6 +206,7 @@ class TestNonarrayArgs(TestCase):
assert_equal(round(s, None), 1)
assert_equal(round(s, ndigits=None), 1)
@xfail # (reason="scalar instances")
@parametrize(
"val, ndigits",
[
@ -234,14 +214,8 @@ class TestNonarrayArgs(TestCase):
# 2**31 - 1, -1, marks=pytest.mark.xfail(reason="Out of range of int32")
# ),
subtest((2**31 - 1, -1), decorators=[xfail]),
subtest(
(2**31 - 1, 1 - math.ceil(math.log10(2**31 - 1))),
decorators=[xpassIfTorchDynamo],
),
subtest(
(2**31 - 1, -math.ceil(math.log10(2**31 - 1))),
decorators=[xpassIfTorchDynamo],
),
],
)
def test_dunder_round_edgecases(self, val, ndigits):
@ -344,7 +318,7 @@ class TestNonarrayArgs(TestCase):
# assert_(w[0].category is RuntimeWarning)
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
class TestIsscalar(TestCase):
def test_isscalar(self):
assert_(np.isscalar(3.1))
@ -583,7 +557,7 @@ class TestBoolCmp(TestCase):
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
class TestSeterr(TestCase):
def test_default(self):
err = np.geterr()
@ -603,7 +577,6 @@ class TestSeterr(TestCase):
np.seterr(**old)
assert_(np.geterr() == old)
@xfail
@skipif(IS_WASM, reason="no wasm fp exception support")
@skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_divide_err(self):
@ -848,7 +821,7 @@ class TestTypes(TestCase):
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
@xpassIfTorchDynamo # (reason="'Scalars do not upcast arrays' rule")
@xfail # (reason="'Scalars do not upcast arrays' rule")
def test_coercion_2(self):
def res_type(a, b):
return np.add(a, b).dtype
@ -899,7 +872,7 @@ class TestTypes(TestCase):
# Also test keyword arguments
assert_(np.can_cast(from_=np.int32, to=np.int64))
@xpassIfTorchDynamo # (reason="value-based casting?")
@xfail # (reason="value-based casting?")
def test_can_cast_values(self):
# gh-5917
for dt in np.sctypes["int"] + np.sctypes["uint"]:
@ -920,8 +893,7 @@ class NIterError(Exception):
pass
@skip(reason="NP_VER: fails on CI")
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
@instantiate_parametrized_tests
class TestFromiter(TestCase):
def makegen(self):
@ -967,7 +939,6 @@ class TestFromiter(TestCase):
with pytest.raises(NIterError):
np.fromiter(iterable, dtype=dtype, count=count)
@skip(reason="NP_VER: fails on CI")
def test_empty_result(self):
class MyIter:
def __length_hint__(self):
@ -1016,8 +987,6 @@ class TestNonzeroAndCountNonzero(TestCase):
assert_equal(np.count_nonzero(np.array([1], dtype="?")), 1)
assert_equal(np.nonzero(np.array([1])), ([0],))
@xfailIfTorchDynamo # numpy returns a python int, we return a 0D array
def test_nonzero_trivial_differs(self):
assert isinstance(np.count_nonzero([]), np.ndarray)
def test_nonzero_zerod(self):
@ -1027,8 +996,6 @@ class TestNonzeroAndCountNonzero(TestCase):
assert_equal(np.count_nonzero(np.array(1)), 1)
assert_equal(np.count_nonzero(np.array(1, dtype="?")), 1)
@xfailIfTorchDynamo # numpy returns a python int, we return a 0D array
def test_nonzero_zerod_differs(self):
assert isinstance(np.count_nonzero(np.array(1)), np.ndarray)
def test_nonzero_onedim(self):
@ -1037,9 +1004,6 @@ class TestNonzeroAndCountNonzero(TestCase):
assert_equal(np.count_nonzero(x), 4)
assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
@xfailIfTorchDynamo # numpy returns a python int, we return a 0D array
def test_nonzero_onedim_differs(self):
x = np.array([1, 0, 2, -1, 0, 0, 8])
assert isinstance(np.count_nonzero(x), np.ndarray)
def test_nonzero_twodim(self):
@ -1089,7 +1053,7 @@ class TestNonzeroAndCountNonzero(TestCase):
assert_raises(np.AxisError, np.count_nonzero, m, axis=3)
assert_raises(TypeError, np.count_nonzero, m, axis=np.array([[1], [2]]))
@parametrize("typecode", "efdFDBbhil?")
@parametrize("typecode", np.typecodes["All"])
def test_count_nonzero_axis_all_dtypes(self, typecode):
# More thorough test that the axis argument is respected
# for all dtypes and responds correctly when presented with
@ -1147,7 +1111,7 @@ class TestIndex(TestCase):
assert_equal(c.dtype, np.dtype("int32"))
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
class TestBinaryRepr(TestCase):
def test_zero(self):
assert_equal(np.binary_repr(0), "0")
@ -1185,7 +1149,7 @@ class TestBinaryRepr(TestCase):
assert_equal(np.binary_repr(np.int64(-(2**62)), width=64), "11" + "0" * 62)
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
class TestBaseRepr(TestCase):
def test_base3(self):
assert_equal(np.base_repr(3**5, 3), "100000")
@ -1397,7 +1361,7 @@ class TestClip(TestCase):
act = self.clip(a, m, M)
assert_array_equal(ac, act)
@xpassIfTorchDynamo # (reason="byteorder not supported in torch")
@xfail # (reason="byteorder not supported in torch")
def test_simple_nonnative(self):
# Test non native double input with scalar min/max.
# Test native double input with non native double scalar min/max.
@ -1417,7 +1381,7 @@ class TestClip(TestCase):
act = self.clip(a, m, M)
assert_array_equal(ac, act)
@xpassIfTorchDynamo # (reason="clamp not supported for complex")
@xfail # (reason="clamp not supported for complex")
def test_simple_complex(self):
# Test native complex input with native double scalar min/max.
# Test native input with complex double scalar min/max.
@ -1470,14 +1434,8 @@ class TestClip(TestCase):
self.clip(a, m, M, act)
assert_array_equal(ac, act)
# @xpassIfTorchDynamo # (reason="casting not supported")
@parametrize(
"casting",
[
subtest(None, decorators=[xfail]),
subtest("unsafe", decorators=[xpassIfTorchDynamo]),
],
)
@xfail # (reason="casting not supported")
@parametrize("casting", [None, "unsafe"])
def test_simple_int32_inout(self, casting):
# Test native int32 input with double min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
@ -1603,7 +1561,7 @@ class TestClip(TestCase):
act = self.clip(a, m * np.zeros(a.shape), M)
assert_array_equal(ac, act)
@xpassIfTorchDynamo # (reason="newbyteorder not supported")
@xfail # (reason="newbyteorder not supported")
def test_type_cast_06(self):
# Test native with NON native scalar min/max.
a = self._generate_data(self.nr, self.nc)
@ -1614,7 +1572,7 @@ class TestClip(TestCase):
ac = self.fastclip(a, m_s, M)
assert_array_equal(ac, act)
@xpassIfTorchDynamo # (reason="newbyteorder not supported")
@xfail # (reason="newbyteorder not supported")
def test_type_cast_07(self):
# Test NON native with native array min/max.
a = self._generate_data(self.nr, self.nc)
@ -1626,7 +1584,7 @@ class TestClip(TestCase):
ac = self.fastclip(a_s, m, M)
assert_array_equal(ac, act)
@xpassIfTorchDynamo # (reason="newbyteorder not supported")
@xfail # (reason="newbyteorder not supported")
def test_type_cast_08(self):
# Test NON native with native scalar min/max.
a = self._generate_data(self.nr, self.nc)
@ -1638,7 +1596,7 @@ class TestClip(TestCase):
act = a_s.clip(m, M)
assert_array_equal(ac, act)
@xpassIfTorchDynamo # (reason="newbyteorder not supported")
@xfail # (reason="newbyteorder not supported")
def test_type_cast_09(self):
# Test native with NON native array min/max.
a = self._generate_data(self.nr, self.nc)
@ -1660,7 +1618,7 @@ class TestClip(TestCase):
ac = self.fastclip(a, m, M, out=b)
assert_array_equal(ac, act)
@xpassIfTorchDynamo # (reason="newbyteorder not supported")
@xfail # (reason="newbyteorder not supported")
def test_type_cast_11(self):
# Test non native with native scalar, min/max, out non native
a = self._generate_non_native_data(self.nr, self.nc)
@ -2133,8 +2091,7 @@ class TestCreationFuncs(TestCase):
# Test ones, zeros, empty and full.
def setUp(self):
# dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
dtypes = {np.dtype(tp) for tp in "efdFDBbhil?"}
dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
self.dtypes = dtypes
self.orders = {
"C": "c_contiguous"
@ -2507,7 +2464,7 @@ class TestArgwhere(TestCase):
assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
class TestStringFunction(TestCase):
def test_set_string_function(self):
a = np.array([1])
@ -2613,7 +2570,7 @@ class TestRollaxis(TestCase):
assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
assert_raises(np.AxisError, np.rollaxis, a, 0, 5)
@xpassIfTorchDynamo # (reason="needs fancy indexing")
@xfail # (reason="needs fancy indexing")
def test_results(self):
a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy()
aind = np.indices(a.shape)
@ -2814,7 +2771,6 @@ class TestCross(TestCase):
for axisc in range(-2, 2):
assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
@skipif(numpy.__version__ < "1.24", reason="fix landed in NumPy 1.24")
def test_uint8_int32_mixed_dtypes(self):
# regression test for gh-19138
u = np.array([[195, 8, 9]], np.uint8)
@ -2871,7 +2827,7 @@ class TestIndices(TestCase):
assert_(arr.dtype == dtype)
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
class TestRequire(TestCase):
flag_names = [
"C",
@ -2938,7 +2894,7 @@ class TestRequire(TestCase):
assert_raises(ValueError, np.require, a, None, ["C", "F"])
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
class TestBroadcast(TestCase):
def test_broadcast_in_args(self):
# gh-5881
@ -2997,7 +2953,7 @@ class TestBroadcast(TestCase):
assert_raises(ValueError, np.broadcast, 1, **{"x": 1})
def test_shape_mismatch_error_message(self):
with assert_raises(
with pytest.raises(
ValueError,
match=r"arg 0 with shape \(1, 3\) and " r"arg 2 with shape \(2,\)",
):

View File

@ -4,30 +4,22 @@ import functools
import itertools
import sys
from unittest import skipIf as skipif
from unittest import expectedFailure as xfail, skipIf as skipif
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy.testing import assert_
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_
else:
import torch._numpy as np
from torch._numpy.testing import assert_
skip = functools.partial(skipif, True)
@xpassIfTorchDynamo # (
@xfail # (
# reason="We do not disctinguish between scalar and array types."
# " Thus, scalars can upcast arrays."
# )
@ -109,7 +101,7 @@ class TestIsSubDType(TestCase):
assert np.issubdtype(np.float32, "f")
@xpassIfTorchDynamo # (
@xfail # (
# reason="We do not have (or need) np.core.numerictypes."
# " Our type aliases are in _dtypes.py."
# )

View File

@ -5,33 +5,25 @@ Test the scalar constructors, which also do type-coercion
"""
import functools
from unittest import skipIf as skipif
from unittest import expectedFailure as xfail, skipIf as skipif
import pytest
import torch._numpy as np
from torch._numpy.testing import assert_almost_equal, assert_equal
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
else:
import torch._numpy as np
from torch._numpy.testing import assert_almost_equal, assert_equal
skip = functools.partial(skipif, True)
class TestFromString(TestCase):
@xpassIfTorchDynamo # (reason="XXX: floats from strings")
@xfail # (reason="XXX: floats from strings")
def test_floating(self):
# Ticket #640, floats from string
fsingle = np.single("1.234")
@ -39,7 +31,7 @@ class TestFromString(TestCase):
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
@xpassIfTorchDynamo # (reason="XXX: floats from strings")
@xfail # (reason="XXX: floats from strings")
def test_floating_overflow(self):
"""Strings containing an unrepresentable float overflow"""
fhalf = np.half("1e10000")

View File

@ -13,24 +13,16 @@ from unittest import skipIf as skipif, SkipTest
import pytest
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy.testing import assert_equal
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_equal
else:
import torch._numpy as np
from torch._numpy.testing import assert_equal
skip = functools.partial(skipif, True)

View File

@ -9,47 +9,31 @@ import warnings
from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest
import numpy
# from numpy._utils import _pep440
import pytest
from pytest import raises as assert_raises
# from hypothesis import given, settings
# from hypothesis.strategies import sampled_from
# from hypothesis.extra import numpy as hynp
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
slowTest as slow,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import (
_gen_alignment_data,
assert_,
assert_almost_equal,
assert_equal,
)
else:
import torch._numpy as np
from torch._numpy.testing import (
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy.testing import (
_gen_alignment_data,
assert_,
assert_almost_equal,
assert_equal,
# assert_array_equal, suppress_warnings, _gen_alignment_data,
# assert_warns,
)
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
slowTest as slow,
subtest,
TestCase,
)
skip = functools.partial(skipif, True)
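The recurring xpassIfTorchDynamo -> xfail swap in these hunks undoes a decorator that the reverted PR used in place of a plain expected-failure marker. A minimal sketch of the presumed semantics (the real helper lives in torch.testing._internal.common_utils and may be implemented differently):

from unittest import expectedFailure
from torch.testing._internal.common_utils import TEST_WITH_TORCHDYNAMO

def xpass_if_torchdynamo_sketch(test_fn):
    # Hypothetical stand-in: under Dynamo the test runs against real NumPy
    # and is expected to pass; in eager mode torch._numpy lacks the feature,
    # so mark it as an expected failure.
    return test_fn if TEST_WITH_TORCHDYNAMO else expectedFailure(test_fn)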
@@ -171,7 +155,7 @@ class TestBaseMath(TestCase):
np.add(2, inp2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
@xpassIfTorchDynamo # (reason="pytorch does not have .view")
@xfail # (reason="pytorch does not have .view")
def test_lower_align(self):
# check data that is not aligned to element size
# i.e. doubles are aligned to 4 bytes on i386
@@ -202,8 +186,7 @@ class TestPower(TestCase):
else:
assert_almost_equal(b, 6765201, err_msg=msg)
@skip(reason="NP_VER: fails on CI on older NumPy")
@xpassIfTorchDynamo # (reason="Value-based casting: (2)**(-2) -> 0 in pytorch.")
@xfail # (reason="Value-based casting: (2)**(-2) -> 0 in pytorch.")
def test_integers_to_negative_integer_power(self):
# Note that the combination of uint64 with a signed integer
# has common type np.float64. The other combinations should all
@@ -289,8 +272,7 @@ def _signs(dt):
@instantiate_parametrized_tests
class TestModulus(TestCase):
def test_modulus_basic(self):
# dt = np.typecodes["AllInteger"] + np.typecodes["Float"]
dt = "Bbhil" + "efd"
dt = np.typecodes["AllInteger"] + np.typecodes["Float"]
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
@@ -335,8 +317,7 @@ class TestModulus(TestCase):
def test_float_modulus_roundoff(self):
# gh-6127
# dt = np.typecodes["Float"]
dt = "efd"
dt = np.typecodes["Float"]
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
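The modulus hunks above iterate floordiv_and_mod and divmod over signed operands; the invariant they check is the floor-division identity, which in plain Python reads:

# Floor-division identity: a == div * b + rem, with rem taking b's sign.
a, b = 7.5, -2.0
div, rem = divmod(a, b)
assert div == -4.0 and rem == -0.5   # rem has the divisor's sign
assert div * b + rem == a            # (-4.0) * (-2.0) + (-0.5) == 7.5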
@@ -352,7 +333,7 @@ class TestModulus(TestCase):
else:
assert_(b > rem >= 0, msg)
@parametrize("dt", "efd")
@parametrize("dt", np.typecodes["Float"])
def test_float_modulus_corner_cases(self, dt):
if dt == "e":
# FIXME: make xfail
@@ -372,7 +353,7 @@ class TestModulus(TestCase):
# sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
# sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
# sup.filter(RuntimeWarning, "invalid value encountered in divmod")
for dt in "efd":
for dt in np.typecodes["Float"]:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
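The typecode hunks above restore np.typecodes[...] lookups in place of hard-coded strings. Judging by the literals in the reverted lines, torch._numpy's typecodes dict presumably maps to exactly those characters; a sketch of the assumed equivalence (not verified against the torch._numpy sources):

import torch._numpy as np

# Assumed mappings, inferred from the literals in the reverted hunks.
assert np.typecodes["AllInteger"] == "Bbhil"
assert np.typecodes["Integer"] == "bhil"
assert np.typecodes["Float"] == "efd"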
@@ -470,8 +451,7 @@ class TestConversion(TestCase):
a = np.array(l, dtype=T)
assert_equal([int(_m) for _m in a], li)
@skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
@xpassIfTorchDynamo # (reason="pytorch does not emit this warning.")
@xfail # (reason="pytorch does not emit this warning.")
def test_iinfo_long_values_1(self):
for code in "bBh":
with pytest.warns(DeprecationWarning):
@@ -495,7 +475,7 @@ class TestConversion(TestCase):
dtype(np.iinfo(dtype).max + 1)
for code in [np.int_, np.longlong]:
assert_raises((OverflowError, RuntimeError), overflow_error_func, code)
assert_raises(RuntimeError, overflow_error_func, code)
def test_numpy_scalar_relational_operators(self):
# All integer
@@ -574,7 +554,7 @@ class TestConversion(TestCase):
# assert_equal( val, val2 )
@xpassIfTorchDynamo # (reason="can delegate repr to pytorch")
@xfail # (reason="can delegate repr to pytorch")
class TestRepr(TestCase):
def _test_type_repr(self, t):
finfo = np.finfo(t)
@@ -809,7 +789,7 @@ def recursionlimit(n):
@instantiate_parametrized_tests
class TestScalarOpsMisc(TestCase):
@xfail # (reason="pytorch does not warn on overflow")
@parametrize("dtype", "Bbhil")
@parametrize("dtype", np.typecodes["AllInteger"])
@parametrize(
"operation",
[
@@ -827,7 +807,7 @@ class TestScalarOpsMisc(TestCase):
operation(min, max)
@skip(reason="integer overflow UB: crashes pytorch under ASAN")
@parametrize("dtype", "bhil")
@parametrize("dtype", np.typecodes["Integer"])
@parametrize(
"operation",
[
@@ -849,9 +829,8 @@ class TestScalarOpsMisc(TestCase):
with pytest.warns(RuntimeWarning, match="overflow encountered"):
operation(min, neg_1)
@skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
@xpassIfTorchDynamo # (reason="pytorch does not warn on overflow")
@parametrize("dtype", "B")
@xfail # (reason="pytorch does not warn on overflow")
@parametrize("dtype", np.typecodes["UnsignedInteger"])
def test_scalar_unsigned_integer_overflow(self, dtype):
val = np.dtype(dtype).type(8)
with pytest.warns(RuntimeWarning, match="overflow encountered"):

View File

@@ -4,54 +4,30 @@ import functools
from unittest import expectedFailure as xfail, skipIf as skipif
import numpy
import pytest
from pytest import raises as assert_raises
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy import (
array,
atleast_1d,
atleast_2d,
atleast_3d,
AxisError,
concatenate,
hstack,
newaxis,
stack,
vstack,
)
from torch._numpy.testing import assert_, assert_array_equal, assert_equal
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import (
array,
atleast_1d,
atleast_2d,
atleast_3d,
AxisError,
concatenate,
hstack,
newaxis,
stack,
vstack,
)
from numpy.testing import assert_, assert_array_equal, assert_equal
else:
import torch._numpy as np
from torch._numpy import (
array,
atleast_1d,
atleast_2d,
atleast_3d,
AxisError,
concatenate,
hstack,
newaxis,
stack,
vstack,
)
from torch._numpy.testing import assert_, assert_array_equal, assert_equal
skip = functools.partial(skipif, True)
@@ -202,7 +178,6 @@ class TestHstack(TestCase):
# with assert_warns(FutureWarning):
hstack(x for x in np.ones((3, 2)))
@skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
def test_casting_and_dtype(self):
a = np.array([1, 2, 3])
b = np.array([2.5, 3.5, 4.5])
@@ -257,7 +232,6 @@ class TestVstack(TestCase):
with pytest.raises(TypeError, match="arrays to stack must be"):
vstack(np.arange(3) for _ in range(2))
@skipif(numpy.__version__ < "1.24", reason="casting kwarg is new in NumPy 1.24")
def test_casting_and_dtype(self):
a = np.array([1, 2, 3])
b = np.array([2.5, 3.5, 4.5])
@@ -265,7 +239,6 @@ class TestVstack(TestCase):
expected_res = np.array([[1, 2, 3], [2, 3, 4]])
assert_array_equal(res, expected_res)
@skipif(numpy.__version__ < "1.24", reason="casting kwarg is new in NumPy 1.24")
def test_casting_and_dtype_type_error(self):
a = np.array([1, 2, 3])
b = np.array([2.5, 3.5, 4.5])
@@ -359,7 +332,7 @@ class TestConcatenate(TestCase):
assert out is rout
assert np.all(r == rout)
@xpassIfTorchDynamo # (reason="concatenate(x, axis=None) relies on x being a sequence")
@xfail # (reason="concatenate(x, axis=None) relies on x being a sequence")
def test_large_concatenate_axis_None(self):
# When no axis is given, concatenate uses flattened versions.
# This also had a bug with many arrays (see gh-5979).
@@ -469,7 +442,6 @@ class TestConcatenate(TestCase):
@instantiate_parametrized_tests
class TestStackMisc(TestCase):
@skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
def test_stack(self):
# non-iterable input
assert_raises(TypeError, stack, 1)
@@ -551,7 +523,6 @@ class TestStackMisc(TestCase):
with assert_raises(TypeError):
stack((a, b), dtype=np.int64, axis=1, casting="safe")
@skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
@parametrize("axis", [0])
@parametrize("out_dtype", ["c8", "f4", "f8", "i8"]) # torch does not have ">f8",
@parametrize("casting", ["no", "equiv", "safe", "same_kind", "unsafe"])

View File

@@ -5,20 +5,10 @@
Copied from fftpack.helper by Pearu Peterson, October 2005
"""
from torch.testing._internal.common_utils import (
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import fft, pi
from numpy.testing import assert_array_almost_equal
else:
import torch._numpy as np
from torch._numpy import fft, pi
from torch._numpy.testing import assert_array_almost_equal
import torch._numpy as np
from torch._numpy import fft, pi
from torch._numpy.testing import assert_array_almost_equal
from torch.testing._internal.common_utils import run_tests, TestCase
class TestFFTShift(TestCase):
@@ -83,9 +73,6 @@ class TestFFTShift(TestCase):
def test_equal_to_original(self):
"""Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14)"""
if TEST_WITH_TORCHDYNAMO:
from numpy import arange, asarray, concatenate, take
else:
from torch._numpy import arange, asarray, concatenate, take
def original_fftshift(x, axes=None):

View File

@@ -6,26 +6,19 @@ import threading
from unittest import skipIf as skipif, SkipTest
import pytest
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy.random import random
from torch._numpy.testing import assert_allclose, assert_array_equal # , IS_WASM
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.random import random
from numpy.testing import assert_allclose # , IS_WASM
else:
import torch._numpy as np
from torch._numpy.random import random
from torch._numpy.testing import assert_allclose # , IS_WASM
skip = functools.partial(skipif, True)
@@ -69,10 +62,10 @@ class TestFFT1D(TestCase):
def test_ifft(self, norm):
x = random(30) + 1j * random(30)
assert_allclose(x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), atol=1e-6)
# Ensure we get the correct error message
# NB: Exact wording differs slightly under Dynamo and in eager.
with pytest.raises((ValueError, RuntimeError), match="Invalid number of"):
with pytest.raises(
(ValueError, RuntimeError), match="Invalid number of data points"
):
np.fft.ifft([], norm=norm)
def test_fft2(self):
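The match-string change in the ifft hunk above works because pytest.raises applies the pattern with re.search, so matching a stable prefix covers both the eager and the Dynamo wording of the message. A self-contained illustration (the exact messages here are made up):

import pytest

def raise_like_backend(eager):
    # Hypothetical messages: both start with the same stable prefix.
    if eager:
        raise ValueError("Invalid number of data points (0) specified")
    raise RuntimeError("Invalid number of FFT points")

for eager in (True, False):
    with pytest.raises((ValueError, RuntimeError), match="Invalid number of"):
        raise_like_backend(eager)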
@@ -365,13 +358,10 @@ class TestFFTThreadSafe(TestCase):
[x.join() for x in t]
# Make sure all threads returned the correct value
for i in range(self.threads):
# under torch.dynamo `assert_array_equal` fails with relative errors of
# about 1.5e-14. Hence replace it with `assert_allclose(..., rtol=2e-14)`
assert_allclose(
assert_array_equal(
q.get(timeout=5),
expected,
atol=2e-14
# msg="Function returned wrong value in multithreaded context",
"Function returned wrong value in multithreaded context",
)
def test_fft(self):

View File

@@ -1,27 +1,15 @@
# Owner(s): ["module: dynamo"]
from unittest import skipIf as skipif
from unittest import expectedFailure as xfail, skipIf as skipif
from torch.testing._internal.common_utils import (
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
import torch._numpy as np
from torch._numpy.testing import assert_allclose, assert_array_equal
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
else:
import torch._numpy as np
from torch._numpy.testing import assert_allclose, assert_array_equal
from torch.testing._internal.common_utils import run_tests, TestCase
class TestConstant(TestCase):
@xpassIfTorchDynamo # (reason="tuple values")
@xfail # (reason="tuple values")
def test_check_constant(self):
a = np.arange(100)
a = np.pad(a, (25, 20), "constant", constant_values=(10, 20))
@@ -369,7 +357,7 @@ class TestConstant(TestCase):
)
assert_allclose(test, expected)
@xpassIfTorchDynamo # (reason="tuple values")
@xfail # (reason="tuple values")
def test_check_constant_float3(self):
a = np.arange(100, dtype=float)
a = np.pad(a, (25, 20), "constant", constant_values=(-1.1, -1.2))
@@ -540,7 +528,7 @@ class TestConstant(TestCase):
)
assert_allclose(test, expected)
@xpassIfTorchDynamo # (reason="tuple values")
@xfail # (reason="tuple values")
def test_check_constant_pad_2d(self):
arr = np.arange(4).reshape(2, 2)
test = np.lib.pad(

View File

@@ -3,39 +3,24 @@
"""Test functions for 1D array set operations.
"""
from unittest import skipIf
import numpy
from unittest import expectedFailure as xfail
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy import unique
from torch._numpy.testing import assert_array_equal, assert_equal
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
xfailIfTorchDynamo,
xpassIfTorchDynamo,
)
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import ediff1d, in1d, intersect1d, setdiff1d, setxor1d, union1d, unique
from numpy.testing import assert_array_equal, assert_equal, assert_raises_regex
else:
import torch._numpy as np
from torch._numpy import unique
from torch._numpy.testing import assert_array_equal, assert_equal
@skipIf(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
@instantiate_parametrized_tests
class TestSetOps(TestCase):
def test_intersect1d(self):
@@ -160,15 +145,12 @@ class TestSetOps(TestCase):
(np.array([1, 2, 3], dtype=np.int64), None, np.nan, "to_end"),
# should fail because attempting
# to downcast to int type:
subtest(
(
np.array([1, 2, 3], dtype=np.int64),
np.array([5, 7, 2], dtype=np.float32),
None,
"to_begin",
),
decorators=[xfailIfTorchDynamo],
),
# should fail because attempting to cast
# two special floating point values
# to integers (on both sides of ary),
@@ -223,7 +205,6 @@ class TestSetOps(TestCase):
assert_equal(actual, expected)
assert actual.dtype == expected.dtype
@skipIf(True, reason="NP_VER: fails with NumPy 1.22.x")
@parametrize("kind", [None, "sort", "table"])
def test_isin(self, kind):
# the tests for in1d cover most of isin's behavior
@@ -236,7 +217,7 @@ class TestSetOps(TestCase):
isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1})
def assert_isin_equal(a, b):
x = np.isin(a, b, kind=kind)
x = isin(a, b, kind=kind)
y = isin_slow(a, b)
assert_array_equal(x, y)
@@ -463,7 +444,7 @@ class TestSetOps(TestCase):
a = np.array([0, 1, 2], dtype="timedelta64[s]")
b = a
# Make sure it raises a value error:
with assert_raises(ValueError):
with pytest.raises(ValueError):
in1d(a, b, kind="table")
@parametrize(
@@ -494,7 +475,7 @@ class TestSetOps(TestCase):
)
if expect_failure:
with assert_raises(RuntimeError, match="exceed the maximum"):
with pytest.raises(RuntimeError, match="exceed the maximum"):
in1d(ar1, ar2, kind=kind)
else:
assert_array_equal(in1d(ar1, ar2, kind=kind), expected)
@@ -763,7 +744,7 @@ class TestUnique(TestCase):
# assert_equal(a3_idx.dtype, np.intp)
# assert_equal(a3_inv.dtype, np.intp)
@xpassIfTorchDynamo # (reason="unique with nans")
@xfail # (reason="unique with nans")
def test_unique_1d_2(self):
# test for ticket 2111 - float
a = [2.0, np.nan, 1.0, np.nan]
@@ -809,7 +790,7 @@ class TestUnique(TestCase):
assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg)
assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg)
@xpassIfTorchDynamo # _run_axis_tests xfails with the message
@xfail # _run_axis_tests xfails with the message
# "torch has different unique ordering behaviour"
def test_unique_axis(self):
types = []
@@ -835,7 +816,7 @@ class TestUnique(TestCase):
uniq = unique(x, axis=axis)
assert_array_equal(uniq, [1, 2, 3, 4])
@xpassIfTorchDynamo # (reason="unique / return_index")
@xfail # (reason="unique / return_index")
def test_unique_axis_zeros(self):
# issue 15559
single_zero = np.empty(shape=(2, 0), dtype=np.int8)
@@ -942,8 +923,7 @@ class TestUnique(TestCase):
msg = "Unique's return_counts=True failed with axis=1"
assert_array_equal(cnt, np.array([2, 1, 1]), msg)
@skipIf(True, reason="NP_VER: fails on CI with older NumPy")
@xpassIfTorchDynamo # (reason="unique / return_index / nans")
@xfail # (reason="unique / return_index / nans")
def test_unique_nanequals(self):
# issue 20326
a = np.array([1, 1, np.nan, np.nan, np.nan])

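Several hunks in this file (and below) add or remove subtest(...) wrappers inside parametrize lists. A minimal sketch of the pattern, per the helpers in torch.testing._internal.common_utils: each parameter set can carry its own display name and per-case decorators such as an expected-failure marker.

from unittest import expectedFailure as xfail
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    run_tests,
    subtest,
    TestCase,
)

@instantiate_parametrized_tests
class ExampleCase(TestCase):
    @parametrize(
        "value",
        [
            subtest(1, name="one"),
            subtest(2, name="two", decorators=[xfail]),  # known-bad case
        ],
    )
    def test_is_one(self, value):
        assert value == 1

if __name__ == "__main__":
    run_tests()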
View File

@@ -11,21 +11,29 @@ from unittest import expectedFailure as xfail, skipIf as skipif
import hypothesis
import hypothesis.strategies as st
import numpy
import pytest
import torch._numpy as np
from hypothesis.extra.numpy import arrays
from pytest import raises as assert_raises
from torch._numpy.testing import (
assert_,
assert_allclose, # IS_PYPY,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_raises_regex,
assert_warns,
suppress_warnings, # HAS_REFCOUNT, IS_WASM
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
skip = functools.partial(skipif, True)
@@ -39,49 +47,7 @@ IS_PYPY = False
# from numpy lib import digitize, piecewise, trapz, select, trim_zeros, interp
from numpy.lib import delete, extract, insert, msort, place, setxor1d, unwrap, vectorize
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import (
angle,
bartlett,
blackman,
corrcoef,
cov,
diff,
digitize,
flipud,
gradient,
hamming,
hanning,
i0,
interp,
kaiser,
meshgrid,
sinc,
trapz,
trim_zeros,
unique,
)
from numpy.core.numeric import normalize_axis_tuple
from numpy.random import rand
from numpy.testing import (
assert_,
assert_allclose, # IS_PYPY,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_raises_regex,
assert_warns,
suppress_warnings, # HAS_REFCOUNT, IS_WASM
)
else:
import torch._numpy as np
from torch._numpy import (
from torch._numpy import (
angle,
bartlett,
blackman,
@@ -97,21 +63,9 @@ else:
meshgrid,
sinc,
unique,
)
from torch._numpy._util import normalize_axis_tuple
from torch._numpy.random import rand
from torch._numpy.testing import (
assert_,
assert_allclose, # IS_PYPY,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_raises_regex,
assert_warns,
suppress_warnings, # HAS_REFCOUNT, IS_WASM
)
)
from torch._numpy._util import normalize_axis_tuple
from torch._numpy.random import rand
def get_mat(n):
@@ -297,7 +251,7 @@ class TestCopy(TestCase):
assert_equal(a[0, 0], 1)
assert_equal(a_copy[0, 0], 10)
@xpassIfTorchDynamo # (reason="order='F' not implemented")
@xfail # (reason="order='F' not implemented")
def test_order(self):
# It turns out that people rely on np.copy() preserving order by
# default; changing this broke scikit-learn:
@@ -523,7 +477,7 @@ class TestSelect(TestCase):
select(conditions, choices)
@xpassIfTorchDynamo # (reason="TODO: implement")
@xfail # (reason="TODO: implement")
@instantiate_parametrized_tests
class TestInsert(TestCase):
def test_basic(self):
@@ -841,7 +795,7 @@ class TestDiff(TestCase):
assert_raises(np.AxisError, diff, x, append=0, axis=3)
@xpassIfTorchDynamo # (reason="TODO: implement")
@xfail # (reason="TODO: implement")
@instantiate_parametrized_tests
class TestDelete(TestCase):
def setUp(self):
@@ -913,9 +867,7 @@ class TestDelete(TestCase):
with pytest.raises(IndexError):
np.delete([0, 1, 2], np.array([], dtype=float))
@parametrize(
"indexer", [subtest(np.array([1]), name="array([1])"), subtest([1], name="[1]")]
)
@parametrize("indexer", [np.array([1]), [1]])
def test_single_item_array(self, indexer):
a_del_int = delete(self.a, 1)
a_del = delete(self.a, indexer)
@@ -1190,7 +1142,7 @@ class TestAngle(TestCase):
assert_array_almost_equal(z, zo, 11)
@xpassIfTorchDynamo
@xfail # (reason="trim_zeros not implemented")
@instantiate_parametrized_tests
class TestTrimZeros(TestCase):
a = np.array([0, 0, 1, 0, 2, 3, 4, 0])
@@ -1199,11 +1151,7 @@ class TestTrimZeros(TestCase):
# d = a.astype(object)
def values(self):
attr_names = (
"a",
"b",
"c",
) # "d")
attr_names = ("a", "b", "c", "d")
return (getattr(self, name) for name in attr_names)
def test_basic(self):
@@ -1262,7 +1210,7 @@ class TestTrimZeros(TestCase):
assert isinstance(res, list)
@xpassIfTorchDynamo # (reason="TODO: implement")
@xfail # (reason="TODO: implement")
class TestExtins(TestCase):
def test_basic(self):
a = np.array([1, 3, 2, 1, 2, 3, 3])
@@ -1664,7 +1612,7 @@ class TestVectorize(TestCase):
f(x)
@xpassIfTorchDynamo # (reason="TODO: implement")
@xfail # (reason="TODO: implement")
class TestDigitize(TestCase):
def test_forward(self):
x = np.arange(-6, 5)
@@ -1768,9 +1716,7 @@ class TestUnwrap(TestCase):
@instantiate_parametrized_tests
class TestFilterwindows(TestCase):
@parametrize(
"dtype", "Bbhil" + "efd"
) # np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("dtype", np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("M", [0, 1, 10])
def test_hanning(self, dtype: str, M: int) -> None:
scalar = M
@@ -1790,9 +1736,7 @@ class TestFilterwindows(TestCase):
else:
assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
@parametrize(
"dtype", "Bbhil" + "efd"
) # np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("dtype", np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("M", [0, 1, 10])
def test_hamming(self, dtype: str, M: int) -> None:
scalar = M
@@ -1812,9 +1756,7 @@ class TestFilterwindows(TestCase):
else:
assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
@parametrize(
"dtype", "Bbhil" + "efd"
) # np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("dtype", np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("M", [0, 1, 10])
def test_bartlett(self, dtype: str, M: int) -> None:
scalar = M
@@ -1834,9 +1776,7 @@ class TestFilterwindows(TestCase):
else:
assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
@parametrize(
"dtype", "Bbhil" + "efd"
) # np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("dtype", np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("M", [0, 1, 10])
def test_blackman(self, dtype: str, M: int) -> None:
scalar = M
@@ -1856,9 +1796,7 @@ class TestFilterwindows(TestCase):
else:
assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
@parametrize(
"dtype", "Bbhil" + "efd"
) # np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("dtype", np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("M", [0, 1, 10])
def test_kaiser(self, dtype: str, M: int) -> None:
scalar = M
@@ -1879,7 +1817,7 @@ class TestFilterwindows(TestCase):
assert_almost_equal(np.sum(w, axis=0), 10, 15)
@xpassIfTorchDynamo # (reason="TODO: implement")
@xfail # (reason="TODO: implement")
class TestTrapz(TestCase):
def test_simple(self):
x = np.arange(-10, 10, 0.1)
@@ -1948,13 +1886,13 @@ class TestUnique(TestCase):
assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1]))
@xpassIfTorchDynamo # (reason="unique not implemented for 'ComplexDouble'")
@xfail # (reason="unique not implemented for 'ComplexDouble'")
def test_simple_complex(self):
x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])
assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
@xpassIfTorchDynamo # (reason="TODO: implement")
@xfail # (reason="TODO: implement")
class TestCheckFinite(TestCase):
def test_simple(self):
a = [1, 2, 3]
@@ -2599,19 +2537,7 @@ class TestBincount(TestCase):
np.bincount(vals)
parametrize_interp_sc = parametrize(
"sc",
[
subtest(lambda x: np.float_(x), name="real"),
subtest(lambda x: _make_complex(x, 0), name="complex-real"),
subtest(lambda x: _make_complex(0, x), name="complex-imag"),
subtest(lambda x: _make_complex(x, np.multiply(x, -2)), name="complex-both"),
],
)
@xpassIfTorchDynamo # (reason="TODO: implement")
@instantiate_parametrized_tests
@xfail # (reason="TODO: implement")
class TestInterp(TestCase):
def test_exceptions(self):
assert_raises(ValueError, interp, 0, [], [])
@@ -2686,7 +2612,19 @@ class TestInterp(TestCase):
fp = [1, 2, np.nan, 4]
assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4])
@parametrize_interp_sc
@pytest.fixture(
params=[
lambda x: np.float_(x),
lambda x: _make_complex(x, 0),
lambda x: _make_complex(0, x),
lambda x: _make_complex(x, np.multiply(x, -2)),
],
ids=["real", "complex-real", "complex-imag", "complex-both"],
)
def sc(self, request):
"""scale function used by the below tests"""
return request.param
def test_non_finite_any_nan(self, sc):
"""test that nans are propagated"""
assert_equal(np.interp(0.5, [np.nan, 1], sc([0, 10])), sc(np.nan))
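The hunk above trades the module-level parametrize_interp_sc decorator for a parametrized pytest fixture. For readers unfamiliar with that style: a fixture with params fans out every test that requests it, one run per param, with ids naming the variants. A toy example:

import pytest

@pytest.fixture(params=[1, 2, 3], ids=["one", "two", "three"])
def sc(request):
    # Each test requesting `sc` runs once per param.
    return request.param

def test_uses_sc(sc):
    assert sc in (1, 2, 3)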
@@ -2694,7 +2632,6 @@ class TestInterp(TestCase):
assert_equal(np.interp(0.5, [0, 1], sc([np.nan, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [0, 1], sc([0, np.nan])), sc(np.nan))
@parametrize_interp_sc
def test_non_finite_inf(self, sc):
"""Test that interp between opposite infs gives nan"""
assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([0, 10])), sc(np.nan))
@@ -2704,7 +2641,6 @@ class TestInterp(TestCase):
# unless the y values are equal
assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([10, 10])), sc(10))
@parametrize_interp_sc
def test_non_finite_half_inf_xf(self, sc):
"""Test that interp where both axes have a bound at inf gives nan"""
assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan))
@@ -2716,7 +2652,6 @@ class TestInterp(TestCase):
assert_equal(np.interp(0.5, [0, +np.inf], sc([0, -np.inf])), sc(np.nan))
assert_equal(np.interp(0.5, [0, +np.inf], sc([0, +np.inf])), sc(np.nan))
@parametrize_interp_sc
def test_non_finite_half_inf_x(self, sc):
"""Test interp where the x axis has a bound at inf"""
assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10))
@@ -2724,7 +2659,6 @@ class TestInterp(TestCase):
assert_equal(np.interp(0.5, [0, +np.inf], sc([0, 10])), sc(0))
assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0))
@parametrize_interp_sc
def test_non_finite_half_inf_f(self, sc):
"""Test interp where the f axis has a bound at inf"""
assert_equal(np.interp(0.5, [0, 1], sc([0, -np.inf])), sc(-np.inf))
@@ -2852,7 +2786,7 @@ class TestPercentile(TestCase):
x = np.array([[1, 1, 1], [1, 1, 1], [4, 4, 3], [1, 1, 1], [1, 1, 1]])
assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1])
@xpassIfTorchDynamo # (reason="TODO: implement")
@xfail # (reason="TODO: implement")
@parametrize("dtype", np.typecodes["Float"])
def test_linear_nan_1D(self, dtype):
# METHOD 1 of H&F
@@ -2862,14 +2796,14 @@ class TestPercentile(TestCase):
np.testing.assert_equal(res.dtype, arr.dtype)
H_F_TYPE_CODES = [
(int_type, np.float64) for int_type in "Bbhil" # np.typecodes["AllInteger"]
(int_type, np.float64) for int_type in np.typecodes["AllInteger"]
] + [
(np.float16, np.float16),
(np.float32, np.float32),
(np.float64, np.float64),
]
@skipif(numpy.__version__ < "1.24", reason="NEP 50 is new in 1.24")
@xfail # (reason="TODO: implement percentile interpolations")
@parametrize("input_dtype, expected_dtype", H_F_TYPE_CODES)
@parametrize(
"method, expected",
@@ -3142,7 +3076,7 @@ class TestPercentile(TestCase):
b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True)
assert_equal(b, np.array([2.5]))
@xpassIfTorchDynamo # (reason="pytorch percentile does not support tuple axes.")
@xfail # (reason="pytorch percentile does not support tuple axes.")
def test_extended_axis(self):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
@@ -3231,7 +3165,6 @@ class TestPercentile(TestCase):
np.percentile(d, [1, 7], axis=(0, 3), keepdims=True).shape, (2, 1, 5, 7, 1)
)
@skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
@parametrize(
"q",
[
@@ -3239,7 +3172,7 @@ class TestPercentile(TestCase):
subtest(
[1, 7],
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
],
@@ -3253,13 +3186,13 @@ class TestPercentile(TestCase):
subtest(
(0, 1),
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
subtest(
(-3, -1),
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
],
@@ -3309,7 +3242,7 @@ class TestPercentile(TestCase):
assert_equal(np.percentile(d, 1, out=o), o)
assert_equal(np.percentile(d, 1, method="nearest", out=o), o)
@xpassIfTorchDynamo # (reason="np.percentile undocumented nan weirdness")
@xfail # (reason="np.percentile undocumented nan weirdness")
def test_nan_behavior(self):
a = np.arange(24, dtype=float)
a[2] = np.nan
@@ -3402,7 +3335,7 @@ class TestQuantile(TestCase):
assert_equal(np.quantile(x, 1), 3.5)
assert_equal(np.quantile(x, 0.5), 1.75)
@xpassIfTorchDynamo # (reason="quantile w/integers or bools")
@xfail # (reason="quantile w/integers or bools")
def test_correct_quantile_value(self):
a = np.array([True])
tf_quant = np.quantile(True, False)
@@ -3461,8 +3394,8 @@ class TestQuantile(TestCase):
np.quantile(np.arange(100.0), p, method="midpoint")
assert_array_equal(p, p0)
@xpassIfTorchDynamo # (reason="TODO: make quantile preserve integers")
@parametrize("dtype", "Bbhil") # np.typecodes["AllInteger"])
@xfail # (reason="TODO: make quantile preserve integers")
@parametrize("dtype", np.typecodes["AllInteger"])
def test_quantile_preserve_int_type(self, dtype):
res = np.quantile(np.array([1, 2], dtype=dtype), [0.5], method="nearest")
assert res.dtype == dtype
@@ -3473,50 +3406,50 @@ class TestQuantile(TestCase):
subtest(
"inverted_cdf",
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
subtest(
"averaged_inverted_cdf",
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
subtest(
"closest_observation",
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
subtest(
"interpolated_inverted_cdf",
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
subtest(
"hazen",
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
subtest(
"weibull",
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
"linear",
subtest(
"median_unbiased",
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
subtest(
"normal_unbiased",
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
"nearest",
@@ -3584,7 +3517,7 @@ class TestMedian(TestCase):
a = np.array([0.0444502, 0.141249, 0.0463301])
assert_equal(a[-1], np.median(a))
@xpassIfTorchDynamo # (reason="median: scalar output vs 0-dim")
@xfail # (reason="median: scalar output vs 0-dim")
def test_basic_2(self):
# check array scalar result
a = np.array([0.0444502, 0.141249, 0.0463301])
@@ -3693,7 +3626,7 @@ class TestMedian(TestCase):
b[1, 2] = np.nan
assert_equal(np.median(a, 1), b)
@xpassIfTorchDynamo # (reason="median: does not support tuple axes")
@xfail # (reason="median: does not support tuple axes")
def test_nan_behavior_2(self):
a = np.arange(24, dtype=float).reshape(2, 3, 4)
a[1, 2, 3] = np.nan
@@ -3705,7 +3638,7 @@ class TestMedian(TestCase):
b[2] = np.nan
assert_equal(np.median(a, (0, 2)), b)
@xpassIfTorchDynamo # (reason="median: scalar vs 0-dim")
@xfail # (reason="median: scalar vs 0-dim")
def test_nan_behavior_3(self):
a = np.arange(24, dtype=float).reshape(2, 3, 4)
a[1, 2, 3] = np.nan
@@ -3714,7 +3647,7 @@ class TestMedian(TestCase):
# no axis
assert_equal(np.median(a).ndim, 0)
@xpassIfTorchDynamo # (reason="median: torch.quantile does not handle empty tensors")
@xfail # (reason="median: torch.quantile does not handle empty tensors")
@skipif(IS_WASM, reason="fp errors don't work correctly")
def test_empty(self):
# mean(empty array) emits two warnings: empty slice and divide by 0
@@ -3745,7 +3678,7 @@ class TestMedian(TestCase):
assert_equal(np.median(a, axis=2), b)
assert_(w[0].category is RuntimeWarning)
@xpassIfTorchDynamo # (reason="median: tuple axes not implemented")
@xfail # (reason="median: tuple axes not implemented")
def test_extended_axis(self):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
@@ -3795,7 +3728,7 @@ class TestMedian(TestCase):
d = np.ones((3, 5, 7, 11))
assert_equal(np.median(d, axis=None, keepdims=True).shape, (1, 1, 1, 1))
@xpassIfTorchDynamo # (reason="median: tuple axis")
@xfail # (reason="median: tuple axis")
def test_keepdims_2(self):
d = np.ones((3, 5, 7, 11))
assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11))
@@ -3804,7 +3737,6 @@ class TestMedian(TestCase):
assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1))
assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1))
@skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
@parametrize(
"axis",
[
@@ -3814,13 +3746,13 @@ class TestMedian(TestCase):
subtest(
(0, 1),
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
subtest(
(-3, -1),
decorators=[
xpassIfTorchDynamo,
xfail,
],
),
],
@@ -3840,7 +3772,7 @@ class TestMedian(TestCase):
assert_equal(result.shape, shape_out)
@xpassIfTorchDynamo # (reason="TODO: implement")
@xfail # (reason="TODO: implement")
@instantiate_parametrized_tests
class TestSortComplex(TestCase):
@parametrize(

View File

@@ -3,46 +3,32 @@
# from numpy.testing._private.utils import requires_memory
import functools
from unittest import skipIf
from unittest import expectedFailure as xfail, skipIf
import pytest
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy import histogram, histogramdd
skip = functools.partial(skipIf, True)
# from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges
from torch._numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
# assert_array_max_ulp, #assert_raises_regex, suppress_warnings,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
slowTest as slow,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import histogram, histogram_bin_edges, histogramdd
from numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
# assert_array_max_ulp, #assert_raises_regex, suppress_warnings,
)
else:
import torch._numpy as np
from torch._numpy import histogram, histogramdd
from torch._numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
# assert_array_max_ulp, #assert_raises_regex, suppress_warnings,
)
skip = functools.partial(skipIf, True)
class TestHistogram(TestCase):
@@ -203,7 +189,7 @@ class TestHistogram(TestCase):
)
assert_almost_equal(a, [0.2, 0.1, 0.1, 0.075])
@xpassIfTorchDynamo # (reason="histogram complex weights")
@xfail # (reason="histogram complex weights")
def test_exotic_weights(self):
# Test the use of weights that are not integer or floats, but e.g.
# complex numbers or object types.
@@ -265,7 +251,7 @@ class TestHistogram(TestCase):
with assert_raises((RuntimeError, ValueError)):
np.histogram(vals, range=[0.1, 0.01])
@xpassIfTorchDynamo # (reason="edge cases")
@xfail # (reason="edge cases")
def test_bin_edge_cases(self):
# Ensure that floating-point computations correctly place edge cases.
arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
@@ -289,7 +275,7 @@ class TestHistogram(TestCase):
with assert_raises((RuntimeError, ValueError)):
np.histogram(vals, bins=bins)
@xpassIfTorchDynamo # (reason="no uint64")
@xfail # (reason="no uint64")
def test_unsigned_monotonicity_check(self):
# Ensures ValueError is raised if bins not increasing monotonically
# when bins contain unsigned values (see #9222)
@@ -315,7 +301,7 @@ class TestHistogram(TestCase):
np.histogram([np.array(0.5) for i in range(10)] + [0.500000000000001])
np.histogram([np.array(0.5) for i in range(10)] + [0.5])
@xpassIfTorchDynamo # (reason="bins='auto'")
@xfail # (reason="bins='auto'")
def test_some_nan_values(self):
# gh-7503
one_nan = np.array([0, 1, np.nan])
@@ -353,7 +339,7 @@ class TestHistogram(TestCase):
self.do_signed_overflow_bounds(np.short)
self.do_signed_overflow_bounds(np.intc)
@xpassIfTorchDynamo # (reason="int->float conversin loses precision")
@xfail # (reason="int->float conversin loses precision")
def test_signed_overflow_bounds_2(self):
self.do_signed_overflow_bounds(np.int_)
self.do_signed_overflow_bounds(np.longlong)
@@ -396,14 +382,14 @@ class TestHistogram(TestCase):
self.do_precision_lower_bound(float_small, float_large)
self.do_precision_upper_bound(float_small, float_large)
@xpassIfTorchDynamo # (reason="mixed dtypes")
@xfail # (reason="mixed dtypes")
def test_precision(self):
# not looping results in a useful stack trace upon failure
self.do_precision(np.half, np.single)
self.do_precision(np.half, np.double)
self.do_precision(np.single, np.double)
@xpassIfTorchDynamo # (reason="histogram_bin_edges")
@xfail # (reason="histogram_bin_edges")
def test_histogram_bin_edges(self):
hist, e = histogram([1, 2, 3, 4], [1, 2])
edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
@@ -419,7 +405,7 @@ class TestHistogram(TestCase):
assert_array_equal(edges, e)
# @requires_memory(free_bytes=1e10)
@xpassIfTorchDynamo # (reason="pytorch does not support bins = [int, int, array]")
@xfail # (reason="pytorch does not support bins = [int, int, array]")
@slow
def test_big_arrays(self):
sample = np.zeros([100000000, 3])
@@ -430,7 +416,7 @@ class TestHistogram(TestCase):
assert_equal(type(hist), type((1, 2)))
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
@instantiate_parametrized_tests
class TestHistogramOptimBinNums(TestCase):
"""
@@ -712,6 +698,7 @@ class TestHistogramOptimBinNums(TestCase):
"""
Check that weighted data raises a TypeError
"""
pytest.xpass(reason="passes by chance")
estimator_list = ["fd", "scott", "rice", "sturges", "auto"]
for estimator in estimator_list:
assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3])
@@ -853,13 +840,13 @@ class TestHistogramdd(TestCase):
(RuntimeError, ValueError), np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]
)
@xpassIfTorchDynamo # (reason="pytorch does not support bins = [int, int, array]")
@xfail # (reason="pytorch does not support bins = [int, int, array]")
def test_bins_error_2(self):
# mixing scalar (# of bins) and explicit bin arrays, ugh
x = np.arange(8).reshape(2, 4)
assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
@xpassIfTorchDynamo # (reason="pytorch does not support bins = [int, int, array]")
@xfail # (reason="pytorch does not support bins = [int, int, array]")
def test_inf_edges(self):
# Test using +/-inf bin edges works. See #1788.
x = np.arange(6).reshape(3, 2)
@@ -910,7 +897,7 @@ class TestHistogramdd(TestCase):
range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]],
)
@xpassIfTorchDynamo # (reason="pytorch does not allow equal entries")
@xfail # (reason="pytorch does not allow equal entries")
def test_equal_edges(self):
"""Test that adjacent entries in an edge array can be equal"""
x = np.array([0, 1, 2])
@@ -941,7 +928,7 @@ class TestHistogramdd(TestCase):
def test_large_integers(self):
big = 2**60 # Too large to represent with a full precision float
x = np.asarray([0], dtype=np.int64)
x = np.array([0], np.int64)
x_edges = np.array([-1, +1], np.int64)
y = big + x
y_edges = big + x_edges

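The "too large to represent with a full precision float" comment above can be made concrete: float64 carries a 53-bit significand, so at magnitude 2**60 adjacent integers collapse to the same double.

big = 2**60
assert float(big + 1) == float(big)   # spacing between doubles here is 2**8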
View File

@@ -4,52 +4,29 @@ import functools
from unittest import expectedFailure as xfail, skipIf
import torch._numpy as np
from pytest import raises as assert_raises # , assert_raises_regex,
from torch._numpy import diag_indices, diag_indices_from, fill_diagonal, index_exp, s_
from torch._numpy.testing import (
assert_,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
skip = functools.partial(skipIf, True)
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import diag_indices, diag_indices_from, fill_diagonal, index_exp, s_
from numpy.testing import (
assert_,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_raises_regex,
)
else:
import torch._numpy as np
from torch._numpy import (
diag_indices,
diag_indices_from,
fill_diagonal,
index_exp,
s_,
)
from torch._numpy.testing import (
assert_,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
)
@xpassIfTorchDynamo # (reason="unravel_index not implemented")
@xfail # (reason="unravel_index not implemented")
@instantiate_parametrized_tests
class TestRavelUnravelIndex(TestCase):
def test_basic(self):
@@ -451,7 +428,7 @@ class TestIx_(TestCase):
class TestC(TestCase):
@xpassIfTorchDynamo # (reason="c_ not implemented")
@xfail # (reason="c_ not implemented")
def test_c_(self):
a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]
assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])

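For reference on the unravel_index tests marked above: unravel_index maps a flat index to multi-dimensional coordinates, and ravel_multi_index inverts it. Shown with real NumPy, since the hunk notes it is not implemented in torch._numpy:

import numpy as np

shape = (3, 4)
for flat in range(12):  # 12 == 3 * 4 elements
    coords = np.unravel_index(flat, shape)
    assert np.ravel_multi_index(coords, shape) == flat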
View File

@@ -5,62 +5,34 @@ import sys
from unittest import expectedFailure as xfail, skipIf as skipif
from pytest import raises as assert_raises
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy import (
array_split,
column_stack,
dsplit,
dstack,
expand_dims,
hsplit,
kron,
put_along_axis,
split,
take_along_axis,
tile,
vsplit,
)
from torch._numpy.random import rand, randint
from torch._numpy.testing import assert_, assert_array_equal, assert_equal
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
xfailIfTorchDynamo,
xpassIfTorchDynamo,
)
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import (
apply_along_axis,
array_split,
column_stack,
dsplit,
dstack,
expand_dims,
hsplit,
kron,
put_along_axis,
split,
take_along_axis,
tile,
vsplit,
)
from numpy.random import rand, randint
from numpy.testing import assert_, assert_array_equal, assert_equal
else:
import torch._numpy as np
from torch._numpy import (
array_split,
column_stack,
dsplit,
dstack,
expand_dims,
hsplit,
kron,
put_along_axis,
split,
take_along_axis,
tile,
vsplit,
)
from torch._numpy.random import rand, randint
from torch._numpy.testing import assert_, assert_array_equal, assert_equal
skip = functools.partial(skipif, True)
@@ -154,7 +126,7 @@ class TestPutAlongAxis(TestCase):
assert_equal(i_min, i_max)
@xpassIfTorchDynamo # (
@xfail # (
# reason="RuntimeError: Expected index [1, 2, 5] to be smaller than self [3, 4, 1] apart from dimension 1")
def test_broadcast(self):
"""Test that non-indexing dimensions are broadcast in both directions"""
@@ -164,7 +136,7 @@ class TestPutAlongAxis(TestCase):
assert_equal(take_along_axis(a, ai, axis=1), 20)
@xpassIfTorchDynamo # (reason="apply_along_axis not implemented")
@xfail # (reason="apply_along_axis not implemented")
class TestApplyAlongAxis(TestCase):
def test_simple(self):
a = np.ones((20, 10), "d")
@@ -707,8 +679,6 @@ class TestSqueeze(TestCase):
assert_equal(res.ndim, 0)
assert type(res) is np.ndarray
@xfailIfTorchDynamo
def test_basic_2(self):
aa = np.ones((3, 1, 4, 1, 1))
assert aa.squeeze().tensor._base is aa.tensor
@@ -742,7 +712,7 @@ class TestSqueeze(TestCase):
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
@xpassIfTorchDynamo # (reason="XXX: noop in torch, while numpy raises")
@xfail # (reason="XXX: noop in torch, while numpy raises")
def test_squeeze_axis_handling(self):
with assert_raises(ValueError):
np.squeeze(np.array([[1], [2], [3]]), axis=0)
@@ -840,7 +810,7 @@ class TestTile(TestCase):
assert_equal(large, klarge)
@xpassIfTorchDynamo # (reason="TODO: implement")
@xfail # (reason="TODO: implement")
class TestMayShareMemory(TestCase):
def test_basic(self):
d = np.ones((50, 60))

View File

@@ -8,72 +8,40 @@ import functools
from unittest import expectedFailure as xfail, skipIf as skipif
import pytest
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy import (
arange,
array,
diag,
eye,
fliplr,
flipud,
histogram2d,
ones,
tri, # mask_indices,
tril_indices,
tril_indices_from,
triu_indices,
triu_indices_from,
vander,
zeros,
)
from torch._numpy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal, # assert_array_max_ulp,
assert_equal,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import (
arange,
array,
diag,
eye,
fliplr,
flipud,
histogram2d,
ones,
tri, # mask_indices,
tril_indices,
tril_indices_from,
triu_indices,
triu_indices_from,
vander,
zeros,
)
from numpy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal, # assert_array_max_ulp,
assert_equal,
)
else:
import torch._numpy as np
from torch._numpy import (
arange,
array,
diag,
eye,
fliplr,
flipud,
histogram2d,
ones,
tri, # mask_indices,
tril_indices,
tril_indices_from,
triu_indices,
triu_indices_from,
vander,
zeros,
)
from torch._numpy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal, # assert_array_max_ulp,
assert_equal,
)
skip = functools.partial(skipif, True)
@@ -133,7 +101,7 @@ class TestEye(TestCase):
def test_bool(self):
assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
@xpassIfTorchDynamo # (reason="TODO: implement order=non-default")
@xfail # (reason="TODO: implement order=non-default")
def test_order(self):
mat_c = eye(4, 3, k=-1)
mat_f = eye(4, 3, k=-1, order="F")
@@ -159,10 +127,9 @@ class TestDiag(TestCase):
assert_equal(diag(vals, k=2), b)
assert_equal(diag(vals, k=-2), c)
def test_matrix(self):
self.check_matrix(vals=(100 * get_mat(5) + 1).astype("l"))
def check_matrix(self, vals):
def test_matrix(self, vals=None):
if vals is None:
vals = (100 * get_mat(5) + 1).astype("l")
b = zeros((5,))
for k in range(5):
b[k] = vals[k, k]
@@ -175,10 +142,10 @@ class TestDiag(TestCase):
b[k] = vals[k + 2, k]
assert_equal(diag(vals, -2), b[:3])
@xpassIfTorchDynamo # (reason="TODO implement orders")
@xfail # (reason="TODO implement orders")
def test_fortran_order(self):
vals = array((100 * get_mat(5) + 1), order="F", dtype="l")
self.check_matrix(vals)
self.test_matrix(vals)
def test_diag_bounds(self):
A = [[1, 2], [3, 4], [5, 6]]
@@ -284,7 +251,7 @@ class TestHistogram2d(TestCase):
# assert_array_max_ulp(a, np.zeros((4, 4)))
assert_allclose(a, np.zeros((4, 4)), atol=1e-15)
@xpassIfTorchDynamo # (reason="pytorch does not support bins = [int, array]")
@xfail # (reason="pytorch does not support bins = [int, array]")
def test_binparameter_combination(self):
x = array([0, 0.09207008, 0.64575234, 0.12875982, 0.47390599, 0.59944483, 1])
y = array([0, 0.14344267, 0.48988575, 0.30558665, 0.44700682, 0.15886423, 1])
@@ -318,7 +285,6 @@ class TestHistogram2d(TestCase):
assert_array_equal(H, answer)
assert_array_equal(xe, array([0.0, 0.25, 0.5, 0.75, 1]))
@skip(reason="NP_VER: fails on CI with older NumPy")
@parametrize("x_len, y_len", [(10, 11), (20, 19)])
def test_bad_length(self, x_len, y_len):
x, y = np.ones(x_len), np.ones(y_len)
@@ -402,7 +368,7 @@ class TestTri(TestCase):
iu1 = mask_indices(3, np.triu, 1)
assert_array_equal(a[iu1], array([1, 2, 5]))
@xpassIfTorchDynamo # (reason="np.tril_indices == our tuple(tril_indices)")
@xfail # (reason="np.tril_indices == our tuple(tril_indices)")
def test_tril_indices(self):
# indices without and with offset
il1 = tril_indices(4)
@@ -462,7 +428,7 @@ class TestTri(TestCase):
)
@xpassIfTorchDynamo # (reason="np.triu_indices == our tuple(triu_indices)")
@xfail # (reason="np.triu_indices == our tuple(triu_indices)")
class TestTriuIndices(TestCase):
def test_triu_indices(self):
iu1 = triu_indices(4)

View File

@@ -5,44 +5,22 @@ import functools
from unittest import expectedFailure as xfail, skipIf as skipif
import torch._numpy as np
from pytest import raises as assert_raises
from torch.testing._internal.common_utils import (
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
from torch._numpy import (
common_type,
iscomplex,
iscomplexobj,
isneginf,
isposinf,
isreal,
isrealobj,
nan_to_num,
real_if_close,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import (
common_type,
iscomplex,
iscomplexobj,
isneginf,
isposinf,
isreal,
isrealobj,
nan_to_num,
real_if_close,
)
from numpy.testing import assert_, assert_array_equal, assert_equal
else:
import torch._numpy as np
from torch._numpy import (
common_type,
iscomplex,
iscomplexobj,
isneginf,
isposinf,
isreal,
isrealobj,
nan_to_num,
real_if_close,
)
from torch._numpy.testing import assert_, assert_array_equal, assert_equal
from torch._numpy.testing import assert_, assert_array_equal, assert_equal
from torch.testing._internal.common_utils import run_tests, TestCase
skip = functools.partial(skipif, True)
@@ -51,7 +29,7 @@ def assert_all(x):
assert_(np.all(x), x)
@xpassIfTorchDynamo # (reason="common_type not implemented")
@xfail # (reason="common_type not implemented")
class TestCommonType(TestCase):
def test_basic(self):
ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
@@ -118,7 +96,7 @@ class TestMintypecode(TestCase):
assert_equal(mintypecode("idD"), "D")
@xpassIfTorchDynamo # (reason="TODO: decide on if [1] is a scalar or not")
@xfail # (reason="TODO: decide on if [1] is a scalar or not")
class TestIsscalar(TestCase):
def test_basic(self):
assert_(np.isscalar(3))

View File

@@ -15,86 +15,46 @@ from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest
import pytest
import torch._numpy as np
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from pytest import raises as assert_raises
from torch._numpy import (
array,
asarray,
atleast_2d,
cdouble,
csingle,
dot,
double,
identity,
inf,
linalg,
matmul,
single,
swapaxes,
)
from torch._numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm
from torch._numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_equal,
suppress_warnings,
# assert_raises_regex, HAS_LAPACK64, IS_WASM
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
slowTest as slow,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import (
array,
asarray,
atleast_2d,
cdouble,
csingle,
dot,
double,
identity,
inf,
linalg,
matmul,
single,
swapaxes,
)
from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm
from numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_equal,
suppress_warnings,
# assert_raises_regex, HAS_LAPACK64, IS_WASM
)
else:
import torch._numpy as np
from torch._numpy import (
array,
asarray,
atleast_2d,
cdouble,
csingle,
dot,
double,
identity,
inf,
linalg,
matmul,
single,
swapaxes,
)
from torch._numpy.linalg import (
LinAlgError,
matrix_power,
matrix_rank,
multi_dot,
norm,
)
from torch._numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_equal,
suppress_warnings,
# assert_raises_regex, HAS_LAPACK64, IS_WASM
)
skip = functools.partial(skipif, True)
# FIXME: slow tests have never run (= are broken)
slow = skip
IS_WASM = False
HAS_LAPACK64 = False
@@ -347,11 +307,11 @@ def _make_generalized_cases():
if not isinstance(case.a, np.ndarray):
continue
a = np.stack([case.a, 2 * case.a, 3 * case.a])
a = np.array([case.a, 2 * case.a, 3 * case.a])
if case.b is None:
b = None
else:
b = np.stack([case.b, 7 * case.b, 6 * case.b])
b = np.array([case.b, 7 * case.b, 6 * case.b])
new_case = LinalgCase(
case.name + "_tile3", a, b, tags=case.tags | {"generalized"}
)
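The np.stack -> np.array swap in the hunk above is behavior-preserving for these inputs: given same-shape arrays, np.array([a, 2*a, 3*a]) builds the same batch as np.stack along a new leading axis.

import numpy as np

a = np.eye(2)
batch_array = np.array([a, 2 * a, 3 * a])          # shape (3, 2, 2)
batch_stack = np.stack([a, 2 * a, 3 * a], axis=0)  # shape (3, 2, 2)
assert np.array_equal(batch_array, batch_stack)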
@@ -448,6 +408,7 @@ class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
class HermitianGeneralizedTestCase(LinalgTestCase):
@xfail # (reason="sort complex")
@slow
def test_generalized_herm_cases(self):
self.check_cases(require={"generalized", "hermitian"}, exclude={"size-0"})
@@ -841,7 +802,7 @@ class TestCond(CondCases, TestCase):
for A, p in itertools.product(As, p_neg):
linalg.cond(A, p)
@skip(reason="NP_VER: fails on CI") # (
@xfail # (
# True, run=False, reason="Platform/LAPACK-dependent failure, see gh-18914"
# )
def test_nan(self):
@@ -929,7 +890,7 @@ class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad)
assert_almost_equal(d, np.prod(ev, axis=-1))
assert_almost_equal(s * np.exp(ld), np.prod(ev, axis=-1), single_decimal=5)
assert_almost_equal(s * np.exp(ld), np.prod(ev, axis=-1))
s = np.atleast_1d(s)
ld = np.atleast_1d(ld)
@@ -1015,7 +976,7 @@ class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
@instantiate_parametrized_tests
class TestLstsq(LstsqCases, TestCase):
@xpassIfTorchDynamo # (reason="Lstsq: we use the future default =None")
@xfail # (reason="Lstsq: we use the future default =None")
def test_future_rcond(self):
a = np.array(
[
@@ -1345,8 +1306,8 @@ class _TestNormGeneral(_TestNormBase):
def test_vector_return_type(self):
a = np.array([1, 0, 1])
exact_types = "Bbhil" # np.typecodes["AllInteger"]
inexact_types = "efdFD" # np.typecodes["AllFloat"]
exact_types = np.typecodes["AllInteger"]
inexact_types = np.typecodes["AllFloat"]
all_types = exact_types + inexact_types
@@ -1524,7 +1485,7 @@ class _TestNorm2D(_TestNormBase):
def test_matrix_return_type(self):
a = np.array([[1, 0, 1], [0, 1, 1]])
exact_types = "Bbhil" # np.typecodes["AllInteger"]
exact_types = np.typecodes["AllInteger"]
# float32, complex64, float64, complex128 types are the only types
# allowed by `linalg`, which performs the matrix operations used
@@ -1760,7 +1721,7 @@ class TestQR(TestCase):
assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1)
@xpassIfTorchDynamo # (reason="torch does not allow qr(..., mode='raw'")
@xfail # (reason="torch does not allow qr(..., mode='raw'")
@parametrize("m, n", [(3, 0), (0, 3), (0, 0)])
def test_qr_empty(self, m, n):
k = min(m, n)
@@ -1774,7 +1735,7 @@ class TestQR(TestCase):
assert_equal(h.shape, (n, m))
assert_equal(tau.shape, (k,))
@xpassIfTorchDynamo # (reason="torch does not allow qr(..., mode='raw'")
@xfail # (reason="torch does not allow qr(..., mode='raw'")
def test_mode_raw(self):
# The factorization is not unique and varies between libraries,
# so it is not possible to check against known values. Functional
@@ -1909,7 +1870,7 @@ class TestCholesky(TestCase):
class TestMisc(TestCase):
@xpassIfTorchDynamo # (reason="endianness")
@xfail # (reason="endianness")
def test_byteorder_check(self):
# Byte order check should pass for native order
if sys.byteorder == "little":
@@ -2244,7 +2205,7 @@ class TestTensorsolve(TestCase):
class TestMisc2(TestCase):
@xpassIfTorchDynamo # (reason="TODO")
@xfail # (reason="TODO")
def test_unsupported_commontype(self):
# linalg gracefully handles unsupported type
arr = np.array([[1, -2], [2, 5]], dtype="float16")
@@ -2252,6 +2213,7 @@ class TestMisc2(TestCase):
with assert_raises(TypeError):
linalg.cholesky(arr)
@xfail # (reason="TODO")
# @slow
# @pytest.mark.xfail(not HAS_LAPACK64, run=False,
# reason="Numpy not compiled with 64-bit BLAS/LAPACK")

View File

@@ -3,20 +3,10 @@
import pytest
from torch.testing._internal.common_utils import (
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
import torch._numpy as np
from torch._numpy.testing import assert_equal
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_equal
else:
import torch._numpy as np
from torch._numpy.testing import assert_equal
from torch.testing._internal.common_utils import run_tests, TestCase
class TestAppend(TestCase):

View File

@@ -1,34 +1,26 @@
# Owner(s): ["module: dynamo"]
import itertools
from unittest import expectedFailure as xfail, skipIf as skipif
import numpy
from unittest import expectedFailure as xfail, skipIf as skip
import pytest
import torch._numpy as np
# import numpy as np
from pytest import raises as assert_raises
from torch._numpy.testing import assert_equal
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfTorchDynamo,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_equal
else:
import torch._numpy as np
from torch._numpy.testing import assert_equal
class TestIndexing(TestCase):
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attr, type of a[0, 0]")
def test_indexing_simple(self):
a = np.array([[1, 2, 3], [4, 5, 6]])
@@ -44,7 +36,6 @@ class TestIndexing(TestCase):
class TestReshape(TestCase):
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_reshape_function(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
@ -53,7 +44,6 @@ class TestReshape(TestCase):
arr = np.asarray(arr)
assert np.transpose(arr, (1, 0)).tensor._base is arr.tensor
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_reshape_method(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
arr_shape = arr.shape
@ -95,7 +85,6 @@ class TestReshape(TestCase):
class TestTranspose(TestCase):
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_transpose_function(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
@ -104,7 +93,6 @@ class TestTranspose(TestCase):
arr = np.asarray(arr)
assert np.transpose(arr, (1, 0)).tensor._base is arr.tensor
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_transpose_method(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
@ -117,7 +105,6 @@ class TestTranspose(TestCase):
class TestRavel(TestCase):
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_ravel_function(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
@ -126,7 +113,6 @@ class TestRavel(TestCase):
arr = np.asarray(a)
assert np.ravel(arr).tensor._base is arr.tensor
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_ravel_method(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
@ -203,7 +189,6 @@ class TestArgmaxArgminCommon(TestCase):
(256,),
]
@skipif(numpy.__version__ < "1.22", reason="NP_VER: fails on NumPy 1.21.x")
@parametrize(
"size, axis",
list(
@ -287,7 +272,7 @@ class TestArgmaxArgminCommon(TestCase):
with pytest.raises(ValueError):
method(arr.T, axis=axis, out=wrong_outarray, keepdims=True)
@skipif(True, reason="XXX: need ndarray.choose")
@skip(True, reason="XXX: need ndarray.choose")
@parametrize("method", ["max", "min"])
def test_all(self, method):
# a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
@ -411,38 +396,27 @@ class TestArgmax(TestCase):
)
]
nan_arr = darr + [
subtest(([0, 1, 2, 3, complex(0, np.nan)], 4), decorators=[xpassIfTorchDynamo]),
subtest(([0, 1, 2, 3, complex(np.nan, 0)], 4), decorators=[xpassIfTorchDynamo]),
subtest(([0, 1, 2, complex(np.nan, 0), 3], 3), decorators=[xpassIfTorchDynamo]),
subtest(([0, 1, 2, complex(0, np.nan), 3], 3), decorators=[xpassIfTorchDynamo]),
subtest(([complex(0, np.nan), 0, 1, 2, 3], 0), decorators=[xpassIfTorchDynamo]),
subtest(
([complex(np.nan, np.nan), 0, 1, 2, 3], 0), decorators=[xpassIfTorchDynamo]
),
subtest(([0, 1, 2, 3, complex(0, np.nan)], 4), decorators=[xfail]),
subtest(([0, 1, 2, 3, complex(np.nan, 0)], 4), decorators=[xfail]),
subtest(([0, 1, 2, complex(np.nan, 0), 3], 3), decorators=[xfail]),
subtest(([0, 1, 2, complex(0, np.nan), 3], 3), decorators=[xfail]),
subtest(([complex(0, np.nan), 0, 1, 2, 3], 0), decorators=[xfail]),
subtest(([complex(np.nan, np.nan), 0, 1, 2, 3], 0), decorators=[xfail]),
subtest(
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
decorators=[xpassIfTorchDynamo],
decorators=[xfail],
),
subtest(
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
decorators=[xpassIfTorchDynamo],
decorators=[xfail],
),
subtest(
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
decorators=[xpassIfTorchDynamo],
),
subtest(
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
decorators=[xpassIfTorchDynamo],
),
subtest(
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
decorators=[xpassIfTorchDynamo],
),
subtest(
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
decorators=[xpassIfTorchDynamo],
decorators=[xfail],
),
subtest(([complex(0, 0), complex(0, 2), complex(0, 1)], 1), decorators=[xfail]),
subtest(([complex(1, 0), complex(0, 2), complex(0, 1)], 0), decorators=[xfail]),
subtest(([complex(1, 0), complex(0, 2), complex(1, 1)], 2), decorators=[xfail]),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
@ -645,12 +619,11 @@ class TestNoExtraMethods(TestCase):
class TestIter(TestCase):
@skipIfTorchDynamo
def test_iter_1d(self):
# numpy generates array scalars, we do 0D arrays
a = np.arange(5)
lst = list(a)
assert all(type(x) == np.ndarray for x in lst), f"{[type(x) for x in lst]}"
assert all(type(x) == np.ndarray for x in lst)
assert all(x.ndim == 0 for x in lst)
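The comment marks a deliberate divergence: iterating a 1-D NumPy array yields array scalars, while torch._numpy yields 0-D ndarrays, which is exactly what the assertions pin down. In eager mode the difference looks like:

import numpy
import torch._numpy as tnp

print(type(list(numpy.arange(3))[0]))  # numpy.int64 on typical builds -- an array scalar
x = list(tnp.arange(3))[0]
print(type(x), x.ndim)                 # torch._numpy ndarray with ndim == 0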
def test_iter_2d(self):

View File

@ -1,44 +1,28 @@
# Owner(s): ["module: dynamo"]
from unittest import skipIf, SkipTest
import numpy
from unittest import expectedFailure as xfail, SkipTest
import pytest
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy import _util
from torch._numpy.testing import (
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_equal,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
xpassIfTorchDynamo,
)
# If we are going to trace through these, we should use NumPy
# If testing in eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
import numpy.core.numeric as _util # for normalize_axis_tuple
from numpy.testing import (
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_equal,
)
else:
import torch._numpy as np
from torch._numpy import _util
from torch._numpy.testing import (
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_equal,
)
class TestFlatnonzero(TestCase):
def test_basic(self):
x = np.arange(-2, 3)
@ -128,7 +112,7 @@ class TestMean(TestCase):
# of float32.
assert np.mean(np.ones(100000, dtype="float16")) == 1
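The truncated comment refers to NumPy's documented float16 mean behavior: the reduction accumulates in float32 and casts back, so 1e5 half-precision ones still average to exactly 1:

import numpy as np

x = np.ones(100000, dtype="float16")
m = np.mean(x)
assert m == 1                 # float32 accumulator, so no precision drift
assert m.dtype == np.float16  # but the result dtype stays float16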
@xpassIfTorchDynamo # (reason="XXX: mean(..., where=...) not implemented")
@xfail # (reason="XXX: mean(..., where=...) not implemented")
def test_mean_where(self):
a = np.arange(16).reshape((4, 4))
wh_full = np.array(
@ -194,8 +178,7 @@ class TestSum(TestCase):
assert_allclose(res_float, 4.0, atol=1e-15)
assert res_float.dtype == "float64"
@skipIf(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
@xpassIfTorchDynamo # (reason="sum: does not warn on overflow")
@xfail # (reason="sum: does not warn on overflow")
def test_sum_dtypes_warnings(self):
for dt in (int, np.float16, np.float32, np.float64):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, 128, 1024, 1235):
@ -262,7 +245,7 @@ class TestSum(TestCase):
d += d
assert_allclose(d, 2.0 + 2j, atol=1.5e-7)
@xpassIfTorchDynamo # (reason="initial=... need implementing")
@xfail # (reason="initial=... need implementing")
def test_sum_initial(self):
# Integer, single axis
assert_equal(np.sum([3], initial=2), 5)
@ -276,7 +259,7 @@ class TestSum(TestCase):
[12, 12, 12],
)
@xpassIfTorchDynamo # (reason="where=... need implementing")
@xfail # (reason="where=... need implementing")
def test_sum_where(self):
# More extensive tests done in test_reduction_with_where.
assert_equal(np.sum([[1.0, 2.0], [3.0, 4.0]], where=[True, False]), 4.0)
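The where= mask drops elements from the reduction; in the assertion above only column 0 survives, giving 1.0 + 3.0 = 4.0. The mask also broadcasts per axis:

import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]])
print(np.sum(a, where=[True, False]))          # 4.0 -- column 0 only
print(np.sum(a, axis=1, where=[True, False]))  # [1. 3.] -- mask broadcast across rows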
@ -319,10 +302,6 @@ fails_out_arg = {
np.count_nonzero,
}
restricts_dtype_casts = {np.var, np.std}
fails_empty_tuple = {np.argmin, np.argmax}
@instantiate_parametrized_tests
class TestGenericReductions(TestCase):
@ -357,9 +336,6 @@ class TestGenericReductions(TestCase):
@parametrize_func
def test_axis_empty_generic(self, func):
if func in fails_empty_tuple:
raise SkipTest("func(..., axis=()) is not valid")
a = np.array([[0, 0, 1], [1, 0, 1]])
assert_array_equal(func(a, axis=()), func(np.expand_dims(a, axis=0), axis=0))
@ -385,7 +361,6 @@ class TestGenericReductions(TestCase):
expanded = np.expand_dims(func(a, axis=axis), axis=axis)
assert_array_equal(with_keepdims, expanded)
@skipIf(numpy.__version__ < "1.24", reason="NP_VER: fails on CI w/old numpy")
@parametrize_func
def test_keepdims_generic_axis_none(self, func):
a = np.arange(2 * 3 * 4).reshape((2, 3, 4))
@ -430,7 +405,7 @@ class TestGenericReductions(TestCase):
# Here we follow pytorch, since the result is a superset
# of the numpy functionality
@parametrize("keepdims", [True, False])
@parametrize("keepdims", [True, False, None])
@parametrize("dtype", [bool, "int32", "float64"])
@parametrize_func
@parametrize_axis
@ -440,8 +415,6 @@ class TestGenericReductions(TestCase):
raise SkipTest(f"{func.__name__} does not have out= arg.")
if func in fails_axes_tuples:
raise SkipTest(f"{func.__name__} does not hangle tuple axis.")
if func in restricts_dtype_casts:
raise SkipTest(f"{func.__name__}: test implies float->int casts")
a = np.arange(2 * 3 * 4).reshape((2, 3, 4))
result = func(a, axis=axis, keepdims=keepdims).astype(dtype)
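For reference, keepdims=True keeps each reduced axis as size 1 so the result broadcasts against the input; the extra None case follows torch, where it presumably just selects the default. In NumPy:

import numpy as np

a = np.arange(24).reshape(2, 3, 4)
print(np.sum(a, axis=1).shape)                 # (2, 4)
print(np.sum(a, axis=1, keepdims=True).shape)  # (2, 1, 4) -- broadcasts against a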

View File

@ -9,25 +9,18 @@ Extensive tests of this sort of functionality are in numpy_tests/core/*scalar*
Also test the isscalar function (which is deliberately a bit more lax).
"""
import torch._numpy as np
from torch._numpy.testing import assert_equal
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
xfailIfTorchDynamo,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_equal
else:
import torch._numpy as np
from torch._numpy.testing import assert_equal
parametrize_value = parametrize(
"value",
[
@ -86,7 +79,6 @@ class TestArrayScalars(TestCase):
assert arr == 42
# @xfailIfTorchDynamo
@instantiate_parametrized_tests
class TestIsScalar(TestCase):
#
@ -97,12 +89,12 @@ class TestIsScalar(TestCase):
scalars = [
subtest(42, "literal"),
subtest(int(42.0), "int"),
subtest(np.float32(42), "float32"),
subtest(np.array(42), "array_0D", decorators=[xfailIfTorchDynamo]),
subtest([42], "list", decorators=[xfailIfTorchDynamo]),
subtest([[42]], "list-list", decorators=[xfailIfTorchDynamo]),
subtest(np.array([42]), "array_1D", decorators=[xfailIfTorchDynamo]),
subtest(np.array([[42]]), "array_2D", decorators=[xfailIfTorchDynamo]),
np.float32(42),
np.array(42),
[42],
[[42]],
np.array([42]),
np.array([[42]]),
]
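For contrast, stock NumPy rejects every composite entry in this list (which is why the Dynamo-mode runs carried xfail decorators before this revert), while torch._numpy.isscalar is deliberately laxer, as the module docstring says. Under real NumPy:

import numpy as np

assert np.isscalar(42)
assert np.isscalar(np.float32(42))
assert np.isscalar("s")                # strings count as scalars in NumPy
assert not np.isscalar(np.array(42))   # 0-D arrays do not
assert not np.isscalar([42])           # nor do lists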
import math
@ -110,8 +102,8 @@ class TestIsScalar(TestCase):
not_scalars = [
int,
np.float32,
subtest("s", decorators=[xfailIfTorchDynamo]),
subtest("string", decorators=[xfailIfTorchDynamo]),
"s",
"string",
(),
[],
math.sin,

View File

@ -13,22 +13,16 @@ import operator
from unittest import skipIf as skip, SkipTest
import torch._numpy as np
from pytest import raises as assert_raises
from torch._numpy.testing import assert_equal
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_equal
else:
import torch._numpy as np
from torch._numpy.testing import assert_equal
parametrize_unary_ufuncs = parametrize("ufunc", [np.sin])
parametrize_casting = parametrize(

View File

@ -914,8 +914,6 @@ class NumpyNdarrayVariable(TensorVariable):
return insert_into_graph()
elif name in ["base", "flags", "dtype"]:
unimplemented(f"TODO: add support for ndarray.{name}")
elif name in ["__version__"]:
unimplemented("delegate np.__version__ to NumPy")
if result is None:
raise NotImplementedError()
return result

View File

@ -585,6 +585,7 @@ def _conv_corr_impl(a, v, mode):
v = _util.cast_if_needed(v, dt)
padding = v.shape[0] - 1 if mode == "full" else mode
if padding == "same" and v.shape[0] % 2 == 0:
# UserWarning: Using padding='same' with even kernel lengths and odd
# dilation may require a zero-padded copy of the input be created
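Context for the padding arithmetic: np.convolve's 'full' mode corresponds to a conv1d padding of len(v) - 1, while the string modes pass through as-is; the even-kernel warning matches NumPy's output-length rules:

import numpy as np

a, v = np.ones(5), np.ones(4)                 # even-length kernel
print(np.convolve(a, v, mode="full").shape)   # (8,) = 5 + 4 - 1
print(np.convolve(a, v, mode="same").shape)   # (5,) = len(a); needs asymmetric padding
print(np.convolve(a, v, mode="valid").shape)  # (2,) = 5 - 4 + 1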

View File

@ -69,6 +69,7 @@ def normalize_seq_array_like(x, parm=None):
def normalize_dtype(dtype, parm=None):
# cf _decorators.dtype_to_torch
torch_dtype = None
if dtype is not None:
dtype = _dtypes.dtype(dtype)
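The normalizer canonicalizes any NumPy-style dtype spec (string, Python type, np.dtype) before resolving it to a torch dtype. A rough, hypothetical stand-in for the same user-visible mapping, not the actual _dtypes implementation:

import numpy as np
import torch

def normalize_dtype_sketch(dtype):
    # Resolve None/str/type/np.dtype specs to a torch.dtype.
    if dtype is None:
        return None
    np_dtype = np.dtype(dtype)  # canonicalize the spec first
    return torch.from_numpy(np.empty(0, np_dtype)).dtype

assert normalize_dtype_sketch("float32") is torch.float32
assert normalize_dtype_sketch(int) is torch.int64  # on typical 64-bit builds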

View File

@ -1189,14 +1189,6 @@ if TEST_WITH_TORCHDYNAMO:
torch._inductor.config.fallback_random = True
def xpassIfTorchDynamo(func):
return func if TEST_WITH_TORCHDYNAMO else unittest.expectedFailure(func)
def xfailIfTorchDynamo(func):
return unittest.expectedFailure(func) if TEST_WITH_TORCHDYNAMO else func
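Before this revert, these helpers made a test's expectation depend on the runner: xpassIfTorchDynamo marked tests that fail under eager torch._numpy but pass once Dynamo traces real NumPy, and xfailIfTorchDynamo the reverse. Usage was a plain decorator:

from torch.testing._internal.common_utils import (
    TestCase,
    run_tests,
    xpassIfTorchDynamo,  # available before this revert
)

class TestExample(TestCase):
    @xpassIfTorchDynamo  # e.g. mean(..., where=...) exists in NumPy only
    def test_mean_where(self):
        ...

if __name__ == "__main__":
    run_tests()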
def skipIfTorchDynamo(msg="test doesn't currently work with dynamo"):
def decorator(fn):
if not isinstance(fn, type):