mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-06 12:20:52 +01:00
Summary: This causes some functionality to not work, such as the disabling issues e.g., https://github.com/pytorch/pytorch/issues/66641 cc pietern mrshenli pritamdamania87 zhaojuanmao satgera rohan-varma gqchen aazzolini osalpekar jiayisuse SciPioneer H-Huang Pull Request resolved: https://github.com/pytorch/pytorch/pull/66900 Reviewed By: seemethere Differential Revision: D31778293 Pulled By: janeyx99 fbshipit-source-id: df3023ddaf7969ffb60117d1e1d7e36d87bc6139
278 lines
8.8 KiB
Python
278 lines
8.8 KiB
Python
# Owner(s): ["module: autograd"]
|
|
|
|
from torch.testing._internal.common_utils import TestCase, run_tests
|
|
|
|
import torch
|
|
|
|
class TestPublicBindings(TestCase):
    """Guards the set of public (non-underscore) names exposed by torch._C."""

    def test_no_new_bindings(self):
        """
        This test aims to stop the introduction of new JIT bindings into torch._C
        whose names do not start with _. Such bindings are made available as
        torch.XXX, which may not be desirable.

        If your change causes this test to fail, add your new binding to a relevant
        submodule of torch._C, such as torch._C._jit (or other relevant submodule of
        torch._C). If your binding really needs to be available as torch.XXX, add it
        to torch._C and add it to the allowlist below.

        If you have removed a binding, remove it from the allowlist as well.
        """
        # This allowlist contains every binding in torch._C that is copied into torch at
        # the time of writing. It was generated with
        #
        # {elem for elem in dir(torch._C) if not elem.startswith("_")}
        #
        # Entries are unique and kept in case-insensitive sorted order; the original
        # list carried duplicates (harmless in a set literal, but noisy to maintain)
        # and a couple of single-quoted outliers, both normalized here.
        torch_C_allowlist_superset = {
            "AggregationType",
            "AliasDb",
            "AnyType",
            "Argument",
            "ArgumentSpec",
            "autocast_decrement_nesting",
            "autocast_increment_nesting",
            "AVG",
            "BenchmarkConfig",
            "BenchmarkExecutionStats",
            "BFloat16StorageBase",
            "Block",
            "BoolStorageBase",
            "BoolType",
            "BufferDict",
            "ByteStorageBase",
            "CallStack",
            "Capsule",
            "CharStorageBase",
            "ClassType",
            "clear_autocast_cache",
            "Code",
            "CompilationUnit",
            "CompleteArgumentSpec",
            "ComplexDoubleStorageBase",
            "ComplexFloatStorageBase",
            "ComplexType",
            "ConcreteModuleType",
            "ConcreteModuleTypeBuilder",
            "CONV_BN_FUSION",
            "cpp",
            "CudaBFloat16StorageBase",
            "CudaBFloat16TensorBase",
            "CudaBoolStorageBase",
            "CudaBoolTensorBase",
            "CudaByteStorageBase",
            "CudaByteTensorBase",
            "CudaCharStorageBase",
            "CudaCharTensorBase",
            "CudaComplexDoubleStorageBase",
            "CudaComplexDoubleTensorBase",
            "CudaComplexFloatStorageBase",
            "CudaComplexFloatTensorBase",
            "CudaDoubleStorageBase",
            "CudaDoubleTensorBase",
            "CudaFloatStorageBase",
            "CudaFloatTensorBase",
            "CudaHalfStorageBase",
            "CudaHalfTensorBase",
            "CudaIntStorageBase",
            "CudaIntTensorBase",
            "CudaLongStorageBase",
            "CudaLongTensorBase",
            "CudaShortStorageBase",
            "CudaShortTensorBase",
            "DeepCopyMemoTable",
            "default_generator",
            "DeserializationStorageContext",
            "device",
            "DeviceObjType",
            "DictType",
            "DisableTorchFunction",
            "DoubleStorageBase",
            "dtype",
            "EnumType",
            "ErrorReport",
            "ExecutionPlan",
            "FatalError",
            "FileCheck",
            "finfo",
            "FloatStorageBase",
            "FloatType",
            "fork",
            "FunctionSchema",
            "FUSE_ADD_RELU",
            "Future",
            "FutureType",
            "Generator",
            "get_autocast_cpu_dtype",
            "get_autocast_gpu_dtype",
            "get_default_dtype",
            "get_num_interop_threads",
            "get_num_threads",
            "Gradient",
            "Graph",
            "GraphExecutorState",
            "HalfStorageBase",
            "has_cuda",
            "has_cudnn",
            "has_lapack",
            "has_mkl",
            "has_mkldnn",
            "has_mlc",
            "has_openmp",
            "has_spectral",
            "HOIST_CONV_PACKED_PARAMS",
            "iinfo",
            "import_ir_module",
            "import_ir_module_from_buffer",
            "InferredType",
            "init_num_threads",
            "INSERT_FOLD_PREPACK_OPS",
            "InterfaceType",
            "IntStorageBase",
            "IntType",
            "IODescriptor",
            "is_anomaly_enabled",
            "is_autocast_cache_enabled",
            "is_autocast_cpu_enabled",
            "is_autocast_enabled",
            "is_grad_enabled",
            "is_inference_mode_enabled",
            "JITException",
            "layout",
            "ListType",
            "LiteScriptModule",
            "LockingLogger",
            "LoggerBase",
            "LongStorageBase",
            "memory_format",
            "merge_type_from_type_comment",
            "MobileOptimizerType",
            "ModuleDict",
            "Node",
            "NoneType",
            "NoopLogger",
            "NumberType",
            "OperatorInfo",
            "OptionalType",
            "ParameterDict",
            "parse_ir",
            "parse_schema",
            "parse_type_comment",
            "PyObjectType",
            "PyTorchFileReader",
            "PyTorchFileWriter",
            "QInt32StorageBase",
            "QInt8StorageBase",
            "qscheme",
            "QUInt2x4StorageBase",
            "QUInt4x2StorageBase",
            "QUInt8StorageBase",
            "read_vitals",
            "REMOVE_DROPOUT",
            "RRefType",
            "ScriptClass",
            "ScriptClassFunction",
            "ScriptDict",
            "ScriptDictIterator",
            "ScriptDictKeyIterator",
            "ScriptFunction",
            "ScriptList",
            "ScriptListIterator",
            "ScriptMethod",
            "ScriptModule",
            "ScriptModuleSerializer",
            "ScriptObject",
            "ScriptObjectProperty",
            "SerializationStorageContext",
            "set_anomaly_enabled",
            "set_autocast_cache_enabled",
            "set_autocast_cpu_dtype",
            "set_autocast_cpu_enabled",
            "set_autocast_enabled",
            "set_autocast_gpu_dtype",
            "set_flush_denormal",
            "set_num_interop_threads",
            "set_num_threads",
            "set_vital",
            "ShortStorageBase",
            "Size",
            "StaticModule",
            "Stream",
            "StreamObjType",
            "StringType",
            "SUM",
            "TensorType",
            "ThroughputBenchmark",
            "TracingState",
            "TupleType",
            "Type",
            "unify_type_list",
            "UnionType",
            "Use",
            "Value",
            "vitals_enabled",
            "wait",
        }
        # Public names actually exposed by the compiled extension right now.
        torch_C_bindings = {elem for elem in dir(torch._C) if not elem.startswith("_")}

        # Check that the torch._C bindings are all in the allowlist. Since
        # bindings can change based on how PyTorch was compiled (e.g. with/without
        # CUDA), the two may not be an exact match but the bindings should be
        # a subset of the allowlist.
        difference = torch_C_bindings.difference(torch_C_allowlist_superset)
        msg = f"torch._C had bindings that are not present in the allowlist:\n{difference}"
        self.assertTrue(torch_C_bindings.issubset(torch_C_allowlist_superset), msg)
|
|
|
|
|
|
# Standard PyTorch test-file entry point: run this file's tests when it is
# executed directly (e.g. `python test_public_bindings.py`); importing the
# module (as a test collector does) has no side effects.
if __name__ == '__main__':
    run_tests()
|